xref: /dragonfly/sys/vm/vm_object.c (revision fcfde60f)
1 /*
2  * Copyright (c) 1991, 1993, 2013
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
33  *
34  *
35  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36  * All rights reserved.
37  *
38  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
39  *
40  * Permission to use, copy, modify and distribute this software and
41  * its documentation is hereby granted, provided that both the copyright
42  * notice and this permission notice appear in all copies of the
43  * software, derivative works or modified versions, and any portions
44  * thereof, and that both notices appear in supporting documentation.
45  *
46  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
48  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
49  *
50  * Carnegie Mellon requests users of this software to return to
51  *
52  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
53  *  School of Computer Science
54  *  Carnegie Mellon University
55  *  Pittsburgh PA 15213-3890
56  *
57  * any improvements or extensions that they make and grant Carnegie the
58  * rights to redistribute these changes.
59  *
60  * $FreeBSD: src/sys/vm/vm_object.c,v 1.171.2.8 2003/05/26 19:17:56 alc Exp $
61  */
62 
63 /*
64  *	Virtual memory object module.
65  */
66 
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/proc.h>		/* for curproc, pageproc */
70 #include <sys/thread.h>
71 #include <sys/vnode.h>
72 #include <sys/vmmeter.h>
73 #include <sys/mman.h>
74 #include <sys/mount.h>
75 #include <sys/kernel.h>
76 #include <sys/sysctl.h>
77 #include <sys/refcount.h>
78 
79 #include <vm/vm.h>
80 #include <vm/vm_param.h>
81 #include <vm/pmap.h>
82 #include <vm/vm_map.h>
83 #include <vm/vm_object.h>
84 #include <vm/vm_page.h>
85 #include <vm/vm_pageout.h>
86 #include <vm/vm_pager.h>
87 #include <vm/swap_pager.h>
88 #include <vm/vm_kern.h>
89 #include <vm/vm_extern.h>
90 #include <vm/vm_zone.h>
91 
92 #include <vm/vm_page2.h>
93 
94 #include <machine/specialreg.h>
95 
96 #define EASY_SCAN_FACTOR	8
97 
98 static void	vm_object_qcollapse(vm_object_t object,
99 				    vm_object_t backing_object);
100 static void	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
101 					     int pagerflags);
102 static void	vm_object_lock_init(vm_object_t);
103 
104 /*
105  *	Virtual memory objects maintain the actual data
106  *	associated with allocated virtual memory.  A given
107  *	page of memory exists within exactly one object.
108  *
109  *	An object is only deallocated when all "references"
110  *	are given up.  Only one "reference" to a given
111  *	region of an object should be writeable.
112  *
113  *	Associated with each object is a list of all resident
114  *	memory pages belonging to that object; this list is
115  *	maintained by the "vm_page" module, and locked by the object's
116  *	lock.
117  *
118  *	Each object also records a "pager" routine which is
119  *	used to retrieve (and store) pages to the proper backing
120  *	storage.  In addition, objects may be backed by other
121  *	objects from which they were virtual-copied.
122  *
123  *	The only items within the object structure which are
124  *	modified after time of creation are:
125  *		reference count		locked by object's lock
126  *		pager routine		locked by object's lock
127  *
128  */
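
/*
 * Illustrative sketch (not part of the original source): the basic lifecycle
 * of an anonymous object as implemented by the routines below.  The names
 * "obj" and "size" are placeholders.
 *
 *	vm_object_t obj;
 *
 *	obj = vm_object_allocate(OBJT_DEFAULT, size);	ref_count starts at 1
 *	vm_object_hold(obj);				lock + hold for access
 *	... operate on obj ...
 *	vm_object_drop(obj);
 *	vm_object_deallocate(obj);			last ref, terminates obj
 */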
129 
130 struct vm_object kernel_object;
131 
132 static long object_collapses;
133 static long object_bypasses;
134 
135 struct vm_object_hash vm_object_hash[VMOBJ_HSIZE];
136 
137 MALLOC_DEFINE(M_VM_OBJECT, "vm_object", "vm_object structures");
138 
139 #define VMOBJ_HASH_PRIME1	66555444443333333ULL
140 #define VMOBJ_HASH_PRIME2	989042931893ULL
141 
142 int vm_object_debug;
143 SYSCTL_INT(_vm, OID_AUTO, object_debug, CTLFLAG_RW, &vm_object_debug, 0, "");
144 
145 static __inline
146 struct vm_object_hash *
147 vmobj_hash(vm_object_t obj)
148 {
149 	uintptr_t hash1;
150 	uintptr_t hash2;
151 
152 	hash1 = (uintptr_t)obj + ((uintptr_t)obj >> 18);
153 	hash1 %= VMOBJ_HASH_PRIME1;
154 	hash2 = ((uintptr_t)obj >> 8) + ((uintptr_t)obj >> 24);
155 	hash2 %= VMOBJ_HASH_PRIME2;
156 	return (&vm_object_hash[(hash1 ^ hash2) & VMOBJ_HMASK]);
157 }
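
/*
 * Illustrative sketch (an assumption, not from the original source): code
 * that scans a hash bucket's object_list does so under that bucket's token,
 * the same way _vm_object_allocate() and vm_object_terminate() protect
 * their insert/remove below.
 *
 *	struct vm_object_hash *hash;
 *
 *	hash = &vm_object_hash[i];
 *	lwkt_gettoken(&hash->token);
 *	TAILQ_FOREACH(obj, &hash->list, object_list) {
 *		... inspect obj ...
 *	}
 *	lwkt_reltoken(&hash->token);
 */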
158 
159 #if defined(DEBUG_LOCKS)
160 
161 #define vm_object_vndeallocate(obj, vpp)	\
162                 debugvm_object_vndeallocate(obj, vpp, __FILE__, __LINE__)
163 
164 /*
165  * Debug helper to track hold/drop/ref/deallocate calls.
166  */
167 static void
168 debugvm_object_add(vm_object_t obj, char *file, int line, int addrem)
169 {
170 	int i;
171 
172 	i = atomic_fetchadd_int(&obj->debug_index, 1);
173 	i = i & (VMOBJ_DEBUG_ARRAY_SIZE - 1);
174 	ksnprintf(obj->debug_hold_thrs[i],
175 		  sizeof(obj->debug_hold_thrs[i]),
176 		  "%c%d:(%d):%s",
177 		  (addrem == -1 ? '-' : (addrem == 1 ? '+' : '=')),
178 		  (curthread->td_proc ? curthread->td_proc->p_pid : -1),
179 		  obj->ref_count,
180 		  curthread->td_comm);
181 	obj->debug_hold_file[i] = file;
182 	obj->debug_hold_line[i] = line;
183 #if 0
 184 	/* Uncomment for debugging obj refs/derefs in reproducible cases */
185 	if (strcmp(curthread->td_comm, "sshd") == 0) {
186 		kprintf("%d %p refs=%d ar=%d file: %s/%d\n",
187 			(curthread->td_proc ? curthread->td_proc->p_pid : -1),
188 			obj, obj->ref_count, addrem, file, line);
189 	}
190 #endif
191 }
192 
193 #endif
194 
195 /*
196  * Misc low level routines
197  */
198 static void
199 vm_object_lock_init(vm_object_t obj)
200 {
201 #if defined(DEBUG_LOCKS)
202 	int i;
203 
204 	obj->debug_index = 0;
205 	for (i = 0; i < VMOBJ_DEBUG_ARRAY_SIZE; i++) {
206 		obj->debug_hold_thrs[i][0] = 0;
207 		obj->debug_hold_file[i] = NULL;
208 		obj->debug_hold_line[i] = 0;
209 	}
210 #endif
211 }
212 
213 void
214 vm_object_lock_swap(void)
215 {
216 	lwkt_token_swap();
217 }
218 
219 void
220 vm_object_lock(vm_object_t obj)
221 {
222 	lwkt_gettoken(&obj->token);
223 }
224 
225 /*
 226  * Returns TRUE on success
227  */
228 static int
229 vm_object_lock_try(vm_object_t obj)
230 {
231 	return(lwkt_trytoken(&obj->token));
232 }
233 
234 void
235 vm_object_lock_shared(vm_object_t obj)
236 {
237 	lwkt_gettoken_shared(&obj->token);
238 }
239 
240 void
241 vm_object_unlock(vm_object_t obj)
242 {
243 	lwkt_reltoken(&obj->token);
244 }
245 
246 void
247 vm_object_upgrade(vm_object_t obj)
248 {
249 	lwkt_reltoken(&obj->token);
250 	lwkt_gettoken(&obj->token);
251 }
252 
253 void
254 vm_object_downgrade(vm_object_t obj)
255 {
256 	lwkt_reltoken(&obj->token);
257 	lwkt_gettoken_shared(&obj->token);
258 }
259 
260 static __inline void
261 vm_object_assert_held(vm_object_t obj)
262 {
263 	ASSERT_LWKT_TOKEN_HELD(&obj->token);
264 }
265 
266 static __inline int
267 vm_quickcolor(void)
268 {
269 	globaldata_t gd = mycpu;
270 	int pg_color;
271 
272 	pg_color = (int)(intptr_t)gd->gd_curthread >> 10;
273 	pg_color += gd->gd_quick_color;
274 	gd->gd_quick_color += PQ_PRIME2;
275 
276 	return pg_color;
277 }
278 
279 void
280 VMOBJDEBUG(vm_object_hold)(vm_object_t obj VMOBJDBARGS)
281 {
282 	KKASSERT(obj != NULL);
283 
284 	/*
 285 	 * Object must be held (object allocation is stable due to the caller's
 286 	 * context, typically already holding the token on a parent object)
287 	 * prior to potentially blocking on the lock, otherwise the object
288 	 * can get ripped away from us.
289 	 */
290 	refcount_acquire(&obj->hold_count);
291 	vm_object_lock(obj);
292 
293 #if defined(DEBUG_LOCKS)
294 	debugvm_object_add(obj, file, line, 1);
295 #endif
296 }
297 
298 int
299 VMOBJDEBUG(vm_object_hold_try)(vm_object_t obj VMOBJDBARGS)
300 {
301 	KKASSERT(obj != NULL);
302 
303 	/*
 304 	 * Object must be held (object allocation is stable due to the caller's
 305 	 * context, typically already holding the token on a parent object)
306 	 * prior to potentially blocking on the lock, otherwise the object
307 	 * can get ripped away from us.
308 	 */
309 	refcount_acquire(&obj->hold_count);
310 	if (vm_object_lock_try(obj) == 0) {
311 		if (refcount_release(&obj->hold_count)) {
312 			if (obj->ref_count == 0 && (obj->flags & OBJ_DEAD))
313 				kfree(obj, M_VM_OBJECT);
314 		}
315 		return(0);
316 	}
317 
318 #if defined(DEBUG_LOCKS)
319 	debugvm_object_add(obj, file, line, 1);
320 #endif
321 	return(1);
322 }
323 
324 void
325 VMOBJDEBUG(vm_object_hold_shared)(vm_object_t obj VMOBJDBARGS)
326 {
327 	KKASSERT(obj != NULL);
328 
329 	/*
 330 	 * Object must be held (object allocation is stable due to the caller's
 331 	 * context, typically already holding the token on a parent object)
332 	 * prior to potentially blocking on the lock, otherwise the object
333 	 * can get ripped away from us.
334 	 */
335 	refcount_acquire(&obj->hold_count);
336 	vm_object_lock_shared(obj);
337 
338 #if defined(DEBUG_LOCKS)
339 	debugvm_object_add(obj, file, line, 1);
340 #endif
341 }
342 
343 /*
344  * Drop the token and hold_count on the object.
345  *
346  * WARNING! Token might be shared.
347  */
348 void
349 VMOBJDEBUG(vm_object_drop)(vm_object_t obj VMOBJDBARGS)
350 {
351 	if (obj == NULL)
352 		return;
353 
354 	/*
355 	 * No new holders should be possible once we drop hold_count 1->0 as
356 	 * there is no longer any way to reference the object.
357 	 */
358 	KKASSERT(obj->hold_count > 0);
359 	if (refcount_release(&obj->hold_count)) {
360 #if defined(DEBUG_LOCKS)
361 		debugvm_object_add(obj, file, line, -1);
362 #endif
363 
364 		if (obj->ref_count == 0 && (obj->flags & OBJ_DEAD)) {
365 			vm_object_unlock(obj);
366 			kfree(obj, M_VM_OBJECT);
367 		} else {
368 			vm_object_unlock(obj);
369 		}
370 	} else {
371 #if defined(DEBUG_LOCKS)
372 		debugvm_object_add(obj, file, line, -1);
373 #endif
374 		vm_object_unlock(obj);
375 	}
376 }
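
/*
 * Illustrative sketch (not from the original source): the hold/drop pairing
 * used throughout this file.  The hold keeps the object's memory stable
 * across blocking operations while the embedded token provides the lock;
 * vm_object_drop() releases both and may free a dead, unreferenced object.
 *
 *	vm_object_hold(obj);		or vm_object_hold_shared(obj)
 *	... examine or modify obj ...
 *	vm_object_drop(obj);
 */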
377 
378 /*
379  * Initialize a freshly allocated object, returning a held object.
380  *
381  * Used only by vm_object_allocate(), zinitna() and vm_object_init().
382  *
383  * No requirements.
384  */
385 void
386 _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
387 {
388 	struct vm_object_hash *hash;
389 
390 	RB_INIT(&object->rb_memq);
391 	LIST_INIT(&object->shadow_head);
392 	lwkt_token_init(&object->token, "vmobj");
393 
394 	object->type = type;
395 	object->size = size;
396 	object->ref_count = 1;
397 	object->memattr = VM_MEMATTR_DEFAULT;
398 	object->hold_count = 0;
399 	object->flags = 0;
400 	if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
401 		vm_object_set_flag(object, OBJ_ONEMAPPING);
402 	object->paging_in_progress = 0;
403 	object->resident_page_count = 0;
404 	object->shadow_count = 0;
405 	/* cpu localization twist */
406 	object->pg_color = vm_quickcolor();
407 	object->handle = NULL;
408 	object->backing_object = NULL;
409 	object->backing_object_offset = (vm_ooffset_t)0;
410 
411 	atomic_add_int(&object->generation, 1);
412 	object->swblock_count = 0;
413 	RB_INIT(&object->swblock_root);
414 	vm_object_lock_init(object);
415 	pmap_object_init(object);
416 
417 	vm_object_hold(object);
418 
419 	hash = vmobj_hash(object);
420 	lwkt_gettoken(&hash->token);
421 	TAILQ_INSERT_TAIL(&hash->list, object, object_list);
422 	lwkt_reltoken(&hash->token);
423 }
424 
425 /*
426  * Initialize a VM object.
427  */
428 void
429 vm_object_init(vm_object_t object, vm_pindex_t size)
430 {
431 	_vm_object_allocate(OBJT_DEFAULT, size, object);
432 	vm_object_drop(object);
433 }
434 
435 /*
436  * Initialize the VM objects module.
437  *
438  * Called from the low level boot code only.  Note that this occurs before
439  * kmalloc is initialized so we cannot allocate any VM objects.
440  */
441 void
442 vm_object_init1(void)
443 {
444 	int i;
445 
446 	for (i = 0; i < VMOBJ_HSIZE; ++i) {
447 		TAILQ_INIT(&vm_object_hash[i].list);
448 		lwkt_token_init(&vm_object_hash[i].token, "vmobjlst");
449 	}
450 
451 	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(KvaEnd),
452 			    &kernel_object);
453 	vm_object_drop(&kernel_object);
454 }
455 
456 void
457 vm_object_init2(void)
458 {
459 	kmalloc_set_unlimited(M_VM_OBJECT);
460 }
461 
462 /*
463  * Allocate and return a new object of the specified type and size.
464  *
465  * No requirements.
466  */
467 vm_object_t
468 vm_object_allocate(objtype_t type, vm_pindex_t size)
469 {
470 	vm_object_t obj;
471 
472 	obj = kmalloc(sizeof(*obj), M_VM_OBJECT, M_INTWAIT|M_ZERO);
473 	_vm_object_allocate(type, size, obj);
474 	vm_object_drop(obj);
475 
476 	return (obj);
477 }
478 
479 /*
480  * This version returns a held object, allowing further atomic initialization
481  * of the object.
482  */
483 vm_object_t
484 vm_object_allocate_hold(objtype_t type, vm_pindex_t size)
485 {
486 	vm_object_t obj;
487 
488 	obj = kmalloc(sizeof(*obj), M_VM_OBJECT, M_INTWAIT|M_ZERO);
489 	_vm_object_allocate(type, size, obj);
490 
491 	return (obj);
492 }
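
/*
 * Illustrative sketch (not from the original source): the difference between
 * the two allocators.  vm_object_allocate() returns an unheld object, while
 * vm_object_allocate_hold() returns it held so that additional fields (the
 * "handle" assignment below is a hypothetical example) can be initialized
 * before anyone else can find the object.
 *
 *	obj = vm_object_allocate_hold(OBJT_SWAP, size);
 *	obj->handle = handle;
 *	vm_object_drop(obj);
 */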
493 
494 /*
495  * Add an additional reference to a vm_object.  The object must already be
496  * held.  The original non-lock version is no longer supported.  The object
497  * must NOT be chain locked by anyone at the time the reference is added.
498  *
499  * Referencing a chain-locked object can blow up the fairly sensitive
500  * ref_count and shadow_count tests in the deallocator.  Most callers
501  * will call vm_object_chain_wait() prior to calling
502  * vm_object_reference_locked() to avoid the case.  The held token
503  * allows the caller to pair the wait and ref.
504  *
505  * The object must be held, but may be held shared if desired (hence why
506  * we use an atomic op).
507  */
508 void
509 VMOBJDEBUG(vm_object_reference_locked)(vm_object_t object VMOBJDBARGS)
510 {
511 	KKASSERT(object != NULL);
512 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
513 	KKASSERT((object->chainlk & (CHAINLK_EXCL | CHAINLK_MASK)) == 0);
514 	atomic_add_int(&object->ref_count, 1);
515 	if (object->type == OBJT_VNODE) {
516 		vref(object->handle);
517 		/* XXX what if the vnode is being destroyed? */
518 	}
519 #if defined(DEBUG_LOCKS)
520 	debugvm_object_add(object, file, line, 1);
521 #endif
522 }
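
/*
 * Illustrative sketch (not from the original source): the chain-wait plus
 * reference pairing described above.  The held token prevents a new chain
 * lock from slipping in between the wait and the ref.
 *
 *	vm_object_hold(obj);
 *	vm_object_chain_wait(obj, 0);
 *	vm_object_reference_locked(obj);
 *	vm_object_drop(obj);
 */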
523 
524 /*
525  * This version explicitly allows the chain to be held (i.e. by the
526  * caller).  The token must also be held.
527  */
528 void
529 VMOBJDEBUG(vm_object_reference_locked_chain_held)(vm_object_t object
530 	   VMOBJDBARGS)
531 {
532 	KKASSERT(object != NULL);
533 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
534 	atomic_add_int(&object->ref_count, 1);
535 	if (object->type == OBJT_VNODE) {
536 		vref(object->handle);
537 		/* XXX what if the vnode is being destroyed? */
538 	}
539 #if defined(DEBUG_LOCKS)
540 	debugvm_object_add(object, file, line, 1);
541 #endif
542 }
543 
544 /*
545  * This version is only allowed for vnode objects.
546  */
547 void
548 VMOBJDEBUG(vm_object_reference_quick)(vm_object_t object VMOBJDBARGS)
549 {
550 	KKASSERT(object->type == OBJT_VNODE);
551 	atomic_add_int(&object->ref_count, 1);
552 	vref(object->handle);
553 #if defined(DEBUG_LOCKS)
554 	debugvm_object_add(object, file, line, 1);
555 #endif
556 }
557 
558 /*
559  * Object OBJ_CHAINLOCK lock handling.
560  *
561  * The caller can chain-lock backing objects recursively and then
562  * use vm_object_chain_release_all() to undo the whole chain.
563  *
564  * Chain locks are used to prevent collapses and are only applicable
565  * to OBJT_DEFAULT and OBJT_SWAP objects.  Chain locking operations
566  * on other object types are ignored.  This is also important because
567  * it allows e.g. the vnode underlying a memory mapping to take concurrent
568  * faults.
569  *
570  * The object must usually be held on entry, though intermediate
571  * objects need not be held on release.  The object must be held exclusively,
572  * NOT shared.  Note that the prefault path checks the shared state and
573  * avoids using the chain functions.
574  */
575 void
576 vm_object_chain_wait(vm_object_t object, int shared)
577 {
578 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
579 	for (;;) {
580 		uint32_t chainlk = object->chainlk;
581 
582 		cpu_ccfence();
583 		if (shared) {
584 			if (chainlk & (CHAINLK_EXCL | CHAINLK_EXCLREQ)) {
585 				tsleep_interlock(object, 0);
586 				if (atomic_cmpset_int(&object->chainlk,
587 						      chainlk,
588 						      chainlk | CHAINLK_WAIT)) {
589 					tsleep(object, PINTERLOCKED,
590 					       "objchns", 0);
591 				}
592 				/* retry */
593 			} else {
594 				break;
595 			}
596 			/* retry */
597 		} else {
598 			if (chainlk & (CHAINLK_MASK | CHAINLK_EXCL)) {
599 				tsleep_interlock(object, 0);
600 				if (atomic_cmpset_int(&object->chainlk,
601 						      chainlk,
602 						      chainlk | CHAINLK_WAIT))
603 				{
604 					tsleep(object, PINTERLOCKED,
605 					       "objchnx", 0);
606 				}
607 				/* retry */
608 			} else {
609 				if (atomic_cmpset_int(&object->chainlk,
610 						      chainlk,
611 						      chainlk & ~CHAINLK_WAIT))
612 				{
613 					if (chainlk & CHAINLK_WAIT)
614 						wakeup(object);
615 					break;
616 				}
617 				/* retry */
618 			}
619 		}
620 		/* retry */
621 	}
622 }
623 
624 void
625 vm_object_chain_acquire(vm_object_t object, int shared)
626 {
627 	if (object->type != OBJT_DEFAULT && object->type != OBJT_SWAP)
628 		return;
629 	if (vm_shared_fault == 0)
630 		shared = 0;
631 
632 	for (;;) {
633 		uint32_t chainlk = object->chainlk;
634 
635 		cpu_ccfence();
636 		if (shared) {
637 			if (chainlk & (CHAINLK_EXCL | CHAINLK_EXCLREQ)) {
638 				tsleep_interlock(object, 0);
639 				if (atomic_cmpset_int(&object->chainlk,
640 						      chainlk,
641 						      chainlk | CHAINLK_WAIT)) {
642 					tsleep(object, PINTERLOCKED,
643 					       "objchns", 0);
644 				}
645 				/* retry */
646 			} else if (atomic_cmpset_int(&object->chainlk,
647 					      chainlk, chainlk + 1)) {
648 				break;
649 			}
650 			/* retry */
651 		} else {
652 			if (chainlk & (CHAINLK_MASK | CHAINLK_EXCL)) {
653 				tsleep_interlock(object, 0);
654 				if (atomic_cmpset_int(&object->chainlk,
655 						      chainlk,
656 						      chainlk |
657 						       CHAINLK_WAIT |
658 						       CHAINLK_EXCLREQ)) {
659 					tsleep(object, PINTERLOCKED,
660 					       "objchnx", 0);
661 				}
662 				/* retry */
663 			} else {
664 				if (atomic_cmpset_int(&object->chainlk,
665 						      chainlk,
666 						      (chainlk | CHAINLK_EXCL) &
667 						      ~(CHAINLK_EXCLREQ |
668 							CHAINLK_WAIT))) {
669 					if (chainlk & CHAINLK_WAIT)
670 						wakeup(object);
671 					break;
672 				}
673 				/* retry */
674 			}
675 		}
676 		/* retry */
677 	}
678 }
679 
680 void
681 vm_object_chain_release(vm_object_t object)
682 {
683 	/*ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));*/
684 	if (object->type != OBJT_DEFAULT && object->type != OBJT_SWAP)
685 		return;
686 	KKASSERT(object->chainlk & (CHAINLK_MASK | CHAINLK_EXCL));
687 	for (;;) {
688 		uint32_t chainlk = object->chainlk;
689 
690 		cpu_ccfence();
691 		if (chainlk & CHAINLK_MASK) {
692 			if ((chainlk & CHAINLK_MASK) == 1 &&
693 			    atomic_cmpset_int(&object->chainlk,
694 					      chainlk,
695 					      (chainlk - 1) & ~CHAINLK_WAIT)) {
696 				if (chainlk & CHAINLK_WAIT)
697 					wakeup(object);
698 				break;
699 			}
700 			if ((chainlk & CHAINLK_MASK) > 1 &&
701 			    atomic_cmpset_int(&object->chainlk,
702 					      chainlk, chainlk - 1)) {
703 				break;
704 			}
705 			/* retry */
706 		} else {
707 			KKASSERT(chainlk & CHAINLK_EXCL);
708 			if (atomic_cmpset_int(&object->chainlk,
709 					      chainlk,
710 					      chainlk & ~(CHAINLK_EXCL |
711 							  CHAINLK_WAIT))) {
712 				if (chainlk & CHAINLK_WAIT)
713 					wakeup(object);
714 				break;
715 			}
716 		}
717 	}
718 }
719 
720 /*
721  * Release the chain from first_object through and including stopobj.
722  * The caller is typically holding the first and last object locked
723  * (shared or exclusive) to prevent destruction races.
724  *
725  * We release stopobj first as an optimization as this object is most
726  * likely to be shared across multiple processes.
727  */
728 void
729 vm_object_chain_release_all(vm_object_t first_object, vm_object_t stopobj)
730 {
731 	vm_object_t backing_object;
732 	vm_object_t object;
733 
734 	vm_object_chain_release(stopobj);
735 	object = first_object;
736 
737 	while (object != stopobj) {
738 		KKASSERT(object);
739 		backing_object = object->backing_object;
740 		vm_object_chain_release(object);
741 		object = backing_object;
742 	}
743 }
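
/*
 * Illustrative sketch (an assumption, not from the original source): a
 * hypothetical walk that chain-locks each object from first_object down the
 * backing chain to stopobj and later unwinds the whole chain in one call.
 *
 *	object = first_object;
 *	for (;;) {
 *		vm_object_chain_acquire(object, 0);
 *		if (object == stopobj)
 *			break;
 *		object = object->backing_object;
 *	}
 *	...
 *	vm_object_chain_release_all(first_object, stopobj);
 */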
744 
745 /*
746  * Dereference an object and its underlying vnode.  The object may be
747  * held shared.  On return the object will remain held.
748  *
749  * This function may return a vnode in *vpp which the caller must release
750  * after the caller drops its own lock.  If vpp is NULL, we assume that
751  * the caller was holding an exclusive lock on the object and we vrele()
752  * the vp ourselves.
753  */
754 static void
755 VMOBJDEBUG(vm_object_vndeallocate)(vm_object_t object, struct vnode **vpp
756 				   VMOBJDBARGS)
757 {
758 	struct vnode *vp = (struct vnode *) object->handle;
759 
760 	KASSERT(object->type == OBJT_VNODE,
761 	    ("vm_object_vndeallocate: not a vnode object"));
762 	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
763 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
764 #ifdef INVARIANTS
765 	if (object->ref_count == 0) {
766 		vprint("vm_object_vndeallocate", vp);
767 		panic("vm_object_vndeallocate: bad object reference count");
768 	}
769 #endif
770 	for (;;) {
771 		int count = object->ref_count;
772 		cpu_ccfence();
773 		if (count == 1) {
774 			vm_object_upgrade(object);
775 			if (atomic_cmpset_int(&object->ref_count, count, 0)) {
776 				vclrflags(vp, VTEXT);
777 				break;
778 			}
779 		} else {
780 			if (atomic_cmpset_int(&object->ref_count,
781 					      count, count - 1)) {
782 				break;
783 			}
784 		}
785 		/* retry */
786 	}
787 #if defined(DEBUG_LOCKS)
788 	debugvm_object_add(object, file, line, -1);
789 #endif
790 
791 	/*
792 	 * vrele or return the vp to vrele.  We can only safely vrele(vp)
793 	 * if the object was locked exclusively.  But there are two races
794 	 * here.
795 	 *
796 	 * We had to upgrade the object above to safely clear VTEXT
797 	 * but the alternative path where the shared lock is retained
798 	 * can STILL race to 0 in other paths and cause our own vrele()
799 	 * to terminate the vnode.  We can't allow that if the VM object
800 	 * is still locked shared.
801 	 */
802 	if (vpp)
803 		*vpp = vp;
804 	else
805 		vrele(vp);
806 }
807 
808 /*
809  * Release a reference to the specified object, gained either through a
810  * vm_object_allocate or a vm_object_reference call.  When all references
811  * are gone, storage associated with this object may be relinquished.
812  *
813  * The caller does not have to hold the object locked but must have control
814  * over the reference in question in order to guarantee that the object
815  * does not get ripped out from under us.
816  *
817  * XXX Currently all deallocations require an exclusive lock.
818  */
819 void
820 VMOBJDEBUG(vm_object_deallocate)(vm_object_t object VMOBJDBARGS)
821 {
822 	struct vnode *vp;
823 	int count;
824 
825 	if (object == NULL)
826 		return;
827 
828 	for (;;) {
829 		count = object->ref_count;
830 		cpu_ccfence();
831 
832 		/*
833 		 * If decrementing the count enters into special handling
834 		 * territory (0, 1, or 2) we have to do it the hard way.
 835 		 * Fortunately though, objects with only a few refs like this
836 		 * are not likely to be heavily contended anyway.
837 		 *
838 		 * For vnode objects we only care about 1->0 transitions.
839 		 */
840 		if (count <= 3 || (object->type == OBJT_VNODE && count <= 1)) {
841 #if defined(DEBUG_LOCKS)
842 			debugvm_object_add(object, file, line, 0);
843 #endif
844 			vm_object_hold(object);
845 			vm_object_deallocate_locked(object);
846 			vm_object_drop(object);
847 			break;
848 		}
849 
850 		/*
851 		 * Try to decrement ref_count without acquiring a hold on
852 		 * the object.  This is particularly important for the exec*()
853 		 * and exit*() code paths because the program binary may
854 		 * have a great deal of sharing and an exclusive lock will
855 		 * crowbar performance in those circumstances.
856 		 */
857 		if (object->type == OBJT_VNODE) {
858 			vp = (struct vnode *)object->handle;
859 			if (atomic_cmpset_int(&object->ref_count,
860 					      count, count - 1)) {
861 #if defined(DEBUG_LOCKS)
862 				debugvm_object_add(object, file, line, -1);
863 #endif
864 
865 				vrele(vp);
866 				break;
867 			}
868 			/* retry */
869 		} else {
870 			if (atomic_cmpset_int(&object->ref_count,
871 					      count, count - 1)) {
872 #if defined(DEBUG_LOCKS)
873 				debugvm_object_add(object, file, line, -1);
874 #endif
875 				break;
876 			}
877 			/* retry */
878 		}
879 		/* retry */
880 	}
881 }
882 
883 void
884 VMOBJDEBUG(vm_object_deallocate_locked)(vm_object_t object VMOBJDBARGS)
885 {
886 	struct vm_object_dealloc_list *dlist = NULL;
887 	struct vm_object_dealloc_list *dtmp;
888 	vm_object_t temp;
889 	int must_drop = 0;
890 
891 	/*
 892 	 * We may chain-deallocate the object, but additional objects may
 893 	 * collect on the dlist which also have to be deallocated.  We
 894 	 * must avoid recursion; vm_object chains can get deep.
895 	 */
896 
897 again:
898 	while (object != NULL) {
899 		/*
900 		 * vnode case, caller either locked the object exclusively
901 		 * or this is a recursion with must_drop != 0 and the vnode
902 		 * object will be locked shared.
903 		 *
904 		 * If locked shared we have to drop the object before we can
905 		 * call vrele() or risk a shared/exclusive livelock.
906 		 */
907 		if (object->type == OBJT_VNODE) {
908 			ASSERT_LWKT_TOKEN_HELD(&object->token);
909 			if (must_drop) {
910 				struct vnode *tmp_vp;
911 
912 				vm_object_vndeallocate(object, &tmp_vp);
913 				vm_object_drop(object);
914 				must_drop = 0;
915 				object = NULL;
916 				vrele(tmp_vp);
917 			} else {
918 				vm_object_vndeallocate(object, NULL);
919 			}
920 			break;
921 		}
922 		ASSERT_LWKT_TOKEN_HELD_EXCL(&object->token);
923 
924 		/*
925 		 * Normal case (object is locked exclusively)
926 		 */
927 		if (object->ref_count == 0) {
928 			panic("vm_object_deallocate: object deallocated "
929 			      "too many times: %d", object->type);
930 		}
931 		if (object->ref_count > 2) {
932 			atomic_add_int(&object->ref_count, -1);
933 #if defined(DEBUG_LOCKS)
934 			debugvm_object_add(object, file, line, -1);
935 #endif
936 			break;
937 		}
938 
939 		/*
940 		 * Here on ref_count of one or two, which are special cases for
941 		 * objects.
942 		 *
943 		 * Nominal ref_count > 1 case if the second ref is not from
944 		 * a shadow.
945 		 *
946 		 * (ONEMAPPING only applies to DEFAULT AND SWAP objects)
947 		 */
948 		if (object->ref_count == 2 && object->shadow_count == 0) {
949 			if (object->type == OBJT_DEFAULT ||
950 			    object->type == OBJT_SWAP) {
951 				vm_object_set_flag(object, OBJ_ONEMAPPING);
952 			}
953 			atomic_add_int(&object->ref_count, -1);
954 #if defined(DEBUG_LOCKS)
955 			debugvm_object_add(object, file, line, -1);
956 #endif
957 			break;
958 		}
959 
960 		/*
961 		 * If the second ref is from a shadow we chain along it
962 		 * upwards if object's handle is exhausted.
963 		 *
964 		 * We have to decrement object->ref_count before potentially
965 		 * collapsing the first shadow object or the collapse code
966 		 * will not be able to handle the degenerate case to remove
967 		 * object.  However, if we do it too early the object can
968 		 * get ripped out from under us.
969 		 */
970 		if (object->ref_count == 2 && object->shadow_count == 1 &&
971 		    object->handle == NULL && (object->type == OBJT_DEFAULT ||
972 					       object->type == OBJT_SWAP)) {
973 			temp = LIST_FIRST(&object->shadow_head);
974 			KKASSERT(temp != NULL);
975 			vm_object_hold(temp);
976 
977 			/*
978 			 * Wait for any paging to complete so the collapse
979 			 * doesn't (or isn't likely to) qcollapse.  pip
980 			 * waiting must occur before we acquire the
981 			 * chainlock.
982 			 */
983 			while (
984 				temp->paging_in_progress ||
985 				object->paging_in_progress
986 			) {
987 				vm_object_pip_wait(temp, "objde1");
988 				vm_object_pip_wait(object, "objde2");
989 			}
990 
991 			/*
992 			 * If the parent is locked we have to give up, as
993 			 * otherwise we would be acquiring locks in the
994 			 * wrong order and potentially deadlock.
995 			 */
996 			if (temp->chainlk & (CHAINLK_EXCL | CHAINLK_MASK)) {
997 				vm_object_drop(temp);
998 				goto skip;
999 			}
1000 			vm_object_chain_acquire(temp, 0);
1001 
1002 			/*
1003 			 * Recheck/retry after the hold and the paging
1004 			 * wait, both of which can block us.
1005 			 */
1006 			if (object->ref_count != 2 ||
1007 			    object->shadow_count != 1 ||
1008 			    object->handle ||
1009 			    LIST_FIRST(&object->shadow_head) != temp ||
1010 			    (object->type != OBJT_DEFAULT &&
1011 			     object->type != OBJT_SWAP)) {
1012 				vm_object_chain_release(temp);
1013 				vm_object_drop(temp);
1014 				continue;
1015 			}
1016 
1017 			/*
1018 			 * We can safely drop object's ref_count now.
1019 			 */
1020 			KKASSERT(object->ref_count == 2);
1021 			atomic_add_int(&object->ref_count, -1);
1022 #if defined(DEBUG_LOCKS)
1023 			debugvm_object_add(object, file, line, -1);
1024 #endif
1025 
1026 			/*
 1027 			 * If our single parent is not collapsible just
1028 			 * decrement ref_count (2->1) and stop.
1029 			 */
1030 			if (temp->handle || (temp->type != OBJT_DEFAULT &&
1031 					     temp->type != OBJT_SWAP)) {
1032 				vm_object_chain_release(temp);
1033 				vm_object_drop(temp);
1034 				break;
1035 			}
1036 
1037 			/*
1038 			 * At this point we have already dropped object's
1039 			 * ref_count so it is possible for a race to
1040 			 * deallocate obj out from under us.  Any collapse
1041 			 * will re-check the situation.  We must not block
1042 			 * until we are able to collapse.
1043 			 *
1044 			 * Bump temp's ref_count to avoid an unwanted
1045 			 * degenerate recursion (can't call
1046 			 * vm_object_reference_locked() because it asserts
1047 			 * that CHAINLOCK is not set).
1048 			 */
1049 			atomic_add_int(&temp->ref_count, 1);
1050 			KKASSERT(temp->ref_count > 1);
1051 
1052 			/*
1053 			 * Collapse temp, then deallocate the extra ref
1054 			 * formally.
1055 			 */
1056 			vm_object_collapse(temp, &dlist);
1057 			vm_object_chain_release(temp);
1058 			if (must_drop) {
1059 				vm_object_lock_swap();
1060 				vm_object_drop(object);
1061 			}
1062 			object = temp;
1063 			must_drop = 1;
1064 			continue;
1065 		}
1066 
1067 		/*
1068 		 * Drop the ref and handle termination on the 1->0 transition.
1069 		 * We may have blocked above so we have to recheck.
1070 		 */
1071 skip:
1072 		KKASSERT(object->ref_count != 0);
1073 		if (object->ref_count >= 2) {
1074 			atomic_add_int(&object->ref_count, -1);
1075 #if defined(DEBUG_LOCKS)
1076 			debugvm_object_add(object, file, line, -1);
1077 #endif
1078 			break;
1079 		}
1080 		KKASSERT(object->ref_count == 1);
1081 
1082 		/*
1083 		 * 1->0 transition.  Chain through the backing_object.
1084 		 * Maintain the ref until we've located the backing object,
1085 		 * then re-check.
1086 		 */
1087 		while ((temp = object->backing_object) != NULL) {
1088 			if (temp->type == OBJT_VNODE)
1089 				vm_object_hold_shared(temp);
1090 			else
1091 				vm_object_hold(temp);
1092 			if (temp == object->backing_object)
1093 				break;
1094 			vm_object_drop(temp);
1095 		}
1096 
1097 		/*
1098 		 * 1->0 transition verified, retry if ref_count is no longer
1099 		 * 1.  Otherwise disconnect the backing_object (temp) and
1100 		 * clean up.
1101 		 */
1102 		if (object->ref_count != 1) {
1103 			vm_object_drop(temp);
1104 			continue;
1105 		}
1106 
1107 		/*
1108 		 * It shouldn't be possible for the object to be chain locked
1109 		 * if we're removing the last ref on it.
1110 		 *
1111 		 * Removing object from temp's shadow list requires dropping
1112 		 * temp, which we will do on loop.
1113 		 *
1114 		 * NOTE! vnodes do not use the shadow list, but still have
1115 		 *	 the backing_object reference.
1116 		 */
1117 		KKASSERT((object->chainlk & (CHAINLK_EXCL|CHAINLK_MASK)) == 0);
1118 
1119 		if (temp) {
1120 			if (object->flags & OBJ_ONSHADOW) {
1121 				LIST_REMOVE(object, shadow_list);
1122 				temp->shadow_count--;
1123 				atomic_add_int(&temp->generation, 1);
1124 				vm_object_clear_flag(object, OBJ_ONSHADOW);
1125 			}
1126 			object->backing_object = NULL;
1127 		}
1128 
1129 		atomic_add_int(&object->ref_count, -1);
1130 		if ((object->flags & OBJ_DEAD) == 0)
1131 			vm_object_terminate(object);
1132 		if (must_drop && temp)
1133 			vm_object_lock_swap();
1134 		if (must_drop)
1135 			vm_object_drop(object);
1136 		object = temp;
1137 		must_drop = 1;
1138 	}
1139 
1140 	if (must_drop && object)
1141 		vm_object_drop(object);
1142 
1143 	/*
 1144 	 * Additional tail recursion on dlist, handled iteratively to avoid deep
 1145 	 * recursion.  Objects on the dlist have a hold count but are not locked.
1146 	 */
1147 	if ((dtmp = dlist) != NULL) {
1148 		dlist = dtmp->next;
1149 		object = dtmp->object;
1150 		kfree(dtmp, M_TEMP);
1151 
1152 		vm_object_lock(object);	/* already held, add lock */
1153 		must_drop = 1;		/* and we're responsible for it */
1154 		goto again;
1155 	}
1156 }
1157 
1158 /*
1159  * Destroy the specified object, freeing up related resources.
1160  *
1161  * The object must have zero references.
1162  *
 1163  * The object must be held.  The caller is responsible for dropping the object
1164  * after terminate returns.  Terminate does NOT drop the object.
1165  */
1166 static int vm_object_terminate_callback(vm_page_t p, void *data);
1167 
1168 void
1169 vm_object_terminate(vm_object_t object)
1170 {
1171 	struct rb_vm_page_scan_info info;
1172 	struct vm_object_hash *hash;
1173 
1174 	/*
1175 	 * Make sure no one uses us.  Once we set OBJ_DEAD we should be
1176 	 * able to safely block.
1177 	 */
1178 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
1179 	KKASSERT((object->flags & OBJ_DEAD) == 0);
1180 	vm_object_set_flag(object, OBJ_DEAD);
1181 
1182 	/*
1183 	 * Wait for the pageout daemon to be done with the object
1184 	 */
1185 	vm_object_pip_wait(object, "objtrm1");
1186 
1187 	KASSERT(!object->paging_in_progress,
1188 		("vm_object_terminate: pageout in progress"));
1189 
1190 	/*
1191 	 * Clean and free the pages, as appropriate. All references to the
1192 	 * object are gone, so we don't need to lock it.
1193 	 */
1194 	if (object->type == OBJT_VNODE) {
1195 		struct vnode *vp;
1196 
1197 		/*
1198 		 * Clean pages and flush buffers.
1199 		 *
1200 		 * NOTE!  TMPFS buffer flushes do not typically flush the
1201 		 *	  actual page to swap as this would be highly
1202 		 *	  inefficient, and normal filesystems usually wrap
1203 		 *	  page flushes with buffer cache buffers.
1204 		 *
1205 		 *	  To deal with this we have to call vinvalbuf() both
1206 		 *	  before and after the vm_object_page_clean().
1207 		 */
1208 		vp = (struct vnode *) object->handle;
1209 		vinvalbuf(vp, V_SAVE, 0, 0);
1210 		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
1211 		vinvalbuf(vp, V_SAVE, 0, 0);
1212 	}
1213 
1214 	/*
1215 	 * Wait for any I/O to complete, after which there had better not
1216 	 * be any references left on the object.
1217 	 */
1218 	vm_object_pip_wait(object, "objtrm2");
1219 
1220 	if (object->ref_count != 0) {
1221 		panic("vm_object_terminate: object with references, "
1222 		      "ref_count=%d", object->ref_count);
1223 	}
1224 
1225 	/*
1226 	 * Cleanup any shared pmaps associated with this object.
1227 	 */
1228 	pmap_object_free(object);
1229 
1230 	/*
1231 	 * Now free any remaining pages. For internal objects, this also
1232 	 * removes them from paging queues. Don't free wired pages, just
1233 	 * remove them from the object.
1234 	 */
1235 	info.count = 0;
1236 	info.object = object;
1237 	do {
1238 		info.error = 0;
1239 		vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
1240 					vm_object_terminate_callback, &info);
1241 	} while (info.error);
1242 
1243 	/*
1244 	 * Let the pager know object is dead.
1245 	 */
1246 	vm_pager_deallocate(object);
1247 
1248 	/*
1249 	 * Wait for the object hold count to hit 1, clean out pages as
1250 	 * we go.  vmobj_token interlocks any race conditions that might
1251 	 * pick the object up from the vm_object_list after we have cleared
1252 	 * rb_memq.
1253 	 */
1254 	for (;;) {
1255 		if (RB_ROOT(&object->rb_memq) == NULL)
1256 			break;
1257 		kprintf("vm_object_terminate: Warning, object %p "
1258 			"still has %ld pages\n",
1259 			object, object->resident_page_count);
1260 		vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
1261 					vm_object_terminate_callback, &info);
1262 	}
1263 
1264 	/*
1265 	 * There had better not be any pages left
1266 	 */
1267 	KKASSERT(object->resident_page_count == 0);
1268 
1269 	/*
1270 	 * Remove the object from the global object list.
1271 	 */
1272 	hash = vmobj_hash(object);
1273 	lwkt_gettoken(&hash->token);
1274 	TAILQ_REMOVE(&hash->list, object, object_list);
1275 	lwkt_reltoken(&hash->token);
1276 
1277 	if (object->ref_count != 0) {
1278 		panic("vm_object_terminate2: object with references, "
1279 		      "ref_count=%d", object->ref_count);
1280 	}
1281 
1282 	/*
1283 	 * NOTE: The object hold_count is at least 1, so we cannot kfree()
1284 	 *	 the object here.  See vm_object_drop().
1285 	 */
1286 }
1287 
1288 /*
1289  * The caller must hold the object.
1290  */
1291 static int
1292 vm_object_terminate_callback(vm_page_t p, void *data)
1293 {
1294 	struct rb_vm_page_scan_info *info = data;
1295 	vm_object_t object;
1296 
1297 	object = p->object;
1298 	KKASSERT(object == info->object);
1299 	if (vm_page_busy_try(p, TRUE)) {
1300 		vm_page_sleep_busy(p, TRUE, "vmotrm");
1301 		info->error = 1;
1302 		return 0;
1303 	}
1304 	if (object != p->object) {
1305 		/* XXX remove once we determine it can't happen */
1306 		kprintf("vm_object_terminate: Warning: Encountered "
1307 			"busied page %p on queue %d\n", p, p->queue);
1308 		vm_page_wakeup(p);
1309 		info->error = 1;
1310 	} else if (p->wire_count == 0) {
1311 		/*
1312 		 * NOTE: p->dirty and PG_NEED_COMMIT are ignored.
1313 		 */
1314 		vm_page_free(p);
1315 		mycpu->gd_cnt.v_pfree++;
1316 	} else {
1317 		if (p->queue != PQ_NONE) {
1318 			kprintf("vm_object_terminate: Warning: Encountered "
1319 				"wired page %p on queue %d\n", p, p->queue);
1320 			if (vm_object_debug > 0) {
1321 				--vm_object_debug;
1322 				print_backtrace(10);
1323 			}
1324 		}
1325 		vm_page_remove(p);
1326 		vm_page_wakeup(p);
1327 	}
1328 
1329 	/*
1330 	 * Must be at end to avoid SMP races, caller holds object token
1331 	 */
1332 	if ((++info->count & 63) == 0)
1333 		lwkt_user_yield();
1334 	return(0);
1335 }
1336 
1337 /*
1338  * Clean all dirty pages in the specified range of object.  Leaves page
1339  * on whatever queue it is currently on.   If NOSYNC is set then do not
1340  * write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC),
1341  * leaving the object dirty.
1342  *
1343  * When stuffing pages asynchronously, allow clustering.  XXX we need a
1344  * synchronous clustering mode implementation.
1345  *
1346  * Odd semantics: if start == end, we clean everything.
1347  *
1348  * The object must be locked? XXX
1349  */
1350 static int vm_object_page_clean_pass1(struct vm_page *p, void *data);
1351 static int vm_object_page_clean_pass2(struct vm_page *p, void *data);
1352 
1353 void
1354 vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
1355 		     int flags)
1356 {
1357 	struct rb_vm_page_scan_info info;
1358 	struct vnode *vp;
1359 	int wholescan;
1360 	int pagerflags;
1361 	int generation;
1362 
1363 	vm_object_hold(object);
1364 	if (object->type != OBJT_VNODE ||
1365 	    (object->flags & OBJ_MIGHTBEDIRTY) == 0) {
1366 		vm_object_drop(object);
1367 		return;
1368 	}
1369 
1370 	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ?
1371 			VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
1372 	pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;
1373 
1374 	vp = object->handle;
1375 
1376 	/*
1377 	 * Interlock other major object operations.  This allows us to
1378 	 * temporarily clear OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY.
1379 	 */
1380 	vm_object_set_flag(object, OBJ_CLEANING);
1381 
1382 	/*
1383 	 * Handle 'entire object' case
1384 	 */
1385 	info.start_pindex = start;
1386 	if (end == 0) {
1387 		info.end_pindex = object->size - 1;
1388 	} else {
1389 		info.end_pindex = end - 1;
1390 	}
1391 	wholescan = (start == 0 && info.end_pindex == object->size - 1);
1392 	info.limit = flags;
1393 	info.pagerflags = pagerflags;
1394 	info.object = object;
1395 
1396 	/*
1397 	 * If cleaning the entire object do a pass to mark the pages read-only.
1398 	 * If everything worked out ok, clear OBJ_WRITEABLE and
1399 	 * OBJ_MIGHTBEDIRTY.
1400 	 */
1401 	if (wholescan) {
1402 		info.error = 0;
1403 		info.count = 0;
1404 		vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
1405 					vm_object_page_clean_pass1, &info);
1406 		if (info.error == 0) {
1407 			vm_object_clear_flag(object,
1408 					     OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
1409 			if (object->type == OBJT_VNODE &&
1410 			    (vp = (struct vnode *)object->handle) != NULL) {
1411 				/*
1412 				 * Use new-style interface to clear VISDIRTY
1413 				 * because the vnode is not necessarily removed
1414 				 * from the syncer list(s) as often as it was
1415 				 * under the old interface, which can leave
1416 				 * the vnode on the syncer list after reclaim.
1417 				 */
1418 				vclrobjdirty(vp);
1419 			}
1420 		}
1421 	}
1422 
1423 	/*
1424 	 * Do a pass to clean all the dirty pages we find.
1425 	 */
1426 	do {
1427 		info.error = 0;
1428 		info.count = 0;
1429 		generation = object->generation;
1430 		vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
1431 					vm_object_page_clean_pass2, &info);
1432 	} while (info.error || generation != object->generation);
1433 
1434 	vm_object_clear_flag(object, OBJ_CLEANING);
1435 	vm_object_drop(object);
1436 }
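
/*
 * Illustrative sketch (not from the original source): synchronously flushing
 * a byte range of a vnode-backed object, converting byte offsets to page
 * indices with OFF_TO_IDX().  vm_object_page_clean() performs its own
 * hold/drop, so no external hold is required for this call.
 *
 *	vm_object_page_clean(object,
 *			     OFF_TO_IDX(start),
 *			     OFF_TO_IDX(end + PAGE_MASK),
 *			     OBJPC_SYNC);
 */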
1437 
1438 /*
1439  * The caller must hold the object.
1440  */
1441 static
1442 int
1443 vm_object_page_clean_pass1(struct vm_page *p, void *data)
1444 {
1445 	struct rb_vm_page_scan_info *info = data;
1446 
1447 	KKASSERT(p->object == info->object);
1448 
1449 	vm_page_flag_set(p, PG_CLEANCHK);
1450 	if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
1451 		info->error = 1;
1452 	} else if (vm_page_busy_try(p, FALSE)) {
1453 		info->error = 1;
1454 	} else {
1455 		KKASSERT(p->object == info->object);
1456 		vm_page_protect(p, VM_PROT_READ);
1457 		vm_page_wakeup(p);
1458 	}
1459 
1460 	/*
1461 	 * Must be at end to avoid SMP races, caller holds object token
1462 	 */
1463 	if ((++info->count & 63) == 0)
1464 		lwkt_user_yield();
1465 	return(0);
1466 }
1467 
1468 /*
1469  * The caller must hold the object
1470  */
1471 static
1472 int
1473 vm_object_page_clean_pass2(struct vm_page *p, void *data)
1474 {
1475 	struct rb_vm_page_scan_info *info = data;
1476 	int generation;
1477 
1478 	KKASSERT(p->object == info->object);
1479 
1480 	/*
1481 	 * Do not mess with pages that were inserted after we started
1482 	 * the cleaning pass.
1483 	 */
1484 	if ((p->flags & PG_CLEANCHK) == 0)
1485 		goto done;
1486 
1487 	generation = info->object->generation;
1488 
1489 	if (vm_page_busy_try(p, TRUE)) {
1490 		vm_page_sleep_busy(p, TRUE, "vpcwai");
1491 		info->error = 1;
1492 		goto done;
1493 	}
1494 
1495 	KKASSERT(p->object == info->object &&
1496 		 info->object->generation == generation);
1497 
1498 	/*
1499 	 * Before wasting time traversing the pmaps, check for trivial
1500 	 * cases where the page cannot be dirty.
1501 	 */
1502 	if (p->valid == 0 || (p->queue - p->pc) == PQ_CACHE) {
1503 		KKASSERT((p->dirty & p->valid) == 0 &&
1504 			 (p->flags & PG_NEED_COMMIT) == 0);
1505 		vm_page_wakeup(p);
1506 		goto done;
1507 	}
1508 
1509 	/*
1510 	 * Check whether the page is dirty or not.  The page has been set
1511 	 * to be read-only so the check will not race a user dirtying the
1512 	 * page.
1513 	 */
1514 	vm_page_test_dirty(p);
1515 	if ((p->dirty & p->valid) == 0 && (p->flags & PG_NEED_COMMIT) == 0) {
1516 		vm_page_flag_clear(p, PG_CLEANCHK);
1517 		vm_page_wakeup(p);
1518 		goto done;
1519 	}
1520 
1521 	/*
1522 	 * If we have been asked to skip nosync pages and this is a
1523 	 * nosync page, skip it.  Note that the object flags were
1524 	 * not cleared in this case (because pass1 will have returned an
1525 	 * error), so we do not have to set them.
1526 	 */
1527 	if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
1528 		vm_page_flag_clear(p, PG_CLEANCHK);
1529 		vm_page_wakeup(p);
1530 		goto done;
1531 	}
1532 
1533 	/*
1534 	 * Flush as many pages as we can.  PG_CLEANCHK will be cleared on
1535 	 * the pages that get successfully flushed.  Set info->error if
1536 	 * we raced an object modification.
1537 	 */
1538 	vm_object_page_collect_flush(info->object, p, info->pagerflags);
1539 	/* vm_wait_nominal(); this can deadlock the system in syncer/pageout */
1540 
1541 	/*
1542 	 * Must be at end to avoid SMP races, caller holds object token
1543 	 */
1544 done:
1545 	if ((++info->count & 63) == 0)
1546 		lwkt_user_yield();
1547 	return(0);
1548 }
1549 
1550 /*
1551  * Collect the specified page and nearby pages and flush them out.
1552  * The number of pages flushed is returned.  The passed page is busied
1553  * by the caller and we are responsible for its disposition.
1554  *
1555  * The caller must hold the object.
1556  */
1557 static void
1558 vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags)
1559 {
1560 	int error;
1561 	int is;
1562 	int ib;
1563 	int i;
1564 	int page_base;
1565 	vm_pindex_t pi;
1566 	vm_page_t ma[BLIST_MAX_ALLOC];
1567 
1568 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
1569 
1570 	pi = p->pindex;
1571 	page_base = pi % BLIST_MAX_ALLOC;
1572 	ma[page_base] = p;
1573 	ib = page_base - 1;
1574 	is = page_base + 1;
1575 
1576 	while (ib >= 0) {
1577 		vm_page_t tp;
1578 
1579 		tp = vm_page_lookup_busy_try(object, pi - page_base + ib,
1580 					     TRUE, &error);
1581 		if (error)
1582 			break;
1583 		if (tp == NULL)
1584 			break;
1585 		if ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
1586 		    (tp->flags & PG_CLEANCHK) == 0) {
1587 			vm_page_wakeup(tp);
1588 			break;
1589 		}
1590 		if ((tp->queue - tp->pc) == PQ_CACHE) {
1591 			vm_page_flag_clear(tp, PG_CLEANCHK);
1592 			vm_page_wakeup(tp);
1593 			break;
1594 		}
1595 		vm_page_test_dirty(tp);
1596 		if ((tp->dirty & tp->valid) == 0 &&
1597 		    (tp->flags & PG_NEED_COMMIT) == 0) {
1598 			vm_page_flag_clear(tp, PG_CLEANCHK);
1599 			vm_page_wakeup(tp);
1600 			break;
1601 		}
1602 		ma[ib] = tp;
1603 		--ib;
1604 	}
1605 	++ib;	/* fixup */
1606 
1607 	while (is < BLIST_MAX_ALLOC &&
1608 	       pi - page_base + is < object->size) {
1609 		vm_page_t tp;
1610 
1611 		tp = vm_page_lookup_busy_try(object, pi - page_base + is,
1612 					     TRUE, &error);
1613 		if (error)
1614 			break;
1615 		if (tp == NULL)
1616 			break;
1617 		if ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
1618 		    (tp->flags & PG_CLEANCHK) == 0) {
1619 			vm_page_wakeup(tp);
1620 			break;
1621 		}
1622 		if ((tp->queue - tp->pc) == PQ_CACHE) {
1623 			vm_page_flag_clear(tp, PG_CLEANCHK);
1624 			vm_page_wakeup(tp);
1625 			break;
1626 		}
1627 		vm_page_test_dirty(tp);
1628 		if ((tp->dirty & tp->valid) == 0 &&
1629 		    (tp->flags & PG_NEED_COMMIT) == 0) {
1630 			vm_page_flag_clear(tp, PG_CLEANCHK);
1631 			vm_page_wakeup(tp);
1632 			break;
1633 		}
1634 		ma[is] = tp;
1635 		++is;
1636 	}
1637 
1638 	/*
1639 	 * All pages in the ma[] array are busied now
1640 	 */
1641 	for (i = ib; i < is; ++i) {
1642 		vm_page_flag_clear(ma[i], PG_CLEANCHK);
1643 		vm_page_hold(ma[i]);	/* XXX need this any more? */
1644 	}
1645 	vm_pageout_flush(&ma[ib], is - ib, pagerflags);
1646 	for (i = ib; i < is; ++i)	/* XXX need this any more? */
1647 		vm_page_unhold(ma[i]);
1648 }
1649 
1650 /*
1651  * Same as vm_object_pmap_copy, except range checking really
1652  * works, and is meant for small sections of an object.
1653  *
1654  * This code protects resident pages by making them read-only
1655  * and is typically called on a fork or split when a page
1656  * is converted to copy-on-write.
1657  *
1658  * NOTE: If the page is already at VM_PROT_NONE, calling
1659  * vm_page_protect will have no effect.
1660  */
1661 void
1662 vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
1663 {
1664 	vm_pindex_t idx;
1665 	vm_page_t p;
1666 
1667 	if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
1668 		return;
1669 
1670 	vm_object_hold(object);
1671 	for (idx = start; idx < end; idx++) {
1672 		p = vm_page_lookup(object, idx);
1673 		if (p == NULL)
1674 			continue;
1675 		vm_page_protect(p, VM_PROT_READ);
1676 	}
1677 	vm_object_drop(object);
1678 }
1679 
1680 /*
1681  * Removes all physical pages in the specified object range from all
1682  * physical maps.
1683  *
1684  * The object must *not* be locked.
1685  */
1686 
1687 static int vm_object_pmap_remove_callback(vm_page_t p, void *data);
1688 
1689 void
1690 vm_object_pmap_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
1691 {
1692 	struct rb_vm_page_scan_info info;
1693 
1694 	if (object == NULL)
1695 		return;
1696 	if (start == end)
1697 		return;
1698 	info.start_pindex = start;
1699 	info.end_pindex = end - 1;
1700 	info.count = 0;
1701 	info.object = object;
1702 
1703 	vm_object_hold(object);
1704 	do {
1705 		info.error = 0;
1706 		vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
1707 					vm_object_pmap_remove_callback, &info);
1708 	} while (info.error);
1709 	if (start == 0 && end == object->size)
1710 		vm_object_clear_flag(object, OBJ_WRITEABLE);
1711 	vm_object_drop(object);
1712 }
1713 
1714 /*
1715  * The caller must hold the object
1716  */
1717 static int
1718 vm_object_pmap_remove_callback(vm_page_t p, void *data)
1719 {
1720 	struct rb_vm_page_scan_info *info = data;
1721 
1722 	if (info->object != p->object ||
1723 	    p->pindex < info->start_pindex ||
1724 	    p->pindex > info->end_pindex) {
1725 		kprintf("vm_object_pmap_remove_callback: obj/pg race %p/%p\n",
1726 			info->object, p);
1727 		info->error = 1;
1728 		return(0);
1729 	}
1730 
1731 	vm_page_protect(p, VM_PROT_NONE);
1732 
1733 	/*
1734 	 * Must be at end to avoid SMP races, caller holds object token
1735 	 */
1736 	if ((++info->count & 63) == 0)
1737 		lwkt_user_yield();
1738 	return(0);
1739 }
1740 
1741 /*
1742  * Implements the madvise function at the object/page level.
1743  *
1744  * MADV_WILLNEED	(any object)
1745  *
1746  *	Activate the specified pages if they are resident.
1747  *
1748  * MADV_DONTNEED	(any object)
1749  *
1750  *	Deactivate the specified pages if they are resident.
1751  *
1752  * MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects, OBJ_ONEMAPPING only)
1753  *
1754  *	Deactivate and clean the specified pages if they are
1755  *	resident.  This permits the process to reuse the pages
1756  *	without faulting or the kernel to reclaim the pages
1757  *	without I/O.
1758  *
1759  * No requirements.
1760  */
1761 void
1762 vm_object_madvise(vm_object_t object, vm_pindex_t pindex,
1763 		  vm_pindex_t count, int advise)
1764 {
1765 	vm_pindex_t end, tpindex;
1766 	vm_object_t tobject;
1767 	vm_object_t xobj;
1768 	vm_page_t m;
1769 	int error;
1770 
1771 	if (object == NULL)
1772 		return;
1773 
1774 	end = pindex + count;
1775 
1776 	vm_object_hold(object);
1777 	tobject = object;
1778 
1779 	/*
1780 	 * Locate and adjust resident pages
1781 	 */
1782 	for (; pindex < end; pindex += 1) {
1783 relookup:
1784 		if (tobject != object)
1785 			vm_object_drop(tobject);
1786 		tobject = object;
1787 		tpindex = pindex;
1788 shadowlookup:
1789 		/*
1790 		 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
1791 		 * and those pages must be OBJ_ONEMAPPING.
1792 		 */
1793 		if (advise == MADV_FREE) {
1794 			if ((tobject->type != OBJT_DEFAULT &&
1795 			     tobject->type != OBJT_SWAP) ||
1796 			    (tobject->flags & OBJ_ONEMAPPING) == 0) {
1797 				continue;
1798 			}
1799 		}
1800 
1801 		m = vm_page_lookup_busy_try(tobject, tpindex, TRUE, &error);
1802 
1803 		if (error) {
1804 			vm_page_sleep_busy(m, TRUE, "madvpo");
1805 			goto relookup;
1806 		}
1807 		if (m == NULL) {
1808 			/*
1809 			 * There may be swap even if there is no backing page
1810 			 */
1811 			if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
1812 				swap_pager_freespace(tobject, tpindex, 1);
1813 
1814 			/*
1815 			 * next object
1816 			 */
1817 			while ((xobj = tobject->backing_object) != NULL) {
1818 				KKASSERT(xobj != object);
1819 				vm_object_hold(xobj);
1820 				if (xobj == tobject->backing_object)
1821 					break;
1822 				vm_object_drop(xobj);
1823 			}
1824 			if (xobj == NULL)
1825 				continue;
1826 			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
1827 			if (tobject != object) {
1828 				vm_object_lock_swap();
1829 				vm_object_drop(tobject);
1830 			}
1831 			tobject = xobj;
1832 			goto shadowlookup;
1833 		}
1834 
1835 		/*
1836 		 * If the page is not in a normal active state, we skip it.
1837 		 * If the page is not managed there are no page queues to
1838 		 * mess with.  Things can break if we mess with pages in
1839 		 * any of the below states.
1840 		 */
1841 		if (m->wire_count ||
1842 		    (m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) ||
1843 		    m->valid != VM_PAGE_BITS_ALL
1844 		) {
1845 			vm_page_wakeup(m);
1846 			continue;
1847 		}
1848 
1849 		/*
1850 		 * Theoretically once a page is known not to be busy, an
1851 		 * interrupt cannot come along and rip it out from under us.
1852 		 */
1853 
1854 		if (advise == MADV_WILLNEED) {
1855 			vm_page_activate(m);
1856 		} else if (advise == MADV_DONTNEED) {
1857 			vm_page_dontneed(m);
1858 		} else if (advise == MADV_FREE) {
1859 			/*
1860 			 * Mark the page clean.  This will allow the page
1861 			 * to be freed up by the system.  However, such pages
1862 			 * are often reused quickly by malloc()/free()
1863 			 * so we do not do anything that would cause
1864 			 * a page fault if we can help it.
1865 			 *
1866 			 * Specifically, we do not try to actually free
1867 			 * the page now nor do we try to put it in the
1868 			 * cache (which would cause a page fault on reuse).
1869 			 *
 1870 			 * But we do make the page as freeable as we
1871 			 * can without actually taking the step of unmapping
1872 			 * it.
1873 			 */
1874 			pmap_clear_modify(m);
1875 			m->dirty = 0;
1876 			m->act_count = 0;
1877 			vm_page_dontneed(m);
1878 			if (tobject->type == OBJT_SWAP)
1879 				swap_pager_freespace(tobject, tpindex, 1);
1880 		}
1881 		vm_page_wakeup(m);
1882 	}
1883 	if (tobject != object)
1884 		vm_object_drop(tobject);
1885 	vm_object_drop(object);
1886 }
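
/*
 * Usage sketch (illustrative only, not the actual vm_map code): the
 * madvise(2) path walks the affected vm_map entries and hands each
 * object range to vm_object_madvise(), roughly:
 *
 *	vm_object_madvise(entry->object.vm_object, pindex, npages,
 *			  MADV_DONTNEED);
 *
 * where pindex/npages describe the advised range in object pages.
 */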
1887 
1888 /*
1889  * Create a new object which is backed by the specified existing object
1890  * range.  Replace the pointer and offset that was pointing at the existing
1891  * object with the pointer/offset for the new object.
1892  *
1893  * If addref is non-zero the returned object is given an additional reference.
1894  * This mechanic exists because an object whose ref count is still 1
1895  * can race against a collapse before the caller has a chance to bump
1896  * it, so the caller cannot safely add the ref after the fact.  Used
1897  * when the caller is duplicating a vm_map_entry.
1898  *
1899  * No other requirements.
1900  */
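/*
 * Usage sketch (illustrative only; see the vm_map code for the real
 * callers):
 *
 *	vm_object_shadow(&entry->object.vm_object, &entry->offset,
 *			 length, addref);
 *
 * On return *objectp points at the new shadow object, *offset has been
 * zeroed, and the old offset is recorded in the new object's
 * backing_object_offset.
 */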
1901 void
1902 vm_object_shadow(vm_object_t *objectp, vm_ooffset_t *offset, vm_size_t length,
1903 		 int addref)
1904 {
1905 	vm_object_t source;
1906 	vm_object_t result;
1907 	int useshadowlist;
1908 
1909 	source = *objectp;
1910 
1911 	/*
1912 	 * Don't create the new object if the old object isn't shared.
1913 	 * We have to chain wait before adding the reference to avoid
1914 	 * racing a collapse or deallocation.
1915 	 *
1916 	 * Clear OBJ_ONEMAPPING flag when shadowing.
1917 	 *
1918 	 * The caller owns a ref on source via *objectp which we are going
1919 	 * to replace.  This ref is inherited by the result->backing_object
1920 	 * assignment and does not need to be incremented here.
1921 	 *
1922 	 * However, we add a temporary extra reference to the original source
1923 	 * prior to holding nobject in case we block, to avoid races where
1924 	 * someone else might believe that the source can be collapsed.
1925 	 */
1926 	useshadowlist = 0;
1927 	if (source) {
1928 		if (source->type != OBJT_VNODE) {
1929 			useshadowlist = 1;
1930 			vm_object_hold(source);
1931 			vm_object_chain_wait(source, 0);
1932 			if (source->ref_count == 1 &&
1933 			    source->handle == NULL &&
1934 			    (source->type == OBJT_DEFAULT ||
1935 			     source->type == OBJT_SWAP)) {
1936 				if (addref) {
1937 					vm_object_reference_locked(source);
1938 					vm_object_clear_flag(source,
1939 							     OBJ_ONEMAPPING);
1940 				}
1941 				vm_object_drop(source);
1942 				return;
1943 			}
1944 			vm_object_reference_locked(source);
1945 			vm_object_clear_flag(source, OBJ_ONEMAPPING);
1946 		} else {
1947 			vm_object_reference_quick(source);
1948 			vm_object_clear_flag(source, OBJ_ONEMAPPING);
1949 		}
1950 	}
1951 
1952 	/*
1953 	 * Allocate a new object with the given length.  The new object
1954 	 * is returned referenced but we may have to add another one.
1955 	 * If we are adding a second reference (typically because the caller
1956 	 * is about to clone a vm_map_entry) we must clear OBJ_ONEMAPPING.
1957 	 *
1958 	 * The source object currently has an extra reference to prevent
1959 	 * collapses into it while we mess with its shadow list, which
1960 	 * we will remove later in this routine.
1961 	 *
1962 	 * The target object may require a second reference if asked for one
1963 	 * by the caller.
1964 	 */
1965 	result = vm_object_allocate(OBJT_DEFAULT, length);
1966 	if (result == NULL)
1967 		panic("vm_object_shadow: no object for shadowing");
1968 	vm_object_hold(result);
1969 	if (addref) {
1970 		vm_object_reference_locked(result);
1971 		vm_object_clear_flag(result, OBJ_ONEMAPPING);
1972 	}
1973 
1974 	/*
1975 	 * The new object shadows the source object.  Chain wait before
1976 	 * adjusting shadow_count or the shadow list to avoid races.
1977 	 *
1978 	 * Try to optimize the result object's page color when shadowing
1979 	 * in order to maintain page coloring consistency in the combined
1980 	 * shadowed object.
1981 	 *
1982 	 * The backing_object reference to source requires adding a ref to
1983 	 * source.  We simply inherit the ref from the original *objectp
1984 	 * (which we are replacing) so no additional refs need to be added.
1985 	 * (we must still clean up the extra ref we had to prevent collapse
1986 	 * races).
1987 	 *
1988 	 * SHADOWING IS NOT APPLICABLE TO OBJT_VNODE OBJECTS
1989 	 */
1990 	KKASSERT(result->backing_object == NULL);
1991 	result->backing_object = source;
1992 	if (source) {
1993 		if (useshadowlist) {
1994 			vm_object_chain_wait(source, 0);
1995 			LIST_INSERT_HEAD(&source->shadow_head,
1996 					 result, shadow_list);
1997 			source->shadow_count++;
1998 			atomic_add_int(&source->generation, 1);
1999 			vm_object_set_flag(result, OBJ_ONSHADOW);
2000 		}
2001 		/* cpu localization twist */
2002 		result->pg_color = vm_quickcolor();
2003 	}
2004 
2005 	/*
2006 	 * Adjust the return storage.  Drop the ref on source before
2007 	 * returning.
2008 	 */
2009 	result->backing_object_offset = *offset;
2010 	vm_object_drop(result);
2011 	*offset = 0;
2012 	if (source) {
2013 		if (useshadowlist) {
2014 			vm_object_deallocate_locked(source);
2015 			vm_object_drop(source);
2016 		} else {
2017 			vm_object_deallocate(source);
2018 		}
2019 	}
2020 
2021 	/*
2022 	 * Return the new object via *objectp.
2023 	 */
2024 	*objectp = result;
2025 }
2026 
2027 #define	OBSC_TEST_ALL_SHADOWED	0x0001
2028 #define	OBSC_COLLAPSE_NOWAIT	0x0002
2029 #define	OBSC_COLLAPSE_WAIT	0x0004
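
/*
 * Backing-scan operations:
 *
 *	OBSC_TEST_ALL_SHADOWED	Check whether every resident page in the
 *				backing object is shadowed by the parent.
 *	OBSC_COLLAPSE_NOWAIT	Collapse pass that skips busy pages
 *				(used by vm_object_qcollapse()).
 *	OBSC_COLLAPSE_WAIT	Full collapse pass; marks the backing
 *				object dead and may block on busy pages.
 */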
2030 
2031 static int vm_object_backing_scan_callback(vm_page_t p, void *data);
2032 
2033 /*
2034  * The caller must hold the object.
2035  */
2036 static __inline int
2037 vm_object_backing_scan(vm_object_t object, vm_object_t backing_object, int op)
2038 {
2039 	struct rb_vm_page_scan_info info;
2040 	struct vm_object_hash *hash;
2041 
2042 	vm_object_assert_held(object);
2043 	vm_object_assert_held(backing_object);
2044 
2045 	KKASSERT(backing_object == object->backing_object);
2046 	info.backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
2047 
2048 	/*
2049 	 * Initial conditions
2050 	 */
2051 	if (op & OBSC_TEST_ALL_SHADOWED) {
2052 		/*
2053 		 * We do not want to have to test for the existence of
2054 		 * swap pages in the backing object.  XXX but with the
2055 		 * new swapper this would be pretty easy to do.
2056 		 *
2057 		 * XXX what about anonymous MAP_SHARED memory that hasn't
2058 		 * been ZFOD faulted yet?  If we do not test for this, the
2059 		 * shadow test may succeed! XXX
2060 		 */
2061 		if (backing_object->type != OBJT_DEFAULT)
2062 			return(0);
2063 	}
2064 	if (op & OBSC_COLLAPSE_WAIT) {
2065 		KKASSERT((backing_object->flags & OBJ_DEAD) == 0);
2066 		vm_object_set_flag(backing_object, OBJ_DEAD);
2067 
2068 		hash = vmobj_hash(backing_object);
2069 		lwkt_gettoken(&hash->token);
2070 		TAILQ_REMOVE(&hash->list, backing_object, object_list);
2071 		lwkt_reltoken(&hash->token);
2072 	}
2073 
2074 	/*
2075 	 * Our scan.  We have to retry if a negative error code is returned,
2076 	 * otherwise 0 or 1 will be returned in info.error.  0 indicates that
2077 	 * the scan had to be stopped because the parent does not completely
2078 	 * shadow the child.
2079 	 */
2080 	info.object = object;
2081 	info.backing_object = backing_object;
2082 	info.limit = op;
2083 	info.count = 0;
2084 	do {
2085 		info.error = 1;
2086 		vm_page_rb_tree_RB_SCAN(&backing_object->rb_memq, NULL,
2087 					vm_object_backing_scan_callback,
2088 					&info);
2089 	} while (info.error < 0);
2090 
2091 	return(info.error);
2092 }
2093 
2094 /*
2095  * The caller must hold the object.
2096  */
2097 static int
2098 vm_object_backing_scan_callback(vm_page_t p, void *data)
2099 {
2100 	struct rb_vm_page_scan_info *info = data;
2101 	vm_object_t backing_object;
2102 	vm_object_t object;
2103 	vm_pindex_t pindex;
2104 	vm_pindex_t new_pindex;
2105 	vm_pindex_t backing_offset_index;
2106 	int op;
2107 
2108 	pindex = p->pindex;
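	/*
	 * new_pindex is where this backing-object page would appear in
	 * the parent object, i.e. the page's index adjusted by the
	 * parent's offset into the backing object.
	 */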
2109 	new_pindex = pindex - info->backing_offset_index;
2110 	op = info->limit;
2111 	object = info->object;
2112 	backing_object = info->backing_object;
2113 	backing_offset_index = info->backing_offset_index;
2114 
2115 	if (op & OBSC_TEST_ALL_SHADOWED) {
2116 		vm_page_t pp;
2117 
2118 		/*
2119 		 * Ignore pages outside the parent object's range
2120 		 * and outside the parent object's mapping of the
2121 		 * backing object.
2122 		 *
2123 		 * note that we do not busy the backing object's
2124 		 * page.
2125 		 */
2126 		if (pindex < backing_offset_index ||
2127 		    new_pindex >= object->size
2128 		) {
2129 			return(0);
2130 		}
2131 
2132 		/*
2133 		 * See if the parent has the page or if the parent's
2134 		 * object pager has the page.  If the parent has the
2135 		 * page but the page is not valid, the parent's
2136 		 * object pager must have the page.
2137 		 *
2138 		 * If this fails, the parent does not completely shadow
2139 		 * the object and we might as well give up now.
2140 		 */
2141 		pp = vm_page_lookup(object, new_pindex);
2142 		if ((pp == NULL || pp->valid == 0) &&
2143 		    !vm_pager_has_page(object, new_pindex)
2144 		) {
2145 			info->error = 0;	/* problemo */
2146 			return(-1);		/* stop the scan */
2147 		}
2148 	}
2149 
2150 	/*
2151 	 * Check for busy page.  Note that we may have lost (p) when we
2152 	 * possibly blocked above.
2153 	 */
2154 	if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
2155 		vm_page_t pp;
2156 
2157 		if (vm_page_busy_try(p, TRUE)) {
2158 			if (op & OBSC_COLLAPSE_NOWAIT) {
2159 				return(0);
2160 			} else {
2161 				/*
2162 				 * If we slept, anything could have
2163 				 * happened.   Ask that the scan be restarted.
2164 				 *
2165 				 * Since the object is marked dead, the
2166 				 * backing offset should not have changed.
2167 				 */
2168 				vm_page_sleep_busy(p, TRUE, "vmocol");
2169 				info->error = -1;
2170 				return(-1);
2171 			}
2172 		}
2173 
2174 		/*
2175 		 * If (p) is no longer valid restart the scan.
2176 		 */
2177 		if (p->object != backing_object || p->pindex != pindex) {
2178 			kprintf("vm_object_backing_scan: Warning: page "
2179 				"%p ripped out from under us\n", p);
2180 			vm_page_wakeup(p);
2181 			info->error = -1;
2182 			return(-1);
2183 		}
2184 
2185 		if (op & OBSC_COLLAPSE_NOWAIT) {
2186 			if (p->valid == 0 ||
2187 			    p->wire_count ||
2188 			    (p->flags & PG_NEED_COMMIT)) {
2189 				vm_page_wakeup(p);
2190 				return(0);
2191 			}
2192 		} else {
2193 			/* XXX what if p->valid == 0, hold_count, etc? */
2194 		}
2195 
2196 		KASSERT(
2197 		    p->object == backing_object,
2198 		    ("vm_object_qcollapse(): object mismatch")
2199 		);
2200 
2201 		/*
2202 		 * Destroy any associated swap
2203 		 */
2204 		if (backing_object->type == OBJT_SWAP)
2205 			swap_pager_freespace(backing_object, p->pindex, 1);
2206 
2207 		if (
2208 		    p->pindex < backing_offset_index ||
2209 		    new_pindex >= object->size
2210 		) {
2211 			/*
2212 			 * Page is out of the parent object's range, so we
2213 			 * can simply destroy it.
2214 			 */
2215 			vm_page_protect(p, VM_PROT_NONE);
2216 			vm_page_free(p);
2217 			return(0);
2218 		}
2219 
2220 		pp = vm_page_lookup(object, new_pindex);
2221 		if (pp != NULL || vm_pager_has_page(object, new_pindex)) {
2222 			/*
2223 			 * The page already exists in the parent OR swap
2224 			 * exists for this location in the parent.  Destroy
2225 			 * the original page from the backing object.
2226 			 *
2227 			 * Leave the parent's page alone.
2228 			 */
2229 			vm_page_protect(p, VM_PROT_NONE);
2230 			vm_page_free(p);
2231 			return(0);
2232 		}
2233 
2234 		/*
2235 		 * The page does not exist in the parent; rename it
2236 		 * from the backing object to the main object.
2237 		 *
2238 		 * If the page was mapped to a process, it can remain
2239 		 * mapped through the rename.
2240 		 */
2241 		if ((p->queue - p->pc) == PQ_CACHE)
2242 			vm_page_deactivate(p);
2243 
2244 		vm_page_rename(p, object, new_pindex);
2245 		vm_page_wakeup(p);
2246 		/* page automatically made dirty by rename */
2247 	}
2248 	return(0);
2249 }
2250 
2251 /*
2252  * This version of collapse allows the operation to occur earlier and
2253  * when paging_in_progress is true for an object...  This is not a complete
2254  * operation, but should plug 99.9% of the rest of the leaks.
2255  *
2256  * The caller must hold the object and backing_object and both must be
2257  * chainlocked.
2258  *
2259  * (only called from vm_object_collapse)
2260  */
2261 static void
2262 vm_object_qcollapse(vm_object_t object, vm_object_t backing_object)
2263 {
2264 	if (backing_object->ref_count == 1) {
2265 		atomic_add_int(&backing_object->ref_count, 2);
2266 #if defined(DEBUG_LOCKS)
2267 		debugvm_object_add(backing_object, "qcollapse", 1, 2);
2268 #endif
2269 		vm_object_backing_scan(object, backing_object,
2270 				       OBSC_COLLAPSE_NOWAIT);
2271 		atomic_add_int(&backing_object->ref_count, -2);
2272 #if defined(DEBUG_LOCKS)
2273 		debugvm_object_add(backing_object, "qcollapse", 2, -2);
2274 #endif
2275 	}
2276 }
2277 
2278 /*
2279  * Collapse an object with the object backing it.  Pages in the backing
2280  * object are moved into the parent, and the backing object is deallocated.
2281  * Any conflict is resolved in favor of the parent's existing pages.
2282  *
2283  * object must be held and chain-locked on call.
2284  *
2285  * The caller must have an extra ref on object to prevent a race from
2286  * destroying it during the collapse.
2287  */
2288 void
2289 vm_object_collapse(vm_object_t object, struct vm_object_dealloc_list **dlistp)
2290 {
2291 	struct vm_object_dealloc_list *dlist = NULL;
2292 	vm_object_t backing_object;
2293 
2294 	/*
2295 	 * Only one thread is attempting a collapse at any given moment.
2296 	 * There are few restrictions on (object) that callers of this
2297 	 * function check, so reentrancy is likely.
2298 	 */
2299 	KKASSERT(object != NULL);
2300 	vm_object_assert_held(object);
2301 	KKASSERT(object->chainlk & (CHAINLK_MASK | CHAINLK_EXCL));
2302 
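	/*
	 * Each pass either fully absorbs the backing object (when it has
	 * only one reference: its pages and swap migrate into object) or
	 * bypasses it (when object already shadows all of its resident
	 * pages: object is re-pointed at backing_object->backing_object),
	 * then loops to try again on the new backing object.
	 */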
2303 	for (;;) {
2304 		vm_object_t bbobj;
2305 		int dodealloc;
2306 
2307 		/*
2308 		 * We can only collapse a DEFAULT/SWAP object with a
2309 		 * DEFAULT/SWAP object.
2310 		 */
2311 		if (object->type != OBJT_DEFAULT && object->type != OBJT_SWAP) {
2312 			backing_object = NULL;
2313 			break;
2314 		}
2315 
2316 		backing_object = object->backing_object;
2317 		if (backing_object == NULL)
2318 			break;
2319 		if (backing_object->type != OBJT_DEFAULT &&
2320 		    backing_object->type != OBJT_SWAP) {
2321 			backing_object = NULL;
2322 			break;
2323 		}
2324 
2325 		/*
2326 		 * Hold (token lock) the backing_object and retest conditions.
2327 		 */
2328 		vm_object_hold(backing_object);
2329 		if (backing_object != object->backing_object ||
2330 		    (backing_object->type != OBJT_DEFAULT &&
2331 		     backing_object->type != OBJT_SWAP)) {
2332 			vm_object_drop(backing_object);
2333 			continue;
2334 		}
2335 
2336 		/*
2337 		 * Chain-lock the backing object too because if we
2338 		 * successfully merge its pages into the top object we
2339 		 * will collapse backing_object->backing_object as the
2340 		 * new backing_object.  Re-check that it is still our
2341 		 * backing object.
2342 		 */
2343 		vm_object_chain_acquire(backing_object, 0);
2344 		if (backing_object != object->backing_object) {
2345 			vm_object_chain_release(backing_object);
2346 			vm_object_drop(backing_object);
2347 			continue;
2348 		}
2349 
2350 		/*
2351 		 * We check the backing object first, because it is most
2352 		 * likely not collapsible.
2353 		 */
2354 		if (backing_object->handle != NULL ||
2355 		    (backing_object->type != OBJT_DEFAULT &&
2356 		     backing_object->type != OBJT_SWAP) ||
2357 		    (backing_object->flags & OBJ_DEAD) ||
2358 		    object->handle != NULL ||
2359 		    (object->type != OBJT_DEFAULT &&
2360 		     object->type != OBJT_SWAP) ||
2361 		    (object->flags & OBJ_DEAD)) {
2362 			break;
2363 		}
2364 
2365 		/*
2366 		 * If paging is in progress we can't do a normal collapse.
2367 		 */
2368 		if (object->paging_in_progress != 0 ||
2369 		    backing_object->paging_in_progress != 0
2370 		) {
2371 			vm_object_qcollapse(object, backing_object);
2372 			break;
2373 		}
2374 
2375 		/*
2376 		 * We know that we can either collapse the backing object (if
2377 		 * the parent is the only reference to it) or (perhaps) have
2378 		 * the parent bypass the object if the parent happens to shadow
2379 		 * all the resident pages in the entire backing object.
2380 		 *
2381 		 * This is ignoring pager-backed pages such as swap pages.
2382 		 * vm_object_backing_scan fails the shadowing test in this
2383 		 * case.
2384 		 */
2385 		if (backing_object->ref_count == 1) {
2386 			/*
2387 			 * If there is exactly one reference to the backing
2388 			 * object, we can collapse it into the parent.
2389 			 */
2390 			KKASSERT(object->backing_object == backing_object);
2391 			vm_object_backing_scan(object, backing_object,
2392 					       OBSC_COLLAPSE_WAIT);
2393 
2394 			/*
2395 			 * Move the pager from backing_object to object.
2396 			 */
2397 			if (backing_object->type == OBJT_SWAP) {
2398 				vm_object_pip_add(backing_object, 1);
2399 
2400 				/*
2401 				 * scrap the paging_offset junk and do a
2402 				 * discrete copy.  This also removes major
2403 				 * assumptions about how the swap-pager
2404 				 * works from where it doesn't belong.  The
2405 				 * new swapper is able to optimize the
2406 				 * destroy-source case.
2407 				 */
2408 				vm_object_pip_add(object, 1);
2409 				swap_pager_copy(backing_object, object,
2410 				    OFF_TO_IDX(object->backing_object_offset),
2411 				    TRUE);
2412 				vm_object_pip_wakeup(object);
2413 				vm_object_pip_wakeup(backing_object);
2414 			}
2415 
2416 			/*
2417 			 * Object now shadows whatever backing_object did.
2418 			 * Remove object from backing_object's shadow_list.
2419 			 *
2420 			 * Removing object from backing_object's shadow list
2421 			 * requires releasing object, which we will do below.
2422 			 */
2423 			KKASSERT(object->backing_object == backing_object);
2424 			if (object->flags & OBJ_ONSHADOW) {
2425 				LIST_REMOVE(object, shadow_list);
2426 				backing_object->shadow_count--;
2427 				atomic_add_int(&backing_object->generation, 1);
2428 				vm_object_clear_flag(object, OBJ_ONSHADOW);
2429 			}
2430 
2431 			/*
2432 			 * backing_object->backing_object moves from within
2433 			 * backing_object to within object.
2434 			 *
2435 			 * OBJT_VNODE bbobj's should have empty shadow lists.
2436 			 */
2437 			while ((bbobj = backing_object->backing_object) != NULL) {
2438 				if (bbobj->type == OBJT_VNODE)
2439 					vm_object_hold_shared(bbobj);
2440 				else
2441 					vm_object_hold(bbobj);
2442 				if (bbobj == backing_object->backing_object)
2443 					break;
2444 				vm_object_drop(bbobj);
2445 			}
2446 
2447 			/*
2448 			 * We are removing backing_object from bbobj's
2449 			 * shadow list and adding object to bbobj's shadow
2450 			 * list, so the ref_count on bbobj is unchanged.
2451 			 */
2452 			if (bbobj) {
2453 				if (backing_object->flags & OBJ_ONSHADOW) {
2454 					/* not locked exclusively if vnode */
2455 					KKASSERT(bbobj->type != OBJT_VNODE);
2456 					LIST_REMOVE(backing_object,
2457 						    shadow_list);
2458 					bbobj->shadow_count--;
2459 					atomic_add_int(&bbobj->generation, 1);
2460 					vm_object_clear_flag(backing_object,
2461 							     OBJ_ONSHADOW);
2462 				}
2463 				backing_object->backing_object = NULL;
2464 			}
2465 			object->backing_object = bbobj;
2466 			if (bbobj) {
2467 				if (bbobj->type != OBJT_VNODE) {
2468 					LIST_INSERT_HEAD(&bbobj->shadow_head,
2469 							 object, shadow_list);
2470 					bbobj->shadow_count++;
2471 					atomic_add_int(&bbobj->generation, 1);
2472 					vm_object_set_flag(object,
2473 							   OBJ_ONSHADOW);
2474 				}
2475 			}
2476 
2477 			object->backing_object_offset +=
2478 				backing_object->backing_object_offset;
2479 
2480 			vm_object_drop(bbobj);
2481 
2482 			/*
2483 			 * Discard the old backing_object.  Nothing should be
2484 			 * able to ref it, other than a vm_map_split(),
2485 			 * and vm_map_split() will stall on our chain lock.
2486 			 * And we control the parent so it shouldn't be
2487 			 * possible for it to go away either.
2488 			 *
2489 			 * Since the backing object has no pages, no pager
2490 			 * left, and no object references within it, all
2491 			 * that is necessary is to dispose of it.
2492 			 */
2493 			KASSERT(backing_object->ref_count == 1,
2494 				("backing_object %p was somehow "
2495 				 "re-referenced during collapse!",
2496 				 backing_object));
2497 			KASSERT(RB_EMPTY(&backing_object->rb_memq),
2498 				("backing_object %p somehow has left "
2499 				 "over pages during collapse!",
2500 				 backing_object));
2501 
2502 			/*
2503 			 * The object can be destroyed.
2504 			 *
2505 			 * XXX just fall through and dodealloc instead
2506 			 *     of forcing destruction?
2507 			 */
2508 			atomic_add_int(&backing_object->ref_count, -1);
2509 #if defined(DEBUG_LOCKS)
2510 			debugvm_object_add(backing_object, "collapse", 1, -1);
2511 #endif
2512 			if ((backing_object->flags & OBJ_DEAD) == 0)
2513 				vm_object_terminate(backing_object);
2514 			object_collapses++;
2515 			dodealloc = 0;
2516 		} else {
2517 			/*
2518 			 * If we do not entirely shadow the backing object,
2519 			 * there is nothing we can do so we give up.
2520 			 */
2521 			if (vm_object_backing_scan(object, backing_object,
2522 						OBSC_TEST_ALL_SHADOWED) == 0) {
2523 				break;
2524 			}
2525 
2526 			/*
2527 			 * bbobj is backing_object->backing_object.  Since
2528 			 * object completely shadows backing_object we can
2529 			 * bypass it and become backed by bbobj instead.
2530 			 *
2531 			 * The shadow list for vnode backing objects is not
2532 			 * used and a shared hold is allowed.
2533 			 */
2534 			while ((bbobj = backing_object->backing_object) != NULL) {
2535 				if (bbobj->type == OBJT_VNODE)
2536 					vm_object_hold_shared(bbobj);
2537 				else
2538 					vm_object_hold(bbobj);
2539 				if (bbobj == backing_object->backing_object)
2540 					break;
2541 				vm_object_drop(bbobj);
2542 			}
2543 
2544 			/*
2545 			 * Make object shadow bbobj instead of backing_object.
2546 			 * Remove object from backing_object's shadow list.
2547 			 *
2548 			 * Deallocating backing_object will not remove
2549 			 * it, since its reference count is at least 2.
2550 			 *
2551 			 * Removing object from backing_object's shadow
2552 			 * list requires releasing a ref, which we do
2553 			 * below by setting dodealloc to 1.
2554 			 */
2555 			KKASSERT(object->backing_object == backing_object);
2556 			if (object->flags & OBJ_ONSHADOW) {
2557 				LIST_REMOVE(object, shadow_list);
2558 				backing_object->shadow_count--;
2559 				atomic_add_int(&backing_object->generation, 1);
2560 				vm_object_clear_flag(object, OBJ_ONSHADOW);
2561 			}
2562 
2563 			/*
2564 			 * Add a ref to bbobj, bbobj now shadows object.
2565 			 *
2566 			 * NOTE: backing_object->backing_object still points
2567 			 *	 to bbobj.  That relationship remains intact
2568 			 *	 because backing_object has > 1 ref, so
2569 			 *	 someone else is pointing to it (hence why
2570 			 *	 we can't collapse it into object and can
2571 			 *	 only handle the all-shadowed bypass case).
2572 			 */
2573 			if (bbobj) {
2574 				if (bbobj->type != OBJT_VNODE) {
2575 					vm_object_chain_wait(bbobj, 0);
2576 					vm_object_reference_locked(bbobj);
2577 					LIST_INSERT_HEAD(&bbobj->shadow_head,
2578 							 object, shadow_list);
2579 					bbobj->shadow_count++;
2580 					atomic_add_int(&bbobj->generation, 1);
2581 					vm_object_set_flag(object,
2582 							   OBJ_ONSHADOW);
2583 				} else {
2584 					vm_object_reference_quick(bbobj);
2585 				}
2586 				object->backing_object_offset +=
2587 					backing_object->backing_object_offset;
2588 				object->backing_object = bbobj;
2589 				vm_object_drop(bbobj);
2590 			} else {
2591 				object->backing_object = NULL;
2592 			}
2593 
2594 			/*
2595 			 * Drop the reference count on backing_object.  To
2596 			 * handle ref_count races properly we can't assume
2597 			 * that the ref_count is still at least 2 so we
2598 			 * have to actually call vm_object_deallocate()
2599 			 * (after clearing the chainlock).
2600 			 */
2601 			object_bypasses++;
2602 			dodealloc = 1;
2603 		}
2604 
2605 		/*
2606 		 * Ok, we want to loop on the new object->bbobj association,
2607 		 * possibly collapsing it further.  However if dodealloc is
2608 		 * non-zero we have to deallocate the backing_object which
2609 		 * itself can potentially undergo a collapse, creating a
2610 		 * recursion depth issue with the LWKT token subsystem.
2611 		 *
2612 		 * In the case where we must deallocate the backing_object
2613 		 * it is possible now that the backing_object has a single
2614 		 * shadow count on some other object (not represented here
2615 		 * as yet), since it no longer shadows us.  Thus when we
2616 		 * call vm_object_deallocate() it may attempt to collapse
2617 		 * itself into its remaining parent.
2618 		 */
2619 		if (dodealloc) {
2620 			struct vm_object_dealloc_list *dtmp;
2621 
2622 			vm_object_chain_release(backing_object);
2623 			vm_object_unlock(backing_object);
2624 			/* backing_object remains held */
2625 
2626 			/*
2627 			 * Auto-deallocation list for caller convenience.
2628 			 */
2629 			if (dlistp == NULL)
2630 				dlistp = &dlist;
2631 
2632 			dtmp = kmalloc(sizeof(*dtmp), M_TEMP, M_WAITOK);
2633 			dtmp->object = backing_object;
2634 			dtmp->next = *dlistp;
2635 			*dlistp = dtmp;
2636 		} else {
2637 			vm_object_chain_release(backing_object);
2638 			vm_object_drop(backing_object);
2639 		}
2640 		/* backing_object = NULL; not needed */
2641 		/* loop */
2642 	}
2643 
2644 	/*
2645 	 * Clean up any left over backing_object
2646 	 */
2647 	if (backing_object) {
2648 		vm_object_chain_release(backing_object);
2649 		vm_object_drop(backing_object);
2650 	}
2651 
2652 	/*
2653 	 * Clean up any auto-deallocation list.  This is a convenience
2654 	 * for top-level callers so they don't have to pass &dlist.
2655 	 * Do not clean up any caller-passed dlistp, the caller will
2656 	 * do that.
2657 	 */
2658 	if (dlist)
2659 		vm_object_deallocate_list(&dlist);
2660 
2661 }
2662 
2663 /*
2664  * vm_object_collapse() may collect additional objects in need of
2665  * deallocation.  This routine deallocates these objects.  The
2666  * deallocation itself can trigger additional collapses (which the
2667  * deallocate function takes care of).  This procedure is used to
2668  * reduce procedural recursion since these vm_object shadow chains
2669  * can become quite long.
2670  */
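/*
 * Typical pattern (sketch only): a caller that initiates a collapse
 * passes a local list head and flushes it after the collapse returns:
 *
 *	struct vm_object_dealloc_list *dlist = NULL;
 *
 *	vm_object_collapse(object, &dlist);
 *	...
 *	vm_object_deallocate_list(&dlist);
 */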
2671 void
2672 vm_object_deallocate_list(struct vm_object_dealloc_list **dlistp)
2673 {
2674 	struct vm_object_dealloc_list *dlist;
2675 
2676 	while ((dlist = *dlistp) != NULL) {
2677 		*dlistp = dlist->next;
2678 		vm_object_lock(dlist->object);
2679 		vm_object_deallocate_locked(dlist->object);
2680 		vm_object_drop(dlist->object);
2681 		kfree(dlist, M_TEMP);
2682 	}
2683 }
2684 
2685 /*
2686  * Removes all physical pages in the specified object range from the
2687  * object's list of pages.
2688  *
2689  * No requirements.
2690  */
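/*
 * Usage sketch (illustrative only, names are hypothetical): a
 * truncation path might throw away all pages beyond a new object size
 * with something like
 *
 *	vm_object_page_remove(object, new_size_in_pages, 0, FALSE);
 *
 * Passing end == 0 removes pages through the end of the object.
 */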
2691 static int vm_object_page_remove_callback(vm_page_t p, void *data);
2692 
2693 void
2694 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
2695 		      boolean_t clean_only)
2696 {
2697 	struct rb_vm_page_scan_info info;
2698 	int all;
2699 
2700 	/*
2701 	 * Degenerate cases and assertions
2702 	 */
2703 	vm_object_hold(object);
2704 	if (object == NULL ||
2705 	    (object->resident_page_count == 0 && object->swblock_count == 0)) {
2706 		vm_object_drop(object);
2707 		return;
2708 	}
2709 	KASSERT(object->type != OBJT_PHYS,
2710 		("attempt to remove pages from a physical object"));
2711 
2712 	/*
2713 	 * Indicate that paging is occurring on the object
2714 	 */
2715 	vm_object_pip_add(object, 1);
2716 
2717 	/*
2718 	 * Figure out the actual removal range and whether we are removing
2719 	 * the entire contents of the object or not.  If removing the entire
2720 	 * contents, be sure to get all pages, even those that might be
2721 	 * beyond the end of the object.
2722 	 */
2723 	info.object = object;
2724 	info.start_pindex = start;
2725 	if (end == 0)
2726 		info.end_pindex = (vm_pindex_t)-1;
2727 	else
2728 		info.end_pindex = end - 1;
2729 	info.limit = clean_only;
2730 	info.count = 0;
2731 	all = (start == 0 && info.end_pindex >= object->size - 1);
2732 
2733 	/*
2734 	 * Loop until we are sure we have gotten them all.
2735 	 */
2736 	do {
2737 		info.error = 0;
2738 		vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
2739 					vm_object_page_remove_callback, &info);
2740 	} while (info.error);
2741 
2742 	/*
2743 	 * Remove any related swap if throwing away pages, or for
2744 	 * non-swap objects (the swap is a clean copy in that case).
2745 	 */
2746 	if (object->type != OBJT_SWAP || clean_only == FALSE) {
2747 		if (all)
2748 			swap_pager_freespace_all(object);
2749 		else
2750 			swap_pager_freespace(object, info.start_pindex,
2751 			     info.end_pindex - info.start_pindex + 1);
2752 	}
2753 
2754 	/*
2755 	 * Cleanup
2756 	 */
2757 	vm_object_pip_wakeup(object);
2758 	vm_object_drop(object);
2759 }
2760 
2761 /*
2762  * The caller must hold the object.
2763  *
2764  * NOTE: User yields are allowed when removing more than one page, but not
2765  *	 allowed if only removing one page (the path for single page removals
2766  *	 might hold a spinlock).
2767  */
2768 static int
2769 vm_object_page_remove_callback(vm_page_t p, void *data)
2770 {
2771 	struct rb_vm_page_scan_info *info = data;
2772 
2773 	if (info->object != p->object ||
2774 	    p->pindex < info->start_pindex ||
2775 	    p->pindex > info->end_pindex) {
2776 		kprintf("vm_object_page_remove_callbackA: obj/pg race %p/%p\n",
2777 			info->object, p);
2778 		return(0);
2779 	}
2780 	if (vm_page_busy_try(p, TRUE)) {
2781 		vm_page_sleep_busy(p, TRUE, "vmopar");
2782 		info->error = 1;
2783 		return(0);
2784 	}
2785 	if (info->object != p->object) {
2786 		/* this should never happen */
2787 		kprintf("vm_object_page_remove_callbackB: obj/pg race %p/%p\n",
2788 			info->object, p);
2789 		vm_page_wakeup(p);
2790 		return(0);
2791 	}
2792 
2793 	/*
2794 	 * Wired pages cannot be destroyed, but they can be invalidated
2795 	 * and we do so if clean_only (limit) is not set.
2796 	 *
2797 	 * WARNING!  The page may be wired due to being part of a buffer
2798 	 *	     cache buffer, and the buffer might be marked B_CACHE.
2799 	 *	     This is fine as part of a truncation but VFSs must be
2800 	 *	     sure to fix the buffer up when re-extending the file.
2801 	 *
2802 	 * NOTE!     PG_NEED_COMMIT is ignored.
2803 	 */
2804 	if (p->wire_count != 0) {
2805 		vm_page_protect(p, VM_PROT_NONE);
2806 		if (info->limit == 0)
2807 			p->valid = 0;
2808 		vm_page_wakeup(p);
2809 		goto done;
2810 	}
2811 
2812 	/*
2813 	 * limit is our clean_only flag.  If set and the page is dirty or
2814 	 * requires a commit, do not free it.  If set and the page is being
2815 	 * held by someone, do not free it.
2816 	 */
2817 	if (info->limit && p->valid) {
2818 		vm_page_test_dirty(p);
2819 		if ((p->valid & p->dirty) || (p->flags & PG_NEED_COMMIT)) {
2820 			vm_page_wakeup(p);
2821 			goto done;
2822 		}
2823 	}
2824 
2825 	/*
2826 	 * Destroy the page
2827 	 */
2828 	vm_page_protect(p, VM_PROT_NONE);
2829 	vm_page_free(p);
2830 
2831 	/*
2832 	 * Must be at end to avoid SMP races, caller holds object token
2833 	 */
2834 done:
2835 	if ((++info->count & 63) == 0)
2836 		lwkt_user_yield();
2837 
2838 	return(0);
2839 }
2840 
2841 /*
2842  * Try to extend prev_object into an adjoining region of virtual
2843  * memory, return TRUE on success.
2844  *
2845  * The caller does not need to hold (prev_object) but must have a stable
2846  * pointer to it (typically by holding the vm_map locked).
2847  *
2848  * This function only works for anonymous memory objects which either
2849  * have (a) one reference or (b) we are extending the object's size.
2850  * Otherwise the related VM pages we want to use for the object might
2851  * be in use by another mapping.
2852  */
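/*
 * Usage sketch (illustrative only; the real caller lives in the vm_map
 * insertion path): when mapping anonymous memory immediately after an
 * existing anonymous mapping, the new range can often reuse the
 * previous object:
 *
 *	if (vm_object_coalesce(prev_object, prev_pindex,
 *			       prev_size, next_size)) {
 *		(extend the previous map entry instead of
 *		 allocating a new object)
 *	}
 */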
2853 boolean_t
2854 vm_object_coalesce(vm_object_t prev_object, vm_pindex_t prev_pindex,
2855 		   vm_size_t prev_size, vm_size_t next_size)
2856 {
2857 	vm_pindex_t next_pindex;
2858 
2859 	if (prev_object == NULL)
2860 		return (TRUE);
2861 
2862 	vm_object_hold(prev_object);
2863 
2864 	if (prev_object->type != OBJT_DEFAULT &&
2865 	    prev_object->type != OBJT_SWAP) {
2866 		vm_object_drop(prev_object);
2867 		return (FALSE);
2868 	}
2869 
2870 	/*
2871 	 * Try to collapse the object first
2872 	 */
2873 	vm_object_chain_acquire(prev_object, 0);
2874 	vm_object_collapse(prev_object, NULL);
2875 
2876 	/*
2877 	 * We can't coalesce if we shadow another object (figuring out the
2878 	 * relationships becomes too complex).
2879 	 */
2880 	if (prev_object->backing_object != NULL) {
2881 		vm_object_chain_release(prev_object);
2882 		vm_object_drop(prev_object);
2883 		return (FALSE);
2884 	}
2885 
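	/*
	 * prev_size and next_size arrive in bytes; convert them to pages.
	 * next_pindex is the page index within prev_object at which the
	 * new range would begin.
	 */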
2886 	prev_size >>= PAGE_SHIFT;
2887 	next_size >>= PAGE_SHIFT;
2888 	next_pindex = prev_pindex + prev_size;
2889 
2890 	/*
2891 	 * We can't coalesce if the object has more than one reference
2892 	 * unless we are extending it into newly minted space.
2893 	 */
2894 	if (prev_object->ref_count > 1 &&
2895 	    prev_object->size != next_pindex) {
2896 		vm_object_chain_release(prev_object);
2897 		vm_object_drop(prev_object);
2898 		return (FALSE);
2899 	}
2900 
2901 	/*
2902 	 * Remove any pages that may still be in the object from a previous
2903 	 * deallocation.
2904 	 */
2905 	if (next_pindex < prev_object->size) {
2906 		vm_object_page_remove(prev_object,
2907 				      next_pindex,
2908 				      next_pindex + next_size, FALSE);
2909 		if (prev_object->type == OBJT_SWAP)
2910 			swap_pager_freespace(prev_object,
2911 					     next_pindex, next_size);
2912 	}
2913 
2914 	/*
2915 	 * Extend the object if necessary.
2916 	 */
2917 	if (next_pindex + next_size > prev_object->size)
2918 		prev_object->size = next_pindex + next_size;
2919 	vm_object_chain_release(prev_object);
2920 	vm_object_drop(prev_object);
2921 
2922 	return (TRUE);
2923 }
2924 
2925 /*
2926  * Make the object writable and flag it as possibly being dirty.
2927  *
2928  * The object might not be held (or might be held but held shared),
2929  * the related vnode is probably not held either.  Object and vnode are
2930  * stable by virtue of the vm_page busied by the caller preventing
2931  * destruction.
2932  *
2933  * If the related mount is flagged MNTK_THR_SYNC we need to call
2934  * vsetobjdirty().  Filesystems using this option usually shortcut
2935  * synchronization by only scanning the syncer list.
2936  */
2937 void
2938 vm_object_set_writeable_dirty(vm_object_t object)
2939 {
2940 	struct vnode *vp;
2941 
2942 	/*vm_object_assert_held(object);*/
2943 	/*
2944 	 * Avoid contention in vm fault path by checking the state before
2945 	 * issuing an atomic op on it.
2946 	 */
2947 	if ((object->flags & (OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY)) !=
2948 	    (OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY)) {
2949 		vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
2950 	}
2951 	if (object->type == OBJT_VNODE &&
2952 	    (vp = (struct vnode *)object->handle) != NULL) {
2953 		if ((vp->v_flag & VOBJDIRTY) == 0) {
2954 			if (vp->v_mount &&
2955 			    (vp->v_mount->mnt_kern_flag & MNTK_THR_SYNC)) {
2956 				/*
2957 				 * New style THR_SYNC places vnodes on the
2958 				 * syncer list more deterministically.
2959 				 */
2960 				vsetobjdirty(vp);
2961 			} else {
2962 				/*
2963 				 * Old style scan would not necessarily place
2964 				 * a vnode on the syncer list when possibly
2965 				 * modified via mmap.
2966 				 */
2967 				vsetflags(vp, VOBJDIRTY);
2968 			}
2969 		}
2970 	}
2971 }
2972 
2973 #include "opt_ddb.h"
2974 #ifdef DDB
2975 #include <sys/cons.h>
2976 
2977 #include <ddb/ddb.h>
2978 
2979 static int	_vm_object_in_map (vm_map_t map, vm_object_t object,
2980 				       vm_map_entry_t entry);
2981 static int	vm_object_in_map (vm_object_t object);
2982 
2983 /*
2984  * The caller must hold the object.
2985  */
2986 static int
2987 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
2988 {
2989 	vm_map_t tmpm;
2990 	vm_map_entry_t tmpe;
2991 	vm_object_t obj, nobj;
2992 	int entcount;
2993 
2994 	if (map == NULL)
2995 		return 0;
2996 	if (entry == NULL) {
2997 		tmpe = RB_MIN(vm_map_rb_tree, &map->rb_root);
2998 		entcount = map->nentries;
2999 		while (entcount-- && tmpe) {
3000 			if( _vm_object_in_map(map, object, tmpe)) {
3001 				return 1;
3002 			}
3003 			tmpe = vm_map_rb_tree_RB_NEXT(tmpe);
3004 		}
3005 		return (0);
3006 	}
3007 	switch(entry->maptype) {
3008 	case VM_MAPTYPE_SUBMAP:
3009 		tmpm = entry->object.sub_map;
3010 		tmpe = RB_MIN(vm_map_rb_tree, &tmpm->rb_root);
3011 		entcount = tmpm->nentries;
3012 		while (entcount-- && tmpe) {
3013 			if( _vm_object_in_map(tmpm, object, tmpe)) {
3014 				return 1;
3015 			}
3016 			tmpe = vm_map_rb_tree_RB_NEXT(tmpe);
3017 		}
3018 		break;
3019 	case VM_MAPTYPE_NORMAL:
3020 	case VM_MAPTYPE_VPAGETABLE:
3021 		obj = entry->object.vm_object;
3022 		while (obj) {
3023 			if (obj == object) {
3024 				if (obj != entry->object.vm_object)
3025 					vm_object_drop(obj);
3026 				return 1;
3027 			}
3028 			while ((nobj = obj->backing_object) != NULL) {
3029 				vm_object_hold(nobj);
3030 				if (nobj == obj->backing_object)
3031 					break;
3032 				vm_object_drop(nobj);
3033 			}
3034 			if (obj != entry->object.vm_object) {
3035 				if (nobj)
3036 					vm_object_lock_swap();
3037 				vm_object_drop(obj);
3038 			}
3039 			obj = nobj;
3040 		}
3041 		break;
3042 	default:
3043 		break;
3044 	}
3045 	return 0;
3046 }
3047 
3048 static int vm_object_in_map_callback(struct proc *p, void *data);
3049 
3050 struct vm_object_in_map_info {
3051 	vm_object_t object;
3052 	int rv;
3053 };
3054 
3055 /*
3056  * Debugging only
3057  */
3058 static int
3059 vm_object_in_map(vm_object_t object)
3060 {
3061 	struct vm_object_in_map_info info;
3062 
3063 	info.rv = 0;
3064 	info.object = object;
3065 
3066 	allproc_scan(vm_object_in_map_callback, &info, 0);
3067 	if (info.rv)
3068 		return 1;
3069 	if( _vm_object_in_map(&kernel_map, object, 0))
3070 		return 1;
3071 	if( _vm_object_in_map(&pager_map, object, 0))
3072 		return 1;
3073 	if( _vm_object_in_map(&buffer_map, object, 0))
3074 		return 1;
3075 	return 0;
3076 }
3077 
3078 /*
3079  * Debugging only
3080  */
3081 static int
3082 vm_object_in_map_callback(struct proc *p, void *data)
3083 {
3084 	struct vm_object_in_map_info *info = data;
3085 
3086 	if (p->p_vmspace) {
3087 		if (_vm_object_in_map(&p->p_vmspace->vm_map, info->object, 0)) {
3088 			info->rv = 1;
3089 			return -1;
3090 		}
3091 	}
3092 	return (0);
3093 }
3094 
3095 DB_SHOW_COMMAND(vmochk, vm_object_check)
3096 {
3097 	struct vm_object_hash *hash;
3098 	vm_object_t object;
3099 	int n;
3100 
3101 	/*
3102 	 * make sure that internal objs are in a map somewhere
3103 	 * and none have zero ref counts.
3104 	 */
3105 	for (n = 0; n < VMOBJ_HSIZE; ++n) {
3106 		hash = &vm_object_hash[n];
3107 		for (object = TAILQ_FIRST(&hash->list);
3108 				object != NULL;
3109 				object = TAILQ_NEXT(object, object_list)) {
3110 			if (object->type == OBJT_MARKER)
3111 				continue;
3112 			if (object->handle != NULL ||
3113 			    (object->type != OBJT_DEFAULT &&
3114 			     object->type != OBJT_SWAP)) {
3115 				continue;
3116 			}
3117 			if (object->ref_count == 0) {
3118 				db_printf("vmochk: internal obj has "
3119 					  "zero ref count: %ld\n",
3120 					  (long)object->size);
3121 			}
3122 			if (vm_object_in_map(object))
3123 				continue;
3124 			db_printf("vmochk: internal obj is not in a map: "
3125 				  "ref: %d, size: %lu: 0x%lx, "
3126 				  "backing_object: %p\n",
3127 				  object->ref_count, (u_long)object->size,
3128 				  (u_long)object->size,
3129 				  (void *)object->backing_object);
3130 		}
3131 	}
3132 }
3133 
3134 /*
3135  * Debugging only
3136  */
3137 DB_SHOW_COMMAND(object, vm_object_print_static)
3138 {
3139 	/* XXX convert args. */
3140 	vm_object_t object = (vm_object_t)addr;
3141 	boolean_t full = have_addr;
3142 
3143 	vm_page_t p;
3144 
3145 	/* XXX count is an (unused) arg.  Avoid shadowing it. */
3146 #define	count	was_count
3147 
3148 	int count;
3149 
3150 	if (object == NULL)
3151 		return;
3152 
3153 	db_iprintf(
3154 	    "Object %p: type=%d, size=0x%lx, res=%ld, ref=%d, flags=0x%x\n",
3155 	    object, (int)object->type, (u_long)object->size,
3156 	    object->resident_page_count, object->ref_count, object->flags);
3157 	/*
3158 	 * XXX no %qd in kernel.  Truncate object->backing_object_offset.
3159 	 */
3160 	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%lx\n",
3161 	    object->shadow_count,
3162 	    object->backing_object ? object->backing_object->ref_count : 0,
3163 	    object->backing_object, (long)object->backing_object_offset);
3164 
3165 	if (!full)
3166 		return;
3167 
3168 	db_indent += 2;
3169 	count = 0;
3170 	RB_FOREACH(p, vm_page_rb_tree, &object->rb_memq) {
3171 		if (count == 0)
3172 			db_iprintf("memory:=");
3173 		else if (count == 6) {
3174 			db_printf("\n");
3175 			db_iprintf(" ...");
3176 			count = 0;
3177 		} else
3178 			db_printf(",");
3179 		count++;
3180 
3181 		db_printf("(off=0x%lx,page=0x%lx)",
3182 		    (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p));
3183 	}
3184 	if (count != 0)
3185 		db_printf("\n");
3186 	db_indent -= 2;
3187 }
3188 
3189 /* XXX. */
3190 #undef count
3191 
3192 /*
3193  * XXX need this non-static entry for calling from vm_map_print.
3194  *
3195  * Debugging only
3196  */
3197 void
3198 vm_object_print(/* db_expr_t */ long addr,
3199 		boolean_t have_addr,
3200 		/* db_expr_t */ long count,
3201 		char *modif)
3202 {
3203 	vm_object_print_static(addr, have_addr, count, modif);
3204 }
3205 
3206 /*
3207  * Debugging only
3208  */
3209 DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
3210 {
3211 	struct vm_object_hash *hash;
3212 	vm_object_t object;
3213 	int nl = 0;
3214 	int c;
3215 	int n;
3216 
3217 	for (n = 0; n < VMOBJ_HSIZE; ++n) {
3218 		hash = &vm_object_hash[n];
3219 		for (object = TAILQ_FIRST(&hash->list);
3220 				object != NULL;
3221 				object = TAILQ_NEXT(object, object_list)) {
3222 			vm_pindex_t idx, fidx;
3223 			vm_pindex_t osize;
3224 			vm_paddr_t pa = -1, padiff;
3225 			int rcount;
3226 			vm_page_t m;
3227 
3228 			if (object->type == OBJT_MARKER)
3229 				continue;
3230 			db_printf("new object: %p\n", (void *)object);
3231 			if ( nl > 18) {
3232 				c = cngetc();
3233 				if (c != ' ')
3234 					return;
3235 				nl = 0;
3236 			}
3237 			nl++;
3238 			rcount = 0;
3239 			fidx = 0;
3240 			osize = object->size;
3241 			if (osize > 128)
3242 				osize = 128;
3243 			for (idx = 0; idx < osize; idx++) {
3244 				m = vm_page_lookup(object, idx);
3245 				if (m == NULL) {
3246 					if (rcount) {
3247 						db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
3248 							(long)fidx, rcount, (long)pa);
3249 						if ( nl > 18) {
3250 							c = cngetc();
3251 							if (c != ' ')
3252 								return;
3253 							nl = 0;
3254 						}
3255 						nl++;
3256 						rcount = 0;
3257 					}
3258 					continue;
3259 				}
3260 
3261 				if (rcount &&
3262 					(VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
3263 					++rcount;
3264 					continue;
3265 				}
3266 				if (rcount) {
3267 					padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
3268 					padiff >>= PAGE_SHIFT;
3269 					padiff &= PQ_L2_MASK;
3270 					if (padiff == 0) {
3271 						pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
3272 						++rcount;
3273 						continue;
3274 					}
3275 					db_printf(" index(%ld)run(%d)pa(0x%lx)",
3276 						(long)fidx, rcount, (long)pa);
3277 					db_printf("pd(%ld)\n", (long)padiff);
3278 					if ( nl > 18) {
3279 						c = cngetc();
3280 						if (c != ' ')
3281 							return;
3282 						nl = 0;
3283 					}
3284 					nl++;
3285 				}
3286 				fidx = idx;
3287 				pa = VM_PAGE_TO_PHYS(m);
3288 				rcount = 1;
3289 			}
3290 			if (rcount) {
3291 				db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
3292 					(long)fidx, rcount, (long)pa);
3293 				if ( nl > 18) {
3294 					c = cngetc();
3295 					if (c != ' ')
3296 						return;
3297 					nl = 0;
3298 				}
3299 				nl++;
3300 			}
3301 		}
3302 	}
3303 }
3304 #endif /* DDB */
3305