xref: /dragonfly/sys/vm/vm_object.c (revision ad9f8794)
1 /*
2  * (MPSAFE)
3  *
4  * Copyright (c) 1991, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * The Mach Operating System project at Carnegie-Mellon University.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
39  *
40  *
41  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42  * All rights reserved.
43  *
44  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
45  *
46  * Permission to use, copy, modify and distribute this software and
47  * its documentation is hereby granted, provided that both the copyright
48  * notice and this permission notice appear in all copies of the
49  * software, derivative works or modified versions, and any portions
50  * thereof, and that both notices appear in supporting documentation.
51  *
52  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
53  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
54  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
55  *
56  * Carnegie Mellon requests users of this software to return to
57  *
58  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
59  *  School of Computer Science
60  *  Carnegie Mellon University
61  *  Pittsburgh PA 15213-3890
62  *
63  * any improvements or extensions that they make and grant Carnegie the
64  * rights to redistribute these changes.
65  *
66  * $FreeBSD: src/sys/vm/vm_object.c,v 1.171.2.8 2003/05/26 19:17:56 alc Exp $
67  */
68 
69 /*
70  *	Virtual memory object module.
71  */
72 
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/proc.h>		/* for curproc, pageproc */
76 #include <sys/thread.h>
77 #include <sys/vnode.h>
78 #include <sys/vmmeter.h>
79 #include <sys/mman.h>
80 #include <sys/mount.h>
81 #include <sys/kernel.h>
82 #include <sys/sysctl.h>
83 #include <sys/refcount.h>
84 
85 #include <vm/vm.h>
86 #include <vm/vm_param.h>
87 #include <vm/pmap.h>
88 #include <vm/vm_map.h>
89 #include <vm/vm_object.h>
90 #include <vm/vm_page.h>
91 #include <vm/vm_pageout.h>
92 #include <vm/vm_pager.h>
93 #include <vm/swap_pager.h>
94 #include <vm/vm_kern.h>
95 #include <vm/vm_extern.h>
96 #include <vm/vm_zone.h>
97 
98 #define EASY_SCAN_FACTOR	8
99 
100 static void	vm_object_qcollapse(vm_object_t object);
101 static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
102 					     int pagerflags);
103 static void	vm_object_lock_init(vm_object_t);
104 static void	vm_object_hold_wait(vm_object_t);
105 
106 
107 /*
108  *	Virtual memory objects maintain the actual data
109  *	associated with allocated virtual memory.  A given
110  *	page of memory exists within exactly one object.
111  *
112  *	An object is only deallocated when all "references"
113  *	are given up.  Only one "reference" to a given
114  *	region of an object should be writeable.
115  *
116  *	Associated with each object is a list of all resident
117  *	memory pages belonging to that object; this list is
118  *	maintained by the "vm_page" module, and locked by the object's
119  *	lock.
120  *
121  *	Each object also records a "pager" routine which is
122  *	used to retrieve (and store) pages to the proper backing
123  *	storage.  In addition, objects may be backed by other
124  *	objects from which they were virtual-copied.
125  *
126  *	The only items within the object structure which are
127  *	modified after time of creation are:
128  *		reference count		locked by object's lock
129  *		pager routine		locked by object's lock
130  *
131  */
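/*
 * Illustrative sketch (not from this revision): the minimal lifecycle of
 * an anonymous object using the routines defined below.  The object size
 * (in pages) and the local variable name are hypothetical.
 *
 *	vm_object_t obj;
 *
 *	obj = vm_object_allocate(OBJT_DEFAULT, 16);	(ref_count == 1)
 *	vm_object_reference(obj);			(ref_count == 2)
 *	vm_object_deallocate(obj);			(ref_count == 1)
 *	vm_object_deallocate(obj);			(last ref, object terminated)
 */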
132 
133 struct object_q vm_object_list;		/* locked by vmobj_token */
134 struct vm_object kernel_object;
135 
136 static long vm_object_count;		/* locked by vmobj_token */
137 extern int vm_pageout_page_count;
138 
139 static long object_collapses;
140 static long object_bypasses;
141 static int next_index;
142 static vm_zone_t obj_zone;
143 static struct vm_zone obj_zone_store;
144 #define VM_OBJECTS_INIT 256
145 static struct vm_object vm_objects_init[VM_OBJECTS_INIT];
146 
147 /*
148  * Misc low level routines
149  */
150 static void
151 vm_object_lock_init(vm_object_t obj)
152 {
153 #if defined(DEBUG_LOCKS)
154 	int i;
155 
156 	obj->debug_hold_bitmap = 0;
157 	obj->debug_hold_ovfl = 0;
158 	for (i = 0; i < VMOBJ_DEBUG_ARRAY_SIZE; i++) {
159 		obj->debug_hold_thrs[i] = NULL;
160 		obj->debug_hold_file[i] = NULL;
161 		obj->debug_hold_line[i] = 0;
162 	}
163 #endif
164 }
165 
166 void
167 vm_object_lock_swap(void)
168 {
169 	lwkt_token_swap();
170 }
171 
172 void
173 vm_object_lock(vm_object_t obj)
174 {
175 	lwkt_getpooltoken(obj);
176 }
177 
178 void
179 vm_object_unlock(vm_object_t obj)
180 {
181 	lwkt_relpooltoken(obj);
182 }
183 
184 static __inline void
185 vm_object_assert_held(vm_object_t obj)
186 {
187 	ASSERT_LWKT_TOKEN_HELD(lwkt_token_pool_lookup(obj));
188 }
189 
190 void
191 #ifndef DEBUG_LOCKS
192 vm_object_hold(vm_object_t obj)
193 #else
194 debugvm_object_hold(vm_object_t obj, char *file, int line)
195 #endif
196 {
197 	if (obj == NULL)
198 		return;
199 
200 	/*
201 	 * Object must be held (object allocation is stable due to the caller's
202 	 * context, typically already holding the token on a parent object)
203 	 * prior to potentially blocking on the lock, otherwise the object
204 	 * can get ripped away from us.
205 	 */
206 	refcount_acquire(&obj->hold_count);
207 	vm_object_lock(obj);
208 
209 #if defined(DEBUG_LOCKS)
210 	int i;
211 
212 	i = ffs(~obj->debug_hold_bitmap) - 1;
213 	if (i == -1) {
214 		kprintf("vm_object hold count > VMOBJ_DEBUG_ARRAY_SIZE");
215 		obj->debug_hold_ovfl = 1;
		return;		/* do not index the debug arrays with -1 */
216 	}
217 
218 	obj->debug_hold_bitmap |= (1 << i);
219 	obj->debug_hold_thrs[i] = curthread;
220 	obj->debug_hold_file[i] = file;
221 	obj->debug_hold_line[i] = line;
222 #endif
223 }
224 
225 void
226 vm_object_drop(vm_object_t obj)
227 {
228 	if (obj == NULL)
229 		return;
230 
231 #if defined(DEBUG_LOCKS)
232 	int found = 0;
233 	int i;
234 
235 	for (i = 0; i < VMOBJ_DEBUG_ARRAY_SIZE; i++) {
236 		if ((obj->debug_hold_bitmap & (1 << i)) &&
237 		    (obj->debug_hold_thrs[i] == curthread)) {
238 			obj->debug_hold_bitmap &= ~(1 << i);
239 			obj->debug_hold_thrs[i] = NULL;
240 			obj->debug_hold_file[i] = NULL;
241 			obj->debug_hold_line[i] = 0;
242 			found = 1;
243 			break;
244 		}
245 	}
246 
247 	if (found == 0 && obj->debug_hold_ovfl == 0)
248 		panic("vm_object: attempt to drop hold on non-self-held obj");
249 #endif
250 
251 	/*
252 	 * The lock is a pool token, keep holding it across potential
253 	 * wakeups to interlock the tsleep/wakeup.
254 	 */
255 	if (refcount_release(&obj->hold_count))
256 		wakeup(obj);
257 	vm_object_unlock(obj);
258 }
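/*
 * Illustrative sketch (not from this revision): vm_object_hold() and
 * vm_object_drop() bracket code that may block, so the object cannot be
 * terminated out from under the caller.  The work in the middle is
 * hypothetical.
 *
 *	vm_object_hold(obj);		(pool token + hold_count)
 *	error = some_blocking_operation(obj);
 *	vm_object_drop(obj);		(wakes any hold waiters on last drop)
 */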
259 
260 /*
261  * This can only be called while the caller holds the object
262  * with the OBJ_DEAD interlock.  Since there are no refs this
263  * is the only thing preventing an object destruction race.
264  */
265 static void
266 vm_object_hold_wait(vm_object_t obj)
267 {
268 	vm_object_lock(obj);
269 
270 #if defined(DEBUG_LOCKS)
271 	int i;
272 
273 	for (i = 0; i < VMOBJ_DEBUG_ARRAY_SIZE; i++) {
274 		if ((obj->debug_hold_bitmap & (1 << i)) &&
275 		    (obj->debug_hold_thrs[i] == curthread))  {
276 			kprintf("vm_object %p: self-hold in at %s:%d\n", obj,
277 				obj->debug_hold_file[i], obj->debug_hold_line[i]);
278 			panic("vm_object: self-hold in terminate or collapse");
279 		}
280 	}
281 #endif
282 
283 	while (obj->hold_count)
284 		tsleep(obj, 0, "vmobjhld", 0);
285 
286 	vm_object_unlock(obj);
287 }
288 
289 
290 /*
291  * Initialize a freshly allocated object
292  *
293  * Used only by vm_object_allocate() and zinitna().
294  *
295  * No requirements.
296  */
297 void
298 _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
299 {
300 	int incr;
301 
302 	RB_INIT(&object->rb_memq);
303 	LIST_INIT(&object->shadow_head);
304 
305 	object->type = type;
306 	object->size = size;
307 	object->ref_count = 1;
308 	object->hold_count = 0;
309 	object->flags = 0;
310 	if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
311 		vm_object_set_flag(object, OBJ_ONEMAPPING);
312 	object->paging_in_progress = 0;
313 	object->resident_page_count = 0;
314 	object->agg_pv_list_count = 0;
315 	object->shadow_count = 0;
316 	object->pg_color = next_index;
317 	if ( size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
318 		incr = PQ_L2_SIZE / 3 + PQ_PRIME1;
319 	else
320 		incr = size;
321 	next_index = (next_index + incr) & PQ_L2_MASK;
322 	object->handle = NULL;
323 	object->backing_object = NULL;
324 	object->backing_object_offset = (vm_ooffset_t) 0;
325 
326 	object->generation++;
327 	object->swblock_count = 0;
328 	RB_INIT(&object->swblock_root);
329 	vm_object_lock_init(object);
330 
331 	lwkt_gettoken(&vmobj_token);
332 	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
333 	vm_object_count++;
334 	lwkt_reltoken(&vmobj_token);
335 }
336 
337 /*
338  * Initialize the VM objects module.
339  *
340  * Called from the low level boot code only.
341  */
342 void
343 vm_object_init(void)
344 {
345 	TAILQ_INIT(&vm_object_list);
346 
347 	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(KvaEnd),
348 			    &kernel_object);
349 
350 	obj_zone = &obj_zone_store;
351 	zbootinit(obj_zone, "VM OBJECT", sizeof (struct vm_object),
352 		vm_objects_init, VM_OBJECTS_INIT);
353 }
354 
355 void
356 vm_object_init2(void)
357 {
358 	zinitna(obj_zone, NULL, NULL, 0, 0, ZONE_PANICFAIL, 1);
359 }
360 
361 /*
362  * Allocate and return a new object of the specified type and size.
363  *
364  * No requirements.
365  */
366 vm_object_t
367 vm_object_allocate(objtype_t type, vm_pindex_t size)
368 {
369 	vm_object_t result;
370 
371 	result = (vm_object_t) zalloc(obj_zone);
372 
373 	_vm_object_allocate(type, size, result);
374 
375 	return (result);
376 }
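/*
 * Illustrative sketch (not from this revision): callers size objects in
 * pages, typically converting a byte length with OFF_TO_IDX().  The
 * 'length' variable is hypothetical.
 *
 *	vm_object_t obj;
 *
 *	obj = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(round_page(length)));
 */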
377 
378 /*
379  * Add an additional reference to a vm_object.
380  *
381  * Object passed by caller must be stable or caller must already
382  * hold vmobj_token to avoid races.
383  */
384 void
385 vm_object_reference(vm_object_t object)
386 {
387 	lwkt_gettoken(&vmobj_token);
388 	vm_object_hold(object);
389 	vm_object_reference_locked(object);
390 	vm_object_drop(object);
391 	lwkt_reltoken(&vmobj_token);
392 }
393 
394 void
395 vm_object_reference_locked(vm_object_t object)
396 {
397 	if (object) {
398 		ASSERT_LWKT_TOKEN_HELD(&vmobj_token);
399 		/*NOTYET*/
400 		/*ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));*/
401 		object->ref_count++;
402 		if (object->type == OBJT_VNODE) {
403 			vref(object->handle);
404 			/* XXX what if the vnode is being destroyed? */
405 		}
406 	}
407 }
408 
409 /*
410  * Dereference an object and its underlying vnode.
411  *
412  * The caller must hold vmobj_token.
413  * The object must be locked but not held.  This function will eat the lock.
414  */
415 static void
416 vm_object_vndeallocate(vm_object_t object)
417 {
418 	struct vnode *vp = (struct vnode *) object->handle;
419 
420 	KASSERT(object->type == OBJT_VNODE,
421 	    ("vm_object_vndeallocate: not a vnode object"));
422 	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
423 	ASSERT_LWKT_TOKEN_HELD(&vmobj_token);
424 #ifdef INVARIANTS
425 	if (object->ref_count == 0) {
426 		vprint("vm_object_vndeallocate", vp);
427 		panic("vm_object_vndeallocate: bad object reference count");
428 	}
429 #endif
430 
431 	object->ref_count--;
432 	if (object->ref_count == 0)
433 		vclrflags(vp, VTEXT);
434 	vm_object_unlock(object);
435 	vrele(vp);
436 }
437 
438 /*
439  * Release a reference to the specified object, gained either through a
440  * vm_object_allocate or a vm_object_reference call.  When all references
441  * are gone, storage associated with this object may be relinquished.
442  *
443  * The caller does not have to hold the object locked but must have control
444  * over the reference in question in order to guarantee that the object
445  * does not get ripped out from under us.
446  */
447 void
448 vm_object_deallocate(vm_object_t object)
449 {
450 	lwkt_gettoken(&vmobj_token);
451 	vm_object_deallocate_locked(object);
452 	lwkt_reltoken(&vmobj_token);
453 }
454 
455 void
456 vm_object_deallocate_locked(vm_object_t object)
457 {
458 	vm_object_t temp;
459 
460 	ASSERT_LWKT_TOKEN_HELD(&vmobj_token);
461 
462 	if (object)
463 		vm_object_lock(object);
464 
465 	while (object != NULL) {
466 		if (object->type == OBJT_VNODE) {
467 			vm_object_vndeallocate(object);
468 			/* vndeallocate ate the lock */
469 			break;
470 		}
471 
472 		if (object->ref_count == 0) {
473 			panic("vm_object_deallocate: object deallocated "
474 			      "too many times: %d", object->type);
475 		}
476 		if (object->ref_count > 2) {
477 			object->ref_count--;
478 			vm_object_unlock(object);
479 			break;
480 		}
481 
482 		/*
483 		 * We currently need the vm_token from this point on, and
484 		 * we must recheck ref_count after acquiring it.
485 		 */
486 		lwkt_gettoken(&vm_token);
487 
488 		if (object->ref_count > 2) {
489 			object->ref_count--;
490 			lwkt_reltoken(&vm_token);
491 			vm_object_unlock(object);
492 			break;
493 		}
494 
495 		/*
496 		 * Here on ref_count of one or two, which are special cases for
497 		 * objects.
498 		 *
499 		 * The nominal ref_count > 1 case is handled here when the
500 		 * second ref is not from a shadow.
501 		 */
502 		if (object->ref_count == 2 && object->shadow_count == 0) {
503 			vm_object_set_flag(object, OBJ_ONEMAPPING);
504 			object->ref_count--;
505 			lwkt_reltoken(&vm_token);
506 			vm_object_unlock(object);
507 			break;
508 		}
509 
510 		/*
511 		 * If the second ref is from a shadow we chain along it
512 		 * if object's handle is exhausted.
513 		 */
514 		if (object->ref_count == 2 && object->shadow_count == 1) {
515 			if (object->handle == NULL &&
516 			    (object->type == OBJT_DEFAULT ||
517 			     object->type == OBJT_SWAP)) {
518 				temp = LIST_FIRST(&object->shadow_head);
519 				KASSERT(temp != NULL,
520 					("vm_object_deallocate: ref_count: "
521 					"%d, shadow_count: %d",
522 					object->ref_count,
523 					object->shadow_count));
524 				lwkt_reltoken(&vm_token);
525 				vm_object_lock(temp);
526 
527 				if ((temp->handle == NULL) &&
528 				    (temp->type == OBJT_DEFAULT ||
529 				     temp->type == OBJT_SWAP)) {
530 					/*
531 					 * Special case, must handle ref_count
532 					 * manually to avoid recursion.
533 					 */
534 					temp->ref_count++;
535 					vm_object_lock_swap();
536 
537 					while (
538 						temp->paging_in_progress ||
539 						object->paging_in_progress
540 					) {
541 						vm_object_pip_wait(temp,
542 								   "objde1");
543 						vm_object_pip_wait(object,
544 								   "objde2");
545 					}
546 
547 					if (temp->ref_count == 1) {
548 						object->ref_count--;
549 						temp->ref_count--;
550 						vm_object_unlock(object);
551 						object = temp;
552 						goto doterm;
553 					}
554 
555 					lwkt_gettoken(&vm_token);
556 					vm_object_collapse(temp);
557 					lwkt_reltoken(&vm_token);
558 					object->ref_count--;
559 					vm_object_unlock(object);
560 					object = temp;
561 					continue;
562 				}
563 				vm_object_unlock(temp);
564 			} else {
565 				lwkt_reltoken(&vm_token);
566 			}
567 			object->ref_count--;
568 			vm_object_unlock(object);
569 			break;
570 		}
571 
572 		/*
573 		 * Normal dereferencing path
574 		 */
575 		object->ref_count--;
576 		if (object->ref_count != 0) {
577 			lwkt_reltoken(&vm_token);
578 			vm_object_unlock(object);
579 			break;
580 		}
581 
582 		/*
583 		 * Termination path
584 		 *
585 		 * We may have to loop to resolve races if we block getting
586 		 * temp's lock.  If temp is non-NULL we have to swap the
587 		 * lock order so the original object lock is at the top
588 		 * of the lock heap.
589 		 */
590 		lwkt_reltoken(&vm_token);
591 doterm:
592 		while ((temp = object->backing_object) != NULL) {
593 			vm_object_lock(temp);
594 			if (temp == object->backing_object)
595 				break;
596 			vm_object_unlock(temp);
597 		}
598 		if (temp) {
599 			LIST_REMOVE(object, shadow_list);
600 			temp->shadow_count--;
601 			temp->generation++;
602 			object->backing_object = NULL;
603 			vm_object_lock_swap();
604 		}
605 
606 		/*
607 		 * Don't double-terminate, we could be in a termination
608 		 * recursion due to the terminate having to sync data
609 		 * to disk.
610 		 */
611 		if ((object->flags & OBJ_DEAD) == 0) {
612 			vm_object_terminate(object);
613 			/* termination ate the object lock */
614 		} else {
615 			vm_object_unlock(object);
616 		}
617 		object = temp;
618 	}
619 }
620 
621 /*
622  * Destroy the specified object, freeing up related resources.
623  *
624  * The object must have zero references.
625  *
626  * The caller must be holding vmobj_token and properly interlock with
627  * OBJ_DEAD (at the moment).
628  *
629  * The caller must have locked the object only, and not be holding it.
630  * This function will eat the caller's lock on the object.
631  */
632 static int vm_object_terminate_callback(vm_page_t p, void *data);
633 
634 void
635 vm_object_terminate(vm_object_t object)
636 {
637 	/*
638 	 * Make sure no one uses us.  Once we set OBJ_DEAD we should be
639 	 * able to safely block.
640 	 */
641 	KKASSERT((object->flags & OBJ_DEAD) == 0);
642 	ASSERT_LWKT_TOKEN_HELD(&vmobj_token);
643 	vm_object_set_flag(object, OBJ_DEAD);
644 
645 	/*
646 	 * Wait for the pageout daemon to be done with the object
647 	 */
648 	vm_object_pip_wait(object, "objtrm1");
649 
650 	KASSERT(!object->paging_in_progress,
651 		("vm_object_terminate: pageout in progress"));
652 
653 	/*
654 	 * Clean and free the pages, as appropriate. All references to the
655 	 * object are gone, so we don't need to lock it.
656 	 */
657 	if (object->type == OBJT_VNODE) {
658 		struct vnode *vp;
659 
660 		/*
661 		 * Clean pages and flush buffers.
662 		 */
663 		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
664 
665 		vp = (struct vnode *) object->handle;
666 		vinvalbuf(vp, V_SAVE, 0, 0);
667 	}
668 
669 	/*
670 	 * Wait for any I/O to complete, after which there had better not
671 	 * be any references left on the object.
672 	 */
673 	vm_object_pip_wait(object, "objtrm2");
674 
675 	if (object->ref_count != 0) {
676 		panic("vm_object_terminate: object with references, "
677 		      "ref_count=%d", object->ref_count);
678 	}
679 
680 	/*
681 	 * Now free any remaining pages. For internal objects, this also
682 	 * removes them from paging queues. Don't free wired pages, just
683 	 * remove them from the object.
684 	 */
685 	lwkt_gettoken(&vm_token);
686 	vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
687 				vm_object_terminate_callback, NULL);
688 	lwkt_reltoken(&vm_token);
689 
690 	/*
691 	 * Let the pager know object is dead.
692 	 */
693 	vm_pager_deallocate(object);
694 
695 	/*
696 	 * Wait for the object hold count to hit zero, clean out pages as
697 	 * we go.
698 	 */
699 	lwkt_gettoken(&vm_token);
700 	for (;;) {
701 		vm_object_hold_wait(object);
702 		if (RB_ROOT(&object->rb_memq) == NULL)
703 			break;
704 		kprintf("vm_object_terminate: Warning, object %p "
705 			"still has %d pages\n",
706 			object, object->resident_page_count);
707 		vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
708 					vm_object_terminate_callback, NULL);
709 	}
710 	lwkt_reltoken(&vm_token);
711 
712 	/*
713 	 * There had better not be any pages left
714 	 */
715 	KKASSERT(object->resident_page_count == 0);
716 
717 	/*
718 	 * Remove the object from the global object list.
719 	 *
720 	 * (we are holding vmobj_token)
721 	 */
722 	TAILQ_REMOVE(&vm_object_list, object, object_list);
723 	vm_object_count--;
724 	vm_object_dead_wakeup(object);
725 	vm_object_unlock(object);
726 
727 	if (object->ref_count != 0) {
728 		panic("vm_object_terminate2: object with references, "
729 		      "ref_count=%d", object->ref_count);
730 	}
731 
732 	/*
733 	 * Free the space for the object.
734 	 */
735 	zfree(obj_zone, object);
736 }
737 
738 /*
739  * The caller must hold vm_token.
740  */
741 static int
742 vm_object_terminate_callback(vm_page_t p, void *data __unused)
743 {
744 	if (p->busy || (p->flags & PG_BUSY))
745 		panic("vm_object_terminate: freeing busy page %p", p);
746 	if (p->wire_count == 0) {
747 		vm_page_busy(p);
748 		vm_page_free(p);
749 		mycpu->gd_cnt.v_pfree++;
750 	} else {
751 		if (p->queue != PQ_NONE)
752 			kprintf("vm_object_terminate: Warning: Encountered wired page %p on queue %d\n", p, p->queue);
753 		vm_page_busy(p);
754 		vm_page_remove(p);
755 		vm_page_wakeup(p);
756 	}
757 	return(0);
758 }
759 
760 /*
761  * The object is dead but still has an object<->pager association.  Sleep
762  * and return.  The caller typically retests the association in a loop.
763  *
764  * Must be called with the vmobj_token held.
765  */
766 void
767 vm_object_dead_sleep(vm_object_t object, const char *wmesg)
768 {
769 	ASSERT_LWKT_TOKEN_HELD(&vmobj_token);
770 	if (object->handle) {
771 		vm_object_set_flag(object, OBJ_DEADWNT);
772 		tsleep(object, 0, wmesg, 0);
773 		/* object may be invalid after this point */
774 	}
775 }
776 
777 /*
778  * Wakeup anyone waiting for the object<->pager disassociation on
779  * a dead object.
780  *
781  * Must be called with the vmobj_token held.
782  */
783 void
784 vm_object_dead_wakeup(vm_object_t object)
785 {
786 	ASSERT_LWKT_TOKEN_HELD(&vmobj_token);
787 	if (object->flags & OBJ_DEADWNT) {
788 		vm_object_clear_flag(object, OBJ_DEADWNT);
789 		wakeup(object);
790 	}
791 }
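/*
 * Illustrative sketch (not from this revision): a caller racing a dying
 * vnode object typically loops, retesting the association after each
 * sleep.  The vnode-based retest shown here is hypothetical.
 *
 *	lwkt_gettoken(&vmobj_token);
 *	while ((object = vp->v_object) != NULL &&
 *	       (object->flags & OBJ_DEAD)) {
 *		vm_object_dead_sleep(object, "objdead");
 *	}
 *	lwkt_reltoken(&vmobj_token);
 */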
792 
793 /*
794  * Clean all dirty pages in the specified range of object.  Leaves page
795  * on whatever queue it is currently on.   If NOSYNC is set then do not
796  * write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC),
797  * leaving the object dirty.
798  *
799  * When stuffing pages asynchronously, allow clustering.  XXX we need a
800  * synchronous clustering mode implementation.
801  *
802  * Odd semantics: if end == 0, we clean to the end of the object.
803  *
804  * The object must be locked? XXX
805  */
806 static int vm_object_page_clean_pass1(struct vm_page *p, void *data);
807 static int vm_object_page_clean_pass2(struct vm_page *p, void *data);
808 
809 void
810 vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
811 		     int flags)
812 {
813 	struct rb_vm_page_scan_info info;
814 	struct vnode *vp;
815 	int wholescan;
816 	int pagerflags;
817 	int curgeneration;
818 
819 	vm_object_hold(object);
820 	if (object->type != OBJT_VNODE ||
821 	    (object->flags & OBJ_MIGHTBEDIRTY) == 0) {
822 		vm_object_drop(object);
823 		return;
824 	}
825 
826 	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ?
827 			VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
828 	pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;
829 
830 	vp = object->handle;
831 
832 	/*
833 	 * Interlock other major object operations.  This allows us to
834 	 * temporarily clear OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY.
835 	 */
836 	crit_enter();
837 	vm_object_set_flag(object, OBJ_CLEANING);
838 
839 	/*
840 	 * Handle 'entire object' case
841 	 */
842 	info.start_pindex = start;
843 	if (end == 0) {
844 		info.end_pindex = object->size - 1;
845 	} else {
846 		info.end_pindex = end - 1;
847 	}
848 	wholescan = (start == 0 && info.end_pindex == object->size - 1);
849 	info.limit = flags;
850 	info.pagerflags = pagerflags;
851 	info.object = object;
852 
853 	/*
854 	 * If cleaning the entire object do a pass to mark the pages read-only.
855 	 * If everything worked out ok, clear OBJ_WRITEABLE and
856 	 * OBJ_MIGHTBEDIRTY.
857 	 */
858 	if (wholescan) {
859 		info.error = 0;
860 		lwkt_gettoken(&vm_token);
861 		vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
862 					vm_object_page_clean_pass1, &info);
863 		lwkt_reltoken(&vm_token);
864 		if (info.error == 0) {
865 			vm_object_clear_flag(object,
866 					     OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
867 			if (object->type == OBJT_VNODE &&
868 			    (vp = (struct vnode *)object->handle) != NULL) {
869 				if (vp->v_flag & VOBJDIRTY)
870 					vclrflags(vp, VOBJDIRTY);
871 			}
872 		}
873 	}
874 
875 	/*
876 	 * Do a pass to clean all the dirty pages we find.
877 	 */
878 	do {
879 		info.error = 0;
880 		curgeneration = object->generation;
881 		lwkt_gettoken(&vm_token);
882 		vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
883 					vm_object_page_clean_pass2, &info);
884 		lwkt_reltoken(&vm_token);
885 	} while (info.error || curgeneration != object->generation);
886 
887 	vm_object_clear_flag(object, OBJ_CLEANING);
888 	crit_exit();
889 	vm_object_drop(object);
890 }
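/*
 * Illustrative sketch (not from this revision): a synchronous flush of an
 * entire vnode object, as done by vm_object_terminate() above, passes
 * start == end == 0 and OBJPC_SYNC.
 *
 *	vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
 */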
891 
892 /*
893  * The caller must hold vm_token.
894  */
895 static
896 int
897 vm_object_page_clean_pass1(struct vm_page *p, void *data)
898 {
899 	struct rb_vm_page_scan_info *info = data;
900 
901 	vm_page_flag_set(p, PG_CLEANCHK);
902 	if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC))
903 		info->error = 1;
904 	else
905 		vm_page_protect(p, VM_PROT_READ);	/* must not block */
906 	return(0);
907 }
908 
909 /*
910  * The caller must hold vm_token.
911  */
912 static
913 int
914 vm_object_page_clean_pass2(struct vm_page *p, void *data)
915 {
916 	struct rb_vm_page_scan_info *info = data;
917 	int n;
918 
919 	/*
920 	 * Do not mess with pages that were inserted after we started
921 	 * the cleaning pass.
922 	 */
923 	if ((p->flags & PG_CLEANCHK) == 0)
924 		return(0);
925 
926 	/*
927 	 * Before wasting time traversing the pmaps, check for trivial
928 	 * cases where the page cannot be dirty.
929 	 */
930 	if (p->valid == 0 || (p->queue - p->pc) == PQ_CACHE) {
931 		KKASSERT((p->dirty & p->valid) == 0);
932 		return(0);
933 	}
934 
935 	/*
936 	 * Check whether the page is dirty or not.  The page has been set
937 	 * to be read-only so the check will not race a user dirtying the
938 	 * page.
939 	 */
940 	vm_page_test_dirty(p);
941 	if ((p->dirty & p->valid) == 0) {
942 		vm_page_flag_clear(p, PG_CLEANCHK);
943 		return(0);
944 	}
945 
946 	/*
947 	 * If we have been asked to skip nosync pages and this is a
948 	 * nosync page, skip it.  Note that the object flags were
949 	 * not cleared in this case (because pass1 will have returned an
950 	 * error), so we do not have to set them.
951 	 */
952 	if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
953 		vm_page_flag_clear(p, PG_CLEANCHK);
954 		return(0);
955 	}
956 
957 	/*
958 	 * Flush as many pages as we can.  PG_CLEANCHK will be cleared on
959 	 * the pages that get successfully flushed.  Set info->error if
960 	 * we raced an object modification.
961 	 */
962 	n = vm_object_page_collect_flush(info->object, p, info->pagerflags);
963 	if (n == 0)
964 		info->error = 1;
965 	return(0);
966 }
967 
968 /*
969  * Collect the specified page and nearby pages and flush them out.
970  * The number of pages flushed is returned.
971  *
972  * The caller must hold vm_token.
973  */
974 static int
975 vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags)
976 {
977 	int runlen;
978 	int maxf;
979 	int chkb;
980 	int maxb;
981 	int i;
982 	int curgeneration;
983 	vm_pindex_t pi;
984 	vm_page_t maf[vm_pageout_page_count];
985 	vm_page_t mab[vm_pageout_page_count];
986 	vm_page_t ma[vm_pageout_page_count];
987 
988 	curgeneration = object->generation;
989 
990 	pi = p->pindex;
991 	while (vm_page_sleep_busy(p, TRUE, "vpcwai")) {
992 		if (object->generation != curgeneration) {
993 			return(0);
994 		}
995 	}
996 	KKASSERT(p->object == object && p->pindex == pi);
997 
998 	maxf = 0;
999 	for(i = 1; i < vm_pageout_page_count; i++) {
1000 		vm_page_t tp;
1001 
1002 		if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
1003 			if ((tp->flags & PG_BUSY) ||
1004 				((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
1005 				 (tp->flags & PG_CLEANCHK) == 0) ||
1006 				(tp->busy != 0))
1007 				break;
1008 			if((tp->queue - tp->pc) == PQ_CACHE) {
1009 				vm_page_flag_clear(tp, PG_CLEANCHK);
1010 				break;
1011 			}
1012 			vm_page_test_dirty(tp);
1013 			if ((tp->dirty & tp->valid) == 0) {
1014 				vm_page_flag_clear(tp, PG_CLEANCHK);
1015 				break;
1016 			}
1017 			maf[ i - 1 ] = tp;
1018 			maxf++;
1019 			continue;
1020 		}
1021 		break;
1022 	}
1023 
1024 	maxb = 0;
1025 	chkb = vm_pageout_page_count -  maxf;
1026 	if (chkb) {
1027 		for(i = 1; i < chkb;i++) {
1028 			vm_page_t tp;
1029 
1030 			if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
1031 				if ((tp->flags & PG_BUSY) ||
1032 					((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
1033 					 (tp->flags & PG_CLEANCHK) == 0) ||
1034 					(tp->busy != 0))
1035 					break;
1036 				if((tp->queue - tp->pc) == PQ_CACHE) {
1037 					vm_page_flag_clear(tp, PG_CLEANCHK);
1038 					break;
1039 				}
1040 				vm_page_test_dirty(tp);
1041 				if ((tp->dirty & tp->valid) == 0) {
1042 					vm_page_flag_clear(tp, PG_CLEANCHK);
1043 					break;
1044 				}
1045 				mab[ i - 1 ] = tp;
1046 				maxb++;
1047 				continue;
1048 			}
1049 			break;
1050 		}
1051 	}
1052 
1053 	for(i = 0; i < maxb; i++) {
1054 		int index = (maxb - i) - 1;
1055 		ma[index] = mab[i];
1056 		vm_page_flag_clear(ma[index], PG_CLEANCHK);
1057 	}
1058 	vm_page_flag_clear(p, PG_CLEANCHK);
1059 	ma[maxb] = p;
1060 	for(i = 0; i < maxf; i++) {
1061 		int index = (maxb + i) + 1;
1062 		ma[index] = maf[i];
1063 		vm_page_flag_clear(ma[index], PG_CLEANCHK);
1064 	}
1065 	runlen = maxb + maxf + 1;
1066 
1067 	vm_pageout_flush(ma, runlen, pagerflags);
1068 	for (i = 0; i < runlen; i++) {
1069 		if (ma[i]->valid & ma[i]->dirty) {
1070 			vm_page_protect(ma[i], VM_PROT_READ);
1071 			vm_page_flag_set(ma[i], PG_CLEANCHK);
1072 
1073 			/*
1074 			 * maxf will end up being the actual number of pages
1075 			 * we wrote out contiguously, non-inclusive of the
1076 			 * first page.  We do not count look-behind pages.
1077 			 */
1078 			if (i >= maxb + 1 && (maxf > i - maxb - 1))
1079 				maxf = i - maxb - 1;
1080 		}
1081 	}
1082 	return(maxf + 1);
1083 }
1084 
1085 /*
1086  * Same as vm_object_pmap_copy, except range checking really
1087  * works, and is meant for small sections of an object.
1088  *
1089  * This code protects resident pages by making them read-only
1090  * and is typically called on a fork or split when a page
1091  * is converted to copy-on-write.
1092  *
1093  * NOTE: If the page is already at VM_PROT_NONE, calling
1094  * vm_page_protect will have no effect.
1095  */
1096 void
1097 vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
1098 {
1099 	vm_pindex_t idx;
1100 	vm_page_t p;
1101 
1102 	if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
1103 		return;
1104 
1105 	/*
1106 	 * spl protection needed to prevent races between the lookup,
1107 	 * an interrupt unbusy/free, and our protect call.
1108 	 */
1109 	crit_enter();
1110 	lwkt_gettoken(&vm_token);
1111 	for (idx = start; idx < end; idx++) {
1112 		p = vm_page_lookup(object, idx);
1113 		if (p == NULL)
1114 			continue;
1115 		vm_page_protect(p, VM_PROT_READ);
1116 	}
1117 	lwkt_reltoken(&vm_token);
1118 	crit_exit();
1119 }
1120 
1121 /*
1122  * Removes all physical pages in the specified object range from all
1123  * physical maps.
1124  *
1125  * The object must *not* be locked.
1126  */
1127 
1128 static int vm_object_pmap_remove_callback(vm_page_t p, void *data);
1129 
1130 void
1131 vm_object_pmap_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
1132 {
1133 	struct rb_vm_page_scan_info info;
1134 
1135 	if (object == NULL)
1136 		return;
1137 	info.start_pindex = start;
1138 	info.end_pindex = end - 1;
1139 
1140 	crit_enter();
1141 	lwkt_gettoken(&vm_token);
1142 	vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
1143 				vm_object_pmap_remove_callback, &info);
1144 	if (start == 0 && end == object->size)
1145 		vm_object_clear_flag(object, OBJ_WRITEABLE);
1146 	lwkt_reltoken(&vm_token);
1147 	crit_exit();
1148 }
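/*
 * Illustrative sketch (not from this revision): removing all hardware
 * mappings for a byte range before invalidating it; 'start' and 'end'
 * are hypothetical byte offsets.
 *
 *	vm_object_pmap_remove(object, OFF_TO_IDX(start),
 *			      OFF_TO_IDX(round_page(end)));
 */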
1149 
1150 /*
1151  * The caller must hold vm_token.
1152  */
1153 static int
1154 vm_object_pmap_remove_callback(vm_page_t p, void *data __unused)
1155 {
1156 	vm_page_protect(p, VM_PROT_NONE);
1157 	return(0);
1158 }
1159 
1160 /*
1161  * Implements the madvise function at the object/page level.
1162  *
1163  * MADV_WILLNEED	(any object)
1164  *
1165  *	Activate the specified pages if they are resident.
1166  *
1167  * MADV_DONTNEED	(any object)
1168  *
1169  *	Deactivate the specified pages if they are resident.
1170  *
1171  * MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects, OBJ_ONEMAPPING only)
1172  *
1173  *	Deactivate and clean the specified pages if they are
1174  *	resident.  This permits the process to reuse the pages
1175  *	without faulting or the kernel to reclaim the pages
1176  *	without I/O.
1177  *
1178  * No requirements.
1179  */
1180 void
1181 vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
1182 {
1183 	vm_pindex_t end, tpindex;
1184 	vm_object_t tobject;
1185 	vm_page_t m;
1186 
1187 	if (object == NULL)
1188 		return;
1189 
1190 	end = pindex + count;
1191 
1192 	lwkt_gettoken(&vm_token);
1193 
1194 	/*
1195 	 * Locate and adjust resident pages
1196 	 */
1197 	for (; pindex < end; pindex += 1) {
1198 relookup:
1199 		tobject = object;
1200 		tpindex = pindex;
1201 shadowlookup:
1202 		/*
1203 		 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
1204 		 * and those pages must be OBJ_ONEMAPPING.
1205 		 */
1206 		if (advise == MADV_FREE) {
1207 			if ((tobject->type != OBJT_DEFAULT &&
1208 			     tobject->type != OBJT_SWAP) ||
1209 			    (tobject->flags & OBJ_ONEMAPPING) == 0) {
1210 				continue;
1211 			}
1212 		}
1213 
1214 		/*
1215 		 * spl protection is required to avoid a race between the
1216 		 * lookup, an interrupt unbusy/free, and our busy check.
1217 		 */
1218 
1219 		crit_enter();
1220 		m = vm_page_lookup(tobject, tpindex);
1221 
1222 		if (m == NULL) {
1223 			/*
1224 			 * There may be swap even if there is no backing page
1225 			 */
1226 			if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
1227 				swap_pager_freespace(tobject, tpindex, 1);
1228 
1229 			/*
1230 			 * next object
1231 			 */
1232 			crit_exit();
1233 			if (tobject->backing_object == NULL)
1234 				continue;
1235 			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
1236 			tobject = tobject->backing_object;
1237 			goto shadowlookup;
1238 		}
1239 
1240 		/*
1241 		 * If the page is busy or not in a normal active state,
1242 		 * we skip it.  If the page is not managed there are no
1243 		 * page queues to mess with.  Things can break if we mess
1244 		 * with pages in any of the below states.
1245 		 */
1246 		if (
1247 		    m->hold_count ||
1248 		    m->wire_count ||
1249 		    (m->flags & PG_UNMANAGED) ||
1250 		    m->valid != VM_PAGE_BITS_ALL
1251 		) {
1252 			crit_exit();
1253 			continue;
1254 		}
1255 
1256  		if (vm_page_sleep_busy(m, TRUE, "madvpo")) {
1257 			crit_exit();
1258   			goto relookup;
1259 		}
1260 		vm_page_busy(m);
1261 		crit_exit();
1262 
1263 		/*
1264 		 * Theoretically once a page is known not to be busy, an
1265 		 * interrupt cannot come along and rip it out from under us.
1266 		 */
1267 
1268 		if (advise == MADV_WILLNEED) {
1269 			vm_page_activate(m);
1270 		} else if (advise == MADV_DONTNEED) {
1271 			vm_page_dontneed(m);
1272 		} else if (advise == MADV_FREE) {
1273 			/*
1274 			 * Mark the page clean.  This will allow the page
1275 			 * to be freed up by the system.  However, such pages
1276 			 * are often reused quickly by malloc()/free()
1277 			 * so we do not do anything that would cause
1278 			 * a page fault if we can help it.
1279 			 *
1280 			 * Specifically, we do not try to actually free
1281 			 * the page now nor do we try to put it in the
1282 			 * cache (which would cause a page fault on reuse).
1283 			 *
1284 			 * But we do make the page as freeable as we
1285 			 * can without actually taking the step of unmapping
1286 			 * it.
1287 			 */
1288 			pmap_clear_modify(m);
1289 			m->dirty = 0;
1290 			m->act_count = 0;
1291 			vm_page_dontneed(m);
1292 			if (tobject->type == OBJT_SWAP)
1293 				swap_pager_freespace(tobject, tpindex, 1);
1294 		}
1295 		vm_page_wakeup(m);
1296 	}
1297 	lwkt_reltoken(&vm_token);
1298 }
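/*
 * Illustrative sketch (not from this revision): the madvise(2) path ends
 * up here with a page-aligned object range; 'offset' and 'bytes' are
 * hypothetical.
 *
 *	vm_object_madvise(object, OFF_TO_IDX(offset),
 *			  (int)(bytes >> PAGE_SHIFT), MADV_FREE);
 */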
1299 
1300 /*
1301  * Create a new object which is backed by the specified existing object
1302  * range.  The source object reference is deallocated.
1303  *
1304  * The new object and offset into that object are returned in the source
1305  * parameters.
1306  *
1307  * No other requirements.
1308  */
1309 void
1310 vm_object_shadow(vm_object_t *object, vm_ooffset_t *offset, vm_size_t length)
1311 {
1312 	vm_object_t source;
1313 	vm_object_t result;
1314 
1315 	source = *object;
1316 
1317 	/*
1318 	 * Don't create the new object if the old object isn't shared.
1319 	 */
1320 	lwkt_gettoken(&vm_token);
1321 
1322 	if (source != NULL &&
1323 	    source->ref_count == 1 &&
1324 	    source->handle == NULL &&
1325 	    (source->type == OBJT_DEFAULT ||
1326 	     source->type == OBJT_SWAP)) {
1327 		lwkt_reltoken(&vm_token);
1328 		return;
1329 	}
1330 
1331 	/*
1332 	 * Allocate a new object with the given length
1333 	 */
1334 
1335 	if ((result = vm_object_allocate(OBJT_DEFAULT, length)) == NULL)
1336 		panic("vm_object_shadow: no object for shadowing");
1337 
1338 	/*
1339 	 * The new object shadows the source object, adding a reference to it.
1340 	 * Our caller changes his reference to point to the new object,
1341 	 * removing a reference to the source object.  Net result: no change
1342 	 * of reference count.
1343 	 *
1344 	 * Try to optimize the result object's page color when shadowing
1345 	 * in order to maintain page coloring consistency in the combined
1346 	 * shadowed object.
1347 	 */
1348 	result->backing_object = source;
1349 	if (source) {
1350 		LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
1351 		source->shadow_count++;
1352 		source->generation++;
1353 		result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) & PQ_L2_MASK;
1354 	}
1355 
1356 	/*
1357 	 * Store the offset into the source object, and fix up the offset into
1358 	 * the new object.
1359 	 */
1360 	result->backing_object_offset = *offset;
1361 	lwkt_reltoken(&vm_token);
1362 
1363 	/*
1364 	 * Return the new object and the adjusted offset
1365 	 */
1366 	*offset = 0;
1367 	*object = result;
1368 }
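/*
 * Illustrative sketch (not from this revision): a copy-on-write split
 * replaces a map entry's object/offset pair in place; the map-entry
 * fields shown are hypothetical.
 *
 *	vm_object_shadow(&entry->object.vm_object, &entry->offset,
 *			 entry->end - entry->start);
 */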
1369 
1370 #define	OBSC_TEST_ALL_SHADOWED	0x0001
1371 #define	OBSC_COLLAPSE_NOWAIT	0x0002
1372 #define	OBSC_COLLAPSE_WAIT	0x0004
1373 
1374 static int vm_object_backing_scan_callback(vm_page_t p, void *data);
1375 
1376 /*
1377  * The caller must hold vm_token.
1378  */
1379 static __inline int
1380 vm_object_backing_scan(vm_object_t object, int op)
1381 {
1382 	struct rb_vm_page_scan_info info;
1383 	vm_object_t backing_object;
1384 
1385 	crit_enter();
1386 
1387 	backing_object = object->backing_object;
1388 	info.backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
1389 
1390 	/*
1391 	 * Initial conditions
1392 	 */
1393 
1394 	if (op & OBSC_TEST_ALL_SHADOWED) {
1395 		/*
1396 		 * We do not want to have to test for the existence of
1397 		 * swap pages in the backing object.  XXX but with the
1398 		 * new swapper this would be pretty easy to do.
1399 		 *
1400 		 * XXX what about anonymous MAP_SHARED memory that hasn't
1401 		 * been ZFOD faulted yet?  If we do not test for this, the
1402 		 * shadow test may succeed! XXX
1403 		 */
1404 		if (backing_object->type != OBJT_DEFAULT) {
1405 			crit_exit();
1406 			return(0);
1407 		}
1408 	}
1409 	if (op & OBSC_COLLAPSE_WAIT) {
1410 		KKASSERT((backing_object->flags & OBJ_DEAD) == 0);
1411 		vm_object_set_flag(backing_object, OBJ_DEAD);
1412 	}
1413 
1414 	/*
1415 	 * Our scan.   We have to retry if a negative error code is returned,
1416  *	 otherwise 0 or 1 will be returned in info.error.  0 indicates that
1417 	 * the scan had to be stopped because the parent does not completely
1418 	 * shadow the child.
1419 	 */
1420 	info.object = object;
1421 	info.backing_object = backing_object;
1422 	info.limit = op;
1423 	do {
1424 		info.error = 1;
1425 		vm_page_rb_tree_RB_SCAN(&backing_object->rb_memq, NULL,
1426 					vm_object_backing_scan_callback,
1427 					&info);
1428 	} while (info.error < 0);
1429 	crit_exit();
1430 	return(info.error);
1431 }
1432 
1433 /*
1434  * The caller must hold vm_token.
1435  */
1436 static int
1437 vm_object_backing_scan_callback(vm_page_t p, void *data)
1438 {
1439 	struct rb_vm_page_scan_info *info = data;
1440 	vm_object_t backing_object;
1441 	vm_object_t object;
1442 	vm_pindex_t new_pindex;
1443 	vm_pindex_t backing_offset_index;
1444 	int op;
1445 
1446 	new_pindex = p->pindex - info->backing_offset_index;
1447 	op = info->limit;
1448 	object = info->object;
1449 	backing_object = info->backing_object;
1450 	backing_offset_index = info->backing_offset_index;
1451 
1452 	if (op & OBSC_TEST_ALL_SHADOWED) {
1453 		vm_page_t pp;
1454 
1455 		/*
1456 		 * Ignore pages outside the parent object's range
1457 		 * and outside the parent object's mapping of the
1458 		 * backing object.
1459 		 *
1460 		 * note that we do not busy the backing object's
1461 		 * page.
1462 		 */
1463 		if (
1464 		    p->pindex < backing_offset_index ||
1465 		    new_pindex >= object->size
1466 		) {
1467 			return(0);
1468 		}
1469 
1470 		/*
1471 		 * See if the parent has the page or if the parent's
1472 		 * object pager has the page.  If the parent has the
1473 		 * page but the page is not valid, the parent's
1474 		 * object pager must have the page.
1475 		 *
1476 		 * If this fails, the parent does not completely shadow
1477 		 * the object and we might as well give up now.
1478 		 */
1479 
1480 		pp = vm_page_lookup(object, new_pindex);
1481 		if ((pp == NULL || pp->valid == 0) &&
1482 		    !vm_pager_has_page(object, new_pindex)
1483 		) {
1484 			info->error = 0;	/* problemo */
1485 			return(-1);		/* stop the scan */
1486 		}
1487 	}
1488 
1489 	/*
1490 	 * Check for busy page
1491 	 */
1492 
1493 	if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
1494 		vm_page_t pp;
1495 
1496 		if (op & OBSC_COLLAPSE_NOWAIT) {
1497 			if (
1498 			    (p->flags & PG_BUSY) ||
1499 			    !p->valid ||
1500 			    p->hold_count ||
1501 			    p->wire_count ||
1502 			    p->busy
1503 			) {
1504 				return(0);
1505 			}
1506 		} else if (op & OBSC_COLLAPSE_WAIT) {
1507 			if (vm_page_sleep_busy(p, TRUE, "vmocol")) {
1508 				/*
1509 				 * If we slept, anything could have
1510 				 * happened.   Ask that the scan be restarted.
1511 				 *
1512 				 * Since the object is marked dead, the
1513 				 * backing offset should not have changed.
1514 				 */
1515 				info->error = -1;
1516 				return(-1);
1517 			}
1518 		}
1519 
1520 		/*
1521 		 * Busy the page
1522 		 */
1523 		vm_page_busy(p);
1524 
1525 		KASSERT(
1526 		    p->object == backing_object,
1527 		    ("vm_object_qcollapse(): object mismatch")
1528 		);
1529 
1530 		/*
1531 		 * Destroy any associated swap
1532 		 */
1533 		if (backing_object->type == OBJT_SWAP)
1534 			swap_pager_freespace(backing_object, p->pindex, 1);
1535 
1536 		if (
1537 		    p->pindex < backing_offset_index ||
1538 		    new_pindex >= object->size
1539 		) {
1540 			/*
1541 			 * Page is out of the parent object's range, we
1542 			 * can simply destroy it.
1543 			 */
1544 			vm_page_protect(p, VM_PROT_NONE);
1545 			vm_page_free(p);
1546 			return(0);
1547 		}
1548 
1549 		pp = vm_page_lookup(object, new_pindex);
1550 		if (pp != NULL || vm_pager_has_page(object, new_pindex)) {
1551 			/*
1552 			 * page already exists in parent OR swap exists
1553 			 * for this location in the parent.  Destroy
1554 			 * the original page from the backing object.
1555 			 *
1556 			 * Leave the parent's page alone
1557 			 */
1558 			vm_page_protect(p, VM_PROT_NONE);
1559 			vm_page_free(p);
1560 			return(0);
1561 		}
1562 
1563 		/*
1564 		 * Page does not exist in parent, rename the
1565 		 * page from the backing object to the main object.
1566 		 *
1567 		 * If the page was mapped to a process, it can remain
1568 		 * mapped through the rename.
1569 		 */
1570 		if ((p->queue - p->pc) == PQ_CACHE)
1571 			vm_page_deactivate(p);
1572 
1573 		vm_page_rename(p, object, new_pindex);
1574 		/* page automatically made dirty by rename */
1575 	}
1576 	return(0);
1577 }
1578 
1579 /*
1580  * This version of collapse allows the operation to occur earlier and
1581  * when paging_in_progress is true for an object...  This is not a complete
1582  * operation, but should plug 99.9% of the rest of the leaks.
1583  *
1584  * The caller must hold vm_token and vmobj_token.
1585  * (only called from vm_object_collapse)
1586  */
1587 static void
1588 vm_object_qcollapse(vm_object_t object)
1589 {
1590 	vm_object_t backing_object = object->backing_object;
1591 
1592 	if (backing_object->ref_count != 1)
1593 		return;
1594 
1595 	backing_object->ref_count += 2;
1596 
1597 	vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);
1598 
1599 	backing_object->ref_count -= 2;
1600 }
1601 
1602 /*
1603  * Collapse an object with the object backing it.  Pages in the backing
1604  * object are moved into the parent, and the backing object is deallocated.
1605  *
1606  * The caller must hold (object).
1607  */
1608 void
1609 vm_object_collapse(vm_object_t object)
1610 {
1611 	ASSERT_LWKT_TOKEN_HELD(&vm_token);
1612 	ASSERT_LWKT_TOKEN_HELD(&vmobj_token);
1613 	vm_object_assert_held(object);
1614 
1615 	while (TRUE) {
1616 		vm_object_t backing_object;
1617 
1618 		/*
1619 		 * Verify that the conditions are right for collapse:
1620 		 *
1621 		 * The object exists and the backing object exists.
1622 		 */
1623 		if (object == NULL)
1624 			break;
1625 
1626 		if ((backing_object = object->backing_object) == NULL)
1627 			break;
1628 
1629 		vm_object_hold(backing_object);
1630 		if (backing_object != object->backing_object) {
1631 			vm_object_drop(backing_object);
1632 			continue;
1633 		}
1634 
1635 		/*
1636 		 * We check the backing object first, because it is most likely
1637 		 * not collapsible.
1638 		 */
1639 		if (backing_object->handle != NULL ||
1640 		    (backing_object->type != OBJT_DEFAULT &&
1641 		     backing_object->type != OBJT_SWAP) ||
1642 		    (backing_object->flags & OBJ_DEAD) ||
1643 		    object->handle != NULL ||
1644 		    (object->type != OBJT_DEFAULT &&
1645 		     object->type != OBJT_SWAP) ||
1646 		    (object->flags & OBJ_DEAD)) {
1647 			vm_object_drop(backing_object);
1648 			break;
1649 		}
1650 
1651 		if (
1652 		    object->paging_in_progress != 0 ||
1653 		    backing_object->paging_in_progress != 0
1654 		) {
1655 			vm_object_drop(backing_object);
1656 			vm_object_qcollapse(object);
1657 			break;
1658 		}
1659 
1660 		/*
1661 		 * We know that we can either collapse the backing object (if
1662 		 * the parent is the only reference to it) or (perhaps) have
1663 		 * the parent bypass the object if the parent happens to shadow
1664 		 * all the resident pages in the entire backing object.
1665 		 *
1666 		 * This is ignoring pager-backed pages such as swap pages.
1667 		 * vm_object_backing_scan fails the shadowing test in this
1668 		 * case.
1669 		 */
1670 
1671 		if (backing_object->ref_count == 1) {
1672 			/*
1673 			 * If there is exactly one reference to the backing
1674 			 * object, we can collapse it into the parent.
1675 			 */
1676 			vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);
1677 
1678 			/*
1679 			 * Move the pager from backing_object to object.
1680 			 */
1681 
1682 			if (backing_object->type == OBJT_SWAP) {
1683 				vm_object_pip_add(backing_object, 1);
1684 
1685 				/*
1686 				 * scrap the paging_offset junk and do a
1687 				 * discrete copy.  This also removes major
1688 				 * assumptions about how the swap-pager
1689 				 * works from where it doesn't belong.  The
1690 				 * new swapper is able to optimize the
1691 				 * destroy-source case.
1692 				 */
1693 
1694 				vm_object_pip_add(object, 1);
1695 				swap_pager_copy(
1696 				    backing_object,
1697 				    object,
1698 				    OFF_TO_IDX(object->backing_object_offset), TRUE);
1699 				vm_object_pip_wakeup(object);
1700 
1701 				vm_object_pip_wakeup(backing_object);
1702 			}
1703 			/*
1704 			 * Object now shadows whatever backing_object did.
1705 			 * Note that the reference to
1706 			 * backing_object->backing_object moves from within
1707 			 * backing_object to within object.
1708 			 */
1709 
1710 			LIST_REMOVE(object, shadow_list);
1711 			object->backing_object->shadow_count--;
1712 			object->backing_object->generation++;
1713 			if (backing_object->backing_object) {
1714 				LIST_REMOVE(backing_object, shadow_list);
1715 				backing_object->backing_object->shadow_count--;
1716 				backing_object->backing_object->generation++;
1717 			}
1718 			object->backing_object = backing_object->backing_object;
1719 			if (object->backing_object) {
1720 				LIST_INSERT_HEAD(
1721 				    &object->backing_object->shadow_head,
1722 				    object,
1723 				    shadow_list
1724 				);
1725 				object->backing_object->shadow_count++;
1726 				object->backing_object->generation++;
1727 			}
1728 
1729 			object->backing_object_offset +=
1730 			    backing_object->backing_object_offset;
1731 
1732 			/*
1733 			 * Discard backing_object.
1734 			 *
1735 			 * Since the backing object has no pages, no pager left,
1736 			 * and no object references within it, all that is
1737 			 * necessary is to dispose of it.
1738 			 */
1739 
1740 			KASSERT(backing_object->ref_count == 1,
1741 				("backing_object %p was somehow "
1742 				 "re-referenced during collapse!",
1743 				 backing_object));
1744 			KASSERT(RB_EMPTY(&backing_object->rb_memq),
1745 				("backing_object %p somehow has left "
1746 				 "over pages during collapse!",
1747 				 backing_object));
1748 
1749 			/*
1750 			 * Wait for hold count to hit zero
1751 			 */
1752 			vm_object_drop(backing_object);
1753 			vm_object_hold_wait(backing_object);
1754 
1755 			/* (we are holding vmobj_token) */
1756 			TAILQ_REMOVE(&vm_object_list, backing_object,
1757 				     object_list);
1758 			vm_object_count--;
1759 
1760 			zfree(obj_zone, backing_object);
1761 
1762 			object_collapses++;
1763 		} else {
1764 			vm_object_t new_backing_object;
1765 
1766 			/*
1767 			 * If we do not entirely shadow the backing object,
1768 			 * there is nothing we can do so we give up.
1769 			 */
1770 
1771 			if (vm_object_backing_scan(object, OBSC_TEST_ALL_SHADOWED) == 0) {
1772 				vm_object_drop(backing_object);
1773 				break;
1774 			}
1775 
1776 			/*
1777 			 * Make the parent shadow the next object in the
1778 			 * chain.  Deallocating backing_object will not remove
1779 			 * it, since its reference count is at least 2.
1780 			 */
1781 
1782 			LIST_REMOVE(object, shadow_list);
1783 			backing_object->shadow_count--;
1784 			backing_object->generation++;
1785 
1786 			new_backing_object = backing_object->backing_object;
1787 			if ((object->backing_object = new_backing_object) != NULL) {
1788 				vm_object_reference(new_backing_object);
1789 				LIST_INSERT_HEAD(
1790 				    &new_backing_object->shadow_head,
1791 				    object,
1792 				    shadow_list
1793 				);
1794 				new_backing_object->shadow_count++;
1795 				new_backing_object->generation++;
1796 				object->backing_object_offset +=
1797 					backing_object->backing_object_offset;
1798 			}
1799 
1800 			/*
1801 			 * Drop the reference count on backing_object. Since
1802 			 * its ref_count was at least 2, it will not vanish;
1803 			 * so we don't need to call vm_object_deallocate, but
1804 			 * we do anyway.
1805 			 */
1806 			vm_object_drop(backing_object);
1807 			vm_object_deallocate_locked(backing_object);
1808 			object_bypasses++;
1809 		}
1810 
1811 		/*
1812 		 * Try again with this object's new backing object.
1813 		 */
1814 	}
1815 }
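/*
 * Illustrative sketch (not from this revision): vm_object_deallocate_locked()
 * above calls collapse on the shadow with both tokens held and the object
 * locked:
 *
 *	lwkt_gettoken(&vm_token);	(vmobj_token already held)
 *	vm_object_collapse(temp);
 *	lwkt_reltoken(&vm_token);
 */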
1816 
1817 /*
1818  * Removes all physical pages in the specified object range from the
1819  * object's list of pages.
1820  *
1821  * No requirements.
1822  */
1823 static int vm_object_page_remove_callback(vm_page_t p, void *data);
1824 
1825 void
1826 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
1827 		      boolean_t clean_only)
1828 {
1829 	struct rb_vm_page_scan_info info;
1830 	int all;
1831 
1832 	/*
1833 	 * Degenerate cases and assertions
1834 	 */
1835 	lwkt_gettoken(&vm_token);
1836 	if (object == NULL ||
1837 	    (object->resident_page_count == 0 && object->swblock_count == 0)) {
1838 		lwkt_reltoken(&vm_token);
1839 		return;
1840 	}
1841 	KASSERT(object->type != OBJT_PHYS,
1842 		("attempt to remove pages from a physical object"));
1843 
1844 	/*
1845 	 * Indicate that paging is occurring on the object
1846 	 */
1847 	crit_enter();
1848 	vm_object_pip_add(object, 1);
1849 
1850 	/*
1851 	 * Figure out the actual removal range and whether we are removing
1852 	 * the entire contents of the object or not.  If removing the entire
1853 	 * contents, be sure to get all pages, even those that might be
1854 	 * beyond the end of the object.
1855 	 */
1856 	info.start_pindex = start;
1857 	if (end == 0)
1858 		info.end_pindex = (vm_pindex_t)-1;
1859 	else
1860 		info.end_pindex = end - 1;
1861 	info.limit = clean_only;
1862 	all = (start == 0 && info.end_pindex >= object->size - 1);
1863 
1864 	/*
1865 	 * Loop until we are sure we have gotten them all.
1866 	 */
1867 	do {
1868 		info.error = 0;
1869 		vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
1870 					vm_object_page_remove_callback, &info);
1871 	} while (info.error);
1872 
1873 	/*
1874 	 * Remove any related swap if throwing away pages, or for
1875 	 * non-swap objects (the swap is a clean copy in that case).
1876 	 */
1877 	if (object->type != OBJT_SWAP || clean_only == FALSE) {
1878 		if (all)
1879 			swap_pager_freespace_all(object);
1880 		else
1881 			swap_pager_freespace(object, info.start_pindex,
1882 			     info.end_pindex - info.start_pindex + 1);
1883 	}
1884 
1885 	/*
1886 	 * Cleanup
1887 	 */
1888 	vm_object_pip_wakeup(object);
1889 	crit_exit();
1890 	lwkt_reltoken(&vm_token);
1891 }
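/*
 * Illustrative sketch (not from this revision): truncating a vnode object
 * to 'newsize' bytes removes everything from the first invalid page index
 * onward (end == 0 means "to the end" here); 'newsize' is hypothetical.
 *
 *	vm_object_page_remove(object, OFF_TO_IDX(round_page(newsize)),
 *			      0, FALSE);
 */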
1892 
1893 /*
1894  * The caller must hold vm_token.
1895  */
1896 static int
1897 vm_object_page_remove_callback(vm_page_t p, void *data)
1898 {
1899 	struct rb_vm_page_scan_info *info = data;
1900 
1901 	/*
1902 	 * Wired pages cannot be destroyed, but they can be invalidated
1903 	 * and we do so if clean_only (limit) is not set.
1904 	 *
1905 	 * WARNING!  The page may be wired due to being part of a buffer
1906 	 *	     cache buffer, and the buffer might be marked B_CACHE.
1907 	 *	     This is fine as part of a truncation but VFSs must be
1908 	 *	     sure to fix the buffer up when re-extending the file.
1909 	 */
1910 	if (p->wire_count != 0) {
1911 		vm_page_protect(p, VM_PROT_NONE);
1912 		if (info->limit == 0)
1913 			p->valid = 0;
1914 		return(0);
1915 	}
1916 
1917 	/*
1918 	 * The busy flags are only cleared at
1919 	 * interrupt -- minimize the spl transitions
1920 	 */
1921 
1922 	if (vm_page_sleep_busy(p, TRUE, "vmopar")) {
1923 		info->error = 1;
1924 		return(0);
1925 	}
1926 
1927 	/*
1928 	 * limit is our clean_only flag.  If set and the page is dirty, do
1929 	 * not free it.  If set and the page is being held by someone, do
1930 	 * not free it.
1931 	 */
1932 	if (info->limit && p->valid) {
1933 		vm_page_test_dirty(p);
1934 		if (p->valid & p->dirty)
1935 			return(0);
1936 		if (p->hold_count)
1937 			return(0);
1938 	}
1939 
1940 	/*
1941 	 * Destroy the page
1942 	 */
1943 	vm_page_busy(p);
1944 	vm_page_protect(p, VM_PROT_NONE);
1945 	vm_page_free(p);
1946 	return(0);
1947 }
1948 
1949 /*
1950  * Coalesces two objects backing up adjoining regions of memory into a
1951  * single object.
1952  *
1953  * Returns TRUE if the objects were combined.
1954  *
1955  * NOTE: Only works at the moment if the second object is NULL -
1956  *	 if it's not, which object do we lock first?
1957  *
1958  * Parameters:
1959  *	prev_object	First object to coalesce
1960  *	prev_pindex	Page index within prev_object at which the
1961  *			existing reference begins
1962  *
1963  *	prev_size	Size (in bytes) of the reference to prev_object
1964  *	next_size	Size (in bytes) of the adjoining region being
1965  *			appended to prev_object
1966  *
1967  * The caller must hold vm_token and vmobj_token.
1968  *
1969  * The caller does not need to hold (prev_object) but must have a stable
1970  * pointer to it (typically by holding the vm_map locked).
1971  */
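/*
 * Usage sketch (hypothetical values, for illustration only): when a new
 * region is being mapped immediately after an existing entry backed by
 * prev_object, the map code can attempt
 *
 *	if (vm_object_coalesce(prev_object, prev_pindex,
 *			       prev_size, next_size))
 *		... reuse prev_object for the new range ...
 *
 * rather than allocating a second object.  Both sizes are byte counts
 * and are converted to pages internally.
 */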
1972 boolean_t
1973 vm_object_coalesce(vm_object_t prev_object, vm_pindex_t prev_pindex,
1974 		   vm_size_t prev_size, vm_size_t next_size)
1975 {
1976 	vm_pindex_t next_pindex;
1977 
1978 	ASSERT_LWKT_TOKEN_HELD(&vm_token);
1979 	ASSERT_LWKT_TOKEN_HELD(&vmobj_token);
1980 
1981 	if (prev_object == NULL) {
1982 		return (TRUE);
1983 	}
1984 
1985 	vm_object_hold(prev_object);
1986 
1987 	if (prev_object->type != OBJT_DEFAULT &&
1988 	    prev_object->type != OBJT_SWAP) {
1989 		vm_object_drop(prev_object);
1990 		return (FALSE);
1991 	}
1992 
1993 	/*
1994 	 * Try to collapse the object first
1995 	 */
1996 	vm_object_collapse(prev_object);
1997 
1998 	/*
1999 	 * Can't coalesce if: more than one reference, paged out, shadows
2000 	 * another object, or has a copy elsewhere (any of which mean that
2001 	 * the pages not mapped to prev_entry may be in use anyway).
2002 	 */
2003 
2004 	if (prev_object->backing_object != NULL) {
2005 		vm_object_drop(prev_object);
2006 		return (FALSE);
2007 	}
2008 
2009 	prev_size >>= PAGE_SHIFT;
2010 	next_size >>= PAGE_SHIFT;
2011 	next_pindex = prev_pindex + prev_size;
2012 
2013 	if ((prev_object->ref_count > 1) &&
2014 	    (prev_object->size != next_pindex)) {
2015 		vm_object_drop(prev_object);
2016 		return (FALSE);
2017 	}
2018 
2019 	/*
2020 	 * Remove any pages that may still be in the object from a previous
2021 	 * deallocation.
2022 	 */
2023 	if (next_pindex < prev_object->size) {
2024 		vm_object_page_remove(prev_object,
2025 				      next_pindex,
2026 				      next_pindex + next_size, FALSE);
2027 		if (prev_object->type == OBJT_SWAP)
2028 			swap_pager_freespace(prev_object,
2029 					     next_pindex, next_size);
2030 	}
2031 
2032 	/*
2033 	 * Extend the object if necessary.
2034 	 */
2035 	if (next_pindex + next_size > prev_object->size)
2036 		prev_object->size = next_pindex + next_size;
2037 
2038 	vm_object_drop(prev_object);
2039 	return (TRUE);
2040 }
2041 
2042 /*
2043  * Make the object writable and flag it as possibly being dirty.
2044  *
2045  * No requirements.
2046  */
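/*
 * For OBJT_VNODE objects this also sets VOBJDIRTY on the backing vnode,
 * presumably so that later vnode sync passes know the VM object may
 * contain dirty pages.
 */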
2047 void
2048 vm_object_set_writeable_dirty(vm_object_t object)
2049 {
2050 	struct vnode *vp;
2051 
2052 	lwkt_gettoken(&vm_token);
2053 	vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
2054 	if (object->type == OBJT_VNODE &&
2055 	    (vp = (struct vnode *)object->handle) != NULL) {
2056 		if ((vp->v_flag & VOBJDIRTY) == 0) {
2057 			vsetflags(vp, VOBJDIRTY);
2058 		}
2059 	}
2060 	lwkt_reltoken(&vm_token);
2061 }
2062 
2063 #include "opt_ddb.h"
2064 #ifdef DDB
2065 #include <sys/kernel.h>
2066 
2067 #include <sys/cons.h>
2068 
2069 #include <ddb/ddb.h>
2070 
2071 static int	_vm_object_in_map (vm_map_t map, vm_object_t object,
2072 				       vm_map_entry_t entry);
2073 static int	vm_object_in_map (vm_object_t object);
2074 
2075 /*
2076  * The caller must hold vm_token.
2077  */
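/*
 * Returns non-zero if 'object' is reachable from 'map'.  With
 * entry == NULL every entry in the map is checked; submap entries are
 * descended recursively and normal entries are followed down their
 * backing_object chains.
 */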
2078 static int
2079 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
2080 {
2081 	vm_map_t tmpm;
2082 	vm_map_entry_t tmpe;
2083 	vm_object_t obj;
2084 	int entcount;
2085 
2086 	if (map == 0)
2087 		return 0;
2088 	if (entry == 0) {
2089 		tmpe = map->header.next;
2090 		entcount = map->nentries;
2091 		while (entcount-- && (tmpe != &map->header)) {
2092 			if (_vm_object_in_map(map, object, tmpe)) {
2093 				return 1;
2094 			}
2095 			tmpe = tmpe->next;
2096 		}
2097 		return (0);
2098 	}
2099 	switch (entry->maptype) {
2100 	case VM_MAPTYPE_SUBMAP:
2101 		tmpm = entry->object.sub_map;
2102 		tmpe = tmpm->header.next;
2103 		entcount = tmpm->nentries;
2104 		while (entcount-- && tmpe != &tmpm->header) {
2105 			if (_vm_object_in_map(tmpm, object, tmpe)) {
2106 				return 1;
2107 			}
2108 			tmpe = tmpe->next;
2109 		}
2110 		break;
2111 	case VM_MAPTYPE_NORMAL:
2112 	case VM_MAPTYPE_VPAGETABLE:
2113 		obj = entry->object.vm_object;
2114 		while (obj) {
2115 			if (obj == object)
2116 				return 1;
2117 			obj = obj->backing_object;
2118 		}
2119 		break;
2120 	default:
2121 		break;
2122 	}
2123 	return 0;
2124 }
2125 
2126 static int vm_object_in_map_callback(struct proc *p, void *data);
2127 
2128 struct vm_object_in_map_info {
2129 	vm_object_t object;
2130 	int rv;
2131 };
2132 
2133 /*
2134  * Debugging only
2135  */
2136 static int
2137 vm_object_in_map(vm_object_t object)
2138 {
2139 	struct vm_object_in_map_info info;
2140 
2141 	info.rv = 0;
2142 	info.object = object;
2143 
2144 	allproc_scan(vm_object_in_map_callback, &info);
2145 	if (info.rv)
2146 		return 1;
2147 	if (_vm_object_in_map(&kernel_map, object, 0))
2148 		return 1;
2149 	if (_vm_object_in_map(&pager_map, object, 0))
2150 		return 1;
2151 	if (_vm_object_in_map(&buffer_map, object, 0))
2152 		return 1;
2153 	return 0;
2154 }
2155 
2156 /*
2157  * Debugging only
2158  */
2159 static int
2160 vm_object_in_map_callback(struct proc *p, void *data)
2161 {
2162 	struct vm_object_in_map_info *info = data;
2163 
2164 	if (p->p_vmspace) {
2165 		if (_vm_object_in_map(&p->p_vmspace->vm_map, info->object, 0)) {
2166 			info->rv = 1;
2167 			return -1;
2168 		}
2169 	}
2170 	return (0);
2171 }
2172 
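/*
 * Via DB_SHOW_COMMAND this is the ddb "show vmochk" command: it walks
 * vm_object_list and reports internal (unnamed default/swap) objects
 * that have a zero ref_count or that do not appear in any map.
 */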
2173 DB_SHOW_COMMAND(vmochk, vm_object_check)
2174 {
2175 	vm_object_t object;
2176 
2177 	/*
2178 	 * make sure that internal objs are in a map somewhere
2179 	 * and none have zero ref counts.
2180 	 */
2181 	for (object = TAILQ_FIRST(&vm_object_list);
2182 			object != NULL;
2183 			object = TAILQ_NEXT(object, object_list)) {
2184 		if (object->type == OBJT_MARKER)
2185 			continue;
2186 		if (object->handle == NULL &&
2187 		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
2188 			if (object->ref_count == 0) {
2189 				db_printf("vmochk: internal obj has zero ref count: %ld\n",
2190 					(long)object->size);
2191 			}
2192 			if (!vm_object_in_map(object)) {
2193 				db_printf(
2194 			"vmochk: internal obj is not in a map: "
2195 			"ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
2196 				    object->ref_count, (u_long)object->size,
2197 				    (u_long)object->size,
2198 				    (void *)object->backing_object);
2199 			}
2200 		}
2201 	}
2202 }
2203 
2204 /*
2205  * Debugging only
2206  */
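/*
 * Via DB_SHOW_COMMAND this is the ddb "show object <addr>" command: it
 * prints the object header and, when an address is supplied, dumps the
 * resident pages as (offset, physical address) pairs.
 */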
2207 DB_SHOW_COMMAND(object, vm_object_print_static)
2208 {
2209 	/* XXX convert args. */
2210 	vm_object_t object = (vm_object_t)addr;
2211 	boolean_t full = have_addr;
2212 
2213 	vm_page_t p;
2214 
2215 	/* XXX count is an (unused) arg.  Avoid shadowing it. */
2216 #define	count	was_count
2217 
2218 	int count;
2219 
2220 	if (object == NULL)
2221 		return;
2222 
2223 	db_iprintf(
2224 	    "Object %p: type=%d, size=0x%lx, res=%d, ref=%d, flags=0x%x\n",
2225 	    object, (int)object->type, (u_long)object->size,
2226 	    object->resident_page_count, object->ref_count, object->flags);
2227 	/*
2228 	 * XXX no %qd in kernel.  Truncate object->backing_object_offset.
2229 	 */
2230 	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%lx\n",
2231 	    object->shadow_count,
2232 	    object->backing_object ? object->backing_object->ref_count : 0,
2233 	    object->backing_object, (long)object->backing_object_offset);
2234 
2235 	if (!full)
2236 		return;
2237 
2238 	db_indent += 2;
2239 	count = 0;
2240 	RB_FOREACH(p, vm_page_rb_tree, &object->rb_memq) {
2241 		if (count == 0)
2242 			db_iprintf("memory:=");
2243 		else if (count == 6) {
2244 			db_printf("\n");
2245 			db_iprintf(" ...");
2246 			count = 0;
2247 		} else
2248 			db_printf(",");
2249 		count++;
2250 
2251 		db_printf("(off=0x%lx,page=0x%lx)",
2252 		    (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p));
2253 	}
2254 	if (count != 0)
2255 		db_printf("\n");
2256 	db_indent -= 2;
2257 }
2258 
2259 /* XXX. */
2260 #undef count
2261 
2262 /*
2263  * XXX need this non-static entry for calling from vm_map_print.
2264  *
2265  * Debugging only
2266  */
2267 void
2268 vm_object_print(/* db_expr_t */ long addr,
2269 		boolean_t have_addr,
2270 		/* db_expr_t */ long count,
2271 		char *modif)
2272 {
2273 	vm_object_print_static(addr, have_addr, count, modif);
2274 }
2275 
2276 /*
2277  * Debugging only
2278  */
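/*
 * Via DB_SHOW_COMMAND this is the ddb "show vmopag" command: for each
 * object it prints runs of physically contiguous resident pages as
 * (index, run length, physical address) tuples, pausing for input after
 * roughly every 18 lines of output.
 */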
2279 DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
2280 {
2281 	vm_object_t object;
2282 	int nl = 0;
2283 	int c;
2284 	for (object = TAILQ_FIRST(&vm_object_list);
2285 			object != NULL;
2286 			object = TAILQ_NEXT(object, object_list)) {
2287 		vm_pindex_t idx, fidx;
2288 		vm_pindex_t osize;
2289 		vm_paddr_t pa = -1, padiff;
2290 		int rcount;
2291 		vm_page_t m;
2292 
2293 		if (object->type == OBJT_MARKER)
2294 			continue;
2295 		db_printf("new object: %p\n", (void *)object);
2296 		if (nl > 18) {
2297 			c = cngetc();
2298 			if (c != ' ')
2299 				return;
2300 			nl = 0;
2301 		}
2302 		nl++;
2303 		rcount = 0;
2304 		fidx = 0;
2305 		osize = object->size;
2306 		if (osize > 128)
2307 			osize = 128;
2308 		for (idx = 0; idx < osize; idx++) {
2309 			m = vm_page_lookup(object, idx);
2310 			if (m == NULL) {
2311 				if (rcount) {
2312 					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2313 						(long)fidx, rcount, (long)pa);
2314 					if (nl > 18) {
2315 						c = cngetc();
2316 						if (c != ' ')
2317 							return;
2318 						nl = 0;
2319 					}
2320 					nl++;
2321 					rcount = 0;
2322 				}
2323 				continue;
2324 			}
2325 
2326 
2327 			if (rcount &&
2328 				(VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
2329 				++rcount;
2330 				continue;
2331 			}
2332 			if (rcount) {
2333 				padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
2334 				padiff >>= PAGE_SHIFT;
2335 				padiff &= PQ_L2_MASK;
2336 				if (padiff == 0) {
2337 					pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
2338 					++rcount;
2339 					continue;
2340 				}
2341 				db_printf(" index(%ld)run(%d)pa(0x%lx)",
2342 					(long)fidx, rcount, (long)pa);
2343 				db_printf("pd(%ld)\n", (long)padiff);
2344 				if (nl > 18) {
2345 					c = cngetc();
2346 					if (c != ' ')
2347 						return;
2348 					nl = 0;
2349 				}
2350 				nl++;
2351 			}
2352 			fidx = idx;
2353 			pa = VM_PAGE_TO_PHYS(m);
2354 			rcount = 1;
2355 		}
2356 		if (rcount) {
2357 			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2358 				(long)fidx, rcount, (long)pa);
2359 			if (nl > 18) {
2360 				c = cngetc();
2361 				if (c != ' ')
2362 					return;
2363 				nl = 0;
2364 			}
2365 			nl++;
2366 		}
2367 	}
2368 }
2369 #endif /* DDB */
2370