xref: /dragonfly/sys/vm/vm_object.c (revision 23265324)
1 /*
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
37  *
38  *
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  *
64  * $FreeBSD: src/sys/vm/vm_object.c,v 1.171.2.8 2003/05/26 19:17:56 alc Exp $
65  * $DragonFly: src/sys/vm/vm_object.c,v 1.29 2006/12/28 21:24:02 dillon Exp $
66  */
67 
68 /*
69  *	Virtual memory object module.
70  */
71 
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/proc.h>		/* for curproc, pageproc */
75 #include <sys/vnode.h>
76 #include <sys/vmmeter.h>
77 #include <sys/mman.h>
78 #include <sys/mount.h>
79 #include <sys/kernel.h>
80 #include <sys/sysctl.h>
81 
82 #include <vm/vm.h>
83 #include <vm/vm_param.h>
84 #include <vm/pmap.h>
85 #include <vm/vm_map.h>
86 #include <vm/vm_object.h>
87 #include <vm/vm_page.h>
88 #include <vm/vm_pageout.h>
89 #include <vm/vm_pager.h>
90 #include <vm/swap_pager.h>
91 #include <vm/vm_kern.h>
92 #include <vm/vm_extern.h>
93 #include <vm/vm_zone.h>
94 
95 #define EASY_SCAN_FACTOR	8
96 
97 static void	vm_object_qcollapse(vm_object_t object);
98 static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
99 					     int pagerflags);
100 
101 /*
102  *	Virtual memory objects maintain the actual data
103  *	associated with allocated virtual memory.  A given
104  *	page of memory exists within exactly one object.
105  *
106  *	An object is only deallocated when all "references"
107  *	are given up.  Only one "reference" to a given
108  *	region of an object should be writeable.
109  *
110  *	Associated with each object is a list of all resident
111  *	memory pages belonging to that object; this list is
112  *	maintained by the "vm_page" module, and locked by the object's
113  *	lock.
114  *
115  *	Each object also records a "pager" routine which is
116  *	used to retrieve (and store) pages to the proper backing
117  *	storage.  In addition, objects may be backed by other
118  *	objects from which they were virtual-copied.
119  *
120  *	The only items within the object structure which are
121  *	modified after time of creation are:
122  *		reference count		locked by object's lock
123  *		pager routine		locked by object's lock
124  *
125  */
126 
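/*
 * Illustrative sketch (not part of the original source): the reference
 * lifecycle described above, for a hypothetical 16-page anonymous
 * object.  vm_object_allocate() returns the object holding one
 * reference; each vm_object_reference() adds one, each
 * vm_object_deallocate() drops one, and the final drop terminates
 * the object:
 *
 *	vm_object_t obj;
 *
 *	obj = vm_object_allocate(OBJT_DEFAULT, 16);	(ref_count 1)
 *	vm_object_reference(obj);			(ref_count 2)
 *	vm_object_deallocate(obj);			(ref_count 1)
 *	vm_object_deallocate(obj);			(terminated)
 */
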
127 struct object_q vm_object_list;
128 struct vm_object kernel_object;
129 
130 static long vm_object_count;		/* count of all objects */
131 extern int vm_pageout_page_count;
132 
133 static long object_collapses;
134 static long object_bypasses;
135 static int next_index;
136 static vm_zone_t obj_zone;
137 static struct vm_zone obj_zone_store;
138 static int object_hash_rand;
139 #define VM_OBJECTS_INIT 256
140 static struct vm_object vm_objects_init[VM_OBJECTS_INIT];
141 
142 void
143 _vm_object_allocate(objtype_t type, vm_size_t size, vm_object_t object)
144 {
145 	int incr;
146 	RB_INIT(&object->rb_memq);
147 	LIST_INIT(&object->shadow_head);
148 
149 	object->type = type;
150 	object->size = size;
151 	object->ref_count = 1;
152 	object->flags = 0;
153 	if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
154 		vm_object_set_flag(object, OBJ_ONEMAPPING);
155 	object->paging_in_progress = 0;
156 	object->resident_page_count = 0;
157 	object->shadow_count = 0;
158 	object->pg_color = next_index;
159 	if (size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
160 		incr = PQ_L2_SIZE / 3 + PQ_PRIME1;
161 	else
162 		incr = size;
163 	next_index = (next_index + incr) & PQ_L2_MASK;
164 	object->handle = NULL;
165 	object->backing_object = NULL;
166 	object->backing_object_offset = (vm_ooffset_t) 0;
167 	/*
168 	 * Try to generate a number that will spread objects out in the
169 	 * hash table.  We 'wipe' new objects across the hash in 128 page
170 	 * increments plus 1 more to offset it a little more by the time
171 	 * it wraps around.
172 	 */
173 	object->hash_rand = object_hash_rand - 129;
174 
175 	object->generation++;
176 
177 	crit_enter();
178 	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
179 	vm_object_count++;
180 	object_hash_rand = object->hash_rand;
181 	crit_exit();
182 }
183 
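/*
 * Worked example for the pg_color logic above (hypothetical numbers):
 * with PQ_L2_SIZE == 256, PQ_PRIME1 == 31 and PQ_L2_MASK == 255, a
 * 4-page object allocated while next_index == 250 gets
 * pg_color == 250 and advances next_index to (250 + 4) & 255 == 254.
 * Objects larger than 256 / 3 + 31 == 116 pages advance next_index
 * by 116 instead, so a run of large allocations still cycles through
 * distinct colors rather than piling onto one.
 */
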
184 /*
185  *	vm_object_init:
186  *
187  *	Initialize the VM objects module.
188  */
189 void
190 vm_object_init(void)
191 {
192 	TAILQ_INIT(&vm_object_list);
193 
194 	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(KvaEnd),
195 			    &kernel_object);
196 
197 	obj_zone = &obj_zone_store;
198 	zbootinit(obj_zone, "VM OBJECT", sizeof (struct vm_object),
199 		vm_objects_init, VM_OBJECTS_INIT);
200 }
201 
202 void
203 vm_object_init2(void)
204 {
205 	zinitna(obj_zone, NULL, NULL, 0, 0, ZONE_PANICFAIL, 1);
206 }
207 
208 /*
209  *	vm_object_allocate:
210  *
211  *	Returns a new object with the given size.
212  */
213 
214 vm_object_t
215 vm_object_allocate(objtype_t type, vm_size_t size)
216 {
217 	vm_object_t result;
218 
219 	result = (vm_object_t) zalloc(obj_zone);
220 
221 	_vm_object_allocate(type, size, result);
222 
223 	return (result);
224 }
225 
226 
227 /*
228  *	vm_object_reference:
229  *
230  *	Gets another reference to the given object.
231  */
232 void
233 vm_object_reference(vm_object_t object)
234 {
235 	if (object == NULL)
236 		return;
237 
238 	object->ref_count++;
239 	if (object->type == OBJT_VNODE) {
240 		vref(object->handle);
241 		/* XXX what if the vnode is being destroyed? */
242 	}
243 }
244 
245 static void
246 vm_object_vndeallocate(vm_object_t object)
247 {
248 	struct vnode *vp = (struct vnode *) object->handle;
249 
250 	KASSERT(object->type == OBJT_VNODE,
251 	    ("vm_object_vndeallocate: not a vnode object"));
252 	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
253 #ifdef INVARIANTS
254 	if (object->ref_count == 0) {
255 		vprint("vm_object_vndeallocate", vp);
256 		panic("vm_object_vndeallocate: bad object reference count");
257 	}
258 #endif
259 
260 	object->ref_count--;
261 	if (object->ref_count == 0)
262 		vp->v_flag &= ~VTEXT;
263 	vrele(vp);
264 }
265 
266 /*
267  *	vm_object_deallocate:
268  *
269  *	Release a reference to the specified object,
270  *	gained either through a vm_object_allocate
271  *	or a vm_object_reference call.  When all references
272  *	are gone, storage associated with this object
273  *	may be relinquished.
274  *
275  *	No object may be locked.
276  */
277 void
278 vm_object_deallocate(vm_object_t object)
279 {
280 	vm_object_t temp;
281 
282 	while (object != NULL) {
283 		if (object->type == OBJT_VNODE) {
284 			vm_object_vndeallocate(object);
285 			return;
286 		}
287 
288 		if (object->ref_count == 0) {
289 			panic("vm_object_deallocate: object deallocated too many times: %d", object->type);
290 		} else if (object->ref_count > 2) {
291 			object->ref_count--;
292 			return;
293 		}
294 
295 		/*
296 		 * Here on ref_count of one or two, which are special cases for
297 		 * objects.
298 		 */
299 		if ((object->ref_count == 2) && (object->shadow_count == 0)) {
300 			vm_object_set_flag(object, OBJ_ONEMAPPING);
301 			object->ref_count--;
302 			return;
303 		} else if ((object->ref_count == 2) && (object->shadow_count == 1)) {
304 			object->ref_count--;
305 			if ((object->handle == NULL) &&
306 			    (object->type == OBJT_DEFAULT ||
307 			     object->type == OBJT_SWAP)) {
308 				vm_object_t robject;
309 
310 				robject = LIST_FIRST(&object->shadow_head);
311 				KASSERT(robject != NULL,
312 				    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
313 					 object->ref_count,
314 					 object->shadow_count));
315 				if ((robject->handle == NULL) &&
316 				    (robject->type == OBJT_DEFAULT ||
317 				     robject->type == OBJT_SWAP)) {
318 
319 					robject->ref_count++;
320 
321 					while (
322 						robject->paging_in_progress ||
323 						object->paging_in_progress
324 					) {
325 						vm_object_pip_sleep(robject, "objde1");
326 						vm_object_pip_sleep(object, "objde2");
327 					}
328 
329 					if (robject->ref_count == 1) {
330 						robject->ref_count--;
331 						object = robject;
332 						goto doterm;
333 					}
334 
335 					object = robject;
336 					vm_object_collapse(object);
337 					continue;
338 				}
339 			}
340 
341 			return;
342 
343 		} else {
344 			object->ref_count--;
345 			if (object->ref_count != 0)
346 				return;
347 		}
348 
349 doterm:
350 
351 		temp = object->backing_object;
352 		if (temp) {
353 			LIST_REMOVE(object, shadow_list);
354 			temp->shadow_count--;
355 			temp->generation++;
356 			object->backing_object = NULL;
357 		}
358 
359 		/*
360 		 * Don't double-terminate, we could be in a termination
361 		 * recursion due to the terminate having to sync data
362 		 * to disk.
363 		 */
364 		if ((object->flags & OBJ_DEAD) == 0)
365 			vm_object_terminate(object);
366 		object = temp;
367 	}
368 }
369 
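/*
 * Illustrative scenario for the ref_count == 2 special cases above
 * (a sketch, not from the original source): after a fork-style copy,
 * an anonymous object B can be left with ref_count == 2 and a single
 * shadow object S (shadow_count == 1).  When one holder calls
 * vm_object_deallocate(B), the surviving reference is S's, so the
 * code bumps S's ref_count, waits out paging activity on both
 * objects, and then runs vm_object_collapse() on S to keep the
 * shadow chain from growing without bound.
 */
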
370 /*
371  *	vm_object_terminate actually destroys the specified object, freeing
372  *	up all previously used resources.
373  *
374  *	The object must be locked.
375  *	This routine may block.
376  */
377 static int vm_object_terminate_callback(vm_page_t p, void *data);
378 
379 void
380 vm_object_terminate(vm_object_t object)
381 {
382 	/*
383 	 * Make sure no one uses us.
384 	 */
385 	vm_object_set_flag(object, OBJ_DEAD);
386 
387 	/*
388 	 * wait for the pageout daemon to be done with the object
389 	 */
390 	vm_object_pip_wait(object, "objtrm");
391 
392 	KASSERT(!object->paging_in_progress,
393 		("vm_object_terminate: pageout in progress"));
394 
395 	/*
396 	 * Clean and free the pages, as appropriate. All references to the
397 	 * object are gone, so we don't need to lock it.
398 	 */
399 	if (object->type == OBJT_VNODE) {
400 		struct vnode *vp;
401 
402 		/*
403 		 * Clean pages and flush buffers.
404 		 */
405 		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
406 
407 		vp = (struct vnode *) object->handle;
408 		vinvalbuf(vp, V_SAVE, 0, 0);
409 	}
410 
411 	/*
412 	 * Wait for any I/O to complete, after which there had better not
413 	 * be any references left on the object.
414 	 */
415 	vm_object_pip_wait(object, "objtrm");
416 
417 	if (object->ref_count != 0)
418 		panic("vm_object_terminate: object with references, ref_count=%d", object->ref_count);
419 
420 	/*
421 	 * Now free any remaining pages. For internal objects, this also
422 	 * removes them from paging queues. Don't free wired pages, just
423 	 * remove them from the object.
424 	 */
425 	crit_enter();
426 	vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
427 				vm_object_terminate_callback, NULL);
428 	crit_exit();
429 
430 	/*
431 	 * Let the pager know object is dead.
432 	 */
433 	vm_pager_deallocate(object);
434 
435 	/*
436 	 * Remove the object from the global object list.
437 	 */
438 	crit_enter();
439 	TAILQ_REMOVE(&vm_object_list, object, object_list);
440 	vm_object_count--;
441 	crit_exit();
442 
443 	wakeup(object);
444 	if (object->ref_count != 0)
445 		panic("vm_object_terminate2: object with references, ref_count=%d", object->ref_count);
446 
447 	/*
448 	 * Free the space for the object.
449 	 */
450 	zfree(obj_zone, object);
451 }
452 
453 static int
454 vm_object_terminate_callback(vm_page_t p, void *data __unused)
455 {
456 	if (p->busy || (p->flags & PG_BUSY))
457 		panic("vm_object_terminate: freeing busy page %p", p);
458 	if (p->wire_count == 0) {
459 		vm_page_busy(p);
460 		vm_page_free(p);
461 		mycpu->gd_cnt.v_pfree++;
462 	} else {
463 		vm_page_busy(p);
464 		vm_page_remove(p);
465 		vm_page_wakeup(p);
466 	}
467 	return(0);
468 }
469 
470 /*
471  *	vm_object_page_clean
472  *
473  *	Clean all dirty pages in the specified range of object.  Leaves page
474  * 	on whatever queue it is currently on.   If NOSYNC is set then do not
475  *	write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC),
476  *	leaving the object dirty.
477  *
478  *	When stuffing pages asynchronously, allow clustering.  XXX we need a
479  *	synchronous clustering mode implementation.
480  *
481  *	Odd semantics: if end == 0, we clean all the way to the end of the object.
482  */
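/*
 * Usage sketch, mirroring the call made from vm_object_terminate()
 * above: synchronously flush every dirty page of a vnode object:
 *
 *	vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
 *
 * start == 0 and end == 0 select the entire object.
 */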
483 static int vm_object_page_clean_pass1(struct vm_page *p, void *data);
484 static int vm_object_page_clean_pass2(struct vm_page *p, void *data);
485 
486 void
487 vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
488 		     int flags)
489 {
490 	struct rb_vm_page_scan_info info;
491 	struct vnode *vp;
492 	int wholescan;
493 	int pagerflags;
494 	int curgeneration;
495 
496 	if (object->type != OBJT_VNODE ||
497 		(object->flags & OBJ_MIGHTBEDIRTY) == 0)
498 		return;
499 
500 	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ?
501 			VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
502 	pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;
503 
504 	vp = object->handle;
505 
506 	/*
507 	 * Interlock other major object operations.  This allows us to
508 	 * temporarily clear OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY.
509 	 */
510 	crit_enter();
511 	vm_object_set_flag(object, OBJ_CLEANING);
512 
513 	/*
514 	 * Handle 'entire object' case
515 	 */
516 	info.start_pindex = start;
517 	if (end == 0) {
518 		info.end_pindex = object->size - 1;
519 	} else {
520 		info.end_pindex = end - 1;
521 	}
522 	wholescan = (start == 0 && info.end_pindex == object->size - 1);
523 	info.limit = flags;
524 	info.pagerflags = pagerflags;
525 	info.object = object;
526 
527 	/*
528 	 * If cleaning the entire object do a pass to mark the pages read-only.
529 	 * If everything worked out ok, clear OBJ_WRITEABLE and
530 	 * OBJ_MIGHTBEDIRTY.
531 	 */
532 	if (wholescan) {
533 		info.error = 0;
534 		vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
535 					vm_object_page_clean_pass1, &info);
536 		if (info.error == 0) {
537 			vm_object_clear_flag(object,
538 					     OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
539 			if (object->type == OBJT_VNODE &&
540 			    (vp = (struct vnode *)object->handle) != NULL) {
541 				if (vp->v_flag & VOBJDIRTY)
542 					vclrflags(vp, VOBJDIRTY);
543 			}
544 		}
545 	}
546 
547 	/*
548 	 * Do a pass to clean all the dirty pages we find.
549 	 */
550 	do {
551 		info.error = 0;
552 		curgeneration = object->generation;
553 		vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
554 					vm_object_page_clean_pass2, &info);
555 	} while (info.error || curgeneration != object->generation);
556 
557 	vm_object_clear_flag(object, OBJ_CLEANING);
558 	crit_exit();
559 }
560 
561 static
562 int
563 vm_object_page_clean_pass1(struct vm_page *p, void *data)
564 {
565 	struct rb_vm_page_scan_info *info = data;
566 
567 	vm_page_flag_set(p, PG_CLEANCHK);
568 	if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC))
569 		info->error = 1;
570 	else
571 		vm_page_protect(p, VM_PROT_READ);
572 	return(0);
573 }
574 
575 static
576 int
577 vm_object_page_clean_pass2(struct vm_page *p, void *data)
578 {
579 	struct rb_vm_page_scan_info *info = data;
580 	int n;
581 
582 	/*
583 	 * Do not mess with pages that were inserted after we started
584 	 * the cleaning pass.
585 	 */
586 	if ((p->flags & PG_CLEANCHK) == 0)
587 		return(0);
588 
589 	/*
590 	 * Before wasting time traversing the pmaps, check for trivial
591 	 * cases where the page cannot be dirty.
592 	 */
593 	if (p->valid == 0 || (p->queue - p->pc) == PQ_CACHE) {
594 		KKASSERT((p->dirty & p->valid) == 0);
595 		return(0);
596 	}
597 
598 	/*
599 	 * Check whether the page is dirty or not.  The page has been set
600 	 * to be read-only so the check will not race a user dirtying the
601 	 * page.
602 	 */
603 	vm_page_test_dirty(p);
604 	if ((p->dirty & p->valid) == 0) {
605 		vm_page_flag_clear(p, PG_CLEANCHK);
606 		return(0);
607 	}
608 
609 	/*
610 	 * If we have been asked to skip nosync pages and this is a
611 	 * nosync page, skip it.  Note that the object flags were
612 	 * not cleared in this case (because pass1 will have returned an
613 	 * error), so we do not have to set them.
614 	 */
615 	if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
616 		vm_page_flag_clear(p, PG_CLEANCHK);
617 		return(0);
618 	}
619 
620 	/*
621 	 * Flush as many pages as we can.  PG_CLEANCHK will be cleared on
622 	 * the pages that get successfully flushed.  Set info->error if
623 	 * we raced an object modification.
624 	 */
625 	n = vm_object_page_collect_flush(info->object, p, info->pagerflags);
626 	if (n == 0)
627 		info->error = 1;
628 	return(0);
629 }
630 
631 /*
632  * This routine must be called within a critical section to properly avoid
633  * an interrupt unbusy/free race that can occur prior to the busy check.
634  *
635  * Using the object generation number here to detect page ripout is not
636  * the best idea in the world. XXX
637  *
638  * NOTE: we operate under the assumption that a page found to not be busy
639  * will not be ripped out from under us by an interrupt.  XXX we should
640  * recode this to explicitly busy the pages.
641  */
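/*
 * Worked example (hypothetical page layout): assume
 * vm_pageout_page_count == 16 and that p sits at pindex 100 with
 * dirty neighbors at 101-107 ahead and 97-99 behind, bounded by
 * clean pages at 108 and 96.  The forward scan collects maxf == 7,
 * so chkb == 16 - 7 == 9 and up to 8 look-behind pages are examined,
 * of which maxb == 3 qualify.  ma[] is then assembled in ascending
 * order 97..107 and flushed as a single cluster of runlen == 11.
 */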
642 static int
643 vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags)
644 {
645 	int runlen;
646 	int maxf;
647 	int chkb;
648 	int maxb;
649 	int i;
650 	int curgeneration;
651 	vm_pindex_t pi;
652 	vm_page_t maf[vm_pageout_page_count];
653 	vm_page_t mab[vm_pageout_page_count];
654 	vm_page_t ma[vm_pageout_page_count];
655 
656 	curgeneration = object->generation;
657 
658 	pi = p->pindex;
659 	while (vm_page_sleep_busy(p, TRUE, "vpcwai")) {
660 		if (object->generation != curgeneration) {
661 			return(0);
662 		}
663 	}
664 	KKASSERT(p->object == object && p->pindex == pi);
665 
666 	maxf = 0;
667 	for (i = 1; i < vm_pageout_page_count; i++) {
668 		vm_page_t tp;
669 
670 		if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
671 			if ((tp->flags & PG_BUSY) ||
672 				((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
673 				 (tp->flags & PG_CLEANCHK) == 0) ||
674 				(tp->busy != 0))
675 				break;
676 			if ((tp->queue - tp->pc) == PQ_CACHE) {
677 				vm_page_flag_clear(tp, PG_CLEANCHK);
678 				break;
679 			}
680 			vm_page_test_dirty(tp);
681 			if ((tp->dirty & tp->valid) == 0) {
682 				vm_page_flag_clear(tp, PG_CLEANCHK);
683 				break;
684 			}
685 			maf[ i - 1 ] = tp;
686 			maxf++;
687 			continue;
688 		}
689 		break;
690 	}
691 
692 	maxb = 0;
693 	chkb = vm_pageout_page_count - maxf;
694 	if (chkb) {
695 		for (i = 1; i < chkb; i++) {
696 			vm_page_t tp;
697 
698 			if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
699 				if ((tp->flags & PG_BUSY) ||
700 					((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
701 					 (tp->flags & PG_CLEANCHK) == 0) ||
702 					(tp->busy != 0))
703 					break;
704 				if ((tp->queue - tp->pc) == PQ_CACHE) {
705 					vm_page_flag_clear(tp, PG_CLEANCHK);
706 					break;
707 				}
708 				vm_page_test_dirty(tp);
709 				if ((tp->dirty & tp->valid) == 0) {
710 					vm_page_flag_clear(tp, PG_CLEANCHK);
711 					break;
712 				}
713 				mab[ i - 1 ] = tp;
714 				maxb++;
715 				continue;
716 			}
717 			break;
718 		}
719 	}
720 
721 	for (i = 0; i < maxb; i++) {
722 		int index = (maxb - i) - 1;
723 		ma[index] = mab[i];
724 		vm_page_flag_clear(ma[index], PG_CLEANCHK);
725 	}
726 	vm_page_flag_clear(p, PG_CLEANCHK);
727 	ma[maxb] = p;
728 	for (i = 0; i < maxf; i++) {
729 		int index = (maxb + i) + 1;
730 		ma[index] = maf[i];
731 		vm_page_flag_clear(ma[index], PG_CLEANCHK);
732 	}
733 	runlen = maxb + maxf + 1;
734 
735 	vm_pageout_flush(ma, runlen, pagerflags);
736 	for (i = 0; i < runlen; i++) {
737 		if (ma[i]->valid & ma[i]->dirty) {
738 			vm_page_protect(ma[i], VM_PROT_READ);
739 			vm_page_flag_set(ma[i], PG_CLEANCHK);
740 
741 			/*
742 			 * maxf will end up being the actual number of pages
743 			 * we wrote out contiguously, non-inclusive of the
744 			 * first page.  We do not count look-behind pages.
745 			 */
746 			if (i >= maxb + 1 && (maxf > i - maxb - 1))
747 				maxf = i - maxb - 1;
748 		}
749 	}
750 	return(maxf + 1);
751 }
752 
753 #ifdef not_used
754 /* XXX I cannot tell if this should be an exported symbol */
755 /*
756  *	vm_object_deactivate_pages
757  *
758  *	Deactivate all pages in the specified object.  (Keep its pages
759  *	in memory even though it is no longer referenced.)
760  *
761  *	The object must be locked.
762  */
763 static int vm_object_deactivate_pages_callback(vm_page_t p, void *data);
764 
765 static void
766 vm_object_deactivate_pages(vm_object_t object)
767 {
768 	crit_enter();
769 	vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
770 				vm_object_deactivate_pages_callback, NULL);
771 	crit_exit();
772 }
773 
774 static int
775 vm_object_deactivate_pages_callback(vm_page_t p, void *data __unused)
776 {
777 	vm_page_deactivate(p);
778 	return(0);
779 }
780 
781 #endif
782 
783 /*
784  * Same as vm_object_pmap_copy, except range checking really
785  * works, and is meant for small sections of an object.
786  *
787  * This code protects resident pages by making them read-only
788  * and is typically called on a fork or split when a page
789  * is converted to copy-on-write.
790  *
791  * NOTE: If the page is already at VM_PROT_NONE, calling
792  * vm_page_protect will have no effect.
793  */
794 void
795 vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
796 {
797 	vm_pindex_t idx;
798 	vm_page_t p;
799 
800 	if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
801 		return;
802 
803 	/*
804 	 * spl protection needed to prevent races between the lookup,
805 	 * an interrupt unbusy/free, and our protect call.
806 	 */
807 	crit_enter();
808 	for (idx = start; idx < end; idx++) {
809 		p = vm_page_lookup(object, idx);
810 		if (p == NULL)
811 			continue;
812 		vm_page_protect(p, VM_PROT_READ);
813 	}
814 	crit_exit();
815 }
816 
817 /*
818  *	vm_object_pmap_remove:
819  *
820  *	Removes all physical pages in the specified
821  *	object range from all physical maps.
822  *
823  *	The object must *not* be locked.
824  */
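/*
 * Usage sketch: revoke all hardware mappings of an object's resident
 * pages before invalidating them (the range is in page indexes).
 * Covering the full range also lets the code below clear
 * OBJ_WRITEABLE:
 *
 *	vm_object_pmap_remove(object, 0, object->size);
 */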
825 
826 static int vm_object_pmap_remove_callback(vm_page_t p, void *data);
827 
828 void
829 vm_object_pmap_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
830 {
831 	struct rb_vm_page_scan_info info;
832 
833 	if (object == NULL)
834 		return;
835 	info.start_pindex = start;
836 	info.end_pindex = end - 1;
837 	crit_enter();
838 	vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
839 				vm_object_pmap_remove_callback, &info);
840 	if (start == 0 && end == object->size)
841 		vm_object_clear_flag(object, OBJ_WRITEABLE);
842 	crit_exit();
843 }
844 
845 static int
846 vm_object_pmap_remove_callback(vm_page_t p, void *data __unused)
847 {
848 	vm_page_protect(p, VM_PROT_NONE);
849 	return(0);
850 }
851 
852 /*
853  *	vm_object_madvise:
854  *
855  *	Implements the madvise function at the object/page level.
856  *
857  *	MADV_WILLNEED	(any object)
858  *
859  *	    Activate the specified pages if they are resident.
860  *
861  *	MADV_DONTNEED	(any object)
862  *
863  *	    Deactivate the specified pages if they are resident.
864  *
865  *	MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects,
866  *			 OBJ_ONEMAPPING only)
867  *
868  *	    Deactivate and clean the specified pages if they are
869  *	    resident.  This permits the process to reuse the pages
870 	 *	    without faulting, or the kernel to reclaim the pages
871  *	    without I/O.
872  */
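/*
 * Usage sketch (hypothetical caller, e.g. an madvise() system call
 * path): advise a 32-page run starting at pindex 0 that it will not
 * be needed again soon:
 *
 *	vm_object_madvise(object, 0, 32, MADV_DONTNEED);
 */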
873 void
874 vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
875 {
876 	vm_pindex_t end, tpindex;
877 	vm_object_t tobject;
878 	vm_page_t m;
879 
880 	if (object == NULL)
881 		return;
882 
883 	end = pindex + count;
884 
885 	/*
886 	 * Locate and adjust resident pages
887 	 */
888 
889 	for (; pindex < end; pindex += 1) {
890 relookup:
891 		tobject = object;
892 		tpindex = pindex;
893 shadowlookup:
894 		/*
895 		 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
896 		 * and those pages must be OBJ_ONEMAPPING.
897 		 */
898 		if (advise == MADV_FREE) {
899 			if ((tobject->type != OBJT_DEFAULT &&
900 			     tobject->type != OBJT_SWAP) ||
901 			    (tobject->flags & OBJ_ONEMAPPING) == 0) {
902 				continue;
903 			}
904 		}
905 
906 		/*
907 		 * spl protection is required to avoid a race between the
908 		 * lookup, an interrupt unbusy/free, and our busy check.
909 		 */
910 
911 		crit_enter();
912 		m = vm_page_lookup(tobject, tpindex);
913 
914 		if (m == NULL) {
915 			/*
916 			 * There may be swap even if there is no backing page
917 			 */
918 			if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
919 				swap_pager_freespace(tobject, tpindex, 1);
920 
921 			/*
922 			 * next object
923 			 */
924 			crit_exit();
925 			if (tobject->backing_object == NULL)
926 				continue;
927 			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
928 			tobject = tobject->backing_object;
929 			goto shadowlookup;
930 		}
931 
932 		/*
933 		 * If the page is busy or not in a normal active state,
934 		 * we skip it.  If the page is not managed there are no
935 		 * page queues to mess with.  Things can break if we mess
936 		 * with pages in any of the below states.
937 		 */
938 		if (
939 		    m->hold_count ||
940 		    m->wire_count ||
941 		    (m->flags & PG_UNMANAGED) ||
942 		    m->valid != VM_PAGE_BITS_ALL
943 		) {
944 			crit_exit();
945 			continue;
946 		}
947 
948 		if (vm_page_sleep_busy(m, TRUE, "madvpo")) {
949 			crit_exit();
950 			goto relookup;
951 		}
952 		crit_exit();
953 
954 		/*
955 		 * Theoretically once a page is known not to be busy, an
956 		 * interrupt cannot come along and rip it out from under us.
957 		 */
958 
959 		if (advise == MADV_WILLNEED) {
960 			vm_page_activate(m);
961 		} else if (advise == MADV_DONTNEED) {
962 			vm_page_dontneed(m);
963 		} else if (advise == MADV_FREE) {
964 			/*
965 			 * Mark the page clean.  This will allow the page
966 			 * to be freed up by the system.  However, such pages
967 			 * are often reused quickly by malloc()/free()
968 			 * so we do not do anything that would cause
969 			 * a page fault if we can help it.
970 			 *
971 			 * Specifically, we do not try to actually free
972 			 * the page now nor do we try to put it in the
973 			 * cache (which would cause a page fault on reuse).
974 			 *
975  			 * But we do make the page as freeable as we
976 			 * can without actually taking the step of unmapping
977 			 * it.
978 			 */
979 			pmap_clear_modify(m);
980 			m->dirty = 0;
981 			m->act_count = 0;
982 			vm_page_dontneed(m);
983 			if (tobject->type == OBJT_SWAP)
984 				swap_pager_freespace(tobject, tpindex, 1);
985 		}
986 	}
987 }
988 
989 /*
990  *	vm_object_shadow:
991  *
992  *	Create a new object which is backed by the
993  *	specified existing object range.  The source
994  *	object reference is deallocated.
995  *
996  *	The new object and offset into that object
997  *	are returned in the source parameters.
998  */
999 
1000 void
1001 vm_object_shadow(vm_object_t *object,	/* IN/OUT */
1002 		 vm_ooffset_t *offset,	/* IN/OUT */
1003 		 vm_size_t length)
1004 {
1005 	vm_object_t source;
1006 	vm_object_t result;
1007 
1008 	source = *object;
1009 
1010 	/*
1011 	 * Don't create the new object if the old object isn't shared.
1012 	 */
1013 
1014 	if (source != NULL &&
1015 	    source->ref_count == 1 &&
1016 	    source->handle == NULL &&
1017 	    (source->type == OBJT_DEFAULT ||
1018 	     source->type == OBJT_SWAP))
1019 		return;
1020 
1021 	/*
1022 	 * Allocate a new object with the given length
1023 	 */
1024 
1025 	if ((result = vm_object_allocate(OBJT_DEFAULT, length)) == NULL)
1026 		panic("vm_object_shadow: no object for shadowing");
1027 
1028 	/*
1029 	 * The new object shadows the source object, adding a reference to it.
1030 	 * Our caller changes his reference to point to the new object,
1031 	 * removing a reference to the source object.  Net result: no change
1032 	 * of reference count.
1033 	 *
1034 	 * Try to optimize the result object's page color when shadowing
1035 	 * in order to maintain page coloring consistency in the combined
1036 	 * shadowed object.
1037 	 */
1038 	result->backing_object = source;
1039 	if (source) {
1040 		LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
1041 		source->shadow_count++;
1042 		source->generation++;
1043 		result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) & PQ_L2_MASK;
1044 	}
1045 
1046 	/*
1047 	 * Store the offset into the source object, and fix up the offset into
1048 	 * the new object.
1049 	 */
1050 
1051 	result->backing_object_offset = *offset;
1052 
1053 	/*
1054 	 * Return the new object and offset to the caller
1055 	 */
1056 
1057 	*offset = 0;
1058 	*object = result;
1059 }
1060 
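/*
 * Usage sketch for vm_object_shadow() (a hypothetical copy-on-write
 * setup; 'entry' stands in for an illustrative vm_map_entry).  The
 * caller's object pointer and offset are replaced in place by the
 * new shadowing object and an offset relative to it:
 *
 *	vm_object_t obj = entry->object.vm_object;
 *	vm_ooffset_t off = entry->offset;
 *
 *	vm_object_shadow(&obj, &off, atop(entry->end - entry->start));
 *	entry->object.vm_object = obj;
 *	entry->offset = off;
 */
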
1061 #define	OBSC_TEST_ALL_SHADOWED	0x0001
1062 #define	OBSC_COLLAPSE_NOWAIT	0x0002
1063 #define	OBSC_COLLAPSE_WAIT	0x0004
1064 
1065 static int vm_object_backing_scan_callback(vm_page_t p, void *data);
1066 
1067 static __inline int
1068 vm_object_backing_scan(vm_object_t object, int op)
1069 {
1070 	struct rb_vm_page_scan_info info;
1071 	vm_object_t backing_object;
1072 
1073 	/*
1074 	 * spl protection is required to avoid races between the memq/lookup,
1075 	 * an interrupt doing an unbusy/free, and our busy check, among
1076 	 * other things.
1077 	 */
1078 	crit_enter();
1079 
1080 	backing_object = object->backing_object;
1081 	info.backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
1082 
1083 	/*
1084 	 * Initial conditions
1085 	 */
1086 
1087 	if (op & OBSC_TEST_ALL_SHADOWED) {
1088 		/*
1089 		 * We do not want to have to test for the existence of
1090 		 * swap pages in the backing object.  XXX but with the
1091 		 * new swapper this would be pretty easy to do.
1092 		 *
1093 		 * XXX what about anonymous MAP_SHARED memory that hasn't
1094 		 * been ZFOD faulted yet?  If we do not test for this, the
1095 		 * shadow test may succeed! XXX
1096 		 */
1097 		if (backing_object->type != OBJT_DEFAULT) {
1098 			crit_exit();
1099 			return(0);
1100 		}
1101 	}
1102 	if (op & OBSC_COLLAPSE_WAIT) {
1103 		KKASSERT((backing_object->flags & OBJ_DEAD) == 0);
1104 		vm_object_set_flag(backing_object, OBJ_DEAD);
1105 	}
1106 
1107 	/*
1108 	 * Our scan.  We have to retry if a negative error code is returned,
1109 	 * otherwise 0 or 1 will be returned in info.error.  0 indicates that
1110 	 * the scan had to be stopped because the parent does not completely
1111 	 * shadow the child.
1112 	 */
1113 	info.object = object;
1114 	info.backing_object = backing_object;
1115 	info.limit = op;
1116 	do {
1117 		info.error = 1;
1118 		vm_page_rb_tree_RB_SCAN(&backing_object->rb_memq, NULL,
1119 					vm_object_backing_scan_callback,
1120 					&info);
1121 	} while (info.error < 0);
1122 	crit_exit();
1123 	return(info.error);
1124 }
1125 
1126 static int
1127 vm_object_backing_scan_callback(vm_page_t p, void *data)
1128 {
1129 	struct rb_vm_page_scan_info *info = data;
1130 	vm_object_t backing_object;
1131 	vm_object_t object;
1132 	vm_pindex_t new_pindex;
1133 	vm_pindex_t backing_offset_index;
1134 	int op;
1135 
1136 	new_pindex = p->pindex - info->backing_offset_index;
1137 	op = info->limit;
1138 	object = info->object;
1139 	backing_object = info->backing_object;
1140 	backing_offset_index = info->backing_offset_index;
1141 
1142 	if (op & OBSC_TEST_ALL_SHADOWED) {
1143 		vm_page_t pp;
1144 
1145 		/*
1146 		 * Ignore pages outside the parent object's range
1147 		 * and outside the parent object's mapping of the
1148 		 * backing object.
1149 		 *
1150 		 * note that we do not busy the backing object's
1151 		 * page.
1152 		 */
1153 		if (
1154 		    p->pindex < backing_offset_index ||
1155 		    new_pindex >= object->size
1156 		) {
1157 			return(0);
1158 		}
1159 
1160 		/*
1161 		 * See if the parent has the page or if the parent's
1162 		 * object pager has the page.  If the parent has the
1163 		 * page but the page is not valid, the parent's
1164 		 * object pager must have the page.
1165 		 *
1166 		 * If this fails, the parent does not completely shadow
1167 		 * the object and we might as well give up now.
1168 		 */
1169 
1170 		pp = vm_page_lookup(object, new_pindex);
1171 		if (
1172 		    (pp == NULL || pp->valid == 0) &&
1173 		    !vm_pager_has_page(object, new_pindex, NULL, NULL)
1174 		) {
1175 			info->error = 0;	/* problemo */
1176 			return(-1);		/* stop the scan */
1177 		}
1178 	}
1179 
1180 	/*
1181 	 * Check for busy page
1182 	 */
1183 
1184 	if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
1185 		vm_page_t pp;
1186 
1187 		if (op & OBSC_COLLAPSE_NOWAIT) {
1188 			if (
1189 			    (p->flags & PG_BUSY) ||
1190 			    !p->valid ||
1191 			    p->hold_count ||
1192 			    p->wire_count ||
1193 			    p->busy
1194 			) {
1195 				return(0);
1196 			}
1197 		} else if (op & OBSC_COLLAPSE_WAIT) {
1198 			if (vm_page_sleep_busy(p, TRUE, "vmocol")) {
1199 				/*
1200 				 * If we slept, anything could have
1201 				 * happened.   Ask that the scan be restarted.
1202 				 *
1203 				 * Since the object is marked dead, the
1204 				 * backing offset should not have changed.
1205 				 */
1206 				info->error = -1;
1207 				return(-1);
1208 			}
1209 		}
1210 
1211 		/*
1212 		 * Busy the page
1213 		 */
1214 		vm_page_busy(p);
1215 
1216 		KASSERT(
1217 		    p->object == backing_object,
1218 		    ("vm_object_qcollapse(): object mismatch")
1219 		);
1220 
1221 		/*
1222 		 * Destroy any associated swap
1223 		 */
1224 		if (backing_object->type == OBJT_SWAP) {
1225 			swap_pager_freespace(
1226 			    backing_object,
1227 			    p->pindex,
1228 			    1
1229 			);
1230 		}
1231 
1232 		if (
1233 		    p->pindex < backing_offset_index ||
1234 		    new_pindex >= object->size
1235 		) {
1236 			/*
1237 			 * Page is out of the parent object's range, we
1238 			 * can simply destroy it.
1239 			 */
1240 			vm_page_protect(p, VM_PROT_NONE);
1241 			vm_page_free(p);
1242 			return(0);
1243 		}
1244 
1245 		pp = vm_page_lookup(object, new_pindex);
1246 		if (
1247 		    pp != NULL ||
1248 		    vm_pager_has_page(object, new_pindex, NULL, NULL)
1249 		) {
1250 			/*
1251 			 * page already exists in parent OR swap exists
1252 			 * for this location in the parent.  Destroy
1253 			 * the original page from the backing object.
1254 			 *
1255 			 * Leave the parent's page alone
1256 			 */
1257 			vm_page_protect(p, VM_PROT_NONE);
1258 			vm_page_free(p);
1259 			return(0);
1260 		}
1261 
1262 		/*
1263 		 * Page does not exist in parent, rename the
1264 		 * page from the backing object to the main object.
1265 		 *
1266 		 * If the page was mapped to a process, it can remain
1267 		 * mapped through the rename.
1268 		 */
1269 		if ((p->queue - p->pc) == PQ_CACHE)
1270 			vm_page_deactivate(p);
1271 
1272 		vm_page_rename(p, object, new_pindex);
1273 		/* page automatically made dirty by rename */
1274 	}
1275 	return(0);
1276 }
1277 
1278 /*
1279  * this version of collapse allows the operation to occur earlier and
1280  * when paging_in_progress is true for an object...  This is not a complete
1281  * operation, but should plug 99.9% of the rest of the leaks.
1282  */
1283 static void
1284 vm_object_qcollapse(vm_object_t object)
1285 {
1286 	vm_object_t backing_object = object->backing_object;
1287 
1288 	if (backing_object->ref_count != 1)
1289 		return;
1290 
1291 	backing_object->ref_count += 2;
1292 
1293 	vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);
1294 
1295 	backing_object->ref_count -= 2;
1296 }
1297 
1298 /*
1299  *	vm_object_collapse:
1300  *
1301  *	Collapse an object with the object backing it.
1302  *	Pages in the backing object are moved into the
1303  *	parent, and the backing object is deallocated.
1304  */
1305 void
1306 vm_object_collapse(vm_object_t object)
1307 {
1308 	while (TRUE) {
1309 		vm_object_t backing_object;
1310 
1311 		/*
1312 		 * Verify that the conditions are right for collapse:
1313 		 *
1314 		 * The object exists and the backing object exists.
1315 		 */
1316 		if (object == NULL)
1317 			break;
1318 
1319 		if ((backing_object = object->backing_object) == NULL)
1320 			break;
1321 
1322 		/*
1323 		 * We check the backing object first, because it is most likely
1324 		 * not collapsible.
1325 		 */
1326 		if (backing_object->handle != NULL ||
1327 		    (backing_object->type != OBJT_DEFAULT &&
1328 		     backing_object->type != OBJT_SWAP) ||
1329 		    (backing_object->flags & OBJ_DEAD) ||
1330 		    object->handle != NULL ||
1331 		    (object->type != OBJT_DEFAULT &&
1332 		     object->type != OBJT_SWAP) ||
1333 		    (object->flags & OBJ_DEAD)) {
1334 			break;
1335 		}
1336 
1337 		if (
1338 		    object->paging_in_progress != 0 ||
1339 		    backing_object->paging_in_progress != 0
1340 		) {
1341 			vm_object_qcollapse(object);
1342 			break;
1343 		}
1344 
1345 		/*
1346 		 * We know that we can either collapse the backing object (if
1347 		 * the parent is the only reference to it) or (perhaps) have
1348 		 * the parent bypass the object if the parent happens to shadow
1349 		 * all the resident pages in the entire backing object.
1350 		 *
1351 		 * This is ignoring pager-backed pages such as swap pages.
1352 		 * vm_object_backing_scan fails the shadowing test in this
1353 		 * case.
1354 		 */
1355 
1356 		if (backing_object->ref_count == 1) {
1357 			/*
1358 			 * If there is exactly one reference to the backing
1359 			 * object, we can collapse it into the parent.
1360 			 */
1361 			vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);
1362 
1363 			/*
1364 			 * Move the pager from backing_object to object.
1365 			 */
1366 
1367 			if (backing_object->type == OBJT_SWAP) {
1368 				vm_object_pip_add(backing_object, 1);
1369 
1370 				/*
1371 				 * scrap the paging_offset junk and do a
1372 				 * discrete copy.  This also removes major
1373 				 * assumptions about how the swap-pager
1374 				 * works from where it doesn't belong.  The
1375 				 * new swapper is able to optimize the
1376 				 * destroy-source case.
1377 				 */
1378 
1379 				vm_object_pip_add(object, 1);
1380 				swap_pager_copy(
1381 				    backing_object,
1382 				    object,
1383 				    OFF_TO_IDX(object->backing_object_offset), TRUE);
1384 				vm_object_pip_wakeup(object);
1385 
1386 				vm_object_pip_wakeup(backing_object);
1387 			}
1388 			/*
1389 			 * Object now shadows whatever backing_object did.
1390 			 * Note that the reference to
1391 			 * backing_object->backing_object moves from within
1392 			 * backing_object to within object.
1393 			 */
1394 
1395 			LIST_REMOVE(object, shadow_list);
1396 			object->backing_object->shadow_count--;
1397 			object->backing_object->generation++;
1398 			if (backing_object->backing_object) {
1399 				LIST_REMOVE(backing_object, shadow_list);
1400 				backing_object->backing_object->shadow_count--;
1401 				backing_object->backing_object->generation++;
1402 			}
1403 			object->backing_object = backing_object->backing_object;
1404 			if (object->backing_object) {
1405 				LIST_INSERT_HEAD(
1406 				    &object->backing_object->shadow_head,
1407 				    object,
1408 				    shadow_list
1409 				);
1410 				object->backing_object->shadow_count++;
1411 				object->backing_object->generation++;
1412 			}
1413 
1414 			object->backing_object_offset +=
1415 			    backing_object->backing_object_offset;
1416 
1417 			/*
1418 			 * Discard backing_object.
1419 			 *
1420 			 * Since the backing object has no pages, no pager left,
1421 			 * and no object references within it, all that is
1422 			 * necessary is to dispose of it.
1423 			 */
1424 
1425 			KASSERT(backing_object->ref_count == 1, ("backing_object %p was somehow re-referenced during collapse!", backing_object));
1426 			KASSERT(RB_EMPTY(&backing_object->rb_memq), ("backing_object %p somehow has left over pages during collapse!", backing_object));
1427 			crit_enter();
1428 			TAILQ_REMOVE(
1429 			    &vm_object_list,
1430 			    backing_object,
1431 			    object_list
1432 			);
1433 			vm_object_count--;
1434 			crit_exit();
1435 
1436 			zfree(obj_zone, backing_object);
1437 
1438 			object_collapses++;
1439 		} else {
1440 			vm_object_t new_backing_object;
1441 
1442 			/*
1443 			 * If we do not entirely shadow the backing object,
1444 			 * there is nothing we can do so we give up.
1445 			 */
1446 
1447 			if (vm_object_backing_scan(object, OBSC_TEST_ALL_SHADOWED) == 0) {
1448 				break;
1449 			}
1450 
1451 			/*
1452 			 * Make the parent shadow the next object in the
1453 			 * chain.  Deallocating backing_object will not remove
1454 			 * it, since its reference count is at least 2.
1455 			 */
1456 
1457 			LIST_REMOVE(object, shadow_list);
1458 			backing_object->shadow_count--;
1459 			backing_object->generation++;
1460 
1461 			new_backing_object = backing_object->backing_object;
1462 			if ((object->backing_object = new_backing_object) != NULL) {
1463 				vm_object_reference(new_backing_object);
1464 				LIST_INSERT_HEAD(
1465 				    &new_backing_object->shadow_head,
1466 				    object,
1467 				    shadow_list
1468 				);
1469 				new_backing_object->shadow_count++;
1470 				new_backing_object->generation++;
1471 				object->backing_object_offset +=
1472 					backing_object->backing_object_offset;
1473 			}
1474 
1475 			/*
1476 			 * Drop the reference count on backing_object. Since
1477 			 * its ref_count was at least 2, it will not vanish;
1478 			 * so we don't need to call vm_object_deallocate, but
1479 			 * we do anyway.
1480 			 */
1481 			vm_object_deallocate(backing_object);
1482 			object_bypasses++;
1483 		}
1484 
1485 		/*
1486 		 * Try again with this object's new backing object.
1487 		 */
1488 	}
1489 }
1490 
1491 /*
1492  *	vm_object_page_remove: [internal]
1493  *
1494  *	Removes all physical pages in the specified
1495  *	object range from the object's list of pages.
1496  */
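/*
 * Usage sketch, matching the call made from vm_object_coalesce()
 * below: discard a range of pages, freeing even dirty ones:
 *
 *	vm_object_page_remove(object, start, start + count, FALSE);
 *
 * With clean_only == TRUE, valid pages found dirty are left intact.
 */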
1497 static int vm_object_page_remove_callback(vm_page_t p, void *data);
1498 
1499 void
1500 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
1501 		      boolean_t clean_only)
1502 {
1503 	struct rb_vm_page_scan_info info;
1504 	int all;
1505 
1506 	/*
1507 	 * Degenerate cases and assertions
1508 	 */
1509 	if (object == NULL || object->resident_page_count == 0)
1510 		return;
1511 	KASSERT(object->type != OBJT_PHYS,
1512 		("attempt to remove pages from a physical object"));
1513 
1514 	/*
1515 	 * Indicate that paging is occurring on the object
1516 	 */
1517 	crit_enter();
1518 	vm_object_pip_add(object, 1);
1519 
1520 	/*
1521 	 * Figure out the actual removal range and whether we are removing
1522 	 * the entire contents of the object or not.  If removing the entire
1523 	 * contents, be sure to get all pages, even those that might be
1524 	 * beyond the end of the object.
1525 	 */
1526 	info.start_pindex = start;
1527 	if (end == 0)
1528 		info.end_pindex = (vm_pindex_t)-1;
1529 	else
1530 		info.end_pindex = end - 1;
1531 	info.limit = clean_only;
1532 	all = (start == 0 && info.end_pindex >= object->size - 1);
1533 
1534 	/*
1535 	 * Loop until we are sure we have gotten them all.
1536 	 */
1537 	do {
1538 		info.error = 0;
1539 		vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
1540 					vm_object_page_remove_callback, &info);
1541 	} while (info.error);
1542 
1543 	/*
1544 	 * Cleanup
1545 	 */
1546 	vm_object_pip_wakeup(object);
1547 	crit_exit();
1548 }
1549 
1550 static int
1551 vm_object_page_remove_callback(vm_page_t p, void *data)
1552 {
1553 	struct rb_vm_page_scan_info *info = data;
1554 
1555 	/*
1556 	 * Wired pages cannot be destroyed, but they can be invalidated
1557 	 * and we do so if clean_only (limit) is not set.
1558 	 */
1559 	if (p->wire_count != 0) {
1560 		vm_page_protect(p, VM_PROT_NONE);
1561 		if (info->limit == 0)
1562 			p->valid = 0;
1563 		return(0);
1564 	}
1565 
1566 	/*
1567 	 * The busy flags are only cleared at
1568 	 * interrupt -- minimize the spl transitions
1569 	 */
1570 
1571 	if (vm_page_sleep_busy(p, TRUE, "vmopar")) {
1572 		info->error = 1;
1573 		return(0);
1574 	}
1575 
1576 	/*
1577 	 * limit is our clean_only flag.  If set and the page is dirty, do
1578 	 * not free it.
1579 	 */
1580 	if (info->limit && p->valid) {
1581 		vm_page_test_dirty(p);
1582 		if (p->valid & p->dirty)
1583 			return(0);
1584 	}
1585 
1586 	/*
1587 	 * Destroy the page
1588 	 */
1589 	vm_page_busy(p);
1590 	vm_page_protect(p, VM_PROT_NONE);
1591 	vm_page_free(p);
1592 	return(0);
1593 }
1594 
1595 /*
1596  *	Routine:	vm_object_coalesce
1597  *	Function:	Coalesces two objects backing up adjoining
1598  *			regions of memory into a single object.
1599  *
1600  *	returns TRUE if objects were combined.
1601  *
1602  *	NOTE:	Only works at the moment if the second object is NULL -
1603  *		if it's not, which object do we lock first?
1604  *
1605  *	Parameters:
1606  *		prev_object	First object to coalesce
1607  *		prev_offset	Offset into prev_object
1608  *		next_object	Second object to coalesce
1609  *		next_offset	Offset into next_object
1610  *
1611  *		prev_size	Size of reference to prev_object
1612  *		next_size	Size of reference to next_object
1613  *
1614  *	Conditions:
1615  *	The object must *not* be locked.
1616  */
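/*
 * Worked example (hypothetical map growth): a 16-page mapping backed
 * by pindexes 0-15 of prev_object is extended by 4 pages.  With
 * prev_pindex == 0, prev_size == 16 * PAGE_SIZE and
 * next_size == 4 * PAGE_SIZE, the sizes shift down to 16 and 4
 * pages, next_pindex becomes 16, and on success prev_object->size
 * grows to 20 pages instead of a second object being allocated.
 */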
1617 boolean_t
1618 vm_object_coalesce(vm_object_t prev_object, vm_pindex_t prev_pindex,
1619     vm_size_t prev_size, vm_size_t next_size)
1620 {
1621 	vm_pindex_t next_pindex;
1622 
1623 	if (prev_object == NULL) {
1624 		return (TRUE);
1625 	}
1626 
1627 	if (prev_object->type != OBJT_DEFAULT &&
1628 	    prev_object->type != OBJT_SWAP) {
1629 		return (FALSE);
1630 	}
1631 
1632 	/*
1633 	 * Try to collapse the object first
1634 	 */
1635 	vm_object_collapse(prev_object);
1636 
1637 	/*
1638 	 * Can't coalesce if: more than one reference, paged out, shadows
1639 	 * another object, or has a copy elsewhere (any of which mean that
1640 	 * the pages not mapped to prev_entry may be in use anyway).
1641 	 */
1642 
1643 	if (prev_object->backing_object != NULL) {
1644 		return (FALSE);
1645 	}
1646 
1647 	prev_size >>= PAGE_SHIFT;
1648 	next_size >>= PAGE_SHIFT;
1649 	next_pindex = prev_pindex + prev_size;
1650 
1651 	if ((prev_object->ref_count > 1) &&
1652 	    (prev_object->size != next_pindex)) {
1653 		return (FALSE);
1654 	}
1655 
1656 	/*
1657 	 * Remove any pages that may still be in the object from a previous
1658 	 * deallocation.
1659 	 */
1660 	if (next_pindex < prev_object->size) {
1661 		vm_object_page_remove(prev_object,
1662 				      next_pindex,
1663 				      next_pindex + next_size, FALSE);
1664 		if (prev_object->type == OBJT_SWAP)
1665 			swap_pager_freespace(prev_object,
1666 					     next_pindex, next_size);
1667 	}
1668 
1669 	/*
1670 	 * Extend the object if necessary.
1671 	 */
1672 	if (next_pindex + next_size > prev_object->size)
1673 		prev_object->size = next_pindex + next_size;
1674 
1675 	return (TRUE);
1676 }
1677 
1678 void
1679 vm_object_set_writeable_dirty(vm_object_t object)
1680 {
1681 	struct vnode *vp;
1682 
1683 	vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
1684 	if (object->type == OBJT_VNODE &&
1685 	    (vp = (struct vnode *)object->handle) != NULL) {
1686 		if ((vp->v_flag & VOBJDIRTY) == 0) {
1687 			vsetflags(vp, VOBJDIRTY);
1688 		}
1689 	}
1690 }
1691 
1692 
1693 
1694 #include "opt_ddb.h"
1695 #ifdef DDB
1696 #include <sys/kernel.h>
1697 
1698 #include <sys/cons.h>
1699 
1700 #include <ddb/ddb.h>
1701 
1702 static int	_vm_object_in_map (vm_map_t map, vm_object_t object,
1703 				       vm_map_entry_t entry);
1704 static int	vm_object_in_map (vm_object_t object);
1705 
1706 static int
1707 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
1708 {
1709 	vm_map_t tmpm;
1710 	vm_map_entry_t tmpe;
1711 	vm_object_t obj;
1712 	int entcount;
1713 
1714 	if (map == 0)
1715 		return 0;
1716 	if (entry == 0) {
1717 		tmpe = map->header.next;
1718 		entcount = map->nentries;
1719 		while (entcount-- && (tmpe != &map->header)) {
1720 			if (_vm_object_in_map(map, object, tmpe)) {
1721 				return 1;
1722 			}
1723 			tmpe = tmpe->next;
1724 		}
1725 		return (0);
1726 	}
1727 	switch (entry->maptype) {
1728 	case VM_MAPTYPE_SUBMAP:
1729 		tmpm = entry->object.sub_map;
1730 		tmpe = tmpm->header.next;
1731 		entcount = tmpm->nentries;
1732 		while (entcount-- && tmpe != &tmpm->header) {
1733 			if (_vm_object_in_map(tmpm, object, tmpe)) {
1734 				return 1;
1735 			}
1736 			tmpe = tmpe->next;
1737 		}
1738 		break;
1739 	case VM_MAPTYPE_NORMAL:
1740 	case VM_MAPTYPE_VPAGETABLE:
1741 		obj = entry->object.vm_object;
1742 		while (obj) {
1743 			if (obj == object)
1744 				return 1;
1745 			obj = obj->backing_object;
1746 		}
1747 		break;
1748 	default:
1749 		break;
1750 	}
1751 	return 0;
1752 }
1753 
1754 static int vm_object_in_map_callback(struct proc *p, void *data);
1755 
1756 struct vm_object_in_map_info {
1757 	vm_object_t object;
1758 	int rv;
1759 };
1760 
1761 static int
1762 vm_object_in_map(vm_object_t object)
1763 {
1764 	struct vm_object_in_map_info info;
1765 
1766 	info.rv = 0;
1767 	info.object = object;
1768 
1769 	allproc_scan(vm_object_in_map_callback, &info);
1770 	if (info.rv)
1771 		return 1;
1772 	if (_vm_object_in_map(&kernel_map, object, 0))
1773 		return 1;
1774 	if (_vm_object_in_map(&pager_map, object, 0))
1775 		return 1;
1776 	if (_vm_object_in_map(&buffer_map, object, 0))
1777 		return 1;
1778 	return 0;
1779 }
1780 
1781 static int
1782 vm_object_in_map_callback(struct proc *p, void *data)
1783 {
1784 	struct vm_object_in_map_info *info = data;
1785 
1786 	if (p->p_vmspace) {
1787 		if (_vm_object_in_map(&p->p_vmspace->vm_map, info->object, 0)) {
1788 			info->rv = 1;
1789 			return -1;
1790 		}
1791 	}
1792 	return (0);
1793 }
1794 
1795 DB_SHOW_COMMAND(vmochk, vm_object_check)
1796 {
1797 	vm_object_t object;
1798 
1799 	/*
1800 	 * make sure that internal objs are in a map somewhere
1801 	 * and none have zero ref counts.
1802 	 */
1803 	for (object = TAILQ_FIRST(&vm_object_list);
1804 			object != NULL;
1805 			object = TAILQ_NEXT(object, object_list)) {
1806 		if (object->handle == NULL &&
1807 		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
1808 			if (object->ref_count == 0) {
1809 				db_printf("vmochk: internal obj has zero ref count: %ld\n",
1810 					(long)object->size);
1811 			}
1812 			if (!vm_object_in_map(object)) {
1813 				db_printf(
1814 			"vmochk: internal obj is not in a map: "
1815 			"ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
1816 				    object->ref_count, (u_long)object->size,
1817 				    (u_long)object->size,
1818 				    (void *)object->backing_object);
1819 			}
1820 		}
1821 	}
1822 }
1823 
1824 /*
1825  *	vm_object_print:	[ debug ]
1826  */
1827 DB_SHOW_COMMAND(object, vm_object_print_static)
1828 {
1829 	/* XXX convert args. */
1830 	vm_object_t object = (vm_object_t)addr;
1831 	boolean_t full = have_addr;
1832 
1833 	vm_page_t p;
1834 
1835 	/* XXX count is an (unused) arg.  Avoid shadowing it. */
1836 #define	count	was_count
1837 
1838 	int count;
1839 
1840 	if (object == NULL)
1841 		return;
1842 
1843 	db_iprintf(
1844 	    "Object %p: type=%d, size=0x%lx, res=%d, ref=%d, flags=0x%x\n",
1845 	    object, (int)object->type, (u_long)object->size,
1846 	    object->resident_page_count, object->ref_count, object->flags);
1847 	/*
1848 	 * XXX no %qd in kernel.  Truncate object->backing_object_offset.
1849 	 */
1850 	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%lx\n",
1851 	    object->shadow_count,
1852 	    object->backing_object ? object->backing_object->ref_count : 0,
1853 	    object->backing_object, (long)object->backing_object_offset);
1854 
1855 	if (!full)
1856 		return;
1857 
1858 	db_indent += 2;
1859 	count = 0;
1860 	RB_FOREACH(p, vm_page_rb_tree, &object->rb_memq) {
1861 		if (count == 0)
1862 			db_iprintf("memory:=");
1863 		else if (count == 6) {
1864 			db_printf("\n");
1865 			db_iprintf(" ...");
1866 			count = 0;
1867 		} else
1868 			db_printf(",");
1869 		count++;
1870 
1871 		db_printf("(off=0x%lx,page=0x%lx)",
1872 		    (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p));
1873 	}
1874 	if (count != 0)
1875 		db_printf("\n");
1876 	db_indent -= 2;
1877 }
1878 
1879 /* XXX. */
1880 #undef count
1881 
1882 /* XXX need this non-static entry for calling from vm_map_print. */
1883 void
1884 vm_object_print(/* db_expr_t */ long addr,
1885 		boolean_t have_addr,
1886 		/* db_expr_t */ long count,
1887 		char *modif)
1888 {
1889 	vm_object_print_static(addr, have_addr, count, modif);
1890 }
1891 
1892 DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
1893 {
1894 	vm_object_t object;
1895 	int nl = 0;
1896 	int c;
1897 	for (object = TAILQ_FIRST(&vm_object_list);
1898 			object != NULL;
1899 			object = TAILQ_NEXT(object, object_list)) {
1900 		vm_pindex_t idx, fidx;
1901 		vm_pindex_t osize;
1902 		vm_paddr_t pa = -1, padiff;
1903 		int rcount;
1904 		vm_page_t m;
1905 
1906 		db_printf("new object: %p\n", (void *)object);
1907 		if (nl > 18) {
1908 			c = cngetc();
1909 			if (c != ' ')
1910 				return;
1911 			nl = 0;
1912 		}
1913 		nl++;
1914 		rcount = 0;
1915 		fidx = 0;
1916 		osize = object->size;
1917 		if (osize > 128)
1918 			osize = 128;
1919 		for (idx = 0; idx < osize; idx++) {
1920 			m = vm_page_lookup(object, idx);
1921 			if (m == NULL) {
1922 				if (rcount) {
1923 					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
1924 						(long)fidx, rcount, (long)pa);
1925 					if (nl > 18) {
1926 						c = cngetc();
1927 						if (c != ' ')
1928 							return;
1929 						nl = 0;
1930 					}
1931 					nl++;
1932 					rcount = 0;
1933 				}
1934 				continue;
1935 			}
1936 
1937 
1938 			if (rcount &&
1939 				(VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
1940 				++rcount;
1941 				continue;
1942 			}
1943 			if (rcount) {
1944 				padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
1945 				padiff >>= PAGE_SHIFT;
1946 				padiff &= PQ_L2_MASK;
1947 				if (padiff == 0) {
1948 					pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
1949 					++rcount;
1950 					continue;
1951 				}
1952 				db_printf(" index(%ld)run(%d)pa(0x%lx)",
1953 					(long)fidx, rcount, (long)pa);
1954 				db_printf("pd(%ld)\n", (long)padiff);
1955 				if (nl > 18) {
1956 					c = cngetc();
1957 					if (c != ' ')
1958 						return;
1959 					nl = 0;
1960 				}
1961 				nl++;
1962 			}
1963 			fidx = idx;
1964 			pa = VM_PAGE_TO_PHYS(m);
1965 			rcount = 1;
1966 		}
1967 		if (rcount) {
1968 			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
1969 				(long)fidx, rcount, (long)pa);
1970 			if (nl > 18) {
1971 				c = cngetc();
1972 				if (c != ' ')
1973 					return;
1974 				nl = 0;
1975 			}
1976 			nl++;
1977 		}
1978 	}
1979 }
1980 #endif /* DDB */
1981