1 /*
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
37  *
38  *
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  *
64  * $FreeBSD: src/sys/vm/vm_object.c,v 1.171.2.8 2003/05/26 19:17:56 alc Exp $
65  * $DragonFly: src/sys/vm/vm_object.c,v 1.31 2007/06/08 02:00:47 dillon Exp $
66  */
67 
68 /*
69  *	Virtual memory object module.
70  */
71 
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/proc.h>		/* for curproc, pageproc */
75 #include <sys/vnode.h>
76 #include <sys/vmmeter.h>
77 #include <sys/mman.h>
78 #include <sys/mount.h>
79 #include <sys/kernel.h>
80 #include <sys/sysctl.h>
81 
82 #include <vm/vm.h>
83 #include <vm/vm_param.h>
84 #include <vm/pmap.h>
85 #include <vm/vm_map.h>
86 #include <vm/vm_object.h>
87 #include <vm/vm_page.h>
88 #include <vm/vm_pageout.h>
89 #include <vm/vm_pager.h>
90 #include <vm/swap_pager.h>
91 #include <vm/vm_kern.h>
92 #include <vm/vm_extern.h>
93 #include <vm/vm_zone.h>
94 
95 #define EASY_SCAN_FACTOR	8
96 
97 static void	vm_object_qcollapse(vm_object_t object);
98 static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
99 					     int pagerflags);
100 
101 /*
102  *	Virtual memory objects maintain the actual data
103  *	associated with allocated virtual memory.  A given
104  *	page of memory exists within exactly one object.
105  *
106  *	An object is only deallocated when all "references"
107  *	are given up.  Only one "reference" to a given
108  *	region of an object should be writeable.
109  *
110  *	Associated with each object is a list of all resident
111  *	memory pages belonging to that object; this list is
112  *	maintained by the "vm_page" module, and locked by the object's
113  *	lock.
114  *
115  *	Each object also records a "pager" routine which is
116  *	used to retrieve (and store) pages to the proper backing
117  *	storage.  In addition, objects may be backed by other
118  *	objects from which they were virtual-copied.
119  *
120  *	The only items within the object structure which are
121  *	modified after time of creation are:
122  *		reference count		locked by object's lock
123  *		pager routine		locked by object's lock
124  *
125  */
126 
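/*
 * Illustrative life cycle of an anonymous object using the routines
 * below (sketch only; size is in pages):
 *
 *	vm_object_t obj = vm_object_allocate(OBJT_DEFAULT, size);
 *	vm_object_reference(obj);	- ref_count goes 1 -> 2
 *	vm_object_deallocate(obj);	- ref_count goes 2 -> 1
 *	vm_object_deallocate(obj);	- last ref, object is terminated
 */
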
127 struct object_q vm_object_list;
128 struct vm_object kernel_object;
129 
130 static long vm_object_count;		/* count of all objects */
131 extern int vm_pageout_page_count;
132 
133 static long object_collapses;
134 static long object_bypasses;
135 static int next_index;
136 static vm_zone_t obj_zone;
137 static struct vm_zone obj_zone_store;
138 static int object_hash_rand;
139 #define VM_OBJECTS_INIT 256
140 static struct vm_object vm_objects_init[VM_OBJECTS_INIT];
141 
142 void
143 _vm_object_allocate(objtype_t type, vm_size_t size, vm_object_t object)
144 {
145 	int incr;
146 	RB_INIT(&object->rb_memq);
147 	LIST_INIT(&object->shadow_head);
148 
149 	object->type = type;
150 	object->size = size;
151 	object->ref_count = 1;
152 	object->flags = 0;
153 	if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
154 		vm_object_set_flag(object, OBJ_ONEMAPPING);
155 	object->paging_in_progress = 0;
156 	object->resident_page_count = 0;
157 	object->shadow_count = 0;
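	/*
	 * Seed the object's base page color from the global color index
	 * and advance the index, spreading successive objects across the
	 * PQ_L2 cache buckets.
	 */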
158 	object->pg_color = next_index;
159 	if ( size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
160 		incr = PQ_L2_SIZE / 3 + PQ_PRIME1;
161 	else
162 		incr = size;
163 	next_index = (next_index + incr) & PQ_L2_MASK;
164 	object->handle = NULL;
165 	object->backing_object = NULL;
166 	object->backing_object_offset = (vm_ooffset_t) 0;
167 	/*
168 	 * Try to generate a number that will spread objects out in the
169 	 * hash table.  We 'wipe' new objects across the hash in 128 page
170 	 * increments plus 1 more to offset it a little more by the time
171 	 * it wraps around.
172 	 */
173 	object->hash_rand = object_hash_rand - 129;
174 
175 	object->generation++;
176 
177 	crit_enter();
178 	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
179 	vm_object_count++;
180 	object_hash_rand = object->hash_rand;
181 	crit_exit();
182 }
183 
184 /*
185  *	vm_object_init:
186  *
187  *	Initialize the VM objects module.
188  */
189 void
190 vm_object_init(void)
191 {
192 	TAILQ_INIT(&vm_object_list);
193 
194 	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(KvaEnd),
195 			    &kernel_object);
196 
197 	obj_zone = &obj_zone_store;
198 	zbootinit(obj_zone, "VM OBJECT", sizeof (struct vm_object),
199 		vm_objects_init, VM_OBJECTS_INIT);
200 }
201 
202 void
203 vm_object_init2(void)
204 {
205 	zinitna(obj_zone, NULL, NULL, 0, 0, ZONE_PANICFAIL, 1);
206 }
207 
208 /*
209  *	vm_object_allocate:
210  *
211  *	Returns a new object with the given size.
212  */
213 
214 vm_object_t
215 vm_object_allocate(objtype_t type, vm_size_t size)
216 {
217 	vm_object_t result;
218 
219 	result = (vm_object_t) zalloc(obj_zone);
220 
221 	_vm_object_allocate(type, size, result);
222 
223 	return (result);
224 }
225 
226 
227 /*
228  *	vm_object_reference:
229  *
230  *	Gets another reference to the given object.
231  */
232 void
233 vm_object_reference(vm_object_t object)
234 {
235 	if (object == NULL)
236 		return;
237 
238 	object->ref_count++;
239 	if (object->type == OBJT_VNODE) {
240 		vref(object->handle);
241 		/* XXX what if the vnode is being destroyed? */
242 	}
243 }
244 
245 static void
246 vm_object_vndeallocate(vm_object_t object)
247 {
248 	struct vnode *vp = (struct vnode *) object->handle;
249 
250 	KASSERT(object->type == OBJT_VNODE,
251 	    ("vm_object_vndeallocate: not a vnode object"));
252 	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
253 #ifdef INVARIANTS
254 	if (object->ref_count == 0) {
255 		vprint("vm_object_vndeallocate", vp);
256 		panic("vm_object_vndeallocate: bad object reference count");
257 	}
258 #endif
259 
260 	object->ref_count--;
261 	if (object->ref_count == 0)
262 		vp->v_flag &= ~VTEXT;
263 	vrele(vp);
264 }
265 
266 /*
267  *	vm_object_deallocate:
268  *
269  *	Release a reference to the specified object,
270  *	gained either through a vm_object_allocate
271  *	or a vm_object_reference call.  When all references
272  *	are gone, storage associated with this object
273  *	may be relinquished.
274  *
275  *	No object may be locked.
276  */
277 void
278 vm_object_deallocate(vm_object_t object)
279 {
280 	vm_object_t temp;
281 
282 	while (object != NULL) {
283 		if (object->type == OBJT_VNODE) {
284 			vm_object_vndeallocate(object);
285 			return;
286 		}
287 
288 		if (object->ref_count == 0) {
289 			panic("vm_object_deallocate: object deallocated too many times: %d", object->type);
290 		} else if (object->ref_count > 2) {
291 			object->ref_count--;
292 			return;
293 		}
294 
295 		/*
296 		 * Here on ref_count of one or two, which are special cases for
297 		 * objects.
298 		 */
299 		if ((object->ref_count == 2) && (object->shadow_count == 0)) {
300 			vm_object_set_flag(object, OBJ_ONEMAPPING);
301 			object->ref_count--;
302 			return;
303 		} else if ((object->ref_count == 2) && (object->shadow_count == 1)) {
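			/*
			 * Our caller and a single shadow object hold the
			 * only references.  If everything involved is
			 * anonymous (no handle, OBJT_DEFAULT or OBJT_SWAP)
			 * we may be able to collapse the shadow, or
			 * terminate it if we wind up holding its last
			 * reference.
			 */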
304 			object->ref_count--;
305 			if ((object->handle == NULL) &&
306 			    (object->type == OBJT_DEFAULT ||
307 			     object->type == OBJT_SWAP)) {
308 				vm_object_t robject;
309 
310 				robject = LIST_FIRST(&object->shadow_head);
311 				KASSERT(robject != NULL,
312 				    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
313 					 object->ref_count,
314 					 object->shadow_count));
315 				if ((robject->handle == NULL) &&
316 				    (robject->type == OBJT_DEFAULT ||
317 				     robject->type == OBJT_SWAP)) {
318 
319 					robject->ref_count++;
320 
321 					while (
322 						robject->paging_in_progress ||
323 						object->paging_in_progress
324 					) {
325 						vm_object_pip_sleep(robject, "objde1");
326 						vm_object_pip_sleep(object, "objde2");
327 					}
328 
329 					if (robject->ref_count == 1) {
330 						robject->ref_count--;
331 						object = robject;
332 						goto doterm;
333 					}
334 
335 					object = robject;
336 					vm_object_collapse(object);
337 					continue;
338 				}
339 			}
340 
341 			return;
342 
343 		} else {
344 			object->ref_count--;
345 			if (object->ref_count != 0)
346 				return;
347 		}
348 
349 doterm:
350 
351 		temp = object->backing_object;
352 		if (temp) {
353 			LIST_REMOVE(object, shadow_list);
354 			temp->shadow_count--;
355 			temp->generation++;
356 			object->backing_object = NULL;
357 		}
358 
359 		/*
360 		 * Don't double-terminate, we could be in a termination
361 		 * recursion due to the terminate having to sync data
362 		 * to disk.
363 		 */
364 		if ((object->flags & OBJ_DEAD) == 0)
365 			vm_object_terminate(object);
366 		object = temp;
367 	}
368 }
369 
370 /*
371  *	vm_object_terminate actually destroys the specified object, freeing
372  *	up all previously used resources.
373  *
374  *	The object must be locked.
375  *	This routine may block.
376  */
377 static int vm_object_terminate_callback(vm_page_t p, void *data);
378 
379 void
380 vm_object_terminate(vm_object_t object)
381 {
382 	/*
383 	 * Make sure no one uses us.
384 	 */
385 	vm_object_set_flag(object, OBJ_DEAD);
386 
387 	/*
388 	 * wait for the pageout daemon to be done with the object
389 	 */
390 	vm_object_pip_wait(object, "objtrm");
391 
392 	KASSERT(!object->paging_in_progress,
393 		("vm_object_terminate: pageout in progress"));
394 
395 	/*
396 	 * Clean and free the pages, as appropriate. All references to the
397 	 * object are gone, so we don't need to lock it.
398 	 */
399 	if (object->type == OBJT_VNODE) {
400 		struct vnode *vp;
401 
402 		/*
403 		 * Clean pages and flush buffers.
404 		 */
405 		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
406 
407 		vp = (struct vnode *) object->handle;
408 		vinvalbuf(vp, V_SAVE, 0, 0);
409 	}
410 
411 	/*
412 	 * Wait for any I/O to complete, after which there had better not
413 	 * be any references left on the object.
414 	 */
415 	vm_object_pip_wait(object, "objtrm");
416 
417 	if (object->ref_count != 0)
418 		panic("vm_object_terminate: object with references, ref_count=%d", object->ref_count);
419 
420 	/*
421 	 * Now free any remaining pages. For internal objects, this also
422 	 * removes them from paging queues. Don't free wired pages, just
423 	 * remove them from the object.
424 	 */
425 	crit_enter();
426 	vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
427 				vm_object_terminate_callback, NULL);
428 	crit_exit();
429 
430 	/*
431 	 * Let the pager know object is dead.
432 	 */
433 	vm_pager_deallocate(object);
434 
435 	/*
436 	 * Remove the object from the global object list.
437 	 */
438 	crit_enter();
439 	TAILQ_REMOVE(&vm_object_list, object, object_list);
440 	vm_object_count--;
441 	crit_exit();
442 
443 	vm_object_dead_wakeup(object);
444 	if (object->ref_count != 0)
445 		panic("vm_object_terminate2: object with references, ref_count=%d", object->ref_count);
446 
447 	/*
448 	 * Free the space for the object.
449 	 */
450 	zfree(obj_zone, object);
451 }
452 
453 static int
454 vm_object_terminate_callback(vm_page_t p, void *data __unused)
455 {
456 	if (p->busy || (p->flags & PG_BUSY))
457 		panic("vm_object_terminate: freeing busy page %p", p);
458 	if (p->wire_count == 0) {
459 		vm_page_busy(p);
460 		vm_page_free(p);
461 		mycpu->gd_cnt.v_pfree++;
462 	} else {
463 		if (p->queue != PQ_NONE)
464 			kprintf("vm_object_terminate: Warning: Encountered wired page %p on queue %d\n", p, p->queue);
465 		vm_page_busy(p);
466 		vm_page_remove(p);
467 		vm_page_wakeup(p);
468 	}
469 	return(0);
470 }
471 
472 /*
473  * The object is dead but still has an object<->pager association.  Sleep
474  * and return.  The caller typically retests the association in a loop.
475  */
476 void
477 vm_object_dead_sleep(vm_object_t object, const char *wmesg)
478 {
479 	crit_enter();
480 	if (object->handle) {
481 		vm_object_set_flag(object, OBJ_DEADWNT);
482 		tsleep(object, 0, wmesg, 0);
483 	}
484 	crit_exit();
485 }
486 
487 /*
488  * Wakeup anyone waiting for the object<->pager disassociation on
489  * a dead object.
490  */
491 void
492 vm_object_dead_wakeup(vm_object_t object)
493 {
494 	if (object->flags & OBJ_DEADWNT) {
495 		vm_object_clear_flag(object, OBJ_DEADWNT);
496 		wakeup(object);
497 	}
498 }
499 
500 /*
501  *	vm_object_page_clean
502  *
503  *	Clean all dirty pages in the specified range of the object.  Leaves
504  *	pages on whatever queue they are currently on.  If NOSYNC is set then do not
505  *	write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC),
506  *	leaving the object dirty.
507  *
508  *	When stuffing pages asynchronously, allow clustering.  XXX we need a
509  *	synchronous clustering mode implementation.
510  *
511  *	Odd semantics: if end is 0 we clean the entire object.
512  */
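
/*
 * For example, vm_object_terminate() cleans an entire vnode object
 * synchronously with:
 *
 *	vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
 */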
513 static int vm_object_page_clean_pass1(struct vm_page *p, void *data);
514 static int vm_object_page_clean_pass2(struct vm_page *p, void *data);
515 
516 void
517 vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
518 		     int flags)
519 {
520 	struct rb_vm_page_scan_info info;
521 	struct vnode *vp;
522 	int wholescan;
523 	int pagerflags;
524 	int curgeneration;
525 
526 	if (object->type != OBJT_VNODE ||
527 		(object->flags & OBJ_MIGHTBEDIRTY) == 0)
528 		return;
529 
530 	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ?
531 			VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
532 	pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;
533 
534 	vp = object->handle;
535 
536 	/*
537 	 * Interlock other major object operations.  This allows us to
538 	 * temporarily clear OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY.
539 	 */
540 	crit_enter();
541 	vm_object_set_flag(object, OBJ_CLEANING);
542 
543 	/*
544 	 * Handle 'entire object' case
545 	 */
546 	info.start_pindex = start;
547 	if (end == 0) {
548 		info.end_pindex = object->size - 1;
549 	} else {
550 		info.end_pindex = end - 1;
551 	}
552 	wholescan = (start == 0 && info.end_pindex == object->size - 1);
553 	info.limit = flags;
554 	info.pagerflags = pagerflags;
555 	info.object = object;
556 
557 	/*
558 	 * If cleaning the entire object do a pass to mark the pages read-only.
559 	 * If everything worked out ok, clear OBJ_WRITEABLE and
560 	 * OBJ_MIGHTBEDIRTY.
561 	 */
562 	if (wholescan) {
563 		info.error = 0;
564 		vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
565 					vm_object_page_clean_pass1, &info);
566 		if (info.error == 0) {
567 			vm_object_clear_flag(object,
568 					     OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
569 			if (object->type == OBJT_VNODE &&
570 			    (vp = (struct vnode *)object->handle) != NULL) {
571 				if (vp->v_flag & VOBJDIRTY)
572 					vclrflags(vp, VOBJDIRTY);
573 			}
574 		}
575 	}
576 
577 	/*
578 	 * Do a pass to clean all the dirty pages we find.
579 	 */
580 	do {
581 		info.error = 0;
582 		curgeneration = object->generation;
583 		vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
584 					vm_object_page_clean_pass2, &info);
585 	} while (info.error || curgeneration != object->generation);
586 
587 	vm_object_clear_flag(object, OBJ_CLEANING);
588 	crit_exit();
589 }
590 
591 static
592 int
593 vm_object_page_clean_pass1(struct vm_page *p, void *data)
594 {
595 	struct rb_vm_page_scan_info *info = data;
596 
597 	vm_page_flag_set(p, PG_CLEANCHK);
598 	if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC))
599 		info->error = 1;
600 	else
601 		vm_page_protect(p, VM_PROT_READ);
602 	return(0);
603 }
604 
605 static
606 int
607 vm_object_page_clean_pass2(struct vm_page *p, void *data)
608 {
609 	struct rb_vm_page_scan_info *info = data;
610 	int n;
611 
612 	/*
613 	 * Do not mess with pages that were inserted after we started
614 	 * the cleaning pass.
615 	 */
616 	if ((p->flags & PG_CLEANCHK) == 0)
617 		return(0);
618 
619 	/*
620 	 * Before wasting time traversing the pmaps, check for trivial
621 	 * cases where the page cannot be dirty.
622 	 */
623 	if (p->valid == 0 || (p->queue - p->pc) == PQ_CACHE) {
624 		KKASSERT((p->dirty & p->valid) == 0);
625 		return(0);
626 	}
627 
628 	/*
629 	 * Check whether the page is dirty or not.  The page has been set
630 	 * to be read-only so the check will not race a user dirtying the
631 	 * page.
632 	 */
633 	vm_page_test_dirty(p);
634 	if ((p->dirty & p->valid) == 0) {
635 		vm_page_flag_clear(p, PG_CLEANCHK);
636 		return(0);
637 	}
638 
639 	/*
640 	 * If we have been asked to skip nosync pages and this is a
641 	 * nosync page, skip it.  Note that the object flags were
642 	 * not cleared in this case (because pass1 will have returned an
643 	 * error), so we do not have to set them.
644 	 */
645 	if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
646 		vm_page_flag_clear(p, PG_CLEANCHK);
647 		return(0);
648 	}
649 
650 	/*
651 	 * Flush as many pages as we can.  PG_CLEANCHK will be cleared on
652 	 * the pages that get successfully flushed.  Set info->error if
653 	 * we raced an object modification.
654 	 */
655 	n = vm_object_page_collect_flush(info->object, p, info->pagerflags);
656 	if (n == 0)
657 		info->error = 1;
658 	return(0);
659 }
660 
661 /*
662  * This routine must be called within a critical section to properly avoid
663  * an interrupt unbusy/free race that can occur prior to the busy check.
664  *
665  * Using the object generation number here to detect page ripout is not
666  * the best idea in the world. XXX
667  *
668  * NOTE: we operate under the assumption that a page found to not be busy
669  * will not be ripped out from under us by an interrupt.  XXX we should
670  * recode this to explicitly busy the pages.
671  */
672 static int
673 vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags)
674 {
675 	int runlen;
676 	int maxf;
677 	int chkb;
678 	int maxb;
679 	int i;
680 	int curgeneration;
681 	vm_pindex_t pi;
682 	vm_page_t maf[vm_pageout_page_count];
683 	vm_page_t mab[vm_pageout_page_count];
684 	vm_page_t ma[vm_pageout_page_count];
685 
686 	curgeneration = object->generation;
687 
688 	pi = p->pindex;
689 	while (vm_page_sleep_busy(p, TRUE, "vpcwai")) {
690 		if (object->generation != curgeneration) {
691 			return(0);
692 		}
693 	}
694 	KKASSERT(p->object == object && p->pindex == pi);
695 
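	/*
	 * Try to cluster additional dirty pages with p into a single
	 * pageout I/O.  Scan forward from p (maf[]/maxf), then backward
	 * (mab[]/maxb), stopping at the first page that is busy, clean,
	 * or cached.  The run is assembled in ma[] in ascending pindex
	 * order around p.
	 */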
696 	maxf = 0;
697 	for(i = 1; i < vm_pageout_page_count; i++) {
698 		vm_page_t tp;
699 
700 		if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
701 			if ((tp->flags & PG_BUSY) ||
702 				((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
703 				 (tp->flags & PG_CLEANCHK) == 0) ||
704 				(tp->busy != 0))
705 				break;
706 			if((tp->queue - tp->pc) == PQ_CACHE) {
707 				vm_page_flag_clear(tp, PG_CLEANCHK);
708 				break;
709 			}
710 			vm_page_test_dirty(tp);
711 			if ((tp->dirty & tp->valid) == 0) {
712 				vm_page_flag_clear(tp, PG_CLEANCHK);
713 				break;
714 			}
715 			maf[ i - 1 ] = tp;
716 			maxf++;
717 			continue;
718 		}
719 		break;
720 	}
721 
722 	maxb = 0;
723 	chkb = vm_pageout_page_count -  maxf;
724 	if (chkb) {
725 		for(i = 1; i < chkb;i++) {
726 			vm_page_t tp;
727 
728 			if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
729 				if ((tp->flags & PG_BUSY) ||
730 					((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
731 					 (tp->flags & PG_CLEANCHK) == 0) ||
732 					(tp->busy != 0))
733 					break;
734 				if((tp->queue - tp->pc) == PQ_CACHE) {
735 					vm_page_flag_clear(tp, PG_CLEANCHK);
736 					break;
737 				}
738 				vm_page_test_dirty(tp);
739 				if ((tp->dirty & tp->valid) == 0) {
740 					vm_page_flag_clear(tp, PG_CLEANCHK);
741 					break;
742 				}
743 				mab[ i - 1 ] = tp;
744 				maxb++;
745 				continue;
746 			}
747 			break;
748 		}
749 	}
750 
751 	for(i = 0; i < maxb; i++) {
752 		int index = (maxb - i) - 1;
753 		ma[index] = mab[i];
754 		vm_page_flag_clear(ma[index], PG_CLEANCHK);
755 	}
756 	vm_page_flag_clear(p, PG_CLEANCHK);
757 	ma[maxb] = p;
758 	for(i = 0; i < maxf; i++) {
759 		int index = (maxb + i) + 1;
760 		ma[index] = maf[i];
761 		vm_page_flag_clear(ma[index], PG_CLEANCHK);
762 	}
763 	runlen = maxb + maxf + 1;
764 
765 	vm_pageout_flush(ma, runlen, pagerflags);
766 	for (i = 0; i < runlen; i++) {
767 		if (ma[i]->valid & ma[i]->dirty) {
768 			vm_page_protect(ma[i], VM_PROT_READ);
769 			vm_page_flag_set(ma[i], PG_CLEANCHK);
770 
771 			/*
772 			 * maxf will end up being the actual number of pages
773 			 * we wrote out contiguously, non-inclusive of the
774 			 * first page.  We do not count look-behind pages.
775 			 */
776 			if (i >= maxb + 1 && (maxf > i - maxb - 1))
777 				maxf = i - maxb - 1;
778 		}
779 	}
780 	return(maxf + 1);
781 }
782 
783 #ifdef not_used
784 /* XXX I cannot tell if this should be an exported symbol */
785 /*
786  *	vm_object_deactivate_pages
787  *
788  *	Deactivate all pages in the specified object.  (Keep its pages
789  *	in memory even though it is no longer referenced.)
790  *
791  *	The object must be locked.
792  */
793 static int vm_object_deactivate_pages_callback(vm_page_t p, void *data);
794 
795 static void
796 vm_object_deactivate_pages(vm_object_t object)
797 {
798 	crit_enter();
799 	vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
800 				vm_object_deactivate_pages_callback, NULL);
801 	crit_exit();
802 }
803 
804 static int
805 vm_object_deactivate_pages_callback(vm_page_t p, void *data __unused)
806 {
807 	vm_page_deactivate(p);
808 	return(0);
809 }
810 
811 #endif
812 
813 /*
814  * Same as vm_object_pmap_copy, except range checking really
815  * works, and is meant for small sections of an object.
816  *
817  * This code protects resident pages by making them read-only
818  * and is typically called on a fork or split when a page
819  * is converted to copy-on-write.
820  *
821  * NOTE: If the page is already at VM_PROT_NONE, calling
822  * vm_page_protect will have no effect.
823  */
824 void
825 vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
826 {
827 	vm_pindex_t idx;
828 	vm_page_t p;
829 
830 	if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
831 		return;
832 
833 	/*
834 	 * spl protection needed to prevent races between the lookup,
835 	 * an interrupt unbusy/free, and our protect call.
836 	 */
837 	crit_enter();
838 	for (idx = start; idx < end; idx++) {
839 		p = vm_page_lookup(object, idx);
840 		if (p == NULL)
841 			continue;
842 		vm_page_protect(p, VM_PROT_READ);
843 	}
844 	crit_exit();
845 }
846 
847 /*
848  *	vm_object_pmap_remove:
849  *
850  *	Removes all physical pages in the specified
851  *	object range from all physical maps.
852  *
853  *	The object must *not* be locked.
854  */
855 
856 static int vm_object_pmap_remove_callback(vm_page_t p, void *data);
857 
858 void
859 vm_object_pmap_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
860 {
861 	struct rb_vm_page_scan_info info;
862 
863 	if (object == NULL)
864 		return;
865 	info.start_pindex = start;
866 	info.end_pindex = end - 1;
867 	crit_enter();
868 	vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
869 				vm_object_pmap_remove_callback, &info);
870 	if (start == 0 && end == object->size)
871 		vm_object_clear_flag(object, OBJ_WRITEABLE);
872 	crit_exit();
873 }
874 
875 static int
876 vm_object_pmap_remove_callback(vm_page_t p, void *data __unused)
877 {
878 	vm_page_protect(p, VM_PROT_NONE);
879 	return(0);
880 }
881 
882 /*
883  *	vm_object_madvise:
884  *
885  *	Implements the madvise function at the object/page level.
886  *
887  *	MADV_WILLNEED	(any object)
888  *
889  *	    Activate the specified pages if they are resident.
890  *
891  *	MADV_DONTNEED	(any object)
892  *
893  *	    Deactivate the specified pages if they are resident.
894  *
895  *	MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects,
896  *			 OBJ_ONEMAPPING only)
897  *
898  *	    Deactivate and clean the specified pages if they are
899  *	    resident.  This permits the process to reuse the pages
900  *	    without faulting or the kernel to reclaim the pages
901  *	    without I/O.
902  */
903 void
904 vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
905 {
906 	vm_pindex_t end, tpindex;
907 	vm_object_t tobject;
908 	vm_page_t m;
909 
910 	if (object == NULL)
911 		return;
912 
913 	end = pindex + count;
914 
915 	/*
916 	 * Locate and adjust resident pages
917 	 */
918 
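	/*
	 * For each index, start at the top-level object and follow the
	 * backing object chain (shadowlookup) until a resident page is
	 * found.  Sleeping on a busy page restarts the index from the
	 * top (relookup).
	 */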
919 	for (; pindex < end; pindex += 1) {
920 relookup:
921 		tobject = object;
922 		tpindex = pindex;
923 shadowlookup:
924 		/*
925 		 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
926 		 * and those pages must be OBJ_ONEMAPPING.
927 		 */
928 		if (advise == MADV_FREE) {
929 			if ((tobject->type != OBJT_DEFAULT &&
930 			     tobject->type != OBJT_SWAP) ||
931 			    (tobject->flags & OBJ_ONEMAPPING) == 0) {
932 				continue;
933 			}
934 		}
935 
936 		/*
937 		 * spl protection is required to avoid a race between the
938 		 * lookup, an interrupt unbusy/free, and our busy check.
939 		 */
940 
941 		crit_enter();
942 		m = vm_page_lookup(tobject, tpindex);
943 
944 		if (m == NULL) {
945 			/*
946 			 * There may be swap even if there is no backing page
947 			 */
948 			if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
949 				swap_pager_freespace(tobject, tpindex, 1);
950 
951 			/*
952 			 * next object
953 			 */
954 			crit_exit();
955 			if (tobject->backing_object == NULL)
956 				continue;
957 			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
958 			tobject = tobject->backing_object;
959 			goto shadowlookup;
960 		}
961 
962 		/*
963 		 * If the page is busy or not in a normal active state,
964 		 * we skip it.  If the page is not managed there are no
965 		 * page queues to mess with.  Things can break if we mess
966 		 * with pages in any of the below states.
967 		 */
968 		if (
969 		    m->hold_count ||
970 		    m->wire_count ||
971 		    (m->flags & PG_UNMANAGED) ||
972 		    m->valid != VM_PAGE_BITS_ALL
973 		) {
974 			crit_exit();
975 			continue;
976 		}
977 
978 		if (vm_page_sleep_busy(m, TRUE, "madvpo")) {
979 			crit_exit();
980 			goto relookup;
981 		}
982 		crit_exit();
983 
984 		/*
985 		 * Theoretically once a page is known not to be busy, an
986 		 * interrupt cannot come along and rip it out from under us.
987 		 */
988 
989 		if (advise == MADV_WILLNEED) {
990 			vm_page_activate(m);
991 		} else if (advise == MADV_DONTNEED) {
992 			vm_page_dontneed(m);
993 		} else if (advise == MADV_FREE) {
994 			/*
995 			 * Mark the page clean.  This will allow the page
996 			 * to be freed up by the system.  However, such pages
997 			 * are often reused quickly by malloc()/free()
998 			 * so we do not do anything that would cause
999 			 * a page fault if we can help it.
1000 			 *
1001 			 * Specifically, we do not try to actually free
1002 			 * the page now nor do we try to put it in the
1003 			 * cache (which would cause a page fault on reuse).
1004 			 *
1005 			 * But we do make the page as freeable as we
1006 			 * can without actually taking the step of unmapping
1007 			 * it.
1008 			 */
1009 			pmap_clear_modify(m);
1010 			m->dirty = 0;
1011 			m->act_count = 0;
1012 			vm_page_dontneed(m);
1013 			if (tobject->type == OBJT_SWAP)
1014 				swap_pager_freespace(tobject, tpindex, 1);
1015 		}
1016 	}
1017 }
1018 
1019 /*
1020  *	vm_object_shadow:
1021  *
1022  *	Create a new object which is backed by the
1023  *	specified existing object range.  The source
1024  *	object reference is deallocated.
1025  *
1026  *	The new object and offset into that object
1027  *	are returned in the source parameters.
1028  */
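
/*
 * Illustrative use (hypothetical caller, e.g. a copy-on-write split):
 *
 *	vm_object_t obj = old_object;	(caller holds one reference)
 *	vm_ooffset_t off = old_offset;
 *	vm_object_shadow(&obj, &off, length);
 *
 * obj and off now designate the new shadow object; the caller's
 * reference to old_object is logically transferred to the new object.
 */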
1029 
1030 void
1031 vm_object_shadow(vm_object_t *object,	/* IN/OUT */
1032 		 vm_ooffset_t *offset,	/* IN/OUT */
1033 		 vm_size_t length)
1034 {
1035 	vm_object_t source;
1036 	vm_object_t result;
1037 
1038 	source = *object;
1039 
1040 	/*
1041 	 * Don't create the new object if the old object isn't shared.
1042 	 */
1043 
1044 	if (source != NULL &&
1045 	    source->ref_count == 1 &&
1046 	    source->handle == NULL &&
1047 	    (source->type == OBJT_DEFAULT ||
1048 	     source->type == OBJT_SWAP))
1049 		return;
1050 
1051 	/*
1052 	 * Allocate a new object with the given length
1053 	 */
1054 
1055 	if ((result = vm_object_allocate(OBJT_DEFAULT, length)) == NULL)
1056 		panic("vm_object_shadow: no object for shadowing");
1057 
1058 	/*
1059 	 * The new object shadows the source object, adding a reference to it.
1060 	 * Our caller changes his reference to point to the new object,
1061 	 * removing a reference to the source object.  Net result: no change
1062 	 * of reference count.
1063 	 *
1064 	 * Try to optimize the result object's page color when shadowing
1065 	 * in order to maintain page coloring consistency in the combined
1066 	 * shadowed object.
1067 	 */
1068 	result->backing_object = source;
1069 	if (source) {
1070 		LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
1071 		source->shadow_count++;
1072 		source->generation++;
1073 		result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) & PQ_L2_MASK;
1074 	}
1075 
1076 	/*
1077 	 * Store the offset into the source object, and fix up the offset into
1078 	 * the new object.
1079 	 */
1080 
1081 	result->backing_object_offset = *offset;
1082 
1083 	/*
1084 	 * Return the new object and the offset into it
1085 	 */
1086 
1087 	*offset = 0;
1088 	*object = result;
1089 }
1090 
1091 #define	OBSC_TEST_ALL_SHADOWED	0x0001
1092 #define	OBSC_COLLAPSE_NOWAIT	0x0002
1093 #define	OBSC_COLLAPSE_WAIT	0x0004
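
/*
 * vm_object_backing_scan() operations: OBSC_TEST_ALL_SHADOWED checks
 * whether the parent completely shadows the backing object's resident
 * pages; OBSC_COLLAPSE_NOWAIT and OBSC_COLLAPSE_WAIT migrate pages from
 * the backing object into the parent, with the WAIT form sleeping on
 * busy pages and the NOWAIT form skipping them.
 */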
1094 
1095 static int vm_object_backing_scan_callback(vm_page_t p, void *data);
1096 
1097 static __inline int
1098 vm_object_backing_scan(vm_object_t object, int op)
1099 {
1100 	struct rb_vm_page_scan_info info;
1101 	vm_object_t backing_object;
1102 
1103 	/*
1104 	 * spl protection is required to avoid races between the memq/lookup,
1105 	 * an interrupt doing an unbusy/free, and our busy check.  Among
1106 	 * other things.
1107 	 */
1108 	crit_enter();
1109 
1110 	backing_object = object->backing_object;
1111 	info.backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
1112 
1113 	/*
1114 	 * Initial conditions
1115 	 */
1116 
1117 	if (op & OBSC_TEST_ALL_SHADOWED) {
1118 		/*
1119 		 * We do not want to have to test for the existence of
1120 		 * swap pages in the backing object.  XXX but with the
1121 		 * new swapper this would be pretty easy to do.
1122 		 *
1123 		 * XXX what about anonymous MAP_SHARED memory that hasn't
1124 		 * been ZFOD faulted yet?  If we do not test for this, the
1125 		 * shadow test may succeed! XXX
1126 		 */
1127 		if (backing_object->type != OBJT_DEFAULT) {
1128 			crit_exit();
1129 			return(0);
1130 		}
1131 	}
1132 	if (op & OBSC_COLLAPSE_WAIT) {
1133 		KKASSERT((backing_object->flags & OBJ_DEAD) == 0);
1134 		vm_object_set_flag(backing_object, OBJ_DEAD);
1135 	}
1136 
1137 	/*
1138 	 * Our scan.  We have to retry if a negative error code is returned,
1139 	 * otherwise 0 or 1 will be returned in info.error.  A 0 indicates that
1140 	 * the scan had to be stopped because the parent does not completely
1141 	 * shadow the child.
1142 	 */
1143 	info.object = object;
1144 	info.backing_object = backing_object;
1145 	info.limit = op;
1146 	do {
1147 		info.error = 1;
1148 		vm_page_rb_tree_RB_SCAN(&backing_object->rb_memq, NULL,
1149 					vm_object_backing_scan_callback,
1150 					&info);
1151 	} while (info.error < 0);
1152 	crit_exit();
1153 	return(info.error);
1154 }
1155 
1156 static int
1157 vm_object_backing_scan_callback(vm_page_t p, void *data)
1158 {
1159 	struct rb_vm_page_scan_info *info = data;
1160 	vm_object_t backing_object;
1161 	vm_object_t object;
1162 	vm_pindex_t new_pindex;
1163 	vm_pindex_t backing_offset_index;
1164 	int op;
1165 
1166 	new_pindex = p->pindex - info->backing_offset_index;
1167 	op = info->limit;
1168 	object = info->object;
1169 	backing_object = info->backing_object;
1170 	backing_offset_index = info->backing_offset_index;
1171 
1172 	if (op & OBSC_TEST_ALL_SHADOWED) {
1173 		vm_page_t pp;
1174 
1175 		/*
1176 		 * Ignore pages outside the parent object's range
1177 		 * and outside the parent object's mapping of the
1178 		 * backing object.
1179 		 *
1180 		 * note that we do not busy the backing object's
1181 		 * page.
1182 		 */
1183 		if (
1184 		    p->pindex < backing_offset_index ||
1185 		    new_pindex >= object->size
1186 		) {
1187 			return(0);
1188 		}
1189 
1190 		/*
1191 		 * See if the parent has the page or if the parent's
1192 		 * object pager has the page.  If the parent has the
1193 		 * page but the page is not valid, the parent's
1194 		 * object pager must have the page.
1195 		 *
1196 		 * If this fails, the parent does not completely shadow
1197 		 * the object and we might as well give up now.
1198 		 */
1199 
1200 		pp = vm_page_lookup(object, new_pindex);
1201 		if (
1202 		    (pp == NULL || pp->valid == 0) &&
1203 		    !vm_pager_has_page(object, new_pindex, NULL, NULL)
1204 		) {
1205 			info->error = 0;	/* problemo */
1206 			return(-1);		/* stop the scan */
1207 		}
1208 	}
1209 
1210 	/*
1211 	 * Check for busy page
1212 	 */
1213 
1214 	if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
1215 		vm_page_t pp;
1216 
1217 		if (op & OBSC_COLLAPSE_NOWAIT) {
1218 			if (
1219 			    (p->flags & PG_BUSY) ||
1220 			    !p->valid ||
1221 			    p->hold_count ||
1222 			    p->wire_count ||
1223 			    p->busy
1224 			) {
1225 				return(0);
1226 			}
1227 		} else if (op & OBSC_COLLAPSE_WAIT) {
1228 			if (vm_page_sleep_busy(p, TRUE, "vmocol")) {
1229 				/*
1230 				 * If we slept, anything could have
1231 				 * happened.   Ask that the scan be restarted.
1232 				 *
1233 				 * Since the object is marked dead, the
1234 				 * backing offset should not have changed.
1235 				 */
1236 				info->error = -1;
1237 				return(-1);
1238 			}
1239 		}
1240 
1241 		/*
1242 		 * Busy the page
1243 		 */
1244 		vm_page_busy(p);
1245 
1246 		KASSERT(
1247 		    p->object == backing_object,
1248 		    ("vm_object_qcollapse(): object mismatch")
1249 		);
1250 
1251 		/*
1252 		 * Destroy any associated swap
1253 		 */
1254 		if (backing_object->type == OBJT_SWAP) {
1255 			swap_pager_freespace(
1256 			    backing_object,
1257 			    p->pindex,
1258 			    1
1259 			);
1260 		}
1261 
1262 		if (
1263 		    p->pindex < backing_offset_index ||
1264 		    new_pindex >= object->size
1265 		) {
1266 			/*
1267 			 * Page is out of the parent object's range, we
1268 			 * can simply destroy it.
1269 			 */
1270 			vm_page_protect(p, VM_PROT_NONE);
1271 			vm_page_free(p);
1272 			return(0);
1273 		}
1274 
1275 		pp = vm_page_lookup(object, new_pindex);
1276 		if (
1277 		    pp != NULL ||
1278 		    vm_pager_has_page(object, new_pindex, NULL, NULL)
1279 		) {
1280 			/*
1281 			 * page already exists in parent OR swap exists
1282 			 * for this location in the parent.  Destroy
1283 			 * the original page from the backing object.
1284 			 *
1285 			 * Leave the parent's page alone
1286 			 */
1287 			vm_page_protect(p, VM_PROT_NONE);
1288 			vm_page_free(p);
1289 			return(0);
1290 		}
1291 
1292 		/*
1293 		 * Page does not exist in parent, rename the
1294 		 * page from the backing object to the main object.
1295 		 *
1296 		 * If the page was mapped to a process, it can remain
1297 		 * mapped through the rename.
1298 		 */
1299 		if ((p->queue - p->pc) == PQ_CACHE)
1300 			vm_page_deactivate(p);
1301 
1302 		vm_page_rename(p, object, new_pindex);
1303 		/* page automatically made dirty by rename */
1304 	}
1305 	return(0);
1306 }
1307 
1308 /*
1309  * This version of collapse allows the operation to occur earlier and
1310  * when paging_in_progress is true for an object...  This is not a complete
1311  * operation, but should plug 99.9% of the rest of the leaks.
1312  */
1313 static void
1314 vm_object_qcollapse(vm_object_t object)
1315 {
1316 	vm_object_t backing_object = object->backing_object;
1317 
1318 	if (backing_object->ref_count != 1)
1319 		return;
1320 
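	/*
	 * Artificially pin the backing object with extra references so
	 * it cannot be terminated out from under the scan.
	 */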
1321 	backing_object->ref_count += 2;
1322 
1323 	vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);
1324 
1325 	backing_object->ref_count -= 2;
1326 }
1327 
1328 /*
1329  *	vm_object_collapse:
1330  *
1331  *	Collapse an object with the object backing it.
1332  *	Pages in the backing object are moved into the
1333  *	parent, and the backing object is deallocated.
1334  */
1335 void
1336 vm_object_collapse(vm_object_t object)
1337 {
1338 	while (TRUE) {
1339 		vm_object_t backing_object;
1340 
1341 		/*
1342 		 * Verify that the conditions are right for collapse:
1343 		 *
1344 		 * The object exists and the backing object exists.
1345 		 */
1346 		if (object == NULL)
1347 			break;
1348 
1349 		if ((backing_object = object->backing_object) == NULL)
1350 			break;
1351 
1352 		/*
1353 		 * We check the backing object first, because it is most likely
1354 		 * not collapsible.
1355 		 */
1356 		if (backing_object->handle != NULL ||
1357 		    (backing_object->type != OBJT_DEFAULT &&
1358 		     backing_object->type != OBJT_SWAP) ||
1359 		    (backing_object->flags & OBJ_DEAD) ||
1360 		    object->handle != NULL ||
1361 		    (object->type != OBJT_DEFAULT &&
1362 		     object->type != OBJT_SWAP) ||
1363 		    (object->flags & OBJ_DEAD)) {
1364 			break;
1365 		}
1366 
1367 		if (
1368 		    object->paging_in_progress != 0 ||
1369 		    backing_object->paging_in_progress != 0
1370 		) {
1371 			vm_object_qcollapse(object);
1372 			break;
1373 		}
1374 
1375 		/*
1376 		 * We know that we can either collapse the backing object (if
1377 		 * the parent is the only reference to it) or (perhaps) have
1378 		 * the parent bypass the object if the parent happens to shadow
1379 		 * all the resident pages in the entire backing object.
1380 		 *
1381 		 * This is ignoring pager-backed pages such as swap pages.
1382 		 * vm_object_backing_scan fails the shadowing test in this
1383 		 * case.
1384 		 */
1385 
1386 		if (backing_object->ref_count == 1) {
1387 			/*
1388 			 * If there is exactly one reference to the backing
1389 			 * object, we can collapse it into the parent.
1390 			 */
1391 			vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);
1392 
1393 			/*
1394 			 * Move the pager from backing_object to object.
1395 			 */
1396 
1397 			if (backing_object->type == OBJT_SWAP) {
1398 				vm_object_pip_add(backing_object, 1);
1399 
1400 				/*
1401 				 * scrap the paging_offset junk and do a
1402 				 * discrete copy.  This also removes major
1403 				 * assumptions about how the swap-pager
1404 				 * works from where it doesn't belong.  The
1405 				 * new swapper is able to optimize the
1406 				 * destroy-source case.
1407 				 */
1408 
1409 				vm_object_pip_add(object, 1);
1410 				swap_pager_copy(
1411 				    backing_object,
1412 				    object,
1413 				    OFF_TO_IDX(object->backing_object_offset), TRUE);
1414 				vm_object_pip_wakeup(object);
1415 
1416 				vm_object_pip_wakeup(backing_object);
1417 			}
1418 			/*
1419 			 * Object now shadows whatever backing_object did.
1420 			 * Note that the reference to
1421 			 * backing_object->backing_object moves from within
1422 			 * backing_object to within object.
1423 			 */
1424 
1425 			LIST_REMOVE(object, shadow_list);
1426 			object->backing_object->shadow_count--;
1427 			object->backing_object->generation++;
1428 			if (backing_object->backing_object) {
1429 				LIST_REMOVE(backing_object, shadow_list);
1430 				backing_object->backing_object->shadow_count--;
1431 				backing_object->backing_object->generation++;
1432 			}
1433 			object->backing_object = backing_object->backing_object;
1434 			if (object->backing_object) {
1435 				LIST_INSERT_HEAD(
1436 				    &object->backing_object->shadow_head,
1437 				    object,
1438 				    shadow_list
1439 				);
1440 				object->backing_object->shadow_count++;
1441 				object->backing_object->generation++;
1442 			}
1443 
1444 			object->backing_object_offset +=
1445 			    backing_object->backing_object_offset;
1446 
1447 			/*
1448 			 * Discard backing_object.
1449 			 *
1450 			 * Since the backing object has no pages, no pager left,
1451 			 * and no object references within it, all that is
1452 			 * necessary is to dispose of it.
1453 			 */
1454 
1455 			KASSERT(backing_object->ref_count == 1, ("backing_object %p was somehow re-referenced during collapse!", backing_object));
1456 			KASSERT(RB_EMPTY(&backing_object->rb_memq), ("backing_object %p somehow has left over pages during collapse!", backing_object));
1457 			crit_enter();
1458 			TAILQ_REMOVE(
1459 			    &vm_object_list,
1460 			    backing_object,
1461 			    object_list
1462 			);
1463 			vm_object_count--;
1464 			crit_exit();
1465 
1466 			zfree(obj_zone, backing_object);
1467 
1468 			object_collapses++;
1469 		} else {
1470 			vm_object_t new_backing_object;
1471 
1472 			/*
1473 			 * If we do not entirely shadow the backing object,
1474 			 * there is nothing we can do so we give up.
1475 			 */
1476 
1477 			if (vm_object_backing_scan(object, OBSC_TEST_ALL_SHADOWED) == 0) {
1478 				break;
1479 			}
1480 
1481 			/*
1482 			 * Make the parent shadow the next object in the
1483 			 * chain.  Deallocating backing_object will not remove
1484 			 * it, since its reference count is at least 2.
1485 			 */
1486 
1487 			LIST_REMOVE(object, shadow_list);
1488 			backing_object->shadow_count--;
1489 			backing_object->generation++;
1490 
1491 			new_backing_object = backing_object->backing_object;
1492 			if ((object->backing_object = new_backing_object) != NULL) {
1493 				vm_object_reference(new_backing_object);
1494 				LIST_INSERT_HEAD(
1495 				    &new_backing_object->shadow_head,
1496 				    object,
1497 				    shadow_list
1498 				);
1499 				new_backing_object->shadow_count++;
1500 				new_backing_object->generation++;
1501 				object->backing_object_offset +=
1502 					backing_object->backing_object_offset;
1503 			}
1504 
1505 			/*
1506 			 * Drop the reference count on backing_object. Since
1507 			 * its ref_count was at least 2, it will not vanish;
1508 			 * so we don't need to call vm_object_deallocate, but
1509 			 * we do anyway.
1510 			 */
1511 			vm_object_deallocate(backing_object);
1512 			object_bypasses++;
1513 		}
1514 
1515 		/*
1516 		 * Try again with this object's new backing object.
1517 		 */
1518 	}
1519 }
1520 
1521 /*
1522  *	vm_object_page_remove: [internal]
1523  *
1524  *	Removes all physical pages in the specified
1525  *	object range from the object's list of pages.
1526  */
1527 static int vm_object_page_remove_callback(vm_page_t p, void *data);
1528 
1529 void
1530 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
1531 		      boolean_t clean_only)
1532 {
1533 	struct rb_vm_page_scan_info info;
1534 	int all;
1535 
1536 	/*
1537 	 * Degenerate cases and assertions
1538 	 */
1539 	if (object == NULL || object->resident_page_count == 0)
1540 		return;
1541 	KASSERT(object->type != OBJT_PHYS,
1542 		("attempt to remove pages from a physical object"));
1543 
1544 	/*
1545 	 * Indicate that paging is occurring on the object
1546 	 */
1547 	crit_enter();
1548 	vm_object_pip_add(object, 1);
1549 
1550 	/*
1551 	 * Figure out the actual removal range and whether we are removing
1552 	 * the entire contents of the object or not.  If removing the entire
1553 	 * contents, be sure to get all pages, even those that might be
1554 	 * beyond the end of the object.
1555 	 */
1556 	info.start_pindex = start;
1557 	if (end == 0)
1558 		info.end_pindex = (vm_pindex_t)-1;
1559 	else
1560 		info.end_pindex = end - 1;
1561 	info.limit = clean_only;
1562 	all = (start == 0 && info.end_pindex >= object->size - 1);
1563 
1564 	/*
1565 	 * Loop until we are sure we have gotten them all.
1566 	 */
1567 	do {
1568 		info.error = 0;
1569 		vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
1570 					vm_object_page_remove_callback, &info);
1571 	} while (info.error);
1572 
1573 	/*
1574 	 * Cleanup
1575 	 */
1576 	vm_object_pip_wakeup(object);
1577 	crit_exit();
1578 }
1579 
1580 static int
1581 vm_object_page_remove_callback(vm_page_t p, void *data)
1582 {
1583 	struct rb_vm_page_scan_info *info = data;
1584 
1585 	/*
1586 	 * Wired pages cannot be destroyed, but they can be invalidated
1587 	 * and we do so if clean_only (limit) is not set.
1588 	 */
1589 	if (p->wire_count != 0) {
1590 		vm_page_protect(p, VM_PROT_NONE);
1591 		if (info->limit == 0)
1592 			p->valid = 0;
1593 		return(0);
1594 	}
1595 
1596 	/*
1597 	 * The busy flags are only cleared at
1598 	 * interrupt -- minimize the spl transitions
1599 	 */
1600 
1601 	if (vm_page_sleep_busy(p, TRUE, "vmopar")) {
1602 		info->error = 1;
1603 		return(0);
1604 	}
1605 
1606 	/*
1607 	 * limit is our clean_only flag.  If set and the page is dirty, do
1608 	 * not free it.
1609 	 */
1610 	if (info->limit && p->valid) {
1611 		vm_page_test_dirty(p);
1612 		if (p->valid & p->dirty)
1613 			return(0);
1614 	}
1615 
1616 	/*
1617 	 * Destroy the page
1618 	 */
1619 	vm_page_busy(p);
1620 	vm_page_protect(p, VM_PROT_NONE);
1621 	vm_page_free(p);
1622 	return(0);
1623 }
1624 
1625 /*
1626  *	Routine:	vm_object_coalesce
1627  *	Function:	Coalesces two objects backing up adjoining
1628  *			regions of memory into a single object.
1629  *
1630  *	returns TRUE if objects were combined.
1631  *
1632  *	NOTE:	Only works at the moment if the second object is NULL -
1633  *		if it's not, which object do we lock first?
1634  *
1635  *	Parameters:
1636  *		prev_object	First object to coalesce
1637  *		prev_pindex	Page index into prev_object at which
1638  *				the coalesced region begins
1639  *
1640  *		prev_size	Size of reference to prev_object
1641  *		next_size	Size of the adjoining region being
1642  *				coalesced into prev_object
1643  *
1644  *	Conditions:
1645  *	The object must *not* be locked.
1646  */
1647 boolean_t
1648 vm_object_coalesce(vm_object_t prev_object, vm_pindex_t prev_pindex,
1649     vm_size_t prev_size, vm_size_t next_size)
1650 {
1651 	vm_pindex_t next_pindex;
1652 
1653 	if (prev_object == NULL) {
1654 		return (TRUE);
1655 	}
1656 
1657 	if (prev_object->type != OBJT_DEFAULT &&
1658 	    prev_object->type != OBJT_SWAP) {
1659 		return (FALSE);
1660 	}
1661 
1662 	/*
1663 	 * Try to collapse the object first
1664 	 */
1665 	vm_object_collapse(prev_object);
1666 
1667 	/*
1668 	 * Can't coalesce if the object has more than one reference, is paged
1669 	 * out, shadows another object, or has a copy elsewhere; any of these
1670 	 * mean that pages not mapped to prev_entry may be in use anyway.
1671 	 */
1672 
1673 	if (prev_object->backing_object != NULL) {
1674 		return (FALSE);
1675 	}
1676 
1677 	prev_size >>= PAGE_SHIFT;
1678 	next_size >>= PAGE_SHIFT;
1679 	next_pindex = prev_pindex + prev_size;
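
	/*
	 * Example: after the shifts above, prev_pindex 10 with prev_size 4
	 * and next_size 2 (in pages) gives next_pindex 14; page indexes
	 * 14-15 are scrubbed below and the object is grown to at least 16
	 * pages.
	 */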
1680 
1681 	if ((prev_object->ref_count > 1) &&
1682 	    (prev_object->size != next_pindex)) {
1683 		return (FALSE);
1684 	}
1685 
1686 	/*
1687 	 * Remove any pages that may still be in the object from a previous
1688 	 * deallocation.
1689 	 */
1690 	if (next_pindex < prev_object->size) {
1691 		vm_object_page_remove(prev_object,
1692 				      next_pindex,
1693 				      next_pindex + next_size, FALSE);
1694 		if (prev_object->type == OBJT_SWAP)
1695 			swap_pager_freespace(prev_object,
1696 					     next_pindex, next_size);
1697 	}
1698 
1699 	/*
1700 	 * Extend the object if necessary.
1701 	 */
1702 	if (next_pindex + next_size > prev_object->size)
1703 		prev_object->size = next_pindex + next_size;
1704 
1705 	return (TRUE);
1706 }
1707 
1708 void
1709 vm_object_set_writeable_dirty(vm_object_t object)
1710 {
1711 	struct vnode *vp;
1712 
1713 	vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
1714 	if (object->type == OBJT_VNODE &&
1715 	    (vp = (struct vnode *)object->handle) != NULL) {
1716 		if ((vp->v_flag & VOBJDIRTY) == 0) {
1717 			vsetflags(vp, VOBJDIRTY);
1718 		}
1719 	}
1720 }
1721 
1722 
1723 
1724 #include "opt_ddb.h"
1725 #ifdef DDB
1726 #include <sys/kernel.h>
1727 
1728 #include <sys/cons.h>
1729 
1730 #include <ddb/ddb.h>
1731 
1732 static int	_vm_object_in_map (vm_map_t map, vm_object_t object,
1733 				       vm_map_entry_t entry);
1734 static int	vm_object_in_map (vm_object_t object);
1735 
1736 static int
1737 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
1738 {
1739 	vm_map_t tmpm;
1740 	vm_map_entry_t tmpe;
1741 	vm_object_t obj;
1742 	int entcount;
1743 
1744 	if (map == 0)
1745 		return 0;
1746 	if (entry == 0) {
1747 		tmpe = map->header.next;
1748 		entcount = map->nentries;
1749 		while (entcount-- && (tmpe != &map->header)) {
1750 			if( _vm_object_in_map(map, object, tmpe)) {
1751 				return 1;
1752 			}
1753 			tmpe = tmpe->next;
1754 		}
1755 		return (0);
1756 	}
1757 	switch(entry->maptype) {
1758 	case VM_MAPTYPE_SUBMAP:
1759 		tmpm = entry->object.sub_map;
1760 		tmpe = tmpm->header.next;
1761 		entcount = tmpm->nentries;
1762 		while (entcount-- && tmpe != &tmpm->header) {
1763 			if( _vm_object_in_map(tmpm, object, tmpe)) {
1764 				return 1;
1765 			}
1766 			tmpe = tmpe->next;
1767 		}
1768 		break;
1769 	case VM_MAPTYPE_NORMAL:
1770 	case VM_MAPTYPE_VPAGETABLE:
1771 		obj = entry->object.vm_object;
1772 		while (obj) {
1773 			if (obj == object)
1774 				return 1;
1775 			obj = obj->backing_object;
1776 		}
1777 		break;
1778 	default:
1779 		break;
1780 	}
1781 	return 0;
1782 }
1783 
1784 static int vm_object_in_map_callback(struct proc *p, void *data);
1785 
1786 struct vm_object_in_map_info {
1787 	vm_object_t object;
1788 	int rv;
1789 };
1790 
1791 static int
1792 vm_object_in_map(vm_object_t object)
1793 {
1794 	struct vm_object_in_map_info info;
1795 
1796 	info.rv = 0;
1797 	info.object = object;
1798 
1799 	allproc_scan(vm_object_in_map_callback, &info);
1800 	if (info.rv)
1801 		return 1;
1802 	if( _vm_object_in_map(&kernel_map, object, 0))
1803 		return 1;
1804 	if( _vm_object_in_map(&pager_map, object, 0))
1805 		return 1;
1806 	if( _vm_object_in_map(&buffer_map, object, 0))
1807 		return 1;
1808 	return 0;
1809 }
1810 
1811 static int
1812 vm_object_in_map_callback(struct proc *p, void *data)
1813 {
1814 	struct vm_object_in_map_info *info = data;
1815 
1816 	if (p->p_vmspace) {
1817 		if (_vm_object_in_map(&p->p_vmspace->vm_map, info->object, 0)) {
1818 			info->rv = 1;
1819 			return -1;
1820 		}
1821 	}
1822 	return (0);
1823 }
1824 
1825 DB_SHOW_COMMAND(vmochk, vm_object_check)
1826 {
1827 	vm_object_t object;
1828 
1829 	/*
1830 	 * make sure that internal objs are in a map somewhere
1831 	 * and none have zero ref counts.
1832 	 */
1833 	for (object = TAILQ_FIRST(&vm_object_list);
1834 			object != NULL;
1835 			object = TAILQ_NEXT(object, object_list)) {
1836 		if (object->handle == NULL &&
1837 		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
1838 			if (object->ref_count == 0) {
1839 				db_printf("vmochk: internal obj has zero ref count: %ld\n",
1840 					(long)object->size);
1841 			}
1842 			if (!vm_object_in_map(object)) {
1843 				db_printf(
1844 			"vmochk: internal obj is not in a map: "
1845 			"ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
1846 				    object->ref_count, (u_long)object->size,
1847 				    (u_long)object->size,
1848 				    (void *)object->backing_object);
1849 			}
1850 		}
1851 	}
1852 }
1853 
1854 /*
1855  *	vm_object_print:	[ debug ]
1856  */
1857 DB_SHOW_COMMAND(object, vm_object_print_static)
1858 {
1859 	/* XXX convert args. */
1860 	vm_object_t object = (vm_object_t)addr;
1861 	boolean_t full = have_addr;
1862 
1863 	vm_page_t p;
1864 
1865 	/* XXX count is an (unused) arg.  Avoid shadowing it. */
1866 #define	count	was_count
1867 
1868 	int count;
1869 
1870 	if (object == NULL)
1871 		return;
1872 
1873 	db_iprintf(
1874 	    "Object %p: type=%d, size=0x%lx, res=%d, ref=%d, flags=0x%x\n",
1875 	    object, (int)object->type, (u_long)object->size,
1876 	    object->resident_page_count, object->ref_count, object->flags);
1877 	/*
1878 	 * XXX no %qd in kernel.  Truncate object->backing_object_offset.
1879 	 */
1880 	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%lx\n",
1881 	    object->shadow_count,
1882 	    object->backing_object ? object->backing_object->ref_count : 0,
1883 	    object->backing_object, (long)object->backing_object_offset);
1884 
1885 	if (!full)
1886 		return;
1887 
1888 	db_indent += 2;
1889 	count = 0;
1890 	RB_FOREACH(p, vm_page_rb_tree, &object->rb_memq) {
1891 		if (count == 0)
1892 			db_iprintf("memory:=");
1893 		else if (count == 6) {
1894 			db_printf("\n");
1895 			db_iprintf(" ...");
1896 			count = 0;
1897 		} else
1898 			db_printf(",");
1899 		count++;
1900 
1901 		db_printf("(off=0x%lx,page=0x%lx)",
1902 		    (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p));
1903 	}
1904 	if (count != 0)
1905 		db_printf("\n");
1906 	db_indent -= 2;
1907 }
1908 
1909 /* XXX. */
1910 #undef count
1911 
1912 /* XXX need this non-static entry for calling from vm_map_print. */
1913 void
1914 vm_object_print(/* db_expr_t */ long addr,
1915 		boolean_t have_addr,
1916 		/* db_expr_t */ long count,
1917 		char *modif)
1918 {
1919 	vm_object_print_static(addr, have_addr, count, modif);
1920 }
1921 
1922 DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
1923 {
1924 	vm_object_t object;
1925 	int nl = 0;
1926 	int c;
1927 	for (object = TAILQ_FIRST(&vm_object_list);
1928 			object != NULL;
1929 			object = TAILQ_NEXT(object, object_list)) {
1930 		vm_pindex_t idx, fidx;
1931 		vm_pindex_t osize;
1932 		vm_paddr_t pa = -1, padiff;
1933 		int rcount;
1934 		vm_page_t m;
1935 
1936 		db_printf("new object: %p\n", (void *)object);
1937 		if ( nl > 18) {
1938 			c = cngetc();
1939 			if (c != ' ')
1940 				return;
1941 			nl = 0;
1942 		}
1943 		nl++;
1944 		rcount = 0;
1945 		fidx = 0;
1946 		osize = object->size;
1947 		if (osize > 128)
1948 			osize = 128;
1949 		for (idx = 0; idx < osize; idx++) {
1950 			m = vm_page_lookup(object, idx);
1951 			if (m == NULL) {
1952 				if (rcount) {
1953 					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
1954 						(long)fidx, rcount, (long)pa);
1955 					if ( nl > 18) {
1956 						c = cngetc();
1957 						if (c != ' ')
1958 							return;
1959 						nl = 0;
1960 					}
1961 					nl++;
1962 					rcount = 0;
1963 				}
1964 				continue;
1965 			}
1966 
1967 
1968 			if (rcount &&
1969 				(VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
1970 				++rcount;
1971 				continue;
1972 			}
1973 			if (rcount) {
1974 				padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
1975 				padiff >>= PAGE_SHIFT;
1976 				padiff &= PQ_L2_MASK;
1977 				if (padiff == 0) {
1978 					pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
1979 					++rcount;
1980 					continue;
1981 				}
1982 				db_printf(" index(%ld)run(%d)pa(0x%lx)",
1983 					(long)fidx, rcount, (long)pa);
1984 				db_printf("pd(%ld)\n", (long)padiff);
1985 				if ( nl > 18) {
1986 					c = cngetc();
1987 					if (c != ' ')
1988 						return;
1989 					nl = 0;
1990 				}
1991 				nl++;
1992 			}
1993 			fidx = idx;
1994 			pa = VM_PAGE_TO_PHYS(m);
1995 			rcount = 1;
1996 		}
1997 		if (rcount) {
1998 			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
1999 				(long)fidx, rcount, (long)pa);
2000 			if ( nl > 18) {
2001 				c = cngetc();
2002 				if (c != ' ')
2003 					return;
2004 				nl = 0;
2005 			}
2006 			nl++;
2007 		}
2008 	}
2009 }
2010 #endif /* DDB */
2011