xref: /freebsd/sys/vm/vm_object.c (revision a465acda)
1 /*
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
37  *
38  *
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  *
64  * $Id: vm_object.c,v 1.19 1995/01/24 10:13:14 davidg Exp $
65  */
66 
67 /*
68  *	Virtual memory object module.
69  */
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/kernel.h>
74 #include <sys/proc.h>		/* for curproc, pageproc */
75 #include <sys/malloc.h>
76 #include <sys/vnode.h>
77 #include <sys/mount.h>
78 
79 #include <vm/vm.h>
80 #include <vm/vm_page.h>
81 #include <vm/vm_pageout.h>
82 #include <vm/vm_pager.h>
83 #include <vm/swap_pager.h>
84 #include <vm/vnode_pager.h>
85 
86 static void _vm_object_allocate(vm_size_t, vm_object_t);
87 static void vm_object_rcollapse(vm_object_t, vm_object_t);
88 
89 /*
90  *	Virtual memory objects maintain the actual data
91  *	associated with allocated virtual memory.  A given
92  *	page of memory exists within exactly one object.
93  *
94  *	An object is only deallocated when all "references"
95  *	are given up.  Only one "reference" to a given
96  *	region of an object should be writeable.
97  *
98  *	Associated with each object is a list of all resident
99  *	memory pages belonging to that object; this list is
100  *	maintained by the "vm_page" module, and locked by the object's
101  *	lock.
102  *
103  *	Each object also records a "pager" routine which is
104  *	used to retrieve (and store) pages to the proper backing
105  *	storage.  In addition, objects may be backed by other
106  *	objects from which they were virtual-copied.
107  *
108  *	The only items within the object structure which are
109  *	modified after time of creation are:
110  *		reference count		locked by object's lock
111  *		pager routine		locked by object's lock
112  *
113  */
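
/*
 *	A rough sketch (illustrative only) of the relationships manipulated
 *	below.  vm_object_shadow() builds chains of the form
 *
 *		object --shadow--> backing object --shadow--> ...
 *
 *	where shadow_offset records where the object starts within its
 *	backing object, and each object keeps the objects shadowing it on
 *	its reverse_shadow_head list.  vm_object_collapse(),
 *	vm_object_rcollapse() and vm_object_qcollapse() try to fold such
 *	chains back into a single object.
 */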
114 
115 
116 struct vm_object kernel_object_store;
117 struct vm_object kmem_object_store;
118 
119 int vm_object_cache_max;
120 
121 #define	VM_OBJECT_HASH_COUNT	509
122 
123 struct vm_object_hash_head vm_object_hashtable[VM_OBJECT_HASH_COUNT];
124 
125 long object_collapses = 0;
126 long object_bypasses = 0;
127 
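/*
 *	_vm_object_allocate:
 *
 *	Initialize a caller-supplied object structure.  Used by
 *	vm_object_allocate() and, at boot time, directly on the static
 *	kernel_object_store and kmem_object_store.
 */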
128 static void
129 _vm_object_allocate(size, object)
130 	vm_size_t size;
131 	register vm_object_t object;
132 {
133 	bzero(object, sizeof *object);
134 	TAILQ_INIT(&object->memq);
135 	TAILQ_INIT(&object->reverse_shadow_head);
136 	vm_object_lock_init(object);
137 	object->ref_count = 1;
138 	object->resident_page_count = 0;
139 	object->size = size;
140 	object->flags = OBJ_INTERNAL;	/* vm_allocate_with_pager will reset */
141 	object->paging_in_progress = 0;
142 	object->copy = NULL;
143 	object->last_read = 0;
144 
145 	/*
146 	 * Object starts out read-write, with no pager.
147 	 */
148 
149 	object->pager = NULL;
150 	object->paging_offset = 0;
151 	object->shadow = NULL;
152 	object->shadow_offset = (vm_offset_t) 0;
153 
154 	simple_lock(&vm_object_list_lock);
155 	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
156 	vm_object_count++;
157 	cnt.v_nzfod += atop(size);
158 	simple_unlock(&vm_object_list_lock);
159 }
160 
161 /*
162  *	vm_object_init:
163  *
164  *	Initialize the VM objects module.
165  */
166 void
167 vm_object_init(vm_offset_t nothing)
168 {
169 	register int i;
170 
171 	TAILQ_INIT(&vm_object_cached_list);
172 	TAILQ_INIT(&vm_object_list);
173 	vm_object_count = 0;
174 	simple_lock_init(&vm_cache_lock);
175 	simple_lock_init(&vm_object_list_lock);
176 	vm_object_cache_max = (cnt.v_page_count - 500) / 8;
177 
178 	for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
179 		TAILQ_INIT(&vm_object_hashtable[i]);
180 
181 	kernel_object = &kernel_object_store;
182 	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
183 	    kernel_object);
184 
185 	kmem_object = &kmem_object_store;
186 	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
187 	    kmem_object);
188 }
189 
190 /*
191  *	vm_object_allocate:
192  *
193  *	Returns a new object with the given size.
194  */
195 
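/*
 *	Minimal usage sketch (illustrative only, not a call site in this
 *	file):
 *
 *		vm_object_t obj;
 *
 *		obj = vm_object_allocate(round_page(nbytes));
 *
 *	The new object carries one reference, no pager and OBJ_INTERNAL;
 *	a pager may be attached later with vm_object_setpager().
 */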
196 vm_object_t
197 vm_object_allocate(size)
198 	vm_size_t size;
199 {
200 	register vm_object_t result;
201 
202 	result = (vm_object_t)
203 	    malloc((u_long) sizeof *result, M_VMOBJ, M_WAITOK);
204 
205 
206 	_vm_object_allocate(size, result);
207 
208 	return (result);
209 }
210 
211 
212 /*
213  *	vm_object_reference:
214  *
215  *	Gets another reference to the given object.
216  */
217 inline void
218 vm_object_reference(object)
219 	register vm_object_t object;
220 {
221 	if (object == NULL)
222 		return;
223 
224 	vm_object_lock(object);
225 	object->ref_count++;
226 	vm_object_unlock(object);
227 }
228 
229 /*
230  *	vm_object_deallocate:
231  *
232  *	Release a reference to the specified object,
233  *	gained either through a vm_object_allocate
234  *	or a vm_object_reference call.  When all references
235  *	are gone, storage associated with this object
236  *	may be relinquished.
237  *
238  *	No object may be locked.
239  */
240 void
241 vm_object_deallocate(object)
242 	vm_object_t object;
243 {
244 	vm_object_t temp;
245 
246 	while (object != NULL) {
247 
248 		/*
249 		 * The cache holds a reference (uncounted) to the object; we
250 		 * must lock it before removing the object.
251 		 */
252 
253 		vm_object_cache_lock();
254 
255 		/*
256 		 * Lose the reference
257 		 */
258 		vm_object_lock(object);
259 		if (--(object->ref_count) != 0) {
260 			if (object->ref_count == 1) {
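				/*
				 * Exactly one reference remains, so if some
				 * object is shadowing us, give it a chance to
				 * pull our pages up into itself
				 * (vm_object_rcollapse) while an extra
				 * reference keeps it from going away.
				 */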
261 				if (object->reverse_shadow_head.tqh_first) {
262 					++object->reverse_shadow_head.tqh_first->ref_count;
263 					if (vm_object_lock_try(object->reverse_shadow_head.tqh_first)) {
264 						vm_object_rcollapse(object->reverse_shadow_head.tqh_first, object);
265 						vm_object_unlock(object->reverse_shadow_head.tqh_first);
266 					}
267 					vm_object_deallocate(object->reverse_shadow_head.tqh_first);
268 				}
269 			}
270 			vm_object_unlock(object);
271 			/*
272 			 * If there are still references, then we are done.
273 			 */
274 			vm_object_cache_unlock();
275 			return;
276 		}
277 		/*
278 		 * See if this object can persist.  If so, enter it in the
279 		 * cache, then deactivate all of its pages.
280 		 */
281 
282 		if (object->flags & OBJ_CANPERSIST) {
283 
284 			TAILQ_INSERT_TAIL(&vm_object_cached_list, object,
285 			    cached_list);
286 			vm_object_cached++;
287 			vm_object_cache_unlock();
288 
289 			vm_object_unlock(object);
290 
291 			vm_object_cache_trim();
292 			return;
293 		}
294 		/*
295 		 * Make sure no one can look us up now.
296 		 */
297 		object->flags |= OBJ_DEAD;
298 		vm_object_remove(object->pager);
299 		vm_object_cache_unlock();
300 
301 		temp = object->shadow;
302 		if (temp)
303 			TAILQ_REMOVE(&temp->reverse_shadow_head, object, reverse_shadow_list);
304 		vm_object_terminate(object);
305 		/* unlocks and deallocates object */
306 		object = temp;
307 	}
308 }
309 
310 /*
311  *	vm_object_terminate actually destroys the specified object, freeing
312  *	up all previously used resources.
313  *
314  *	The object must be locked.
315  */
316 void
317 vm_object_terminate(object)
318 	register vm_object_t object;
319 {
320 	register vm_page_t p, next;
321 	vm_object_t shadow_object;
322 	int s;
323 	struct vnode *vp = NULL;
324 
325 	/*
326 	 * Detach the object from its shadow if we are the shadow's copy.
327 	 */
328 	if ((shadow_object = object->shadow) != NULL) {
329 		vm_object_lock(shadow_object);
330 		if (shadow_object->copy == object)
331 			shadow_object->copy = NULL;
332 /*
333 		else if (shadow_object->copy != NULL)
334 			panic("vm_object_terminate: copy/shadow inconsistency");
335 */
336 		vm_object_unlock(shadow_object);
337 	}
338 	if (object->pager && (object->pager->pg_type == PG_VNODE)) {
339 		vn_pager_t vnp = object->pager->pg_data;
340 
341 		vp = vnp->vnp_vp;
342 		VOP_FSYNC(vp, NOCRED, MNT_WAIT, NULL);
343 		vinvalbuf(vp, 0, NOCRED, NULL, 0, 0);
344 	}
345 	/*
346 	 * Wait until the pageout daemon is through with the object.
347 	 */
348 
349 	s = splhigh();
350 	while (object->paging_in_progress) {
351 		vm_object_unlock(object);
352 		tsleep((caddr_t) object, PVM, "objtrm", 0);
353 		vm_object_lock(object);
354 	}
355 	splx(s);
356 
357 	/*
358 	 * While the paging system is locked, pull the object's pages off the
359 	 * active and inactive queues.  This keeps the pageout daemon from
360 	 * playing with them during vm_pager_deallocate.
361 	 *
362 	 * We can't free the pages yet, because the object's pager may have to
363 	 * write them out before deallocating the paging space.
364 	 */
365 
366 	for (p = object->memq.tqh_first; p; p = next) {
367 		VM_PAGE_CHECK(p);
368 		next = p->listq.tqe_next;
369 
370 		vm_page_lock_queues();
371 		if (p->flags & PG_CACHE)
372 			vm_page_free(p);
373 		else
374 			vm_page_unqueue(p);
375 		vm_page_unlock_queues();
376 		p = next;
377 	}
378 
379 	if (object->paging_in_progress != 0)
380 		panic("vm_object_terminate: pageout in progress");
381 
382 	/*
383 	 * Clean and free the pages, as appropriate. All references to the
384 	 * object are gone, so we don't need to lock it.
385 	 */
386 
387 	if (((object->flags & OBJ_INTERNAL) == 0) &&
388 	    object->pager && (object->pager->pg_type != PG_DEVICE)) {
389 		(void) vm_object_page_clean(object, 0, 0, TRUE, TRUE);
390 	}
391 	/*
392 	 * one last time -- get rid of buffers that might have been created
393 	 * by the vm_object_page_clean call above
394 	 */
395 	if (vp != NULL) {
396 		vm_object_unlock(object);
397 		vinvalbuf(vp, 0, NOCRED, NULL, 0, 0);
398 		vm_object_lock(object);
399 	}
400 	/*
401 	 * Now free the pages. For internal objects, this also removes them
402 	 * from paging queues.
403 	 */
404 	while ((p = object->memq.tqh_first) != NULL) {
405 		VM_PAGE_CHECK(p);
406 		vm_page_lock_queues();
407 		PAGE_WAKEUP(p);
408 		vm_page_free(p);
409 		cnt.v_pfree++;
410 		vm_page_unlock_queues();
411 	}
412 	vm_object_unlock(object);
413 
414 	/*
415 	 * Let the pager know object is dead.
416 	 */
417 	if (object->pager != NULL)
418 		vm_pager_deallocate(object->pager);
419 
420 	simple_lock(&vm_object_list_lock);
421 	TAILQ_REMOVE(&vm_object_list, object, object_list);
422 	vm_object_count--;
423 	simple_unlock(&vm_object_list_lock);
424 
425 	/*
426 	 * Free the space for the object.
427 	 */
428 	free((caddr_t) object, M_VMOBJ);
429 }
430 
431 /*
432  *	vm_object_page_clean
433  *
434  *	Clean all dirty pages in the specified range of the object.
435  *	Leaves pages on whatever queues they are currently on.
436  *
437  *	Odd semantics: if start == end, we clean everything.
438  *
439  *	The object must be locked.
440  */
441 #if 1
442 boolean_t
443 vm_object_page_clean(object, start, end, syncio, de_queue)
444 	register vm_object_t object;
445 	register vm_offset_t start;
446 	register vm_offset_t end;
447 	boolean_t syncio;
448 	boolean_t de_queue;
449 {
450 	register vm_page_t p, nextp;
451 	int size;
452 
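	/*
	 * Note: in this version the syncio and de_queue arguments are
	 * accepted but not used.
	 */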
453 	if (object->pager == NULL)
454 		return 1;
455 
456 	if (start != end) {
457 		start = trunc_page(start);
458 		end = round_page(end);
459 	}
460 	size = end - start;
461 
462 again:
463 	/*
464 	 * Wait until the pageout daemon is through with the object.
465 	 */
466 	while (object->paging_in_progress) {
467 		tsleep(object, PVM, "objpcw", 0);
468 	}
469 
470 	nextp = object->memq.tqh_first;
471 	while ((p = nextp) && ((start == end) || (size != 0))) {
472 		nextp = p->listq.tqe_next;
473 		if (start == end || (p->offset >= start && p->offset < end)) {
474 			if ((p->flags & PG_BUSY) || p->busy) {
475 				int s = splhigh();
476 
477 				p->flags |= PG_WANTED;
478 				tsleep(p, PVM, "objpcn", 0);
479 				splx(s);
480 				goto again;
481 			}
482 			size -= PAGE_SIZE;
483 
484 			vm_page_test_dirty(p);
485 
486 			if ((p->dirty & p->valid) != 0) {
487 				vm_pageout_clean(p, VM_PAGEOUT_FORCE);
488 				goto again;
489 			}
490 		}
491 	}
492 	wakeup((caddr_t) object);
493 	return 1;
494 }
495 #endif
496 /*
497  *	vm_object_page_clean
498  *
499  *	Clean all dirty pages in the specified range of the object.
500  *	If syncio is TRUE, page cleaning is done synchronously.
501  *	If de_queue is TRUE, pages are removed from any paging queue
502  *	they were on, otherwise they are left on whatever queue they
503  *	were on before the cleaning operation began.
504  *
505  *	Odd semantics: if start == end, we clean everything.
506  *
507  *	The object must be locked.
508  *
509  *	Returns TRUE if all was well, FALSE if there was a pager error
510  *	somewhere.  We attempt to clean (and dequeue) all pages regardless
511  *	of where an error occurs.
512  */
513 #if 0
514 boolean_t
515 vm_object_page_clean(object, start, end, syncio, de_queue)
516 	register vm_object_t object;
517 	register vm_offset_t start;
518 	register vm_offset_t end;
519 	boolean_t syncio;
520 	boolean_t de_queue;
521 {
522 	register vm_page_t p;
523 	int onqueue;
524 	boolean_t noerror = TRUE;
525 
526 	if (object == NULL)
527 		return (TRUE);
528 
529 	/*
530 	 * If it is an internal object and there is no pager, attempt to
531 	 * allocate one.  Note that vm_object_collapse may relocate one from a
532 	 * collapsed object so we must recheck afterward.
533 	 */
534 	if ((object->flags & OBJ_INTERNAL) && object->pager == NULL) {
535 		vm_object_collapse(object);
536 		if (object->pager == NULL) {
537 			vm_pager_t pager;
538 
539 			vm_object_unlock(object);
540 			pager = vm_pager_allocate(PG_DFLT, (caddr_t) 0,
541 			    object->size, VM_PROT_ALL,
542 			    (vm_offset_t) 0);
543 			if (pager)
544 				vm_object_setpager(object, pager, 0, FALSE);
545 			vm_object_lock(object);
546 		}
547 	}
548 	if (object->pager == NULL)
549 		return (FALSE);
550 
551 again:
552 	/*
553 	 * Wait until the pageout daemon is through with the object.
554 	 */
555 	while (object->paging_in_progress) {
556 		vm_object_sleep((int) object, object, FALSE);
557 		vm_object_lock(object);
558 	}
559 	/*
560 	 * Loop through the object page list cleaning as necessary.
561 	 */
562 	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
563 		onqueue = 0;
564 		if ((start == end || (p->offset >= start && p->offset < end)) &&
565 		    !(p->flags & PG_FICTITIOUS)) {
566 			vm_page_test_dirty(p);
567 			/*
568 			 * Remove the page from any paging queue. This needs
569 			 * to be done if either we have been explicitly asked
570 			 * to do so or it is about to be cleaned (see comment
571 			 * below).
572 			 */
573 			if (de_queue || (p->dirty & p->valid)) {
574 				vm_page_lock_queues();
575 				if (p->flags & PG_ACTIVE) {
576 					TAILQ_REMOVE(&vm_page_queue_active,
577 					    p, pageq);
578 					p->flags &= ~PG_ACTIVE;
579 					cnt.v_active_count--;
580 					onqueue = 1;
581 				} else if (p->flags & PG_INACTIVE) {
582 					TAILQ_REMOVE(&vm_page_queue_inactive,
583 					    p, pageq);
584 					p->flags &= ~PG_INACTIVE;
585 					cnt.v_inactive_count--;
586 					onqueue = -1;
587 				} else
588 					onqueue = 0;
589 				vm_page_unlock_queues();
590 			}
591 			/*
592 			 * To ensure the state of the page doesn't change
593 			 * during the clean operation we do two things. First
594 			 * we set the busy bit and write-protect all mappings
595 			 * to ensure that write accesses to the page block (in
596 			 * vm_fault).  Second, we remove the page from any
597 			 * paging queue to foil the pageout daemon
598 			 * (vm_pageout_scan).
599 			 */
600 			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ);
601 			if (p->dirty & p->valid) {
602 				p->flags |= PG_BUSY;
603 				object->paging_in_progress++;
604 				vm_object_unlock(object);
605 				/*
606 				 * XXX if put fails we mark the page as clean
607 				 * to avoid an infinite loop. Will lose
608 				 * changes to the page.
609 				 */
610 				if (vm_pager_put(object->pager, p, syncio)) {
611 					printf("%s: pager_put error\n",
612 					    "vm_object_page_clean");
613 					p->dirty = 0;
614 					noerror = FALSE;
615 				}
616 				vm_object_lock(object);
617 				object->paging_in_progress--;
618 				if (!de_queue && onqueue) {
619 					vm_page_lock_queues();
620 					if (onqueue > 0)
621 						vm_page_activate(p);
622 					else
623 						vm_page_deactivate(p);
624 					vm_page_unlock_queues();
625 				}
626 				PAGE_WAKEUP(p);
627 				goto again;
628 			}
629 		}
630 	}
631 	return (noerror);
632 }
633 #endif
634 
635 /*
636  *	vm_object_deactivate_pages
637  *
638  *	Deactivate all pages in the specified object.  (Keep its pages
639  *	in memory even though it is no longer referenced.)
640  *
641  *	The object must be locked.
642  */
643 void
644 vm_object_deactivate_pages(object)
645 	register vm_object_t object;
646 {
647 	register vm_page_t p, next;
648 
649 	for (p = object->memq.tqh_first; p != NULL; p = next) {
650 		next = p->listq.tqe_next;
651 		vm_page_lock_queues();
652 		vm_page_deactivate(p);
653 		vm_page_unlock_queues();
654 	}
655 }
656 
657 /*
658  *	Trim the object cache to size.
659  */
660 void
661 vm_object_cache_trim()
662 {
663 	register vm_object_t object;
664 
665 	vm_object_cache_lock();
666 	while (vm_object_cached > vm_object_cache_max) {
667 		object = vm_object_cached_list.tqh_first;
668 		vm_object_cache_unlock();
669 
670 		if (object != vm_object_lookup(object->pager))
671 			panic("vm_object_cache_trim: I'm sooo confused.");
672 
673 		pager_cache(object, FALSE);
674 
675 		vm_object_cache_lock();
676 	}
677 	vm_object_cache_unlock();
678 }
679 
680 
681 /*
682  *	vm_object_pmap_copy:
683  *
684  *	Makes all physical pages in the specified
685  *	object range copy-on-write.  No writeable
686  *	references to these pages should remain.
687  *
688  *	The object must *not* be locked.
689  */
690 void
691 vm_object_pmap_copy(object, start, end)
692 	register vm_object_t object;
693 	register vm_offset_t start;
694 	register vm_offset_t end;
695 {
696 	register vm_page_t p;
697 
698 	if (object == NULL)
699 		return;
700 
701 	vm_object_lock(object);
702 	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
703 		if ((start <= p->offset) && (p->offset < end)) {
704 			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ);
705 			p->flags |= PG_COPYONWRITE;
706 		}
707 	}
708 	vm_object_unlock(object);
709 }
710 
711 /*
712  *	vm_object_pmap_remove:
713  *
714  *	Removes all physical pages in the specified
715  *	object range from all physical maps.
716  *
717  *	The object must *not* be locked.
718  */
719 void
720 vm_object_pmap_remove(object, start, end)
721 	register vm_object_t object;
722 	register vm_offset_t start;
723 	register vm_offset_t end;
724 {
725 	register vm_page_t p;
726 	int s;
727 
728 	if (object == NULL)
729 		return;
730 	++object->paging_in_progress;
731 
732 	vm_object_lock(object);
733 again:
734 	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
735 		if ((start <= p->offset) && (p->offset < end)) {
736 			s = splhigh();
737 			if ((p->flags & PG_BUSY) || p->busy) {
738 				p->flags |= PG_WANTED;
739 				tsleep((caddr_t) p, PVM, "vmopmr", 0);
740 				splx(s);
741 				goto again;
742 			}
743 			splx(s);
744 			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
745 		}
746 	}
747 	vm_object_unlock(object);
748 	--object->paging_in_progress;
749 	if (object->paging_in_progress == 0)
750 		wakeup((caddr_t) object);
751 }
752 
753 /*
754  *	vm_object_copy:
755  *
756  *	Create a new object which is a copy of an existing
757  *	object, and mark all of the pages in the existing
758  *	object 'copy-on-write'.  The new object has one reference.
759  *	Returns the new object.
760  *
761  *	May defer the copy until later if the object is not backed
762  *	up by a non-default pager.
763  */
764 void
765 vm_object_copy(src_object, src_offset, size,
766     dst_object, dst_offset, src_needs_copy)
767 	register vm_object_t src_object;
768 	vm_offset_t src_offset;
769 	vm_size_t size;
770 	vm_object_t *dst_object;/* OUT */
771 	vm_offset_t *dst_offset;/* OUT */
772 	boolean_t *src_needs_copy;	/* OUT */
773 {
774 	register vm_object_t new_copy;
775 	register vm_object_t old_copy;
776 	vm_offset_t new_start, new_end;
777 
778 	register vm_page_t p;
779 
780 	if (src_object == NULL) {
781 		/*
782 		 * Nothing to copy
783 		 */
784 		*dst_object = NULL;
785 		*dst_offset = 0;
786 		*src_needs_copy = FALSE;
787 		return;
788 	}
789 	/*
790 	 * If the object's pager is null_pager or the default pager, we don't
791 	 * have to make a copy of it.  Instead, we set the needs copy flag and
792 	 * make a shadow later.
793 	 */
794 
795 	vm_object_lock(src_object);
796 
797 	/*
798 	 * Try to collapse the object before copying it.
799 	 */
800 
801 	vm_object_collapse(src_object);
802 
803 	if (src_object->pager == NULL ||
804 	    src_object->pager->pg_type == PG_SWAP ||
805 	    (src_object->flags & OBJ_INTERNAL)) {
806 
807 		/*
808 		 * Make another reference to the object
809 		 */
810 		src_object->ref_count++;
811 
812 		/*
813 		 * Mark all of the pages copy-on-write.
814 		 */
815 		for (p = src_object->memq.tqh_first; p; p = p->listq.tqe_next)
816 			if (src_offset <= p->offset &&
817 			    p->offset < src_offset + size)
818 				p->flags |= PG_COPYONWRITE;
819 		vm_object_unlock(src_object);
820 
821 		*dst_object = src_object;
822 		*dst_offset = src_offset;
823 
824 		/*
825 		 * Must make a shadow when write is desired
826 		 */
827 		*src_needs_copy = TRUE;
828 		return;
829 	}
830 	/*
831 	 * If the object has a pager, the pager wants to see all of the
832 	 * changes.  We need a copy-object for the changed pages.
833 	 *
834 	 * If there is a copy-object, and it is empty, no changes have been made
835 	 * to the object since the copy-object was made.  We can use the same
836  * copy-object.
837 	 */
838 
839 Retry1:
840 	old_copy = src_object->copy;
841 	if (old_copy != NULL) {
842 		/*
843 		 * Try to get the locks (out of order)
844 		 */
845 		if (!vm_object_lock_try(old_copy)) {
846 			vm_object_unlock(src_object);
847 
848 			/* should spin a bit here... */
849 			tsleep((caddr_t) old_copy, PVM, "cpylck", 1);
850 			vm_object_lock(src_object);
851 			goto Retry1;
852 		}
853 		if (old_copy->resident_page_count == 0 &&
854 		    old_copy->pager == NULL) {
855 			/*
856 			 * Return another reference to the existing
857 			 * copy-object.
858 			 */
859 			old_copy->ref_count++;
860 			vm_object_unlock(old_copy);
861 			vm_object_unlock(src_object);
862 			*dst_object = old_copy;
863 			*dst_offset = src_offset;
864 			*src_needs_copy = FALSE;
865 			return;
866 		}
867 		vm_object_unlock(old_copy);
868 	}
869 	vm_object_unlock(src_object);
870 
871 	/*
872 	 * If the object has a pager, the pager wants to see all of the
873 	 * changes.  We must make a copy-object and put the changed pages
874 	 * there.
875 	 *
876 	 * The copy-object is always made large enough to completely shadow the
877 	 * original object, since it may have several users who want to shadow
878 	 * the original object at different points.
879 	 */
880 
881 	new_copy = vm_object_allocate(src_object->size);
882 
883 Retry2:
884 	vm_object_lock(src_object);
885 	/*
886 	 * Copy object may have changed while we were unlocked
887 	 */
888 	old_copy = src_object->copy;
889 	if (old_copy != NULL) {
890 		/*
891 		 * Try to get the locks (out of order)
892 		 */
893 		if (!vm_object_lock_try(old_copy)) {
894 			vm_object_unlock(src_object);
895 			tsleep((caddr_t) old_copy, PVM, "cpylck", 1);
896 			goto Retry2;
897 		}
898 		/*
899 		 * Consistency check
900 		 */
901 		if (old_copy->shadow != src_object ||
902 		    old_copy->shadow_offset != (vm_offset_t) 0)
903 			panic("vm_object_copy: copy/shadow inconsistency");
904 
905 		/*
906 		 * Make the old copy-object shadow the new one. It will
907 		 * receive no more pages from the original object.
908 		 */
909 
910 		src_object->ref_count--;	/* remove ref. from old_copy */
911 		if (old_copy->shadow)
912 			TAILQ_REMOVE(&old_copy->shadow->reverse_shadow_head, old_copy, reverse_shadow_list);
913 		old_copy->shadow = new_copy;
914 		TAILQ_INSERT_TAIL(&old_copy->shadow->reverse_shadow_head, old_copy, reverse_shadow_list);
915 		new_copy->ref_count++;	/* locking not needed - we have the
916 					 * only pointer */
917 		vm_object_unlock(old_copy);	/* done with old_copy */
918 	}
919 	new_start = (vm_offset_t) 0;	/* always shadow original at 0 */
920 	new_end = (vm_offset_t) new_copy->size;	/* for the whole object */
921 
922 	/*
923 	 * Point the new copy at the existing object.
924 	 */
925 
926 	new_copy->shadow = src_object;
927 	TAILQ_INSERT_TAIL(&new_copy->shadow->reverse_shadow_head, new_copy, reverse_shadow_list);
928 	new_copy->shadow_offset = new_start;
929 	src_object->ref_count++;
930 	src_object->copy = new_copy;
931 
932 	/*
933 	 * Mark all the affected pages of the existing object copy-on-write.
934 	 */
935 	for (p = src_object->memq.tqh_first; p != NULL; p = p->listq.tqe_next)
936 		if ((new_start <= p->offset) && (p->offset < new_end))
937 			p->flags |= PG_COPYONWRITE;
938 
939 	vm_object_unlock(src_object);
940 
941 	*dst_object = new_copy;
942 	*dst_offset = src_offset - new_start;
943 	*src_needs_copy = FALSE;
944 }
945 
946 /*
947  *	vm_object_shadow:
948  *
949  *	Create a new object which is backed by the
950  *	specified existing object range.  The source
951  *	object reference is deallocated.
952  *
953  *	The new object and offset into that object
954  *	are returned in the source parameters.
955  */
956 
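/*
 *	Illustrative use only (the real callers live in the vm_map code):
 *
 *		vm_object_shadow(&obj, &off, size);
 *
 *	where obj/off describe the region to be copy-on-write protected.
 *	On return they name the new, initially empty shadow object; the
 *	old object has become its backing object.
 */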
957 void
958 vm_object_shadow(object, offset, length)
959 	vm_object_t *object;	/* IN/OUT */
960 	vm_offset_t *offset;	/* IN/OUT */
961 	vm_size_t length;
962 {
963 	register vm_object_t source;
964 	register vm_object_t result;
965 
966 	source = *object;
967 
968 	/*
969 	 * Allocate a new object with the given length
970 	 */
971 
972 	if ((result = vm_object_allocate(length)) == NULL)
973 		panic("vm_object_shadow: no object for shadowing");
974 
975 	/*
976 	 * The new object shadows the source object, adding a reference to it.
977 	 * Our caller changes his reference to point to the new object,
978 	 * removing a reference to the source object.  Net result: no change
979 	 * of reference count.
980 	 */
981 	result->shadow = source;
982 	if (source)
983 		TAILQ_INSERT_TAIL(&result->shadow->reverse_shadow_head, result, reverse_shadow_list);
984 
985 	/*
986 	 * Store the offset into the source object, and fix up the offset into
987 	 * the new object.
988 	 */
989 
990 	result->shadow_offset = *offset;
991 
992 	/*
993 	 * Return the new object and offset.
994 	 */
995 
996 	*offset = 0;
997 	*object = result;
998 }
999 
1000 /*
1001  *	Set the specified object's pager to the specified pager.
1002  */
1003 
1004 void
1005 vm_object_setpager(object, pager, paging_offset,
1006     read_only)
1007 	vm_object_t object;
1008 	vm_pager_t pager;
1009 	vm_offset_t paging_offset;
1010 	boolean_t read_only;
1011 {
1012 	vm_object_lock(object);	/* XXX ? */
1013 	if (object->pager && object->pager != pager) {
1014 		panic("vm_object_setpager: pager already allocated");
1015 	}
1016 	object->pager = pager;
1017 	object->paging_offset = paging_offset;
1018 	vm_object_unlock(object);	/* XXX ? */
1019 }
1020 
1021 /*
1022  *	vm_object_hash hashes the pager pointer.
1023  */
1024 
1025 #define vm_object_hash(pager) \
1026 	(((unsigned)pager >> 5)%VM_OBJECT_HASH_COUNT)
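/*
 * (The shift presumably discards the pager pointer's low, mostly-constant
 * alignment bits before taking the modulus over the bucket count.)
 */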
1027 
1028 /*
1029  *	vm_object_lookup looks in the object cache for an object with the
1030  *	specified pager.
1031  */
1032 
1033 vm_object_t
1034 vm_object_lookup(pager)
1035 	vm_pager_t pager;
1036 {
1037 	register vm_object_hash_entry_t entry;
1038 	vm_object_t object;
1039 
1040 	cnt.v_lookups++;
1041 	vm_object_cache_lock();
1042 
1043 	for (entry = vm_object_hashtable[vm_object_hash(pager)].tqh_first;
1044 	    entry != NULL;
1045 	    entry = entry->hash_links.tqe_next) {
1046 		object = entry->object;
1047 		if (object->pager == pager) {
1048 			vm_object_lock(object);
1049 			if (object->ref_count == 0) {
1050 				TAILQ_REMOVE(&vm_object_cached_list, object,
1051 				    cached_list);
1052 				vm_object_cached--;
1053 			}
1054 			object->ref_count++;
1055 			vm_object_unlock(object);
1056 			vm_object_cache_unlock();
1057 			cnt.v_hits++;
1058 			return (object);
1059 		}
1060 	}
1061 
1062 	vm_object_cache_unlock();
1063 	return (NULL);
1064 }
1065 
1066 /*
1067  *	vm_object_enter enters the specified object/pager pair into
1068  *	the hash table.
1069  */
1070 
1071 void
1072 vm_object_enter(object, pager)
1073 	vm_object_t object;
1074 	vm_pager_t pager;
1075 {
1076 	struct vm_object_hash_head *bucket;
1077 	register vm_object_hash_entry_t entry;
1078 
1079 	/*
1080 	 * We don't cache null objects, and we can't cache objects with the
1081 	 * null pager.
1082 	 */
1083 
1084 	if (object == NULL)
1085 		return;
1086 	if (pager == NULL)
1087 		return;
1088 
1089 	bucket = &vm_object_hashtable[vm_object_hash(pager)];
1090 	entry = (vm_object_hash_entry_t)
1091 	    malloc((u_long) sizeof *entry, M_VMOBJHASH, M_WAITOK);
1092 	entry->object = object;
1093 	object->flags |= OBJ_CANPERSIST;
1094 
1095 	vm_object_cache_lock();
1096 	TAILQ_INSERT_TAIL(bucket, entry, hash_links);
1097 	vm_object_cache_unlock();
1098 }
1099 
1100 /*
1101  *	vm_object_remove:
1102  *
1103  *	Remove the pager from the hash table.
1104  *	Note:  This assumes that the object cache
1105  *	is locked.  XXX this should be fixed
1106  *	by reorganizing vm_object_deallocate.
1107  */
1108 void
1109 vm_object_remove(pager)
1110 	register vm_pager_t pager;
1111 {
1112 	struct vm_object_hash_head *bucket;
1113 	register vm_object_hash_entry_t entry;
1114 	register vm_object_t object;
1115 
1116 	bucket = &vm_object_hashtable[vm_object_hash(pager)];
1117 
1118 	for (entry = bucket->tqh_first;
1119 	    entry != NULL;
1120 	    entry = entry->hash_links.tqe_next) {
1121 		object = entry->object;
1122 		if (object->pager == pager) {
1123 			TAILQ_REMOVE(bucket, entry, hash_links);
1124 			free((caddr_t) entry, M_VMOBJHASH);
1125 			break;
1126 		}
1127 	}
1128 }
1129 
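/*
 *	vm_object_rcollapse:
 *
 *	Limited collapse of "sobject" into the object shadowing it, called
 *	from vm_object_deallocate() when a single reference (normally the
 *	shadow reference held by "object") remains on "sobject".  Backing
 *	pages that "object" already covers, or that fall outside it, are
 *	freed together with their swap space; the rest are moved up into
 *	"object" where it is safe to do so.
 */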
1130 static void
1131 vm_object_rcollapse(object, sobject)
1132 	register vm_object_t object, sobject;
1133 {
1134 	register vm_object_t backing_object;
1135 	register vm_offset_t backing_offset, new_offset;
1136 	register vm_page_t p, pp;
1137 	register vm_size_t size;
1138 	int s;
1139 
1140 	if (!object)
1141 		return;
1142 	backing_object = object->shadow;
1143 	if (backing_object != sobject) {
1144 		printf("backing obj != sobject!!!\n");
1145 		return;
1146 	}
1147 	if (!backing_object)
1148 		return;
1149 	if ((backing_object->flags & OBJ_INTERNAL) == 0)
1150 		return;
1151 	if (backing_object->shadow != NULL &&
1152 	    backing_object->shadow->copy == backing_object)
1153 		return;
1154 	if (backing_object->ref_count != 1)
1155 		return;
1156 
1157 	object->ref_count += 2;
1158 	backing_object->ref_count += 2;
1159 
1160 	backing_offset = object->shadow_offset;
1161 	size = object->size;
1162 
1163 again:
1164 	s = splbio();
1165 	/* XXX what about object->paging_in_progress? */
1166 	while (backing_object->paging_in_progress) {
1167 		tsleep(backing_object, PVM, "rcolpp", 0);
1168 	}
1169 	splx(s);
1170 
1171 	p = backing_object->memq.tqh_first;
1172 	while (p) {
1173 		vm_page_t next;
1174 
1175 		next = p->listq.tqe_next;
1176 
1177 		if ((p->flags & (PG_BUSY | PG_FICTITIOUS | PG_CACHE)) ||
1178 		    !p->valid || p->hold_count || p->wire_count || p->busy || p->bmapped) {
1179 			p = next;
1180 			continue;
1181 		}
1182 		pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
1183 		new_offset = (p->offset - backing_offset);
1184 		if (p->offset < backing_offset ||
1185 		    new_offset >= size) {
1186 			if (backing_object->pager)
1187 				swap_pager_freespace(backing_object->pager,
1188 				    backing_object->paging_offset + p->offset, PAGE_SIZE);
1189 			vm_page_lock_queues();
1190 			vm_page_free(p);
1191 			vm_page_unlock_queues();
1192 		} else {
1193 			pp = vm_page_lookup(object, new_offset);
1194 			if (pp != NULL ||
1195 			    (object->pager &&
1196 			    vm_pager_has_page(object->pager, object->paging_offset + new_offset))) {
1197 				if (backing_object->pager)
1198 					swap_pager_freespace(backing_object->pager,
1199 					    backing_object->paging_offset + p->offset, PAGE_SIZE);
1200 				vm_page_lock_queues();
1201 				vm_page_free(p);
1202 				vm_page_unlock_queues();
1203 			} else {
1204 				if (!backing_object->pager ||
1205 				    !vm_pager_has_page(backing_object->pager, backing_object->paging_offset + p->offset))
1206 					vm_page_rename(p, object, new_offset);
1207 			}
1208 		}
1209 		p = next;
1210 	}
1211 	backing_object->ref_count -= 2;
1212 	object->ref_count -= 2;
1213 }
1214 
1215 /*
1216  * This version of collapse allows the operation to occur earlier, even
1217  * while paging_in_progress is set on the object.  It is not a complete
1218  * collapse, but it should plug 99.9% of the remaining leaks.
1219  */
1220 static void
1221 vm_object_qcollapse(object)
1222 	register vm_object_t object;
1223 {
1224 	register vm_object_t backing_object;
1225 	register vm_offset_t backing_offset, new_offset;
1226 	register vm_page_t p, pp;
1227 	register vm_size_t size;
1228 
1229 	backing_object = object->shadow;
1230 	if (!backing_object)
1231 		return;
1232 	if ((backing_object->flags & OBJ_INTERNAL) == 0)
1233 		return;
1234 	if (backing_object->shadow != NULL &&
1235 	    backing_object->shadow->copy == backing_object)
1236 		return;
1237 	if (backing_object->ref_count != 1)
1238 		return;
1239 
1240 	backing_object->ref_count += 2;
1241 
1242 	backing_offset = object->shadow_offset;
1243 	size = object->size;
1244 	p = backing_object->memq.tqh_first;
1245 	while (p) {
1246 		vm_page_t next;
1247 
1248 		next = p->listq.tqe_next;
1249 		if ((p->flags & (PG_BUSY | PG_FICTITIOUS | PG_CACHE)) ||
1250 		    !p->valid || p->hold_count || p->wire_count || p->busy || p->bmapped) {
1251 			p = next;
1252 			continue;
1253 		}
1254 		pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
1255 		new_offset = (p->offset - backing_offset);
1256 		if (p->offset < backing_offset ||
1257 		    new_offset >= size) {
1258 			if (backing_object->pager)
1259 				swap_pager_freespace(backing_object->pager,
1260 				    backing_object->paging_offset + p->offset, PAGE_SIZE);
1261 			vm_page_lock_queues();
1262 			vm_page_free(p);
1263 			vm_page_unlock_queues();
1264 		} else {
1265 			pp = vm_page_lookup(object, new_offset);
1266 			if (pp != NULL || (object->pager && vm_pager_has_page(object->pager,
1267 				    object->paging_offset + new_offset))) {
1268 				if (backing_object->pager)
1269 					swap_pager_freespace(backing_object->pager,
1270 					    backing_object->paging_offset + p->offset, PAGE_SIZE);
1271 				vm_page_lock_queues();
1272 				vm_page_free(p);
1273 				vm_page_unlock_queues();
1274 			} else {
1275 				if (!backing_object->pager ||
1276 				    !vm_pager_has_page(backing_object->pager, backing_object->paging_offset + p->offset))
1277 					vm_page_rename(p, object, new_offset);
1278 			}
1279 		}
1280 		p = next;
1281 	}
1282 	backing_object->ref_count -= 2;
1283 }
1284 
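/*
 * Global switch: when cleared, vm_object_collapse() below returns
 * immediately without doing anything.
 */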
1285 boolean_t vm_object_collapse_allowed = TRUE;
1286 
1287 /*
1288  *	vm_object_collapse:
1289  *
1290  *	Collapse an object with the object backing it.
1291  *	Pages in the backing object are moved into the
1292  *	parent, and the backing object is deallocated.
1293  *
1294  *	Requires that the object be locked and the page
1295  *	queues be unlocked.
1296  *
1297  *	This routine has significant changes by John S. Dyson
1298  *	to fix some swap memory leaks.  18 Dec 93
1299  *
1300  */
1301 void
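/*
 * Outline of the two cases handled below (illustrative):
 *
 *	before:    object --> backing_object --> X
 *
 *	collapse:  backing_object has only our reference, so its pages and
 *	           pager are pulled up into object and it is freed.
 *	bypass:    object already covers every page of backing_object, so
 *	           object is simply re-pointed at X and one reference on
 *	           backing_object is dropped.
 *
 * Either way the loop retries with the new backing object.
 */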
1302 vm_object_collapse(object)
1303 	register vm_object_t object;
1304 
1305 {
1306 	register vm_object_t backing_object;
1307 	register vm_offset_t backing_offset;
1308 	register vm_size_t size;
1309 	register vm_offset_t new_offset;
1310 	register vm_page_t p, pp;
1311 
1312 	if (!vm_object_collapse_allowed)
1313 		return;
1314 
1315 	while (TRUE) {
1316 		/*
1317 		 * Verify that the conditions are right for collapse:
1318 		 *
1319 		 * The object exists and no pages in it are currently being paged
1320 		 * out.
1321 		 */
1322 		if (object == NULL)
1323 			return;
1324 
1325 		/*
1326 		 * Make sure there is a backing object.
1327 		 */
1328 		if ((backing_object = object->shadow) == NULL)
1329 			return;
1330 
1331 		if (object->paging_in_progress != 0) {
1332 			if (backing_object) {
1333 				if (vm_object_lock_try(backing_object)) {
1334 					vm_object_qcollapse(object);
1335 					vm_object_unlock(backing_object);
1336 				}
1337 			}
1338 			return;
1339 		}
1340 
1341 		vm_object_lock(backing_object);
1342 		/*
1343 		 * ... The backing object is not read_only, and no pages in
1344 		 * the backing object are currently being paged out. The
1345 		 * backing object is internal.
1346 		 */
1347 
1348 		if ((backing_object->flags & OBJ_INTERNAL) == 0 ||
1349 		    backing_object->paging_in_progress != 0) {
1350 			vm_object_unlock(backing_object);
1351 			vm_object_qcollapse(object);
1352 			return;
1353 		}
1354 		/*
1355 		 * The backing object can't be a copy-object: the
1356 		 * shadow_offset for the copy-object must stay as 0.
1357 		 * Furthermore (for the 'we have all the pages' case), if we
1358 		 * bypass backing_object and just shadow the next object in
1359 		 * the chain, old pages from that object would then have to be
1360 		 * copied BOTH into the (former) backing_object and into the
1361 		 * parent object.
1362 		 */
1363 		if (backing_object->shadow != NULL &&
1364 		    backing_object->shadow->copy == backing_object) {
1365 			vm_object_unlock(backing_object);
1366 			return;
1367 		}
1368 		/*
1369 		 * we can deal only with the swap pager
1370 		 */
1371 		if ((object->pager &&
1372 			object->pager->pg_type != PG_SWAP) ||
1373 		    (backing_object->pager &&
1374 			backing_object->pager->pg_type != PG_SWAP)) {
1375 			vm_object_unlock(backing_object);
1376 			return;
1377 		}
1378 		/*
1379 		 * We know that we can either collapse the backing object (if
1380 		 * the parent is the only reference to it) or (perhaps) remove
1381 		 * the parent's reference to it.
1382 		 */
1383 
1384 		backing_offset = object->shadow_offset;
1385 		size = object->size;
1386 
1387 		/*
1388 		 * If there is exactly one reference to the backing object, we
1389 		 * can collapse it into the parent.
1390 		 */
1391 
1392 		if (backing_object->ref_count == 1) {
1393 
1394 			/*
1395 			 * We can collapse the backing object.
1396 			 *
1397 			 * Move all in-memory pages from backing_object to the
1398 			 * parent.  Pages that have been paged out will be
1399 			 * overwritten by any of the parent's pages that
1400 			 * shadow them.
1401 			 */
1402 
1403 			while ((p = backing_object->memq.tqh_first) != 0) {
1404 
1405 				new_offset = (p->offset - backing_offset);
1406 
1407 				/*
1408 				 * If the parent has a page here, or if this
1409 				 * page falls outside the parent, dispose of
1410 				 * it.
1411 				 *
1412 				 * Otherwise, move it as planned.
1413 				 */
1414 
1415 				if (p->offset < backing_offset ||
1416 				    new_offset >= size) {
1417 					vm_page_lock_queues();
1418 					pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
1419 					PAGE_WAKEUP(p);
1420 					vm_page_free(p);
1421 					vm_page_unlock_queues();
1422 				} else {
1423 					pp = vm_page_lookup(object, new_offset);
1424 					if (pp != NULL || (object->pager && vm_pager_has_page(object->pager,
1425 					    object->paging_offset + new_offset))) {
1426 						vm_page_lock_queues();
1427 						pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
1428 						PAGE_WAKEUP(p);
1429 						vm_page_free(p);
1430 						vm_page_unlock_queues();
1431 					} else {
1432 						vm_page_rename(p, object, new_offset);
1433 					}
1434 				}
1435 			}
1436 
1437 			/*
1438 			 * Move the pager from backing_object to object.
1439 			 */
1440 
1441 			if (backing_object->pager) {
1442 				backing_object->paging_in_progress++;
1443 				if (object->pager) {
1444 					vm_pager_t bopager;
1445 
1446 					object->paging_in_progress++;
1447 					/*
1448 					 * copy shadow object pages into ours
1449 					 * and destroy unneeded pages in
1450 					 * shadow object.
1451 					 */
1452 					bopager = backing_object->pager;
1453 					vm_object_remove(backing_object->pager);
1454 					backing_object->pager = NULL;
1455 					swap_pager_copy(
1456 					    bopager, backing_object->paging_offset,
1457 					    object->pager, object->paging_offset,
1458 					    object->shadow_offset);
1459 					object->paging_in_progress--;
1460 					if (object->paging_in_progress == 0)
1461 						wakeup((caddr_t) object);
1462 				} else {
1463 					object->paging_in_progress++;
1464 					/*
1465 					 * grab the shadow object's pager
1466 					 */
1467 					object->pager = backing_object->pager;
1468 					object->paging_offset = backing_object->paging_offset + backing_offset;
1469 					vm_object_remove(backing_object->pager);
1470 					backing_object->pager = NULL;
1471 					/*
1472 					 * free unnecessary blocks
1473 					 */
1474 					swap_pager_freespace(object->pager, 0, object->paging_offset);
1475 					object->paging_in_progress--;
1476 					if (object->paging_in_progress == 0)
1477 						wakeup((caddr_t) object);
1478 				}
1479 				backing_object->paging_in_progress--;
1480 				if (backing_object->paging_in_progress == 0)
1481 					wakeup((caddr_t) backing_object);
1482 			}
1483 			/*
1484 			 * Object now shadows whatever backing_object did.
1485 			 * Note that the reference to backing_object->shadow
1486 			 * moves from within backing_object to within object.
1487 			 */
1488 
1489 			TAILQ_REMOVE(&object->shadow->reverse_shadow_head, object,
1490 			    reverse_shadow_list);
1491 			if (backing_object->shadow)
1492 				TAILQ_REMOVE(&backing_object->shadow->reverse_shadow_head,
1493 				    backing_object, reverse_shadow_list);
1494 			object->shadow = backing_object->shadow;
1495 			if (object->shadow)
1496 				TAILQ_INSERT_TAIL(&object->shadow->reverse_shadow_head,
1497 				    object, reverse_shadow_list);
1498 
1499 			object->shadow_offset += backing_object->shadow_offset;
1500 			if (object->shadow != NULL &&
1501 			    object->shadow->copy != NULL) {
1502 				panic("vm_object_collapse: we collapsed a copy-object!");
1503 			}
1504 			/*
1505 			 * Discard backing_object.
1506 			 *
1507 			 * Since the backing object has no pages, no pager left,
1508 			 * and no object references within it, all that is
1509 			 * necessary is to dispose of it.
1510 			 */
1511 
1512 			vm_object_unlock(backing_object);
1513 
1514 			simple_lock(&vm_object_list_lock);
1515 			TAILQ_REMOVE(&vm_object_list, backing_object,
1516 			    object_list);
1517 			vm_object_count--;
1518 			simple_unlock(&vm_object_list_lock);
1519 
1520 			free((caddr_t) backing_object, M_VMOBJ);
1521 
1522 			object_collapses++;
1523 		} else {
1524 			/*
1525 			 * If all of the pages in the backing object are
1526 			 * shadowed by the parent object, the parent object no
1527 			 * longer has to shadow the backing object; it can
1528 			 * shadow the next one in the chain.
1529 			 *
1530 			 * The backing object must not be paged out - we'd have
1531 			 * to check all of the paged-out pages, as well.
1532 			 */
1533 
1534 			if (backing_object->pager != NULL) {
1535 				vm_object_unlock(backing_object);
1536 				return;
1537 			}
1538 			/*
1539 			 * Should have a check for a 'small' number of pages
1540 			 * here.
1541 			 */
1542 
1543 			for (p = backing_object->memq.tqh_first; p; p = p->listq.tqe_next) {
1544 				new_offset = (p->offset - backing_offset);
1545 
1546 				/*
1547 				 * If the parent has a page here, or if this
1548 				 * page falls outside the parent, keep going.
1549 				 *
1550 				 * Otherwise, the backing_object must be left in
1551 				 * the chain.
1552 				 */
1553 
1554 				if (p->offset >= backing_offset &&
1555 				    new_offset <= size &&
1556 				    ((pp = vm_page_lookup(object, new_offset)) == NULL ||
1557 					!pp->valid) &&
1558 				    (!object->pager || !vm_pager_has_page(object->pager, object->paging_offset + new_offset))) {
1559 					/*
1560 					 * Page still needed. Can't go any
1561 					 * further.
1562 					 */
1563 					vm_object_unlock(backing_object);
1564 					return;
1565 				}
1566 			}
1567 
1568 			/*
1569 			 * Make the parent shadow the next object in the
1570 			 * chain.  Deallocating backing_object will not remove
1571 			 * it, since its reference count is at least 2.
1572 			 */
1573 
1574 			TAILQ_REMOVE(&object->shadow->reverse_shadow_head,
1575 			    object, reverse_shadow_list);
1576 			vm_object_reference(object->shadow = backing_object->shadow);
1577 			if (object->shadow)
1578 				TAILQ_INSERT_TAIL(&object->shadow->reverse_shadow_head,
1579 				    object, reverse_shadow_list);
1580 			object->shadow_offset += backing_object->shadow_offset;
1581 
1582 			/*
1583 			 * Backing object might have had a copy pointer to us.
1584 			 * If it did, clear it.
1585 			 */
1586 			if (backing_object->copy == object) {
1587 				backing_object->copy = NULL;
1588 			}
1589 			/*
1590 			 * Drop the reference count on backing_object. Since
1591 			 * its ref_count was at least 2, it will not vanish;
1592 			 * so we don't need to call vm_object_deallocate.
1593 			 */
1594 			if (backing_object->ref_count == 1)
1595 				printf("should have called obj deallocate\n");
1596 			backing_object->ref_count--;
1597 			vm_object_unlock(backing_object);
1598 
1599 			object_bypasses++;
1600 
1601 		}
1602 
1603 		/*
1604 		 * Try again with this object's new backing object.
1605 		 */
1606 	}
1607 }
1608 
1609 /*
1610  *	vm_object_page_remove: [internal]
1611  *
1612  *	Removes all physical pages in the specified
1613  *	object range from the object's list of pages.
1614  *
1615  *	The object must be locked.
1616  */
1617 void
1618 vm_object_page_remove(object, start, end)
1619 	register vm_object_t object;
1620 	register vm_offset_t start;
1621 	register vm_offset_t end;
1622 {
1623 	register vm_page_t p, next;
1624 	vm_offset_t size;
1625 	int s;
1626 
1627 	if (object == NULL)
1628 		return;
1629 
1630 	object->paging_in_progress++;
1631 	start = trunc_page(start);
1632 	end = round_page(end);
1633 again:
1634 	size = end - start;
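	/*
	 * For large ranges it is cheaper to walk the object's page list
	 * once; for small ranges, to look up each page by offset.
	 */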
1635 	if (size > 4 * PAGE_SIZE || size >= object->size / 4) {
1636 		for (p = object->memq.tqh_first; p != NULL; p = next) {
1637 			next = p->listq.tqe_next;
1638 			if ((start <= p->offset) && (p->offset < end)) {
1639 				s = splhigh();
1640 				if (p->bmapped) {
1641 					splx(s);
1642 					continue;
1643 				}
1644 				if ((p->flags & PG_BUSY) || p->busy) {
1645 					p->flags |= PG_WANTED;
1646 					tsleep((caddr_t) p, PVM, "vmopar", 0);
1647 					splx(s);
1648 					goto again;
1649 				}
1650 				splx(s);
1651 				pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
1652 				vm_page_lock_queues();
1653 				PAGE_WAKEUP(p);
1654 				vm_page_free(p);
1655 				vm_page_unlock_queues();
1656 			}
1657 		}
1658 	} else {
1659 		while (size > 0) {
1660 			while ((p = vm_page_lookup(object, start)) != 0) {
1661 				s = splhigh();
1662 				if (p->bmapped) {
1663 					splx(s);
1664 					break;
1665 				}
1666 				if ((p->flags & PG_BUSY) || p->busy) {
1667 					p->flags |= PG_WANTED;
1668 					tsleep((caddr_t) p, PVM, "vmopar", 0);
1669 					splx(s);
1670 					goto again;
1671 				}
1672 				splx(s);
1673 				pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
1674 				vm_page_lock_queues();
1675 				PAGE_WAKEUP(p);
1676 				vm_page_free(p);
1677 				vm_page_unlock_queues();
1678 			}
1679 			start += PAGE_SIZE;
1680 			size -= PAGE_SIZE;
1681 		}
1682 	}
1683 	--object->paging_in_progress;
1684 	if (object->paging_in_progress == 0)
1685 		wakeup((caddr_t) object);
1686 }
1687 
1688 /*
1689  *	Routine:	vm_object_coalesce
1690  *	Function:	Coalesces two objects backing up adjoining
1691  *			regions of memory into a single object.
1692  *
1693  *	returns TRUE if objects were combined.
1694  *
1695  *	NOTE:	Only works at the moment if the second object is NULL -
1696  *		if it's not, which object do we lock first?
1697  *
1698  *	Parameters:
1699  *		prev_object	First object to coalesce
1700  *		prev_offset	Offset into prev_object
1701  *		next_object	Second object to coalesce
1702  *		next_offset	Offset into next_object
1703  *
1704  *		prev_size	Size of reference to prev_object
1705  *		next_size	Size of reference to next_object
1706  *
1707  *	Conditions:
1708  *	The object must *not* be locked.
1709  */
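/*
 * (Illustrative note: the expected caller is the vm_map insertion path,
 * which uses this to grow the object behind an existing anonymous
 * mapping instead of allocating a new one.)
 */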
1710 boolean_t
1711 vm_object_coalesce(prev_object, next_object,
1712     prev_offset, next_offset,
1713     prev_size, next_size)
1714 	register vm_object_t prev_object;
1715 	vm_object_t next_object;
1716 	vm_offset_t prev_offset, next_offset;
1717 	vm_size_t prev_size, next_size;
1718 {
1719 	vm_size_t newsize;
1720 
1721 	if (next_object != NULL) {
1722 		return (FALSE);
1723 	}
1724 	if (prev_object == NULL) {
1725 		return (TRUE);
1726 	}
1727 	vm_object_lock(prev_object);
1728 
1729 	/*
1730 	 * Try to collapse the object first
1731 	 */
1732 	vm_object_collapse(prev_object);
1733 
1734 	/*
1735 	 * Can't coalesce if the object has more than one reference, has been
1736 	 * paged out, shadows another object, or has a copy elsewhere (any of
1737 	 * which mean that pages not mapped to prev_entry may be in use anyway).
1738 	 */
1739 
1740 	if (prev_object->ref_count > 1 ||
1741 	    prev_object->pager != NULL ||
1742 	    prev_object->shadow != NULL ||
1743 	    prev_object->copy != NULL) {
1744 		vm_object_unlock(prev_object);
1745 		return (FALSE);
1746 	}
1747 	/*
1748 	 * Remove any pages that may still be in the object from a previous
1749 	 * deallocation.
1750 	 */
1751 
1752 	vm_object_page_remove(prev_object,
1753 	    prev_offset + prev_size,
1754 	    prev_offset + prev_size + next_size);
1755 
1756 	/*
1757 	 * Extend the object if necessary.
1758 	 */
1759 	newsize = prev_offset + prev_size + next_size;
1760 	if (newsize > prev_object->size)
1761 		prev_object->size = newsize;
1762 
1763 	vm_object_unlock(prev_object);
1764 	return (TRUE);
1765 }
1766 
1767 /*
1768  * Returns the page at the given offset, searching down the shadow chain if needed.
1769  */
1770 
1771 vm_page_t
1772 vm_object_page_lookup(object, offset)
1773 	vm_object_t object;
1774 	vm_offset_t offset;
1775 {
1776 	vm_page_t m;
1777 
1778 	if (!(m = vm_page_lookup(object, offset))) {
1779 		if (!object->shadow)
1780 			return 0;
1781 		else
1782 			return vm_object_page_lookup(object->shadow, offset + object->shadow_offset);
1783 	}
1784 	return m;
1785 }
1786 
1787 #define DEBUG
1788 #if defined(DEBUG) || defined(DDB)
1789 /*
1790  *	vm_object_print:	[ debug ]
1791  */
1792 void
1793 vm_object_print(object, full)
1794 	vm_object_t object;
1795 	boolean_t full;
1796 {
1797 	register vm_page_t p;
1798 	extern int indent;
1799 
1800 	register int count;
1801 
1802 	if (object == NULL)
1803 		return;
1804 
1805 	iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ",
1806 	    (int) object, (int) object->size,
1807 	    object->resident_page_count, object->ref_count);
1808 	printf("pager=0x%x+0x%x, shadow=(0x%x)+0x%x\n",
1809 	    (int) object->pager, (int) object->paging_offset,
1810 	    (int) object->shadow, (int) object->shadow_offset);
1811 	printf("cache: next=%p, prev=%p\n",
1812 	    object->cached_list.tqe_next, object->cached_list.tqe_prev);
1813 
1814 	if (!full)
1815 		return;
1816 
1817 	indent += 2;
1818 	count = 0;
1819 	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
1820 		if (count == 0)
1821 			iprintf("memory:=");
1822 		else if (count == 6) {
1823 			printf("\n");
1824 			iprintf(" ...");
1825 			count = 0;
1826 		} else
1827 			printf(",");
1828 		count++;
1829 
1830 		printf("(off=0x%lx,page=0x%lx)",
1831 		    (u_long) p->offset, (u_long) VM_PAGE_TO_PHYS(p));
1832 	}
1833 	if (count != 0)
1834 		printf("\n");
1835 	indent -= 2;
1836 }
1837 #endif				/* defined(DEBUG) || defined(DDB) */
1838