xref: /freebsd/sys/vm/vm_object.c (revision 8a129cae)
1 /*
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
37  *
38  *
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  *
64  * $Id: vm_object.c,v 1.5 1994/08/18 22:36:06 wollman Exp $
65  */
66 
67 /*
68  *	Virtual memory object module.
69  */
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/kernel.h>
74 #include <sys/proc.h>		/* for curproc, pageproc */
75 #include <sys/malloc.h>
76 
77 #include <vm/vm.h>
78 #include <vm/vm_page.h>
79 #include <vm/vm_pageout.h>
80 
81 static void _vm_object_allocate(vm_size_t, vm_object_t);
82 
83 /*
84  *	Virtual memory objects maintain the actual data
85  *	associated with allocated virtual memory.  A given
86  *	page of memory exists within exactly one object.
87  *
88  *	An object is only deallocated when all "references"
89  *	are given up.  Only one "reference" to a given
90  *	region of an object should be writeable.
91  *
92  *	Associated with each object is a list of all resident
93  *	memory pages belonging to that object; this list is
94  *	maintained by the "vm_page" module, and locked by the object's
95  *	lock.
96  *
97  *	Each object also records a "pager" routine which is
98  *	used to retrieve (and store) pages to the proper backing
99  *	storage.  In addition, objects may be backed by other
100  *	objects from which they were virtual-copied.
101  *
102  *	The only items within the object structure which are
103  *	modified after time of creation are:
104  *		reference count		locked by object's lock
105  *		pager routine		locked by object's lock
106  *
107  */
108 
109 
/*
 * Statically-allocated backing storage for the two permanent kernel
 * objects; vm_object_init() points kernel_object/kmem_object at these.
 */
struct vm_object	kernel_object_store;
struct vm_object	kmem_object_store;

extern int vm_cache_max;	/* max objects kept on the cached list */
#define	VM_OBJECT_HASH_COUNT	157	/* buckets in the pager->object hash */

/* Hash table mapping a pager to its persistent object (vm_object_lookup). */
struct vm_object_hash_head vm_object_hashtable[VM_OBJECT_HASH_COUNT];

/* Statistics maintained by vm_object_collapse(). */
long	object_collapses = 0;
long	object_bypasses  = 0;
120 
/*
 * _vm_object_allocate:
 *
 *	Initialize the caller-supplied object to describe 'size' bytes of
 *	anonymous memory, give the caller the sole reference, and link the
 *	object onto the global vm_object_list.
 *
 *	NOTE(review): several field assignments below are redundant after
 *	the bzero(); kept for explicitness.
 */
static void
_vm_object_allocate(size, object)
	vm_size_t		size;
	register vm_object_t	object;
{
	bzero(object, sizeof *object);
	TAILQ_INIT(&object->memq);
	vm_object_lock_init(object);
	object->ref_count = 1;		/* caller owns the only reference */
	object->resident_page_count = 0;
	object->size = size;
	object->flags = OBJ_INTERNAL;	/* vm_allocate_with_pager will reset */
	object->paging_in_progress = 0;
	object->copy = NULL;

	/*
	 *	Object starts out read-write, with no pager.
	 */

	object->pager = NULL;
	object->paging_offset = 0;
	object->shadow = NULL;
	object->shadow_offset = (vm_offset_t) 0;

	/*
	 *	Add to the global object list; the whole extent counts as
	 *	zero-fill-on-demand pages.
	 */
	simple_lock(&vm_object_list_lock);
	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	vm_object_count++;
	cnt.v_nzfod += atop(size);
	simple_unlock(&vm_object_list_lock);
}
151 
152 /*
153  *	vm_object_init:
154  *
155  *	Initialize the VM objects module.
156  */
157 void
158 vm_object_init(vm_offset_t nothing)
159 {
160 	register int	i;
161 
162 	TAILQ_INIT(&vm_object_cached_list);
163 	TAILQ_INIT(&vm_object_list);
164 	vm_object_count = 0;
165 	simple_lock_init(&vm_cache_lock);
166 	simple_lock_init(&vm_object_list_lock);
167 
168 	for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
169 		TAILQ_INIT(&vm_object_hashtable[i]);
170 
171 	kernel_object = &kernel_object_store;
172 	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
173 			kernel_object);
174 
175 	kmem_object = &kmem_object_store;
176 	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
177 			kmem_object);
178 }
179 
180 /*
181  *	vm_object_allocate:
182  *
183  *	Returns a new object with the given size.
184  */
185 
186 vm_object_t
187 vm_object_allocate(size)
188 	vm_size_t	size;
189 {
190 	register vm_object_t	result;
191 	int s;
192 
193 	result = (vm_object_t)
194 		malloc((u_long)sizeof *result, M_VMOBJ, M_WAITOK);
195 
196 
197 	_vm_object_allocate(size, result);
198 
199 	return(result);
200 }
201 
202 
203 /*
204  *	vm_object_reference:
205  *
206  *	Gets another reference to the given object.
207  */
208 inline void
209 vm_object_reference(object)
210 	register vm_object_t	object;
211 {
212 	if (object == NULL)
213 		return;
214 
215 	vm_object_lock(object);
216 	object->ref_count++;
217 	vm_object_unlock(object);
218 }
219 
220 /*
221  *	vm_object_deallocate:
222  *
223  *	Release a reference to the specified object,
224  *	gained either through a vm_object_allocate
225  *	or a vm_object_reference call.  When all references
226  *	are gone, storage associated with this object
227  *	may be relinquished.
228  *
229  *	No object may be locked.
230  */
231 void
232 vm_object_deallocate(object)
233 	vm_object_t	object;
234 {
235 	vm_object_t	temp;
236 
237 	while (object != NULL) {
238 
239 		/*
240 		 *	The cache holds a reference (uncounted) to
241 		 *	the object; we must lock it before removing
242 		 *	the object.
243 		 */
244 
245 		vm_object_cache_lock();
246 
247 		/*
248 		 *	Lose the reference
249 		 */
250 		vm_object_lock(object);
251 		if (--(object->ref_count) != 0) {
252 
253 			vm_object_unlock(object);
254 			/*
255 			 *	If there are still references, then
256 			 *	we are done.
257 			 */
258 			vm_object_cache_unlock();
259 			return;
260 		}
261 
262 		/*
263 		 *	See if this object can persist.  If so, enter
264 		 *	it in the cache, then deactivate all of its
265 		 *	pages.
266 		 */
267 
268 		if (object->flags & OBJ_CANPERSIST) {
269 
270 			TAILQ_INSERT_TAIL(&vm_object_cached_list, object,
271 				cached_list);
272 			vm_object_cached++;
273 			vm_object_cache_unlock();
274 
275 /*
276  * this code segment was removed because it kills performance with
277  * large -- repetively used binaries.  The functionality now resides
278  * in the pageout daemon
279  *			vm_object_deactivate_pages(object);
280  */
281 			vm_object_unlock(object);
282 
283 			vm_object_cache_trim();
284 			return;
285 		}
286 
287 		/*
288 		 *	Make sure no one can look us up now.
289 		 */
290 		vm_object_remove(object->pager);
291 		vm_object_cache_unlock();
292 
293 		temp = object->shadow;
294 		vm_object_terminate(object);
295 			/* unlocks and deallocates object */
296 		object = temp;
297 	}
298 }
299 
300 /*
301  *	vm_object_terminate actually destroys the specified object, freeing
302  *	up all previously used resources.
303  *
304  *	The object must be locked.
305  */
306 void
307 vm_object_terminate(object)
308 	register vm_object_t	object;
309 {
310 	register vm_page_t	p;
311 	vm_object_t		shadow_object;
312 	int s;
313 
314 	/*
315 	 *	Detach the object from its shadow if we are the shadow's
316 	 *	copy.
317 	 */
318 	if ((shadow_object = object->shadow) != NULL) {
319 		vm_object_lock(shadow_object);
320 		if (shadow_object->copy == object)
321 			shadow_object->copy = NULL;
322 /*
323 		else if (shadow_object->copy != NULL)
324 			panic("vm_object_terminate: copy/shadow inconsistency");
325 */
326 		vm_object_unlock(shadow_object);
327 	}
328 
329 	/*
330 	 *	Wait until the pageout daemon is through
331 	 *	with the object.
332 	 */
333 
334 	while (object->paging_in_progress) {
335 		vm_object_sleep((int)object, object, FALSE);
336 		vm_object_lock(object);
337 	}
338 
339 	/*
340 	 *	While the paging system is locked,
341 	 *	pull the object's pages off the active
342 	 *	and inactive queues.  This keeps the
343 	 *	pageout daemon from playing with them
344 	 *	during vm_pager_deallocate.
345 	 *
346 	 *	We can't free the pages yet, because the
347 	 *	object's pager may have to write them out
348 	 *	before deallocating the paging space.
349 	 */
350 
351 	for( p = object->memq.tqh_first; p; p=p->listq.tqe_next) {
352 		VM_PAGE_CHECK(p);
353 
354 		vm_page_lock_queues();
355 		s = splhigh();
356 		if (p->flags & PG_ACTIVE) {
357 			TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
358 			p->flags &= ~PG_ACTIVE;
359 			cnt.v_active_count--;
360 		}
361 
362 		if (p->flags & PG_INACTIVE) {
363 			TAILQ_REMOVE(&vm_page_queue_inactive, p, pageq);
364 			p->flags &= ~PG_INACTIVE;
365 			cnt.v_inactive_count--;
366 		}
367 		splx(s);
368 		vm_page_unlock_queues();
369 	}
370 
371 	vm_object_unlock(object);
372 
373 	if (object->paging_in_progress != 0)
374 		panic("vm_object_deallocate: pageout in progress");
375 
376 	/*
377 	 *	Clean and free the pages, as appropriate.
378 	 *	All references to the object are gone,
379 	 *	so we don't need to lock it.
380 	 */
381 
382 	if ((object->flags & OBJ_INTERNAL) == 0) {
383 		vm_object_lock(object);
384 		(void) vm_object_page_clean(object, 0, 0, TRUE, TRUE);
385 		vm_object_unlock(object);
386 	}
387 
388 	/*
389 	 * Now free the pages.
390 	 * For internal objects, this also removes them from paging queues.
391 	 */
392 	while ((p = object->memq.tqh_first) != NULL) {
393 		VM_PAGE_CHECK(p);
394 		vm_page_lock_queues();
395 		vm_page_free(p);
396 		cnt.v_pfree++;
397 		vm_page_unlock_queues();
398 	}
399 
400 	/*
401 	 *	Let the pager know object is dead.
402 	 */
403 
404 	if (object->pager != NULL)
405 		vm_pager_deallocate(object->pager);
406 
407 
408 	simple_lock(&vm_object_list_lock);
409 	TAILQ_REMOVE(&vm_object_list, object, object_list);
410 	vm_object_count--;
411 	simple_unlock(&vm_object_list_lock);
412 
413 	/*
414 	 *	Free the space for the object.
415 	 */
416 
417 	free((caddr_t)object, M_VMOBJ);
418 }
419 
420 /*
421  *	vm_object_page_clean
422  *
423  *	Clean all dirty pages in the specified range of object.
424  *	Leaves page on whatever queue it is currently on.
425  *
426  *	Odd semantics: if start == end, we clean everything.
427  *
428  *	The object must be locked.
429  */
430 #if 1
431 boolean_t
432 vm_object_page_clean(object, start, end, syncio, de_queue)
433 	register vm_object_t	object;
434 	register vm_offset_t	start;
435 	register vm_offset_t	end;
436 	boolean_t		syncio;
437 	boolean_t		de_queue;
438 {
439 	register vm_page_t	p, nextp;
440 	int s;
441 	int size;
442 
443 	if (object->pager == NULL)
444 		return 1;
445 
446 	if (start != end) {
447 		start = trunc_page(start);
448 		end = round_page(end);
449 	}
450 	size = end - start;
451 
452 again:
453 	/*
454 	 * Wait until the pageout daemon is through with the object.
455 	 */
456 	while (object->paging_in_progress) {
457 		vm_object_sleep((int)object, object, FALSE);
458 	}
459 
460 	nextp = object->memq.tqh_first;
461 	while ( (p = nextp) && ((start == end) || (size != 0) ) ) {
462 		nextp = p->listq.tqe_next;
463 		if (start == end || (p->offset >= start && p->offset < end)) {
464 			if (p->flags & PG_BUSY)
465 				continue;
466 
467 			size -= PAGE_SIZE;
468 
469 			if ((p->flags & PG_CLEAN)
470 				 && pmap_is_modified(VM_PAGE_TO_PHYS(p)))
471 				p->flags &= ~PG_CLEAN;
472 
473 			if ((p->flags & PG_CLEAN) == 0) {
474 				vm_pageout_clean(p,VM_PAGEOUT_FORCE);
475 				goto again;
476 			}
477 		}
478 	}
479 	wakeup((caddr_t)object);
480 	return 1;
481 }
482 #endif
483 /*
484  *	vm_object_page_clean
485  *
486  *	Clean all dirty pages in the specified range of object.
487  *	If syncio is TRUE, page cleaning is done synchronously.
488  *	If de_queue is TRUE, pages are removed from any paging queue
489  *	they were on, otherwise they are left on whatever queue they
490  *	were on before the cleaning operation began.
491  *
492  *	Odd semantics: if start == end, we clean everything.
493  *
494  *	The object must be locked.
495  *
496  *	Returns TRUE if all was well, FALSE if there was a pager error
497  *	somewhere.  We attempt to clean (and dequeue) all pages regardless
498  *	of where an error occurs.
499  */
500 #if 0
501 boolean_t
502 vm_object_page_clean(object, start, end, syncio, de_queue)
503 	register vm_object_t	object;
504 	register vm_offset_t	start;
505 	register vm_offset_t	end;
506 	boolean_t		syncio;
507 	boolean_t		de_queue;
508 {
509 	register vm_page_t	p;
510 	int onqueue;
511 	boolean_t noerror = TRUE;
512 
513 	if (object == NULL)
514 		return (TRUE);
515 
516 	/*
517 	 * If it is an internal object and there is no pager, attempt to
518 	 * allocate one.  Note that vm_object_collapse may relocate one
519 	 * from a collapsed object so we must recheck afterward.
520 	 */
521 	if ((object->flags & OBJ_INTERNAL) && object->pager == NULL) {
522 		vm_object_collapse(object);
523 		if (object->pager == NULL) {
524 			vm_pager_t pager;
525 
526 			vm_object_unlock(object);
527 			pager = vm_pager_allocate(PG_DFLT, (caddr_t)0,
528 						  object->size, VM_PROT_ALL,
529 						  (vm_offset_t)0);
530 			if (pager)
531 				vm_object_setpager(object, pager, 0, FALSE);
532 			vm_object_lock(object);
533 		}
534 	}
535 	if (object->pager == NULL)
536 		return (FALSE);
537 
538 again:
539 	/*
540 	 * Wait until the pageout daemon is through with the object.
541 	 */
542 	while (object->paging_in_progress) {
543 		vm_object_sleep((int)object, object, FALSE);
544 		vm_object_lock(object);
545 	}
546 	/*
547 	 * Loop through the object page list cleaning as necessary.
548 	 */
549 	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
550 		onqueue = 0;
551 		if ((start == end || p->offset >= start && p->offset < end) &&
552 		    !(p->flags & PG_FICTITIOUS)) {
553 			if ((p->flags & PG_CLEAN) &&
554 			    pmap_is_modified(VM_PAGE_TO_PHYS(p)))
555 				p->flags &= ~PG_CLEAN;
556 			/*
557 			 * Remove the page from any paging queue.
558 			 * This needs to be done if either we have been
559 			 * explicitly asked to do so or it is about to
560 			 * be cleaned (see comment below).
561 			 */
562 			if (de_queue || !(p->flags & PG_CLEAN)) {
563 				vm_page_lock_queues();
564 				if (p->flags & PG_ACTIVE) {
565 					TAILQ_REMOVE(&vm_page_queue_active,
566 						     p, pageq);
567 					p->flags &= ~PG_ACTIVE;
568 					cnt.v_active_count--;
569 					onqueue = 1;
570 				} else if (p->flags & PG_INACTIVE) {
571 					TAILQ_REMOVE(&vm_page_queue_inactive,
572 						     p, pageq);
573 					p->flags &= ~PG_INACTIVE;
574 					cnt.v_inactive_count--;
575 					onqueue = -1;
576 				} else
577 					onqueue = 0;
578 				vm_page_unlock_queues();
579 			}
580 			/*
581 			 * To ensure the state of the page doesn't change
582 			 * during the clean operation we do two things.
583 			 * First we set the busy bit and write-protect all
584 			 * mappings to ensure that write accesses to the
585 			 * page block (in vm_fault).  Second, we remove
586 			 * the page from any paging queue to foil the
587 			 * pageout daemon (vm_pageout_scan).
588 			 */
589 			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ);
590 			if (!(p->flags & PG_CLEAN)) {
591 				p->flags |= PG_BUSY;
592 				object->paging_in_progress++;
593 				vm_object_unlock(object);
594 				/*
595 				 * XXX if put fails we mark the page as
596 				 * clean to avoid an infinite loop.
597 				 * Will loose changes to the page.
598 				 */
599 				if (vm_pager_put(object->pager, p, syncio)) {
600 					printf("%s: pager_put error\n",
601 					       "vm_object_page_clean");
602 					p->flags |= PG_CLEAN;
603 					noerror = FALSE;
604 				}
605 				vm_object_lock(object);
606 				object->paging_in_progress--;
607 				if (!de_queue && onqueue) {
608 					vm_page_lock_queues();
609 					if (onqueue > 0)
610 						vm_page_activate(p);
611 					else
612 						vm_page_deactivate(p);
613 					vm_page_unlock_queues();
614 				}
615 				PAGE_WAKEUP(p);
616 				goto again;
617 			}
618 		}
619 	}
620 	return (noerror);
621 }
622 #endif
623 
624 /*
625  *	vm_object_deactivate_pages
626  *
627  *	Deactivate all pages in the specified object.  (Keep its pages
628  *	in memory even though it is no longer referenced.)
629  *
630  *	The object must be locked.
631  */
632 void
633 vm_object_deactivate_pages(object)
634 	register vm_object_t	object;
635 {
636 	register vm_page_t	p, next;
637 
638 	for (p = object->memq.tqh_first; p != NULL; p = next) {
639 		next = p->listq.tqe_next;
640 		vm_page_lock_queues();
641 		vm_page_deactivate(p);
642 		vm_page_unlock_queues();
643 	}
644 }
645 
646 /*
647  *	Trim the object cache to size.
648  */
649 void
650 vm_object_cache_trim()
651 {
652 	register vm_object_t	object;
653 
654 	vm_object_cache_lock();
655 	while (vm_object_cached > vm_cache_max) {
656 		object = vm_object_cached_list.tqh_first;
657 		vm_object_cache_unlock();
658 
659 		if (object != vm_object_lookup(object->pager))
660 			panic("vm_object_deactivate: I'm sooo confused.");
661 
662 		pager_cache(object, FALSE);
663 
664 		vm_object_cache_lock();
665 	}
666 	vm_object_cache_unlock();
667 }
668 
669 
670 /*
671  *	vm_object_pmap_copy:
672  *
673  *	Makes all physical pages in the specified
674  *	object range copy-on-write.  No writeable
675  *	references to these pages should remain.
676  *
677  *	The object must *not* be locked.
678  */
679 void vm_object_pmap_copy(object, start, end)
680 	register vm_object_t	object;
681 	register vm_offset_t	start;
682 	register vm_offset_t	end;
683 {
684 	register vm_page_t	p;
685 
686 	if (object == NULL)
687 		return;
688 
689 	vm_object_lock(object);
690 	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
691 		if ((start <= p->offset) && (p->offset < end)) {
692 			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ);
693 			p->flags |= PG_COPYONWRITE;
694 		}
695 	}
696 	vm_object_unlock(object);
697 }
698 
699 /*
700  *	vm_object_pmap_remove:
701  *
702  *	Removes all physical pages in the specified
703  *	object range from all physical maps.
704  *
705  *	The object must *not* be locked.
706  */
707 void
708 vm_object_pmap_remove(object, start, end)
709 	register vm_object_t	object;
710 	register vm_offset_t	start;
711 	register vm_offset_t	end;
712 {
713 	register vm_page_t	p;
714 	int s;
715 
716 	if (object == NULL)
717 		return;
718 
719 	vm_object_lock(object);
720 again:
721 	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
722 		if ((start <= p->offset) && (p->offset < end)) {
723 			s = splhigh();
724 			if (p->flags & PG_BUSY) {
725 				p->flags |= PG_WANTED;
726 				tsleep((caddr_t) p, PVM, "vmopmr", 0);
727 				splx(s);
728 				goto again;
729 			}
730 			splx(s);
731 			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
732 			if ((p->flags & PG_CLEAN) == 0)
733 				p->flags |= PG_LAUNDRY;
734 		}
735 	}
736 	vm_object_unlock(object);
737 }
738 
739 /*
740  *	vm_object_copy:
741  *
742  *	Create a new object which is a copy of an existing
743  *	object, and mark all of the pages in the existing
744  *	object 'copy-on-write'.  The new object has one reference.
745  *	Returns the new object.
746  *
747  *	May defer the copy until later if the object is not backed
748  *	up by a non-default pager.
749  */
750 void vm_object_copy(src_object, src_offset, size,
751 		    dst_object, dst_offset, src_needs_copy)
752 	register vm_object_t	src_object;
753 	vm_offset_t		src_offset;
754 	vm_size_t		size;
755 	vm_object_t		*dst_object;	/* OUT */
756 	vm_offset_t		*dst_offset;	/* OUT */
757 	boolean_t		*src_needs_copy;	/* OUT */
758 {
759 	register vm_object_t	new_copy;
760 	register vm_object_t	old_copy;
761 	vm_offset_t		new_start, new_end;
762 
763 	register vm_page_t	p;
764 
765 	if (src_object == NULL) {
766 		/*
767 		 *	Nothing to copy
768 		 */
769 		*dst_object = NULL;
770 		*dst_offset = 0;
771 		*src_needs_copy = FALSE;
772 		return;
773 	}
774 
775 
776 	/*
777 	 *	If the object's pager is null_pager or the
778 	 *	default pager, we don't have to make a copy
779 	 *	of it.  Instead, we set the needs copy flag and
780 	 *	make a shadow later.
781 	 */
782 
783 	vm_object_lock(src_object);
784 
785 	/*
786 	 *	Try to collapse the object before copying it.
787 	 */
788 
789 	vm_object_collapse(src_object);
790 
791 	if (src_object->pager == NULL ||
792 	    src_object->pager->pg_type == PG_SWAP ||
793 	    (src_object->flags & OBJ_INTERNAL)) {
794 
795 		/*
796 		 *	Make another reference to the object
797 		 */
798 		src_object->ref_count++;
799 
800 		/*
801 		 *	Mark all of the pages copy-on-write.
802 		 */
803 		for (p = src_object->memq.tqh_first; p; p = p->listq.tqe_next)
804 			if (src_offset <= p->offset &&
805 			    p->offset < src_offset + size)
806 				p->flags |= PG_COPYONWRITE;
807 		vm_object_unlock(src_object);
808 
809 		*dst_object = src_object;
810 		*dst_offset = src_offset;
811 
812 		/*
813 		 *	Must make a shadow when write is desired
814 		 */
815 		*src_needs_copy = TRUE;
816 		return;
817 	}
818 
819 
820 	/*
821 	 *	If the object has a pager, the pager wants to
822 	 *	see all of the changes.  We need a copy-object
823 	 *	for the changed pages.
824 	 *
825 	 *	If there is a copy-object, and it is empty,
826 	 *	no changes have been made to the object since the
827 	 *	copy-object was made.  We can use the same copy-
828 	 *	object.
829 	 */
830 
831     Retry1:
832 	old_copy = src_object->copy;
833 	if (old_copy != NULL) {
834 		/*
835 		 *	Try to get the locks (out of order)
836 		 */
837 		if (!vm_object_lock_try(old_copy)) {
838 			vm_object_unlock(src_object);
839 
840 			/* should spin a bit here... */
841 			vm_object_lock(src_object);
842 			goto Retry1;
843 		}
844 
845 		if (old_copy->resident_page_count == 0 &&
846 		    old_copy->pager == NULL) {
847 			/*
848 			 *	Return another reference to
849 			 *	the existing copy-object.
850 			 */
851 			old_copy->ref_count++;
852 			vm_object_unlock(old_copy);
853 			vm_object_unlock(src_object);
854 			*dst_object = old_copy;
855 			*dst_offset = src_offset;
856 			*src_needs_copy = FALSE;
857 			return;
858 		}
859 		vm_object_unlock(old_copy);
860 	}
861 	vm_object_unlock(src_object);
862 
863 	/*
864 	 *	If the object has a pager, the pager wants
865 	 *	to see all of the changes.  We must make
866 	 *	a copy-object and put the changed pages there.
867 	 *
868 	 *	The copy-object is always made large enough to
869 	 *	completely shadow the original object, since
870 	 *	it may have several users who want to shadow
871 	 *	the original object at different points.
872 	 */
873 
874 	new_copy = vm_object_allocate(src_object->size);
875 
876     Retry2:
877 	vm_object_lock(src_object);
878 	/*
879 	 *	Copy object may have changed while we were unlocked
880 	 */
881 	old_copy = src_object->copy;
882 	if (old_copy != NULL) {
883 		/*
884 		 *	Try to get the locks (out of order)
885 		 */
886 		if (!vm_object_lock_try(old_copy)) {
887 			vm_object_unlock(src_object);
888 			goto Retry2;
889 		}
890 
891 		/*
892 		 *	Consistency check
893 		 */
894 		if (old_copy->shadow != src_object ||
895 		    old_copy->shadow_offset != (vm_offset_t) 0)
896 			panic("vm_object_copy: copy/shadow inconsistency");
897 
898 		/*
899 		 *	Make the old copy-object shadow the new one.
900 		 *	It will receive no more pages from the original
901 		 *	object.
902 		 */
903 
904 		src_object->ref_count--;	/* remove ref. from old_copy */
905 		old_copy->shadow = new_copy;
906 		new_copy->ref_count++;		/* locking not needed - we
907 						   have the only pointer */
908 		vm_object_unlock(old_copy);	/* done with old_copy */
909 	}
910 
911 	new_start = (vm_offset_t) 0;	/* always shadow original at 0 */
912 	new_end   = (vm_offset_t) new_copy->size; /* for the whole object */
913 
914 	/*
915 	 *	Point the new copy at the existing object.
916 	 */
917 
918 	new_copy->shadow = src_object;
919 	new_copy->shadow_offset = new_start;
920 	src_object->ref_count++;
921 	src_object->copy = new_copy;
922 
923 	/*
924 	 *	Mark all the affected pages of the existing object
925 	 *	copy-on-write.
926 	 */
927 	for (p = src_object->memq.tqh_first; p != NULL; p = p->listq.tqe_next)
928 		if ((new_start <= p->offset) && (p->offset < new_end))
929 			p->flags |= PG_COPYONWRITE;
930 
931 	vm_object_unlock(src_object);
932 
933 	*dst_object = new_copy;
934 	*dst_offset = src_offset - new_start;
935 	*src_needs_copy = FALSE;
936 }
937 
938 /*
939  *	vm_object_shadow:
940  *
941  *	Create a new object which is backed by the
942  *	specified existing object range.  The source
943  *	object reference is deallocated.
944  *
945  *	The new object and offset into that object
946  *	are returned in the source parameters.
947  */
948 
949 void
950 vm_object_shadow(object, offset, length)
951 	vm_object_t	*object;	/* IN/OUT */
952 	vm_offset_t	*offset;	/* IN/OUT */
953 	vm_size_t	length;
954 {
955 	register vm_object_t	source;
956 	register vm_object_t	result;
957 
958 	source = *object;
959 
960 	/*
961 	 *	Allocate a new object with the given length
962 	 */
963 
964 	if ((result = vm_object_allocate(length)) == NULL)
965 		panic("vm_object_shadow: no object for shadowing");
966 
967 	/*
968 	 *	The new object shadows the source object, adding
969 	 *	a reference to it.  Our caller changes his reference
970 	 *	to point to the new object, removing a reference to
971 	 *	the source object.  Net result: no change of reference
972 	 *	count.
973 	 */
974 	result->shadow = source;
975 
976 	/*
977 	 *	Store the offset into the source object,
978 	 *	and fix up the offset into the new object.
979 	 */
980 
981 	result->shadow_offset = *offset;
982 
983 	/*
984 	 *	Return the new things
985 	 */
986 
987 	*offset = 0;
988 	*object = result;
989 }
990 
991 /*
992  *	Set the specified object's pager to the specified pager.
993  */
994 
995 void
996 vm_object_setpager(object, pager, paging_offset,
997 			read_only)
998 	vm_object_t	object;
999 	vm_pager_t	pager;
1000 	vm_offset_t	paging_offset;
1001 	boolean_t	read_only;
1002 {
1003 #ifdef	lint
1004 	read_only++;	/* No longer used */
1005 #endif	lint
1006 
1007 	vm_object_lock(object);			/* XXX ? */
1008 	if (object->pager && object->pager != pager) {
1009 		panic("!!!pager already allocated!!!\n");
1010 	}
1011 	object->pager = pager;
1012 	object->paging_offset = paging_offset;
1013 	vm_object_unlock(object);			/* XXX ? */
1014 }
1015 
1016 /*
1017  *	vm_object_hash hashes the pager/id pair.
1018  */
1019 
1020 #define vm_object_hash(pager) \
1021 	(((unsigned)pager >> 5)%VM_OBJECT_HASH_COUNT)
1022 
1023 /*
1024  *	vm_object_lookup looks in the object cache for an object with the
1025  *	specified pager and paging id.
1026  */
1027 
1028 vm_object_t vm_object_lookup(pager)
1029 	vm_pager_t	pager;
1030 {
1031 	register vm_object_hash_entry_t	entry;
1032 	vm_object_t			object;
1033 
1034 	vm_object_cache_lock();
1035 
1036 	for (entry = vm_object_hashtable[vm_object_hash(pager)].tqh_first;
1037 	     entry != NULL;
1038 	     entry = entry->hash_links.tqe_next) {
1039 		object = entry->object;
1040 		if (object->pager == pager) {
1041 			vm_object_lock(object);
1042 			if (object->ref_count == 0) {
1043 				TAILQ_REMOVE(&vm_object_cached_list, object,
1044 					cached_list);
1045 				vm_object_cached--;
1046 			}
1047 			object->ref_count++;
1048 			vm_object_unlock(object);
1049 			vm_object_cache_unlock();
1050 			return(object);
1051 		}
1052 	}
1053 
1054 	vm_object_cache_unlock();
1055 	return(NULL);
1056 }
1057 
1058 /*
1059  *	vm_object_enter enters the specified object/pager/id into
1060  *	the hash table.
1061  */
1062 
1063 void vm_object_enter(object, pager)
1064 	vm_object_t	object;
1065 	vm_pager_t	pager;
1066 {
1067 	struct vm_object_hash_head	*bucket;
1068 	register vm_object_hash_entry_t	entry;
1069 
1070 	/*
1071 	 *	We don't cache null objects, and we can't cache
1072 	 *	objects with the null pager.
1073 	 */
1074 
1075 	if (object == NULL)
1076 		return;
1077 	if (pager == NULL)
1078 		return;
1079 
1080 	bucket = &vm_object_hashtable[vm_object_hash(pager)];
1081 	entry = (vm_object_hash_entry_t)
1082 		malloc((u_long)sizeof *entry, M_VMOBJHASH, M_WAITOK);
1083 	entry->object = object;
1084 	object->flags |= OBJ_CANPERSIST;
1085 
1086 	vm_object_cache_lock();
1087 	TAILQ_INSERT_TAIL(bucket, entry, hash_links);
1088 	vm_object_cache_unlock();
1089 }
1090 
1091 /*
1092  *	vm_object_remove:
1093  *
1094  *	Remove the pager from the hash table.
1095  *	Note:  This assumes that the object cache
1096  *	is locked.  XXX this should be fixed
1097  *	by reorganizing vm_object_deallocate.
1098  */
1099 void
1100 vm_object_remove(pager)
1101 	register vm_pager_t	pager;
1102 {
1103 	struct vm_object_hash_head	*bucket;
1104 	register vm_object_hash_entry_t	entry;
1105 	register vm_object_t		object;
1106 
1107 	bucket = &vm_object_hashtable[vm_object_hash(pager)];
1108 
1109 	for (entry = bucket->tqh_first;
1110 	     entry != NULL;
1111 	     entry = entry->hash_links.tqe_next) {
1112 		object = entry->object;
1113 		if (object->pager == pager) {
1114 			TAILQ_REMOVE(bucket, entry, hash_links);
1115 			free((caddr_t)entry, M_VMOBJHASH);
1116 			break;
1117 		}
1118 	}
1119 }
1120 
/* Tunable: set to FALSE to disable object collapsing entirely. */
boolean_t	vm_object_collapse_allowed = TRUE;
1122 /*
1123  *	vm_object_collapse:
1124  *
1125  *	Collapse an object with the object backing it.
1126  *	Pages in the backing object are moved into the
1127  *	parent, and the backing object is deallocated.
1128  *
1129  *	Requires that the object be locked and the page
1130  *	queues be unlocked.
1131  *
1132  *	This routine has significant changes by John S. Dyson
1133  *	to fix some swap memory leaks.  18 Dec 93
1134  *
1135  */
1136 void
1137 vm_object_collapse(object)
1138 	register vm_object_t	object;
1139 
1140 {
1141 	register vm_object_t	backing_object;
1142 	register vm_offset_t	backing_offset;
1143 	register vm_size_t	size;
1144 	register vm_offset_t	new_offset;
1145 	register vm_page_t	p, pp;
1146 
1147 	if (!vm_object_collapse_allowed)
1148 		return;
1149 
1150 	while (TRUE) {
1151 		/*
1152 		 *	Verify that the conditions are right for collapse:
1153 		 *
1154 		 *	The object exists and no pages in it are currently
1155 		 *	being paged out.
1156 		 */
1157 		if (object == NULL ||
1158 		    object->paging_in_progress != 0)
1159 			return;
1160 
1161 		/*
1162 		 *		There is a backing object, and
1163 		 */
1164 
1165 		if ((backing_object = object->shadow) == NULL)
1166 			return;
1167 
1168 		vm_object_lock(backing_object);
1169 		/*
1170 		 *	...
1171 		 *		The backing object is not read_only,
1172 		 *		and no pages in the backing object are
1173 		 *		currently being paged out.
1174 		 *		The backing object is internal.
1175 		 */
1176 
1177 		if ((backing_object->flags & OBJ_INTERNAL) == 0 ||
1178 		    backing_object->paging_in_progress != 0) {
1179 			vm_object_unlock(backing_object);
1180 			return;
1181 		}
1182 
1183 		/*
1184 		 *	The backing object can't be a copy-object:
1185 		 *	the shadow_offset for the copy-object must stay
1186 		 *	as 0.  Furthermore (for the 'we have all the
1187 		 *	pages' case), if we bypass backing_object and
1188 		 *	just shadow the next object in the chain, old
1189 		 *	pages from that object would then have to be copied
1190 		 *	BOTH into the (former) backing_object and into the
1191 		 *	parent object.
1192 		 */
1193 		if (backing_object->shadow != NULL &&
1194 		    backing_object->shadow->copy == backing_object) {
1195 			vm_object_unlock(backing_object);
1196 			return;
1197 		}
1198 
1199 		/*
1200 		 * we can deal only with the swap pager
1201 		 */
1202 		if ((object->pager &&
1203 		    	object->pager->pg_type != PG_SWAP) ||
1204 		    (backing_object->pager &&
1205 		    	backing_object->pager->pg_type != PG_SWAP)) {
1206 			vm_object_unlock(backing_object);
1207 			return;
1208 		}
1209 
1210 
1211 		/*
1212 		 *	We know that we can either collapse the backing
1213 		 *	object (if the parent is the only reference to
1214 		 *	it) or (perhaps) remove the parent's reference
1215 		 *	to it.
1216 		 */
1217 
1218 		backing_offset = object->shadow_offset;
1219 		size = object->size;
1220 
1221 		/*
1222 		 *	If there is exactly one reference to the backing
1223 		 *	object, we can collapse it into the parent.
1224 		 */
1225 
1226 		if (backing_object->ref_count == 1) {
1227 
1228 			/*
1229 			 *	We can collapse the backing object.
1230 			 *
1231 			 *	Move all in-memory pages from backing_object
1232 			 *	to the parent.  Pages that have been paged out
1233 			 *	will be overwritten by any of the parent's
1234 			 *	pages that shadow them.
1235 			 */
1236 
1237 			while (p = backing_object->memq.tqh_first) {
1238 
1239 				new_offset = (p->offset - backing_offset);
1240 
1241 				/*
1242 				 *	If the parent has a page here, or if
1243 				 *	this page falls outside the parent,
1244 				 *	dispose of it.
1245 				 *
1246 				 *	Otherwise, move it as planned.
1247 				 */
1248 
1249 				if (p->offset < backing_offset ||
1250 				    new_offset >= size) {
1251 					vm_page_lock_queues();
1252 					vm_page_free(p);
1253 					vm_page_unlock_queues();
1254 				} else {
1255 				    pp = vm_page_lookup(object, new_offset);
1256 				    if (pp != NULL || (object->pager && vm_pager_has_page(object->pager,
1257 						object->paging_offset + new_offset))) {
1258 					vm_page_lock_queues();
1259 					vm_page_free(p);
1260 					vm_page_unlock_queues();
1261 				    } else {
1262 					vm_page_rename(p, object, new_offset);
1263 				    }
1264 				}
1265 			}
1266 
1267 			/*
1268 			 *	Move the pager from backing_object to object.
1269 			 */
1270 
1271 			if (backing_object->pager) {
1272 				backing_object->paging_in_progress++;
1273 				if (object->pager) {
1274 					vm_pager_t bopager;
1275 					object->paging_in_progress++;
1276 					/*
1277 					 * copy shadow object pages into ours
1278 					 * and destroy unneeded pages in shadow object.
1279 					 */
1280 					bopager = backing_object->pager;
1281 					backing_object->pager = NULL;
1282 					vm_object_remove(backing_object->pager);
1283 					swap_pager_copy(
1284 						bopager, backing_object->paging_offset,
1285 						object->pager, object->paging_offset,
1286 						object->shadow_offset);
1287 					object->paging_in_progress--;
1288 					if (object->paging_in_progress == 0)
1289 						wakeup((caddr_t)object);
1290 				} else {
1291 					object->paging_in_progress++;
1292 					/*
1293 					 * grab the shadow objects pager
1294 					 */
1295 					object->pager = backing_object->pager;
1296 					object->paging_offset = backing_object->paging_offset + backing_offset;
1297 					vm_object_remove(backing_object->pager);
1298 					backing_object->pager = NULL;
1299 					/*
1300 					 * free unnecessary blocks
1301 					 */
1302 					swap_pager_freespace(object->pager, 0, object->paging_offset);
1303 					object->paging_in_progress--;
1304 					if (object->paging_in_progress == 0)
1305 						wakeup((caddr_t)object);
1306 				}
1307 				backing_object->paging_in_progress--;
1308 				if (backing_object->paging_in_progress == 0)
1309 					wakeup((caddr_t)backing_object);
1310 			}
1311 
1312 
1313 			/*
1314 			 *	Object now shadows whatever backing_object did.
1315 			 *	Note that the reference to backing_object->shadow
1316 			 *	moves from within backing_object to within object.
1317 			 */
1318 
1319 			object->shadow = backing_object->shadow;
1320 			object->shadow_offset += backing_object->shadow_offset;
1321 			if (object->shadow != NULL &&
1322 			    object->shadow->copy != NULL) {
1323 				panic("vm_object_collapse: we collapsed a copy-object!");
1324 			}
1325 			/*
1326 			 *	Discard backing_object.
1327 			 *
1328 			 *	Since the backing object has no pages, no
1329 			 *	pager left, and no object references within it,
1330 			 *	all that is necessary is to dispose of it.
1331 			 */
1332 
1333 			vm_object_unlock(backing_object);
1334 
1335 			simple_lock(&vm_object_list_lock);
1336 			TAILQ_REMOVE(&vm_object_list, backing_object,
1337 				object_list);
1338 			vm_object_count--;
1339 			simple_unlock(&vm_object_list_lock);
1340 
1341 			free((caddr_t)backing_object, M_VMOBJ);
1342 
1343 			object_collapses++;
1344 		}
1345 		else {
1346 			/*
1347 			 *	If all of the pages in the backing object are
1348 			 *	shadowed by the parent object, the parent
1349 			 *	object no longer has to shadow the backing
1350 			 *	object; it can shadow the next one in the
1351 			 *	chain.
1352 			 *
1353 			 *	The backing object must not be paged out - we'd
1354 			 *	have to check all of the paged-out pages, as
1355 			 *	well.
1356 			 */
1357 
1358 			if (backing_object->pager != NULL) {
1359 				vm_object_unlock(backing_object);
1360 				return;
1361 			}
1362 
1363 			/*
1364 			 *	Should have a check for a 'small' number
1365 			 *	of pages here.
1366 			 */
1367 
1368 			for( p = backing_object->memq.tqh_first;p;p=p->listq.tqe_next) {
1369 				new_offset = (p->offset - backing_offset);
1370 
1371 				/*
1372 				 *	If the parent has a page here, or if
1373 				 *	this page falls outside the parent,
1374 				 *	keep going.
1375 				 *
1376 				 *	Otherwise, the backing_object must be
1377 				 *	left in the chain.
1378 				 */
1379 
1380 				if (p->offset >= backing_offset &&
1381 				    new_offset <= size &&
1382 				    ((pp = vm_page_lookup(object, new_offset)) == NULL || (pp->flags & PG_FAKE)) &&
1383 					(!object->pager || !vm_pager_has_page(object->pager, object->paging_offset+new_offset))) {
1384 					/*
1385 					 *	Page still needed.
1386 					 *	Can't go any further.
1387 					 */
1388 					vm_object_unlock(backing_object);
1389 					return;
1390 				}
1391 			}
1392 
1393 			/*
1394 			 *	Make the parent shadow the next object
1395 			 *	in the chain.  Deallocating backing_object
1396 			 *	will not remove it, since its reference
1397 			 *	count is at least 2.
1398 			 */
1399 
1400 			vm_object_reference(object->shadow = backing_object->shadow);
1401 			object->shadow_offset += backing_object->shadow_offset;
1402 
1403 			/*
1404 			 *      Backing object might have had a copy pointer
1405 			 *      to us.  If it did, clear it.
1406 			 */
1407 			if (backing_object->copy == object) {
1408 				backing_object->copy = NULL;
1409 			}
1410 
1411 			/*	Drop the reference count on backing_object.
1412 			 *	Since its ref_count was at least 2, it
1413 			 *	will not vanish; so we don't need to call
1414 			 *	vm_object_deallocate.
1415 			 */
1416 			if (backing_object->ref_count == 1)
1417 				printf("should have called obj deallocate\n");
1418 			backing_object->ref_count--;
1419 			vm_object_unlock(backing_object);
1420 
1421 			object_bypasses ++;
1422 
1423 		}
1424 
1425 		/*
1426 		 *	Try again with this object's new backing object.
1427 		 */
1428 	}
1429 }
1430 
1431 /*
1432  *	vm_object_page_remove: [internal]
1433  *
1434  *	Removes all physical pages in the specified
1435  *	object range from the object's list of pages.
1436  *
1437  *	The object must be locked.
1438  */
void
vm_object_page_remove(object, start, end)
	register vm_object_t	object;
	register vm_offset_t	start;
	register vm_offset_t	end;
{
	register vm_page_t	p, next;
	vm_offset_t size;
	int cnt;	/* XXX unused */
	int s;		/* saved spl level around the PG_BUSY check */

	if (object == NULL)
		return;

	/* Work on whole pages; callers may pass unaligned bounds. */
	start = trunc_page(start);
	end = round_page(end);
again:
	/*
	 * Recomputed after every sleep: while we slept on a busy page
	 * the object's page set may have changed, so the scan restarts
	 * from scratch.
	 */
	size = end-start;
	if (size > 4*PAGE_SIZE || size >= object->size/4) {
		/*
		 * Large range: one pass over the object's resident-page
		 * list is cheaper than a hash lookup per page.  "size"
		 * counts down so the walk can stop once the range is
		 * fully freed.
		 */
		for (p = object->memq.tqh_first; (p != NULL && size > 0); p = next) {
			next = p->listq.tqe_next;
			if ((start <= p->offset) && (p->offset < end)) {
				s=splhigh();
				if (p->flags & PG_BUSY) {
					/* Page is in transit; wait for it and rescan. */
					p->flags |= PG_WANTED;
					tsleep((caddr_t) p, PVM, "vmopar", 0);
					splx(s);
					goto again;
				}
				splx(s);
				/* Invalidate all mappings before freeing the page. */
				pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
				vm_page_lock_queues();
				vm_page_free(p);
				vm_page_unlock_queues();
				size -= PAGE_SIZE;
			}
		}
	} else {
		/*
		 * Small range: look up each page offset individually.
		 * The inner while re-checks the same offset because a
		 * new page may appear there while we sleep.
		 */
		while (size > 0) {
			while (p = vm_page_lookup(object, start)) {
				s = splhigh();
				if (p->flags & PG_BUSY) {
					p->flags |= PG_WANTED;
					tsleep((caddr_t) p, PVM, "vmopar", 0);
					splx(s);
					goto again;
				}
				splx(s);
				pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
				vm_page_lock_queues();
				vm_page_free(p);
				vm_page_unlock_queues();
			}
			start += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
}
1497 
1498 /*
1499  *	Routine:	vm_object_coalesce
1500  *	Function:	Coalesces two objects backing up adjoining
1501  *			regions of memory into a single object.
1502  *
1503  *	returns TRUE if objects were combined.
1504  *
1505  *	NOTE:	Only works at the moment if the second object is NULL -
1506  *		if it's not, which object do we lock first?
1507  *
1508  *	Parameters:
1509  *		prev_object	First object to coalesce
1510  *		prev_offset	Offset into prev_object
1511  *		next_object	Second object into coalesce
1512  *		next_offset	Offset into next_object
1513  *
1514  *		prev_size	Size of reference to prev_object
1515  *		next_size	Size of reference to next_object
1516  *
1517  *	Conditions:
1518  *	The object must *not* be locked.
1519  */
1520 boolean_t vm_object_coalesce(prev_object, next_object,
1521 			prev_offset, next_offset,
1522 			prev_size, next_size)
1523 
1524 	register vm_object_t	prev_object;
1525 	vm_object_t	next_object;
1526 	vm_offset_t	prev_offset, next_offset;
1527 	vm_size_t	prev_size, next_size;
1528 {
1529 	vm_size_t	newsize;
1530 
1531 #ifdef	lint
1532 	next_offset++;
1533 #endif
1534 
1535 	if (next_object != NULL) {
1536 		return(FALSE);
1537 	}
1538 
1539 	if (prev_object == NULL) {
1540 		return(TRUE);
1541 	}
1542 
1543 	vm_object_lock(prev_object);
1544 
1545 	/*
1546 	 *	Try to collapse the object first
1547 	 */
1548 	vm_object_collapse(prev_object);
1549 
1550 	/*
1551 	 *	Can't coalesce if:
1552 	 *	. more than one reference
1553 	 *	. paged out
1554 	 *	. shadows another object
1555 	 *	. has a copy elsewhere
1556 	 *	(any of which mean that the pages not mapped to
1557 	 *	prev_entry may be in use anyway)
1558 	 */
1559 
1560 	if (prev_object->ref_count > 1 ||
1561 		prev_object->pager != NULL ||
1562 		prev_object->shadow != NULL ||
1563 		prev_object->copy != NULL) {
1564 		vm_object_unlock(prev_object);
1565 		return(FALSE);
1566 	}
1567 
1568 	/*
1569 	 *	Remove any pages that may still be in the object from
1570 	 *	a previous deallocation.
1571 	 */
1572 
1573 	vm_object_page_remove(prev_object,
1574 			prev_offset + prev_size,
1575 			prev_offset + prev_size + next_size);
1576 
1577 	/*
1578 	 *	Extend the object if necessary.
1579 	 */
1580 	newsize = prev_offset + prev_size + next_size;
1581 	if (newsize > prev_object->size)
1582 		prev_object->size = newsize;
1583 
1584 	vm_object_unlock(prev_object);
1585 	return(TRUE);
1586 }
1587 
1588 /*
1589  * returns page after looking up in shadow chain
1590  */
1591 
1592 vm_page_t
1593 vm_object_page_lookup(object, offset)
1594 	vm_object_t object;
1595 	vm_offset_t offset;
1596 {
1597 	vm_page_t m;
1598 	if (!(m=vm_page_lookup(object, offset))) {
1599 		if (!object->shadow)
1600 			return 0;
1601 		else
1602 			return vm_object_page_lookup(object->shadow, offset + object->shadow_offset);
1603 	}
1604 	return m;
1605 }
1606 
1607 #define DEBUG
1608 #if defined(DEBUG) || defined(DDB)
1609 /*
1610  *	vm_object_print:	[ debug ]
1611  */
void vm_object_print(object, full)
	vm_object_t	object;
	boolean_t	full;
{
	register vm_page_t	p;
	extern indent;		/* ddb output indentation level (implicit int) */

	register int count;

	if (object == NULL)
		return;

	/* One-line summary: identity, size, residency and reference counts. */
	iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ",
		(int) object, (int) object->size,
		object->resident_page_count, object->ref_count);
	printf("pager=0x%x+0x%x, shadow=(0x%x)+0x%x\n",
	       (int) object->pager, (int) object->paging_offset,
	       (int) object->shadow, (int) object->shadow_offset);
	printf("cache: next=0x%x, prev=0x%x\n",
	       object->cached_list.tqe_next, object->cached_list.tqe_prev);

	if (!full)
		return;

	/* Dump the resident pages, six (offset,page) pairs per line. */
	indent += 2;
	count = 0;
	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
		if (count == 0)
			iprintf("memory:=");
		else if (count == 6) {
			printf("\n");
			iprintf(" ...");
			count = 0;
		} else
			printf(",");
		count++;

		printf("(off=0x%x,page=0x%x)", p->offset, VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		printf("\n");
	indent -= 2;
}
1655 #endif /* defined(DEBUG) || defined(DDB) */
1656