xref: /original-bsd/sys/vm/vm_object.c (revision 56b48dd2)
/*
 * Copyright (c) 1985, Avadis Tevanian, Jr., Michael Wayne Young
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * The CMU software License Agreement specifies the terms and conditions
 * for use and redistribution.
 *
 *	@(#)vm_object.c	7.2 (Berkeley) 04/20/91
 */

/*
 *	Virtual memory object module.
 */

#include "param.h"
#include "malloc.h"

#include "vm.h"
#include "vm_page.h"

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */
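
/*
 * Illustrative sketch (not part of the original source): the typical
 * lifecycle of an object, as a hypothetical client of this module
 * would see it.  Every vm_object_allocate() or vm_object_reference()
 * must eventually be balanced by a vm_object_deallocate(); the last
 * deallocate either caches the object (if it can persist) or
 * terminates it.
 */
#ifdef notdef
void
vm_object_lifecycle_example()
{
	vm_object_t	object;

	object = vm_object_allocate((vm_size_t) (64 * 1024));
	vm_object_reference(object);	/* a second user of the object */

	vm_object_deallocate(object);	/* drops the second reference */
	vm_object_deallocate(object);	/* last ref: cached or terminated */
}
#endif notdef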

struct vm_object	kernel_object_store;
struct vm_object	kmem_object_store;

#define	VM_OBJECT_HASH_COUNT	157

int		vm_cache_max = 100;	/* can patch if necessary */
queue_head_t	vm_object_hashtable[VM_OBJECT_HASH_COUNT];

long	object_collapses = 0;
long	object_bypasses  = 0;

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void vm_object_init()
{
	register int	i;

	queue_init(&vm_object_cached_list);
	queue_init(&vm_object_list);
	vm_object_count = 0;
	simple_lock_init(&vm_cache_lock);
	simple_lock_init(&vm_object_list_lock);

	for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
		queue_init(&vm_object_hashtable[i]);

	kernel_object = &kernel_object_store;
	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
			kernel_object);

	kmem_object = &kmem_object_store;
	_vm_object_allocate(VM_KMEM_SIZE + VM_MBUF_SIZE, kmem_object);
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */

vm_object_t vm_object_allocate(size)
	vm_size_t	size;
{
	register vm_object_t	result;

	result = (vm_object_t)
		malloc((u_long)sizeof *result, M_VMOBJ, M_WAITOK);

	_vm_object_allocate(size, result);

	return(result);
}

_vm_object_allocate(size, object)
	vm_size_t		size;
	register vm_object_t	object;
{
	queue_init(&object->memq);
	vm_object_lock_init(object);
	object->ref_count = 1;
	object->resident_page_count = 0;
	object->size = size;
	object->can_persist = FALSE;
	object->paging_in_progress = 0;
	object->copy = NULL;

	/*
	 *	Object starts out read-write, with no pager.
	 */

	object->pager = NULL;
	object->pager_ready = FALSE;
	object->internal = TRUE;	/* vm_allocate_with_pager will reset */
	object->paging_offset = 0;
	object->shadow = NULL;
	object->shadow_offset = (vm_offset_t) 0;

	simple_lock(&vm_object_list_lock);
	queue_enter(&vm_object_list, object, vm_object_t, object_list);
	vm_object_count++;
	simple_unlock(&vm_object_list_lock);
}

/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.
 */
void vm_object_reference(object)
	register vm_object_t	object;
{
	if (object == NULL)
		return;

	vm_object_lock(object);
	object->ref_count++;
	vm_object_unlock(object);
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void vm_object_deallocate(object)
	register vm_object_t	object;
{
	vm_object_t	temp;

	while (object != NULL) {

		/*
		 *	The cache holds a reference (uncounted) to
		 *	the object; we must lock it before removing
		 *	the object.
		 */

		vm_object_cache_lock();

		/*
		 *	Lose the reference
		 */
		vm_object_lock(object);
		if (--(object->ref_count) != 0) {

			/*
			 *	If there are still references, then
			 *	we are done.
			 */
			vm_object_unlock(object);
			vm_object_cache_unlock();
			return;
		}

		/*
		 *	See if this object can persist.  If so, enter
		 *	it in the cache, then deactivate all of its
		 *	pages.
		 */

		if (object->can_persist) {

			queue_enter(&vm_object_cached_list, object,
				vm_object_t, cached_list);
			vm_object_cached++;
			vm_object_cache_unlock();

			vm_object_deactivate_pages(object);
			vm_object_unlock(object);

			vm_object_cache_trim();
			return;
		}

		/*
		 *	Make sure no one can look us up now.
		 */
		vm_object_remove(object->pager);
		vm_object_cache_unlock();

		temp = object->shadow;
		vm_object_terminate(object);
			/* unlocks and deallocates object */
		object = temp;
	}
}

/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 */
void vm_object_terminate(object)
	register vm_object_t	object;
{
	register vm_page_t	p;
	vm_object_t		shadow_object;

	/*
	 *	Detach the object from its shadow if we are the shadow's
	 *	copy.
	 */
	if ((shadow_object = object->shadow) != NULL) {
		vm_object_lock(shadow_object);
		if (shadow_object->copy == object)
			shadow_object->copy = NULL;
#if 0
		else if (shadow_object->copy != NULL)
			panic("vm_object_terminate: copy/shadow inconsistency");
#endif
		vm_object_unlock(shadow_object);
	}

	/*
	 *	Wait until the pageout daemon is through
	 *	with the object.
	 */

	while (object->paging_in_progress != 0) {
		vm_object_sleep(object, object, FALSE);
		vm_object_lock(object);
	}

	/*
	 *	While the paging system is locked,
	 *	pull the object's pages off the active
	 *	and inactive queues.  This keeps the
	 *	pageout daemon from playing with them
	 *	during vm_pager_deallocate.
	 *
	 *	We can't free the pages yet, because the
	 *	object's pager may have to write them out
	 *	before deallocating the paging space.
	 */

	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		VM_PAGE_CHECK(p);

		vm_page_lock_queues();
		if (p->active) {
			queue_remove(&vm_page_queue_active, p, vm_page_t,
						pageq);
			p->active = FALSE;
			vm_page_active_count--;
		}

		if (p->inactive) {
			queue_remove(&vm_page_queue_inactive, p, vm_page_t,
						pageq);
			p->inactive = FALSE;
			vm_page_inactive_count--;
		}
		vm_page_unlock_queues();
		p = (vm_page_t) queue_next(&p->listq);
	}

	vm_object_unlock(object);

	if (object->paging_in_progress != 0)
		panic("vm_object_terminate: pageout in progress");

	/*
	 *	Clean and free the pages, as appropriate.
	 *	All references to the object are gone,
	 *	so we don't need to lock it.
	 */

	if (!object->internal) {
		vm_object_lock(object);
		vm_object_page_clean(object, 0, 0);
		vm_object_unlock(object);
	}
	while (!queue_empty(&object->memq)) {
		p = (vm_page_t) queue_first(&object->memq);

		VM_PAGE_CHECK(p);

		vm_page_lock_queues();
		vm_page_free(p);
		vm_page_unlock_queues();
	}

	/*
	 *	Let the pager know object is dead.
	 */

	if (object->pager != NULL)
		vm_pager_deallocate(object->pager);

	simple_lock(&vm_object_list_lock);
	queue_remove(&vm_object_list, object, vm_object_t, object_list);
	vm_object_count--;
	simple_unlock(&vm_object_list_lock);

	/*
	 *	Free the space for the object.
	 */

	free((caddr_t)object, M_VMOBJ);
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.
 *	Leaves page on whatever queue it is currently on.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 */
vm_object_page_clean(object, start, end)
	register vm_object_t	object;
	register vm_offset_t	start;
	register vm_offset_t	end;
{
	register vm_page_t	p;

	if (object->pager == NULL)
		return;

again:
	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		if (start == end ||
		    (p->offset >= start && p->offset < end)) {
			if (p->clean && pmap_is_modified(VM_PAGE_TO_PHYS(p)))
				p->clean = FALSE;
			pmap_remove_all(VM_PAGE_TO_PHYS(p));
			if (!p->clean) {
				p->busy = TRUE;
				object->paging_in_progress++;
				vm_object_unlock(object);
				(void) vm_pager_put(object->pager, p, TRUE);
				vm_object_lock(object);
				object->paging_in_progress--;
				p->busy = FALSE;
				PAGE_WAKEUP(p);
				goto again;
			}
		}
		p = (vm_page_t) queue_next(&p->listq);
	}
}
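
/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * caller flushing an object's modified pages to its pager.  Passing
 * start == end cleans the whole object, as vm_object_terminate does
 * above for external objects.
 */
#ifdef notdef
void
vm_object_flush_example(object)
	vm_object_t	object;
{
	vm_object_lock(object);
	vm_object_page_clean(object, (vm_offset_t) 0, (vm_offset_t) 0);
	vm_object_unlock(object);
}
#endif notdef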

/*
 *	vm_object_deactivate_pages
 *
 *	Deactivate all pages in the specified object.  (Keep its pages
 *	in memory even though it is no longer referenced.)
 *
 *	The object must be locked.
 */
vm_object_deactivate_pages(object)
	register vm_object_t	object;
{
	register vm_page_t	p, next;

	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		next = (vm_page_t) queue_next(&p->listq);
		vm_page_lock_queues();
		vm_page_deactivate(p);
		vm_page_unlock_queues();
		p = next;
	}
}

/*
 *	Trim the object cache to size.
 */
vm_object_cache_trim()
{
	register vm_object_t	object;

	vm_object_cache_lock();
	while (vm_object_cached > vm_cache_max) {
		object = (vm_object_t) queue_first(&vm_object_cached_list);
		vm_object_cache_unlock();

		if (object != vm_object_lookup(object->pager))
			panic("vm_object_cache_trim: I'm sooo confused.");

		pager_cache(object, FALSE);

		vm_object_cache_lock();
	}
	vm_object_cache_unlock();
}

/*
 *	vm_object_shutdown()
 *
 *	Shut down the object system.  Unfortunately, while we
 *	may be trying to do this, init is happily waiting for
 *	processes to exit, and therefore will be causing some objects
 *	to be deallocated.  To handle this, we gain a fake reference
 *	to all objects we release paging areas for.  This will prevent
 *	a duplicate deallocation.  This routine is probably full of
 *	race conditions!
 */

void vm_object_shutdown()
{
	register vm_object_t	object;

	/*
	 *	Clean up the object cache *before* we screw up the reference
	 *	counts on all of the objects.
	 */

	vm_object_cache_clear();

	printf("free paging spaces: ");

	/*
	 *	First we gain a reference to each object so that
	 *	no one else will deallocate them.
	 */

	simple_lock(&vm_object_list_lock);
	object = (vm_object_t) queue_first(&vm_object_list);
	while (!queue_end(&vm_object_list, (queue_entry_t) object)) {
		vm_object_reference(object);
		object = (vm_object_t) queue_next(&object->object_list);
	}
	simple_unlock(&vm_object_list_lock);

	/*
	 *	Now we deallocate all the paging areas.  We don't need
	 *	to lock anything because we've reduced to a single
	 *	processor while shutting down.  This also assumes that
	 *	no new objects are being created.
	 */

	object = (vm_object_t) queue_first(&vm_object_list);
	while (!queue_end(&vm_object_list, (queue_entry_t) object)) {
		if (object->pager != NULL)
			vm_pager_deallocate(object->pager);
		object = (vm_object_t) queue_next(&object->object_list);
		printf(".");
	}
	printf("done.\n");
}

/*
 *	vm_object_pmap_copy:
 *
 *	Makes all physical pages in the specified
 *	object range copy-on-write.  No writeable
 *	references to these pages should remain.
 *
 *	The object must *not* be locked.
 */
void vm_object_pmap_copy(object, start, end)
	register vm_object_t	object;
	register vm_offset_t	start;
	register vm_offset_t	end;
{
	register vm_page_t	p;

	if (object == NULL)
		return;

	vm_object_lock(object);
	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		if ((start <= p->offset) && (p->offset < end)) {
			if (!p->copy_on_write) {
				pmap_copy_on_write(VM_PAGE_TO_PHYS(p));
				p->copy_on_write = TRUE;
			}
		}
		p = (vm_page_t) queue_next(&p->listq);
	}
	vm_object_unlock(object);
}

/*
 *	vm_object_pmap_remove:
 *
 *	Removes all physical pages in the specified
 *	object range from all physical maps.
 *
 *	The object must *not* be locked.
 */
void vm_object_pmap_remove(object, start, end)
	register vm_object_t	object;
	register vm_offset_t	start;
	register vm_offset_t	end;
{
	register vm_page_t	p;

	if (object == NULL)
		return;

	vm_object_lock(object);
	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		if ((start <= p->offset) && (p->offset < end)) {
			pmap_remove_all(VM_PAGE_TO_PHYS(p));
		}
		p = (vm_page_t) queue_next(&p->listq);
	}
	vm_object_unlock(object);
}

/*
 *	vm_object_copy:
 *
 *	Create a new object which is a copy of an existing
 *	object, and mark all of the pages in the existing
 *	object 'copy-on-write'.  The new object has one reference.
 *	Returns the new object.
 *
 *	May defer the copy until later if the object is not backed
 *	up by a non-default pager.
 */
void vm_object_copy(src_object, src_offset, size,
		    dst_object, dst_offset, src_needs_copy)
	register vm_object_t	src_object;
	vm_offset_t		src_offset;
	vm_size_t		size;
	vm_object_t		*dst_object;	/* OUT */
	vm_offset_t		*dst_offset;	/* OUT */
	boolean_t		*src_needs_copy;	/* OUT */
{
	register vm_object_t	new_copy;
	register vm_object_t	old_copy;
	vm_offset_t		new_start, new_end;

	register vm_page_t	p;

	if (src_object == NULL) {
		/*
		 *	Nothing to copy
		 */
		*dst_object = NULL;
		*dst_offset = 0;
		*src_needs_copy = FALSE;
		return;
	}

	/*
	 *	If the object's pager is null_pager or the
	 *	default pager, we don't have to make a copy
	 *	of it.  Instead, we set the needs copy flag and
	 *	make a shadow later.
	 */

	vm_object_lock(src_object);
	if (src_object->pager == NULL ||
	    src_object->internal) {

		/*
		 *	Make another reference to the object
		 */
		src_object->ref_count++;

		/*
		 *	Mark all of the pages copy-on-write.
		 */
		for (p = (vm_page_t) queue_first(&src_object->memq);
		     !queue_end(&src_object->memq, (queue_entry_t)p);
		     p = (vm_page_t) queue_next(&p->listq)) {
			if (src_offset <= p->offset &&
			    p->offset < src_offset + size)
				p->copy_on_write = TRUE;
		}
		vm_object_unlock(src_object);

		*dst_object = src_object;
		*dst_offset = src_offset;

		/*
		 *	Must make a shadow when write is desired
		 */
		*src_needs_copy = TRUE;
		return;
	}

	/*
	 *	Try to collapse the object before copying it.
	 */
	vm_object_collapse(src_object);

	/*
	 *	If the object has a pager, the pager wants to
	 *	see all of the changes.  We need a copy-object
	 *	for the changed pages.
	 *
	 *	If there is a copy-object, and it is empty,
	 *	no changes have been made to the object since the
	 *	copy-object was made.  We can use the same copy-
	 *	object.
	 */

    Retry1:
	old_copy = src_object->copy;
	if (old_copy != NULL) {
		/*
		 *	Try to get the locks (out of order)
		 */
		if (!vm_object_lock_try(old_copy)) {
			vm_object_unlock(src_object);

			/* should spin a bit here... */
			vm_object_lock(src_object);
			goto Retry1;
		}

		if (old_copy->resident_page_count == 0 &&
		    old_copy->pager == NULL) {
			/*
			 *	Return another reference to
			 *	the existing copy-object.
			 */
			old_copy->ref_count++;
			vm_object_unlock(old_copy);
			vm_object_unlock(src_object);
			*dst_object = old_copy;
			*dst_offset = src_offset;
			*src_needs_copy = FALSE;
			return;
		}
		vm_object_unlock(old_copy);
	}
	vm_object_unlock(src_object);

	/*
	 *	If the object has a pager, the pager wants
	 *	to see all of the changes.  We must make
	 *	a copy-object and put the changed pages there.
	 *
	 *	The copy-object is always made large enough to
	 *	completely shadow the original object, since
	 *	it may have several users who want to shadow
	 *	the original object at different points.
	 */

	new_copy = vm_object_allocate(src_object->size);

    Retry2:
	vm_object_lock(src_object);
	/*
	 *	Copy object may have changed while we were unlocked
	 */
	old_copy = src_object->copy;
	if (old_copy != NULL) {
		/*
		 *	Try to get the locks (out of order)
		 */
		if (!vm_object_lock_try(old_copy)) {
			vm_object_unlock(src_object);
			goto Retry2;
		}

		/*
		 *	Consistency check
		 */
		if (old_copy->shadow != src_object ||
		    old_copy->shadow_offset != (vm_offset_t) 0)
			panic("vm_object_copy: copy/shadow inconsistency");

		/*
		 *	Make the old copy-object shadow the new one.
		 *	It will receive no more pages from the original
		 *	object.
		 */

		src_object->ref_count--;	/* remove ref. from old_copy */
		old_copy->shadow = new_copy;
		new_copy->ref_count++;		/* locking not needed - we
						   have the only pointer */
		vm_object_unlock(old_copy);	/* done with old_copy */
	}

	new_start = (vm_offset_t) 0;	/* always shadow original at 0 */
	new_end   = (vm_offset_t) new_copy->size; /* for the whole object */

	/*
	 *	Point the new copy at the existing object.
	 */

	new_copy->shadow = src_object;
	new_copy->shadow_offset = new_start;
	src_object->ref_count++;
	src_object->copy = new_copy;

	/*
	 *	Mark all the affected pages of the existing object
	 *	copy-on-write.
	 */
	p = (vm_page_t) queue_first(&src_object->memq);
	while (!queue_end(&src_object->memq, (queue_entry_t) p)) {
		if ((new_start <= p->offset) && (p->offset < new_end)) {
			p->copy_on_write = TRUE;
		}
		p = (vm_page_t) queue_next(&p->listq);
	}

	vm_object_unlock(src_object);

	*dst_object = new_copy;
	*dst_offset = src_offset - new_start;
	*src_needs_copy = FALSE;
}
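
/*
 * Illustrative sketch (not part of the original source): how a
 * hypothetical caller consumes vm_object_copy's three OUT parameters.
 * When src_needs_copy comes back TRUE, the caller still shares the
 * source object and must shadow it (see vm_object_shadow below)
 * before allowing any writes through this reference.
 */
#ifdef notdef
void
vm_object_copy_example(src_object, src_offset, size)
	vm_object_t	src_object;
	vm_offset_t	src_offset;
	vm_size_t	size;
{
	vm_object_t	dst_object;
	vm_offset_t	dst_offset;
	boolean_t	src_needs_copy;

	vm_object_copy(src_object, src_offset, size,
			&dst_object, &dst_offset, &src_needs_copy);
	if (src_needs_copy)
		vm_object_shadow(&dst_object, &dst_offset, size);
	/* dst_object/dst_offset now name a writable copy of the region */
}
#endif notdef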

/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */

void vm_object_shadow(object, offset, length)
	vm_object_t	*object;	/* IN/OUT */
	vm_offset_t	*offset;	/* IN/OUT */
	vm_size_t	length;
{
	register vm_object_t	source;
	register vm_object_t	result;

	source = *object;

	/*
	 *	Allocate a new object with the given length
	 */

	if ((result = vm_object_allocate(length)) == NULL)
		panic("vm_object_shadow: no object for shadowing");

	/*
	 *	The new object shadows the source object, adding
	 *	a reference to it.  Our caller changes his reference
	 *	to point to the new object, removing a reference to
	 *	the source object.  Net result: no change of reference
	 *	count.
	 */
	result->shadow = source;

	/*
	 *	Store the offset into the source object,
	 *	and fix up the offset into the new object.
	 */

	result->shadow_offset = *offset;

	/*
	 *	Return the new things
	 */

	*offset = 0;
	*object = result;
}
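
/*
 * Illustrative note (not part of the original source): repeated
 * shadowing builds a chain.  After two copy-on-write faults on the
 * same mapping:
 *
 *	newest object --shadow--> middle object --shadow--> original
 *
 * A fault searches the chain from the head until it finds a resident
 * page; vm_object_collapse (below) tries to shorten such chains.
 */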

/*
 *	Set the specified object's pager to the specified pager.
 */

void vm_object_setpager(object, pager, paging_offset,
			read_only)
	vm_object_t	object;
	vm_pager_t	pager;
	vm_offset_t	paging_offset;
	boolean_t	read_only;
{
#ifdef	lint
	read_only++;	/* No longer used */
#endif	lint

	vm_object_lock(object);			/* XXX ? */
	object->pager = pager;
	object->paging_offset = paging_offset;
	vm_object_unlock(object);			/* XXX ? */
}

/*
 *	vm_object_hash hashes the pager/id pair.
 */

#define vm_object_hash(pager) \
	(((unsigned)pager)%VM_OBJECT_HASH_COUNT)

/*
 *	vm_object_lookup looks in the object cache for an object with the
 *	specified pager and paging id.
 */

vm_object_t vm_object_lookup(pager)
	vm_pager_t	pager;
{
	register queue_t		bucket;
	register vm_object_hash_entry_t	entry;
	vm_object_t			object;

	bucket = &vm_object_hashtable[vm_object_hash(pager)];

	vm_object_cache_lock();

	entry = (vm_object_hash_entry_t) queue_first(bucket);
	while (!queue_end(bucket, (queue_entry_t) entry)) {
		object = entry->object;
		if (object->pager == pager) {
			vm_object_lock(object);
			if (object->ref_count == 0) {
				queue_remove(&vm_object_cached_list, object,
						vm_object_t, cached_list);
				vm_object_cached--;
			}
			object->ref_count++;
			vm_object_unlock(object);
			vm_object_cache_unlock();
			return(object);
		}
		entry = (vm_object_hash_entry_t) queue_next(&entry->hash_links);
	}

	vm_object_cache_unlock();
	return(NULL);
}

/*
 *	vm_object_enter enters the specified object/pager/id into
 *	the hash table.
 */

void vm_object_enter(object, pager)
	vm_object_t	object;
	vm_pager_t	pager;
{
	register queue_t		bucket;
	register vm_object_hash_entry_t	entry;

	/*
	 *	We don't cache null objects, and we can't cache
	 *	objects with the null pager.
	 */

	if (object == NULL)
		return;
	if (pager == NULL)
		return;

	bucket = &vm_object_hashtable[vm_object_hash(pager)];
	entry = (vm_object_hash_entry_t)
		malloc((u_long)sizeof *entry, M_VMOBJHASH, M_WAITOK);
	entry->object = object;
	object->can_persist = TRUE;

	vm_object_cache_lock();
	queue_enter(bucket, entry, vm_object_hash_entry_t, hash_links);
	vm_object_cache_unlock();
}
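
/*
 * Illustrative sketch (not part of the original source): the pairing
 * of vm_object_lookup and vm_object_enter that a hypothetical
 * pager-backed allocation would use.  Once entered, the object may
 * persist in the cache with ref_count 0 and be revived by lookup.
 */
#ifdef notdef
vm_object_t
vm_object_find_or_create_example(pager, size)
	vm_pager_t	pager;
	vm_size_t	size;
{
	vm_object_t	object;

	if ((object = vm_object_lookup(pager)) != NULL)
		return(object);		/* lookup took a reference */

	object = vm_object_allocate(size);
	vm_object_setpager(object, pager, (vm_offset_t) 0, TRUE);
	vm_object_enter(object, pager);
	return(object);
}
#endif notdef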

/*
 *	vm_object_remove:
 *
 *	Remove the pager from the hash table.
 *	Note:  This assumes that the object cache
 *	is locked.  XXX this should be fixed
 *	by reorganizing vm_object_deallocate.
 */
vm_object_remove(pager)
	register vm_pager_t	pager;
{
	register queue_t		bucket;
	register vm_object_hash_entry_t	entry;
	register vm_object_t		object;

	bucket = &vm_object_hashtable[vm_object_hash(pager)];

	entry = (vm_object_hash_entry_t) queue_first(bucket);
	while (!queue_end(bucket, (queue_entry_t) entry)) {
		object = entry->object;
		if (object->pager == pager) {
			queue_remove(bucket, entry, vm_object_hash_entry_t,
					hash_links);
			free((caddr_t)entry, M_VMOBJHASH);
			break;
		}
		entry = (vm_object_hash_entry_t) queue_next(&entry->hash_links);
	}
}

/*
 *	vm_object_cache_clear removes all objects from the cache.
 */

void vm_object_cache_clear()
{
	register vm_object_t	object;

	/*
	 *	Remove each object in the cache by scanning down the
	 *	list of cached objects.
	 */
	vm_object_cache_lock();
	while (!queue_empty(&vm_object_cached_list)) {
		object = (vm_object_t) queue_first(&vm_object_cached_list);
		vm_object_cache_unlock();

		/*
		 * Note: it is important that we use vm_object_lookup
		 * to gain a reference, and not vm_object_reference, because
		 * the logic for removing an object from the cache lies in
		 * lookup.
		 */
		if (object != vm_object_lookup(object->pager))
			panic("vm_object_cache_clear: I'm sooo confused.");
		pager_cache(object, FALSE);

		vm_object_cache_lock();
	}
	vm_object_cache_unlock();
}

boolean_t	vm_object_collapse_allowed = TRUE;
/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 *
 *	Requires that the object be locked and the page
 *	queues be unlocked.
 */
void vm_object_collapse(object)
	register vm_object_t	object;
{
	register vm_object_t	backing_object;
	register vm_offset_t	backing_offset;
	register vm_size_t	size;
	register vm_offset_t	new_offset;
	register vm_page_t	p, pp;

	if (!vm_object_collapse_allowed)
		return;

	while (TRUE) {
		/*
		 *	Verify that the conditions are right for collapse:
		 *
		 *	The object exists and no pages in it are currently
		 *	being paged out (or have ever been paged out).
		 */
		if (object == NULL ||
		    object->paging_in_progress != 0 ||
		    object->pager != NULL)
			return;

		/*
		 *		There is a backing object, and
		 */

		if ((backing_object = object->shadow) == NULL)
			return;

		vm_object_lock(backing_object);
		/*
		 *	...
		 *		The backing object is not read_only,
		 *		and no pages in the backing object are
		 *		currently being paged out.
		 *		The backing object is internal.
		 */

		if (!backing_object->internal ||
		    backing_object->paging_in_progress != 0) {
			vm_object_unlock(backing_object);
			return;
		}

		/*
		 *	The backing object can't be a copy-object:
		 *	the shadow_offset for the copy-object must stay
		 *	as 0.  Furthermore (for the 'we have all the
		 *	pages' case), if we bypass backing_object and
		 *	just shadow the next object in the chain, old
		 *	pages from that object would then have to be copied
		 *	BOTH into the (former) backing_object and into the
		 *	parent object.
		 */
		if (backing_object->shadow != NULL &&
		    backing_object->shadow->copy != NULL) {
			vm_object_unlock(backing_object);
			return;
		}

		/*
		 *	We know that we can either collapse the backing
		 *	object (if the parent is the only reference to
		 *	it) or (perhaps) remove the parent's reference
		 *	to it.
		 */

		backing_offset = object->shadow_offset;
		size = object->size;

		/*
		 *	If there is exactly one reference to the backing
		 *	object, we can collapse it into the parent.
		 */

		if (backing_object->ref_count == 1) {

			/*
			 *	We can collapse the backing object.
			 *
			 *	Move all in-memory pages from backing_object
			 *	to the parent.  Pages that have been paged out
			 *	will be overwritten by any of the parent's
			 *	pages that shadow them.
			 */

			while (!queue_empty(&backing_object->memq)) {

				p = (vm_page_t)
					queue_first(&backing_object->memq);

				new_offset = (p->offset - backing_offset);

				/*
				 *	If the parent has a page here, or if
				 *	this page falls outside the parent,
				 *	dispose of it.
				 *
				 *	Otherwise, move it as planned.
				 */

				if (p->offset < backing_offset ||
				    new_offset >= size) {
					vm_page_lock_queues();
					vm_page_free(p);
					vm_page_unlock_queues();
				} else {
				    pp = vm_page_lookup(object, new_offset);
				    if (pp != NULL && !pp->fake) {
					vm_page_lock_queues();
					vm_page_free(p);
					vm_page_unlock_queues();
				    }
				    else {
					if (pp) {
					    /* may be someone waiting for it */
					    PAGE_WAKEUP(pp);
					    vm_page_lock_queues();
					    vm_page_free(pp);
					    vm_page_unlock_queues();
					}
					vm_page_rename(p, object, new_offset);
				    }
				}
			}

			/*
			 *	Move the pager from backing_object to object.
			 *
			 *	XXX We're only using part of the paging space
			 *	for keeps now... we ought to discard the
			 *	unused portion.
			 */

			object->pager = backing_object->pager;
			object->paging_offset += backing_offset;

			backing_object->pager = NULL;

			/*
			 *	Object now shadows whatever backing_object did.
			 *	Note that the reference to backing_object->shadow
			 *	moves from within backing_object to within object.
			 */

			object->shadow = backing_object->shadow;
			object->shadow_offset += backing_object->shadow_offset;
			if (object->shadow != NULL &&
			    object->shadow->copy != NULL) {
				panic("vm_object_collapse: we collapsed a copy-object!");
			}
			/*
			 *	Discard backing_object.
			 *
			 *	Since the backing object has no pages, no
			 *	pager left, and no object references within it,
			 *	all that is necessary is to dispose of it.
			 */

			vm_object_unlock(backing_object);

			simple_lock(&vm_object_list_lock);
			queue_remove(&vm_object_list, backing_object,
						vm_object_t, object_list);
			vm_object_count--;
			simple_unlock(&vm_object_list_lock);

			free((caddr_t)backing_object, M_VMOBJ);

			object_collapses++;
		}
		else {
			/*
			 *	If all of the pages in the backing object are
			 *	shadowed by the parent object, the parent
			 *	object no longer has to shadow the backing
			 *	object; it can shadow the next one in the
			 *	chain.
			 *
			 *	The backing object must not be paged out - we'd
			 *	have to check all of the paged-out pages, as
			 *	well.
			 */

			if (backing_object->pager != NULL) {
				vm_object_unlock(backing_object);
				return;
			}

			/*
			 *	Should have a check for a 'small' number
			 *	of pages here.
			 */

			p = (vm_page_t) queue_first(&backing_object->memq);
			while (!queue_end(&backing_object->memq,
					  (queue_entry_t) p)) {

				new_offset = (p->offset - backing_offset);

				/*
				 *	If the parent has a page here, or if
				 *	this page falls outside the parent,
				 *	keep going.
				 *
				 *	Otherwise, the backing_object must be
				 *	left in the chain.
				 */

				if (p->offset >= backing_offset &&
				    new_offset <= size &&
				    ((pp = vm_page_lookup(object, new_offset))
				      == NULL ||
				     pp->fake)) {
					/*
					 *	Page still needed.
					 *	Can't go any further.
					 */
					vm_object_unlock(backing_object);
					return;
				}
				p = (vm_page_t) queue_next(&p->listq);
			}

			/*
			 *	Make the parent shadow the next object
			 *	in the chain.  Deallocating backing_object
			 *	will not remove it, since its reference
			 *	count is at least 2.
			 */

			vm_object_reference(object->shadow = backing_object->shadow);
			object->shadow_offset += backing_object->shadow_offset;

			/*	Drop the reference count on backing_object.
			 *	Since its ref_count was at least 2, it
			 *	will not vanish; so we don't need to call
			 *	vm_object_deallocate.
			 */
			backing_object->ref_count--;
			vm_object_unlock(backing_object);

			object_bypasses++;

		}

		/*
		 *	Try again with this object's new backing object.
		 */
	}
}
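
/*
 * Illustrative note (not part of the original source): the two
 * outcomes above, for a chain "object -> backing -> next".
 *
 *	collapse (backing ref_count == 1):
 *		object absorbs backing's pages and pager, and the
 *		chain becomes "object -> next".
 *
 *	bypass (backing still referenced elsewhere, but every page the
 *	parent could need is already resident in the parent):
 *		object points directly at next; backing keeps its
 *		other references.
 */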

/*
 *	vm_object_page_remove: [internal]
 *
 *	Removes all physical pages in the specified
 *	object range from the object's list of pages.
 *
 *	The object must be locked.
 */
void vm_object_page_remove(object, start, end)
	register vm_object_t	object;
	register vm_offset_t	start;
	register vm_offset_t	end;
{
	register vm_page_t	p, next;

	if (object == NULL)
		return;

	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		next = (vm_page_t) queue_next(&p->listq);
		if ((start <= p->offset) && (p->offset < end)) {
			pmap_remove_all(VM_PAGE_TO_PHYS(p));
			vm_page_lock_queues();
			vm_page_free(p);
			vm_page_unlock_queues();
		}
		p = next;
	}
}

/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_offset	Offset into prev_object
 *		next_object	Second object to coalesce
 *		next_offset	Offset into next_object
 *
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to next_object
 *
 *	Conditions:
 *	The object must *not* be locked.
 */
boolean_t vm_object_coalesce(prev_object, next_object,
			prev_offset, next_offset,
			prev_size, next_size)

	register vm_object_t	prev_object;
	vm_object_t	next_object;
	vm_offset_t	prev_offset, next_offset;
	vm_size_t	prev_size, next_size;
{
	vm_size_t	newsize;

#ifdef	lint
	next_offset++;
#endif	lint

	if (next_object != NULL) {
		return(FALSE);
	}

	if (prev_object == NULL) {
		return(TRUE);
	}

	vm_object_lock(prev_object);

	/*
	 *	Try to collapse the object first
	 */
	vm_object_collapse(prev_object);

	/*
	 *	Can't coalesce if:
	 *	. more than one reference
	 *	. paged out
	 *	. shadows another object
	 *	. has a copy elsewhere
	 *	(any of which mean that the pages not mapped to
	 *	prev_entry may be in use anyway)
	 */

	if (prev_object->ref_count > 1 ||
		prev_object->pager != NULL ||
		prev_object->shadow != NULL ||
		prev_object->copy != NULL) {
		vm_object_unlock(prev_object);
		return(FALSE);
	}

	/*
	 *	Remove any pages that may still be in the object from
	 *	a previous deallocation.
	 */

	vm_object_page_remove(prev_object,
			prev_offset + prev_size,
			prev_offset + prev_size + next_size);

	/*
	 *	Extend the object if necessary.
	 */
	newsize = prev_offset + prev_size + next_size;
	if (newsize > prev_object->size)
		prev_object->size = newsize;

	vm_object_unlock(prev_object);
	return(TRUE);
}
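
/*
 * Illustrative sketch (not part of the original source): how a
 * hypothetical map layer might grow an anonymous region in place by
 * coalescing newly allocated space onto the object backing the
 * previous map entry.
 */
#ifdef notdef
boolean_t
vm_object_grow_example(prev_object, prev_offset, prev_size, grow_size)
	vm_object_t	prev_object;
	vm_offset_t	prev_offset;
	vm_size_t	prev_size, grow_size;
{
	/*
	 * A NULL next_object means the new range has no backing
	 * object yet; that is the only case handled today.
	 */
	return(vm_object_coalesce(prev_object, NULL,
			prev_offset, (vm_offset_t) 0,
			prev_size, grow_size));
}
#endif notdef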

/*
 *	vm_object_print:	[ debug ]
 */
void vm_object_print(object, full)
	vm_object_t	object;
	boolean_t	full;
{
	register vm_page_t	p;
	extern indent;

	register int count;

	if (object == NULL)
		return;

	iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ",
		(int) object, (int) object->size,
		object->resident_page_count, object->ref_count);
	printf("pager=0x%x+0x%x, shadow=(0x%x)+0x%x\n",
	       (int) object->pager, (int) object->paging_offset,
	       (int) object->shadow, (int) object->shadow_offset);
	printf("cache: next=0x%x, prev=0x%x\n",
	       object->cached_list.next, object->cached_list.prev);

	if (!full)
		return;

	indent += 2;
	count = 0;
	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		if (count == 0)
			iprintf("memory:=");
		else if (count == 6) {
			printf("\n");
			iprintf(" ...");
			count = 0;
		} else
			printf(",");
		count++;

		printf("(off=0x%x,page=0x%x)", p->offset, VM_PAGE_TO_PHYS(p));
		p = (vm_page_t) queue_next(&p->listq);
	}
	if (count != 0)
		printf("\n");
	indent -= 2;
}
1405