/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_pageout.c	8.2 (Berkeley) 11/10/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	The proverbial page-out daemon.
 */

#include <sys/param.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

int	vm_pages_needed;	/* Event on which pageout daemon sleeps */

int	vm_page_free_min_sanity = 40;	/* upper bound on v_free_min */

int	vm_page_max_wired = 0;	/* XXX max # of wired pages system-wide */

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 */
void
vm_pageout_scan()
{
	register vm_page_t	m;
	register int		page_shortage;
	register int		s;
	register int		pages_freed;
	int			free;

	/*
	 *	Only continue when we want more pages to be "free"
	 */

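	/*
	 * The free count can change from interrupt level, so take a
	 * consistent snapshot under splimp and the free-queue lock.
	 */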
	s = splimp();
	simple_lock(&vm_page_queue_free_lock);
	free = cnt.v_free_count;
	simple_unlock(&vm_page_queue_free_lock);
	splx(s);

	if (free < cnt.v_free_target) {
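		/*
		 * Memory is short: first try to reclaim pages in bulk
		 * by swapping out idle processes.
		 */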
		swapout_threads();

		/*
		 *	Be sure the pmap system is updated so
		 *	we can scan the inactive queue.
		 */

		pmap_update();
	}

	/*
	 *	Acquire the resident page system lock,
	 *	as we may be changing what's resident quite a bit.
	 */
	vm_page_lock_queues();

	/*
	 *	Start scanning the inactive queue for pages we can free.
	 *	We keep scanning until we have enough free pages or
	 *	we have scanned through the entire queue.  If we
	 *	encounter dirty pages, we start cleaning them.
	 */

	pages_freed = 0;
	m = (vm_page_t) queue_first(&vm_page_queue_inactive);
	while (!queue_end(&vm_page_queue_inactive, (queue_entry_t) m)) {
		vm_page_t next;
		vm_object_t object;
		vm_pager_t pager;
		int pageout_status;

		s = splimp();
		simple_lock(&vm_page_queue_free_lock);
		free = cnt.v_free_count;
		simple_unlock(&vm_page_queue_free_lock);
		splx(s);

		if (free >= cnt.v_free_target)
			break;

		/*
		 * If the page has been referenced, move it back to the
		 * active queue.
		 */
		if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
			next = (vm_page_t) queue_next(&m->pageq);
			vm_page_activate(m);
			cnt.v_reactivated++;
			m = next;
			continue;
		}

		/*
		 * If the page is clean, free it up.  (If the object
		 * lock cannot be taken without blocking, skip the page;
		 * a later scan will revisit it.)
		 */
		if (m->flags & PG_CLEAN) {
			next = (vm_page_t) queue_next(&m->pageq);
			object = m->object;
			if (vm_object_lock_try(object)) {
				pmap_page_protect(VM_PAGE_TO_PHYS(m),
						  VM_PROT_NONE);
				vm_page_free(m);
				pages_freed++;
				vm_object_unlock(object);
			}
			m = next;
			continue;
		}

		/*
		 * If the page is dirty but already being washed, skip it.
		 */
		if ((m->flags & PG_LAUNDRY) == 0) {
			m = (vm_page_t) queue_next(&m->pageq);
			continue;
		}

		/*
		 * Otherwise the page is dirty and still in the laundry,
		 * so we start the cleaning operation and remove it from
		 * the laundry.
		 *
		 * We set the busy bit to cause potential page faults on
		 * this page to block.
		 *
		 * We also set pageout-in-progress to keep the object from
		 * disappearing during pageout.  This guarantees that the
		 * page won't move from the inactive queue.  (However, any
		 * other page on the inactive queue may move!)
		 */
		object = m->object;
		if (!vm_object_lock_try(object)) {
			m = (vm_page_t) queue_next(&m->pageq);
			continue;
		}
		pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
		m->flags |= PG_BUSY;
		cnt.v_pageouts++;

		/*
		 * Try to collapse the object before making a pager for it.
		 * We must unlock the page queues first.
		 */
		vm_page_unlock_queues();
		vm_object_collapse(object);

		object->paging_in_progress++;
		vm_object_unlock(object);

		/*
		 * Do a wakeup here in case the following operations block.
		 */
		thread_wakeup((int) &cnt.v_free_count);

		/*
		 * If there is no pager for the page, use the default pager.
		 * If there is no place to put the page at the moment,
		 * leave it in the laundry and hope that there will be
		 * paging space later.
		 */
		if ((pager = object->pager) == NULL) {
			pager = vm_pager_allocate(PG_DFLT, (caddr_t)0,
						  object->size, VM_PROT_ALL,
						  (vm_offset_t)0);
			if (pager != NULL)
				vm_object_setpager(object, pager, 0, FALSE);
		}
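		/*
		 * The put below is issued with a FALSE (asynchronous)
		 * final argument, so the pager may return VM_PAGER_PEND
		 * and complete the write later; that case is handled in
		 * the switch below.
		 */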
		pageout_status = pager ?
			vm_pager_put(pager, m, FALSE) : VM_PAGER_FAIL;
		vm_object_lock(object);
		vm_page_lock_queues();
		next = (vm_page_t) queue_next(&m->pageq);

		switch (pageout_status) {
		case VM_PAGER_OK:
		case VM_PAGER_PEND:
			m->flags &= ~PG_LAUNDRY;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 *
			 * XXX dubious, what should we do?
			 */
			m->flags &= ~PG_LAUNDRY;
			m->flags |= PG_CLEAN;
			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
			break;
		case VM_PAGER_FAIL:
		case VM_PAGER_ERROR:
			/*
			 * If the page couldn't be paged out, reactivate
			 * it so it doesn't clog the inactive list.  (We
			 * will try paging it out again later.)
			 */
			vm_page_activate(m);
			break;
		}

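		/*
		 * Clear the reference bit so that a later scan can tell
		 * whether the page has been touched again.
		 */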
		pmap_clear_reference(VM_PAGE_TO_PHYS(m));

		/*
		 * If the operation is still going, leave the page busy
		 * to block all other accesses.  Also, leave the paging
		 * in progress indicator set so that we don't attempt an
		 * object collapse.
		 */
		if (pageout_status != VM_PAGER_PEND) {
			m->flags &= ~PG_BUSY;
			PAGE_WAKEUP(m);
			object->paging_in_progress--;
		}
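		/*
		 * Wake up anyone sleeping on the object, e.g. a thread
		 * waiting for paging activity on it to finish.
		 */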
		thread_wakeup((int) object);
		vm_object_unlock(object);
		m = next;
	}

	/*
	 *	Compute the page shortage.  If we are still very low on
	 *	memory, make sure that we move at least a minimal number
	 *	of pages from the active queue to the inactive queue.
	 */

	page_shortage = cnt.v_inactive_target - cnt.v_inactive_count;
	if (page_shortage <= 0 && pages_freed == 0)
		page_shortage = 1;

	while (page_shortage > 0) {
		/*
		 *	Move some more pages from active to inactive.
		 */

		if (queue_empty(&vm_page_queue_active)) {
			break;
		}
		m = (vm_page_t) queue_first(&vm_page_queue_active);
		vm_page_deactivate(m);
		page_shortage--;
	}

	vm_page_unlock_queues();
}

/*
 *	vm_pageout is the high level pageout daemon.
 */

void vm_pageout()
{
	(void) spl0();

	/*
	 *	Initialize some paging parameters.
	 */

	if (cnt.v_free_min == 0) {
		cnt.v_free_min = cnt.v_free_count / 20;
		if (cnt.v_free_min < 3)
			cnt.v_free_min = 3;

		if (cnt.v_free_min > vm_page_free_min_sanity)
			cnt.v_free_min = vm_page_free_min_sanity;
	}

	if (cnt.v_free_target == 0)
		cnt.v_free_target = (cnt.v_free_min * 4) / 3;

	if (cnt.v_free_target <= cnt.v_free_min)
		cnt.v_free_target = cnt.v_free_min + 1;
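
	/*
	 * As a worked example: a machine that boots with 1000 free
	 * pages would get v_free_min = 1000 / 20 = 50, clamped to
	 * vm_page_free_min_sanity (40), and from that a default
	 * v_free_target of (40 * 4) / 3 = 53.
	 */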

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	/*
	 *	The pageout daemon is never done, so loop
	 *	forever.
	 */

	simple_lock(&vm_pages_needed_lock);
	while (TRUE) {
		thread_sleep((int) &vm_pages_needed, &vm_pages_needed_lock,
			     FALSE);
		/*
		 * Compute the inactive target for this scan.
		 * We need to keep a reasonable amount of memory in the
		 * inactive list to better simulate LRU behavior.
		 */
		cnt.v_inactive_target =
			(cnt.v_active_count + cnt.v_inactive_count) / 3;
		if (cnt.v_inactive_target <= cnt.v_free_target)
			cnt.v_inactive_target = cnt.v_free_target + 1;
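		/*
		 * E.g. 600 active and 300 inactive pages give a target
		 * of (600 + 300) / 3 = 300; the target is always kept
		 * above v_free_target so that deactivation stays ahead
		 * of freeing.
		 */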

		vm_pageout_scan();
		vm_pager_sync();
		simple_lock(&vm_pages_needed_lock);
		thread_wakeup((int) &cnt.v_free_count);
	}
}