xref: /original-bsd/sys/vm/vm_pageout.c (revision ba762ddc)
1 /*
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * %sccs.include.redist.c%
9  *
10  *	@(#)vm_pageout.c	7.3 (Berkeley) 04/21/91
11  *
12  *
13  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
14  * All rights reserved.
15  *
16  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
17  *
18  * Permission to use, copy, modify and distribute this software and
19  * its documentation is hereby granted, provided that both the copyright
20  * notice and this permission notice appear in all copies of the
21  * software, derivative works or modified versions, and any portions
22  * thereof, and that both notices appear in supporting documentation.
23  *
24  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
25  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
26  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
27  *
28  * Carnegie Mellon requests users of this software to return to
29  *
30  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
31  *  School of Computer Science
32  *  Carnegie Mellon University
33  *  Pittsburgh PA 15213-3890
34  *
35  * any improvements or extensions that they make and grant Carnegie the
36  * rights to redistribute these changes.
37  */
38 
39 /*
40  *	The proverbial page-out daemon.
41  */
42 
43 #include "param.h"
44 
45 #include "vm.h"
46 #include "vm_page.h"
47 #include "vm_pageout.h"
48 
int	vm_pages_needed;		/* Event on which pageout daemon sleeps */
int	vm_pageout_free_min = 0;	/* Stop pageout to wait for pagers at this free level */

int	vm_page_free_min_sanity = 40;	/* Upper bound clamped onto the computed vm_page_free_min */
53 
/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 *
 *	Called from vm_pageout() with no locks held.  Scans the inactive
 *	queue, freeing clean unreferenced pages and starting pageout on
 *	dirty ("laundry") pages, until the free count reaches
 *	vm_page_free_target or the queue is exhausted.  Afterwards it
 *	deactivates active pages to cover any inactive-queue shortage.
 *	Acquires and releases the page-queue lock itself.
 */
vm_pageout_scan()
{
	register vm_page_t	m;
	register int		page_shortage;
	register int		s;		/* saved spl while sampling the free count */
	register int		pages_freed;
	int			free;		/* snapshot of vm_page_free_count */

	/*
	 *	Only continue when we want more pages to be "free"
	 */

	s = splimp();
	simple_lock(&vm_page_queue_free_lock);
	free = vm_page_free_count;
	simple_unlock(&vm_page_queue_free_lock);
	splx(s);

	if (free < vm_page_free_target) {
		swapout_threads();

		/*
		 *	Be sure the pmap system is updated so
		 *	we can scan the inactive queue.
		 */

		pmap_update();
	}

	/*
	 *	Acquire the resident page system lock,
	 *	as we may be changing what's resident quite a bit.
	 */
	vm_page_lock_queues();

	/*
	 *	Start scanning the inactive queue for pages we can free.
	 *	We keep scanning until we have enough free pages or
	 *	we have scanned through the entire queue.  If we
	 *	encounter dirty pages, we start cleaning them.
	 */

	pages_freed = 0;
	m = (vm_page_t) queue_first(&vm_page_queue_inactive);
	while (!queue_end(&vm_page_queue_inactive, (queue_entry_t) m)) {
		vm_page_t	next;

		/* Re-sample the free count each iteration; other threads
		 * may be consuming pages while we scan. */
		s = splimp();
		simple_lock(&vm_page_queue_free_lock);
		free = vm_page_free_count;
		simple_unlock(&vm_page_queue_free_lock);
		splx(s);

		if (free >= vm_page_free_target)
			break;

		if (m->clean) {
			/* Capture the successor now: vm_page_free() below
			 * dequeues m, after which queue_next(&m->pageq)
			 * would be invalid. */
			next = (vm_page_t) queue_next(&m->pageq);
			if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
				vm_page_activate(m);
				vm_stat.reactivations++;
			}
			else {
				register vm_object_t	object;
				object = m->object;
				if (!vm_object_lock_try(object)) {
					/*
					 *	Can't lock object -
					 *	skip page.
					 */
					m = next;
					continue;
				}
				/* Tear down all hardware mappings before the
				 * page goes back on the free list. */
				pmap_remove_all(VM_PAGE_TO_PHYS(m));
				vm_page_free(m);	/* will dequeue */
				pages_freed++;
				vm_object_unlock(object);
			}
			m = next;
		}
		else {
			/*
			 *	If a page is dirty, then it is either
			 *	being washed (but not yet cleaned)
			 *	or it is still in the laundry.  If it is
			 *	still in the laundry, then we start the
			 *	cleaning operation.
			 */

			if (m->laundry) {
				/*
				 *	Clean the page and remove it from the
				 *	laundry.
				 *
				 *	We set the busy bit to cause
				 *	potential page faults on this page to
				 *	block.
				 *
				 *	And we set pageout-in-progress to keep
				 *	the object from disappearing during
				 *	pageout.  This guarantees that the
				 *	page won't move from the inactive
				 *	queue.  (However, any other page on
				 *	the inactive queue may move!)
				 */

				register vm_object_t	object;
				register vm_pager_t	pager;
				int			pageout_status;

				object = m->object;
				if (!vm_object_lock_try(object)) {
					/*
					 *	Skip page if we can't lock
					 *	its object
					 */
					m = (vm_page_t) queue_next(&m->pageq);
					continue;
				}

				pmap_remove_all(VM_PAGE_TO_PHYS(m));
				m->busy = TRUE;
				vm_stat.pageouts++;

				/*
				 *	Try to collapse the object before
				 *	making a pager for it.  We must
				 *	unlock the page queues first.
				 */
				vm_page_unlock_queues();

				vm_object_collapse(object);

				object->paging_in_progress++;
				vm_object_unlock(object);

				/*
				 *	Do a wakeup here in case the following
				 *	operations block.
				 */
				thread_wakeup((int) &vm_page_free_count);

				/*
				 *	If there is no pager for the page,
				 *	use the default pager.  If there's
				 *	no place to put the page at the
				 *	moment, leave it in the laundry and
				 *	hope that there will be paging space
				 *	later.
				 */

				if ((pager = object->pager) == NULL) {
					pager = vm_pager_allocate(PG_DFLT,
								  (caddr_t)0,
								  object->size,
								  VM_PROT_ALL);
					if (pager != NULL) {
						vm_object_setpager(object,
							pager, 0, FALSE);
					}
				}
				/* NOTE(review): third arg FALSE presumably
				 * means an asynchronous put — confirm against
				 * the vm_pager_put() interface. */
				pageout_status = pager ?
					vm_pager_put(pager, m, FALSE) :
					VM_PAGER_FAIL;
				vm_object_lock(object);
				vm_page_lock_queues();
				/* Queues were unlocked across the pager call;
				 * only now is it safe to read the successor. */
				next = (vm_page_t) queue_next(&m->pageq);

				switch (pageout_status) {
				case VM_PAGER_OK:
				case VM_PAGER_PEND:
					m->laundry = FALSE;
					break;
				case VM_PAGER_BAD:
					/*
					 * Page outside of range of object.
					 * Right now we essentially lose the
					 * changes by pretending it worked.
					 * XXX dubious, what should we do?
					 */
					m->laundry = FALSE;
					m->clean = TRUE;
					pmap_clear_modify(VM_PAGE_TO_PHYS(m));
					break;
				case VM_PAGER_FAIL:
					/*
					 * If page couldn't be paged out, then
					 * reactivate the page so it doesn't
					 * clog the inactive list.  (We will
					 * try paging it out again later).
					 */
					vm_page_activate(m);
					break;
				}

				pmap_clear_reference(VM_PAGE_TO_PHYS(m));
				m->busy = FALSE;
				PAGE_WAKEUP(m);

				/*
				 * If the operation is still going, leave the
				 * paging in progress indicator set so that we
				 * don't attempt an object collapse.
				 */
				if (pageout_status != VM_PAGER_PEND)
					object->paging_in_progress--;
				thread_wakeup((int) object);
				vm_object_unlock(object);
				m = next;
			}
			else
				m = (vm_page_t) queue_next(&m->pageq);
		}
	}

	/*
	 *	Compute the page shortage.  If we are still very low on memory
	 *	be sure that we will move a minimal amount of pages from active
	 *	to inactive.
	 */

	page_shortage = vm_page_inactive_target - vm_page_inactive_count;
	page_shortage -= vm_page_free_count;

	/* If the scan freed nothing and there is no shortage, deactivate
	 * at least one page anyway so the system keeps making progress. */
	if ((page_shortage <= 0) && (pages_freed == 0))
		page_shortage = 1;

	while (page_shortage > 0) {
		/*
		 *	Move some more pages from active to inactive.
		 */

		if (queue_empty(&vm_page_queue_active)) {
			break;
		}
		m = (vm_page_t) queue_first(&vm_page_queue_active);
		vm_page_deactivate(m);
		page_shortage--;
	}

	vm_page_unlock_queues();
}
299 
300 /*
301  *	vm_pageout is the high level pageout daemon.
302  */
303 
304 void vm_pageout()
305 {
306 	(void) spl0();
307 
308 	/*
309 	 *	Initialize some paging parameters.
310 	 */
311 
312 	if (vm_page_free_min == 0) {
313 		vm_page_free_min = vm_page_free_count / 20;
314 		if (vm_page_free_min < 3)
315 			vm_page_free_min = 3;
316 
317 		if (vm_page_free_min > vm_page_free_min_sanity)
318 			vm_page_free_min = vm_page_free_min_sanity;
319 	}
320 
321 	if (vm_page_free_reserved == 0) {
322 		if ((vm_page_free_reserved = vm_page_free_min / 2) < 10)
323 			vm_page_free_reserved = 10;
324 	}
325 	if (vm_pageout_free_min == 0) {
326 		if ((vm_pageout_free_min = vm_page_free_reserved / 2) > 10)
327 			vm_pageout_free_min = 10;
328 	}
329 
330 	if (vm_page_free_target == 0)
331 		vm_page_free_target = (vm_page_free_min * 4) / 3;
332 
333 	if (vm_page_inactive_target == 0)
334 		vm_page_inactive_target = vm_page_free_min * 2;
335 
336 	if (vm_page_free_target <= vm_page_free_min)
337 		vm_page_free_target = vm_page_free_min + 1;
338 
339 	if (vm_page_inactive_target <= vm_page_free_target)
340 		vm_page_inactive_target = vm_page_free_target + 1;
341 
342 	/*
343 	 *	The pageout daemon is never done, so loop
344 	 *	forever.
345 	 */
346 
347 	simple_lock(&vm_pages_needed_lock);
348 	while (TRUE) {
349 		thread_sleep((int) &vm_pages_needed, &vm_pages_needed_lock,
350 			     FALSE);
351 		vm_pageout_scan();
352 		vm_pager_sync();
353 		simple_lock(&vm_pages_needed_lock);
354 		thread_wakeup((int) &vm_page_free_count);
355 	}
356 }
357