xref: /original-bsd/sys/vm/vm_pageout.c (revision f3c03cba)
1 /*
2  * Copyright (c) 1985, Avadis Tevanian, Jr., Michael Wayne Young
3  * Copyright (c) 1987 Carnegie-Mellon University
4  * Copyright (c) 1991 Regents of the University of California.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * The Mach Operating System project at Carnegie-Mellon University.
9  *
10  * The CMU software License Agreement specifies the terms and conditions
11  * for use and redistribution.
12  *
13  *	@(#)vm_pageout.c	7.1 (Berkeley) 12/05/90
14  */
15 
16 /*
17  *	The proverbial page-out daemon.
18  */
19 
20 #include "types.h"
21 #include "../vm/vm_page.h"
22 #include "../vm/pmap.h"
23 #include "../vm/vm_object.h"
24 #include "../vm/vm_pageout.h"
25 #include "../vm/vm_statistics.h"
26 #include "../vm/vm_param.h"
27 
28 int	vm_pages_needed;		/* Event on which pageout daemon sleeps */
29 int	vm_pageout_free_min = 0;	/* Stop pageout to wait for pagers at this free level */
30 
/* Upper bound applied to the auto-computed vm_page_free_min in vm_pageout(). */
31 int	vm_page_free_min_sanity = 40;
33 /*
34  *	vm_pageout_scan does the dirty work for the pageout daemon.
35  */
/*
 * Reclaim memory by scanning the inactive queue: free clean unreferenced
 * pages, start pageout ("laundering") of dirty ones, then deactivate
 * active pages to refill the inactive queue.  Called from vm_pageout()
 * with no locks held; acquires/releases the page-queue, free-queue and
 * per-object locks internally.  No return value (old-style K&R, implicit int).
 */
36 vm_pageout_scan()
37 {
38 	register vm_page_t	m;
39 	register int		page_shortage;
40 	register int		s;
41 	register int		pages_freed;
42 	int			free;
43 
44 	/*
45 	 *	Only continue when we want more pages to be "free"
46 	 */
47 
	/*
	 * Sample the free-page count under splimp + the free-queue lock.
	 * The snapshot may be stale by the time it is used; it only
	 * serves as a heuristic for whether reclamation is needed.
	 */
48 	s = splimp();
49 	simple_lock(&vm_page_queue_free_lock);
50 	free = vm_page_free_count;
51 	simple_unlock(&vm_page_queue_free_lock);
52 	splx(s);
53 
54 	if (free < vm_page_free_target) {
		/*
		 * Below the free target: try swapping out entire idle
		 * threads before resorting to page-at-a-time reclamation.
		 */
55 		swapout_threads();
56 
57 		/*
58 		 *	Be sure the pmap system is updated so
59 		 *	we can scan the inactive queue.
60 		 */
61 
62 		pmap_update();
63 	}
64 
65 	/*
66 	 *	Acquire the resident page system lock,
67 	 *	as we may be changing what's resident quite a bit.
68 	 */
69 	vm_page_lock_queues();
70 
71 	/*
72 	 *	Start scanning the inactive queue for pages we can free.
73 	 *	We keep scanning until we have enough free pages or
74 	 *	we have scanned through the entire queue.  If we
75 	 *	encounter dirty pages, we start cleaning them.
76 	 */
77 
78 	pages_freed = 0;
79 	m = (vm_page_t) queue_first(&vm_page_queue_inactive);
80 	while (!queue_end(&vm_page_queue_inactive, (queue_entry_t) m)) {
81 		vm_page_t	next;
82 
		/*
		 * Re-sample the free count on every iteration so we stop
		 * as soon as the target has been reached (possibly by
		 * frees done elsewhere while we were scanning).
		 */
83 		s = splimp();
84 		simple_lock(&vm_page_queue_free_lock);
85 		free = vm_page_free_count;
86 		simple_unlock(&vm_page_queue_free_lock);
87 		splx(s);
88 
89 		if (free >= vm_page_free_target)
90 			break;
91 
		/*
		 * Clean page: if it has been referenced since it was
		 * deactivated, give it another pass on the active queue;
		 * otherwise it can be freed outright.
		 */
92 		if (m->clean) {
93 			next = (vm_page_t) queue_next(&m->pageq);
94 			if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
95 				vm_page_activate(m);
96 				vm_stat.reactivations++;
97 			}
98 			else {
99 				register vm_object_t	object;
100 				object = m->object;
101 				if (!vm_object_lock_try(object)) {
102 					/*
103 					 *	Can't lock object -
104 					 *	skip page.
105 					 */
106 					m = next;
107 					continue;
108 				}
				/*
				 * Strip every mapping first so nobody can
				 * touch the page while it goes back to the
				 * free list.
				 */
109 				pmap_remove_all(VM_PAGE_TO_PHYS(m));
110 				vm_page_free(m);	/* will dequeue */
111 				pages_freed++;
112 				vm_object_unlock(object);
113 			}
114 			m = next;
115 		}
116 		else {
117 			/*
118 			 *	If a page is dirty, then it is either
119 			 *	being washed (but not yet cleaned)
120 			 *	or it is still in the laundry.  If it is
121 			 *	still in the laundry, then we start the
122 			 *	cleaning operation.
123 			 */
124 
125 			if (m->laundry) {
126 				/*
127 				 *	Clean the page and remove it from the
128 				 *	laundry.
129 				 *
130 				 *	We set the busy bit to cause
131 				 *	potential page faults on this page to
132 				 *	block.
133 				 *
134 				 *	And we set pageout-in-progress to keep
135 				 *	the object from disappearing during
136 				 *	pageout.  This guarantees that the
137 				 *	page won't move from the inactive
138 				 *	queue.  (However, any other page on
139 				 *	the inactive queue may move!)
140 				 */
141 
142 				register vm_object_t	object;
143 				register vm_pager_t	pager;
144 				int			pageout_status;
145 
146 				object = m->object;
147 				if (!vm_object_lock_try(object)) {
148 					/*
149 					 *	Skip page if we can't lock
150 					 *	its object
151 					 */
152 					m = (vm_page_t) queue_next(&m->pageq);
153 					continue;
154 				}
155 
156 				pmap_remove_all(VM_PAGE_TO_PHYS(m));
157 				m->busy = TRUE;
158 				vm_stat.pageouts++;
159 
160 				/*
161 				 *	Try to collapse the object before
162 				 *	making a pager for it.  We must
163 				 *	unlock the page queues first.
164 				 */
165 				vm_page_unlock_queues();
166 
167 				vm_object_collapse(object);
168 
169 				object->paging_in_progress++;
170 				vm_object_unlock(object);
171 
172 				/*
173 				 *	Do a wakeup here in case the following
174 				 *	operations block.
175 				 */
176 				thread_wakeup((int) &vm_page_free_count);
177 
178 				/*
179 				 *	If there is no pager for the page,
180 				 *	use the default pager.  If there's
181 				 *	no place to put the page at the
182 				 *	moment, leave it in the laundry and
183 				 *	hope that there will be paging space
184 				 *	later.
185 				 */
186 
187 				if ((pager = object->pager) == vm_pager_null) {
188 					pager = vm_pager_allocate(PG_DFLT,
189 								  (caddr_t)0,
190 								  object->size,
191 								  VM_PROT_ALL);
192 					if (pager != vm_pager_null) {
193 						vm_object_setpager(object,
194 							pager, 0, FALSE);
195 					}
196 				}
				/*
				 * No pager could be found/created: report
				 * FAIL so the page is reactivated below
				 * instead of clogging the inactive queue.
				 */
197 				pageout_status = pager ?
198 					vm_pager_put(pager, m, FALSE) :
199 					VM_PAGER_FAIL;
200 				vm_object_lock(object);
201 				vm_page_lock_queues();
				/*
				 * Safe to take the successor only now:
				 * paging_in_progress kept this page on the
				 * inactive queue while the queues were
				 * unlocked (see the comment above).
				 */
202 				next = (vm_page_t) queue_next(&m->pageq);
203 
204 				switch (pageout_status) {
205 				case VM_PAGER_OK:
206 				case VM_PAGER_PEND:
207 					m->laundry = FALSE;
208 					break;
209 				case VM_PAGER_BAD:
210 					/*
211 					 * Page outside of range of object.
212 					 * Right now we essentially lose the
213 					 * changes by pretending it worked.
214 					 * XXX dubious, what should we do?
215 					 */
216 					m->laundry = FALSE;
217 					m->clean = TRUE;
218 					pmap_clear_modify(VM_PAGE_TO_PHYS(m));
219 					break;
220 				case VM_PAGER_FAIL:
221 					/*
222 					 * If page couldn't be paged out, then
223 					 * reactivate the page so it doesn't
224 					 * clog the inactive list.  (We will
225 					 * try paging out it again later).
226 					 */
227 					vm_page_activate(m);
228 					break;
229 				}
230 
				/*
				 * Clear the pmap reference bit so later
				 * scans start with a fresh reference
				 * history, then unbusy the page and wake
				 * any faulting thread blocked on it.
				 */
231 				pmap_clear_reference(VM_PAGE_TO_PHYS(m));
232 				m->busy = FALSE;
233 				PAGE_WAKEUP(m);
234 
235 				/*
236 				 * If the operation is still going, leave the
237 				 * paging in progress indicator set so that we
238 				 * don't attempt an object collapse.
239 				 */
240 				if (pageout_status != VM_PAGER_PEND)
241 					object->paging_in_progress--;
242 				thread_wakeup((int) object);
243 				vm_object_unlock(object);
244 				m = next;
245 			}
246 			else
247 				m = (vm_page_t) queue_next(&m->pageq);
248 		}
249 	}
250 
251 	/*
252 	 *	Compute the page shortage.  If we are still very low on memory
253 	 *	be sure that we will move a minimal amount of pages from active
254 	 *	to inactive.
255 	 */
256 
	/* Free pages count toward the inactive target, hence the subtraction. */
257 	page_shortage = vm_page_inactive_target - vm_page_inactive_count;
258 	page_shortage -= vm_page_free_count;
259 
	/*
	 * If this pass freed nothing and no shortage was computed, force
	 * at least one deactivation so forward progress is made.
	 */
260 	if ((page_shortage <= 0) && (pages_freed == 0))
261 		page_shortage = 1;
262 
263 	while (page_shortage > 0) {
264 		/*
265 		 *	Move some more pages from active to inactive.
266 		 */
267 
268 		if (queue_empty(&vm_page_queue_active)) {
269 			break;
270 		}
271 		m = (vm_page_t) queue_first(&vm_page_queue_active);
272 		vm_page_deactivate(m);
273 		page_shortage--;
274 	}
275 
276 	vm_page_unlock_queues();
277 }
278 
279 /*
280  *	vm_pageout is the high level pageout daemon.
281  */
282 
/*
 * Entry point of the pageout daemon thread: derive the paging thresholds
 * from the boot-time free-page count (unless already patched non-zero),
 * then loop forever, sleeping on vm_pages_needed and running a scan +
 * pager sync each time it is woken.  Never returns.
 */
283 void vm_pageout()
284 {
285 	(void) spl0();
286 
287 	/*
288 	 *	Initialize some paging parameters.
289 	 */
290 
	/*
	 * Each threshold is computed only if still zero, so values patched
	 * in before startup are respected.
	 */
291 	if (vm_page_free_min == 0) {
292 		vm_page_free_min = vm_page_free_count / 20;
293 		if (vm_page_free_min < 3)
294 			vm_page_free_min = 3;
295 
296 		if (vm_page_free_min > vm_page_free_min_sanity)
297 			vm_page_free_min = vm_page_free_min_sanity;
298 	}
299 
300 	if (vm_page_free_reserved == 0) {
301 		if ((vm_page_free_reserved = vm_page_free_min / 2) < 10)
302 			vm_page_free_reserved = 10;
303 	}
	/*
	 * Note the inverted comparison relative to the clause above: this
	 * assign-then-test CAPS vm_pageout_free_min at 10 pages.
	 */
304 	if (vm_pageout_free_min == 0) {
305 		if ((vm_pageout_free_min = vm_page_free_reserved / 2) > 10)
306 			vm_pageout_free_min = 10;
307 	}
308 
309 	if (vm_page_free_target == 0)
310 		vm_page_free_target = (vm_page_free_min * 4) / 3;
311 
312 	if (vm_page_inactive_target == 0)
313 		vm_page_inactive_target = vm_page_free_min * 2;
314 
	/* Enforce the ordering free_min < free_target < inactive_target. */
315 	if (vm_page_free_target <= vm_page_free_min)
316 		vm_page_free_target = vm_page_free_min + 1;
317 
318 	if (vm_page_inactive_target <= vm_page_free_target)
319 		vm_page_inactive_target = vm_page_free_target + 1;
320 
321 	/*
322 	 *	The pageout daemon is never done, so loop
323 	 *	forever.
324 	 */
325 
326 	simple_lock(&vm_pages_needed_lock);
327 	while (TRUE) {
		/*
		 * Sleep until someone signals vm_pages_needed.  NOTE(review):
		 * thread_sleep is presumably given the lock so it can drop it
		 * atomically while asleep (Mach convention) — confirm against
		 * the thread_sleep implementation.
		 */
328 		thread_sleep((int) &vm_pages_needed, &vm_pages_needed_lock,
329 			     FALSE);
330 		vm_pageout_scan();
331 		vm_pager_sync();
332 		simple_lock(&vm_pages_needed_lock);
		/* Let threads waiting for free pages know a pass completed. */
333 		thread_wakeup((int) &vm_page_free_count);
334 	}
335 }
336