xref: /original-bsd/sys/vm/vm_pageout.c (revision be1f24e8)
1 /*
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * %sccs.include.redist.c%
9  *
10  *	@(#)vm_pageout.c	7.9 (Berkeley) 10/01/92
11  *
12  *
13  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
14  * All rights reserved.
15  *
16  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
17  *
18  * Permission to use, copy, modify and distribute this software and
19  * its documentation is hereby granted, provided that both the copyright
20  * notice and this permission notice appear in all copies of the
21  * software, derivative works or modified versions, and any portions
22  * thereof, and that both notices appear in supporting documentation.
23  *
24  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
25  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
26  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
27  *
28  * Carnegie Mellon requests users of this software to return to
29  *
30  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
31  *  School of Computer Science
32  *  Carnegie Mellon University
33  *  Pittsburgh PA 15213-3890
34  *
35  * any improvements or extensions that they make and grant Carnegie the
36  * rights to redistribute these changes.
37  */
38 
39 /*
40  *	The proverbial page-out daemon.
41  */
42 
43 #include <sys/param.h>
44 
45 #include <vm/vm.h>
46 #include <vm/vm_page.h>
47 #include <vm/vm_pageout.h>
48 
int	vm_pages_needed;	/* Event on which pageout daemon sleeps */

int	vm_page_free_min_sanity = 40;	/* upper bound applied to the computed
					   cnt.v_free_min in vm_pageout() */

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 *
 *	One pass of the daemon: free clean, unreferenced pages from the
 *	inactive queue, start pageouts on dirty (laundry) ones, and then
 *	deactivate pages from the active queue to cover any inactive-page
 *	shortage.  Called from the daemon loop in vm_pageout().
 */
void
vm_pageout_scan()
{
	register vm_page_t	m;
	register int		page_shortage;
	register int		s;		/* saved spl for splx() */
	register int		pages_freed;
	int			free;		/* snapshot of cnt.v_free_count */

	/*
	 *	Only continue when we want more pages to be "free".
	 *	The free count is sampled at splimp with the free-queue
	 *	lock held so interrupt-time changes can't race the read.
	 */

	s = splimp();
	simple_lock(&vm_page_queue_free_lock);
	free = cnt.v_free_count;
	simple_unlock(&vm_page_queue_free_lock);
	splx(s);

	if (free < cnt.v_free_target) {
		swapout_threads();

		/*
		 *	Be sure the pmap system is updated so
		 *	we can scan the inactive queue.
		 */

		pmap_update();
	}

	/*
	 *	Acquire the resident page system lock,
	 *	as we may be changing what's resident quite a bit.
	 */
	vm_page_lock_queues();

	/*
	 *	Start scanning the inactive queue for pages we can free.
	 *	We keep scanning until we have enough free pages or
	 *	we have scanned through the entire queue.  If we
	 *	encounter dirty pages, we start cleaning them.
	 */

	pages_freed = 0;
	m = (vm_page_t) queue_first(&vm_page_queue_inactive);
	while (!queue_end(&vm_page_queue_inactive, (queue_entry_t) m)) {
		vm_page_t	next;

		/*
		 *	Re-sample the free count on every iteration so we
		 *	stop as soon as the target has been met.
		 */
		s = splimp();
		simple_lock(&vm_page_queue_free_lock);
		free = cnt.v_free_count;
		simple_unlock(&vm_page_queue_free_lock);
		splx(s);

		if (free >= cnt.v_free_target)
			break;

		if (m->flags & PG_CLEAN) {
			/*
			 *	Clean page: reactivate it if it has been
			 *	referenced since deactivation, otherwise free
			 *	it.  Fetch the successor first, since
			 *	vm_page_free() unlinks m from this queue.
			 */
			next = (vm_page_t) queue_next(&m->pageq);
			if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
				vm_page_activate(m);
				cnt.v_reactivated++;
			}
			else {
				register vm_object_t	object;
				object = m->object;
				if (!vm_object_lock_try(object)) {
					/*
					 *	Can't lock object -
					 *	skip page.
					 */
					m = next;
					continue;
				}
				/* Remove all mappings before freeing. */
				pmap_page_protect(VM_PAGE_TO_PHYS(m),
						  VM_PROT_NONE);
				vm_page_free(m);	/* will dequeue */
				pages_freed++;
				vm_object_unlock(object);
			}
			m = next;
		}
		else {
			/*
			 *	If a page is dirty, then it is either
			 *	being washed (but not yet cleaned)
			 *	or it is still in the laundry.  If it is
			 *	still in the laundry, then we start the
			 *	cleaning operation.
			 */

			if (m->flags & PG_LAUNDRY) {
				/*
				 *	Clean the page and remove it from the
				 *	laundry.
				 *
				 *	We set the busy bit to cause
				 *	potential page faults on this page to
				 *	block.
				 *
				 *	And we set pageout-in-progress to keep
				 *	the object from disappearing during
				 *	pageout.  This guarantees that the
				 *	page won't move from the inactive
				 *	queue.  (However, any other page on
				 *	the inactive queue may move!)
				 */

				register vm_object_t	object;
				register vm_pager_t	pager;
				int			pageout_status;

				object = m->object;
				if (!vm_object_lock_try(object)) {
					/*
					 *	Skip page if we can't lock
					 *	its object
					 */
					m = (vm_page_t) queue_next(&m->pageq);
					continue;
				}

				/*
				 *	Unmap the page everywhere so it can't
				 *	be dirtied further while it is being
				 *	written out.
				 */
				pmap_page_protect(VM_PAGE_TO_PHYS(m),
						  VM_PROT_NONE);
				m->flags |= PG_BUSY;
				cnt.v_pageouts++;

				/*
				 *	Try to collapse the object before
				 *	making a pager for it.  We must
				 *	unlock the page queues first.
				 */
				vm_page_unlock_queues();

				vm_object_collapse(object);

				object->paging_in_progress++;
				vm_object_unlock(object);

				/*
				 *	Do a wakeup here in case the following
				 *	operations block.
				 */
				thread_wakeup((int) &cnt.v_free_count);

				/*
				 *	If there is no pager for the page,
				 *	use the default pager.  If there's
				 *	no place to put the page at the
				 *	moment, leave it in the laundry and
				 *	hope that there will be paging space
				 *	later.
				 */

				if ((pager = object->pager) == NULL) {
					pager = vm_pager_allocate(PG_DFLT,
								  (caddr_t)0,
								  object->size,
								  VM_PROT_ALL);
					if (pager != NULL) {
						vm_object_setpager(object,
							pager, 0, FALSE);
					}
				}
				pageout_status = pager ?
					vm_pager_put(pager, m, FALSE) :
					VM_PAGER_FAIL;
				vm_object_lock(object);
				vm_page_lock_queues();
				/*
				 *	The queues were unlocked across the
				 *	pageout, so m's neighbors may have
				 *	changed; fetch the successor only now
				 *	that the queue lock is held again.
				 */
				next = (vm_page_t) queue_next(&m->pageq);

				switch (pageout_status) {
				case VM_PAGER_OK:
				case VM_PAGER_PEND:
					m->flags &= ~PG_LAUNDRY;
					break;
				case VM_PAGER_BAD:
					/*
					 * Page outside of range of object.
					 * Right now we essentially lose the
					 * changes by pretending it worked.
					 * XXX dubious, what should we do?
					 */
					m->flags &= ~PG_LAUNDRY;
					m->flags |= PG_CLEAN;
					pmap_clear_modify(VM_PAGE_TO_PHYS(m));
					break;
				case VM_PAGER_FAIL:
				case VM_PAGER_ERROR:
					/*
					 * If the page couldn't be paged out,
					 * reactivate it so it doesn't clog
					 * the inactive list.  (We will try
					 * paging it out again later.)
					 */
					vm_page_activate(m);
					break;
				}

				pmap_clear_reference(VM_PAGE_TO_PHYS(m));

				/*
				 * If the operation is still going, leave
				 * the page busy to block all other accesses.
				 * Also, leave the paging in progress
				 * indicator set so that we don't attempt an
				 * object collapse.
				 */
				if (pageout_status != VM_PAGER_PEND) {
					m->flags &= ~PG_BUSY;
					PAGE_WAKEUP(m);
					object->paging_in_progress--;
				}
				thread_wakeup((int) object);
				vm_object_unlock(object);
				m = next;
			}
			else
				m = (vm_page_t) queue_next(&m->pageq);
		}
	}

	/*
	 *	Compute the page shortage.  If we are still very low on memory
	 *	be sure that we will move a minimal amount of pages from active
	 *	to inactive.
	 */

	page_shortage = cnt.v_inactive_target - cnt.v_inactive_count;
	page_shortage -= cnt.v_free_count;

	/* If nothing was freed above, force at least one deactivation. */
	if ((page_shortage <= 0) && (pages_freed == 0))
		page_shortage = 1;

	while (page_shortage > 0) {
		/*
		 *	Move some more pages from active to inactive.
		 */

		if (queue_empty(&vm_page_queue_active)) {
			break;
		}
		m = (vm_page_t) queue_first(&vm_page_queue_active);
		vm_page_deactivate(m);
		page_shortage--;
	}

	vm_page_unlock_queues();
}
305 
306 /*
307  *	vm_pageout is the high level pageout daemon.
308  */
309 
310 void vm_pageout()
311 {
312 	(void) spl0();
313 
314 	/*
315 	 *	Initialize some paging parameters.
316 	 */
317 
318 	if (cnt.v_free_min == 0) {
319 		cnt.v_free_min = cnt.v_free_count / 20;
320 		if (cnt.v_free_min < 3)
321 			cnt.v_free_min = 3;
322 
323 		if (cnt.v_free_min > vm_page_free_min_sanity)
324 			cnt.v_free_min = vm_page_free_min_sanity;
325 	}
326 
327 	if (cnt.v_free_target == 0)
328 		cnt.v_free_target = (cnt.v_free_min * 4) / 3;
329 
330 	if (cnt.v_inactive_target == 0)
331 		cnt.v_inactive_target = cnt.v_free_min * 2;
332 
333 	if (cnt.v_free_target <= cnt.v_free_min)
334 		cnt.v_free_target = cnt.v_free_min + 1;
335 
336 	if (cnt.v_inactive_target <= cnt.v_free_target)
337 		cnt.v_inactive_target = cnt.v_free_target + 1;
338 
339 	/*
340 	 *	The pageout daemon is never done, so loop
341 	 *	forever.
342 	 */
343 
344 	simple_lock(&vm_pages_needed_lock);
345 	while (TRUE) {
346 		thread_sleep((int) &vm_pages_needed, &vm_pages_needed_lock,
347 			     FALSE);
348 		vm_pageout_scan();
349 		vm_pager_sync();
350 		simple_lock(&vm_pages_needed_lock);
351 		thread_wakeup((int) &cnt.v_free_count);
352 	}
353 }
354