/*	$OpenBSD: uvm_pdaemon.c,v 1.74 2014/11/16 12:31:00 deraadt Exp $	*/
/*	$NetBSD: uvm_pdaemon.c,v 1.23 2000/08/20 10:24:14 bjh21 Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_pdaemon.c: the page daemon
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>

#ifdef HIBERNATE
#include <sys/hibernate.h>
#endif

#include <uvm/uvm.h>

/*
 * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
 * in a pass thru the inactive list when swap is full.  the value should be
 * "small"... if it's too large we'll cycle the active pages thru the inactive
 * queue too quickly for them to be referenced (and thus avoid being freed).
 */

#define UVMPD_NUMDIRTYREACTS 16


/*
 * local prototypes
 */

void		uvmpd_scan(void);
boolean_t	uvmpd_scan_inactive(struct pglist *);
void		uvmpd_tune(void);
void		uvmpd_drop(struct pglist *);

/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages
 *
 * => should be called with all locks released
 * => should _not_ be called by the page daemon (to avoid deadlock)
 */

void
uvm_wait(const char *wmsg)
{
	int	timo = 0;

	/* check for page daemon going to sleep (waiting for itself) */
	if (curproc == uvm.pagedaemon_proc) {
		printf("uvm_wait emergency bufbackoff\n");
		if (bufbackoff(NULL, 4) == 0)
			return;
		/*
		 * now we have a problem: the pagedaemon wants to go to
		 * sleep until it frees more memory.   but how can it
		 * free more memory if it is asleep?  that is a deadlock.
		 * we have two options:
		 *  [1] panic now
		 *  [2] put a timeout on the sleep, thus causing the
		 *      pagedaemon to only pause (rather than sleep forever)
		 *
		 * note that option [2] will only help us if we get lucky
		 * and some other process on the system breaks the deadlock
		 * by exiting or freeing memory (thus allowing the pagedaemon
		 * to continue).  for now we panic if DEBUG is defined,
		 * otherwise we hope for the best with option [2] (better
		 * yet, this should never happen in the first place!).
		 */

		printf("pagedaemon: deadlock detected!\n");
		timo = hz >> 3;		/* set timeout */
#if defined(DEBUG)
		/* DEBUG: panic so we can debug it */
		panic("pagedaemon deadlock");
#endif
	}

	uvm_lock_fpageq();
	wakeup(&uvm.pagedaemon);		/* wake the daemon! */
	msleep(&uvmexp.free, &uvm.fpageqlock, PVM | PNORELOCK, wmsg, timo);
}

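/*
 * Example caller pattern (a sketch only; real callers differ in
 * detail): retry an allocation until the daemon frees something.
 *
 *	struct vm_page *pg;
 *
 *	while ((pg = uvm_pagealloc(uobj, off, NULL, 0)) == NULL)
 *		uvm_wait("example");
 */
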
/*
 * uvmpd_tune: tune paging parameters
 *
 * => called whenever memory is added to (or removed from?) the system
 * => caller must call with page queues locked
 */

void
uvmpd_tune(void)
{

	uvmexp.freemin = uvmexp.npages / 30;

	/* between 16k and 512k */
	/* XXX:  what are these values good for? */
	uvmexp.freemin = max(uvmexp.freemin, (16*1024) >> PAGE_SHIFT);
#if 0
	uvmexp.freemin = min(uvmexp.freemin, (512*1024) >> PAGE_SHIFT);
#endif

	/* Make sure there's always a user page free. */
	if (uvmexp.freemin < uvmexp.reserve_kernel + 1)
		uvmexp.freemin = uvmexp.reserve_kernel + 1;

	uvmexp.freetarg = (uvmexp.freemin * 4) / 3;
	if (uvmexp.freetarg <= uvmexp.freemin)
		uvmexp.freetarg = uvmexp.freemin + 1;

	/* uvmexp.inactarg: computed in main daemon loop */

	uvmexp.wiredmax = uvmexp.npages / 3;
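
	/*
	 * Rough worked example (an illustration only, assuming 4 KB
	 * pages, i.e. PAGE_SHIFT == 12): with npages = 262144 (1 GB),
	 * freemin = 262144 / 30 = 8738 pages (~34 MB),
	 * freetarg = 8738 * 4 / 3 = 11650 pages, and
	 * wiredmax = 262144 / 3 = 87381 pages.
	 */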
}

/*
 * uvm_pageout: the main loop for the pagedaemon
 */
void
uvm_pageout(void *arg)
{
	struct uvm_constraint_range constraint;
	struct uvm_pmalloc *pma;
	int work_done;
	int npages = 0;

	/* ensure correct priority and set paging parameters... */
	uvm.pagedaemon_proc = curproc;
	(void) spl0();
	uvm_lock_pageq();
	npages = uvmexp.npages;
	uvmpd_tune();
	uvm_unlock_pageq();

	for (;;) {
		long size;
		work_done = 0; /* No work done this iteration. */

		uvm_lock_fpageq();

		if (TAILQ_EMPTY(&uvm.pmr_control.allocs)) {
			msleep(&uvm.pagedaemon, &uvm.fpageqlock, PVM,
			    "pgdaemon", 0);
			uvmexp.pdwoke++;
		}

		if ((pma = TAILQ_FIRST(&uvm.pmr_control.allocs)) != NULL) {
			pma->pm_flags |= UVM_PMA_BUSY;
			constraint = pma->pm_constraint;
		} else
			constraint = no_constraint;

		uvm_unlock_fpageq();

		/* now lock page queues and recompute inactive count */
		uvm_lock_pageq();
		if (npages != uvmexp.npages) {	/* check for new pages? */
			npages = uvmexp.npages;
			uvmpd_tune();
		}

		uvmexp.inactarg = (uvmexp.active + uvmexp.inactive) / 3;
		if (uvmexp.inactarg <= uvmexp.freetarg) {
			uvmexp.inactarg = uvmexp.freetarg + 1;
		}
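
		/*
		 * e.g. (hypothetical numbers): with 30000 active and
		 * 9000 inactive pages, inactarg = 13000 pages; the
		 * daemon will then try to grow the inactive list up to
		 * that size.
		 */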

		/* Reclaim pages from the buffer cache if possible. */
		size = 0;
		if (pma != NULL)
			size += pma->pm_size >> PAGE_SHIFT;
		if (uvmexp.free - BUFPAGES_DEFICIT < uvmexp.freetarg)
			size += uvmexp.freetarg - (uvmexp.free -
			    BUFPAGES_DEFICIT);
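		/*
		 * A hypothetical sizing example: with freetarg = 11650,
		 * free = 10000, no buffer-cache deficit and no pending
		 * pma, size = 1650 pages, so we ask bufbackoff() to
		 * release up to twice that many pages of buffers.
		 */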
		(void) bufbackoff(&constraint, size * 2);

		/* Scan if needed to meet our targets. */
		if (pma != NULL ||
		    ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg) ||
		    ((uvmexp.inactive + BUFPAGES_INACT) < uvmexp.inactarg)) {
			uvmpd_scan();
			work_done = 1; /* XXX we hope... */
		}

		/*
		 * if there's any free memory to be had,
		 * wake up any waiters.
		 */
		uvm_lock_fpageq();
		if (uvmexp.free > uvmexp.reserve_kernel ||
		    uvmexp.paging == 0) {
			wakeup(&uvmexp.free);
		}

		if (pma != NULL) {
			pma->pm_flags &= ~UVM_PMA_BUSY;
			if (!work_done)
				pma->pm_flags |= UVM_PMA_FAIL;
			if (pma->pm_flags & (UVM_PMA_FAIL | UVM_PMA_FREED)) {
				pma->pm_flags &= ~UVM_PMA_LINKED;
				TAILQ_REMOVE(&uvm.pmr_control.allocs, pma,
				    pmq);
			}
			wakeup(pma);
		}
		uvm_unlock_fpageq();

		/* scan done. unlock page queues (only lock we are holding) */
		uvm_unlock_pageq();

		sched_pause();
	}
	/*NOTREACHED*/
}


/*
 * uvm_aiodone_daemon:  main loop for the aiodone daemon.
 */
void
uvm_aiodone_daemon(void *arg)
{
	int s, free;
	struct buf *bp, *nbp;

	uvm.aiodoned_proc = curproc;

	for (;;) {
		/*
		 * Check for done aio structures. If we've got structures to
		 * process, do so. Otherwise sleep while avoiding races.
		 */
		mtx_enter(&uvm.aiodoned_lock);
		while ((bp = TAILQ_FIRST(&uvm.aio_done)) == NULL)
			msleep(&uvm.aiodoned, &uvm.aiodoned_lock,
			    PVM, "aiodoned", 0);
		/* Take the list for ourselves. */
		TAILQ_INIT(&uvm.aio_done);
		mtx_leave(&uvm.aiodoned_lock);

		/* process each i/o that's done. */
		free = uvmexp.free;
		while (bp != NULL) {
			if (bp->b_flags & B_PDAEMON) {
				uvmexp.paging -= bp->b_bufsize >> PAGE_SHIFT;
			}
			nbp = TAILQ_NEXT(bp, b_freelist);
			s = splbio();	/* b_iodone must be called at splbio */
			(*bp->b_iodone)(bp);
			splx(s);
			bp = nbp;

			sched_pause();
		}
		uvm_lock_fpageq();
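		/*
		 * If free memory is still at or below the kernel
		 * reserve, kick the page daemon again; otherwise wake
		 * anyone sleeping in uvm_wait().
		 */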
		wakeup(free <= uvmexp.reserve_kernel ? &uvm.pagedaemon :
		    &uvmexp.free);
		uvm_unlock_fpageq();
	}
}



/*
 * uvmpd_scan_inactive: scan an inactive list for pages to clean or free.
 *
 * => called with page queues locked
 * => we work on meeting our free target by converting inactive pages
 *    into free pages.
 * => we handle the building of swap-backed clusters
 * => we return TRUE if we are exiting because we met our target
 */

boolean_t
uvmpd_scan_inactive(struct pglist *pglst)
{
	boolean_t retval = FALSE;	/* assume we haven't hit target */
	int free, result;
	struct vm_page *p, *nextpg;
	struct uvm_object *uobj;
	struct vm_page *pps[MAXBSIZE >> PAGE_SHIFT], **ppsp;
	int npages;
	struct vm_page *swpps[MAXBSIZE >> PAGE_SHIFT]; 	/* XXX: see below */
	int swnpages, swcpages;				/* XXX: see below */
	int swslot;
	struct vm_anon *anon;
	boolean_t swap_backed;
	vaddr_t start;
	int dirtyreacts;

	/*
	 * note: we currently keep swap-backed pages on a separate inactive
	 * list from object-backed pages.   however, merging the two lists
	 * back together again hasn't been ruled out.   thus, we keep our
	 * swap cluster in "swpps" rather than in pps (allows us to mix
	 * clustering types in the event of a mixed inactive queue).
	 */
	/*
	 * swslot is non-zero if we are building a swap cluster.  we want
	 * to stay in the loop while we have a page to scan or we have
	 * a swap-cluster to build.
	 */
	swslot = 0;
	swnpages = swcpages = 0;
	free = 0;
	dirtyreacts = 0;

	for (p = TAILQ_FIRST(pglst); p != NULL || swslot != 0; p = nextpg) {
		/*
		 * note that p can be NULL iff we have traversed the whole
		 * list and need to do one final swap-backed clustered pageout.
		 */
		uobj = NULL;
		anon = NULL;

		if (p) {
			/*
			 * update our copy of "free" and see if we've met
			 * our target
			 */
			free = uvmexp.free - BUFPAGES_DEFICIT;

			if (free + uvmexp.paging >= uvmexp.freetarg << 2 ||
			    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
				retval = TRUE;

				if (swslot == 0) {
					/* exit now if no swap-i/o pending */
					break;
				}

				/* set p to null to signal final swap i/o */
				p = NULL;
			}
		}

		if (p) {	/* if (we have a new page to consider) */
			/*
			 * we are below target and have a new page to consider.
			 */
			uvmexp.pdscans++;
			nextpg = TAILQ_NEXT(p, pageq);

			/*
			 * move referenced pages back to active queue and
			 * skip to next page (unlikely to happen since
			 * inactive pages shouldn't have any valid mappings
			 * and we cleared reference before deactivating).
			 */

			if (pmap_is_referenced(p)) {
				uvm_pageactivate(p);
				uvmexp.pdreact++;
				continue;
			}

			/*
			 * the only time we expect to see an ownerless page
			 * (i.e. a page with no uobject and !PQ_ANON) is if an
			 * anon has loaned a page from a uvm_object and the
			 * uvm_object has dropped the ownership.  in that
			 * case, the anon can "take over" the loaned page
			 * and make it its own.
			 */

			/* is page part of an anon or ownerless ? */
			if ((p->pg_flags & PQ_ANON) || p->uobject == NULL) {
				anon = p->uanon;
				KASSERT(anon != NULL);

				/*
				 * if the page is ownerless, claim it in the
				 * name of "anon"!
				 */
				if ((p->pg_flags & PQ_ANON) == 0) {
					KASSERT(p->loan_count > 0);
					p->loan_count--;
					atomic_setbits_int(&p->pg_flags,
					    PQ_ANON);
					/* anon now owns it */
				}
				if (p->pg_flags & PG_BUSY) {
					uvmexp.pdbusy++;
					/* someone else owns page, skip it */
					continue;
				}
				uvmexp.pdanscan++;
			} else {
				uobj = p->uobject;
				KASSERT(uobj != NULL);
				if (p->pg_flags & PG_BUSY) {
					uvmexp.pdbusy++;
					/* someone else owns page, skip it */
					continue;
				}
				uvmexp.pdobscan++;
			}

			/*
			 * we now have the page queues locked.
			 * the page is not busy.   if the page is clean we
			 * can free it now and continue.
			 */
			if (p->pg_flags & PG_CLEAN) {
				if (p->pg_flags & PQ_SWAPBACKED) {
					/* this page now lives only in swap */
					uvmexp.swpgonly++;
				}

				/* zap all mappings with pmap_page_protect... */
				pmap_page_protect(p, PROT_NONE);
				uvm_pagefree(p);
				uvmexp.pdfreed++;

				if (anon) {

					/*
					 * an anonymous page can only be clean
					 * if it has backing store assigned.
					 */

					KASSERT(anon->an_swslot != 0);

					/* remove from object */
					anon->an_page = NULL;
				}
				continue;
			}

			/*
			 * this page is dirty, skip it if we'll have met our
			 * free target when all the current pageouts complete.
			 */
			if (free + uvmexp.paging > uvmexp.freetarg << 2) {
				continue;
			}

			/*
			 * this page is dirty, but we can't page it out:
			 * swap is full and every allocated swap page is
			 * swap-only, so no slot can be freed up for it.
			 * reactivate it so that we eventually cycle
			 * all pages thru the inactive queue.
			 */
			KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
			if ((p->pg_flags & PQ_SWAPBACKED) &&
			    uvmexp.swpgonly == uvmexp.swpages) {
				dirtyreacts++;
				uvm_pageactivate(p);
				continue;
			}

			/*
			 * if the page is swap-backed and dirty and swap space
			 * is full, free any swap allocated to the page
			 * so that other pages can be paged out.
			 */
			KASSERT(uvmexp.swpginuse <= uvmexp.swpages);
			if ((p->pg_flags & PQ_SWAPBACKED) &&
			    uvmexp.swpginuse == uvmexp.swpages) {

				if ((p->pg_flags & PQ_ANON) &&
				    p->uanon->an_swslot) {
					uvm_swap_free(p->uanon->an_swslot, 1);
					p->uanon->an_swslot = 0;
				}
				if (p->pg_flags & PQ_AOBJ) {
					uao_dropswap(p->uobject,
						     p->offset >> PAGE_SHIFT);
				}
			}

			/*
			 * the page we are looking at is dirty.   we must
			 * clean it before it can be freed.  to do this we
			 * first mark the page busy so that no one else will
			 * touch the page.   we write protect all the mappings
			 * of the page so that no one touches it while it is
			 * in I/O.
			 */

			swap_backed = ((p->pg_flags & PQ_SWAPBACKED) != 0);
			atomic_setbits_int(&p->pg_flags, PG_BUSY);
			UVM_PAGE_OWN(p, "scan_inactive");
			pmap_page_protect(p, PROT_READ);
			uvmexp.pgswapout++;

			/*
			 * for swap-backed pages we need to (re)allocate
			 * swap space.
			 */
			if (swap_backed) {
				/* free old swap slot (if any) */
				if (anon) {
					if (anon->an_swslot) {
						uvm_swap_free(anon->an_swslot,
						    1);
						anon->an_swslot = 0;
					}
				} else {
					uao_dropswap(uobj,
						     p->offset >> PAGE_SHIFT);
				}

				/* start new cluster (if necessary) */
				if (swslot == 0) {
					swnpages = MAXBSIZE >> PAGE_SHIFT;
					swslot = uvm_swap_alloc(&swnpages,
					    TRUE);
					if (swslot == 0) {
						/* no swap?  give up! */
						atomic_clearbits_int(
						    &p->pg_flags,
						    PG_BUSY);
						UVM_PAGE_OWN(p, NULL);
						continue;
					}
					swcpages = 0;	/* cluster is empty */
				}

				/* add block to cluster */
				swpps[swcpages] = p;
				if (anon)
					anon->an_swslot = swslot + swcpages;
				else
					uao_set_swslot(uobj,
					    p->offset >> PAGE_SHIFT,
					    swslot + swcpages);
				swcpages++;
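
				/*
				 * e.g. assuming MAXBSIZE is 64 KB and
				 * pages are 4 KB, a full cluster is 16
				 * pages occupying the consecutive swap
				 * slots [swslot, swslot + swnpages).
				 */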
			}
		} else {
			/* if p == NULL we must be doing a last swap i/o */
			swap_backed = TRUE;
		}

		/*
		 * now consider doing the pageout.
		 *
		 * for swap-backed pages, we do the pageout if we have either
		 * filled the cluster (in which case swnpages == swcpages) or
		 * run out of pages (p == NULL).
		 *
		 * for object pages, we always do the pageout.
		 */
		if (swap_backed) {
			if (p) {	/* if we just added a page to cluster */
				/* cluster not full yet? */
				if (swcpages < swnpages)
					continue;
			}

			/* starting I/O now... set up for it */
			npages = swcpages;
			ppsp = swpps;
			/* for swap-backed pages only */
			start = (vaddr_t) swslot;

			/* if this is the final pageout we could have a few
			 * extra swap blocks */
			if (swcpages < swnpages) {
				uvm_swap_free(swslot + swcpages,
				    (swnpages - swcpages));
			}
		} else {
			/* normal object pageout */
			ppsp = pps;
			npages = sizeof(pps) / sizeof(struct vm_page *);
			/* not looked at because PGO_ALLPAGES is set */
			start = 0;
		}

		/*
		 * now do the pageout.
		 *
		 * for swap_backed pages we have already built the cluster.
		 * for !swap_backed pages, uvm_pager_put will call the object's
		 * "make put cluster" function to build a cluster on our behalf.
		 *
		 * we pass the PGO_PDFREECLUST flag to uvm_pager_put to instruct
		 * it to free the cluster pages for us on a successful I/O (it
		 * always does this for un-successful I/O requests).  this
		 * allows us to do clustered pageout without having to deal
		 * with cluster pages at this level.
		 *
		 * note locking semantics of uvm_pager_put with PGO_PDFREECLUST:
		 *  IN: page queues locked
		 * OUT: page queues unlocked
		 */

		uvmexp.pdpageouts++;
		result = uvm_pager_put(swap_backed ? NULL : uobj, p,
		    &ppsp, &npages, PGO_ALLPAGES|PGO_PDFREECLUST, start, 0);

		/*
		 * if we did i/o to swap, zero swslot to indicate that we are
		 * no longer building a swap-backed cluster.
		 */

		if (swap_backed)
			swslot = 0;		/* done with this cluster */

		/*
		 * first, we check for VM_PAGER_PEND which means that the
		 * async I/O is in progress and the async I/O done routine
		 * will clean up after us.   in this case we move on to the
		 * next page.
		 *
		 * there is a very remote chance that the pending async i/o can
		 * finish _before_ we get here.   if that happens, our page "p"
		 * may no longer be on the inactive queue.   so we verify this
		 * when determining the next page (starting over at the head if
		 * we've lost our inactive page).
		 */

		if (result == VM_PAGER_PEND) {
			uvmexp.paging += npages;
			uvm_lock_pageq();
			uvmexp.pdpending++;
			if (p) {
				if (p->pg_flags & PQ_INACTIVE)
					nextpg = TAILQ_NEXT(p, pageq);
				else
					nextpg = TAILQ_FIRST(pglst);
			} else {
				nextpg = NULL;
			}
			continue;
		}

		/* clean up "p" if we have one */
		if (p) {
			/*
			 * the I/O request to "p" is done and uvm_pager_put
			 * has freed any cluster pages it may have allocated
			 * during I/O.  all that is left for us to do is
			 * clean up page "p" (which is still PG_BUSY).
			 *
			 * our result could be one of the following:
			 *   VM_PAGER_OK: successful pageout
			 *
			 *   VM_PAGER_AGAIN: tmp resource shortage, we skip
			 *     to next page
			 *   VM_PAGER_{FAIL,ERROR,BAD}: an error.   we
			 *     "reactivate" page to get it out of the way (it
			 *     will eventually drift back into the inactive
			 *     queue for a retry).
			 *   VM_PAGER_UNLOCK: should never see this as it is
			 *     only valid for "get" operations
			 */

			/* relock p's object: page queues not locked yet, so
			 * no need for "try" */

#ifdef DIAGNOSTIC
			if (result == VM_PAGER_UNLOCK)
				panic("pagedaemon: pageout returned "
				    "invalid 'unlock' code");
#endif

			/* handle PG_WANTED now */
			if (p->pg_flags & PG_WANTED)
				wakeup(p);

			atomic_clearbits_int(&p->pg_flags, PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(p, NULL);

			/* released during I/O? Can only happen for anons */
			if (p->pg_flags & PG_RELEASED) {
				KASSERT(anon != NULL);
				/*
				 * remove page so we can get nextpg,
				 * also zero out anon so we don't use
				 * it after the free.
				 */
				anon->an_page = NULL;
				p->uanon = NULL;

				uvm_anfree(anon);	/* kills anon */
				pmap_page_protect(p, PROT_NONE);
				anon = NULL;
				uvm_lock_pageq();
				nextpg = TAILQ_NEXT(p, pageq);
				/* free released page */
				uvm_pagefree(p);
			} else {	/* page was not released during I/O */
				uvm_lock_pageq();
				nextpg = TAILQ_NEXT(p, pageq);
				if (result != VM_PAGER_OK) {
					/* pageout was a failure... */
					if (result != VM_PAGER_AGAIN)
						uvm_pageactivate(p);
					pmap_clear_reference(p);
					/* XXXCDC: if (swap_backed) FREE p's
					 * swap block? */
				} else {
					/* pageout was a success... */
					pmap_clear_reference(p);
					pmap_clear_modify(p);
					atomic_setbits_int(&p->pg_flags,
					    PG_CLEAN);
				}
			}

			/*
			 * drop object lock (if there is an object left).   do
			 * a safety check of nextpg to make sure it is on the
			 * inactive queue (it should be since PG_BUSY pages on
			 * the inactive queue can't be re-queued [note: not
			 * true for active queue]).
			 */

			if (nextpg && (nextpg->pg_flags & PQ_INACTIVE) == 0) {
				nextpg = TAILQ_FIRST(pglst);	/* reload! */
			}
		} else {
			/*
			 * if p is null in this loop, make sure it stays null
			 * in the next loop.
			 */
			nextpg = NULL;

			/*
			 * lock page queues here just so they're always locked
			 * at the end of the loop.
			 */
			uvm_lock_pageq();
		}
	}
	return (retval);
}

/*
 * uvmpd_scan: scan the page queues and attempt to meet our targets.
 *
 * => called with pageq's locked
 */

void
uvmpd_scan(void)
{
	int free, inactive_shortage, swap_shortage, pages_freed;
	struct vm_page *p, *nextpg;
	struct uvm_object *uobj;
	boolean_t got_it;

	uvmexp.pdrevs++;		/* counter */
	uobj = NULL;

	/*
	 * get current "free" page count
	 */
	free = uvmexp.free - BUFPAGES_DEFICIT;

#ifndef __SWAP_BROKEN
	/*
	 * swap out some processes if we are below our free target.
	 * we need to unlock the page queues for this.
	 */
	if (free < uvmexp.freetarg) {
		uvmexp.pdswout++;
		uvm_unlock_pageq();
		uvm_swapout_threads();
		uvm_lock_pageq();
	}
#endif

	/*
	 * now we want to work on meeting our targets.   first we work on our
	 * free target by converting inactive pages into free pages.  then
	 * we work on meeting our inactive target by converting active pages
	 * to inactive ones.
	 */

	/*
	 * alternate starting queue between swap and object based on the
	 * low bit of uvmexp.pdrevs (which we bump by one each call).
	 */
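	/*
	 * i.e. on odd passes (when swap is configured) we try the
	 * swap-backed list first and then the object list; on even
	 * passes we try the object list first, falling back to the
	 * swap-backed list only if the target still wasn't met.
	 */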
	got_it = FALSE;
	pages_freed = uvmexp.pdfreed;	/* XXX - int */
	if ((uvmexp.pdrevs & 1) != 0 && uvmexp.nswapdev != 0)
		got_it = uvmpd_scan_inactive(&uvm.page_inactive_swp);
	if (!got_it)
		got_it = uvmpd_scan_inactive(&uvm.page_inactive_obj);
	if (!got_it && (uvmexp.pdrevs & 1) == 0 && uvmexp.nswapdev != 0)
		(void) uvmpd_scan_inactive(&uvm.page_inactive_swp);
	pages_freed = uvmexp.pdfreed - pages_freed;

	/*
	 * we have done the scan to get free pages.   now we work on meeting
	 * our inactive target.
	 */
	inactive_shortage = uvmexp.inactarg - uvmexp.inactive - BUFPAGES_INACT;

	/*
	 * detect if we're not going to be able to page anything out
	 * until we free some swap resources from active pages.
	 */
	swap_shortage = 0;
	if (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.swpginuse == uvmexp.swpages &&
	    uvmexp.swpgonly < uvmexp.swpages &&
	    pages_freed == 0) {
		swap_shortage = uvmexp.freetarg - uvmexp.free;
	}

	for (p = TAILQ_FIRST(&uvm.page_active);
	     p != NULL && (inactive_shortage > 0 || swap_shortage > 0);
	     p = nextpg) {
		nextpg = TAILQ_NEXT(p, pageq);
		if (p->pg_flags & PG_BUSY)
			continue;

		/* is page anon owned or ownerless? */
		if ((p->pg_flags & PQ_ANON) || p->uobject == NULL) {
			KASSERT(p->uanon != NULL);

			/* take over the page? */
			if ((p->pg_flags & PQ_ANON) == 0) {
				KASSERT(p->loan_count > 0);
				p->loan_count--;
				atomic_setbits_int(&p->pg_flags, PQ_ANON);
			}
		}
		/*
		 * if there's a shortage of swap, free any swap allocated
		 * to this page so that other pages can be paged out.
		 */
		if (swap_shortage > 0) {
			if ((p->pg_flags & PQ_ANON) && p->uanon->an_swslot) {
				uvm_swap_free(p->uanon->an_swslot, 1);
				p->uanon->an_swslot = 0;
				atomic_clearbits_int(&p->pg_flags, PG_CLEAN);
				swap_shortage--;
			}
			if (p->pg_flags & PQ_AOBJ) {
				int slot = uao_set_swslot(p->uobject,
					p->offset >> PAGE_SHIFT, 0);
				if (slot) {
					uvm_swap_free(slot, 1);
					atomic_clearbits_int(&p->pg_flags,
					    PG_CLEAN);
					swap_shortage--;
				}
			}
		}

		/*
		 * deactivate this page if there's a shortage of
		 * inactive pages.
		 */
		if (inactive_shortage > 0) {
			pmap_page_protect(p, PROT_NONE);
			/* no need to check wire_count as pg is "active" */
			uvm_pagedeactivate(p);
			uvmexp.pddeact++;
			inactive_shortage--;
		}
	}
}

#ifdef HIBERNATE

/*
 * uvmpd_drop: drop clean pages from list
 */
void
uvmpd_drop(struct pglist *pglst)
{
	struct vm_page *p, *nextpg;

	for (p = TAILQ_FIRST(pglst); p != NULL; p = nextpg) {
		nextpg = TAILQ_NEXT(p, pageq);

		if (p->pg_flags & PQ_ANON || p->uobject == NULL)
			continue;

		if (p->pg_flags & PG_BUSY)
			continue;

		/*
		 * we now have the page queues locked.
		 * the page is not busy.   if the page is clean we
		 * can free it now and continue.
		 */
		if (p->pg_flags & PG_CLEAN) {
			if (p->pg_flags & PQ_SWAPBACKED) {
				/* this page now lives only in swap */
				uvmexp.swpgonly++;
			}

			/* zap all mappings with pmap_page_protect... */
			pmap_page_protect(p, PROT_NONE);
			uvm_pagefree(p);
		}
	}
}

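/*
 * uvmpd_hibernate: drop clean pages from all the page queues, freeing
 * as much memory as we can for the hibernate code.
 */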
void
uvmpd_hibernate(void)
{
	uvm_lock_pageq();

	uvmpd_drop(&uvm.page_inactive_swp);
	uvmpd_drop(&uvm.page_inactive_obj);
	uvmpd_drop(&uvm.page_active);

	uvm_unlock_pageq();
}

#endif
