/*	$OpenBSD: uvm_pdaemon.c,v 1.59 2011/07/06 19:50:38 beck Exp $	*/
/*	$NetBSD: uvm_pdaemon.c,v 1.23 2000/08/20 10:24:14 bjh21 Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_pdaemon.c: the page daemon
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>

#include <uvm/uvm.h>

/*
 * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
 * in a pass thru the inactive list when swap is full.  the value should be
 * "small"... if it's too large we'll cycle the active pages thru the inactive
 * queue too quickly for them to be referenced and avoid being freed.
 */

#define UVMPD_NUMDIRTYREACTS 16


/*
 * local prototypes
 */

void		uvmpd_scan(void);
boolean_t	uvmpd_scan_inactive(struct pglist *);
void		uvmpd_tune(void);

/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages
 *
 * => should be called with all locks released
 * => should _not_ be called by the page daemon (to avoid deadlock)
 */

void
uvm_wait(const char *wmsg)
{
	int	timo = 0;

	/*
	 * check for page daemon going to sleep (waiting for itself)
	 */

	if (curproc == uvm.pagedaemon_proc) {
		/*
		 * now we have a problem: the pagedaemon wants to go to
		 * sleep until it frees more memory.   but how can it
		 * free more memory if it is asleep?  that is a deadlock.
		 * we have two options:
		 *  [1] panic now
		 *  [2] put a timeout on the sleep, thus causing the
		 *      pagedaemon to only pause (rather than sleep forever)
		 *
		 * note that option [2] will only help us if we get lucky
		 * and some other process on the system breaks the deadlock
		 * by exiting or freeing memory (thus allowing the pagedaemon
		 * to continue).  for now we panic if DEBUG is defined,
		 * otherwise we hope for the best with option [2] (better
		 * yet, this should never happen in the first place!).
		 */

		printf("pagedaemon: deadlock detected!\n");
		timo = hz >> 3;		/* set timeout */
#if defined(DEBUG)
		/* DEBUG: panic so we can debug it */
		panic("pagedaemon deadlock");
#endif
	}

	uvm_lock_fpageq();
	wakeup(&uvm.pagedaemon);		/* wake the daemon! */
	msleep(&uvmexp.free, &uvm.fpageqlock, PVM | PNORELOCK, wmsg, timo);
}
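
/*
 * illustrative sketch (not part of this file): the usual calling pattern
 * is to loop on an allocation failure and let the daemon make progress.
 * the wait message ("pgwait" below is a made-up example) only names the
 * sleep for tools like ps and top:
 *
 *	while ((pg = uvm_pagealloc(uobj, off, NULL, 0)) == NULL)
 *		uvm_wait("pgwait");
 */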

/*
 * uvmpd_tune: tune paging parameters
 *
 * => called whenever memory is added to (or removed from?) the system
 * => caller must call with page queues locked
 */

void
uvmpd_tune(void)
{

	uvmexp.freemin = uvmexp.npages / 30;

	/* between 16k and 512k */
	/* XXX:  what are these values good for? */
	uvmexp.freemin = max(uvmexp.freemin, (16*1024) >> PAGE_SHIFT);
#if 0
	uvmexp.freemin = min(uvmexp.freemin, (512*1024) >> PAGE_SHIFT);
#endif

	/* Make sure there's always a user page free. */
	if (uvmexp.freemin < uvmexp.reserve_kernel + 1)
		uvmexp.freemin = uvmexp.reserve_kernel + 1;

	uvmexp.freetarg = (uvmexp.freemin * 4) / 3;
	if (uvmexp.freetarg <= uvmexp.freemin)
		uvmexp.freetarg = uvmexp.freemin + 1;

	/* uvmexp.inactarg: computed in main daemon loop */

	uvmexp.wiredmax = uvmexp.npages / 3;
}
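
/*
 * worked example (illustrative only, assuming 4KB pages, i.e.
 * PAGE_SHIFT == 12, and a reserve_kernel small enough that the
 * adjustment above does not kick in): with npages == 65536
 * (256MB of managed memory),
 *
 *	freemin  = max(65536 / 30, 16384 >> 12) = max(2184, 4) = 2184
 *	freetarg = (2184 * 4) / 3 = 2912
 *	wiredmax = 65536 / 3 = 21845
 */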

/*
 * uvm_pageout: the main loop for the pagedaemon
 */

void
uvm_pageout(void *arg)
{
	struct uvm_constraint_range constraint;
	struct uvm_pmalloc *pma;
	int work_done;
	int npages = 0;

	/*
	 * ensure correct priority and set paging parameters...
	 */

	uvm.pagedaemon_proc = curproc;
	(void) spl0();
	uvm_lock_pageq();
	npages = uvmexp.npages;
	uvmpd_tune();
	uvm_unlock_pageq();

	/*
	 * main loop
	 */

	for (;;) {
		work_done = 0; /* No work done this iteration. */

		uvm_lock_fpageq();

		if (TAILQ_EMPTY(&uvm.pmr_control.allocs)) {
			msleep(&uvm.pagedaemon, &uvm.fpageqlock, PVM,
			    "pgdaemon", 0);
			uvmexp.pdwoke++;
		}

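		/*
		 * note: a queued uvm_pmalloc ("pma") is a request from the
		 * pmemrange allocator for pages within a specific physical
		 * address range; we mark it busy while we service it so it
		 * is not completed or requeued under our feet.
		 */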
		if ((pma = TAILQ_FIRST(&uvm.pmr_control.allocs)) != NULL) {
			pma->pm_flags |= UVM_PMA_BUSY;
			constraint = pma->pm_constraint;
		} else
			constraint = no_constraint;

		uvm_unlock_fpageq();

		/*
		 * now lock page queues and recompute inactive count
		 */

		uvm_lock_pageq();
		if (npages != uvmexp.npages) {	/* check for new pages? */
			npages = uvmexp.npages;
			uvmpd_tune();
		}

		uvmexp.inactarg = (uvmexp.active + uvmexp.inactive) / 3;
		if (uvmexp.inactarg <= uvmexp.freetarg) {
			uvmexp.inactarg = uvmexp.freetarg + 1;
		}
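
		/*
		 * e.g. active == 3000 and inactive == 1500 gives
		 * inactarg == (3000 + 1500) / 3 == 1500: we aim to keep
		 * roughly a third of the active+inactive total inactive.
		 */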

		/*
		 * get pages from the buffer cache, or scan if needed
		 */
		if (pma != NULL ||
		    ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg) ||
		    ((uvmexp.inactive + BUFPAGES_INACT) < uvmexp.inactarg)) {
			if (bufbackoff(&constraint,
			    (pma ? pma->pm_size : -1)) == 0)
				work_done = 1;
			else {
				uvmpd_scan();
				work_done = 1; /* we hope... */
			}
		}

		/*
		 * if there's any free memory to be had,
		 * wake up any waiters.
		 */
		uvm_lock_fpageq();
		if (uvmexp.free > uvmexp.reserve_kernel ||
		    uvmexp.paging == 0) {
			wakeup(&uvmexp.free);
		}

		if (pma != NULL) {
			pma->pm_flags &= ~UVM_PMA_BUSY;
			if (!work_done)
				pma->pm_flags |= UVM_PMA_FAIL;
			if (pma->pm_flags & (UVM_PMA_FAIL | UVM_PMA_FREED)) {
				pma->pm_flags &= ~UVM_PMA_LINKED;
				TAILQ_REMOVE(&uvm.pmr_control.allocs, pma,
				    pmq);
			}
			wakeup(pma);
		}
		uvm_unlock_fpageq();

		/*
		 * scan done.  unlock page queues (the only lock we are holding)
		 */

		uvm_unlock_pageq();
	}
	/*NOTREACHED*/
}


/*
 * uvm_aiodone_daemon:  main loop for the aiodone daemon.
 */

void
uvm_aiodone_daemon(void *arg)
{
	int s, free;
	struct buf *bp, *nbp;

	uvm.aiodoned_proc = curproc;

	for (;;) {

		/*
		 * Check for done aio structures. If we've got structures to
		 * process, do so. Otherwise sleep while avoiding races.
		 */
		mtx_enter(&uvm.aiodoned_lock);
		while ((bp = TAILQ_FIRST(&uvm.aio_done)) == NULL)
			msleep(&uvm.aiodoned, &uvm.aiodoned_lock,
			    PVM, "aiodoned", 0);
		/* Take the list for ourselves. */
		TAILQ_INIT(&uvm.aio_done);
		mtx_leave(&uvm.aiodoned_lock);

		/*
		 * process each i/o that's done.
		 */

		free = uvmexp.free;
		while (bp != NULL) {
			if (bp->b_flags & B_PDAEMON) {
				uvmexp.paging -= bp->b_bufsize >> PAGE_SHIFT;
			}
			nbp = TAILQ_NEXT(bp, b_freelist);
			s = splbio();	/* b_iodone must be called at splbio */
			(*bp->b_iodone)(bp);
			splx(s);
			bp = nbp;
		}
		uvm_lock_fpageq();
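		/*
		 * if memory was still scarce when we sampled "free" above,
		 * kick the pagedaemon to keep cleaning; otherwise wake
		 * anybody blocked in uvm_wait().
		 */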
		wakeup(free <= uvmexp.reserve_kernel ? &uvm.pagedaemon :
		    &uvmexp.free);
		uvm_unlock_fpageq();
	}
}



/*
 * uvmpd_scan_inactive: scan an inactive list for pages to clean or free.
 *
 * => called with page queues locked
 * => we work on meeting our free target by converting inactive pages
 *    into free pages.
 * => we handle the building of swap-backed clusters
 * => we return TRUE if we are exiting because we met our target
 */

boolean_t
uvmpd_scan_inactive(struct pglist *pglst)
{
	boolean_t retval = FALSE;	/* assume we haven't hit target */
	int free, result;
	struct vm_page *p, *nextpg;
	struct uvm_object *uobj;
	struct vm_page *pps[MAXBSIZE >> PAGE_SHIFT], **ppsp;
	int npages;
	struct vm_page *swpps[MAXBSIZE >> PAGE_SHIFT];	/* XXX: see below */
	int swnpages, swcpages;				/* XXX: see below */
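	/*
	 * size note: MAXBSIZE >> PAGE_SHIFT bounds the swap cluster; with
	 * the traditional 64KB MAXBSIZE and 4KB pages (an assumption, both
	 * are machine-dependent) swpps[] holds at most 16 pages.
	 */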
	int swslot;
	struct vm_anon *anon;
	boolean_t swap_backed;
	vaddr_t start;
	int dirtyreacts;

	/*
	 * note: we currently keep swap-backed pages on a separate inactive
	 * list from object-backed pages.   however, merging the two lists
	 * back together again hasn't been ruled out.   thus, we keep our
	 * swap cluster in "swpps" rather than in pps (allows us to mix
	 * clustering types in the event of a mixed inactive queue).
	 */

	/*
	 * swslot is non-zero if we are building a swap cluster.  we want
	 * to stay in the loop while we have a page to scan or we have
	 * a swap-cluster to build.
	 */

	swslot = 0;
	swnpages = swcpages = 0;
	free = 0;
	dirtyreacts = 0;

	for (p = TAILQ_FIRST(pglst); p != NULL || swslot != 0; p = nextpg) {

		/*
		 * note that p can be NULL iff we have traversed the whole
		 * list and need to do one final swap-backed clustered pageout.
		 */

		uobj = NULL;
		anon = NULL;

		if (p) {

			/*
			 * update our copy of "free" and see if we've met
			 * our target
			 */
			free = uvmexp.free - BUFPAGES_DEFICIT;

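			/*
			 * note the slack here: we keep scanning until free
			 * pages plus pages already being paged out reach
			 * four times the free target (freetarg << 2).
			 */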
			if (free + uvmexp.paging >= uvmexp.freetarg << 2 ||
			    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
				retval = TRUE;

				if (swslot == 0) {
					/* exit now if no swap-i/o pending */
					break;
				}

				/* set p to null to signal final swap i/o */
				p = NULL;
			}
		}

		if (p) {	/* if (we have a new page to consider) */

			/*
			 * we are below target and have a new page to consider.
			 */
			uvmexp.pdscans++;
			nextpg = TAILQ_NEXT(p, pageq);

			/*
			 * move referenced pages back to active queue and
			 * skip to next page (unlikely to happen since
			 * inactive pages shouldn't have any valid mappings
			 * and we cleared reference before deactivating).
			 */

			if (pmap_is_referenced(p)) {
				uvm_pageactivate(p);
				uvmexp.pdreact++;
				continue;
			}

			/*
			 * first we attempt to lock the object that this page
			 * belongs to.  if our attempt fails we skip on to
			 * the next page (no harm done).  it is important to
			 * "try" locking the object as we are locking in the
			 * wrong order (pageq -> object) and we don't want to
			 * deadlock.
			 *
			 * the only time we expect to see an ownerless page
			 * (i.e. a page with no uobject and !PQ_ANON) is if an
			 * anon has loaned a page from a uvm_object and the
			 * uvm_object has dropped the ownership.  in that
			 * case, the anon can "take over" the loaned page
			 * and make it its own.
			 */

			/* is page part of an anon or ownerless? */
			if ((p->pg_flags & PQ_ANON) || p->uobject == NULL) {
				anon = p->uanon;
				KASSERT(anon != NULL);
				if (!simple_lock_try(&anon->an_lock)) {
					/* lock failed, skip this page */
					continue;
				}

				/*
				 * if the page is ownerless, claim it in the
				 * name of "anon"!
				 */

				if ((p->pg_flags & PQ_ANON) == 0) {
					KASSERT(p->loan_count > 0);
					p->loan_count--;
					atomic_setbits_int(&p->pg_flags,
					    PQ_ANON);
					/* anon now owns it */
				}
				if (p->pg_flags & PG_BUSY) {
					simple_unlock(&anon->an_lock);
					uvmexp.pdbusy++;
					/* someone else owns page, skip it */
					continue;
				}
				uvmexp.pdanscan++;
			} else {
				uobj = p->uobject;
				KASSERT(uobj != NULL);
				if (!simple_lock_try(&uobj->vmobjlock)) {
					/* lock failed, skip this page */
					continue;
				}
				if (p->pg_flags & PG_BUSY) {
					simple_unlock(&uobj->vmobjlock);
					uvmexp.pdbusy++;
					/* someone else owns page, skip it */
					continue;
				}
				uvmexp.pdobscan++;
			}

			/*
			 * we now have the object and the page queues locked.
			 * the page is not busy.   if the page is clean we
			 * can free it now and continue.
			 */

			if (p->pg_flags & PG_CLEAN) {
				if (p->pg_flags & PQ_SWAPBACKED) {
					/* this page now lives only in swap */
					simple_lock(&uvm.swap_data_lock);
					uvmexp.swpgonly++;
					simple_unlock(&uvm.swap_data_lock);
				}

				/* zap all mappings with pmap_page_protect... */
				pmap_page_protect(p, VM_PROT_NONE);
				uvm_pagefree(p);
				uvmexp.pdfreed++;

				if (anon) {

					/*
					 * an anonymous page can only be clean
					 * if it has backing store assigned.
					 */

					KASSERT(anon->an_swslot != 0);

					/* remove from object */
					anon->an_page = NULL;
					simple_unlock(&anon->an_lock);
				} else {
					/* pagefree has already removed the
					 * page from the object */
					simple_unlock(&uobj->vmobjlock);
				}
				continue;
			}

			/*
			 * this page is dirty, skip it if we'll have met our
			 * free target when all the current pageouts complete.
			 */

			if (free + uvmexp.paging > uvmexp.freetarg << 2) {
				if (anon) {
					simple_unlock(&anon->an_lock);
				} else {
					simple_unlock(&uobj->vmobjlock);
				}
				continue;
			}

			/*
			 * this page is dirty, but swap is completely full
			 * and every slot holds the only copy of its page,
			 * so there is nowhere to page this one out to.
			 * reactivate it so that we eventually cycle all
			 * pages thru the inactive queue.
			 */

			KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
			if ((p->pg_flags & PQ_SWAPBACKED) &&
			    uvmexp.swpgonly == uvmexp.swpages) {
				dirtyreacts++;
				uvm_pageactivate(p);
				if (anon) {
					simple_unlock(&anon->an_lock);
				} else {
					simple_unlock(&uobj->vmobjlock);
				}
				continue;
			}

			/*
			 * if the page is swap-backed and dirty and swap space
			 * is full, free any swap allocated to the page
			 * so that other pages can be paged out.
			 */

			KASSERT(uvmexp.swpginuse <= uvmexp.swpages);
			if ((p->pg_flags & PQ_SWAPBACKED) &&
			    uvmexp.swpginuse == uvmexp.swpages) {

				if ((p->pg_flags & PQ_ANON) &&
				    p->uanon->an_swslot) {
					uvm_swap_free(p->uanon->an_swslot, 1);
					p->uanon->an_swslot = 0;
				}
				if (p->pg_flags & PQ_AOBJ) {
					uao_dropswap(p->uobject,
						     p->offset >> PAGE_SHIFT);
				}
			}

			/*
			 * the page we are looking at is dirty.   we must
			 * clean it before it can be freed.  to do this we
			 * first mark the page busy so that no one else will
			 * touch the page.   we write protect all the mappings
			 * of the page so that no one touches it while it is
			 * in I/O.
			 */

			swap_backed = ((p->pg_flags & PQ_SWAPBACKED) != 0);
			atomic_setbits_int(&p->pg_flags, PG_BUSY);
			UVM_PAGE_OWN(p, "scan_inactive");
			pmap_page_protect(p, VM_PROT_READ);
			uvmexp.pgswapout++;

			/*
			 * for swap-backed pages we need to (re)allocate
			 * swap space.
			 */

			if (swap_backed) {

				/*
				 * free old swap slot (if any)
				 */

				if (anon) {
					if (anon->an_swslot) {
						uvm_swap_free(anon->an_swslot,
						    1);
						anon->an_swslot = 0;
					}
				} else {
					uao_dropswap(uobj,
						     p->offset >> PAGE_SHIFT);
				}

				/*
				 * start new cluster (if necessary)
				 */

				if (swslot == 0) {
					swnpages = MAXBSIZE >> PAGE_SHIFT;
					swslot = uvm_swap_alloc(&swnpages,
					    TRUE);
					if (swslot == 0) {
						/* no swap?  give up! */
						atomic_clearbits_int(
						    &p->pg_flags,
						    PG_BUSY);
						UVM_PAGE_OWN(p, NULL);
						if (anon)
							simple_unlock(
							    &anon->an_lock);
						else
							simple_unlock(
							    &uobj->vmobjlock);
						continue;
					}
					swcpages = 0;	/* cluster is empty */
				}

				/*
				 * add block to cluster
				 */

				swpps[swcpages] = p;
				if (anon)
					anon->an_swslot = swslot + swcpages;
				else
					uao_set_swslot(uobj,
					    p->offset >> PAGE_SHIFT,
					    swslot + swcpages);
				swcpages++;
			}
		} else {

			/* if p == NULL we must be doing a last swap i/o */
			swap_backed = TRUE;
		}

		/*
		 * now consider doing the pageout.
		 *
		 * for swap-backed pages, we do the pageout if we have either
		 * filled the cluster (in which case swnpages == swcpages) or
		 * run out of pages (p == NULL).
		 *
		 * for object pages, we always do the pageout.
		 */

		if (swap_backed) {
			if (p) {	/* if we just added a page to cluster */
				if (anon)
					simple_unlock(&anon->an_lock);
				else
					simple_unlock(&uobj->vmobjlock);

				/* cluster not full yet? */
				if (swcpages < swnpages)
					continue;
			}

			/* starting I/O now... set up for it */
			npages = swcpages;
			ppsp = swpps;
			/* for swap-backed pages only */
			start = (vaddr_t) swslot;

			/* if this is final pageout we could have a few
			 * extra swap blocks */
			if (swcpages < swnpages) {
				uvm_swap_free(swslot + swcpages,
				    (swnpages - swcpages));
			}
		} else {
			/* normal object pageout */
			ppsp = pps;
			npages = sizeof(pps) / sizeof(struct vm_page *);
			/* not looked at because PGO_ALLPAGES is set */
			start = 0;
		}

		/*
		 * now do the pageout.
		 *
		 * for swap_backed pages we have already built the cluster.
		 * for !swap_backed pages, uvm_pager_put will call the object's
		 * "make put cluster" function to build a cluster on our behalf.
		 *
		 * we pass the PGO_PDFREECLUST flag to uvm_pager_put to instruct
		 * it to free the cluster pages for us on a successful I/O (it
		 * always does this for un-successful I/O requests).  this
		 * allows us to do clustered pageout without having to deal
		 * with cluster pages at this level.
		 *
		 * note locking semantics of uvm_pager_put with PGO_PDFREECLUST:
		 *  IN: locked: uobj (if !swap_backed), page queues
		 * OUT: locked: uobj (if !swap_backed && result != VM_PAGER_PEND)
		 *     !locked: pageqs, uobj (if swap_backed || VM_PAGER_PEND)
		 *
		 * [the bit about VM_PAGER_PEND saves us one lock-unlock pair]
		 */

		/* locked: uobj (if !swap_backed), page queues */
		uvmexp.pdpageouts++;
		result = uvm_pager_put(swap_backed ? NULL : uobj, p,
		    &ppsp, &npages, PGO_ALLPAGES|PGO_PDFREECLUST, start, 0);
		/* locked: uobj (if !swap_backed && result != PEND) */
		/* unlocked: pageqs, object (if swap_backed || result == PEND) */

		/*
		 * if we did i/o to swap, zero swslot to indicate that we are
		 * no longer building a swap-backed cluster.
		 */

		if (swap_backed)
			swslot = 0;		/* done with this cluster */

		/*
		 * first, we check for VM_PAGER_PEND which means that the
		 * async I/O is in progress and the async I/O done routine
		 * will clean up after us.   in this case we move on to the
		 * next page.
		 *
		 * there is a very remote chance that the pending async i/o can
		 * finish _before_ we get here.   if that happens, our page "p"
		 * may no longer be on the inactive queue.   so we verify this
		 * when determining the next page (starting over at the head if
		 * we've lost our inactive page).
		 */

		if (result == VM_PAGER_PEND) {
			uvmexp.paging += npages;
			uvm_lock_pageq();
			uvmexp.pdpending++;
			if (p) {
				if (p->pg_flags & PQ_INACTIVE)
					nextpg = TAILQ_NEXT(p, pageq);
				else
					nextpg = TAILQ_FIRST(pglst);
			} else {
				nextpg = NULL;
			}
			continue;
		}

#ifdef UBC
		if (result == VM_PAGER_ERROR &&
		    curproc == uvm.pagedaemon_proc) {
			uvm_lock_pageq();
			nextpg = TAILQ_NEXT(p, pageq);
			uvm_pageactivate(p);
			continue;
		}
#endif

		/*
		 * clean up "p" if we have one
		 */

		if (p) {
			/*
			 * the I/O request to "p" is done and uvm_pager_put
			 * has freed any cluster pages it may have allocated
			 * during I/O.  all that is left for us to do is
			 * clean up page "p" (which is still PG_BUSY).
			 *
			 * our result could be one of the following:
			 *   VM_PAGER_OK: successful pageout
			 *
			 *   VM_PAGER_AGAIN: tmp resource shortage, we skip
			 *     to next page
			 *   VM_PAGER_{FAIL,ERROR,BAD}: an error.   we
			 *     "reactivate" page to get it out of the way (it
			 *     will eventually drift back into the inactive
			 *     queue for a retry).
			 *   VM_PAGER_UNLOCK: should never see this as it is
			 *     only valid for "get" operations
			 */

			/* relock p's object: page queues not locked yet, so
			 * no need for "try" */

			/* !swap_backed case: already locked... */
			if (swap_backed) {
				if (anon)
					simple_lock(&anon->an_lock);
				else
					simple_lock(&uobj->vmobjlock);
			}

#ifdef DIAGNOSTIC
			if (result == VM_PAGER_UNLOCK)
				panic("pagedaemon: pageout returned "
				    "invalid 'unlock' code");
#endif

			/* handle PG_WANTED now */
			if (p->pg_flags & PG_WANTED)
				/* still holding object lock */
				wakeup(p);

			atomic_clearbits_int(&p->pg_flags, PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(p, NULL);

			/* released during I/O? Can only happen for anons */
			if (p->pg_flags & PG_RELEASED) {
				KASSERT(anon != NULL);
				/*
				 * remove page so we can get nextpg,
				 * also zero out anon so we don't use
				 * it after the free.
				 */
				anon->an_page = NULL;
				p->uanon = NULL;

				simple_unlock(&anon->an_lock);
				uvm_anfree(anon);	/* kills anon */
				pmap_page_protect(p, VM_PROT_NONE);
				anon = NULL;
				uvm_lock_pageq();
				nextpg = TAILQ_NEXT(p, pageq);
				/* free released page */
				uvm_pagefree(p);
			} else {	/* page was not released during I/O */
				uvm_lock_pageq();
				nextpg = TAILQ_NEXT(p, pageq);
				if (result != VM_PAGER_OK) {
					/* pageout was a failure... */
					if (result != VM_PAGER_AGAIN)
						uvm_pageactivate(p);
					pmap_clear_reference(p);
					/* XXXCDC: if (swap_backed) FREE p's
					 * swap block? */
				} else {
					/* pageout was a success... */
					pmap_clear_reference(p);
					pmap_clear_modify(p);
					atomic_setbits_int(&p->pg_flags,
					    PG_CLEAN);
				}
			}

			/*
			 * drop object lock (if there is an object left).   do
			 * a safety check of nextpg to make sure it is on the
			 * inactive queue (it should be since PG_BUSY pages on
			 * the inactive queue can't be re-queued [note: not
			 * true for active queue]).
			 */

			if (anon)
				simple_unlock(&anon->an_lock);
			else if (uobj)
				simple_unlock(&uobj->vmobjlock);

			if (nextpg && (nextpg->pg_flags & PQ_INACTIVE) == 0) {
				nextpg = TAILQ_FIRST(pglst);	/* reload! */
			}
		} else {

			/*
			 * if p is null in this loop, make sure it stays null
			 * in the next loop.
			 */

			nextpg = NULL;

			/*
			 * lock page queues here just so they're always locked
			 * at the end of the loop.
			 */

			uvm_lock_pageq();
		}
	}
	return (retval);
}

/*
 * uvmpd_scan: scan the page queues and attempt to meet our targets.
 *
 * => called with pageq's locked
 */

void
uvmpd_scan(void)
{
	int free, inactive_shortage, swap_shortage, pages_freed;
	struct vm_page *p, *nextpg;
	struct uvm_object *uobj;
	boolean_t got_it;

	uvmexp.pdrevs++;		/* counter */
	uobj = NULL;

	/*
	 * get current "free" page count
	 */
	free = uvmexp.free - BUFPAGES_DEFICIT;

#ifndef __SWAP_BROKEN
	/*
	 * swap out some processes if we are below our free target.
	 * we need to unlock the page queues for this.
	 */
	if (free < uvmexp.freetarg) {
		uvmexp.pdswout++;
		uvm_unlock_pageq();
		uvm_swapout_threads();
		uvm_lock_pageq();
	}
#endif

	/*
	 * now we want to work on meeting our targets.   first we work on our
	 * free target by converting inactive pages into free pages.  then
	 * we work on meeting our inactive target by converting active pages
	 * to inactive ones.
	 */

	/*
	 * alternate starting queue between swap and object based on the
	 * low bit of uvmexp.pdrevs (which we bump by one each call).
	 */

	got_it = FALSE;
	pages_freed = uvmexp.pdfreed;	/* XXX - int */
	if ((uvmexp.pdrevs & 1) != 0 && uvmexp.nswapdev != 0)
		got_it = uvmpd_scan_inactive(&uvm.page_inactive_swp);
	if (!got_it)
		got_it = uvmpd_scan_inactive(&uvm.page_inactive_obj);
	if (!got_it && (uvmexp.pdrevs & 1) == 0 && uvmexp.nswapdev != 0)
		(void) uvmpd_scan_inactive(&uvm.page_inactive_swp);
	pages_freed = uvmexp.pdfreed - pages_freed;

	/*
	 * we have done the scan to get free pages.   now we work on meeting
	 * our inactive target.
	 */

	inactive_shortage = uvmexp.inactarg - uvmexp.inactive;

	/*
	 * detect if we're not going to be able to page anything out
	 * until we free some swap resources from active pages.
	 */

	swap_shortage = 0;
	if (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.swpginuse == uvmexp.swpages &&
	    uvmexp.swpgonly < uvmexp.swpages &&
	    pages_freed == 0) {
		swap_shortage = uvmexp.freetarg - uvmexp.free;
	}

	for (p = TAILQ_FIRST(&uvm.page_active);
	     p != NULL && (inactive_shortage > 0 || swap_shortage > 0);
	     p = nextpg) {
		nextpg = TAILQ_NEXT(p, pageq);
		if (p->pg_flags & PG_BUSY)
			continue;	/* quick check before trying to lock */

		/*
		 * lock the page's owner.
		 */
		/* is page anon owned or ownerless? */
		if ((p->pg_flags & PQ_ANON) || p->uobject == NULL) {
			KASSERT(p->uanon != NULL);
			if (!simple_lock_try(&p->uanon->an_lock))
				continue;

			/* take over the page? */
			if ((p->pg_flags & PQ_ANON) == 0) {
				KASSERT(p->loan_count > 0);
				p->loan_count--;
				atomic_setbits_int(&p->pg_flags, PQ_ANON);
			}
		} else {
			if (!simple_lock_try(&p->uobject->vmobjlock))
				continue;
		}

		/*
		 * skip this page if it's busy.
		 */

		if ((p->pg_flags & PG_BUSY) != 0) {
			if (p->pg_flags & PQ_ANON)
				simple_unlock(&p->uanon->an_lock);
			else
				simple_unlock(&p->uobject->vmobjlock);
			continue;
		}

		/*
		 * if there's a shortage of swap, free any swap allocated
		 * to this page so that other pages can be paged out.
		 */

		if (swap_shortage > 0) {
			if ((p->pg_flags & PQ_ANON) && p->uanon->an_swslot) {
				uvm_swap_free(p->uanon->an_swslot, 1);
				p->uanon->an_swslot = 0;
				atomic_clearbits_int(&p->pg_flags, PG_CLEAN);
				swap_shortage--;
			}
			if (p->pg_flags & PQ_AOBJ) {
				int slot = uao_set_swslot(p->uobject,
					p->offset >> PAGE_SHIFT, 0);
				if (slot) {
					uvm_swap_free(slot, 1);
					atomic_clearbits_int(&p->pg_flags,
					    PG_CLEAN);
					swap_shortage--;
				}
			}
		}

		/*
		 * deactivate this page if there's a shortage of
		 * inactive pages.
		 */

		if (inactive_shortage > 0) {
			pmap_page_protect(p, VM_PROT_NONE);
			/* no need to check wire_count as pg is "active" */
			uvm_pagedeactivate(p);
			uvmexp.pddeact++;
			inactive_shortage--;
		}
		if (p->pg_flags & PQ_ANON)
			simple_unlock(&p->uanon->an_lock);
		else
			simple_unlock(&p->uobject->vmobjlock);
	}
}
1073