/*	$OpenBSD: uvm_pager.c,v 1.93 2024/11/25 12:51:00 mpi Exp $	*/
/*	$NetBSD: uvm_pager.c,v 1.36 2000/11/27 18:26:41 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/buf.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>

const struct uvm_pagerops *uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
};

/*
 * the pager map: provides KVA for I/O
 *
 * Each uvm_pseg has room for MAX_PAGER_SEGS pager I/O segments of
 * MAXBSIZE bytes each.
 *
 * The number of uvm_pseg instances is dynamic, using the array psegs.
 * At most PSEG_NUMSEGS instances can exist.
 *
 * psegs[0/1] always exist (so that the pager can always map in pages).
 * psegs[0/1] elements 0 and 1 are always reserved for the pagedaemon.
 *
 * Any other pseg is automatically created when no space is available
 * and automatically destroyed when it is no longer in use.
 */
#define MAX_PAGER_SEGS	16
#define PSEG_NUMSEGS	(PAGER_MAP_SIZE / MAX_PAGER_SEGS / MAXBSIZE)
struct uvm_pseg {
	/* Start of virtual space; 0 if not inited. */
	vaddr_t	start;
	/* Bitmap of the segments in use in this pseg. */
	int	use;
};
struct	mutex uvm_pseg_lck;
struct	uvm_pseg psegs[PSEG_NUMSEGS];

#define UVM_PSEG_FULL(pseg)	((pseg)->use == (1 << MAX_PAGER_SEGS) - 1)
#define UVM_PSEG_EMPTY(pseg)	((pseg)->use == 0)
#define UVM_PSEG_INUSE(pseg,id)	(((pseg)->use & (1 << (id))) != 0)

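/*
 * Illustrative example (values are platform dependent): with
 * MAX_PAGER_SEGS == 16, each pseg covers 16 * MAXBSIZE bytes of KVA and
 * "use" is a 16-bit bitmap.  A pseg with only segments 0 and 3 busy has
 * use == 0x0009; UVM_PSEG_FULL is true at use == 0xffff and
 * UVM_PSEG_EMPTY at use == 0.
 */
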
void		uvm_pseg_init(struct uvm_pseg *);
vaddr_t		uvm_pseg_get(int);
void		uvm_pseg_release(vaddr_t);

/*
 * uvm_pager_init: init pagers (at boot time)
 */
void
uvm_pager_init(void)
{
	int lcv;

	/* init pager map */
	uvm_pseg_init(&psegs[0]);
	uvm_pseg_init(&psegs[1]);
	mtx_init(&uvm_pseg_lck, IPL_VM);

	/* init ASYNC I/O queue */
	TAILQ_INIT(&uvm.aio_done);

	/* call pager init functions */
	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
	    lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * Initialize a uvm_pseg.
 *
 * May fail, in which case pseg->start == 0.
 *
 * Caller locks uvm_pseg_lck.
 */
void
uvm_pseg_init(struct uvm_pseg *pseg)
{
	KASSERT(pseg->start == 0);
	KASSERT(pseg->use == 0);
	pseg->start = (vaddr_t)km_alloc(MAX_PAGER_SEGS * MAXBSIZE,
	    &kv_any, &kp_none, &kd_trylock);
}

/*
 * Acquire a pager map segment.
 *
 * Returns a vaddr for paging. 0 on failure.
 *
 * Caller does not lock.
 */
vaddr_t
uvm_pseg_get(int flags)
{
	int i;
	struct uvm_pseg *pseg;

	mtx_enter(&uvm_pseg_lck);

pager_seg_restart:
	/* Find first pseg that has room. */
	for (pseg = &psegs[0]; pseg != &psegs[PSEG_NUMSEGS]; pseg++) {
		if (UVM_PSEG_FULL(pseg))
			continue;

		if (pseg->start == 0) {
			/* Need initialization. */
			uvm_pseg_init(pseg);
			if (pseg->start == 0)
				goto pager_seg_fail;
		}

		/* Keep indexes 0,1 reserved for pagedaemon. */
		if ((pseg == &psegs[0] || pseg == &psegs[1]) &&
		    (curproc != uvm.pagedaemon_proc))
			i = 2;
		else
			i = 0;

		for (; i < MAX_PAGER_SEGS; i++) {
			if (!UVM_PSEG_INUSE(pseg, i)) {
				pseg->use |= 1 << i;
				mtx_leave(&uvm_pseg_lck);
				return pseg->start + i * MAXBSIZE;
			}
		}
	}

pager_seg_fail:
	if ((flags & UVMPAGER_MAPIN_WAITOK) != 0) {
		msleep_nsec(&psegs, &uvm_pseg_lck, PVM, "pagerseg", INFSLP);
		goto pager_seg_restart;
	}

	mtx_leave(&uvm_pseg_lck);
	return 0;
}

/*
 * Release a pager map segment.
 *
 * Caller does not lock.
 *
 * Deallocates pseg if it is no longer in use.
 */
void
uvm_pseg_release(vaddr_t segaddr)
{
	int id;
	struct uvm_pseg *pseg;
	vaddr_t va = 0;

	mtx_enter(&uvm_pseg_lck);
	for (pseg = &psegs[0]; pseg != &psegs[PSEG_NUMSEGS]; pseg++) {
		if (pseg->start <= segaddr &&
		    segaddr < pseg->start + MAX_PAGER_SEGS * MAXBSIZE)
			break;
	}
	KASSERT(pseg != &psegs[PSEG_NUMSEGS]);

	id = (segaddr - pseg->start) / MAXBSIZE;
	KASSERT(id >= 0 && id < MAX_PAGER_SEGS);

	/* test for no remainder */
	KDASSERT(segaddr == pseg->start + id * MAXBSIZE);

	KASSERT(UVM_PSEG_INUSE(pseg, id));

	pseg->use &= ~(1 << id);
	wakeup(&psegs);

	if ((pseg != &psegs[0] && pseg != &psegs[1]) && UVM_PSEG_EMPTY(pseg)) {
		va = pseg->start;
		pseg->start = 0;
	}

	mtx_leave(&uvm_pseg_lck);

	if (va) {
		km_free((void *)va, MAX_PAGER_SEGS * MAXBSIZE,
		    &kv_any, &kp_none);
	}
}
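
/*
 * Example of the address-to-segment math above (illustrative only,
 * assuming the common MAXBSIZE of 64KB): if pseg->start == 0xd0000000,
 * then segaddr == 0xd0020000 yields id == 0x20000 / 0x10000 == 2, so
 * bit 2 is cleared in pseg->use.
 */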

/*
 * uvm_pagermapin: map pages into KVA for I/O that needs mappings
 *
 * We reserve a blank KVA range for the I/O with uvm_pseg_get() and
 * then use pmap_enter() to install the page mappings by hand.
 */
vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{
	vaddr_t kva, cva;
	vm_prot_t prot;
	vsize_t size;
	struct vm_page *pp;

#if defined(__HAVE_PMAP_DIRECT)
	/*
	 * Use direct mappings for a single page, unless there is a risk
	 * of aliasing.
	 */
	if (npages == 1 && PMAP_PREFER_ALIGN() == 0) {
		KASSERT(pps[0]);
		KASSERT(pps[0]->pg_flags & PG_BUSY);
		return pmap_map_direct(pps[0]);
	}
#endif

	/* a "read" from backing store writes into the pages. */
	prot = PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= PROT_WRITE;
	size = ptoa(npages);

	KASSERT(size <= MAXBSIZE);

	kva = uvm_pseg_get(flags);
	if (kva == 0)
		return 0;

	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		KASSERT(pp->pg_flags & PG_BUSY);
		/* Allow pmap_enter to fail. */
		if (pmap_enter(pmap_kernel(), cva, VM_PAGE_TO_PHYS(pp),
		    prot, PMAP_WIRED | PMAP_CANFAIL | prot) != 0) {
			pmap_remove(pmap_kernel(), kva, cva);
			pmap_update(pmap_kernel());
			uvm_pseg_release(kva);
			return 0;
		}
	}
	pmap_update(pmap_kernel());
	return kva;
}
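
/*
 * Usage sketch (illustrative only, not a caller in this file): a pager
 * that needs a kernel mapping of npages busy pages before starting
 * device I/O would do something like:
 *
 *	kva = uvm_pagermapin(pps, npages, UVMPAGER_MAPIN_WAITOK);
 *	if (kva == 0)
 *		return VM_PAGER_AGAIN;
 *	... start the I/O on the buffer at kva ...
 *	uvm_pagermapout(kva, npages);
 */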

/*
 * uvm_pagermapout: remove KVA mapping
 *
 * We remove the mappings by hand and then release the KVA segment.
 */
void
uvm_pagermapout(vaddr_t kva, int npages)
{
#if defined(__HAVE_PMAP_DIRECT)
	/*
	 * Use direct mappings for a single page, unless there is a risk
	 * of aliasing.
	 */
	if (npages == 1 && PMAP_PREFER_ALIGN() == 0) {
		pmap_unmap_direct(kva);
		return;
	}
#endif

	pmap_remove(pmap_kernel(), kva, kva + ((vsize_t)npages << PAGE_SHIFT));
	pmap_update(pmap_kernel());
	uvm_pseg_release(kva);
}

/*
 * uvm_mk_pcluster
 *
 * generic "make 'pager put' cluster" function.  a pager can either
 * [1] set pgo_mk_pcluster to NULL (never cluster), [2] set it to this
 * generic function, or [3] set it to a pager specific function.
 *
 * => caller must lock object _and_ pagequeues (since we need to look
 *    at active vs. inactive bits, etc.)
 * => caller must make center page busy and write-protect it
 * => we mark all cluster pages busy for the caller
 * => the caller must unbusy all pages (and check wanted/released
 *    status if it drops the object lock)
 * => flags:
 *	PGO_ALLPAGES:  all pages in object are valid targets
 *	!PGO_ALLPAGES: use "lo" and "hi" to limit range of cluster
 *	PGO_DOACTCLUST: include active pages in cluster.
 *	PGO_FREE: set the PG_RELEASED bits on the cluster so they'll be freed
 *		in async io (caller must clean on error).
 *	  NOTE: the caller should clear PG_CLEANCHK bits if PGO_DOACTCLUST.
 *		PG_CLEANCHK is only a hint, but clearing will help reduce
 *		the number of calls we make to the pmap layer.
 */

struct vm_page **
uvm_mk_pcluster(struct uvm_object *uobj, struct vm_page **pps, int *npages,
    struct vm_page *center, int flags, voff_t mlo, voff_t mhi)
{
	struct vm_page **ppsp, *pclust;
	voff_t lo, hi, curoff;
	int center_idx, forward, incr;

	/*
	 * center page should already be busy and write protected.  XXX:
	 * suppose page is wired?  if we lock, then a process could
	 * fault/block on it.  if we don't lock, a process could write the
	 * pages in the middle of an I/O.  (consider an msync()).  let's
	 * lock it for now (better to delay than corrupt data?).
	 */
	/* get cluster boundaries, check sanity, and apply our limits as well. */
	uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi);
	if ((flags & PGO_ALLPAGES) == 0) {
		if (lo < mlo)
			lo = mlo;
		if (hi > mhi)
			hi = mhi;
	}
	if ((hi - lo) >> PAGE_SHIFT > *npages) { /* pps too small, bail out! */
		pps[0] = center;
		*npages = 1;
		return pps;
	}

	/* now determine the center and attempt to cluster around the edges */
	center_idx = (center->offset - lo) >> PAGE_SHIFT;
	pps[center_idx] = center;	/* plug in the center page */
	ppsp = &pps[center_idx];
	*npages = 1;

	/*
	 * attempt to cluster around the left [backward], and then
	 * the right side [forward].
	 *
	 * note that for inactive pages (pages that have been deactivated)
	 * there are no valid mappings and PG_CLEAN should be up to date.
	 * [i.e. there is no need to query the pmap with pmap_is_modified
	 * since there are no mappings].
	 */
	for (forward = 0 ; forward <= 1 ; forward++) {
		incr = forward ? PAGE_SIZE : -PAGE_SIZE;
		curoff = center->offset + incr;
		for ( ;(forward == 0 && curoff >= lo) ||
		       (forward && curoff < hi);
		      curoff += incr) {

			pclust = uvm_pagelookup(uobj, curoff); /* lookup page */
			if (pclust == NULL) {
				break;			/* no page */
			}
			/* handle active pages */
			/* NOTE: inactive pages don't have pmap mappings */
			if ((pclust->pg_flags & PQ_INACTIVE) == 0) {
				if ((flags & PGO_DOACTCLUST) == 0) {
					/* don't want mapped pages at all */
					break;
				}

				/* make sure "clean" bit is sync'd */
				if ((pclust->pg_flags & PG_CLEANCHK) == 0) {
					if ((pclust->pg_flags & (PG_CLEAN|PG_BUSY))
					   == PG_CLEAN &&
					   pmap_is_modified(pclust))
						atomic_clearbits_int(
						    &pclust->pg_flags,
						    PG_CLEAN);
					/* now checked */
					atomic_setbits_int(&pclust->pg_flags,
					    PG_CLEANCHK);
				}
			}

			/* is page available for cleaning and does it need it */
			if ((pclust->pg_flags & (PG_CLEAN|PG_BUSY)) != 0) {
				break;	/* page is already clean or is busy */
			}

			/* yes!  enroll the page in our array */
			atomic_setbits_int(&pclust->pg_flags, PG_BUSY);
			UVM_PAGE_OWN(pclust, "uvm_mk_pcluster");

			/*
			 * If we want to free after io is done, and we're
			 * async, set the released flag
			 */
			if ((flags & (PGO_FREE|PGO_SYNCIO)) == PGO_FREE)
				atomic_setbits_int(&pclust->pg_flags,
				    PG_RELEASED);

			/* XXX: protect wired page?  see above comment. */
			pmap_page_protect(pclust, PROT_READ);
			if (!forward) {
				ppsp--;			/* back up one page */
				*ppsp = pclust;
			} else {
				/* move forward one page */
				ppsp[*npages] = pclust;
			}
			(*npages)++;
		}
	}

	/*
	 * done!  return the cluster array to the caller.
	 */
	return ppsp;
}
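
/*
 * Worked example (illustrative): suppose lo == 0, hi == 8 * PAGE_SIZE
 * and center->offset == 3 * PAGE_SIZE.  Then center_idx == 3 and the
 * center page sits at pps[3].  Backward clustering stores pages at
 * pps[2], pps[1], ... and moves ppsp down each time; forward clustering
 * stores at ppsp[*npages], the slot just past the pages gathered so
 * far.  The returned ppsp points at the first page of the cluster,
 * which is not necessarily pps[0].
 */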

/*
 * uvm_pager_put: high level pageout routine
 *
 * we want to pageout page "pg" to backing store, clustering if
 * possible.
 *
 * => page queues must be locked by caller
 * => if page is not swap-backed, then "uobj" points to the object
 *	backing it.
 * => if page is swap-backed, then "uobj" should be NULL.
 * => "pg" should be PG_BUSY (by caller), and !PG_CLEAN
 * => for swap-backed memory, "pg" can be NULL if there is no page
 *	of interest [sometimes the case for the pagedaemon]
 * => "ppsp_ptr" should point to an array of npages vm_page pointers
 *	for possible cluster building
 * => flags (first two for non-swap-backed pages)
 *	PGO_ALLPAGES: all pages in uobj are valid targets
 *	PGO_DOACTCLUST: include "PQ_ACTIVE" pages as valid targets
 *	PGO_SYNCIO: do SYNC I/O (no async)
 *	PGO_PDFREECLUST: pagedaemon: drop cluster on successful I/O
 *	PGO_FREE: tell the aio daemon to free pages in the async case.
 * => start/stop: if (uobj && !PGO_ALLPAGES) limit targets to this range
 *		  if (!uobj) start is the (daddr_t) of the starting swapblk
 * => return state:
 *	1. we return the VM_PAGER status code of the pageout
 *	2. we return with the page queues unlocked
 *	3. on errors we always drop the cluster.  thus, if we return
 *		!PEND, !OK, then the caller only has to worry about
 *		un-busying the main page (not the cluster pages).
 *	4. on success, if !PGO_PDFREECLUST, we return the cluster
 *		with all pages busy (caller must un-busy and check
 *		wanted/released flags).
 */
int
uvm_pager_put(struct uvm_object *uobj, struct vm_page *pg,
    struct vm_page ***ppsp_ptr, int *npages, int flags,
    voff_t start, voff_t stop)
{
	int result;
	daddr_t swblk;
	struct vm_page **ppsp = *ppsp_ptr;

	/*
	 * note that uobj is null if we are doing a swap-backed pageout.
	 * note that uobj is !null if we are doing normal object pageout.
	 * note that the page queues must be locked to cluster.
	 */
	if (uobj) {	/* if !swap-backed */
		/*
		 * attempt to build a cluster for pageout using its
		 * make-put-cluster function (if it has one).
		 */
		if (uobj->pgops->pgo_mk_pcluster) {
			ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp,
			    npages, pg, flags, start, stop);
			*ppsp_ptr = ppsp;  /* update caller's pointer */
		} else {
			ppsp[0] = pg;
			*npages = 1;
		}

		swblk = 0;		/* XXX: keep gcc happy */
	} else {
		/*
		 * for swap-backed pageout, the caller (the pagedaemon) has
		 * already built the cluster for us.  the starting swap
		 * block we are writing to has been passed in as "start."
		 * "pg" could be NULL if there is no page we are especially
		 * interested in (in which case the whole cluster gets dropped
		 * in the event of an error or a sync "done").
		 */
		swblk = start;
		/* ppsp and npages should be ok */
	}

	/* now that we've clustered we can unlock the page queues */
	uvm_unlock_pageq();

	/*
	 * now attempt the I/O.  if we have a failure and we are
	 * clustered, we will drop the cluster and try again.
	 */
	if (uobj) {
		result = uobj->pgops->pgo_put(uobj, ppsp, *npages, flags);
	} else {
		/* XXX daddr_t -> int */
		result = uvm_swap_put(swblk, ppsp, *npages, flags);
	}

	/*
	 * we have attempted the I/O.
	 *
	 * if the I/O was a success then:
	 * 	if !PGO_PDFREECLUST, we return the cluster to the
	 *		caller (who must un-busy all pages)
	 *	else we un-busy cluster pages for the pagedaemon
	 *
	 * if I/O is pending (async i/o) then we return the pending code.
	 * [in this case the async i/o done function must clean up when
	 *  i/o is done...]
	 */
	if (result == VM_PAGER_PEND || result == VM_PAGER_OK) {
		if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) {
			/* drop cluster */
			if (*npages > 1 || pg == NULL)
				uvm_pager_dropcluster(uobj, pg, ppsp, npages,
				    PGO_PDFREECLUST);
		}
		return (result);
	}

	/*
	 * a pager error occurred.  if we built a cluster, drop it and
	 * revert its swap state before giving up.
	 */
	if (*npages > 1 || pg == NULL) {
		uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP);

		/*
		 * for failed swap-backed pageouts with a "pg",
		 * we need to reset pg's swslot to either:
		 * "swblk" (for transient errors, so we can retry),
		 * or 0 (for hard errors).
		 */
		if (uobj == NULL) {
			if (pg != NULL) {
				if (pg->pg_flags & PQ_ANON) {
					rw_enter(pg->uanon->an_lock, RW_WRITE);
					pg->uanon->an_swslot = 0;
					rw_exit(pg->uanon->an_lock);
				} else {
					rw_enter(pg->uobject->vmobjlock, RW_WRITE);
					uao_set_swslot(pg->uobject,
					    pg->offset >> PAGE_SHIFT, 0);
					rw_exit(pg->uobject->vmobjlock);
				}
			}
			/*
			 * for transient failures, free all the swslots
			 */
			if (result == VM_PAGER_AGAIN) {
				/* XXX daddr_t -> int */
				uvm_swap_free(swblk, *npages);
			} else {
				/*
				 * for hard errors on swap-backed pageouts,
				 * mark the swslots as bad.  note that we do not
				 * free swslots that we mark bad.
				 */
				/* XXX daddr_t -> int */
				uvm_swap_markbad(swblk, *npages);
			}
		}
	}

	/*
	 * a pager error occurred (even after dropping the cluster, if there
	 * was one).  give up!  the caller only has one page ("pg")
	 * to worry about.
	 */
	return result;
}
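
/*
 * Usage sketch (illustrative, error handling elided): a swap-backed
 * pageout from the pagedaemon looks roughly like
 *
 *	uvm_lock_pageq();
 *	result = uvm_pager_put(NULL, pg, &ppsp, &npages,
 *	    PGO_PDFREECLUST, swblk, 0);
 *
 * on return the page queues are unlocked and, with PGO_PDFREECLUST,
 * a successful cluster has already been un-busied for us.
 */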

/*
 * uvm_pager_dropcluster: drop a cluster we have built (because we
 * got an error, or, if PGO_PDFREECLUST we are un-busying the
 * cluster pages on behalf of the pagedaemon).
 *
 * => uobj, if non-null, is a non-swap-backed object
 * => page queues are not locked
 * => pg is our page of interest (the one we clustered around, can be null)
 * => ppsp/npages is our current cluster
 * => flags: PGO_PDFREECLUST: pageout was a success: un-busy cluster
 *	pages on behalf of the pagedaemon.
 *           PGO_REALLOCSWAP: drop previously allocated swap slots for
 *		clustered swap-backed pages (except for "pg" if !NULL)
 *		"swblk" is the start of swap alloc (e.g. for ppsp[0])
 *		[only meaningful if swap-backed (uobj == NULL)]
 */

void
uvm_pager_dropcluster(struct uvm_object *uobj, struct vm_page *pg,
    struct vm_page **ppsp, int *npages, int flags)
{
	int lcv;

	KASSERT(uobj == NULL || rw_write_held(uobj->vmobjlock));

	/* drop all pages but "pg" */
	for (lcv = 0 ; lcv < *npages ; lcv++) {
		/* skip "pg" or empty slot */
		if (ppsp[lcv] == pg || ppsp[lcv] == NULL)
			continue;

		/*
		 * Note that PQ_ANON bit can't change as long as we are holding
		 * the PG_BUSY bit (so there is no need to lock the page
		 * queues to test it).
		 */
		if (!uobj) {
			if (ppsp[lcv]->pg_flags & PQ_ANON) {
				rw_enter(ppsp[lcv]->uanon->an_lock, RW_WRITE);
				if (flags & PGO_REALLOCSWAP)
					/* zap swap block */
					ppsp[lcv]->uanon->an_swslot = 0;
			} else {
				rw_enter(ppsp[lcv]->uobject->vmobjlock,
				    RW_WRITE);
				if (flags & PGO_REALLOCSWAP)
					uao_set_swslot(ppsp[lcv]->uobject,
					    ppsp[lcv]->offset >> PAGE_SHIFT, 0);
			}
		}

		/* did someone want the page while we had it busy-locked? */
		if (ppsp[lcv]->pg_flags & PG_WANTED) {
			wakeup(ppsp[lcv]);
		}

		/* if page was released, release it.  otherwise un-busy it */
		if (ppsp[lcv]->pg_flags & PG_RELEASED &&
		    ppsp[lcv]->pg_flags & PQ_ANON) {
			/* kills anon and frees pg */
			uvm_anon_release(ppsp[lcv]->uanon);
			continue;
		} else {
			/*
			 * if we were planning on async io then we would
			 * have PG_RELEASED set, clear that with the others.
			 */
			atomic_clearbits_int(&ppsp[lcv]->pg_flags,
			    PG_BUSY|PG_WANTED|PG_FAKE|PG_RELEASED);
			UVM_PAGE_OWN(ppsp[lcv], NULL);
		}

		/*
		 * if we are operating on behalf of the pagedaemon and we
		 * had a successful pageout update the page!
		 */
		if (flags & PGO_PDFREECLUST) {
			pmap_clear_reference(ppsp[lcv]);
			pmap_clear_modify(ppsp[lcv]);
			atomic_setbits_int(&ppsp[lcv]->pg_flags, PG_CLEAN);
		}

		/* if anonymous cluster, unlock object and move on */
		if (!uobj) {
			if (ppsp[lcv]->pg_flags & PQ_ANON)
				rw_exit(ppsp[lcv]->uanon->an_lock);
			else
				rw_exit(ppsp[lcv]->uobject->vmobjlock);
		}
	}
}

/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone(struct buf *bp)
{
	splassert(IPL_BIO);

	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = uvm_aio_aiodone;

	mtx_enter(&uvm.aiodoned_lock);
	TAILQ_INSERT_TAIL(&uvm.aio_done, bp, b_freelist);
	wakeup(&uvm.aiodoned);
	mtx_leave(&uvm.aiodoned_lock);
}

void
uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, boolean_t write,
    int error)
{
	struct vm_page *pg;
	struct rwlock *slock;
	boolean_t swap;
	int i, swslot;

	slock = NULL;
	pg = pgs[0];
	swap = (pg->uanon != NULL && pg->uobject == NULL) ||
		(pg->pg_flags & PQ_AOBJ) != 0;

	KASSERT(swap);
	KASSERT(write);

	if (error) {
		if (pg->uobject != NULL) {
			swslot = uao_find_swslot(pg->uobject,
			    pg->offset >> PAGE_SHIFT);
		} else {
			swslot = pg->uanon->an_swslot;
		}
		KASSERT(swslot);
	}

	for (i = 0; i < npages; i++) {
		int anon_disposed = 0;

		pg = pgs[i];
		KASSERT((pg->pg_flags & PG_FAKE) == 0);

		/*
		 * lock each page's object (or anon) individually since
		 * each page may need a different lock.
		 */
		if (pg->uobject != NULL) {
			slock = pg->uobject->vmobjlock;
		} else {
			slock = pg->uanon->an_lock;
		}
		rw_enter(slock, RW_WRITE);
		anon_disposed = (pg->pg_flags & PG_RELEASED) != 0;
		KASSERT(!anon_disposed || pg->uobject != NULL ||
		    pg->uanon->an_ref == 0);
		uvm_lock_pageq();

		/*
		 * if this was a successful write,
		 * mark the page PG_CLEAN.
		 */
		if (!error) {
			pmap_clear_reference(pg);
			pmap_clear_modify(pg);
			atomic_setbits_int(&pg->pg_flags, PG_CLEAN);
		}

		/*
		 * unlock everything for this page now.
		 */
		if (pg->uobject == NULL && anon_disposed) {
			uvm_unlock_pageq();
			uvm_anon_release(pg->uanon);
		} else {
			uvm_page_unbusy(&pg, 1);
			uvm_unlock_pageq();
			rw_exit(slock);
		}
	}

	if (error) {
		uvm_swap_markbad(swslot, npages);
	}
}

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */
void
uvm_aio_aiodone(struct buf *bp)
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pgs[MAXPHYS >> PAGE_SHIFT];
	int i, error;
	boolean_t write;

	KASSERT(npages <= MAXPHYS >> PAGE_SHIFT);
	splassert(IPL_BIO);

	error = (bp->b_flags & B_ERROR) ? (bp->b_error ? bp->b_error : EIO) : 0;
	write = (bp->b_flags & B_READ) == 0;

	for (i = 0; i < npages; i++)
		pgs[i] = uvm_atopg((vaddr_t)bp->b_data +
		    ((vsize_t)i << PAGE_SHIFT));
	uvm_pagermapout((vaddr_t)bp->b_data, npages);
#ifdef UVM_SWAP_ENCRYPT
	/*
	 * XXX - assumes that we only get ASYNC writes. used to be above.
	 */
	if (pgs[0]->pg_flags & PQ_ENCRYPT) {
		uvm_swap_freepages(pgs, npages);
		goto freed;
	}
#endif /* UVM_SWAP_ENCRYPT */

	uvm_aio_aiodone_pages(pgs, npages, write, error);

#ifdef UVM_SWAP_ENCRYPT
freed:
#endif
	pool_put(&bufpool, bp);
}
833