/*	$OpenBSD: uvm_anon.c,v 1.49 2020/01/04 16:17:29 beck Exp $	*/
/*	$NetBSD: uvm_anon.c,v 1.10 2000/11/25 06:27:59 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_anon.c: uvm anon ops
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>

struct pool uvm_anon_pool;

/*
 * uvm_anon_init: initialize the anon pool that all anons are
 * allocated from.
 */
void
uvm_anon_init(void)
{
	pool_init(&uvm_anon_pool, sizeof(struct vm_anon), 0, IPL_NONE,
	    PR_WAITOK, "anonpl", NULL);
	pool_sethiwat(&uvm_anon_pool, uvmexp.free / 16);
}

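/*
 * A note on the high-water mark above: it roughly caps the number of
 * idle anons the pool keeps cached for reuse at 1/16 of the pages that
 * were free at initialization time; idle items beyond that are given
 * back to the system when freed.
 */
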
/*
 * uvm_analloc: allocate a new anon.
 *
 * => the pool_get is PR_NOWAIT, so this can fail and return NULL;
 *	callers that can sleep may retry after uvm_anwait() (see the
 *	sketch following that function below).
 * => on success the anon has one reference and holds neither a
 *	resident page nor a swap slot.
 */
struct vm_anon *
uvm_analloc(void)
{
	struct vm_anon *anon;

	anon = pool_get(&uvm_anon_pool, PR_NOWAIT);
	if (anon) {
		anon->an_ref = 1;
		anon->an_page = NULL;
		anon->an_swslot = 0;
	}
	return (anon);
}

/*
 * uvm_anfree_list: free a single anon structure.
 *
 * => caller must remove the anon from its amap before calling (if it
 *	was in an amap).
 * => if pgl is non-NULL, the anon's page (if any) is cleaned and queued
 *	on pgl for the caller to free later; otherwise the page is freed
 *	immediately.
 * => we may lock the pageq's.
 */
void
uvm_anfree_list(struct vm_anon *anon, struct pglist *pgl)
{
	struct vm_page *pg;

	/* get page */
	pg = anon->an_page;

	/*
	 * if we have a resident page, we must dispose of it before freeing
	 * the anon.
	 */
	if (pg) {
		/*
		 * if the page is busy, we just mark it as released (whoever
		 * has it busy must check for this when they wake up); the
		 * anon is then freed by the code that clears PG_RELEASED.
		 * if the page is not busy then we can free it now.
		 */
		if ((pg->pg_flags & PG_BUSY) != 0) {
			/* tell them to dump it when done */
			atomic_setbits_int(&pg->pg_flags, PG_RELEASED);
			return;
		}
		pmap_page_protect(pg, PROT_NONE);
		if (pgl != NULL) {
			/*
			 * clean the page, and put it on the pglist
			 * for later freeing.
			 */
			uvm_pageclean(pg);
			TAILQ_INSERT_HEAD(pgl, pg, pageq);
		} else {
			uvm_lock_pageq();	/* lock out pagedaemon */
			uvm_pagefree(pg);	/* bye bye */
			uvm_unlock_pageq();	/* free the daemon */
		}
	}
	if (pg == NULL && anon->an_swslot != 0) {
		/*
		 * the anon's page lives only on swap; freeing the swap
		 * slot below means one fewer swap-only page.
		 */
		KASSERT(uvmexp.swpgonly > 0);
		uvmexp.swpgonly--;
	}

	/* free any swap resources. */
	uvm_anon_dropswap(anon);

	/*
	 * now that we've stripped the data areas from the anon, free the anon
	 * itself!
	 */
	KASSERT(anon->an_page == NULL);
	KASSERT(anon->an_swslot == 0);

	pool_put(&uvm_anon_pool, anon);
}

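/*
 * The pgl argument enables deferred batch freeing: collect the pages
 * from many anons, then free them in one pass.  A sketch of the
 * pattern (illustrative; amap teardown in this tree uses a similar
 * approach):
 *
 *	struct pglist pgl;
 *
 *	TAILQ_INIT(&pgl);
 *	...call uvm_anfree_list(anon, &pgl) for each doomed anon...
 *	uvm_pglistfree(&pgl);
 */
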
/*
 * uvm_anfree: free a single anon, freeing its page (if any) immediately.
 */
void
uvm_anfree(struct vm_anon *anon)
{
	uvm_anfree_list(anon, NULL);
}

/*
 * uvm_anwait: wait for memory to become available to allocate an anon.
 */
void
uvm_anwait(void)
{
	struct vm_anon *anon;

	/* XXX: Want something like pool_wait()? */
	anon = pool_get(&uvm_anon_pool, PR_WAITOK);
	pool_put(&uvm_anon_pool, anon);
}

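/*
 * Since uvm_analloc() will not sleep, callers that can tolerate
 * sleeping typically pair the two functions like this (a sketch, not
 * code from this file):
 *
 *	struct vm_anon *anon;
 *
 *	while ((anon = uvm_analloc()) == NULL)
 *		uvm_anwait();
 */
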
/*
 * uvm_anon_dropswap: release any swap resources from this anon.
 */
void
uvm_anon_dropswap(struct vm_anon *anon)
{
	if (anon->an_swslot == 0)
		return;

	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
}

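/*
 * Note that uvm_anon_dropswap() only frees the swap slot itself: the
 * swap-only page accounting (uvmexp.swpgonly) is the caller's job, as
 * done in uvm_anfree_list() above.
 */
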
/*
 * uvm_anon_pagein: fetch an anon's page.
 *
 * => returns TRUE if pagein was aborted due to lack of memory
 *	(as written below, no path does, so it always returns FALSE).
 */
boolean_t
uvm_anon_pagein(struct vm_anon *anon)
{
	struct vm_page *pg;
	int rv;

	rv = uvmfault_anonget(NULL, NULL, anon);

	switch (rv) {
	case VM_PAGER_OK:
		break;
	case VM_PAGER_ERROR:
	case VM_PAGER_REFAULT:
		/*
		 * nothing more to do on errors.
		 * VM_PAGER_REFAULT can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */
		return FALSE;
	default:
#ifdef DIAGNOSTIC
		panic("anon_pagein: uvmfault_anonget -> %d", rv);
#else
		return FALSE;
#endif
	}

	/*
	 * ok, we've got the page now.
	 * mark it as dirty and free its swap slot.
	 */
	pg = anon->an_page;
	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
	atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);

	/* deactivate the page (to put it on a page queue) */
	pmap_clear_reference(pg);
	pmap_page_protect(pg, PROT_NONE);
	uvm_lock_pageq();
	uvm_pagedeactivate(pg);
	uvm_unlock_pageq();

	return FALSE;
}
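
/*
 * A caller sketch for uvm_anon_pagein() (illustrative only: the
 * anon_on_swapdev() iterator is hypothetical, standing in for however
 * a caller finds the anons backed by a given swap device, e.g. when
 * draining swap):
 *
 *	struct vm_anon *anon;
 *
 *	while ((anon = anon_on_swapdev(sdp)) != NULL) {
 *		if (uvm_anon_pagein(anon))
 *			return ENOMEM;
 *	}
 */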