1 /* $OpenBSD: uvm_anon.c,v 1.61 2024/12/27 12:04:40 mpi Exp $ */
2 /* $NetBSD: uvm_anon.c,v 1.10 2000/11/25 06:27:59 chs Exp $ */
3
4 /*
5 * Copyright (c) 1997 Charles D. Cranor and Washington University.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * uvm_anon.c: uvm anon ops
31 */
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/malloc.h>
36 #include <sys/pool.h>
37 #include <sys/kernel.h>
38 #include <sys/atomic.h>
39
40 #include <uvm/uvm.h>
41 #include <uvm/uvm_swap.h>
42
43 struct pool uvm_anon_pool;
44
void
uvm_anon_init(void)
{
	/* Backing pool for all struct vm_anon allocations. */
	pool_init(&uvm_anon_pool, sizeof(struct vm_anon), 0, IPL_MPFLOOR,
	    PR_WAITOK, "anonpl", NULL);
	/*
	 * Cap the number of idle items the pool caches to 1/16 of the
	 * pages that are free at init time, so cached anons do not tie
	 * up an outsized share of memory.
	 */
	pool_sethiwat(&uvm_anon_pool, uvmexp.free / 16);
}
52
53 /*
54 * uvm_analloc: allocate a new anon.
55 *
56 * => anon will have no lock associated.
57 */
58 struct vm_anon *
uvm_analloc(void)59 uvm_analloc(void)
60 {
61 struct vm_anon *anon;
62
63 anon = pool_get(&uvm_anon_pool, PR_NOWAIT);
64 if (anon) {
65 anon->an_lock = NULL;
66 anon->an_ref = 1;
67 anon->an_page = NULL;
68 anon->an_swslot = 0;
69 }
70 return anon;
71 }
72
73 /*
74 * uvm_anfree_list: free a single anon structure
75 *
76 * => anon must be removed from the amap (if anon was in an amap).
77 * => amap must be locked, if anon was owned by amap.
78 * => we may lock the pageq's.
79 */
void
uvm_anfree_list(struct vm_anon *anon, struct pglist *pgl)
{
	struct vm_page *pg = anon->an_page;

	/* Caller must hold the anon's lock (if any) and all refs are gone. */
	KASSERT(anon->an_lock == NULL || rw_write_held(anon->an_lock));
	KASSERT(anon->an_ref == 0);

	/*
	 * Dispose of the page, if it is resident.
	 */
	if (pg != NULL) {
		KASSERT(anon->an_lock != NULL);

		/*
		 * If the page is busy, mark it as PG_RELEASED, so
		 * that uvm_anon_release(9) would release it later.
		 * We take an extra reference on the lock so that it
		 * survives until uvm_anon_release() drops it; the
		 * anon itself is freed there, not here.
		 */
		if ((pg->pg_flags & PG_BUSY) != 0) {
			atomic_setbits_int(&pg->pg_flags, PG_RELEASED);
			rw_obj_hold(anon->an_lock);
			return;
		}
		/* Remove all mappings before the page is torn down. */
		pmap_page_protect(pg, PROT_NONE);
		if (pgl != NULL) {
			/*
			 * clean page, and put it on pglist
			 * for later freeing.
			 */
			uvm_lock_pageq();
			uvm_pageclean(pg);
			uvm_unlock_pageq();
			TAILQ_INSERT_HEAD(pgl, pg, pageq);
		} else {
			uvm_lock_pageq();	/* lock out pagedaemon */
			uvm_pagefree(pg);	/* bye bye */
			uvm_unlock_pageq();	/* free the daemon */
		}
	} else {
		if (anon->an_swslot != 0 && anon->an_swslot != SWSLOT_BAD) {
			/* This page is no longer only in swap. */
			KASSERT(uvmexp.swpgonly > 0);
			atomic_dec_int(&uvmexp.swpgonly);
		}
	}
	anon->an_lock = NULL;

	/*
	 * Free any swap resources, leave a page replacement hint.
	 */
	uvm_anon_dropswap(anon);

	/* At this point the anon must be fully detached. */
	KASSERT(anon->an_page == NULL);
	KASSERT(anon->an_swslot == 0);

	pool_put(&uvm_anon_pool, anon);
}
137
138 /*
139 * uvm_anwait: wait for memory to become available to allocate an anon.
140 */
void
uvm_anwait(void)
{
	struct vm_anon *anon;

	/*
	 * XXX: Want something like pool_wait()?
	 * A PR_WAITOK allocation sleeps until an item is available;
	 * we immediately return it, having only waited for memory.
	 */
	anon = pool_get(&uvm_anon_pool, PR_WAITOK);
	pool_put(&uvm_anon_pool, anon);
}
150
151 /*
152 * uvm_anon_pagein: fetch an anon's page.
153 *
154 * => anon must be locked, and is unlocked upon return.
155 * => returns true if pagein was aborted due to lack of memory.
156 */
157
boolean_t
uvm_anon_pagein(struct vm_amap *amap, struct vm_anon *anon)
{
	struct vm_page *pg;
	int rv;

	/* The anon must share (and hold) the amap's write lock. */
	KASSERT(rw_write_held(anon->an_lock));
	KASSERT(anon->an_lock == amap->am_lock);

	/*
	 * Get the page of the anon.
	 */
	rv = uvmfault_anonget(NULL, amap, anon);

	switch (rv) {
	case 0:
		/* Success - we have the page. */
		KASSERT(rw_write_held(anon->an_lock));
		break;
	case EACCES:
	case ERESTART:
		/*
		 * Nothing more to do on errors. ERESTART means that the
		 * anon was freed.  In either case uvmfault_anonget() has
		 * already released the lock, so just return.
		 */
		return FALSE;
	default:
#ifdef DIAGNOSTIC
		panic("anon_pagein: uvmfault_anonget -> %d", rv);
#else
		return FALSE;
#endif
	}

	/*
	 * Mark the page as dirty and clear its swslot.
	 * The swap copy is stale once the page can be modified in core.
	 */
	pg = anon->an_page;
	if (anon->an_swslot > 0) {
		uvm_swap_free(anon->an_swslot, 1);
	}
	anon->an_swslot = 0;
	/* Clearing PG_CLEAN marks the page dirty. */
	atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);

	/*
	 * Deactivate the page (to put it on a page queue).
	 */
	uvm_lock_pageq();
	uvm_pagedeactivate(pg);
	uvm_unlock_pageq();
	rw_exit(anon->an_lock);

	return FALSE;
}
212
213 /*
214 * uvm_anon_dropswap: release any swap resources from this anon.
215 *
216 * => anon must be locked or have a reference count of 0.
217 */
218 void
uvm_anon_dropswap(struct vm_anon * anon)219 uvm_anon_dropswap(struct vm_anon *anon)
220 {
221 KASSERT(anon->an_ref == 0 || rw_lock_held(anon->an_lock));
222
223 if (anon->an_swslot == 0)
224 return;
225
226 uvm_swap_free(anon->an_swslot, 1);
227 anon->an_swslot = 0;
228 }
229
230
231 /*
232 * uvm_anon_release: release an anon and its page.
233 *
234 * => anon should not have any references.
235 * => anon must be locked.
236 */
237
void
uvm_anon_release(struct vm_anon *anon)
{
	struct vm_page *pg = anon->an_page;
	struct rwlock *lock;

	/*
	 * Sanity: this path is only reached for an unreferenced anon
	 * whose busy page was flagged PG_RELEASED by uvm_anfree_list().
	 */
	KASSERT(rw_write_held(anon->an_lock));
	KASSERT(pg != NULL);
	KASSERT((pg->pg_flags & PG_RELEASED) != 0);
	KASSERT((pg->pg_flags & PG_BUSY) != 0);
	KASSERT(pg->uobject == NULL);
	KASSERT(pg->uanon == anon);
	KASSERT(anon->an_ref == 0);

	uvm_lock_pageq();
	pmap_page_protect(pg, PROT_NONE);
	uvm_pagefree(pg);
	uvm_unlock_pageq();
	KASSERT(anon->an_page == NULL);
	/*
	 * Stash the lock pointer: the anon is returned to the pool
	 * before we are done with the lock, so anon->an_lock must not
	 * be dereferenced after pool_put().
	 */
	lock = anon->an_lock;
	uvm_anon_dropswap(anon);
	pool_put(&uvm_anon_pool, anon);
	rw_exit(lock);
	/* Note: extra reference is held for PG_RELEASED case. */
	rw_obj_free(lock);
}
264