xref: /netbsd/sys/uvm/uvm_anon.c (revision 6550d01e)
/*	$NetBSD: uvm_anon.c,v 1.52 2011/02/02 15:13:34 chuck Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_anon.c: uvm anon ops
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v 1.52 2011/02/02 15:13:34 chuck Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>
#include <uvm/uvm_pdpolicy.h>

static struct pool_cache uvm_anon_cache;

static int uvm_anon_ctor(void *, void *, int);
static void uvm_anon_dtor(void *, void *);

/*
 * uvm_anon_init: set up the anon pool cache (called once at boot time).
 */
void
uvm_anon_init(void)
{

	pool_cache_bootstrap(&uvm_anon_cache, sizeof(struct vm_anon), 0, 0,
	    PR_LARGECACHE, "anonpl", NULL, IPL_NONE, uvm_anon_ctor,
	    uvm_anon_dtor, NULL);
}

static int
uvm_anon_ctor(void *arg, void *object, int flags)
{
	struct vm_anon *anon = object;

	anon->an_ref = 0;
	mutex_init(&anon->an_lock, MUTEX_DEFAULT, IPL_NONE);
	anon->an_page = NULL;
#if defined(VMSWAP)
	anon->an_swslot = 0;
#endif /* defined(VMSWAP) */

	return 0;
}

static void
uvm_anon_dtor(void *arg, void *object)
{
	struct vm_anon *anon = object;

	mutex_destroy(&anon->an_lock);
}

/*
 * uvm_analloc: allocate an anon.
 *
 * => new anon is returned locked!
 */
struct vm_anon *
uvm_analloc(void)
{
	struct vm_anon *anon;

	anon = pool_cache_get(&uvm_anon_cache, PR_NOWAIT);
	if (anon) {
		KASSERT(anon->an_ref == 0);
		KASSERT(anon->an_page == NULL);
#if defined(VMSWAP)
		KASSERT(anon->an_swslot == 0);
#endif /* defined(VMSWAP) */
		anon->an_ref = 1;
		mutex_enter(&anon->an_lock);
	}
	return anon;
}
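
/*
 * A minimal usage sketch (not from the original file): the usual
 * calling pattern for uvm_analloc().  The pool allocation above uses
 * PR_NOWAIT, so a NULL return must be handled; on success the anon
 * comes back locked with a reference count of one, and the caller
 * drops the lock once the anon has been installed wherever it is
 * going (typically an amap slot).  The example_* helper below is
 * hypothetical.
 */
#if 0	/* illustrative example only */
static struct vm_anon *
example_get_anon(void)
{
	struct vm_anon *anon;

	anon = uvm_analloc();
	if (anon == NULL) {
		/* out of memory; the caller must back off or wait */
		return NULL;
	}

	/* ... install the anon, e.g. into an amap slot ... */

	mutex_exit(&anon->an_lock);
	return anon;
}
#endif	/* illustrative example only */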

/*
 * uvm_anfree: free a single anon structure
 *
 * => caller must remove anon from its amap before calling (if it was in
 *	an amap).
 * => anon must be unlocked and have a zero reference count.
 * => we may lock the pageq's.
 */

void
uvm_anfree(struct vm_anon *anon)
{
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_anfree"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(anon=0x%x)", anon, 0,0,0);

	KASSERT(anon->an_ref == 0);
	KASSERT(!mutex_owned(&anon->an_lock));

	/*
	 * get page
	 */

	pg = anon->an_page;

	/*
	 * if there is a resident page and it is loaned, then anon may not
	 * own it.   call out to uvm_anon_lockloanpg() to ensure the real
	 * owner of the page has been identified and locked.
	 */

	if (pg && pg->loan_count) {
		mutex_enter(&anon->an_lock);
		pg = uvm_anon_lockloanpg(anon);
		mutex_exit(&anon->an_lock);
	}

	/*
	 * if we have a resident page, we must dispose of it before freeing
	 * the anon.
	 */

	if (pg) {

		/*
		 * if the page is owned by a uobject (now locked), then we must
		 * kill the loan on the page rather than free it.
		 */

		if (pg->uobject) {
			mutex_enter(&uvm_pageqlock);
			KASSERT(pg->loan_count > 0);
			pg->loan_count--;
			pg->uanon = NULL;
			mutex_exit(&uvm_pageqlock);
			mutex_exit(&pg->uobject->vmobjlock);
		} else {

			/*
			 * page has no uobject, so we must be the owner of it.
			 */

			KASSERT((pg->flags & PG_RELEASED) == 0);
			mutex_enter(&anon->an_lock);
			pmap_page_protect(pg, VM_PROT_NONE);

			/*
			 * if the page is busy, mark it as PG_RELEASED
			 * so that uvm_anon_release will release it later.
			 */

			if (pg->flags & PG_BUSY) {
				pg->flags |= PG_RELEASED;
				mutex_exit(&anon->an_lock);
				return;
			}
			mutex_enter(&uvm_pageqlock);
			uvm_pagefree(pg);
			mutex_exit(&uvm_pageqlock);
			mutex_exit(&anon->an_lock);
			UVMHIST_LOG(maphist, "anon 0x%x, page 0x%x: "
				    "freed now!", anon, pg, 0, 0);
		}
	}
#if defined(VMSWAP)
	if (pg == NULL && anon->an_swslot > 0) {
		/*
		 * the anon's data lives only in swap; adjust the count of
		 * swap-only pages before the swap slot is freed below.
		 */
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly > 0);
		uvmexp.swpgonly--;
		mutex_exit(&uvm_swap_data_lock);
	}
#endif /* defined(VMSWAP) */

	/*
	 * free any swap resources.
	 */

	uvm_anon_dropswap(anon);

	/*
	 * give a page replacement hint.
	 */

	uvmpdpol_anfree(anon);

	/*
	 * now that we've stripped the data areas from the anon,
	 * free the anon itself.
	 */

	KASSERT(anon->an_page == NULL);
#if defined(VMSWAP)
	KASSERT(anon->an_swslot == 0);
#endif /* defined(VMSWAP) */

	pool_cache_put(&uvm_anon_cache, anon);
	UVMHIST_LOG(maphist,"<- done!",0,0,0,0);
}
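
/*
 * A minimal usage sketch (not from the original file): how a caller
 * is expected to drop its reference before handing an anon to
 * uvm_anfree().  The anon must already have been removed from its
 * amap, and uvm_anfree() is called unlocked with a zero reference
 * count, as asserted above.  The example_* helper below is
 * hypothetical.
 */
#if 0	/* illustrative example only */
static void
example_unref_anon(struct vm_anon *anon)
{
	bool lastref;

	mutex_enter(&anon->an_lock);
	lastref = (--anon->an_ref == 0);
	mutex_exit(&anon->an_lock);

	if (lastref) {
		/* last reference: dispose of the page, swap and the anon */
		uvm_anfree(anon);
	}
}
#endif	/* illustrative example only */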

#if defined(VMSWAP)

/*
 * uvm_anon_dropswap: release any swap resources from this anon.
 *
 * => anon must be locked or have a reference count of 0.
 */
void
uvm_anon_dropswap(struct vm_anon *anon)
{
	UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist);

	if (anon->an_swslot == 0)
		return;

	UVMHIST_LOG(maphist,"freeing swap for anon %p, paged to swslot 0x%x",
		    anon, anon->an_swslot, 0, 0);
	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
}

#endif /* defined(VMSWAP) */
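
/*
 * A minimal usage sketch (not from the original file): releasing an
 * anon's swap slot once its contents are resident again, e.g. after
 * a successful pagein.  Holding the anon lock satisfies the "locked
 * or reference count 0" requirement stated above.  The example_*
 * helper below is hypothetical.
 */
#if 0	/* illustrative example only */
static void
example_drop_swap(struct vm_anon *anon)
{

	KASSERT(mutex_owned(&anon->an_lock));
	uvm_anon_dropswap(anon);	/* frees the slot, clears an_swslot */
}
#endif	/* illustrative example only */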

/*
 * uvm_anon_lockloanpg: given a locked anon, lock its resident page
 *
 * => anon is locked by caller
 * => on return: anon is locked
 *		 if there is a resident page:
 *			if it has a uobject, it is locked by us
 *			if it is ownerless, we take over as owner
 *		 we return the resident page (it can change during
 *		 this function)
 * => note that the only time an anon has an ownerless resident page
 *	is if the page was loaned from a uvm_object and the uvm_object
 *	disowned it
 * => this only needs to be called when you want to do an operation
 *	on an anon's resident page and that page has a non-zero loan
 *	count.
 */
struct vm_page *
uvm_anon_lockloanpg(struct vm_anon *anon)
{
	struct vm_page *pg;
	bool locked = false;

	KASSERT(mutex_owned(&anon->an_lock));

	/*
	 * loop while we have a resident page that has a non-zero loan count.
	 * if we successfully get our lock, we will "break" the loop.
	 * note that the test for pg->loan_count is not protected -- this
	 * may produce false positive results.   note that a false positive
	 * result may cause us to do more work than we need to, but it will
	 * not produce an incorrect result.
	 */

	while (((pg = anon->an_page) != NULL) && pg->loan_count != 0) {

		/*
		 * quickly check to see if the page has an object before
		 * bothering to lock the page queues.   this may also produce
		 * a false positive result, but that's ok because we do a real
		 * check after that.
		 */

		if (pg->uobject) {
			mutex_enter(&uvm_pageqlock);
			if (pg->uobject) {
				locked =
				    mutex_tryenter(&pg->uobject->vmobjlock);
			} else {
				/* object disowned before we got PQ lock */
				locked = true;
			}
			mutex_exit(&uvm_pageqlock);

			/*
			 * if we didn't get a lock (try lock failed), then we
			 * toggle our anon lock and try again
			 */

			if (!locked) {
				mutex_exit(&anon->an_lock);

				/*
				 * someone locking the object has a chance to
				 * lock us right now
				 */
				/* XXX Better than yielding but inadequate. */
				kpause("livelock", false, 1, NULL);

				mutex_enter(&anon->an_lock);
				continue;
			}
		}

		/*
		 * if page is un-owned [i.e. the object dropped its ownership],
		 * then we can take over as owner!
		 */

		if (pg->uobject == NULL && (pg->pqflags & PQ_ANON) == 0) {
			mutex_enter(&uvm_pageqlock);
			pg->pqflags |= PQ_ANON;
			pg->loan_count--;
			mutex_exit(&uvm_pageqlock);
		}
		break;
	}
	return(pg);
}
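
/*
 * A minimal usage sketch (not from the original file): the calling
 * pattern for uvm_anon_lockloanpg().  The caller holds the anon lock
 * and only needs this call when the resident page has a non-zero
 * loan count; on return either the owning uobject is locked as well,
 * or the anon has taken over ownership of the page.  The example_*
 * helper below is hypothetical.
 */
#if 0	/* illustrative example only */
static void
example_touch_loaned_page(struct vm_anon *anon)
{
	struct vm_page *pg;

	KASSERT(mutex_owned(&anon->an_lock));
	pg = anon->an_page;
	if (pg != NULL && pg->loan_count != 0)
		pg = uvm_anon_lockloanpg(anon);
	if (pg != NULL) {
		/* ... operate on the page; its owner is now locked ... */
		if (pg->uobject != NULL)
			mutex_exit(&pg->uobject->vmobjlock);
	}
}
#endif	/* illustrative example only */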

#if defined(VMSWAP)

/*
 * uvm_anon_pagein: fetch an anon's page.
 *
 * => anon must be locked, and is unlocked upon return.
 * => returns true if pagein was aborted due to lack of memory.
 */

bool
uvm_anon_pagein(struct vm_anon *anon)
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	int rv;

	/* locked: anon */
	KASSERT(mutex_owned(&anon->an_lock));

	rv = uvmfault_anonget(NULL, NULL, anon);

	/*
	 * if rv == 0, anon is still locked, else anon
	 * is unlocked
	 */

	switch (rv) {
	case 0:
		break;

	case EIO:
	case ERESTART:

		/*
		 * nothing more to do on errors.
		 * ERESTART can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */

		return false;

	default:
		return true;
	}

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */

	pg = anon->an_page;
	uobj = pg->uobject;
	if (anon->an_swslot > 0)
		uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
	pg->flags &= ~(PG_CLEAN);

	/*
	 * deactivate the page (to put it on a page queue)
	 */

	mutex_enter(&uvm_pageqlock);
	if (pg->wire_count == 0)
		uvm_pagedeactivate(pg);
	mutex_exit(&uvm_pageqlock);

	if (pg->flags & PG_WANTED) {
		wakeup(pg);
		pg->flags &= ~(PG_WANTED);
	}

	/*
	 * unlock the anon and we're done.
	 */

	mutex_exit(&anon->an_lock);
	if (uobj) {
		mutex_exit(&uobj->vmobjlock);
	}
	return false;
}

#endif /* defined(VMSWAP) */
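
/*
 * A minimal usage sketch (not from the original file): how a
 * swap-off style caller might drive uvm_anon_pagein().  The anon is
 * locked before the call and is always unlocked on return; a true
 * return means the pagein was aborted for lack of memory and should
 * be retried later.  The example_* helper below is hypothetical.
 */
#if 0	/* illustrative example only */
static bool
example_pagein_anon(struct vm_anon *anon)
{
	bool nomem;

	mutex_enter(&anon->an_lock);
	nomem = uvm_anon_pagein(anon);	/* drops the anon lock */
	return nomem;
}
#endif	/* illustrative example only */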

/*
 * uvm_anon_release: release an anon and its page.
 *
 * => caller must lock the anon.
 */

void
uvm_anon_release(struct vm_anon *anon)
{
	struct vm_page *pg = anon->an_page;

	KASSERT(mutex_owned(&anon->an_lock));
	KASSERT(pg != NULL);
	KASSERT((pg->flags & PG_RELEASED) != 0);
	KASSERT((pg->flags & PG_BUSY) != 0);
	KASSERT(pg->uobject == NULL);
	KASSERT(pg->uanon == anon);
	KASSERT(pg->loan_count == 0);
	KASSERT(anon->an_ref == 0);

	mutex_enter(&uvm_pageqlock);
	uvm_pagefree(pg);
	mutex_exit(&uvm_pageqlock);
	mutex_exit(&anon->an_lock);

	KASSERT(anon->an_page == NULL);

	uvm_anfree(anon);
}
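
/*
 * A minimal usage sketch (not from the original file): the
 * PG_RELEASED hand-off.  When uvm_anfree() finds the anon's page
 * busy it only marks it PG_RELEASED; whoever later un-busies the
 * page is expected to notice the flag and finish the job with
 * uvm_anon_release(), which frees both the page and the anon.  The
 * example_* helper below is hypothetical.
 */
#if 0	/* illustrative example only */
static void
example_unbusy_anon_page(struct vm_anon *anon, struct vm_page *pg)
{

	KASSERT(mutex_owned(&anon->an_lock));
	if (pg->flags & PG_RELEASED) {
		/* the anon was freed while the page was busy */
		uvm_anon_release(anon);	/* drops the anon lock */
		return;
	}
	if (pg->flags & PG_WANTED)
		wakeup(pg);
	pg->flags &= ~(PG_BUSY|PG_WANTED);
	UVM_PAGE_OWN(pg, NULL);
	mutex_exit(&anon->an_lock);
}
#endif	/* illustrative example only */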
457