/*	$OpenBSD: uvm_anon.c,v 1.54 2021/03/26 13:40:05 mpi Exp $	*/
/*	$NetBSD: uvm_anon.c,v 1.10 2000/11/25 06:27:59 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_anon.c: uvm anon ops
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>

struct pool uvm_anon_pool;

/*
 * uvm_anon_init: initialize the anon pool, called once at UVM init time.
 */
void
uvm_anon_init(void)
{
	pool_init(&uvm_anon_pool, sizeof(struct vm_anon), 0, IPL_MPFLOOR,
	    PR_WAITOK, "anonpl", NULL);
	pool_sethiwat(&uvm_anon_pool, uvmexp.free / 16);
}

/*
 * uvm_analloc: allocate a new anon.
 *
 * => anon will have no lock associated.
 */
struct vm_anon *
uvm_analloc(void)
{
	struct vm_anon *anon;

	anon = pool_get(&uvm_anon_pool, PR_NOWAIT);
	if (anon) {
		anon->an_lock = NULL;
		anon->an_ref = 1;
		anon->an_page = NULL;
		anon->an_swslot = 0;
	}
	return anon;
}
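
/*
 * Typical allocation pattern (sketch only; "amap", "aref" and "off"
 * stand for caller-owned state, not anything defined in this file):
 * a caller allocates an anon, falls back to uvm_anwait() when the
 * pool is exhausted, shares the owning amap's lock and only then
 * inserts the anon into the amap.
 *
 *	for (;;) {
 *		anon = uvm_analloc();
 *		if (anon != NULL)
 *			break;
 *		uvm_anwait();
 *	}
 *	anon->an_lock = amap->am_lock;
 *	amap_add(aref, off, anon, FALSE);
 */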

/*
 * uvm_anfree_list: free a single anon structure.
 *
 * => anon must be removed from the amap (if anon was in an amap).
 * => amap must be locked, if anon was owned by amap.
 * => we may lock the pageq's.
 */
void
uvm_anfree_list(struct vm_anon *anon, struct pglist *pgl)
{
	struct vm_page *pg = anon->an_page;

	KASSERT(anon->an_lock == NULL || rw_write_held(anon->an_lock));
	KASSERT(anon->an_ref == 0);

	/*
	 * Dispose of the page, if it is resident.
	 */
	if (pg != NULL) {
		KASSERT(anon->an_lock != NULL);

		/*
		 * If the page is busy, mark it as PG_RELEASED, so
		 * that uvm_anon_release(9) will release it later.
		 */
		if ((pg->pg_flags & PG_BUSY) != 0) {
			atomic_setbits_int(&pg->pg_flags, PG_RELEASED);
			rw_obj_hold(anon->an_lock);
			return;
		}
		pmap_page_protect(pg, PROT_NONE);
		if (pgl != NULL) {
			/*
			 * Clean the page and put it on the pglist
			 * for later freeing.
			 */
			uvm_lock_pageq();
			uvm_pageclean(pg);
			uvm_unlock_pageq();
			TAILQ_INSERT_HEAD(pgl, pg, pageq);
		} else {
			uvm_lock_pageq();	/* lock out pagedaemon */
			uvm_pagefree(pg);	/* bye bye */
			uvm_unlock_pageq();	/* free the daemon */
		}
	} else {
		if (anon->an_swslot != 0) {
			/* This page is no longer only in swap. */
			KASSERT(uvmexp.swpgonly > 0);
			atomic_dec_int(&uvmexp.swpgonly);
		}
	}
	anon->an_lock = NULL;

	/*
	 * Free any swap resources, leave a page replacement hint.
	 */
	uvm_anon_dropswap(anon);

	KASSERT(anon->an_page == NULL);
	KASSERT(anon->an_swslot == 0);

	pool_put(&uvm_anon_pool, anon);
}

/*
 * uvm_anwait: wait for memory to become available to allocate an anon.
 */
void
uvm_anwait(void)
{
	struct vm_anon *anon;

	/* XXX: Want something like pool_wait()? */
	anon = pool_get(&uvm_anon_pool, PR_WAITOK);
	pool_put(&uvm_anon_pool, anon);
}

/*
 * uvm_anon_pagein: fetch an anon's page.
 *
 * => anon must be locked, and is unlocked upon return.
 * => returns true if pagein was aborted due to lack of memory.
 */

boolean_t
uvm_anon_pagein(struct vm_amap *amap, struct vm_anon *anon)
{
	struct vm_page *pg;
	int rv;

	KASSERT(rw_write_held(anon->an_lock));
	KASSERT(anon->an_lock == amap->am_lock);

	/*
	 * Get the page of the anon.
	 */
	rv = uvmfault_anonget(NULL, amap, anon);

	switch (rv) {
	case VM_PAGER_OK:
		KASSERT(rw_write_held(anon->an_lock));
		break;

	case VM_PAGER_ERROR:
	case VM_PAGER_REFAULT:

		/*
		 * Nothing more to do on errors.
		 * VM_PAGER_REFAULT means that the anon was freed.
		 */

		return FALSE;

	default:
#ifdef DIAGNOSTIC
		panic("anon_pagein: uvmfault_anonget -> %d", rv);
#else
		return FALSE;
#endif
	}

	/*
	 * Mark the page as dirty and clear its swslot.
	 */
	pg = anon->an_page;
	if (anon->an_swslot > 0) {
		uvm_swap_free(anon->an_swslot, 1);
	}
	anon->an_swslot = 0;
	atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);

	/*
	 * Deactivate the page (to put it on a page queue).
	 */
	pmap_clear_reference(pg);
	pmap_page_protect(pg, PROT_NONE);
	uvm_lock_pageq();
	uvm_pagedeactivate(pg);
	uvm_unlock_pageq();
	rw_exit(anon->an_lock);

	return FALSE;
}

/*
 * uvm_anon_dropswap: release any swap resources from this anon.
 *
 * => anon must be locked or have a reference count of 0.
 */
void
uvm_anon_dropswap(struct vm_anon *anon)
{
	KASSERT(anon->an_ref == 0 || rw_lock_held(anon->an_lock));

	if (anon->an_swslot == 0)
		return;

	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
}
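
/*
 * Reference-drop sketch ("anon" stands for an anon already removed
 * from its amap, with the amap lock write-held): the last reference
 * holder frees the anon, which releases its swap slot via
 * uvm_anon_dropswap() and its resident page, if any.  uvm_anfree() is
 * assumed to be the uvm_anfree_list(anon, NULL) wrapper also used in
 * uvm_anon_release() below.
 *
 *	KASSERT(rw_write_held(anon->an_lock));
 *	if (--anon->an_ref == 0)
 *		uvm_anfree(anon);
 */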

/*
 * uvm_anon_release: release an anon and its page.
 *
 * => anon should not have any references.
 * => anon must be locked.
 */

void
uvm_anon_release(struct vm_anon *anon)
{
	struct vm_page *pg = anon->an_page;
	struct rwlock *lock;

	KASSERT(rw_write_held(anon->an_lock));
	KASSERT(pg != NULL);
	KASSERT((pg->pg_flags & PG_RELEASED) != 0);
	KASSERT((pg->pg_flags & PG_BUSY) != 0);
	KASSERT(pg->uobject == NULL);
	KASSERT(pg->uanon == anon);
	KASSERT(anon->an_ref == 0);

	uvm_lock_pageq();
	uvm_pagefree(pg);
	uvm_unlock_pageq();
	KASSERT(anon->an_page == NULL);
	lock = anon->an_lock;
	uvm_anfree(anon);
	rw_exit(lock);
	/*
	 * Note: the extra lock reference taken for the PG_RELEASED
	 * case in uvm_anfree_list() is dropped here.
	 */
	rw_obj_free(lock);
}
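
/*
 * PG_RELEASED hand-off sketch (the surrounding code is hypothetical):
 * when uvm_anfree_list() finds the page busy it only sets PG_RELEASED
 * and takes an extra lock reference, so whoever later unbusies the
 * page is expected to finish the free with uvm_anon_release() while
 * the page is still PG_BUSY and the anon lock is write-held.
 *
 *	if (pg->pg_flags & PG_RELEASED) {
 *		KASSERT(pg->uobject == NULL && pg->uanon != NULL);
 *		uvm_anon_release(pg->uanon);
 *	} else {
 *		atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
 *	}
 */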