/*	$OpenBSD: uvm_fault.c,v 1.90 2016/05/08 11:52:32 stefan Exp $	*/
/*	$NetBSD: uvm_fault.c,v 1.51 2000/08/06 00:22:53 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_fault.c,v 1.1.2.23 1998/02/06 05:29:05 chs Exp
 */

/*
 * uvm_fault.c: fault handler
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>

#include <uvm/uvm.h>

/*
 *
 * a word on page faults:
 *
 * types of page faults we handle:
 *
 * CASE 1: upper layer faults                   CASE 2: lower layer faults
 *
 *    CASE 1A         CASE 1B                  CASE 2A        CASE 2B
 *    read/write1     write>1                  read/write   +-cow_write/zero
 *         |             |                         |        |
 *      +--|--+       +--|--+     +-----+       +  |  +     | +-----+
 * amap |  V  |       |  ----------->new|          |        | |  ^  |
 *      +-----+       +-----+     +-----+       +  |  +     | +--|--+
 *                                                 |        |    |
 *      +-----+       +-----+                   +--|--+     | +--|--+
 * uobj | d/c |       | d/c |                   |  V  |     +----|  |
 *      +-----+       +-----+                   +-----+       +-----+
 *
 * d/c = don't care
 *
 * case [0]: layerless fault
 *	no amap or uobj is present.   this is an error.
 *
 * case [1]: upper layer fault [anon active]
 *    1A: [read] or [write with anon->an_ref == 1]
 *		I/O takes place in top level anon and uobj is not touched.
 *    1B: [write with anon->an_ref > 1]
 *		new anon is alloc'd and data is copied off ["COW"]
 *
 * case [2]: lower layer fault [uobj]
 *    2A: [read on non-NULL uobj] or [write to non-copy_on_write area]
 *		I/O takes place directly in object.
 *    2B: [write to copy_on_write] or [read on NULL uobj]
 *		data is "promoted" from uobj to a new anon.
 *		if uobj is null, then we zero fill.
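 *
 * for example: the first write to a private (MAP_PRIVATE) file mapping
 * is a case 2B fault (the file page is "promoted" from the uobj into a
 * new anon), while a later write to that page after a fork(), when the
 * anon is still shared copy-on-write (an_ref > 1), is a case 1B fault.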
 *
 * we follow the standard UVM locking protocol ordering:
 *
 * MAPS => AMAP => UOBJ => ANON => PAGE QUEUES (PQ)
 * we hold a PG_BUSY page if we unlock for I/O
 *
 *
 * the code is structured as follows:
 *
 *     - init the "IN" params in the ufi structure
 *   ReFault:
 *     - do lookups [locks maps], check protection, handle needs_copy
 *     - check for case 0 fault (error)
 *     - establish "range" of fault
 *     - if we have an amap lock it and extract the anons
 *     - if sequential advice deactivate pages behind us
 *     - at the same time check pmap for unmapped areas and anon for pages
 *	 that we could map in (and do map it if found)
 *     - check object for resident pages that we could map in
 *     - if (case 2) goto Case2
 *     - >>> handle case 1
 *           - ensure source anon is resident in RAM
 *           - if case 1B alloc new anon and copy from source
 *           - map the correct page in
 *   Case2:
 *     - >>> handle case 2
 *           - ensure source page is resident (if uobj)
 *           - if case 2B alloc new anon and copy from source (could be zero
 *		fill if uobj == NULL)
 *           - map the correct page in
 *     - done!
 *
 * note on paging:
 *   if we have to do I/O we place a PG_BUSY page in the correct object,
 * unlock everything, and do the I/O.   when I/O is done we must reverify
 * the state of the world before assuming that our data structures are
 * valid.   [because mappings could change while the map is unlocked]
 *
 * alternative 1: unbusy the page in question and restart the page fault
 *    from the top (ReFault).   this is easy but does not take advantage
 *    of the information that we already have from our previous lookup,
 *    although it is possible that the "hints" in the vm_map will help here.
 *
 * alternative 2: the system already keeps track of a "version" number of
 *    a map.   [i.e. every time you write-lock a map (e.g. to change a
 *    mapping) you bump the version number up by one...]   so, we can save
 *    the version number of the map before we release the lock and start I/O.
 *    then when I/O is done we can relock and check the version numbers
 *    to see if anything changed.   this might save us something over
 *    alternative 1 because we don't have to unbusy the page and there
 *    may be fewer comparisons(?).
 *
 * alternative 3: put in backpointers or a way to "hold" part of a map
 *    in place while I/O is in progress.   this could be complex to
 *    implement (especially with structures like amap that can be referenced
 *    by multiple map entries, and figuring out what should wait could be
 *    complex as well...).
 *
 * given that we are not currently multiprocessor or multithreaded we might
 * as well choose alternative 2 now.   maybe alternative 3 would be useful
 * in the future.    XXX keep in mind for future consideration//rechecking.
 */

/*
 * local data structures
 */
struct uvm_advice {
	int nback;
	int nforw;
};
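/*
 * worked example (values computed by uvmfault_init() below, assuming a
 * 4 KB page size): MADV_NORMAL gets nback = 3 and nforw = 4, so a wide
 * fault considers an 8 page window around the faulting address, while
 * MADV_SEQUENTIAL gets nback = 8 and nforw = 7, a 16 page window, which
 * is exactly UVM_MAXRANGE.
 */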

/*
 * page range array: set up in uvmfault_init().
 */
static struct uvm_advice uvmadvice[MADV_MASK + 1];

#define UVM_MAXRANGE 16	/* must be max() of nback+nforw+1 */

/*
 * private prototypes
 */
static void uvmfault_amapcopy(struct uvm_faultinfo *);
static __inline void uvmfault_anonflush(struct vm_anon **, int);
void	uvmfault_unlockmaps(struct uvm_faultinfo *, boolean_t);
void	uvmfault_update_stats(struct uvm_faultinfo *);

/*
 * inline functions
 */
/*
 * uvmfault_anonflush: try and deactivate pages in specified anons
 *
 * => does not have to deactivate page if it is busy
 */
static __inline void
uvmfault_anonflush(struct vm_anon **anons, int n)
{
	int lcv;
	struct vm_page *pg;

	for (lcv = 0 ; lcv < n ; lcv++) {
		if (anons[lcv] == NULL)
			continue;
		pg = anons[lcv]->an_page;
		if (pg && (pg->pg_flags & PG_BUSY) == 0) {
			uvm_lock_pageq();
			if (pg->wire_count == 0) {
				pmap_page_protect(pg, PROT_NONE);
				uvm_pagedeactivate(pg);
			}
			uvm_unlock_pageq();
		}
	}
}

/*
 * normal functions
 */
/*
 * uvmfault_init: compute proper values for the uvmadvice[] array.
 */
void
uvmfault_init(void)
{
	int npages;

	npages = atop(16384);
	if (npages > 0) {
		KASSERT(npages <= UVM_MAXRANGE / 2);
		uvmadvice[MADV_NORMAL].nforw = npages;
		uvmadvice[MADV_NORMAL].nback = npages - 1;
	}

	npages = atop(32768);
	if (npages > 0) {
		KASSERT(npages <= UVM_MAXRANGE / 2);
		uvmadvice[MADV_SEQUENTIAL].nforw = npages - 1;
		uvmadvice[MADV_SEQUENTIAL].nback = npages;
	}
}

/*
 * uvmfault_amapcopy: clear "needs_copy" in a map.
 *
 * => if we are out of RAM we sleep (waiting for more)
 */
static void
uvmfault_amapcopy(struct uvm_faultinfo *ufi)
{

	/* while we haven't done the job */
	while (1) {
		/* no mapping?  give up. */
		if (uvmfault_lookup(ufi, TRUE) == FALSE)
			return;

		/* copy if needed. */
		if (UVM_ET_ISNEEDSCOPY(ufi->entry))
			amap_copy(ufi->map, ufi->entry, M_NOWAIT, TRUE,
			    ufi->orig_rvaddr, ufi->orig_rvaddr + 1);

		/* didn't work?  must be out of RAM.  sleep. */
		if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
			uvmfault_unlockmaps(ufi, TRUE);
			uvm_wait("fltamapcopy");
			continue;
		}

		/* got it! */
		uvmfault_unlockmaps(ufi, TRUE);
		return;
	}
	/*NOTREACHED*/
}

/*
 * uvmfault_anonget: get data in an anon into a non-busy, non-released
 * page in that anon.
 *
 * => we don't move the page on the queues [gets moved later]
 * => if we allocate a new page [we_own], it gets put on the queues.
 *    either way, the result is that the page is on the queues at return time
 */
int
uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
    struct vm_anon *anon)
{
	boolean_t we_own;	/* we own anon's page? */
	boolean_t locked;	/* did we relock? */
	struct vm_page *pg;
	int result;

	result = 0;		/* XXX shut up gcc */
	uvmexp.fltanget++;
	/* bump rusage counters */
	if (anon->an_page)
		curproc->p_ru.ru_minflt++;
	else
		curproc->p_ru.ru_majflt++;

	/* loop until we get it, or fail. */
	while (1) {
		we_own = FALSE;		/* TRUE if we set PG_BUSY on a page */
		pg = anon->an_page;

		/* page there?  make sure it is not busy/released. */
		if (pg) {
			KASSERT(pg->pg_flags & PQ_ANON);
			KASSERT(pg->uanon == anon);

			/*
			 * if the page is busy, we drop all the locks and
			 * try again.
			 */
			if ((pg->pg_flags & (PG_BUSY|PG_RELEASED)) == 0)
				return (VM_PAGER_OK);
			atomic_setbits_int(&pg->pg_flags, PG_WANTED);
			uvmexp.fltpgwait++;

			/*
			 * the last unlock must be an atomic unlock+wait on
			 * the owner of the page
			 */
			uvmfault_unlockall(ufi, amap, NULL, NULL);
			UVM_WAIT(pg, 0, "anonget2", 0);
			/* ready to relock and try again */
		} else {
			/* no page, we must try and bring it in. */
			pg = uvm_pagealloc(NULL, 0, anon, 0);

			if (pg == NULL) {		/* out of RAM. */
				uvmfault_unlockall(ufi, amap, NULL, anon);
				uvmexp.fltnoram++;
				uvm_wait("flt_noram1");
				/* ready to relock and try again */
			} else {
				/* we set the PG_BUSY bit */
				we_own = TRUE;
				uvmfault_unlockall(ufi, amap, NULL, anon);

				/*
				 * we are passing a PG_BUSY+PG_FAKE+PG_CLEAN
				 * page into the uvm_swap_get function with
				 * all data structures unlocked.  note that
				 * it is ok to read an_swslot here because
				 * we hold PG_BUSY on the page.
				 */
				uvmexp.pageins++;
				result = uvm_swap_get(pg, anon->an_swslot,
				    PGO_SYNCIO);

				/*
				 * we clean up after the i/o below in the
				 * "we_own" case
				 */
				/* ready to relock and try again */
			}
		}

		/* now relock and try again */
		locked = uvmfault_relock(ufi);

		/*
		 * if we own the page (i.e. we set PG_BUSY), then we need
		 * to clean up after the I/O. there are three cases to
		 * consider:
		 *   [1] page released during I/O: free anon and ReFault.
		 *   [2] I/O not OK.   free the page and cause the fault
		 *       to fail.
		 *   [3] I/O OK!   activate the page and sync with the
		 *       non-we_own case (i.e. drop anon lock if not locked).
		 */
		if (we_own) {
			if (pg->pg_flags & PG_WANTED) {
				wakeup(pg);
			}
			/* un-busy! */
			atomic_clearbits_int(&pg->pg_flags,
			    PG_WANTED|PG_BUSY|PG_FAKE);
			UVM_PAGE_OWN(pg, NULL);

			/*
			 * if we were RELEASED during I/O, then our anon is
			 * no longer part of an amap.   we need to free the
			 * anon and try again.
			 */
			if (pg->pg_flags & PG_RELEASED) {
				pmap_page_protect(pg, PROT_NONE);
				uvm_anfree(anon);	/* frees page for us */
				if (locked)
					uvmfault_unlockall(ufi, amap, NULL,
					    NULL);
				uvmexp.fltpgrele++;
				return (VM_PAGER_REFAULT);	/* refault! */
			}

			if (result != VM_PAGER_OK) {
				KASSERT(result != VM_PAGER_PEND);

				/* remove page from anon */
				anon->an_page = NULL;

				/*
				 * remove the swap slot from the anon
				 * and mark the anon as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */
				uvm_swap_markbad(anon->an_swslot, 1);
				anon->an_swslot = SWSLOT_BAD;

				/*
				 * note: page was never !PG_BUSY, so it
				 * can't be mapped and thus no need to
				 * pmap_page_protect it...
				 */
				uvm_lock_pageq();
				uvm_pagefree(pg);
				uvm_unlock_pageq();

				if (locked)
					uvmfault_unlockall(ufi, amap, NULL,
					    anon);
				return (VM_PAGER_ERROR);
			}

			/*
			 * must be OK, clear modify (already PG_CLEAN)
			 * and activate
			 */
			pmap_clear_modify(pg);
			uvm_lock_pageq();
			uvm_pageactivate(pg);
			uvm_unlock_pageq();
		}

		/* we were not able to relock.   restart fault. */
		if (!locked)
			return (VM_PAGER_REFAULT);

		/* verify no one touched the amap and moved the anon on us. */
		if (ufi != NULL &&
		    amap_lookup(&ufi->entry->aref,
		    ufi->orig_rvaddr - ufi->entry->start) != anon) {

			uvmfault_unlockall(ufi, amap, NULL, anon);
			return (VM_PAGER_REFAULT);
		}

		/* try it again! */
		uvmexp.fltanretry++;
		continue;

	} /* while (1) */
	/*NOTREACHED*/
}

/*
 * Update statistics after fault resolution.
 * - maxrss
 */
void
uvmfault_update_stats(struct uvm_faultinfo *ufi)
{
	struct vm_map *map;
	struct proc *p;
	vsize_t res;

	map = ufi->orig_map;

	/*
	 * If this is a nested pmap (eg, a virtual machine pmap managed
	 * by vmm(4) on amd64/i386), don't do any updating, just return.
	 *
	 * pmap_nested() on other archs is #defined to 0, so this is a
	 * no-op.
	 */
	if (pmap_nested(map->pmap))
		return;

	/* Update the maxrss for the process. */
	if (map->flags & VM_MAP_ISVMSPACE) {
		p = curproc;
		KASSERT(p != NULL && &p->p_vmspace->vm_map == map);

		res = pmap_resident_count(map->pmap);
		/* Convert res from pages to kilobytes. */
		res <<= (PAGE_SHIFT - 10);

		if (p->p_ru.ru_maxrss < res)
			p->p_ru.ru_maxrss = res;
	}
}

/*
 *   F A U L T   -   m a i n   e n t r y   p o i n t
 */

/*
 * uvm_fault: page fault handler
 *
 * => called from MD code to resolve a page fault
 * => VM data structures usually should be unlocked.   however, it is
 *	possible to call here with the main map locked if the caller
 *	gets a write lock, sets it recursive, and then calls us (c.f.
 *	uvm_map_pageable).   this should be avoided because it keeps
 *	the map locked off during I/O.
 */
#define MASK(entry)	(UVM_ET_ISCOPYONWRITE(entry) ? \
			 ~PROT_WRITE : PROT_MASK)
int
uvm_fault(vm_map_t orig_map, vaddr_t vaddr, vm_fault_t fault_type,
    vm_prot_t access_type)
{
	struct uvm_faultinfo ufi;
	vm_prot_t enter_prot;
	boolean_t wired, narrow, promote, locked, shadowed;
	int npages, nback, nforw, centeridx, result, lcv, gotpages, ret;
	vaddr_t startva, currva;
	voff_t uoff;
	paddr_t pa;
	struct vm_amap *amap;
	struct uvm_object *uobj;
	struct vm_anon *anons_store[UVM_MAXRANGE], **anons, *anon, *oanon;
	struct vm_page *pages[UVM_MAXRANGE], *pg, *uobjpage;

	anon = NULL;
	pg = NULL;

	uvmexp.faults++;	/* XXX: locking? */

	/* init the IN parameters in the ufi */
	ufi.orig_map = orig_map;
	ufi.orig_rvaddr = trunc_page(vaddr);
	ufi.orig_size = PAGE_SIZE;	/* can't get any smaller than this */
	if (fault_type == VM_FAULT_WIRE)
		narrow = TRUE;		/* don't look for neighborhood
					 * pages on wire */
	else
		narrow = FALSE;		/* normal fault */

	/* "goto ReFault" means restart the page fault from ground zero. */
ReFault:
	/* lookup and lock the maps */
	if (uvmfault_lookup(&ufi, FALSE) == FALSE) {
		return (EFAULT);
	}

#ifdef DIAGNOSTIC
	if ((ufi.map->flags & VM_MAP_PAGEABLE) == 0)
		panic("uvm_fault: fault on non-pageable map (%p, 0x%lx)",
		    ufi.map, vaddr);
#endif

	/* check protection */
	if ((ufi.entry->protection & access_type) != access_type) {
		uvmfault_unlockmaps(&ufi, FALSE);
		return (EACCES);
	}

	/*
	 * "enter_prot" is the protection we want to enter the page in at.
	 * for certain pages (e.g. copy-on-write pages) this protection can
	 * be more strict than ufi.entry->protection.  "wired" means either
	 * the entry is wired or we are fault-wiring the pg.
	 */

	enter_prot = ufi.entry->protection;
	wired = VM_MAPENT_ISWIRED(ufi.entry) || (fault_type == VM_FAULT_WIRE);
	if (wired)
		access_type = enter_prot;	/* full access for wired */

	/* handle "needs_copy" case. */
	if (UVM_ET_ISNEEDSCOPY(ufi.entry)) {
		if ((access_type & PROT_WRITE) ||
		    (ufi.entry->object.uvm_obj == NULL)) {
			/* need to clear */
			uvmfault_unlockmaps(&ufi, FALSE);
			uvmfault_amapcopy(&ufi);
			uvmexp.fltamcopy++;
			goto ReFault;
		} else {
			/*
			 * ensure that we pmap_enter page R/O since
			 * needs_copy is still true
			 */
			enter_prot &= ~PROT_WRITE;
		}
	}

	/* identify the players */
	amap = ufi.entry->aref.ar_amap;		/* top layer */
	uobj = ufi.entry->object.uvm_obj;	/* bottom layer */

	/*
	 * check for a case 0 fault.  if nothing backing the entry then
	 * error now.
	 */
	if (amap == NULL && uobj == NULL) {
		uvmfault_unlockmaps(&ufi, FALSE);
		return (EFAULT);
	}

	/*
	 * establish range of interest based on advice from mapper
	 * and then clip to fit map entry.   note that we only want
	 * to do this the first time through the fault.   if we
	 * ReFault we will disable this by setting "narrow" to true.
	 */
	if (narrow == FALSE) {

		/* wide fault (!narrow) */
		nback = min(uvmadvice[ufi.entry->advice].nback,
		    (ufi.orig_rvaddr - ufi.entry->start) >> PAGE_SHIFT);
		startva = ufi.orig_rvaddr - ((vsize_t)nback << PAGE_SHIFT);
		nforw = min(uvmadvice[ufi.entry->advice].nforw,
		    ((ufi.entry->end - ufi.orig_rvaddr) >>
		    PAGE_SHIFT) - 1);
		/*
		 * note: "-1" because we don't want to count the
		 * faulting page as forw
		 */
		npages = nback + nforw + 1;
		centeridx = nback;

		narrow = TRUE;	/* ensure only once per-fault */
	} else {
		/* narrow fault! */
		nback = nforw = 0;
		startva = ufi.orig_rvaddr;
		npages = 1;
		centeridx = 0;
	}

	/* if we've got an amap, extract current anons. */
	if (amap) {
		anons = anons_store;
		amap_lookups(&ufi.entry->aref, startva - ufi.entry->start,
		    anons, npages);
	} else {
		anons = NULL;	/* to be safe */
	}

	/*
	 * for MADV_SEQUENTIAL mappings we want to deactivate the back pages
	 * now and then forget about them (for the rest of the fault).
	 */
	if (ufi.entry->advice == MADV_SEQUENTIAL && nback != 0) {
		/* flush back-page anons? */
		if (amap)
			uvmfault_anonflush(anons, nback);

		/* flush object? */
		if (uobj) {
			uoff = (startva - ufi.entry->start) + ufi.entry->offset;
			(void) uobj->pgops->pgo_flush(uobj, uoff, uoff +
			    ((vsize_t)nback << PAGE_SHIFT), PGO_DEACTIVATE);
		}

		/* now forget about the backpages */
		if (amap)
			anons += nback;
		startva += ((vsize_t)nback << PAGE_SHIFT);
		npages -= nback;
		centeridx = 0;
	}

	/*
	 * map in the backpages and frontpages we found in the amap in hopes
	 * of preventing future faults.    we also init the pages[] array as
	 * we go.
	 */
	currva = startva;
	shadowed = FALSE;
	for (lcv = 0 ; lcv < npages ; lcv++, currva += PAGE_SIZE) {
		/*
		 * don't play with VAs that are already mapped
		 * (except for center)
		 */
		if (lcv != centeridx &&
		    pmap_extract(ufi.orig_map->pmap, currva, &pa)) {
			pages[lcv] = PGO_DONTCARE;
			continue;
		}

		/* unmapped or center page. check if any anon at this level. */
		if (amap == NULL || anons[lcv] == NULL) {
			pages[lcv] = NULL;
			continue;
		}

		/* check for present page and map if possible. re-activate it. */
		pages[lcv] = PGO_DONTCARE;
		if (lcv == centeridx) {		/* save center for later! */
			shadowed = TRUE;
			continue;
		}
		anon = anons[lcv];
		if (anon->an_page &&
		    (anon->an_page->pg_flags & (PG_RELEASED|PG_BUSY)) == 0) {
			uvm_lock_pageq();
			uvm_pageactivate(anon->an_page);	/* reactivate */
			uvm_unlock_pageq();
			uvmexp.fltnamap++;

			/*
			 * Since this isn't the page that's actually faulting,
			 * ignore pmap_enter() failures; it's not critical
			 * that we enter these right now.
			 */
			(void) pmap_enter(ufi.orig_map->pmap, currva,
			    VM_PAGE_TO_PHYS(anon->an_page),
			    (anon->an_ref > 1) ? (enter_prot & ~PROT_WRITE) :
			    enter_prot,
			    PMAP_CANFAIL |
			    (VM_MAPENT_ISWIRED(ufi.entry) ? PMAP_WIRED : 0));
		}
	}
	if (npages > 1)
		pmap_update(ufi.orig_map->pmap);

	/* (shadowed == TRUE) if there is an anon at the faulting address */
	/*
	 * note that if we are really short of RAM we could sleep in the above
	 * call to pmap_enter.   bad?
	 *
	 * XXX Actually, that is bad; pmap_enter() should just fail in that
	 * XXX case. --thorpej
	 */
	/*
	 * if the desired page is not shadowed by the amap and we have a
	 * backing object, then we check to see if the backing object would
	 * prefer to handle the fault itself (rather than letting us do it
	 * with the usual pgo_get hook).  the backing object signals this by
	 * providing a pgo_fault routine.
	 */
	if (uobj && shadowed == FALSE && uobj->pgops->pgo_fault != NULL) {
		result = uobj->pgops->pgo_fault(&ufi, startva, pages, npages,
		    centeridx, fault_type, access_type,
		    PGO_LOCKED);

		if (result == VM_PAGER_OK)
			return (0);		/* pgo_fault did pmap enter */
		else if (result == VM_PAGER_REFAULT)
			goto ReFault;		/* try again! */
		else
			return (EACCES);
	}

	/*
	 * now, if the desired page is not shadowed by the amap and we have
	 * a backing object that does not have a special fault routine, then
	 * we ask (with pgo_get) the object for resident pages that we care
	 * about and attempt to map them in.  we do not let pgo_get block
	 * (PGO_LOCKED).
	 *
	 * ("get" has the option of doing a pmap_enter for us)
	 */
	if (uobj && shadowed == FALSE) {
		uvmexp.fltlget++;
		gotpages = npages;
		(void) uobj->pgops->pgo_get(uobj, ufi.entry->offset +
		    (startva - ufi.entry->start),
		    pages, &gotpages, centeridx,
		    access_type & MASK(ufi.entry),
		    ufi.entry->advice, PGO_LOCKED);

		/* check for pages to map, if we got any */
		uobjpage = NULL;
		if (gotpages) {
			currva = startva;
			for (lcv = 0 ; lcv < npages ;
			    lcv++, currva += PAGE_SIZE) {
				if (pages[lcv] == NULL ||
				    pages[lcv] == PGO_DONTCARE)
					continue;

				KASSERT((pages[lcv]->pg_flags & PG_RELEASED) == 0);

				/*
				 * if center page is resident and not
				 * PG_BUSY, then pgo_get made it PG_BUSY
				 * for us and gave us a handle to it.
				 * remember this page as "uobjpage."
				 * (for later use).
				 */
				if (lcv == centeridx) {
					uobjpage = pages[lcv];
					continue;
				}

				/*
				 * note: calling pgo_get with locked data
				 * structures returns us pages which are
				 * neither busy nor released, so we don't
				 * need to check for this.   we can just
				 * directly enter the page (after moving it
				 * to the head of the active queue [useful?]).
				 */

				uvm_lock_pageq();
				uvm_pageactivate(pages[lcv]);	/* reactivate */
				uvm_unlock_pageq();
				uvmexp.fltnomap++;

				/*
				 * Since this page isn't the page that's
				 * actually faulting, ignore pmap_enter()
				 * failures; it's not critical that we
				 * enter these right now.
				 */
				(void) pmap_enter(ufi.orig_map->pmap, currva,
				    VM_PAGE_TO_PHYS(pages[lcv]),
				    enter_prot & MASK(ufi.entry),
				    PMAP_CANFAIL |
				    (wired ? PMAP_WIRED : 0));

				/*
				 * NOTE: page can't be PG_WANTED because
				 * we've held the lock the whole time
				 * we've had the handle.
				 */
				atomic_clearbits_int(&pages[lcv]->pg_flags,
				    PG_BUSY);
				UVM_PAGE_OWN(pages[lcv], NULL);
			}	/* for "lcv" loop */
			pmap_update(ufi.orig_map->pmap);
		}	/* "gotpages" != 0 */
		/* note: object still _locked_ */
	} else {
		uobjpage = NULL;
	}

	/*
	 * note that at this point we are done with any front or back pages.
	 * we are now going to focus on the center page (i.e. the one we've
	 * faulted on).  if we have faulted on the top (anon) layer
	 * [i.e. case 1], then the anon we want is anons[centeridx] (we have
	 * not touched it yet).  if we have faulted on the bottom (uobj)
	 * layer [i.e. case 2] and the page was both present and available,
	 * then we've got a pointer to it as "uobjpage" and we've already
	 * made it BUSY.
	 */
	/*
	 * there are four possible cases we must address: 1A, 1B, 2A, and 2B
	 */
	/* redirect case 2: if we are not shadowed, go to case 2. */
	if (shadowed == FALSE)
		goto Case2;

	/* handle case 1: fault on an anon in our amap */
	anon = anons[centeridx];

	/*
	 * no matter if we have case 1A or case 1B we are going to need to
	 * have the anon's memory resident.   ensure that now.
	 */
	/*
	 * let uvmfault_anonget do the dirty work.
	 * also, if it is OK, then the anon's page is on the queues.
	 */
	result = uvmfault_anonget(&ufi, amap, anon);
	switch (result) {
	case VM_PAGER_OK:
		break;

	case VM_PAGER_REFAULT:
		goto ReFault;

	case VM_PAGER_ERROR:
		/*
		 * An error occurred while trying to bring in the
		 * page -- this is the only error we return right
		 * now.
		 */
		return (EACCES);	/* XXX */
	default:
#ifdef DIAGNOSTIC
		panic("uvm_fault: uvmfault_anonget -> %d", result);
#else
		return (EACCES);
#endif
	}

	/*
	 * if we are case 1B then we will need to allocate a new blank
	 * anon to transfer the data into.   note that we have a lock
	 * on anon, so no one can busy or release the page until we are done.
	 * also note that the ref count can't drop to zero here because
	 * it is > 1 and we are only dropping one ref.
	 *
	 * in the (hopefully very rare) case that we are out of RAM we
	 * will wait for more RAM, and refault.
	 *
	 * if we are out of anon VM we wait for RAM to become available.
	 */

	if ((access_type & PROT_WRITE) != 0 && anon->an_ref > 1) {
		uvmexp.flt_acow++;
		oanon = anon;		/* oanon = old */
		anon = uvm_analloc();
		if (anon) {
			pg = uvm_pagealloc(NULL, 0, anon, 0);
		}

		/* check for out of RAM */
		if (anon == NULL || pg == NULL) {
			uvmfault_unlockall(&ufi, amap, NULL, oanon);
			KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
			if (anon == NULL)
				uvmexp.fltnoanon++;
			else {
				uvm_anfree(anon);
				uvmexp.fltnoram++;
			}

			if (uvmexp.swpgonly == uvmexp.swpages)
				return (ENOMEM);

			/* out of RAM, wait for more */
			if (anon == NULL)
				uvm_anwait();
			else
				uvm_wait("flt_noram3");
			goto ReFault;
		}

		/* got all resources, replace the old anon with the new one */
		uvm_pagecopy(oanon->an_page, pg);	/* pg now !PG_CLEAN */
		/* un-busy! new page */
		atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE);
		UVM_PAGE_OWN(pg, NULL);
		ret = amap_add(&ufi.entry->aref,
		    ufi.orig_rvaddr - ufi.entry->start, anon, 1);
		KASSERT(ret == 0);

		/* deref: can not drop to zero here by defn! */
		oanon->an_ref--;

		/*
		 * note: anon is _not_ locked, but we have the sole reference
		 * to it from the amap.
		 * thus, no one can get at it until we are done with it.
		 */
	} else {
		uvmexp.flt_anon++;
		oanon = anon;
		pg = anon->an_page;
		if (anon->an_ref > 1)	/* disallow writes to ref > 1 anons */
			enter_prot = enter_prot & ~PROT_WRITE;
	}

	/*
	 * now map the page in ...
	 * XXX: old fault unlocks object before pmap_enter.  this seems
	 * suspect since some other thread could blast the page out from
	 * under us between the unlock and the pmap_enter.
	 */
	if (pmap_enter(ufi.orig_map->pmap, ufi.orig_rvaddr, VM_PAGE_TO_PHYS(pg),
	    enter_prot, access_type | PMAP_CANFAIL | (wired ? PMAP_WIRED : 0))
	    != 0) {
		/*
		 * No need to undo what we did; we can simply think of
		 * this as the pmap throwing away the mapping information.
		 *
		 * We do, however, have to go through the ReFault path,
		 * as the map may change while we're asleep.
		 */
		uvmfault_unlockall(&ufi, amap, NULL, oanon);
		KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
		if (uvmexp.swpgonly == uvmexp.swpages) {
			/* XXX instrumentation */
			return (ENOMEM);
		}
		/* XXX instrumentation */
		uvm_wait("flt_pmfail1");
		goto ReFault;
	}

	/* ... update the page queues. */
	uvm_lock_pageq();

	if (fault_type == VM_FAULT_WIRE) {
		uvm_pagewire(pg);
		/*
		 * since the now-wired page cannot be paged out,
		 * release its swap resources for others to use.
		 * since an anon with no swap cannot be PG_CLEAN,
		 * clear its clean flag now.
		 */
		atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
		uvm_anon_dropswap(anon);
	} else {
		/* activate it */
		uvm_pageactivate(pg);
	}

	uvm_unlock_pageq();

	/* done case 1!  finish up by unlocking everything and returning success */
	uvmfault_unlockall(&ufi, amap, NULL, oanon);
	pmap_update(ufi.orig_map->pmap);
	return (0);


Case2:
	/* handle case 2: faulting on backing object or zero fill */
	/*
	 * note that uobjpage can not be PGO_DONTCARE at this point.  we now
	 * set uobjpage to PGO_DONTCARE if we are doing a zero fill.  if we
	 * have a backing object, check and see if we are going to promote
	 * the data up to an anon during the fault.
	 */
	if (uobj == NULL) {
		uobjpage = PGO_DONTCARE;
		promote = TRUE;		/* always need anon here */
	} else {
		KASSERT(uobjpage != PGO_DONTCARE);
		promote = (access_type & PROT_WRITE) &&
		    UVM_ET_ISCOPYONWRITE(ufi.entry);
	}

	/*
	 * if uobjpage is not null then we do not need to do I/O to get the
	 * uobjpage.
	 *
	 * if uobjpage is null, then we need to ask the pager to
	 * get the data for us.   once we have the data, we need to reverify
	 * the state of the world.   we are currently not holding any resources.
	 */
	if (uobjpage) {
		/* update rusage counters */
		curproc->p_ru.ru_minflt++;
	} else {
		/* update rusage counters */
		curproc->p_ru.ru_majflt++;

		uvmfault_unlockall(&ufi, amap, NULL, NULL);

		uvmexp.fltget++;
		gotpages = 1;
		uoff = (ufi.orig_rvaddr - ufi.entry->start) + ufi.entry->offset;
		result = uobj->pgops->pgo_get(uobj, uoff, &uobjpage, &gotpages,
		    0, access_type & MASK(ufi.entry), ufi.entry->advice,
		    PGO_SYNCIO);

		/* recover from I/O */
		if (result != VM_PAGER_OK) {
			KASSERT(result != VM_PAGER_PEND);

			if (result == VM_PAGER_AGAIN) {
				tsleep(&lbolt, PVM, "fltagain2", 0);
				goto ReFault;
			}

			if (!UVM_ET_ISNOFAULT(ufi.entry))
				return (EACCES); /* XXX i/o error */

			uobjpage = PGO_DONTCARE;
			promote = TRUE;
		}

		/* re-verify the state of the world. */
		locked = uvmfault_relock(&ufi);

		/*
		 * Re-verify that amap slot is still free. if there is
		 * a problem, we clean up.
		 */
		if (locked && amap && amap_lookup(&ufi.entry->aref,
		    ufi.orig_rvaddr - ufi.entry->start)) {
			if (locked)
				uvmfault_unlockall(&ufi, amap, NULL, NULL);
			locked = FALSE;
		}

		/* didn't get the lock?   release the page and retry. */
		if (locked == FALSE && uobjpage != PGO_DONTCARE) {
			uvm_lock_pageq();
			/* make sure it is in queues */
			uvm_pageactivate(uobjpage);
			uvm_unlock_pageq();

			if (uobjpage->pg_flags & PG_WANTED)
				/* still holding object lock */
				wakeup(uobjpage);
			atomic_clearbits_int(&uobjpage->pg_flags,
			    PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(uobjpage, NULL);
			goto ReFault;
		}

		/*
		 * we have the data in uobjpage which is PG_BUSY
		 */
	}

	/*
	 * notes:
	 *  - at this point uobjpage can not be NULL
	 *  - at this point uobjpage could be PG_WANTED (handle later)
	 */
	if (promote == FALSE) {
		/*
		 * we are not promoting.   if the mapping is COW ensure that we
		 * don't give more access than we should (e.g. when doing a read
		 * fault on a COPYONWRITE mapping we want to map the COW page in
		 * R/O even though the entry protection could be R/W).
		 *
		 * set "pg" to the page we want to map in (uobjpage, usually)
		 */
		uvmexp.flt_obj++;
		if (UVM_ET_ISCOPYONWRITE(ufi.entry))
			enter_prot &= ~PROT_WRITE;
		pg = uobjpage;		/* map in the actual object */

		/* assert(uobjpage != PGO_DONTCARE) */

		/*
		 * we are faulting directly on the page.
		 */
	} else {
		/*
		 * if we are going to promote the data to an anon we
		 * allocate a blank anon here and plug it into our amap.
		 */
#ifdef DIAGNOSTIC
		if (amap == NULL)
			panic("uvm_fault: want to promote data, but no anon");
#endif

		anon = uvm_analloc();
		if (anon) {
			/*
			 * In `Fill in data...' below, if
			 * uobjpage == PGO_DONTCARE, we want
			 * a zero'd, dirty page, so have
			 * uvm_pagealloc() do that for us.
			 */
			pg = uvm_pagealloc(NULL, 0, anon,
			    (uobjpage == PGO_DONTCARE) ? UVM_PGA_ZERO : 0);
		}

		/*
		 * out of memory resources?
		 */
		if (anon == NULL || pg == NULL) {
			/* arg!  must unbusy our page and fail or sleep. */
			if (uobjpage != PGO_DONTCARE) {
				uvm_lock_pageq();
				uvm_pageactivate(uobjpage);
				uvm_unlock_pageq();

				if (uobjpage->pg_flags & PG_WANTED)
					wakeup(uobjpage);
				atomic_clearbits_int(&uobjpage->pg_flags,
				    PG_BUSY|PG_WANTED);
				UVM_PAGE_OWN(uobjpage, NULL);
			}

			/* unlock and fail ... */
			uvmfault_unlockall(&ufi, amap, uobj, NULL);
			KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
			if (anon == NULL)
				uvmexp.fltnoanon++;
			else {
				uvm_anfree(anon);
				uvmexp.fltnoram++;
			}

			if (uvmexp.swpgonly == uvmexp.swpages)
				return (ENOMEM);

			/* out of RAM, wait for more */
			if (anon == NULL)
				uvm_anwait();
			else
				uvm_wait("flt_noram5");
			goto ReFault;
		}

		/* fill in the data */
		if (uobjpage != PGO_DONTCARE) {
			uvmexp.flt_prcopy++;
			/* copy page [pg now dirty] */
			uvm_pagecopy(uobjpage, pg);

			/*
			 * promote to shared amap?  make sure all sharing
			 * procs see it
			 */
			if ((amap_flags(amap) & AMAP_SHARED) != 0) {
				pmap_page_protect(uobjpage, PROT_NONE);
			}

			/* dispose of uobjpage. drop handle to uobj as well. */
			if (uobjpage->pg_flags & PG_WANTED)
				wakeup(uobjpage);
			atomic_clearbits_int(&uobjpage->pg_flags,
			    PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(uobjpage, NULL);
			uvm_lock_pageq();
			uvm_pageactivate(uobjpage);
			uvm_unlock_pageq();
			uobj = NULL;
		} else {
			uvmexp.flt_przero++;
			/*
			 * Page is zero'd and marked dirty by uvm_pagealloc()
			 * above.
			 */
		}

		if (amap_add(&ufi.entry->aref,
		    ufi.orig_rvaddr - ufi.entry->start, anon, 0)) {
			uvmfault_unlockall(&ufi, amap, NULL, oanon);
			KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
			uvm_anfree(anon);
			uvmexp.fltnoamap++;

			if (uvmexp.swpgonly == uvmexp.swpages)
				return (ENOMEM);

			amap_populate(&ufi.entry->aref,
			    ufi.orig_rvaddr - ufi.entry->start);
			goto ReFault;
		}
	}

	/* note: pg is either the uobjpage or the new page in the new anon */
	/*
	 * all resources are present.   we can now map it in and free our
	 * resources.
	 */
	if (pmap_enter(ufi.orig_map->pmap, ufi.orig_rvaddr, VM_PAGE_TO_PHYS(pg),
	    enter_prot, access_type | PMAP_CANFAIL | (wired ? PMAP_WIRED : 0))
	    != 0) {
		/*
		 * No need to undo what we did; we can simply think of
		 * this as the pmap throwing away the mapping information.
		 *
		 * We do, however, have to go through the ReFault path,
		 * as the map may change while we're asleep.
		 */
		if (pg->pg_flags & PG_WANTED)
			wakeup(pg);

		atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE|PG_WANTED);
		UVM_PAGE_OWN(pg, NULL);
		uvmfault_unlockall(&ufi, amap, uobj, NULL);
		KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
		if (uvmexp.swpgonly == uvmexp.swpages) {
			/* XXX instrumentation */
			return (ENOMEM);
		}
		/* XXX instrumentation */
		uvm_wait("flt_pmfail2");
		goto ReFault;
	}

	uvm_lock_pageq();

	if (fault_type == VM_FAULT_WIRE) {
		uvm_pagewire(pg);
		if (pg->pg_flags & PQ_AOBJ) {
			/*
			 * since the now-wired page cannot be paged out,
			 * release its swap resources for others to use.
			 * since an aobj page with no swap cannot be PG_CLEAN,
			 * clear its clean flag now.
			 */
			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
			uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
		}
	} else {
		/* activate it */
		uvm_pageactivate(pg);
	}
	uvm_unlock_pageq();

	if (pg->pg_flags & PG_WANTED)
		wakeup(pg);

	atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE|PG_WANTED);
	UVM_PAGE_OWN(pg, NULL);
	uvmfault_unlockall(&ufi, amap, uobj, NULL);
	pmap_update(ufi.orig_map->pmap);

	return (0);
}


/*
 * uvm_fault_wire: wire down a range of virtual addresses in a map.
 *
 * => map may be read-locked by caller, but MUST NOT be write-locked.
 * => if map is read-locked, any operations which may cause map to
 *	be write-locked in uvm_fault() must be taken care of by
 *	the caller.  See uvm_map_pageable().
 */
int
uvm_fault_wire(vm_map_t map, vaddr_t start, vaddr_t end, vm_prot_t access_type)
{
	vaddr_t va;
	int rv;

	/*
	 * now fault it in a page at a time.   if the fault fails then we have
	 * to undo what we have done.   note that in uvm_fault PROT_NONE
	 * is replaced with the max protection if fault_type is VM_FAULT_WIRE.
	 */
	for (va = start ; va < end ; va += PAGE_SIZE) {
		rv = uvm_fault(map, va, VM_FAULT_WIRE, access_type);
		if (rv) {
			if (va != start) {
				uvm_fault_unwire(map, start, va);
			}
			return (rv);
		}
	}

	return (0);
}

/*
 * uvm_fault_unwire(): unwire range of virtual space.
 */
void
uvm_fault_unwire(vm_map_t map, vaddr_t start, vaddr_t end)
{

	vm_map_lock_read(map);
	uvm_fault_unwire_locked(map, start, end);
	vm_map_unlock_read(map);
}
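/*
 * usage sketch (illustrative only; "start" and "end" stand for a
 * caller's own page-aligned range):
 *
 *	if (uvm_fault_wire(map, start, end, PROT_READ) == 0) {
 *		...use the wired range, then release it later with...
 *		uvm_fault_unwire(map, start, end);
 *	}
 *
 * note that when uvm_fault_wire() fails it has already unwired whatever
 * part of the range it wired before the failing page, so the caller does
 * not need to unwire anything itself.
 */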
/*
 * uvm_fault_unwire_locked(): the guts of uvm_fault_unwire().
 *
 * => map must be at least read-locked.
 */
void
uvm_fault_unwire_locked(vm_map_t map, vaddr_t start, vaddr_t end)
{
	vm_map_entry_t entry, next;
	pmap_t pmap = vm_map_pmap(map);
	vaddr_t va;
	paddr_t pa;
	struct vm_page *pg;

	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);

	/*
	 * we assume that the area we are unwiring has actually been wired
	 * in the first place.   this means that we should be able to extract
	 * the PAs from the pmap.   we also lock out the page daemon so that
	 * we can call uvm_pageunwire.
	 */
	uvm_lock_pageq();

	/* find the beginning map entry for the region. */
	KASSERT(start >= vm_map_min(map) && end <= vm_map_max(map));
	if (uvm_map_lookup_entry(map, start, &entry) == FALSE)
		panic("uvm_fault_unwire_locked: address not in map");

	for (va = start; va < end ; va += PAGE_SIZE) {
		if (pmap_extract(pmap, va, &pa) == FALSE)
			continue;

		/* find the map entry for the current address. */
		KASSERT(va >= entry->start);
		while (va >= entry->end) {
			next = RB_NEXT(uvm_map_addr, &map->addr, entry);
			KASSERT(next != NULL && next->start <= entry->end);
			entry = next;
		}

		/* if the entry is no longer wired, tell the pmap. */
		if (VM_MAPENT_ISWIRED(entry) == 0)
			pmap_unwire(pmap, va);

		pg = PHYS_TO_VM_PAGE(pa);
		if (pg)
			uvm_pageunwire(pg);
	}

	uvm_unlock_pageq();
}

/*
 * uvmfault_unlockmaps: unlock the maps
 */
void
uvmfault_unlockmaps(struct uvm_faultinfo *ufi, boolean_t write_locked)
{
	/*
	 * ufi can be NULL when this isn't really a fault,
	 * but merely paging in anon data.
	 */
	if (ufi == NULL) {
		return;
	}

	uvmfault_update_stats(ufi);
	if (write_locked) {
		vm_map_unlock(ufi->map);
	} else {
		vm_map_unlock_read(ufi->map);
	}
}

/*
 * uvmfault_unlockall: unlock everything passed in.
 *
 * => maps must be read-locked (not write-locked).
 */
void
uvmfault_unlockall(struct uvm_faultinfo *ufi, struct vm_amap *amap,
    struct uvm_object *uobj, struct vm_anon *anon)
{

	uvmfault_unlockmaps(ufi, FALSE);
}

/*
 * uvmfault_lookup: lookup a virtual address in a map
 *
 * => caller must provide a uvm_faultinfo structure with the IN
 *	params properly filled in
 * => we will lookup the map entry (handling submaps) as we go
 * => if the lookup is a success we will return with the maps locked
 * => if "write_lock" is TRUE, we write_lock the map, otherwise we only
 *	get a read lock.
 * => note that submaps can only appear in the kernel and they are
 *	required to use the same virtual addresses as the map they
 *	are referenced by (thus address translation between the main
 *	map and the submap is unnecessary).
 */

boolean_t
uvmfault_lookup(struct uvm_faultinfo *ufi, boolean_t write_lock)
{
	vm_map_t tmpmap;

	/* init ufi values for lookup. */
	ufi->map = ufi->orig_map;
	ufi->size = ufi->orig_size;

	/*
	 * keep going down levels until we are done.   note that there can
	 * only be two levels so we won't loop very long.
	 */
	while (1) {
		if (ufi->orig_rvaddr < ufi->map->min_offset ||
		    ufi->orig_rvaddr >= ufi->map->max_offset)
			return(FALSE);

		/* lock map */
		if (write_lock) {
			vm_map_lock(ufi->map);
		} else {
			vm_map_lock_read(ufi->map);
		}

		/* lookup */
		if (!uvm_map_lookup_entry(ufi->map, ufi->orig_rvaddr,
		    &ufi->entry)) {
			uvmfault_unlockmaps(ufi, write_lock);
			return(FALSE);
		}

		/* reduce size if necessary */
		if (ufi->entry->end - ufi->orig_rvaddr < ufi->size)
			ufi->size = ufi->entry->end - ufi->orig_rvaddr;

		/*
		 * submap?    replace map with the submap and lookup again.
		 * note: VAs in submaps must match VAs in main map.
		 */
		if (UVM_ET_ISSUBMAP(ufi->entry)) {
			tmpmap = ufi->entry->object.sub_map;
			uvmfault_unlockmaps(ufi, write_lock);
			ufi->map = tmpmap;
			continue;
		}

		/* got it! */
		ufi->mapv = ufi->map->timestamp;
		return(TRUE);

	}
	/*NOTREACHED*/
}

/*
 * uvmfault_relock: attempt to relock the same version of the map
 *
 * => fault data structures should be unlocked before calling.
 * => if a success (TRUE) maps will be locked after call.
 */
boolean_t
uvmfault_relock(struct uvm_faultinfo *ufi)
{
	/*
	 * ufi can be NULL when this isn't really a fault,
	 * but merely paging in anon data.
	 */
	if (ufi == NULL) {
		return TRUE;
	}

	uvmexp.fltrelck++;

	/*
	 * relock map.   fail if version mismatch (in which case nothing
	 * gets locked).
	 */
	vm_map_lock_read(ufi->map);
	if (ufi->mapv != ufi->map->timestamp) {
		vm_map_unlock_read(ufi->map);
		return(FALSE);
	}

	uvmexp.fltrelckok++;
	return(TRUE);		/* got it! */
}