/*	$NetBSD: uvm_fault.c,v 1.79 2002/10/30 05:24:33 yamt Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_fault.c,v 1.1.2.23 1998/02/06 05:29:05 chs Exp
 */

/*
 * uvm_fault.c: fault handler
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.79 2002/10/30 05:24:33 yamt Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/user.h>

#include <uvm/uvm.h>

/*
 *
 * a word on page faults:
 *
 * types of page faults we handle:
 *
 * CASE 1: upper layer faults                   CASE 2: lower layer faults
 *
 *    CASE 1A         CASE 1B                  CASE 2A        CASE 2B
 *    read/write1     write>1                  read/write   +-cow_write/zero
 *         |             |                         |        |
 *      +--|--+       +--|--+     +-----+       +  |  +     | +-----+
 * amap |  V  |       |  ----------->new|          |        | |  ^  |
 *      +-----+       +-----+     +-----+       +  |  +     | +--|--+
 *                                                 |        |    |
 *      +-----+       +-----+                   +--|--+     | +--|--+
 * uobj | d/c |       | d/c |                   |  V  |     +----|  |
 *      +-----+       +-----+                   +-----+       +-----+
 *
 * d/c = don't care
 *
 *   case [0]: layerless fault
 *	no amap or uobj is present.   this is an error.
 *
 *   case [1]: upper layer fault [anon active]
 *     1A: [read] or [write with anon->an_ref == 1]
 *		I/O takes place in top level anon and uobj is not touched.
 *     1B: [write with anon->an_ref > 1]
 *		new anon is alloc'd and data is copied off ["COW"]
 *
 *   case [2]: lower layer fault [uobj]
 *     2A: [read on non-NULL uobj] or [write to non-copy_on_write area]
 *		I/O takes place directly in object.
 *     2B: [write to copy_on_write] or [read on NULL uobj]
 *		data is "promoted" from uobj to a new anon.
 *		if uobj is null, then we zero fill.
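 *
 * illustrative examples (implied by the case definitions above): the
 * first write to a private file mapping promotes the file page into a
 * fresh anon (2B); a later write to that anon after a fork has pushed
 * an_ref above one is a classic COW (1B); a fault on a mapping with
 * no uobj at all (e.g. anonymous memory) zero fills (2B).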
 *
 * we follow the standard UVM locking protocol ordering:
 *
 * MAPS => AMAP => UOBJ => ANON => PAGE QUEUES (PQ)
 * we hold a PG_BUSY page if we unlock for I/O
 *
 *
 * the code is structured as follows:
 *
 *     - init the "IN" params in the ufi structure
 *   ReFault:
 *     - do lookups [locks maps], check protection, handle needs_copy
 *     - check for case 0 fault (error)
 *     - establish "range" of fault
 *     - if we have an amap lock it and extract the anons
 *     - if sequential advice deactivate pages behind us
 *     - at the same time check pmap for unmapped areas and anon for pages
 *	 that we could map in (and map them in if found)
 *     - check object for resident pages that we could map in
 *     - if (case 2) goto Case2
 *     - >>> handle case 1
 *           - ensure source anon is resident in RAM
 *           - if case 1B alloc new anon and copy from source
 *           - map the correct page in
 *   Case2:
 *     - >>> handle case 2
 *           - ensure source page is resident (if uobj)
 *           - if case 2B alloc new anon and copy from source (could be zero
 *		fill if uobj == NULL)
 *           - map the correct page in
 *     - done!
 *
 * note on paging:
 *   if we have to do I/O we place a PG_BUSY page in the correct object,
 * unlock everything, and do the I/O.   when I/O is done we must reverify
 * the state of the world before assuming that our data structures are
 * valid.   [because mappings could change while the map is unlocked]
 *
 * alternative 1: unbusy the page in question and restart the page fault
 *    from the top (ReFault).   this is easy but does not take advantage
 *    of the information that we already have from our previous lookup,
 *    although it is possible that the "hints" in the vm_map will help here.
 *
 * alternative 2: the system already keeps track of a "version" number of
 *    a map.   [i.e. every time you write-lock a map (e.g. to change a
 *    mapping) you bump the version number up by one...]   so, we can save
 *    the version number of the map before we release the lock and start I/O.
 *    then when I/O is done we can relock and check the version numbers
 *    to see if anything changed.    this might save us some work over
 *    alternative 1 because we don't have to unbusy the page and may need
 *    fewer compares(?).
 *
 * alternative 3: put in backpointers or a way to "hold" part of a map
 *    in place while I/O is in progress.   this could be complex to
 *    implement (especially with structures like amap that can be referenced
 *    by multiple map entries, and figuring out what should wait could be
 *    complex as well...).
 *
 * given that we are not currently multiprocessor or multithreaded we might
 * as well choose alternative 2 now.   maybe alternative 3 would be useful
 * in the future.    XXX keep in mind for future consideration/rechecking.
 */

/*
 * local data structures
 */

struct uvm_advice {
	int advice;
	int nback;
	int nforw;
};

/*
 * page range array:
 * note: index in array must match "advice" value
 * XXX: borrowed numbers from freebsd.   do they work well for us?
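 *
 * example (illustrative): a MADV_NORMAL fault considers up to 3 pages
 * back and 4 forward (8 pages total including the faulting page),
 * while MADV_SEQUENTIAL considers 8 back and 7 forward (16 total),
 * which is exactly what UVM_MAXRANGE below must cover.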
 */

static struct uvm_advice uvmadvice[] = {
	{ MADV_NORMAL, 3, 4 },
	{ MADV_RANDOM, 0, 0 },
	{ MADV_SEQUENTIAL, 8, 7},
};

#define UVM_MAXRANGE 16	/* must be MAX() of nback+nforw+1 */

/*
 * private prototypes
 */

static void uvmfault_amapcopy __P((struct uvm_faultinfo *));
static __inline void uvmfault_anonflush __P((struct vm_anon **, int));

/*
 * inline functions
 */

/*
 * uvmfault_anonflush: try and deactivate pages in specified anons
 *
 * => does not have to deactivate page if it is busy
 */

static __inline void
uvmfault_anonflush(anons, n)
	struct vm_anon **anons;
	int n;
{
	int lcv;
	struct vm_page *pg;

	for (lcv = 0 ; lcv < n ; lcv++) {
		if (anons[lcv] == NULL)
			continue;
		simple_lock(&anons[lcv]->an_lock);
		pg = anons[lcv]->u.an_page;
		if (pg && (pg->flags & PG_BUSY) == 0 && pg->loan_count == 0) {
			uvm_lock_pageq();
			if (pg->wire_count == 0) {
				pmap_clear_reference(pg);
				uvm_pagedeactivate(pg);
			}
			uvm_unlock_pageq();
		}
		simple_unlock(&anons[lcv]->an_lock);
	}
}

/*
 * normal functions
 */

/*
 * uvmfault_amapcopy: clear "needs_copy" in a map.
 *
 * => called with VM data structures unlocked (usually, see below)
 * => we get a write lock on the maps and clear needs_copy for a VA
 * => if we are out of RAM we sleep (waiting for more)
 */

static void
uvmfault_amapcopy(ufi)
	struct uvm_faultinfo *ufi;
{
	for (;;) {

		/*
		 * no mapping?  give up.
		 */

		if (uvmfault_lookup(ufi, TRUE) == FALSE)
			return;

		/*
		 * copy if needed.
		 */

		if (UVM_ET_ISNEEDSCOPY(ufi->entry))
			amap_copy(ufi->map, ufi->entry, M_NOWAIT, TRUE,
				ufi->orig_rvaddr, ufi->orig_rvaddr + 1);

		/*
		 * didn't work?  must be out of RAM.   unlock and sleep.
		 */

		if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
			uvmfault_unlockmaps(ufi, TRUE);
			uvm_wait("fltamapcopy");
			continue;
		}

		/*
		 * got it!   unlock and return.
		 */

		uvmfault_unlockmaps(ufi, TRUE);
		return;
	}
	/*NOTREACHED*/
}
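
/*
 * illustrative caller pattern for uvmfault_anonget() (a sketch; the
 * real use is in the case 1 handling of uvm_fault() below, which also
 * handles EAGAIN by sleeping and refaulting):
 *
 *	error = uvmfault_anonget(&ufi, amap, anon);
 *	switch (error) {
 *	case 0:
 *		break;			everything is still locked
 *	case ERESTART:
 *		goto ReFault;		locks were dropped; start over
 *	default:
 *		return error;		the fault failed
 *	}
 */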

/*
 * uvmfault_anonget: get data in an anon into a non-busy, non-released
 * page in that anon.
 *
 * => maps, amap, and anon locked by caller.
 * => if we fail (result != 0) we unlock everything.
 * => if we are successful, we return with everything still locked.
 * => we don't move the page on the queues [gets moved later]
 * => if we allocate a new page [we_own], it gets put on the queues.
 *    either way, the result is that the page is on the queues at return time
 * => for pages which are on loan from a uvm_object (and thus are not
 *    owned by the anon): if successful, we return with the owning object
 *    locked.   the caller must unlock this object when it unlocks everything
 *    else.
 */

int
uvmfault_anonget(ufi, amap, anon)
	struct uvm_faultinfo *ufi;
	struct vm_amap *amap;
	struct vm_anon *anon;
{
	boolean_t we_own;	/* we own anon's page? */
	boolean_t locked;	/* did we relock? */
	struct vm_page *pg;
	int error;
	UVMHIST_FUNC("uvmfault_anonget"); UVMHIST_CALLED(maphist);

	LOCK_ASSERT(simple_lock_held(&anon->an_lock));

	error = 0;
	uvmexp.fltanget++;
	/* bump rusage counters */
	if (anon->u.an_page)
		curproc->p_addr->u_stats.p_ru.ru_minflt++;
	else
		curproc->p_addr->u_stats.p_ru.ru_majflt++;

	/*
	 * loop until we get it, or fail.
	 */

	for (;;) {
		we_own = FALSE;		/* TRUE if we set PG_BUSY on a page */
		pg = anon->u.an_page;

		/*
		 * if there is a resident page and it is loaned, then anon
		 * may not own it.   call out to uvm_anon_lockloanpg() to
		 * ensure the real owner of the page has been identified
		 * and locked.
		 */

		if (pg && pg->loan_count)
			pg = uvm_anon_lockloanpg(anon);

		/*
		 * page there?   make sure it is not busy/released.
		 */

		if (pg) {

			/*
			 * at this point, if the page has a uobject [meaning
			 * we have it on loan], then that uobject is locked
			 * by us!   if the page is busy, we drop all the
			 * locks (including uobject) and try again.
			 */

			if ((pg->flags & PG_BUSY) == 0) {
				UVMHIST_LOG(maphist, "<- OK",0,0,0,0);
				return (0);
			}
			pg->flags |= PG_WANTED;
			uvmexp.fltpgwait++;

			/*
			 * the last unlock must be an atomic unlock+wait on
			 * the owner of page
			 */

			if (pg->uobject) {	/* owner is uobject ? */
				uvmfault_unlockall(ufi, amap, NULL, anon);
				UVMHIST_LOG(maphist, " unlock+wait on uobj",0,
				    0,0,0);
				UVM_UNLOCK_AND_WAIT(pg,
				    &pg->uobject->vmobjlock,
				    FALSE, "anonget1",0);
			} else {
				/* anon owns page */
				uvmfault_unlockall(ufi, amap, NULL, NULL);
				UVMHIST_LOG(maphist, " unlock+wait on anon",0,
				    0,0,0);
				UVM_UNLOCK_AND_WAIT(pg,&anon->an_lock,0,
				    "anonget2",0);
			}
		} else {

			/*
			 * no page, we must try and bring it in.
			 */

			pg = uvm_pagealloc(NULL, 0, anon, 0);
			if (pg == NULL) {		/* out of RAM.  */
				uvmfault_unlockall(ufi, amap, NULL, anon);
				uvmexp.fltnoram++;
				UVMHIST_LOG(maphist, "  noram -- UVM_WAIT",0,
				    0,0,0);
				uvm_wait("flt_noram1");
			} else {
				/* we set the PG_BUSY bit */
				we_own = TRUE;
				uvmfault_unlockall(ufi, amap, NULL, anon);

				/*
				 * we are passing a PG_BUSY+PG_FAKE+PG_CLEAN
				 * page into the uvm_swap_get function with
				 * all data structures unlocked.  note that
				 * it is ok to read an_swslot here because
				 * we hold PG_BUSY on the page.
				 */
				uvmexp.pageins++;
				error = uvm_swap_get(pg, anon->an_swslot,
				    PGO_SYNCIO);

				/*
				 * we clean up after the i/o below in the
				 * "we_own" case
				 */
			}
		}

		/*
		 * now relock and try again
		 */

		locked = uvmfault_relock(ufi);
		if (locked && amap != NULL) {
			amap_lock(amap);
		}
		if (locked || we_own)
			simple_lock(&anon->an_lock);

		/*
		 * if we own the page (i.e. we set PG_BUSY), then we need
		 * to clean up after the I/O. there are three cases to
		 * consider:
		 *   [1] page released during I/O: free anon and ReFault.
		 *   [2] I/O not OK.   free the page and cause the fault
		 *       to fail.
		 *   [3] I/O OK!   activate the page and sync with the
		 *       non-we_own case (i.e. drop anon lock if not locked).
		 */

		if (we_own) {
			if (pg->flags & PG_WANTED) {
				wakeup(pg);
			}
			if (error) {
				/* remove page from anon */
				anon->u.an_page = NULL;

				/*
				 * remove the swap slot from the anon
				 * and mark the anon as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */

				uvm_swap_markbad(anon->an_swslot, 1);
				anon->an_swslot = SWSLOT_BAD;

				/*
				 * note: page was never !PG_BUSY, so it
				 * can't be mapped and thus no need to
				 * pmap_page_protect it...
				 */

				uvm_lock_pageq();
				uvm_pagefree(pg);
				uvm_unlock_pageq();

				if (locked)
					uvmfault_unlockall(ufi, amap, NULL,
					    anon);
				else
					simple_unlock(&anon->an_lock);
				UVMHIST_LOG(maphist, "<- ERROR", 0,0,0,0);
				return error;
			}

			/*
			 * we've successfully read the page, activate it.
			 */

			uvm_lock_pageq();
			uvm_pageactivate(pg);
			uvm_unlock_pageq();
			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
			UVM_PAGE_OWN(pg, NULL);
			if (!locked)
				simple_unlock(&anon->an_lock);
		}

		/*
		 * we were not able to relock.   restart fault.
		 */

		if (!locked) {
			UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0);
			return (ERESTART);
		}

		/*
		 * verify no one has touched the amap and moved the anon on us.
		 */

		if (ufi != NULL &&
		    amap_lookup(&ufi->entry->aref,
				ufi->orig_rvaddr - ufi->entry->start) != anon) {

			uvmfault_unlockall(ufi, amap, NULL, anon);
			UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0);
			return (ERESTART);
		}

		/*
		 * try it again!
		 */

		uvmexp.fltanretry++;
		continue;
	}
	/*NOTREACHED*/
}

/*
 * F A U L T   -   m a i n   e n t r y   p o i n t
 */

/*
 * uvm_fault: page fault handler
 *
 * => called from MD code to resolve a page fault
 * => VM data structures usually should be unlocked.   however, it is
 *	possible to call here with the main map locked if the caller
 *	gets a write lock, sets it recursive, and then calls us (c.f.
 *	uvm_map_pageable).   this should be avoided because it keeps
 *	the map locked off during I/O.
 * => MUST NEVER BE CALLED IN INTERRUPT CONTEXT
 */
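
/*
 * illustrative only (not part of this file): machine-dependent trap
 * code typically resolves a fault with something along the lines of
 *
 *	map = &p->p_vmspace->vm_map;	(or kernel_map for kernel faults)
 *	error = uvm_fault(map, trunc_page(va), VM_FAULT_INVALID, ftype);
 *
 * where "ftype" is the access type (e.g. VM_PROT_READ or VM_PROT_WRITE)
 * derived from the trap frame; exact names vary from port to port.
 */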

#define MASK(entry)     (UVM_ET_ISCOPYONWRITE(entry) ? \
			 ~VM_PROT_WRITE : VM_PROT_ALL)

int
uvm_fault(orig_map, vaddr, fault_type, access_type)
	struct vm_map *orig_map;
	vaddr_t vaddr;
	vm_fault_t fault_type;
	vm_prot_t access_type;
{
	struct uvm_faultinfo ufi;
	vm_prot_t enter_prot, check_prot;
	boolean_t wired, narrow, promote, locked, shadowed, wire_fault, cow_now;
	int npages, nback, nforw, centeridx, error, lcv, gotpages;
	vaddr_t startva, objaddr, currva, offset;
	voff_t uoff;
	paddr_t pa;
	struct vm_amap *amap;
	struct uvm_object *uobj;
	struct vm_anon *anons_store[UVM_MAXRANGE], **anons, *anon, *oanon;
	struct vm_page *pages[UVM_MAXRANGE], *pg, *uobjpage;
	UVMHIST_FUNC("uvm_fault"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, vaddr=0x%x, ft=%d, at=%d)",
	      orig_map, vaddr, fault_type, access_type);

	anon = NULL;
	pg = NULL;

	uvmexp.faults++;	/* XXX: locking? */

	/*
	 * init the IN parameters in the ufi
	 */

	ufi.orig_map = orig_map;
	ufi.orig_rvaddr = trunc_page(vaddr);
	ufi.orig_size = PAGE_SIZE;	/* can't get any smaller than this */
	wire_fault = fault_type == VM_FAULT_WIRE ||
	    fault_type == VM_FAULT_WIREMAX;
	if (wire_fault)
		narrow = TRUE;		/* don't look for neighborhood
					 * pages on wire */
	else
		narrow = FALSE;		/* normal fault */

	/*
	 * "goto ReFault" means restart the page fault from ground zero.
	 */
ReFault:

	/*
	 * lookup and lock the maps
	 */

	if (uvmfault_lookup(&ufi, FALSE) == FALSE) {
		UVMHIST_LOG(maphist, "<- no mapping @ 0x%x", vaddr, 0,0,0);
		return (EFAULT);
	}
	/* locked: maps(read) */

#ifdef DIAGNOSTIC
	if ((ufi.map->flags & VM_MAP_PAGEABLE) == 0) {
		printf("Page fault on non-pageable map:\n");
		printf("ufi.map = %p\n", ufi.map);
		printf("ufi.orig_map = %p\n", ufi.orig_map);
		printf("ufi.orig_rvaddr = 0x%lx\n", (u_long) ufi.orig_rvaddr);
		panic("uvm_fault: (ufi.map->flags & VM_MAP_PAGEABLE) == 0");
	}
#endif

	/*
	 * check protection
	 */

	check_prot = fault_type == VM_FAULT_WIREMAX ?
	    ufi.entry->max_protection : ufi.entry->protection;
	if ((check_prot & access_type) != access_type) {
		UVMHIST_LOG(maphist,
		    "<- protection failure (prot=0x%x, access=0x%x)",
		    ufi.entry->protection, access_type, 0, 0);
		uvmfault_unlockmaps(&ufi, FALSE);
		return EACCES;
	}

	/*
	 * "enter_prot" is the protection we want to enter the page in at.
	 * for certain pages (e.g. copy-on-write pages) this protection can
	 * be more strict than ufi.entry->protection.  "wired" means either
	 * the entry is wired or we are fault-wiring the pg.
	 */

	enter_prot = ufi.entry->protection;
	wired = VM_MAPENT_ISWIRED(ufi.entry) || wire_fault;
	if (wired) {
		access_type = enter_prot; /* full access for wired */
		cow_now = (check_prot & VM_PROT_WRITE) != 0;
	} else {
		cow_now = (access_type & VM_PROT_WRITE) != 0;
	}

	/*
	 * handle "needs_copy" case.   if we need to copy the amap we will
	 * have to drop our readlock and relock it with a write lock.  (we
	 * need a write lock to change anything in a map entry [e.g.
	 * needs_copy]).
	 */

	if (UVM_ET_ISNEEDSCOPY(ufi.entry)) {
		KASSERT(fault_type != VM_FAULT_WIREMAX);
		if (cow_now || (ufi.entry->object.uvm_obj == NULL)) {
			/* need to clear */
			UVMHIST_LOG(maphist,
			    "  need to clear needs_copy and refault",0,0,0,0);
			uvmfault_unlockmaps(&ufi, FALSE);
			uvmfault_amapcopy(&ufi);
			uvmexp.fltamcopy++;
			goto ReFault;

		} else {

			/*
			 * ensure that we pmap_enter page R/O since
			 * needs_copy is still true
			 */

			enter_prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * identify the players
	 */

	amap = ufi.entry->aref.ar_amap;		/* top layer */
	uobj = ufi.entry->object.uvm_obj;	/* bottom layer */

	/*
	 * check for a case 0 fault.  if nothing backing the entry then
	 * error now.
	 */

	if (amap == NULL && uobj == NULL) {
		uvmfault_unlockmaps(&ufi, FALSE);
		UVMHIST_LOG(maphist,"<- no backing store, no overlay",0,0,0,0);
		return (EFAULT);
	}

	/*
	 * establish range of interest based on advice from mapper
	 * and then clip to fit map entry.   note that we only want
	 * to do this the first time through the fault.   if we
	 * ReFault we will disable this by setting "narrow" to true.
	 */

	if (narrow == FALSE) {

		/* wide fault (!narrow) */
		KASSERT(uvmadvice[ufi.entry->advice].advice ==
			 ufi.entry->advice);
		nback = MIN(uvmadvice[ufi.entry->advice].nback,
			    (ufi.orig_rvaddr - ufi.entry->start) >> PAGE_SHIFT);
		startva = ufi.orig_rvaddr - (nback << PAGE_SHIFT);
		nforw = MIN(uvmadvice[ufi.entry->advice].nforw,
			    ((ufi.entry->end - ufi.orig_rvaddr) >>
			     PAGE_SHIFT) - 1);
		/*
		 * note: "-1" because we don't want to count the
		 * faulting page as forw
		 */
		npages = nback + nforw + 1;
		centeridx = nback;

		narrow = TRUE;	/* ensure only once per-fault */

	} else {

		/* narrow fault! */
		nback = nforw = 0;
		startva = ufi.orig_rvaddr;
		npages = 1;
		centeridx = 0;

	}

	/* locked: maps(read) */
	UVMHIST_LOG(maphist, "  narrow=%d, back=%d, forw=%d, startva=0x%x",
		    narrow, nback, nforw, startva);
	UVMHIST_LOG(maphist, "  entry=0x%x, amap=0x%x, obj=0x%x", ufi.entry,
		    amap, uobj, 0);

	/*
	 * if we've got an amap, lock it and extract current anons.
	 */

	if (amap) {
		amap_lock(amap);
		anons = anons_store;
		amap_lookups(&ufi.entry->aref, startva - ufi.entry->start,
		    anons, npages);
	} else {
		anons = NULL;	/* to be safe */
	}

	/* locked: maps(read), amap(if there) */

	/*
	 * for MADV_SEQUENTIAL mappings we want to deactivate the back pages
	 * now and then forget about them (for the rest of the fault).
	 */

	if (ufi.entry->advice == MADV_SEQUENTIAL && nback != 0) {

		UVMHIST_LOG(maphist, "  MADV_SEQUENTIAL: flushing backpages",
		    0,0,0,0);
		/* flush back-page anons? */
		if (amap)
			uvmfault_anonflush(anons, nback);

		/* flush object? */
		if (uobj) {
			objaddr =
			    (startva - ufi.entry->start) + ufi.entry->offset;
			simple_lock(&uobj->vmobjlock);
			(void) (uobj->pgops->pgo_put)(uobj, objaddr, objaddr +
				    (nback << PAGE_SHIFT), PGO_DEACTIVATE);
		}

		/* now forget about the backpages */
		if (amap)
			anons += nback;
		startva += (nback << PAGE_SHIFT);
		npages -= nback;
		nback = centeridx = 0;
	}

	/* locked: maps(read), amap(if there) */
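
	/*
	 * worked example (illustrative): with MADV_NORMAL advice and a
	 * fault landing well inside the entry, nback = 3 and nforw = 4,
	 * so startva is three pages below orig_rvaddr, npages = 8 and
	 * centeridx = 3.  pages[3] is then the faulting page and the
	 * others are candidates for the opportunistic mapping below.
	 */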

	/*
	 * map in the backpages and frontpages we found in the amap in hopes
	 * of preventing future faults.    we also init the pages[] array as
	 * we go.
	 */

	currva = startva;
	shadowed = FALSE;
	for (lcv = 0 ; lcv < npages ; lcv++, currva += PAGE_SIZE) {

		/*
		 * don't play with VAs that are already mapped
		 * (except for center)
		 */
		if (lcv != centeridx &&
		    pmap_extract(ufi.orig_map->pmap, currva, &pa)) {
			pages[lcv] = PGO_DONTCARE;
			continue;
		}

		/*
		 * unmapped or center page.   check if any anon at this level.
		 */
		if (amap == NULL || anons[lcv] == NULL) {
			pages[lcv] = NULL;
			continue;
		}

		/*
		 * check for present page and map if possible.   re-activate it.
		 */

		pages[lcv] = PGO_DONTCARE;
		if (lcv == centeridx) {		/* save center for later! */
			shadowed = TRUE;
			continue;
		}
		anon = anons[lcv];
		simple_lock(&anon->an_lock);
		/* ignore loaned pages */
		if (anon->u.an_page && anon->u.an_page->loan_count == 0 &&
		    (anon->u.an_page->flags & PG_BUSY) == 0) {
			uvm_lock_pageq();
			uvm_pageactivate(anon->u.an_page);
			uvm_unlock_pageq();
			UVMHIST_LOG(maphist,
			    "  MAPPING: n anon: pm=0x%x, va=0x%x, pg=0x%x",
			    ufi.orig_map->pmap, currva, anon->u.an_page, 0);
			uvmexp.fltnamap++;

			/*
			 * Since this isn't the page that's actually faulting,
			 * ignore pmap_enter() failures; it's not critical
			 * that we enter these right now.
			 */

			(void) pmap_enter(ufi.orig_map->pmap, currva,
			    VM_PAGE_TO_PHYS(anon->u.an_page),
			    (anon->an_ref > 1) ? (enter_prot & ~VM_PROT_WRITE) :
			    enter_prot,
			    PMAP_CANFAIL |
			     (VM_MAPENT_ISWIRED(ufi.entry) ? PMAP_WIRED : 0));
		}
		simple_unlock(&anon->an_lock);
		pmap_update(ufi.orig_map->pmap);
	}

	/* locked: maps(read), amap(if there) */
	/* (shadowed == TRUE) if there is an anon at the faulting address */
	UVMHIST_LOG(maphist, "  shadowed=%d, will_get=%d", shadowed,
	    (uobj && shadowed == FALSE),0,0);

	/*
	 * note that if we are really short of RAM we could sleep in the above
	 * call to pmap_enter with everything locked.   bad?
	 *
	 * XXX Actually, that is bad; pmap_enter() should just fail in that
	 * XXX case.  --thorpej
	 */

	/*
	 * if the desired page is not shadowed by the amap and we have a
	 * backing object, then we check to see if the backing object would
	 * prefer to handle the fault itself (rather than letting us do it
	 * with the usual pgo_get hook).  the backing object signals this by
	 * providing a pgo_fault routine.
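	 * (for example, the device pager supplies a pgo_fault routine,
	 * since device mappings have no struct vm_page to hand back
	 * through pgo_get.)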
	 */

	if (uobj && shadowed == FALSE && uobj->pgops->pgo_fault != NULL) {
		simple_lock(&uobj->vmobjlock);

		/* locked: maps(read), amap (if there), uobj */
		error = uobj->pgops->pgo_fault(&ufi, startva, pages, npages,
		    centeridx, fault_type, access_type, PGO_LOCKED|PGO_SYNCIO);

		/* locked: nothing, pgo_fault has unlocked everything */

		if (error == ERESTART)
			goto ReFault;		/* try again! */
		/*
		 * object fault routine responsible for pmap_update().
		 */
		return error;
	}

	/*
	 * now, if the desired page is not shadowed by the amap and we have
	 * a backing object that does not have a special fault routine, then
	 * we ask (with pgo_get) the object for resident pages that we care
	 * about and attempt to map them in.  we do not let pgo_get block
	 * (PGO_LOCKED).
	 */

	if (uobj && shadowed == FALSE) {
		simple_lock(&uobj->vmobjlock);

		/* locked (!shadowed): maps(read), amap (if there), uobj */
		/*
		 * the following call to pgo_get does _not_ change locking state
		 */

		uvmexp.fltlget++;
		gotpages = npages;
		(void) uobj->pgops->pgo_get(uobj, ufi.entry->offset +
				(startva - ufi.entry->start),
				pages, &gotpages, centeridx,
				access_type & MASK(ufi.entry),
				ufi.entry->advice, PGO_LOCKED);

		/*
		 * check for pages to map, if we got any
		 */

		uobjpage = NULL;

		if (gotpages) {
			currva = startva;
			for (lcv = 0; lcv < npages;
			     lcv++, currva += PAGE_SIZE) {
				if (pages[lcv] == NULL ||
				    pages[lcv] == PGO_DONTCARE) {
					continue;
				}

				/*
				 * if center page is resident and not
				 * PG_BUSY|PG_RELEASED then pgo_get
				 * made it PG_BUSY for us and gave
				 * us a handle to it.   remember this
				 * page as "uobjpage." (for later use).
				 */

				if (lcv == centeridx) {
					uobjpage = pages[lcv];
					UVMHIST_LOG(maphist, "  got uobjpage "
					    "(0x%x) with locked get",
					    uobjpage, 0,0,0);
					continue;
				}

				/*
				 * calling pgo_get with PGO_LOCKED returns us
				 * pages which are neither busy nor released,
				 * so we don't need to check for this.
				 * we can just directly enter the pages.
				 */

				uvm_lock_pageq();
				uvm_pageactivate(pages[lcv]);
				uvm_unlock_pageq();
				UVMHIST_LOG(maphist,
				  "  MAPPING: n obj: pm=0x%x, va=0x%x, pg=0x%x",
				  ufi.orig_map->pmap, currva, pages[lcv], 0);
				uvmexp.fltnomap++;

				/*
				 * Since this page isn't the page that's
				 * actually faulting, ignore pmap_enter()
				 * failures; it's not critical that we
				 * enter these right now.
				 */

				(void) pmap_enter(ufi.orig_map->pmap, currva,
				    VM_PAGE_TO_PHYS(pages[lcv]),
				    pages[lcv]->flags & PG_RDONLY ?
				    enter_prot & ~VM_PROT_WRITE :
				    enter_prot & MASK(ufi.entry),
				    PMAP_CANFAIL |
				     (wired ? PMAP_WIRED : 0));

				/*
				 * NOTE: page can't be PG_WANTED or PG_RELEASED
				 * because we've held the lock the whole time
				 * we've had the handle.
				 */

				pages[lcv]->flags &= ~(PG_BUSY);
				UVM_PAGE_OWN(pages[lcv], NULL);
			}
			pmap_update(ufi.orig_map->pmap);
		}
	} else {
		uobjpage = NULL;
	}

	/* locked (shadowed): maps(read), amap */
	/* locked (!shadowed): maps(read), amap(if there),
		 uobj(if !null), uobjpage(if !null) */

	/*
	 * note that at this point we are done with any front or back pages.
	 * we are now going to focus on the center page (i.e. the one we've
	 * faulted on).  if we have faulted on the top (anon) layer
	 * [i.e. case 1], then the anon we want is anons[centeridx] (we have
	 * not touched it yet).  if we have faulted on the bottom (uobj)
	 * layer [i.e. case 2] and the page was both present and available,
	 * then we've got a pointer to it as "uobjpage" and we've already
	 * made it BUSY.
	 */

	/*
	 * there are four possible cases we must address: 1A, 1B, 2A, and 2B
	 */

	/*
	 * redirect case 2: if we are not shadowed, go to case 2.
	 */

	if (shadowed == FALSE)
		goto Case2;

	/* locked: maps(read), amap */

	/*
	 * handle case 1: fault on an anon in our amap
	 */

	anon = anons[centeridx];
	UVMHIST_LOG(maphist, "  case 1 fault: anon=0x%x", anon, 0,0,0);
	simple_lock(&anon->an_lock);

	/* locked: maps(read), amap, anon */

	/*
	 * no matter if we have case 1A or case 1B we are going to need to
	 * have the anon's memory resident.   ensure that now.
	 */

	/*
	 * let uvmfault_anonget do the dirty work.
	 * if it fails (!OK) it will unlock everything for us.
	 * if it succeeds, locks are still valid and locked.
	 * also, if it is OK, then the anon's page is on the queues.
	 * if the page is on loan from a uvm_object, then anonget will
	 * lock that object for us if it does not fail.
	 */

	error = uvmfault_anonget(&ufi, amap, anon);
	switch (error) {
	case 0:
		break;

	case ERESTART:
		goto ReFault;

	case EAGAIN:
		tsleep(&lbolt, PVM, "fltagain1", 0);
		goto ReFault;

	default:
		return error;
	}

	/*
	 * uobj is non null if the page is on loan from an object (i.e. uobj)
	 */

	uobj = anon->u.an_page->uobject;	/* locked by anonget if !NULL */

	/* locked: maps(read), amap, anon, uobj(if one) */

	/*
	 * special handling for loaned pages
	 */

	if (anon->u.an_page->loan_count) {

		if (!cow_now) {

			/*
			 * for read faults on loaned pages we just cap the
			 * protection at read-only.
			 */

			enter_prot = enter_prot & ~VM_PROT_WRITE;

		} else {
			/*
			 * note that we can't allow writes into a loaned page!
			 *
			 * if we have a write fault on a loaned page in an
			 * anon then we need to look at the anon's ref count.
			 * if it is greater than one then we are going to do
			 * a normal copy-on-write fault into a new anon (this
			 * is not a problem).  however, if the reference count
			 * is one (a case where we would normally allow a
			 * write directly to the page) then we need to kill
			 * the loan before we continue.
			 */

			/* >1 case is already ok */
			if (anon->an_ref == 1) {

				/* get new un-owned replacement page */
				pg = uvm_pagealloc(NULL, 0, NULL, 0);
				if (pg == NULL) {
					uvmfault_unlockall(&ufi, amap, uobj,
					    anon);
					uvm_wait("flt_noram2");
					goto ReFault;
				}

				/*
				 * copy data, kill loan, and drop uobj lock
				 * (if any)
				 */
				/* copy old -> new */
				uvm_pagecopy(anon->u.an_page, pg);

				/* force reload */
				pmap_page_protect(anon->u.an_page,
						  VM_PROT_NONE);
				uvm_lock_pageq();	  /* KILL loan */

				anon->u.an_page->uanon = NULL;
				/* in case we owned */
				anon->u.an_page->pqflags &= ~PQ_ANON;

				if (uobj) {
					/* if we were receiver of loan */
					anon->u.an_page->loan_count--;
				} else {
					/*
					 * we were the lender (A->K); need
					 * to remove the page from pageq's.
					 */
					uvm_pagedequeue(anon->u.an_page);
				}

				uvm_pageactivate(pg);
				uvm_unlock_pageq();
				if (uobj) {
					simple_unlock(&uobj->vmobjlock);
					uobj = NULL;
				}

				/* install new page in anon */
				anon->u.an_page = pg;
				pg->uanon = anon;
				pg->pqflags |= PQ_ANON;
				pg->flags &= ~(PG_BUSY|PG_FAKE);
				UVM_PAGE_OWN(pg, NULL);

				/* done! */
			}     /* ref == 1 */
		}       /* write fault */
	}         /* loan count */

	/*
	 * if we are case 1B then we will need to allocate a new blank
	 * anon to transfer the data into.   note that we have a lock
	 * on anon, so no one can busy or release the page until we are done.
	 * also note that the ref count can't drop to zero here because
	 * it is > 1 and we are only dropping one ref.
	 *
	 * in the (hopefully very rare) case that we are out of RAM we
	 * will unlock, wait for more RAM, and refault.
	 *
	 * if we are out of anon VM we kill the process (XXX: could wait?).
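	 *
	 * (illustrative: after a fork, parent and child share the anon
	 * and an_ref is 2, so the first write by either process takes
	 * this path.)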
	 */

	if (cow_now && anon->an_ref > 1) {

		UVMHIST_LOG(maphist, "  case 1B: COW fault",0,0,0,0);
		uvmexp.flt_acow++;
		oanon = anon;		/* oanon = old, locked anon */
		anon = uvm_analloc();
		if (anon) {
			/* new anon is locked! */
			pg = uvm_pagealloc(NULL, 0, anon, 0);
		}

		/* check for out of RAM */
		if (anon == NULL || pg == NULL) {
			if (anon) {
				anon->an_ref--;
				simple_unlock(&anon->an_lock);
				uvm_anfree(anon);
			}
			uvmfault_unlockall(&ufi, amap, uobj, oanon);
			KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
			if (anon == NULL || uvmexp.swpgonly == uvmexp.swpages) {
				UVMHIST_LOG(maphist,
				    "<- failed.  out of VM",0,0,0,0);
				uvmexp.fltnoanon++;
				return ENOMEM;
			}

			uvmexp.fltnoram++;
			uvm_wait("flt_noram3");	/* out of RAM, wait for more */
			goto ReFault;
		}

		/* got all resources, replace anon with nanon */
		uvm_pagecopy(oanon->u.an_page, pg);
		uvm_pageactivate(pg);
		pg->flags &= ~(PG_BUSY|PG_FAKE);
		UVM_PAGE_OWN(pg, NULL);
		amap_add(&ufi.entry->aref, ufi.orig_rvaddr - ufi.entry->start,
		    anon, 1);

		/* deref: can not drop to zero here by defn! */
		oanon->an_ref--;

		/*
		 * note: oanon is still locked, as is the new anon.  we
		 * need to check for this later when we unlock oanon; if
		 * oanon != anon, we'll have to unlock anon, too.
		 */

	} else {

		uvmexp.flt_anon++;
		oanon = anon;		/* old, locked anon is same as anon */
		pg = anon->u.an_page;
		if (anon->an_ref > 1)     /* disallow writes to ref > 1 anons */
			enter_prot = enter_prot & ~VM_PROT_WRITE;

	}

	/* locked: maps(read), amap, oanon, anon (if different from oanon) */

	/*
	 * now map the page in.
	 */

	UVMHIST_LOG(maphist, "  MAPPING: anon: pm=0x%x, va=0x%x, pg=0x%x",
	    ufi.orig_map->pmap, ufi.orig_rvaddr, pg, 0);
	if (pmap_enter(ufi.orig_map->pmap, ufi.orig_rvaddr, VM_PAGE_TO_PHYS(pg),
	    enter_prot, access_type | PMAP_CANFAIL | (wired ? PMAP_WIRED : 0))
	    != 0) {

		/*
		 * No need to undo what we did; we can simply think of
		 * this as the pmap throwing away the mapping information.
		 *
		 * We do, however, have to go through the ReFault path,
		 * as the map may change while we're asleep.
		 */

		if (anon != oanon)
			simple_unlock(&anon->an_lock);
		uvmfault_unlockall(&ufi, amap, uobj, oanon);
		KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
		if (uvmexp.swpgonly == uvmexp.swpages) {
			UVMHIST_LOG(maphist,
			    "<- failed.  out of VM",0,0,0,0);
			/* XXX instrumentation */
			return ENOMEM;
		}
		/* XXX instrumentation */
		uvm_wait("flt_pmfail1");
		goto ReFault;
	}

	/*
	 * ... update the page queues.
	 */

	uvm_lock_pageq();
	if (wire_fault) {
		uvm_pagewire(pg);

		/*
		 * since the now-wired page cannot be paged out,
		 * release its swap resources for others to use.
		 * since an anon with no swap cannot be PG_CLEAN,
		 * clear its clean flag now.
		 */

		pg->flags &= ~(PG_CLEAN);
		uvm_anon_dropswap(anon);
	} else {
		uvm_pageactivate(pg);
	}
	uvm_unlock_pageq();

	/*
	 * done case 1!
	 * finish up by unlocking everything and returning success.
	 */

	if (anon != oanon)
		simple_unlock(&anon->an_lock);
	uvmfault_unlockall(&ufi, amap, uobj, oanon);
	pmap_update(ufi.orig_map->pmap);
	return 0;

Case2:
	/*
	 * handle case 2: faulting on backing object or zero fill
	 */

	/*
	 * locked:
	 * maps(read), amap(if there), uobj(if !null), uobjpage(if !null)
	 */

	/*
	 * note that uobjpage can not be PGO_DONTCARE at this point.  we now
	 * set uobjpage to PGO_DONTCARE if we are doing a zero fill.  if we
	 * have a backing object, check and see if we are going to promote
	 * the data up to an anon during the fault.
	 */

	if (uobj == NULL) {
		uobjpage = PGO_DONTCARE;
		promote = TRUE;		/* always need anon here */
	} else {
		KASSERT(uobjpage != PGO_DONTCARE);
		promote = cow_now && UVM_ET_ISCOPYONWRITE(ufi.entry);
	}
	UVMHIST_LOG(maphist, "  case 2 fault: promote=%d, zfill=%d",
	    promote, (uobj == NULL), 0,0);

	/*
	 * if uobjpage is not null then we do not need to do I/O to get the
	 * uobjpage.
	 *
	 * if uobjpage is null, then we need to unlock and ask the pager to
	 * get the data for us.   once we have the data, we need to reverify
	 * the state of the world.  we are currently not holding any resources.
	 */

	if (uobjpage) {
		/* update rusage counters */
		curproc->p_addr->u_stats.p_ru.ru_minflt++;
	} else {
		/* update rusage counters */
		curproc->p_addr->u_stats.p_ru.ru_majflt++;

		/* locked: maps(read), amap(if there), uobj */
		uvmfault_unlockall(&ufi, amap, NULL, NULL);
		/* locked: uobj */

		uvmexp.fltget++;
		gotpages = 1;
		uoff = (ufi.orig_rvaddr - ufi.entry->start) + ufi.entry->offset;
		error = uobj->pgops->pgo_get(uobj, uoff, &uobjpage, &gotpages,
		    0, access_type & MASK(ufi.entry), ufi.entry->advice,
		    PGO_SYNCIO);
		/* locked: uobjpage(if no error) */

		/*
		 * recover from I/O
		 */

		if (error) {
			if (error == EAGAIN) {
				UVMHIST_LOG(maphist,
				    "  pgo_get says TRY AGAIN!",0,0,0,0);
				tsleep(&lbolt, PVM, "fltagain2", 0);
				goto ReFault;
			}

			UVMHIST_LOG(maphist, "<- pgo_get failed (code %d)",
			    error, 0,0,0);
			return error;
		}

		/* locked: uobjpage */

		uvm_lock_pageq();
		uvm_pageactivate(uobjpage);
		uvm_unlock_pageq();

		/*
		 * re-verify the state of the world by first trying to relock
		 * the maps.  always relock the object.
		 */

		locked = uvmfault_relock(&ufi);
		if (locked && amap)
			amap_lock(amap);
		simple_lock(&uobj->vmobjlock);

		/* locked(locked): maps(read), amap(if !null), uobj, uobjpage */
		/* locked(!locked): uobj, uobjpage */

		/*
		 * verify that the page has not been released and re-verify
		 * that amap slot is still free.   if there is a problem,
		 * we unlock and clean up.
		 */

		if ((uobjpage->flags & PG_RELEASED) != 0 ||
		    (locked && amap &&
		    amap_lookup(&ufi.entry->aref,
		      ufi.orig_rvaddr - ufi.entry->start))) {
			if (locked)
				uvmfault_unlockall(&ufi, amap, NULL, NULL);
			locked = FALSE;
		}

		/*
		 * didn't get the lock?   release the page and retry.
		 */

		if (locked == FALSE) {
			UVMHIST_LOG(maphist,
			    "  wasn't able to relock after fault: retry",
			    0,0,0,0);
			if (uobjpage->flags & PG_WANTED)
				wakeup(uobjpage);
			if (uobjpage->flags & PG_RELEASED) {
				uvmexp.fltpgrele++;
				uvm_pagefree(uobjpage);
				goto ReFault;
			}
			uobjpage->flags &= ~(PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(uobjpage, NULL);
			simple_unlock(&uobj->vmobjlock);
			goto ReFault;
		}

		/*
		 * we have the data in uobjpage which is busy and
		 * not released.  we are holding object lock (so the page
		 * can't be released on us).
		 */

		/* locked: maps(read), amap(if !null), uobj, uobjpage */
	}

	/*
	 * locked:
	 * maps(read), amap(if !null), uobj(if !null), uobjpage(if uobj)
	 */

	/*
	 * notes:
	 *  - at this point uobjpage can not be NULL
	 *  - at this point uobjpage can not be PG_RELEASED (since we checked
	 *    for it above)
	 *  - at this point uobjpage could be PG_WANTED (handle later)
	 */

	if (promote == FALSE) {

		/*
		 * we are not promoting.   if the mapping is COW ensure that we
		 * don't give more access than we should (e.g. when doing a read
		 * fault on a COPYONWRITE mapping we want to map the COW page in
		 * R/O even though the entry protection could be R/W).
		 *
		 * set "pg" to the page we want to map in (uobjpage, usually)
		 */

		/* no anon in this case. */
		anon = NULL;

		uvmexp.flt_obj++;
		if (UVM_ET_ISCOPYONWRITE(ufi.entry))
			enter_prot &= ~VM_PROT_WRITE;
		pg = uobjpage;		/* map in the actual object */

		/* assert(uobjpage != PGO_DONTCARE) */

		/*
		 * we are faulting directly on the page.   be careful
		 * about writing to loaned pages...
		 */

		if (uobjpage->loan_count) {
			if (!cow_now) {
				/* read fault: cap the protection at readonly */
				/* cap! */
				enter_prot = enter_prot & ~VM_PROT_WRITE;
			} else {
				/* write fault: must break the loan here */

				/* alloc new un-owned page */
				pg = uvm_pagealloc(NULL, 0, NULL, 0);

				if (pg == NULL) {

					/*
					 * drop ownership of page, it can't
					 * be released
					 */

					if (uobjpage->flags & PG_WANTED)
						wakeup(uobjpage);
					uobjpage->flags &= ~(PG_BUSY|PG_WANTED);
					UVM_PAGE_OWN(uobjpage, NULL);

					uvmfault_unlockall(&ufi, amap, uobj,
					  NULL);
					UVMHIST_LOG(maphist,
					  "  out of RAM breaking loan, waiting",
					  0,0,0,0);
					uvmexp.fltnoram++;
					uvm_wait("flt_noram4");
					goto ReFault;
				}

				/*
				 * copy the data from the old page to the new
				 * one and clear the fake/clean flags on the
				 * new page (keep it busy).  force a reload
				 * of the old page by clearing it from all
				 * pmaps.  then lock the page queues to
				 * rename the pages.
				 */

				uvm_pagecopy(uobjpage, pg);	/* old -> new */
				pg->flags &= ~(PG_FAKE|PG_CLEAN);
				pmap_page_protect(uobjpage, VM_PROT_NONE);
				if (uobjpage->flags & PG_WANTED)
					wakeup(uobjpage);
				/* uobj still locked */
				uobjpage->flags &= ~(PG_WANTED|PG_BUSY);
				UVM_PAGE_OWN(uobjpage, NULL);

				uvm_lock_pageq();
				offset = uobjpage->offset;
				uvm_pagerealloc(uobjpage, NULL, 0);

				/*
				 * if the page is no longer referenced by
				 * an anon (i.e. we are breaking an O->K
				 * loan), then remove it from any pageq's.
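				 * ("O->K" meaning the page had been loaned
				 * from the object to wired kernel memory
				 * rather than to an anon.)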
				 */
				if (uobjpage->uanon == NULL)
					uvm_pagedequeue(uobjpage);

				/*
				 * at this point we have absolutely no
				 * control over uobjpage
				 */

				/* install new page */
				uvm_pageactivate(pg);
				uvm_pagerealloc(pg, uobj, offset);
				uvm_unlock_pageq();

				/*
				 * done!  loan is broken and "pg" is
				 * PG_BUSY.   it can now replace uobjpage.
				 */

				uobjpage = pg;
			}
		}
	} else {

		/*
		 * if we are going to promote the data to an anon we
		 * allocate a blank anon here and plug it into our amap.
		 */
#ifdef DIAGNOSTIC
		if (amap == NULL)
			panic("uvm_fault: want to promote data, but no anon");
#endif

		anon = uvm_analloc();
		if (anon) {

			/*
			 * The new anon is locked.
			 *
			 * In `Fill in data...' below, if
			 * uobjpage == PGO_DONTCARE, we want
			 * a zero'd, dirty page, so have
			 * uvm_pagealloc() do that for us.
			 */

			pg = uvm_pagealloc(NULL, 0, anon,
			    (uobjpage == PGO_DONTCARE) ? UVM_PGA_ZERO : 0);
		}

		/*
		 * out of memory resources?
		 */

		if (anon == NULL || pg == NULL) {
			if (anon != NULL) {
				anon->an_ref--;
				simple_unlock(&anon->an_lock);
				uvm_anfree(anon);
			}

			/*
			 * arg!  must unbusy our page and fail or sleep.
			 */

			if (uobjpage != PGO_DONTCARE) {
				if (uobjpage->flags & PG_WANTED)
					/* still holding object lock */
					wakeup(uobjpage);

				uobjpage->flags &= ~(PG_BUSY|PG_WANTED);
				UVM_PAGE_OWN(uobjpage, NULL);
			}

			/* unlock and fail ... */
			uvmfault_unlockall(&ufi, amap, uobj, NULL);
			KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
			if (anon == NULL || uvmexp.swpgonly == uvmexp.swpages) {
				UVMHIST_LOG(maphist, "  promote: out of VM",
				    0,0,0,0);
				uvmexp.fltnoanon++;
				return ENOMEM;
			}

			UVMHIST_LOG(maphist, "  out of RAM, waiting for more",
			    0,0,0,0);
			uvmexp.fltnoram++;
			uvm_wait("flt_noram5");
			goto ReFault;
		}

		/*
		 * fill in the data
		 */

		if (uobjpage != PGO_DONTCARE) {
			uvmexp.flt_prcopy++;
			/* copy page [pg now dirty] */
			uvm_pagecopy(uobjpage, pg);

			/*
			 * promote to shared amap?  make sure all sharing
			 * procs see it
			 */

			if ((amap_flags(amap) & AMAP_SHARED) != 0) {
				pmap_page_protect(uobjpage, VM_PROT_NONE);
				/*
				 * XXX: PAGE MIGHT BE WIRED!
				 */
			}

			/*
			 * dispose of uobjpage.  it can't be PG_RELEASED
			 * since we still hold the object lock.
			 * drop handle to uobj as well.
			 */

			if (uobjpage->flags & PG_WANTED)
				/* still have the obj lock */
				wakeup(uobjpage);
			uobjpage->flags &= ~(PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(uobjpage, NULL);
			simple_unlock(&uobj->vmobjlock);
			uobj = NULL;

			UVMHIST_LOG(maphist,
			    " promote uobjpage 0x%x to anon/page 0x%x/0x%x",
			    uobjpage, anon, pg, 0);

		} else {
			uvmexp.flt_przero++;

			/*
			 * Page is zero'd and marked dirty by uvm_pagealloc()
			 * above.
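			 * ("dirty" here means PG_CLEAN is clear, so the
			 * pager will not discard the zeroed page as if it
			 * still had identical backing store.)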
			 */

			UVMHIST_LOG(maphist,"  zero fill anon/page 0x%x/0x%x",
			    anon, pg, 0, 0);
		}
		amap_add(&ufi.entry->aref, ufi.orig_rvaddr - ufi.entry->start,
		    anon, 0);
	}

	/*
	 * locked:
	 * maps(read), amap(if !null), uobj(if !null), uobjpage(if uobj),
	 *   anon(if !null), pg(if anon)
	 *
	 * note: pg is either the uobjpage or the new page in the new anon
	 */

	/*
	 * all resources are present.   we can now map it in and free our
	 * resources.
	 */

	UVMHIST_LOG(maphist,
	    "  MAPPING: case2: pm=0x%x, va=0x%x, pg=0x%x, promote=%d",
	    ufi.orig_map->pmap, ufi.orig_rvaddr, pg, promote);
	KASSERT((access_type & VM_PROT_WRITE) == 0 ||
	    (pg->flags & PG_RDONLY) == 0);
	if (pmap_enter(ufi.orig_map->pmap, ufi.orig_rvaddr, VM_PAGE_TO_PHYS(pg),
	    pg->flags & PG_RDONLY ? enter_prot & ~VM_PROT_WRITE : enter_prot,
	    access_type | PMAP_CANFAIL | (wired ? PMAP_WIRED : 0)) != 0) {

		/*
		 * No need to undo what we did; we can simply think of
		 * this as the pmap throwing away the mapping information.
		 *
		 * We do, however, have to go through the ReFault path,
		 * as the map may change while we're asleep.
		 */

		if (pg->flags & PG_WANTED)
			wakeup(pg);

		/*
		 * note that pg can't be PG_RELEASED since we did not drop
		 * the object lock since the last time we checked.
		 */

		pg->flags &= ~(PG_BUSY|PG_FAKE|PG_WANTED);
		UVM_PAGE_OWN(pg, NULL);
		uvmfault_unlockall(&ufi, amap, uobj, anon);
		KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
		if (uvmexp.swpgonly == uvmexp.swpages) {
			UVMHIST_LOG(maphist,
			    "<- failed.  out of VM",0,0,0,0);
			/* XXX instrumentation */
			return ENOMEM;
		}
		/* XXX instrumentation */
		uvm_wait("flt_pmfail2");
		goto ReFault;
	}

	uvm_lock_pageq();
	if (wire_fault) {
		uvm_pagewire(pg);
		if (pg->pqflags & PQ_AOBJ) {

			/*
			 * since the now-wired page cannot be paged out,
			 * release its swap resources for others to use.
			 * since an aobj page with no swap cannot be PG_CLEAN,
			 * clear its clean flag now.
			 */

			pg->flags &= ~(PG_CLEAN);
			uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
		}
	} else {
		uvm_pageactivate(pg);
	}
	uvm_unlock_pageq();
	if (pg->flags & PG_WANTED)
		wakeup(pg);

	/*
	 * note that pg can't be PG_RELEASED since we did not drop the object
	 * lock since the last time we checked.
	 */

	pg->flags &= ~(PG_BUSY|PG_FAKE|PG_WANTED);
	UVM_PAGE_OWN(pg, NULL);
	uvmfault_unlockall(&ufi, amap, uobj, anon);
	pmap_update(ufi.orig_map->pmap);
	UVMHIST_LOG(maphist, "<- done (SUCCESS!)",0,0,0,0);
	return 0;
}
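
/*
 * illustrative only (a sketch, not lifted from uvm_map.c): a caller
 * such as uvm_map_pageable() wires an entry's range roughly as
 *
 *	error = uvm_fault_wire(map, entry->start, entry->end,
 *	    VM_FAULT_WIREMAX, VM_PROT_NONE);
 *
 * relying on uvm_fault() to substitute the entry's protection for
 * VM_PROT_NONE when wiring (see the note in uvm_fault_wire() below);
 * the exact arguments used by real callers may differ.
 */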

/*
 * uvm_fault_wire: wire down a range of virtual addresses in a map.
 *
 * => map may be read-locked by caller, but MUST NOT be write-locked.
 * => if map is read-locked, any operations which may cause map to
 *	be write-locked in uvm_fault() must be taken care of by
 *	the caller.   See uvm_map_pageable().
 */

int
uvm_fault_wire(map, start, end, fault_type, access_type)
	struct vm_map *map;
	vaddr_t start, end;
	vm_fault_t fault_type;
	vm_prot_t access_type;
{
	vaddr_t va;
	int error;

	/*
	 * now fault it in a page at a time.   if the fault fails then we have
	 * to undo what we have done.   note that in uvm_fault VM_PROT_NONE
	 * is replaced with the max protection if fault_type is VM_FAULT_WIRE.
	 */

	/*
	 * XXX work around overflowing a vaddr_t.  this prevents us from
	 * wiring the last page in the address space, though.
	 */
	if (start > end) {
		return EFAULT;
	}

	for (va = start ; va < end ; va += PAGE_SIZE) {
		error = uvm_fault(map, va, fault_type, access_type);
		if (error) {
			if (va != start) {
				uvm_fault_unwire(map, start, va);
			}
			return error;
		}
	}
	return 0;
}

/*
 * uvm_fault_unwire(): unwire range of virtual space.
 */

void
uvm_fault_unwire(map, start, end)
	struct vm_map *map;
	vaddr_t start, end;
{
	vm_map_lock_read(map);
	uvm_fault_unwire_locked(map, start, end);
	vm_map_unlock_read(map);
}

/*
 * uvm_fault_unwire_locked(): the guts of uvm_fault_unwire().
 *
 * => map must be at least read-locked.
 */

void
uvm_fault_unwire_locked(map, start, end)
	struct vm_map *map;
	vaddr_t start, end;
{
	struct vm_map_entry *entry;
	pmap_t pmap = vm_map_pmap(map);
	vaddr_t va;
	paddr_t pa;
	struct vm_page *pg;

	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);

	/*
	 * we assume that the area we are unwiring has actually been wired
	 * in the first place.   this means that we should be able to extract
	 * the PAs from the pmap.   we also lock out the page daemon so that
	 * we can call uvm_pageunwire.
	 */

	uvm_lock_pageq();

	/*
	 * find the beginning map entry for the region.
	 */

	KASSERT(start >= vm_map_min(map) && end <= vm_map_max(map));
	if (uvm_map_lookup_entry(map, start, &entry) == FALSE)
		panic("uvm_fault_unwire_locked: address not in map");

	for (va = start; va < end; va += PAGE_SIZE) {
		if (pmap_extract(pmap, va, &pa) == FALSE)
			continue;

		/*
		 * find the map entry for the current address.
		 */

		KASSERT(va >= entry->start);
		while (va >= entry->end) {
			KASSERT(entry->next != &map->header &&
				entry->next->start <= entry->end);
			entry = entry->next;
		}

		/*
		 * if the entry is no longer wired, tell the pmap.
		 */

		if (VM_MAPENT_ISWIRED(entry) == 0)
			pmap_unwire(pmap, va);

		pg = PHYS_TO_VM_PAGE(pa);
		if (pg)
			uvm_pageunwire(pg);
	}

	uvm_unlock_pageq();
}