1 /* $NetBSD: uvm_loan.c,v 1.37 2002/05/07 02:29:52 enami Exp $ */ 2 3 /* 4 * 5 * Copyright (c) 1997 Charles D. Cranor and Washington University. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Charles D. Cranor and 19 * Washington University. 20 * 4. The name of the author may not be used to endorse or promote products 21 * derived from this software without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
33 * 34 * from: Id: uvm_loan.c,v 1.1.6.4 1998/02/06 05:08:43 chs Exp 35 */ 36 37 /* 38 * uvm_loan.c: page loanout handler 39 */ 40 41 #include <sys/cdefs.h> 42 __KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.37 2002/05/07 02:29:52 enami Exp $"); 43 44 #include <sys/param.h> 45 #include <sys/systm.h> 46 #include <sys/kernel.h> 47 #include <sys/proc.h> 48 #include <sys/malloc.h> 49 #include <sys/mman.h> 50 51 #include <uvm/uvm.h> 52 53 /* 54 * "loaned" pages are pages which are (read-only, copy-on-write) loaned 55 * from the VM system to other parts of the kernel. this allows page 56 * copying to be avoided (e.g. you can loan pages from objs/anons to 57 * the mbuf system). 58 * 59 * there are 3 types of loans possible: 60 * O->K uvm_object page to wired kernel page (e.g. mbuf data area) 61 * A->K anon page to wired kernel page (e.g. mbuf data area) 62 * O->A uvm_object to anon loan (e.g. vnode page to an anon) 63 * note that it possible to have an O page loaned to both an A and K 64 * at the same time. 65 * 66 * loans are tracked by pg->loan_count. an O->A page will have both 67 * a uvm_object and a vm_anon, but PQ_ANON will not be set. this sort 68 * of page is considered "owned" by the uvm_object (not the anon). 69 * 70 * each loan of a page to the kernel bumps the pg->wire_count. the 71 * kernel mappings for these pages will be read-only and wired. since 72 * the page will also be wired, it will not be a candidate for pageout, 73 * and thus will never be pmap_page_protect()'d with VM_PROT_NONE. a 74 * write fault in the kernel to one of these pages will not cause 75 * copy-on-write. instead, the page fault is considered fatal. this 76 * is because the kernel mapping will have no way to look up the 77 * object/anon which the page is owned by. this is a good side-effect, 78 * since a kernel write to a loaned page is an error. 79 * 80 * owners that want to free their pages and discover that they are 81 * loaned out simply "disown" them (the page becomes an orphan). 
these 82 * pages should be freed when the last loan is dropped. in some cases 83 * an anon may "adopt" an orphaned page. 84 * 85 * locking: to read pg->loan_count either the owner or the page queues 86 * must be locked. to modify pg->loan_count, both the owner of the page 87 * and the PQs must be locked. pg->flags is (as always) locked by 88 * the owner of the page. 89 * 90 * note that locking from the "loaned" side is tricky since the object 91 * getting the loaned page has no reference to the page's owner and thus 92 * the owner could "die" at any time. in order to prevent the owner 93 * from dying the page queues should be locked. this forces us to sometimes 94 * use "try" locking. 95 * 96 * loans are typically broken by the following events: 97 * 1. user-level write fault to a loaned page 98 * 2. pageout of clean+inactive O->A loaned page 99 * 3. owner frees page (e.g. pager flush) 100 * 101 * note that loaning a page causes all mappings of the page to become 102 * read-only (via pmap_page_protect). this could have an unexpected 103 * effect on normal "wired" pages if one is not careful (XXX). 
104 */ 105 106 /* 107 * local prototypes 108 */ 109 110 static int uvm_loananon __P((struct uvm_faultinfo *, void ***, 111 int, struct vm_anon *)); 112 static int uvm_loanentry __P((struct uvm_faultinfo *, void ***, int)); 113 static int uvm_loanuobj __P((struct uvm_faultinfo *, void ***, 114 int, vaddr_t)); 115 static int uvm_loanzero __P((struct uvm_faultinfo *, void ***, int)); 116 static void uvm_unloananon __P((struct vm_anon **, int)); 117 static void uvm_unloanpage __P((struct vm_page **, int)); 118 119 120 /* 121 * inlines 122 */ 123 124 /* 125 * uvm_loanentry: loan out pages in a map entry (helper fn for uvm_loan()) 126 * 127 * => "ufi" is the result of a successful map lookup (meaning that 128 * on entry the map is locked by the caller) 129 * => we may unlock and then relock the map if needed (for I/O) 130 * => we put our output result in "output" 131 * => we always return with the map unlocked 132 * => possible return values: 133 * -1 == error, map is unlocked 134 * 0 == map relock error (try again!), map is unlocked 135 * >0 == number of pages we loaned, map is unlocked 136 */ 137 138 static __inline int 139 uvm_loanentry(ufi, output, flags) 140 struct uvm_faultinfo *ufi; 141 void ***output; 142 int flags; 143 { 144 vaddr_t curaddr = ufi->orig_rvaddr; 145 vsize_t togo = ufi->size; 146 struct vm_aref *aref = &ufi->entry->aref; 147 struct uvm_object *uobj = ufi->entry->object.uvm_obj; 148 struct vm_anon *anon; 149 int rv, result = 0; 150 151 /* 152 * lock us the rest of the way down (we unlock before return) 153 */ 154 if (aref->ar_amap) 155 amap_lock(aref->ar_amap); 156 if (uobj) 157 simple_lock(&uobj->vmobjlock); 158 159 /* 160 * loop until done 161 */ 162 while (togo) { 163 164 /* 165 * find the page we want. check the anon layer first. 
166 */ 167 168 if (aref->ar_amap) { 169 anon = amap_lookup(aref, curaddr - ufi->entry->start); 170 } else { 171 anon = NULL; 172 } 173 174 /* locked: map, amap, uobj */ 175 if (anon) { 176 rv = uvm_loananon(ufi, output, flags, anon); 177 } else if (uobj) { 178 rv = uvm_loanuobj(ufi, output, flags, curaddr); 179 } else if (UVM_ET_ISCOPYONWRITE(ufi->entry)) { 180 rv = uvm_loanzero(ufi, output, flags); 181 } else { 182 rv = -1; 183 } 184 /* locked: if (rv > 0) => map, amap, uobj [o.w. unlocked] */ 185 186 /* total failure */ 187 if (rv < 0) 188 return (-1); 189 190 /* relock failed, need to do another lookup */ 191 if (rv == 0) 192 return (result); 193 194 /* 195 * got it... advance to next page 196 */ 197 198 result++; 199 togo -= PAGE_SIZE; 200 curaddr += PAGE_SIZE; 201 } 202 203 /* 204 * unlock what we locked, unlock the maps and return 205 */ 206 207 if (aref->ar_amap) 208 amap_unlock(aref->ar_amap); 209 if (uobj) 210 simple_unlock(&uobj->vmobjlock); 211 uvmfault_unlockmaps(ufi, FALSE); 212 return (result); 213 } 214 215 /* 216 * normal functions 217 */ 218 219 /* 220 * uvm_loan: loan pages in a map out to anons or to the kernel 221 * 222 * => map should be unlocked 223 * => start and len should be multiples of PAGE_SIZE 224 * => result is either an array of anon's or vm_pages (depending on flags) 225 * => flag values: UVM_LOAN_TOANON - loan to anons 226 * UVM_LOAN_TOPAGE - loan to wired kernel page 227 * one and only one of these flags must be set! 
228 * => returns 0 (success), or an appropriate error number 229 */ 230 231 int 232 uvm_loan(map, start, len, v, flags) 233 struct vm_map *map; 234 vaddr_t start; 235 vsize_t len; 236 void *v; 237 int flags; 238 { 239 struct uvm_faultinfo ufi; 240 void **result, **output; 241 int rv, error; 242 243 /* 244 * ensure that one and only one of the flags is set 245 */ 246 247 KASSERT(((flags & UVM_LOAN_TOANON) == 0) ^ 248 ((flags & UVM_LOAN_TOPAGE) == 0)); 249 KASSERT((map->flags & VM_MAP_INTRSAFE) == 0); 250 251 /* 252 * "output" is a pointer to the current place to put the loaned page. 253 */ 254 255 result = v; 256 output = &result[0]; /* start at the beginning ... */ 257 258 /* 259 * while we've got pages to do 260 */ 261 262 while (len > 0) { 263 264 /* 265 * fill in params for a call to uvmfault_lookup 266 */ 267 268 ufi.orig_map = map; 269 ufi.orig_rvaddr = start; 270 ufi.orig_size = len; 271 272 /* 273 * do the lookup, the only time this will fail is if we hit on 274 * an unmapped region (an error) 275 */ 276 277 if (!uvmfault_lookup(&ufi, FALSE)) { 278 error = ENOENT; 279 goto fail; 280 } 281 282 /* 283 * map now locked. now do the loanout... 284 */ 285 286 rv = uvm_loanentry(&ufi, &output, flags); 287 if (rv < 0) { 288 /* all unlocked due to error */ 289 error = EINVAL; 290 goto fail; 291 } 292 293 /* 294 * done! the map is unlocked. advance, if possible. 295 * 296 * XXXCDC: could be recoded to hold the map lock with 297 * smarter code (but it only happens on map entry 298 * boundaries, so it isn't that bad). 299 */ 300 301 if (rv) { 302 rv <<= PAGE_SHIFT; 303 len -= rv; 304 start += rv; 305 } 306 } 307 return 0; 308 309 fail: 310 /* 311 * failed to complete loans. drop any loans and return failure code. 312 * map is already unlocked. 
313 */ 314 315 if (output - result) { 316 if (flags & UVM_LOAN_TOANON) { 317 uvm_unloananon((struct vm_anon **)result, 318 output - result); 319 } else { 320 uvm_unloanpage((struct vm_page **)result, 321 output - result); 322 } 323 } 324 return (error); 325 } 326 327 /* 328 * uvm_loananon: loan a page from an anon out 329 * 330 * => called with map, amap, uobj locked 331 * => return value: 332 * -1 = fatal error, everything is unlocked, abort. 333 * 0 = lookup in ufi went stale, everything unlocked, relookup and 334 * try again 335 * 1 = got it, everything still locked 336 */ 337 338 int 339 uvm_loananon(ufi, output, flags, anon) 340 struct uvm_faultinfo *ufi; 341 void ***output; 342 int flags; 343 struct vm_anon *anon; 344 { 345 struct vm_page *pg; 346 int error; 347 348 /* 349 * if we are loaning to "another" anon then it is easy, we just 350 * bump the reference count on the current anon and return a 351 * pointer to it (it becomes copy-on-write shared). 352 */ 353 354 if (flags & UVM_LOAN_TOANON) { 355 simple_lock(&anon->an_lock); 356 pg = anon->u.an_page; 357 if (pg && (pg->pqflags & PQ_ANON) != 0 && anon->an_ref == 1) { 358 pmap_page_protect(pg, VM_PROT_READ); 359 } 360 anon->an_ref++; 361 **output = anon; 362 (*output)++; 363 simple_unlock(&anon->an_lock); 364 return (1); 365 } 366 367 /* 368 * we are loaning to a kernel-page. we need to get the page 369 * resident so we can wire it. uvmfault_anonget will handle 370 * this for us. 371 */ 372 373 simple_lock(&anon->an_lock); 374 error = uvmfault_anonget(ufi, ufi->entry->aref.ar_amap, anon); 375 376 /* 377 * if we were unable to get the anon, then uvmfault_anonget has 378 * unlocked everything and returned an error code. 379 */ 380 381 if (error) { 382 383 /* need to refault (i.e. refresh our lookup) ? */ 384 if (error == ERESTART) { 385 return (0); 386 } 387 388 /* "try again"? sleep a bit and retry ... 
*/ 389 if (error == EAGAIN) { 390 tsleep(&lbolt, PVM, "loanagain", 0); 391 return (0); 392 } 393 394 /* otherwise flag it as an error */ 395 return (-1); 396 } 397 398 /* 399 * we have the page and its owner locked: do the loan now. 400 */ 401 402 pg = anon->u.an_page; 403 uvm_lock_pageq(); 404 KASSERT(pg->wire_count == 0); 405 if (pg->loan_count == 0) { 406 pmap_page_protect(pg, VM_PROT_READ); 407 } 408 pg->loan_count++; 409 uvm_pagedequeue(pg); 410 uvm_unlock_pageq(); 411 **output = pg; 412 (*output)++; 413 414 /* unlock anon and return success */ 415 if (pg->uobject) /* XXXCDC: what if this is our uobj? bad */ 416 simple_unlock(&pg->uobject->vmobjlock); 417 simple_unlock(&anon->an_lock); 418 return (1); 419 } 420 421 /* 422 * uvm_loanuobj: loan a page from a uobj out 423 * 424 * => called with map, amap, uobj locked 425 * => return value: 426 * -1 = fatal error, everything is unlocked, abort. 427 * 0 = lookup in ufi went stale, everything unlocked, relookup and 428 * try again 429 * 1 = got it, everything still locked 430 */ 431 432 static int 433 uvm_loanuobj(ufi, output, flags, va) 434 struct uvm_faultinfo *ufi; 435 void ***output; 436 int flags; 437 vaddr_t va; 438 { 439 struct vm_amap *amap = ufi->entry->aref.ar_amap; 440 struct uvm_object *uobj = ufi->entry->object.uvm_obj; 441 struct vm_page *pg; 442 struct vm_anon *anon; 443 int error, npages; 444 boolean_t locked; 445 446 /* 447 * first we must make sure the page is resident. 448 * 449 * XXXCDC: duplicate code with uvm_fault(). 450 */ 451 452 if (uobj->pgops->pgo_get) { /* try locked pgo_get */ 453 npages = 1; 454 pg = NULL; 455 error = (*uobj->pgops->pgo_get)(uobj, 456 va - ufi->entry->start + ufi->entry->offset, 457 &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_LOCKED); 458 } else { 459 error = EIO; /* must have pgo_get op */ 460 } 461 462 /* 463 * check the result of the locked pgo_get. if there is a problem, 464 * then we fail the loan. 
465 */ 466 467 if (error && error != EBUSY) { 468 uvmfault_unlockall(ufi, amap, uobj, NULL); 469 return (-1); 470 } 471 472 /* 473 * if we need to unlock for I/O, do so now. 474 */ 475 476 if (error == EBUSY) { 477 uvmfault_unlockall(ufi, amap, NULL, NULL); 478 479 /* locked: uobj */ 480 npages = 1; 481 error = (*uobj->pgops->pgo_get)(uobj, 482 va - ufi->entry->start + ufi->entry->offset, 483 &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_SYNCIO); 484 /* locked: <nothing> */ 485 486 if (error) { 487 if (error == EAGAIN) { 488 tsleep(&lbolt, PVM, "fltagain2", 0); 489 return (0); 490 } 491 return (-1); 492 } 493 494 /* 495 * pgo_get was a success. attempt to relock everything. 496 */ 497 498 locked = uvmfault_relock(ufi); 499 if (locked && amap) 500 amap_lock(amap); 501 simple_lock(&uobj->vmobjlock); 502 503 /* 504 * verify that the page has not be released and re-verify 505 * that amap slot is still free. if there is a problem we 506 * drop our lock (thus force a lookup refresh/retry). 507 */ 508 509 if ((pg->flags & PG_RELEASED) != 0 || 510 (locked && amap && amap_lookup(&ufi->entry->aref, 511 ufi->orig_rvaddr - ufi->entry->start))) { 512 if (locked) 513 uvmfault_unlockall(ufi, amap, NULL, NULL); 514 locked = FALSE; 515 } 516 517 /* 518 * didn't get the lock? release the page and retry. 519 */ 520 521 if (locked == FALSE) { 522 if (pg->flags & PG_WANTED) { 523 wakeup(pg); 524 } 525 if (pg->flags & PG_RELEASED) { 526 uvm_pagefree(pg); 527 return (0); 528 } 529 uvm_lock_pageq(); 530 uvm_pageactivate(pg); 531 uvm_unlock_pageq(); 532 pg->flags &= ~(PG_BUSY|PG_WANTED); 533 UVM_PAGE_OWN(pg, NULL); 534 simple_unlock(&uobj->vmobjlock); 535 return (0); 536 } 537 } 538 539 /* 540 * at this point we have the page we want ("pg") marked PG_BUSY for us 541 * and we have all data structures locked. do the loanout. page can 542 * not be PG_RELEASED (we caught this above). 
543 */ 544 545 if ((flags & UVM_LOAN_TOANON) == 0) { 546 uvm_lock_pageq(); 547 if (pg->loan_count == 0) { 548 pmap_page_protect(pg, VM_PROT_READ); 549 } 550 pg->loan_count++; 551 uvm_pagedequeue(pg); 552 uvm_unlock_pageq(); 553 if (pg->flags & PG_WANTED) { 554 wakeup(pg); 555 } 556 pg->flags &= ~(PG_WANTED|PG_BUSY); 557 UVM_PAGE_OWN(pg, NULL); 558 **output = pg; 559 (*output)++; 560 return (1); 561 } 562 563 /* 564 * must be a loan to an anon. check to see if there is already 565 * an anon associated with this page. if so, then just return 566 * a reference to this object. the page should already be 567 * mapped read-only because it is already on loan. 568 */ 569 570 if (pg->uanon) { 571 anon = pg->uanon; 572 simple_lock(&anon->an_lock); 573 anon->an_ref++; 574 simple_unlock(&anon->an_lock); 575 if (pg->flags & PG_WANTED) { 576 wakeup(pg); 577 } 578 pg->flags &= ~(PG_WANTED|PG_BUSY); 579 UVM_PAGE_OWN(pg, NULL); 580 **output = anon; 581 (*output)++; 582 return (1); 583 } 584 585 /* 586 * need to allocate a new anon 587 */ 588 589 anon = uvm_analloc(); 590 if (anon == NULL) { 591 if (pg->flags & PG_WANTED) { 592 wakeup(pg); 593 } 594 pg->flags &= ~(PG_WANTED|PG_BUSY); 595 UVM_PAGE_OWN(pg, NULL); 596 uvmfault_unlockall(ufi, amap, uobj, NULL); 597 return (-1); 598 } 599 anon->u.an_page = pg; 600 pg->uanon = anon; 601 uvm_lock_pageq(); 602 if (pg->loan_count == 0) { 603 pmap_page_protect(pg, VM_PROT_READ); 604 } 605 pg->loan_count++; 606 uvm_pageactivate(pg); 607 uvm_unlock_pageq(); 608 if (pg->flags & PG_WANTED) { 609 wakeup(pg); 610 } 611 pg->flags &= ~(PG_WANTED|PG_BUSY); 612 UVM_PAGE_OWN(pg, NULL); 613 simple_unlock(&anon->an_lock); 614 **output = anon; 615 (*output)++; 616 return (1); 617 } 618 619 /* 620 * uvm_loanzero: "loan" a zero-fill page out 621 * 622 * => called with map, amap, uobj locked 623 * => return value: 624 * -1 = fatal error, everything is unlocked, abort. 
625 * 0 = lookup in ufi went stale, everything unlocked, relookup and 626 * try again 627 * 1 = got it, everything still locked 628 */ 629 630 static int 631 uvm_loanzero(ufi, output, flags) 632 struct uvm_faultinfo *ufi; 633 void ***output; 634 int flags; 635 { 636 struct vm_anon *anon; 637 struct vm_page *pg; 638 struct uvm_object *uobj = ufi->entry->object.uvm_obj; 639 struct vm_amap *amap = ufi->entry->aref.ar_amap; 640 641 if ((flags & UVM_LOAN_TOANON) == 0) { /* loaning to kernel-page */ 642 while ((pg = uvm_pagealloc(NULL, 0, NULL, 643 UVM_PGA_ZERO)) == NULL) { 644 uvmfault_unlockall(ufi, amap, uobj, NULL); 645 uvm_wait("loanzero1"); 646 if (!uvmfault_relock(ufi)) { 647 return (0); 648 } 649 if (amap) { 650 amap_lock(amap); 651 } 652 if (uobj) { 653 simple_lock(&uobj->vmobjlock); 654 } 655 } 656 657 /* got a zero'd page; return */ 658 pg->flags &= ~(PG_WANTED|PG_BUSY); 659 UVM_PAGE_OWN(pg, NULL); 660 **output = pg; 661 (*output)++; 662 pg->loan_count = 1; 663 return (1); 664 } 665 666 /* loaning to an anon */ 667 while ((anon = uvm_analloc()) == NULL || 668 (pg = uvm_pagealloc(NULL, 0, anon, UVM_PGA_ZERO)) == NULL) { 669 uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, anon); 670 671 /* out of swap causes us to fail */ 672 if (anon == NULL) { 673 return (-1); 674 } 675 676 /* 677 * drop our reference; we're the only one, 678 * so it's okay that the anon isn't locked 679 * here. 
680 */ 681 682 anon->an_ref--; 683 uvm_anfree(anon); 684 uvm_wait("loanzero2"); /* wait for pagedaemon */ 685 686 if (!uvmfault_relock(ufi)) { 687 /* map changed while unlocked, need relookup */ 688 return (0); 689 } 690 691 /* relock everything else */ 692 if (amap) { 693 amap_lock(amap); 694 } 695 if (uobj) { 696 simple_lock(&uobj->vmobjlock); 697 } 698 } 699 700 /* got a zero'd page; return */ 701 pg->flags &= ~(PG_BUSY|PG_FAKE); 702 UVM_PAGE_OWN(pg, NULL); 703 uvm_lock_pageq(); 704 uvm_pageactivate(pg); 705 uvm_unlock_pageq(); 706 **output = anon; 707 (*output)++; 708 return (1); 709 } 710 711 712 /* 713 * uvm_unloananon: kill loans on anons (basically a normal ref drop) 714 * 715 * => we expect all our resources to be unlocked 716 */ 717 718 static void 719 uvm_unloananon(aloans, nanons) 720 struct vm_anon **aloans; 721 int nanons; 722 { 723 struct vm_anon *anon; 724 725 while (nanons-- > 0) { 726 int refs; 727 728 anon = *aloans++; 729 simple_lock(&anon->an_lock); 730 refs = --anon->an_ref; 731 simple_unlock(&anon->an_lock); 732 733 if (refs == 0) { 734 uvm_anfree(anon); 735 } 736 } 737 } 738 739 /* 740 * uvm_unloanpage: kill loans on pages loaned out to the kernel 741 * 742 * => we expect all our resources to be unlocked 743 */ 744 745 static void 746 uvm_unloanpage(ploans, npages) 747 struct vm_page **ploans; 748 int npages; 749 { 750 struct vm_page *pg; 751 struct simplelock *slock; 752 753 uvm_lock_pageq(); 754 while (npages-- > 0) { 755 pg = *ploans++; 756 757 /* 758 * do a little dance to acquire the object or anon lock 759 * as appropriate. we are locking in the wrong order, 760 * so we have to do a try-lock here. 
761 */ 762 763 slock = NULL; 764 while (pg->uobject != NULL || pg->uanon != NULL) { 765 if (pg->uobject != NULL) { 766 slock = &pg->uobject->vmobjlock; 767 } else { 768 slock = &pg->uanon->an_lock; 769 } 770 if (simple_lock_try(slock)) { 771 break; 772 } 773 uvm_unlock_pageq(); 774 uvm_lock_pageq(); 775 slock = NULL; 776 } 777 778 /* 779 * drop our loan. if page is owned by an anon but 780 * PQ_ANON is not set, the page was loaned to the anon 781 * from an object which dropped ownership, so resolve 782 * this by turning the anon's loan into real ownership 783 * (ie. decrement loan_count again and set PQ_ANON). 784 * after all this, if there are no loans left, put the 785 * page back a paging queue (if the page is owned by 786 * an anon) or free it (if the page is now unowned). 787 */ 788 789 KASSERT(pg->loan_count > 0); 790 pg->loan_count--; 791 if (pg->uobject == NULL && pg->uanon != NULL && 792 (pg->pqflags & PQ_ANON) == 0) { 793 KASSERT(pg->loan_count > 0); 794 pg->loan_count--; 795 pg->pqflags |= PQ_ANON; 796 } 797 if (pg->loan_count == 0) { 798 if (pg->uobject == NULL && pg->uanon == NULL) { 799 KASSERT((pg->flags & PG_BUSY) == 0); 800 uvm_pagefree(pg); 801 } else { 802 uvm_pageactivate(pg); 803 } 804 } 805 if (slock != NULL) { 806 simple_unlock(slock); 807 } 808 } 809 uvm_unlock_pageq(); 810 } 811 812 /* 813 * uvm_unloan: kill loans on pages or anons. 814 */ 815 816 void 817 uvm_unloan(void *v, int npages, int flags) 818 { 819 if (flags & UVM_LOAN_TOANON) { 820 uvm_unloananon(v, npages); 821 } else { 822 uvm_unloanpage(v, npages); 823 } 824 } 825