/*	$OpenBSD: uvm_aobj.c,v 1.47 2009/08/06 15:28:14 oga Exp $	*/
/*	$NetBSD: uvm_aobj.c,v 1.39 2001/02/18 21:19:08 chs Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 *                    Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */
/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq@chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/pool.h>

#include <uvm/uvm.h>

/*
 * an aobj manages anonymous-memory-backed uvm_objects.  in addition
 * to keeping the list of resident pages, it also keeps a list of
 * allocated swap blocks.  depending on the size of the aobj this list
 * of allocated swap blocks is either stored in an array (small objects)
 * or in a hash table (large objects).
 */

/*
 * local structures
 */

/*
 * for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages.  we require the cluster size to
 * be a power of two.
 */

#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)

/* get the "tag" for this page index */
#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)

/* given an ELT and a page index, find the swap slot */
#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
	((ELT)->slots[(PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1)])

/* given an ELT, return its pageidx base */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/*
 * the swhash hash function
 */
#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
	    & (AOBJ)->u_swhashmask)])

/*
 * the swhash threshold determines if we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */

#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(AOBJ) \
	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)	/* use hash? */

/*
 * the number of buckets in a swhash, with an upper bound
 */
#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(AOBJ) \
	(min((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
	    UAO_SWHASH_MAXBUCKETS))
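/*
 * A worked example of the macro arithmetic above (illustrative only):
 * with UAO_SWHASH_CLUSTER_SHIFT == 4, page index 0x123 decomposes as
 *
 *	tag        = 0x123 >> 4  = 0x12  (UAO_SWHASH_ELT_TAG)
 *	slot index = 0x123 & 0xf = 0x3   (index into elt->slots[])
 *	bucket     = &u_swhash[0x12 & u_swhashmask]  (UAO_SWHASH_HASH)
 *
 * so pages 0x120-0x12f all share one uao_swhash_elt whose tag is 0x12,
 * and each elt tracks UAO_SWHASH_CLUSTER_SIZE (16) swap slots.
 */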
/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	voff_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures
 */

struct pool uao_swhash_elt_pool;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *   (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, memt, #pages, #refs */
	int u_pages;		 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	int *u_swslots;		 /* array of offset->swapslot mappings */
	/*
	 * hashtable of offset->swapslot mappings
	 * (u_swhash is an array of bucket heads)
	 */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
};
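/*
 * Because u_obj is the first member, a (struct uvm_object *) handed to
 * the pager operations can be cast directly back to its containing
 * aobj.  A sketch of the idiom used throughout this file (illustrative,
 * not compiled):
 */
#if 0
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;

	/* valid only because u_obj sits at offset 0 of struct uvm_aobj */
#endif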
/*
 * uvm_aobj_pool: pool of uvm_aobj structures
 */

struct pool uvm_aobj_pool;

/*
 * local functions
 */

static struct uao_swhash_elt	*uao_find_swhash_elt(struct uvm_aobj *, int,
				     boolean_t);
static int			 uao_find_swslot(struct uvm_aobj *, int);
static boolean_t		 uao_flush(struct uvm_object *, voff_t,
				     voff_t, int);
static void			 uao_free(struct uvm_aobj *);
static int			 uao_get(struct uvm_object *, voff_t,
				     vm_page_t *, int *, int, vm_prot_t,
				     int, int);
static boolean_t		 uao_pagein(struct uvm_aobj *, int, int);
static boolean_t		 uao_pagein_page(struct uvm_aobj *, int);

/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

struct uvm_pagerops aobj_pager = {
	NULL,			/* init */
	uao_reference,		/* reference */
	uao_detach,		/* detach */
	NULL,			/* fault */
	uao_flush,		/* flush */
	uao_get,		/* get */
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
static simple_lock_data_t uao_list_lock;

/*
 * functions
 */

/*
 * hash table/array related functions
 */

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(struct uvm_aobj *aobj, int pageidx, boolean_t create)
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	voff_t page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx); /* first hash to get bucket */
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);	 /* tag to search for */

	/*
	 * now search the bucket for the requested tag
	 */
	LIST_FOREACH(elt, swhash, list) {
		if (elt->tag == page_tag)
			return (elt);
	}

	/* fail now if we are not allowed to create a new entry in the bucket */
	if (!create)
		return (NULL);

	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */
	elt = pool_get(&uao_swhash_elt_pool, PR_WAITOK | PR_ZERO);
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;

	return (elt);
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */
__inline static int
uao_find_swslot(struct uvm_aobj *aobj, int pageidx)
{

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return (0);

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, FALSE);

		if (elt)
			return (UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
		else
			return (0);
	}

	/*
	 * otherwise, look in the array
	 */
	return (aobj->u_swslots[pageidx]);
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 */
int
uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %p pageidx %ld slot %ld",
	    aobj, pageidx, slot, 0);

	/*
	 * if noswap flag is set, then we can't set a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {

		if (slot == 0)
			return (0);		/* a clear is ok */

		/* but a set is not */
		printf("uao_set_swslot: uobj = %p\n", uobj);
		panic("uao_set_swslot: attempt to set a slot on a NOSWAP object");
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */

		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, slot ? TRUE : FALSE);
		if (elt == NULL) {
			KASSERT(slot == 0);
			return (0);
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		/* an allocation? */
		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {		/* freeing slot ... */
			if (oldslot)	/* to be safe */
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return (oldslot);
}

/*
 * end of hash/array functions
 */
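/*
 * A sketch of the bookkeeping contract (illustrative, not compiled;
 * `newslot' stands for a slot freshly allocated by the swap code):
 * uao_set_swslot() hands back the previous slot, so a caller can
 * release whatever was there before, and a slot of zero always means
 * "no swap copy of this page".
 */
#if 0
	/* object locked */
	oldslot = uao_set_swslot(uobj, pageidx, newslot);
	if (oldslot)
		uvm_swap_free(oldslot, 1);	/* drop the stale copy */

	/* later, to forget the swap copy entirely: */
	uao_dropswap(uobj, pageidx);		/* sets slot 0, frees it */
#endif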
/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */
static void
uao_free(struct uvm_aobj *aobj)
{

	simple_unlock(&aobj->u_obj.vmobjlock);

	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;

		/*
		 * free the swslots from each hash bucket,
		 * then the hash bucket, and finally the hash table itself.
		 */
		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
			    elt != NULL;
			    elt = next) {
				int j;

				for (j = 0; j < UAO_SWHASH_CLUSTER_SIZE; j++) {
					int slot = elt->slots[j];

					if (slot == 0) {
						continue;
					}
					uvm_swap_free(slot, 1);

					/*
					 * this page is no longer
					 * only in swap.
					 */
					simple_lock(&uvm.swap_data_lock);
					uvmexp.swpgonly--;
					simple_unlock(&uvm.swap_data_lock);
				}

				next = LIST_NEXT(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
		free(aobj->u_swhash, M_UVMAOBJ);
	} else {
		int i;

		/*
		 * free the array
		 */

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			if (slot) {
				uvm_swap_free(slot, 1);

				/* this page is no longer only in swap. */
				simple_lock(&uvm.swap_data_lock);
				uvmexp.swpgonly--;
				simple_unlock(&uvm.swap_data_lock);
			}
		}
		free(aobj->u_swslots, M_UVMAOBJ);
	}

	/*
	 * finally free the aobj itself
	 */
	pool_put(&uvm_aobj_pool, aobj);
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
 */
struct uvm_object *
uao_create(vsize_t size, int flags)
{
	static struct uvm_aobj kernel_object_store; /* home of kernel_object */
	static int kobj_alloced = 0;		/* not allocated yet */
	int pages = round_page(size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;

	/*
	 * allocate a new aobj unless we are asked for the kernel object
	 */
	if (flags & UAO_FLAG_KERNOBJ) {		/* want kernel object? */
		if (kobj_alloced)
			panic("uao_create: kernel object already allocated");

		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;	/* no swap to start */
		/* we are special, we never die */
		aobj->u_obj.uo_refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		aobj = &kernel_object_store;
		if (kobj_alloced != UAO_FLAG_KERNOBJ)
			panic("uao_create: asked to enable swap on kernel object");
		kobj_alloced = UAO_FLAG_KERNSWAP;
	} else {	/* normal object */
		aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
		aobj->u_pages = pages;
		aobj->u_flags = 0;		/* normal object */
		aobj->u_obj.uo_refs = 1;	/* start with 1 reference */
	}

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case there is no need to worry about
	 * locking, since we are still booting and should be the only
	 * thread around.
	 */
	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
		int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
		    M_NOWAIT : M_WAITOK;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    M_UVMAOBJ, mflags, &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			aobj->u_swslots = malloc(pages * sizeof(int),
			    M_UVMAOBJ, mflags|M_ZERO);
			if (aobj->u_swslots == NULL)
				panic("uao_create: malloc swslots failed");
		}

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return (&aobj->u_obj);
			/* done! */
		}
	}

	/*
	 * init aobj fields
	 */
	simple_lock_init(&aobj->u_obj.vmobjlock);
	aobj->u_obj.pgops = &aobj_pager;
	RB_INIT(&aobj->u_obj.memt);
	aobj->u_obj.uo_npages = 0;

	/*
	 * now that aobj is ready, add it to the global list
	 */
	simple_lock(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * done!
	 */
	return (&aobj->u_obj);
}
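/*
 * Usage sketch (illustrative, not compiled; `npages' and `kernel_size'
 * are placeholders): ordinary anonymous objects are created with flags
 * of zero and dropped with uao_detach().  The kernel object is built
 * in two boot-time steps: first NOSWAP with UAO_FLAG_KERNOBJ, then
 * made swappable with UAO_FLAG_KERNSWAP once swap is up.
 */
#if 0
	struct uvm_object *uobj;

	uobj = uao_create(ptoa(npages), 0);	/* normal aobj, one ref */
	/* ... map it, fault pages in ... */
	uao_detach(uobj);			/* drop the ref, maybe free */

	/* boot time only, in this order: */
	uao_create(kernel_size, UAO_FLAG_KERNOBJ);	/* allocate, NOSWAP */
	uao_create(kernel_size, UAO_FLAG_KERNSWAP);	/* now allow swap */
#endif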
/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */
void
uao_init(void)
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = TRUE;

	LIST_INIT(&uao_list);
	simple_lock_init(&uao_list_lock);

	/*
	 * NOTE: Pages for this pool must not come from a pageable
	 * kernel map!
	 */
	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
	    0, 0, 0, "uaoeltpl", &pool_allocator_nointr);

	pool_init(&uvm_aobj_pool, sizeof(struct uvm_aobj), 0, 0, 0,
	    "aobjpl", &pool_allocator_nointr);
}

/*
 * uao_reference: add a ref to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */
void
uao_reference(struct uvm_object *uobj)
{
	simple_lock(&uobj->vmobjlock);
	uao_reference_locked(uobj);
	simple_unlock(&uobj->vmobjlock);
}

/*
 * uao_reference_locked: add a ref to an aobj that is already locked
 *
 * => aobj must be locked
 * this needs to be separate from the normal routine
 * since sometimes we need to add a reference to an aobj when
 * it's already locked.
 */
void
uao_reference_locked(struct uvm_object *uobj)
{
	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);

	/*
	 * kernel_object already has plenty of references, leave it alone.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	uobj->uo_refs++;		/* bump! */
	UVMHIST_LOG(maphist, "<- done (uobj=%p, ref = %ld)",
	    uobj, uobj->uo_refs,0,0);
}
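/*
 * A sketch of the two entry points (illustrative, not compiled):
 * unlocked callers use the wrapper, callers already holding vmobjlock
 * use the _locked variant; both are no-ops on the kernel object.
 */
#if 0
	uao_reference(uobj);			/* unlocked: lock + bump */

	simple_lock(&uobj->vmobjlock);
	uao_reference_locked(uobj);		/* locked: just bump */
	simple_unlock(&uobj->vmobjlock);
#endif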
/*
 * uao_detach: drop a reference to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */
void
uao_detach(struct uvm_object *uobj)
{
	simple_lock(&uobj->vmobjlock);
	uao_detach_locked(uobj);
}

/*
 * uao_detach_locked: drop a reference to an aobj
 *
 * => aobj must be locked, and is unlocked (or freed) upon return.
 * this needs to be separate from the normal routine
 * since sometimes we need to detach from an aobj when
 * it's already locked.
 */
void
uao_detach_locked(struct uvm_object *uobj)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg;
	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

	/*
	 * detaching from kernel_object is a noop.
	 */
	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
		simple_unlock(&uobj->vmobjlock);
		return;
	}

	UVMHIST_LOG(maphist,"  (uobj=%p)  ref=%ld", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;				/* drop ref! */
	if (uobj->uo_refs) {				/* still more refs? */
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * remove the aobj from the global list.
	 */
	simple_lock(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * Free all pages left in the object.  If they're busy, wait
	 * for them to become available before we kill it.
	 * Release swap resources then free the page.
	 */
	uvm_lock_pageq();
	while ((pg = RB_ROOT(&uobj->memt)) != NULL) {
		if (pg->pg_flags & PG_BUSY) {
			atomic_setbits_int(&pg->pg_flags, PG_WANTED);
			uvm_unlock_pageq();
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
			    "uao_det", 0);
			simple_lock(&uobj->vmobjlock);
			uvm_lock_pageq();
			continue;
		}
		pmap_page_protect(pg, VM_PROT_NONE);
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_pagefree(pg);
	}
	uvm_unlock_pageq();

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);
}
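/*
 * The busy-page handling above is a recurring UVM idiom: mark the page
 * PG_WANTED, sleep with the object lock dropped, then retake the locks
 * and redo the lookup, because the page may have been freed or handed
 * off while we slept.  In outline (`lookup_somehow' is a placeholder;
 * this mirrors the loop above and is not separate compiled code):
 */
#if 0
	while ((pg = lookup_somehow(uobj)) != NULL) {
		if (pg->pg_flags & PG_BUSY) {
			atomic_setbits_int(&pg->pg_flags, PG_WANTED);
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
			    "wchan", 0);
			simple_lock(&uobj->vmobjlock);
			continue;	/* state changed: look up again */
		}
		/* page is stable and ours to operate on */
	}
#endif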
/*
 * uao_flush: "flush" pages out of a uvm object
 *
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 *	XXXJRT Currently, however, we don't.  In the case of cleaning
 *	XXXJRT a page, we simply just deactivate it.  Should probably
 *	XXXJRT handle this better, in the future (although "flushing"
 *	XXXJRT anonymous memory isn't terribly important).
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *	nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
 *	for flushing.
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the lock on them [e.g. pagedaemon had
 *	better not call us with the queues locked]
 * => we return TRUE unless we encountered some sort of I/O error
 *	XXXJRT currently never happens, as we never directly initiate
 *	XXXJRT I/O
 */

#define UAO_HASH_PENALTY 4	/* XXX: a guess */

boolean_t
uao_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) uobj;
	struct vm_page *pp;
	voff_t curoff;
	UVMHIST_FUNC("uao_flush"); UVMHIST_CALLED(maphist);

	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = aobj->u_pages << PAGE_SHIFT;
	} else {
		start = trunc_page(start);
		stop = round_page(stop);
		if (stop > (aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_flush: strange, got an out of range "
			    "flush (fixed)\n");
			stop = aobj->u_pages << PAGE_SHIFT;
		}
	}

	UVMHIST_LOG(maphist, " flush start=0x%lx, stop=0x%lx, flags=0x%lx",
	    (u_long)start, (u_long)stop, flags, 0);

	/*
	 * Don't need to do any work here if we're not freeing
	 * or deactivating pages.
	 */
	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		UVMHIST_LOG(maphist, "<- done (no work to do)",0,0,0,0);
		return (TRUE);
	}

	/* locked: uobj */
	curoff = start;
	for (;;) {
		if (curoff < stop) {
			pp = uvm_pagelookup(uobj, curoff);
			curoff += PAGE_SIZE;
			if (pp == NULL)
				continue;
		} else {
			break;
		}

		/* Make sure page is unbusy, else wait for it. */
		if (pp->pg_flags & PG_BUSY) {
			atomic_setbits_int(&pp->pg_flags, PG_WANTED);
			UVM_UNLOCK_AND_WAIT(pp, &uobj->vmobjlock, 0,
			    "uaoflsh", 0);
			simple_lock(&uobj->vmobjlock);
			curoff -= PAGE_SIZE;
			continue;
		}

		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
		/*
		 * XXX In these first 3 cases, we always just
		 * XXX deactivate the page.  We may want to
		 * XXX handle the different cases more specifically
		 * XXX in the future.
		 */
		case PGO_CLEANIT|PGO_FREE:
			/* FALLTHROUGH */
		case PGO_CLEANIT|PGO_DEACTIVATE:
			/* FALLTHROUGH */
		case PGO_DEACTIVATE:
 deactivate_it:
			/* skip the page if it's loaned or wired */
			if (pp->loan_count != 0 || pp->wire_count != 0)
				continue;

			uvm_lock_pageq();
			/* zap all mappings for the page. */
			pmap_page_protect(pp, VM_PROT_NONE);

			/* ...and deactivate the page. */
			uvm_pagedeactivate(pp);
			uvm_unlock_pageq();

			continue;

		case PGO_FREE:
			/*
			 * If there are multiple references to
			 * the object, just deactivate the page.
			 */
			if (uobj->uo_refs > 1)
				goto deactivate_it;

			/* XXX skip the page if it's loaned or wired */
			if (pp->loan_count != 0 || pp->wire_count != 0)
				continue;

			/* zap all mappings for the page. */
			pmap_page_protect(pp, VM_PROT_NONE);

			uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);
			uvm_lock_pageq();
			uvm_pagefree(pp);
			uvm_unlock_pageq();

			continue;

		default:
			panic("uao_flush: weird flags");
		}
	}

	UVMHIST_LOG(maphist, "<- done, rv=%ld", TRUE, 0,0,0);
	return (TRUE);
}
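/*
 * Usage sketch (illustrative, not compiled; assumes the pgo_flush
 * member name from uvm_pager.h): callers reach this routine through
 * the object's pager ops.  Freeing every page of an object that is
 * about to be thrown away might look like:
 */
#if 0
	simple_lock(&uobj->vmobjlock);
	(void) uobj->pgops->pgo_flush(uobj, 0, 0, PGO_ALLPAGES|PGO_FREE);
	simple_unlock(&uobj->vmobjlock);
#endif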
/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return VM_PAGER_UNLOCK.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */
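/*
 * The PGO_LOCKED path below is phase one of the two-phase pager-get
 * protocol: with the fault structures locked, the pager may only hand
 * back pages it can obtain without sleeping, and VM_PAGER_UNLOCK asks
 * the caller to drop its locks and retry with flags of zero, which
 * lands in step 2 where sleeping and I/O are allowed.  A caller-side
 * sketch (illustrative, not compiled; real callers go through pgo_get):
 */
#if 0
	npages = 1;
	pps[0] = NULL;
	rv = uao_get(uobj, off, pps, &npages, 0, access_type, advice,
	    PGO_LOCKED);
	if (rv == VM_PAGER_UNLOCK) {
		/* drop fault-time locks, then come back for the I/O case */
		npages = 1;
		pps[0] = NULL;
		rv = uao_get(uobj, off, pps, &npages, 0, access_type,
		    advice, 0);
	}
#endif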
static int
uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
    int *npagesp, int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	voff_t current_offset;
	vm_page_t ptmp;
	int lcv, gotpages, maxpages, swslot, rv, pageidx;
	boolean_t done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%p offset=%ld, flags=%ld",
	    aobj, (u_long)offset, flags,0);

	/*
	 * get number of pages
	 */
	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {
		/*
		 * step 1a: get pages that are already resident.  only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */

		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
			 * if page is new, attempt to allocate the page,
			 * zero-fill'd.
			 */
			if (ptmp == NULL && uao_find_swslot(aobj,
			    current_offset >> PAGE_SHIFT) == 0) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, UVM_PGA_ZERO);
				if (ptmp) {
					/* new page */
					atomic_clearbits_int(&ptmp->pg_flags,
					    PG_BUSY|PG_FAKE);
					atomic_setbits_int(&ptmp->pg_flags,
					    PQ_AOBJ);
					UVM_PAGE_OWN(ptmp, NULL);
				}
			}

			/*
			 * to be useful must get a non-busy page
			 */
			if (ptmp == NULL ||
			    (ptmp->pg_flags & PG_BUSY) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0)
					/* need to do a wait or I/O! */
					done = FALSE;
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */
			/* caller must un-busy this page */
			atomic_setbits_int(&ptmp->pg_flags, PG_BUSY);
			UVM_PAGE_OWN(ptmp, "uao_get1");
			pps[lcv] = ptmp;
			gotpages++;

		}	/* "for" lcv loop */

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%ld)", done, 0,0,0);

		*npagesp = gotpages;
		if (done)
			/* bingo! */
			return (VM_PAGER_OK);
		else
			/* EEK!  Need to unlock and I/O */
			return (VM_PAGER_UNLOCK);
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.  data structures are unlocked.
	 */

	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {

		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */

		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		pageidx = current_offset >> PAGE_SHIFT;

		/*
		 * we have yet to locate the current page (pps[lcv]).  we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].  this
		 * 'break's the following while loop and indicates we are
		 * ready to move on to the next page in the "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
		 */

		/* top of "pps" while loop */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident?  allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					UVMHIST_LOG(pdhist,
					    "sleeping, ptmp == NULL\n",0,0,0,0);
					uvm_wait("uao_getpage");
					simple_lock(&uobj->vmobjlock);
					/* goto top of pps while loop */
					continue;
				}

				/*
				 * safe with PQ's unlocked: because we just
				 * alloc'd the page
				 */
				atomic_setbits_int(&ptmp->pg_flags, PQ_AOBJ);

				/*
				 * got new page ready for I/O.  break pps while
				 * loop.  pps[lcv] is still NULL.
				 */
				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->pg_flags & PG_BUSY) != 0) {
				atomic_setbits_int(&ptmp->pg_flags, PG_WANTED);
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags 0x%lx\n",
				    ptmp->pg_flags,0,0,0);
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock,
				    FALSE, "uao_get", 0);
				simple_lock(&uobj->vmobjlock);
				continue;	/* goto top of pps while loop */
			}

			/*
			 * if we get here then the page has become resident and
			 * unbusy between steps 1 and 2.  we busy it now (so we
			 * own it) and set pps[lcv] (so that we exit the while
			 * loop).
			 */
			/* we own it, caller must un-busy */
			atomic_setbits_int(&ptmp->pg_flags, PG_BUSY);
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own the valid page at the correct offset, pps[lcv]
		 * will point to it.  nothing more to do except go to the
		 * next page.
		 */
		if (pps[lcv])
			continue;			/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o", either reading from swap or zeroing.
		 */
		swslot = uao_find_swslot(aobj, pageidx);

		/*
		 * just zero the page if there's nothing in swap.
		 */
		if (swslot == 0) {
			/*
			 * page hasn't existed before, just zero it.
			 */
			uvm_pagezero(ptmp);
		} else {
			UVMHIST_LOG(pdhist, "pagein from swslot %ld",
			    swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */
			simple_unlock(&uobj->vmobjlock);
			rv = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			simple_lock(&uobj->vmobjlock);

			/*
			 * I/O done.  check for errors.
			 */
			if (rv != VM_PAGER_OK) {
				UVMHIST_LOG(pdhist, "<- done (error=%ld)",
				    rv,0,0,0);
				if (ptmp->pg_flags & PG_WANTED)
					wakeup(ptmp);

				/*
				 * remove the swap slot from the aobj
				 * and mark the aobj as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */
				swslot = uao_set_swslot(&aobj->u_obj, pageidx,
				    SWSLOT_BAD);
				uvm_swap_markbad(swslot, 1);

				atomic_clearbits_int(&ptmp->pg_flags,
				    PG_WANTED|PG_BUSY);
				UVM_PAGE_OWN(ptmp, NULL);
				uvm_lock_pageq();
				uvm_pagefree(ptmp);
				uvm_unlock_pageq();

				simple_unlock(&uobj->vmobjlock);
				return (rv);
			}
		}

		/*
		 * we got the page!  clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.  note
		 * that page is still busy.
		 *
		 * it is the caller's job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */

		/* data is valid ... */
		atomic_clearbits_int(&ptmp->pg_flags, PG_FAKE);
		pmap_clear_modify(ptmp);		/* ... and clean */
		pps[lcv] = ptmp;

	}	/* lcv loop */

	/*
	 * finally, unlock object and return.
	 */

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return (VM_PAGER_OK);
}
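/*
 * A sketch of the caller's post-get obligations listed above
 * (illustrative, not compiled): the page comes back busy and the
 * object comes back unlocked, so the caller relocks, un-busies and
 * activates the page once done with it.
 */
#if 0
	if (rv == VM_PAGER_OK) {
		pg = pps[0];
		simple_lock(&uobj->vmobjlock);
		if (pg->pg_flags & PG_WANTED)
			wakeup(pg);
		atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_WANTED);
		UVM_PAGE_OWN(pg, NULL);
		uvm_lock_pageq();
		uvm_pageactivate(pg);
		uvm_unlock_pageq();
		simple_unlock(&uobj->vmobjlock);
	}
#endif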
/*
 * uao_dropswap: release any swap resources from this aobj page.
 *
 * => aobj must be locked or have a reference count of 0.
 */

void
uao_dropswap(struct uvm_object *uobj, int pageidx)
{
	int slot;

	slot = uao_set_swslot(uobj, pageidx, 0);
	if (slot) {
		uvm_swap_free(slot, 1);
	}
}

/*
 * page in every page in every aobj that is paged-out to a range of swslots.
 *
 * => nothing should be locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */
boolean_t
uao_swap_off(int startslot, int endslot)
{
	struct uvm_aobj *aobj, *nextaobj;

	/*
	 * walk the list of all aobjs.
	 */

restart:
	simple_lock(&uao_list_lock);

	for (aobj = LIST_FIRST(&uao_list);
	    aobj != NULL;
	    aobj = nextaobj) {
		boolean_t rv;

		/*
		 * try to get the object lock,
		 * start all over if we fail.
		 * most of the time we'll get the aobj lock,
		 * so this should be a rare case.
		 */
		if (!simple_lock_try(&aobj->u_obj.vmobjlock)) {
			simple_unlock(&uao_list_lock);
			goto restart;
		}

		/*
		 * add a ref to the aobj so it doesn't disappear
		 * while we're working.
		 */
		uao_reference_locked(&aobj->u_obj);

		/*
		 * now it's safe to unlock the uao list.
		 */
		simple_unlock(&uao_list_lock);

		/*
		 * page in any pages in the swslot range.
		 * if there's an error, abort and return the error.
		 */
		rv = uao_pagein(aobj, startslot, endslot);
		if (rv) {
			uao_detach_locked(&aobj->u_obj);
			return rv;
		}

		/*
		 * we're done with this aobj.
		 * relock the list and drop our ref on the aobj.
		 */
		simple_lock(&uao_list_lock);
		nextaobj = LIST_NEXT(aobj, u_list);
		uao_detach_locked(&aobj->u_obj);
	}

	/*
	 * done with traversal, unlock the list
	 */
	simple_unlock(&uao_list_lock);
	return FALSE;
}
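/*
 * Usage sketch (illustrative, not compiled): the swap code calls this
 * while removing a swap device, so every slot in the device's range
 * [startslot, endslot) is paged back into RAM before it disappears.
 */
#if 0
	if (uao_swap_off(startslot, endslot)) {
		/* ran out of memory: pagein aborted, device stays in use */
		return (ENOMEM);
	}
#endif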
/*
 * page in any pages from aobj in the given range.
 *
 * => aobj must be locked and is returned locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */
static boolean_t
uao_pagein(struct uvm_aobj *aobj, int startslot, int endslot)
{
	boolean_t rv;

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt;
		int bucket;

restart:
		for (bucket = aobj->u_swhashmask; bucket >= 0; bucket--) {
			for (elt = LIST_FIRST(&aobj->u_swhash[bucket]);
			    elt != NULL;
			    elt = LIST_NEXT(elt, list)) {
				int i;

				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
					int slot = elt->slots[i];

					/*
					 * if the slot isn't in range, skip it.
					 */
					if (slot < startslot ||
					    slot >= endslot) {
						continue;
					}

					/*
					 * process the page,
					 * then start over on this object
					 * since the swhash elt
					 * may have been freed.
					 */
					rv = uao_pagein_page(aobj,
					    UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
					if (rv) {
						return rv;
					}
					goto restart;
				}
			}
		}
	} else {
		int i;

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			/*
			 * if the slot isn't in range, skip it
			 */
			if (slot < startslot || slot >= endslot) {
				continue;
			}

			/*
			 * process the page.
			 */
			rv = uao_pagein_page(aobj, i);
			if (rv) {
				return rv;
			}
		}
	}

	return FALSE;
}

/*
 * page in a page from an aobj.  used for swap_off.
 * returns TRUE if pagein was aborted due to lack of memory.
 *
 * => aobj must be locked and is returned locked.
 */
static boolean_t
uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
{
	struct vm_page *pg;
	int rv, slot, npages;

	pg = NULL;
	npages = 1;
	/* locked: aobj */
	rv = uao_get(&aobj->u_obj, pageidx << PAGE_SHIFT,
	    &pg, &npages, 0, VM_PROT_READ|VM_PROT_WRITE, 0, 0);
	/* unlocked: aobj */

	/*
	 * relock and finish up.
	 */
	simple_lock(&aobj->u_obj.vmobjlock);

	switch (rv) {
	case VM_PAGER_OK:
		break;

	case VM_PAGER_ERROR:
	case VM_PAGER_REFAULT:
		/*
		 * nothing more to do on errors.
		 * VM_PAGER_REFAULT can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */
		return FALSE;
	}

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */
	slot = uao_set_swslot(&aobj->u_obj, pageidx, 0);
	uvm_swap_free(slot, 1);
	atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_CLEAN|PG_FAKE);
	UVM_PAGE_OWN(pg, NULL);

	/*
	 * deactivate the page (to put it on a page queue).
	 */
	pmap_clear_reference(pg);
#ifndef UBC
	pmap_page_protect(pg, VM_PROT_NONE);
#endif
	uvm_lock_pageq();
	uvm_pagedeactivate(pg);
	uvm_unlock_pageq();

	return FALSE;
}