/*	$NetBSD: uvm_aobj.c,v 1.52 2002/11/24 11:50:32 scw Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 *                    Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */
/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq@chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.52 2002/11/24 11:50:32 scw Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/pool.h>

#include <uvm/uvm.h>

/*
 * an aobj manages anonymous-memory backed uvm_objects.  in addition
 * to keeping the list of resident pages, it also keeps a list of
 * allocated swap blocks.  depending on the size of the aobj this list
 * of allocated swap blocks is either stored in an array (small objects)
 * or in a hash table (large objects).
 */

/*
 * local structures
 */

/*
 * for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages.  we require the cluster size to
 * be a power of two.
 */

#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)

/* get the "tag" for this page index */
#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
        ((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)

/* given an ELT and a page index, find the swap slot */
#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
        ((ELT)->slots[(PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1)])

/* given an ELT, return its pageidx base */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
        ((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/*
 * the swhash hash function
 */

#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
        (&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
                            & (AOBJ)->u_swhashmask)])
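/*
 * for example, with UAO_SWHASH_CLUSTER_SHIFT == 4, page index 0x25
 * carries tag 0x2 (0x25 >> 4) and occupies slots[0x5] (0x25 & 0xf)
 * of the element with that tag; the bucket holding the element is
 * chosen by masking the cluster number with u_swhashmask.
 */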
/*
 * the swhash threshold determines if we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */

#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(AOBJ) \
        ((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)        /* use hash? */

/*
 * the number of buckets in a swhash, with an upper bound
 */

#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(AOBJ) \
        (MIN((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
             UAO_SWHASH_MAXBUCKETS))
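/*
 * concretely: an aobj of 64 pages or fewer keeps its slots in a flat
 * array indexed by page number; anything larger gets a hash table
 * with u_pages >> 4 buckets, capped at 256.
 */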
/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
        LIST_ENTRY(uao_swhash_elt) list;        /* the hash list */
        voff_t tag;                             /* our 'tag' */
        int count;                              /* our number of active slots */
        int slots[UAO_SWHASH_CLUSTER_SIZE];     /* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures
 */

struct pool uao_swhash_elt_pool;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *    (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
        struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
        int u_pages;             /* number of pages in entire object */
        int u_flags;             /* the flags (see uvm_aobj.h) */
        int *u_swslots;          /* array of offset->swapslot mappings */
                                 /*
                                  * hashtable of offset->swapslot mappings
                                  * (u_swhash is an array of bucket heads)
                                  */
        struct uao_swhash *u_swhash;
        u_long u_swhashmask;            /* mask for hashtable */
        LIST_ENTRY(uvm_aobj) u_list;    /* global list of aobjs */
};

/*
 * uvm_aobj_pool: pool of uvm_aobj structures
 */

struct pool uvm_aobj_pool;

/*
 * local functions
 */

static struct uao_swhash_elt *uao_find_swhash_elt
    __P((struct uvm_aobj *, int, boolean_t));

static void     uao_free __P((struct uvm_aobj *));
static int      uao_get __P((struct uvm_object *, voff_t, struct vm_page **,
                    int *, int, vm_prot_t, int, int));
static int      uao_put __P((struct uvm_object *, voff_t, voff_t, int));
static boolean_t uao_pagein __P((struct uvm_aobj *, int, int));
static boolean_t uao_pagein_page __P((struct uvm_aobj *, int));

/*
 * aobj_pager
 *
 * note that some operations (e.g. init and fault) are not needed
 * here and are left NULL.
 */

struct uvm_pagerops aobj_pager = {
        NULL,                   /* init */
        uao_reference,          /* reference */
        uao_detach,             /* detach */
        NULL,                   /* fault */
        uao_get,                /* get */
        uao_put,                /* flush */
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
static struct simplelock uao_list_lock;
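/*
 * the rest of UVM reaches these routines indirectly through the pgops
 * pointer in the uvm_object (e.g. uobj->pgops->pgo_get() from the
 * fault code); nothing outside this file needs to know the layout of
 * struct uvm_aobj itself.
 */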
/*
 * functions
 */

/*
 * hash table/array related functions
 */

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(aobj, pageidx, create)
        struct uvm_aobj *aobj;
        int pageidx;
        boolean_t create;
{
        struct uao_swhash *swhash;
        struct uao_swhash_elt *elt;
        voff_t page_tag;

        swhash = UAO_SWHASH_HASH(aobj, pageidx);
        page_tag = UAO_SWHASH_ELT_TAG(pageidx);

        /*
         * now search the bucket for the requested tag
         */

        LIST_FOREACH(elt, swhash, list) {
                if (elt->tag == page_tag) {
                        return elt;
                }
        }
        if (!create) {
                return NULL;
        }

        /*
         * allocate a new entry for the bucket and init/insert it in
         */

        elt = pool_get(&uao_swhash_elt_pool, PR_NOWAIT);
        if (elt == NULL) {
                return NULL;
        }
        LIST_INSERT_HEAD(swhash, elt, list);
        elt->tag = page_tag;
        elt->count = 0;
        memset(elt->slots, 0, sizeof(elt->slots));
        return elt;
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */

int
uao_find_swslot(uobj, pageidx)
        struct uvm_object *uobj;
        int pageidx;
{
        struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
        struct uao_swhash_elt *elt;

        /*
         * if noswap flag is set, then we never return a slot
         */

        if (aobj->u_flags & UAO_FLAG_NOSWAP)
                return(0);

        /*
         * if hashing, look in hash table.
         */

        if (UAO_USES_SWHASH(aobj)) {
                elt = uao_find_swhash_elt(aobj, pageidx, FALSE);
                if (elt)
                        return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
                else
                        return(0);
        }

        /*
         * otherwise, look in the array
         */

        return(aobj->u_swslots[pageidx]);
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 * => we return the old slot number, or -1 if we failed to allocate
 *    memory to record the new slot number
 */

int
uao_set_swslot(uobj, pageidx, slot)
        struct uvm_object *uobj;
        int pageidx, slot;
{
        struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
        struct uao_swhash_elt *elt;
        int oldslot;
        UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
        UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
            aobj, pageidx, slot, 0);

        /*
         * if noswap flag is set, then we can't set a non-zero slot.
         */

        if (aobj->u_flags & UAO_FLAG_NOSWAP) {
                if (slot == 0)
                        return(0);

                printf("uao_set_swslot: uobj = %p\n", uobj);
                panic("uao_set_swslot: NOSWAP object");
        }

        /*
         * are we using a hash table?  if so, add it in the hash.
         */

        if (UAO_USES_SWHASH(aobj)) {

                /*
                 * avoid allocating an entry just to free it again if
                 * the page had no swap slot in the first place, and
                 * we are freeing.
                 */

                elt = uao_find_swhash_elt(aobj, pageidx, slot != 0);
                if (elt == NULL) {
                        return slot ? -1 : 0;
                }

                oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
                UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

                /*
                 * now adjust the elt's reference counter and free it if we've
                 * dropped it to zero.
                 */

                if (slot) {
                        if (oldslot == 0)
                                elt->count++;
                } else {
                        if (oldslot)
                                elt->count--;

                        if (elt->count == 0) {
                                LIST_REMOVE(elt, list);
                                pool_put(&uao_swhash_elt_pool, elt);
                        }
                }
        } else {
                /* we are using an array */
                oldslot = aobj->u_swslots[pageidx];
                aobj->u_swslots[pageidx] = slot;
        }
        return (oldslot);
}
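/*
 * the usual way to release swap space is therefore a two-step
 * pattern (exactly what uao_dropswap() below does): zero the slot
 * mapping, then free the swap space the old slot named:
 *
 *      slot = uao_set_swslot(uobj, pageidx, 0);
 *      if (slot)
 *              uvm_swap_free(slot, 1);
 */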
/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */

static void
uao_free(aobj)
        struct uvm_aobj *aobj;
{
        int swpgonlydelta = 0;

        simple_unlock(&aobj->u_obj.vmobjlock);
        if (UAO_USES_SWHASH(aobj)) {
                int i, hashbuckets = aobj->u_swhashmask + 1;

                /*
                 * free the swslots from each hash bucket,
                 * then the hash bucket, and finally the hash table itself.
                 */

                for (i = 0; i < hashbuckets; i++) {
                        struct uao_swhash_elt *elt, *next;

                        for (elt = LIST_FIRST(&aobj->u_swhash[i]);
                             elt != NULL;
                             elt = next) {
                                int j;

                                for (j = 0; j < UAO_SWHASH_CLUSTER_SIZE; j++) {
                                        int slot = elt->slots[j];

                                        if (slot == 0) {
                                                continue;
                                        }
                                        uvm_swap_free(slot, 1);
                                        swpgonlydelta++;
                                }

                                next = LIST_NEXT(elt, list);
                                pool_put(&uao_swhash_elt_pool, elt);
                        }
                }
                free(aobj->u_swhash, M_UVMAOBJ);
        } else {
                int i;

                /*
                 * free the array
                 */

                for (i = 0; i < aobj->u_pages; i++) {
                        int slot = aobj->u_swslots[i];

                        if (slot) {
                                uvm_swap_free(slot, 1);
                                swpgonlydelta++;
                        }
                }
                free(aobj->u_swslots, M_UVMAOBJ);
        }

        /*
         * finally free the aobj itself
         */

        pool_put(&uvm_aobj_pool, aobj);

        /*
         * adjust the counter of pages only in swap for all
         * the swap slots we've freed.
         */

        if (swpgonlydelta > 0) {
                simple_lock(&uvm.swap_data_lock);
                KASSERT(uvmexp.swpgonly >= swpgonlydelta);
                uvmexp.swpgonly -= swpgonlydelta;
                simple_unlock(&uvm.swap_data_lock);
        }
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *      UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *      UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
 */

struct uvm_object *
uao_create(size, flags)
        vsize_t size;
        int flags;
{
        static struct uvm_aobj kernel_object_store;
        static int kobj_alloced = 0;
        int pages = round_page(size) >> PAGE_SHIFT;
        struct uvm_aobj *aobj;

        /*
         * malloc a new aobj unless we are asked for the kernel object
         */

        if (flags & UAO_FLAG_KERNOBJ) {
                KASSERT(!kobj_alloced);
                aobj = &kernel_object_store;
                aobj->u_pages = pages;
                aobj->u_flags = UAO_FLAG_NOSWAP;
                aobj->u_obj.uo_refs = UVM_OBJ_KERN;
                kobj_alloced = UAO_FLAG_KERNOBJ;
        } else if (flags & UAO_FLAG_KERNSWAP) {
                KASSERT(kobj_alloced == UAO_FLAG_KERNOBJ);
                aobj = &kernel_object_store;
                kobj_alloced = UAO_FLAG_KERNSWAP;
        } else {
                aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
                aobj->u_pages = pages;
                aobj->u_flags = 0;
                aobj->u_obj.uo_refs = 1;
        }

        /*
         * allocate hash/array if necessary
         *
         * note: in the KERNSWAP case there is no need to worry about
         * locking, since we are still booting and should be the only
         * thread around.
         */

        if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
                int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
                    M_NOWAIT : M_WAITOK;

                /* allocate hash table or array depending on object size */
                if (UAO_USES_SWHASH(aobj)) {
                        aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
                            HASH_LIST, M_UVMAOBJ, mflags, &aobj->u_swhashmask);
                        if (aobj->u_swhash == NULL)
                                panic("uao_create: hashinit swhash failed");
                } else {
                        aobj->u_swslots = malloc(pages * sizeof(int),
                            M_UVMAOBJ, mflags);
                        if (aobj->u_swslots == NULL)
                                panic("uao_create: malloc swslots failed");
                        memset(aobj->u_swslots, 0, pages * sizeof(int));
                }

                if (flags) {
                        aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
                        return(&aobj->u_obj);
                }
        }

        /*
         * init aobj fields
         */

        simple_lock_init(&aobj->u_obj.vmobjlock);
        aobj->u_obj.pgops = &aobj_pager;
        TAILQ_INIT(&aobj->u_obj.memq);
        aobj->u_obj.uo_npages = 0;

        /*
         * now that aobj is ready, add it to the global list
         */

        simple_lock(&uao_list_lock);
        LIST_INSERT_HEAD(&uao_list, aobj, u_list);
        simple_unlock(&uao_list_lock);
        return(&aobj->u_obj);
}
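/*
 * note how the kernel object is created in two steps: an early
 * UAO_FLAG_KERNOBJ call leaves UAO_FLAG_NOSWAP set (no swap is
 * configured yet), and a later UAO_FLAG_KERNSWAP call on the same
 * object allocates the slot storage and clears the noswap flag once
 * swapping has become possible.
 */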
/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */

void
uao_init(void)
{
        static int uao_initialized;

        if (uao_initialized)
                return;
        uao_initialized = TRUE;
        LIST_INIT(&uao_list);
        simple_lock_init(&uao_list_lock);

        /*
         * NOTE: Pages for this pool must not come from a pageable
         * kernel map!
         */

        pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
            0, 0, 0, "uaoeltpl", NULL);
        pool_init(&uvm_aobj_pool, sizeof(struct uvm_aobj), 0, 0, 0,
            "aobjpl", &pool_allocator_nointr);
}

/*
 * uao_reference: add a ref to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */

void
uao_reference(uobj)
        struct uvm_object *uobj;
{
        simple_lock(&uobj->vmobjlock);
        uao_reference_locked(uobj);
        simple_unlock(&uobj->vmobjlock);
}

/*
 * uao_reference_locked: add a ref to an aobj that is already locked
 *
 * => aobj must be locked
 * this needs to be separate from the normal routine
 * since sometimes we need to add a reference to an aobj when
 * it's already locked.
 */

void
uao_reference_locked(uobj)
        struct uvm_object *uobj;
{
        UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);

        /*
         * kernel_object already has plenty of references, leave it alone.
         */

        if (UVM_OBJ_IS_KERN_OBJECT(uobj))
                return;

        uobj->uo_refs++;
        UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
            uobj, uobj->uo_refs,0,0);
}

/*
 * uao_detach: drop a reference to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */

void
uao_detach(uobj)
        struct uvm_object *uobj;
{
        simple_lock(&uobj->vmobjlock);
        uao_detach_locked(uobj);
}

/*
 * uao_detach_locked: drop a reference to an aobj
 *
 * => aobj must be locked, and is unlocked (or freed) upon return.
 * this needs to be separate from the normal routine
 * since sometimes we need to detach from an aobj when
 * it's already locked.
 */

void
uao_detach_locked(uobj)
        struct uvm_object *uobj;
{
        struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
        struct vm_page *pg;
        UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

        /*
         * detaching from kernel_object is a noop.
         */

        if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
                simple_unlock(&uobj->vmobjlock);
                return;
        }

        UVMHIST_LOG(maphist,"  (uobj=0x%x)  ref=%d", uobj,uobj->uo_refs,0,0);
        uobj->uo_refs--;
        if (uobj->uo_refs) {
                simple_unlock(&uobj->vmobjlock);
                UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
                return;
        }

        /*
         * remove the aobj from the global list.
         */

        simple_lock(&uao_list_lock);
        LIST_REMOVE(aobj, u_list);
        simple_unlock(&uao_list_lock);

        /*
         * free all the pages left in the aobj.  for each page,
         * when the page is no longer busy (and thus after any disk i/o that
         * it's involved in is complete), release any swap resources and
         * free the page itself.
         */

        uvm_lock_pageq();
        while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL) {
                pmap_page_protect(pg, VM_PROT_NONE);
                if (pg->flags & PG_BUSY) {
                        pg->flags |= PG_WANTED;
                        uvm_unlock_pageq();
                        UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, FALSE,
                            "uao_det", 0);
                        simple_lock(&uobj->vmobjlock);
                        uvm_lock_pageq();
                        continue;
                }
                uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
                uvm_pagefree(pg);
        }
        uvm_unlock_pageq();

        /*
         * finally, free the aobj itself.
         */

        uao_free(aobj);
}
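/*
 * the wait loop above uses the standard UVM busy-page handshake: we
 * set PG_WANTED and sleep on the page with the object unlocked;
 * whoever clears PG_BUSY is responsible for wakeup(pg), after which
 * we must re-lock and re-fetch the list head, since anything may
 * have changed while we slept.
 */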
/*
 * uao_put: flush pages out of a uvm object
 *
 * => object should be locked by caller.  we may _unlock_ the object
 *      if (and only if) we need to clean a page (PGO_CLEANIT).
 *      XXXJRT Currently, however, we don't.  In the case of cleaning
 *      XXXJRT a page, we simply just deactivate it.  Should probably
 *      XXXJRT handle this better, in the future (although "flushing"
 *      XXXJRT anonymous memory isn't terribly important).
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *      nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
 *      for flushing.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *      that new pages are inserted on the tail end of the list.  thus,
 *      we can make a complete pass through the object in one go by starting
 *      at the head and working towards the tail (new pages are put in
 *      front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *      must not be holding the lock on them [e.g. pagedaemon had
 *      better not call us with the queues locked]
 * => we return 0 unless we encountered some sort of I/O error
 *      XXXJRT currently never happens, as we never directly initiate
 *      XXXJRT I/O
 *
 * note on page traversal:
 *      we can traverse the pages in an object either by going down the
 *      linked list in "uobj->memq", or we can go over the address range
 *      by page doing hash table lookups for each address.  depending
 *      on how many pages are in the object it may be cheaper to do one
 *      or the other.  we set "by_list" to true if we are using memq.
 *      if the cost of a hash lookup was equal to the cost of the list
 *      traversal we could compare the number of pages in the start->stop
 *      range to the total number of pages in the object.  however, it
 *      seems that a hash table lookup is more expensive than the linked
 *      list traversal, so we multiply the number of pages in the
 *      start->stop range by a penalty which we define below.
 */

int
uao_put(uobj, start, stop, flags)
        struct uvm_object *uobj;
        voff_t start, stop;
        int flags;
{
        struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
        struct vm_page *pg, *nextpg, curmp, endmp;
        boolean_t by_list;
        voff_t curoff;
        UVMHIST_FUNC("uao_put"); UVMHIST_CALLED(maphist);

        curoff = 0;
        if (flags & PGO_ALLPAGES) {
                start = 0;
                stop = aobj->u_pages << PAGE_SHIFT;
                by_list = TRUE;         /* always go by the list */
        } else {
                start = trunc_page(start);
                stop = round_page(stop);
                if (stop > (aobj->u_pages << PAGE_SHIFT)) {
                        printf("uao_put: strange, got an out of range "
                            "flush (fixed)\n");
                        stop = aobj->u_pages << PAGE_SHIFT;
                }
                by_list = (uobj->uo_npages <=
                    ((stop - start) >> PAGE_SHIFT) * UVM_PAGE_HASH_PENALTY);
        }
        UVMHIST_LOG(maphist,
            " flush start=0x%lx, stop=0x%x, by_list=%d, flags=0x%x",
            start, stop, by_list, flags);

        /*
         * Don't need to do any work here if we're not freeing
         * or deactivating pages.
         */

        if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
                simple_unlock(&uobj->vmobjlock);
                return 0;
        }

        /*
         * Initialize the marker pages.  See the comment in
         * genfs_putpages() also.
         */

        curmp.uobject = uobj;
        curmp.offset = (voff_t)-1;
        curmp.flags = PG_BUSY;
        endmp.uobject = uobj;
        endmp.offset = (voff_t)-1;
        endmp.flags = PG_BUSY;
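        /*
         * the markers are dummy pages that only we may remove: endmp
         * is queued at the tail so that pages appended while we work
         * are not visited, and curmp is slipped in just before a busy
         * page while we sleep on it, so we can recover our list
         * position afterwards.
         */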
        /*
         * now do it.  note: we must update nextpg in the body of the loop
         * or we will get stuck.  we need to use nextpg if we'll traverse
         * the list because we may free "pg" before doing the next loop.
         */

        if (by_list) {
                TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq);
                nextpg = TAILQ_FIRST(&uobj->memq);
                PHOLD(curproc);
        } else {
                curoff = start;
                nextpg = NULL;  /* Quell compiler warning */
        }

        uvm_lock_pageq();

        /* locked: both page queues and uobj */
        for (;;) {
                if (by_list) {
                        pg = nextpg;
                        if (pg == &endmp)
                                break;
                        nextpg = TAILQ_NEXT(pg, listq);
                        if (pg->offset < start || pg->offset >= stop)
                                continue;
                } else {
                        if (curoff < stop) {
                                pg = uvm_pagelookup(uobj, curoff);
                                curoff += PAGE_SIZE;
                        } else
                                break;
                        if (pg == NULL)
                                continue;
                }
                switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {

                /*
                 * XXX In these first 3 cases, we always just
                 * XXX deactivate the page.  We may want to
                 * XXX handle the different cases more specifically
                 * XXX in the future.
                 */

                case PGO_CLEANIT|PGO_FREE:
                case PGO_CLEANIT|PGO_DEACTIVATE:
                case PGO_DEACTIVATE:
 deactivate_it:
                        /* skip the page if it's loaned or wired */
                        if (pg->loan_count != 0 || pg->wire_count != 0)
                                continue;

                        /* ...and deactivate the page. */
                        pmap_clear_reference(pg);
                        uvm_pagedeactivate(pg);
                        continue;

                case PGO_FREE:

                        /*
                         * If there are multiple references to
                         * the object, just deactivate the page.
                         */

                        if (uobj->uo_refs > 1)
                                goto deactivate_it;

                        /* XXX skip the page if it's loaned or wired */
                        if (pg->loan_count != 0 || pg->wire_count != 0)
                                continue;

                        /*
                         * wait and try again if the page is busy.
                         * otherwise free the swap slot and the page.
                         */

                        pmap_page_protect(pg, VM_PROT_NONE);
                        if (pg->flags & PG_BUSY) {
                                if (by_list) {
                                        TAILQ_INSERT_BEFORE(pg, &curmp, listq);
                                }
                                pg->flags |= PG_WANTED;
                                uvm_unlock_pageq();
                                UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
                                    "uao_put", 0);
                                simple_lock(&uobj->vmobjlock);
                                uvm_lock_pageq();
                                if (by_list) {
                                        nextpg = TAILQ_NEXT(&curmp, listq);
                                        TAILQ_REMOVE(&uobj->memq, &curmp,
                                            listq);
                                } else
                                        curoff -= PAGE_SIZE;
                                continue;
                        }
                        uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
                        uvm_pagefree(pg);
                        continue;
                }
        }
        uvm_unlock_pageq();
        simple_unlock(&uobj->vmobjlock);
        if (by_list) {
                TAILQ_REMOVE(&uobj->memq, &endmp, listq);
                PRELE(curproc);
        }
        return 0;
}

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return EBUSY.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */
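/*
 * (the fault code typically calls here twice: once with PGO_LOCKED to
 * pick up any resident pages cheaply, and, if that returns EBUSY for
 * the page it needs, again without PGO_LOCKED so that we may sleep
 * and do I/O.)
 */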
static int
uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
        struct uvm_object *uobj;
        voff_t offset;
        struct vm_page **pps;
        int *npagesp;
        int centeridx, advice, flags;
        vm_prot_t access_type;
{
        struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
        voff_t current_offset;
        struct vm_page *ptmp = NULL;    /* Quell compiler warning */
        int lcv, gotpages, maxpages, swslot, error, pageidx;
        boolean_t done;
        UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

        UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
            aobj, offset, flags,0);

        /*
         * get number of pages
         */

        maxpages = *npagesp;

        /*
         * step 1: handle the case where fault data structures are locked.
         */

        if (flags & PGO_LOCKED) {

                /*
                 * step 1a: get pages that are already resident.   only do
                 * this if the data structures are locked (i.e. the first
                 * time through).
                 */

                done = TRUE;    /* be optimistic */
                gotpages = 0;   /* # of pages we got so far */
                for (lcv = 0, current_offset = offset ; lcv < maxpages ;
                    lcv++, current_offset += PAGE_SIZE) {
                        /* do we care about this page?  if not, skip it */
                        if (pps[lcv] == PGO_DONTCARE)
                                continue;
                        ptmp = uvm_pagelookup(uobj, current_offset);

                        /*
                         * if page is new, attempt to allocate the page,
                         * zero-fill'd.
                         */

                        if (ptmp == NULL && uao_find_swslot(&aobj->u_obj,
                            current_offset >> PAGE_SHIFT) == 0) {
                                ptmp = uvm_pagealloc(uobj, current_offset,
                                    NULL, UVM_PGA_ZERO);
                                if (ptmp) {
                                        /* new page */
                                        ptmp->flags &= ~(PG_FAKE);
                                        ptmp->pqflags |= PQ_AOBJ;
                                        goto gotpage;
                                }
                        }

                        /*
                         * to be useful must get a non-busy page
                         */

                        if (ptmp == NULL || (ptmp->flags & PG_BUSY) != 0) {
                                if (lcv == centeridx ||
                                    (flags & PGO_ALLPAGES) != 0)
                                        /* need to do a wait or I/O! */
                                        done = FALSE;
                                continue;
                        }

                        /*
                         * useful page: busy/lock it and plug it in our
                         * result array
                         */

                        /* caller must un-busy this page */
                        ptmp->flags |= PG_BUSY;
                        UVM_PAGE_OWN(ptmp, "uao_get1");
gotpage:
                        pps[lcv] = ptmp;
                        gotpages++;
                }

                /*
                 * step 1b: now we've either done everything needed or we
                 * need to unlock and do some waiting or I/O.
                 */

                UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);
                *npagesp = gotpages;
                if (done)
                        return 0;
                else
                        return EBUSY;
        }
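        /*
         * (uao_pagein_page() below is an in-file example of a caller
         * that skips step 1 entirely: it passes flags of 0, so control
         * arrives directly at the blocking path that follows.)
         */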
        /*
         * step 2: get non-resident or busy pages.
         * object is locked.   data structures are unlocked.
         */

        for (lcv = 0, current_offset = offset ; lcv < maxpages ;
            lcv++, current_offset += PAGE_SIZE) {

                /*
                 * - skip over pages we've already gotten or don't want
                 * - skip over pages we don't _have_ to get
                 */

                if (pps[lcv] != NULL ||
                    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
                        continue;

                pageidx = current_offset >> PAGE_SHIFT;

                /*
                 * we have yet to locate the current page (pps[lcv]).   we
                 * first look for a page that is already at the current offset.
                 * if we find a page, we check to see if it is busy or
                 * released.  if that is the case, then we sleep on the page
                 * until it is no longer busy or released and repeat the
                 * lookup.    if the page we found is neither busy nor
                 * released, then we busy it (so we own it) and plug it into
                 * pps[lcv].   this 'break's the following while loop and
                 * indicates we are ready to move on to the next page in the
                 * "lcv" loop above.
                 *
                 * if we exit the while loop with pps[lcv] still set to NULL,
                 * then it means that we allocated a new busy/fake/clean page
                 * ptmp in the object and we need to do I/O to fill in the
                 * data.
                 */

                /* top of "pps" while loop */
                while (pps[lcv] == NULL) {
                        /* look for a resident page */
                        ptmp = uvm_pagelookup(uobj, current_offset);

                        /* not resident?   allocate one now (if we can) */
                        if (ptmp == NULL) {

                                ptmp = uvm_pagealloc(uobj, current_offset,
                                    NULL, 0);

                                /* out of RAM? */
                                if (ptmp == NULL) {
                                        simple_unlock(&uobj->vmobjlock);
                                        UVMHIST_LOG(pdhist,
                                            "sleeping, ptmp == NULL\n",0,0,0,0);
                                        uvm_wait("uao_getpage");
                                        simple_lock(&uobj->vmobjlock);
                                        continue;
                                }

                                /*
                                 * safe with PQ's unlocked: because we just
                                 * alloc'd the page
                                 */

                                ptmp->pqflags |= PQ_AOBJ;

                                /*
                                 * got new page ready for I/O.  break pps while
                                 * loop.  pps[lcv] is still NULL.
                                 */

                                break;
                        }

                        /* page is there, see if we need to wait on it */
                        if ((ptmp->flags & PG_BUSY) != 0) {
                                ptmp->flags |= PG_WANTED;
                                UVMHIST_LOG(pdhist,
                                    "sleeping, ptmp->flags 0x%x\n",
                                    ptmp->flags,0,0,0);
                                UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock,
                                    FALSE, "uao_get", 0);
                                simple_lock(&uobj->vmobjlock);
                                continue;
                        }

                        /*
                         * if we get here then the page has become resident and
                         * unbusy between steps 1 and 2.  we busy it now (so we
                         * own it) and set pps[lcv] (so that we exit the while
                         * loop).
                         */

                        /* we own it, caller must un-busy */
                        ptmp->flags |= PG_BUSY;
                        UVM_PAGE_OWN(ptmp, "uao_get2");
                        pps[lcv] = ptmp;
                }

                /*
                 * if we own the valid page at the correct offset, pps[lcv]
                 * will point to it.   nothing more to do except go to the
                 * next page.
                 */

                if (pps[lcv])
                        continue;                       /* next lcv */

                /*
                 * we have a "fake/busy/clean" page that we just allocated.
                 * do the needed "i/o", either reading from swap or zeroing.
                 */

                swslot = uao_find_swslot(&aobj->u_obj, pageidx);

                /*
                 * just zero the page if there's nothing in swap.
                 */

                if (swslot == 0) {

                        /*
                         * page hasn't existed before, just zero it.
                         */

                        uvm_pagezero(ptmp);
                } else {
                        UVMHIST_LOG(pdhist, "pagein from swslot %d",
                            swslot, 0,0,0);

                        /*
                         * page in the swapped-out page.
                         * unlock object for i/o, relock when done.
                         */

                        simple_unlock(&uobj->vmobjlock);
                        error = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
                        simple_lock(&uobj->vmobjlock);

                        /*
                         * I/O done.  check for errors.
                         */

                        if (error != 0) {
                                UVMHIST_LOG(pdhist, "<- done (error=%d)",
                                    error,0,0,0);
                                if (ptmp->flags & PG_WANTED)
                                        wakeup(ptmp);

                                /*
                                 * remove the swap slot from the aobj
                                 * and mark the aobj as having no real slot.
                                 * don't free the swap slot, thus preventing
                                 * it from being used again.
                                 */

                                swslot = uao_set_swslot(&aobj->u_obj, pageidx,
                                    SWSLOT_BAD);
                                if (swslot != -1) {
                                        uvm_swap_markbad(swslot, 1);
                                }

                                uvm_lock_pageq();
                                uvm_pagefree(ptmp);
                                uvm_unlock_pageq();
                                simple_unlock(&uobj->vmobjlock);
                                return error;
                        }
                }
                /*
                 * we got the page!   clear the fake flag (indicates valid
                 * data now in page) and plug into our result array.   note
                 * that page is still busy.
                 *
                 * it is the caller's job to:
                 * => check if the page is released
                 * => unbusy the page
                 * => activate the page
                 */

                ptmp->flags &= ~PG_FAKE;
                pps[lcv] = ptmp;
        }

        /*
         * finally, unlock object and return.
         */

        simple_unlock(&uobj->vmobjlock);
        UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
        return 0;
}

/*
 * uao_dropswap:  release any swap resources from this aobj page.
 *
 * => aobj must be locked or have a reference count of 0.
 */

void
uao_dropswap(uobj, pageidx)
        struct uvm_object *uobj;
        int pageidx;
{
        int slot;

        slot = uao_set_swslot(uobj, pageidx, 0);
        if (slot) {
                uvm_swap_free(slot, 1);
        }
}

/*
 * page in every page in every aobj that is paged-out to a range of swslots.
 *
 * => nothing should be locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */

boolean_t
uao_swap_off(startslot, endslot)
        int startslot, endslot;
{
        struct uvm_aobj *aobj, *nextaobj;
        boolean_t rv;

        /*
         * walk the list of all aobjs.
         */

restart:
        simple_lock(&uao_list_lock);
        for (aobj = LIST_FIRST(&uao_list);
             aobj != NULL;
             aobj = nextaobj) {

                /*
                 * try to get the object lock, start all over if we fail.
                 * most of the time we'll get the aobj lock,
                 * so this should be a rare case.
                 */

                if (!simple_lock_try(&aobj->u_obj.vmobjlock)) {
                        simple_unlock(&uao_list_lock);
                        goto restart;
                }

                /*
                 * add a ref to the aobj so it doesn't disappear
                 * while we're working.
                 */

                uao_reference_locked(&aobj->u_obj);

                /*
                 * now it's safe to unlock the uao list.
                 */

                simple_unlock(&uao_list_lock);

                /*
                 * page in any pages in the swslot range.
                 * if there's an error, abort and return the error.
                 */

                rv = uao_pagein(aobj, startslot, endslot);
                if (rv) {
                        uao_detach_locked(&aobj->u_obj);
                        return rv;
                }

                /*
                 * we're done with this aobj.
                 * relock the list and drop our ref on the aobj.
                 */

                simple_lock(&uao_list_lock);
                nextaobj = LIST_NEXT(aobj, u_list);
                uao_detach_locked(&aobj->u_obj);
        }

        /*
         * done with traversal, unlock the list
         */
        simple_unlock(&uao_list_lock);
        return FALSE;
}
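/*
 * note the lock ordering trick in uao_swap_off() above: the object
 * lock is taken with simple_lock_try() while the list lock is held,
 * and on failure we restart the whole walk rather than risk deadlock
 * against a thread that holds the object lock and wants the list
 * lock.
 */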
/*
 * page in any pages from aobj in the given range.
 *
 * => aobj must be locked and is returned locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */
static boolean_t
uao_pagein(aobj, startslot, endslot)
        struct uvm_aobj *aobj;
        int startslot, endslot;
{
        boolean_t rv;

        if (UAO_USES_SWHASH(aobj)) {
                struct uao_swhash_elt *elt;
                int bucket;

restart:
                for (bucket = aobj->u_swhashmask; bucket >= 0; bucket--) {
                        for (elt = LIST_FIRST(&aobj->u_swhash[bucket]);
                             elt != NULL;
                             elt = LIST_NEXT(elt, list)) {
                                int i;

                                for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
                                        int slot = elt->slots[i];

                                        /*
                                         * if the slot isn't in range, skip it.
                                         */

                                        if (slot < startslot ||
                                            slot >= endslot) {
                                                continue;
                                        }

                                        /*
                                         * process the page,
                                         * then start over on this object
                                         * since the swhash elt
                                         * may have been freed.
                                         */

                                        rv = uao_pagein_page(aobj,
                                            UAO_SWHASH_ELT_PAGEIDX_BASE(elt)
                                            + i);
                                        if (rv) {
                                                return rv;
                                        }
                                        goto restart;
                                }
                        }
                }
        } else {
                int i;

                for (i = 0; i < aobj->u_pages; i++) {
                        int slot = aobj->u_swslots[i];

                        /*
                         * if the slot isn't in range, skip it
                         */

                        if (slot < startslot || slot >= endslot) {
                                continue;
                        }

                        /*
                         * process the page.
                         */

                        rv = uao_pagein_page(aobj, i);
                        if (rv) {
                                return rv;
                        }
                }
        }

        return FALSE;
}

/*
 * page in a page from an aobj.  used for swap_off.
 * returns TRUE if pagein was aborted due to lack of memory.
 *
 * => aobj must be locked and is returned locked.
 */

static boolean_t
uao_pagein_page(aobj, pageidx)
        struct uvm_aobj *aobj;
        int pageidx;
{
        struct vm_page *pg;
        int rv, slot, npages;

        pg = NULL;
        npages = 1;
        /* locked: aobj */
        rv = uao_get(&aobj->u_obj, pageidx << PAGE_SHIFT,
            &pg, &npages, 0, VM_PROT_READ|VM_PROT_WRITE, 0, 0);
        /* unlocked: aobj */

        /*
         * relock and finish up.
         */

        simple_lock(&aobj->u_obj.vmobjlock);
        switch (rv) {
        case 0:
                break;

        case EIO:
        case ERESTART:

                /*
                 * nothing more to do on errors.
                 * ERESTART can only mean that the anon was freed,
                 * so again there's nothing to do.
                 */

                return FALSE;
        }

        /*
         * ok, we've got the page now.
         * mark it as dirty, clear its swslot and un-busy it.
         */

        slot = uao_set_swslot(&aobj->u_obj, pageidx, 0);
        uvm_swap_free(slot, 1);
        pg->flags &= ~(PG_BUSY|PG_CLEAN|PG_FAKE);
        UVM_PAGE_OWN(pg, NULL);

        /*
         * deactivate the page (to make sure it's on a page queue).
         */

        uvm_lock_pageq();
        uvm_pagedeactivate(pg);
        uvm_unlock_pageq();
        return FALSE;
}