/*	$OpenBSD: uvm_vnode.c,v 1.24 2001/11/10 18:42:32 art Exp $	*/
/*	$NetBSD: uvm_vnode.c,v 1.36 2000/11/24 20:34:01 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vnode_pager.c	8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_vnode.h>

/*
 * private global data structure
 *
 * we keep a list of writeable active vnode-backed VM objects for sync op.
 * we keep a simpleq of vnodes that are currently being sync'd.
 */
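
/*
 * quick reference on how the UVM_VNODE_* flags (declared in uvm_vnode.h,
 * which remains the authoritative source) are used in this file:
 *
 *	VALID		uvn is attached and usable
 *	CANPERSIST	uvn may stick around with zero references
 *	ALOCK		attach in progress, blocks other attaches
 *	DYING		uvn is being terminated
 *	RELKILL		kill uvn once all pending I/O completes
 *	BLOCKED		shorthand for (ALOCK|DYING|RELKILL)
 *	WANTED		someone is sleeping on this uvn, wakeup() on change
 *	WRITEABLE	uvn is on uvn_wlist
 *	IOSYNC		waiting for pending I/O to drain (sleep on u_nio)
 *	IOSYNCWANTED	uvn_io is waiting for an iosync to finish
 *	VNISLOCKED	our caller already holds the VOP_LOCK (see uvn_io)
 */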

LIST_HEAD(uvn_list_struct, uvm_vnode);
static struct uvn_list_struct uvn_wlist;	/* writeable uvns */
static simple_lock_data_t uvn_wl_lock;		/* locks uvn_wlist */

SIMPLEQ_HEAD(uvn_sq_struct, uvm_vnode);
static struct uvn_sq_struct uvn_sync_q;		/* sync'ing uvns */
lock_data_t uvn_sync_lock;			/* locks sync operation */

/*
 * functions
 */

static void	   uvn_cluster __P((struct uvm_object *, voff_t,
				    voff_t *, voff_t *));
static void	   uvn_detach __P((struct uvm_object *));
static boolean_t   uvn_flush __P((struct uvm_object *, voff_t,
				  voff_t, int));
static int	   uvn_get __P((struct uvm_object *, voff_t,
				vm_page_t *, int *, int,
				vm_prot_t, int, int));
static void	   uvn_init __P((void));
static int	   uvn_io __P((struct uvm_vnode *, vm_page_t *,
			       int, int, int));
static int	   uvn_put __P((struct uvm_object *, vm_page_t *,
				int, boolean_t));
static void	   uvn_reference __P((struct uvm_object *));
static boolean_t   uvn_releasepg __P((struct vm_page *,
				      struct vm_page **));

/*
 * master pager structure
 */

struct uvm_pagerops uvm_vnodeops = {
	uvn_init,
	uvn_reference,
	uvn_detach,
	NULL,			/* no specialized fault routine required */
	uvn_flush,
	uvn_get,
	uvn_put,
	uvn_cluster,
	uvm_mk_pcluster,	/* use generic version of this: see uvm_pager.c */
	uvn_releasepg,
};

/*
 * the ops!
 */

/*
 * uvn_init
 *
 * init pager private data structures.
 */

static void
uvn_init()
{

	LIST_INIT(&uvn_wlist);
	simple_lock_init(&uvn_wl_lock);
	/* note: uvn_sync_q init'd in uvm_vnp_sync() */
	lockinit(&uvn_sync_lock, PVM, "uvnsync", 0, 0);
}

/*
 * uvn_attach
 *
 * attach a vnode structure to a VM object.  if the vnode is already
 * attached, then just bump the reference count by one and return the
 * VM object.  if not already attached, attach and return the new VM obj.
 * the "accessprot" tells the max access the attaching thread wants to
 * have to our pages.
 *
 * => caller must _not_ already be holding the lock on the uvm_object.
 * => in fact, nothing should be locked so that we can sleep here.
 * => note that uvm_object is first thing in vnode structure, so their
 *    pointers are equiv.
 */
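
/*
 * minimal usage sketch (hypothetical caller, for illustration only; in
 * the real tree this is done from the mmap path, see uvm_mmap.c):
 *
 *	struct uvm_object *uobj;
 *
 *	uobj = uvn_attach((void *) vp, VM_PROT_READ|VM_PROT_WRITE);
 *	if (uobj == NULL)
 *		... no VM object: fail the mapping ...
 */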
struct uvm_object *
uvn_attach(arg, accessprot)
	void *arg;
	vm_prot_t accessprot;
{
	struct vnode *vp = arg;
	struct uvm_vnode *uvn = &vp->v_uvm;
	struct vattr vattr;
	int oldflags, result;
	struct partinfo pi;
	u_quad_t used_vnode_size;
	UVMHIST_FUNC("uvn_attach"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(vn=0x%x)", arg,0,0,0);

	used_vnode_size = (u_quad_t)0;	/* XXX gcc -Wuninitialized */

	/*
	 * first get a lock on the uvn.
	 */
	simple_lock(&uvn->u_obj.vmobjlock);
	while (uvn->u_flags & UVM_VNODE_BLOCKED) {
		printf("uvn_attach: blocked at 0x%p flags 0x%x\n",
		    uvn, uvn->u_flags);
		uvn->u_flags |= UVM_VNODE_WANTED;
		UVMHIST_LOG(maphist, "  SLEEPING on blocked vn",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(uvn, &uvn->u_obj.vmobjlock, FALSE,
		    "uvn_attach", 0);
		simple_lock(&uvn->u_obj.vmobjlock);
		UVMHIST_LOG(maphist,"  WOKE UP",0,0,0,0);
	}

	/*
	 * if we're mapping a BLK device, make sure it is a disk.
	 */
	if (vp->v_type == VBLK && bdevsw[major(vp->v_rdev)].d_type != D_DISK) {
		simple_unlock(&uvn->u_obj.vmobjlock);	/* drop lock */
		UVMHIST_LOG(maphist,"<- done (VBLK not D_DISK!)", 0,0,0,0);
		return(NULL);
	}

	/*
	 * now we have lock and uvn must not be in a blocked state.
	 * first check to see if it is already active, in which case
	 * we can bump the reference count, check to see if we need to
	 * add it to the writeable list, and then return.
	 */
	if (uvn->u_flags & UVM_VNODE_VALID) {	/* already active? */

		/* regain VREF if we were persisting */
		if (uvn->u_obj.uo_refs == 0) {
			VREF(vp);
			UVMHIST_LOG(maphist," VREF (reclaim persisting vnode)",
			    0,0,0,0);
		}
		uvn->u_obj.uo_refs++;		/* bump uvn ref! */

		/* check for new writeable uvn */
		if ((accessprot & VM_PROT_WRITE) != 0 &&
		    (uvn->u_flags & UVM_VNODE_WRITEABLE) == 0) {
			simple_lock(&uvn_wl_lock);
			LIST_INSERT_HEAD(&uvn_wlist, uvn, u_wlist);
			simple_unlock(&uvn_wl_lock);
			/* we are now on wlist! */
			uvn->u_flags |= UVM_VNODE_WRITEABLE;
		}

		/* unlock and return */
		simple_unlock(&uvn->u_obj.vmobjlock);
		UVMHIST_LOG(maphist,"<- done, refcnt=%d", uvn->u_obj.uo_refs,
		    0, 0, 0);
		return (&uvn->u_obj);
	}

	/*
	 * need to call VOP_GETATTR() to get the attributes, but that could
	 * block (due to I/O), so we want to unlock the object before calling.
	 * however, we want to keep anyone else from playing with the object
	 * while it is unlocked.   to do this we set UVM_VNODE_ALOCK which
	 * prevents anyone from attaching to the vnode until we are done with
	 * it.
	 */
	uvn->u_flags = UVM_VNODE_ALOCK;
	simple_unlock(&uvn->u_obj.vmobjlock);	/* drop lock in case we sleep */
	/* XXX: curproc? */

	if (vp->v_type == VBLK) {
		/*
		 * We could implement this as a specfs getattr call, but:
		 *
		 *	(1) VOP_GETATTR() would get the file system
		 *	    vnode operation, not the specfs operation.
		 *
		 *	(2) All we want is the size, anyhow.
		 */
		result = (*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev,
		    DIOCGPART, (caddr_t)&pi, FREAD, curproc);
		if (result == 0) {
			/* XXX should remember blocksize */
			used_vnode_size = (u_quad_t)pi.disklab->d_secsize *
			    (u_quad_t)pi.part->p_size;
		}
	} else {
		result = VOP_GETATTR(vp, &vattr, curproc->p_ucred, curproc);
		if (result == 0)
			used_vnode_size = vattr.va_size;
	}

	/* relock object */
	simple_lock(&uvn->u_obj.vmobjlock);

	if (result != 0) {
		if (uvn->u_flags & UVM_VNODE_WANTED)
			wakeup(uvn);
		uvn->u_flags = 0;
		simple_unlock(&uvn->u_obj.vmobjlock);	/* drop lock */
		UVMHIST_LOG(maphist,"<- done (VOP_GETATTR FAILED!)", 0,0,0,0);
		return(NULL);
	}

	/*
	 * make sure that the newsize fits within a vaddr_t
	 * XXX: need to revise addressing data types
	 */
#ifdef DEBUG
	if (vp->v_type == VBLK)
		printf("used_vnode_size = %llu\n", (long long)used_vnode_size);
#endif

	/*
	 * now set up the uvn.
	 */
	uvn->u_obj.pgops = &uvm_vnodeops;
	TAILQ_INIT(&uvn->u_obj.memq);
	uvn->u_obj.uo_npages = 0;
	uvn->u_obj.uo_refs = 1;			/* just us... */
	oldflags = uvn->u_flags;
	uvn->u_flags = UVM_VNODE_VALID|UVM_VNODE_CANPERSIST;
	uvn->u_nio = 0;
	uvn->u_size = used_vnode_size;

	/* if write access, we need to add it to the wlist */
	if (accessprot & VM_PROT_WRITE) {
		simple_lock(&uvn_wl_lock);
		LIST_INSERT_HEAD(&uvn_wlist, uvn, u_wlist);
		simple_unlock(&uvn_wl_lock);
		uvn->u_flags |= UVM_VNODE_WRITEABLE;	/* we are on wlist! */
	}

	/*
	 * add a reference to the vnode.   this reference will stay as long
	 * as there is a valid mapping of the vnode.   dropped when the
	 * reference count goes to zero [and we either free or persist].
	 */
	VREF(vp);
	simple_unlock(&uvn->u_obj.vmobjlock);
	if (oldflags & UVM_VNODE_WANTED)
		wakeup(uvn);

	UVMHIST_LOG(maphist,"<- done/VREF, ret 0x%x", &uvn->u_obj,0,0,0);
	return(&uvn->u_obj);
}


/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */


static void
uvn_reference(uobj)
	struct uvm_object *uobj;
{
#ifdef DEBUG
	struct uvm_vnode *uvn = (struct uvm_vnode *) uobj;
#endif
	UVMHIST_FUNC("uvn_reference"); UVMHIST_CALLED(maphist);

	simple_lock(&uobj->vmobjlock);
#ifdef DEBUG
	if ((uvn->u_flags & UVM_VNODE_VALID) == 0) {
		printf("uvn_reference: ref=%d, flags=0x%x\n",
		    uobj->uo_refs, uvn->u_flags);
		panic("uvn_reference: invalid state");
	}
#endif
	uobj->uo_refs++;
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
	    uobj, uobj->uo_refs,0,0);
	simple_unlock(&uobj->vmobjlock);
}

/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 * => this starts the detach process, but doesn't have to finish it
 *    (async i/o could still be pending).
 */
static void
uvn_detach(uobj)
	struct uvm_object *uobj;
{
	struct uvm_vnode *uvn;
	struct vnode *vp;
	int oldflags;
	UVMHIST_FUNC("uvn_detach"); UVMHIST_CALLED(maphist);

	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(maphist,"  (uobj=0x%x)  ref=%d", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;			/* drop ref! */
	if (uobj->uo_refs) {			/* still more refs */
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * get other pointers ...
	 */

	uvn = (struct uvm_vnode *) uobj;
	vp = (struct vnode *) uobj;

	/*
	 * clear VTEXT flag now that there are no mappings left (VTEXT is used
	 * to keep an active text file from being overwritten).
	 */
	vp->v_flag &= ~VTEXT;

	/*
	 * we just dropped the last reference to the uvn.   see if we can
	 * let it "stick around".
	 */

	if (uvn->u_flags & UVM_VNODE_CANPERSIST) {
		/* won't block */
		uvn_flush(uobj, 0, 0, PGO_DEACTIVATE|PGO_ALLPAGES);
		simple_unlock(&uobj->vmobjlock);
		vrele(vp);			/* drop vnode reference */
		UVMHIST_LOG(maphist,"<- done/vrele!  (persist)", 0,0,0,0);
		return;
	}

	/*
	 * it's a goner!
	 */

	UVMHIST_LOG(maphist,"  its a goner (flushing)!", 0,0,0,0);

	uvn->u_flags |= UVM_VNODE_DYING;

	/*
	 * even though we may unlock in flush, no one can gain a reference
	 * to us until we clear the "dying" flag [because it blocks
	 * attaches].  we will not do that until after we've disposed of all
	 * the pages with uvn_flush().  note that before the flush the only
	 * pages that could be marked PG_BUSY are ones that are in async
	 * pageout by the daemon.  (there can't be any pending "get"'s
	 * because there are no references to the object).
	 */

	(void) uvn_flush(uobj, 0, 0, PGO_CLEANIT|PGO_FREE|PGO_ALLPAGES);

	UVMHIST_LOG(maphist,"  its a goner (done flush)!", 0,0,0,0);

	/*
	 * given the structure of this pager, the above flush request will
	 * create the following state: all the pages that were in the object
	 * have either been free'd or they are marked PG_BUSY|PG_RELEASED.
	 * the PG_BUSY bit was set either by us or the daemon for async I/O.
	 * in either case, if we have pages left we can't kill the object
	 * yet because i/o is pending.  in this case we set the "relkill"
	 * flag which will cause pgo_releasepg to kill the object once all
	 * the I/O's are done [pgo_releasepg will be called from the aiodone
	 * routine or from the page daemon].
	 */

	if (uobj->uo_npages) {		/* I/O pending.  iodone will free */
#ifdef DEBUG
		/*
		 * XXXCDC: very unlikely to happen until we have async i/o
		 * so print a little info message in case it does.
		 */
		printf("uvn_detach: vn %p has pages left after flush - "
		    "relkill mode\n", uobj);
#endif
		uvn->u_flags |= UVM_VNODE_RELKILL;
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist,"<- done! (releasepg will kill obj)", 0, 0,
		    0, 0);
		return;
	}

	/*
	 * kill object now.   note that we can't be on the sync q because
	 * all references are gone.
	 */
	if (uvn->u_flags & UVM_VNODE_WRITEABLE) {
		simple_lock(&uvn_wl_lock);	/* protect uvn_wlist */
		LIST_REMOVE(uvn, u_wlist);
		simple_unlock(&uvn_wl_lock);
	}
#ifdef DIAGNOSTIC
	if (uobj->memq.tqh_first != NULL)
		panic("uvn_detach: vnode VM object still has pages after "
		    "syncio/free flush");
#endif
	oldflags = uvn->u_flags;
	uvn->u_flags = 0;
	simple_unlock(&uobj->vmobjlock);

	/* wake up any sleepers */
	if (oldflags & UVM_VNODE_WANTED)
		wakeup(uvn);

	/*
	 * drop our reference to the vnode.
	 */
	vrele(vp);
	UVMHIST_LOG(maphist,"<- done (vrele) final", 0,0,0,0);

	return;
}

/*
 * uvm_vnp_terminate: external hook to clear out a vnode's VM
 *
 * called in two cases:
 *  [1] when a persisting vnode vm object (i.e. one with a zero reference
 *	count) needs to be freed so that a vnode can be reused.  this
 *	happens under "getnewvnode" in vfs_subr.c.   if the vnode from
 *	the free list is still attached (i.e. not VBAD) then vgone is
 *	called.   as part of the vgone trace this should get called to
 *	free the vm object.   this is the common case.
 *  [2] when a filesystem is being unmounted by force (MNT_FORCE,
 *	"umount -f") the vgone() function is called on active vnodes
 *	on the mounted file systems to kill their data (the vnodes become
 *	"dead" ones [see src/sys/miscfs/deadfs/...]).  that results in a
 *	call here (even if the uvn is still in use -- i.e. has a non-zero
 *	reference count).  this case happens at "umount -f" and during a
 *	"reboot/halt" operation.
 *
 * => the caller must XLOCK and VOP_LOCK the vnode before calling us
 *	[protects us from getting a vnode that is already in the DYING
 *	 state...]
 * => unlike uvn_detach, this function must not return until all the
 *	uvn's pages are disposed of.
 * => in case [2] the uvn is still alive after this call, but all I/O
 *	ops will fail (due to the backing vnode now being "dead").  this
 *	will prob. kill any process using the uvn due to pgo_get failing.
 */

void
uvm_vnp_terminate(vp)
	struct vnode *vp;
{
	struct uvm_vnode *uvn = &vp->v_uvm;
	int oldflags;
	UVMHIST_FUNC("uvm_vnp_terminate"); UVMHIST_CALLED(maphist);

	/*
	 * lock object and check if it is valid
	 */
	simple_lock(&uvn->u_obj.vmobjlock);
	UVMHIST_LOG(maphist, "  vp=0x%x, ref=%d, flag=0x%x", vp,
	    uvn->u_obj.uo_refs, uvn->u_flags, 0);
	if ((uvn->u_flags & UVM_VNODE_VALID) == 0) {
		simple_unlock(&uvn->u_obj.vmobjlock);
		UVMHIST_LOG(maphist, "<- done (not active)", 0, 0, 0, 0);
		return;
	}

	/*
	 * must be a valid uvn that is not already dying (because XLOCK
	 * protects us from that).   the uvn can't be in the ALOCK state
	 * because it is valid, and uvn's that are in the ALOCK state haven't
	 * been marked valid yet.
	 */

#ifdef DEBUG
	/*
	 * debug check: are we yanking the vnode out from under our uvn?
	 */
	if (uvn->u_obj.uo_refs) {
		printf("uvm_vnp_terminate(%p): terminating active vnode "
		    "(refs=%d)\n", uvn, uvn->u_obj.uo_refs);
	}
#endif

	/*
	 * it is possible that the uvn was detached and is in the relkill
	 * state [i.e. waiting for async i/o to finish so that releasepg can
	 * kill object].  we take over the vnode now and cancel the relkill.
	 * we want to know when the i/o is done so we can recycle right
	 * away.   note that a uvn can only be in the RELKILL state if it
	 * has a zero reference count.
	 */

	if (uvn->u_flags & UVM_VNODE_RELKILL)
		uvn->u_flags &= ~UVM_VNODE_RELKILL;	/* cancel RELKILL */

	/*
	 * block the uvn by setting the dying flag, and then flush the
	 * pages.  (note that flush may unlock object while doing I/O, but
	 * it will re-lock it before it returns control here).
	 *
	 * also, note that we tell I/O that we are already VOP_LOCK'd so
	 * that uvn_io doesn't attempt to VOP_LOCK again.
	 *
	 * XXXCDC: setting VNISLOCKED on an active uvn which is being
	 * terminated due to a forceful unmount might not be a good idea.
	 * maybe we need a way to pass in this info to uvn_flush through
	 * a pager-defined PGO_ constant [currently there are none].
	 */
	uvn->u_flags |= UVM_VNODE_DYING|UVM_VNODE_VNISLOCKED;

	(void) uvn_flush(&uvn->u_obj, 0, 0, PGO_CLEANIT|PGO_FREE|PGO_ALLPAGES);

	/*
	 * as we just did a flush we expect all the pages to be gone or in
	 * the process of going.  sleep to wait for the rest to go [via iosync].
	 */

	while (uvn->u_obj.uo_npages) {
#ifdef DEBUG
		struct vm_page *pp;
		for (pp = uvn->u_obj.memq.tqh_first ; pp != NULL ;
		    pp = pp->listq.tqe_next) {
			if ((pp->flags & PG_BUSY) == 0)
				panic("uvm_vnp_terminate: detected unbusy pg");
		}
		if (uvn->u_nio == 0)
			panic("uvm_vnp_terminate: no I/O to wait for?");
		printf("uvm_vnp_terminate: waiting for I/O to fin.\n");
		/*
		 * XXXCDC: this is unlikely to happen without async i/o so we
		 * put a printf in just to keep an eye on it.
		 */
#endif
		uvn->u_flags |= UVM_VNODE_IOSYNC;
		UVM_UNLOCK_AND_WAIT(&uvn->u_nio, &uvn->u_obj.vmobjlock, FALSE,
		    "uvn_term",0);
		simple_lock(&uvn->u_obj.vmobjlock);
	}

	/*
	 * done.   now we free the uvn if its reference count is zero
	 * (true if we are zapping a persisting uvn).   however, if we are
	 * terminating a uvn with active mappings we let it live ... future
	 * calls down to the vnode layer will fail.
	 */

	oldflags = uvn->u_flags;
	if (uvn->u_obj.uo_refs) {

		/*
		 * uvn must live on in its dead-vnode state until all
		 * references are gone.   restore flags.   clear CANPERSIST
		 * state.
		 */

		uvn->u_flags &= ~(UVM_VNODE_DYING|UVM_VNODE_VNISLOCKED|
		    UVM_VNODE_WANTED|UVM_VNODE_CANPERSIST);

	} else {

		/*
		 * free the uvn now.   note that the VREF reference is already
		 * gone [it is dropped when we enter the persist state].
		 */
		if (uvn->u_flags & UVM_VNODE_IOSYNCWANTED)
			panic("uvm_vnp_terminate: io sync wanted bit set");

		if (uvn->u_flags & UVM_VNODE_WRITEABLE) {
			simple_lock(&uvn_wl_lock);
			LIST_REMOVE(uvn, u_wlist);
			simple_unlock(&uvn_wl_lock);
		}
		uvn->u_flags = 0;	/* uvn is history, clear all bits */
	}

	if (oldflags & UVM_VNODE_WANTED)
		wakeup(uvn);		/* object lock still held */

	simple_unlock(&uvn->u_obj.vmobjlock);
	UVMHIST_LOG(maphist, "<- done", 0, 0, 0, 0);

}

/*
 * uvn_releasepg: handle a released page in a uvn
 *
 * => "pg" is a PG_BUSY [caller owns it], PG_RELEASED page that we need
 *	to dispose of.
 * => caller must handle PG_WANTED case
 * => called with page's object locked, pageq's unlocked
 * => returns TRUE if page's object is still alive, FALSE if we
 *	killed the page's object.    if we return TRUE, then we
 *	return with the object locked.
 * => if (nextpgp != NULL) => we return pageq.tqe_next here, and return
 *	with the page queues locked [for pagedaemon]
 * => if (nextpgp == NULL) => we return with page queues unlocked [normal case]
 * => we kill the uvn if it is not referenced and we are supposed to
 *	kill it ("relkill").
 */

boolean_t
uvn_releasepg(pg, nextpgp)
	struct vm_page *pg;
	struct vm_page **nextpgp;	/* OUT */
{
	struct uvm_vnode *uvn = (struct uvm_vnode *) pg->uobject;
#ifdef DIAGNOSTIC
	if ((pg->flags & PG_RELEASED) == 0)
		panic("uvn_releasepg: page not released!");
#endif

	/*
	 * dispose of the page [caller handles PG_WANTED]
	 */
	pmap_page_protect(pg, VM_PROT_NONE);
	uvm_lock_pageq();
	if (nextpgp)
		*nextpgp = pg->pageq.tqe_next;	/* next page for daemon */
	uvm_pagefree(pg);
	if (!nextpgp)
		uvm_unlock_pageq();

	/*
	 * now see if we need to kill the object
	 */
	if (uvn->u_flags & UVM_VNODE_RELKILL) {
		if (uvn->u_obj.uo_refs)
			panic("uvn_releasepg: kill flag set on referenced "
			    "object!");
		if (uvn->u_obj.uo_npages == 0) {
			if (uvn->u_flags & UVM_VNODE_WRITEABLE) {
				simple_lock(&uvn_wl_lock);
				LIST_REMOVE(uvn, u_wlist);
				simple_unlock(&uvn_wl_lock);
			}
#ifdef DIAGNOSTIC
			if (uvn->u_obj.memq.tqh_first)
	panic("uvn_releasepg: pages in object with npages == 0");
#endif
			if (uvn->u_flags & UVM_VNODE_WANTED)
				/* still holding object lock */
				wakeup(uvn);

			uvn->u_flags = 0;		/* DEAD! */
			simple_unlock(&uvn->u_obj.vmobjlock);
			return (FALSE);
		}
	}
	return (TRUE);
}

/*
 * NOTE: currently we have to use VOP_READ/VOP_WRITE because they go
 * through the buffer cache and allow I/O in any size.  These VOPs use
 * synchronous i/o.  [vs. VOP_STRATEGY which can be async, but doesn't
 * go through the buffer cache or allow I/O sizes larger than a
 * block].  we will eventually want to change this.
 *
 * issues to consider:
 *   uvm provides the uvm_aiodesc structure for async i/o management.
 *   there are two tailq's in the uvm structure... one for pending async
 *   i/o and one for "done" async i/o.   to do an async i/o one puts
 *   an aiodesc on the "pending" list (protected by splbio()), starts the
 *   i/o and returns VM_PAGER_PEND.    when the i/o is done, we expect
 *   some sort of "i/o done" function to be called (at splbio(), interrupt
 *   time).   this function should remove the aiodesc from the pending list
 *   and place it on the "done" list and wakeup the daemon.   the daemon
 *   will run at normal spl() and will remove all items from the "done"
 *   list and call the "aiodone" hook for each done request (see uvm_pager.c).
 *   [in the old vm code, this was done by calling the "put" routine with
 *   null arguments which made the code harder to read and understand because
 *   you had one function ("put") doing two things.]
 *
 *   so the current pager needs:
 *		int uvn_aiodone(struct uvm_aiodesc *)
 *
 *	=> return KERN_SUCCESS (aio finished, free it).  otherwise requeue for
 *		later collection.
 *	=> called with pageq's locked by the daemon.
 *
 *   general outline:
 *	- "try" to lock object.   if fail, just return (will try again
 *	  later)
 *	- drop "u_nio" (this req is done!)
 *	- if (object->iosync && u_naio == 0) { wakeup &uvn->u_naio }
 *	- get "page" structures (atop?).
 *	- handle "wanted" pages
 *	- handle "released" pages [using pgo_releasepg]
 *	  >>> pgo_releasepg may kill the object
 *	don't forget to look at "object" wanted flag in all cases.
 */
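
/*
 * a rough sketch of the aiodone hook described above, for illustration
 * only (async i/o does not exist at this level yet, so this is dead
 * code).  the aio_uobj/aio_npages/aio_pps field names on struct
 * uvm_aiodesc are hypothetical, and spl/page queue locking details
 * are glossed over.
 */
#if 0
static int
uvn_aiodone(aio)
	struct uvm_aiodesc *aio;
{
	struct uvm_object *uobj = aio->aio_uobj;	/* hypothetical field */
	struct uvm_vnode *uvn = (struct uvm_vnode *) uobj;
	struct vm_page *pg;
	int lcv;

	/* "try" to lock object.  if fail, just return (try again later) */
	if (simple_lock_try(&uobj->vmobjlock) == 0)
		return (KERN_FAILURE);	/* requeue for later collection */

	/* drop "u_nio" (this req is done!), waking any iosync sleeper */
	uvn->u_nio--;
	if ((uvn->u_flags & UVM_VNODE_IOSYNC) != 0 && uvn->u_nio == 0)
		wakeup(&uvn->u_nio);

	/* handle "wanted" and "released" pages */
	for (lcv = 0 ; lcv < aio->aio_npages ; lcv++) {
		pg = aio->aio_pps[lcv];		/* hypothetical field */
		if (pg->flags & PG_WANTED)
			wakeup(pg);
		pg->flags &= ~(PG_WANTED|PG_BUSY);
		if (pg->flags & PG_RELEASED) {
			/* >>> pgo_releasepg may kill the object */
			if (!uvn_releasepg(pg, NULL))
				return (KERN_SUCCESS);	/* object died */
		}
	}

	simple_unlock(&uobj->vmobjlock);
	return (KERN_SUCCESS);		/* aio finished, free it */
}
#endif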


/*
 * uvn_flush: flush pages out of a uvm object.
 *
 * => object should be locked by caller.   we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 *	we return with the object locked.
 * => if PGO_CLEANIT is set, we may block (due to I/O).   thus, a caller
 *	might want to unlock higher level resources (e.g. vm_map)
 *	before calling flush.
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *	nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
 *	for flushing.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.   thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the lock on them [e.g. pagedaemon had
 *	better not call us with the queues locked]
 * => we return TRUE unless we encountered some sort of I/O error
 *
 * comment on "cleaning" object and PG_BUSY pages:
 *	this routine is holding the lock on the object.   the only time
 *	that it can run into a PG_BUSY page that it does not own is if
 *	some other process has started I/O on the page (e.g. either
 *	a pagein, or a pageout).    if the PG_BUSY page is being paged
 *	in, then it can not be dirty (!PG_CLEAN) because no one has
 *	had a chance to modify it yet.    if the PG_BUSY page is being
 *	paged out then it means that someone else has already started
 *	cleaning the page for us (how nice!).    in this case, if we
 *	have syncio specified, then after we make our pass through the
 *	object we need to wait for the other PG_BUSY pages to clear
 *	off (i.e. we need to do an iosync).   also note that once a
 *	page is PG_BUSY it must stay in its object until it is un-busyed.
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	by page doing hash table lookups for each address.    depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.   we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.   however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	start->stop range by a penalty which we define below.
 */

#define UVN_HASH_PENALTY 4	/* XXX: a guess */

static boolean_t
uvn_flush(uobj, start, stop, flags)
	struct uvm_object *uobj;
	voff_t start, stop;
	int flags;
{
	struct uvm_vnode *uvn = (struct uvm_vnode *) uobj;
	struct vm_page *pp, *ppnext, *ptmp;
	struct vm_page *pps[MAXBSIZE >> PAGE_SHIFT], **ppsp;
	int npages, result, lcv;
	boolean_t retval, need_iosync, by_list, needs_clean, all;
	voff_t curoff;
	u_short pp_version;
	UVMHIST_FUNC("uvn_flush"); UVMHIST_CALLED(maphist);

	curoff = 0;	/* XXX: shut up gcc */
	/*
	 * get init vals and determine how we are going to traverse object
	 */

	need_iosync = FALSE;
	retval = TRUE;		/* return value */
	if (flags & PGO_ALLPAGES) {
		all = TRUE;
		by_list = TRUE;		/* always go by the list */
	} else {
		start = trunc_page(start);
		stop = round_page(stop);
#ifdef DEBUG
		if (stop > round_page(uvn->u_size))
			printf("uvn_flush: strange, got an out of range "
			    "flush (fixed)\n");
#endif
		all = FALSE;
		by_list = (uobj->uo_npages <=
		    ((stop - start) >> PAGE_SHIFT) * UVN_HASH_PENALTY);
	}

	UVMHIST_LOG(maphist,
	    " flush start=0x%x, stop=0x%x, by_list=%d, flags=0x%x",
	    start, stop, by_list, flags);

	/*
	 * PG_CLEANCHK: this bit is used by the pgo_mk_pcluster function as
	 * a _hint_ as to how up to date the PG_CLEAN bit is.   if the hint
	 * is wrong it will only prevent us from clustering... it won't break
	 * anything.   we clear all PG_CLEANCHK bits here, and pgo_mk_pcluster
	 * will set them as it syncs PG_CLEAN.   This is only an issue if we
	 * are looking at non-inactive pages (because inactive page's PG_CLEAN
	 * bit is always up to date since there are no mappings).
	 * [borrowed PG_CLEANCHK idea from FreeBSD VM]
	 */

	if ((flags & PGO_CLEANIT) != 0 &&
	    uobj->pgops->pgo_mk_pcluster != NULL) {
		if (by_list) {
			for (pp = uobj->memq.tqh_first ; pp != NULL ;
			    pp = pp->listq.tqe_next) {
				if (!all &&
				    (pp->offset < start || pp->offset >= stop))
					continue;
				pp->flags &= ~PG_CLEANCHK;
			}

		} else {   /* by hash */
			for (curoff = start ; curoff < stop;
			    curoff += PAGE_SIZE) {
				pp = uvm_pagelookup(uobj, curoff);
				if (pp)
					pp->flags &= ~PG_CLEANCHK;
			}
		}
	}

	/*
	 * now do it.   note: we must update ppnext in body of loop or we
	 * will get stuck.  we need to use ppnext because we may free "pp"
	 * before doing the next loop.
	 */

	if (by_list) {
		pp = uobj->memq.tqh_first;
	} else {
		curoff = start;
		pp = uvm_pagelookup(uobj, curoff);
	}

	ppnext = NULL;		/* XXX: shut up gcc */
	ppsp = NULL;		/* XXX: shut up gcc */
	uvm_lock_pageq();	/* page queues locked */

	/* locked: both page queues and uobj */
	for ( ; (by_list && pp != NULL) ||
	    (!by_list && curoff < stop) ; pp = ppnext) {

		if (by_list) {

			/*
			 * range check
			 */

			if (!all &&
			    (pp->offset < start || pp->offset >= stop)) {
				ppnext = pp->listq.tqe_next;
				continue;
			}

		} else {

			/*
			 * null check
			 */

			curoff += PAGE_SIZE;
			if (pp == NULL) {
				if (curoff < stop)
					ppnext = uvm_pagelookup(uobj, curoff);
				continue;
			}

		}

		/*
		 * handle case where we do not need to clean page (either
		 * because we are not cleaning or because page is not dirty
		 * or is busy):
		 *
		 * NOTE: we are allowed to deactivate a non-wired active
		 * PG_BUSY page, but once a PG_BUSY page is on the inactive
		 * queue it must stay put until it is !PG_BUSY (so as not to
		 * confuse pagedaemon).
		 */

		if ((flags & PGO_CLEANIT) == 0 || (pp->flags & PG_BUSY) != 0) {
			needs_clean = FALSE;
			if ((pp->flags & PG_BUSY) != 0 &&
			    (flags & (PGO_CLEANIT|PGO_SYNCIO)) ==
			             (PGO_CLEANIT|PGO_SYNCIO))
				need_iosync = TRUE;
		} else {
			/*
			 * freeing: nuke all mappings so we can sync
			 * PG_CLEAN bit with no race
			 */
			if ((pp->flags & PG_CLEAN) != 0 &&
			    (flags & PGO_FREE) != 0 &&
			    (pp->pqflags & PQ_ACTIVE) != 0)
				pmap_page_protect(pp, VM_PROT_NONE);
			if ((pp->flags & PG_CLEAN) != 0 &&
			    pmap_is_modified(pp))
				pp->flags &= ~(PG_CLEAN);
			pp->flags |= PG_CLEANCHK;	/* update "hint" */

			needs_clean = ((pp->flags & PG_CLEAN) == 0);
		}

		/*
		 * if we don't need a clean... load ppnext and dispose of pp
		 */
		if (!needs_clean) {
			/* load ppnext */
			if (by_list)
				ppnext = pp->listq.tqe_next;
			else {
				if (curoff < stop)
					ppnext = uvm_pagelookup(uobj, curoff);
			}

			/* now dispose of pp */
			if (flags & PGO_DEACTIVATE) {
				if ((pp->pqflags & PQ_INACTIVE) == 0 &&
				    pp->wire_count == 0) {
					pmap_page_protect(pp, VM_PROT_NONE);
					uvm_pagedeactivate(pp);
				}

			} else if (flags & PGO_FREE) {
				if (pp->flags & PG_BUSY) {
					/* release busy pages */
					pp->flags |= PG_RELEASED;
				} else {
					pmap_page_protect(pp, VM_PROT_NONE);
					/* removed page from object */
					uvm_pagefree(pp);
				}
			}
			/* ppnext is valid so we can continue... */
			continue;
		}

		/*
		 * pp points to a page in the locked object that we are
		 * working on.  if it is !PG_CLEAN, !PG_BUSY, and we asked
		 * for cleaning (PGO_CLEANIT), we clean it now.
		 *
		 * let uvm_pager_put attempt a clustered page out.
		 * note: locked: uobj and page queues.
		 */

		pp->flags |= PG_BUSY;	/* we 'own' page now */
		UVM_PAGE_OWN(pp, "uvn_flush");
		pmap_page_protect(pp, VM_PROT_READ);
		pp_version = pp->version;
ReTry:
		ppsp = pps;
		npages = sizeof(pps) / sizeof(struct vm_page *);

		/* locked: page queues, uobj */
		result = uvm_pager_put(uobj, pp, &ppsp, &npages,
		    flags | PGO_DOACTCLUST, start, stop);
		/* unlocked: page queues, uobj */

		/*
		 * at this point nothing is locked.   if we did an async I/O
		 * it is remotely possible for the async i/o to complete and
		 * the page "pp" be freed or what not before we get a chance
		 * to relock the object.   in order to detect this, we have
		 * saved the version number of the page in "pp_version".
		 */

		/* relock! */
		simple_lock(&uobj->vmobjlock);
		uvm_lock_pageq();

		/*
		 * VM_PAGER_AGAIN: given the structure of this pager, this
		 * can only happen when we are doing async I/O and can't
		 * map the pages into kernel memory (pager_map) due to lack
		 * of vm space.   if this happens we drop back to sync I/O.
		 */

		if (result == VM_PAGER_AGAIN) {
			/*
			 * it is unlikely, but page could have been released
			 * while we had the object lock dropped.   we ignore
			 * this now and retry the I/O.   we will detect and
			 * handle the released page after the syncio I/O
			 * completes.
			 */
#ifdef DIAGNOSTIC
			if (flags & PGO_SYNCIO)
	panic("uvn_flush: PGO_SYNCIO return 'try again' error (impossible)");
#endif
			flags |= PGO_SYNCIO;
			goto ReTry;
		}

		/*
		 * the cleaning operation is now done.   finish up.   note that
		 * on error (!OK, !PEND) uvm_pager_put drops the cluster for us.
		 * if success (OK, PEND) then uvm_pager_put returns the cluster
		 * to us in ppsp/npages.
		 */

		/*
		 * for pending async i/o if we are not deactivating/freeing
		 * we can move on to the next page.
		 */

		if (result == VM_PAGER_PEND) {

			if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
				/*
				 * no per-page ops: refresh ppnext and continue
				 */
				if (by_list) {
					if (pp->version == pp_version)
						ppnext = pp->listq.tqe_next;
					else
						/* reset */
						ppnext = uobj->memq.tqh_first;
				} else {
					if (curoff < stop)
						ppnext = uvm_pagelookup(uobj,
						    curoff);
				}
				continue;
			}

			/* need to do anything here? */
		}

		/*
		 * need to look at each page of the I/O operation.  we defer
		 * processing "pp" until the last trip through this "for" loop
		 * so that we can load "ppnext" for the main loop after we
		 * play with the cluster pages [thus the "npages + 1" in the
		 * loop below].
		 */

		for (lcv = 0 ; lcv < npages + 1 ; lcv++) {

			/*
			 * handle ppnext for outside loop, and saving pp
			 * until the end.
			 */
			if (lcv < npages) {
				if (ppsp[lcv] == pp)
					continue; /* skip pp until the end */
				ptmp = ppsp[lcv];
			} else {
				ptmp = pp;

				/* set up next page for outer loop */
				if (by_list) {
					if (pp->version == pp_version)
						ppnext = pp->listq.tqe_next;
					else
						/* reset */
						ppnext = uobj->memq.tqh_first;
				} else {
					if (curoff < stop)
						ppnext = uvm_pagelookup(uobj,
						    curoff);
				}
			}

			/*
			 * verify the page didn't get moved while obj was
			 * unlocked
			 */
			if (result == VM_PAGER_PEND && ptmp->uobject != uobj)
				continue;

			/*
			 * unbusy the page if I/O is done.   note that for
			 * pending I/O it is possible that the I/O op
			 * finished before we relocked the object (in
			 * which case the page is no longer busy).
			 */

			if (result != VM_PAGER_PEND) {
				if (ptmp->flags & PG_WANTED)
					/* still holding object lock */
					wakeup(ptmp);

				ptmp->flags &= ~(PG_WANTED|PG_BUSY);
				UVM_PAGE_OWN(ptmp, NULL);
				if (ptmp->flags & PG_RELEASED) {

					/* pgo_releasepg wants this */
					uvm_unlock_pageq();
					if (!uvn_releasepg(ptmp, NULL))
						return (TRUE);

					uvm_lock_pageq();	/* relock */
					continue;		/* next page */

				} else {
					ptmp->flags |= (PG_CLEAN|PG_CLEANCHK);
					if ((flags & PGO_FREE) == 0)
						pmap_clear_modify(ptmp);
				}
			}

			/*
			 * dispose of page
			 */

			if (flags & PGO_DEACTIVATE) {
				if ((pp->pqflags & PQ_INACTIVE) == 0 &&
				    pp->wire_count == 0) {
					pmap_page_protect(ptmp, VM_PROT_NONE);
					uvm_pagedeactivate(ptmp);
				}

			} else if (flags & PGO_FREE) {
				if (result == VM_PAGER_PEND) {
					if ((ptmp->flags & PG_BUSY) != 0)
						/* signal for i/o done */
						ptmp->flags |= PG_RELEASED;
				} else {
					if (result != VM_PAGER_OK) {
						printf("uvn_flush: obj=%p, "
						    "offset=0x%llx.  error "
						    "during pageout.\n",
						    pp->uobject,
						    (long long)pp->offset);
						printf("uvn_flush: WARNING: "
						    "changes to page may be "
						    "lost!\n");
						retval = FALSE;
					}
					pmap_page_protect(ptmp, VM_PROT_NONE);
					uvm_pagefree(ptmp);
				}
			}

		}		/* end of "lcv" for loop */

	}		/* end of "pp" for loop */

	/*
	 * done with pagequeues: unlock
	 */
	uvm_unlock_pageq();

	/*
	 * now wait for all I/O if required.
	 */
	if (need_iosync) {

		UVMHIST_LOG(maphist,"  <<DOING IOSYNC>>",0,0,0,0);
		while (uvn->u_nio != 0) {
			uvn->u_flags |= UVM_VNODE_IOSYNC;
			UVM_UNLOCK_AND_WAIT(&uvn->u_nio, &uvn->u_obj.vmobjlock,
			    FALSE, "uvn_flush",0);
			simple_lock(&uvn->u_obj.vmobjlock);
		}
		if (uvn->u_flags & UVM_VNODE_IOSYNCWANTED)
			wakeup(&uvn->u_flags);
		uvn->u_flags &= ~(UVM_VNODE_IOSYNC|UVM_VNODE_IOSYNCWANTED);
	}

	/* return, with object locked! */
	UVMHIST_LOG(maphist,"<- done (retval=0x%x)",retval,0,0,0);
	return(retval);
}

/*
 * uvn_cluster
 *
 * we are about to do I/O in an object at offset.   this function is called
 * to establish a range of offsets around "offset" in which we can cluster
 * I/O.
 *
 * - currently doesn't matter if obj locked or not.
 */

static void
uvn_cluster(uobj, offset, loffset, hoffset)
	struct uvm_object *uobj;
	voff_t offset;
	voff_t *loffset, *hoffset;	/* OUT */
{
	struct uvm_vnode *uvn = (struct uvm_vnode *) uobj;
	*loffset = offset;

	if (*loffset >= uvn->u_size)
		panic("uvn_cluster: offset out of range");

	/*
	 * XXX: old pager claims we could use VOP_BMAP to get maxcontig value.
	 */
	*hoffset = *loffset + MAXBSIZE;
	if (*hoffset > round_page(uvn->u_size))	/* past end? */
		*hoffset = round_page(uvn->u_size);

	return;
}

/*
 * uvn_put: flush page data to backing store.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!   we will _unlock_ it before starting I/O.
 * => flags: PGO_SYNCIO -- use sync. I/O
 * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
 * => XXX: currently we use VOP_READ/VOP_WRITE which are only sync.
 *	[thus we never do async i/o!  see iodone comment]
 */

static int
uvn_put(uobj, pps, npages, flags)
	struct uvm_object *uobj;
	struct vm_page **pps;
	int npages, flags;
{
	int retval;

	/* note: object locked */
	retval = uvn_io((struct uvm_vnode*)uobj, pps, npages, flags, UIO_WRITE);
	/* note: object unlocked */

	return(retval);
}


/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *	     PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	voff_t offset;
	struct vm_page **pps;		/* IN/OUT */
	int *npagesp;			/* IN (OUT if PGO_LOCKED) */
	int centeridx, advice, flags;
	vm_prot_t access_type;
{
	voff_t current_offset;
	struct vm_page *ptmp;
	int lcv, result, gotpages;
	boolean_t done;
	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist, "flags=%d", flags,0,0,0);

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {

		/*
		 * gotpages is the current number of pages we've gotten (which
		 * we pass back up to caller via *npagesp).
		 */

		gotpages = 0;

		/*
		 * step 1a: get pages that are already resident.   only do this
		 * if the data structures are locked (i.e. the first time
		 * through).
		 */

		done = TRUE;	/* be optimistic */

		for (lcv = 0, current_offset = offset ; lcv < *npagesp ;
		    lcv++, current_offset += PAGE_SIZE) {

			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			/* lookup page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* to be useful must get a non-busy, non-released pg */
			if (ptmp == NULL ||
			    (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				if (lcv == centeridx || (flags & PGO_ALLPAGES)
				    != 0)
					done = FALSE;	/* need to do a wait or I/O! */
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */
			ptmp->flags |= PG_BUSY;		/* loan up to caller */
			UVM_PAGE_OWN(ptmp, "uvn_get1");
			pps[lcv] = ptmp;
			gotpages++;

		}	/* "for" lcv loop */

		/*
		 * XXX: given the "advice", should we consider async
		 * read-ahead?
		 * XXX: fault currently does deactivation of pages behind us.
		 * is this good (other callers might not)?
		 */
		/*
		 * XXX: read-ahead currently handled by buffer cache (bread)
		 * level.
		 * XXX: no async i/o available.
		 * XXX: so we don't do anything now.
		 */

		/*
		 * step 1c: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		*npagesp = gotpages;		/* let caller know */
		if (done)
			return(VM_PAGER_OK);		/* bingo! */
		else
			/* EEK!   Need to unlock and I/O */
			return(VM_PAGER_UNLOCK);
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.   data structures are unlocked.
	 *
	 * XXX: because we can't do async I/O at this level we get things
	 * page at a time (otherwise we'd chunk).   the VOP_READ() will do
	 * async-read-ahead for us at a lower level.
	 */

	for (lcv = 0, current_offset = offset ;
	    lcv < *npagesp ; lcv++, current_offset += PAGE_SIZE) {

		/* skip over pages we've already gotten or don't want */
		/* skip over pages we don't _have_ to get */
		if (pps[lcv] != NULL || (lcv != centeridx &&
		    (flags & PGO_ALLPAGES) == 0))
			continue;

		/*
		 * we have yet to locate the current page (pps[lcv]).   we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the
		 * lookup.    if the page we found is neither busy nor
		 * released, then we busy it (so we own it) and plug it into
		 * pps[lcv].   this breaks the following while loop and
		 * indicates we are ready to move on to the next page in the
		 * "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the
		 * data.
		 */

		while (pps[lcv] == NULL) {	/* top of "pps" while loop */

			/* look for a current page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* nope?   allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					uvm_wait("uvn_getpage");
					simple_lock(&uobj->vmobjlock);

					/* goto top of pps while loop */
					continue;
				}

				/*
				 * got new page ready for I/O.  break pps
				 * while loop.  pps[lcv] is still NULL.
				 */
				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				ptmp->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(ptmp,
				    &uobj->vmobjlock, FALSE, "uvn_get",0);
				simple_lock(&uobj->vmobjlock);
				continue;	/* goto top of pps while loop */
			}

			/*
			 * if we get here then the page has become resident
			 * and unbusy between steps 1 and 2.  we busy it
			 * now (so we own it) and set pps[lcv] (so that we
			 * exit the while loop).
			 */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uvn_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own a valid page at the correct offset, pps[lcv]
		 * will point to it.   nothing more to do except go to the
		 * next page.
		 */

		if (pps[lcv])
			continue;			/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.  do
		 * I/O to fill it with valid data.  note that object must be
		 * locked going into uvn_io, but will be unlocked afterwards.
		 */

		result = uvn_io((struct uvm_vnode *) uobj, &ptmp, 1,
		    PGO_SYNCIO, UIO_READ);

		/*
		 * I/O done.  object is unlocked (by uvn_io).  because we used
		 * syncio the result can not be PEND or AGAIN.  we must relock
		 * and check for errors.
		 */

		/* lock object.  check for errors. */
		simple_lock(&uobj->vmobjlock);
		if (result != VM_PAGER_OK) {
			if (ptmp->flags & PG_WANTED)
				/* object lock still held */
				wakeup(ptmp);

			ptmp->flags &= ~(PG_WANTED|PG_BUSY);
			UVM_PAGE_OWN(ptmp, NULL);
			uvm_lock_pageq();
			uvm_pagefree(ptmp);
			uvm_unlock_pageq();
			simple_unlock(&uobj->vmobjlock);
			return(result);
		}

		/*
		 * we got the page!   clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.   note
		 * that page is still busy.
		 *
		 * it is the callers job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */

		ptmp->flags &= ~PG_FAKE;		/* data is valid ... */
		pmap_clear_modify(ptmp);		/* ... and clean */
		pps[lcv] = ptmp;

	}	/* lcv loop */

	/*
	 * finally, unlock object and return.
	 */

	simple_unlock(&uobj->vmobjlock);
	return (VM_PAGER_OK);
}

/*
 * uvn_io: do I/O to a vnode
 *
 * => prefer map unlocked (not required)
 * => object must be locked!   we will _unlock_ it before starting I/O.
 * => flags: PGO_SYNCIO -- use sync. I/O
 * => XXX: currently we use VOP_READ/VOP_WRITE which are only sync.
 *	[thus we never do async i/o!  see iodone comment]
 */

static int
uvn_io(uvn, pps, npages, flags, rw)
	struct uvm_vnode *uvn;
	vm_page_t *pps;
	int npages, flags, rw;
{
	struct vnode *vn;
	struct uio uio;
	struct iovec iov;
	vaddr_t kva;
	off_t file_offset;
	int waitf, result, mapinflags;
	size_t got, wanted;
	UVMHIST_FUNC("uvn_io"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "rw=%d", rw,0,0,0);

	/*
	 * init values
	 */

	waitf = (flags & PGO_SYNCIO) ? M_WAITOK : M_NOWAIT;
	vn = (struct vnode *) uvn;
	file_offset = pps[0]->offset;

	/*
	 * check for sync'ing I/O.
	 */

	while (uvn->u_flags & UVM_VNODE_IOSYNC) {
		if (waitf == M_NOWAIT) {
			simple_unlock(&uvn->u_obj.vmobjlock);
			UVMHIST_LOG(maphist,"<- try again (iosync)",0,0,0,0);
			return(VM_PAGER_AGAIN);
		}
		uvn->u_flags |= UVM_VNODE_IOSYNCWANTED;
		UVM_UNLOCK_AND_WAIT(&uvn->u_flags, &uvn->u_obj.vmobjlock,
		    FALSE, "uvn_iosync",0);
		simple_lock(&uvn->u_obj.vmobjlock);
	}

	/*
	 * check size
	 */

	if (file_offset >= uvn->u_size) {
		simple_unlock(&uvn->u_obj.vmobjlock);
		UVMHIST_LOG(maphist,"<- BAD (size check)",0,0,0,0);
		return(VM_PAGER_BAD);
	}

	/*
	 * first try and map the pages in (without waiting)
	 */

	mapinflags = (rw == UIO_READ) ?
	    UVMPAGER_MAPIN_READ : UVMPAGER_MAPIN_WRITE;

	kva = uvm_pagermapin(pps, npages, mapinflags);
	if (kva == 0 && waitf == M_NOWAIT) {
		simple_unlock(&uvn->u_obj.vmobjlock);
		UVMHIST_LOG(maphist,"<- mapin failed (try again)",0,0,0,0);
		return(VM_PAGER_AGAIN);
	}

	/*
	 * ok, now bump u_nio up.   at this point we are done with uvn
	 * and can unlock it.   if we still don't have a kva, try again
	 * (this time with sleep ok).
	 */

	uvn->u_nio++;			/* we have an I/O in progress! */
	simple_unlock(&uvn->u_obj.vmobjlock);
	/* NOTE: object now unlocked */
	if (kva == 0)
		kva = uvm_pagermapin(pps, npages,
		    mapinflags | UVMPAGER_MAPIN_WAITOK);

	/*
	 * ok, mapped in.  our pages are PG_BUSY so they are not going to
	 * get touched (so we can look at "offset" without having to lock
	 * the object).  set up for I/O.
	 */

	/*
	 * fill out uio/iov
	 */

	iov.iov_base = (caddr_t) kva;
	wanted = npages << PAGE_SHIFT;
	if (file_offset + wanted > uvn->u_size)
		wanted = uvn->u_size - file_offset;	/* XXX: needed? */
	iov.iov_len = wanted;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = file_offset;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = rw;
	uio.uio_resid = wanted;
	uio.uio_procp = curproc;

	/*
	 * do the I/O!  (XXX: curproc?)
	 */

	UVMHIST_LOG(maphist, "calling VOP",0,0,0,0);

	/*
	 * This process may already have this vnode locked, if we faulted in
	 * copyin() or copyout() on a region backed by this vnode
	 * while doing I/O to the vnode.  If this is the case, don't
	 * panic.. instead, return the error to the user.
	 *
	 * XXX this is a stopgap to prevent a panic.
	 * Ideally, this kind of operation *should* work.
	 */
	result = 0;
	if ((uvn->u_flags & UVM_VNODE_VNISLOCKED) == 0)
		result = vn_lock(vn, LK_EXCLUSIVE | LK_RETRY | LK_RECURSEFAIL,
		    curproc);

	if (result == 0) {
		/* NOTE: vnode now locked! */

		if (rw == UIO_READ)
			result = VOP_READ(vn, &uio, 0, curproc->p_ucred);
		else
			result = VOP_WRITE(vn, &uio, 0, curproc->p_ucred);

		if ((uvn->u_flags & UVM_VNODE_VNISLOCKED) == 0)
			VOP_UNLOCK(vn, 0, curproc);
	}

	/* NOTE: vnode now unlocked (unless vnislocked) */

	UVMHIST_LOG(maphist, "done calling VOP",0,0,0,0);

	/*
	 * result == unix style errno (0 == OK!)
	 *
	 * zero out rest of buffer (if needed)
	 */

	if (result == 0) {
		got = wanted - uio.uio_resid;

		if (wanted && got == 0) {
			result = EIO;		/* XXX: error? */
		} else if (got < PAGE_SIZE * npages && rw == UIO_READ) {
			memset((void *) (kva + got), 0,
			    (npages << PAGE_SHIFT) - got);
		}
	}

	/*
	 * now remove pager mapping
	 */
	uvm_pagermapout(kva, npages);

	/*
	 * now clean up the object (i.e. drop I/O count)
	 */

	simple_lock(&uvn->u_obj.vmobjlock);
	/* NOTE: object now locked! */

	uvn->u_nio--;			/* I/O DONE! */
	if ((uvn->u_flags & UVM_VNODE_IOSYNC) != 0 && uvn->u_nio == 0) {
		wakeup(&uvn->u_nio);
	}
	simple_unlock(&uvn->u_obj.vmobjlock);
	/* NOTE: object now unlocked! */

	/*
	 * done!
	 */

	UVMHIST_LOG(maphist, "<- done (result %d)", result,0,0,0);
	if (result == 0)
		return(VM_PAGER_OK);
	else
		return(VM_PAGER_ERROR);
}

/*
 * uvm_vnp_uncache: disable "persisting" in a vnode... when last reference
 * is gone we will kill the object (flushing dirty pages back to the vnode
 * if needed).
 *
 * => returns TRUE if there was no uvm_object attached or if there was
 *	one and we killed it [i.e. if there is no active uvn]
 * => called with the vnode VOP_LOCK'd [we will unlock it for I/O, if
 *	needed]
 *
 * => XXX: given that we now kill uvn's when a vnode is recycled (without
 *	having to hold a reference on the vnode) and given a working
 *	uvm_vnp_sync(), how does that affect the need for this function?
 *	[XXXCDC: seems like it can die?]
 *
 * => XXX: this function should DIE once we merge the VM and buffer
 *	cache.
 *
 * research shows that this is called in the following places:
 * ext2fs_truncate, ffs_truncate, detrunc[msdosfs]: called when vnode
 *	changes sizes
 * ext2fs_write, WRITE [ufs_readwrite], msdosfs_write: called when we
 *	are written to
 * ext2fs_chmod, ufs_chmod: called if VTEXT vnode and the sticky bit
 *	is off
 * ffs_realloccg: when we can't extend the current block and have
 *	to allocate a new one we call this [XXX: why?]
 * nfsrv_rename, rename_files: called when the target filename is there
 *	and we want to remove it
 * nfsrv_remove, sys_unlink: called on file we are removing
 * nfsrv_access: if VTEXT and we want WRITE access and we don't uncache
 *	then return "text busy"
 * nfs_open: seems to uncache any file opened with nfs
 * vn_writechk: if VTEXT vnode and can't uncache return "text busy"
 */

boolean_t
uvm_vnp_uncache(vp)
	struct vnode *vp;
{
	struct uvm_vnode *uvn = &vp->v_uvm;

	/*
	 * lock uvn part of the vnode and check to see if we need to do
	 * anything
	 */

	simple_lock(&uvn->u_obj.vmobjlock);
	if ((uvn->u_flags & UVM_VNODE_VALID) == 0 ||
	    (uvn->u_flags & UVM_VNODE_BLOCKED) != 0) {
		simple_unlock(&uvn->u_obj.vmobjlock);
		return(TRUE);
	}

	/*
	 * we have a valid, non-blocked uvn.   clear persist flag.
	 * if uvn is currently active we can return now.
	 */

	uvn->u_flags &= ~UVM_VNODE_CANPERSIST;
	if (uvn->u_obj.uo_refs) {
		simple_unlock(&uvn->u_obj.vmobjlock);
		return(FALSE);
	}

	/*
	 * uvn is currently persisting!   we have to gain a reference to
	 * it so that we can call uvn_detach to kill the uvn.
	 */

	VREF(vp);			/* seems ok, even with VOP_LOCK */
	uvn->u_obj.uo_refs++;		/* value is now 1 */
	simple_unlock(&uvn->u_obj.vmobjlock);


#ifdef DEBUG
	/*
	 * carry over sanity check from old vnode pager: the vnode should
	 * be VOP_LOCK'd, and we confirm it here.
	 */
	if (!VOP_ISLOCKED(vp)) {
		boolean_t is_ok_anyway = FALSE;
#if defined(NFSCLIENT)
		extern int (**nfsv2_vnodeop_p) __P((void *));
		extern int (**spec_nfsv2nodeop_p) __P((void *));
		extern int (**fifo_nfsv2nodeop_p) __P((void *));

		/* vnode is NOT VOP_LOCKed: some vnode types _never_ lock */
		if (vp->v_op == nfsv2_vnodeop_p ||
		    vp->v_op == spec_nfsv2nodeop_p) {
			is_ok_anyway = TRUE;
		}
		if (vp->v_op == fifo_nfsv2nodeop_p) {
			is_ok_anyway = TRUE;
		}
#endif	/* defined(NFSCLIENT) */
		if (!is_ok_anyway)
			panic("uvm_vnp_uncache: vnode not locked!");
	}
#endif	/* DEBUG */

	/*
	 * now drop our reference to the vnode.   if we have the sole
	 * reference to the vnode then this will cause it to die [as we
	 * just cleared the persist flag].   we have to unlock the vnode
	 * while we are doing this as it may trigger I/O.
	 *
	 * XXX: it might be possible for uvn to get reclaimed while we are
	 * unlocked causing us to return TRUE when we should not.   we ignore
	 * this as a false-positive return value doesn't hurt us.
	 */
	VOP_UNLOCK(vp, 0, curproc);
	uvn_detach(&uvn->u_obj);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curproc);

	/*
	 * and return...
	 */

	return(TRUE);
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uvn
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 *
 * called from:
 *  => truncate fns (ext2fs_truncate, ffs_truncate, detrunc[msdos])
 *  => "write" fns (ext2fs_write, WRITE [ufs/ufs], msdosfs_write, nfs_write)
 *  => ffs_balloc [XXX: why? doesn't WRITE handle?]
 *  => NFS: nfs_loadattrcache, nfs_getattrcache, nfs_setattr
 *  => union fs: union_newsize
 */

void
uvm_vnp_setsize(vp, newsize)
	struct vnode *vp;
	voff_t newsize;
{
	struct uvm_vnode *uvn = &vp->v_uvm;

	/*
	 * lock uvn and check for valid object, and if valid: do it!
	 */
	simple_lock(&uvn->u_obj.vmobjlock);
	if (uvn->u_flags & UVM_VNODE_VALID) {

		/*
		 * now check if the size has changed: if we shrink we had
		 * better toss some pages...
		 */

		if (uvn->u_size > newsize) {
			(void)uvn_flush(&uvn->u_obj, newsize,
			    uvn->u_size, PGO_FREE);
		}
		uvn->u_size = newsize;
	}
	simple_unlock(&uvn->u_obj.vmobjlock);

	/*
	 * done
	 */
	return;
}

/*
 * uvm_vnp_sync: flush all dirty VM pages back to their backing vnodes.
 *
 * => called from sys_sync with no VM structures locked
 * => only one process can do a sync at a time (because the uvn
 *    structure only has one queue for sync'ing).  we ensure this
 *    by holding the uvn_sync_lock while the sync is in progress.
 *    other processes attempting a sync will sleep on this lock
 *    until we are done.
 */

void
uvm_vnp_sync(mp)
	struct mount *mp;
{
	struct uvm_vnode *uvn;
	struct vnode *vp;
	boolean_t got_lock;

	/*
	 * step 1: ensure we are only ones using the uvn_sync_q by locking
	 * our lock...
	 */
	lockmgr(&uvn_sync_lock, LK_EXCLUSIVE, NULL, curproc);

	/*
	 * step 2: build up a simpleq of uvns of interest based on the
	 * write list.   we gain a reference to uvns of interest.  must
	 * be careful about locking uvn's since we will be holding uvn_wl_lock
	 * in the body of the loop.
	 */
	SIMPLEQ_INIT(&uvn_sync_q);
	simple_lock(&uvn_wl_lock);
	for (uvn = uvn_wlist.lh_first ; uvn != NULL ;
	    uvn = uvn->u_wlist.le_next) {

		vp = (struct vnode *) uvn;
		if (mp && vp->v_mount != mp)
			continue;

		/* attempt to gain reference */
		while ((got_lock = simple_lock_try(&uvn->u_obj.vmobjlock)) ==
		    FALSE &&
		    (uvn->u_flags & UVM_VNODE_BLOCKED) == 0)
			/* spin */ ;

		/*
		 * we will exit the loop if either of the following are true:
		 *   - we got the lock [always true if NCPU == 1]
		 *   - we failed to get the lock but noticed the vnode was
		 *     "blocked" -- in this case the vnode must be a dying
		 *     vnode, and since dying vnodes are in the process of
		 *     being flushed out, we can safely skip this one
		 *
		 * we want to skip over the vnode if we did not get the lock,
		 * or if the vnode is already dying (due to the above logic).
		 *
		 * note that uvn must already be valid because we found it on
		 * the wlist (this also means it can't be ALOCK'd).
		 */
		if (!got_lock || (uvn->u_flags & UVM_VNODE_BLOCKED) != 0) {
			if (got_lock)
				simple_unlock(&uvn->u_obj.vmobjlock);
			continue;		/* skip it */
		}

		/*
		 * gain reference.   watch out for persisting uvns (need to
		 * regain vnode REF).
		 */
		if (uvn->u_obj.uo_refs == 0)
			VREF(vp);
		uvn->u_obj.uo_refs++;
		simple_unlock(&uvn->u_obj.vmobjlock);

		/*
		 * got it!
		 */
		SIMPLEQ_INSERT_HEAD(&uvn_sync_q, uvn, u_syncq);
	}
	simple_unlock(&uvn_wl_lock);

	/*
	 * step 3: we now have a list of uvn's that may need cleaning.
	 * we are holding the uvn_sync_lock, but have dropped the uvn_wl_lock
	 * (so we can now safely lock uvn's again).
	 */

	for (uvn = uvn_sync_q.sqh_first ; uvn ; uvn = uvn->u_syncq.sqe_next) {
		simple_lock(&uvn->u_obj.vmobjlock);
#ifdef DEBUG
		if (uvn->u_flags & UVM_VNODE_DYING) {
			printf("uvm_vnp_sync: dying vnode on sync list\n");
		}
#endif
		uvn_flush(&uvn->u_obj, 0, 0,
		    PGO_CLEANIT|PGO_ALLPAGES|PGO_DOACTCLUST);

		/*
		 * if we have the only reference and we just cleaned the uvn,
		 * then we can pull it out of the UVM_VNODE_WRITEABLE state
		 * thus allowing us to avoid thinking about flushing it again
		 * on later sync ops.
		 */
		if (uvn->u_obj.uo_refs == 1 &&
		    (uvn->u_flags & UVM_VNODE_WRITEABLE)) {
			LIST_REMOVE(uvn, u_wlist);
			uvn->u_flags &= ~UVM_VNODE_WRITEABLE;
		}

		simple_unlock(&uvn->u_obj.vmobjlock);

		/* now drop our reference to the uvn */
		uvn_detach(&uvn->u_obj);
	}

	/*
	 * done!  release sync lock
	 */
	lockmgr(&uvn_sync_lock, LK_RELEASE, (void *)0, curproc);
}