/* $NetBSD: genfs_vnops.c,v 1.68 2002/11/15 14:01:57 yamt Exp $ */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: genfs_vnops.c,v 1.68 2002/11/15 14:01:57 yamt Exp $");

#include "opt_nfsserver.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/mman.h>
#include <sys/file.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/genfs_node.h>
#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pager.h>

#ifdef NFSSERVER
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nqnfs.h>
#include <nfs/nfs_var.h>
#endif

static __inline void genfs_rel_pages(struct vm_page **, int);

#define MAX_READ_AHEAD 16       /* XXXUBC 16 */
int genfs_rapages = MAX_READ_AHEAD; /* # of pages in each chunk of readahead */
int genfs_racount = 2;          /* # of page chunks to readahead */
int genfs_raskip = 2;           /* # of busy page chunks allowed to skip */

int
genfs_poll(void *v)
{
        struct vop_poll_args /* {
                struct vnode *a_vp;
                int a_events;
                struct proc *a_p;
        } */ *ap = v;

        return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

int
genfs_fsync(void *v)
{
        struct vop_fsync_args /* {
                struct vnode *a_vp;
                struct ucred *a_cred;
                int a_flags;
                off_t offlo;
                off_t offhi;
                struct proc *a_p;
        } */ *ap = v;
        struct vnode *vp = ap->a_vp;
        int wait;

        wait = (ap->a_flags & FSYNC_WAIT) != 0;
        vflushbuf(vp, wait);
        if ((ap->a_flags & FSYNC_DATAONLY) != 0)
                return (0);
        else
                return (VOP_UPDATE(vp, NULL, NULL, wait ? UPDATE_WAIT : 0));
}

int
genfs_seek(void *v)
{
        struct vop_seek_args /* {
                struct vnode *a_vp;
                off_t a_oldoff;
                off_t a_newoff;
                struct ucred *a_ucred;
        } */ *ap = v;

        if (ap->a_newoff < 0)
                return (EINVAL);

        return (0);
}

int
genfs_abortop(void *v)
{
        struct vop_abortop_args /* {
                struct vnode *a_dvp;
                struct componentname *a_cnp;
        } */ *ap = v;

        if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
                PNBUF_PUT(ap->a_cnp->cn_pnbuf);
        return (0);
}

int
genfs_fcntl(void *v)
{
        struct vop_fcntl_args /* {
                struct vnode *a_vp;
                u_int a_command;
                caddr_t a_data;
                int a_fflag;
                struct ucred *a_cred;
                struct proc *a_p;
        } */ *ap = v;

        if (ap->a_command == F_SETFL)
                return (0);
        else
                return (EOPNOTSUPP);
}

/*ARGSUSED*/
int
genfs_badop(void *v)
{

        panic("genfs: bad op");
}

/*ARGSUSED*/
int
genfs_nullop(void *v)
{

        return (0);
}

/*ARGSUSED*/
int
genfs_einval(void *v)
{

        return (EINVAL);
}

/*ARGSUSED*/
int
genfs_eopnotsupp(void *v)
{

        return (EOPNOTSUPP);
}

/*
 * Called when an fs doesn't support a particular vop but the vop needs to
 * vrele, vput, or vunlock passed in vnodes.
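 * (Each vnode argument that the vnodeop_desc flags as WILLPUT, WILLUNLOCK
 * or WILLRELE is released accordingly below, so callers still see the
 * usual reference and lock side effects.)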
 */
int
genfs_eopnotsupp_rele(void *v)
{
        struct vop_generic_args /*
                struct vnodeop_desc *a_desc;
                / * other random data follows, presumably * /
        } */ *ap = v;
        struct vnodeop_desc *desc = ap->a_desc;
        struct vnode *vp;
        int flags, i, j, offset;

        flags = desc->vdesc_flags;
        for (i = 0; i < VDESC_MAX_VPS; flags >>= 1, i++) {
                if ((offset = desc->vdesc_vp_offsets[i]) == VDESC_NO_OFFSET)
                        break;  /* stop at end of list */
                if ((j = flags & VDESC_VP0_WILLPUT)) {
                        vp = *VOPARG_OFFSETTO(struct vnode **, offset, ap);
                        switch (j) {
                        case VDESC_VP0_WILLPUT:
                                vput(vp);
                                break;
                        case VDESC_VP0_WILLUNLOCK:
                                VOP_UNLOCK(vp, 0);
                                break;
                        case VDESC_VP0_WILLRELE:
                                vrele(vp);
                                break;
                        }
                }
        }

        return (EOPNOTSUPP);
}

/*ARGSUSED*/
int
genfs_ebadf(void *v)
{

        return (EBADF);
}

/* ARGSUSED */
int
genfs_enoioctl(void *v)
{

        return (EPASSTHROUGH);
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
int
genfs_revoke(void *v)
{
        struct vop_revoke_args /* {
                struct vnode *a_vp;
                int a_flags;
        } */ *ap = v;
        struct vnode *vp, *vq;
        struct proc *p = curproc;       /* XXX */

#ifdef DIAGNOSTIC
        if ((ap->a_flags & REVOKEALL) == 0)
                panic("genfs_revoke: not revokeall");
#endif

        vp = ap->a_vp;
        simple_lock(&vp->v_interlock);

        if (vp->v_flag & VALIASED) {
                /*
                 * If a vgone (or vclean) is already in progress,
                 * wait until it is done and return.
                 */
                if (vp->v_flag & VXLOCK) {
                        vp->v_flag |= VXWANT;
                        simple_unlock(&vp->v_interlock);
                        tsleep((caddr_t)vp, PINOD, "vop_revokeall", 0);
                        return (0);
                }
                /*
                 * Ensure that vp will not be vgone'd while we
                 * are eliminating its aliases.
                 */
                vp->v_flag |= VXLOCK;
                simple_unlock(&vp->v_interlock);
                while (vp->v_flag & VALIASED) {
                        simple_lock(&spechash_slock);
                        for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
                                if (vq->v_rdev != vp->v_rdev ||
                                    vq->v_type != vp->v_type || vp == vq)
                                        continue;
                                simple_unlock(&spechash_slock);
                                vgone(vq);
                                break;
                        }
                        if (vq == NULLVP)
                                simple_unlock(&spechash_slock);
                }
                /*
                 * Remove the lock so that vgone below will
                 * really eliminate the vnode after which time
                 * vgone will awaken any sleepers.
                 */
                simple_lock(&vp->v_interlock);
                vp->v_flag &= ~VXLOCK;
        }
        vgonel(vp, p);
        return (0);
}

/*
 * Lock the node.
 */
int
genfs_lock(void *v)
{
        struct vop_lock_args /* {
                struct vnode *a_vp;
                int a_flags;
        } */ *ap = v;
        struct vnode *vp = ap->a_vp;

        return (lockmgr(&vp->v_lock, ap->a_flags, &vp->v_interlock));
}

/*
 * Unlock the node.
 */
int
genfs_unlock(void *v)
{
        struct vop_unlock_args /* {
                struct vnode *a_vp;
                int a_flags;
        } */ *ap = v;
        struct vnode *vp = ap->a_vp;

        return (lockmgr(&vp->v_lock, ap->a_flags | LK_RELEASE,
            &vp->v_interlock));
}

/*
 * Return whether or not the node is locked.
 */
int
genfs_islocked(void *v)
{
        struct vop_islocked_args /* {
                struct vnode *a_vp;
        } */ *ap = v;
        struct vnode *vp = ap->a_vp;

        return (lockstatus(&vp->v_lock));
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
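 * genfs_nolock() must still drop the vnode interlock when LK_INTERLOCK
 * is passed, since releasing it is normally the lock manager's job.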
 */
int
genfs_nolock(void *v)
{
        struct vop_lock_args /* {
                struct vnode *a_vp;
                int a_flags;
                struct proc *a_p;
        } */ *ap = v;

        /*
         * Since we are not using the lock manager, we must clear
         * the interlock here.
         */
        if (ap->a_flags & LK_INTERLOCK)
                simple_unlock(&ap->a_vp->v_interlock);
        return (0);
}

int
genfs_nounlock(void *v)
{

        return (0);
}

int
genfs_noislocked(void *v)
{

        return (0);
}

/*
 * Local lease check for NFS servers.  Just set up args and let
 * nqsrv_getlease() do the rest.  If NFSSERVER is not in the kernel,
 * this is a null operation.
 */
int
genfs_lease_check(void *v)
{
#ifdef NFSSERVER
        struct vop_lease_args /* {
                struct vnode *a_vp;
                struct proc *a_p;
                struct ucred *a_cred;
                int a_flag;
        } */ *ap = v;
        u_int32_t duration = 0;
        int cache;
        u_quad_t frev;

        (void) nqsrv_getlease(ap->a_vp, &duration, ND_CHECK | ap->a_flag,
            NQLOCALSLP, ap->a_p, (struct mbuf *)0, &cache, &frev, ap->a_cred);
        return (0);
#else
        return (0);
#endif /* NFSSERVER */
}

int
genfs_mmap(void *v)
{

        return (0);
}

static __inline void
genfs_rel_pages(struct vm_page **pgs, int npages)
{
        int i;

        for (i = 0; i < npages; i++) {
                struct vm_page *pg = pgs[i];

                if (pg == NULL)
                        continue;
                if (pg->flags & PG_FAKE) {
                        pg->flags |= PG_RELEASED;
                }
        }
        uvm_lock_pageq();
        uvm_page_unbusy(pgs, npages);
        uvm_unlock_pageq();
}

/*
 * generic VM getpages routine.
 * Return PG_BUSY pages for the given range,
 * reading from backing store if necessary.
 */

int
genfs_getpages(void *v)
{
        struct vop_getpages_args /* {
                struct vnode *a_vp;
                voff_t a_offset;
                struct vm_page **a_m;
                int *a_count;
                int a_centeridx;
                vm_prot_t a_access_type;
                int a_advice;
                int a_flags;
        } */ *ap = v;

        off_t newsize, diskeof, memeof;
        off_t offset, origoffset, startoffset, endoffset, raoffset;
        daddr_t lbn, blkno;
        int s, i, error, npages, orignpages, npgs, run, ridx, pidx, pcount;
        int fs_bshift, fs_bsize, dev_bshift;
        int flags = ap->a_flags;
        size_t bytes, iobytes, tailbytes, totalbytes, skipbytes;
        vaddr_t kva;
        struct buf *bp, *mbp;
        struct vnode *vp = ap->a_vp;
        struct vnode *devvp;
        struct genfs_node *gp = VTOG(vp);
        struct uvm_object *uobj = &vp->v_uobj;
        struct vm_page *pg, *pgs[MAX_READ_AHEAD];
        struct ucred *cred = curproc->p_ucred;          /* XXXUBC curproc */
        boolean_t async = (flags & PGO_SYNCIO) == 0;
        boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;
        boolean_t sawhole = FALSE;
        boolean_t overwrite = (flags & PGO_OVERWRITE) != 0;
        UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);

        UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
            vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);

        /* XXXUBC temp limit */
        if (*ap->a_count > MAX_READ_AHEAD) {
                panic("genfs_getpages: too many pages");
        }

        error = 0;
        origoffset = ap->a_offset;
        orignpages = *ap->a_count;
        GOP_SIZE(vp, vp->v_size, &diskeof);
        if (flags & PGO_PASTEOF) {
                newsize = MAX(vp->v_size,
                    origoffset + (orignpages << PAGE_SHIFT));
                GOP_SIZE(vp, newsize, &memeof);
        } else {
                memeof = diskeof;
        }
        KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
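
        /*
         * the rest of the request must be page-aligned and non-empty;
         * the assertions below enforce this.
         */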
        KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
        KASSERT(orignpages > 0);

        /*
         * Bounds-check the request.
         */

        if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
                if ((flags & PGO_LOCKED) == 0) {
                        simple_unlock(&uobj->vmobjlock);
                }
                UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
                    origoffset, *ap->a_count, memeof,0);
                return (EINVAL);
        }

        /*
         * For PGO_LOCKED requests, just return whatever's in memory.
         */

        if (flags & PGO_LOCKED) {
                uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
                    UFP_NOWAIT|UFP_NOALLOC|(write ? UFP_NORDONLY : 0));

                return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
        }

        /* vnode is VOP_LOCKed, uobj is locked */

        if (write && (vp->v_flag & VONWORKLST) == 0) {
                vn_syncer_add_to_worklist(vp, filedelay);
        }

        /*
         * find the requested pages and make some simple checks.
         * leave space in the page array for a whole block.
         */

        if (vp->v_type == VREG) {
                fs_bshift = vp->v_mount->mnt_fs_bshift;
                dev_bshift = vp->v_mount->mnt_dev_bshift;
        } else {
                fs_bshift = DEV_BSHIFT;
                dev_bshift = DEV_BSHIFT;
        }
        fs_bsize = 1 << fs_bshift;

        orignpages = MIN(orignpages,
            round_page(memeof - origoffset) >> PAGE_SHIFT);
        npages = orignpages;
        startoffset = origoffset & ~(fs_bsize - 1);
        endoffset = round_page((origoffset + (npages << PAGE_SHIFT) +
            fs_bsize - 1) & ~(fs_bsize - 1));
        endoffset = MIN(endoffset, round_page(memeof));
        ridx = (origoffset - startoffset) >> PAGE_SHIFT;

        memset(pgs, 0, sizeof(pgs));
        UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
            ridx, npages, startoffset, endoffset);
        KASSERT(&pgs[ridx + npages] <= &pgs[MAX_READ_AHEAD]);
        if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
            async ? UFP_NOWAIT : UFP_ALL) != orignpages) {
                KASSERT(async != 0);
                genfs_rel_pages(&pgs[ridx], orignpages);
                simple_unlock(&uobj->vmobjlock);
                return (EBUSY);
        }

        /*
         * if the pages are already resident, just return them.
         */

        for (i = 0; i < npages; i++) {
                struct vm_page *pg = pgs[ridx + i];

                if ((pg->flags & PG_FAKE) ||
                    (write && (pg->flags & PG_RDONLY))) {
                        break;
                }
        }
        if (i == npages) {
                UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
                raoffset = origoffset + (orignpages << PAGE_SHIFT);
                npages += ridx;
                goto raout;
        }

        /*
         * if PGO_OVERWRITE is set, don't bother reading the pages.
         */

        if (flags & PGO_OVERWRITE) {
                UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);

                for (i = 0; i < npages; i++) {
                        struct vm_page *pg = pgs[ridx + i];

                        pg->flags &= ~(PG_RDONLY|PG_CLEAN);
                }
                npages += ridx;
                goto out;
        }

        /*
         * the page wasn't resident and we're not overwriting,
         * so we're going to have to do some i/o.
         * find any additional pages needed to cover the expanded range.
         */

        npages = (endoffset - startoffset) >> PAGE_SHIFT;
        if (startoffset != origoffset || npages != orignpages) {

                /*
                 * we need to avoid deadlocks caused by locking
                 * additional pages at lower offsets than pages we
                 * already have locked.  unlock them all and start over.
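                 * (pages are busied in ascending offset order; grabbing
                 * a lower-offset page while holding a higher one would
                 * violate that order and could deadlock against another
                 * thread doing the same in reverse.)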
                 */

                genfs_rel_pages(&pgs[ridx], orignpages);
                memset(pgs, 0, sizeof(pgs));

                UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
                    startoffset, endoffset, 0,0);
                npgs = npages;
                if (uvn_findpages(uobj, startoffset, &npgs, pgs,
                    async ? UFP_NOWAIT : UFP_ALL) != npages) {
                        KASSERT(async != 0);
                        genfs_rel_pages(pgs, npages);
                        simple_unlock(&uobj->vmobjlock);
                        return (EBUSY);
                }
        }
        simple_unlock(&uobj->vmobjlock);

        /*
         * read the desired page(s).
         */

        totalbytes = npages << PAGE_SHIFT;
        bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
        tailbytes = totalbytes - bytes;
        skipbytes = 0;

        kva = uvm_pagermapin(pgs, npages,
            UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);

        s = splbio();
        mbp = pool_get(&bufpool, PR_WAITOK);
        splx(s);
        mbp->b_bufsize = totalbytes;
        mbp->b_data = (void *)kva;
        mbp->b_resid = mbp->b_bcount = bytes;
        mbp->b_flags = B_BUSY|B_READ|(async ? B_CALL|B_ASYNC : 0);
        mbp->b_iodone = (async ? uvm_aio_biodone : 0);
        mbp->b_vp = vp;
        LIST_INIT(&mbp->b_dep);

        /*
         * if EOF is in the middle of the range, zero the part past EOF.
         * if the page including EOF is not PG_FAKE, skip over it since
         * in that case it has valid data that we need to preserve.
         */

        if (tailbytes > 0) {
                size_t tailstart = bytes;

                if ((pgs[bytes >> PAGE_SHIFT]->flags & PG_FAKE) == 0) {
                        tailstart = round_page(tailstart);
                        tailbytes -= tailstart - bytes;
                }
                UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
                    kva, tailstart, tailbytes,0);
                memset((void *)(kva + tailstart), 0, tailbytes);
        }

        /*
         * now loop over the pages, reading as needed.
         */

        if (write) {
                lockmgr(&gp->g_glock, LK_EXCLUSIVE, NULL);
        } else {
                lockmgr(&gp->g_glock, LK_SHARED, NULL);
        }

        bp = NULL;
        for (offset = startoffset;
            bytes > 0;
            offset += iobytes, bytes -= iobytes) {

                /*
                 * skip pages which don't need to be read.
                 */

                pidx = (offset - startoffset) >> PAGE_SHIFT;
                while ((pgs[pidx]->flags & (PG_FAKE|PG_RDONLY)) == 0) {
                        size_t b;

                        KASSERT((offset & (PAGE_SIZE - 1)) == 0);
                        b = MIN(PAGE_SIZE, bytes);
                        offset += b;
                        bytes -= b;
                        skipbytes += b;
                        pidx++;
                        UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
                            offset, 0,0,0);
                        if (bytes == 0) {
                                goto loopdone;
                        }
                }

                /*
                 * bmap the file to find out the blkno to read from and
                 * how much we can read in one i/o.  if bmap returns an error,
                 * skip the rest of the top-level i/o.
                 */

                lbn = offset >> fs_bshift;
                error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
                if (error) {
                        UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
                            lbn, error,0,0);
                        skipbytes += bytes;
                        goto loopdone;
                }

                /*
                 * see how many pages can be read with this i/o.
                 * reduce the i/o size if necessary to avoid
                 * overwriting pages with valid data.
                 */

                iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
                    bytes);
                if (offset + iobytes > round_page(offset)) {
                        pcount = 1;
                        while (pidx + pcount < npages &&
                            pgs[pidx + pcount]->flags & PG_FAKE) {
                                pcount++;
                        }
                        iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
                            (offset - trunc_page(offset)));
                }

                /*
                 * if this block isn't allocated, zero it instead of
                 * reading it.  if this is a read access, mark the
                 * pages we zeroed PG_RDONLY.
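                 * PG_RDONLY makes a later write access fault again,
                 * giving us a chance to allocate backing store for the
                 * zeroed pages then (see the sawhole handling below).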
                 */

                if (blkno < 0) {
                        int holepages = (round_page(offset + iobytes) -
                            trunc_page(offset)) >> PAGE_SHIFT;
                        UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);

                        sawhole = TRUE;
                        memset((char *)kva + (offset - startoffset), 0,
                            iobytes);
                        skipbytes += iobytes;

                        for (i = 0; i < holepages; i++) {
                                if (write) {
                                        pgs[pidx + i]->flags &= ~PG_CLEAN;
                                } else {
                                        pgs[pidx + i]->flags |= PG_RDONLY;
                                }
                        }
                        continue;
                }

                /*
                 * allocate a sub-buf for this piece of the i/o
                 * (or just use mbp if there's only 1 piece),
                 * and start it going.
                 */

                if (offset == startoffset && iobytes == bytes) {
                        bp = mbp;
                } else {
                        s = splbio();
                        bp = pool_get(&bufpool, PR_WAITOK);
                        splx(s);
                        bp->b_data = (char *)kva + offset - startoffset;
                        bp->b_resid = bp->b_bcount = iobytes;
                        bp->b_flags = B_BUSY|B_READ|B_CALL|B_ASYNC;
                        bp->b_iodone = uvm_aio_biodone1;
                        bp->b_vp = vp;
                        bp->b_proc = NULL;
                        LIST_INIT(&bp->b_dep);
                }
                bp->b_lblkno = 0;
                bp->b_private = mbp;
                if (devvp->v_type == VBLK) {
                        bp->b_dev = devvp->v_rdev;
                }

                /* adjust physical blkno for partial blocks */
                bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
                    dev_bshift);

                UVMHIST_LOG(ubchist,
                    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
                    bp, offset, iobytes, bp->b_blkno);

                VOP_STRATEGY(bp);
        }

loopdone:
        if (skipbytes) {
                s = splbio();
                if (error) {
                        mbp->b_flags |= B_ERROR;
                        mbp->b_error = error;
                }
                mbp->b_resid -= skipbytes;
                if (mbp->b_resid == 0) {
                        biodone(mbp);
                }
                splx(s);
        }

        if (async) {
                UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
                lockmgr(&gp->g_glock, LK_RELEASE, NULL);
                return (0);
        }
        if (bp != NULL) {
                error = biowait(mbp);
        }
        s = splbio();
        pool_put(&bufpool, mbp);
        splx(s);
        uvm_pagermapout(kva, npages);
        raoffset = startoffset + totalbytes;

        /*
         * if we encountered a hole then we have to do a little more work.
         * for read faults, we marked the page PG_RDONLY so that future
         * write accesses to the page will fault again.
         * for write faults, we must make sure that the backing store for
         * the page is completely allocated while the pages are locked.
         */

        if (!error && sawhole && write) {
                for (i = 0; i < npages; i++) {
                        if (pgs[i] == NULL) {
                                continue;
                        }
                        pgs[i]->flags &= ~PG_CLEAN;
                        UVMHIST_LOG(ubchist, "mark dirty pg %p", pgs[i],0,0,0);
                }
                error = GOP_ALLOC(vp, startoffset, npages << PAGE_SHIFT, 0,
                    cred);
                UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
                    startoffset, npages << PAGE_SHIFT, error,0);
        }
        lockmgr(&gp->g_glock, LK_RELEASE, NULL);
        simple_lock(&uobj->vmobjlock);

        /*
         * see if we want to start any readahead.
         * XXXUBC for now, just read the next 128k on 64k boundaries.
         * this is pretty nonsensical, but it is 50% faster than reading
         * just the next 64k.
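         * the genfs_rapages, genfs_racount and genfs_raskip knobs above
         * tune the chunk size, the number of chunks and how many busy
         * chunks we tolerate before giving up.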
         */

raout:
        if (!error && !async && !write && ((int)raoffset & 0xffff) == 0 &&
            PAGE_SHIFT <= 16) {
                off_t rasize;
                int rapages, err, i, skipped;

                /* XXXUBC temp limit, from above */
                rapages = MIN(MIN(1 << (16 - PAGE_SHIFT), MAX_READ_AHEAD),
                    genfs_rapages);
                rasize = rapages << PAGE_SHIFT;
                for (i = skipped = 0; i < genfs_racount; i++) {
                        err = VOP_GETPAGES(vp, raoffset, NULL, &rapages, 0,
                            VM_PROT_READ, 0, 0);
                        simple_lock(&uobj->vmobjlock);
                        if (err) {
                                if (err != EBUSY ||
                                    skipped++ == genfs_raskip)
                                        break;
                        }
                        raoffset += rasize;
                        rapages = rasize >> PAGE_SHIFT;
                }
        }

        /*
         * we're almost done!  release the pages...
         * for errors, we free the pages.
         * otherwise we activate them and mark them as valid and clean.
         * also, unbusy pages that were not actually requested.
         */

        if (error) {
                for (i = 0; i < npages; i++) {
                        if (pgs[i] == NULL) {
                                continue;
                        }
                        UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
                            pgs[i], pgs[i]->flags, 0,0);
                        if (pgs[i]->flags & PG_FAKE) {
                                pgs[i]->flags |= PG_RELEASED;
                        }
                }
                uvm_lock_pageq();
                uvm_page_unbusy(pgs, npages);
                uvm_unlock_pageq();
                simple_unlock(&uobj->vmobjlock);
                UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
                return (error);
        }

out:
        UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
        uvm_lock_pageq();
        for (i = 0; i < npages; i++) {
                pg = pgs[i];
                if (pg == NULL) {
                        continue;
                }
                UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
                    pg, pg->flags, 0,0);
                if (pg->flags & PG_FAKE && !overwrite) {
                        pg->flags &= ~(PG_FAKE);
                        pmap_clear_modify(pgs[i]);
                }
                if (write) {
                        pg->flags &= ~(PG_RDONLY);
                }
                if (i < ridx || i >= ridx + orignpages || async) {
                        UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
                            pg, pg->offset,0,0);
                        if (pg->flags & PG_WANTED) {
                                wakeup(pg);
                        }
                        if (pg->flags & PG_FAKE) {
                                KASSERT(overwrite);
                                uvm_pagezero(pg);
                        }
                        if (pg->flags & PG_RELEASED) {
                                uvm_pagefree(pg);
                                continue;
                        }
                        uvm_pageactivate(pg);
                        pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
                        UVM_PAGE_OWN(pg, NULL);
                }
        }
        uvm_unlock_pageq();
        simple_unlock(&uobj->vmobjlock);
        if (ap->a_m != NULL) {
                memcpy(ap->a_m, &pgs[ridx],
                    orignpages * sizeof(struct vm_page *));
        }
        return (0);
}

/*
 * generic VM putpages routine.
 * Write the given range of pages to backing store.
 *
 * => "offhi == 0" means flush all pages at or after "offlo".
 * => object should be locked by caller.  we may _unlock_ the object
 *      if (and only if) we need to clean a page (PGO_CLEANIT), or
 *      if PGO_SYNCIO is set and there are pages busy.
 *      we return with the object locked.
 * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
 *      thus, a caller might want to unlock higher level resources
 *      (e.g. vm_map) before calling flush.
 * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, then we will neither
 *      unlock the object nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *      that new pages are inserted on the tail end of the list.  thus,
 *      we can make a complete pass through the object in one go by starting
 *      at the head and working towards the tail (new pages are put in
 *      front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *      must not be holding the page queue lock.
 *
 * note on "cleaning" object and PG_BUSY pages:
 *      this routine is holding the lock on the object.  the only time
 *      that it can run into a PG_BUSY page that it does not own is if
 *      some other process has started I/O on the page (e.g. either
 *      a pagein, or a pageout).  if the PG_BUSY page is being paged
 *      in, then it can not be dirty (!PG_CLEAN) because no one has
 *      had a chance to modify it yet.  if the PG_BUSY page is being
 *      paged out then it means that someone else has already started
 *      cleaning the page for us (how nice!).  in this case, if we
 *      have syncio specified, then after we make our pass through the
 *      object we need to wait for the other PG_BUSY pages to clear
 *      off (i.e. we need to do an iosync).  also note that once a
 *      page is PG_BUSY it must stay in its object until it is un-busyed.
 *
 * note on page traversal:
 *      we can traverse the pages in an object either by going down the
 *      linked list in "uobj->memq", or we can go over the address range
 *      by page doing hash table lookups for each address.  depending
 *      on how many pages are in the object it may be cheaper to do one
 *      or the other.  we set "by_list" to true if we are using memq.
 *      if the cost of a hash lookup was equal to the cost of the list
 *      traversal we could compare the number of pages in the start->stop
 *      range to the total number of pages in the object.  however, it
 *      seems that a hash table lookup is more expensive than the linked
 *      list traversal, so we multiply the number of pages in the
 *      range by an estimate of the relatively higher cost of the hash lookup.
 */

int
genfs_putpages(void *v)
{
        struct vop_putpages_args /* {
                struct vnode *a_vp;
                voff_t a_offlo;
                voff_t a_offhi;
                int a_flags;
        } */ *ap = v;
        struct vnode *vp = ap->a_vp;
        struct uvm_object *uobj = &vp->v_uobj;
        struct simplelock *slock = &uobj->vmobjlock;
        off_t startoff = ap->a_offlo;
        off_t endoff = ap->a_offhi;
        off_t off;
        int flags = ap->a_flags;
        const int maxpages = MAXBSIZE >> PAGE_SHIFT;
        int i, s, error, npages, nback;
        int freeflag;
        struct vm_page *pgs[maxpages], *pg, *nextpg, *tpg, curmp, endmp;
        boolean_t wasclean, by_list, needs_clean, yield;
        boolean_t async = (flags & PGO_SYNCIO) == 0;
        boolean_t pagedaemon = curproc == uvm.pagedaemon_proc;
        UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);

        KASSERT(flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
        KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
        KASSERT(startoff < endoff || endoff == 0);

        UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
            vp, uobj->uo_npages, startoff, endoff - startoff);
        if (uobj->uo_npages == 0) {
                s = splbio();
                if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL &&
                    (vp->v_flag & VONWORKLST)) {
                        vp->v_flag &= ~VONWORKLST;
                        LIST_REMOVE(vp, v_synclist);
                }
                splx(s);
                simple_unlock(slock);
                return (0);
        }

        /*
         * the vnode has pages, set up to process the request.
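         * compute the effective range, decide between list and hash
         * traversal, and set up the marker pages used to hold our place
         * in the list while we sleep.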
         */

        error = 0;
        s = splbio();
        wasclean = (vp->v_numoutput == 0);
        splx(s);
        off = startoff;
        if (endoff == 0 || flags & PGO_ALLPAGES) {
                endoff = trunc_page(LLONG_MAX);
        }
        by_list = (uobj->uo_npages <=
            ((endoff - startoff) >> PAGE_SHIFT) * UVM_PAGE_HASH_PENALTY);

        /*
         * start the loop.  when scanning by list, hold the last page
         * in the list before we start.  pages allocated after we start
         * will be added to the end of the list, so we can stop at the
         * current last page.
         */

        freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
        curmp.uobject = uobj;
        curmp.offset = (voff_t)-1;
        curmp.flags = PG_BUSY;
        endmp.uobject = uobj;
        endmp.offset = (voff_t)-1;
        endmp.flags = PG_BUSY;
        if (by_list) {
                pg = TAILQ_FIRST(&uobj->memq);
                TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq);
                PHOLD(curproc);
        } else {
                pg = uvm_pagelookup(uobj, off);
        }
        nextpg = NULL;
        while (by_list || off < endoff) {

                /*
                 * if the current page is not interesting, move on to the next.
                 */

                KASSERT(pg == NULL || pg->uobject == uobj);
                KASSERT(pg == NULL ||
                    (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
                    (pg->flags & PG_BUSY) != 0);
                if (by_list) {
                        if (pg == &endmp) {
                                break;
                        }
                        if (pg->offset < startoff || pg->offset >= endoff ||
                            pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
                                pg = TAILQ_NEXT(pg, listq);
                                continue;
                        }
                        off = pg->offset;
                } else if (pg == NULL ||
                    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
                        off += PAGE_SIZE;
                        if (off < endoff) {
                                pg = uvm_pagelookup(uobj, off);
                        }
                        continue;
                }

                /*
                 * if the current page needs to be cleaned and it's busy,
                 * wait for it to become unbusy.
                 */

                yield = (curproc->p_cpu->ci_schedstate.spc_flags &
                    SPCF_SHOULDYIELD) && !pagedaemon;
                if (pg->flags & PG_BUSY || yield) {
                        KASSERT(!pagedaemon);
                        UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
                        if (by_list) {
                                TAILQ_INSERT_BEFORE(pg, &curmp, listq);
                                UVMHIST_LOG(ubchist, "curmp next %p",
                                    TAILQ_NEXT(&curmp, listq), 0,0,0);
                        }
                        if (yield) {
                                simple_unlock(slock);
                                preempt(NULL);
                                simple_lock(slock);
                        } else {
                                pg->flags |= PG_WANTED;
                                UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
                                simple_lock(slock);
                        }
                        if (by_list) {
                                UVMHIST_LOG(ubchist, "after next %p",
                                    TAILQ_NEXT(&curmp, listq), 0,0,0);
                                pg = TAILQ_NEXT(&curmp, listq);
                                TAILQ_REMOVE(&uobj->memq, &curmp, listq);
                        } else {
                                pg = uvm_pagelookup(uobj, off);
                        }
                        continue;
                }

                /*
                 * if we're freeing, remove all mappings of the page now.
                 * if we're cleaning, check if the page needs to be cleaned.
                 */

                if (flags & PGO_FREE) {
                        pmap_page_protect(pg, VM_PROT_NONE);
                }
                if (flags & PGO_CLEANIT) {
                        needs_clean = pmap_clear_modify(pg) ||
                            (pg->flags & PG_CLEAN) == 0;
                        pg->flags |= PG_CLEAN;
                } else {
                        needs_clean = FALSE;
                }

                /*
                 * if we're cleaning, build a cluster.
                 * the cluster will consist of pages which are currently dirty,
                 * but they will be returned to us marked clean.
                 * if not cleaning, just operate on the one page.
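                 * the cluster is assembled by scanning for adjacent dirty
                 * pages backward and then forward from the page of
                 * interest, up to maxpages in total.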
                 */

                if (needs_clean) {
                        wasclean = FALSE;
                        memset(pgs, 0, sizeof(pgs));
                        pg->flags |= PG_BUSY;
                        UVM_PAGE_OWN(pg, "genfs_putpages");

                        /*
                         * first look backward.
                         */

                        npages = MIN(maxpages >> 1, off >> PAGE_SHIFT);
                        nback = npages;
                        uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
                            UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
                        if (nback) {
                                memmove(&pgs[0], &pgs[npages - nback],
                                    nback * sizeof(pgs[0]));
                                if (npages - nback < nback)
                                        memset(&pgs[nback], 0,
                                            (npages - nback) * sizeof(pgs[0]));
                                else
                                        memset(&pgs[npages - nback], 0,
                                            nback * sizeof(pgs[0]));
                        }

                        /*
                         * then plug in our page of interest.
                         */

                        pgs[nback] = pg;

                        /*
                         * then look forward to fill in the remaining space in
                         * the array of pages.
                         */

                        npages = maxpages - nback - 1;
                        uvn_findpages(uobj, off + PAGE_SIZE, &npages,
                            &pgs[nback + 1],
                            UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
                        npages += nback + 1;
                } else {
                        pgs[0] = pg;
                        npages = 1;
                        nback = 0;
                }

                /*
                 * apply FREE or DEACTIVATE options if requested.
                 */

                if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
                        uvm_lock_pageq();
                }
                for (i = 0; i < npages; i++) {
                        tpg = pgs[i];
                        KASSERT(tpg->uobject == uobj);
                        if (by_list && tpg == TAILQ_NEXT(pg, listq))
                                pg = tpg;
                        if (tpg->offset < startoff || tpg->offset >= endoff)
                                continue;
                        if (flags & PGO_DEACTIVATE &&
                            (tpg->pqflags & PQ_INACTIVE) == 0 &&
                            tpg->wire_count == 0) {
                                (void) pmap_clear_reference(tpg);
                                uvm_pagedeactivate(tpg);
                        } else if (flags & PGO_FREE) {
                                pmap_page_protect(tpg, VM_PROT_NONE);
                                if (tpg->flags & PG_BUSY) {
                                        tpg->flags |= freeflag;
                                        if (pagedaemon) {
                                                uvmexp.paging++;
                                                uvm_pagedequeue(tpg);
                                        }
                                } else {

                                        /*
                                         * ``page is not busy''
                                         * implies that npages is 1
                                         * and needs_clean is false.
                                         */

                                        nextpg = TAILQ_NEXT(tpg, listq);
                                        uvm_pagefree(tpg);
                                }
                        }
                }
                if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
                        uvm_unlock_pageq();
                }
                if (needs_clean) {

                        /*
                         * start the i/o.  if we're traversing by list,
                         * keep our place in the list with a marker page.
                         */

                        if (by_list) {
                                TAILQ_INSERT_AFTER(&uobj->memq, pg, &curmp,
                                    listq);
                        }
                        simple_unlock(slock);
                        error = GOP_WRITE(vp, pgs, npages, flags);
                        simple_lock(slock);
                        if (by_list) {
                                pg = TAILQ_NEXT(&curmp, listq);
                                TAILQ_REMOVE(&uobj->memq, &curmp, listq);
                        }
                        if (error) {
                                break;
                        }
                        if (by_list) {
                                continue;
                        }
                }

                /*
                 * find the next page and continue if there was no error.
                 */

                if (by_list) {
                        if (nextpg) {
                                pg = nextpg;
                                nextpg = NULL;
                        } else {
                                pg = TAILQ_NEXT(pg, listq);
                        }
                } else {
                        off += (npages - nback) << PAGE_SHIFT;
                        if (off < endoff) {
                                pg = uvm_pagelookup(uobj, off);
                        }
                }
        }
        if (by_list) {
                TAILQ_REMOVE(&uobj->memq, &endmp, listq);
                PRELE(curproc);
        }

        /*
         * if we're cleaning and there was nothing to clean,
         * take us off the syncer list.  if we started any i/o
         * and we're doing sync i/o, wait for all writes to finish.
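         * v_numoutput counts the writes still in flight; setting VBWAIT
         * asks the i/o completion path to wake us when it drains to zero.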
         */

        s = splbio();
        if ((flags & PGO_CLEANIT) && wasclean &&
            startoff == 0 && endoff == trunc_page(LLONG_MAX) &&
            LIST_FIRST(&vp->v_dirtyblkhd) == NULL &&
            (vp->v_flag & VONWORKLST)) {
                vp->v_flag &= ~VONWORKLST;
                LIST_REMOVE(vp, v_synclist);
        }
        splx(s);
        if (!wasclean && !async) {
                s = splbio();
                while (vp->v_numoutput != 0) {
                        vp->v_flag |= VBWAIT;
                        UVM_UNLOCK_AND_WAIT(&vp->v_numoutput, slock, FALSE,
                            "genput2", 0);
                        simple_lock(slock);
                }
                splx(s);
        }
        simple_unlock(&uobj->vmobjlock);
        return (error);
}

int
genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
{
        int s, error, run;
        int fs_bshift, dev_bshift;
        vaddr_t kva;
        off_t eof, offset, startoffset;
        size_t bytes, iobytes, skipbytes;
        daddr_t lbn, blkno;
        struct vm_page *pg;
        struct buf *mbp, *bp;
        struct vnode *devvp;
        boolean_t async = (flags & PGO_SYNCIO) == 0;
        UVMHIST_FUNC("genfs_gop_write"); UVMHIST_CALLED(ubchist);

        UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
            vp, pgs, npages, flags);

        GOP_SIZE(vp, vp->v_size, &eof);
        if (vp->v_type == VREG) {
                fs_bshift = vp->v_mount->mnt_fs_bshift;
                dev_bshift = vp->v_mount->mnt_dev_bshift;
        } else {
                fs_bshift = DEV_BSHIFT;
                dev_bshift = DEV_BSHIFT;
        }
        error = 0;
        pg = pgs[0];
        startoffset = pg->offset;
        bytes = MIN(npages << PAGE_SHIFT, eof - startoffset);
        skipbytes = 0;
        KASSERT(bytes != 0);

        kva = uvm_pagermapin(pgs, npages,
            UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);

        s = splbio();
        vp->v_numoutput += 2;
        mbp = pool_get(&bufpool, PR_WAITOK);
        UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
            vp, mbp, vp->v_numoutput, bytes);
        splx(s);
        mbp->b_bufsize = npages << PAGE_SHIFT;
        mbp->b_data = (void *)kva;
        mbp->b_resid = mbp->b_bcount = bytes;
        mbp->b_flags = B_BUSY|B_WRITE|B_AGE|(async ? (B_CALL|B_ASYNC) : 0);
        mbp->b_iodone = uvm_aio_biodone;
        mbp->b_vp = vp;
        LIST_INIT(&mbp->b_dep);

        bp = NULL;
        for (offset = startoffset;
            bytes > 0;
            offset += iobytes, bytes -= iobytes) {
                lbn = offset >> fs_bshift;
                error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
                if (error) {
                        UVMHIST_LOG(ubchist, "VOP_BMAP() -> %d", error,0,0,0);
                        skipbytes += bytes;
                        bytes = 0;
                        break;
                }

                iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
                    bytes);
                if (blkno == (daddr_t)-1) {
                        skipbytes += iobytes;
                        continue;
                }

                /* if it's really one i/o, don't make a second buf */
                if (offset == startoffset && iobytes == bytes) {
                        bp = mbp;
                } else {
                        s = splbio();
                        vp->v_numoutput++;
                        bp = pool_get(&bufpool, PR_WAITOK);
                        UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
                            vp, bp, vp->v_numoutput, 0);
                        splx(s);
                        bp->b_data = (char *)kva +
                            (vaddr_t)(offset - pg->offset);
                        bp->b_resid = bp->b_bcount = iobytes;
                        bp->b_flags = B_BUSY|B_WRITE|B_CALL|B_ASYNC;
                        bp->b_iodone = uvm_aio_biodone1;
                        bp->b_vp = vp;
                        LIST_INIT(&bp->b_dep);
                }
                bp->b_lblkno = 0;
                bp->b_private = mbp;
                if (devvp->v_type == VBLK) {
                        bp->b_dev = devvp->v_rdev;
                }

                /* adjust physical blkno for partial blocks */
                bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
                    dev_bshift);
                UVMHIST_LOG(ubchist,
                    "vp %p offset 0x%x bcount 0x%x blkno 0x%x",
                    vp, offset, bp->b_bcount, bp->b_blkno);
                VOP_STRATEGY(bp);
        }
        if (skipbytes) {
                UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
                s = splbio();
                if (error) {
                        mbp->b_flags |= B_ERROR;
                        mbp->b_error = error;
                }
                mbp->b_resid -= skipbytes;
                if (mbp->b_resid == 0) {
                        biodone(mbp);
                }
                splx(s);
        }
        if (async) {
                UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
                return (0);
        }
        UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
        error = biowait(mbp);
        uvm_aio_aiodone(mbp);
        UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
        return (error);
}

/*
 * VOP_PUTPAGES() for vnodes which never have pages.
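 * just asserts that there are none and releases the interlock that
 * putpages callers pass in locked.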
 */

int
genfs_null_putpages(void *v)
{
        struct vop_putpages_args /* {
                struct vnode *a_vp;
                voff_t a_offlo;
                voff_t a_offhi;
                int a_flags;
        } */ *ap = v;
        struct vnode *vp = ap->a_vp;

        KASSERT(vp->v_uobj.uo_npages == 0);
        simple_unlock(&vp->v_interlock);
        return (0);
}

void
genfs_node_init(struct vnode *vp, struct genfs_ops *ops)
{
        struct genfs_node *gp = VTOG(vp);

        lockinit(&gp->g_glock, PINOD, "glock", 0, 0);
        gp->g_op = ops;
}

void
genfs_size(struct vnode *vp, off_t size, off_t *eobp)
{
        int bsize;

        bsize = 1 << vp->v_mount->mnt_fs_bshift;
        *eobp = (size + bsize - 1) & ~(bsize - 1);
}

int
genfs_compat_getpages(void *v)
{
        struct vop_getpages_args /* {
                struct vnode *a_vp;
                voff_t a_offset;
                struct vm_page **a_m;
                int *a_count;
                int a_centeridx;
                vm_prot_t a_access_type;
                int a_advice;
                int a_flags;
        } */ *ap = v;

        off_t origoffset;
        struct vnode *vp = ap->a_vp;
        struct uvm_object *uobj = &vp->v_uobj;
        struct vm_page *pg, **pgs;
        vaddr_t kva;
        int i, error, orignpages, npages;
        struct iovec iov;
        struct uio uio;
        struct ucred *cred = curproc->p_ucred;
        boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;

        error = 0;
        origoffset = ap->a_offset;
        orignpages = *ap->a_count;
        pgs = ap->a_m;

        if (write && (vp->v_flag & VONWORKLST) == 0) {
                vn_syncer_add_to_worklist(vp, filedelay);
        }
        if (ap->a_flags & PGO_LOCKED) {
                uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
                    UFP_NOWAIT|UFP_NOALLOC|(write ? UFP_NORDONLY : 0));

                return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
        }
        if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
                simple_unlock(&uobj->vmobjlock);
                return (EINVAL);
        }
        npages = orignpages;
        uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
        simple_unlock(&uobj->vmobjlock);
        kva = uvm_pagermapin(pgs, npages,
            UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
        for (i = 0; i < npages; i++) {
                pg = pgs[i];
                if ((pg->flags & PG_FAKE) == 0) {
                        continue;
                }
                iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
                iov.iov_len = PAGE_SIZE;
                uio.uio_iov = &iov;
                uio.uio_iovcnt = 1;
                uio.uio_offset = origoffset + (i << PAGE_SHIFT);
                uio.uio_segflg = UIO_SYSSPACE;
                uio.uio_rw = UIO_READ;
                uio.uio_resid = PAGE_SIZE;
                uio.uio_procp = curproc;
                error = VOP_READ(vp, &uio, 0, cred);
                if (error) {
                        break;
                }
                if (uio.uio_resid) {
                        memset(iov.iov_base, 0, uio.uio_resid);
                }
        }
        uvm_pagermapout(kva, npages);
        simple_lock(&uobj->vmobjlock);
        uvm_lock_pageq();
        for (i = 0; i < npages; i++) {
                pg = pgs[i];
                if (error && (pg->flags & PG_FAKE) != 0) {
                        pg->flags |= PG_RELEASED;
                } else {
                        pmap_clear_modify(pg);
                        uvm_pageactivate(pg);
                }
        }
        if (error) {
                uvm_page_unbusy(pgs, npages);
        }
        uvm_unlock_pageq();
        simple_unlock(&uobj->vmobjlock);
        return (error);
}

int
genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
    int flags)
{
        off_t offset;
        struct iovec iov;
        struct uio uio;
        struct ucred *cred = curproc->p_ucred;
        struct buf *bp;
        vaddr_t kva;
        int s, error;

        offset = pgs[0]->offset;
        kva = uvm_pagermapin(pgs, npages,
            UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);

        iov.iov_base = (void *)kva;
        iov.iov_len = npages << PAGE_SHIFT;
        uio.uio_iov = &iov;
        uio.uio_iovcnt = 1;
        uio.uio_offset = offset;
        uio.uio_segflg = UIO_SYSSPACE;
        uio.uio_rw = UIO_WRITE;
        uio.uio_resid = npages << PAGE_SHIFT;
        uio.uio_procp = curproc;
        error = VOP_WRITE(vp, &uio, 0, cred);

        s = splbio();
        vp->v_numoutput++;
        bp = pool_get(&bufpool, PR_WAITOK);
        splx(s);

        bp->b_flags = B_BUSY | B_WRITE | B_AGE;
        bp->b_vp = vp;
        bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
        bp->b_data = (char *)kva;
        bp->b_bcount = npages << PAGE_SHIFT;
        bp->b_bufsize = npages << PAGE_SHIFT;
        bp->b_resid = 0;
        LIST_INIT(&bp->b_dep);
        if (error) {
                bp->b_flags |= B_ERROR;
                bp->b_error = error;
        }
        uvm_aio_aiodone(bp);
        return (error);
}

static void
filt_genfsdetach(struct knote *kn)
{
        struct vnode *vp = (struct vnode *)kn->kn_hook;

        /* XXXLUKEM lock the struct? */
        SLIST_REMOVE(&vp->v_klist, kn, knote, kn_selnext);
}

static int
filt_genfsread(struct knote *kn, long hint)
{
        struct vnode *vp = (struct vnode *)kn->kn_hook;

        /*
         * filesystem is gone, so set the EOF flag and schedule
         * the knote for deletion.
         */
        if (hint == NOTE_REVOKE) {
                kn->kn_flags |= (EV_EOF | EV_ONESHOT);
                return (1);
        }

        /* XXXLUKEM lock the struct? */
        kn->kn_data = vp->v_size - kn->kn_fp->f_offset;
        return (kn->kn_data != 0);
}

static int
filt_genfsvnode(struct knote *kn, long hint)
{

        if (kn->kn_sfflags & hint)
                kn->kn_fflags |= hint;
        if (hint == NOTE_REVOKE) {
                kn->kn_flags |= EV_EOF;
                return (1);
        }
        return (kn->kn_fflags != 0);
}

static const struct filterops genfsread_filtops =
        { 1, NULL, filt_genfsdetach, filt_genfsread };
static const struct filterops genfsvnode_filtops =
        { 1, NULL, filt_genfsdetach, filt_genfsvnode };

int
genfs_kqfilter(void *v)
{
        struct vop_kqfilter_args /* {
                struct vnode *a_vp;
                struct knote *a_kn;
        } */ *ap = v;
        struct vnode *vp;
        struct knote *kn;

        vp = ap->a_vp;
        kn = ap->a_kn;
        switch (kn->kn_filter) {
        case EVFILT_READ:
                kn->kn_fop = &genfsread_filtops;
                break;
        case EVFILT_VNODE:
                kn->kn_fop = &genfsvnode_filtops;
                break;
        default:
                return (1);
        }

        kn->kn_hook = vp;

        /* XXXLUKEM lock the struct? */
        SLIST_INSERT_HEAD(&vp->v_klist, kn, kn_selnext);

        return (0);
}
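
/*
 * Illustrative sketch: a filesystem typically points generic operations
 * at these routines in its vnodeop entry table, along the lines of
 *
 *      const struct vnodeopv_entry_desc somefs_vnodeop_entries[] = {
 *              { &vop_poll_desc, genfs_poll },
 *              { &vop_fsync_desc, genfs_fsync },
 *              { &vop_getpages_desc, genfs_getpages },
 *              { &vop_putpages_desc, genfs_putpages },
 *              ...
 *      };
 *
 * and calls genfs_node_init() with its struct genfs_ops (supplying
 * gop_size, gop_alloc and gop_write) when it sets up each vnode.
 * "somefs" here is a made-up name for illustration.
 */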