/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct ucred *cred, struct sysmsg *msg);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);

struct fileops vnode_fileops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};

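/*
 * Illustration (not compiled): the generic file layer reaches the
 * routines above by dispatching through fp->f_ops, which points at
 * vnode_fileops for vnode-backed files.  A minimal sketch of such a
 * dispatch, using only the fileops fields initialized above; the
 * helper name example_file_read() is hypothetical.
 */
#if 0
static int
example_file_read(struct file *fp, struct uio *uio, struct ucred *cred)
{
	/* indirect call resolves to vn_read() for vnode-backed files */
	return (fp->f_ops->fo_read(fp, uio, cred, 0));
}
#endif
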
/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_NOPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: If the caller wishes the namecache entry to be operated with
 *	 a shared lock it must use NLC_SHAREDLOCK.  If NLC_LOCKVP is set
 *	 then the vnode lock will also be shared.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 *	 is also installed in the file pointer.
 */
int
vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
{
	struct vnode *vp;
	struct ucred *cred = nd->nl_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int error;
	u_int flags;
	uint64_t osize;
	struct mount *mp;

	/*
	 * Certain combinations are illegal
	 */
	if ((fmode & (FWRITE | O_TRUNC)) == O_TRUNC)
		return(EACCES);

	/*
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_nch will be returned.
	 *
	 * The result of this section should be a locked vnode.
	 *
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	 */
	nd->nl_flags |= NLC_OPEN;
	if (fmode & O_APPEND)
		nd->nl_flags |= NLC_APPEND;
	if (fmode & O_TRUNC)
		nd->nl_flags |= NLC_TRUNCATE;
	if (fmode & FREAD)
		nd->nl_flags |= NLC_READ;
	if (fmode & FWRITE)
		nd->nl_flags |= NLC_WRITE;
	if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
		nd->nl_flags |= NLC_FOLLOW;

	if (fmode & O_CREAT) {
		/*
		 * CONDITIONAL CREATE FILE CASE
		 *
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		 * is returned.
		 */
		nd->nl_flags |= NLC_CREATE;
		nd->nl_flags |= NLC_REFDVP;
		bwillinode(1);
		error = nlookup(nd);
	} else {
		/*
		 * NORMAL OPEN FILE CASE
		 */
		error = nlookup(nd);
	}

	if (error)
		return (error);

	/*
	 * split case to allow us to re-resolve and retry the ncp in case
	 * we get ESTALE.
	 */
again:
	if (fmode & O_CREAT) {
		if (nd->nl_nch.ncp->nc_vp == NULL) {
			if ((error = ncp_writechk(&nd->nl_nch)) != 0)
				return (error);
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp,
					    nd->nl_cred, vap);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			/* locked vnode is returned */
		} else {
			if (fmode & O_EXCL) {
				error = EEXIST;
			} else {
				error = cache_vget(&nd->nl_nch, cred,
						   LK_EXCLUSIVE, &vp);
			}
			if (error)
				return (error);
			fmode &= ~O_CREAT;
		}
	} else {
		if (nd->nl_flags & NLC_SHAREDLOCK) {
			error = cache_vget(&nd->nl_nch, cred, LK_SHARED, &vp);
		} else {
			error = cache_vget(&nd->nl_nch, cred,
					   LK_EXCLUSIVE, &vp);
		}
		if (error)
			return (error);
	}

	/*
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_nch is left intact.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if (vp->v_type != VDIR && (fmode & O_DIRECTORY)) {
		error = ENOTDIR;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp, &nd->nl_nch);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					if (nd->nl_flags & NLC_SHAREDLOCK) {
						cache_unlock(&nd->nl_nch);
						cache_lock(&nd->nl_nch);
					}
					cache_setunresolved(&nd->nl_nch);
					error = cache_resolve(&nd->nl_nch,
							      cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
		}
	}
	if (fmode & O_TRUNC) {
		vn_unlock(vp);				/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		osize = vp->v_filesize;
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR(vp, vap, cred);
		if (error)
			goto bad;
		error = VOP_GETATTR(vp, vap);
		if (error)
			goto bad;
		mp = vq_vptomp(vp);
		VFS_ACCOUNT(mp, vap->va_uid, vap->va_gid, -osize);
	}

	/*
	 * Set or clear VSWAPCACHE on the vp based on nd->nl_nch.ncp->nc_flag.
	 * These particular bits are tracked all the way from the root.
	 *
	 * NOTE: Might not work properly on NFS servers due to the
	 * disconnected namecache.
	 */
	flags = nd->nl_nch.ncp->nc_flag;
	if ((flags & (NCF_UF_CACHE | NCF_UF_PCACHE)) &&
	    (flags & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE)) == 0) {
		vsetflags(vp, VSWAPCACHE);
	} else {
		vclrflags(vp, VSWAPCACHE);
	}

	/*
	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
	 * associated with the fp yet so we own it clean.
	 *
	 * f_nchandle inherits nl_nch.  This used to be necessary only for
	 * directories but now we do it unconditionally so f*() ops
	 * such as fchmod() can access the actual namespace that was
	 * used to open the file.
	 */
	if (fp) {
		if (nd->nl_flags & NLC_APPENDONLY)
			fmode |= FAPPENDONLY;
		fp->f_nchandle = nd->nl_nch;
		cache_zero(&nd->nl_nch);
		cache_unlock(&fp->f_nchandle);
	}

	/*
	 * Get rid of nl_nch.  vn_open does not return it (it returns the
	 * vnode or the file pointer).  Note: we can't leave nl_nch locked
	 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
	 * on /dev/ttyd0
	 */
	if (nd->nl_nch.ncp)
		cache_put(&nd->nl_nch);

	error = VOP_OPEN(vp, fmode, cred, fp);
	if (error) {
		/*
		 * Setting f_ops to &badfileops will prevent the descriptor
		 * code from trying to close and release the vnode; since
		 * the open failed we do not want to call close.
		 */
		if (fp) {
			fp->f_data = NULL;
			fp->f_ops = &badfileops;
		}
		goto bad;
	}

#if 0
	/*
	 * Assert that VREG files have been setup for vmio.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));
#endif

	/*
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	 */
	if (fp == NULL) {
		nd->nl_open_vp = vp;
		nd->nl_vp_fmode = fmode;
		if ((nd->nl_flags & NLC_LOCKVP) == 0)
			vn_unlock(vp);
	} else {
		vput(vp);
	}
	return (0);
bad:
	if (vp)
		vput(vp);
	return (error);
}

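/*
 * Illustration (not compiled): the caller contract described in the
 * vn_open() header comment, opening a path for reading with fp == NULL.
 * A minimal sketch; the helper name example_open_for_read() is
 * hypothetical.  Note nl_open_vp must be cleared before nlookup_done()
 * or the cleanup will release the vnode we want to keep.
 */
#if 0
static int
example_open_for_read(const char *path, struct vnode **vpp)
{
	struct nlookupdata nd;
	int error;

	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = vn_open(&nd, NULL, FREAD, 0);
	if (error == 0) {
		/* referenced; unlocked unless NLC_LOCKVP was set */
		*vpp = nd.nl_open_vp;
		nd.nl_open_vp = NULL;
	}
	nlookup_done(&nd);
	return (error);
}
#endif
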
int
vn_opendisk(const char *devname, int fmode, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	if (strncmp(devname, "/dev/", 5) == 0)
		devname += 5;
	if ((vp = getsynthvnode(devname)) == NULL) {
		error = ENODEV;
	} else {
		error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL);
		vn_unlock(vp);
		if (error) {
			vrele(vp);
			vp = NULL;
		}
	}
	*vpp = vp;
	return (error);
}

/*
 * Check for write permissions on the specified vnode.  nch may be NULL.
 */
int
vn_writechk(struct vnode *vp, struct nchandle *nch)
{
	/*
	 * If there's shared text associated with
	 * the vnode, we can't allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);

	/*
	 * If the vnode represents a regular file, check the mount
	 * point via the nch.  This may be a different mount point
	 * than the one embedded in the vnode (e.g. nullfs).
	 *
	 * We can still write to non-regular files (e.g. devices)
	 * via read-only mounts.
	 */
	if (nch && nch->ncp && vp->v_type == VREG)
		return (ncp_writechk(nch));
	return (0);
}

/*
 * Check whether the underlying mount is read-only.  The mount point
 * referenced by the namecache may be different from the mount point
 * used by the underlying vnode in the case of NULLFS, so a separate
 * check is needed.
 */
int
ncp_writechk(struct nchandle *nch)
{
	if (nch->mount && (nch->mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	return(0);
}

/*
 * Vnode close call
 *
 * MPSAFE
 */
int
vn_close(struct vnode *vp, int flags)
{
	int error;

	error = vn_lock(vp, LK_SHARED | LK_RETRY | LK_FAILRECLAIM);
	if (error == 0) {
		error = VOP_CLOSE(vp, flags);
		vn_unlock(vp);
	}
	vrele(vp);
	return (error);
}

/*
 * Sequential heuristic.
 *
 * MPSAFE (f_seqcount and f_nextoff are allowed to race)
 */
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;

		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}

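/*
 * Worked example of the heuristic above (illustrative only, assuming
 * the usual constant values BKVASIZE = 16384, IO_SEQMAX = 0x7F and
 * IO_SEQSHIFT = 16): a read of 65536 bytes that continues from
 * f_nextoff bumps f_seqcount by (65536 + 16383) / 16384 = 4, and the
 * return value encodes the clamped count in the high ioflag bits as
 * f_seqcount << IO_SEQSHIFT, where the filesystem can use it to scale
 * read-ahead.
 */
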
/*
 * get - lock and return the f_offset field.
 * set - set and unlock the f_offset field.
 *
 * These routines serve the dual purpose of serializing access to the
 * f_offset field (at least on i386) and guaranteeing operational integrity
 * when multiple read()ers and write()ers are present on the same fp.
 *
 * MPSAFE
 */
static __inline off_t
vn_get_fpf_offset(struct file *fp)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * Shortcut critical path.
	 */
	flags = fp->f_flag & ~FOFFSETLOCK;
	if (atomic_cmpset_int(&fp->f_flag, flags, flags | FOFFSETLOCK))
		return(fp->f_offset);

	/*
	 * The hard way
	 */
	for (;;) {
		flags = fp->f_flag;
		if (flags & FOFFSETLOCK) {
			nflags = flags | FOFFSETWAKE;
			tsleep_interlock(&fp->f_flag, 0);
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				tsleep(&fp->f_flag, PINTERLOCKED, "fpoff", 0);
		} else {
			nflags = flags | FOFFSETLOCK;
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				break;
		}
	}
	return(fp->f_offset);
}

/*
 * MPSAFE
 */
static __inline void
vn_set_fpf_offset(struct file *fp, off_t offset)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * We hold the lock so we can set the offset without interference.
	 */
	fp->f_offset = offset;

	/*
	 * Normal release is already a reasonably critical path.
	 */
	for (;;) {
		flags = fp->f_flag;
		nflags = flags & ~(FOFFSETLOCK | FOFFSETWAKE);
		if (atomic_cmpset_int(&fp->f_flag, flags, nflags)) {
			if (flags & FOFFSETWAKE)
				wakeup(&fp->f_flag);
			break;
		}
	}
}

/*
 * MPSAFE
 */
static __inline off_t
vn_poll_fpf_offset(struct file *fp)
{
#if defined(__x86_64__)
	return(fp->f_offset);
#else
	off_t off = vn_get_fpf_offset(fp);
	vn_set_fpf_offset(fp, off);
	return(off);
#endif
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 *
 * MPSAFE
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = curthread;
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		vn_unlock(vp);
	return (error);
}

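/*
 * Illustration (not compiled): a typical in-kernel use of vn_rdwr(),
 * reading the first 512 bytes of an already-referenced vnode into a
 * kernel buffer.  The helper name example_read_header() is
 * hypothetical.
 */
#if 0
static int
example_read_header(struct vnode *vp, struct ucred *cred)
{
	char buf[512];
	int resid;
	int error;

	/* vn_rdwr() locks vp for us since IO_NODELOCKED is not passed */
	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0,
			UIO_SYSSPACE, 0, cred, &resid);
	if (error == 0 && resid != 0)
		error = EINVAL;		/* short read */
	return (error);
}
#endif
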
/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call lwkt_user_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 *
 * MPSAFE
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
		 off_t offset, enum uio_seg segflg, int ioflg,
		 struct ucred *cred, int *aresid)
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (vp->v_type == VREG) {
			switch(rw) {
			case UIO_READ:
				bwillread(chunk);
				break;
			case UIO_WRITE:
				bwillwrite(chunk);
				break;
			}
		}
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
				ioflg, cred, aresid);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		lwkt_user_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}

/*
 * File pointers can no longer get ripped up by revoke so
 * we don't need to lock access to the vp.
 *
 * f_offset updates are not guaranteed against multiple readers
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_SHARED | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);

	error = VOP_READ(vp, uio, ioflag, cred);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}

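/*
 * Illustration (not compiled): callers can override per-descriptor
 * behavior for a single operation via the flags argument handled in
 * vn_read()/vn_write() above, e.g. forcing a non-blocking,
 * explicit-offset read.  The helper name example_pread_nonblock() is
 * hypothetical.
 */
#if 0
static int
example_pread_nonblock(struct file *fp, struct uio *uio, struct ucred *cred)
{
	/*
	 * O_FNONBLOCKING selects IO_NDELAY regardless of FNONBLOCK,
	 * and O_FOFFSET means uio->uio_offset is used as-is instead
	 * of the fp's seek position.
	 */
	return (fp->f_ops->fo_read(fp, uio, cred,
				   O_FNONBLOCKING | O_FOFFSET));
}
#endif
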
/*
 * MPSAFE
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	return (error);
}

/*
 * MPSAFE
 */
int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
	struct vattr vattr;
	struct vattr *vap;
	int error;
	u_short mode;
	cdev_t dev;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare1 = 0;
	sb->st_qspare2 = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDATABASE:
		mode |= S_IFDB;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/*
		 * This is a cosmetic change, symlinks do not have a mode.
		 * Adjust mode (not sb->st_mode) here, since sb->st_mode
		 * is overwritten from mode after the switch.
		 */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = dev2udev(vp->v_rdev);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 *
	 * si_lastread and si_lastwrite are recorded against time_uptime,
	 * so convert back to wall-clock time by subtracting the elapsed
	 * uptime from time_second.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		dev = vp->v_rdev;
		if (dev != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = time_second -
							  (time_uptime -
							   dev->si_lastread);
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = time_second -
							  (time_uptime -
							   dev->si_lastwrite);
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object.  In some filesystem types, this may vary from file
	 * to file"
	 * Default to PAGE_SIZE after much discussion.
	 */
	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		/*
		 * XXX this is broken.  If the device is not yet open (aka
		 * stat() call, aka v_rdev == NULL), how are we supposed
		 * to get a valid block size out of it?
		 */
		dev = vp->v_rdev;

		sb->st_blksize = dev->si_bsize_best;
		if (sb->st_blksize < dev->si_bsize_phys)
			sb->st_blksize = dev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;

	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
	if (error)
		sb->st_gen = 0;
	else
		sb->st_gen = (u_int32_t)vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	return (0);
}

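/*
 * Worked example of the device timestamp conversion above (illustrative
 * numbers): with time_second = 1000000, time_uptime = 5000 and
 * si_lastread = 4400, the device was last read 5000 - 4400 = 600
 * seconds ago, so st_atime becomes 1000000 - 600 = 999400.
 */
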
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred,
	 struct sysmsg *msg)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vnode *ovp;
	struct vattr vattr;
	int error;
	off_t size;

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr);
			if (error)
				break;
			size = vattr.va_size;
			if ((vp->v_flag & VNOTSEEKABLE) == 0)
				size -= vn_poll_fpf_offset(fp);
			if (size > 0x7FFFFFFF)
				size = 0x7FFFFFFF;
			*(int *)data = size;
			error = 0;
			break;
		}
		if (com == FIOASYNC) {				/* XXX */
			error = 0;				/* XXX */
			break;
		}
		/* fall into ... */
	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK) {
				error = ENOTTY;
				break;
			}
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			error = 0;
			break;
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred, msg);
		if (error == 0 && com == TIOCSCTTY) {
			struct proc *p = curthread->td_proc;
			struct session *sess;

			if (p == NULL) {
				error = ENOTTY;
				break;
			}

			get_mplock();
			sess = p->p_session;
			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp) {
				error = 0;
				rel_mplock();
				break;
			}

			/* Get rid of reference to old control tty */
			ovp = sess->s_ttyvp;
			vref(vp);
			sess->s_ttyvp = vp;
			if (ovp)
				vrele(ovp);
			rel_mplock();
		}
		break;
	}
	return (error);
}

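/*
 * Illustration (not compiled): FIONREAD as handled above reports the
 * number of bytes between the current seek position and EOF for a
 * regular file, clamped to INT_MAX.  A minimal sketch dispatching
 * through the fileops table; the helper name example_fionread() is
 * hypothetical.
 */
#if 0
static int
example_fionread(struct file *fp, struct ucred *cred, struct sysmsg *msg)
{
	int nread;
	int error;

	/* for a regular file: nread = file size - current f_offset */
	error = fp->f_ops->fo_ioctl(fp, FIONREAD, (caddr_t)&nread,
				    cred, msg);
	return (error);
}
#endif
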
/*
 * Obtain the requested vnode lock
 *
 *	LK_RETRY	Automatically retry on timeout
 *	LK_FAILRECLAIM	Fail if the vnode is being reclaimed
 *
 * Failures will occur if the vnode is undergoing recyclement, but not
 * all callers expect that the function will fail, so the caller must
 * pass LK_FAILRECLAIM if it wants to process an error code.
 *
 * Errors can occur for other reasons if you pass in other LK_ flags,
 * regardless of whether you pass in LK_FAILRECLAIM.
 */
int
#ifndef	DEBUG_LOCKS
vn_lock(struct vnode *vp, int flags)
#else
debug_vn_lock(struct vnode *vp, int flags, const char *filename, int line)
#endif
{
	int error;

	do {
#ifdef	DEBUG_LOCKS
		vp->filename = filename;
		vp->line = line;
		error = debuglockmgr(&vp->v_lock, flags,
				     "vn_lock", filename, line);
#else
		error = lockmgr(&vp->v_lock, flags);
#endif
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	/*
	 * Because we (had better!) have a ref on the vnode, once it
	 * goes to VRECLAIMED state it will not be recycled until all
	 * refs go away.  So we can just check the flag.
	 */
	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
		if (flags & LK_FAILRECLAIM) {
			lockmgr(&vp->v_lock, LK_RELEASE);
			error = ENOENT;
		}
	}
	return (error);
}

#ifdef DEBUG_VN_UNLOCK

void
debug_vn_unlock(struct vnode *vp, const char *filename, int line)
{
	kprintf("vn_unlock from %s:%d\n", filename, line);
	lockmgr(&vp->v_lock, LK_RELEASE);
}

#else

void
vn_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

#endif

/*
 * MPSAFE
 */
int
vn_islocked(struct vnode *vp)
{
	return (lockstatus(&vp->v_lock, curthread));
}

/*
 * Return the lock status of a vnode and unlock the vnode
 * if we owned the lock.  This is not a boolean; if the
 * caller cares what the lock status is the caller must
 * check the various possible values.
 *
 * This only unlocks exclusive locks held by the caller,
 * it will NOT unlock shared locks (there is no way to
 * tell who the shared lock belongs to).
 *
 * MPSAFE
 */
int
vn_islocked_unlock(struct vnode *vp)
{
	int vpls;

	vpls = lockstatus(&vp->v_lock, curthread);
	if (vpls == LK_EXCLUSIVE)
		lockmgr(&vp->v_lock, LK_RELEASE);
	return(vpls);
}

/*
 * Restore a vnode lock that we previously released via
 * vn_islocked_unlock().  This is a NOP if we did not
 * own the original lock.
 *
 * MPSAFE
 */
void
vn_islocked_relock(struct vnode *vp, int vpls)
{
	int error;

	if (vpls == LK_EXCLUSIVE)
		error = lockmgr(&vp->v_lock, vpls);
}

/*
 * MPSAFE
 */
static int
vn_closefile(struct file *fp)
{
	int error;

	fp->f_ops = &badfileops;
	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag);
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
	return (error);
}
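
/*
 * Illustration (not compiled): vn_islocked_unlock() and
 * vn_islocked_relock() are designed to be used as a pair around code
 * that must not hold the vnode lock.  The helper name and the blocking
 * operation are hypothetical.
 */
#if 0
static void
example_drop_lock_across_blocking_op(struct vnode *vp)
{
	int vpls;

	vpls = vn_islocked_unlock(vp);	/* drops only our exclusive lock */
	some_blocking_operation();	/* hypothetical */
	vn_islocked_relock(vp, vpls);	/* reacquires iff we dropped it */
}
#endif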