1 /* 2 * Copyright (c) 1982, 1986, 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * @(#)vfs_vnops.c 8.2 (Berkeley) 1/21/94 35 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $ 36 */ 37 38 #include <sys/param.h> 39 #include <sys/systm.h> 40 #include <sys/fcntl.h> 41 #include <sys/file.h> 42 #include <sys/stat.h> 43 #include <sys/proc.h> 44 #include <sys/priv.h> 45 #include <sys/mount.h> 46 #include <sys/nlookup.h> 47 #include <sys/vnode.h> 48 #include <sys/buf.h> 49 #include <sys/filio.h> 50 #include <sys/ttycom.h> 51 #include <sys/conf.h> 52 #include <sys/sysctl.h> 53 #include <sys/syslog.h> 54 55 #include <sys/thread2.h> 56 #include <sys/mplock2.h> 57 58 static int vn_closefile (struct file *fp); 59 static int vn_ioctl (struct file *fp, u_long com, caddr_t data, 60 struct ucred *cred, struct sysmsg *msg); 61 static int vn_read (struct file *fp, struct uio *uio, 62 struct ucred *cred, int flags); 63 static int vn_kqfilter (struct file *fp, struct knote *kn); 64 static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred); 65 static int vn_write (struct file *fp, struct uio *uio, 66 struct ucred *cred, int flags); 67 68 struct fileops vnode_fileops = { 69 .fo_read = vn_read, 70 .fo_write = vn_write, 71 .fo_ioctl = vn_ioctl, 72 .fo_kqfilter = vn_kqfilter, 73 .fo_stat = vn_statfile, 74 .fo_close = vn_closefile, 75 .fo_shutdown = nofo_shutdown 76 }; 77 78 /* 79 * Common code for vnode open operations. Check permissions, and call 80 * the VOP_NOPEN or VOP_NCREATE routine. 81 * 82 * The caller is responsible for setting up nd with nlookup_init() and 83 * for cleaning it up with nlookup_done(), whether we return an error 84 * or not. 85 * 86 * On success nd->nl_open_vp will hold a referenced and, if requested, 87 * locked vnode. A locked vnode is requested via NLC_LOCKVP. If fp 88 * is non-NULL the vnode will be installed in the file pointer. 89 * 90 * NOTE: The vnode is referenced just once on return whether or not it 91 * is also installed in the file pointer. 92 */ 93 int 94 vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode) 95 { 96 struct vnode *vp; 97 struct ucred *cred = nd->nl_cred; 98 struct vattr vat; 99 struct vattr *vap = &vat; 100 int error; 101 u_int flags; 102 uint64_t osize; 103 struct mount *mp; 104 105 /* 106 * Certain combinations are illegal 107 */ 108 if ((fmode & (FWRITE | O_TRUNC)) == O_TRUNC) 109 return(EACCES); 110 111 /* 112 * Lookup the path and create or obtain the vnode. After a 113 * successful lookup a locked nd->nl_nch will be returned. 114 * 115 * The result of this section should be a locked vnode. 116 * 117 * XXX with only a little work we should be able to avoid locking 118 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set. 119 */ 120 nd->nl_flags |= NLC_OPEN; 121 if (fmode & O_APPEND) 122 nd->nl_flags |= NLC_APPEND; 123 if (fmode & O_TRUNC) 124 nd->nl_flags |= NLC_TRUNCATE; 125 if (fmode & FREAD) 126 nd->nl_flags |= NLC_READ; 127 if (fmode & FWRITE) 128 nd->nl_flags |= NLC_WRITE; 129 if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0) 130 nd->nl_flags |= NLC_FOLLOW; 131 132 if (fmode & O_CREAT) { 133 /* 134 * CONDITIONAL CREATE FILE CASE 135 * 136 * Setting NLC_CREATE causes a negative hit to store 137 * the negative hit ncp and not return an error. Then 138 * nc_error or nc_vp may be checked to see if the ncp 139 * represents a negative hit. NLC_CREATE also requires 140 * write permission on the governing directory or EPERM 141 * is returned. 142 */ 143 nd->nl_flags |= NLC_CREATE; 144 nd->nl_flags |= NLC_REFDVP; 145 bwillinode(1); 146 error = nlookup(nd); 147 } else { 148 /* 149 * NORMAL OPEN FILE CASE 150 */ 151 error = nlookup(nd); 152 } 153 154 if (error) 155 return (error); 156 157 /* 158 * split case to allow us to re-resolve and retry the ncp in case 159 * we get ESTALE. 160 */ 161 again: 162 if (fmode & O_CREAT) { 163 if (nd->nl_nch.ncp->nc_vp == NULL) { 164 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 165 return (error); 166 VATTR_NULL(vap); 167 vap->va_type = VREG; 168 vap->va_mode = cmode; 169 if (fmode & O_EXCL) 170 vap->va_vaflags |= VA_EXCLUSIVE; 171 error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp, 172 nd->nl_cred, vap); 173 if (error) 174 return (error); 175 fmode &= ~O_TRUNC; 176 /* locked vnode is returned */ 177 } else { 178 if (fmode & O_EXCL) { 179 error = EEXIST; 180 } else { 181 error = cache_vget(&nd->nl_nch, cred, 182 LK_EXCLUSIVE, &vp); 183 } 184 if (error) 185 return (error); 186 fmode &= ~O_CREAT; 187 } 188 } else { 189 error = cache_vget(&nd->nl_nch, cred, LK_EXCLUSIVE, &vp); 190 if (error) 191 return (error); 192 } 193 194 /* 195 * We have a locked vnode and ncp now. Note that the ncp will 196 * be cleaned up by the caller if nd->nl_nch is left intact. 197 */ 198 if (vp->v_type == VLNK) { 199 error = EMLINK; 200 goto bad; 201 } 202 if (vp->v_type == VSOCK) { 203 error = EOPNOTSUPP; 204 goto bad; 205 } 206 if (vp->v_type != VDIR && (fmode & O_DIRECTORY)) { 207 error = ENOTDIR; 208 goto bad; 209 } 210 if ((fmode & O_CREAT) == 0) { 211 if (fmode & (FWRITE | O_TRUNC)) { 212 if (vp->v_type == VDIR) { 213 error = EISDIR; 214 goto bad; 215 } 216 error = vn_writechk(vp, &nd->nl_nch); 217 if (error) { 218 /* 219 * Special stale handling, re-resolve the 220 * vnode. 221 */ 222 if (error == ESTALE) { 223 vput(vp); 224 vp = NULL; 225 cache_setunresolved(&nd->nl_nch); 226 error = cache_resolve(&nd->nl_nch, cred); 227 if (error == 0) 228 goto again; 229 } 230 goto bad; 231 } 232 } 233 } 234 if (fmode & O_TRUNC) { 235 vn_unlock(vp); /* XXX */ 236 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX */ 237 osize = vp->v_filesize; 238 VATTR_NULL(vap); 239 vap->va_size = 0; 240 error = VOP_SETATTR(vp, vap, cred); 241 if (error) 242 goto bad; 243 error = VOP_GETATTR(vp, vap); 244 if (error) 245 goto bad; 246 mp = vq_vptomp(vp); 247 VFS_ACCOUNT(mp, vap->va_uid, vap->va_gid, -osize); 248 } 249 250 /* 251 * Set or clear VNSWAPCACHE on the vp based on nd->nl_nch.ncp->nc_flag. 252 * These particular bits a tracked all the way from the root. 253 * 254 * NOTE: Might not work properly on NFS servers due to the 255 * disconnected namecache. 256 */ 257 flags = nd->nl_nch.ncp->nc_flag; 258 if ((flags & (NCF_UF_CACHE | NCF_UF_PCACHE)) && 259 (flags & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE)) == 0) { 260 vsetflags(vp, VSWAPCACHE); 261 } else { 262 vclrflags(vp, VSWAPCACHE); 263 } 264 265 /* 266 * Setup the fp so VOP_OPEN can override it. No descriptor has been 267 * associated with the fp yet so we own it clean. 268 * 269 * f_nchandle inherits nl_nch. This used to be necessary only for 270 * directories but now we do it unconditionally so f*() ops 271 * such as fchmod() can access the actual namespace that was 272 * used to open the file. 273 */ 274 if (fp) { 275 if (nd->nl_flags & NLC_APPENDONLY) 276 fmode |= FAPPENDONLY; 277 fp->f_nchandle = nd->nl_nch; 278 cache_zero(&nd->nl_nch); 279 cache_unlock(&fp->f_nchandle); 280 } 281 282 /* 283 * Get rid of nl_nch. vn_open does not return it (it returns the 284 * vnode or the file pointer). Note: we can't leave nl_nch locked 285 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g. 286 * on /dev/ttyd0 287 */ 288 if (nd->nl_nch.ncp) 289 cache_put(&nd->nl_nch); 290 291 error = VOP_OPEN(vp, fmode, cred, fp); 292 if (error) { 293 /* 294 * setting f_ops to &badfileops will prevent the descriptor 295 * code from trying to close and release the vnode, since 296 * the open failed we do not want to call close. 297 */ 298 if (fp) { 299 fp->f_data = NULL; 300 fp->f_ops = &badfileops; 301 } 302 goto bad; 303 } 304 305 #if 0 306 /* 307 * Assert that VREG files have been setup for vmio. 308 */ 309 KASSERT(vp->v_type != VREG || vp->v_object != NULL, 310 ("vn_open: regular file was not VMIO enabled!")); 311 #endif 312 313 /* 314 * Return the vnode. XXX needs some cleaning up. The vnode is 315 * only returned in the fp == NULL case. 316 */ 317 if (fp == NULL) { 318 nd->nl_open_vp = vp; 319 nd->nl_vp_fmode = fmode; 320 if ((nd->nl_flags & NLC_LOCKVP) == 0) 321 vn_unlock(vp); 322 } else { 323 vput(vp); 324 } 325 return (0); 326 bad: 327 if (vp) 328 vput(vp); 329 return (error); 330 } 331 332 int 333 vn_opendisk(const char *devname, int fmode, struct vnode **vpp) 334 { 335 struct vnode *vp; 336 int error; 337 338 if (strncmp(devname, "/dev/", 5) == 0) 339 devname += 5; 340 if ((vp = getsynthvnode(devname)) == NULL) { 341 error = ENODEV; 342 } else { 343 error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL); 344 vn_unlock(vp); 345 if (error) { 346 vrele(vp); 347 vp = NULL; 348 } 349 } 350 *vpp = vp; 351 return (error); 352 } 353 354 /* 355 * Check for write permissions on the specified vnode. nch may be NULL. 356 */ 357 int 358 vn_writechk(struct vnode *vp, struct nchandle *nch) 359 { 360 /* 361 * If there's shared text associated with 362 * the vnode, try to free it up once. If 363 * we fail, we can't allow writing. 364 */ 365 if (vp->v_flag & VTEXT) 366 return (ETXTBSY); 367 368 /* 369 * If the vnode represents a regular file, check the mount 370 * point via the nch. This may be a different mount point 371 * then the one embedded in the vnode (e.g. nullfs). 372 * 373 * We can still write to non-regular files (e.g. devices) 374 * via read-only mounts. 375 */ 376 if (nch && nch->ncp && vp->v_type == VREG) 377 return (ncp_writechk(nch)); 378 return (0); 379 } 380 381 /* 382 * Check whether the underlying mount is read-only. The mount point 383 * referenced by the namecache may be different from the mount point 384 * used by the underlying vnode in the case of NULLFS, so a separate 385 * check is needed. 386 */ 387 int 388 ncp_writechk(struct nchandle *nch) 389 { 390 if (nch->mount && (nch->mount->mnt_flag & MNT_RDONLY)) 391 return (EROFS); 392 return(0); 393 } 394 395 /* 396 * Vnode close call 397 * 398 * MPSAFE 399 */ 400 int 401 vn_close(struct vnode *vp, int flags) 402 { 403 int error; 404 405 error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 406 if (error == 0) { 407 error = VOP_CLOSE(vp, flags); 408 vn_unlock(vp); 409 } 410 vrele(vp); 411 return (error); 412 } 413 414 /* 415 * Sequential heuristic. 416 * 417 * MPSAFE (f_seqcount and f_nextoff are allowed to race) 418 */ 419 static __inline 420 int 421 sequential_heuristic(struct uio *uio, struct file *fp) 422 { 423 /* 424 * Sequential heuristic - detect sequential operation 425 * 426 * NOTE: SMP: We allow f_seqcount updates to race. 427 */ 428 if ((uio->uio_offset == 0 && fp->f_seqcount > 0) || 429 uio->uio_offset == fp->f_nextoff) { 430 int tmpseq = fp->f_seqcount; 431 432 tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE; 433 if (tmpseq > IO_SEQMAX) 434 tmpseq = IO_SEQMAX; 435 fp->f_seqcount = tmpseq; 436 return(fp->f_seqcount << IO_SEQSHIFT); 437 } 438 439 /* 440 * Not sequential, quick draw-down of seqcount 441 * 442 * NOTE: SMP: We allow f_seqcount updates to race. 443 */ 444 if (fp->f_seqcount > 1) 445 fp->f_seqcount = 1; 446 else 447 fp->f_seqcount = 0; 448 return(0); 449 } 450 451 /* 452 * get - lock and return the f_offset field. 453 * set - set and unlock the f_offset field. 454 * 455 * These routines serve the dual purpose of serializing access to the 456 * f_offset field (at least on i386) and guaranteeing operational integrity 457 * when multiple read()ers and write()ers are present on the same fp. 458 * 459 * MPSAFE 460 */ 461 static __inline off_t 462 vn_get_fpf_offset(struct file *fp) 463 { 464 u_int flags; 465 u_int nflags; 466 467 /* 468 * Shortcut critical path. 469 */ 470 flags = fp->f_flag & ~FOFFSETLOCK; 471 if (atomic_cmpset_int(&fp->f_flag, flags, flags | FOFFSETLOCK)) 472 return(fp->f_offset); 473 474 /* 475 * The hard way 476 */ 477 for (;;) { 478 flags = fp->f_flag; 479 if (flags & FOFFSETLOCK) { 480 nflags = flags | FOFFSETWAKE; 481 tsleep_interlock(&fp->f_flag, 0); 482 if (atomic_cmpset_int(&fp->f_flag, flags, nflags)) 483 tsleep(&fp->f_flag, PINTERLOCKED, "fpoff", 0); 484 } else { 485 nflags = flags | FOFFSETLOCK; 486 if (atomic_cmpset_int(&fp->f_flag, flags, nflags)) 487 break; 488 } 489 } 490 return(fp->f_offset); 491 } 492 493 /* 494 * MPSAFE 495 */ 496 static __inline void 497 vn_set_fpf_offset(struct file *fp, off_t offset) 498 { 499 u_int flags; 500 u_int nflags; 501 502 /* 503 * We hold the lock so we can set the offset without interference. 504 */ 505 fp->f_offset = offset; 506 507 /* 508 * Normal release is already a reasonably critical path. 509 */ 510 for (;;) { 511 flags = fp->f_flag; 512 nflags = flags & ~(FOFFSETLOCK | FOFFSETWAKE); 513 if (atomic_cmpset_int(&fp->f_flag, flags, nflags)) { 514 if (flags & FOFFSETWAKE) 515 wakeup(&fp->f_flag); 516 break; 517 } 518 } 519 } 520 521 /* 522 * MPSAFE 523 */ 524 static __inline off_t 525 vn_poll_fpf_offset(struct file *fp) 526 { 527 #if defined(__x86_64__) 528 return(fp->f_offset); 529 #else 530 off_t off = vn_get_fpf_offset(fp); 531 vn_set_fpf_offset(fp, off); 532 return(off); 533 #endif 534 } 535 536 /* 537 * Package up an I/O request on a vnode into a uio and do it. 538 * 539 * MPSAFE 540 */ 541 int 542 vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len, 543 off_t offset, enum uio_seg segflg, int ioflg, 544 struct ucred *cred, int *aresid) 545 { 546 struct uio auio; 547 struct iovec aiov; 548 int error; 549 550 if ((ioflg & IO_NODELOCKED) == 0) 551 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 552 auio.uio_iov = &aiov; 553 auio.uio_iovcnt = 1; 554 aiov.iov_base = base; 555 aiov.iov_len = len; 556 auio.uio_resid = len; 557 auio.uio_offset = offset; 558 auio.uio_segflg = segflg; 559 auio.uio_rw = rw; 560 auio.uio_td = curthread; 561 if (rw == UIO_READ) { 562 error = VOP_READ(vp, &auio, ioflg, cred); 563 } else { 564 error = VOP_WRITE(vp, &auio, ioflg, cred); 565 } 566 if (aresid) 567 *aresid = auio.uio_resid; 568 else 569 if (auio.uio_resid && error == 0) 570 error = EIO; 571 if ((ioflg & IO_NODELOCKED) == 0) 572 vn_unlock(vp); 573 return (error); 574 } 575 576 /* 577 * Package up an I/O request on a vnode into a uio and do it. The I/O 578 * request is split up into smaller chunks and we try to avoid saturating 579 * the buffer cache while potentially holding a vnode locked, so we 580 * check bwillwrite() before calling vn_rdwr(). We also call lwkt_user_yield() 581 * to give other processes a chance to lock the vnode (either other processes 582 * core'ing the same binary, or unrelated processes scanning the directory). 583 * 584 * MPSAFE 585 */ 586 int 587 vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len, 588 off_t offset, enum uio_seg segflg, int ioflg, 589 struct ucred *cred, int *aresid) 590 { 591 int error = 0; 592 593 do { 594 int chunk; 595 596 /* 597 * Force `offset' to a multiple of MAXBSIZE except possibly 598 * for the first chunk, so that filesystems only need to 599 * write full blocks except possibly for the first and last 600 * chunks. 601 */ 602 chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE; 603 604 if (chunk > len) 605 chunk = len; 606 if (vp->v_type == VREG) { 607 switch(rw) { 608 case UIO_READ: 609 bwillread(chunk); 610 break; 611 case UIO_WRITE: 612 bwillwrite(chunk); 613 break; 614 } 615 } 616 error = vn_rdwr(rw, vp, base, chunk, offset, segflg, 617 ioflg, cred, aresid); 618 len -= chunk; /* aresid calc already includes length */ 619 if (error) 620 break; 621 offset += chunk; 622 base += chunk; 623 lwkt_user_yield(); 624 } while (len); 625 if (aresid) 626 *aresid += len; 627 return (error); 628 } 629 630 /* 631 * File pointers can no longer get ripped up by revoke so 632 * we don't need to lock access to the vp. 633 * 634 * f_offset updates are not guaranteed against multiple readers 635 */ 636 static int 637 vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags) 638 { 639 struct vnode *vp; 640 int error, ioflag; 641 642 KASSERT(uio->uio_td == curthread, 643 ("uio_td %p is not td %p", uio->uio_td, curthread)); 644 vp = (struct vnode *)fp->f_data; 645 646 ioflag = 0; 647 if (flags & O_FBLOCKING) { 648 /* ioflag &= ~IO_NDELAY; */ 649 } else if (flags & O_FNONBLOCKING) { 650 ioflag |= IO_NDELAY; 651 } else if (fp->f_flag & FNONBLOCK) { 652 ioflag |= IO_NDELAY; 653 } 654 if (flags & O_FBUFFERED) { 655 /* ioflag &= ~IO_DIRECT; */ 656 } else if (flags & O_FUNBUFFERED) { 657 ioflag |= IO_DIRECT; 658 } else if (fp->f_flag & O_DIRECT) { 659 ioflag |= IO_DIRECT; 660 } 661 if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0) 662 uio->uio_offset = vn_get_fpf_offset(fp); 663 vn_lock(vp, LK_SHARED | LK_RETRY); 664 ioflag |= sequential_heuristic(uio, fp); 665 666 error = VOP_READ(vp, uio, ioflag, cred); 667 fp->f_nextoff = uio->uio_offset; 668 vn_unlock(vp); 669 if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0) 670 vn_set_fpf_offset(fp, uio->uio_offset); 671 return (error); 672 } 673 674 /* 675 * MPSAFE 676 */ 677 static int 678 vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags) 679 { 680 struct vnode *vp; 681 int error, ioflag; 682 683 KASSERT(uio->uio_td == curthread, 684 ("uio_td %p is not p %p", uio->uio_td, curthread)); 685 vp = (struct vnode *)fp->f_data; 686 687 ioflag = IO_UNIT; 688 if (vp->v_type == VREG && 689 ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) { 690 ioflag |= IO_APPEND; 691 } 692 693 if (flags & O_FBLOCKING) { 694 /* ioflag &= ~IO_NDELAY; */ 695 } else if (flags & O_FNONBLOCKING) { 696 ioflag |= IO_NDELAY; 697 } else if (fp->f_flag & FNONBLOCK) { 698 ioflag |= IO_NDELAY; 699 } 700 if (flags & O_FBUFFERED) { 701 /* ioflag &= ~IO_DIRECT; */ 702 } else if (flags & O_FUNBUFFERED) { 703 ioflag |= IO_DIRECT; 704 } else if (fp->f_flag & O_DIRECT) { 705 ioflag |= IO_DIRECT; 706 } 707 if (flags & O_FASYNCWRITE) { 708 /* ioflag &= ~IO_SYNC; */ 709 } else if (flags & O_FSYNCWRITE) { 710 ioflag |= IO_SYNC; 711 } else if (fp->f_flag & O_FSYNC) { 712 ioflag |= IO_SYNC; 713 } 714 715 if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)) 716 ioflag |= IO_SYNC; 717 if ((flags & O_FOFFSET) == 0) 718 uio->uio_offset = vn_get_fpf_offset(fp); 719 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 720 ioflag |= sequential_heuristic(uio, fp); 721 error = VOP_WRITE(vp, uio, ioflag, cred); 722 fp->f_nextoff = uio->uio_offset; 723 vn_unlock(vp); 724 if ((flags & O_FOFFSET) == 0) 725 vn_set_fpf_offset(fp, uio->uio_offset); 726 return (error); 727 } 728 729 /* 730 * MPSAFE 731 */ 732 static int 733 vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred) 734 { 735 struct vnode *vp; 736 int error; 737 738 vp = (struct vnode *)fp->f_data; 739 error = vn_stat(vp, sb, cred); 740 return (error); 741 } 742 743 /* 744 * MPSAFE 745 */ 746 int 747 vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred) 748 { 749 struct vattr vattr; 750 struct vattr *vap; 751 int error; 752 u_short mode; 753 cdev_t dev; 754 755 vap = &vattr; 756 error = VOP_GETATTR(vp, vap); 757 if (error) 758 return (error); 759 760 /* 761 * Zero the spare stat fields 762 */ 763 sb->st_lspare = 0; 764 sb->st_qspare1 = 0; 765 sb->st_qspare2 = 0; 766 767 /* 768 * Copy from vattr table 769 */ 770 if (vap->va_fsid != VNOVAL) 771 sb->st_dev = vap->va_fsid; 772 else 773 sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0]; 774 sb->st_ino = vap->va_fileid; 775 mode = vap->va_mode; 776 switch (vap->va_type) { 777 case VREG: 778 mode |= S_IFREG; 779 break; 780 case VDATABASE: 781 mode |= S_IFDB; 782 break; 783 case VDIR: 784 mode |= S_IFDIR; 785 break; 786 case VBLK: 787 mode |= S_IFBLK; 788 break; 789 case VCHR: 790 mode |= S_IFCHR; 791 break; 792 case VLNK: 793 mode |= S_IFLNK; 794 /* This is a cosmetic change, symlinks do not have a mode. */ 795 if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW) 796 sb->st_mode &= ~ACCESSPERMS; /* 0000 */ 797 else 798 sb->st_mode |= ACCESSPERMS; /* 0777 */ 799 break; 800 case VSOCK: 801 mode |= S_IFSOCK; 802 break; 803 case VFIFO: 804 mode |= S_IFIFO; 805 break; 806 default: 807 return (EBADF); 808 } 809 sb->st_mode = mode; 810 if (vap->va_nlink > (nlink_t)-1) 811 sb->st_nlink = (nlink_t)-1; 812 else 813 sb->st_nlink = vap->va_nlink; 814 sb->st_uid = vap->va_uid; 815 sb->st_gid = vap->va_gid; 816 sb->st_rdev = dev2udev(vp->v_rdev); 817 sb->st_size = vap->va_size; 818 sb->st_atimespec = vap->va_atime; 819 sb->st_mtimespec = vap->va_mtime; 820 sb->st_ctimespec = vap->va_ctime; 821 822 /* 823 * A VCHR and VBLK device may track the last access and last modified 824 * time independantly of the filesystem. This is particularly true 825 * because device read and write calls may bypass the filesystem. 826 */ 827 if (vp->v_type == VCHR || vp->v_type == VBLK) { 828 dev = vp->v_rdev; 829 if (dev != NULL) { 830 if (dev->si_lastread) { 831 sb->st_atimespec.tv_sec = time_second + 832 (time_uptime - 833 dev->si_lastread); 834 sb->st_atimespec.tv_nsec = 0; 835 } 836 if (dev->si_lastwrite) { 837 sb->st_atimespec.tv_sec = time_second + 838 (time_uptime - 839 dev->si_lastwrite); 840 sb->st_atimespec.tv_nsec = 0; 841 } 842 } 843 } 844 845 /* 846 * According to www.opengroup.org, the meaning of st_blksize is 847 * "a filesystem-specific preferred I/O block size for this 848 * object. In some filesystem types, this may vary from file 849 * to file" 850 * Default to PAGE_SIZE after much discussion. 851 */ 852 853 if (vap->va_type == VREG) { 854 sb->st_blksize = vap->va_blocksize; 855 } else if (vn_isdisk(vp, NULL)) { 856 /* 857 * XXX this is broken. If the device is not yet open (aka 858 * stat() call, aka v_rdev == NULL), how are we supposed 859 * to get a valid block size out of it? 860 */ 861 dev = vp->v_rdev; 862 863 sb->st_blksize = dev->si_bsize_best; 864 if (sb->st_blksize < dev->si_bsize_phys) 865 sb->st_blksize = dev->si_bsize_phys; 866 if (sb->st_blksize < BLKDEV_IOSIZE) 867 sb->st_blksize = BLKDEV_IOSIZE; 868 } else { 869 sb->st_blksize = PAGE_SIZE; 870 } 871 872 sb->st_flags = vap->va_flags; 873 874 error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0); 875 if (error) 876 sb->st_gen = 0; 877 else 878 sb->st_gen = (u_int32_t)vap->va_gen; 879 880 sb->st_blocks = vap->va_bytes / S_BLKSIZE; 881 return (0); 882 } 883 884 /* 885 * MPALMOSTSAFE - acquires mplock 886 */ 887 static int 888 vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred, 889 struct sysmsg *msg) 890 { 891 struct vnode *vp = ((struct vnode *)fp->f_data); 892 struct vnode *ovp; 893 struct vattr vattr; 894 int error; 895 off_t size; 896 897 switch (vp->v_type) { 898 case VREG: 899 case VDIR: 900 if (com == FIONREAD) { 901 error = VOP_GETATTR(vp, &vattr); 902 if (error) 903 break; 904 size = vattr.va_size; 905 if ((vp->v_flag & VNOTSEEKABLE) == 0) 906 size -= vn_poll_fpf_offset(fp); 907 if (size > 0x7FFFFFFF) 908 size = 0x7FFFFFFF; 909 *(int *)data = size; 910 error = 0; 911 break; 912 } 913 if (com == FIOASYNC) { /* XXX */ 914 error = 0; /* XXX */ 915 break; 916 } 917 /* fall into ... */ 918 default: 919 #if 0 920 return (ENOTTY); 921 #endif 922 case VFIFO: 923 case VCHR: 924 case VBLK: 925 if (com == FIODTYPE) { 926 if (vp->v_type != VCHR && vp->v_type != VBLK) { 927 error = ENOTTY; 928 break; 929 } 930 *(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK; 931 error = 0; 932 break; 933 } 934 error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred, msg); 935 if (error == 0 && com == TIOCSCTTY) { 936 struct proc *p = curthread->td_proc; 937 struct session *sess; 938 939 if (p == NULL) { 940 error = ENOTTY; 941 break; 942 } 943 944 get_mplock(); 945 sess = p->p_session; 946 /* Do nothing if reassigning same control tty */ 947 if (sess->s_ttyvp == vp) { 948 error = 0; 949 rel_mplock(); 950 break; 951 } 952 953 /* Get rid of reference to old control tty */ 954 ovp = sess->s_ttyvp; 955 vref(vp); 956 sess->s_ttyvp = vp; 957 if (ovp) 958 vrele(ovp); 959 rel_mplock(); 960 } 961 break; 962 } 963 return (error); 964 } 965 966 /* 967 * Check that the vnode is still valid, and if so 968 * acquire requested lock. 969 */ 970 int 971 #ifndef DEBUG_LOCKS 972 vn_lock(struct vnode *vp, int flags) 973 #else 974 debug_vn_lock(struct vnode *vp, int flags, const char *filename, int line) 975 #endif 976 { 977 int error; 978 979 do { 980 #ifdef DEBUG_LOCKS 981 vp->filename = filename; 982 vp->line = line; 983 error = debuglockmgr(&vp->v_lock, flags, 984 "vn_lock", filename, line); 985 #else 986 error = lockmgr(&vp->v_lock, flags); 987 #endif 988 if (error == 0) 989 break; 990 } while (flags & LK_RETRY); 991 992 /* 993 * Because we (had better!) have a ref on the vnode, once it 994 * goes to VRECLAIMED state it will not be recycled until all 995 * refs go away. So we can just check the flag. 996 */ 997 if (error == 0 && (vp->v_flag & VRECLAIMED)) { 998 lockmgr(&vp->v_lock, LK_RELEASE); 999 error = ENOENT; 1000 } 1001 return (error); 1002 } 1003 1004 #ifdef DEBUG_VN_UNLOCK 1005 1006 void 1007 debug_vn_unlock(struct vnode *vp, const char *filename, int line) 1008 { 1009 kprintf("vn_unlock from %s:%d\n", filename, line); 1010 lockmgr(&vp->v_lock, LK_RELEASE); 1011 } 1012 1013 #else 1014 1015 void 1016 vn_unlock(struct vnode *vp) 1017 { 1018 lockmgr(&vp->v_lock, LK_RELEASE); 1019 } 1020 1021 #endif 1022 1023 /* 1024 * MPSAFE 1025 */ 1026 int 1027 vn_islocked(struct vnode *vp) 1028 { 1029 return (lockstatus(&vp->v_lock, curthread)); 1030 } 1031 1032 /* 1033 * Return the lock status of a vnode and unlock the vnode 1034 * if we owned the lock. This is not a boolean, if the 1035 * caller cares what the lock status is the caller must 1036 * check the various possible values. 1037 * 1038 * This only unlocks exclusive locks held by the caller, 1039 * it will NOT unlock shared locks (there is no way to 1040 * tell who the shared lock belongs to). 1041 * 1042 * MPSAFE 1043 */ 1044 int 1045 vn_islocked_unlock(struct vnode *vp) 1046 { 1047 int vpls; 1048 1049 vpls = lockstatus(&vp->v_lock, curthread); 1050 if (vpls == LK_EXCLUSIVE) 1051 lockmgr(&vp->v_lock, LK_RELEASE); 1052 return(vpls); 1053 } 1054 1055 /* 1056 * Restore a vnode lock that we previously released via 1057 * vn_islocked_unlock(). This is a NOP if we did not 1058 * own the original lock. 1059 * 1060 * MPSAFE 1061 */ 1062 void 1063 vn_islocked_relock(struct vnode *vp, int vpls) 1064 { 1065 int error; 1066 1067 if (vpls == LK_EXCLUSIVE) 1068 error = lockmgr(&vp->v_lock, vpls); 1069 } 1070 1071 /* 1072 * MPSAFE 1073 */ 1074 static int 1075 vn_closefile(struct file *fp) 1076 { 1077 int error; 1078 1079 fp->f_ops = &badfileops; 1080 error = vn_close(((struct vnode *)fp->f_data), fp->f_flag); 1081 return (error); 1082 } 1083 1084 /* 1085 * MPSAFE 1086 */ 1087 static int 1088 vn_kqfilter(struct file *fp, struct knote *kn) 1089 { 1090 int error; 1091 1092 error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn); 1093 return (error); 1094 } 1095