1 /* 2 * Copyright (c) 1982, 1986, 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * @(#)vfs_vnops.c 8.2 (Berkeley) 1/21/94 39 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $ 40 * $DragonFly: src/sys/kern/vfs_vnops.c,v 1.58 2008/06/28 17:59:49 dillon Exp $ 41 */ 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/fcntl.h> 46 #include <sys/file.h> 47 #include <sys/stat.h> 48 #include <sys/proc.h> 49 #include <sys/priv.h> 50 #include <sys/mount.h> 51 #include <sys/nlookup.h> 52 #include <sys/vnode.h> 53 #include <sys/buf.h> 54 #include <sys/filio.h> 55 #include <sys/ttycom.h> 56 #include <sys/conf.h> 57 #include <sys/sysctl.h> 58 #include <sys/syslog.h> 59 60 #include <sys/thread2.h> 61 #include <sys/mplock2.h> 62 63 static int vn_closefile (struct file *fp); 64 static int vn_ioctl (struct file *fp, u_long com, caddr_t data, 65 struct ucred *cred, struct sysmsg *msg); 66 static int vn_read (struct file *fp, struct uio *uio, 67 struct ucred *cred, int flags); 68 static int vn_kqfilter (struct file *fp, struct knote *kn); 69 static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred); 70 static int vn_write (struct file *fp, struct uio *uio, 71 struct ucred *cred, int flags); 72 73 struct fileops vnode_fileops = { 74 .fo_read = vn_read, 75 .fo_write = vn_write, 76 .fo_ioctl = vn_ioctl, 77 .fo_kqfilter = vn_kqfilter, 78 .fo_stat = vn_statfile, 79 .fo_close = vn_closefile, 80 .fo_shutdown = nofo_shutdown 81 }; 82 83 /* 84 * Common code for vnode open operations. Check permissions, and call 85 * the VOP_NOPEN or VOP_NCREATE routine. 86 * 87 * The caller is responsible for setting up nd with nlookup_init() and 88 * for cleaning it up with nlookup_done(), whether we return an error 89 * or not. 90 * 91 * On success nd->nl_open_vp will hold a referenced and, if requested, 92 * locked vnode. A locked vnode is requested via NLC_LOCKVP. If fp 93 * is non-NULL the vnode will be installed in the file pointer. 94 * 95 * NOTE: The vnode is referenced just once on return whether or not it 96 * is also installed in the file pointer. 97 */ 98 int 99 vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode) 100 { 101 struct vnode *vp; 102 struct ucred *cred = nd->nl_cred; 103 struct vattr vat; 104 struct vattr *vap = &vat; 105 int error; 106 u_int flags; 107 uint64_t osize; 108 struct mount *mp; 109 110 /* 111 * Certain combinations are illegal 112 */ 113 if ((fmode & (FWRITE | O_TRUNC)) == O_TRUNC) 114 return(EACCES); 115 116 /* 117 * Lookup the path and create or obtain the vnode. After a 118 * successful lookup a locked nd->nl_nch will be returned. 119 * 120 * The result of this section should be a locked vnode. 121 * 122 * XXX with only a little work we should be able to avoid locking 123 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set. 124 */ 125 nd->nl_flags |= NLC_OPEN; 126 if (fmode & O_APPEND) 127 nd->nl_flags |= NLC_APPEND; 128 if (fmode & O_TRUNC) 129 nd->nl_flags |= NLC_TRUNCATE; 130 if (fmode & FREAD) 131 nd->nl_flags |= NLC_READ; 132 if (fmode & FWRITE) 133 nd->nl_flags |= NLC_WRITE; 134 if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0) 135 nd->nl_flags |= NLC_FOLLOW; 136 137 if (fmode & O_CREAT) { 138 /* 139 * CONDITIONAL CREATE FILE CASE 140 * 141 * Setting NLC_CREATE causes a negative hit to store 142 * the negative hit ncp and not return an error. Then 143 * nc_error or nc_vp may be checked to see if the ncp 144 * represents a negative hit. NLC_CREATE also requires 145 * write permission on the governing directory or EPERM 146 * is returned. 147 */ 148 nd->nl_flags |= NLC_CREATE; 149 nd->nl_flags |= NLC_REFDVP; 150 bwillinode(1); 151 error = nlookup(nd); 152 } else { 153 /* 154 * NORMAL OPEN FILE CASE 155 */ 156 error = nlookup(nd); 157 } 158 159 if (error) 160 return (error); 161 162 /* 163 * split case to allow us to re-resolve and retry the ncp in case 164 * we get ESTALE. 165 */ 166 again: 167 if (fmode & O_CREAT) { 168 if (nd->nl_nch.ncp->nc_vp == NULL) { 169 if ((error = ncp_writechk(&nd->nl_nch)) != 0) 170 return (error); 171 VATTR_NULL(vap); 172 vap->va_type = VREG; 173 vap->va_mode = cmode; 174 if (fmode & O_EXCL) 175 vap->va_vaflags |= VA_EXCLUSIVE; 176 error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp, 177 nd->nl_cred, vap); 178 if (error) 179 return (error); 180 fmode &= ~O_TRUNC; 181 /* locked vnode is returned */ 182 } else { 183 if (fmode & O_EXCL) { 184 error = EEXIST; 185 } else { 186 error = cache_vget(&nd->nl_nch, cred, 187 LK_EXCLUSIVE, &vp); 188 } 189 if (error) 190 return (error); 191 fmode &= ~O_CREAT; 192 } 193 } else { 194 error = cache_vget(&nd->nl_nch, cred, LK_EXCLUSIVE, &vp); 195 if (error) 196 return (error); 197 } 198 199 /* 200 * We have a locked vnode and ncp now. Note that the ncp will 201 * be cleaned up by the caller if nd->nl_nch is left intact. 202 */ 203 if (vp->v_type == VLNK) { 204 error = EMLINK; 205 goto bad; 206 } 207 if (vp->v_type == VSOCK) { 208 error = EOPNOTSUPP; 209 goto bad; 210 } 211 if ((fmode & O_CREAT) == 0) { 212 if (fmode & (FWRITE | O_TRUNC)) { 213 if (vp->v_type == VDIR) { 214 error = EISDIR; 215 goto bad; 216 } 217 error = vn_writechk(vp, &nd->nl_nch); 218 if (error) { 219 /* 220 * Special stale handling, re-resolve the 221 * vnode. 222 */ 223 if (error == ESTALE) { 224 vput(vp); 225 vp = NULL; 226 cache_setunresolved(&nd->nl_nch); 227 error = cache_resolve(&nd->nl_nch, cred); 228 if (error == 0) 229 goto again; 230 } 231 goto bad; 232 } 233 } 234 } 235 if (fmode & O_TRUNC) { 236 vn_unlock(vp); /* XXX */ 237 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX */ 238 osize = vp->v_filesize; 239 VATTR_NULL(vap); 240 vap->va_size = 0; 241 error = VOP_SETATTR(vp, vap, cred); 242 if (error) 243 goto bad; 244 error = VOP_GETATTR(vp, vap); 245 if (error) 246 goto bad; 247 mp = vq_vptomp(vp); 248 VFS_ACCOUNT(mp, vap->va_uid, vap->va_gid, -osize); 249 } 250 251 /* 252 * Set or clear VNSWAPCACHE on the vp based on nd->nl_nch.ncp->nc_flag. 253 * These particular bits a tracked all the way from the root. 254 * 255 * NOTE: Might not work properly on NFS servers due to the 256 * disconnected namecache. 257 */ 258 flags = nd->nl_nch.ncp->nc_flag; 259 if ((flags & (NCF_UF_CACHE | NCF_UF_PCACHE)) && 260 (flags & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE)) == 0) { 261 vsetflags(vp, VSWAPCACHE); 262 } else { 263 vclrflags(vp, VSWAPCACHE); 264 } 265 266 /* 267 * Setup the fp so VOP_OPEN can override it. No descriptor has been 268 * associated with the fp yet so we own it clean. 269 * 270 * f_nchandle inherits nl_nch. This used to be necessary only for 271 * directories but now we do it unconditionally so f*() ops 272 * such as fchmod() can access the actual namespace that was 273 * used to open the file. 274 */ 275 if (fp) { 276 if (nd->nl_flags & NLC_APPENDONLY) 277 fmode |= FAPPENDONLY; 278 fp->f_nchandle = nd->nl_nch; 279 cache_zero(&nd->nl_nch); 280 cache_unlock(&fp->f_nchandle); 281 } 282 283 /* 284 * Get rid of nl_nch. vn_open does not return it (it returns the 285 * vnode or the file pointer). Note: we can't leave nl_nch locked 286 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g. 287 * on /dev/ttyd0 288 */ 289 if (nd->nl_nch.ncp) 290 cache_put(&nd->nl_nch); 291 292 error = VOP_OPEN(vp, fmode, cred, fp); 293 if (error) { 294 /* 295 * setting f_ops to &badfileops will prevent the descriptor 296 * code from trying to close and release the vnode, since 297 * the open failed we do not want to call close. 298 */ 299 if (fp) { 300 fp->f_data = NULL; 301 fp->f_ops = &badfileops; 302 } 303 goto bad; 304 } 305 306 #if 0 307 /* 308 * Assert that VREG files have been setup for vmio. 309 */ 310 KASSERT(vp->v_type != VREG || vp->v_object != NULL, 311 ("vn_open: regular file was not VMIO enabled!")); 312 #endif 313 314 /* 315 * Return the vnode. XXX needs some cleaning up. The vnode is 316 * only returned in the fp == NULL case. 317 */ 318 if (fp == NULL) { 319 nd->nl_open_vp = vp; 320 nd->nl_vp_fmode = fmode; 321 if ((nd->nl_flags & NLC_LOCKVP) == 0) 322 vn_unlock(vp); 323 } else { 324 vput(vp); 325 } 326 return (0); 327 bad: 328 if (vp) 329 vput(vp); 330 return (error); 331 } 332 333 int 334 vn_opendisk(const char *devname, int fmode, struct vnode **vpp) 335 { 336 struct vnode *vp; 337 int error; 338 339 if (strncmp(devname, "/dev/", 5) == 0) 340 devname += 5; 341 if ((vp = getsynthvnode(devname)) == NULL) { 342 error = ENODEV; 343 } else { 344 error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL); 345 vn_unlock(vp); 346 if (error) { 347 vrele(vp); 348 vp = NULL; 349 } 350 } 351 *vpp = vp; 352 return (error); 353 } 354 355 /* 356 * Check for write permissions on the specified vnode. nch may be NULL. 357 */ 358 int 359 vn_writechk(struct vnode *vp, struct nchandle *nch) 360 { 361 /* 362 * If there's shared text associated with 363 * the vnode, try to free it up once. If 364 * we fail, we can't allow writing. 365 */ 366 if (vp->v_flag & VTEXT) 367 return (ETXTBSY); 368 369 /* 370 * If the vnode represents a regular file, check the mount 371 * point via the nch. This may be a different mount point 372 * then the one embedded in the vnode (e.g. nullfs). 373 * 374 * We can still write to non-regular files (e.g. devices) 375 * via read-only mounts. 376 */ 377 if (nch && nch->ncp && vp->v_type == VREG) 378 return (ncp_writechk(nch)); 379 return (0); 380 } 381 382 /* 383 * Check whether the underlying mount is read-only. The mount point 384 * referenced by the namecache may be different from the mount point 385 * used by the underlying vnode in the case of NULLFS, so a separate 386 * check is needed. 387 */ 388 int 389 ncp_writechk(struct nchandle *nch) 390 { 391 if (nch->mount && (nch->mount->mnt_flag & MNT_RDONLY)) 392 return (EROFS); 393 return(0); 394 } 395 396 /* 397 * Vnode close call 398 * 399 * MPSAFE 400 */ 401 int 402 vn_close(struct vnode *vp, int flags) 403 { 404 int error; 405 406 error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 407 if (error == 0) { 408 error = VOP_CLOSE(vp, flags); 409 vn_unlock(vp); 410 } 411 vrele(vp); 412 return (error); 413 } 414 415 /* 416 * Sequential heuristic. 417 * 418 * MPSAFE (f_seqcount and f_nextoff are allowed to race) 419 */ 420 static __inline 421 int 422 sequential_heuristic(struct uio *uio, struct file *fp) 423 { 424 /* 425 * Sequential heuristic - detect sequential operation 426 * 427 * NOTE: SMP: We allow f_seqcount updates to race. 428 */ 429 if ((uio->uio_offset == 0 && fp->f_seqcount > 0) || 430 uio->uio_offset == fp->f_nextoff) { 431 int tmpseq = fp->f_seqcount; 432 433 tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE; 434 if (tmpseq > IO_SEQMAX) 435 tmpseq = IO_SEQMAX; 436 fp->f_seqcount = tmpseq; 437 return(fp->f_seqcount << IO_SEQSHIFT); 438 } 439 440 /* 441 * Not sequential, quick draw-down of seqcount 442 * 443 * NOTE: SMP: We allow f_seqcount updates to race. 444 */ 445 if (fp->f_seqcount > 1) 446 fp->f_seqcount = 1; 447 else 448 fp->f_seqcount = 0; 449 return(0); 450 } 451 452 /* 453 * get - lock and return the f_offset field. 454 * set - set and unlock the f_offset field. 455 * 456 * These routines serve the dual purpose of serializing access to the 457 * f_offset field (at least on i386) and guaranteeing operational integrity 458 * when multiple read()ers and write()ers are present on the same fp. 459 * 460 * MPSAFE 461 */ 462 static __inline off_t 463 vn_get_fpf_offset(struct file *fp) 464 { 465 u_int flags; 466 u_int nflags; 467 468 /* 469 * Shortcut critical path. 470 */ 471 flags = fp->f_flag & ~FOFFSETLOCK; 472 if (atomic_cmpset_int(&fp->f_flag, flags, flags | FOFFSETLOCK)) 473 return(fp->f_offset); 474 475 /* 476 * The hard way 477 */ 478 for (;;) { 479 flags = fp->f_flag; 480 if (flags & FOFFSETLOCK) { 481 nflags = flags | FOFFSETWAKE; 482 tsleep_interlock(&fp->f_flag, 0); 483 if (atomic_cmpset_int(&fp->f_flag, flags, nflags)) 484 tsleep(&fp->f_flag, PINTERLOCKED, "fpoff", 0); 485 } else { 486 nflags = flags | FOFFSETLOCK; 487 if (atomic_cmpset_int(&fp->f_flag, flags, nflags)) 488 break; 489 } 490 } 491 return(fp->f_offset); 492 } 493 494 /* 495 * MPSAFE 496 */ 497 static __inline void 498 vn_set_fpf_offset(struct file *fp, off_t offset) 499 { 500 u_int flags; 501 u_int nflags; 502 503 /* 504 * We hold the lock so we can set the offset without interference. 505 */ 506 fp->f_offset = offset; 507 508 /* 509 * Normal release is already a reasonably critical path. 510 */ 511 for (;;) { 512 flags = fp->f_flag; 513 nflags = flags & ~(FOFFSETLOCK | FOFFSETWAKE); 514 if (atomic_cmpset_int(&fp->f_flag, flags, nflags)) { 515 if (flags & FOFFSETWAKE) 516 wakeup(&fp->f_flag); 517 break; 518 } 519 } 520 } 521 522 /* 523 * MPSAFE 524 */ 525 static __inline off_t 526 vn_poll_fpf_offset(struct file *fp) 527 { 528 #if defined(__x86_64__) || !defined(SMP) 529 return(fp->f_offset); 530 #else 531 off_t off = vn_get_fpf_offset(fp); 532 vn_set_fpf_offset(fp, off); 533 return(off); 534 #endif 535 } 536 537 /* 538 * Package up an I/O request on a vnode into a uio and do it. 539 * 540 * MPSAFE 541 */ 542 int 543 vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len, 544 off_t offset, enum uio_seg segflg, int ioflg, 545 struct ucred *cred, int *aresid) 546 { 547 struct uio auio; 548 struct iovec aiov; 549 int error; 550 551 if ((ioflg & IO_NODELOCKED) == 0) 552 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 553 auio.uio_iov = &aiov; 554 auio.uio_iovcnt = 1; 555 aiov.iov_base = base; 556 aiov.iov_len = len; 557 auio.uio_resid = len; 558 auio.uio_offset = offset; 559 auio.uio_segflg = segflg; 560 auio.uio_rw = rw; 561 auio.uio_td = curthread; 562 if (rw == UIO_READ) { 563 error = VOP_READ(vp, &auio, ioflg, cred); 564 } else { 565 error = VOP_WRITE(vp, &auio, ioflg, cred); 566 } 567 if (aresid) 568 *aresid = auio.uio_resid; 569 else 570 if (auio.uio_resid && error == 0) 571 error = EIO; 572 if ((ioflg & IO_NODELOCKED) == 0) 573 vn_unlock(vp); 574 return (error); 575 } 576 577 /* 578 * Package up an I/O request on a vnode into a uio and do it. The I/O 579 * request is split up into smaller chunks and we try to avoid saturating 580 * the buffer cache while potentially holding a vnode locked, so we 581 * check bwillwrite() before calling vn_rdwr(). We also call lwkt_user_yield() 582 * to give other processes a chance to lock the vnode (either other processes 583 * core'ing the same binary, or unrelated processes scanning the directory). 584 * 585 * MPSAFE 586 */ 587 int 588 vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len, 589 off_t offset, enum uio_seg segflg, int ioflg, 590 struct ucred *cred, int *aresid) 591 { 592 int error = 0; 593 594 do { 595 int chunk; 596 597 /* 598 * Force `offset' to a multiple of MAXBSIZE except possibly 599 * for the first chunk, so that filesystems only need to 600 * write full blocks except possibly for the first and last 601 * chunks. 602 */ 603 chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE; 604 605 if (chunk > len) 606 chunk = len; 607 if (vp->v_type == VREG) { 608 switch(rw) { 609 case UIO_READ: 610 bwillread(chunk); 611 break; 612 case UIO_WRITE: 613 bwillwrite(chunk); 614 break; 615 } 616 } 617 error = vn_rdwr(rw, vp, base, chunk, offset, segflg, 618 ioflg, cred, aresid); 619 len -= chunk; /* aresid calc already includes length */ 620 if (error) 621 break; 622 offset += chunk; 623 base += chunk; 624 lwkt_user_yield(); 625 } while (len); 626 if (aresid) 627 *aresid += len; 628 return (error); 629 } 630 631 /* 632 * File pointers can no longer get ripped up by revoke so 633 * we don't need to lock access to the vp. 634 * 635 * f_offset updates are not guaranteed against multiple readers 636 * 637 * MPSAFE 638 */ 639 static int 640 vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags) 641 { 642 struct vnode *vp; 643 int error, ioflag; 644 645 KASSERT(uio->uio_td == curthread, 646 ("uio_td %p is not td %p", uio->uio_td, curthread)); 647 vp = (struct vnode *)fp->f_data; 648 649 ioflag = 0; 650 if (flags & O_FRNONBLOCKING) { 651 ioflag |= (IO_NDELAY | IO_NRDELAY); 652 } else if (flags & O_FBLOCKING) { 653 /* ioflag &= ~IO_NDELAY; */ 654 } else if (flags & O_FNONBLOCKING) { 655 ioflag |= IO_NDELAY; 656 } else if (fp->f_flag & FNONBLOCK) { 657 ioflag |= IO_NDELAY; 658 } 659 if (flags & O_FBUFFERED) { 660 /* ioflag &= ~IO_DIRECT; */ 661 } else if (flags & O_FUNBUFFERED) { 662 ioflag |= IO_DIRECT; 663 } else if (fp->f_flag & O_DIRECT) { 664 ioflag |= IO_DIRECT; 665 } 666 if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0) 667 uio->uio_offset = vn_get_fpf_offset(fp); 668 vn_lock(vp, LK_SHARED | LK_RETRY); 669 ioflag |= sequential_heuristic(uio, fp); 670 671 error = VOP_READ(vp, uio, ioflag, cred); 672 fp->f_nextoff = uio->uio_offset; 673 vn_unlock(vp); 674 if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0) 675 vn_set_fpf_offset(fp, uio->uio_offset); 676 return (error); 677 } 678 679 /* 680 * MPSAFE 681 */ 682 static int 683 vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags) 684 { 685 struct vnode *vp; 686 int error, ioflag; 687 688 KASSERT(uio->uio_td == curthread, 689 ("uio_td %p is not p %p", uio->uio_td, curthread)); 690 vp = (struct vnode *)fp->f_data; 691 692 ioflag = IO_UNIT; 693 if (vp->v_type == VREG && 694 ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) { 695 ioflag |= IO_APPEND; 696 } 697 698 if (flags & O_FBLOCKING) { 699 /* ioflag &= ~IO_NDELAY; */ 700 } else if (flags & O_FNONBLOCKING) { 701 ioflag |= IO_NDELAY; 702 } else if (fp->f_flag & FNONBLOCK) { 703 ioflag |= IO_NDELAY; 704 } 705 if (flags & O_FBUFFERED) { 706 /* ioflag &= ~IO_DIRECT; */ 707 } else if (flags & O_FUNBUFFERED) { 708 ioflag |= IO_DIRECT; 709 } else if (fp->f_flag & O_DIRECT) { 710 ioflag |= IO_DIRECT; 711 } 712 if (flags & O_FASYNCWRITE) { 713 /* ioflag &= ~IO_SYNC; */ 714 } else if (flags & O_FSYNCWRITE) { 715 ioflag |= IO_SYNC; 716 } else if (fp->f_flag & O_FSYNC) { 717 ioflag |= IO_SYNC; 718 } 719 720 if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)) 721 ioflag |= IO_SYNC; 722 if ((flags & O_FOFFSET) == 0) 723 uio->uio_offset = vn_get_fpf_offset(fp); 724 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 725 ioflag |= sequential_heuristic(uio, fp); 726 error = VOP_WRITE(vp, uio, ioflag, cred); 727 fp->f_nextoff = uio->uio_offset; 728 vn_unlock(vp); 729 if ((flags & O_FOFFSET) == 0) 730 vn_set_fpf_offset(fp, uio->uio_offset); 731 return (error); 732 } 733 734 /* 735 * MPSAFE 736 */ 737 static int 738 vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred) 739 { 740 struct vnode *vp; 741 int error; 742 743 vp = (struct vnode *)fp->f_data; 744 error = vn_stat(vp, sb, cred); 745 return (error); 746 } 747 748 /* 749 * MPSAFE 750 */ 751 int 752 vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred) 753 { 754 struct vattr vattr; 755 struct vattr *vap; 756 int error; 757 u_short mode; 758 cdev_t dev; 759 760 vap = &vattr; 761 error = VOP_GETATTR(vp, vap); 762 if (error) 763 return (error); 764 765 /* 766 * Zero the spare stat fields 767 */ 768 sb->st_lspare = 0; 769 sb->st_qspare1 = 0; 770 sb->st_qspare2 = 0; 771 772 /* 773 * Copy from vattr table 774 */ 775 if (vap->va_fsid != VNOVAL) 776 sb->st_dev = vap->va_fsid; 777 else 778 sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0]; 779 sb->st_ino = vap->va_fileid; 780 mode = vap->va_mode; 781 switch (vap->va_type) { 782 case VREG: 783 mode |= S_IFREG; 784 break; 785 case VDATABASE: 786 mode |= S_IFDB; 787 break; 788 case VDIR: 789 mode |= S_IFDIR; 790 break; 791 case VBLK: 792 mode |= S_IFBLK; 793 break; 794 case VCHR: 795 mode |= S_IFCHR; 796 break; 797 case VLNK: 798 mode |= S_IFLNK; 799 /* This is a cosmetic change, symlinks do not have a mode. */ 800 if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW) 801 sb->st_mode &= ~ACCESSPERMS; /* 0000 */ 802 else 803 sb->st_mode |= ACCESSPERMS; /* 0777 */ 804 break; 805 case VSOCK: 806 mode |= S_IFSOCK; 807 break; 808 case VFIFO: 809 mode |= S_IFIFO; 810 break; 811 default: 812 return (EBADF); 813 } 814 sb->st_mode = mode; 815 if (vap->va_nlink > (nlink_t)-1) 816 sb->st_nlink = (nlink_t)-1; 817 else 818 sb->st_nlink = vap->va_nlink; 819 sb->st_uid = vap->va_uid; 820 sb->st_gid = vap->va_gid; 821 sb->st_rdev = dev2udev(vp->v_rdev); 822 sb->st_size = vap->va_size; 823 sb->st_atimespec = vap->va_atime; 824 sb->st_mtimespec = vap->va_mtime; 825 sb->st_ctimespec = vap->va_ctime; 826 827 /* 828 * A VCHR and VBLK device may track the last access and last modified 829 * time independantly of the filesystem. This is particularly true 830 * because device read and write calls may bypass the filesystem. 831 */ 832 if (vp->v_type == VCHR || vp->v_type == VBLK) { 833 dev = vp->v_rdev; 834 if (dev != NULL) { 835 if (dev->si_lastread) { 836 sb->st_atimespec.tv_sec = dev->si_lastread; 837 sb->st_atimespec.tv_nsec = 0; 838 } 839 if (dev->si_lastwrite) { 840 sb->st_atimespec.tv_sec = dev->si_lastwrite; 841 sb->st_atimespec.tv_nsec = 0; 842 } 843 } 844 } 845 846 /* 847 * According to www.opengroup.org, the meaning of st_blksize is 848 * "a filesystem-specific preferred I/O block size for this 849 * object. In some filesystem types, this may vary from file 850 * to file" 851 * Default to PAGE_SIZE after much discussion. 852 */ 853 854 if (vap->va_type == VREG) { 855 sb->st_blksize = vap->va_blocksize; 856 } else if (vn_isdisk(vp, NULL)) { 857 /* 858 * XXX this is broken. If the device is not yet open (aka 859 * stat() call, aka v_rdev == NULL), how are we supposed 860 * to get a valid block size out of it? 861 */ 862 dev = vp->v_rdev; 863 864 sb->st_blksize = dev->si_bsize_best; 865 if (sb->st_blksize < dev->si_bsize_phys) 866 sb->st_blksize = dev->si_bsize_phys; 867 if (sb->st_blksize < BLKDEV_IOSIZE) 868 sb->st_blksize = BLKDEV_IOSIZE; 869 } else { 870 sb->st_blksize = PAGE_SIZE; 871 } 872 873 sb->st_flags = vap->va_flags; 874 875 error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0); 876 if (error) 877 sb->st_gen = 0; 878 else 879 sb->st_gen = (u_int32_t)vap->va_gen; 880 881 sb->st_blocks = vap->va_bytes / S_BLKSIZE; 882 return (0); 883 } 884 885 /* 886 * MPALMOSTSAFE - acquires mplock 887 */ 888 static int 889 vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred, 890 struct sysmsg *msg) 891 { 892 struct vnode *vp = ((struct vnode *)fp->f_data); 893 struct vnode *ovp; 894 struct vattr vattr; 895 int error; 896 off_t size; 897 898 switch (vp->v_type) { 899 case VREG: 900 case VDIR: 901 if (com == FIONREAD) { 902 error = VOP_GETATTR(vp, &vattr); 903 if (error) 904 break; 905 size = vattr.va_size; 906 if ((vp->v_flag & VNOTSEEKABLE) == 0) 907 size -= vn_poll_fpf_offset(fp); 908 if (size > 0x7FFFFFFF) 909 size = 0x7FFFFFFF; 910 *(int *)data = size; 911 error = 0; 912 break; 913 } 914 if (com == FIOASYNC) { /* XXX */ 915 error = 0; /* XXX */ 916 break; 917 } 918 /* fall into ... */ 919 default: 920 #if 0 921 return (ENOTTY); 922 #endif 923 case VFIFO: 924 case VCHR: 925 case VBLK: 926 if (com == FIODTYPE) { 927 if (vp->v_type != VCHR && vp->v_type != VBLK) { 928 error = ENOTTY; 929 break; 930 } 931 *(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK; 932 error = 0; 933 break; 934 } 935 error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred, msg); 936 if (error == 0 && com == TIOCSCTTY) { 937 struct proc *p = curthread->td_proc; 938 struct session *sess; 939 940 if (p == NULL) { 941 error = ENOTTY; 942 break; 943 } 944 945 get_mplock(); 946 sess = p->p_session; 947 /* Do nothing if reassigning same control tty */ 948 if (sess->s_ttyvp == vp) { 949 error = 0; 950 rel_mplock(); 951 break; 952 } 953 954 /* Get rid of reference to old control tty */ 955 ovp = sess->s_ttyvp; 956 vref(vp); 957 sess->s_ttyvp = vp; 958 if (ovp) 959 vrele(ovp); 960 rel_mplock(); 961 } 962 break; 963 } 964 return (error); 965 } 966 967 /* 968 * Check that the vnode is still valid, and if so 969 * acquire requested lock. 970 */ 971 int 972 #ifndef DEBUG_LOCKS 973 vn_lock(struct vnode *vp, int flags) 974 #else 975 debug_vn_lock(struct vnode *vp, int flags, const char *filename, int line) 976 #endif 977 { 978 int error; 979 980 do { 981 #ifdef DEBUG_LOCKS 982 vp->filename = filename; 983 vp->line = line; 984 error = debuglockmgr(&vp->v_lock, flags, 985 "vn_lock", filename, line); 986 #else 987 error = lockmgr(&vp->v_lock, flags); 988 #endif 989 if (error == 0) 990 break; 991 } while (flags & LK_RETRY); 992 993 /* 994 * Because we (had better!) have a ref on the vnode, once it 995 * goes to VRECLAIMED state it will not be recycled until all 996 * refs go away. So we can just check the flag. 997 */ 998 if (error == 0 && (vp->v_flag & VRECLAIMED)) { 999 lockmgr(&vp->v_lock, LK_RELEASE); 1000 error = ENOENT; 1001 } 1002 return (error); 1003 } 1004 1005 #ifdef DEBUG_VN_UNLOCK 1006 1007 void 1008 debug_vn_unlock(struct vnode *vp, const char *filename, int line) 1009 { 1010 kprintf("vn_unlock from %s:%d\n", filename, line); 1011 lockmgr(&vp->v_lock, LK_RELEASE); 1012 } 1013 1014 #else 1015 1016 void 1017 vn_unlock(struct vnode *vp) 1018 { 1019 lockmgr(&vp->v_lock, LK_RELEASE); 1020 } 1021 1022 #endif 1023 1024 /* 1025 * MPSAFE 1026 */ 1027 int 1028 vn_islocked(struct vnode *vp) 1029 { 1030 return (lockstatus(&vp->v_lock, curthread)); 1031 } 1032 1033 /* 1034 * Return the lock status of a vnode and unlock the vnode 1035 * if we owned the lock. This is not a boolean, if the 1036 * caller cares what the lock status is the caller must 1037 * check the various possible values. 1038 * 1039 * This only unlocks exclusive locks held by the caller, 1040 * it will NOT unlock shared locks (there is no way to 1041 * tell who the shared lock belongs to). 1042 * 1043 * MPSAFE 1044 */ 1045 int 1046 vn_islocked_unlock(struct vnode *vp) 1047 { 1048 int vpls; 1049 1050 vpls = lockstatus(&vp->v_lock, curthread); 1051 if (vpls == LK_EXCLUSIVE) 1052 lockmgr(&vp->v_lock, LK_RELEASE); 1053 return(vpls); 1054 } 1055 1056 /* 1057 * Restore a vnode lock that we previously released via 1058 * vn_islocked_unlock(). This is a NOP if we did not 1059 * own the original lock. 1060 * 1061 * MPSAFE 1062 */ 1063 void 1064 vn_islocked_relock(struct vnode *vp, int vpls) 1065 { 1066 int error; 1067 1068 if (vpls == LK_EXCLUSIVE) 1069 error = lockmgr(&vp->v_lock, vpls); 1070 } 1071 1072 /* 1073 * MPSAFE 1074 */ 1075 static int 1076 vn_closefile(struct file *fp) 1077 { 1078 int error; 1079 1080 fp->f_ops = &badfileops; 1081 error = vn_close(((struct vnode *)fp->f_data), fp->f_flag); 1082 return (error); 1083 } 1084 1085 /* 1086 * MPSAFE 1087 */ 1088 static int 1089 vn_kqfilter(struct file *fp, struct knote *kn) 1090 { 1091 int error; 1092 1093 error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn); 1094 return (error); 1095 } 1096