xref: /dragonfly/sys/kern/vfs_vnops.c (revision 8e11cefe)
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <sys/mplock2.h>

static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct ucred *cred, struct sysmsg *msg);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);

struct fileops vnode_fileops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};

/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_OPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: If the caller wishes the namecache entry to be operated with
 *	 a shared lock it must use NLC_SHAREDLOCK.  If NLC_LOCKVP is set
 *	 then the vnode lock will also be shared.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 *	 is also installed in the file pointer.
 */
int
vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
{
	struct vnode *vp;
	struct ucred *cred = nd->nl_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int error;
	u_int flags;
	uint64_t osize;
	struct mount *mp;

	/*
	 * Certain combinations are illegal
	 */
	if ((fmode & (FWRITE | O_TRUNC)) == O_TRUNC)
		return(EACCES);

	/*
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_nch will be returned.
	 *
	 * The result of this section should be a locked vnode.
	 *
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	 */
	nd->nl_flags |= NLC_OPEN;
	if (fmode & O_APPEND)
		nd->nl_flags |= NLC_APPEND;
	if (fmode & O_TRUNC)
		nd->nl_flags |= NLC_TRUNCATE;
	if (fmode & FREAD)
		nd->nl_flags |= NLC_READ;
	if (fmode & FWRITE)
		nd->nl_flags |= NLC_WRITE;
	if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
		nd->nl_flags |= NLC_FOLLOW;

	if (fmode & O_CREAT) {
		/*
		 * CONDITIONAL CREATE FILE CASE
		 *
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		 * is returned.
		 */
		nd->nl_flags |= NLC_CREATE;
		nd->nl_flags |= NLC_REFDVP;
		bwillinode(1);
		error = nlookup(nd);
	} else {
		/*
		 * NORMAL OPEN FILE CASE
		 */
		error = nlookup(nd);
	}

	if (error)
		return (error);

	/*
	 * split case to allow us to re-resolve and retry the ncp in case
	 * we get ESTALE.
	 *
	 * (error is 0 on entry / retry)
	 */
again:
	/*
	 * Checks for (likely) filesystem-modifying cases and allows
	 * the filesystem to stall the front-end.
	 */
	if ((fmode & (FWRITE | O_TRUNC)) ||
	    ((fmode & O_CREAT) && nd->nl_nch.ncp->nc_vp == NULL)) {
		error = ncp_writechk(&nd->nl_nch);
		if (error)
			return error;
	}

	if (fmode & O_CREAT) {
		if (nd->nl_nch.ncp->nc_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			vap->va_fuseflags = fmode; /* FUSE */
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp,
					    nd->nl_cred, vap);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			/* locked vnode is returned */
		} else {
			if (fmode & O_EXCL) {
				error = EEXIST;
			} else {
				error = cache_vget(&nd->nl_nch, cred,
						    LK_EXCLUSIVE, &vp);
			}
			if (error)
				return (error);
			fmode &= ~O_CREAT;
		}
	} else {
		if (nd->nl_flags & NLC_SHAREDLOCK) {
			error = cache_vget(&nd->nl_nch, cred, LK_SHARED, &vp);
		} else {
			error = cache_vget(&nd->nl_nch, cred,
					   LK_EXCLUSIVE, &vp);
		}
		if (error)
			return (error);
	}

	/*
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_nch is left intact.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if (vp->v_type != VDIR && (fmode & O_DIRECTORY)) {
		error = ENOTDIR;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}

			/*
			 * Additional checks on vnode (does not substitute
			 * for ncp_writechk()).
			 */
			error = vn_writechk(vp);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					if (nd->nl_flags & NLC_SHAREDLOCK) {
						cache_unlock(&nd->nl_nch);
						cache_lock(&nd->nl_nch);
					}
					cache_setunresolved(&nd->nl_nch);
					error = cache_resolve(&nd->nl_nch,
							      cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
		}
	}
	if (fmode & O_TRUNC) {
		vn_unlock(vp);				/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		osize = vp->v_filesize;
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR_FP(vp, vap, cred, fp);
		if (error)
			goto bad;
		error = VOP_GETATTR(vp, vap);
		if (error)
			goto bad;
		mp = vq_vptomp(vp);
		VFS_ACCOUNT(mp, vap->va_uid, vap->va_gid, -osize);
	}

	/*
	 * Set or clear VSWAPCACHE on the vp based on nd->nl_nch.ncp->nc_flag.
	 * These particular bits are tracked all the way from the root.
	 *
	 * NOTE: Might not work properly on NFS servers due to the
	 * disconnected namecache.
	 */
	flags = nd->nl_nch.ncp->nc_flag;
	if ((flags & (NCF_UF_CACHE | NCF_UF_PCACHE)) &&
	    (flags & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE)) == 0) {
		vsetflags(vp, VSWAPCACHE);
	} else {
		vclrflags(vp, VSWAPCACHE);
	}

	/*
	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
	 * associated with the fp yet so we own it clean.
	 *
	 * f_nchandle inherits nl_nch.  This used to be necessary only for
	 * directories but now we do it unconditionally so f*() ops
	 * such as fchmod() can access the actual namespace that was
	 * used to open the file.
	 */
	if (fp) {
		if (nd->nl_flags & NLC_APPENDONLY)
			fmode |= FAPPENDONLY;
		fp->f_nchandle = nd->nl_nch;
		cache_zero(&nd->nl_nch);
		cache_unlock(&fp->f_nchandle);
	}

	/*
	 * Get rid of nl_nch.  vn_open does not return it (it returns the
	 * vnode or the file pointer).  Note: we can't leave nl_nch locked
	 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
	 * on /dev/ttyd0
	 */
	if (nd->nl_nch.ncp)
		cache_put(&nd->nl_nch);

	error = VOP_OPEN(vp, fmode, cred, fp);
	if (error) {
		/*
		 * setting f_ops to &badfileops will prevent the descriptor
		 * code from trying to close and release the vnode, since
		 * the open failed we do not want to call close.
		 */
		if (fp) {
			fp->f_data = NULL;
			fp->f_ops = &badfileops;
		}
		goto bad;
	}

#if 0
	/*
	 * Assert that VREG files have been setup for vmio.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));
#endif

	/*
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	 */
	if (fp == NULL) {
		nd->nl_open_vp = vp;
		nd->nl_vp_fmode = fmode;
		if ((nd->nl_flags & NLC_LOCKVP) == 0)
			vn_unlock(vp);
	} else {
		vput(vp);
	}
	return (0);
bad:
	if (vp)
		vput(vp);
	return (error);
}

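/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a caller typically drives vn_open() through the nlookup API when it
 * wants the vnode back directly (fp == NULL).  The flag choices and the
 * helper name are assumptions; see kern_open() and friends for
 * authoritative callers.
 */
#if 0
static int
example_open_by_path(const char *path, struct vnode **vpp)
{
	struct nlookupdata nd;
	int error;

	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_LOCKVP);
	if (error == 0)
		error = vn_open(&nd, NULL, FREAD, 0);
	if (error == 0) {
		/* NLC_LOCKVP was set, so nl_open_vp is returned locked */
		*vpp = nd.nl_open_vp;
		nd.nl_open_vp = NULL;	/* keep it from nlookup_done() */
	}
	nlookup_done(&nd);		/* always clean up, error or not */
	return (error);
}
#endif
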
int
vn_opendisk(const char *devname, int fmode, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	if (strncmp(devname, "/dev/", 5) == 0)
		devname += 5;
	if ((vp = getsynthvnode(devname)) == NULL) {
		error = ENODEV;
	} else {
		error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL);
		vn_unlock(vp);
		if (error) {
			vrele(vp);
			vp = NULL;
		}
	}
	*vpp = vp;
	return (error);
}

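/*
 * Illustrative sketch (editor's addition): vn_opendisk() accepts either a
 * bare device name or a "/dev/"-prefixed path and on success returns an
 * opened, unlocked, referenced vnode.  The device name and the vn_close()
 * cleanup shown here are assumptions, not taken from this file's callers.
 */
#if 0
static int
example_opendisk(void)
{
	struct vnode *vp;
	int error;

	error = vn_opendisk("/dev/da0", FREAD, &vp);	/* same as "da0" */
	if (error == 0) {
		/* ... issue I/O against vp ... */
		vn_close(vp, FREAD, NULL);  /* closes and drops the ref */
	}
	return (error);
}
#endif
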
/*
 * Checks for special conditions on the vnode which might prevent writing
 * after the vnode has (likely) been locked.  The vnode might or might not
 * be locked as of this call, but will be at least referenced.
 *
 * Also re-checks the mount RDONLY flag that ncp_writechk() checked prior
 * to the vnode being locked.
 */
int
vn_writechk(struct vnode *vp)
{
	/*
	 * If there's shared text associated with the vnode,
	 * we can't allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	return 0;
}

/*
 * Check whether the underlying mount is read-only.  The mount point
 * referenced by the namecache may be different from the mount point
 * used by the underlying vnode in the case of NULLFS, so a separate
 * check is needed.
 *
 * Must be called PRIOR to any vnodes being locked.
 */
int
ncp_writechk(struct nchandle *nch)
{
	struct mount *mp;

	if ((mp = nch->mount) != NULL) {
		if (mp->mnt_flag & MNT_RDONLY)
			return (EROFS);
		if (mp->mnt_op->vfs_modifying != vfs_stdmodifying)
			VFS_MODIFYING(mp);
	}
	return(0);
}

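/*
 * Illustrative sketch (editor's addition): the intended ordering of the
 * two write checks, as practiced by vn_open() above.  ncp_writechk() runs
 * on the namecache handle before any vnode is locked; vn_writechk() runs
 * afterwards on the locked vnode.  The helper below is hypothetical.
 */
#if 0
static int
example_write_checks(struct nchandle *nch, struct vnode *vp)
{
	int error;

	error = ncp_writechk(nch);	/* before locking any vnode */
	if (error)
		return (error);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = vn_writechk(vp);	/* after the vnode is locked */
	if (error == 0) {
		/* ... perform the modifying operation ... */
	}
	vn_unlock(vp);
	return (error);
}
#endif
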
/*
 * Vnode close call
 *
 * MPSAFE
 */
int
vn_close(struct vnode *vp, int flags, struct file *fp)
{
	int error;

	error = vn_lock(vp, LK_SHARED | LK_RETRY | LK_FAILRECLAIM);
	if (error == 0) {
		error = VOP_CLOSE(vp, flags, fp);
		vn_unlock(vp);
	}
	vrele(vp);
	return (error);
}

/*
 * Sequential heuristic.
 *
 * MPSAFE (f_seqcount and f_nextoff are allowed to race)
 */
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;

		tmpseq += (uio->uio_resid + MAXBSIZE - 1) / MAXBSIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}

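/*
 * Worked example (editor's addition): assuming the usual BSD constants
 * (MAXBSIZE 65536, IO_SEQMAX 0x7F), a sequential 128KB read at the
 * expected offset bumps f_seqcount by (131072 + 65535) / 65536 = 2,
 * clamped at IO_SEQMAX.  The count is then shifted by IO_SEQSHIFT into
 * the ioflag handed to VOP_READ/VOP_WRITE, where filesystems use it to
 * scale read-ahead.
 */
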
/*
 * get - lock and return the f_offset field.
 * set - set and unlock the f_offset field.
 *
 * These routines serve the dual purpose of serializing access to the
 * f_offset field (at least on x86) and guaranteeing operational integrity
 * when multiple read()ers and write()ers are present on the same fp.
 *
 * MPSAFE
 */
static __inline off_t
vn_get_fpf_offset(struct file *fp)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * Shortcut critical path.
	 */
	flags = fp->f_flag & ~FOFFSETLOCK;
	if (atomic_cmpset_int(&fp->f_flag, flags, flags | FOFFSETLOCK))
		return(fp->f_offset);

	/*
	 * The hard way
	 */
	for (;;) {
		flags = fp->f_flag;
		if (flags & FOFFSETLOCK) {
			nflags = flags | FOFFSETWAKE;
			tsleep_interlock(&fp->f_flag, 0);
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				tsleep(&fp->f_flag, PINTERLOCKED, "fpoff", 0);
		} else {
			nflags = flags | FOFFSETLOCK;
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				break;
		}
	}
	return(fp->f_offset);
}

/*
 * MPSAFE
 */
static __inline void
vn_set_fpf_offset(struct file *fp, off_t offset)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * We hold the lock so we can set the offset without interference.
	 */
	fp->f_offset = offset;

	/*
	 * Normal release is already a reasonably critical path.
	 */
	for (;;) {
		flags = fp->f_flag;
		nflags = flags & ~(FOFFSETLOCK | FOFFSETWAKE);
		if (atomic_cmpset_int(&fp->f_flag, flags, nflags)) {
			if (flags & FOFFSETWAKE)
				wakeup(&fp->f_flag);
			break;
		}
	}
}

/*
 * MPSAFE
 */
static __inline off_t
vn_poll_fpf_offset(struct file *fp)
{
#if defined(__x86_64__)
	return(fp->f_offset);
#else
	off_t off = vn_get_fpf_offset(fp);
	vn_set_fpf_offset(fp, off);
	return(off);
#endif
}

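/*
 * Illustrative sketch (editor's addition): vn_get_fpf_offset() and
 * vn_set_fpf_offset() form a lock/unlock pair.  A caller that consumes
 * the file offset must always hand it back, even on error, or other
 * threads sleeping in the "fpoff" wait will hang.  This mirrors what
 * vn_read() and vn_write() below actually do; the helper is hypothetical.
 */
#if 0
static void
example_advance_offset(struct file *fp, off_t delta)
{
	off_t off;

	off = vn_get_fpf_offset(fp);	/* acquires FOFFSETLOCK */
	off += delta;
	vn_set_fpf_offset(fp, off);	/* stores and releases the lock */
}
#endif
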
/*
 * Package up an I/O request on a vnode into a uio and do it.
 *
 * MPSAFE
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = curthread;
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		vn_unlock(vp);
	return (error);
}

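/*
 * Illustrative sketch (editor's addition): a typical vn_rdwr() call
 * reading a header from the start of an already-opened vnode into a
 * kernel buffer.  Passing a non-NULL aresid makes short reads visible to
 * the caller instead of being converted into EIO.  The buffer size and
 * helper name are assumptions.
 */
#if 0
static int
example_read_header(struct vnode *vp, struct ucred *cred)
{
	char buf[512];
	int resid;
	int error;

	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, sizeof(buf),
			0, UIO_SYSSPACE, 0, cred, &resid);
	if (error == 0 && resid != 0) {
		/* short read: only sizeof(buf) - resid bytes are valid */
	}
	return (error);
}
#endif
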
/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call lwkt_user_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 *
 * MPSAFE
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
		 off_t offset, enum uio_seg segflg, int ioflg,
		 struct ucred *cred, int *aresid)
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (vp->v_type == VREG && (ioflg & IO_RECURSE) == 0) {
			switch(rw) {
			case UIO_READ:
				bwillread(chunk);
				break;
			case UIO_WRITE:
				bwillwrite(chunk);
				break;
			}
		}
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
				ioflg, cred, aresid);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		lwkt_user_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}

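/*
 * Worked example (editor's addition): assuming MAXBSIZE is 65536, a
 * 200000-byte transfer starting at offset 100000 is split as follows.
 * The first chunk is 65536 - (100000 % 65536) = 31072 bytes, bringing the
 * offset to 131072, an exact multiple of MAXBSIZE; the remaining chunks
 * are then 65536, 65536, and 37856 bytes.  Only the first and last chunks
 * can be partial blocks.
 */
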
/*
 * File pointers can no longer get ripped up by revoke so
 * we don't need to lock access to the vp.
 *
 * f_offset updates are not guaranteed against multiple readers
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_SHARED | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);

	error = VOP_READ_FP(vp, uio, ioflag, cred, fp);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	if (vp->v_mount)
		VFS_MODIFYING(vp->v_mount);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);
	error = VOP_WRITE_FP(vp, uio, ioflag, cred, fp);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	return (error);
}

/*
 * MPSAFE
 */
int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
	struct vattr vattr;
	struct vattr *vap;
	int error;
	u_short mode;
	cdev_t dev;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare2 = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDATABASE:
		mode |= S_IFDB;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/* This is a cosmetic change; symlinks do not have a mode. */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = devid_from_dev(vp->v_rdev);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		dev = vp->v_rdev;
		if (dev != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = time_second +
							  (time_uptime -
							   dev->si_lastread);
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = time_second +
							  (time_uptime -
							   dev->si_lastwrite);
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Default to PAGE_SIZE after much discussion.
	 */

	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		/*
		 * XXX this is broken.  If the device is not yet open (aka
		 * stat() call, aka v_rdev == NULL), how are we supposed
		 * to get a valid block size out of it?
		 */
		dev = vp->v_rdev;

		sb->st_blksize = dev->si_bsize_best;
		if (sb->st_blksize < dev->si_bsize_phys)
			sb->st_blksize = dev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;

	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
	if (error)
		sb->st_gen = 0;
	else
		sb->st_gen = (u_int32_t)vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;

	/*
	 * This is for ABI compatibility <= 5.7 (for ABI change made in
	 * 5.7 master).
	 */
	sb->__old_st_blksize = sb->st_blksize;

	return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred,
	 struct sysmsg *msg)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vnode *ovp;
	struct vattr vattr;
	int error;
	off_t size;

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr);
			if (error)
				break;
			size = vattr.va_size;
			if ((vp->v_flag & VNOTSEEKABLE) == 0)
				size -= vn_poll_fpf_offset(fp);
			if (size > 0x7FFFFFFF)
				size = 0x7FFFFFFF;
			*(int *)data = size;
			error = 0;
			break;
		}
		if (com == FIOASYNC) {				/* XXX */
			error = 0;				/* XXX */
			break;
		}
		/* fall into ... */
	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK) {
				error = ENOTTY;
				break;
			}
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			error = 0;
			break;
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred, msg);
		if (error == 0 && com == TIOCSCTTY) {
			struct proc *p = curthread->td_proc;
			struct session *sess;

			if (p == NULL) {
				error = ENOTTY;
				break;
			}

			get_mplock();
			sess = p->p_session;
			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp) {
				error = 0;
				rel_mplock();
				break;
			}

			/* Get rid of reference to old control tty */
			ovp = sess->s_ttyvp;
			vref(vp);
			sess->s_ttyvp = vp;
			if (ovp)
				vrele(ovp);
			rel_mplock();
		}
		break;
	}
	return (error);
}

/*
 * Obtain the requested vnode lock
 *
 *	LK_RETRY	Automatically retry on timeout
 *	LK_FAILRECLAIM	Fail if the vnode is being reclaimed
 *
 * Failures will occur if the vnode is being reclaimed, but not all
 * callers expect the function to fail, so the caller must pass
 * LK_FAILRECLAIM if it wants to process an error code.
 *
 * Errors can occur for other reasons if you pass in other LK_ flags,
 * regardless of whether you pass in LK_FAILRECLAIM
 */
int
vn_lock(struct vnode *vp, int flags)
{
	int error;

	do {
		error = lockmgr(&vp->v_lock, flags);
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	/*
	 * Because we (had better!) have a ref on the vnode, once it
	 * goes to VRECLAIMED state it will not be recycled until all
	 * refs go away.  So we can just check the flag.
	 */
	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
		if (flags & LK_FAILRECLAIM) {
			lockmgr(&vp->v_lock, LK_RELEASE);
			error = ENOENT;
		}
	}
	return (error);
}

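/*
 * Illustrative sketch (editor's addition): the common locking idiom used
 * elsewhere in this file (see vn_close() above).  With LK_RETRY the call
 * only fails for the reclaim case, and only because LK_FAILRECLAIM is
 * passed; without LK_FAILRECLAIM a lock on a reclaimed vnode would be
 * returned as success.
 */
#if 0
static int
example_locked_op(struct vnode *vp)
{
	int error;

	error = vn_lock(vp, LK_SHARED | LK_RETRY | LK_FAILRECLAIM);
	if (error == 0) {
		/* ... operate on the locked vnode ... */
		vn_unlock(vp);
	}
	return (error);	/* ENOENT if the vnode was reclaimed */
}
#endif
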
#ifdef DEBUG_VN_UNLOCK

void
debug_vn_unlock(struct vnode *vp, const char *filename, int line)
{
	kprintf("vn_unlock from %s:%d\n", filename, line);
	lockmgr(&vp->v_lock, LK_RELEASE);
}

#else

void
vn_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

#endif

/*
 * MPSAFE
 */
int
vn_islocked(struct vnode *vp)
{
	return (lockstatus(&vp->v_lock, curthread));
}

/*
 * Return the lock status of a vnode and unlock the vnode
 * if we owned the lock.  This is not a boolean; if the
 * caller cares what the lock status is, it must check
 * the various possible return values.
 *
 * This only unlocks exclusive locks held by the caller,
 * it will NOT unlock shared locks (there is no way to
 * tell who the shared lock belongs to).
 *
 * MPSAFE
 */
int
vn_islocked_unlock(struct vnode *vp)
{
	int vpls;

	vpls = lockstatus(&vp->v_lock, curthread);
	if (vpls == LK_EXCLUSIVE)
		lockmgr(&vp->v_lock, LK_RELEASE);
	return(vpls);
}

/*
 * Restore a vnode lock that we previously released via
 * vn_islocked_unlock().  This is a NOP if we did not
 * own the original lock.
 *
 * MPSAFE
 */
void
vn_islocked_relock(struct vnode *vp, int vpls)
{
	int error;

	if (vpls == LK_EXCLUSIVE)
		error = lockmgr(&vp->v_lock, vpls);
}

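/*
 * Illustrative sketch (editor's addition): the save/restore pattern the
 * vn_islocked_unlock()/vn_islocked_relock() pair is built for.  A caller
 * that may or may not hold the exclusive lock can drop it around a
 * blocking operation and transparently restore its previous state.
 */
#if 0
static void
example_unlock_around_blocking_op(struct vnode *vp)
{
	int vpls;

	vpls = vn_islocked_unlock(vp);	/* drops only our exclusive lock */
	/* ... blocking operation that must not hold the vnode lock ... */
	vn_islocked_relock(vp, vpls);	/* NOP unless we held it exclusive */
}
#endif
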
/*
 * MPSAFE
 */
static int
vn_closefile(struct file *fp)
{
	int error;

	fp->f_ops = &badfileops;
	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag, fp);
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
	return (error);
}