xref: /dragonfly/sys/kern/vfs_vnops.c (revision ed183f8c)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
35  * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
36  */
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/fcntl.h>
41 #include <sys/file.h>
42 #include <sys/stat.h>
43 #include <sys/proc.h>
44 #include <sys/priv.h>
45 #include <sys/mount.h>
46 #include <sys/nlookup.h>
47 #include <sys/vnode.h>
48 #include <sys/buf.h>
49 #include <sys/filio.h>
50 #include <sys/ttycom.h>
51 #include <sys/conf.h>
52 #include <sys/sysctl.h>
53 #include <sys/syslog.h>
54 
55 #include <sys/mplock2.h>
56 
57 static int vn_closefile (struct file *fp);
58 static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
59 		struct ucred *cred, struct sysmsg *msg);
60 static int vn_read (struct file *fp, struct uio *uio,
61 		struct ucred *cred, int flags);
62 static int vn_kqfilter (struct file *fp, struct knote *kn);
63 static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
64 static int vn_write (struct file *fp, struct uio *uio,
65 		struct ucred *cred, int flags);
66 
67 struct fileops vnode_fileops = {
68 	.fo_read = vn_read,
69 	.fo_write = vn_write,
70 	.fo_ioctl = vn_ioctl,
71 	.fo_kqfilter = vn_kqfilter,
72 	.fo_stat = vn_statfile,
73 	.fo_close = vn_closefile,
74 	.fo_shutdown = nofo_shutdown
75 };
76 
77 /*
78  * Common code for vnode open operations.  Check permissions, and call
79  * the VOP_OPEN or VOP_NCREATE routine.
80  *
81  * The caller is responsible for setting up nd with nlookup_init() and
82  * for cleaning it up with nlookup_done(), whether we return an error
83  * or not.
84  *
85  * On success nd->nl_open_vp will hold a referenced and, if requested,
86  * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
87  * is non-NULL the vnode will be installed in the file pointer.
88  *
89  * NOTE: If the caller wishes the namecache entry to be operated with
90  *	 a shared lock it must use NLC_SHAREDLOCK.  If NLC_LOCKVP is set
91  *	 then the vnode lock will also be shared.
92  *
93  * NOTE: The vnode is referenced just once on return whether or not it
94  *	 is also installed in the file pointer.
95  */
96 int
97 vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
98 {
99 	struct vnode *vp;
100 	struct ucred *cred = nd->nl_cred;
101 	struct vattr vat;
102 	struct vattr *vap = &vat;
103 	int error;
104 	u_int flags;
105 	uint64_t osize;
106 	struct mount *mp;
107 
108 	/*
109 	 * Certain combinations are illegal
110 	 */
111 	if ((fmode & (FWRITE | O_TRUNC)) == O_TRUNC)
112 		return(EACCES);
113 
114 	/*
115 	 * Lookup the path and create or obtain the vnode.  After a
116 	 * successful lookup a locked nd->nl_nch will be returned.
117 	 *
118 	 * The result of this section should be a locked vnode.
119 	 *
120 	 * XXX with only a little work we should be able to avoid locking
121 	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
122 	 */
123 	nd->nl_flags |= NLC_OPEN;
124 	if (fmode & O_APPEND)
125 		nd->nl_flags |= NLC_APPEND;
126 	if (fmode & O_TRUNC)
127 		nd->nl_flags |= NLC_TRUNCATE;
128 	if (fmode & FREAD)
129 		nd->nl_flags |= NLC_READ;
130 	if (fmode & FWRITE)
131 		nd->nl_flags |= NLC_WRITE;
132 	if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
133 		nd->nl_flags |= NLC_FOLLOW;
134 
135 	if (fmode & O_CREAT) {
136 		/*
137 		 * CONDITIONAL CREATE FILE CASE
138 		 *
139 		 * Setting NLC_CREATE causes a negative hit to store
140 		 * the negative hit ncp and not return an error.  Then
141 		 * nc_error or nc_vp may be checked to see if the ncp
142 		 * represents a negative hit.  NLC_CREATE also requires
143 		 * write permission on the governing directory or EPERM
144 		 * is returned.
145 		 */
146 		nd->nl_flags |= NLC_CREATE;
147 		nd->nl_flags |= NLC_REFDVP;
148 		bwillinode(1);
149 		error = nlookup(nd);
150 	} else {
151 		/*
152 		 * NORMAL OPEN FILE CASE
153 		 */
154 		error = nlookup(nd);
155 	}
156 
157 	if (error)
158 		return (error);
159 
160 	/*
161 	 * split case to allow us to re-resolve and retry the ncp in case
162 	 * we get ESTALE.
163 	 */
164 again:
165 	if (fmode & O_CREAT) {
166 		if (nd->nl_nch.ncp->nc_vp == NULL) {
167 			if ((error = ncp_writechk(&nd->nl_nch)) != 0)
168 				return (error);
169 			VATTR_NULL(vap);
170 			vap->va_type = VREG;
171 			vap->va_mode = cmode;
172 			vap->va_fuseflags = fmode; /* FUSE */
173 			if (fmode & O_EXCL)
174 				vap->va_vaflags |= VA_EXCLUSIVE;
175 			error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp,
176 					    nd->nl_cred, vap);
177 			if (error)
178 				return (error);
179 			fmode &= ~O_TRUNC;
180 			/* locked vnode is returned */
181 		} else {
182 			if (fmode & O_EXCL) {
183 				error = EEXIST;
184 			} else {
185 				error = cache_vget(&nd->nl_nch, cred,
186 						    LK_EXCLUSIVE, &vp);
187 			}
188 			if (error)
189 				return (error);
190 			fmode &= ~O_CREAT;
191 		}
192 	} else {
193 		if (nd->nl_flags & NLC_SHAREDLOCK) {
194 			error = cache_vget(&nd->nl_nch, cred, LK_SHARED, &vp);
195 		} else {
196 			error = cache_vget(&nd->nl_nch, cred,
197 					   LK_EXCLUSIVE, &vp);
198 		}
199 		if (error)
200 			return (error);
201 	}
202 
203 	/*
204 	 * We have a locked vnode and ncp now.  Note that the ncp will
205 	 * be cleaned up by the caller if nd->nl_nch is left intact.
206 	 */
207 	if (vp->v_type == VLNK) {
208 		error = EMLINK;
209 		goto bad;
210 	}
211 	if (vp->v_type == VSOCK) {
212 		error = EOPNOTSUPP;
213 		goto bad;
214 	}
215 	if (vp->v_type != VDIR && (fmode & O_DIRECTORY)) {
216 		error = ENOTDIR;
217 		goto bad;
218 	}
219 	if ((fmode & O_CREAT) == 0) {
220 		if (fmode & (FWRITE | O_TRUNC)) {
221 			if (vp->v_type == VDIR) {
222 				error = EISDIR;
223 				goto bad;
224 			}
225 			error = vn_writechk(vp, &nd->nl_nch);
226 			if (error) {
227 				/*
228 				 * Special stale handling, re-resolve the
229 				 * vnode.
230 				 */
231 				if (error == ESTALE) {
232 					vput(vp);
233 					vp = NULL;
234 					if (nd->nl_flags & NLC_SHAREDLOCK) {
235 						cache_unlock(&nd->nl_nch);
236 						cache_lock(&nd->nl_nch);
237 					}
238 					cache_setunresolved(&nd->nl_nch);
239 					error = cache_resolve(&nd->nl_nch,
240 							      cred);
241 					if (error == 0)
242 						goto again;
243 				}
244 				goto bad;
245 			}
246 		}
247 	}
248 	if (fmode & O_TRUNC) {
249 		vn_unlock(vp);				/* XXX */
250 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
251 		osize = vp->v_filesize;
252 		VATTR_NULL(vap);
253 		vap->va_size = 0;
254 		error = VOP_SETATTR_FP(vp, vap, cred, fp);
255 		if (error)
256 			goto bad;
257 		error = VOP_GETATTR(vp, vap);
258 		if (error)
259 			goto bad;
260 		mp = vq_vptomp(vp);
261 		VFS_ACCOUNT(mp, vap->va_uid, vap->va_gid, -osize);
262 	}
263 
264 	/*
265 	 * Set or clear VSWAPCACHE on the vp based on nd->nl_nch.ncp->nc_flag.
266 	 * These particular bits are tracked all the way from the root.
267 	 *
268 	 * NOTE: Might not work properly on NFS servers due to the
269 	 * disconnected namecache.
270 	 */
271 	flags = nd->nl_nch.ncp->nc_flag;
272 	if ((flags & (NCF_UF_CACHE | NCF_UF_PCACHE)) &&
273 	    (flags & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE)) == 0) {
274 		vsetflags(vp, VSWAPCACHE);
275 	} else {
276 		vclrflags(vp, VSWAPCACHE);
277 	}
278 
279 	/*
280 	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
281 	 * associated with the fp yet so we own it clean.
282 	 *
283 	 * f_nchandle inherits nl_nch.  This used to be necessary only for
284 	 * directories but now we do it unconditionally so f*() ops
285 	 * such as fchmod() can access the actual namespace that was
286 	 * used to open the file.
287 	 */
288 	if (fp) {
289 		if (nd->nl_flags & NLC_APPENDONLY)
290 			fmode |= FAPPENDONLY;
291 		fp->f_nchandle = nd->nl_nch;
292 		cache_zero(&nd->nl_nch);
293 		cache_unlock(&fp->f_nchandle);
294 	}
295 
296 	/*
297 	 * Get rid of nl_nch.  vn_open does not return it (it returns the
298 	 * vnode or the file pointer).  Note: we can't leave nl_nch locked
299 	 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
300 	 * on /dev/ttyd0
301 	 */
302 	if (nd->nl_nch.ncp)
303 		cache_put(&nd->nl_nch);
304 
305 	error = VOP_OPEN(vp, fmode, cred, fp);
306 	if (error) {
307 		/*
308 		 * setting f_ops to &badfileops will prevent the descriptor
309 		 * code from trying to close and release the vnode, since
310 		 * the open failed we do not want to call close.
311 		 */
312 		if (fp) {
313 			fp->f_data = NULL;
314 			fp->f_ops = &badfileops;
315 		}
316 		goto bad;
317 	}
318 
319 #if 0
320 	/*
321 	 * Assert that VREG files have been setup for vmio.
322 	 */
323 	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
324 		("vn_open: regular file was not VMIO enabled!"));
325 #endif
326 
327 	/*
328 	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
329 	 * only returned in the fp == NULL case.
330 	 */
331 	if (fp == NULL) {
332 		nd->nl_open_vp = vp;
333 		nd->nl_vp_fmode = fmode;
334 		if ((nd->nl_flags & NLC_LOCKVP) == 0)
335 			vn_unlock(vp);
336 	} else {
337 		vput(vp);
338 	}
339 	return (0);
340 bad:
341 	if (vp)
342 		vput(vp);
343 	return (error);
344 }
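
/*
 * Illustrative vn_open() consumer for the fp == NULL case (a loose
 * sketch modeled on in-tree callers; 'path' and the flag choices are
 * hypothetical).  nlookup_done() disposes of nl_open_vp if it is still
 * set, so a caller that wants to keep the vnode clears the field first:
 *
 *	struct nlookupdata nd;
 *	struct vnode *vp = NULL;
 *	int error;
 *
 *	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
 *	if (error == 0)
 *		error = vn_open(&nd, NULL, FREAD, 0);
 *	if (error == 0) {
 *		vp = nd.nl_open_vp;	/* referenced; unlocked w/o NLC_LOCKVP */
 *		nd.nl_open_vp = NULL;	/* keep it past nlookup_done() */
 *	}
 *	nlookup_done(&nd);
 */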
345 
346 int
347 vn_opendisk(const char *devname, int fmode, struct vnode **vpp)
348 {
349 	struct vnode *vp;
350 	int error;
351 
352 	if (strncmp(devname, "/dev/", 5) == 0)
353 		devname += 5;
354 	if ((vp = getsynthvnode(devname)) == NULL) {
355 		error = ENODEV;
356 	} else {
357 		error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL);
358 		vn_unlock(vp);
359 		if (error) {
360 			vrele(vp);
361 			vp = NULL;
362 		}
363 	}
364 	*vpp = vp;
365 	return (error);
366 }
367 
368 /*
369  * Check for write permissions on the specified vnode.  nch may be NULL.
370  */
371 int
372 vn_writechk(struct vnode *vp, struct nchandle *nch)
373 {
374 	/*
375 	 * If there's shared text associated with
376 	 * the vnode, try to free it up once.  If
377 	 * we fail, we can't allow writing.
378 	 */
379 	if (vp->v_flag & VTEXT)
380 		return (ETXTBSY);
381 
382 	/*
383 	 * If the vnode represents a regular file, check the mount
384 	 * point via the nch.  This may be a different mount point
385 	 * than the one embedded in the vnode (e.g. nullfs).
386 	 *
387 	 * We can still write to non-regular files (e.g. devices)
388 	 * via read-only mounts.
389 	 */
390 	if (nch && nch->ncp && vp->v_type == VREG)
391 		return (ncp_writechk(nch));
392 	return (0);
393 }
394 
395 /*
396  * Check whether the underlying mount is read-only.  The mount point
397  * referenced by the namecache may be different from the mount point
398  * used by the underlying vnode in the case of NULLFS, so a separate
399  * check is needed.
400  */
401 int
402 ncp_writechk(struct nchandle *nch)
403 {
404 	struct mount *mp;
405 
406 	if ((mp = nch->mount) != NULL) {
407 		if (mp->mnt_flag & MNT_RDONLY)
408 			return (EROFS);
409 		if (mp->mnt_op->vfs_modifying != vfs_stdmodifying)
410 			VFS_MODIFYING(mp);
411 	}
412 	return(0);
413 }
414 
415 /*
416  * Vnode close call
417  *
418  * MPSAFE
419  */
420 int
421 vn_close(struct vnode *vp, int flags, struct file *fp)
422 {
423 	int error;
424 
425 	error = vn_lock(vp, LK_SHARED | LK_RETRY | LK_FAILRECLAIM);
426 	if (error == 0) {
427 		error = VOP_CLOSE(vp, flags, fp);
428 		vn_unlock(vp);
429 	}
430 	vrele(vp);
431 	return (error);
432 }
433 
434 /*
435  * Sequential heuristic.
436  *
437  * MPSAFE (f_seqcount and f_nextoff are allowed to race)
438  */
439 static __inline
440 int
441 sequential_heuristic(struct uio *uio, struct file *fp)
442 {
443 	/*
444 	 * Sequential heuristic - detect sequential operation
445 	 *
446 	 * NOTE: SMP: We allow f_seqcount updates to race.
447 	 */
448 	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
449 	    uio->uio_offset == fp->f_nextoff) {
450 		int tmpseq = fp->f_seqcount;
451 
452 		tmpseq += (uio->uio_resid + MAXBSIZE - 1) / MAXBSIZE;
453 		if (tmpseq > IO_SEQMAX)
454 			tmpseq = IO_SEQMAX;
455 		fp->f_seqcount = tmpseq;
456 		return(fp->f_seqcount << IO_SEQSHIFT);
457 	}
458 
459 	/*
460 	 * Not sequential, quick draw-down of seqcount
461 	 *
462 	 * NOTE: SMP: We allow f_seqcount updates to race.
463 	 */
464 	if (fp->f_seqcount > 1)
465 		fp->f_seqcount = 1;
466 	else
467 		fp->f_seqcount = 0;
468 	return(0);
469 }
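
/*
 * Worked example for the heuristic above (assuming MAXBSIZE == 65536):
 * a reader issuing back-to-back 64KB reads finds each new uio_offset
 * equal to f_nextoff, so every call adds one to f_seqcount until it
 * clamps at IO_SEQMAX.  The return value, f_seqcount << IO_SEQSHIFT,
 * is merged into ioflag where the filesystem can use it to scale
 * read-ahead.  A single non-contiguous access drops f_seqcount to 1,
 * and the next one to 0.
 */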
470 
471 /*
472  * get - lock and return the f_offset field.
473  * set - set and unlock the f_offset field.
474  *
475  * These routines serve the dual purpose of serializing access to the
476  * f_offset field (at least on x86) and guaranteeing operational integrity
477  * when multiple read()ers and write()ers are present on the same fp.
478  *
479  * MPSAFE
480  */
481 static __inline off_t
482 vn_get_fpf_offset(struct file *fp)
483 {
484 	u_int	flags;
485 	u_int	nflags;
486 
487 	/*
488 	 * Shortcut critical path.
489 	 */
490 	flags = fp->f_flag & ~FOFFSETLOCK;
491 	if (atomic_cmpset_int(&fp->f_flag, flags, flags | FOFFSETLOCK))
492 		return(fp->f_offset);
493 
494 	/*
495 	 * The hard way
496 	 */
497 	for (;;) {
498 		flags = fp->f_flag;
499 		if (flags & FOFFSETLOCK) {
500 			nflags = flags | FOFFSETWAKE;
501 			tsleep_interlock(&fp->f_flag, 0);
502 			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
503 				tsleep(&fp->f_flag, PINTERLOCKED, "fpoff", 0);
504 		} else {
505 			nflags = flags | FOFFSETLOCK;
506 			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
507 				break;
508 		}
509 	}
510 	return(fp->f_offset);
511 }
512 
513 /*
514  * MPSAFE
515  */
516 static __inline void
517 vn_set_fpf_offset(struct file *fp, off_t offset)
518 {
519 	u_int	flags;
520 	u_int	nflags;
521 
522 	/*
523 	 * We hold the lock so we can set the offset without interference.
524 	 */
525 	fp->f_offset = offset;
526 
527 	/*
528 	 * Normal release is already a reasonably critical path.
529 	 */
530 	for (;;) {
531 		flags = fp->f_flag;
532 		nflags = flags & ~(FOFFSETLOCK | FOFFSETWAKE);
533 		if (atomic_cmpset_int(&fp->f_flag, flags, nflags)) {
534 			if (flags & FOFFSETWAKE)
535 				wakeup(&fp->f_flag);
536 			break;
537 		}
538 	}
539 }
540 
541 /*
542  * MPSAFE
543  */
544 static __inline off_t
545 vn_poll_fpf_offset(struct file *fp)
546 {
547 #if defined(__x86_64__)
548 	return(fp->f_offset);
549 #else
550 	off_t off = vn_get_fpf_offset(fp);
551 	vn_set_fpf_offset(fp, off);
552 	return(off);
553 #endif
554 }
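
/*
 * NOTE: The x86_64 fast path in vn_poll_fpf_offset() leans on the
 * assumption that an aligned 64-bit load is atomic on that platform,
 * so f_offset can be sampled without taking FOFFSETLOCK.  On 32-bit
 * platforms the load could tear against a concurrent writer, hence
 * the full lock/unlock cycle.
 */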
555 
556 /*
557  * Package up an I/O request on a vnode into a uio and do it.
558  *
559  * MPSAFE
560  */
561 int
562 vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
563 	off_t offset, enum uio_seg segflg, int ioflg,
564 	struct ucred *cred, int *aresid)
565 {
566 	struct uio auio;
567 	struct iovec aiov;
568 	int error;
569 
570 	if ((ioflg & IO_NODELOCKED) == 0)
571 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
572 	auio.uio_iov = &aiov;
573 	auio.uio_iovcnt = 1;
574 	aiov.iov_base = base;
575 	aiov.iov_len = len;
576 	auio.uio_resid = len;
577 	auio.uio_offset = offset;
578 	auio.uio_segflg = segflg;
579 	auio.uio_rw = rw;
580 	auio.uio_td = curthread;
581 	if (rw == UIO_READ) {
582 		error = VOP_READ(vp, &auio, ioflg, cred);
583 	} else {
584 		error = VOP_WRITE(vp, &auio, ioflg, cred);
585 	}
586 	if (aresid)
587 		*aresid = auio.uio_resid;
588 	else
589 		if (auio.uio_resid && error == 0)
590 			error = EIO;
591 	if ((ioflg & IO_NODELOCKED) == 0)
592 		vn_unlock(vp);
593 	return (error);
594 }
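
/*
 * Illustrative vn_rdwr() call (a sketch; 'vp', 'buf', 'len', 'off' and
 * 'cred' are hypothetical): read into a kernel buffer and treat a short
 * read as an error by passing a NULL aresid, per the logic above:
 *
 *	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, len, off,
 *			UIO_SYSSPACE, 0, cred, NULL);
 *
 * With a non-NULL aresid the residual byte count is returned instead
 * and a short transfer is not converted to EIO.
 */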
595 
596 /*
597  * Package up an I/O request on a vnode into a uio and do it.  The I/O
598  * request is split up into smaller chunks and we try to avoid saturating
599  * the buffer cache while potentially holding a vnode locked, so we
600  * check bwillread()/bwillwrite() before calling vn_rdwr().  We also call lwkt_user_yield()
601  * to give other processes a chance to lock the vnode (either other processes
602  * core'ing the same binary, or unrelated processes scanning the directory).
603  *
604  * MPSAFE
605  */
606 int
607 vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
608 		 off_t offset, enum uio_seg segflg, int ioflg,
609 		 struct ucred *cred, int *aresid)
610 {
611 	int error = 0;
612 
613 	do {
614 		int chunk;
615 
616 		/*
617 		 * Force `offset' to a multiple of MAXBSIZE except possibly
618 		 * for the first chunk, so that filesystems only need to
619 		 * write full blocks except possibly for the first and last
620 		 * chunks.
621 		 */
622 		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;
623 
624 		if (chunk > len)
625 			chunk = len;
626 		if (vp->v_type == VREG && (ioflg & IO_RECURSE) == 0) {
627 			switch(rw) {
628 			case UIO_READ:
629 				bwillread(chunk);
630 				break;
631 			case UIO_WRITE:
632 				bwillwrite(chunk);
633 				break;
634 			}
635 		}
636 		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
637 				ioflg, cred, aresid);
638 		len -= chunk;	/* aresid calc already includes length */
639 		if (error)
640 			break;
641 		offset += chunk;
642 		base += chunk;
643 		lwkt_user_yield();
644 	} while (len);
645 	if (aresid)
646 		*aresid += len;
647 	return (error);
648 }
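
/*
 * Chunk alignment example for the calculation above (assuming
 * MAXBSIZE == 65536): starting at offset 100000, the first chunk is
 * 65536 - (100000 % 65536) == 31072 bytes, which advances the offset
 * to 131072, an exact MAXBSIZE multiple; every later chunk except
 * possibly the last is then a full 65536 bytes.
 */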
649 
650 /*
651  * File pointers can no longer get ripped up by revoke so
652  * we don't need to lock access to the vp.
653  *
654  * f_offset updates are not guaranteed against multiple readers
655  */
656 static int
657 vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
658 {
659 	struct vnode *vp;
660 	int error, ioflag;
661 
662 	KASSERT(uio->uio_td == curthread,
663 		("uio_td %p is not td %p", uio->uio_td, curthread));
664 	vp = (struct vnode *)fp->f_data;
665 
666 	ioflag = 0;
667 	if (flags & O_FBLOCKING) {
668 		/* ioflag &= ~IO_NDELAY; */
669 	} else if (flags & O_FNONBLOCKING) {
670 		ioflag |= IO_NDELAY;
671 	} else if (fp->f_flag & FNONBLOCK) {
672 		ioflag |= IO_NDELAY;
673 	}
674 	if (fp->f_flag & O_DIRECT) {
675 		ioflag |= IO_DIRECT;
676 	}
677 	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
678 		uio->uio_offset = vn_get_fpf_offset(fp);
679 	vn_lock(vp, LK_SHARED | LK_RETRY);
680 	ioflag |= sequential_heuristic(uio, fp);
681 
682 	error = VOP_READ_FP(vp, uio, ioflag, cred, fp);
683 	fp->f_nextoff = uio->uio_offset;
684 	vn_unlock(vp);
685 	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
686 		vn_set_fpf_offset(fp, uio->uio_offset);
687 	return (error);
688 }
689 
690 /*
691  * MPSAFE
692  */
693 static int
694 vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
695 {
696 	struct vnode *vp;
697 	int error, ioflag;
698 
699 	KASSERT(uio->uio_td == curthread,
700 		("uio_td %p is not td %p", uio->uio_td, curthread));
701 	vp = (struct vnode *)fp->f_data;
702 
703 	ioflag = IO_UNIT;
704 	if (vp->v_type == VREG &&
705 	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
706 		ioflag |= IO_APPEND;
707 	}
708 
709 	if (flags & O_FBLOCKING) {
710 		/* ioflag &= ~IO_NDELAY; */
711 	} else if (flags & O_FNONBLOCKING) {
712 		ioflag |= IO_NDELAY;
713 	} else if (fp->f_flag & FNONBLOCK) {
714 		ioflag |= IO_NDELAY;
715 	}
716 	if (fp->f_flag & O_DIRECT) {
717 		ioflag |= IO_DIRECT;
718 	}
719 	if (flags & O_FASYNCWRITE) {
720 		/* ioflag &= ~IO_SYNC; */
721 	} else if (flags & O_FSYNCWRITE) {
722 		ioflag |= IO_SYNC;
723 	} else if (fp->f_flag & O_FSYNC) {
724 		ioflag |= IO_SYNC;
725 	}
726 
727 	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
728 		ioflag |= IO_SYNC;
729 	if ((flags & O_FOFFSET) == 0)
730 		uio->uio_offset = vn_get_fpf_offset(fp);
731 	if (vp->v_mount)
732 		VFS_MODIFYING(vp->v_mount);
733 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
734 	ioflag |= sequential_heuristic(uio, fp);
735 	error = VOP_WRITE_FP(vp, uio, ioflag, cred, fp);
736 	fp->f_nextoff = uio->uio_offset;
737 	vn_unlock(vp);
738 	if ((flags & O_FOFFSET) == 0)
739 		vn_set_fpf_offset(fp, uio->uio_offset);
740 	return (error);
741 }
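
/*
 * NOTE: In both vn_read() and vn_write(), O_FOFFSET means the caller
 * supplied uio_offset itself (the pread(2)/pwrite(2) style paths), so
 * the shared f_offset field is neither consumed nor written back.
 */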
742 
743 /*
744  * MPSAFE
745  */
746 static int
747 vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
748 {
749 	struct vnode *vp;
750 	int error;
751 
752 	vp = (struct vnode *)fp->f_data;
753 	error = vn_stat(vp, sb, cred);
754 	return (error);
755 }
756 
757 /*
758  * MPSAFE
759  */
760 int
761 vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
762 {
763 	struct vattr vattr;
764 	struct vattr *vap;
765 	int error;
766 	u_short mode;
767 	cdev_t dev;
768 
769 	vap = &vattr;
770 	error = VOP_GETATTR(vp, vap);
771 	if (error)
772 		return (error);
773 
774 	/*
775 	 * Zero the spare stat fields
776 	 */
777 	sb->st_lspare = 0;
778 	sb->st_qspare2 = 0;
779 
780 	/*
781 	 * Copy from vattr table
782 	 */
783 	if (vap->va_fsid != VNOVAL)
784 		sb->st_dev = vap->va_fsid;
785 	else
786 		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
787 	sb->st_ino = vap->va_fileid;
788 	mode = vap->va_mode;
789 	switch (vap->va_type) {
790 	case VREG:
791 		mode |= S_IFREG;
792 		break;
793 	case VDATABASE:
794 		mode |= S_IFDB;
795 		break;
796 	case VDIR:
797 		mode |= S_IFDIR;
798 		break;
799 	case VBLK:
800 		mode |= S_IFBLK;
801 		break;
802 	case VCHR:
803 		mode |= S_IFCHR;
804 		break;
805 	case VLNK:
806 		mode |= S_IFLNK;
807 		/* This is a cosmetic change, symlinks do not have a mode. */
808 		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
809 			mode &= ~ACCESSPERMS;	/* 0000 */
810 		else
811 			mode |= ACCESSPERMS;	/* 0777 */
812 		break;
813 	case VSOCK:
814 		mode |= S_IFSOCK;
815 		break;
816 	case VFIFO:
817 		mode |= S_IFIFO;
818 		break;
819 	default:
820 		return (EBADF);
821 	}
822 	sb->st_mode = mode;
823 	if (vap->va_nlink > (nlink_t)-1)
824 		sb->st_nlink = (nlink_t)-1;
825 	else
826 		sb->st_nlink = vap->va_nlink;
827 	sb->st_uid = vap->va_uid;
828 	sb->st_gid = vap->va_gid;
829 	sb->st_rdev = dev2udev(vp->v_rdev);
830 	sb->st_size = vap->va_size;
831 	sb->st_atimespec = vap->va_atime;
832 	sb->st_mtimespec = vap->va_mtime;
833 	sb->st_ctimespec = vap->va_ctime;
834 
835 	/*
836 	 * A VCHR or VBLK device may track the last access and last modified
837 	 * time independently of the filesystem.  This is particularly true
838 	 * because device read and write calls may bypass the filesystem.
839 	 */
840 	if (vp->v_type == VCHR || vp->v_type == VBLK) {
841 		dev = vp->v_rdev;
842 		if (dev != NULL) {
843 			if (dev->si_lastread) {
844 				sb->st_atimespec.tv_sec = time_second +
845 							  (time_uptime -
846 							   dev->si_lastread);
847 				sb->st_atimespec.tv_nsec = 0;
848 			}
849 			if (dev->si_lastwrite) {
850 				sb->st_mtimespec.tv_sec = time_second +
851 							  (time_uptime -
852 							   dev->si_lastwrite);
853 				sb->st_mtimespec.tv_nsec = 0;
854 			}
855 		}
856 	}
857 
858 	/*
859 	 * According to www.opengroup.org, the meaning of st_blksize is
860 	 *   "a filesystem-specific preferred I/O block size for this
861 	 *    object.  In some filesystem types, this may vary from file
862 	 *    to file"
863 	 * Default to PAGE_SIZE after much discussion.
864 	 */
865 
866 	if (vap->va_type == VREG) {
867 		sb->st_blksize = vap->va_blocksize;
868 	} else if (vn_isdisk(vp, NULL)) {
869 		/*
870 		 * XXX this is broken.  If the device is not yet open (aka
871 		 * stat() call, aka v_rdev == NULL), how are we supposed
872 		 * to get a valid block size out of it?
873 		 */
874 		dev = vp->v_rdev;
875 
876 		sb->st_blksize = dev->si_bsize_best;
877 		if (sb->st_blksize < dev->si_bsize_phys)
878 			sb->st_blksize = dev->si_bsize_phys;
879 		if (sb->st_blksize < BLKDEV_IOSIZE)
880 			sb->st_blksize = BLKDEV_IOSIZE;
881 	} else {
882 		sb->st_blksize = PAGE_SIZE;
883 	}
884 
885 	sb->st_flags = vap->va_flags;
886 
887 	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
888 	if (error)
889 		sb->st_gen = 0;
890 	else
891 		sb->st_gen = (u_int32_t)vap->va_gen;
892 
893 	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
894 
895 	/*
896 	 * This is for ABI compatibility <= 5.7 (for ABI change made in
897 	 * 5.7 master).
898 	 */
899 	sb->__old_st_blksize = sb->st_blksize;
900 
901 	return (0);
902 }
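
/*
 * NOTE: st_blocks above is expressed in S_BLKSIZE (512-byte) units, so
 * e.g. a file with va_bytes == 8192 reports st_blocks == 16.
 */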
903 
904 /*
905  * MPALMOSTSAFE - acquires mplock
906  */
907 static int
908 vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred,
909 	 struct sysmsg *msg)
910 {
911 	struct vnode *vp = ((struct vnode *)fp->f_data);
912 	struct vnode *ovp;
913 	struct vattr vattr;
914 	int error;
915 	off_t size;
916 
917 	switch (vp->v_type) {
918 	case VREG:
919 	case VDIR:
920 		if (com == FIONREAD) {
921 			error = VOP_GETATTR(vp, &vattr);
922 			if (error)
923 				break;
924 			size = vattr.va_size;
925 			if ((vp->v_flag & VNOTSEEKABLE) == 0)
926 				size -= vn_poll_fpf_offset(fp);
927 			if (size > 0x7FFFFFFF)
928 				size = 0x7FFFFFFF;
929 			*(int *)data = size;
930 			error = 0;
931 			break;
932 		}
933 		if (com == FIOASYNC) {				/* XXX */
934 			error = 0;				/* XXX */
935 			break;
936 		}
937 		/* fall into ... */
938 	default:
939 #if 0
940 		return (ENOTTY);
941 #endif
942 	case VFIFO:
943 	case VCHR:
944 	case VBLK:
945 		if (com == FIODTYPE) {
946 			if (vp->v_type != VCHR && vp->v_type != VBLK) {
947 				error = ENOTTY;
948 				break;
949 			}
950 			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
951 			error = 0;
952 			break;
953 		}
954 		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred, msg);
955 		if (error == 0 && com == TIOCSCTTY) {
956 			struct proc *p = curthread->td_proc;
957 			struct session *sess;
958 
959 			if (p == NULL) {
960 				error = ENOTTY;
961 				break;
962 			}
963 
964 			get_mplock();
965 			sess = p->p_session;
966 			/* Do nothing if reassigning same control tty */
967 			if (sess->s_ttyvp == vp) {
968 				error = 0;
969 				rel_mplock();
970 				break;
971 			}
972 
973 			/* Get rid of reference to old control tty */
974 			ovp = sess->s_ttyvp;
975 			vref(vp);
976 			sess->s_ttyvp = vp;
977 			if (ovp)
978 				vrele(ovp);
979 			rel_mplock();
980 		}
981 		break;
982 	}
983 	return (error);
984 }
985 
986 /*
987  * Obtain the requested vnode lock
988  *
989  *	LK_RETRY	Automatically retry on timeout
990  *	LK_FAILRECLAIM	Fail if the vnode is being reclaimed
991  *
992  * Failures will occur if the vnode is being reclaimed, but not
993  * all callers expect the function to fail, so the caller must pass
994  * LK_FAILRECLAIM if it wants to receive an error code.
995  *
996  * Errors can occur for other reasons if you pass in other LK_ flags,
997  * regardless of whether you pass in LK_FAILRECLAIM
998  */
999 int
1000 vn_lock(struct vnode *vp, int flags)
1001 {
1002 	int error;
1003 
1004 	do {
1005 		error = lockmgr(&vp->v_lock, flags);
1006 		if (error == 0)
1007 			break;
1008 	} while (flags & LK_RETRY);
1009 
1010 	/*
1011 	 * Because we (had better!) have a ref on the vnode, once it
1012 	 * goes to VRECLAIMED state it will not be recycled until all
1013 	 * refs go away.  So we can just check the flag.
1014 	 */
1015 	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
1016 		if (flags & LK_FAILRECLAIM) {
1017 			lockmgr(&vp->v_lock, LK_RELEASE);
1018 			error = ENOENT;
1019 		}
1020 	}
1021 	return (error);
1022 }
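
/*
 * Typical usage sketch: callers that must notice a dying vnode combine
 * LK_RETRY with LK_FAILRECLAIM and check the result, as vn_close()
 * above does:
 *
 *	if (vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_FAILRECLAIM) == 0) {
 *		... operate on the locked vnode ...
 *		vn_unlock(vp);
 *	}
 */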
1023 
1024 #ifdef DEBUG_VN_UNLOCK
1025 
1026 void
1027 debug_vn_unlock(struct vnode *vp, const char *filename, int line)
1028 {
1029 	kprintf("vn_unlock from %s:%d\n", filename, line);
1030 	lockmgr(&vp->v_lock, LK_RELEASE);
1031 }
1032 
1033 #else
1034 
1035 void
1036 vn_unlock(struct vnode *vp)
1037 {
1038 	lockmgr(&vp->v_lock, LK_RELEASE);
1039 }
1040 
1041 #endif
1042 
1043 /*
1044  * MPSAFE
1045  */
1046 int
1047 vn_islocked(struct vnode *vp)
1048 {
1049 	return (lockstatus(&vp->v_lock, curthread));
1050 }
1051 
1052 /*
1053  * Return the lock status of a vnode and unlock the vnode
1054  * if we owned the lock.  This is not a boolean; if the
1055  * caller cares what the lock status is, it must
1056  * check the various possible values.
1057  *
1058  * This only unlocks exclusive locks held by the caller,
1059  * it will NOT unlock shared locks (there is no way to
1060  * tell who the shared lock belongs to).
1061  *
1062  * MPSAFE
1063  */
1064 int
1065 vn_islocked_unlock(struct vnode *vp)
1066 {
1067 	int vpls;
1068 
1069 	vpls = lockstatus(&vp->v_lock, curthread);
1070 	if (vpls == LK_EXCLUSIVE)
1071 		lockmgr(&vp->v_lock, LK_RELEASE);
1072 	return(vpls);
1073 }
1074 
1075 /*
1076  * Restore a vnode lock that we previously released via
1077  * vn_islocked_unlock().  This is a NOP if we did not
1078  * own the original lock.
1079  *
1080  * MPSAFE
1081  */
1082 void
1083 vn_islocked_relock(struct vnode *vp, int vpls)
1084 {
1085 	int error;
1086 
1087 	if (vpls == LK_EXCLUSIVE)
1088 		error = lockmgr(&vp->v_lock, vpls);
1089 }
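
/*
 * The pair above is meant to bracket an operation that cannot be
 * performed while holding the vnode lock (sketch):
 *
 *	vpls = vn_islocked_unlock(vp);
 *	... potentially blocking operation ...
 *	vn_islocked_relock(vp, vpls);
 */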
1090 
1091 /*
1092  * MPSAFE
1093  */
1094 static int
1095 vn_closefile(struct file *fp)
1096 {
1097 	int error;
1098 
1099 	fp->f_ops = &badfileops;
1100 	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag, fp);
1101 	return (error);
1102 }
1103 
1104 /*
1105  * MPSAFE
1106  */
1107 static int
1108 vn_kqfilter(struct file *fp, struct knote *kn)
1109 {
1110 	int error;
1111 
1112 	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
1113 	return (error);
1114 }
1115