xref: /dragonfly/sys/kern/vfs_vnops.c (revision 1cef5f30)
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <sys/mplock2.h>

static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct ucred *cred, struct sysmsg *msg);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);

struct fileops vnode_fileops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};
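
/*
 * Illustrative sketch (not compiled in): generic file code dispatches
 * through the ops table above rather than calling the vn_*() functions
 * directly.  This assumes the fo_read() inline wrapper from
 * <sys/file2.h>; for a vnode-backed fp it resolves to vn_read().
 */
#if 0
static int
example_fp_read(struct file *fp, struct uio *uio, struct ucred *cred)
{
	/* equivalent to fp->f_ops->fo_read(fp, uio, cred, 0) */
	return (fo_read(fp, uio, cred, 0));
}
#endif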

/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_OPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: If the caller wishes the namecache entry to be operated with
 *	 a shared lock it must use NLC_SHAREDLOCK.  If NLC_LOCKVP is set
 *	 then the vnode lock will also be shared.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 *	 is also installed in the file pointer.
 */
int
vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
{
	struct vnode *vp;
	struct ucred *cred = nd->nl_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int error;
	u_int flags;
	uint64_t osize;
	struct mount *mp;

	/*
	 * Certain combinations are illegal
	 */
	if ((fmode & (FWRITE | O_TRUNC)) == O_TRUNC)
		return(EACCES);

	/*
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_nch will be returned.
	 *
	 * The result of this section should be a locked vnode.
	 *
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	 */
	nd->nl_flags |= NLC_OPEN;
	if (fmode & O_APPEND)
		nd->nl_flags |= NLC_APPEND;
	if (fmode & O_TRUNC)
		nd->nl_flags |= NLC_TRUNCATE;
	if (fmode & FREAD)
		nd->nl_flags |= NLC_READ;
	if (fmode & FWRITE)
		nd->nl_flags |= NLC_WRITE;
	if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
		nd->nl_flags |= NLC_FOLLOW;

	if (fmode & O_CREAT) {
		/*
		 * CONDITIONAL CREATE FILE CASE
		 *
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		 * is returned.
		 */
		nd->nl_flags |= NLC_CREATE;
		nd->nl_flags |= NLC_REFDVP;
		bwillinode(1);
		error = nlookup(nd);
	} else {
		/*
		 * NORMAL OPEN FILE CASE
		 */
		error = nlookup(nd);
	}

	if (error)
		return (error);

	/*
	 * split case to allow us to re-resolve and retry the ncp in case
	 * we get ESTALE.
	 */
again:
	if (fmode & O_CREAT) {
		if (nd->nl_nch.ncp->nc_vp == NULL) {
			if ((error = ncp_writechk(&nd->nl_nch)) != 0)
				return (error);
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			vap->va_fuseflags = fmode; /* FUSE */
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp,
					    nd->nl_cred, vap);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			/* locked vnode is returned */
		} else {
			if (fmode & O_EXCL) {
				error = EEXIST;
			} else {
				error = cache_vget(&nd->nl_nch, cred,
						    LK_EXCLUSIVE, &vp);
			}
			if (error)
				return (error);
			fmode &= ~O_CREAT;
		}
	} else {
		if (nd->nl_flags & NLC_SHAREDLOCK) {
			error = cache_vget(&nd->nl_nch, cred, LK_SHARED, &vp);
		} else {
			error = cache_vget(&nd->nl_nch, cred,
					   LK_EXCLUSIVE, &vp);
		}
		if (error)
			return (error);
	}

	/*
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_nch is left intact.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if (vp->v_type != VDIR && (fmode & O_DIRECTORY)) {
		error = ENOTDIR;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp, &nd->nl_nch);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					if (nd->nl_flags & NLC_SHAREDLOCK) {
						cache_unlock(&nd->nl_nch);
						cache_lock(&nd->nl_nch);
					}
					cache_setunresolved(&nd->nl_nch);
					error = cache_resolve(&nd->nl_nch,
							      cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
		}
	}
	if (fmode & O_TRUNC) {
		vn_unlock(vp);				/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		osize = vp->v_filesize;
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR_FP(vp, vap, cred, fp);
		if (error)
			goto bad;
		error = VOP_GETATTR(vp, vap);
		if (error)
			goto bad;
		mp = vq_vptomp(vp);
		VFS_ACCOUNT(mp, vap->va_uid, vap->va_gid, -osize);
	}

	/*
	 * Set or clear VSWAPCACHE on the vp based on nd->nl_nch.ncp->nc_flag.
	 * These particular bits are tracked all the way from the root.
	 *
	 * NOTE: Might not work properly on NFS servers due to the
	 * disconnected namecache.
	 */
	flags = nd->nl_nch.ncp->nc_flag;
	if ((flags & (NCF_UF_CACHE | NCF_UF_PCACHE)) &&
	    (flags & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE)) == 0) {
		vsetflags(vp, VSWAPCACHE);
	} else {
		vclrflags(vp, VSWAPCACHE);
	}

	/*
	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
	 * associated with the fp yet so we own it clean.
	 *
	 * f_nchandle inherits nl_nch.  This used to be necessary only for
	 * directories but now we do it unconditionally so f*() ops
	 * such as fchmod() can access the actual namespace that was
	 * used to open the file.
	 */
	if (fp) {
		if (nd->nl_flags & NLC_APPENDONLY)
			fmode |= FAPPENDONLY;
		fp->f_nchandle = nd->nl_nch;
		cache_zero(&nd->nl_nch);
		cache_unlock(&fp->f_nchandle);
	}

	/*
	 * Get rid of nl_nch.  vn_open does not return it (it returns the
	 * vnode or the file pointer).  Note: we can't leave nl_nch locked
	 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
	 * on /dev/ttyd0
	 */
	if (nd->nl_nch.ncp)
		cache_put(&nd->nl_nch);

	error = VOP_OPEN(vp, fmode, cred, fp);
	if (error) {
		/*
		 * setting f_ops to &badfileops will prevent the descriptor
		 * code from trying to close and release the vnode, since
		 * the open failed we do not want to call close.
		 */
		if (fp) {
			fp->f_data = NULL;
			fp->f_ops = &badfileops;
		}
		goto bad;
	}

#if 0
	/*
	 * Assert that VREG files have been setup for vmio.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));
#endif

	/*
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	 */
	if (fp == NULL) {
		nd->nl_open_vp = vp;
		nd->nl_vp_fmode = fmode;
		if ((nd->nl_flags & NLC_LOCKVP) == 0)
			vn_unlock(vp);
	} else {
		vput(vp);
	}
	return (0);
bad:
	if (vp)
		vput(vp);
	return (error);
}
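
/*
 * Illustrative sketch (not compiled in): a typical kernel caller pairs
 * vn_open() with nlookup_init()/nlookup_done() as documented above.
 * The path and function name here are hypothetical.
 */
#if 0
static int
example_open_etc_motd(struct vnode **vpp)
{
	struct nlookupdata nd;
	int error;

	error = nlookup_init(&nd, "/etc/motd", UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0) {
		/* fp == NULL: the vnode is returned in nd.nl_open_vp */
		error = vn_open(&nd, NULL, FREAD, 0);
		if (error == 0) {
			*vpp = nd.nl_open_vp;
			nd.nl_open_vp = NULL;
		}
	}
	nlookup_done(&nd);	/* always clean up, error or not */
	return (error);
}
#endif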

int
vn_opendisk(const char *devname, int fmode, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	if (strncmp(devname, "/dev/", 5) == 0)
		devname += 5;
	if ((vp = getsynthvnode(devname)) == NULL) {
		error = ENODEV;
	} else {
		error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL);
		vn_unlock(vp);
		if (error) {
			vrele(vp);
			vp = NULL;
		}
	}
	*vpp = vp;
	return (error);
}

/*
 * Check for write permissions on the specified vnode.  nch may be NULL.
 */
int
vn_writechk(struct vnode *vp, struct nchandle *nch)
{
	/*
	 * If there is shared text associated with the vnode we cannot
	 * allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);

	/*
	 * If the vnode represents a regular file, check the mount
	 * point via the nch.  This may be a different mount point
	 * than the one embedded in the vnode (e.g. nullfs).
	 *
	 * We can still write to non-regular files (e.g. devices)
	 * via read-only mounts.
	 */
	if (nch && nch->ncp && vp->v_type == VREG)
		return (ncp_writechk(nch));
	return (0);
}

/*
 * Check whether the underlying mount is read-only.  The mount point
 * referenced by the namecache may be different from the mount point
 * used by the underlying vnode in the case of NULLFS, so a separate
 * check is needed.
 */
int
ncp_writechk(struct nchandle *nch)
{
	struct mount *mp;

	if ((mp = nch->mount) != NULL) {
		if (mp->mnt_flag & MNT_RDONLY)
			return (EROFS);
		if (mp->mnt_op->vfs_modifying != vfs_stdmodifying)
			VFS_MODIFYING(mp);
	}
	return(0);
}

/*
 * Vnode close call
 *
 * MPSAFE
 */
int
vn_close(struct vnode *vp, int flags, struct file *fp)
{
	int error;

	error = vn_lock(vp, LK_SHARED | LK_RETRY | LK_FAILRECLAIM);
	if (error == 0) {
		error = VOP_CLOSE(vp, flags, fp);
		vn_unlock(vp);
	}
	vrele(vp);
	return (error);
}

/*
 * Sequential heuristic.
 *
 * MPSAFE (f_seqcount and f_nextoff are allowed to race)
 */
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;

		tmpseq += (uio->uio_resid + MAXBSIZE - 1) / MAXBSIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}
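
/*
 * Worked example (illustrative, assuming MAXBSIZE is 64KB): a reader
 * issuing back-to-back 64KB reads advances f_seqcount by
 * (resid + MAXBSIZE - 1) / MAXBSIZE == 1 per call until it saturates
 * at IO_SEQMAX.  The return value shifts f_seqcount into the IO_SEQ*
 * bits of ioflag, which the VOP read/write path can use to scale
 * read-ahead/clustering.  A single non-sequential access draws
 * f_seqcount back down toward 0.
 */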

/*
 * get - lock and return the f_offset field.
 * set - set and unlock the f_offset field.
 *
 * These routines serve the dual purpose of serializing access to the
 * f_offset field (at least on x86) and guaranteeing operational integrity
 * when multiple read()ers and write()ers are present on the same fp.
 *
 * MPSAFE
 */
static __inline off_t
vn_get_fpf_offset(struct file *fp)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * Shortcut critical path.
	 */
	flags = fp->f_flag & ~FOFFSETLOCK;
	if (atomic_cmpset_int(&fp->f_flag, flags, flags | FOFFSETLOCK))
		return(fp->f_offset);

	/*
	 * The hard way
	 */
	for (;;) {
		flags = fp->f_flag;
		if (flags & FOFFSETLOCK) {
			nflags = flags | FOFFSETWAKE;
			tsleep_interlock(&fp->f_flag, 0);
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				tsleep(&fp->f_flag, PINTERLOCKED, "fpoff", 0);
		} else {
			nflags = flags | FOFFSETLOCK;
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				break;
		}
	}
	return(fp->f_offset);
}

/*
 * MPSAFE
 */
static __inline void
vn_set_fpf_offset(struct file *fp, off_t offset)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * We hold the lock so we can set the offset without interference.
	 */
	fp->f_offset = offset;

	/*
	 * Normal release is already a reasonably critical path.
	 */
	for (;;) {
		flags = fp->f_flag;
		nflags = flags & ~(FOFFSETLOCK | FOFFSETWAKE);
		if (atomic_cmpset_int(&fp->f_flag, flags, nflags)) {
			if (flags & FOFFSETWAKE)
				wakeup(&fp->f_flag);
			break;
		}
	}
}

/*
 * MPSAFE
 */
static __inline off_t
vn_poll_fpf_offset(struct file *fp)
{
#if defined(__x86_64__)
	return(fp->f_offset);
#else
	off_t off = vn_get_fpf_offset(fp);
	vn_set_fpf_offset(fp, off);
	return(off);
#endif
}
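
/*
 * Illustrative pairing (not compiled in): vn_get_fpf_offset() acquires
 * FOFFSETLOCK and vn_set_fpf_offset() releases it, so every get must
 * be matched by a set, exactly as vn_read()/vn_write() do below.
 */
#if 0
	off_t off;

	off = vn_get_fpf_offset(fp);	/* locks f_offset */
	/* ... perform the I/O, advancing off ... */
	vn_set_fpf_offset(fp, off);	/* stores the result and unlocks */
#endif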

/*
 * Package up an I/O request on a vnode into a uio and do it.
 *
 * MPSAFE
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = curthread;
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else if (auio.uio_resid && error == 0)
		error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		vn_unlock(vp);
	return (error);
}
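
/*
 * Illustrative sketch (not compiled in): reading the first 512 bytes
 * of an already-referenced, unlocked vnode into a kernel buffer.  The
 * function and buffer names are hypothetical.
 */
#if 0
static int
example_read_header(struct vnode *vp, struct ucred *cred)
{
	char buf[512];
	int resid;
	int error;

	/* vp is unlocked, so let vn_rdwr() take the vnode lock itself */
	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0,
			UIO_SYSSPACE, 0, cred, &resid);
	if (error == 0 && resid != 0)
		error = EIO;	/* short read */
	return (error);
}
#endif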

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call lwkt_user_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 *
 * MPSAFE
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
		 off_t offset, enum uio_seg segflg, int ioflg,
		 struct ucred *cred, int *aresid)
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (vp->v_type == VREG && (ioflg & IO_RECURSE) == 0) {
			switch(rw) {
			case UIO_READ:
				bwillread(chunk);
				break;
			case UIO_WRITE:
				bwillwrite(chunk);
				break;
			}
		}
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
				ioflg, cred, aresid);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		lwkt_user_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}
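
/*
 * Worked example of the chunk alignment above (illustrative, assuming
 * MAXBSIZE is 65536): for offset 1000 the first chunk is
 * 65536 - 1000 % 65536 = 64536 bytes, which advances offset to 65536.
 * Every subsequent chunk is then a full, block-aligned MAXBSIZE except
 * possibly the last.
 */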

/*
 * File pointers can no longer get ripped up by revoke so
 * we don't need to lock access to the vp.
 *
 * f_offset updates are not guaranteed against multiple readers
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_SHARED | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);

	error = VOP_READ_FP(vp, uio, ioflag, cred, fp);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	if (vp->v_mount)
		VFS_MODIFYING(vp->v_mount);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);
	error = VOP_WRITE_FP(vp, uio, ioflag, cred, fp);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	return (error);
}

/*
 * MPSAFE
 */
int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
	struct vattr vattr;
	struct vattr *vap;
	int error;
	u_short mode;
	cdev_t dev;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare2 = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDATABASE:
		mode |= S_IFDB;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/* This is a cosmetic change, symlinks do not have a mode. */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = dev2udev(vp->v_rdev);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		dev = vp->v_rdev;
		if (dev != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = time_second +
							  (time_uptime -
							   dev->si_lastread);
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = time_second +
							  (time_uptime -
							   dev->si_lastwrite);
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Default to PAGE_SIZE after much discussion.
	 */

	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		/*
		 * XXX this is broken.  If the device is not yet open (aka
		 * stat() call, aka v_rdev == NULL), how are we supposed
		 * to get a valid block size out of it?
		 */
		dev = vp->v_rdev;

		sb->st_blksize = dev->si_bsize_best;
		if (sb->st_blksize < dev->si_bsize_phys)
			sb->st_blksize = dev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;

	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
	if (error)
		sb->st_gen = 0;
	else
		sb->st_gen = (u_int32_t)vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;

	/*
	 * This is for ABI compatibility <= 5.7 (for ABI change made in
	 * 5.7 master).
	 */
	sb->__old_st_blksize = sb->st_blksize;

	return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred,
	 struct sysmsg *msg)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vnode *ovp;
	struct vattr vattr;
	int error;
	off_t size;

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr);
			if (error)
				break;
			size = vattr.va_size;
			if ((vp->v_flag & VNOTSEEKABLE) == 0)
				size -= vn_poll_fpf_offset(fp);
			if (size > 0x7FFFFFFF)
				size = 0x7FFFFFFF;
			*(int *)data = size;
			error = 0;
			break;
		}
		if (com == FIOASYNC) {				/* XXX */
			error = 0;				/* XXX */
			break;
		}
		/* fall into ... */
	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK) {
				error = ENOTTY;
				break;
			}
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			error = 0;
			break;
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred, msg);
		if (error == 0 && com == TIOCSCTTY) {
			struct proc *p = curthread->td_proc;
			struct session *sess;

			if (p == NULL) {
				error = ENOTTY;
				break;
			}

			get_mplock();
			sess = p->p_session;
			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp) {
				error = 0;
				rel_mplock();
				break;
			}

			/* Get rid of reference to old control tty */
			ovp = sess->s_ttyvp;
			vref(vp);
			sess->s_ttyvp = vp;
			if (ovp)
				vrele(ovp);
			rel_mplock();
		}
		break;
	}
	return (error);
}
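
/*
 * Illustrative userland counterpart (not compiled in): the FIONREAD
 * case above is what services this common ioctl(2) pattern on a
 * regular file or directory descriptor.
 */
#if 0
	int nbytes;

	if (ioctl(fd, FIONREAD, &nbytes) == 0)
		printf("%d bytes between the offset and EOF\n", nbytes);
#endif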

/*
 * Obtain the requested vnode lock
 *
 *	LK_RETRY	Automatically retry on timeout
 *	LK_FAILRECLAIM	Fail if the vnode is being reclaimed
 *
 * Failures will occur if the vnode is being reclaimed, but not all
 * callers expect that the function will fail, so the caller must pass
 * LK_FAILRECLAIM if it wants to process an error code.
 *
 * Errors can occur for other reasons if you pass in other LK_ flags,
 * regardless of whether you pass in LK_FAILRECLAIM
 */
int
vn_lock(struct vnode *vp, int flags)
{
	int error;

	do {
		error = lockmgr(&vp->v_lock, flags);
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	/*
	 * Because we (had better!) have a ref on the vnode, once it
	 * goes to VRECLAIMED state it will not be recycled until all
	 * refs go away.  So we can just check the flag.
	 */
	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
		if (flags & LK_FAILRECLAIM) {
			lockmgr(&vp->v_lock, LK_RELEASE);
			error = ENOENT;
		}
	}
	return (error);
}
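
/*
 * Illustrative sketch (not compiled in): a caller holding only a ref
 * on vp that wants to detect reclaimed vnodes rather than operate on
 * them.
 */
#if 0
	if (vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_FAILRECLAIM) == 0) {
		/* ... operate on the locked, non-reclaimed vnode ... */
		vn_unlock(vp);
	}
#endif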

#ifdef DEBUG_VN_UNLOCK

void
debug_vn_unlock(struct vnode *vp, const char *filename, int line)
{
	kprintf("vn_unlock from %s:%d\n", filename, line);
	lockmgr(&vp->v_lock, LK_RELEASE);
}

#else

void
vn_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

#endif

/*
 * MPSAFE
 */
int
vn_islocked(struct vnode *vp)
{
	return (lockstatus(&vp->v_lock, curthread));
}

/*
 * Return the lock status of a vnode and unlock the vnode
 * if we owned the lock.  This is not a boolean; if the
 * caller cares what the lock status is, it must check
 * the various possible values.
 *
 * This only unlocks exclusive locks held by the caller,
 * it will NOT unlock shared locks (there is no way to
 * tell who the shared lock belongs to).
 *
 * MPSAFE
 */
int
vn_islocked_unlock(struct vnode *vp)
{
	int vpls;

	vpls = lockstatus(&vp->v_lock, curthread);
	if (vpls == LK_EXCLUSIVE)
		lockmgr(&vp->v_lock, LK_RELEASE);
	return(vpls);
}

/*
 * Restore a vnode lock that we previously released via
 * vn_islocked_unlock().  This is a NOP if we did not
 * own the original lock.
 *
 * MPSAFE
 */
void
vn_islocked_relock(struct vnode *vp, int vpls)
{
	if (vpls == LK_EXCLUSIVE)
		lockmgr(&vp->v_lock, vpls);
}
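
/*
 * Illustrative pairing (not compiled in): temporarily dropping an
 * exclusive vnode lock across an operation that must not hold it,
 * then restoring the prior state.
 */
#if 0
	int vpls;

	vpls = vn_islocked_unlock(vp);
	/* ... do work that may not hold the vnode lock ... */
	vn_islocked_relock(vp, vpls);
#endif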

/*
 * MPSAFE
 */
static int
vn_closefile(struct file *fp)
{
	int error;

	fp->f_ops = &badfileops;
	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag, fp);
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
	return (error);
}