xref: /dragonfly/sys/kern/vfs_vnops.c (revision abf903a5)
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <sys/mplock2.h>

static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct ucred *cred, struct sysmsg *msg);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);

struct fileops vnode_fileops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};

/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_OPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: If the caller wishes the namecache entry to be operated with
 *	 a shared lock it must use NLC_SHAREDLOCK.  If NLC_LOCKVP is set
 *	 then the vnode lock will also be shared.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 *	 is also installed in the file pointer.
 */
int
vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
{
	struct vnode *vp;
	struct ucred *cred = nd->nl_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int error;
	int vpexcl;
	u_int flags;
	uint64_t osize;
	struct mount *mp;

	/*
	 * Certain combinations are illegal
	 */
	if ((fmode & (FWRITE | O_TRUNC)) == O_TRUNC)
		return(EACCES);

	/*
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_nch will be returned.
	 *
	 * The result of this section should be a locked vnode.
	 *
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	 */
	nd->nl_flags |= NLC_OPEN;
	if (fmode & O_APPEND)
		nd->nl_flags |= NLC_APPEND;
	if (fmode & O_TRUNC)
		nd->nl_flags |= NLC_TRUNCATE;
	if (fmode & FREAD)
		nd->nl_flags |= NLC_READ;
	if (fmode & FWRITE)
		nd->nl_flags |= NLC_WRITE;
	if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
		nd->nl_flags |= NLC_FOLLOW;

	if (fmode & O_CREAT) {
		/*
		 * CONDITIONAL CREATE FILE CASE
		 *
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		 * is returned.
		 */
		nd->nl_flags |= NLC_CREATE;
		nd->nl_flags |= NLC_REFDVP;
		bwillinode(1);
		error = nlookup(nd);
	} else {
		/*
		 * NORMAL OPEN FILE CASE
		 */
		error = nlookup(nd);
	}

	if (error)
		return (error);

	/*
	 * split case to allow us to re-resolve and retry the ncp in case
	 * we get ESTALE.
	 *
	 * (error is 0 on entry / retry)
	 */
again:
	/*
	 * Checks for (likely) filesystem-modifying cases and allows
	 * the filesystem to stall the front-end.
	 */
	if ((fmode & (FWRITE | O_TRUNC)) ||
	    ((fmode & O_CREAT) && nd->nl_nch.ncp->nc_vp == NULL)) {
		error = ncp_writechk(&nd->nl_nch);
		if (error)
			return error;
	}

	vpexcl = 1;
	if (fmode & O_CREAT) {
		if (nd->nl_nch.ncp->nc_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			vap->va_fuseflags = fmode; /* FUSE */
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp,
					    nd->nl_cred, vap);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			/* locked vnode is returned */
		} else {
			if (fmode & O_EXCL) {
				error = EEXIST;
			} else {
				error = cache_vget(&nd->nl_nch, cred,
						    LK_EXCLUSIVE, &vp);
			}
			if (error)
				return (error);
			fmode &= ~O_CREAT;
		}
	} else {
		/*
		 * In most other cases a shared lock on the vnode is
		 * sufficient.  However, the O_RDWR case needs an
		 * exclusive lock if the vnode is executable.  The
		 * NLC_EXCLLOCK_IFEXEC and NCF_NOTX flags help resolve
		 * this.
		 *
		 * NOTE: If NCF_NOTX is not set, we do not know the state
		 *	 of the 'x' bits and have to get an exclusive lock
		 *	 for the EXCLLOCK_IFEXEC case.
		 */
		if ((nd->nl_flags & NLC_SHAREDLOCK) &&
		    ((nd->nl_flags & NLC_EXCLLOCK_IFEXEC) == 0 ||
		     nd->nl_nch.ncp->nc_flag & NCF_NOTX)) {
			error = cache_vget(&nd->nl_nch, cred, LK_SHARED, &vp);
			vpexcl = 0;
		} else {
			error = cache_vget(&nd->nl_nch, cred,
					   LK_EXCLUSIVE, &vp);
		}
		if (error)
			return (error);
	}

	/*
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_nch is left intact.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if (vp->v_type != VDIR && (fmode & O_DIRECTORY)) {
		error = ENOTDIR;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}

			/*
			 * Additional checks on vnode (does not substitute
			 * for ncp_writechk()).
			 */
			error = vn_writechk(vp);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					if (vpexcl == 0) {
						cache_unlock(&nd->nl_nch);
						cache_lock(&nd->nl_nch);
					}
					cache_setunresolved(&nd->nl_nch);
					error = cache_resolve(&nd->nl_nch,
							      cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
		}
	}
	if (fmode & O_TRUNC) {
		vn_unlock(vp);				/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		osize = vp->v_filesize;
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR_FP(vp, vap, cred, fp);
		if (error)
			goto bad;
		error = VOP_GETATTR(vp, vap);
		if (error)
			goto bad;
		mp = vq_vptomp(vp);
		VFS_ACCOUNT(mp, vap->va_uid, vap->va_gid, -osize);
	}

	/*
	 * Set or clear VSWAPCACHE on the vp based on nd->nl_nch.ncp->nc_flag.
	 * These particular bits are tracked all the way from the root.
	 *
	 * NOTE: Might not work properly on NFS servers due to the
	 * disconnected namecache.
	 */
	flags = nd->nl_nch.ncp->nc_flag;
	if ((flags & (NCF_UF_CACHE | NCF_UF_PCACHE)) &&
	    (flags & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE)) == 0) {
		vsetflags(vp, VSWAPCACHE);
	} else {
		vclrflags(vp, VSWAPCACHE);
	}

	/*
	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
	 * associated with the fp yet so we own it clean.
	 *
	 * f_nchandle inherits nl_nch.  This used to be necessary only for
	 * directories but now we do it unconditionally so f*() ops
	 * such as fchmod() can access the actual namespace that was
	 * used to open the file.
	 */
	if (fp) {
		if (nd->nl_flags & NLC_APPENDONLY)
			fmode |= FAPPENDONLY;
		fp->f_nchandle = nd->nl_nch;
		cache_zero(&nd->nl_nch);
		cache_unlock(&fp->f_nchandle);
	}

	/*
	 * Get rid of nl_nch.  vn_open does not return it (it returns the
	 * vnode or the file pointer).  Note: we can't leave nl_nch locked
	 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
	 * on /dev/ttyd0
	 */
	if (nd->nl_nch.ncp)
		cache_put(&nd->nl_nch);

	error = VOP_OPEN(vp, fmode, cred, fp);
	if (error) {
		/*
		 * setting f_ops to &badfileops will prevent the descriptor
		 * code from trying to close and release the vnode, since
		 * the open failed we do not want to call close.
		 */
		if (fp) {
			fp->f_data = NULL;
			fp->f_ops = &badfileops;
		}
		goto bad;
	}

#if 0
	/*
	 * Assert that VREG files have been setup for vmio.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));
#endif

	/*
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	 */
	if (fp == NULL) {
		nd->nl_open_vp = vp;
		nd->nl_vp_fmode = fmode;
		if ((nd->nl_flags & NLC_LOCKVP) == 0)
			vn_unlock(vp);
	} else {
		vput(vp);
	}
	return (0);
bad:
	if (vp)
		vput(vp);
	return (error);
}
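
#if 0
/*
 * Illustrative sketch (not part of this file): a typical vn_open()
 * call sequence per the contract documented above.  The caller owns
 * the nlookupdata and must nlookup_done() it whether or not vn_open()
 * succeeds; taking over nl_open_vp before nlookup_done() keeps the
 * reference.  The function name and path handling are hypothetical.
 */
static int
example_open_for_read(const char *path, struct vnode **vpp)
{
	struct nlookupdata nd;
	int error;

	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = vn_open(&nd, NULL, FREAD, 0);
	if (error == 0) {
		*vpp = nd.nl_open_vp;	/* referenced; unlocked w/o NLC_LOCKVP */
		nd.nl_open_vp = NULL;
	}
	nlookup_done(&nd);
	return (error);
}
#endif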

int
vn_opendisk(const char *devname, int fmode, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	if (strncmp(devname, "/dev/", 5) == 0)
		devname += 5;
	if ((vp = getsynthvnode(devname)) == NULL) {
		error = ENODEV;
	} else {
		error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL);
		vn_unlock(vp);
		if (error) {
			vrele(vp);
			vp = NULL;
		}
	}
	*vpp = vp;
	return (error);
}
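
#if 0
/*
 * Illustrative sketch (not part of this file): opening a raw disk
 * device by name and closing it again via vn_close() below.  The
 * device name is hypothetical.
 */
	struct vnode *vp;
	int error;

	error = vn_opendisk("da0", FREAD, &vp);
	if (error == 0) {
		/* ... issue I/O against vp ... */
		vn_close(vp, FREAD, NULL);	/* locks, VOP_CLOSE()s, vrele()s */
	}
#endif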

/*
 * Checks for special conditions on the vnode which might prevent writing
 * after the vnode has (likely) been locked.  The vnode might or might not
 * be locked as of this call, but will be at least referenced.
 *
 * Also re-checks the mount RDONLY flag that ncp_writechk() checked prior
 * to the vnode being locked.
 */
int
vn_writechk(struct vnode *vp)
{
	/*
	 * If there is shared text associated with the vnode, we cannot
	 * allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	return 0;
}

/*
 * Check whether the underlying mount is read-only.  The mount point
 * referenced by the namecache may be different from the mount point
 * used by the underlying vnode in the case of NULLFS, so a separate
 * check is needed.
 *
 * Must be called PRIOR to any vnodes being locked.
 */
int
ncp_writechk(struct nchandle *nch)
{
	struct mount *mp;

	if ((mp = nch->mount) != NULL) {
		if (mp->mnt_flag & MNT_RDONLY)
			return (EROFS);
		if (mp->mnt_op->vfs_modifying != vfs_stdmodifying)
			VFS_MODIFYING(mp);
	}
	return(0);
}

/*
 * Vnode close call
 *
 * MPSAFE
 */
int
vn_close(struct vnode *vp, int flags, struct file *fp)
{
	int error;

	error = vn_lock(vp, LK_SHARED | LK_RETRY | LK_FAILRECLAIM);
	if (error == 0) {
		error = VOP_CLOSE(vp, flags, fp);
		vn_unlock(vp);
	}
	vrele(vp);
	return (error);
}

/*
 * Sequential heuristic.
 *
 * MPSAFE (f_seqcount and f_nextoff are allowed to race)
 */
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;

		tmpseq += (uio->uio_resid + MAXBSIZE - 1) / MAXBSIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}
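
/*
 * Worked example (hypothetical numbers, assuming MAXBSIZE is 65536):
 * a 128KB read() starting at the previous f_nextoff bumps f_seqcount
 * by (131072 + 65535) / 65536 = 2, clamped to IO_SEQMAX, and the
 * value returned to the caller is f_seqcount << IO_SEQSHIFT, which
 * is merged into ioflag as read-ahead advice for the filesystem.
 */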

/*
 * get - lock and return the f_offset field.
 * set - set and unlock the f_offset field.
 *
 * These routines serve the dual purpose of serializing access to the
 * f_offset field (at least on x86) and guaranteeing operational integrity
 * when multiple read()ers and write()ers are present on the same fp.
 *
 * MPSAFE
 */
static __inline off_t
vn_get_fpf_offset(struct file *fp)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * Shortcut critical path.
	 */
	flags = fp->f_flag & ~FOFFSETLOCK;
	if (atomic_cmpset_int(&fp->f_flag, flags, flags | FOFFSETLOCK))
		return(fp->f_offset);

	/*
	 * The hard way
	 */
	for (;;) {
		flags = fp->f_flag;
		if (flags & FOFFSETLOCK) {
			nflags = flags | FOFFSETWAKE;
			tsleep_interlock(&fp->f_flag, 0);
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				tsleep(&fp->f_flag, PINTERLOCKED, "fpoff", 0);
		} else {
			nflags = flags | FOFFSETLOCK;
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				break;
		}
	}
	return(fp->f_offset);
}

/*
 * MPSAFE
 */
static __inline void
vn_set_fpf_offset(struct file *fp, off_t offset)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * We hold the lock so we can set the offset without interference.
	 */
	fp->f_offset = offset;

	/*
	 * Normal release is already a reasonably critical path.
	 */
	for (;;) {
		flags = fp->f_flag;
		nflags = flags & ~(FOFFSETLOCK | FOFFSETWAKE);
		if (atomic_cmpset_int(&fp->f_flag, flags, nflags)) {
			if (flags & FOFFSETWAKE)
				wakeup(&fp->f_flag);
			break;
		}
	}
}
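
#if 0
/*
 * Illustrative sketch (not part of this file): the get/set pair
 * brackets an I/O operation so concurrent read()/write() calls on
 * the same fp see a consistent f_offset.  This mirrors what vn_read()
 * and vn_write() below actually do.
 */
	uio->uio_offset = vn_get_fpf_offset(fp);	/* lock + fetch */
	/* ... VOP_READ/VOP_WRITE advances uio->uio_offset ... */
	vn_set_fpf_offset(fp, uio->uio_offset);		/* store + unlock */
#endif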

/*
 * MPSAFE
 *
 * On x86_64 an aligned 64-bit load is atomic, so f_offset can be
 * read back directly.  Other platforms must take and release the
 * offset lock to get a coherent snapshot.
 */
static __inline off_t
vn_poll_fpf_offset(struct file *fp)
{
#if defined(__x86_64__)
	return(fp->f_offset);
#else
	off_t off = vn_get_fpf_offset(fp);
	vn_set_fpf_offset(fp, off);
	return(off);
#endif
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 *
 * MPSAFE
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = curthread;
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		vn_unlock(vp);
	return (error);
}
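
#if 0
/*
 * Illustrative sketch (not part of this file): reading the first
 * 512 bytes of a referenced vnode into a kernel buffer.  The buffer
 * and size are hypothetical.
 */
	char buf[512];
	int resid;

	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0, UIO_SYSSPACE,
			0, cred, &resid);
	/* on success, sizeof(buf) - resid bytes were actually read */
#endif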

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call lwkt_user_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 *
 * MPSAFE
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
		 off_t offset, enum uio_seg segflg, int ioflg,
		 struct ucred *cred, int *aresid)
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;
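		/*
		 * e.g. (hypothetical numbers, assuming MAXBSIZE is 65536):
		 * with offset 1000 the first chunk is 64536 bytes, which
		 * aligns the next offset to 65536; subsequent chunks are
		 * then full MAXBSIZE blocks until the final partial one.
		 */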

		if (chunk > len)
			chunk = len;
		if (vp->v_type == VREG && (ioflg & IO_RECURSE) == 0) {
			switch(rw) {
			case UIO_READ:
				bwillread(chunk);
				break;
			case UIO_WRITE:
				bwillwrite(chunk);
				break;
			}
		}
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
				ioflg, cred, aresid);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		lwkt_user_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}

/*
 * File pointers can no longer get ripped up by revoke so
 * we don't need to lock access to the vp.
 *
 * f_offset updates are not guaranteed against multiple readers
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_SHARED | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);

	error = VOP_READ_FP(vp, uio, ioflag, cred, fp);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int error, ioflag;
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	if (vp->v_mount)
		VFS_MODIFYING(vp->v_mount);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);
	error = VOP_WRITE_FP(vp, uio, ioflag, cred, fp);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	return (error);
}

/*
 * MPSAFE
 */
int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
	struct vattr vattr;
	struct vattr *vap;
	int error;
	u_short mode;
	cdev_t dev;

	/*
	 * vp already has a ref and is validated, can call unlocked.
	 */
	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare2 = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDATABASE:
		mode |= S_IFDB;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/* This is a cosmetic change, symlinks do not have a mode. */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = devid_from_dev(vp->v_rdev);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 * si_lastread and si_lastwrite are stamped in time_uptime terms,
	 * so convert them to wall-clock time here.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		dev = vp->v_rdev;
		if (dev != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = time_second +
							  (dev->si_lastread -
							   time_uptime);
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = time_second +
							  (dev->si_lastwrite -
							   time_uptime);
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Default to PAGE_SIZE after much discussion.
	 */

	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		/*
		 * XXX this is broken.  If the device is not yet open (aka
		 * stat() call, aka v_rdev == NULL), how are we supposed
		 * to get a valid block size out of it?
		 */
		dev = vp->v_rdev;

		sb->st_blksize = dev->si_bsize_best;
		if (sb->st_blksize < dev->si_bsize_phys)
			sb->st_blksize = dev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;

	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
	if (error)
		sb->st_gen = 0;
	else
		sb->st_gen = (u_int32_t)vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;

	/*
	 * This is for ABI compatibility <= 5.7 (for ABI change made in
	 * 5.7 master).
	 */
	sb->__old_st_blksize = sb->st_blksize;

	return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred,
	 struct sysmsg *msg)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vnode *ovp;
	struct vattr vattr;
	int error;
	off_t size;

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr);
			if (error)
				break;
			size = vattr.va_size;
			if ((vp->v_flag & VNOTSEEKABLE) == 0)
				size -= vn_poll_fpf_offset(fp);
			if (size > 0x7FFFFFFF)
				size = 0x7FFFFFFF;
			*(int *)data = size;
			error = 0;
			break;
		}
		if (com == FIOASYNC) {				/* XXX */
			error = 0;				/* XXX */
			break;
		}
		/* fall into ... */
	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK) {
				error = ENOTTY;
				break;
			}
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			error = 0;
			break;
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred, msg);
		if (error == 0 && com == TIOCSCTTY) {
			struct proc *p = curthread->td_proc;
			struct session *sess;

			if (p == NULL) {
				error = ENOTTY;
				break;
			}

			get_mplock();
			sess = p->p_session;
			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp) {
				error = 0;
				rel_mplock();
				break;
			}

			/* Get rid of reference to old control tty */
			ovp = sess->s_ttyvp;
			vref(vp);
			sess->s_ttyvp = vp;
			if (ovp)
				vrele(ovp);
			rel_mplock();
		}
		break;
	}
	return (error);
}

/*
 * Obtain the requested vnode lock
 *
 *	LK_RETRY	Automatically retry on timeout
 *	LK_FAILRECLAIM	Fail if the vnode is being reclaimed
 *
 * Failures will occur if the vnode is undergoing recyclement, but not
 * all callers expect that the function will fail so the caller must pass
 * LK_FAILRECLAIM if it wants to process an error code.
 *
 * Errors can occur for other reasons if you pass in other LK_ flags,
 * regardless of whether you pass in LK_FAILRECLAIM.
 */
int
vn_lock(struct vnode *vp, int flags)
{
	int error;

	do {
		error = lockmgr(&vp->v_lock, flags);
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	/*
	 * Because we (had better!) have a ref on the vnode, once it
	 * goes to VRECLAIMED state it will not be recycled until all
	 * refs go away.  So we can just check the flag.
	 */
	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
		if (flags & LK_FAILRECLAIM) {
			lockmgr(&vp->v_lock, LK_RELEASE);
			error = ENOENT;
		}
	}
	return (error);
}
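
#if 0
/*
 * Illustrative sketch (not part of this file): a typical locked
 * region.  With LK_RETRY the lock attempt is retried until it
 * succeeds; adding LK_FAILRECLAIM lets the caller detect a vnode
 * that was reclaimed out from under it.
 */
	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_FAILRECLAIM);
	if (error == 0) {
		/* ... operate on the locked vnode ... */
		vn_unlock(vp);
	}
#endif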

int
vn_relock(struct vnode *vp, int flags)
{
	int error;

	do {
		error = lockmgr(&vp->v_lock, flags);
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	return error;
}

#ifdef DEBUG_VN_UNLOCK

void
debug_vn_unlock(struct vnode *vp, const char *filename, int line)
{
	kprintf("vn_unlock from %s:%d\n", filename, line);
	lockmgr(&vp->v_lock, LK_RELEASE);
}

#else

void
vn_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

#endif

/*
 * MPSAFE
 */
int
vn_islocked(struct vnode *vp)
{
	return (lockstatus(&vp->v_lock, curthread));
}

/*
 * Return the lock status of a vnode and unlock the vnode
 * if we owned the lock.  This is not a boolean, if the
 * caller cares what the lock status is the caller must
 * check the various possible values.
 *
 * This only unlocks exclusive locks held by the caller,
 * it will NOT unlock shared locks (there is no way to
 * tell who the shared lock belongs to).
 *
 * MPSAFE
 */
int
vn_islocked_unlock(struct vnode *vp)
{
	int vpls;

	vpls = lockstatus(&vp->v_lock, curthread);
	if (vpls == LK_EXCLUSIVE)
		lockmgr(&vp->v_lock, LK_RELEASE);
	return(vpls);
}

/*
 * Restore a vnode lock that we previously released via
 * vn_islocked_unlock().  This is a NOP if we did not
 * own the original lock.
 *
 * MPSAFE
 */
void
vn_islocked_relock(struct vnode *vp, int vpls)
{
	int error;

	if (vpls == LK_EXCLUSIVE)
		error = lockmgr(&vp->v_lock, vpls);
}
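
#if 0
/*
 * Illustrative sketch (not part of this file): temporarily dropping
 * a possibly-held exclusive vnode lock around a blocking operation,
 * then restoring it with the pair above.
 */
	int vpls = vn_islocked_unlock(vp);
	/* ... blocking operation that must not hold the vnode lock ... */
	vn_islocked_relock(vp, vpls);
#endif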

/*
 * MPSAFE
 */
static int
vn_closefile(struct file *fp)
{
	int error;

	fp->f_ops = &badfileops;
	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag, fp);
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
	return (error);
}
1163