xref: /dragonfly/sys/kern/vfs_vnops.c (revision 7d3e9a5b)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
35  * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
36  */
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/uio.h>
41 #include <sys/fcntl.h>
42 #include <sys/file.h>
43 #include <sys/stat.h>
44 #include <sys/proc.h>
45 #include <sys/priv.h>
46 #include <sys/mount.h>
47 #include <sys/nlookup.h>
48 #include <sys/vnode.h>
49 #include <sys/buf.h>
50 #include <sys/filio.h>
51 #include <sys/ttycom.h>
52 #include <sys/conf.h>
53 #include <sys/sysctl.h>
54 #include <sys/syslog.h>
55 
56 #include <sys/mplock2.h>
57 
58 static int vn_closefile (struct file *fp);
59 static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
60 		struct ucred *cred, struct sysmsg *msg);
61 static int vn_read (struct file *fp, struct uio *uio,
62 		struct ucred *cred, int flags);
63 static int vn_kqfilter (struct file *fp, struct knote *kn);
64 static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
65 static int vn_write (struct file *fp, struct uio *uio,
66 		struct ucred *cred, int flags);
67 
68 struct fileops vnode_fileops = {
69 	.fo_read = vn_read,
70 	.fo_write = vn_write,
71 	.fo_ioctl = vn_ioctl,
72 	.fo_kqfilter = vn_kqfilter,
73 	.fo_stat = vn_statfile,
74 	.fo_close = vn_closefile,
75 	.fo_shutdown = nofo_shutdown
76 };
77 
78 /*
79  * Common code for vnode open operations.  Check permissions, and call
80  * the VOP_OPEN or VOP_NCREATE routine.
81  *
82  * The caller is responsible for setting up nd with nlookup_init() and
83  * for cleaning it up with nlookup_done(), whether we return an error
84  * or not.
85  *
86  * On success nd->nl_open_vp will hold a referenced and, if requested,
87  * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
88  * is non-NULL the vnode will be installed in the file pointer.
89  *
90  * NOTE: If the caller wishes the namecache entry to be operated with
91  *	 a shared lock it must use NLC_SHAREDLOCK.  If NLC_LOCKVP is set
92  *	 then the vnode lock will also be shared.
93  *
94  * NOTE: The vnode is referenced just once on return whether or not it
95  *	 is also installed in the file pointer.
96  */
97 int
98 vn_open(struct nlookupdata *nd, struct file **fpp, int fmode, int cmode)
99 {
100 	struct file *fp = fpp ? *fpp : NULL;
101 	struct vnode *vp;
102 	struct ucred *cred = nd->nl_cred;
103 	struct vattr vat;
104 	struct vattr *vap = &vat;
105 	int error;
106 	int vpexcl;
107 	u_int flags;
108 	uint64_t osize;
109 	struct mount *mp;
110 
111 	/*
112 	 * Certain combinations are illegal
113 	 */
114 	if ((fmode & (FWRITE | O_TRUNC)) == O_TRUNC)
115 		return(EACCES);
116 
117 	/*
118 	 * Lookup the path and create or obtain the vnode.  After a
119 	 * successful lookup a locked nd->nl_nch will be returned.
120 	 *
121 	 * The result of this section should be a locked vnode.
122 	 *
123 	 * XXX with only a little work we should be able to avoid locking
124 	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
125 	 */
126 	nd->nl_flags |= NLC_OPEN;
127 	if (fmode & O_APPEND)
128 		nd->nl_flags |= NLC_APPEND;
129 	if (fmode & O_TRUNC)
130 		nd->nl_flags |= NLC_TRUNCATE;
131 	if (fmode & FREAD)
132 		nd->nl_flags |= NLC_READ;
133 	if (fmode & FWRITE)
134 		nd->nl_flags |= NLC_WRITE;
135 	if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
136 		nd->nl_flags |= NLC_FOLLOW;
137 
138 	if (fmode & O_CREAT) {
139 		/*
140 		 * CONDITIONAL CREATE FILE CASE
141 		 *
142 		 * Setting NLC_CREATE causes nlookup() to keep a negative
143 		 * hit's ncp instead of returning an error.  Then
144 		 * nc_error or nc_vp may be checked to see if the ncp
145 		 * represents a negative hit.  NLC_CREATE also requires
146 		 * write permission on the governing directory or EPERM
147 		 * is returned.
148 		 *
149 		 * If the file exists but is missing write permission,
150 		 * nlookup() returns EACCES. This has to be handled specially
151 		 * when combined with O_EXCL.
152 		 */
153 		nd->nl_flags |= NLC_CREATE;
154 		nd->nl_flags |= NLC_REFDVP;
155 		bwillinode(1);
156 		error = nlookup(nd);
157 		if (error == EACCES && nd->nl_nch.ncp->nc_vp != NULL &&
158 		    (fmode & O_EXCL)) {
159 			error = EEXIST;
160 		}
161 
162 		/*
163 		 * If no error and nd->nl_dvp is NULL, the nlookup represents
164 		 * a mount-point or cross-mount situation.  e.g.
165 		 * open("/var/cache", O_CREAT), where /var/cache is a
166 		 * mount point or a null-mount point.
167 		 */
168 		if (error == 0 && nd->nl_dvp == NULL)
169 			error = EINVAL;
170 	} else {
171 		/*
172 		 * NORMAL OPEN FILE CASE
173 		 */
174 		error = nlookup(nd);
175 	}
176 
177 	if (error)
178 		return (error);
179 
180 	/*
181 	 * split case to allow us to re-resolve and retry the ncp in case
182 	 * we get ESTALE.
183 	 *
184 	 * (error is 0 on entry / retry)
185 	 */
186 again:
187 	/*
188 	 * Checks for (likely) filesystem-modifying cases and allows
189 	 * the filesystem to stall the front-end.
190 	 */
191 	if ((fmode & (FWRITE | O_TRUNC)) ||
192 	    ((fmode & O_CREAT) && nd->nl_nch.ncp->nc_vp == NULL)) {
193 		error = ncp_writechk(&nd->nl_nch);
194 		if (error)
195 			return error;
196 	}
197 
198 	vpexcl = 1;
199 	if (fmode & O_CREAT) {
200 		if (nd->nl_nch.ncp->nc_vp == NULL) {
201 			VATTR_NULL(vap);
202 			vap->va_type = VREG;
203 			vap->va_mode = cmode;
204 			vap->va_fuseflags = fmode; /* FUSE */
205 			if (fmode & O_EXCL)
206 				vap->va_vaflags |= VA_EXCLUSIVE;
207 			error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp,
208 					    nd->nl_cred, vap);
209 			if (error)
210 				return (error);
211 			fmode &= ~O_TRUNC;
212 			/* locked vnode is returned */
213 		} else {
214 			if (fmode & O_EXCL) {
215 				error = EEXIST;
216 			} else {
217 				error = cache_vget(&nd->nl_nch, cred,
218 						    LK_EXCLUSIVE, &vp);
219 			}
220 			if (error)
221 				return (error);
222 			fmode &= ~O_CREAT;
223 		}
224 	} else {
225 		/*
226 		 * In most other cases a shared lock on the vnode is
227 		 * sufficient.  However, the O_RDWR case needs an
228 		 * exclusive lock if the vnode is executable.  The
229 		 * NLC_EXCLLOCK_IFEXEC and NCF_NOTX flags help resolve
230 		 * this.
231 		 *
232 		 * NOTE: If NCF_NOTX is not set, we do not know the
233  *	 state of the 'x' bits and have to get
234 		 *	 an exclusive lock for the EXCLLOCK_IFEXEC case.
235 		 */
236 		if ((nd->nl_flags & NLC_SHAREDLOCK) &&
237 		    ((nd->nl_flags & NLC_EXCLLOCK_IFEXEC) == 0 ||
238 		     nd->nl_nch.ncp->nc_flag & NCF_NOTX)) {
239 			error = cache_vget(&nd->nl_nch, cred, LK_SHARED, &vp);
240 			vpexcl = 0;
241 		} else {
242 			error = cache_vget(&nd->nl_nch, cred,
243 					   LK_EXCLUSIVE, &vp);
244 		}
245 		if (error)
246 			return (error);
247 	}
248 
249 	/*
250 	 * We have a locked vnode and ncp now.  Note that the ncp will
251 	 * be cleaned up by the caller if nd->nl_nch is left intact.
252 	 */
253 	if (vp->v_type == VLNK) {
254 		error = EMLINK;
255 		goto bad;
256 	}
257 	if (vp->v_type == VSOCK) {
258 		error = EOPNOTSUPP;
259 		goto bad;
260 	}
261 	if (vp->v_type != VDIR && (fmode & O_DIRECTORY)) {
262 		error = ENOTDIR;
263 		goto bad;
264 	}
265 	if ((fmode & O_CREAT) == 0) {
266 		if (fmode & (FWRITE | O_TRUNC)) {
267 			if (vp->v_type == VDIR) {
268 				error = EISDIR;
269 				goto bad;
270 			}
271 
272 			/*
273 			 * Additional checks on vnode (does not substitute
274 			 * for ncp_writechk()).
275 			 */
276 			error = vn_writechk(vp);
277 			if (error) {
278 				/*
279 				 * Special stale handling, re-resolve the
280 				 * vnode.
281 				 */
282 				if (error == ESTALE) {
283 					vput(vp);
284 					vp = NULL;
285 					if (vpexcl == 0) {
286 						cache_unlock(&nd->nl_nch);
287 						cache_lock(&nd->nl_nch);
288 					}
289 					cache_setunresolved(&nd->nl_nch);
290 					error = cache_resolve(&nd->nl_nch,
291 							      cred);
292 					if (error == 0)
293 						goto again;
294 				}
295 				goto bad;
296 			}
297 		}
298 	}
299 	if (fmode & O_TRUNC) {
300 		vn_unlock(vp);				/* XXX */
301 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
302 		osize = vp->v_filesize;
303 		VATTR_NULL(vap);
304 		vap->va_size = 0;
305 		error = VOP_SETATTR_FP(vp, vap, cred, fp);
306 		if (error)
307 			goto bad;
308 		error = VOP_GETATTR(vp, vap);
309 		if (error)
310 			goto bad;
311 		mp = vq_vptomp(vp);
312 		VFS_ACCOUNT(mp, vap->va_uid, vap->va_gid, -osize);
313 	}
314 
315 	/*
316  * Set or clear VSWAPCACHE on the vp based on nd->nl_nch.ncp->nc_flag.
317  * These particular bits are tracked all the way from the root.
318 	 *
319 	 * NOTE: Might not work properly on NFS servers due to the
320 	 * disconnected namecache.
321 	 */
322 	flags = nd->nl_nch.ncp->nc_flag;
323 	if ((flags & (NCF_UF_CACHE | NCF_UF_PCACHE)) &&
324 	    (flags & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE)) == 0) {
325 		vsetflags(vp, VSWAPCACHE);
326 	} else {
327 		vclrflags(vp, VSWAPCACHE);
328 	}
329 
330 	/*
331 	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
332 	 * associated with the fp yet so we own it clean.
333 	 *
334 	 * f_nchandle inherits nl_nch.  This used to be necessary only for
335 	 * directories but now we do it unconditionally so f*() ops
336 	 * such as fchmod() can access the actual namespace that was
337 	 * used to open the file.
338 	 */
339 	if (fp) {
340 		if (nd->nl_flags & NLC_APPENDONLY)
341 			fmode |= FAPPENDONLY;
342 		fp->f_nchandle = nd->nl_nch;
343 		cache_zero(&nd->nl_nch);
344 		cache_unlock(&fp->f_nchandle);
345 	}
346 
347 	/*
348 	 * Get rid of nl_nch.  vn_open does not return it (it returns the
349 	 * vnode or the file pointer).
350 	 *
351 	 * NOTE: We can't leave nl_nch locked through the VOP_OPEN anyway
352 	 *	 since the VOP_OPEN may block, e.g. on /dev/ttyd0
353 	 *
354 	 * NOTE: The VOP_OPEN() can replace the *fpp we supply with its own
355 	 *	 (it will fdrop/fhold), and can also set the *fpp up however
356 	 *	 it wants, not necessarily using DTYPE_VNODE.
357 	 */
358 	if (nd->nl_nch.ncp)
359 		cache_put(&nd->nl_nch);
360 
361 	error = VOP_OPEN(vp, fmode, cred, fpp);
362 	fp = fpp ? *fpp : NULL;
363 
364 	if (error) {
365 		/*
366 		 * Setting f_ops to &badfileops prevents the descriptor
367 		 * code from trying to close and release the vnode.  Since
368 		 * the open failed we do not want to call close.
369 		 */
370 		if (fp) {
371 			fp->f_data = NULL;
372 			fp->f_ops = &badfileops;
373 		}
374 		goto bad;
375 	}
376 
377 #if 0
378 	/*
379 	 * Assert that VREG files have been setup for vmio.
380 	 */
381 	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
382 		("vn_open: regular file was not VMIO enabled!"));
383 #endif
384 
385 	/*
386 	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
387 	 * only returned in the fp == NULL case.
388 	 *
389 	 * NOTE: vnode stored in fp may be different
390 	 */
391 	if (fp == NULL) {
392 		nd->nl_open_vp = vp;
393 		nd->nl_vp_fmode = fmode;
394 		if ((nd->nl_flags & NLC_LOCKVP) == 0)
395 			vn_unlock(vp);
396 	} else {
397 		vput(vp);
398 	}
399 	return (0);
400 bad:
401 	if (vp)
402 		vput(vp);
403 	return (error);
404 }
405 
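/*
 * Example (illustrative only, compiled out): a minimal kernel-side
 * caller of vn_open() in the fp == NULL mode, modeled on users such
 * as sys_acct().  The helper name is hypothetical.  Per the contract
 * above, nlookup_done() runs whether or not vn_open() succeeded, and
 * nl_open_vp must be taken over (and zeroed) first or nlookup_done()
 * will close it for us.
 */
#if 0
static int
example_open_rdonly(const char *path, struct vnode **vpp)
{
	struct nlookupdata nd;
	int error;

	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = vn_open(&nd, NULL, FREAD, 0);
	if (error == 0) {
		/* referenced and unlocked since NLC_LOCKVP was not set */
		*vpp = nd.nl_open_vp;
		nd.nl_open_vp = NULL;
	}
	nlookup_done(&nd);
	return (error);
}
#endif
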
406 int
407 vn_opendisk(const char *devname, int fmode, struct vnode **vpp)
408 {
409 	struct vnode *vp;
410 	int error;
411 
412 	if (strncmp(devname, "/dev/", 5) == 0)
413 		devname += 5;
414 	if ((vp = getsynthvnode(devname)) == NULL) {
415 		error = ENODEV;
416 	} else {
417 		error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL);
418 		vn_unlock(vp);
419 		if (error) {
420 			vrele(vp);
421 			vp = NULL;
422 		}
423 	}
424 	*vpp = vp;
425 	return (error);
426 }
427 
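/*
 * Example (illustrative only, compiled out): open a disk device by
 * name for reading.  The device name is hypothetical; on success the
 * vnode is returned referenced and unlocked, and vn_close() below is
 * the usual release path.
 */
#if 0
static int
example_open_disk(struct vnode **vpp)
{
	/* "da0" and "/dev/da0" are equivalent, per the prefix strip */
	return (vn_opendisk("da0", FREAD, vpp));
}
#endif
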
428 /*
429  * Checks for special conditions on the vnode which might prevent writing
430  * after the vnode has (likely) been locked.  The vnode might or might not
431  * be locked as of this call, but will be at least referenced.
432  *
433  * Also re-checks the mount RDONLY flag that ncp_writechk() checked prior
434  * to the vnode being locked.
435  */
436 int
437 vn_writechk(struct vnode *vp)
438 {
439 	/*
440 	 * If there's shared text associated with
441 	 * the vnode, try to free it up once.  If
442 	 * we fail, we can't allow writing.
443 	 */
444 	if (vp->v_flag & VTEXT)
445 		return (ETXTBSY);
446 	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_RDONLY))
447 		return (EROFS);
448 	return 0;
449 }
450 
451 /*
452  * Check whether the underlying mount is read-only.  The mount point
453  * referenced by the namecache may be different from the mount point
454  * used by the underlying vnode in the case of NULLFS, so a separate
455  * check is needed.
456  *
457  * Must be called PRIOR to any vnodes being locked.
458  */
459 int
460 ncp_writechk(struct nchandle *nch)
461 {
462 	struct mount *mp;
463 
464 	if ((mp = nch->mount) != NULL) {
465 		if (mp->mnt_flag & MNT_RDONLY)
466 			return (EROFS);
467 		if (mp->mnt_op->vfs_modifying != vfs_stdmodifying)
468 			VFS_MODIFYING(mp);
469 	}
470 	return(0);
471 }
472 
473 /*
474  * Vnode close call
475  *
476  * MPSAFE
477  */
478 int
479 vn_close(struct vnode *vp, int flags, struct file *fp)
480 {
481 	int error;
482 
483 	error = vn_lock(vp, LK_SHARED | LK_RETRY | LK_FAILRECLAIM);
484 	if (error == 0) {
485 		error = VOP_CLOSE(vp, flags, fp);
486 		vn_unlock(vp);
487 	}
488 	vrele(vp);
489 	return (error);
490 }
491 
492 /*
493  * Sequential heuristic.
494  *
495  * MPSAFE (f_seqcount and f_nextoff are allowed to race)
496  */
497 static __inline
498 int
499 sequential_heuristic(struct uio *uio, struct file *fp)
500 {
501 	/*
502 	 * Sequential heuristic - detect sequential operation
503 	 *
504 	 * NOTE: SMP: We allow f_seqcount updates to race.
505 	 */
506 	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
507 	    uio->uio_offset == fp->f_nextoff) {
508 		int tmpseq = fp->f_seqcount;
509 
510 		tmpseq += howmany(uio->uio_resid, MAXBSIZE);
511 		if (tmpseq > IO_SEQMAX)
512 			tmpseq = IO_SEQMAX;
513 		fp->f_seqcount = tmpseq;
514 		return(fp->f_seqcount << IO_SEQSHIFT);
515 	}
516 
517 	/*
518 	 * Not sequential, quick draw-down of seqcount
519 	 *
520 	 * NOTE: SMP: We allow f_seqcount updates to race.
521 	 */
522 	if (fp->f_seqcount > 1)
523 		fp->f_seqcount = 1;
524 	else
525 		fp->f_seqcount = 0;
526 	return(0);
527 }
528 
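/*
 * Worked example (a sketch assuming the usual constants
 * MAXBSIZE = 65536, IO_SEQMAX = 0x7F, IO_SEQSHIFT = 16):
 */
#if 0
static int
example_seq_hint(struct file *fp, struct uio *uio)
{
	/*
	 * Back-to-back 32K reads earn howmany(32768, 65536) = 1 point
	 * each, so the returned hint climbs 0x10000, 0x20000, ... and
	 * saturates at 0x7F0000.  vn_read()/vn_write() OR the hint
	 * into ioflag, where filesystems may scale read-ahead and
	 * clustering from it.  One out-of-order I/O collapses the
	 * count back to 1 or 0.
	 */
	return (sequential_heuristic(uio, fp));
}
#endif
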
529 /*
530  * get - lock and return the f_offset field.
531  * set - set and unlock the f_offset field.
532  *
533  * These routines serve the dual purpose of serializing access to the
534  * f_offset field (at least on x86) and guaranteeing operational integrity
535  * when multiple read()ers and write()ers are present on the same fp.
536  *
537  * MPSAFE
538  */
539 static __inline off_t
540 vn_get_fpf_offset(struct file *fp)
541 {
542 	u_int	flags;
543 	u_int	nflags;
544 
545 	/*
546 	 * Shortcut critical path.
547 	 */
548 	flags = fp->f_flag & ~FOFFSETLOCK;
549 	if (atomic_cmpset_int(&fp->f_flag, flags, flags | FOFFSETLOCK))
550 		return(fp->f_offset);
551 
552 	/*
553 	 * The hard way
554 	 */
555 	for (;;) {
556 		flags = fp->f_flag;
557 		if (flags & FOFFSETLOCK) {
558 			nflags = flags | FOFFSETWAKE;
559 			tsleep_interlock(&fp->f_flag, 0);
560 			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
561 				tsleep(&fp->f_flag, PINTERLOCKED, "fpoff", 0);
562 		} else {
563 			nflags = flags | FOFFSETLOCK;
564 			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
565 				break;
566 		}
567 	}
568 	return(fp->f_offset);
569 }
570 
571 /*
572  * MPSAFE
573  */
574 static __inline void
575 vn_set_fpf_offset(struct file *fp, off_t offset)
576 {
577 	u_int	flags;
578 	u_int	nflags;
579 
580 	/*
581 	 * We hold the lock so we can set the offset without interference.
582 	 */
583 	fp->f_offset = offset;
584 
585 	/*
586 	 * Normal release is already a reasonably critical path.
587 	 */
588 	for (;;) {
589 		flags = fp->f_flag;
590 		nflags = flags & ~(FOFFSETLOCK | FOFFSETWAKE);
591 		if (atomic_cmpset_int(&fp->f_flag, flags, nflags)) {
592 			if (flags & FOFFSETWAKE)
593 				wakeup(&fp->f_flag);
594 			break;
595 		}
596 	}
597 }
598 
599 /*
600  * MPSAFE
601  */
602 static __inline off_t
603 vn_poll_fpf_offset(struct file *fp)
604 {
605 #if defined(__x86_64__)
606 	return(fp->f_offset);
607 #else
608 	off_t off = vn_get_fpf_offset(fp);
609 	vn_set_fpf_offset(fp, off);
610 	return(off);
611 #endif
612 }
613 
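/*
 * Example (illustrative only, compiled out): the intended pairing.
 * Any path that consumes and then updates f_offset brackets the
 * operation with the get/set pair; vn_read() and vn_write() below
 * are the real consumers.  The helper name is hypothetical.
 */
#if 0
static void
example_advance_offset(struct file *fp, off_t nbytes)
{
	off_t off;

	off = vn_get_fpf_offset(fp);	/* acquires FOFFSETLOCK */
	off += nbytes;			/* e.g. account a completed I/O */
	vn_set_fpf_offset(fp, off);	/* publishes, releases, wakes */
}
#endif
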
614 /*
615  * Package up an I/O request on a vnode into a uio and do it.
616  *
617  * MPSAFE
618  */
619 int
620 vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
621 	off_t offset, enum uio_seg segflg, int ioflg,
622 	struct ucred *cred, int *aresid)
623 {
624 	struct uio auio;
625 	struct iovec aiov;
626 	int error;
627 
628 	if ((ioflg & IO_NODELOCKED) == 0)
629 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
630 	auio.uio_iov = &aiov;
631 	auio.uio_iovcnt = 1;
632 	aiov.iov_base = base;
633 	aiov.iov_len = len;
634 	auio.uio_resid = len;
635 	auio.uio_offset = offset;
636 	auio.uio_segflg = segflg;
637 	auio.uio_rw = rw;
638 	auio.uio_td = curthread;
639 	if (rw == UIO_READ) {
640 		error = VOP_READ(vp, &auio, ioflg, cred);
641 	} else {
642 		error = VOP_WRITE(vp, &auio, ioflg, cred);
643 	}
644 	if (aresid)
645 		*aresid = auio.uio_resid;
646 	else
647 		if (auio.uio_resid && error == 0)
648 			error = EIO;
649 	if ((ioflg & IO_NODELOCKED) == 0)
650 		vn_unlock(vp);
651 	return (error);
652 }
653 
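/*
 * Example (illustrative only, compiled out): read the first 512
 * bytes of a referenced, unlocked vnode into a kernel buffer.  The
 * helper name and the short-read policy are hypothetical.
 */
#if 0
static int
example_read_header(struct vnode *vp, struct ucred *cred, char *buf)
{
	int resid;
	int error;

	/* ioflg 0: vn_rdwr() takes and drops the vnode lock itself */
	error = vn_rdwr(UIO_READ, vp, buf, 512, 0, UIO_SYSSPACE,
			0, cred, &resid);
	if (error == 0 && resid != 0)
		error = EIO;	/* treat a short read as an error */
	return (error);
}
#endif
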
654 /*
655  * Package up an I/O request on a vnode into a uio and do it.  The I/O
656  * request is split up into smaller chunks and we try to avoid saturating
657  * the buffer cache while potentially holding a vnode locked, so we
658  * check bwillwrite() before calling vn_rdwr().  We also call lwkt_user_yield()
659  * to give other processes a chance to lock the vnode (either other processes
660  * core'ing the same binary, or unrelated processes scanning the directory).
661  *
662  * MPSAFE
663  */
664 int
665 vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
666 		 off_t offset, enum uio_seg segflg, int ioflg,
667 		 struct ucred *cred, int *aresid)
668 {
669 	int error = 0;
670 
671 	do {
672 		int chunk;
673 
674 		/*
675 		 * Force `offset' to a multiple of MAXBSIZE except possibly
676 		 * for the first chunk, so that filesystems only need to
677 		 * write full blocks except possibly for the first and last
678 		 * chunks.
679 		 */
680 		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;
681 
682 		if (chunk > len)
683 			chunk = len;
684 		if (vp->v_type == VREG && (ioflg & IO_RECURSE) == 0) {
685 			switch(rw) {
686 			case UIO_READ:
687 				bwillread(chunk);
688 				break;
689 			case UIO_WRITE:
690 				bwillwrite(chunk);
691 				break;
692 			}
693 		}
694 		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
695 				ioflg, cred, aresid);
696 		len -= chunk;	/* aresid calc already includes length */
697 		if (error)
698 			break;
699 		offset += chunk;
700 		base += chunk;
701 		lwkt_user_yield();
702 	} while (len);
703 	if (aresid)
704 		*aresid += len;
705 	return (error);
706 }
707 
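/*
 * Worked example of the chunking above, assuming MAXBSIZE = 65536:
 * a 200000-byte I/O starting at offset 70000 first issues a chunk of
 * 65536 - (70000 % 65536) = 61072 bytes, so the next chunk begins on
 * the MAXBSIZE boundary at 131072; chunks of 65536, 65536, and a
 * final 7856 bytes then finish the transfer.
 */
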
708 /*
709  * File pointers can no longer get ripped up by revoke so
710  * we don't need to lock access to the vp.
711  *
712  * f_offset updates are not guaranteed against multiple readers
713  */
714 static int
715 vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
716 {
717 	struct vnode *vp;
718 	int error, ioflag;
719 
720 	KASSERT(uio->uio_td == curthread,
721 		("uio_td %p is not td %p", uio->uio_td, curthread));
722 	vp = (struct vnode *)fp->f_data;
723 
724 	ioflag = 0;
725 	if (flags & O_FBLOCKING) {
726 		/* ioflag &= ~IO_NDELAY; */
727 	} else if (flags & O_FNONBLOCKING) {
728 		ioflag |= IO_NDELAY;
729 	} else if (fp->f_flag & FNONBLOCK) {
730 		ioflag |= IO_NDELAY;
731 	}
732 	if (fp->f_flag & O_DIRECT) {
733 		ioflag |= IO_DIRECT;
734 	}
735 	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
736 		uio->uio_offset = vn_get_fpf_offset(fp);
737 	vn_lock(vp, LK_SHARED | LK_RETRY);
738 	ioflag |= sequential_heuristic(uio, fp);
739 
740 	error = VOP_READ_FP(vp, uio, ioflag, cred, fp);
741 	fp->f_nextoff = uio->uio_offset;
742 	vn_unlock(vp);
743 	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
744 		vn_set_fpf_offset(fp, uio->uio_offset);
745 	return (error);
746 }
747 
748 /*
749  * MPSAFE
750  */
751 static int
752 vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
753 {
754 	struct vnode *vp;
755 	int error, ioflag;
756 
757 	KASSERT(uio->uio_td == curthread,
758 		("uio_td %p is not p %p", uio->uio_td, curthread));
759 	vp = (struct vnode *)fp->f_data;
760 
761 	ioflag = IO_UNIT;
762 	if (vp->v_type == VREG &&
763 	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
764 		ioflag |= IO_APPEND;
765 	}
766 
767 	if (flags & O_FBLOCKING) {
768 		/* ioflag &= ~IO_NDELAY; */
769 	} else if (flags & O_FNONBLOCKING) {
770 		ioflag |= IO_NDELAY;
771 	} else if (fp->f_flag & FNONBLOCK) {
772 		ioflag |= IO_NDELAY;
773 	}
774 	if (fp->f_flag & O_DIRECT) {
775 		ioflag |= IO_DIRECT;
776 	}
777 	if (flags & O_FASYNCWRITE) {
778 		/* ioflag &= ~IO_SYNC; */
779 	} else if (flags & O_FSYNCWRITE) {
780 		ioflag |= IO_SYNC;
781 	} else if (fp->f_flag & O_FSYNC) {
782 		ioflag |= IO_SYNC;
783 	}
784 
785 	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
786 		ioflag |= IO_SYNC;
787 	if ((flags & O_FOFFSET) == 0)
788 		uio->uio_offset = vn_get_fpf_offset(fp);
789 	if (vp->v_mount)
790 		VFS_MODIFYING(vp->v_mount);
791 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
792 	ioflag |= sequential_heuristic(uio, fp);
793 	error = VOP_WRITE_FP(vp, uio, ioflag, cred, fp);
794 	fp->f_nextoff = uio->uio_offset;
795 	vn_unlock(vp);
796 	if ((flags & O_FOFFSET) == 0)
797 		vn_set_fpf_offset(fp, uio->uio_offset);
798 	return (error);
799 }
800 
801 /*
802  * MPSAFE
803  */
804 static int
805 vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
806 {
807 	struct vnode *vp;
808 	int error;
809 
810 	vp = (struct vnode *)fp->f_data;
811 	error = vn_stat(vp, sb, cred);
812 	return (error);
813 }
814 
815 /*
816  * MPSAFE
817  */
818 int
819 vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
820 {
821 	struct vattr vattr;
822 	struct vattr *vap;
823 	int error;
824 	u_short mode;
825 	cdev_t dev;
826 
827 	/*
828 	 * vp already has a ref and is validated, can call unlocked.
829 	 */
830 	vap = &vattr;
831 	error = VOP_GETATTR(vp, vap);
832 	if (error)
833 		return (error);
834 
835 	/*
836 	 * Zero the spare stat fields
837 	 */
838 	sb->st_lspare = 0;
839 	sb->st_qspare2 = 0;
840 
841 	/*
842 	 * Copy from vattr table
843 	 */
844 	if (vap->va_fsid != VNOVAL)
845 		sb->st_dev = vap->va_fsid;
846 	else
847 		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
848 	sb->st_ino = vap->va_fileid;
849 	mode = vap->va_mode;
850 	switch (vap->va_type) {
851 	case VREG:
852 		mode |= S_IFREG;
853 		break;
854 	case VDATABASE:
855 		mode |= S_IFDB;
856 		break;
857 	case VDIR:
858 		mode |= S_IFDIR;
859 		break;
860 	case VBLK:
861 		mode |= S_IFBLK;
862 		break;
863 	case VCHR:
864 		mode |= S_IFCHR;
865 		break;
866 	case VLNK:
867 		mode |= S_IFLNK;
868 		/* This is a cosmetic change, symlinks do not have a mode. */
869 		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
870 			sb->st_mode &= ~ACCESSPERMS;	/* 0000 */
871 		else
872 			sb->st_mode |= ACCESSPERMS;	/* 0777 */
873 		break;
874 	case VSOCK:
875 		mode |= S_IFSOCK;
876 		break;
877 	case VFIFO:
878 		mode |= S_IFIFO;
879 		break;
880 	default:
881 		return (EBADF);
882 	}
883 	sb->st_mode = mode;
884 	if (vap->va_nlink > (nlink_t)-1)
885 		sb->st_nlink = (nlink_t)-1;
886 	else
887 		sb->st_nlink = vap->va_nlink;
888 	sb->st_uid = vap->va_uid;
889 	sb->st_gid = vap->va_gid;
890 	sb->st_rdev = devid_from_dev(vp->v_rdev);
891 	sb->st_size = vap->va_size;
892 	sb->st_atimespec = vap->va_atime;
893 	sb->st_mtimespec = vap->va_mtime;
894 	sb->st_ctimespec = vap->va_ctime;
895 
896 	/*
897 	 * A VCHR and VBLK device may track the last access and last modified
898 	 * time independently of the filesystem.  This is particularly true
899 	 * because device read and write calls may bypass the filesystem.
900 	 */
901 	if (vp->v_type == VCHR || vp->v_type == VBLK) {
902 		dev = vp->v_rdev;
903 		if (dev != NULL) {
904 			if (dev->si_lastread) {
905 				sb->st_atimespec.tv_sec = time_second +
906 							  (dev->si_lastread -
907 							   time_uptime);
908 				sb->st_atimespec.tv_nsec = 0;
909 			}
910 			if (dev->si_lastwrite) {
911 				sb->st_mtimespec.tv_sec = time_second +
912 							  (dev->si_lastwrite -
913 							   time_uptime);
914 				sb->st_mtimespec.tv_nsec = 0;
915 			}
916 		}
917 	}
918 
919 	/*
920 	 * According to www.opengroup.org, the meaning of st_blksize is
921 	 *   "a filesystem-specific preferred I/O block size for this
922 	 *    object.  In some filesystem types, this may vary from file
923 	 *    to file"
924 	 * Default to PAGE_SIZE after much discussion.
925 	 */
926 
927 	if (vap->va_type == VREG) {
928 		sb->st_blksize = vap->va_blocksize;
929 	} else if (vn_isdisk(vp, NULL)) {
930 		/*
931 		 * XXX this is broken.  If the device is not yet open (aka
932 		 * stat() call, aka v_rdev == NULL), how are we supposed
933 		 * to get a valid block size out of it?
934 		 */
935 		dev = vp->v_rdev;
936 
937 		sb->st_blksize = dev->si_bsize_best;
938 		if (sb->st_blksize < dev->si_bsize_phys)
939 			sb->st_blksize = dev->si_bsize_phys;
940 		if (sb->st_blksize < BLKDEV_IOSIZE)
941 			sb->st_blksize = BLKDEV_IOSIZE;
942 	} else {
943 		sb->st_blksize = PAGE_SIZE;
944 	}
945 
946 	sb->st_flags = vap->va_flags;
947 
948 	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
949 	if (error)
950 		sb->st_gen = 0;
951 	else
952 		sb->st_gen = (u_int32_t)vap->va_gen;
953 
954 	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
955 
956 	/*
957 	 * This is for ABI compatibility <= 5.7 (for ABI change made in
958 	 * 5.7 master).
959 	 */
960 	sb->__old_st_blksize = sb->st_blksize;
961 
962 	return (0);
963 }
964 
965 /*
966  * MPALMOSTSAFE - acquires mplock
967  */
968 static int
969 vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred,
970 	 struct sysmsg *msg)
971 {
972 	struct vnode *vp = ((struct vnode *)fp->f_data);
973 	struct vnode *ovp;
974 	struct vattr vattr;
975 	int error;
976 	off_t size;
977 
978 	switch (vp->v_type) {
979 	case VREG:
980 	case VDIR:
981 		if (com == FIONREAD) {
982 			error = VOP_GETATTR(vp, &vattr);
983 			if (error)
984 				break;
985 			size = vattr.va_size;
986 			if ((vp->v_flag & VNOTSEEKABLE) == 0)
987 				size -= vn_poll_fpf_offset(fp);
988 			if (size > 0x7FFFFFFF)
989 				size = 0x7FFFFFFF;
990 			*(int *)data = size;
991 			error = 0;
992 			break;
993 		}
994 		if (com == FIOASYNC) {				/* XXX */
995 			error = 0;				/* XXX */
996 			break;
997 		}
998 		/* fall into ... */
999 	default:
1000 #if 0
1001 		return (ENOTTY);
1002 #endif
1003 	case VFIFO:
1004 	case VCHR:
1005 	case VBLK:
1006 		if (com == FIODTYPE) {
1007 			if (vp->v_type != VCHR && vp->v_type != VBLK) {
1008 				error = ENOTTY;
1009 				break;
1010 			}
1011 			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
1012 			error = 0;
1013 			break;
1014 		}
1015 		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred, msg);
1016 		if (error == 0 && com == TIOCSCTTY) {
1017 			struct proc *p = curthread->td_proc;
1018 			struct session *sess;
1019 
1020 			if (p == NULL) {
1021 				error = ENOTTY;
1022 				break;
1023 			}
1024 
1025 			get_mplock();
1026 			sess = p->p_session;
1027 			/* Do nothing if reassigning same control tty */
1028 			if (sess->s_ttyvp == vp) {
1029 				error = 0;
1030 				rel_mplock();
1031 				break;
1032 			}
1033 
1034 			/* Get rid of reference to old control tty */
1035 			ovp = sess->s_ttyvp;
1036 			vref(vp);
1037 			sess->s_ttyvp = vp;
1038 			if (ovp)
1039 				vrele(ovp);
1040 			rel_mplock();
1041 		}
1042 		break;
1043 	}
1044 	return (error);
1045 }
1046 
1047 /*
1048  * Obtain the requested vnode lock
1049  *
1050  *	LK_RETRY	Automatically retry on timeout
1051  *	LK_FAILRECLAIM	Fail if the vnode is being reclaimed
1052  *
1053  * Failures will occur if the vnode is being recycled, but not
1054  * all callers expect the function to fail, so the caller must pass
1055  * LK_FAILRECLAIM if it wants to process an error code.
1056  *
1057  * Errors can occur for other reasons if you pass in other LK_ flags,
1058  * regardless of whether you pass in LK_FAILRECLAIM.
1059  */
1060 int
1061 vn_lock(struct vnode *vp, int flags)
1062 {
1063 	int error;
1064 
1065 	do {
1066 		error = lockmgr(&vp->v_lock, flags);
1067 		if (error == 0)
1068 			break;
1069 	} while (flags & LK_RETRY);
1070 
1071 	/*
1072 	 * Because we (had better!) have a ref on the vnode, once it
1073 	 * goes to VRECLAIMED state it will not be recycled until all
1074 	 * refs go away.  So we can just check the flag.
1075 	 */
1076 	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
1077 		if (flags & LK_FAILRECLAIM) {
1078 			lockmgr(&vp->v_lock, LK_RELEASE);
1079 			error = ENOENT;
1080 		}
1081 	}
1082 	return (error);
1083 }
1084 
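/*
 * Example (illustrative only, compiled out): take a shared lock
 * while refusing reclaimed vnodes.  With LK_FAILRECLAIM the caller
 * must be prepared for ENOENT; without it a VRECLAIMED vnode is
 * still returned locked.  The helper name is hypothetical.
 */
#if 0
static int
example_lock_shared(struct vnode *vp)
{
	int error;

	error = vn_lock(vp, LK_SHARED | LK_RETRY | LK_FAILRECLAIM);
	if (error)
		return (error);		/* ENOENT: vp was reclaimed */
	/* ... inspect vp while locked ... */
	vn_unlock(vp);
	return (0);
}
#endif
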
1085 int
1086 vn_relock(struct vnode *vp, int flags)
1087 {
1088 	int error;
1089 
1090 	do {
1091 		error = lockmgr(&vp->v_lock, flags);
1092 		if (error == 0)
1093 			break;
1094 	} while (flags & LK_RETRY);
1095 
1096 	return error;
1097 }
1098 
1099 #ifdef DEBUG_VN_UNLOCK
1100 
1101 void
1102 debug_vn_unlock(struct vnode *vp, const char *filename, int line)
1103 {
1104 	kprintf("vn_unlock from %s:%d\n", filename, line);
1105 	lockmgr(&vp->v_lock, LK_RELEASE);
1106 }
1107 
1108 #else
1109 
1110 void
1111 vn_unlock(struct vnode *vp)
1112 {
1113 	lockmgr(&vp->v_lock, LK_RELEASE);
1114 }
1115 
1116 #endif
1117 
1118 /*
1119  * MPSAFE
1120  */
1121 int
1122 vn_islocked(struct vnode *vp)
1123 {
1124 	return (lockstatus(&vp->v_lock, curthread));
1125 }
1126 
1127 /*
1128  * Return the lock status of a vnode and unlock the vnode
1129  * if we owned the lock.  This is not a boolean; if the
1130  * caller cares what the lock status is, it must
1131  * check the various possible values.
1132  *
1133  * This only unlocks exclusive locks held by the caller,
1134  * it will NOT unlock shared locks (there is no way to
1135  * tell who the shared lock belongs to).
1136  *
1137  * MPSAFE
1138  */
1139 int
1140 vn_islocked_unlock(struct vnode *vp)
1141 {
1142 	int vpls;
1143 
1144 	vpls = lockstatus(&vp->v_lock, curthread);
1145 	if (vpls == LK_EXCLUSIVE)
1146 		lockmgr(&vp->v_lock, LK_RELEASE);
1147 	return(vpls);
1148 }
1149 
1150 /*
1151  * Restore a vnode lock that we previously released via
1152  * vn_islocked_unlock().  This is a NOP if we did not
1153  * own the original lock.
1154  *
1155  * MPSAFE
1156  */
1157 void
1158 vn_islocked_relock(struct vnode *vp, int vpls)
1159 {
1160 	/* the lockmgr() return value is intentionally ignored */
1161 
1162 	if (vpls == LK_EXCLUSIVE)
1163 		lockmgr(&vp->v_lock, vpls);
1164 }
1165 
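/*
 * Example (illustrative only, compiled out): temporarily drop an
 * exclusive vnode lock around a blocking operation and restore it
 * afterwards.  Shared locks pass through both calls untouched, as
 * described above.  The helper name is hypothetical.
 */
#if 0
static void
example_blocking_op(struct vnode *vp)
{
	int vpls;

	vpls = vn_islocked_unlock(vp);	/* drops only our excl lock */
	/* ... blocking work that must not hold vp's lock ... */
	vn_islocked_relock(vp, vpls);	/* NOP unless we held it */
}
#endif
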
1166 /*
1167  * MPSAFE
1168  */
1169 static int
1170 vn_closefile(struct file *fp)
1171 {
1172 	int error;
1173 
1174 	fp->f_ops = &badfileops;
1175 	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag, fp);
1176 	return (error);
1177 }
1178 
1179 /*
1180  * MPSAFE
1181  */
1182 static int
1183 vn_kqfilter(struct file *fp, struct knote *kn)
1184 {
1185 	int error;
1186 
1187 	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
1188 	return (error);
1189 }
1190