xref: /dragonfly/sys/kern/vfs_vnops.c (revision 631c21f2)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
35  * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
36  */
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/uio.h>
41 #include <sys/fcntl.h>
42 #include <sys/file.h>
43 #include <sys/stat.h>
44 #include <sys/proc.h>
45 #include <sys/priv.h>
46 #include <sys/mount.h>
47 #include <sys/nlookup.h>
48 #include <sys/vnode.h>
49 #include <sys/buf.h>
50 #include <sys/filio.h>
51 #include <sys/ttycom.h>
52 #include <sys/conf.h>
53 #include <sys/sysctl.h>
54 #include <sys/syslog.h>
55 
56 #include <sys/mplock2.h>
57 
58 static int vn_closefile (struct file *fp);
59 static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
60 		struct ucred *cred, struct sysmsg *msg);
61 static int vn_read (struct file *fp, struct uio *uio,
62 		struct ucred *cred, int flags);
63 static int vn_kqfilter (struct file *fp, struct knote *kn);
64 static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
65 static int vn_write (struct file *fp, struct uio *uio,
66 		struct ucred *cred, int flags);
67 
68 struct fileops vnode_fileops = {
69 	.fo_read = vn_read,
70 	.fo_write = vn_write,
71 	.fo_ioctl = vn_ioctl,
72 	.fo_kqfilter = vn_kqfilter,
73 	.fo_stat = vn_statfile,
74 	.fo_close = vn_closefile,
75 	.fo_shutdown = nofo_shutdown
76 };
77 
78 /*
79  * Common code for vnode open operations.  Check permissions, and call
80  * the VOP_OPEN or VOP_NCREATE routine.
81  *
82  * The caller is responsible for setting up nd with nlookup_init() and
83  * for cleaning it up with nlookup_done(), whether we return an error
84  * or not.
85  *
86  * On success nd->nl_open_vp will hold a referenced and, if requested,
87  * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
88  * is non-NULL the vnode will be installed in the file pointer.
89  *
90  * NOTE: If the caller wishes the namecache entry to be operated with
91  *	 a shared lock it must use NLC_SHAREDLOCK.  If NLC_LOCKVP is set
92  *	 then the vnode lock will also be shared.
93  *
94  * NOTE: The vnode is referenced just once on return whether or not it
95  *	 is also installed in the file pointer.
96  */
97 int
98 vn_open(struct nlookupdata *nd, struct file **fpp, int fmode, int cmode)
99 {
100 	struct file *fp = fpp ? *fpp : NULL;
101 	struct vnode *vp;
102 	struct ucred *cred = nd->nl_cred;
103 	struct vattr vat;
104 	struct vattr *vap = &vat;
105 	int error;
106 	int vpexcl;
107 	u_int flags;
108 	uint64_t osize;
109 	struct mount *mp;
110 
111 	/*
112 	 * Certain combinations are illegal
113 	 */
114 	if ((fmode & (FWRITE | O_TRUNC)) == O_TRUNC)
115 		return(EACCES);
116 
117 	/*
118 	 * Lookup the path and create or obtain the vnode.  After a
119 	 * successful lookup a locked nd->nl_nch will be returned.
120 	 *
121 	 * The result of this section should be a locked vnode.
122 	 *
123 	 * XXX with only a little work we should be able to avoid locking
124 	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
125 	 */
126 	nd->nl_flags |= NLC_OPEN;
127 	if (fmode & O_APPEND)
128 		nd->nl_flags |= NLC_APPEND;
129 	if (fmode & O_TRUNC)
130 		nd->nl_flags |= NLC_TRUNCATE;
131 	if (fmode & FREAD)
132 		nd->nl_flags |= NLC_READ;
133 	if (fmode & FWRITE)
134 		nd->nl_flags |= NLC_WRITE;
135 	if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
136 		nd->nl_flags |= NLC_FOLLOW;
137 
138 	if (fmode & O_CREAT) {
139 		/*
140 		 * CONDITIONAL CREATE FILE CASE
141 		 *
142 		 * Setting NLC_CREATE causes a negative hit to store
143 		 * the negative hit ncp and not return an error.  Then
144 		 * nc_error or nc_vp may be checked to see if the ncp
145 		 * represents a negative hit.  NLC_CREATE also requires
146 		 * write permission on the governing directory or EPERM
147 		 * is returned.
148 		 * If the file exists but is missing write permission,
149 		 * nlookup() returns EACCES. This has to be handled specially
150 		 * when combined with O_EXCL.
151 		 */
152 		nd->nl_flags |= NLC_CREATE;
153 		nd->nl_flags |= NLC_REFDVP;
154 		bwillinode(1);
155 		error = nlookup(nd);
156 		if (error == EACCES && nd->nl_nch.ncp->nc_vp != NULL &&
157 			(fmode & O_EXCL))
158 			error = EEXIST;
159 	} else {
160 		/*
161 		 * NORMAL OPEN FILE CASE
162 		 */
163 		error = nlookup(nd);
164 	}
165 
166 	if (error)
167 		return (error);
168 
169 	/*
170 	 * split case to allow us to re-resolve and retry the ncp in case
171 	 * we get ESTALE.
172 	 *
173 	 * (error is 0 on entry / retry)
174 	 */
175 again:
176 	/*
177 	 * Checks for (likely) filesystem-modifying cases and allows
178 	 * the filesystem to stall the front-end.
179 	 */
180 	if ((fmode & (FWRITE | O_TRUNC)) ||
181 	    ((fmode & O_CREAT) && nd->nl_nch.ncp->nc_vp == NULL)) {
182 		error = ncp_writechk(&nd->nl_nch);
183 		if (error)
184 			return error;
185 	}
186 
187 	vpexcl = 1;
188 	if (fmode & O_CREAT) {
189 		if (nd->nl_nch.ncp->nc_vp == NULL) {
190 			VATTR_NULL(vap);
191 			vap->va_type = VREG;
192 			vap->va_mode = cmode;
193 			vap->va_fuseflags = fmode; /* FUSE */
194 			if (fmode & O_EXCL)
195 				vap->va_vaflags |= VA_EXCLUSIVE;
196 			error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp,
197 					    nd->nl_cred, vap);
198 			if (error)
199 				return (error);
200 			fmode &= ~O_TRUNC;
201 			/* locked vnode is returned */
202 		} else {
203 			if (fmode & O_EXCL) {
204 				error = EEXIST;
205 			} else {
206 				error = cache_vget(&nd->nl_nch, cred,
207 						    LK_EXCLUSIVE, &vp);
208 			}
209 			if (error)
210 				return (error);
211 			fmode &= ~O_CREAT;
212 		}
213 	} else {
214 		/*
215 		 * In most other cases a shared lock on the vnode is
216 		 * sufficient.  However, the O_RDWR case needs an
217 		 * exclusive lock if the vnode is executable.  The
218 		 * NLC_EXCLLOCK_IFEXEC and NCF_NOTX flags help resolve
219 		 * this.
220 		 *
221 		 * NOTE: If NCF_NOTX is not set, we do not know the
222  *	 state of the 'x' bits and have to get
223 		 *	 an exclusive lock for the EXCLLOCK_IFEXEC case.
224 		 */
225 		if ((nd->nl_flags & NLC_SHAREDLOCK) &&
226 		    ((nd->nl_flags & NLC_EXCLLOCK_IFEXEC) == 0 ||
227 		     nd->nl_nch.ncp->nc_flag & NCF_NOTX)) {
228 			error = cache_vget(&nd->nl_nch, cred, LK_SHARED, &vp);
229 			vpexcl = 0;
230 		} else {
231 			error = cache_vget(&nd->nl_nch, cred,
232 					   LK_EXCLUSIVE, &vp);
233 		}
234 		if (error)
235 			return (error);
236 	}
237 
238 	/*
239 	 * We have a locked vnode and ncp now.  Note that the ncp will
240 	 * be cleaned up by the caller if nd->nl_nch is left intact.
241 	 */
242 	if (vp->v_type == VLNK) {
243 		error = EMLINK;
244 		goto bad;
245 	}
246 	if (vp->v_type == VSOCK) {
247 		error = EOPNOTSUPP;
248 		goto bad;
249 	}
250 	if (vp->v_type != VDIR && (fmode & O_DIRECTORY)) {
251 		error = ENOTDIR;
252 		goto bad;
253 	}
254 	if ((fmode & O_CREAT) == 0) {
255 		if (fmode & (FWRITE | O_TRUNC)) {
256 			if (vp->v_type == VDIR) {
257 				error = EISDIR;
258 				goto bad;
259 			}
260 
261 			/*
262 			 * Additional checks on vnode (does not substitute
263 			 * for ncp_writechk()).
264 			 */
265 			error = vn_writechk(vp);
266 			if (error) {
267 				/*
268 				 * Special stale handling, re-resolve the
269 				 * vnode.
270 				 */
271 				if (error == ESTALE) {
272 					vput(vp);
273 					vp = NULL;
274 					if (vpexcl == 0) {
275 						cache_unlock(&nd->nl_nch);
276 						cache_lock(&nd->nl_nch);
277 					}
278 					cache_setunresolved(&nd->nl_nch);
279 					error = cache_resolve(&nd->nl_nch,
280 							      cred);
281 					if (error == 0)
282 						goto again;
283 				}
284 				goto bad;
285 			}
286 		}
287 	}
288 	if (fmode & O_TRUNC) {
289 		vn_unlock(vp);				/* XXX */
290 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
291 		osize = vp->v_filesize;
292 		VATTR_NULL(vap);
293 		vap->va_size = 0;
294 		error = VOP_SETATTR_FP(vp, vap, cred, fp);
295 		if (error)
296 			goto bad;
297 		error = VOP_GETATTR(vp, vap);
298 		if (error)
299 			goto bad;
300 		mp = vq_vptomp(vp);
301 		VFS_ACCOUNT(mp, vap->va_uid, vap->va_gid, -osize);
302 	}
303 
304 	/*
305 	 * Set or clear VSWAPCACHE on the vp based on nd->nl_nch.ncp->nc_flag.
306 	 * These particular bits are tracked all the way from the root.
307 	 *
308 	 * NOTE: Might not work properly on NFS servers due to the
309 	 * disconnected namecache.
310 	 */
311 	flags = nd->nl_nch.ncp->nc_flag;
312 	if ((flags & (NCF_UF_CACHE | NCF_UF_PCACHE)) &&
313 	    (flags & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE)) == 0) {
314 		vsetflags(vp, VSWAPCACHE);
315 	} else {
316 		vclrflags(vp, VSWAPCACHE);
317 	}
318 
319 	/*
320 	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
321 	 * associated with the fp yet so we own it clean.
322 	 *
323 	 * f_nchandle inherits nl_nch.  This used to be necessary only for
324 	 * directories but now we do it unconditionally so f*() ops
325 	 * such as fchmod() can access the actual namespace that was
326 	 * used to open the file.
327 	 */
328 	if (fp) {
329 		if (nd->nl_flags & NLC_APPENDONLY)
330 			fmode |= FAPPENDONLY;
331 		fp->f_nchandle = nd->nl_nch;
332 		cache_zero(&nd->nl_nch);
333 		cache_unlock(&fp->f_nchandle);
334 	}
335 
336 	/*
337 	 * Get rid of nl_nch.  vn_open does not return it (it returns the
338 	 * vnode or the file pointer).
339 	 *
340 	 * NOTE: We can't leave nl_nch locked through the VOP_OPEN anyway
341 	 *	 since the VOP_OPEN may block, e.g. on /dev/ttyd0
342 	 *
343 	 * NOTE: The VOP_OPEN() can replace the *fpp we supply with its own
344 	 *	 (it will fdrop/fhold), and can also set the *fpp up however
345 	 *	 it wants, not necessarily using DTYPE_VNODE.
346 	 */
347 	if (nd->nl_nch.ncp)
348 		cache_put(&nd->nl_nch);
349 
350 	error = VOP_OPEN(vp, fmode, cred, fpp);
351 	fp = fpp ? *fpp : NULL;
352 
353 	if (error) {
354 		/*
355 	 * Setting f_ops to &badfileops prevents the descriptor code
356 	 * from trying to close and release the vnode.  Since the open
357 	 * failed we do not want to call close.
358 		 */
359 		if (fp) {
360 			fp->f_data = NULL;
361 			fp->f_ops = &badfileops;
362 		}
363 		goto bad;
364 	}
365 
366 #if 0
367 	/*
368 	 * Assert that VREG files have been setup for vmio.
369 	 */
370 	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
371 		("vn_open: regular file was not VMIO enabled!"));
372 #endif
373 
374 	/*
375 	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
376 	 * only returned in the fp == NULL case.
377 	 *
378 	 * NOTE: vnode stored in fp may be different
379 	 */
380 	if (fp == NULL) {
381 		nd->nl_open_vp = vp;
382 		nd->nl_vp_fmode = fmode;
383 		if ((nd->nl_flags & NLC_LOCKVP) == 0)
384 			vn_unlock(vp);
385 	} else {
386 		vput(vp);
387 	}
388 	return (0);
389 bad:
390 	if (vp)
391 		vput(vp);
392 	return (error);
393 }
394 
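/*
 * Illustrative sketch, not part of this file: the typical calling
 * sequence for vn_open() without a file pointer.  The helper name
 * example_open_by_path() is hypothetical; the nlookup_init() /
 * nlookup_done() bracketing and NLC_LOCKVP usage follow the contract
 * documented above vn_open().
 */
#if 0
static int
example_open_by_path(const char *path, struct vnode **vpp)
{
	struct nlookupdata nd;
	int error;

	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0) {
		nd.nl_flags |= NLC_LOCKVP;	/* return the vnode locked */
		error = vn_open(&nd, NULL, FREAD, 0);
		if (error == 0)
			*vpp = nd.nl_open_vp;
	}
	nlookup_done(&nd);	/* required whether vn_open() succeeded or not */
	return (error);
}
#endif
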
395 int
396 vn_opendisk(const char *devname, int fmode, struct vnode **vpp)
397 {
398 	struct vnode *vp;
399 	int error;
400 
401 	if (strncmp(devname, "/dev/", 5) == 0)
402 		devname += 5;
403 	if ((vp = getsynthvnode(devname)) == NULL) {
404 		error = ENODEV;
405 	} else {
406 		error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL);
407 		vn_unlock(vp);
408 		if (error) {
409 			vrele(vp);
410 			vp = NULL;
411 		}
412 	}
413 	*vpp = vp;
414 	return (error);
415 }
416 
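/*
 * Minimal usage sketch (assumed caller, e.g. dump or swap configuration
 * code): open a disk device by name.  The vnode is returned referenced
 * but unlocked; vn_close() is one plausible teardown.
 */
#if 0
	struct vnode *vp;
	int error;

	error = vn_opendisk("da0", FREAD | FWRITE, &vp);
	if (error == 0) {
		/* ... issue I/O against vp ... */
		vn_close(vp, FREAD | FWRITE, NULL);
	}
#endif
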
417 /*
418  * Checks for special conditions on the vnode which might prevent writing.
419  * It is typically called after the vnode has been locked.  The vnode might
420  * or might not be locked as of this call, but it will at least be referenced.
421  *
422  * Also re-checks the mount RDONLY flag that ncp_writechk() checked prior
423  * to the vnode being locked.
424  */
425 int
426 vn_writechk(struct vnode *vp)
427 {
428 	/*
429 	 * If there's shared text associated with
430 	 * the vnode, try to free it up once.  If
431 	 * we fail, we can't allow writing.
432 	 */
433 	if (vp->v_flag & VTEXT)
434 		return (ETXTBSY);
435 	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_RDONLY))
436 		return (EROFS);
437 	return 0;
438 }
439 
440 /*
441  * Check whether the underlying mount is read-only.  The mount point
442  * referenced by the namecache may be different from the mount point
443  * used by the underlying vnode in the case of NULLFS, so a separate
444  * check is needed.
445  *
446  * Must be called PRIOR to any vnodes being locked.
447  */
448 int
449 ncp_writechk(struct nchandle *nch)
450 {
451 	struct mount *mp;
452 
453 	if ((mp = nch->mount) != NULL) {
454 		if (mp->mnt_flag & MNT_RDONLY)
455 			return (EROFS);
456 		if (mp->mnt_op->vfs_modifying != vfs_stdmodifying)
457 			VFS_MODIFYING(mp);
458 	}
459 	return(0);
460 }
461 
462 /*
463  * Vnode close call
464  *
465  * MPSAFE
466  */
467 int
468 vn_close(struct vnode *vp, int flags, struct file *fp)
469 {
470 	int error;
471 
472 	error = vn_lock(vp, LK_SHARED | LK_RETRY | LK_FAILRECLAIM);
473 	if (error == 0) {
474 		error = VOP_CLOSE(vp, flags, fp);
475 		vn_unlock(vp);
476 	}
477 	vrele(vp);
478 	return (error);
479 }
480 
481 /*
482  * Sequential heuristic.
483  *
484  * MPSAFE (f_seqcount and f_nextoff are allowed to race)
485  */
486 static __inline
487 int
488 sequential_heuristic(struct uio *uio, struct file *fp)
489 {
490 	/*
491 	 * Sequential heuristic - detect sequential operation
492 	 *
493 	 * NOTE: SMP: We allow f_seqcount updates to race.
494 	 */
495 	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
496 	    uio->uio_offset == fp->f_nextoff) {
497 		int tmpseq = fp->f_seqcount;
498 
499 		tmpseq += howmany(uio->uio_resid, MAXBSIZE);
500 		if (tmpseq > IO_SEQMAX)
501 			tmpseq = IO_SEQMAX;
502 		fp->f_seqcount = tmpseq;
503 		return(fp->f_seqcount << IO_SEQSHIFT);
504 	}
505 
506 	/*
507 	 * Not sequential, quick draw-down of seqcount
508 	 *
509 	 * NOTE: SMP: We allow f_seqcount updates to race.
510 	 */
511 	if (fp->f_seqcount > 1)
512 		fp->f_seqcount = 1;
513 	else
514 		fp->f_seqcount = 0;
515 	return(0);
516 }
517 
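/*
 * Worked example (assuming MAXBSIZE is 65536 and IO_SEQSHIFT is 16):
 * a 128KB read starting exactly at the previous f_nextoff adds
 * howmany(131072, 65536) = 2 to f_seqcount, and the heuristic returns
 * f_seqcount << 16 as a read-ahead hint for the filesystem's read
 * path.  A non-sequential access drops f_seqcount to 1 and a second
 * one clears it, so a single seek does not erase all of the
 * accumulated history.
 */
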
518 /*
519  * get - lock and return the f_offset field.
520  * set - set and unlock the f_offset field.
521  *
522  * These routines serve the dual purpose of serializing access to the
523  * f_offset field (at least on x86) and guaranteeing operational integrity
524  * when multiple read()ers and write()ers are present on the same fp.
525  *
526  * MPSAFE
527  */
528 static __inline off_t
529 vn_get_fpf_offset(struct file *fp)
530 {
531 	u_int	flags;
532 	u_int	nflags;
533 
534 	/*
535 	 * Shortcut critical path.
536 	 */
537 	flags = fp->f_flag & ~FOFFSETLOCK;
538 	if (atomic_cmpset_int(&fp->f_flag, flags, flags | FOFFSETLOCK))
539 		return(fp->f_offset);
540 
541 	/*
542 	 * The hard way
543 	 */
544 	for (;;) {
545 		flags = fp->f_flag;
546 		if (flags & FOFFSETLOCK) {
547 			nflags = flags | FOFFSETWAKE;
548 			tsleep_interlock(&fp->f_flag, 0);
549 			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
550 				tsleep(&fp->f_flag, PINTERLOCKED, "fpoff", 0);
551 		} else {
552 			nflags = flags | FOFFSETLOCK;
553 			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
554 				break;
555 		}
556 	}
557 	return(fp->f_offset);
558 }
559 
560 /*
561  * MPSAFE
562  */
563 static __inline void
564 vn_set_fpf_offset(struct file *fp, off_t offset)
565 {
566 	u_int	flags;
567 	u_int	nflags;
568 
569 	/*
570 	 * We hold the lock so we can set the offset without interference.
571 	 */
572 	fp->f_offset = offset;
573 
574 	/*
575 	 * Normal release is already a reasonably critical path.
576 	 */
577 	for (;;) {
578 		flags = fp->f_flag;
579 		nflags = flags & ~(FOFFSETLOCK | FOFFSETWAKE);
580 		if (atomic_cmpset_int(&fp->f_flag, flags, nflags)) {
581 			if (flags & FOFFSETWAKE)
582 				wakeup(&fp->f_flag);
583 			break;
584 		}
585 	}
586 }
587 
588 /*
589  * MPSAFE
590  */
591 static __inline off_t
592 vn_poll_fpf_offset(struct file *fp)
593 {
594 #if defined(__x86_64__)
595 	return(fp->f_offset);
596 #else
597 	off_t off = vn_get_fpf_offset(fp);
598 	vn_set_fpf_offset(fp, off);
599 	return(off);
600 #endif
601 }
602 
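/*
 * Illustrative pairing, mirroring vn_read()/vn_write() further below:
 * the offset lock is held across the entire I/O so concurrent read()s
 * and write()s on the same fp each see a consistent f_offset.
 */
#if 0
	uio->uio_offset = vn_get_fpf_offset(fp);   /* acquire lock + read */
	/* ... perform the I/O, advancing uio->uio_offset ... */
	vn_set_fpf_offset(fp, uio->uio_offset);    /* write + release lock */
#endif
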
603 /*
604  * Package up an I/O request on a vnode into a uio and do it.
605  *
606  * MPSAFE
607  */
608 int
609 vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
610 	off_t offset, enum uio_seg segflg, int ioflg,
611 	struct ucred *cred, int *aresid)
612 {
613 	struct uio auio;
614 	struct iovec aiov;
615 	int error;
616 
617 	if ((ioflg & IO_NODELOCKED) == 0)
618 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
619 	auio.uio_iov = &aiov;
620 	auio.uio_iovcnt = 1;
621 	aiov.iov_base = base;
622 	aiov.iov_len = len;
623 	auio.uio_resid = len;
624 	auio.uio_offset = offset;
625 	auio.uio_segflg = segflg;
626 	auio.uio_rw = rw;
627 	auio.uio_td = curthread;
628 	if (rw == UIO_READ) {
629 		error = VOP_READ(vp, &auio, ioflg, cred);
630 	} else {
631 		error = VOP_WRITE(vp, &auio, ioflg, cred);
632 	}
633 	if (aresid)
634 		*aresid = auio.uio_resid;
635 	else
636 		if (auio.uio_resid && error == 0)
637 			error = EIO;
638 	if ((ioflg & IO_NODELOCKED) == 0)
639 		vn_unlock(vp);
640 	return (error);
641 }
642 
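/*
 * Minimal sketch (assumed caller): read the first 512 bytes of a
 * referenced, unlocked vnode into a kernel buffer.  vn_rdwr() locks
 * the vnode itself unless IO_NODELOCKED is passed, and because aresid
 * is NULL here a short read with no other error is reported as EIO.
 */
#if 0
	char buf[512];
	int error;

	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0,
			UIO_SYSSPACE, 0, proc0.p_ucred, NULL);
#endif
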
643 /*
644  * Package up an I/O request on a vnode into a uio and do it.  The I/O
645  * request is split up into smaller chunks and we try to avoid saturating
646  * the buffer cache while potentially holding a vnode locked, so we
647  * check bwillwrite() before calling vn_rdwr().  We also call lwkt_user_yield()
648  * to give other processes a chance to lock the vnode (either other processes
649  * core'ing the same binary, or unrelated processes scanning the directory).
650  *
651  * MPSAFE
652  */
653 int
654 vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
655 		 off_t offset, enum uio_seg segflg, int ioflg,
656 		 struct ucred *cred, int *aresid)
657 {
658 	int error = 0;
659 
660 	do {
661 		int chunk;
662 
663 		/*
664 		 * Force `offset' to a multiple of MAXBSIZE except possibly
665 		 * for the first chunk, so that filesystems only need to
666 		 * write full blocks except possibly for the first and last
667 		 * chunks.
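		 *
		 * For example, assuming MAXBSIZE is 65536: at offset
		 * 100000 the first chunk is 65536 - (100000 % 65536) =
		 * 31072 bytes, which advances offset to 131072, a
		 * multiple of MAXBSIZE, so every subsequent chunk
		 * starts block-aligned.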
668 		 */
669 		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;
670 
671 		if (chunk > len)
672 			chunk = len;
673 		if (vp->v_type == VREG && (ioflg & IO_RECURSE) == 0) {
674 			switch(rw) {
675 			case UIO_READ:
676 				bwillread(chunk);
677 				break;
678 			case UIO_WRITE:
679 				bwillwrite(chunk);
680 				break;
681 			}
682 		}
683 		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
684 				ioflg, cred, aresid);
685 		len -= chunk;	/* aresid calc already includes length */
686 		if (error)
687 			break;
688 		offset += chunk;
689 		base += chunk;
690 		lwkt_user_yield();
691 	} while (len);
692 	if (aresid)
693 		*aresid += len;
694 	return (error);
695 }
696 
697 /*
698  * File pointers can no longer get ripped up by revoke so
699  * we don't need to lock access to the vp.
700  *
701  * f_offset updates are not guaranteed against multiple readers
702  */
703 static int
704 vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
705 {
706 	struct vnode *vp;
707 	int error, ioflag;
708 
709 	KASSERT(uio->uio_td == curthread,
710 		("uio_td %p is not td %p", uio->uio_td, curthread));
711 	vp = (struct vnode *)fp->f_data;
712 
713 	ioflag = 0;
714 	if (flags & O_FBLOCKING) {
715 		/* ioflag &= ~IO_NDELAY; */
716 	} else if (flags & O_FNONBLOCKING) {
717 		ioflag |= IO_NDELAY;
718 	} else if (fp->f_flag & FNONBLOCK) {
719 		ioflag |= IO_NDELAY;
720 	}
721 	if (fp->f_flag & O_DIRECT) {
722 		ioflag |= IO_DIRECT;
723 	}
724 	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
725 		uio->uio_offset = vn_get_fpf_offset(fp);
726 	vn_lock(vp, LK_SHARED | LK_RETRY);
727 	ioflag |= sequential_heuristic(uio, fp);
728 
729 	error = VOP_READ_FP(vp, uio, ioflag, cred, fp);
730 	fp->f_nextoff = uio->uio_offset;
731 	vn_unlock(vp);
732 	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
733 		vn_set_fpf_offset(fp, uio->uio_offset);
734 	return (error);
735 }
736 
737 /*
738  * MPSAFE
739  */
740 static int
741 vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
742 {
743 	struct vnode *vp;
744 	int error, ioflag;
745 
746 	KASSERT(uio->uio_td == curthread,
747 		("uio_td %p is not p %p", uio->uio_td, curthread));
748 	vp = (struct vnode *)fp->f_data;
749 
750 	ioflag = IO_UNIT;
751 	if (vp->v_type == VREG &&
752 	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
753 		ioflag |= IO_APPEND;
754 	}
755 
756 	if (flags & O_FBLOCKING) {
757 		/* ioflag &= ~IO_NDELAY; */
758 	} else if (flags & O_FNONBLOCKING) {
759 		ioflag |= IO_NDELAY;
760 	} else if (fp->f_flag & FNONBLOCK) {
761 		ioflag |= IO_NDELAY;
762 	}
763 	if (fp->f_flag & O_DIRECT) {
764 		ioflag |= IO_DIRECT;
765 	}
766 	if (flags & O_FASYNCWRITE) {
767 		/* ioflag &= ~IO_SYNC; */
768 	} else if (flags & O_FSYNCWRITE) {
769 		ioflag |= IO_SYNC;
770 	} else if (fp->f_flag & O_FSYNC) {
771 		ioflag |= IO_SYNC;
772 	}
773 
774 	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
775 		ioflag |= IO_SYNC;
776 	if ((flags & O_FOFFSET) == 0)
777 		uio->uio_offset = vn_get_fpf_offset(fp);
778 	if (vp->v_mount)
779 		VFS_MODIFYING(vp->v_mount);
780 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
781 	ioflag |= sequential_heuristic(uio, fp);
782 	error = VOP_WRITE_FP(vp, uio, ioflag, cred, fp);
783 	fp->f_nextoff = uio->uio_offset;
784 	vn_unlock(vp);
785 	if ((flags & O_FOFFSET) == 0)
786 		vn_set_fpf_offset(fp, uio->uio_offset);
787 	return (error);
788 }
789 
790 /*
791  * MPSAFE
792  */
793 static int
794 vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
795 {
796 	struct vnode *vp;
797 	int error;
798 
799 	vp = (struct vnode *)fp->f_data;
800 	error = vn_stat(vp, sb, cred);
801 	return (error);
802 }
803 
804 /*
805  * MPSAFE
806  */
807 int
808 vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
809 {
810 	struct vattr vattr;
811 	struct vattr *vap;
812 	int error;
813 	u_short mode;
814 	cdev_t dev;
815 
816 	/*
817 	 * vp already has a ref and is validated, can call unlocked.
818 	 */
819 	vap = &vattr;
820 	error = VOP_GETATTR(vp, vap);
821 	if (error)
822 		return (error);
823 
824 	/*
825 	 * Zero the spare stat fields
826 	 */
827 	sb->st_lspare = 0;
828 	sb->st_qspare2 = 0;
829 
830 	/*
831 	 * Copy from vattr table
832 	 */
833 	if (vap->va_fsid != VNOVAL)
834 		sb->st_dev = vap->va_fsid;
835 	else
836 		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
837 	sb->st_ino = vap->va_fileid;
838 	mode = vap->va_mode;
839 	switch (vap->va_type) {
840 	case VREG:
841 		mode |= S_IFREG;
842 		break;
843 	case VDATABASE:
844 		mode |= S_IFDB;
845 		break;
846 	case VDIR:
847 		mode |= S_IFDIR;
848 		break;
849 	case VBLK:
850 		mode |= S_IFBLK;
851 		break;
852 	case VCHR:
853 		mode |= S_IFCHR;
854 		break;
855 	case VLNK:
856 		mode |= S_IFLNK;
857 		/* This is a cosmetic change, symlinks do not have a mode. */
858 		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
859 			mode &= ~ACCESSPERMS;	/* 0000 */
860 		else
861 			mode |= ACCESSPERMS;	/* 0777 */
862 		break;
863 	case VSOCK:
864 		mode |= S_IFSOCK;
865 		break;
866 	case VFIFO:
867 		mode |= S_IFIFO;
868 		break;
869 	default:
870 		return (EBADF);
871 	}
872 	sb->st_mode = mode;
873 	if (vap->va_nlink > (nlink_t)-1)
874 		sb->st_nlink = (nlink_t)-1;
875 	else
876 		sb->st_nlink = vap->va_nlink;
877 	sb->st_uid = vap->va_uid;
878 	sb->st_gid = vap->va_gid;
879 	sb->st_rdev = devid_from_dev(vp->v_rdev);
880 	sb->st_size = vap->va_size;
881 	sb->st_atimespec = vap->va_atime;
882 	sb->st_mtimespec = vap->va_mtime;
883 	sb->st_ctimespec = vap->va_ctime;
884 
885 	/*
886 	 * A VCHR or VBLK device may track the last access and last modified
887 	 * time independently of the filesystem.  This matters because
888 	 * device read and write calls may bypass the filesystem.
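	 *
	 * si_lastread/si_lastwrite are recorded on the time_uptime
	 * scale, so they are converted to wall-clock time below by
	 * adding the current (time_second - time_uptime) offset.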
889 	 */
890 	if (vp->v_type == VCHR || vp->v_type == VBLK) {
891 		dev = vp->v_rdev;
892 		if (dev != NULL) {
893 			if (dev->si_lastread) {
894 				sb->st_atimespec.tv_sec = time_second +
895 							  (dev->si_lastread -
896 							   time_uptime);
897 				sb->st_atimespec.tv_nsec = 0;
898 			}
899 			if (dev->si_lastwrite) {
900 				sb->st_mtimespec.tv_sec = time_second +
901 							  (dev->si_lastwrite -
902 							   time_uptime);
903 				sb->st_mtimespec.tv_nsec = 0;
904 			}
905 		}
906 	}
907 
908 	/*
909 	 * According to www.opengroup.org, the meaning of st_blksize is
910 	 *   "a filesystem-specific preferred I/O block size for this
911 	 *    object.  In some filesystem types, this may vary from file
912 	 *    to file"
913 	 * Default to PAGE_SIZE after much discussion.
914 	 */
915 
916 	if (vap->va_type == VREG) {
917 		sb->st_blksize = vap->va_blocksize;
918 	} else if (vn_isdisk(vp, NULL)) {
919 		/*
920 		 * XXX this is broken.  If the device is not yet open (aka
921 		 * stat() call, aka v_rdev == NULL), how are we supposed
922 		 * to get a valid block size out of it?
923 		 */
924 		dev = vp->v_rdev;
925 
926 		sb->st_blksize = dev->si_bsize_best;
927 		if (sb->st_blksize < dev->si_bsize_phys)
928 			sb->st_blksize = dev->si_bsize_phys;
929 		if (sb->st_blksize < BLKDEV_IOSIZE)
930 			sb->st_blksize = BLKDEV_IOSIZE;
931 	} else {
932 		sb->st_blksize = PAGE_SIZE;
933 	}
934 
935 	sb->st_flags = vap->va_flags;
936 
937 	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
938 	if (error)
939 		sb->st_gen = 0;
940 	else
941 		sb->st_gen = (u_int32_t)vap->va_gen;
942 
943 	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
944 
945 	/*
946 	 * This is for ABI compatibility <= 5.7 (for ABI change made in
947 	 * 5.7 master).
948 	 */
949 	sb->__old_st_blksize = sb->st_blksize;
950 
951 	return (0);
952 }
953 
954 /*
955  * MPALMOSTSAFE - acquires mplock
956  */
957 static int
958 vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred,
959 	 struct sysmsg *msg)
960 {
961 	struct vnode *vp = ((struct vnode *)fp->f_data);
962 	struct vnode *ovp;
963 	struct vattr vattr;
964 	int error;
965 	off_t size;
966 
967 	switch (vp->v_type) {
968 	case VREG:
969 	case VDIR:
970 		if (com == FIONREAD) {
971 			error = VOP_GETATTR(vp, &vattr);
972 			if (error)
973 				break;
974 			size = vattr.va_size;
975 			if ((vp->v_flag & VNOTSEEKABLE) == 0)
976 				size -= vn_poll_fpf_offset(fp);
977 			if (size > 0x7FFFFFFF)
978 				size = 0x7FFFFFFF;
979 			*(int *)data = size;
980 			error = 0;
981 			break;
982 		}
983 		if (com == FIOASYNC) {				/* XXX */
984 			error = 0;				/* XXX */
985 			break;
986 		}
987 		/* fall into ... */
988 	default:
989 #if 0
990 		return (ENOTTY);
991 #endif
992 	case VFIFO:
993 	case VCHR:
994 	case VBLK:
995 		if (com == FIODTYPE) {
996 			if (vp->v_type != VCHR && vp->v_type != VBLK) {
997 				error = ENOTTY;
998 				break;
999 			}
1000 			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
1001 			error = 0;
1002 			break;
1003 		}
1004 		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred, msg);
1005 		if (error == 0 && com == TIOCSCTTY) {
1006 			struct proc *p = curthread->td_proc;
1007 			struct session *sess;
1008 
1009 			if (p == NULL) {
1010 				error = ENOTTY;
1011 				break;
1012 			}
1013 
1014 			get_mplock();
1015 			sess = p->p_session;
1016 			/* Do nothing if reassigning same control tty */
1017 			if (sess->s_ttyvp == vp) {
1018 				error = 0;
1019 				rel_mplock();
1020 				break;
1021 			}
1022 
1023 			/* Get rid of reference to old control tty */
1024 			ovp = sess->s_ttyvp;
1025 			vref(vp);
1026 			sess->s_ttyvp = vp;
1027 			if (ovp)
1028 				vrele(ovp);
1029 			rel_mplock();
1030 		}
1031 		break;
1032 	}
1033 	return (error);
1034 }
1035 
1036 /*
1037  * Obtain the requested vnode lock
1038  *
1039  *	LK_RETRY	Automatically retry on timeout
1040  *	LK_FAILRECLAIM	Fail if the vnode is being reclaimed
1041  *
1042  * Failures will occur if the vnode is undergoing recyclement, but not
1043  * all callers expect that the function will fail so the caller must pass
1044  * LK_FAILOK if it wants to process an error code.
1045  *
1046  * Errors can occur for other reasons if you pass in other LK_ flags,
1047  * regardless of whether you pass in LK_FAILRECLAIM
1048  */
1049 int
1050 vn_lock(struct vnode *vp, int flags)
1051 {
1052 	int error;
1053 
1054 	do {
1055 		error = lockmgr(&vp->v_lock, flags);
1056 		if (error == 0)
1057 			break;
1058 	} while (flags & LK_RETRY);
1059 
1060 	/*
1061 	 * Because we (had better!) have a ref on the vnode, once it
1062 	 * goes to VRECLAIMED state it will not be recycled until all
1063 	 * refs go away.  So we can just check the flag.
1064 	 */
1065 	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
1066 		if (flags & LK_FAILRECLAIM) {
1067 			lockmgr(&vp->v_lock, LK_RELEASE);
1068 			error = ENOENT;
1069 		}
1070 	}
1071 	return (error);
1072 }
1073 
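/*
 * Minimal sketch (assumed caller): the common "lock or learn that the
 * vnode is going away" idiom.  With LK_RETRY alone the lock always
 * succeeds eventually, even on a reclaimed vnode; adding
 * LK_FAILRECLAIM turns reclamation into an ENOENT error instead.
 */
#if 0
	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_FAILRECLAIM);
	if (error) {
		/* vp was reclaimed; still referenced but unusable */
		return (error);
	}
	/* ... operate on the locked vnode ... */
	vn_unlock(vp);
#endif
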
1074 int
1075 vn_relock(struct vnode *vp, int flags)
1076 {
1077 	int error;
1078 
1079 	do {
1080 		error = lockmgr(&vp->v_lock, flags);
1081 		if (error == 0)
1082 			break;
1083 	} while (flags & LK_RETRY);
1084 
1085 	return error;
1086 }
1087 
1088 #ifdef DEBUG_VN_UNLOCK
1089 
1090 void
1091 debug_vn_unlock(struct vnode *vp, const char *filename, int line)
1092 {
1093 	kprintf("vn_unlock from %s:%d\n", filename, line);
1094 	lockmgr(&vp->v_lock, LK_RELEASE);
1095 }
1096 
1097 #else
1098 
1099 void
1100 vn_unlock(struct vnode *vp)
1101 {
1102 	lockmgr(&vp->v_lock, LK_RELEASE);
1103 }
1104 
1105 #endif
1106 
1107 /*
1108  * MPSAFE
1109  */
1110 int
1111 vn_islocked(struct vnode *vp)
1112 {
1113 	return (lockstatus(&vp->v_lock, curthread));
1114 }
1115 
1116 /*
1117  * Return the lock status of a vnode and unlock the vnode
1118  * if we owned the lock.  This is not a boolean, if the
1119  * caller cares what the lock status is the caller must
1120  * check the various possible values.
1121  *
1122  * This only unlocks exclusive locks held by the caller,
1123  * it will NOT unlock shared locks (there is no way to
1124  * tell who the shared lock belongs to).
1125  *
1126  * MPSAFE
1127  */
1128 int
1129 vn_islocked_unlock(struct vnode *vp)
1130 {
1131 	int vpls;
1132 
1133 	vpls = lockstatus(&vp->v_lock, curthread);
1134 	if (vpls == LK_EXCLUSIVE)
1135 		lockmgr(&vp->v_lock, LK_RELEASE);
1136 	return(vpls);
1137 }
1138 
1139 /*
1140  * Restore a vnode lock that we previously released via
1141  * vn_islocked_unlock().  This is a NOP if we did not
1142  * own the original lock.
1143  *
1144  * MPSAFE
1145  */
1146 void
1147 vn_islocked_relock(struct vnode *vp, int vpls)
1148 {
1151 	if (vpls == LK_EXCLUSIVE)
1152 		lockmgr(&vp->v_lock, vpls);
1153 }
1154 
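/*
 * Illustrative pattern (assumed caller): temporarily drop an exclusive
 * vnode lock around a blocking operation and restore it afterwards,
 * without the caller having to know whether it actually held the lock.
 */
#if 0
	int vpls;

	vpls = vn_islocked_unlock(vp);	/* releases only our excl lock */
	/* ... blocking operation that must not hold vp locked ... */
	vn_islocked_relock(vp, vpls);	/* NOP if we did not own it */
#endif
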
1155 /*
1156  * MPSAFE
1157  */
1158 static int
1159 vn_closefile(struct file *fp)
1160 {
1161 	int error;
1162 
1163 	fp->f_ops = &badfileops;
1164 	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag, fp);
1165 	return (error);
1166 }
1167 
1168 /*
1169  * MPSAFE
1170  */
1171 static int
1172 vn_kqfilter(struct file *fp, struct knote *kn)
1173 {
1174 	int error;
1175 
1176 	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
1177 	return (error);
1178 }
1179