/* /dragonfly/sys/kern/vfs_vnops.c (revision 255da09c) */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_vnops.c,v 1.58 2008/06/28 17:59:49 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <sys/thread2.h>

static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct ucred *cred, struct sysmsg *msg);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int vn_poll (struct file *fp, int events, struct ucred *cred);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);

#ifdef SMP
static int read_mpsafe = 0;
SYSCTL_INT(_vfs, OID_AUTO, read_mpsafe, CTLFLAG_RW, &read_mpsafe, 0, "");
static int write_mpsafe = 0;
SYSCTL_INT(_vfs, OID_AUTO, write_mpsafe, CTLFLAG_RW, &write_mpsafe, 0, "");
static int getattr_mpsafe = 0;
SYSCTL_INT(_vfs, OID_AUTO, getattr_mpsafe, CTLFLAG_RW, &getattr_mpsafe, 0, "");
#else
#define read_mpsafe	0
#define write_mpsafe	0
#define getattr_mpsafe	0
#endif

struct fileops vnode_fileops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};

/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_OPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 * is also installed in the file pointer.
 */
int
vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
{
	struct vnode *vp;
	struct ucred *cred = nd->nl_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int error;

	/*
	 * Certain combinations are illegal
	 */
	if ((fmode & (FWRITE | O_TRUNC)) == O_TRUNC)
		return(EACCES);

	/*
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_nch will be returned.
	 *
	 * The result of this section should be a locked vnode.
	 *
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	 */
	nd->nl_flags |= NLC_OPEN;
	if (fmode & O_APPEND)
		nd->nl_flags |= NLC_APPEND;
	if (fmode & O_TRUNC)
		nd->nl_flags |= NLC_TRUNCATE;
	if (fmode & FREAD)
		nd->nl_flags |= NLC_READ;
	if (fmode & FWRITE)
		nd->nl_flags |= NLC_WRITE;
	if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
		nd->nl_flags |= NLC_FOLLOW;

	if (fmode & O_CREAT) {
		/*
		 * CONDITIONAL CREATE FILE CASE
		 *
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		 * is returned.
		 */
		nd->nl_flags |= NLC_CREATE;
		nd->nl_flags |= NLC_REFDVP;
		bwillinode(1);
		error = nlookup(nd);
	} else {
		/*
		 * NORMAL OPEN FILE CASE
		 */
		error = nlookup(nd);
	}

	if (error)
		return (error);

	/*
	 * Split case to allow us to re-resolve and retry the ncp in case
	 * we get ESTALE.
	 */
again:
	if (fmode & O_CREAT) {
		if (nd->nl_nch.ncp->nc_vp == NULL) {
			if ((error = ncp_writechk(&nd->nl_nch)) != 0)
				return (error);
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp,
					    nd->nl_cred, vap);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			/* locked vnode is returned */
		} else {
			if (fmode & O_EXCL) {
				error = EEXIST;
			} else {
				error = cache_vget(&nd->nl_nch, cred,
						    LK_EXCLUSIVE, &vp);
			}
			if (error)
				return (error);
			fmode &= ~O_CREAT;
		}
	} else {
		error = cache_vget(&nd->nl_nch, cred, LK_EXCLUSIVE, &vp);
		if (error)
			return (error);
	}

	/*
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_nch is left intact.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp, &nd->nl_nch);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(&nd->nl_nch);
					error = cache_resolve(&nd->nl_nch, cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
		}
	}
	if (fmode & O_TRUNC) {
		vn_unlock(vp);				/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR(vp, vap, cred);
		if (error)
			goto bad;
	}

	/*
	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
	 * associated with the fp yet so we own it clean.
	 *
	 * f_nchandle inherits nl_nch.  This used to be necessary only for
	 * directories but now we do it unconditionally so f*() ops
	 * such as fchmod() can access the actual namespace that was
	 * used to open the file.
	 */
	if (fp) {
		if (nd->nl_flags & NLC_APPENDONLY)
			fmode |= FAPPENDONLY;
		fp->f_nchandle = nd->nl_nch;
		cache_zero(&nd->nl_nch);
		cache_unlock(&fp->f_nchandle);
	}

	/*
	 * Get rid of nl_nch.  vn_open does not return it (it returns the
	 * vnode or the file pointer).  Note: we can't leave nl_nch locked
	 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
	 * on /dev/ttyd0
	 */
	if (nd->nl_nch.ncp)
		cache_put(&nd->nl_nch);

	error = VOP_OPEN(vp, fmode, cred, fp);
	if (error) {
		/*
		 * Setting f_ops to &badfileops will prevent the descriptor
		 * code from trying to close and release the vnode.  Since
		 * the open failed we do not want to call close.
		 */
		if (fp) {
			fp->f_data = NULL;
			fp->f_ops = &badfileops;
		}
		goto bad;
	}

#if 0
	/*
	 * Assert that VREG files have been setup for vmio.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));
#endif

	/*
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	 */
	if (fp == NULL) {
		nd->nl_open_vp = vp;
		nd->nl_vp_fmode = fmode;
		if ((nd->nl_flags & NLC_LOCKVP) == 0)
			vn_unlock(vp);
	} else {
		vput(vp);
	}
	return (0);
bad:
	if (vp)
		vput(vp);
	return (error);
}
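
/*
 * Illustrative usage sketch (not part of the original file): the typical
 * nlookup_init() / vn_open() / nlookup_done() sequence described in the
 * comment above, requesting a referenced, locked vnode via NLC_LOCKVP.
 * The function name and error handling are hypothetical.  nlookup_done()
 * is expected to clean up a still-set nl_open_vp, so the caller takes
 * ownership by clearing the field first.
 */
#if 0
static int
example_open_path(const char *path, struct vnode **vpp)
{
	struct nlookupdata nd;
	int error;

	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW | NLC_LOCKVP);
	if (error == 0)
		error = vn_open(&nd, NULL, FREAD, 0);
	if (error == 0) {
		*vpp = nd.nl_open_vp;		/* referenced and locked */
		nd.nl_open_vp = NULL;
	}
	nlookup_done(&nd);
	return (error);
}
#endif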

/*
 * Open a disk device by name.  A leading "/dev/" prefix, if present,
 * is stripped and the device is looked up via getsynthvnode().  On
 * success a referenced, unlocked vnode is returned in *vpp.
 */
int
vn_opendisk(const char *devname, int fmode, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	if (strncmp(devname, "/dev/", 5) == 0)
		devname += 5;
	if ((vp = getsynthvnode(devname)) == NULL) {
		error = ENODEV;
	} else {
		error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL);
		vn_unlock(vp);
		if (error) {
			vrele(vp);
			vp = NULL;
		}
	}
	*vpp = vp;
	return (error);
}

/*
 * Check for write permissions on the specified vnode.  nch may be NULL.
 */
int
vn_writechk(struct vnode *vp, struct nchandle *nch)
{
	/*
	 * If there's shared text associated with the vnode we cannot
	 * allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);

	/*
	 * If the vnode represents a regular file, check the mount
	 * point via the nch.  This may be a different mount point
	 * than the one embedded in the vnode (e.g. nullfs).
	 *
	 * We can still write to non-regular files (e.g. devices)
	 * via read-only mounts.
	 */
	if (nch && nch->ncp && vp->v_type == VREG)
		return (ncp_writechk(nch));
	return (0);
}

/*
 * Check whether the underlying mount is read-only.  The mount point
 * referenced by the namecache may be different from the mount point
 * used by the underlying vnode in the case of NULLFS, so a separate
 * check is needed.
 */
int
ncp_writechk(struct nchandle *nch)
{
	if (nch->mount && (nch->mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	return(0);
}
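
/*
 * Illustrative example (not part of the original file): consider a
 * read-only NULLFS mount layered over a read-write filesystem.  The
 * underlying vnode's v_mount is writable, but the nchandle resolved
 * through the NULLFS mount carries MNT_RDONLY, so ncp_writechk()
 * correctly rejects the write with EROFS.
 */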

/*
 * Vnode close call
 */
int
vn_close(struct vnode *vp, int flags)
{
	int error;

	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (error == 0) {
		error = VOP_CLOSE(vp, flags);
		vn_unlock(vp);
	}
	vrele(vp);
	return (error);
}

static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}
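
/*
 * Worked example (illustrative): assuming the stock 16KB BKVASIZE, a
 * process issuing back-to-back 64KB read()s at ascending offsets adds
 * (65536 + BKVASIZE - 1) / BKVASIZE = 4 to f_seqcount per call until
 * it saturates at IO_SEQMAX.  The return value, f_seqcount shifted
 * left by IO_SEQSHIFT, lands in the ioflag bits that filesystems use
 * to scale read-ahead.
 */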

/*
 * get - lock and return the f_offset field.
 * set - set and unlock the f_offset field.
 *
 * These routines serve the dual purpose of serializing access to the
 * f_offset field (at least on i386) and guaranteeing operational integrity
 * when multiple read()ers and write()ers are present on the same fp.
 */
static __inline off_t
vn_get_fpf_offset(struct file *fp)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * Shortcut critical path.
	 */
	flags = fp->f_flag & ~FOFFSETLOCK;
	if (atomic_cmpset_int(&fp->f_flag, flags, flags | FOFFSETLOCK))
		return(fp->f_offset);

	/*
	 * The hard way
	 */
	for (;;) {
		flags = fp->f_flag;
		if (flags & FOFFSETLOCK) {
			nflags = flags | FOFFSETWAKE;
			tsleep_interlock(&fp->f_flag, 0);
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				tsleep(&fp->f_flag, PINTERLOCKED, "fpoff", 0);
		} else {
			nflags = flags | FOFFSETLOCK;
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				break;
		}
	}
	return(fp->f_offset);
}

static __inline void
vn_set_fpf_offset(struct file *fp, off_t offset)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * We hold the lock so we can set the offset without interference.
	 */
	fp->f_offset = offset;

	/*
	 * Normal release is already a reasonably critical path.
	 */
	for (;;) {
		flags = fp->f_flag;
		nflags = flags & ~(FOFFSETLOCK | FOFFSETWAKE);
		if (atomic_cmpset_int(&fp->f_flag, flags, nflags)) {
			if (flags & FOFFSETWAKE)
				wakeup(&fp->f_flag);
			break;
		}
	}
}

static __inline off_t
vn_poll_fpf_offset(struct file *fp)
{
#if defined(__amd64__) || !defined(SMP)
	return(fp->f_offset);
#else
	off_t off = vn_get_fpf_offset(fp);
	vn_set_fpf_offset(fp, off);
	return(off);
#endif
}
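
/*
 * Illustrative usage sketch (not part of the original file): callers
 * that consume the shared offset bracket the transfer between the get
 * and set routines, which is what vn_read() and vn_write() below do
 * when O_FOFFSET is not specified.  The fragment assumes a uio/fp pair
 * in scope:
 */
#if 0
	uio->uio_offset = vn_get_fpf_offset(fp);	/* takes FOFFSETLOCK */
	/* ... perform the transfer; uio_offset advances ... */
	vn_set_fpf_offset(fp, uio->uio_offset);		/* releases the lock */
#endif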

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid)
{
	struct uio auio;
	struct iovec aiov;
	struct ccms_lock ccms_lock;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = curthread;
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, &auio);
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if (aresid)
		*aresid = auio.uio_resid;
	else if (auio.uio_resid && error == 0)
		error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		vn_unlock(vp);
	return (error);
}
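
/*
 * Illustrative usage sketch (not part of the original file): read the
 * first 512 bytes of a file into a kernel buffer.  The helper name is
 * hypothetical.  Passing a NULL aresid converts a short transfer into
 * EIO, as implemented above.
 */
#if 0
static int
example_read_header(struct vnode *vp, struct ucred *cred, char *buf)
{
	return (vn_rdwr(UIO_READ, vp, buf, 512, 0, UIO_SYSSPACE,
			0, cred, NULL));
}
#endif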

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillread()/bwillwrite() before calling vn_rdwr().  We also call
 * uio_yield() to give other processes a chance to lock the vnode (either
 * other processes core'ing the same binary, or unrelated processes
 * scanning the directory).
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
		 off_t offset, enum uio_seg segflg, int ioflg,
		 struct ucred *cred, int *aresid)
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (vp->v_type == VREG) {
			switch(rw) {
			case UIO_READ:
				bwillread(chunk);
				break;
			case UIO_WRITE:
				bwillwrite(chunk);
				break;
			}
		}
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
			    ioflg, cred, aresid);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}
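
/*
 * Worked example (illustrative): assuming the usual 64KB MAXBSIZE, a
 * 204800 byte write starting at offset 1000 is issued as chunks of
 * 64536 bytes (bringing the offset to the next 64KB boundary), then
 * 65536, 65536, and finally 9192 bytes, with a uio_yield() between
 * chunks so other threads get a chance at the vnode.
 */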

/*
 * MPALMOSTSAFE - acquires mplock
 *
 * File pointers can no longer get ripped up by revoke so
 * we don't need to lock access to the vp.
 *
 * f_offset updates are not guaranteed against multiple readers
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_SHARED | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);

	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	if (read_mpsafe && (vp->v_flag & VMP_READ)) {
		error = VOP_READ(vp, uio, ioflag, cred);
	} else {
		get_mplock();
		error = VOP_READ(vp, uio, ioflag, cred);
		rel_mplock();
	}
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	if (write_mpsafe && (vp->v_flag & VMP_WRITE)) {
		error = VOP_WRITE(vp, uio, ioflag, cred);
	} else {
		get_mplock();
		error = VOP_WRITE(vp, uio, ioflag, cred);
		rel_mplock();
	}
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	return (error);
}

/*
 * MPSAFE (if vnode has VMP_GETATTR)
 */
int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
	struct vattr vattr;
	struct vattr *vap;
	int error;
	u_short mode;
	cdev_t dev;

	vap = &vattr;
	if (getattr_mpsafe && (vp->v_flag & VMP_GETATTR)) {
		error = VOP_GETATTR(vp, vap);
	} else {
		get_mplock();
		error = VOP_GETATTR(vp, vap);
		rel_mplock();
	}
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDATABASE:
		mode |= S_IFDB;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/*
		 * This is a cosmetic change, symlinks do not have a mode.
		 * Operate on the local `mode' variable; sb->st_mode has
		 * not been assigned yet at this point.
		 */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = dev2udev(vp->v_rdev);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		dev = vp->v_rdev;
		if (dev != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = dev->si_lastread;
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = dev->si_lastwrite;
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Default to PAGE_SIZE after much discussion.
	 */

	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		/*
		 * XXX this is broken.  If the device is not yet open (aka
		 * stat() call, aka v_rdev == NULL), how are we supposed
		 * to get a valid block size out of it?
		 */
		dev = vp->v_rdev;

		sb->st_blksize = dev->si_bsize_best;
		if (sb->st_blksize < dev->si_bsize_phys)
			sb->st_blksize = dev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;

	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
	if (error)
		sb->st_gen = 0;
	else
		sb->st_gen = (u_int32_t)vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	sb->st_fsmid = vap->va_fsmid;
	return (0);
}
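
/*
 * Worked example (illustrative): a VREG vnode with va_mode 0644 yields
 * st_mode = S_IFREG | 0644 = 0100644, and a file with va_bytes == 3072
 * reports st_blocks = 3072 / S_BLKSIZE = 6, S_BLKSIZE being the
 * historical 512-byte unit.
 */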

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred,
	 struct sysmsg *msg)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vnode *ovp;
	struct vattr vattr;
	int error;
	off_t size;

	get_mplock();

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr);
			if (error)
				break;
			size = vattr.va_size;
			if ((vp->v_flag & VNOTSEEKABLE) == 0)
				size -= vn_poll_fpf_offset(fp);
			if (size > 0x7FFFFFFF)
				size = 0x7FFFFFFF;
			*(int *)data = size;
			error = 0;
			break;
		}
		if (com == FIOASYNC) {				/* XXX */
			error = 0;				/* XXX */
			break;
		}
		/* fall into ... */
	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK) {
				error = ENOTTY;
				break;
			}
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			error = 0;
			break;
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred, msg);
		if (error == 0 && com == TIOCSCTTY) {
			struct proc *p = curthread->td_proc;
			struct session *sess;

			if (p == NULL) {
				error = ENOTTY;
				break;
			}

			sess = p->p_session;
			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp) {
				error = 0;
				break;
			}

			/* Get rid of reference to old control tty */
			ovp = sess->s_ttyvp;
			vref(vp);
			sess->s_ttyvp = vp;
			if (ovp)
				vrele(ovp);
		}
		break;
	}
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_poll(struct file *fp, int events, struct ucred *cred)
{
	int error;

	get_mplock();
	error = VOP_POLL(((struct vnode *)fp->f_data), events, cred);
	rel_mplock();
	return (error);
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
#ifndef	DEBUG_LOCKS
vn_lock(struct vnode *vp, int flags)
#else
debug_vn_lock(struct vnode *vp, int flags, const char *filename, int line)
#endif
{
	int error;

	do {
#ifdef	DEBUG_LOCKS
		vp->filename = filename;
		vp->line = line;
		error = debuglockmgr(&vp->v_lock, flags,
				     "vn_lock", filename, line);
#else
		error = lockmgr(&vp->v_lock, flags);
#endif
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	/*
	 * Because we (had better!) have a ref on the vnode, once it
	 * goes to VRECLAIMED state it will not be recycled until all
	 * refs go away.  So we can just check the flag.
	 */
	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
		lockmgr(&vp->v_lock, LK_RELEASE);
		error = ENOENT;
	}
	return (error);
}

void
vn_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

int
vn_islocked(struct vnode *vp)
{
	return (lockstatus(&vp->v_lock, curthread));
}
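
/*
 * Illustrative usage sketch (not part of the original file): even with
 * LK_RETRY, vn_lock() can fail with ENOENT once the vnode has been
 * reclaimed, so callers holding only a reference must still check the
 * return value:
 */
#if 0
	if (vn_lock(vp, LK_EXCLUSIVE | LK_RETRY) == 0) {
		/* ... operate on the locked, still-valid vnode ... */
		vn_unlock(vp);
	} else {
		/* vnode was reclaimed; vp may no longer be used */
	}
#endif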

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_closefile(struct file *fp)
{
	int error;

	get_mplock();
	fp->f_ops = &badfileops;
	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag);
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	get_mplock();
	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
	rel_mplock();
	return (error);
}