xref: /dragonfly/sys/kern/vfs_vnops.c (revision ce0e08e2)
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_vnops.c,v 1.58 2008/06/28 17:59:49 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>

static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct ucred *cred);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int svn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int vn_poll (struct file *fp, int events, struct ucred *cred);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int svn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);

struct fileops vnode_fileops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};

struct fileops specvnode_fileops = {
	.fo_read = svn_read,
	.fo_write = svn_write,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};

/*
 * Shortcut the device read/write.  This avoids a lot of vnode junk.
 * Basically the specfs vnops for read and write take the locked vnode,
 * unlock it (because we can't hold the vnode locked while reading or
 * writing a device, which may block indefinitely), issue the device
 * operation, then relock the vnode before returning, plus other junk.
 * This bypasses all of that and just does the device operation.
 */
void
vn_setspecops(struct file *fp)
{
	if (vfs_fastdev && fp->f_ops == &vnode_fileops) {
		fp->f_ops = &specvnode_fileops;
	}
}

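/*
 * Illustrative sketch (not part of the original file): once the shortcut
 * is installed, a kernel-level read on the file pointer dispatches through
 * f_ops and lands directly in svn_read() rather than going through
 * vn_read() and the VOP table:
 *
 *	error = fp->f_ops->fo_read(fp, uio, fp->f_cred, flags);
 *
 * The vfs_fastdev global gates the optimization (presumably exposed as a
 * sysctl), so clearing it keeps device files on the normal vnode_fileops
 * path.
 */
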
/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_NOPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 * is also installed in the file pointer.
 */
int
vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
{
	struct vnode *vp;
	struct ucred *cred = nd->nl_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int mode, error;

	/*
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_nch will be returned.
	 *
	 * The result of this section should be a locked vnode.
	 *
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	 */
	if (fmode & O_CREAT) {
		/*
		 * CONDITIONAL CREATE FILE CASE
		 *
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		 * is returned.
		 */
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			nd->nl_flags |= NLC_FOLLOW;
		nd->nl_flags |= NLC_CREATE;
		nd->nl_flags |= NLC_REFDVP;
		bwillinode(1);
		error = nlookup(nd);
	} else {
		/*
		 * NORMAL OPEN FILE CASE
		 */
		error = nlookup(nd);
	}

	if (error)
		return (error);

	/*
	 * split case to allow us to re-resolve and retry the ncp in case
	 * we get ESTALE.
	 */
again:
	if (fmode & O_CREAT) {
		if (nd->nl_nch.ncp->nc_vp == NULL) {
			if ((error = ncp_writechk(&nd->nl_nch)) != 0)
				return (error);
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp,
					    nd->nl_cred, vap);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			/* locked vnode is returned */
		} else {
			if (fmode & O_EXCL) {
				error = EEXIST;
			} else {
				error = cache_vget(&nd->nl_nch, cred,
						    LK_EXCLUSIVE, &vp);
			}
			if (error)
				return (error);
			fmode &= ~O_CREAT;
		}
	} else {
		error = cache_vget(&nd->nl_nch, cred, LK_EXCLUSIVE, &vp);
		if (error)
			return (error);
	}

	/*
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_nch is left intact.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		mode = 0;
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp, &nd->nl_nch);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(&nd->nl_nch);
					error = cache_resolve(&nd->nl_nch, cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
			mode |= VWRITE;
		}
		if (fmode & FREAD)
			mode |= VREAD;
		if (mode) {
			error = VOP_ACCESS(vp, mode, cred);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(&nd->nl_nch);
					error = cache_resolve(&nd->nl_nch, cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
		}
	}
	if (fmode & O_TRUNC) {
		vn_unlock(vp);				/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR(vp, vap, cred);
		if (error)
			goto bad;
	}

	/*
	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
	 * associated with the fp yet so we own it clean.
	 *
	 * f_nchandle inherits nl_nch.  This used to be necessary only for
	 * directories but now we do it unconditionally so f*() ops
	 * such as fchmod() can access the actual namespace that was
	 * used to open the file.
	 */
	if (fp) {
		fp->f_nchandle = nd->nl_nch;
		cache_zero(&nd->nl_nch);
		cache_unlock(&fp->f_nchandle);
	}

	/*
	 * Get rid of nl_nch.  vn_open does not return it (it returns the
	 * vnode or the file pointer).  Note: we can't leave nl_nch locked
	 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
	 * on /dev/ttyd0
	 */
	if (nd->nl_nch.ncp)
		cache_put(&nd->nl_nch);

	error = VOP_OPEN(vp, fmode, cred, fp);
	if (error) {
		/*
		 * Setting f_ops to &badfileops will prevent the descriptor
		 * code from trying to close and release the vnode.  Since
		 * the open failed we do not want to call close.
		 */
		if (fp) {
			fp->f_data = NULL;
			fp->f_ops = &badfileops;
		}
		goto bad;
	}

#if 0
	/*
	 * Assert that VREG files have been setup for vmio.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));
#endif

	/*
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	 */
	if (fp == NULL) {
		nd->nl_open_vp = vp;
		nd->nl_vp_fmode = fmode;
		if ((nd->nl_flags & NLC_LOCKVP) == 0)
			vn_unlock(vp);
	} else {
		vput(vp);
	}
	return (0);
bad:
	if (vp)
		vput(vp);
	return (error);
}

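/*
 * Usage sketch (illustrative, modeled on typical callers): an in-kernel
 * open of a path for reading, following the caller contract described
 * above.  nlookup_init()/nlookup_done() bracket the call whether or not
 * vn_open() succeeds; the caller takes over the vnode by clearing
 * nl_open_vp before nlookup_done() runs:
 *
 *	struct nlookupdata nd;
 *	struct vnode *vp = NULL;
 *	int error;
 *
 *	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
 *	if (error == 0)
 *		error = vn_open(&nd, NULL, FREAD, 0);
 *	if (error == 0) {
 *		vp = nd.nl_open_vp;
 *		nd.nl_open_vp = NULL;
 *	}
 *	nlookup_done(&nd);
 *
 * Without NLC_LOCKVP the returned vnode is referenced but unlocked.
 */
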
int
vn_opendisk(const char *devname, int fmode, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	if (strncmp(devname, "/dev/", 5) == 0)
		devname += 5;
	if ((vp = getsynthvnode(devname)) == NULL) {
		error = ENODEV;
	} else {
		error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL);
		vn_unlock(vp);
		if (error) {
			vrele(vp);
			vp = NULL;
		}
	}
	*vpp = vp;
	return (error);
}

/*
 * Check for write permissions on the specified vnode.  nch may be NULL.
 */
int
vn_writechk(struct vnode *vp, struct nchandle *nch)
{
	/*
	 * If there's shared text associated with the vnode (it backs a
	 * running program), we can't allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);

	/*
	 * If the vnode represents a regular file, check the mount
	 * point via the nch.  This may be a different mount point
	 * than the one embedded in the vnode (e.g. nullfs).
	 *
	 * We can still write to non-regular files (e.g. devices)
	 * via read-only mounts.
	 */
	if (nch && nch->ncp && vp->v_type == VREG)
		return (ncp_writechk(nch));
	return (0);
}

/*
 * Check whether the underlying mount is read-only.  The mount point
 * referenced by the namecache may be different from the mount point
 * used by the underlying vnode in the case of NULLFS, so a separate
 * check is needed.
 */
int
ncp_writechk(struct nchandle *nch)
{
	if (nch->mount && (nch->mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	return (0);
}

/*
 * Vnode close call
 */
int
vn_close(struct vnode *vp, int flags)
{
	int error;

	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (error == 0) {
		error = VOP_CLOSE(vp, flags);
		vn_unlock(vp);
	}
	vrele(vp);
	return (error);
}

static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return (fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return (0);
}

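/*
 * Worked example for the heuristic above (assuming the usual values
 * BKVASIZE = 16384, IO_SEQMAX = 0x7f, IO_SEQSHIFT = 16): a caller issuing
 * back-to-back 64KB reads bumps f_seqcount by 4 per call, so the counter
 * saturates at IO_SEQMAX after roughly 32 calls and the returned hint
 * becomes IO_SEQMAX << IO_SEQSHIFT.  VOP_READ/VOP_WRITE implementations
 * can use the hint to scale read-ahead and clustering.  A single
 * non-contiguous offset drops f_seqcount back to 1, and a second miss
 * drops it to 0.
 */
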
/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid)
{
	struct uio auio;
	struct iovec aiov;
	struct ccms_lock ccms_lock;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = curthread;
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, &auio);
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if (aresid)
		*aresid = auio.uio_resid;
	else if (auio.uio_resid && error == 0)
		error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		vn_unlock(vp);
	return (error);
}

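/*
 * Usage sketch (illustrative): reading the first 512 bytes of a file
 * into a kernel buffer, letting vn_rdwr() build the uio and handle the
 * vnode lock:
 *
 *	char buf[512];
 *	int resid, error;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0, UIO_SYSSPACE,
 *			0, cred, &resid);
 *
 * On success, sizeof(buf) - resid bytes were actually transferred.
 * Passing aresid == NULL turns any short transfer into EIO, as coded
 * above.
 */
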
/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call uio_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
		 off_t offset, enum uio_seg segflg, int ioflg,
		 struct ucred *cred, int *aresid)
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (vp->v_type == VREG) {
			switch(rw) {
			case UIO_READ:
				bwillread(chunk);
				break;
			case UIO_WRITE:
				bwillwrite(chunk);
				break;
			}
		}
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
			    ioflg, cred, aresid);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}

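/*
 * Example of the chunking math above, assuming MAXBSIZE is 65536: a
 * 100000-byte write at offset 5000 is issued as a 60536-byte chunk
 * (bringing the offset up to the 65536 boundary) followed by a
 * 39464-byte chunk, so only the first and last chunks can be partial
 * blocks from the filesystem's point of view.
 */
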
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	vn_lock(vp, LK_SHARED | LK_RETRY);
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);

	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	error = VOP_READ(vp, uio, ioflag, cred);
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	rel_mplock();
	return (error);
}

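/*
 * Note on O_FOFFSET (sketch, not part of the original file): callers
 * implementing pread()-style semantics preset the uio offset and pass
 * O_FOFFSET so fp->f_offset is neither consulted nor updated:
 *
 *	auio.uio_offset = offset;
 *	error = fo_read(fp, &auio, cred, O_FOFFSET);
 *
 * A plain read() omits the flag, starts at fp->f_offset, and advances it.
 */
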
/*
 * Device-optimized file table vnode read routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
svn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}

	/*
	 * Check for a zero-length transfer before taking a reference on
	 * the device so the early return cannot leak the reference.
	 */
	if (uio->uio_resid == 0) {
		error = 0;
		goto done;
	}

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dread(dev, uio, ioflag);

	release_dev(dev);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;
#if 0
	/* VOP_WRITE should handle this now */
	if (vp->v_type == VREG || vp->v_type == VDATABASE)
		bwillwrite();
#endif
	vp = (struct vnode *)fp->f_data;	/* XXX needed? */

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	rel_mplock();
	return (error);
}

/*
 * Device-optimized file table vnode write routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
svn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	if (vp->v_type == VREG)
		bwillwrite(uio->uio_resid);
	vp = (struct vnode *)fp->f_data;	/* XXX needed? */

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dwrite(dev, uio, ioflag);

	release_dev(dev);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	get_mplock();
	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	rel_mplock();
	return (error);
}

int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
	struct vattr vattr;
	struct vattr *vap;
	int error;
	u_short mode;
	cdev_t dev;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDATABASE:
		mode |= S_IFDB;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/* This is a cosmetic change, symlinks do not have a mode. */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			sb->st_mode &= ~ACCESSPERMS;	/* 0000 */
		else
			sb->st_mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = makeudev(vap->va_rmajor, vap->va_rminor);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		dev = vp->v_rdev;
		if (dev != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = dev->si_lastread;
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = dev->si_lastwrite;
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Default to PAGE_SIZE after much discussion.
	 */

	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		/*
		 * XXX this is broken.  If the device is not yet open (aka
		 * stat() call, aka v_rdev == NULL), how are we supposed
		 * to get a valid block size out of it?
		 */
		dev = vp->v_rdev;
		if (dev == NULL && vp->v_type == VCHR) {
			dev = get_dev(vp->v_umajor, vp->v_uminor);
		}
		sb->st_blksize = dev->si_bsize_best;
		if (sb->st_blksize < dev->si_bsize_phys)
			sb->st_blksize = dev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;
	if (suser_cred(cred, 0))
		sb->st_gen = 0;
	else
		sb->st_gen = (u_int32_t)vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	sb->st_fsmid = vap->va_fsmid;
	return (0);
}

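/*
 * Illustrative numbers for the conversions above: with va_bytes of
 * 1048576 and the traditional S_BLKSIZE of 512, st_blocks comes out to
 * 2048.  st_blksize, by contrast, is only an I/O size hint; a 100-byte
 * regular file on a filesystem reporting a 16384-byte va_blocksize
 * still shows st_size == 100 with st_blksize == 16384.
 */
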
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vnode *ovp;
	struct vattr vattr;
	int error;

	get_mplock();

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr);
			if (error)
				break;
			*(int *)data = vattr.va_size - fp->f_offset;
			error = 0;
			break;
		}
		if (com == FIOASYNC) {				/* XXX */
			error = 0;				/* XXX */
			break;
		}
		/* fall into ... */
	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK) {
				error = ENOTTY;
				break;
			}
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			error = 0;
			break;
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred);
		if (error == 0 && com == TIOCSCTTY) {
			struct proc *p = curthread->td_proc;
			struct session *sess;

			if (p == NULL) {
				error = ENOTTY;
				break;
			}

			sess = p->p_session;
			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp) {
				error = 0;
				break;
			}

			/* Get rid of reference to old control tty */
			ovp = sess->s_ttyvp;
			vref(vp);
			sess->s_ttyvp = vp;
			if (ovp)
				vrele(ovp);
		}
		break;
	}
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_poll(struct file *fp, int events, struct ucred *cred)
{
	int error;

	get_mplock();
	error = VOP_POLL(((struct vnode *)fp->f_data), events, cred);
	rel_mplock();
	return (error);
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
#ifndef	DEBUG_LOCKS
vn_lock(struct vnode *vp, int flags)
#else
debug_vn_lock(struct vnode *vp, int flags, const char *filename, int line)
#endif
{
	int error;

	do {
#ifdef	DEBUG_LOCKS
		vp->filename = filename;
		vp->line = line;
		error = debuglockmgr(&vp->v_lock, flags,
				     "vn_lock", filename, line);
#else
		error = lockmgr(&vp->v_lock, flags);
#endif
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	/*
	 * Because we (had better!) have a ref on the vnode, once it
	 * goes to VRECLAIMED state it will not be recycled until all
	 * refs go away.  So we can just check the flag.
	 */
	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
		lockmgr(&vp->v_lock, LK_RELEASE);
		error = ENOENT;
	}
	return (error);
}

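/*
 * Usage sketch (illustrative): the standard pattern for code holding a
 * vnode reference.  With LK_RETRY the lockmgr loop above only returns
 * success, so the sole remaining failure is the VRECLAIMED check, which
 * surfaces as ENOENT:
 *
 *	if (vn_lock(vp, LK_EXCLUSIVE | LK_RETRY) == 0) {
 *		...operate on the locked, still-valid vnode...
 *		vn_unlock(vp);
 *	}
 */
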
void
vn_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

int
vn_islocked(struct vnode *vp)
{
	return (lockstatus(&vp->v_lock, curthread));
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_closefile(struct file *fp)
{
	int error;

	get_mplock();
	fp->f_ops = &badfileops;
	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag);
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	get_mplock();
	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
	rel_mplock();
	return (error);
}