/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_vnops.c,v 1.49 2006/10/27 04:56:31 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>

static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct ucred *cred);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int svn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int vn_poll (struct file *fp, int events, struct ucred *cred);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int svn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);

struct fileops vnode_fileops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};

struct fileops specvnode_fileops = {
	.fo_read = svn_read,
	.fo_write = svn_write,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};
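
/*
 * Illustrative only: how the descriptor layer reaches these tables.  A
 * minimal sketch (not compiled here); the upper file layer calls through
 * the installed ops vector, so a read on a vnode-backed descriptor lands
 * in vn_read() or, for fast devices, svn_read().
 */
#if 0
static int
example_dispatch_read(struct file *fp, struct uio *uio, struct ucred *cred)
{
	return (fp->f_ops->fo_read(fp, uio, cred, 0));
}
#endif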

/*
 * Shortcut the device read/write.  This avoids a lot of vnode junk.
 * Basically the specfs vnops for read and write take the locked vnode,
 * unlock it (because we can't hold the vnode locked while reading or writing
 * a device which may block indefinitely), issue the device operation, then
 * relock the vnode before returning, plus other overhead.  This bypasses
 * all of that and just does the device operation.
 */
void
vn_setspecops(struct file *fp)
{
	if (vfs_fastdev && fp->f_ops == &vnode_fileops) {
		fp->f_ops = &specvnode_fileops;
	}
}

/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_OPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 * is also installed in the file pointer.
 */
int
vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
{
	struct vnode *vp;
	struct ucred *cred = nd->nl_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int mode, error;

	/*
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_nch will be returned.
	 *
	 * The result of this section should be a locked vnode.
	 *
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	 */
	if (fmode & O_CREAT) {
		/*
		 * CONDITIONAL CREATE FILE CASE
		 *
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		 * is returned.
		 */
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			nd->nl_flags |= NLC_FOLLOW;
		nd->nl_flags |= NLC_CREATE;
		bwillwrite();
		error = nlookup(nd);
	} else {
		/*
		 * NORMAL OPEN FILE CASE
		 */
		error = nlookup(nd);
	}

	if (error)
		return (error);

	/*
	 * Split case to allow us to re-resolve and retry the ncp in case
	 * we get ESTALE.
	 */
again:
	if (fmode & O_CREAT) {
		if (nd->nl_nch.ncp->nc_vp == NULL) {
			if ((error = ncp_writechk(&nd->nl_nch)) != 0)
				return (error);
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(&nd->nl_nch, &vp, nd->nl_cred, vap);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			/* locked vnode is returned */
		} else {
			if (fmode & O_EXCL) {
				error = EEXIST;
			} else {
				error = cache_vget(&nd->nl_nch, cred,
						    LK_EXCLUSIVE, &vp);
			}
			if (error)
				return (error);
			fmode &= ~O_CREAT;
		}
	} else {
		error = cache_vget(&nd->nl_nch, cred, LK_EXCLUSIVE, &vp);
		if (error)
			return (error);
	}

	/*
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_nch is left intact.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		mode = 0;
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp, &nd->nl_nch);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(&nd->nl_nch);
					error = cache_resolve(&nd->nl_nch, cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
			mode |= VWRITE;
		}
		if (fmode & FREAD)
			mode |= VREAD;
		if (mode) {
			error = VOP_ACCESS(vp, mode, cred);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(&nd->nl_nch);
					error = cache_resolve(&nd->nl_nch, cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
		}
	}
	if (fmode & O_TRUNC) {
		vn_unlock(vp);				/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR(vp, vap, cred);
		if (error)
			goto bad;
	}

	/*
	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
	 * associated with the fp yet so we own it clean.
	 *
	 * f_nchandle inherits nl_nch.  This used to be necessary only for
	 * directories but now we do it unconditionally so f*() ops
	 * such as fchmod() can access the actual namespace that was
	 * used to open the file.
	 */
	if (fp) {
		fp->f_nchandle = nd->nl_nch;
		cache_zero(&nd->nl_nch);
		cache_unlock(&fp->f_nchandle);
	}

	/*
	 * Get rid of nl_nch.  vn_open does not return it (it returns the
	 * vnode or the file pointer).  Note: we can't leave nl_nch locked
	 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
	 * on /dev/ttyd0.
	 */
	if (nd->nl_nch.ncp)
		cache_put(&nd->nl_nch);

	error = VOP_OPEN(vp, fmode, cred, fp);
	if (error) {
		/*
		 * Setting f_ops to &badfileops will prevent the descriptor
		 * code from trying to close and release the vnode.  Since
		 * the open failed we do not want to call close.
		 */
		if (fp) {
			fp->f_data = NULL;
			fp->f_ops = &badfileops;
		}
		goto bad;
	}

#if 0
	/*
	 * Assert that VREG files have been setup for vmio.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));
#endif

	/*
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	 */
	if (fp == NULL) {
		nd->nl_open_vp = vp;
		nd->nl_vp_fmode = fmode;
		if ((nd->nl_flags & NLC_LOCKVP) == 0)
			vn_unlock(vp);
	} else {
		vput(vp);
	}
	return (0);
bad:
	if (vp)
		vput(vp);
	return (error);
}
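
/*
 * Illustrative only: the typical calling pattern described in the comment
 * above vn_open().  A minimal sketch with hypothetical names (not compiled
 * here); note nl_open_vp must be taken away from nd before nlookup_done()
 * or the cleanup will close the vnode again.
 */
#if 0
static int
example_open(const char *path, struct vnode **vpp)
{
	struct nlookupdata nd;
	struct vnode *vp = NULL;
	int error;

	error = nlookup_init(&nd, path, UIO_SYSSPACE,
			     NLC_FOLLOW | NLC_LOCKVP);
	if (error == 0)
		error = vn_open(&nd, NULL, FREAD, 0);
	if (error == 0) {
		vp = nd.nl_open_vp;	/* referenced, locked (NLC_LOCKVP) */
		nd.nl_open_vp = NULL;	/* keep nlookup_done() from closing it */
	}
	nlookup_done(&nd);		/* always clean up, error or not */
	*vpp = vp;
	return (error);
}
#endif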

/*
 * Check for write permissions on the specified vnode.  nch may be NULL.
 */
int
vn_writechk(struct vnode *vp, struct nchandle *nch)
{
	/*
	 * If there is shared text associated with the vnode
	 * we can't allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);

	/*
	 * If the vnode represents a regular file, check the mount
	 * point via the nch.  This may be a different mount point
	 * than the one embedded in the vnode (e.g. nullfs).
	 *
	 * We can still write to non-regular files (e.g. devices)
	 * via read-only mounts.
	 */
	if (nch && nch->ncp && vp->v_type == VREG)
		return (ncp_writechk(nch));
	return (0);
}

/*
 * Check whether the underlying mount is read-only.  The mount point
 * referenced by the namecache may be different from the mount point
 * used by the underlying vnode in the case of NULLFS, so a separate
 * check is needed.
 */
int
ncp_writechk(struct nchandle *nch)
{
	if (nch->mount && (nch->mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	return (0);
}

/*
 * Vnode close call
 */
int
vn_close(struct vnode *vp, int flags)
{
	int error;

	if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY)) == 0) {
		error = VOP_CLOSE(vp, flags);
		vn_unlock(vp);
	}
	vrele(vp);
	return (error);
}

static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return (fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return (0);
}
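
/*
 * Worked example (illustrative, assuming the usual 16KB BKVASIZE): a
 * caller issuing back-to-back 64KB reads advances f_seqcount by 4 per
 * call until it saturates at IO_SEQMAX.  The caller then ORs the returned
 * f_seqcount << IO_SEQSHIFT into ioflag, which filesystems use to scale
 * read-ahead.  A single non-contiguous offset drops f_seqcount to 1,
 * then to 0.
 */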

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid)
{
	struct uio auio;
	struct iovec aiov;
	struct ccms_lock ccms_lock;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = curthread;
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, &auio);
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if (aresid)
		*aresid = auio.uio_resid;
	else if (auio.uio_resid && error == 0)
		error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		vn_unlock(vp);
	return (error);
}
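
/*
 * Illustrative only: reading a file header through vn_rdwr().  A minimal
 * sketch with hypothetical names; assumes the caller holds a vnode
 * reference and has not locked vp (without IO_NODELOCKED, vn_rdwr()
 * locks and unlocks the vnode itself).
 */
#if 0
static int
example_read_header(struct vnode *vp, struct ucred *cred, char *buf, int len)
{
	int resid;
	int error;

	error = vn_rdwr(UIO_READ, vp, buf, len, (off_t)0, UIO_SYSSPACE,
			0, cred, &resid);
	if (error == 0 && resid != 0)
		error = ENOEXEC;	/* short read: file smaller than len */
	return (error);
}
#endif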

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * call bwillwrite() before each vn_rdwr().  We also call uio_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
		 off_t offset, enum uio_seg segflg, int ioflg,
		 struct ucred *cred, int *aresid)
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
			    ioflg, cred, aresid);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}
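
/*
 * Illustrative only: a large coredump-style write broken up by
 * vn_rdwr_inchunks().  A minimal sketch with hypothetical names; passing
 * IO_UNIT keeps each chunk atomic, and a NULL aresid turns any short
 * write into EIO inside vn_rdwr().
 */
#if 0
static int
example_dump(struct vnode *vp, struct ucred *cred, caddr_t data, int len)
{
	/* chunks after the first are MAXBSIZE-aligned, per the comment above */
	return (vn_rdwr_inchunks(UIO_WRITE, vp, data, len, (off_t)0,
				 UIO_SYSSPACE, IO_UNIT, cred, NULL));
}
#endif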

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	vn_lock(vp, LK_SHARED | LK_RETRY);
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);

	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	error = VOP_READ(vp, uio, ioflag, cred);
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	rel_mplock();
	return (error);
}

/*
 * Device-optimized file table vnode read routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
svn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);

	if (uio->uio_resid == 0) {
		error = 0;
		release_dev(dev);	/* drop the reference taken above */
		goto done;
	}
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dread(dev, uio, ioflag);

	release_dev(dev);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;
	if (vp->v_type == VREG)
		bwillwrite();
	vp = (struct vnode *)fp->f_data;	/* XXX needed? */

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	rel_mplock();
	return (error);
}

/*
 * Device-optimized file table vnode write routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
svn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	if (vp->v_type == VREG)
		bwillwrite();
	vp = (struct vnode *)fp->f_data;	/* XXX needed? */

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dwrite(dev, uio, ioflag);

	release_dev(dev);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	get_mplock();
	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	rel_mplock();
	return (error);
}

int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
	struct vattr vattr;
	struct vattr *vap;
	int error;
	u_short mode;
	cdev_t dev;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/*
		 * This is a cosmetic change, symlinks do not have a mode.
		 * Adjust the local mode, not sb->st_mode, or the fixup is
		 * clobbered by the assignment below the switch.
		 */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		if ((dev = vp->v_rdev) != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = dev->si_lastread;
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = dev->si_lastwrite;
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Default to PAGE_SIZE after much discussion.
	 */

	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		/*
		 * XXX this is broken.  If the device is not yet open (aka
		 * stat() call, aka v_rdev == NULL), how are we supposed
		 * to get a valid block size out of it?
		 */
		cdev_t dev;

		if ((dev = vp->v_rdev) == NULL)
			dev = udev2dev(vp->v_udev, vp->v_type == VBLK);
		sb->st_blksize = dev->si_bsize_best;
		if (sb->st_blksize < dev->si_bsize_phys)
			sb->st_blksize = dev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;
	if (suser_cred(cred, 0))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

#if (S_BLKSIZE == 512)
	/* Optimize this case */
	sb->st_blocks = vap->va_bytes >> 9;
#else
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
#endif
	sb->st_fsmid = vap->va_fsmid;
	return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vnode *ovp;
	struct vattr vattr;
	int error;

	get_mplock();

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			if ((error = VOP_GETATTR(vp, &vattr)) != 0)
				break;
			*(int *)data = vattr.va_size - fp->f_offset;
			error = 0;
			break;
		}
		if (com == FIOASYNC) {				/* XXX */
			error = 0;				/* XXX */
			break;
		}
		/* fall into ... */
	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK) {
				error = ENOTTY;
				break;
			}
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			error = 0;
			break;
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred);
		if (error == 0 && com == TIOCSCTTY) {
			struct proc *p = curthread->td_proc;
			struct session *sess;

			if (p == NULL) {
				error = ENOTTY;
				break;
			}

			sess = p->p_session;
			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp) {
				error = 0;
				break;
			}

			/* Get rid of reference to old control tty */
			ovp = sess->s_ttyvp;
			vref(vp);
			sess->s_ttyvp = vp;
			if (ovp)
				vrele(ovp);
		}
		break;
	}
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_poll(struct file *fp, int events, struct ucred *cred)
{
	int error;

	get_mplock();
	error = VOP_POLL(((struct vnode *)fp->f_data), events, cred);
	rel_mplock();
	return (error);
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
#ifndef	DEBUG_LOCKS
vn_lock(struct vnode *vp, int flags)
#else
debug_vn_lock(struct vnode *vp, int flags, const char *filename, int line)
#endif
{
	int error;

	do {
#ifdef	DEBUG_LOCKS
		vp->filename = filename;
		vp->line = line;
		error = debuglockmgr(&vp->v_lock, flags,
				     "vn_lock", filename, line);
#else
		error = lockmgr(&vp->v_lock, flags);
#endif
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	/*
	 * Because we (had better!) have a ref on the vnode, once it
	 * goes to VRECLAIMED state it will not be recycled until all
	 * refs go away.  So we can just check the flag.
	 */
	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
		lockmgr(&vp->v_lock, LK_RELEASE);
		error = ENOENT;
	}
	return (error);
}
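
/*
 * Illustrative only: the usual pattern around vn_lock()/vn_unlock().  A
 * minimal sketch; the caller must already hold a vnode reference, and with
 * LK_RETRY the only failure is the ENOENT produced above for a vnode that
 * was reclaimed while we waited.
 */
#if 0
static int
example_locked_op(struct vnode *vp)
{
	int error;

	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (error)
		return (error);		/* ENOENT: vnode was reclaimed */
	/* ... operate on the exclusively locked vnode ... */
	vn_unlock(vp);
	return (0);
}
#endif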

void
vn_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

int
vn_islocked(struct vnode *vp)
{
	return (lockstatus(&vp->v_lock, curthread));
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_closefile(struct file *fp)
{
	int error;

	get_mplock();
	fp->f_ops = &badfileops;
	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag);
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	get_mplock();
	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
	rel_mplock();
	return (error);
}
1085