/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_vnops.c,v 1.43 2006/08/08 03:52:40 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>

static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct ucred *cred);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int svn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int vn_poll (struct file *fp, int events, struct ucred *cred);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int svn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);

struct fileops vnode_fileops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};

struct fileops specvnode_fileops = {
	.fo_read = svn_read,
	.fo_write = svn_write,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};

/*
 * Shortcut the device read/write.  This avoids a lot of vnode junk.
 * Basically the specfs vnops for read and write take the locked vnode,
 * unlock it (because we can't hold the vnode locked while reading or
 * writing a device, which may block indefinitely), issue the device
 * operation, then relock the vnode before returning, plus other junk.
 * This bypasses all of that and just does the device operation.
 */
void
vn_setspecops(struct file *fp)
{
	if (vfs_fastdev && fp->f_ops == &vnode_fileops) {
		fp->f_ops = &specvnode_fileops;
	}
}

/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_NOPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 * is also installed in the file pointer.
 */
int
vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
{
	struct vnode *vp;
	struct ucred *cred = nd->nl_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	struct namecache *ncp;
	int mode, error;

	/*
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_ncp will be returned.
	 *
	 * The result of this section should be a locked vnode.
	 *
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	 */
	if (fmode & O_CREAT) {
		/*
		 * CONDITIONAL CREATE FILE CASE
		 *
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		 * is returned.
		 */
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			nd->nl_flags |= NLC_FOLLOW;
		nd->nl_flags |= NLC_CREATE;
		bwillwrite();
		error = nlookup(nd);
	} else {
		/*
		 * NORMAL OPEN FILE CASE
		 */
		error = nlookup(nd);
	}

	if (error)
		return (error);
	ncp = nd->nl_ncp;

	/*
	 * Split case to allow us to re-resolve and retry the ncp in case
	 * we get ESTALE.
	 */
again:
	if (fmode & O_CREAT) {
		if (ncp->nc_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(ncp, &vp, nd->nl_cred, vap);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			ASSERT_VOP_LOCKED(vp, "create");
			/* locked vnode is returned */
		} else {
			if (fmode & O_EXCL) {
				error = EEXIST;
			} else {
				error = cache_vget(ncp, cred,
						    LK_EXCLUSIVE, &vp);
			}
			if (error)
				return (error);
			fmode &= ~O_CREAT;
		}
	} else {
		error = cache_vget(ncp, cred, LK_EXCLUSIVE, &vp);
		if (error)
			return (error);
	}

	/*
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_ncp is left intact.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		mode = 0;
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(ncp);
					error = cache_resolve(ncp, cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
			mode |= VWRITE;
		}
		if (fmode & FREAD)
			mode |= VREAD;
		if (mode) {
			error = VOP_ACCESS(vp, mode, cred);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(ncp);
					error = cache_resolve(ncp, cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
		}
	}
	if (fmode & O_TRUNC) {
		VOP_UNLOCK(vp, 0);			/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR(vp, vap, cred);
		if (error)
			goto bad;
	}

	/*
	 * Set up the fp so VOP_OPEN can override it.  No descriptor has
	 * been associated with the fp yet so we own it clean.  f_ncp
	 * inherits nl_ncp.
	 */
	if (fp) {
		if (vp->v_type == VDIR) {
			fp->f_ncp = nd->nl_ncp;
			nd->nl_ncp = NULL;
			cache_unlock(fp->f_ncp);
		}
	}

	/*
	 * Get rid of nl_ncp.  vn_open does not return it (it returns the
	 * vnode or the file pointer).  Note: we can't leave nl_ncp locked
	 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
	 * on /dev/ttyd0.
	 */
	if (nd->nl_ncp) {
		cache_put(nd->nl_ncp);
		nd->nl_ncp = NULL;
	}

	error = VOP_OPEN(vp, fmode, cred, fp);
	if (error) {
		/*
		 * Setting f_ops to &badfileops will prevent the descriptor
		 * code from trying to close and release the vnode.  Since
		 * the open failed we do not want to call close.
		 */
		if (fp) {
			fp->f_data = NULL;
			fp->f_ops = &badfileops;
		}
		goto bad;
	}

#if 0
	/*
	 * Assert that VREG files have been set up for vmio.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));
#endif

	/*
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	 */
	if (fp == NULL) {
		nd->nl_open_vp = vp;
		nd->nl_vp_fmode = fmode;
		if ((nd->nl_flags & NLC_LOCKVP) == 0)
			VOP_UNLOCK(vp, 0);
	} else {
		vput(vp);
	}
	return (0);
bad:
	if (vp)
		vput(vp);
	return (error);
}

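/*
 * Example (sketch, not compiled): the calling pattern vn_open() expects,
 * along the lines of callers such as kern_open().  "path" is a
 * placeholder.  When fp is NULL the caller must take over the vnode
 * reference left in nd.nl_open_vp before nlookup_done() runs, or
 * nlookup_done() will close it.
 */
#if 0
	struct nlookupdata nd;
	struct vnode *vp = NULL;
	int error;

	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = vn_open(&nd, NULL, FREAD, 0);
	if (error == 0) {
		vp = nd.nl_open_vp;	/* referenced; locked iff NLC_LOCKVP */
		nd.nl_open_vp = NULL;
	}
	nlookup_done(&nd);
#endif
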
/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(struct vnode *vp)
{
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);
	return (0);
}

/*
 * Vnode close call
 */
int
vn_close(struct vnode *vp, int flags)
{
	int error;

	if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY)) == 0) {
		error = VOP_CLOSE(vp, flags);
		VOP_UNLOCK(vp, 0);
	}
	vrele(vp);
	return (error);
}

static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}

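/*
 * Worked example (assuming the stock constants: BKVASIZE = 16KB,
 * IO_SEQMAX = 0x7f, IO_SEQSHIFT = 16): each contiguous 64KB read adds
 * (65536 + 16383) / 16384 = 4 to f_seqcount, so the fourth back-to-back
 * read returns 16 << 16 in the hint bits, which the filesystem can use
 * to scale up read-ahead.  One non-contiguous access drops the count
 * to 1, a second one to 0.
 */
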
/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = curthread;
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else if (auio.uio_resid && error == 0)
		error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0);
	return (error);
}

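/*
 * Example (sketch, not compiled): reading a block into a kernel buffer
 * with vn_rdwr().  "vp" and "cred" are placeholders for a held vnode
 * and credential; resid returns the byte count NOT transferred.
 */
#if 0
	char buf[512];
	int resid;
	int error;

	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), (off_t)0,
			UIO_SYSSPACE, 0, cred, &resid);
	if (error == 0 && resid == 0)
		;	/* all 512 bytes were read */
#endif
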
/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * call bwillwrite() before each chunk is passed to vn_rdwr().  We also
 * call uio_yield() to give other processes a chance to lock the vnode
 * (either other processes core'ing the same binary, or unrelated
 * processes scanning the directory).
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
		 off_t offset, enum uio_seg segflg, int ioflg,
		 struct ucred *cred, int *aresid)
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
			    ioflg, cred, aresid);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}

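/*
 * Chunk arithmetic example: with MAXBSIZE = 65536, a transfer starting
 * at offset 1000 uses a first chunk of 65536 - 1000 = 64536 bytes;
 * every subsequent chunk then begins on a 64KB boundary and is full
 * sized except possibly the last.
 */
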
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int error, ioflag;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

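	/*
	 * Per-call O_F* override flags take precedence over the defaults
	 * inherited from fp->f_flag when selecting the blocking and
	 * buffering behavior for this operation.
	 */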
	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	vn_lock(vp, LK_SHARED | LK_RETRY);
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag |= sequential_heuristic(uio, fp);

	error = VOP_READ(vp, uio, ioflag, cred);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0);
	rel_mplock();
	return (error);
}


/*
 * Device-optimized file table vnode read routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
svn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int ioflag;
	int error;
	dev_t dev;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}

	if (uio->uio_resid == 0) {
		error = 0;
		goto done;
	}
	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dread(dev, uio, ioflag);

	release_dev(dev);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	rel_mplock();
	return (error);
}


/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int error, ioflag;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;
	if (vp->v_type == VREG)
		bwillwrite();
	vp = (struct vnode *)fp->f_data;	/* XXX needed? */

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0);
	rel_mplock();
	return (error);
}

/*
 * Device-optimized file table vnode write routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
svn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int ioflag;
	int error;
	dev_t dev;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	if (vp->v_type == VREG)
		bwillwrite();
	vp = (struct vnode *)fp->f_data;	/* XXX needed? */

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dwrite(dev, uio, ioflag);

	release_dev(dev);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	rel_mplock();
	return (error);
}
741 
742 /*
743  * MPALMOSTSAFE - acquires mplock
744  */
745 static int
746 vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
747 {
748 	struct vnode *vp;
749 	int error;
750 
751 	get_mplock();
752 	vp = (struct vnode *)fp->f_data;
753 	error = vn_stat(vp, sb, cred);
754 	rel_mplock();
755 	return (error);
756 }
757 
int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
	struct vattr vattr;
	struct vattr *vap;
	int error;
	u_short mode;
	dev_t dev;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/* This is a cosmetic change; symlinks do not have a mode. */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR or VBLK device may track the last access and last
	 * modified times independently of the filesystem.  This is
	 * particularly true because device read and write calls may
	 * bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		if ((dev = vp->v_rdev) != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = dev->si_lastread;
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = dev->si_lastwrite;
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}


	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Default to PAGE_SIZE after much discussion.
	 */
	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		/*
		 * XXX this is broken.  If the device is not yet open (aka
		 * stat() call, aka v_rdev == NULL), how are we supposed
		 * to get a valid block size out of it?
		 */
		dev_t dev;

		if ((dev = vp->v_rdev) == NULL)
			dev = udev2dev(vp->v_udev, vp->v_type == VBLK);
		sb->st_blksize = dev->si_bsize_best;
		if (sb->st_blksize < dev->si_bsize_phys)
			sb->st_blksize = dev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;
	if (suser_cred(cred, 0))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

#if (S_BLKSIZE == 512)
	/* Optimize this case */
	sb->st_blocks = vap->va_bytes >> 9;
#else
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
#endif
	sb->st_fsmid = vap->va_fsmid;
	return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vnode *ovp;
	struct vattr vattr;
	int error;

	get_mplock();

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			if ((error = VOP_GETATTR(vp, &vattr)) != 0)
				break;
			*(int *)data = vattr.va_size - fp->f_offset;
			error = 0;
			break;
		}
		if (com == FIOASYNC) {				/* XXX */
			error = 0;				/* XXX */
			break;
		}
		/* fall into ... */
	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK) {
				error = ENOTTY;
				break;
			}
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			error = 0;
			break;
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred);
		if (error == 0 && com == TIOCSCTTY) {
			struct proc *p = curthread->td_proc;
			struct session *sess;

			if (p == NULL) {
				error = ENOTTY;
				break;
			}

			sess = p->p_session;
			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp) {
				error = 0;
				break;
			}

			/* Get rid of reference to old control tty */
			ovp = sess->s_ttyvp;
			vref(vp);
			sess->s_ttyvp = vp;
			if (ovp)
				vrele(ovp);
		}
		break;
	}
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_poll(struct file *fp, int events, struct ucred *cred)
{
	int error;

	get_mplock();
	error = VOP_POLL(((struct vnode *)fp->f_data), events, cred);
	rel_mplock();
	return (error);
}

/*
 * Check that the vnode is still valid, and if so
 * acquire the requested lock.
 */
int
#ifndef	DEBUG_LOCKS
vn_lock(struct vnode *vp, int flags)
#else
debug_vn_lock(struct vnode *vp, int flags, const char *filename, int line)
#endif
{
	int error;

	do {
#ifdef	DEBUG_LOCKS
		vp->filename = filename;
		vp->line = line;
#endif
		error = VOP_LOCK(vp, flags);
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	/*
	 * Because we (had better!) have a ref on the vnode, once it
	 * goes to VRECLAIMED state it will not be recycled until all
	 * refs go away.  So we can just check the flag.
	 */
	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
		VOP_UNLOCK(vp, 0);
		error = ENOENT;
	}
	return (error);
}

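/*
 * Example (sketch, not compiled): the usual pairing.  Even with
 * LK_RETRY, vn_lock() can return ENOENT for a reclaimed vnode, so the
 * return value must still be checked.
 */
#if 0
	if (vn_lock(vp, LK_EXCLUSIVE | LK_RETRY) == 0) {
		/* ... operate on the locked, still-valid vnode ... */
		VOP_UNLOCK(vp, 0);
	}
#endif
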
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_closefile(struct file *fp)
{
	int error;

	get_mplock();
	fp->f_ops = &badfileops;
	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag);
	rel_mplock();
	return(error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	get_mplock();
	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
	rel_mplock();
	return (error);
}