/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_vnops.c,v 1.58 2008/06/28 17:59:49 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>

static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct ucred *cred);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int svn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int vn_poll (struct file *fp, int events, struct ucred *cred);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int svn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);

struct fileops vnode_fileops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};

struct fileops specvnode_fileops = {
	.fo_read = svn_read,
	.fo_write = svn_write,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};

/*
 * Shortcut the device read/write.  This avoids a lot of vnode junk.
 * Basically the specfs vnops for read and write take the locked vnode,
 * unlock it (because we can't hold the vnode locked while reading or
 * writing a device, which may block indefinitely), issue the device
 * operation, then relock the vnode before returning, plus other junk.
 * This bypasses all of that and just does the device operation.
 */
void
vn_setspecops(struct file *fp)
{
	if (vfs_fastdev && fp->f_ops == &vnode_fileops) {
		fp->f_ops = &specvnode_fileops;
	}
}

/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_OPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 * is also installed in the file pointer.
 */
int
vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
{
	struct vnode *vp;
	struct ucred *cred = nd->nl_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int error;

	/*
	 * Certain combinations are illegal
	 */
	if ((fmode & (FWRITE | O_TRUNC)) == O_TRUNC)
		return(EACCES);

	/*
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_nch will be returned.
	 *
	 * The result of this section should be a locked vnode.
	 *
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	 */
	nd->nl_flags |= NLC_OPEN;
	if (fmode & O_APPEND)
		nd->nl_flags |= NLC_APPEND;
	if (fmode & O_TRUNC)
		nd->nl_flags |= NLC_TRUNCATE;
	if (fmode & FREAD)
		nd->nl_flags |= NLC_READ;
	if (fmode & FWRITE)
		nd->nl_flags |= NLC_WRITE;
	if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
		nd->nl_flags |= NLC_FOLLOW;

	if (fmode & O_CREAT) {
		/*
		 * CONDITIONAL CREATE FILE CASE
		 *
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		 * is returned.
		 */
		nd->nl_flags |= NLC_CREATE;
		nd->nl_flags |= NLC_REFDVP;
		bwillinode(1);
		error = nlookup(nd);
	} else {
		/*
		 * NORMAL OPEN FILE CASE
		 */
		error = nlookup(nd);
	}

	if (error)
		return (error);

	/*
	 * Split case to allow us to re-resolve and retry the ncp in case
	 * we get ESTALE.
	 */
again:
	if (fmode & O_CREAT) {
		if (nd->nl_nch.ncp->nc_vp == NULL) {
			if ((error = ncp_writechk(&nd->nl_nch)) != 0)
				return (error);
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp,
					    nd->nl_cred, vap);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			/* locked vnode is returned */
		} else {
			if (fmode & O_EXCL) {
				error = EEXIST;
			} else {
				error = cache_vget(&nd->nl_nch, cred,
						    LK_EXCLUSIVE, &vp);
			}
			if (error)
				return (error);
			fmode &= ~O_CREAT;
		}
	} else {
		error = cache_vget(&nd->nl_nch, cred, LK_EXCLUSIVE, &vp);
		if (error)
			return (error);
	}

	/*
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_nch is left intact.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp, &nd->nl_nch);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(&nd->nl_nch);
					error = cache_resolve(&nd->nl_nch, cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
		}
	}
	if (fmode & O_TRUNC) {
		vn_unlock(vp);				/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR(vp, vap, cred);
		if (error)
			goto bad;
	}

	/*
	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
	 * associated with the fp yet so we own it clean.
	 *
	 * f_nchandle inherits nl_nch.  This used to be necessary only for
	 * directories but now we do it unconditionally so f*() ops
	 * such as fchmod() can access the actual namespace that was
	 * used to open the file.
	 */
	if (fp) {
		if (nd->nl_flags & NLC_APPENDONLY)
			fmode |= FAPPENDONLY;
		fp->f_nchandle = nd->nl_nch;
		cache_zero(&nd->nl_nch);
		cache_unlock(&fp->f_nchandle);
	}

	/*
	 * Get rid of nl_nch.  vn_open does not return it (it returns the
	 * vnode or the file pointer).  Note: we can't leave nl_nch locked
	 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
	 * on /dev/ttyd0.
	 */
	if (nd->nl_nch.ncp)
		cache_put(&nd->nl_nch);

	error = VOP_OPEN(vp, fmode, cred, fp);
	if (error) {
		/*
		 * Setting f_ops to &badfileops will prevent the descriptor
		 * code from trying to close and release the vnode.  Since
		 * the open failed we do not want to call close.
		 */
		if (fp) {
			fp->f_data = NULL;
			fp->f_ops = &badfileops;
		}
		goto bad;
	}

#if 0
	/*
	 * Assert that VREG files have been setup for vmio.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));
#endif

	/*
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	 */
	if (fp == NULL) {
		nd->nl_open_vp = vp;
		nd->nl_vp_fmode = fmode;
		if ((nd->nl_flags & NLC_LOCKVP) == 0)
			vn_unlock(vp);
	} else {
		vput(vp);
	}
	return (0);
bad:
	if (vp)
		vput(vp);
	return (error);
}

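/*
 * Example: a minimal sketch of the typical vn_open() calling sequence,
 * modeled on the open(2) path.  The path string, flags, and mode here
 * are illustrative only; note that the caller always pairs
 * nlookup_init() with nlookup_done(), success or failure, per the
 * contract documented above:
 *
 *	struct nlookupdata nd;
 *	int error;
 *
 *	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
 *	if (error == 0)
 *		error = vn_open(&nd, fp, FREAD | FWRITE | O_CREAT, 0644);
 *	nlookup_done(&nd);
 */
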
int
vn_opendisk(const char *devname, int fmode, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	if (strncmp(devname, "/dev/", 5) == 0)
		devname += 5;
	if ((vp = getsynthvnode(devname)) == NULL) {
		error = ENODEV;
	} else {
		error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL);
		vn_unlock(vp);
		if (error) {
			vrele(vp);
			vp = NULL;
		}
	}
	*vpp = vp;
	return (error);
}

/*
 * Check for write permissions on the specified vnode.  nch may be NULL.
 */
int
vn_writechk(struct vnode *vp, struct nchandle *nch)
{
	/*
	 * If there is shared text associated with the vnode (i.e. it
	 * backs a running executable), we cannot allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);

	/*
	 * If the vnode represents a regular file, check the mount
	 * point via the nch.  This may be a different mount point
	 * than the one embedded in the vnode (e.g. nullfs).
	 *
	 * We can still write to non-regular files (e.g. devices)
	 * via read-only mounts.
	 */
	if (nch && nch->ncp && vp->v_type == VREG)
		return (ncp_writechk(nch));
	return (0);
}

/*
 * Check whether the underlying mount is read-only.  The mount point
 * referenced by the namecache may be different from the mount point
 * used by the underlying vnode in the case of NULLFS, so a separate
 * check is needed.
 */
int
ncp_writechk(struct nchandle *nch)
{
	if (nch->mount && (nch->mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	return(0);
}

/*
 * Vnode close call
 */
int
vn_close(struct vnode *vp, int flags)
{
	int error;

	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (error == 0) {
		error = VOP_CLOSE(vp, flags);
		vn_unlock(vp);
	}
	vrele(vp);
	return (error);
}

static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}

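/*
 * A worked example of the arithmetic above (assuming the historical
 * default BKVASIZE of 16K; not part of the heuristic's contract): a
 * caller issuing back-to-back 64K sequential reads adds 4 to
 * f_seqcount per call until the count clamps at IO_SEQMAX, and the
 * return value places the count into the sequential-hint bits of the
 * ioflag via IO_SEQSHIFT.
 */
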
/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid)
{
	struct uio auio;
	struct iovec aiov;
	struct ccms_lock ccms_lock;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = curthread;
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, &auio);
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		vn_unlock(vp);
	return (error);
}

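/*
 * Example: a minimal sketch reading the first 512 bytes of a vnode
 * into a kernel buffer ("buf" and "resid" are hypothetical locals;
 * vp is assumed referenced and unlocked, cred valid).  Passing a
 * non-NULL aresid lets the caller see a short transfer instead of
 * having it folded into EIO:
 *
 *	char buf[512];
 *	int resid, error;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0,
 *			UIO_SYSSPACE, 0, cred, &resid);
 */
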
/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillread()/bwillwrite() before calling vn_rdwr().  We also
 * call uio_yield() to give other processes a chance to lock the vnode
 * (either other processes core'ing the same binary, or unrelated
 * processes scanning the directory).
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
		 off_t offset, enum uio_seg segflg, int ioflg,
		 struct ucred *cred, int *aresid)
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (vp->v_type == VREG) {
			switch(rw) {
			case UIO_READ:
				bwillread(chunk);
				break;
			case UIO_WRITE:
				bwillwrite(chunk);
				break;
			}
		}
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
			    ioflg, cred, aresid);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}

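/*
 * A worked example of the alignment arithmetic above: with the usual
 * MAXBSIZE of 64K and a starting offset of 1000, the first chunk is
 * 64536 bytes, bringing the offset to 65536; every subsequent chunk is
 * then a full MAXBSIZE except possibly the last.
 */
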
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	vn_lock(vp, LK_SHARED | LK_RETRY);
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);

	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	error = VOP_READ(vp, uio, ioflag, cred);
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	rel_mplock();
	return (error);
}

/*
 * Device-optimized file table vnode read routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
svn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}

	/*
	 * Check the resid before taking a device reference so the
	 * zero-length early return cannot leak one.
	 */
	if (uio->uio_resid == 0) {
		error = 0;
		goto done;
	}

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dread(dev, uio, ioflag);

	release_dev(dev);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;
#if 0
	/* VOP_WRITE should handle this now */
	if (vp->v_type == VREG || vp->v_type == VDATABASE)
		bwillwrite();
#endif
	vp = (struct vnode *)fp->f_data;	/* XXX needed? */

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	rel_mplock();
	return (error);
}

/*
 * Device-optimized file table vnode write routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
svn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	if (vp->v_type == VREG)
		bwillwrite(uio->uio_resid);
	vp = (struct vnode *)fp->f_data;	/* XXX needed? */

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dwrite(dev, uio, ioflag);

	release_dev(dev);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	get_mplock();
	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	rel_mplock();
	return (error);
}

int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
	struct vattr vattr;
	struct vattr *vap;
	int error;
	u_short mode;
	cdev_t dev;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDATABASE:
		mode |= S_IFDB;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/*
		 * This is a cosmetic change, symlinks do not have a mode.
		 * Adjust `mode' here, not sb->st_mode, which would be
		 * clobbered by the assignment after the switch.
		 */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = makeudev(vap->va_rmajor, vap->va_rminor);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		dev = vp->v_rdev;
		if (dev != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = dev->si_lastread;
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = dev->si_lastwrite;
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Default to PAGE_SIZE after much discussion.
	 */
	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		/*
		 * XXX this remains imperfect.  If the device is not yet
		 * open (aka a stat() call, aka v_rdev == NULL) we may not
		 * be able to resolve the device, in which case fall back
		 * to BLKDEV_IOSIZE rather than dereferencing a NULL dev.
		 */
		dev = vp->v_rdev;
		if (dev == NULL && vp->v_type == VCHR)
			dev = get_dev(vp->v_umajor, vp->v_uminor);
		if (dev != NULL) {
			sb->st_blksize = dev->si_bsize_best;
			if (sb->st_blksize < dev->si_bsize_phys)
				sb->st_blksize = dev->si_bsize_phys;
			if (sb->st_blksize < BLKDEV_IOSIZE)
				sb->st_blksize = BLKDEV_IOSIZE;
		} else {
			sb->st_blksize = BLKDEV_IOSIZE;
		}
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;

	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
	if (error)
		sb->st_gen = 0;
	else
		sb->st_gen = (u_int32_t)vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	sb->st_fsmid = vap->va_fsmid;
	return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vnode *ovp;
	struct vattr vattr;
	int error;

	get_mplock();

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr);
			if (error)
				break;
			*(int *)data = vattr.va_size - fp->f_offset;
			error = 0;
			break;
		}
		if (com == FIOASYNC) {				/* XXX */
			error = 0;				/* XXX */
			break;
		}
		/* fall into ... */
	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK) {
				error = ENOTTY;
				break;
			}
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			error = 0;
			break;
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred);
		if (error == 0 && com == TIOCSCTTY) {
			struct proc *p = curthread->td_proc;
			struct session *sess;

			if (p == NULL) {
				error = ENOTTY;
				break;
			}

			sess = p->p_session;
			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp) {
				error = 0;
				break;
			}

			/* Get rid of reference to old control tty */
			ovp = sess->s_ttyvp;
			vref(vp);
			sess->s_ttyvp = vp;
			if (ovp)
				vrele(ovp);
		}
		break;
	}
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_poll(struct file *fp, int events, struct ucred *cred)
{
	int error;

	get_mplock();
	error = VOP_POLL(((struct vnode *)fp->f_data), events, cred);
	rel_mplock();
	return (error);
}

/*
 * Check that the vnode is still valid, and if so
 * acquire the requested lock.
 */
int
#ifndef	DEBUG_LOCKS
vn_lock(struct vnode *vp, int flags)
#else
debug_vn_lock(struct vnode *vp, int flags, const char *filename, int line)
#endif
{
	int error;

	do {
#ifdef	DEBUG_LOCKS
		vp->filename = filename;
		vp->line = line;
		error = debuglockmgr(&vp->v_lock, flags,
				     "vn_lock", filename, line);
#else
		error = lockmgr(&vp->v_lock, flags);
#endif
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	/*
	 * Because we (had better!) have a ref on the vnode, once it
	 * goes to VRECLAIMED state it will not be recycled until all
	 * refs go away.  So we can just check the flag.
	 */
	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
		lockmgr(&vp->v_lock, LK_RELEASE);
		error = ENOENT;
	}
	return (error);
}

void
vn_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

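/*
 * Example: a minimal sketch of the common lock/unlock pairing (this
 * mirrors vn_close() above).  With LK_RETRY the remaining failure mode
 * is a reclaimed vnode, reported as ENOENT:
 *
 *	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	if (error == 0) {
 *		... operate on the locked vnode ...
 *		vn_unlock(vp);
 *	}
 */
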
int
vn_islocked(struct vnode *vp)
{
	return (lockstatus(&vp->v_lock, curthread));
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_closefile(struct file *fp)
{
	int error;

	get_mplock();
	fp->f_ops = &badfileops;
	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag);
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	get_mplock();
	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
	rel_mplock();
	return (error);
}