/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_vnops.c,v 1.58 2008/06/28 17:59:49 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct ucred *cred, struct sysmsg *msg);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);

struct fileops vnode_fileops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};
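
/*
 * Illustrative note (added by the editor, not part of the original
 * source): the table above is what the descriptor layer dispatches
 * through once a vnode has been installed in a file pointer.  A
 * read(2) on a vnode-backed descriptor ends up doing roughly:
 *
 *	error = fp->f_ops->fo_read(fp, &auio, fp->f_cred, 0);
 *
 * which resolves to vn_read() below.  On open failure vn_open()
 * instead points f_ops at badfileops so no vnode operation can be
 * reached through a half-constructed fp.
 */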

/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_OPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 * is also installed in the file pointer.
 */
int
vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
{
	struct vnode *vp;
	struct ucred *cred = nd->nl_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int error;
	u_int flags;
	uint64_t osize;
	struct mount *mp;

	/*
	 * Certain combinations are illegal
	 */
	if ((fmode & (FWRITE | O_TRUNC)) == O_TRUNC)
		return(EACCES);

	/*
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_nch will be returned.
	 *
	 * The result of this section should be a locked vnode.
	 *
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	 */
	nd->nl_flags |= NLC_OPEN;
	if (fmode & O_APPEND)
		nd->nl_flags |= NLC_APPEND;
	if (fmode & O_TRUNC)
		nd->nl_flags |= NLC_TRUNCATE;
	if (fmode & FREAD)
		nd->nl_flags |= NLC_READ;
	if (fmode & FWRITE)
		nd->nl_flags |= NLC_WRITE;
	if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
		nd->nl_flags |= NLC_FOLLOW;

	if (fmode & O_CREAT) {
		/*
		 * CONDITIONAL CREATE FILE CASE
		 *
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		 * is returned.
		 */
		nd->nl_flags |= NLC_CREATE;
		nd->nl_flags |= NLC_REFDVP;
		bwillinode(1);
		error = nlookup(nd);
	} else {
		/*
		 * NORMAL OPEN FILE CASE
		 */
		error = nlookup(nd);
	}

	if (error)
		return (error);

	/*
	 * split case to allow us to re-resolve and retry the ncp in case
	 * we get ESTALE.
	 */
again:
	if (fmode & O_CREAT) {
		if (nd->nl_nch.ncp->nc_vp == NULL) {
			if ((error = ncp_writechk(&nd->nl_nch)) != 0)
				return (error);
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp,
					    nd->nl_cred, vap);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			/* locked vnode is returned */
		} else {
			if (fmode & O_EXCL) {
				error = EEXIST;
			} else {
				error = cache_vget(&nd->nl_nch, cred,
						    LK_EXCLUSIVE, &vp);
			}
			if (error)
				return (error);
			fmode &= ~O_CREAT;
		}
	} else {
		error = cache_vget(&nd->nl_nch, cred, LK_EXCLUSIVE, &vp);
		if (error)
			return (error);
	}

	/*
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_nch is left intact.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp, &nd->nl_nch);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(&nd->nl_nch);
					error = cache_resolve(&nd->nl_nch, cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
		}
	}
	if (fmode & O_TRUNC) {
		vn_unlock(vp);				/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		osize = vp->v_filesize;
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR(vp, vap, cred);
		if (error)
			goto bad;
		error = VOP_GETATTR(vp, vap);
		if (error)
			goto bad;
		mp = vq_vptomp(vp);
		VFS_ACCOUNT(mp, vap->va_uid, vap->va_gid, -osize);
	}

	/*
	 * Set or clear VSWAPCACHE on the vp based on nd->nl_nch.ncp->nc_flag.
	 * These particular bits are tracked all the way from the root.
	 *
	 * NOTE: Might not work properly on NFS servers due to the
	 * disconnected namecache.
	 */
	flags = nd->nl_nch.ncp->nc_flag;
	if ((flags & (NCF_UF_CACHE | NCF_UF_PCACHE)) &&
	    (flags & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE)) == 0) {
		vsetflags(vp, VSWAPCACHE);
	} else {
		vclrflags(vp, VSWAPCACHE);
	}

	/*
	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
	 * associated with the fp yet so we own it clean.
	 *
	 * f_nchandle inherits nl_nch.  This used to be necessary only for
	 * directories but now we do it unconditionally so f*() ops
	 * such as fchmod() can access the actual namespace that was
	 * used to open the file.
	 */
	if (fp) {
		if (nd->nl_flags & NLC_APPENDONLY)
			fmode |= FAPPENDONLY;
		fp->f_nchandle = nd->nl_nch;
		cache_zero(&nd->nl_nch);
		cache_unlock(&fp->f_nchandle);
	}

	/*
	 * Get rid of nl_nch.  vn_open does not return it (it returns the
	 * vnode or the file pointer).  Note: we can't leave nl_nch locked
	 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
	 * on /dev/ttyd0.
	 */
	if (nd->nl_nch.ncp)
		cache_put(&nd->nl_nch);

	error = VOP_OPEN(vp, fmode, cred, fp);
	if (error) {
		/*
		 * Setting f_ops to &badfileops prevents the descriptor
		 * code from trying to close and release the vnode.  Since
		 * the open failed we do not want to call close.
		 */
		if (fp) {
			fp->f_data = NULL;
			fp->f_ops = &badfileops;
		}
		goto bad;
	}

#if 0
	/*
	 * Assert that VREG files have been setup for vmio.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));
#endif

	/*
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	 */
	if (fp == NULL) {
		nd->nl_open_vp = vp;
		nd->nl_vp_fmode = fmode;
		if ((nd->nl_flags & NLC_LOCKVP) == 0)
			vn_unlock(vp);
	} else {
		vput(vp);
	}
	return (0);
bad:
	if (vp)
		vput(vp);
	return (error);
}
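
/*
 * Usage sketch (hypothetical, added for illustration): kernel callers
 * that want the vnode rather than a file pointer follow roughly this
 * pattern, detaching nl_open_vp before nlookup_done() so the cleanup
 * code does not close the vnode out from under them:
 *
 *	struct nlookupdata nd;
 *	struct vnode *vp = NULL;
 *	int error;
 *
 *	error = nlookup_init(&nd, "/some/path", UIO_SYSSPACE, NLC_FOLLOW);
 *	if (error == 0)
 *		error = vn_open(&nd, NULL, FREAD, 0);
 *	if (error == 0) {
 *		vp = nd.nl_open_vp;
 *		nd.nl_open_vp = NULL;
 *	}
 *	nlookup_done(&nd);
 *
 * Note nlookup_done() is called whether or not vn_open() succeeded,
 * matching the contract described in the function header above.
 */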

int
vn_opendisk(const char *devname, int fmode, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	if (strncmp(devname, "/dev/", 5) == 0)
		devname += 5;
	if ((vp = getsynthvnode(devname)) == NULL) {
		error = ENODEV;
	} else {
		error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL);
		vn_unlock(vp);
		if (error) {
			vrele(vp);
			vp = NULL;
		}
	}
	*vpp = vp;
	return (error);
}

/*
 * Check for write permissions on the specified vnode.  nch may be NULL.
 */
int
vn_writechk(struct vnode *vp, struct nchandle *nch)
{
	/*
	 * If there's shared text associated with
	 * the vnode, we can't allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);

	/*
	 * If the vnode represents a regular file, check the mount
	 * point via the nch.  This may be a different mount point
	 * than the one embedded in the vnode (e.g. nullfs).
	 *
	 * We can still write to non-regular files (e.g. devices)
	 * via read-only mounts.
	 */
	if (nch && nch->ncp && vp->v_type == VREG)
		return (ncp_writechk(nch));
	return (0);
}

/*
 * Check whether the underlying mount is read-only.  The mount point
 * referenced by the namecache may be different from the mount point
 * used by the underlying vnode in the case of NULLFS, so a separate
 * check is needed.
 */
int
ncp_writechk(struct nchandle *nch)
{
	if (nch->mount && (nch->mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	return (0);
}

/*
 * Vnode close call
 *
 * MPSAFE
 */
int
vn_close(struct vnode *vp, int flags)
{
	int error;

	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (error == 0) {
		error = VOP_CLOSE(vp, flags);
		vn_unlock(vp);
	}
	vrele(vp);
	return (error);
}

/*
 * Sequential heuristic.
 *
 * MPSAFE (f_seqcount and f_nextoff are allowed to race)
 */
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;

		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}
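
/*
 * Worked example (illustrative, assuming the common BKVASIZE of 16KB):
 * a process issuing back-to-back 64KB reads advances f_seqcount by
 * (65536 + 16383) / 16384 = 4 per call until it saturates at IO_SEQMAX.
 * The value handed back to the caller, f_seqcount shifted left by
 * IO_SEQSHIFT, lands in the ioflag bits where filesystems read it back
 * to scale read-ahead.  Any non-contiguous offset drops the count to at
 * most 1, so a single random seek mostly resets the heuristic.
 */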

/*
 * get - lock and return the f_offset field.
 * set - set and unlock the f_offset field.
 *
 * These routines serve the dual purpose of serializing access to the
 * f_offset field (at least on i386) and guaranteeing operational integrity
 * when multiple read()ers and write()ers are present on the same fp.
 *
 * MPSAFE
 */
static __inline off_t
vn_get_fpf_offset(struct file *fp)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * Shortcut critical path.
	 */
	flags = fp->f_flag & ~FOFFSETLOCK;
	if (atomic_cmpset_int(&fp->f_flag, flags, flags | FOFFSETLOCK))
		return(fp->f_offset);

	/*
	 * The hard way
	 */
	for (;;) {
		flags = fp->f_flag;
		if (flags & FOFFSETLOCK) {
			nflags = flags | FOFFSETWAKE;
			tsleep_interlock(&fp->f_flag, 0);
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				tsleep(&fp->f_flag, PINTERLOCKED, "fpoff", 0);
		} else {
			nflags = flags | FOFFSETLOCK;
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				break;
		}
	}
	return(fp->f_offset);
}

/*
 * MPSAFE
 */
static __inline void
vn_set_fpf_offset(struct file *fp, off_t offset)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * We hold the lock so we can set the offset without interference.
	 */
	fp->f_offset = offset;

	/*
	 * Normal release is already a reasonably critical path.
	 */
	for (;;) {
		flags = fp->f_flag;
		nflags = flags & ~(FOFFSETLOCK | FOFFSETWAKE);
		if (atomic_cmpset_int(&fp->f_flag, flags, nflags)) {
			if (flags & FOFFSETWAKE)
				wakeup(&fp->f_flag);
			break;
		}
	}
}

/*
 * MPSAFE
 */
static __inline off_t
vn_poll_fpf_offset(struct file *fp)
{
#if defined(__x86_64__) || !defined(SMP)
	return(fp->f_offset);
#else
	off_t off = vn_get_fpf_offset(fp);
	vn_set_fpf_offset(fp, off);
	return(off);
#endif
}
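
/*
 * Illustrative note (not from the original source): the get/set pair
 * acts as a tiny hand-rolled mutex embedded in f_flag, so the typical
 * caller brackets an I/O like this:
 *
 *	uio->uio_offset = vn_get_fpf_offset(fp);  (acquires FOFFSETLOCK)
 *	... perform the VOP_READ/VOP_WRITE ...
 *	vn_set_fpf_offset(fp, uio->uio_offset);	  (stores and releases)
 *
 * vn_poll_fpf_offset() exists only because a 64-bit f_offset cannot be
 * loaded atomically on 32-bit SMP hardware; on x86_64 a plain load is
 * already atomic, hence the #ifdef above.
 */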

/*
 * Package up an I/O request on a vnode into a uio and do it.
 *
 * MPSAFE
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid)
{
	struct uio auio;
	struct iovec aiov;
	struct ccms_lock ccms_lock;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = curthread;
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, &auio);
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		vn_unlock(vp);
	return (error);
}
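
/*
 * Usage sketch (hypothetical): reading the first 512 bytes of an
 * already-opened, unlocked vnode into a kernel buffer.  Passing a NULL
 * aresid turns a short read into EIO, per the code above:
 *
 *	char buf[512];
 *	int error;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0,
 *			UIO_SYSSPACE, 0, cred, NULL);
 *
 * A caller that already holds the vnode lock passes IO_NODELOCKED in
 * ioflg to suppress the internal vn_lock()/vn_unlock() pair.
 */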

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillread()/bwillwrite() before calling vn_rdwr().  We also call
 * lwkt_user_yield() to give other processes a chance to lock the vnode
 * (either other processes core'ing the same binary, or unrelated
 * processes scanning the directory).
 *
 * MPSAFE
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
		 off_t offset, enum uio_seg segflg, int ioflg,
		 struct ucred *cred, int *aresid)
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (vp->v_type == VREG) {
			switch(rw) {
			case UIO_READ:
				bwillread(chunk);
				break;
			case UIO_WRITE:
				bwillwrite(chunk);
				break;
			}
		}
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
				ioflg, cred, aresid);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		lwkt_user_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}
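
/*
 * Worked example (illustrative, with the usual MAXBSIZE of 64KB): a
 * 200000 byte write starting at offset 1000 is carved into chunks of
 * 64536 (aligning the offset up to 65536), then 65536, 65536, and a
 * final 4392 bytes, so every chunk after the first starts on a
 * MAXBSIZE boundary.
 */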

/*
 * File pointers can no longer get ripped up by revoke so
 * we don't need to lock access to the vp.
 *
 * f_offset updates are not guaranteed against multiple readers
 *
 * MPSAFE
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = 0;
	if (flags & O_FRNONBLOCKING) {
		ioflag |= (IO_NDELAY | IO_NRDELAY);
	} else if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_SHARED | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);

	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	error = VOP_READ(vp, uio, ioflag, cred);
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	return (error);
}

/*
 * MPSAFE
 */
int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
	struct vattr vattr;
	struct vattr *vap;
	int error;
	u_short mode;
	cdev_t dev;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare1 = 0;
	sb->st_qspare2 = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDATABASE:
		mode |= S_IFDB;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/*
		 * This is a cosmetic change, symlinks do not have a mode.
		 * Note we must adjust the local 'mode', not sb->st_mode,
		 * since sb->st_mode is assigned from 'mode' below.
		 */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = dev2udev(vp->v_rdev);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		dev = vp->v_rdev;
		if (dev != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = dev->si_lastread;
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = dev->si_lastwrite;
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Default to PAGE_SIZE after much discussion.
	 */

	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		/*
		 * XXX this is broken.  If the device is not yet open (aka
		 * stat() call, aka v_rdev == NULL), how are we supposed
		 * to get a valid block size out of it?
		 */
		dev = vp->v_rdev;

		sb->st_blksize = dev->si_bsize_best;
		if (sb->st_blksize < dev->si_bsize_phys)
			sb->st_blksize = dev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;

	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
	if (error)
		sb->st_gen = 0;
	else
		sb->st_gen = (u_int32_t)vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	return (0);
}
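
/*
 * Note (added for clarity): S_BLKSIZE is 512, so st_blocks above is
 * reported in the traditional 512-byte units regardless of the
 * filesystem's actual allocation block size.
 */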

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred,
	 struct sysmsg *msg)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vnode *ovp;
	struct vattr vattr;
	int error;
	off_t size;

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr);
			if (error)
				break;
			size = vattr.va_size;
			if ((vp->v_flag & VNOTSEEKABLE) == 0)
				size -= vn_poll_fpf_offset(fp);
			if (size > 0x7FFFFFFF)
				size = 0x7FFFFFFF;
			*(int *)data = size;
			error = 0;
			break;
		}
		if (com == FIOASYNC) {				/* XXX */
			error = 0;				/* XXX */
			break;
		}
		/* fall into ... */
	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK) {
				error = ENOTTY;
				break;
			}
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			error = 0;
			break;
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred, msg);
		if (error == 0 && com == TIOCSCTTY) {
			struct proc *p = curthread->td_proc;
			struct session *sess;

			if (p == NULL) {
				error = ENOTTY;
				break;
			}

			get_mplock();
			sess = p->p_session;
			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp) {
				error = 0;
				rel_mplock();
				break;
			}

			/* Get rid of reference to old control tty */
			ovp = sess->s_ttyvp;
			vref(vp);
			sess->s_ttyvp = vp;
			if (ovp)
				vrele(ovp);
			rel_mplock();
		}
		break;
	}
	return (error);
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
#ifndef	DEBUG_LOCKS
vn_lock(struct vnode *vp, int flags)
#else
debug_vn_lock(struct vnode *vp, int flags, const char *filename, int line)
#endif
{
	int error;

	do {
#ifdef	DEBUG_LOCKS
		vp->filename = filename;
		vp->line = line;
		error = debuglockmgr(&vp->v_lock, flags,
				     "vn_lock", filename, line);
#else
		error = lockmgr(&vp->v_lock, flags);
#endif
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	/*
	 * Because we (had better!) have a ref on the vnode, once it
	 * goes to VRECLAIMED state it will not be recycled until all
	 * refs go away.  So we can just check the flag.
	 */
	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
		lockmgr(&vp->v_lock, LK_RELEASE);
		error = ENOENT;
	}
	return (error);
}
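
/*
 * Usage sketch (hypothetical): the usual pattern with LK_RETRY, which
 * loops until the lock is obtained but can still fail with ENOENT if
 * the vnode was reclaimed while we slept:
 *
 *	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	if (error == 0) {
 *		... operate on the locked, still-valid vnode ...
 *		vn_unlock(vp);
 *	}
 *
 * The caller must already hold a reference (vref/vget) on vp, per the
 * comment above about VRECLAIMED.
 */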

#ifdef DEBUG_VN_UNLOCK

void
debug_vn_unlock(struct vnode *vp, const char *filename, int line)
{
	kprintf("vn_unlock from %s:%d\n", filename, line);
	lockmgr(&vp->v_lock, LK_RELEASE);
}

#else

void
vn_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

#endif

/*
 * MPSAFE
 */
int
vn_islocked(struct vnode *vp)
{
	return (lockstatus(&vp->v_lock, curthread));
}

/*
 * Return the lock status of a vnode and unlock the vnode
 * if we owned the lock.  This is not a boolean; if the
 * caller cares what the lock status is, the caller must
 * check the various possible values.
 *
 * This only unlocks exclusive locks held by the caller;
 * it will NOT unlock shared locks (there is no way to
 * tell who the shared lock belongs to).
 *
 * MPSAFE
 */
int
vn_islocked_unlock(struct vnode *vp)
{
	int vpls;

	vpls = lockstatus(&vp->v_lock, curthread);
	if (vpls == LK_EXCLUSIVE)
		lockmgr(&vp->v_lock, LK_RELEASE);
	return(vpls);
}

/*
 * Restore a vnode lock that we previously released via
 * vn_islocked_unlock().  This is a NOP if we did not
 * own the original lock.
 *
 * MPSAFE
 */
void
vn_islocked_relock(struct vnode *vp, int vpls)
{
	int error;

	if (vpls == LK_EXCLUSIVE)
		error = lockmgr(&vp->v_lock, vpls);
}
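
/*
 * Usage sketch (hypothetical): the unlock/relock pair lets a caller
 * temporarily drop an exclusive vnode lock around a blocking call and
 * transparently reacquire it, without having to know whether it held
 * the lock in the first place:
 *
 *	int vpls;
 *
 *	vpls = vn_islocked_unlock(vp);
 *	... block without holding the vnode lock ...
 *	vn_islocked_relock(vp, vpls);
 */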

/*
 * MPSAFE
 */
static int
vn_closefile(struct file *fp)
{
	int error;

	fp->f_ops = &badfileops;
	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag);
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
	return (error);
}