/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_vnops.c,v 1.58 2008/06/28 17:59:49 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct ucred *cred, struct sysmsg *msg);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);

struct fileops vnode_fileops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};

/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_OPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success, if fp is NULL, nd->nl_open_vp will hold a referenced
 * and, if requested, locked vnode.  A locked vnode is requested via
 * NLC_LOCKVP.  If fp is non-NULL the vnode will instead be installed
 * in the file pointer.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 * is also installed in the file pointer.
 */
int
vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
{
	struct vnode *vp;
	struct ucred *cred = nd->nl_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int error;
	u_int flags;

	/*
	 * Certain combinations are illegal
	 */
	if ((fmode & (FWRITE | O_TRUNC)) == O_TRUNC)
		return(EACCES);

	/*
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_nch will be returned.
	 *
	 * The result of this section should be a locked vnode.
	 *
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	 */
	nd->nl_flags |= NLC_OPEN;
	if (fmode & O_APPEND)
		nd->nl_flags |= NLC_APPEND;
	if (fmode & O_TRUNC)
		nd->nl_flags |= NLC_TRUNCATE;
	if (fmode & FREAD)
		nd->nl_flags |= NLC_READ;
	if (fmode & FWRITE)
		nd->nl_flags |= NLC_WRITE;
	if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
		nd->nl_flags |= NLC_FOLLOW;

	if (fmode & O_CREAT) {
		/*
		 * CONDITIONAL CREATE FILE CASE
		 *
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		 * is returned.
		 */
		nd->nl_flags |= NLC_CREATE;
		nd->nl_flags |= NLC_REFDVP;
		bwillinode(1);
		error = nlookup(nd);
	} else {
		/*
		 * NORMAL OPEN FILE CASE
		 */
		error = nlookup(nd);
	}

	if (error)
		return (error);

	/*
	 * split case to allow us to re-resolve and retry the ncp in case
	 * we get ESTALE.
	 */
again:
	if (fmode & O_CREAT) {
		if (nd->nl_nch.ncp->nc_vp == NULL) {
			if ((error = ncp_writechk(&nd->nl_nch)) != 0)
				return (error);
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp,
					    nd->nl_cred, vap);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			/* locked vnode is returned */
		} else {
			if (fmode & O_EXCL) {
				error = EEXIST;
			} else {
				error = cache_vget(&nd->nl_nch, cred,
						    LK_EXCLUSIVE, &vp);
			}
			if (error)
				return (error);
			fmode &= ~O_CREAT;
		}
	} else {
		error = cache_vget(&nd->nl_nch, cred, LK_EXCLUSIVE, &vp);
		if (error)
			return (error);
	}

	/*
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_nch is left intact.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp, &nd->nl_nch);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(&nd->nl_nch);
					error = cache_resolve(&nd->nl_nch, cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
		}
	}
	if (fmode & O_TRUNC) {
		vn_unlock(vp);				/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR(vp, vap, cred);
		if (error)
			goto bad;
	}

	/*
	 * Set or clear VSWAPCACHE on the vp based on nd->nl_nch.ncp->nc_flag.
	 * These particular bits are tracked all the way from the root.
	 *
	 * NOTE: Might not work properly on NFS servers due to the
	 * disconnected namecache.
	 */
	flags = nd->nl_nch.ncp->nc_flag;
	if ((flags & (NCF_UF_CACHE | NCF_UF_PCACHE)) &&
	    (flags & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE)) == 0) {
		vsetflags(vp, VSWAPCACHE);
	} else {
		vclrflags(vp, VSWAPCACHE);
	}

	/*
	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
	 * associated with the fp yet so we own it clean.
	 *
	 * f_nchandle inherits nl_nch.  This used to be necessary only for
	 * directories but now we do it unconditionally so f*() ops
	 * such as fchmod() can access the actual namespace that was
	 * used to open the file.
	 */
	if (fp) {
		if (nd->nl_flags & NLC_APPENDONLY)
			fmode |= FAPPENDONLY;
		fp->f_nchandle = nd->nl_nch;
		cache_zero(&nd->nl_nch);
		cache_unlock(&fp->f_nchandle);
	}

	/*
	 * Get rid of nl_nch.  vn_open does not return it (it returns the
	 * vnode or the file pointer).  Note: we can't leave nl_nch locked
	 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
	 * on /dev/ttyd0
	 */
	if (nd->nl_nch.ncp)
		cache_put(&nd->nl_nch);

	error = VOP_OPEN(vp, fmode, cred, fp);
	if (error) {
		/*
		 * Setting f_ops to &badfileops will prevent the descriptor
		 * code from trying to close and release the vnode.  Since
		 * the open failed we do not want to call close.
		 */
		if (fp) {
			fp->f_data = NULL;
			fp->f_ops = &badfileops;
		}
		goto bad;
	}

#if 0
	/*
	 * Assert that VREG files have been setup for vmio.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));
#endif

	/*
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	 */
	if (fp == NULL) {
		nd->nl_open_vp = vp;
		nd->nl_vp_fmode = fmode;
		if ((nd->nl_flags & NLC_LOCKVP) == 0)
			vn_unlock(vp);
	} else {
		vput(vp);
	}
	return (0);
bad:
	if (vp)
		vput(vp);
	return (error);
}
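
/*
 * Example (hypothetical sketch): a caller-side sequence for the
 * vn_open() contract described above, with fp == NULL so the vnode is
 * returned via nd->nl_open_vp.  "path" is an assumed kernel string and
 * error handling is abbreviated.  Whether nlookup_done() disposes of a
 * still-attached nl_open_vp is an assumption here, so the sketch
 * detaches the vnode explicitly before cleanup.
 *
 *	struct nlookupdata nd;
 *	struct vnode *vp = NULL;
 *	int error;
 *
 *	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
 *	if (error == 0)
 *		error = vn_open(&nd, NULL, FREAD, 0);
 *	if (error == 0) {
 *		vp = nd.nl_open_vp;	(referenced; unlocked w/o NLC_LOCKVP)
 *		nd.nl_open_vp = NULL;	(keep the vnode past cleanup)
 *	}
 *	nlookup_done(&nd);
 *	if (error == 0) {
 *		(... use vp ...)
 *		vn_close(vp, FREAD);	(drops the single reference)
 *	}
 */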

int
vn_opendisk(const char *devname, int fmode, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	if (strncmp(devname, "/dev/", 5) == 0)
		devname += 5;
	if ((vp = getsynthvnode(devname)) == NULL) {
		error = ENODEV;
	} else {
		error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL);
		vn_unlock(vp);
		if (error) {
			vrele(vp);
			vp = NULL;
		}
	}
	*vpp = vp;
	return (error);
}

/*
 * Check for write permissions on the specified vnode.  nch may be NULL.
 */
int
vn_writechk(struct vnode *vp, struct nchandle *nch)
{
	/*
	 * If there's shared text associated with the vnode, we can't
	 * allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);

	/*
	 * If the vnode represents a regular file, check the mount
	 * point via the nch.  This may be a different mount point
	 * than the one embedded in the vnode (e.g. nullfs).
	 *
	 * We can still write to non-regular files (e.g. devices)
	 * via read-only mounts.
	 */
	if (nch && nch->ncp && vp->v_type == VREG)
		return (ncp_writechk(nch));
	return (0);
}
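
/*
 * Illustrative (hypothetical) nullfs case for the check above: when the
 * lookup traversed a read-only NULLFS mount layered over a writable
 * filesystem, the nch references the read-only upper mount while
 * vp->v_mount is the writable lower mount, so the flags must be checked
 * via the nch:
 *
 *	error = vn_writechk(vp, nch);	(returns EROFS via ncp_writechk()
 *					 even though vp's own mount is
 *					 writable)
 */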

/*
 * Check whether the underlying mount is read-only.  The mount point
 * referenced by the namecache may be different from the mount point
 * used by the underlying vnode in the case of NULLFS, so a separate
 * check is needed.
 */
int
ncp_writechk(struct nchandle *nch)
{
	if (nch->mount && (nch->mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	return(0);
}

/*
 * Vnode close call
 *
 * MPSAFE
 */
int
vn_close(struct vnode *vp, int flags)
{
	int error;

	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (error == 0) {
		error = VOP_CLOSE(vp, flags);
		vn_unlock(vp);
	}
	vrele(vp);
	return (error);
}

/*
 * Sequential heuristic.
 *
 * MPSAFE (f_seqcount and f_nextoff are allowed to race)
 */
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;

		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 *
	 * NOTE: SMP: We allow f_seqcount updates to race.
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}
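
/*
 * Worked example for the heuristic above (illustrative, assuming
 * BKVASIZE is 16384 on this platform): a reader issuing back-to-back
 * 64KB reads hits the uio_offset == f_nextoff case each time, so every
 * read adds (65536 + 16383) / 16384 = 4 to f_seqcount until it
 * saturates at IO_SEQMAX.  The returned value, f_seqcount <<
 * IO_SEQSHIFT, is merged into ioflag by vn_read()/vn_write() as a
 * read-ahead/clustering hint.  A single non-contiguous offset drops
 * f_seqcount back toward zero.
 */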

/*
 * get - lock and return the f_offset field.
 * set - set and unlock the f_offset field.
 *
 * These routines serve the dual purpose of serializing access to the
 * f_offset field (at least on i386) and guaranteeing operational integrity
 * when multiple read()ers and write()ers are present on the same fp.
 *
 * MPSAFE
 */
static __inline off_t
vn_get_fpf_offset(struct file *fp)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * Shortcut critical path.
	 */
	flags = fp->f_flag & ~FOFFSETLOCK;
	if (atomic_cmpset_int(&fp->f_flag, flags, flags | FOFFSETLOCK))
		return(fp->f_offset);

	/*
	 * The hard way
	 */
	for (;;) {
		flags = fp->f_flag;
		if (flags & FOFFSETLOCK) {
			nflags = flags | FOFFSETWAKE;
			tsleep_interlock(&fp->f_flag, 0);
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				tsleep(&fp->f_flag, PINTERLOCKED, "fpoff", 0);
		} else {
			nflags = flags | FOFFSETLOCK;
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				break;
		}
	}
	return(fp->f_offset);
}

/*
 * MPSAFE
 */
static __inline void
vn_set_fpf_offset(struct file *fp, off_t offset)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * We hold the lock so we can set the offset without interference.
	 */
	fp->f_offset = offset;

	/*
	 * Normal release is already a reasonably critical path.
	 */
	for (;;) {
		flags = fp->f_flag;
		nflags = flags & ~(FOFFSETLOCK | FOFFSETWAKE);
		if (atomic_cmpset_int(&fp->f_flag, flags, nflags)) {
			if (flags & FOFFSETWAKE)
				wakeup(&fp->f_flag);
			break;
		}
	}
}
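
/*
 * Illustration of the pairing expected of callers (a sketch derived
 * from vn_read()/vn_write() below): vn_get_fpf_offset() acquires
 * FOFFSETLOCK and returns the current offset, and vn_set_fpf_offset()
 * publishes the new offset and releases the lock, waking any waiter:
 *
 *	off_t off;
 *
 *	off = vn_get_fpf_offset(fp);	(lock + fetch)
 *	(... perform the I/O, advancing off ...)
 *	vn_set_fpf_offset(fp, off);	(store + unlock + wakeup)
 */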

/*
 * MPSAFE
 */
static __inline off_t
vn_poll_fpf_offset(struct file *fp)
{
#if defined(__x86_64__) || !defined(SMP)
	return(fp->f_offset);
#else
	off_t off = vn_get_fpf_offset(fp);
	vn_set_fpf_offset(fp, off);
	return(off);
#endif
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 *
 * MPSAFE
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid)
{
	struct uio auio;
	struct iovec aiov;
	struct ccms_lock ccms_lock;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = curthread;
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, &auio);
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if (aresid)
		*aresid = auio.uio_resid;
	else if (auio.uio_resid && error == 0)
		error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		vn_unlock(vp);
	return (error);
}
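
/*
 * Example (hypothetical sketch): read the first 512 bytes of a
 * referenced, unlocked vnode into a kernel buffer using the wrapper
 * above.  "vp" and "cred" are assumed to be supplied by the caller.
 *
 *	char buf[512];
 *	int resid;
 *	int error;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0,
 *			UIO_SYSSPACE, 0, cred, &resid);
 *
 * On success, sizeof(buf) - resid bytes were read; a nonzero resid
 * indicates a short read.
 */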

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillread()/bwillwrite() before calling vn_rdwr().  We also
 * call lwkt_user_yield() to give other processes a chance to lock the
 * vnode (either other processes core'ing the same binary, or unrelated
 * processes scanning the directory).
 *
 * MPSAFE
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
		 off_t offset, enum uio_seg segflg, int ioflg,
		 struct ucred *cred, int *aresid)
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (vp->v_type == VREG) {
			switch(rw) {
			case UIO_READ:
				bwillread(chunk);
				break;
			case UIO_WRITE:
				bwillwrite(chunk);
				break;
			}
		}
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
				ioflg, cred, aresid);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		lwkt_user_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}
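
/*
 * Worked example for the chunking above (illustrative, assuming
 * MAXBSIZE is 65536): a write of len = 200000 at offset = 1000 is
 * issued as
 *
 *	chunk 1: 65536 - 1000 = 64536 bytes	(re-aligns the offset)
 *	chunk 2: 65536 bytes
 *	chunk 3: 65536 bytes
 *	chunk 4: 4392 bytes			(the remainder)
 *
 * so every chunk after the first starts on a MAXBSIZE boundary and
 * only the first and last chunks can be partial blocks.
 */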

/*
 * File pointers can no longer get ripped up by revoke so
 * we don't need to lock access to the vp.
 *
 * f_offset updates are not guaranteed against multiple readers
 *
 * MPSAFE
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = 0;
	if (flags & O_FRNONBLOCKING) {
		ioflag |= (IO_NDELAY | IO_NRDELAY);
	} else if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_SHARED | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);

	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	error = VOP_READ(vp, uio, ioflag, cred);
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = vn_get_fpf_offset(fp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	ioflag |= sequential_heuristic(uio, fp);
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	if ((flags & O_FOFFSET) == 0)
		vn_set_fpf_offset(fp, uio->uio_offset);
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	return (error);
}

/*
 * MPSAFE
 */
int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
	struct vattr vattr;
	struct vattr *vap;
	int error;
	u_short mode;
	cdev_t dev;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare1 = 0;
	sb->st_qspare2 = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDATABASE:
		mode |= S_IFDB;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/* This is a cosmetic change, symlinks do not have a mode. */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = dev2udev(vp->v_rdev);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		dev = vp->v_rdev;
		if (dev != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = dev->si_lastread;
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = dev->si_lastwrite;
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Default to PAGE_SIZE after much discussion.
	 */

	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		/*
		 * XXX this is broken.  If the device is not yet open (aka
		 * stat() call, aka v_rdev == NULL), how are we supposed
		 * to get a valid block size out of it?
		 */
		dev = vp->v_rdev;

		sb->st_blksize = dev->si_bsize_best;
		if (sb->st_blksize < dev->si_bsize_phys)
			sb->st_blksize = dev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;

	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
	if (error)
		sb->st_gen = 0;
	else
		sb->st_gen = (u_int32_t)vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred,
	 struct sysmsg *msg)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vnode *ovp;
	struct vattr vattr;
	int error;
	off_t size;

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr);
			if (error)
				break;
			size = vattr.va_size;
			if ((vp->v_flag & VNOTSEEKABLE) == 0)
				size -= vn_poll_fpf_offset(fp);
			if (size > 0x7FFFFFFF)
				size = 0x7FFFFFFF;
			*(int *)data = size;
			error = 0;
			break;
		}
		if (com == FIOASYNC) {				/* XXX */
			error = 0;				/* XXX */
			break;
		}
		/* fall into ... */
	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK) {
				error = ENOTTY;
				break;
			}
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			error = 0;
			break;
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred, msg);
		if (error == 0 && com == TIOCSCTTY) {
			struct proc *p = curthread->td_proc;
			struct session *sess;

			if (p == NULL) {
				error = ENOTTY;
				break;
			}

			get_mplock();
			sess = p->p_session;
			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp) {
				error = 0;
				rel_mplock();
				break;
			}

			/* Get rid of reference to old control tty */
			ovp = sess->s_ttyvp;
			vref(vp);
			sess->s_ttyvp = vp;
			if (ovp)
				vrele(ovp);
			rel_mplock();
		}
		break;
	}
	return (error);
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
#ifndef	DEBUG_LOCKS
vn_lock(struct vnode *vp, int flags)
#else
debug_vn_lock(struct vnode *vp, int flags, const char *filename, int line)
#endif
{
	int error;

	do {
#ifdef	DEBUG_LOCKS
		vp->filename = filename;
		vp->line = line;
		error = debuglockmgr(&vp->v_lock, flags,
				     "vn_lock", filename, line);
#else
		error = lockmgr(&vp->v_lock, flags);
#endif
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	/*
	 * Because we (had better!) have a ref on the vnode, once it
	 * goes to VRECLAIMED state it will not be recycled until all
	 * refs go away.  So we can just check the flag.
	 */
	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
		lockmgr(&vp->v_lock, LK_RELEASE);
		error = ENOENT;
	}
	return (error);
}
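
/*
 * Illustration (sketch): because vn_lock() re-checks VRECLAIMED after
 * acquiring the lock, a caller holding only a reference can detect a
 * dead vnode at lock time.  With LK_RETRY the only error returned is
 * ENOENT:
 *
 *	if (vn_lock(vp, LK_EXCLUSIVE | LK_RETRY) == 0) {
 *		(... operate on the still-valid vnode ...)
 *		vn_unlock(vp);
 *	} else {
 *		(ENOENT: the vnode was reclaimed out from under us)
 *	}
 */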

#ifdef DEBUG_VN_UNLOCK

void
debug_vn_unlock(struct vnode *vp, const char *filename, int line)
{
	kprintf("vn_unlock from %s:%d\n", filename, line);
	lockmgr(&vp->v_lock, LK_RELEASE);
}

#else

void
vn_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

#endif

/*
 * MPSAFE
 */
int
vn_islocked(struct vnode *vp)
{
	return (lockstatus(&vp->v_lock, curthread));
}

/*
 * Return the lock status of a vnode and unlock the vnode
 * if we owned the lock.  This is not a boolean; if the
 * caller cares what the lock status is, the caller must
 * check the various possible values.
 *
 * This only unlocks exclusive locks held by the caller,
 * it will NOT unlock shared locks (there is no way to
 * tell who the shared lock belongs to).
 *
 * MPSAFE
 */
int
vn_islocked_unlock(struct vnode *vp)
{
	int vpls;

	vpls = lockstatus(&vp->v_lock, curthread);
	if (vpls == LK_EXCLUSIVE)
		lockmgr(&vp->v_lock, LK_RELEASE);
	return(vpls);
}

/*
 * Restore a vnode lock that we previously released via
 * vn_islocked_unlock().  This is a NOP if we did not
 * own the original lock.
 *
 * MPSAFE
 */
void
vn_islocked_relock(struct vnode *vp, int vpls)
{
	int error;

	if (vpls == LK_EXCLUSIVE)
		error = lockmgr(&vp->v_lock, vpls);
}

/*
 * MPSAFE
 */
static int
vn_closefile(struct file *fp)
{
	int error;

	fp->f_ops = &badfileops;
	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag);
	return (error);
}

/*
 * MPSAFE
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
	return (error);
}