xref: /netbsd/sys/kern/vfs_vnops.c (revision c4a72b64)
1 /*	$NetBSD: vfs_vnops.c,v 1.59 2002/12/06 22:44:50 christos Exp $	*/
2 
3 /*
4  * Copyright (c) 1982, 1986, 1989, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  * (c) UNIX System Laboratories, Inc.
7  * All or some portions of this file are derived from material licensed
8  * to the University of California by American Telephone and Telegraph
9  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10  * the permission of UNIX System Laboratories, Inc.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  *	@(#)vfs_vnops.c	8.14 (Berkeley) 6/15/95
41  */
42 
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: vfs_vnops.c,v 1.59 2002/12/06 22:44:50 christos Exp $");
45 
46 #include "fs_union.h"
47 
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/kernel.h>
51 #include <sys/file.h>
52 #include <sys/stat.h>
53 #include <sys/buf.h>
54 #include <sys/proc.h>
55 #include <sys/mount.h>
56 #include <sys/namei.h>
57 #include <sys/vnode.h>
58 #include <sys/ioctl.h>
59 #include <sys/tty.h>
60 #include <sys/poll.h>
61 
62 #include <uvm/uvm_extern.h>
63 
64 #ifdef UNION
65 #include <miscfs/union/union.h>
66 #endif
67 #include <sys/verified_exec.h>
68 
/* Veriexec fingerprint tables: one for executables, one for plain files. */
extern LIST_HEAD(veriexec_devhead, veriexec_dev_list) veriexec_dev_head;
extern struct veriexec_devhead veriexec_file_dev_head;

/* Forward declarations for the vnode-backed file-table operations. */
static int vn_read(struct file *fp, off_t *offset, struct uio *uio,
	    struct ucred *cred, int flags);
static int vn_write(struct file *fp, off_t *offset, struct uio *uio,
	    struct ucred *cred, int flags);
static int vn_closefile(struct file *fp, struct proc *p);
static int vn_poll(struct file *fp, int events, struct proc *p);
static int vn_fcntl(struct file *fp, u_int com, caddr_t data, struct proc *p);
static int vn_statfile(struct file *fp, struct stat *sb, struct proc *p);
static int vn_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p);

/* fileops vector installed in struct file for vnode-backed descriptors. */
struct 	fileops vnops = {
	vn_read, vn_write, vn_ioctl, vn_fcntl, vn_poll,
	vn_statfile, vn_closefile, vn_kqfilter
};
86 
/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * On success the opened vnode is returned locked in ndp->ni_vp
 * (namei() is invoked with LOCKLEAF); on failure the vnode is
 * released with vput() and an errno value is returned.
 */
int
vn_open(ndp, fmode, cmode)
	struct nameidata *ndp;
	int fmode, cmode;
{
	struct vnode *vp;
	struct proc *p = ndp->ni_cnd.cn_proc;
	struct ucred *cred = p->p_ucred;
	struct vattr va;
	int error;
#ifdef VERIFIED_EXEC
	char got_dev;
	struct veriexec_inode_list *veriexec_node;
	char fingerprint[MAXFINGERPRINTLEN];
#endif

	if (fmode & O_CREAT) {
		/* Creating: lock both the parent directory and the leaf. */
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
		/* Follow a trailing symlink only for non-exclusive creates. */
		if ((fmode & O_EXCL) == 0 &&
		    ((fmode & O_NOFOLLOW) == 0))
			ndp->ni_cnd.cn_flags |= FOLLOW;
		if ((error = namei(ndp)) != 0)
			return (error);
		if (ndp->ni_vp == NULL) {
			/* No existing file: create a regular file. */
			VATTR_NULL(&va);
			va.va_type = VREG;
			va.va_mode = cmode;
			if (fmode & O_EXCL)
				 va.va_vaflags |= VA_EXCLUSIVE;
			VOP_LEASE(ndp->ni_dvp, p, cred, LEASE_WRITE);
			error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
					   &ndp->ni_cnd, &va);
			if (error)
				return (error);
			/* A freshly created file needs no truncation. */
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			/* The file already exists: abandon the create. */
			VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			/* A symlink that was not followed cannot be opened. */
			if (ndp->ni_vp->v_type == VLNK) {
				error = EFTYPE;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		/* Plain open: ordinary lookup, following symlinks. */
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags = FOLLOW | LOCKLEAF;
		if ((error = namei(ndp)) != 0)
			return (error);
		vp = ndp->ni_vp;
	}
	/* Sockets cannot be opened through the file system. */
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}

#ifdef VERIFIED_EXEC
	veriexec_node = NULL;

	/* Veriexec needs va_fsid/va_fileid/va_size below. */
	if ((error = VOP_GETATTR(vp, &va, cred, p)) != 0)
		goto bad;
#endif

	if ((fmode & O_CREAT) == 0) {
#ifdef VERIFIED_EXEC
		  /*
		   * Look for the file on the fingerprint lists iff
		   * it has not been seen before.
		   */
		if ((vp->fp_status == FINGERPRINT_INVALID) ||
		    (vp->fp_status == FINGERPRINT_NODEV)) {
			  /* check the file list for the finger print */
			veriexec_node = get_veriexec_inode(&veriexec_file_dev_head,
						     va.va_fsid,
						     va.va_fileid,
						     &got_dev);
			if (veriexec_node == NULL) {
				/* failing that, check the exec list */
				veriexec_node = get_veriexec_inode(
					&veriexec_dev_head, va.va_fsid,
					va.va_fileid, &got_dev);
			}

			/*
			 * A list exists for this device but holds no entry
			 * for this file: remember that so the lookup is not
			 * repeated on every open.
			 */
			if ((veriexec_node == NULL) && (got_dev == 1))
				vp->fp_status = FINGERPRINT_NOENTRY;

			if (veriexec_node != NULL) {
				if ((error = evaluate_fingerprint(vp,
						veriexec_node, p, va.va_size,
						fingerprint)) != 0)
					goto bad;

				if (fingerprintcmp(veriexec_node,
						   fingerprint) == 0) {
					  /* fingerprint ok */
					vp->fp_status =	FINGERPRINT_VALID;
#ifdef VERIFIED_EXEC_DEBUG
					printf(
			"file fingerprint matches for dev %lu, file %lu\n",
						va.va_fsid, va.va_fileid);
#endif
				} else {
					vp->fp_status =	FINGERPRINT_NOMATCH;
				}
			}
		}
#endif

		if (fmode & FREAD) {
			if ((error = VOP_ACCESS(vp, VREAD, cred, p)) != 0)
				goto bad;

#ifdef VERIFIED_EXEC
				/* file is on finger print list */
			if (vp->fp_status == FINGERPRINT_NOMATCH) {
				  /* fingerprint bad */
				printf(
		"file fingerprint does not match on dev %lu, file %lu\n",
					va.va_fsid, va.va_fileid);
				/* Mismatch is fatal only at securelevel > 2. */
				if (securelevel > 2) {
					error = EPERM;
					goto bad;
				}
			}
#endif
		}
		if (fmode & (FWRITE | O_TRUNC)) {
			/* Directories may never be opened for writing. */
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			if ((error = vn_writechk(vp)) != 0 ||
			    (error = VOP_ACCESS(vp, VWRITE, cred, p)) != 0)
				goto bad;
#ifdef VERIFIED_EXEC
			  /*
			   * If file has a fingerprint then
			   * deny the write request, otherwise
			   * invalidate the status so we don't
			   * keep checking for the file having
			   * a fingerprint.
			   */
			if (vp->fp_status == FINGERPRINT_VALID) {
				printf(
		      "writing to fingerprinted file for dev %lu, file %lu\n",
		      va.va_fsid, va.va_fileid);
				if (securelevel > 2) {
					error = EPERM;
					goto bad;
				} else {
					vp->fp_status =	FINGERPRINT_INVALID;
				}
			}
#endif
		}
	}
	if (fmode & O_TRUNC) {
		/* Truncate to zero length; relock exclusively for setattr. */
		VOP_UNLOCK(vp, 0);			/* XXX */
		VOP_LEASE(vp, p, cred, LEASE_WRITE);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		VATTR_NULL(&va);
		va.va_size = 0;
		if ((error = VOP_SETATTR(vp, &va, cred, p)) != 0)
			goto bad;
	}
	if ((error = VOP_OPEN(vp, fmode, cred, p)) != 0)
		goto bad;
	/* Attach the UVM object for regular files; NULL means failure. */
	if (vp->v_type == VREG &&
	    uvn_attach(vp, fmode & FWRITE ? VM_PROT_WRITE : 0) == NULL) {
		error = EIO;
		goto bad;
	}
	/* Account the writer so vn_writechk()/vn_marktext() can see it. */
	if (fmode & FWRITE)
		vp->v_writecount++;

	return (0);
bad:
	/* Unlock and release the vnode on any failure path. */
	vput(vp);
	return (error);
}
282 
283 /*
284  * Check for write permissions on the specified vnode.
285  * Prototype text segments cannot be written.
286  */
287 int
288 vn_writechk(vp)
289 	struct vnode *vp;
290 {
291 
292 	/*
293 	 * If the vnode is in use as a process's text,
294 	 * we can't allow writing.
295 	 */
296 	if (vp->v_flag & VTEXT)
297 		return (ETXTBSY);
298 	return (0);
299 }
300 
301 /*
302  * Mark a vnode as having executable mappings.
303  */
304 void
305 vn_markexec(vp)
306 	struct vnode *vp;
307 {
308 	if ((vp->v_flag & VEXECMAP) == 0) {
309 		uvmexp.filepages -= vp->v_uobj.uo_npages;
310 		uvmexp.execpages += vp->v_uobj.uo_npages;
311 	}
312 	vp->v_flag |= VEXECMAP;
313 }
314 
315 /*
316  * Mark a vnode as being the text of a process.
317  * Fail if the vnode is currently writable.
318  */
319 int
320 vn_marktext(vp)
321 	struct vnode *vp;
322 {
323 
324 	if (vp->v_writecount != 0) {
325 		KASSERT((vp->v_flag & VTEXT) == 0);
326 		return (ETXTBSY);
327 	}
328 	vp->v_flag |= VTEXT;
329 	vn_markexec(vp);
330 	return (0);
331 }
332 
/*
 * Vnode close call
 *
 * Note: takes an unlocked vnode, while VOP_CLOSE takes a locked node.
 */
int
vn_close(vp, flags, cred, p)
	struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
{
	int error;

	/* Drop the writer reference taken by vn_open() for FWRITE opens. */
	if (flags & FWRITE)
		vp->v_writecount--;
	/* VOP_CLOSE wants a locked vnode; vput() unlocks and releases it. */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_CLOSE(vp, flags, cred, p);
	vput(vp);
	return (error);
}
354 
355 /*
356  * Package up an I/O request on a vnode into a uio and do it.
357  */
358 int
359 vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, p)
360 	enum uio_rw rw;
361 	struct vnode *vp;
362 	caddr_t base;
363 	int len;
364 	off_t offset;
365 	enum uio_seg segflg;
366 	int ioflg;
367 	struct ucred *cred;
368 	size_t *aresid;
369 	struct proc *p;
370 {
371 	struct uio auio;
372 	struct iovec aiov;
373 	int error;
374 
375 	if ((ioflg & IO_NODELOCKED) == 0) {
376 		if (rw == UIO_READ) {
377 			vn_lock(vp, LK_SHARED | LK_RETRY);
378 		} else {
379 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
380 		}
381 	}
382 	auio.uio_iov = &aiov;
383 	auio.uio_iovcnt = 1;
384 	aiov.iov_base = base;
385 	aiov.iov_len = len;
386 	auio.uio_resid = len;
387 	auio.uio_offset = offset;
388 	auio.uio_segflg = segflg;
389 	auio.uio_rw = rw;
390 	auio.uio_procp = p;
391 	if (rw == UIO_READ) {
392 		error = VOP_READ(vp, &auio, ioflg, cred);
393 	} else {
394 		error = VOP_WRITE(vp, &auio, ioflg, cred);
395 	}
396 	if (aresid)
397 		*aresid = auio.uio_resid;
398 	else
399 		if (auio.uio_resid && error == 0)
400 			error = EIO;
401 	if ((ioflg & IO_NODELOCKED) == 0)
402 		VOP_UNLOCK(vp, 0);
403 	return (error);
404 }
405 
/*
 * Read directory entries from the vnode backing an open file,
 * advancing fp->f_offset; *done receives the byte count moved.
 * Union mounts are handled by restarting the read on the lower
 * or covered directory when the upper layer yields nothing.
 */
int
vn_readdir(fp, buf, segflg, count, done, p, cookies, ncookies)
	struct file *fp;
	char *buf;
	int segflg, *done, *ncookies;
	u_int count;
	struct proc *p;
	off_t **cookies;
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	struct iovec aiov;
	struct uio auio;
	int error, eofflag;

unionread:
	if (vp->v_type != VDIR)
		return (EINVAL);
	/* Build a single-segment uio over the caller's buffer. */
	aiov.iov_base = buf;
	aiov.iov_len = count;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = segflg;
	auio.uio_procp = p;
	auio.uio_resid = count;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	auio.uio_offset = fp->f_offset;
	error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, cookies,
		    ncookies);
	fp->f_offset = auio.uio_offset;
	VOP_UNLOCK(vp, 0);
	if (error)
		return (error);

#ifdef UNION
{
	extern struct vnode *union_dircache __P((struct vnode *));

	/*
	 * Nothing was read and this is a union-fs directory:
	 * try the cached lower-layer directory instead.
	 */
	if (count == auio.uio_resid && (vp->v_op == union_vnodeop_p)) {
		struct vnode *lvp;

		lvp = union_dircache(vp);
		if (lvp != NULLVP) {
			struct vattr va;

			/*
			 * If the directory is opaque,
			 * then don't show lower entries
			 */
			/*
			 * NOTE(review): the VOP_GETATTR return value is
			 * not checked before va.va_flags is read -- on
			 * failure va may be uninitialized; verify.
			 */
			error = VOP_GETATTR(vp, &va, fp->f_cred, p);
			if (va.va_flags & OPAQUE) {
				vput(lvp);
				lvp = NULL;
			}
		}

		if (lvp != NULLVP) {
			/* Switch the open file over to the lower directory. */
			error = VOP_OPEN(lvp, FREAD, fp->f_cred, p);
			if (error) {
				vput(lvp);
				return (error);
			}
			VOP_UNLOCK(lvp, 0);
			fp->f_data = (caddr_t) lvp;
			fp->f_offset = 0;
			error = vn_close(vp, FREAD, fp->f_cred, p);
			if (error)
				return (error);
			vp = lvp;
			goto unionread;
		}
	}
}
#endif /* UNION */

	/*
	 * At the root of a union mount with nothing read: continue
	 * in the directory the mount covers.
	 */
	if (count == auio.uio_resid && (vp->v_flag & VROOT) &&
	    (vp->v_mount->mnt_flag & MNT_UNION)) {
		struct vnode *tvp = vp;
		vp = vp->v_mount->mnt_vnodecovered;
		VREF(vp);
		fp->f_data = (caddr_t) vp;
		fp->f_offset = 0;
		vrele(tvp);
		goto unionread;
	}
	*done = count - auio.uio_resid;
	return error;
}
494 
495 /*
496  * File table vnode read routine.
497  */
498 static int
499 vn_read(fp, offset, uio, cred, flags)
500 	struct file *fp;
501 	off_t *offset;
502 	struct uio *uio;
503 	struct ucred *cred;
504 	int flags;
505 {
506 	struct vnode *vp = (struct vnode *)fp->f_data;
507 	int count, error, ioflag = 0;
508 
509 	VOP_LEASE(vp, uio->uio_procp, cred, LEASE_READ);
510 	if (fp->f_flag & FNONBLOCK)
511 		ioflag |= IO_NDELAY;
512 	if ((fp->f_flag & (FFSYNC | FRSYNC)) == (FFSYNC | FRSYNC))
513 		ioflag |= IO_SYNC;
514 	if (fp->f_flag & FALTIO)
515 		ioflag |= IO_ALTSEMANTICS;
516 	vn_lock(vp, LK_SHARED | LK_RETRY);
517 	uio->uio_offset = *offset;
518 	count = uio->uio_resid;
519 	error = VOP_READ(vp, uio, ioflag, cred);
520 	if (flags & FOF_UPDATE_OFFSET)
521 		*offset += count - uio->uio_resid;
522 	VOP_UNLOCK(vp, 0);
523 	return (error);
524 }
525 
526 /*
527  * File table vnode write routine.
528  */
529 static int
530 vn_write(fp, offset, uio, cred, flags)
531 	struct file *fp;
532 	off_t *offset;
533 	struct uio *uio;
534 	struct ucred *cred;
535 	int flags;
536 {
537 	struct vnode *vp = (struct vnode *)fp->f_data;
538 	int count, error, ioflag = IO_UNIT;
539 
540 	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
541 		ioflag |= IO_APPEND;
542 	if (fp->f_flag & FNONBLOCK)
543 		ioflag |= IO_NDELAY;
544 	if (fp->f_flag & FFSYNC ||
545 	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
546 		ioflag |= IO_SYNC;
547 	else if (fp->f_flag & FDSYNC)
548 		ioflag |= IO_DSYNC;
549 	if (fp->f_flag & FALTIO)
550 		ioflag |= IO_ALTSEMANTICS;
551 	VOP_LEASE(vp, uio->uio_procp, cred, LEASE_WRITE);
552 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
553 	uio->uio_offset = *offset;
554 	count = uio->uio_resid;
555 	error = VOP_WRITE(vp, uio, ioflag, cred);
556 	if (flags & FOF_UPDATE_OFFSET) {
557 		if (ioflag & IO_APPEND)
558 			*offset = uio->uio_offset;
559 		else
560 			*offset += count - uio->uio_resid;
561 	}
562 	VOP_UNLOCK(vp, 0);
563 	return (error);
564 }
565 
566 /*
567  * File table vnode stat routine.
568  */
569 static int
570 vn_statfile(fp, sb, p)
571 	struct file *fp;
572 	struct stat *sb;
573 	struct proc *p;
574 {
575 	struct vnode *vp = (struct vnode *)fp->f_data;
576 
577 	return vn_stat(vp, sb, p);
578 }
579 
580 int
581 vn_stat(vp, sb, p)
582 	struct vnode *vp;
583 	struct stat *sb;
584 	struct proc *p;
585 {
586 	struct vattr va;
587 	int error;
588 	mode_t mode;
589 
590 	error = VOP_GETATTR(vp, &va, p->p_ucred, p);
591 	if (error)
592 		return (error);
593 	/*
594 	 * Copy from vattr table
595 	 */
596 	sb->st_dev = va.va_fsid;
597 	sb->st_ino = va.va_fileid;
598 	mode = va.va_mode;
599 	switch (vp->v_type) {
600 	case VREG:
601 		mode |= S_IFREG;
602 		break;
603 	case VDIR:
604 		mode |= S_IFDIR;
605 		break;
606 	case VBLK:
607 		mode |= S_IFBLK;
608 		break;
609 	case VCHR:
610 		mode |= S_IFCHR;
611 		break;
612 	case VLNK:
613 		mode |= S_IFLNK;
614 		break;
615 	case VSOCK:
616 		mode |= S_IFSOCK;
617 		break;
618 	case VFIFO:
619 		mode |= S_IFIFO;
620 		break;
621 	default:
622 		return (EBADF);
623 	};
624 	sb->st_mode = mode;
625 	sb->st_nlink = va.va_nlink;
626 	sb->st_uid = va.va_uid;
627 	sb->st_gid = va.va_gid;
628 	sb->st_rdev = va.va_rdev;
629 	sb->st_size = va.va_size;
630 	sb->st_atimespec = va.va_atime;
631 	sb->st_mtimespec = va.va_mtime;
632 	sb->st_ctimespec = va.va_ctime;
633 	sb->st_blksize = va.va_blocksize;
634 	sb->st_flags = va.va_flags;
635 	sb->st_gen = 0;
636 	sb->st_blocks = va.va_bytes / S_BLKSIZE;
637 	return (0);
638 }
639 
640 /*
641  * File table vnode fcntl routine.
642  */
643 static int
644 vn_fcntl(fp, com, data, p)
645 	struct file *fp;
646 	u_int com;
647 	caddr_t data;
648 	struct proc *p;
649 {
650 	struct vnode *vp = ((struct vnode *)fp->f_data);
651 	int error;
652 
653 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
654 	error = VOP_FCNTL(vp, com, data, fp->f_flag, p->p_ucred, p);
655 	VOP_UNLOCK(vp, 0);
656 	return (error);
657 }
658 
/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(fp, com, data, p)
	struct file *fp;
	u_long com;
	caddr_t data;
	struct proc *p;
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vattr vattr;
	int error;

	switch (vp->v_type) {

	case VREG:
	case VDIR:
		/* FIONREAD: bytes between the file offset and EOF. */
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr, p->p_ucred, p);
			if (error)
				return (error);
			*(int *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return (0);			/* XXX */
		/* fall into ... */

	default:
		/* Anything else is passed through untouched. */
		return (EPASSTHROUGH);

	case VFIFO:
	case VCHR:
	case VBLK:
		/* Hand the ioctl down to the fifo or device driver. */
		error = VOP_IOCTL(vp, com, data, fp->f_flag, p->p_ucred, p);
		/*
		 * Acquiring a controlling terminal: record the tty
		 * vnode in the session, dropping any previous one.
		 */
		if (error == 0 && com == TIOCSCTTY) {
			if (p->p_session->s_ttyvp)
				vrele(p->p_session->s_ttyvp);
			p->p_session->s_ttyvp = vp;
			VREF(vp);
		}
		return (error);
	}
}
704 
705 /*
706  * File table vnode poll routine.
707  */
708 static int
709 vn_poll(fp, events, p)
710 	struct file *fp;
711 	int events;
712 	struct proc *p;
713 {
714 
715 	return (VOP_POLL(((struct vnode *)fp->f_data), events, p));
716 }
717 
718 /*
719  * File table vnode kqfilter routine.
720  */
721 int
722 vn_kqfilter(fp, kn)
723 	struct file *fp;
724 	struct knote *kn;
725 {
726 
727 	return (VOP_KQFILTER((struct vnode *)fp->f_data, kn));
728 }
729 
/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
vn_lock(vp, flags)
	struct vnode *vp;
	int flags;
{
	int error;

	do {
		/* Take the interlock unless the caller already holds it. */
		if ((flags & LK_INTERLOCK) == 0)
			simple_lock(&vp->v_interlock);
		if (vp->v_flag & VXLOCK) {
			/*
			 * The vnode is being cleaned out.  Either fail
			 * at once or sleep until the clean completes and
			 * report the vnode gone (ENOENT).
			 */
			if (flags & LK_NOWAIT) {
				simple_unlock(&vp->v_interlock);
				return EBUSY;
			}
			vp->v_flag |= VXWANT;
			ltsleep(vp, PINOD | PNORELOCK,
			    "vn_lock", 0, &vp->v_interlock);
			error = ENOENT;
		} else {
			error = VOP_LOCK(vp, flags | LK_INTERLOCK);
			if (error == 0 || error == EDEADLK || error == EBUSY)
				return (error);
		}
		/* The interlock was consumed above; re-acquire on retry. */
		flags &= ~LK_INTERLOCK;
	} while (flags & LK_RETRY);
	return (error);
}
762 
763 /*
764  * File table vnode close routine.
765  */
766 static int
767 vn_closefile(fp, p)
768 	struct file *fp;
769 	struct proc *p;
770 {
771 
772 	return (vn_close(((struct vnode *)fp->f_data), fp->f_flag,
773 		fp->f_cred, p));
774 }
775 
776 /*
777  * Enable LK_CANRECURSE on lock. Return prior status.
778  */
779 u_int
780 vn_setrecurse(vp)
781 	struct vnode *vp;
782 {
783 	struct lock *lkp = &vp->v_lock;
784 	u_int retval = lkp->lk_flags & LK_CANRECURSE;
785 
786 	lkp->lk_flags |= LK_CANRECURSE;
787 	return retval;
788 }
789 
790 /*
791  * Called when done with locksetrecurse.
792  */
793 void
794 vn_restorerecurse(vp, flags)
795 	struct vnode *vp;
796 	u_int flags;
797 {
798 	struct lock *lkp = &vp->v_lock;
799 
800 	lkp->lk_flags &= ~LK_CANRECURSE;
801 	lkp->lk_flags |= flags;
802 }
803