/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_vnops.c,v 1.2 2003/06/17 04:28:42 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>

static int vn_closefile __P((struct file *fp, struct proc *p));
static int vn_ioctl __P((struct file *fp, u_long com, caddr_t data,
		struct proc *p));
static int vn_read __P((struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct proc *p));
static int vn_poll __P((struct file *fp, int events, struct ucred *cred,
		struct proc *p));
static int vn_kqfilter __P((struct file *fp, struct knote *kn));
static int vn_statfile __P((struct file *fp, struct stat *sb, struct proc *p));
static int vn_write __P((struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct proc *p));

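/*
 * The file operations vector used for vnode-backed descriptors; the
 * generic file descriptor code dispatches through this table.
 */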
struct fileops vnops = {
	vn_read, vn_write, vn_ioctl, vn_poll, vn_kqfilter,
	vn_statfile, vn_closefile
};

/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * Note that this does NOT free the nameidata in the successful case,
 * since the NDINIT is done by the caller.
 */
int
vn_open(ndp, fmode, cmode)
	register struct nameidata *ndp;
	int fmode, cmode;
{
	register struct vnode *vp;
	register struct proc *p = ndp->ni_cnd.cn_proc;
	register struct ucred *cred = p->p_ucred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int mode, error;

	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		bwillwrite();
		error = namei(ndp);
		if (error)
			return (error);
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			VOP_LEASE(ndp->ni_dvp, p, cred, LEASE_WRITE);
			error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
					   &ndp->ni_cnd, vap);
			if (error) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				vput(ndp->ni_dvp);
				return (error);
			}
			vput(ndp->ni_dvp);
			ASSERT_VOP_UNLOCKED(ndp->ni_dvp, "create");
			ASSERT_VOP_LOCKED(ndp->ni_vp, "create");
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags =
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
		error = namei(ndp);
		if (error)
			return (error);
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		mode = 0;
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp);
			if (error)
				goto bad;
			mode |= VWRITE;
		}
		if (fmode & FREAD)
			mode |= VREAD;
		if (mode) {
			error = VOP_ACCESS(vp, mode, cred, p);
			if (error)
				goto bad;
		}
	}
	if (fmode & O_TRUNC) {
		VOP_UNLOCK(vp, 0, p);				/* XXX */
		VOP_LEASE(vp, p, cred, LEASE_WRITE);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);	/* XXX */
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR(vp, vap, cred, p);
		if (error)
			goto bad;
	}
	error = VOP_OPEN(vp, fmode, cred, p);
	if (error)
		goto bad;
	/*
	 * Make sure that a VM object is created for VMIO support.
	 */
	if (vn_canvmio(vp) == TRUE) {
		if ((error = vfs_object_create(vp, p, cred)) != 0)
			goto bad;
	}

	if (fmode & FWRITE)
		vp->v_writecount++;
	return (0);
bad:
	NDFREE(ndp, NDF_ONLY_PNBUF);
	vput(vp);
	return (error);
}
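
/*
 * Illustrative sketch only, not part of this file: a typical caller
 * (e.g. the open(2) path) initializes the nameidata itself and, on
 * success, is left holding a locked, referenced vnode.  "path" here
 * is a hypothetical user-space pathname pointer:
 *
 *	struct nameidata nd;
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, path, p);
 *	error = vn_open(&nd, FREAD, 0);
 *	if (error == 0) {
 *		NDFREE(&nd, NDF_ONLY_PNBUF);
 *		...use nd.ni_vp, which is returned locked...
 *		VOP_UNLOCK(nd.ni_vp, 0, p);
 *	}
 */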

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(vp)
	register struct vnode *vp;
{

	/*
	 * If there is shared text associated with the vnode
	 * (it is being used as a process text image), we
	 * cannot allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);
	return (0);
}

/*
 * Vnode close call.  The vnode is expected to be unlocked; the
 * caller's reference is dropped via vrele().
 */
int
vn_close(vp, flags, cred, p)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
{
	int error;

	if (flags & FWRITE)
		vp->v_writecount--;
	error = VOP_CLOSE(vp, flags, cred, p);
	vrele(vp);
	return (error);
}

static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation.  Returns
	 * the current sequentialness shifted into the IO_SEQ* ioflag
	 * bits, for the caller to OR into the flags passed to
	 * VOP_READ/VOP_WRITE.
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount.
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, p)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *cred;
	int *aresid;
	struct proc *p;
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_procp = p;
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0, p);
	return (error);
}
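
/*
 * Illustrative sketch only, not part of this file: reading the first
 * bytes of a file into a kernel buffer.  "buf" and "resid" are
 * hypothetical locals; IO_NODELOCKED would be passed instead of 0 if
 * the caller already held the vnode lock:
 *
 *	char buf[128];
 *	int resid;
 *
 *	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, sizeof(buf),
 *	    (off_t)0, UIO_SYSSPACE, 0, p->p_ucred, &resid, p);
 */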

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * call bwillwrite() before writing each chunk.  We also call uio_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, p)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *cred;
	int *aresid;
	struct proc *p;
{
	int error = 0;

	do {
		int chunk = (len > MAXBSIZE) ? MAXBSIZE : len;

		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
		    ioflg, cred, aresid, p);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}
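
/*
 * Illustrative sketch only, not part of this file: the chunked variant
 * suits large transfers such as core dumps, where holding the vnode
 * locked across the entire transfer would starve other lockers.
 * "vm_base" and "vm_len" are hypothetical stand-ins for the region
 * being dumped:
 *
 *	error = vn_rdwr_inchunks(UIO_WRITE, vp, vm_base, vm_len,
 *	    (off_t)0, UIO_USERSPACE, IO_UNIT, cred, (int *)NULL, p);
 */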

/*
 * File table vnode read routine.
 */
static int
vn_read(fp, uio, cred, flags, p)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
	struct proc *p;
	int flags;
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_procp == p, ("uio_procp %p is not p %p",
	    uio->uio_procp, p));
	vp = (struct vnode *)fp->f_data;
	ioflag = 0;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	VOP_LEASE(vp, p, cred, LEASE_READ);
	vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, p);
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag |= sequential_heuristic(uio, fp);

	error = VOP_READ(vp, uio, ioflag, cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, p);
	return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(fp, uio, cred, flags, p)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
	struct proc *p;
	int flags;
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_procp == p, ("uio_procp %p is not p %p",
	    uio->uio_procp, p));
	vp = (struct vnode *)fp->f_data;
	if (vp->v_type == VREG)
		bwillwrite();
	ioflag = IO_UNIT;
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((fp->f_flag & O_FSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	VOP_LEASE(vp, p, cred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, p);
	return (error);
}

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(fp, sb, p)
	struct file *fp;
	struct stat *sb;
	struct proc *p;
{
	struct vnode *vp = (struct vnode *)fp->f_data;

	return vn_stat(vp, sb, p);
}

int
vn_stat(vp, sb, p)
	struct vnode *vp;
	register struct stat *sb;
	struct proc *p;
{
	struct vattr vattr;
	register struct vattr *vap;
	int error;
	u_short mode;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap, p->p_ucred, p);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields.
	 */
	sb->st_lspare = 0;
	sb->st_qspare[0] = 0;
	sb->st_qspare[1] = 0;

	/*
	 * Copy from vattr table.
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/*
		 * Cosmetic only: symlinks do not really have a mode.
		 * Adjust the local "mode", not sb->st_mode, since the
		 * latter is overwritten from "mode" below.
		 */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Default to PAGE_SIZE after much discussion.
	 */

	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		sb->st_blksize = vp->v_rdev->si_bsize_best;
		if (sb->st_blksize < vp->v_rdev->si_bsize_phys)
			sb->st_blksize = vp->v_rdev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;
	if (suser_xxx(p->p_ucred, 0, 0))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

#if (S_BLKSIZE == 512)
	/* Optimize this case */
	sb->st_blocks = vap->va_bytes >> 9;
#else
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
#endif
	return (0);
}

/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(fp, com, data, p)
	struct file *fp;
	u_long com;
	caddr_t data;
	struct proc *p;
{
	register struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vattr vattr;
	int error;

	switch (vp->v_type) {

	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr, p->p_ucred, p);
			if (error)
				return (error);
			*(int *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return (0);			/* XXX */
		/* fall into ... */

	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK)
				return (ENOTTY);
			*(int *)data = devsw(vp->v_rdev)->d_flags & D_TYPEMASK;
			return (0);
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, p->p_ucred, p);
		if (error == 0 && com == TIOCSCTTY) {

			/* Do nothing if reassigning same control tty */
			if (p->p_session->s_ttyvp == vp)
				return (0);

			/* Get rid of reference to old control tty */
			if (p->p_session->s_ttyvp)
				vrele(p->p_session->s_ttyvp);

			p->p_session->s_ttyvp = vp;
			VREF(vp);
		}
		return (error);
	}
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(fp, events, cred, p)
	struct file *fp;
	int events;
	struct ucred *cred;
	struct proc *p;
{

	return (VOP_POLL(((struct vnode *)fp->f_data), events, cred, p));
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
#ifndef	DEBUG_LOCKS
vn_lock(vp, flags, p)
#else
debug_vn_lock(vp, flags, p, filename, line)
#endif
	struct vnode *vp;
	int flags;
	struct proc *p;
#ifdef	DEBUG_LOCKS
	const char *filename;
	int line;
#endif
{
	int error;

	do {
		if ((flags & LK_INTERLOCK) == 0)
			simple_lock(&vp->v_interlock);
		if ((vp->v_flag & VXLOCK) && vp->v_vxproc != curproc) {
			vp->v_flag |= VXWANT;
			simple_unlock(&vp->v_interlock);
			tsleep((caddr_t)vp, PINOD, "vn_lock", 0);
			error = ENOENT;
		} else {
#if 0
			/* this can now occur in normal operation */
			if (vp->v_vxproc != NULL)
				log(LOG_INFO, "VXLOCK interlock avoided in vn_lock\n");
#endif
#ifdef	DEBUG_LOCKS
			vp->filename = filename;
			vp->line = line;
#endif
			error = VOP_LOCK(vp,
				    flags | LK_NOPAUSE | LK_INTERLOCK, p);
			if (error == 0)
				return (error);
		}
		flags &= ~LK_INTERLOCK;
	} while (flags & LK_RETRY);
	return (error);
}

/*
 * File table vnode close routine.
 */
static int
vn_closefile(fp, p)
	struct file *fp;
	struct proc *p;
{

	fp->f_ops = &badfileops;
	return (vn_close(((struct vnode *)fp->f_data), fp->f_flag,
		fp->f_cred, p));
}
698 static int
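/*
 * File table vnode kqueue filter routine.
 */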
699 vn_kqfilter(struct file *fp, struct knote *kn)
700 {
701 
702 	return (VOP_KQFILTER(((struct vnode *)fp->f_data), kn));
703 }
704