/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_vnops.c,v 1.15 2003/10/09 22:27:19 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>

static int vn_closefile (struct file *fp, struct thread *td);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct thread *td);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct thread *td);
static int vn_poll (struct file *fp, int events, struct ucred *cred,
		struct thread *td);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct thread *td);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct thread *td);

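/*
 * The file ops vector installed for vnode-backed file descriptors.
 */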
struct fileops vnops = {
	NULL,	/* port */
	0,	/* autoq */
	vn_read, vn_write, vn_ioctl, vn_poll, vn_kqfilter,
	vn_statfile, vn_closefile
};

/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * Note that this does NOT free nameidata for the successful case,
 * due to the NDINIT being done elsewhere.
 */
int
vn_open(struct nameidata *ndp, int fmode, int cmode)
{
	struct vnode *vp;
	struct thread *td = ndp->ni_cnd.cn_td;
	struct ucred *cred = ndp->ni_cnd.cn_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int mode, error;

	KKASSERT(cred == td->td_proc->p_ucred);

	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = NAMEI_CREATE;
		ndp->ni_cnd.cn_flags = CNP_LOCKPARENT | CNP_LOCKLEAF;
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= CNP_FOLLOW;
		bwillwrite();
		error = namei(ndp);
		if (error)
			return (error);
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			VOP_LEASE(ndp->ni_dvp, td, cred, LEASE_WRITE);
			error = VOP_CREATE(ndp->ni_dvp, NCPNULL, &ndp->ni_vp,
					   &ndp->ni_cnd, vap);
			if (error) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				vput(ndp->ni_dvp);
				return (error);
			}
			vput(ndp->ni_dvp);
			ASSERT_VOP_UNLOCKED(ndp->ni_dvp, "create");
			ASSERT_VOP_LOCKED(ndp->ni_vp, "create");
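			/* a freshly created file is empty, O_TRUNC is moot */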
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = NAMEI_LOOKUP;
		ndp->ni_cnd.cn_flags = CNP_LOCKLEAF |
		    ((fmode & O_NOFOLLOW) ? 0 : CNP_FOLLOW);
		error = namei(ndp);
		if (error)
			return (error);
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		mode = 0;
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp);
			if (error)
				goto bad;
			mode |= VWRITE;
		}
		if (fmode & FREAD)
			mode |= VREAD;
		if (mode) {
			error = VOP_ACCESS(vp, mode, cred, td);
			if (error)
				goto bad;
		}
	}
	if (fmode & O_TRUNC) {
		VOP_UNLOCK(vp, 0, td);				/* XXX */
		VOP_LEASE(vp, td, cred, LEASE_WRITE);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);	/* XXX */
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR(vp, vap, cred, td);
		if (error)
			goto bad;
	}
	error = VOP_OPEN(vp, fmode, cred, td);
	if (error)
		goto bad;
	/*
	 * Make sure that a VM object is created for VMIO support.
	 */
	if (vn_canvmio(vp) == TRUE) {
		if ((error = vfs_object_create(vp, td)) != 0)
			goto bad;
	}

	if (fmode & FWRITE)
		vp->v_writecount++;
	return (0);
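	/*
	 * Error path: release the pathname buffer and the held,
	 * locked vnode.
	 */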
bad:
	NDFREE(ndp, NDF_ONLY_PNBUF);
	vput(vp);
	return (error);
}

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(struct vnode *vp)
{
	/*
	 * If there is shared text associated with the vnode, we
	 * cannot allow writing: the file is busy as an executable's
	 * backing store.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);
	return (0);
}

/*
 * Vnode close call
 */
int
vn_close(struct vnode *vp, int flags, struct thread *td)
{
	int error;

	if (flags & FWRITE)
		vp->v_writecount--;
	error = VOP_CLOSE(vp, flags, td);
	vrele(vp);
	return (error);
}

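/*
 * Estimate how sequential recent I/O on the file has been and return a
 * read-ahead hint: f_seqcount shifted left by IO_SEQSHIFT, suitable for
 * OR'ing into the ioflag bits, or 0 if the pattern is not sequential.
 */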
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid, struct thread *td)
{
	struct uio auio;
	struct iovec aiov;
	int error;

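	/* IO_NODELOCKED means the caller already holds the vnode lock */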
	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = td;
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0, td);
	return (error);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call uio_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
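/*
 * Note on residuals: each vn_rdwr() call rewrites *aresid for its own
 * chunk; on exit the bytes that were never attempted are added back, so
 * *aresid reflects the untransferred count for the whole request.
 */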
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid, struct thread *td)
{
	int error = 0;

	do {
		int chunk = (len > MAXBSIZE) ? MAXBSIZE : len;

		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
			    ioflg, cred, aresid, td);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}

/*
 * File table vnode read routine.
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred,
	int flags, struct thread *td)
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p", uio->uio_td, td));
	vp = (struct vnode *)fp->f_data;
	ioflag = 0;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	VOP_LEASE(vp, td, cred, LEASE_READ);
	vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);
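	/*
	 * Without FOF_OFFSET the descriptor's file offset is used and
	 * updated; with it the caller supplies the offset in the uio
	 * (pread-style).
	 */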
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag |= sequential_heuristic(uio, fp);

	error = VOP_READ(vp, uio, ioflag, cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, td);
	return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred,
	int flags, struct thread *td)
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	vp = (struct vnode *)fp->f_data;
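	/*
	 * Wait for buffer cache space before locking the vnode so we
	 * do not stall other lockers while the buffer cache drains.
	 */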
	if (vp->v_type == VREG)
		bwillwrite();
	ioflag = IO_UNIT;
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((fp->f_flag & O_FSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	VOP_LEASE(vp, td, cred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, td);
	return (error);
}

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct thread *td)
{
	struct vnode *vp = (struct vnode *)fp->f_data;

	return vn_stat(vp, sb, td);
}

int
vn_stat(struct vnode *vp, struct stat *sb, struct thread *td)
{
	struct vattr vattr;
	struct vattr *vap;
	int error;
	u_short mode;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap, td);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare[0] = 0;
	sb->st_qspare[1] = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/*
		 * This is cosmetic only, symlinks do not really have a
		 * mode.  Adjust 'mode' rather than sb->st_mode, which
		 * is overwritten below, so the change actually sticks.
		 */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Default to PAGE_SIZE after much discussion.
	 */

	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		sb->st_blksize = vp->v_rdev->si_bsize_best;
		if (sb->st_blksize < vp->v_rdev->si_bsize_phys)
			sb->st_blksize = vp->v_rdev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;
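	/* the inode generation number is visible only to the superuser */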
	if (suser(td))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

#if (S_BLKSIZE == 512)
	/* Optimize this case */
	sb->st_blocks = vap->va_bytes >> 9;
#else
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
#endif
	return (0);
}

/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct thread *td)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct ucred *ucred;
	struct vattr vattr;
	int error;

	KKASSERT(td->td_proc != NULL);
	ucred = td->td_proc->p_ucred;

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr, td);
			if (error)
				return (error);
			*(int *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return (0);			/* XXX */
		/* fall into ... */
	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK)
				return (ENOTTY);
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			return (0);
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred, td);
		if (error == 0 && com == TIOCSCTTY) {
			struct session *sess = td->td_proc->p_session;

			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp)
				return (0);

			/* Get rid of reference to old control tty */
			if (sess->s_ttyvp)
				vrele(sess->s_ttyvp);

			sess->s_ttyvp = vp;
			VREF(vp);
		}
		return (error);
	}
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(struct file *fp, int events, struct ucred *cred, struct thread *td)
{
	return (VOP_POLL(((struct vnode *)fp->f_data), events, cred, td));
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
#ifndef	DEBUG_LOCKS
vn_lock(struct vnode *vp, int flags, struct thread *td)
#else
debug_vn_lock(struct vnode *vp, int flags, struct thread *td,
		const char *filename, int line)
#endif
{
	int error;

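	/*
	 * If the vnode is being recycled by someone else (VXLOCK is
	 * set), sleep until the recycle completes and fail with ENOENT;
	 * with LK_RETRY set we keep looping until the lock is obtained.
	 */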
	do {
		if ((flags & LK_INTERLOCK) == 0)
			lwkt_gettoken(&vp->v_interlock);
		if ((vp->v_flag & VXLOCK) && vp->v_vxproc != curproc) {
			vp->v_flag |= VXWANT;
			lwkt_reltoken(&vp->v_interlock);
			tsleep((caddr_t)vp, 0, "vn_lock", 0);
			error = ENOENT;
		} else {
#if 0
			/* this can now occur in normal operation */
			if (vp->v_vxproc != NULL)
				log(LOG_INFO, "VXLOCK interlock avoided in vn_lock\n");
#endif
#ifdef	DEBUG_LOCKS
			vp->filename = filename;
			vp->line = line;
#endif
			error = VOP_LOCK(vp,
				    flags | LK_NOPAUSE | LK_INTERLOCK, td);
			if (error == 0)
				return (error);
		}
		flags &= ~LK_INTERLOCK;
	} while (flags & LK_RETRY);
	return (error);
}

/*
 * File table vnode close routine.
 */
static int
vn_closefile(struct file *fp, struct thread *td)
{
	int err;

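	/*
	 * Point the fp at badfileops first so any late operations on
	 * this file pointer fail instead of reaching the closed vnode.
	 */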
	fp->f_ops = &badfileops;
	err = vn_close(((struct vnode *)fp->f_data), fp->f_flag, td);
	return(err);
}

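/*
 * File table vnode kqfilter routine.
 */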
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	return (VOP_KQFILTER(((struct vnode *)fp->f_data), kn));
}