/*	$OpenBSD: vfs_default.c,v 1.51 2022/04/27 14:52:25 claudio Exp $  */

/*
 * Portions of this code are:
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/pool.h>
#include <sys/event.h>
#include <sys/specdev.h>

int filt_generic_readwrite(struct knote *, long);
void filt_generic_detach(struct knote *);

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
int
vop_generic_revoke(void *v)
{
	struct vop_revoke_args *ap = v;
	struct vnode *vp, *vq;
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if ((ap->a_flags & REVOKEALL) == 0)
		panic("vop_generic_revoke");
#endif

	vp = ap->a_vp;

	while (vp->v_type == VBLK && vp->v_specinfo != NULL &&
	    vp->v_specmountpoint != NULL) {
		struct mount *mp = vp->v_specmountpoint;

		/*
		 * If we have a mount point associated with the vnode, we
		 * must flush it out now, so as not to leave a dangling
		 * zombie mount point lying around in VFS.
		 */
		if (!vfs_busy(mp, VB_WRITE|VB_WAIT)) {
			dounmount(mp, MNT_FORCE | MNT_DOOMED, p);
			break;
		}
	}

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * wait until it is done and return.
		 */
		mtx_enter(&vnode_mtx);
		if (vp->v_lflag & VXLOCK) {
			vp->v_lflag |= VXWANT;
			msleep_nsec(vp, &vnode_mtx, PINOD,
			    "vop_generic_revokeall", INFSLP);
			mtx_leave(&vnode_mtx);
			return (0);
		}

		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		vp->v_lflag |= VXLOCK;
		mtx_leave(&vnode_mtx);

		while (vp->v_flag & VALIASED) {
			SLIST_FOREACH(vq, vp->v_hashchain, v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				vgonel(vq, p);
				break;
			}
		}

		/*
		 * Remove the lock so that vgone below will
		 * really eliminate the vnode, after which
		 * vgone will awaken any sleepers.
		 */
		mtx_enter(&vnode_mtx);
		vp->v_lflag &= ~VXLOCK;
		mtx_leave(&vnode_mtx);
	}

	vgonel(vp, p);

	return (0);
}
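
/*
 * Illustrative sketch (not part of this file): vop_generic_revoke() is
 * normally reached through the VOP_REVOKE() interface, and the DIAGNOSTIC
 * check above means callers are expected to pass REVOKEALL.  A filesystem
 * would typically hook it into its vnode operations table (the
 * "examplefs" name below is hypothetical):
 *
 *	const struct vops examplefs_vops = {
 *		...
 *		.vop_revoke	= vop_generic_revoke,
 *		...
 *	};
 *
 *	// the revoke(2) path then ends up doing roughly:
 *	error = VOP_REVOKE(vp, REVOKEALL);
 */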

int
vop_generic_badop(void *v)
{
	panic("%s", __func__);
}
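
/*
 * Illustrative sketch (not part of this file): vop_generic_badop() is a
 * catch-all for operations that must never be reached on a given vnode
 * type; wiring an operation to it in a (hypothetical) vops table turns
 * any such call into a panic, e.g.:
 *
 *	const struct vops examplefs_spec_vops = {
 *		...
 *		.vop_create	= vop_generic_badop,	// never valid here
 *		...
 *	};
 */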

int
vop_generic_bmap(void *v)
{
	struct vop_bmap_args *ap = v;

	if (ap->a_vpp)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp)
		*ap->a_bnp = ap->a_bn;
	if (ap->a_runp)
		*ap->a_runp = 0;

	return (0);
}
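
/*
 * Illustrative sketch (not part of this file): the generic bmap is an
 * identity mapping -- the logical block number comes back unchanged, the
 * "device" vnode is the vnode itself, and no read-ahead run is reported.
 * A caller going through VOP_BMAP() would see something like:
 *
 *	struct vnode *devvp;
 *	daddr_t daddr;
 *	int run;
 *
 *	error = VOP_BMAP(vp, lbn, &devvp, &daddr, &run);
 *	// on success: devvp == vp, daddr == lbn, run == 0
 */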

int
vop_generic_bwrite(void *v)
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}

int
vop_generic_abortop(void *v)
{
	struct vop_abortop_args *ap = v;

	if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
		pool_put(&namei_pool, ap->a_cnp->cn_pnbuf);

	return (0);
}
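
/*
 * Illustrative sketch (not part of this file): abortop releases the
 * pathname buffer that namei() left behind (HASBUF) when the caller did
 * not ask to keep it (SAVESTART).  A filesystem's failing create path,
 * for example, is expected to clean up roughly like this:
 *
 *	// lookup for CREATE succeeded, but allocating the inode failed
 *	if (error) {
 *		VOP_ABORTOP(dvp, cnp);	// frees cnp->cn_pnbuf if HASBUF
 *		vput(dvp);
 *		return (error);
 *	}
 */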

const struct filterops generic_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_generic_detach,
	.f_event	= filt_generic_readwrite,
};

int
vop_generic_kqfilter(void *v)
{
	struct vop_kqfilter_args *ap = v;
	struct knote *kn = ap->a_kn;

	switch (kn->kn_filter) {
	case EVFILT_READ:
	case EVFILT_WRITE:
		kn->kn_fop = &generic_filtops;
		break;
	default:
		return (EINVAL);
	}

	return (0);
}
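
/*
 * Illustrative sketch (not part of this file): a filesystem with no
 * filter of its own can point .vop_kqfilter at vop_generic_kqfilter, so
 * that an ordinary userland registration such as
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * ends up attached to generic_filtops above; anything other than
 * EVFILT_READ/EVFILT_WRITE is rejected with EINVAL.
 */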

/* Trivial lookup routine that always fails. */
int
vop_generic_lookup(void *v)
{
	struct vop_lookup_args *ap = v;

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}
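
/*
 * Illustrative sketch (not part of this file): this stub suits vnode
 * types that can never contain directory entries (devices, fifos, dead
 * vnodes).  Wiring it into a (hypothetical) vops table makes any namei()
 * traversal through such a vnode fail cleanly:
 *
 *	const struct vops examplefs_spec_vops = {
 *		.vop_lookup	= vop_generic_lookup,	// always ENOTDIR
 *		...
 *	};
 */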

void
filt_generic_detach(struct knote *kn)
{
}

int
filt_generic_readwrite(struct knote *kn, long hint)
{
	/*
	 * If the filesystem is gone, set the EOF flag and schedule
	 * the knote for deletion.
	 */
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	kn->kn_data = 0;

	return (1);
}
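
/*
 * Illustrative sketch (not part of this file): the NOTE_REVOKE hint is
 * expected to be posted by the vnode teardown path when a vnode is
 * revoked or cleaned out.  A userland consumer polling the descriptor
 * would then see the event flagged EOF, roughly:
 *
 *	struct kevent ev;
 *
 *	if (kevent(kq, NULL, 0, &ev, 1, NULL) == 1 &&
 *	    (ev.flags & EV_EOF))
 *		close(fd);	// underlying vnode was revoked
 */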
226