xref: /openbsd/sys/kern/vfs_default.c (revision 771fbea0)
/*	$OpenBSD: vfs_default.c,v 1.48 2021/04/28 09:53:53 claudio Exp $  */

/*
 * Portions of this code are:
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/pool.h>
#include <sys/event.h>
#include <sys/specdev.h>

int filt_generic_readwrite(struct knote *, long);
void filt_generic_detach(struct knote *);

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
int
vop_generic_revoke(void *v)
{
	struct vop_revoke_args *ap = v;
	struct vnode *vp, *vq;
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if ((ap->a_flags & REVOKEALL) == 0)
		panic("vop_generic_revoke");
#endif

	vp = ap->a_vp;

	while (vp->v_type == VBLK && vp->v_specinfo != NULL &&
	    vp->v_specmountpoint != NULL) {
		struct mount *mp = vp->v_specmountpoint;

		/*
		 * If we have a mount point associated with the vnode, we
		 * must flush it out now, so as not to leave a dangling
		 * zombie mount point lying around in VFS.
		 */
		if (!vfs_busy(mp, VB_WRITE|VB_WAIT)) {
			dounmount(mp, MNT_FORCE | MNT_DOOMED, p);
			break;
		}
	}

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * wait until it is done and return.
		 */
		mtx_enter(&vnode_mtx);
		if (vp->v_lflag & VXLOCK) {
			vp->v_lflag |= VXWANT;
			msleep_nsec(vp, &vnode_mtx, PINOD,
			    "vop_generic_revokeall", INFSLP);
			mtx_leave(&vnode_mtx);
			return (0);
		}

		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		vp->v_lflag |= VXLOCK;
		mtx_leave(&vnode_mtx);

		while (vp->v_flag & VALIASED) {
			SLIST_FOREACH(vq, vp->v_hashchain, v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				vgone(vq);
				break;
			}
		}

		/*
		 * Remove the lock so that vgone below will
		 * really eliminate the vnode after which time
		 * vgone will awaken any sleepers.
		 */
		mtx_enter(&vnode_mtx);
		vp->v_lflag &= ~VXLOCK;
		mtx_leave(&vnode_mtx);
	}

	vgonel(vp, p);

	return (0);
}
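
/*
 * Illustrative sketch (not part of the original file): callers do not
 * invoke vop_generic_revoke() directly; they go through the VOP_REVOKE()
 * interface with REVOKEALL set, which is what the DIAGNOSTIC check above
 * expects.  The helper name examplefs_revoke_dev() is hypothetical.
 */
#if 0
static int
examplefs_revoke_dev(struct vnode *vp)
{
	/* Revoke vp and every alias of it; REVOKEALL is required. */
	return (VOP_REVOKE(vp, REVOKEALL));
}
#endif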

int
vop_generic_bmap(void *v)
{
	struct vop_bmap_args *ap = v;

	if (ap->a_vpp)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp)
		*ap->a_bnp = ap->a_bn;
	if (ap->a_runp)
		*ap->a_runp = 0;

	return (0);
}
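
/*
 * Illustrative sketch (not part of the original file): with the identity
 * mapping above, a VOP_BMAP() caller gets back the vnode itself, an
 * unchanged block number, and no read-ahead run.  The function and
 * variable names below are hypothetical.
 */
#if 0
static int
examplefs_where_is_block(struct vnode *vp, daddr_t lbn)
{
	struct vnode *devvp;
	daddr_t daddr;
	int run, error;

	error = VOP_BMAP(vp, lbn, &devvp, &daddr, &run);
	/* on success: devvp == vp, daddr == lbn, run == 0 */
	return (error);
}
#endif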

int
vop_generic_bwrite(void *v)
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}

int
vop_generic_abortop(void *v)
{
	struct vop_abortop_args *ap = v;

	if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
		pool_put(&namei_pool, ap->a_cnp->cn_pnbuf);

	return (0);
}
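
/*
 * Illustrative sketch (not part of the original file): a filesystem
 * typically reaches the abortop above through VOP_ABORTOP() on the
 * error path of a name operation, releasing the pathname buffer that
 * namei allocated unless SAVESTART asked to keep it.  The function
 * name below is hypothetical.
 */
#if 0
static int
examplefs_create_error(struct vnode *dvp, struct componentname *cnp,
    int error)
{
	VOP_ABORTOP(dvp, cnp);	/* frees cnp->cn_pnbuf unless SAVESTART */
	vput(dvp);
	return (error);
}
#endif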

/*
 * Stubs to use when there is no locking to be done on the underlying
 * object.  A minimal shared lock is necessary to ensure that the
 * underlying object is not revoked while an operation is in progress,
 * so an active shared count should be maintained in an auxiliary vnode
 * lock structure.  However, that is not currently done.
 */
int
vop_generic_lock(void *v)
{
	return (0);
}

/*
 * Decrement the active use count. (Not done currently)
 */
int
vop_generic_unlock(void *v)
{
	return (0);
}

/*
 * Return whether or not the node is in use. (Not done currently)
 */
int
vop_generic_islocked(void *v)
{
	return (0);
}
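
/*
 * Illustrative sketch (not part of the original file): a filesystem
 * whose vnodes need no real locking can point its lock entries at the
 * stubs above.  The table name examplefs_vops is hypothetical, and the
 * member names are assumed to follow struct vops in <sys/vnode.h>.
 */
#if 0
const struct vops examplefs_vops = {
	.vop_lock	= vop_generic_lock,	/* no-op lock */
	.vop_unlock	= vop_generic_unlock,	/* no-op unlock */
	.vop_islocked	= vop_generic_islocked,	/* never reported locked */
};
#endif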

const struct filterops generic_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_generic_detach,
	.f_event	= filt_generic_readwrite,
};

int
vop_generic_kqfilter(void *v)
{
	struct vop_kqfilter_args *ap = v;
	struct knote *kn = ap->a_kn;

	switch (kn->kn_filter) {
	case EVFILT_READ:
	case EVFILT_WRITE:
		kn->kn_fop = &generic_filtops;
		break;
	default:
		return (EINVAL);
	}

	return (0);
}
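
/*
 * Illustrative sketch (not part of the original file): the kqfilter
 * above accepts only read/write filters and points the knote at
 * generic_filtops; anything else gets EINVAL.  A hand-rolled dispatch
 * would look roughly like this.  The function name is hypothetical and
 * the a_vp/a_kn layout of the args struct is assumed from the handler.
 */
#if 0
static int
examplefs_attach_read_knote(struct vnode *vp, struct knote *kn)
{
	struct vop_kqfilter_args a = { .a_vp = vp, .a_kn = kn };

	kn->kn_filter = EVFILT_READ;
	return (vop_generic_kqfilter(&a));	/* 0: kn_fop set */
}
#endif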

/* Trivial lookup routine that always fails. */
int
vop_generic_lookup(void *v)
{
	struct vop_lookup_args	*ap = v;

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

void
filt_generic_detach(struct knote *kn)
{
}

int
filt_generic_readwrite(struct knote *kn, long hint)
{
	/*
	 * The filesystem is gone, so set the EOF flag and schedule
	 * the knote for deletion.
	 */
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	kn->kn_data = 0;

	return (1);
}
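
/*
 * Illustrative sketch (not part of the original file): from userland,
 * a kevent registered on a descriptor served by these generic ops
 * reports EV_EOF once the vnode is revoked, matching the NOTE_REVOKE
 * handling above.  A minimal sketch assuming an already-open fd; the
 * function name is hypothetical and error checking is omitted.
 */
#if 0
/* userland view, not kernel code */
#include <sys/types.h>
#include <sys/event.h>

static int
wait_for_revoke(int fd)
{
	struct kevent kev;
	int kq = kqueue();

	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);	/* register the knote */
	kevent(kq, NULL, 0, &kev, 1, NULL);	/* wait for an event */
	return ((kev.flags & EV_EOF) != 0);	/* revoked -> EOF set */
}
#endif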