/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * XXX: How do we, in general, know that objects referenced in events
 * have not been destroyed before we get around to handling the event?
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <geom/geom.h>
#include <geom/geom_int.h>

#include <machine/stdarg.h>

TAILQ_HEAD(event_tailq_head, g_event);

static struct event_tailq_head g_events = TAILQ_HEAD_INITIALIZER(g_events);
static u_int g_pending_events;
static TAILQ_HEAD(,g_provider) g_doorstep = TAILQ_HEAD_INITIALIZER(g_doorstep);
static struct mtx g_eventlock;
static int g_wither_work;

#define G_N_EVENTREFS		20

struct g_event {
	TAILQ_ENTRY(g_event)	events;
	g_event_t		*func;
	void			*arg;
	int			flag;
	void			*ref[G_N_EVENTREFS];
};

#define EV_DONE		0x80000
#define EV_WAKEUP	0x40000
#define EV_CANCELED	0x20000
#define EV_INPROGRESS	0x10000

void
g_waitidle(struct thread *td)
{

	g_topology_assert_not();

	mtx_lock(&g_eventlock);
	TSWAIT("GEOM events");
	while (!TAILQ_EMPTY(&g_events))
		msleep(&g_pending_events, &g_eventlock, PPAUSE,
		    "g_waitidle", 0);
	TSUNWAIT("GEOM events");
	mtx_unlock(&g_eventlock);
	td->td_pflags &= ~TDP_GEOM;
}
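
/*
 * Example (illustrative sketch, not part of this file): a thread that has
 * posted events, or that must not proceed until the GEOM topology has
 * settled, can drain the event queue before continuing; the mount-root
 * path, for example, waits roughly like this:
 *
 *	g_waitidle(curthread);
 */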

static void
ast_geom(struct thread *td, int tda __unused)
{
	/*
	 * If this thread tickled GEOM, we need to wait for the giggling to
	 * stop before we return to userland.
	 */
	g_waitidle(td);
}

static void
geom_event_init(void *arg __unused)
{
	ast_register(TDA_GEOM, ASTR_ASTF_REQUIRED | ASTR_TDP | ASTR_KCLEAR,
	    TDP_GEOM, ast_geom);
}
SYSINIT(geom_event, SI_SUB_INTRINSIC, SI_ORDER_ANY, geom_event_init, NULL);

struct g_attrchanged_args {
	struct g_provider *pp;
	const char *attr;
};

static void
g_attr_changed_event(void *arg, int flag)
{
	struct g_attrchanged_args *args;
	struct g_provider *pp;
	struct g_consumer *cp;
	struct g_consumer *next_cp;

	args = arg;
	pp = args->pp;

	g_topology_assert();
	if (flag != EV_CANCEL && g_shutdown == 0) {
		/*
		 * Tell all consumers of the change.
		 */
		LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, next_cp) {
			if (cp->geom->attrchanged != NULL)
				cp->geom->attrchanged(cp, args->attr);
		}
	}
	g_free(args);
}

int
g_attr_changed(struct g_provider *pp, const char *attr, int flag)
{
	struct g_attrchanged_args *args;
	int error;

	args = g_malloc(sizeof *args, flag);
	if (args == NULL)
		return (ENOMEM);
	args->pp = pp;
	args->attr = attr;
	error = g_post_event(g_attr_changed_event, args, flag, pp, NULL);
	if (error != 0)
		g_free(args);
	return (error);
}

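/*
 * Example (illustrative, not part of this file): a class whose provider's
 * physical path may have changed would typically notify interested
 * consumers with something like the following; the attribute name is the
 * conventional "GEOM::physpath" one, and M_NOWAIT keeps the call safe
 * from contexts that must not sleep:
 *
 *	(void)g_attr_changed(pp, "GEOM::physpath", M_NOWAIT);
 */
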
void
g_orphan_provider(struct g_provider *pp, int error)
{

	/*
	 * G_VALID_PROVIDER(pp) is not asserted here: we likely lack the
	 * topology lock.
	 */
	g_trace(G_T_TOPOLOGY, "g_orphan_provider(%p(%s), %d)",
	    pp, pp->name, error);
	KASSERT(error != 0,
	    ("g_orphan_provider(%p(%s), 0) error must be non-zero\n",
	     pp, pp->name));

	pp->error = error;
	mtx_lock(&g_eventlock);
	KASSERT(!(pp->flags & G_PF_ORPHAN),
	    ("g_orphan_provider(%p(%s)), already an orphan", pp, pp->name));
	pp->flags |= G_PF_ORPHAN;
	TAILQ_INSERT_TAIL(&g_doorstep, pp, orphan);
	mtx_unlock(&g_eventlock);
	wakeup(&g_wait_event);
}
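
/*
 * Example (illustrative, not part of this file): a class that discovers
 * its backing store has gone away typically reports the fact as an
 * orphaning event, most often with ENXIO:
 *
 *	g_orphan_provider(pp, ENXIO);
 */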

/*
 * This function is called once on each provider that the event handler
 * finds on its g_doorstep.
 */

static void
g_orphan_register(struct g_provider *pp)
{
	struct g_consumer *cp, *cp2;
	int wf;

	g_topology_assert();
	G_VALID_PROVIDER(pp);
	g_trace(G_T_TOPOLOGY, "g_orphan_register(%s)", pp->name);

	g_cancel_event(pp);

	wf = pp->flags & G_PF_WITHER;
	pp->flags &= ~G_PF_WITHER;

	/*
	 * Tell all consumers the bad news.
	 * Don't be surprised if they self-destruct.
	 */
	LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, cp2) {
		KASSERT(cp->geom->orphan != NULL,
		    ("geom %s has no orphan, class %s",
		    cp->geom->name, cp->geom->class->name));
		/*
		 * XXX: The g_dev_orphan method destroys things in a
		 * deferred fashion, so another event may already have
		 * called the orphan method.  Check the consumer's flags
		 * so that it is not scheduled twice.
		 */
		if (cp->flags & G_CF_ORPHAN)
			continue;
		cp->flags |= G_CF_ORPHAN;
		cp->geom->orphan(cp);
	}
	if (LIST_EMPTY(&pp->consumers) && wf)
		g_destroy_provider(pp);
	else
		pp->flags |= wf;
#ifdef notyet
	cp = LIST_FIRST(&pp->consumers);
	if (cp != NULL)
		return;
	if (pp->geom->flags & G_GEOM_WITHER)
		g_destroy_provider(pp);
#endif
}

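/*
 * Example (hypothetical sketch of class code, not part of this file): an
 * orphan method is called with the topology lock held once the provider a
 * consumer is attached to has gone away; a simple class usually just tears
 * its geom down, e.g.:
 *
 *	static void
 *	g_example_orphan(struct g_consumer *cp)
 *	{
 *
 *		g_topology_assert();
 *		g_example_destroy(cp->geom->softc);
 *	}
 */
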
static int
one_event(void)
{
	struct g_event *ep;
	struct g_provider *pp;

	g_topology_assert();
	mtx_lock(&g_eventlock);
	pp = TAILQ_FIRST(&g_doorstep);
	if (pp != NULL) {
		G_VALID_PROVIDER(pp);
		TAILQ_REMOVE(&g_doorstep, pp, orphan);
		mtx_unlock(&g_eventlock);
		g_orphan_register(pp);
		return (1);
	}

	ep = TAILQ_FIRST(&g_events);
	if (ep == NULL) {
		wakeup(&g_pending_events);
		return (0);
	}
	ep->flag |= EV_INPROGRESS;
	mtx_unlock(&g_eventlock);
	g_topology_assert();
	ep->func(ep->arg, 0);
	g_topology_assert();
	mtx_lock(&g_eventlock);
	TSRELEASE("GEOM events");
	TAILQ_REMOVE(&g_events, ep, events);
	ep->flag &= ~EV_INPROGRESS;
	if (ep->flag & EV_WAKEUP) {
		ep->flag |= EV_DONE;
		wakeup(ep);
		mtx_unlock(&g_eventlock);
	} else {
		mtx_unlock(&g_eventlock);
		g_free(ep);
	}
	return (1);
}

void
g_run_events(void)
{

	for (;;) {
		g_topology_lock();
		while (one_event())
			;
		mtx_assert(&g_eventlock, MA_OWNED);
		if (g_wither_work) {
			g_wither_work = 0;
			mtx_unlock(&g_eventlock);
			g_wither_washer();
			g_topology_unlock();
		} else {
			g_topology_unlock();
			msleep(&g_wait_event, &g_eventlock, PRIBIO | PDROP,
			    "-", 0);
		}
	}
	/* NOTREACHED */
}

void
g_cancel_event(void *ref)
{
	struct g_event *ep, *epn;
	struct g_provider *pp;
	u_int n;

	mtx_lock(&g_eventlock);
	TAILQ_FOREACH(pp, &g_doorstep, orphan) {
		if (pp != ref)
			continue;
		TAILQ_REMOVE(&g_doorstep, pp, orphan);
		break;
	}
	TAILQ_FOREACH_SAFE(ep, &g_events, events, epn) {
		if (ep->flag & EV_INPROGRESS)
			continue;
		for (n = 0; n < G_N_EVENTREFS; n++) {
			if (ep->ref[n] == NULL)
				break;
			if (ep->ref[n] != ref)
				continue;
			TSRELEASE("GEOM events");
			TAILQ_REMOVE(&g_events, ep, events);
			ep->func(ep->arg, EV_CANCEL);
			mtx_assert(&g_eventlock, MA_OWNED);
			if (ep->flag & EV_WAKEUP) {
				ep->flag |= (EV_DONE|EV_CANCELED);
				wakeup(ep);
			} else {
				g_free(ep);
			}
			break;
		}
	}
	if (TAILQ_EMPTY(&g_events))
		wakeup(&g_pending_events);
	mtx_unlock(&g_eventlock);
}

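/*
 * Example (illustrative sketch, not part of this file): before freeing an
 * object that has been passed as an event reference, withdraw any events
 * still queued against it so their handlers never see a stale pointer;
 * "sc" here is a hypothetical per-geom softc:
 *
 *	g_cancel_event(sc);
 *	g_free(sc);
 */
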
struct g_event *
g_alloc_event(int flag)
{
	KASSERT(flag == M_WAITOK || flag == M_NOWAIT,
	    ("Wrong flag to g_alloc_event"));

	return (g_malloc(sizeof(struct g_event), flag | M_ZERO));
}

static void
g_post_event_ep_va(g_event_t *func, void *arg, int wuflag,
    struct g_event *ep, va_list ap)
{
	void *p;
	u_int n;

	ep->flag = wuflag;
	for (n = 0; n < G_N_EVENTREFS; n++) {
		p = va_arg(ap, void *);
		if (p == NULL)
			break;
		g_trace(G_T_TOPOLOGY, "  ref %p", p);
		ep->ref[n] = p;
	}
	KASSERT(p == NULL, ("Too many references to event"));
	ep->func = func;
	ep->arg = arg;
	mtx_lock(&g_eventlock);
	TSHOLD("GEOM events");
	TAILQ_INSERT_TAIL(&g_events, ep, events);
	mtx_unlock(&g_eventlock);
	wakeup(&g_wait_event);
	curthread->td_pflags |= TDP_GEOM;
	ast_sched(curthread, TDA_GEOM);
}

void
g_post_event_ep(g_event_t *func, void *arg, struct g_event *ep, ...)
{
	va_list ap;

	va_start(ap, ep);
	g_post_event_ep_va(func, arg, 0, ep, ap);
	va_end(ap);
}
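
/*
 * Example (illustrative sketch, not part of this file): g_alloc_event()
 * and g_post_event_ep() let a caller pre-allocate the event while sleeping
 * is still permitted and post it later from a context that must not fail
 * or sleep; "g_example_handler" and "sc" are hypothetical:
 *
 *	struct g_event *ep;
 *
 *	ep = g_alloc_event(M_WAITOK);
 *	...
 *	g_post_event_ep(g_example_handler, sc, ep, pp, NULL);
 */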

static int
g_post_event_x(g_event_t *func, void *arg, int flag, int wuflag,
    struct g_event **epp, va_list ap)
{
	struct g_event *ep;

	g_trace(G_T_TOPOLOGY, "g_post_event_x(%p, %p, %d, %d)",
	    func, arg, flag, wuflag);
	KASSERT(wuflag == 0 || wuflag == EV_WAKEUP,
	    ("Wrong wuflag in g_post_event_x(0x%x)", wuflag));
	ep = g_alloc_event(flag);
	if (ep == NULL)
		return (ENOMEM);
	if (epp != NULL)
		*epp = ep;
	g_post_event_ep_va(func, arg, wuflag, ep, ap);
	return (0);
}

int
g_post_event(g_event_t *func, void *arg, int flag, ...)
{
	va_list ap;
	int i;

	KASSERT(flag == M_WAITOK || flag == M_NOWAIT,
	    ("Wrong flag to g_post_event"));
	va_start(ap, flag);
	i = g_post_event_x(func, arg, flag, 0, NULL, ap);
	va_end(ap);
	return (i);
}
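
/*
 * Example (illustrative, not part of this file): posting an asynchronous
 * event against a provider; the trailing NULL terminates the reference
 * list, and any of the listed references can later be passed to
 * g_cancel_event() to withdraw the event.  "g_example_handler" and "sc"
 * are hypothetical:
 *
 *	error = g_post_event(g_example_handler, sc, M_WAITOK, pp, NULL);
 */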

void
g_do_wither(void)
{

	mtx_lock(&g_eventlock);
	g_wither_work = 1;
	mtx_unlock(&g_eventlock);
	wakeup(&g_wait_event);
}

/*
 * XXX: It might actually be useful to call this function with topology held.
 * XXX: This would ensure that the event gets created before anything else
 * XXX: changes.  At present all users have a handle on things in some other
 * XXX: way, so this remains an XXX for now.
 */

int
g_waitfor_event(g_event_t *func, void *arg, int flag, ...)
{
	va_list ap;
	struct g_event *ep;
	int error;

	g_topology_assert_not();
	KASSERT(flag == M_WAITOK || flag == M_NOWAIT,
	    ("Wrong flag to g_waitfor_event"));
	va_start(ap, flag);
	error = g_post_event_x(func, arg, flag, EV_WAKEUP, &ep, ap);
	va_end(ap);
	if (error)
		return (error);

	mtx_lock(&g_eventlock);
	while (!(ep->flag & EV_DONE))
		msleep(ep, &g_eventlock, PRIBIO, "g_waitfor_event", 0);
	if (ep->flag & EV_CANCELED)
		error = EAGAIN;
	mtx_unlock(&g_eventlock);

	g_free(ep);
	return (error);
}
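
/*
 * Example (illustrative, not part of this file): running a handler
 * synchronously on the event thread and waiting for it to finish; an
 * EAGAIN return means the event was canceled before it could run.
 * "g_example_handler" and "sc" are hypothetical:
 *
 *	error = g_waitfor_event(g_example_handler, sc, M_WAITOK, NULL);
 *	if (error == EAGAIN)
 *		... the event was canceled ...
 */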

void
g_event_init(void)
{

	mtx_init(&g_eventlock, "GEOM orphanage", NULL, MTX_DEF);
}