/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * XXX: How do we, in general, know that objects referenced in events
 * have not been destroyed before we get around to handling the event?
 */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <geom/geom.h>
#include <geom/geom_int.h>

#include <machine/stdarg.h>

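/*
 * Global event machinery state: the queue of pending events, the
 * "doorstep" queue of freshly orphaned providers, and the mutex that
 * protects both.  g_wither_work flags a pending g_wither_washer() pass.
 */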
TAILQ_HEAD(event_tailq_head, g_event);

static struct event_tailq_head g_events = TAILQ_HEAD_INITIALIZER(g_events);
static u_int g_pending_events;
static TAILQ_HEAD(,g_provider) g_doorstep = TAILQ_HEAD_INITIALIZER(g_doorstep);
static struct mtx g_eventlock;
static int g_wither_work;

#define G_N_EVENTREFS		20

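/*
 * A single queued event: the callback and its argument, state flags,
 * and a NULL-terminated array of up to G_N_EVENTREFS references that
 * g_cancel_event() can later match against.
 */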
struct g_event {
	TAILQ_ENTRY(g_event)	events;
	g_event_t		*func;
	void			*arg;
	int			flag;
	void			*ref[G_N_EVENTREFS];
};

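/* State bits kept in g_event.flag. */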
#define EV_DONE		0x80000
#define EV_WAKEUP	0x40000
#define EV_CANCELED	0x20000
#define EV_INPROGRESS	0x10000

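/*
 * Sleep until the event queue has been drained, then clear the
 * thread's TDP_GEOM flag.  Called (via the AST handler below) before a
 * thread that has posted GEOM events returns to userland.
 */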
void
g_waitidle(struct thread *td)
{

	g_topology_assert_not();

	mtx_lock(&g_eventlock);
	TSWAIT("GEOM events");
	while (!TAILQ_EMPTY(&g_events))
		msleep(&g_pending_events, &g_eventlock, PPAUSE,
		    "g_waitidle", 0);
	TSUNWAIT("GEOM events");
	mtx_unlock(&g_eventlock);
	td->td_pflags &= ~TDP_GEOM;
}

static void
ast_geom(struct thread *td, int tda __unused)
{
	/*
	 * If this thread tickled GEOM, we need to wait for the giggling to
	 * stop before we return to userland.
	 */
	g_waitidle(td);
}

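/*
 * Register the AST handler so that threads which posted GEOM events
 * (and thus carry TDP_GEOM) drain the queue on their way back to
 * userland.
 */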
static void
geom_event_init(void *arg __unused)
{
	ast_register(TDA_GEOM, ASTR_ASTF_REQUIRED | ASTR_TDP | ASTR_KCLEAR,
	    TDP_GEOM, ast_geom);
}
SYSINIT(geom_event, SI_SUB_INTRINSIC, SI_ORDER_ANY, geom_event_init, NULL);

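/* Argument bundle for a deferred "attribute changed" notification. */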
struct g_attrchanged_args {
	struct g_provider *pp;
	const char *attr;
};

static void
g_attr_changed_event(void *arg, int flag)
{
	struct g_attrchanged_args *args;
	struct g_provider *pp;
	struct g_consumer *cp;
	struct g_consumer *next_cp;

	args = arg;
	pp = args->pp;

	g_topology_assert();
	if (flag != EV_CANCEL && g_shutdown == 0) {
		/*
		 * Tell all consumers of the change.
		 */
		LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, next_cp) {
			if (cp->geom->attrchanged != NULL)
				cp->geom->attrchanged(cp, args->attr);
		}
	}
	g_free(args);
}

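/*
 * Post an event notifying all consumers of pp that attribute "attr"
 * has changed.  Returns ENOMEM if the event could not be allocated.
 */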
int
g_attr_changed(struct g_provider *pp, const char *attr, int flag)
{
	struct g_attrchanged_args *args;
	int error;

	args = g_malloc(sizeof *args, flag);
	if (args == NULL)
		return (ENOMEM);
	args->pp = pp;
	args->attr = attr;
	error = g_post_event(g_attr_changed_event, args, flag, pp, NULL);
	if (error != 0)
		g_free(args);
	return (error);
}

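/*
 * Mark pp as orphaned and put it on the doorstep queue; the event
 * thread will pick it up and call g_orphan_register() on it.  May be
 * called without the topology lock.
 */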
void
g_orphan_provider(struct g_provider *pp, int error)
{

	/* G_VALID_PROVIDER(pp): we likely lack the topology lock */
	g_trace(G_T_TOPOLOGY, "g_orphan_provider(%p(%s), %d)",
	    pp, pp->name, error);
	KASSERT(error != 0,
	    ("g_orphan_provider(%p(%s), 0) error must be non-zero\n",
	     pp, pp->name));

	pp->error = error;
	mtx_lock(&g_eventlock);
	KASSERT(!(pp->flags & G_PF_ORPHAN),
	    ("g_orphan_provider(%p(%s)), already an orphan", pp, pp->name));
	pp->flags |= G_PF_ORPHAN;
	TAILQ_INSERT_TAIL(&g_doorstep, pp, orphan);
	mtx_unlock(&g_eventlock);
	wakeup(&g_wait_event);
}

/*
 * This function is called once on each provider which the event handler
 * finds on its g_doorstep.
 */

static void
g_orphan_register(struct g_provider *pp)
{
	struct g_consumer *cp, *cp2;
	int wf;

	g_topology_assert();
	G_VALID_PROVIDER(pp);
	g_trace(G_T_TOPOLOGY, "g_orphan_register(%s)", pp->name);

	g_cancel_event(pp);

	wf = pp->flags & G_PF_WITHER;
	pp->flags &= ~G_PF_WITHER;

	/*
	 * Tell all consumers the bad news.
	 * Don't be surprised if they self-destruct.
	 */
	LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, cp2) {
		KASSERT(cp->geom->orphan != NULL,
		    ("geom %s has no orphan, class %s",
		    cp->geom->name, cp->geom->class->name));
		/*
		 * XXX: The g_dev_orphan method does deferred destruction,
		 * and it is possible that another event has already called
		 * the orphan method.  Check the consumer's flags so we do
		 * not schedule it twice.
		 */
		if (cp->flags & G_CF_ORPHAN)
			continue;
		cp->flags |= G_CF_ORPHAN;
		cp->geom->orphan(cp);
	}
	if (LIST_EMPTY(&pp->consumers) && wf)
		g_destroy_provider(pp);
	else
		pp->flags |= wf;
#ifdef notyet
	cp = LIST_FIRST(&pp->consumers);
	if (cp != NULL)
		return;
	if (pp->geom->flags & G_GEOM_WITHER)
		g_destroy_provider(pp);
#endif
}

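/*
 * Handle one orphaned provider from the doorstep, or one queued event.
 * Returns non-zero if there was work to do; returns zero, with the
 * event lock still held, once both queues are empty.
 */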
static int
one_event(void)
{
	struct g_event *ep;
	struct g_provider *pp;

	g_topology_assert();
	mtx_lock(&g_eventlock);
	pp = TAILQ_FIRST(&g_doorstep);
	if (pp != NULL) {
		G_VALID_PROVIDER(pp);
		TAILQ_REMOVE(&g_doorstep, pp, orphan);
		mtx_unlock(&g_eventlock);
		g_orphan_register(pp);
		return (1);
	}

	ep = TAILQ_FIRST(&g_events);
	if (ep == NULL) {
		wakeup(&g_pending_events);
		return (0);
	}
	ep->flag |= EV_INPROGRESS;
	mtx_unlock(&g_eventlock);
	g_topology_assert();
	ep->func(ep->arg, 0);
	g_topology_assert();
	mtx_lock(&g_eventlock);
	TSRELEASE("GEOM events");
	TAILQ_REMOVE(&g_events, ep, events);
	ep->flag &= ~EV_INPROGRESS;
	if (ep->flag & EV_WAKEUP) {
		ep->flag |= EV_DONE;
		wakeup(ep);
		mtx_unlock(&g_eventlock);
	} else {
		mtx_unlock(&g_eventlock);
		g_free(ep);
	}
	return (1);
}

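/*
 * Main loop of the GEOM event thread: drain the queues, run any
 * pending wither work, then sleep until more work arrives.  Never
 * returns.
 */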
void
g_run_events(void)
{

	for (;;) {
		g_topology_lock();
		while (one_event())
			;
		mtx_assert(&g_eventlock, MA_OWNED);
		if (g_wither_work) {
			g_wither_work = 0;
			mtx_unlock(&g_eventlock);
			g_wither_washer();
			g_topology_unlock();
		} else {
			g_topology_unlock();
			msleep(&g_wait_event, &g_eventlock, PRIBIO | PDROP,
			    "-", 0);
		}
	}
	/* NOTREACHED */
}

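/*
 * Cancel any not-yet-started event (or doorstep entry) holding a
 * reference to "ref".  Cancelled callbacks are invoked once with
 * EV_CANCEL so they can release their resources.
 */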
void
g_cancel_event(void *ref)
{
	struct g_event *ep, *epn;
	struct g_provider *pp;
	u_int n;

	mtx_lock(&g_eventlock);
	TAILQ_FOREACH(pp, &g_doorstep, orphan) {
		if (pp != ref)
			continue;
		TAILQ_REMOVE(&g_doorstep, pp, orphan);
		break;
	}
	TAILQ_FOREACH_SAFE(ep, &g_events, events, epn) {
		if (ep->flag & EV_INPROGRESS)
			continue;
		for (n = 0; n < G_N_EVENTREFS; n++) {
			if (ep->ref[n] == NULL)
				break;
			if (ep->ref[n] != ref)
				continue;
			TSRELEASE("GEOM events");
			TAILQ_REMOVE(&g_events, ep, events);
			ep->func(ep->arg, EV_CANCEL);
			mtx_assert(&g_eventlock, MA_OWNED);
			if (ep->flag & EV_WAKEUP) {
				ep->flag |= (EV_DONE|EV_CANCELED);
				wakeup(ep);
			} else {
				g_free(ep);
			}
			break;
		}
	}
	if (TAILQ_EMPTY(&g_events))
		wakeup(&g_pending_events);
	mtx_unlock(&g_eventlock);
}

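/*
 * Pre-allocate an event so that a later g_post_event_ep() cannot fail.
 * "flag" must be M_WAITOK or M_NOWAIT; with M_NOWAIT this may return
 * NULL.
 */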
struct g_event *
g_alloc_event(int flag)
{
	KASSERT(flag == M_WAITOK || flag == M_NOWAIT,
	    ("Wrong flag to g_alloc_event"));

	return (g_malloc(sizeof(struct g_event), flag | M_ZERO));
}

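/*
 * Fill in a pre-allocated event and queue it: collect the
 * NULL-terminated list of references from "ap", append the event to
 * the queue, wake the event thread, and schedule an AST so the posting
 * thread drains the queue before returning to userland.
 */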
static void
g_post_event_ep_va(g_event_t *func, void *arg, int wuflag,
    struct g_event *ep, va_list ap)
{
	void *p;
	u_int n;

	ep->flag = wuflag;
	for (n = 0; n < G_N_EVENTREFS; n++) {
		p = va_arg(ap, void *);
		if (p == NULL)
			break;
		g_trace(G_T_TOPOLOGY, "  ref %p", p);
		ep->ref[n] = p;
	}
	KASSERT(p == NULL, ("Too many references to event"));
	ep->func = func;
	ep->arg = arg;
	mtx_lock(&g_eventlock);
	TSHOLD("GEOM events");
	TAILQ_INSERT_TAIL(&g_events, ep, events);
	mtx_unlock(&g_eventlock);
	wakeup(&g_wait_event);
	curthread->td_pflags |= TDP_GEOM;
	ast_sched(curthread, TDA_GEOM);
}

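/* Post a pre-allocated event; this cannot fail. */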
void
g_post_event_ep(g_event_t *func, void *arg, struct g_event *ep, ...)
{
	va_list ap;

	va_start(ap, ep);
	g_post_event_ep_va(func, arg, 0, ep, ap);
	va_end(ap);
}

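/*
 * Common code for g_post_event() and g_waitfor_event(): allocate an
 * event, optionally hand it back through "epp", and queue it.
 */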
static int
g_post_event_x(g_event_t *func, void *arg, int flag, int wuflag,
    struct g_event **epp, va_list ap)
{
	struct g_event *ep;

	g_trace(G_T_TOPOLOGY, "g_post_event_x(%p, %p, %d, %d)",
	    func, arg, flag, wuflag);
	KASSERT(wuflag == 0 || wuflag == EV_WAKEUP,
	    ("Wrong wuflag in g_post_event_x(0x%x)", wuflag));
	ep = g_alloc_event(flag);
	if (ep == NULL)
		return (ENOMEM);
	if (epp != NULL)
		*epp = ep;
	g_post_event_ep_va(func, arg, wuflag, ep, ap);
	return (0);
}

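/*
 * Arrange for func(arg) to be run by the event thread.  The variadic
 * arguments are a NULL-terminated list of references under which the
 * event can later be cancelled with g_cancel_event(), e.g. (with a
 * hypothetical callback) g_post_event(my_handler, sc, M_WAITOK, sc,
 * pp, NULL).
 */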
int
g_post_event(g_event_t *func, void *arg, int flag, ...)
{
	va_list ap;
	int i;

	KASSERT(flag == M_WAITOK || flag == M_NOWAIT,
	    ("Wrong flag to g_post_event"));
	va_start(ap, flag);
	i = g_post_event_x(func, arg, flag, 0, NULL, ap);
	va_end(ap);
	return (i);
}

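/* Ask the event thread to run a g_wither_washer() pass. */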
void
g_do_wither(void)
{

	mtx_lock(&g_eventlock);
	g_wither_work = 1;
	mtx_unlock(&g_eventlock);
	wakeup(&g_wait_event);
}

/*
 * XXX: It might actually be useful to call this function with topology held.
 * XXX: This would ensure that the event gets created before anything else
 * XXX: changes.  At present all users have a handle on things in some other
 * XXX: way, so this remains an XXX for now.
 */

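/*
 * Post an event and sleep until it has run.  Returns EAGAIN if the
 * event was cancelled before it could run.
 */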
int
g_waitfor_event(g_event_t *func, void *arg, int flag, ...)
{
	va_list ap;
	struct g_event *ep;
	int error;

	g_topology_assert_not();
	KASSERT(flag == M_WAITOK || flag == M_NOWAIT,
	    ("Wrong flag to g_waitfor_event"));
	va_start(ap, flag);
	error = g_post_event_x(func, arg, flag, EV_WAKEUP, &ep, ap);
	va_end(ap);
	if (error)
		return (error);

	mtx_lock(&g_eventlock);
	while (!(ep->flag & EV_DONE))
		msleep(ep, &g_eventlock, PRIBIO, "g_waitfor_event", 0);
	if (ep->flag & EV_CANCELED)
		error = EAGAIN;
	mtx_unlock(&g_eventlock);

	g_free(ep);
	return (error);
}

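/* Initialize the mutex protecting the event and doorstep queues. */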
void
g_event_init(void)
{

	mtx_init(&g_eventlock, "GEOM orphanage", NULL, MTX_DEF);
}