/*
   Unix SMB/CIFS implementation.
   main select loop and event handling
   Copyright (C) Andrew Tridgell 2003
   Copyright (C) Stefan Metzmacher 2009

     ** NOTE! The following LGPL license applies to the tevent
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

/*
  PLEASE READ THIS BEFORE MODIFYING!

  This module is a general abstraction for the main select loop and
  event handling. Do not ever put any localised hacks in here; instead
  register one of the possible event types and implement that event
  somewhere else.

  There are two types of event handling that are handled in this module:

  1) a file descriptor becoming readable or writeable. This is mostly
     used for network sockets, but can be used for any type of file
     descriptor. You may only register one handler for each file
     descriptor/io combination or you will get unpredictable results
     (this means that you can have a handler for read events, and a
     separate handler for write events, but not two handlers that are
     both handling read events)

  2) a timed event. You can register an event that happens at a
     specific time. You can register as many of these as you
     like. They are single shot - add a new timed event in the event
     handler to get another event.

  To set up a set of events you first need to create an event context
  using the function tevent_context_init(). This returns a
  'struct tevent_context' that you use in all subsequent calls.

  After that you can add/remove events that you are interested in
  using tevent_add_*() and talloc_free().

  Finally, you call tevent_loop_once() to block waiting for one of the
  events to occur, or tevent_loop_wait() which will loop
  forever.
*/
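
/*
  A minimal usage sketch of the lifecycle described above (illustrative
  only, not compiled here; assumes tevent.h and talloc.h are available,
  and the handler and variable names are hypothetical):

	static void timeout_handler(struct tevent_context *ev,
				    struct tevent_timer *te,
				    struct timeval now,
				    void *private_data)
	{
		bool *done = (bool *)private_data;
		*done = true;
	}

	int example(void)
	{
		TALLOC_CTX *mem_ctx = talloc_new(NULL);
		struct tevent_context *ev = tevent_context_init(mem_ctx);
		bool done = false;

		if (ev == NULL) {
			return 1;
		}

		tevent_add_timer(ev, ev, tevent_timeval_current_ofs(1, 0),
				 timeout_handler, &done);

		while (!done) {
			if (tevent_loop_once(ev) != 0) {
				break;
			}
		}

		talloc_free(mem_ctx);
		return 0;
	}
*/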
#include "replace.h"
#include "system/filesys.h"
#ifdef HAVE_PTHREAD
#include "system/threads.h"
#endif
#define TEVENT_DEPRECATED 1
#include "tevent.h"
#include "tevent_internal.h"
#include "tevent_util.h"
#ifdef HAVE_EVENTFD
#include <sys/eventfd.h>
#endif

struct tevent_ops_list {
	struct tevent_ops_list *next, *prev;
	const char *name;
	const struct tevent_ops *ops;
};

/* list of registered event backends */
static struct tevent_ops_list *tevent_backends = NULL;
static char *tevent_default_backend = NULL;

/*
  register an events backend
*/
bool tevent_register_backend(const char *name, const struct tevent_ops *ops)
{
	struct tevent_ops_list *e;

	for (e = tevent_backends; e != NULL; e = e->next) {
		if (0 == strcmp(e->name, name)) {
			/* already registered, skip it */
			return true;
		}
	}

	e = talloc(NULL, struct tevent_ops_list);
	if (e == NULL) return false;

	e->name = name;
	e->ops = ops;
	DLIST_ADD(tevent_backends, e);

	return true;
}

/*
  set the default event backend
 */
void tevent_set_default_backend(const char *backend)
{
	talloc_free(tevent_default_backend);
	tevent_default_backend = talloc_strdup(NULL, backend);
}

/*
  initialise backends if not already done
*/
static void tevent_backend_init(void)
{
	static bool done;

	if (done) {
		return;
	}

	done = true;

	tevent_poll_init();
	tevent_poll_mt_init();
#if defined(HAVE_EPOLL)
	tevent_epoll_init();
#elif defined(HAVE_SOLARIS_PORTS)
	tevent_port_init();
#endif

	tevent_standard_init();
}

_PRIVATE_ const struct tevent_ops *tevent_find_ops_byname(const char *name)
{
	struct tevent_ops_list *e;

	tevent_backend_init();

	if (name == NULL) {
		name = tevent_default_backend;
	}
	if (name == NULL) {
		name = "standard";
	}

	for (e = tevent_backends; e != NULL; e = e->next) {
		if (0 == strcmp(e->name, name)) {
			return e->ops;
		}
	}

	return NULL;
}

/*
  list available backends
*/
const char **tevent_backend_list(TALLOC_CTX *mem_ctx)
{
	const char **list = NULL;
	struct tevent_ops_list *e;

	tevent_backend_init();

	for (e=tevent_backends;e;e=e->next) {
		list = ev_str_list_add(list, e->name);
	}

	talloc_steal(mem_ctx, list);

	return list;
}
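
/*
  Illustrative sketch (not compiled here) of how an application might
  inspect the available backends and pick one by name. The "poll" name
  and the fallback logic are examples, not a recommendation:

	TALLOC_CTX *tmp = talloc_new(NULL);
	const char **backends = tevent_backend_list(tmp);
	struct tevent_context *ev;

	(inspect or log the names in 'backends' here if wanted)

	ev = tevent_context_init_byname(NULL, "poll");
	if (ev == NULL) {
		ev = tevent_context_init(NULL);  fall back to the default
	}
	talloc_free(tmp);
*/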

static void tevent_common_wakeup_fini(struct tevent_context *ev);

#ifdef HAVE_PTHREAD

static pthread_mutex_t tevent_contexts_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct tevent_context *tevent_contexts = NULL;
static pthread_once_t tevent_atfork_initialized = PTHREAD_ONCE_INIT;

static void tevent_atfork_prepare(void)
{
	struct tevent_context *ev;
	int ret;

	ret = pthread_mutex_lock(&tevent_contexts_mutex);
	if (ret != 0) {
		abort();
	}

	for (ev = tevent_contexts; ev != NULL; ev = ev->next) {
		struct tevent_threaded_context *tctx;

		for (tctx = ev->threaded_contexts; tctx != NULL;
		     tctx = tctx->next) {
			ret = pthread_mutex_lock(&tctx->event_ctx_mutex);
			if (ret != 0) {
				tevent_abort(ev, "pthread_mutex_lock failed");
			}
		}

		ret = pthread_mutex_lock(&ev->scheduled_mutex);
		if (ret != 0) {
			tevent_abort(ev, "pthread_mutex_lock failed");
		}
	}
}

static void tevent_atfork_parent(void)
{
	struct tevent_context *ev;
	int ret;

	for (ev = DLIST_TAIL(tevent_contexts); ev != NULL;
	     ev = DLIST_PREV(ev)) {
		struct tevent_threaded_context *tctx;

		ret = pthread_mutex_unlock(&ev->scheduled_mutex);
		if (ret != 0) {
			tevent_abort(ev, "pthread_mutex_unlock failed");
		}

		for (tctx = DLIST_TAIL(ev->threaded_contexts); tctx != NULL;
		     tctx = DLIST_PREV(tctx)) {
			ret = pthread_mutex_unlock(&tctx->event_ctx_mutex);
			if (ret != 0) {
				tevent_abort(
					ev, "pthread_mutex_unlock failed");
			}
		}
	}

	ret = pthread_mutex_unlock(&tevent_contexts_mutex);
	if (ret != 0) {
		abort();
	}
}

static void tevent_atfork_child(void)
{
	struct tevent_context *ev;
	int ret;

	for (ev = DLIST_TAIL(tevent_contexts); ev != NULL;
	     ev = DLIST_PREV(ev)) {
		struct tevent_threaded_context *tctx;

		for (tctx = DLIST_TAIL(ev->threaded_contexts); tctx != NULL;
		     tctx = DLIST_PREV(tctx)) {
			tctx->event_ctx = NULL;

			ret = pthread_mutex_unlock(&tctx->event_ctx_mutex);
			if (ret != 0) {
				tevent_abort(
					ev, "pthread_mutex_unlock failed");
			}
		}

		ev->threaded_contexts = NULL;

		ret = pthread_mutex_unlock(&ev->scheduled_mutex);
		if (ret != 0) {
			tevent_abort(ev, "pthread_mutex_unlock failed");
		}
	}

	ret = pthread_mutex_unlock(&tevent_contexts_mutex);
	if (ret != 0) {
		abort();
	}
}

static void tevent_prep_atfork(void)
{
	int ret;

	ret = pthread_atfork(tevent_atfork_prepare,
			     tevent_atfork_parent,
			     tevent_atfork_child);
	if (ret != 0) {
		abort();
	}
}

#endif

int tevent_common_context_destructor(struct tevent_context *ev)
{
	struct tevent_fd *fd, *fn;
	struct tevent_timer *te, *tn;
	struct tevent_immediate *ie, *in;
	struct tevent_signal *se, *sn;
	struct tevent_wrapper_glue *gl, *gn;
#ifdef HAVE_PTHREAD
	int ret;
#endif

	if (ev->wrapper.glue != NULL) {
		tevent_abort(ev,
			"tevent_common_context_destructor() active on wrapper");
	}

#ifdef HAVE_PTHREAD
	ret = pthread_mutex_lock(&tevent_contexts_mutex);
	if (ret != 0) {
		abort();
	}

	DLIST_REMOVE(tevent_contexts, ev);

	ret = pthread_mutex_unlock(&tevent_contexts_mutex);
	if (ret != 0) {
		abort();
	}

	while (ev->threaded_contexts != NULL) {
		struct tevent_threaded_context *tctx = ev->threaded_contexts;

		ret = pthread_mutex_lock(&tctx->event_ctx_mutex);
		if (ret != 0) {
			abort();
		}

		/*
		 * Indicate to the thread that the tevent_context is
		 * gone. The counterpart of this is in
		 * _tevent_threaded_schedule_immediate, there we read
		 * this under the threaded_context's mutex.
		 */

		tctx->event_ctx = NULL;

		ret = pthread_mutex_unlock(&tctx->event_ctx_mutex);
		if (ret != 0) {
			abort();
		}

		DLIST_REMOVE(ev->threaded_contexts, tctx);
	}

	ret = pthread_mutex_destroy(&ev->scheduled_mutex);
	if (ret != 0) {
		abort();
	}
#endif

	for (gl = ev->wrapper.list; gl; gl = gn) {
		gn = gl->next;

		gl->main_ev = NULL;
		DLIST_REMOVE(ev->wrapper.list, gl);
	}

	tevent_common_wakeup_fini(ev);

	for (fd = ev->fd_events; fd; fd = fn) {
		fn = fd->next;
		fd->wrapper = NULL;
		fd->event_ctx = NULL;
		DLIST_REMOVE(ev->fd_events, fd);
	}

	ev->last_zero_timer = NULL;
	for (te = ev->timer_events; te; te = tn) {
		tn = te->next;
		te->wrapper = NULL;
		te->event_ctx = NULL;
		DLIST_REMOVE(ev->timer_events, te);
	}

	for (ie = ev->immediate_events; ie; ie = in) {
		in = ie->next;
		ie->wrapper = NULL;
		ie->event_ctx = NULL;
		ie->cancel_fn = NULL;
		DLIST_REMOVE(ev->immediate_events, ie);
	}

	for (se = ev->signal_events; se; se = sn) {
		sn = se->next;
		se->wrapper = NULL;
		se->event_ctx = NULL;
		DLIST_REMOVE(ev->signal_events, se);
		/*
		 * This is important. Otherwise signals are
		 * handled twice in the child: e.g. for SIGHUP,
		 * once by the handler added in the parent and
		 * again by the one added in the child. -- BoYang
		 */
		tevent_cleanup_pending_signal_handlers(se);
	}

	/* Remove the nesting hook, or we get an abort when nesting is
	 * not allowed. -- SSS
	 * Note that we need to leave the allowed flag at its current
	 * value, otherwise the use in tevent_re_initialise() would
	 * leave the event context with allowed forced to false, which
	 * would break users that expect nesting to be allowed.
	 */
	ev->nesting.level = 0;
	ev->nesting.hook_fn = NULL;
	ev->nesting.hook_private = NULL;

	return 0;
}

static int tevent_common_context_constructor(struct tevent_context *ev)
{
	int ret;

#ifdef HAVE_PTHREAD

	ret = pthread_once(&tevent_atfork_initialized, tevent_prep_atfork);
	if (ret != 0) {
		return ret;
	}

	ret = pthread_mutex_init(&ev->scheduled_mutex, NULL);
	if (ret != 0) {
		return ret;
	}

	ret = pthread_mutex_lock(&tevent_contexts_mutex);
	if (ret != 0) {
		pthread_mutex_destroy(&ev->scheduled_mutex);
		return ret;
	}

	DLIST_ADD(tevent_contexts, ev);

	ret = pthread_mutex_unlock(&tevent_contexts_mutex);
	if (ret != 0) {
		abort();
	}
#endif

	talloc_set_destructor(ev, tevent_common_context_destructor);

	return 0;
}

void tevent_common_check_double_free(TALLOC_CTX *ptr, const char *reason)
{
	void *parent_ptr = talloc_parent(ptr);
	size_t parent_blocks = talloc_total_blocks(parent_ptr);

	if (parent_ptr != NULL && parent_blocks == 0) {
		/*
		 * This is an implicit talloc free, as we still have a parent
		 * but it's already being destroyed. Note that
		 * talloc_total_blocks(ptr) also just returns 0 if a
		 * talloc_free(ptr) is still in progress of freeing all
		 * children.
		 */
		return;
	}

	tevent_abort(NULL, reason);
}

/*
  create an event_context structure for a specific implementation.
  This must be the first events call, and all subsequent calls pass
  this event_context as the first element. Event handlers also
  receive this as their first argument.

  This function allows third-party applications to hook in glue code
  to their own event loop, so that they can make async use of our client libs.

  NOTE: use tevent_context_init() inside of samba!
*/
struct tevent_context *tevent_context_init_ops(TALLOC_CTX *mem_ctx,
					       const struct tevent_ops *ops,
					       void *additional_data)
{
	struct tevent_context *ev;
	int ret;

	ev = talloc_zero(mem_ctx, struct tevent_context);
	if (!ev) return NULL;

	ret = tevent_common_context_constructor(ev);
	if (ret != 0) {
		talloc_free(ev);
		return NULL;
	}

	ev->ops = ops;
	ev->additional_data = additional_data;

	ret = ev->ops->context_init(ev);
	if (ret != 0) {
		talloc_free(ev);
		return NULL;
	}

	return ev;
}

/*
  create an event_context structure. This must be the first events
  call, and all subsequent calls pass this event_context as the first
  element. Event handlers also receive this as their first argument.
*/
struct tevent_context *tevent_context_init_byname(TALLOC_CTX *mem_ctx,
						  const char *name)
{
	const struct tevent_ops *ops;

	ops = tevent_find_ops_byname(name);
	if (ops == NULL) {
		return NULL;
	}

	return tevent_context_init_ops(mem_ctx, ops, NULL);
}


/*
  create an event_context structure. This must be the first events
  call, and all subsequent calls pass this event_context as the first
  element. Event handlers also receive this as their first argument.
*/
struct tevent_context *tevent_context_init(TALLOC_CTX *mem_ctx)
{
	return tevent_context_init_byname(mem_ctx, NULL);
}

/*
  add an fd based event
  return NULL on failure (memory allocation error)
*/
struct tevent_fd *_tevent_add_fd(struct tevent_context *ev,
				 TALLOC_CTX *mem_ctx,
				 int fd,
				 uint16_t flags,
				 tevent_fd_handler_t handler,
				 void *private_data,
				 const char *handler_name,
				 const char *location)
{
	return ev->ops->add_fd(ev, mem_ctx, fd, flags, handler, private_data,
			       handler_name, location);
}
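
/*
  Illustrative sketch (not compiled here) of the usual tevent_add_fd()
  pattern via the public macro; the handler name and the socket fd are
  hypothetical:

	static void sock_readable(struct tevent_context *ev,
				  struct tevent_fd *fde,
				  uint16_t flags,
				  void *private_data)
	{
		int fd = *(int *)private_data;
		char buf[256];

		read(fd, buf, sizeof(buf));
	}

	...
	struct tevent_fd *fde = tevent_add_fd(ev, ev, sock_fd, TEVENT_FD_READ,
					      sock_readable, &sock_fd);
	if (fde == NULL) {
		return ENOMEM;
	}
	tevent_fd_set_auto_close(fde);  close(sock_fd) when fde is freed
	...
*/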

/*
  set a close function on the fd event
*/
void tevent_fd_set_close_fn(struct tevent_fd *fde,
			    tevent_fd_close_fn_t close_fn)
{
	if (!fde) return;
	if (!fde->event_ctx) return;
	fde->event_ctx->ops->set_fd_close_fn(fde, close_fn);
}

static void tevent_fd_auto_close_fn(struct tevent_context *ev,
				    struct tevent_fd *fde,
				    int fd,
				    void *private_data)
{
	close(fd);
}

void tevent_fd_set_auto_close(struct tevent_fd *fde)
{
	tevent_fd_set_close_fn(fde, tevent_fd_auto_close_fn);
}

/*
  return the fd event flags
*/
uint16_t tevent_fd_get_flags(struct tevent_fd *fde)
{
	if (!fde) return 0;
	if (!fde->event_ctx) return 0;
	return fde->event_ctx->ops->get_fd_flags(fde);
}

/*
  set the fd event flags
*/
void tevent_fd_set_flags(struct tevent_fd *fde, uint16_t flags)
{
	if (!fde) return;
	if (!fde->event_ctx) return;
	fde->event_ctx->ops->set_fd_flags(fde, flags);
}

bool tevent_signal_support(struct tevent_context *ev)
{
	if (ev->ops->add_signal) {
		return true;
	}
	return false;
}

static void (*tevent_abort_fn)(const char *reason);

void tevent_set_abort_fn(void (*abort_fn)(const char *reason))
{
	tevent_abort_fn = abort_fn;
}

void tevent_abort(struct tevent_context *ev, const char *reason)
{
	if (ev != NULL) {
		tevent_debug(ev, TEVENT_DEBUG_FATAL,
			     "abort: %s\n", reason);
	}

	if (!tevent_abort_fn) {
		abort();
	}

	tevent_abort_fn(reason);
}

/*
  add a timer event
  return NULL on failure
*/
struct tevent_timer *_tevent_add_timer(struct tevent_context *ev,
				       TALLOC_CTX *mem_ctx,
				       struct timeval next_event,
				       tevent_timer_handler_t handler,
				       void *private_data,
				       const char *handler_name,
				       const char *location)
{
	return ev->ops->add_timer(ev, mem_ctx, next_event, handler, private_data,
				  handler_name, location);
}
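
/*
  Timers are single shot (see the note at the top of this file); a
  periodic timer is built by re-adding the event from inside its own
  handler. A hedged sketch using the public tevent_add_timer() macro,
  with hypothetical names:

	static void every_second(struct tevent_context *ev,
				 struct tevent_timer *te,
				 struct timeval now,
				 void *private_data)
	{
		do the periodic work here, then re-arm:
		tevent_add_timer(ev, ev, tevent_timeval_current_ofs(1, 0),
				 every_second, private_data);
	}

	...
	tevent_add_timer(ev, ev, tevent_timeval_current_ofs(1, 0),
			 every_second, NULL);
	...
*/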

/*
  allocate an immediate event
  return NULL on failure (memory allocation error)
*/
struct tevent_immediate *_tevent_create_immediate(TALLOC_CTX *mem_ctx,
						  const char *location)
{
	struct tevent_immediate *im;

	im = talloc(mem_ctx, struct tevent_immediate);
	if (im == NULL) return NULL;

	*im = (struct tevent_immediate) { .create_location = location };

	return im;
}

/*
  schedule an immediate event
*/
void _tevent_schedule_immediate(struct tevent_immediate *im,
				struct tevent_context *ev,
				tevent_immediate_handler_t handler,
				void *private_data,
				const char *handler_name,
				const char *location)
{
	ev->ops->schedule_immediate(im, ev, handler, private_data,
				    handler_name, location);
}
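
/*
  Immediate events fire on the next pass through the event loop. A
  hedged usage sketch via the public macros, with a hypothetical
  handler name:

	static void run_soon(struct tevent_context *ev,
			     struct tevent_immediate *im,
			     void *private_data)
	{
		deferred work goes here; 'im' can be reused afterwards
	}

	...
	struct tevent_immediate *im = tevent_create_immediate(ev);
	if (im == NULL) {
		return ENOMEM;
	}
	tevent_schedule_immediate(im, ev, run_soon, NULL);
	...
*/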

/*
  add a signal event

  sa_flags are flags to sigaction(2)

  return NULL on failure
*/
struct tevent_signal *_tevent_add_signal(struct tevent_context *ev,
					 TALLOC_CTX *mem_ctx,
					 int signum,
					 int sa_flags,
					 tevent_signal_handler_t handler,
					 void *private_data,
					 const char *handler_name,
					 const char *location)
{
	return ev->ops->add_signal(ev, mem_ctx, signum, sa_flags, handler, private_data,
				   handler_name, location);
}
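
/*
  A hedged sketch of signal handling via the public tevent_add_signal()
  macro; it is worth checking tevent_signal_support() first, since not
  every backend provides add_signal. Names are illustrative:

	static void got_sighup(struct tevent_context *ev,
			       struct tevent_signal *se,
			       int signum,
			       int count,
			       void *siginfo,
			       void *private_data)
	{
		e.g. reload configuration here
	}

	...
	if (tevent_signal_support(ev)) {
		struct tevent_signal *se = tevent_add_signal(ev, ev, SIGHUP, 0,
							     got_sighup, NULL);
		if (se == NULL) {
			return ENOMEM;
		}
	}
	...
*/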

void tevent_loop_allow_nesting(struct tevent_context *ev)
{
	if (ev->wrapper.glue != NULL) {
		tevent_abort(ev, "tevent_loop_allow_nesting() on wrapper");
		return;
	}

	if (ev->wrapper.list != NULL) {
		tevent_abort(ev, "tevent_loop_allow_nesting() with wrapper");
		return;
	}

	ev->nesting.allowed = true;
}

void tevent_loop_set_nesting_hook(struct tevent_context *ev,
				  tevent_nesting_hook hook,
				  void *private_data)
{
	if (ev->nesting.hook_fn &&
	    (ev->nesting.hook_fn != hook ||
	     ev->nesting.hook_private != private_data)) {
		/* the way the nesting hook code is currently written
		   we cannot support two different nesting hooks at the
		   same time. */
		tevent_abort(ev, "tevent: Violation of nesting hook rules\n");
	}
	ev->nesting.hook_fn = hook;
	ev->nesting.hook_private = private_data;
}

static void tevent_abort_nesting(struct tevent_context *ev, const char *location)
{
	const char *reason;

	reason = talloc_asprintf(NULL, "tevent_loop_once() nesting at %s",
				 location);
	if (!reason) {
		reason = "tevent_loop_once() nesting";
	}

	tevent_abort(ev, reason);
}

/*
  do a single event loop using the events defined in ev
*/
int _tevent_loop_once(struct tevent_context *ev, const char *location)
{
	int ret;
	void *nesting_stack_ptr = NULL;

	ev->nesting.level++;

	if (ev->nesting.level > 1) {
		if (!ev->nesting.allowed) {
			tevent_abort_nesting(ev, location);
			errno = ELOOP;
			return -1;
		}
	}
	if (ev->nesting.level > 0) {
		if (ev->nesting.hook_fn) {
			int ret2;
			ret2 = ev->nesting.hook_fn(ev,
						   ev->nesting.hook_private,
						   ev->nesting.level,
						   true,
						   (void *)&nesting_stack_ptr,
						   location);
			if (ret2 != 0) {
				ret = ret2;
				goto done;
			}
		}
	}

	tevent_trace_point_callback(ev, TEVENT_TRACE_BEFORE_LOOP_ONCE);
	ret = ev->ops->loop_once(ev, location);
	tevent_trace_point_callback(ev, TEVENT_TRACE_AFTER_LOOP_ONCE);

	if (ev->nesting.level > 0) {
		if (ev->nesting.hook_fn) {
			int ret2;
			ret2 = ev->nesting.hook_fn(ev,
						   ev->nesting.hook_private,
						   ev->nesting.level,
						   false,
						   (void *)&nesting_stack_ptr,
						   location);
			if (ret2 != 0) {
				ret = ret2;
				goto done;
			}
		}
	}

done:
	ev->nesting.level--;
	return ret;
}

/*
  this is a performance optimization for the samba4 nested event loop problems
*/
int _tevent_loop_until(struct tevent_context *ev,
		       bool (*finished)(void *private_data),
		       void *private_data,
		       const char *location)
{
	int ret = 0;
	void *nesting_stack_ptr = NULL;

	ev->nesting.level++;

	if (ev->nesting.level > 1) {
		if (!ev->nesting.allowed) {
			tevent_abort_nesting(ev, location);
			errno = ELOOP;
			return -1;
		}
	}
	if (ev->nesting.level > 0) {
		if (ev->nesting.hook_fn) {
			int ret2;
			ret2 = ev->nesting.hook_fn(ev,
						   ev->nesting.hook_private,
						   ev->nesting.level,
						   true,
						   (void *)&nesting_stack_ptr,
						   location);
			if (ret2 != 0) {
				ret = ret2;
				goto done;
			}
		}
	}

	while (!finished(private_data)) {
		tevent_trace_point_callback(ev, TEVENT_TRACE_BEFORE_LOOP_ONCE);
		ret = ev->ops->loop_once(ev, location);
		tevent_trace_point_callback(ev, TEVENT_TRACE_AFTER_LOOP_ONCE);
		if (ret != 0) {
			break;
		}
	}

	if (ev->nesting.level > 0) {
		if (ev->nesting.hook_fn) {
			int ret2;
			ret2 = ev->nesting.hook_fn(ev,
						   ev->nesting.hook_private,
						   ev->nesting.level,
						   false,
						   (void *)&nesting_stack_ptr,
						   location);
			if (ret2 != 0) {
				ret = ret2;
				goto done;
			}
		}
	}

done:
	ev->nesting.level--;
	return ret;
}

bool tevent_common_have_events(struct tevent_context *ev)
{
	if (ev->fd_events != NULL) {
		if (ev->fd_events != ev->wakeup_fde) {
			return true;
		}
		if (ev->fd_events->next != NULL) {
			return true;
		}

		/*
		 * At this point we just have the wakeup pipe event as
		 * the only fd_event. That one does not count as a
		 * regular event, so look at the other event types.
		 */
	}

	return ((ev->timer_events != NULL) ||
		(ev->immediate_events != NULL) ||
		(ev->signal_events != NULL));
}

/*
  return on failure or (with 0) if all fd events are removed
*/
int tevent_common_loop_wait(struct tevent_context *ev,
			    const char *location)
{
	/*
	 * loop as long as we have events pending
	 */
	while (tevent_common_have_events(ev)) {
		int ret;
		ret = _tevent_loop_once(ev, location);
		if (ret != 0) {
			tevent_debug(ev, TEVENT_DEBUG_FATAL,
				     "_tevent_loop_once() failed: %d - %s\n",
				     ret, strerror(errno));
			return ret;
		}
	}

	tevent_debug(ev, TEVENT_DEBUG_WARNING,
		     "tevent_common_loop_wait() out of events\n");
	return 0;
}

/*
  return on failure or (with 0) if all fd events are removed
*/
int _tevent_loop_wait(struct tevent_context *ev, const char *location)
{
	return ev->ops->loop_wait(ev, location);
}


/*
  re-initialise a tevent context. This leaves you with the same
  event context, but all events are wiped and the structure is
  re-initialised. This is most useful after a fork()

  zero is returned on success, non-zero on failure
*/
int tevent_re_initialise(struct tevent_context *ev)
{
	tevent_common_context_destructor(ev);

	tevent_common_context_constructor(ev);

	return ev->ops->context_init(ev);
}
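
/*
  A hedged sketch of the intended pattern after fork(); the surrounding
  error handling is illustrative only:

	pid_t pid = fork();
	if (pid == 0) {
		child: wipe events inherited from the parent and rebuild
		the backend state before adding any new events:
		int ret = tevent_re_initialise(ev);
		if (ret != 0) {
			exit(1);
		}
	}
*/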

static void wakeup_pipe_handler(struct tevent_context *ev,
				struct tevent_fd *fde,
				uint16_t flags, void *_private)
{
	ssize_t ret;

	do {
		/*
		 * This is the boilerplate for eventfd, but it works
		 * for pipes too. And as we don't care about the data
		 * we read, we're fine.
		 */
		uint64_t val;
		ret = read(fde->fd, &val, sizeof(val));
	} while (ret == -1 && errno == EINTR);
}

/*
 * Initialize the wakeup pipe and pipe fde
 */

int tevent_common_wakeup_init(struct tevent_context *ev)
{
	int ret, read_fd;

	if (ev->wakeup_fde != NULL) {
		return 0;
	}

#ifdef HAVE_EVENTFD
	ret = eventfd(0, EFD_NONBLOCK);
	if (ret == -1) {
		return errno;
	}
	read_fd = ev->wakeup_fd = ret;
#else
	{
		int pipe_fds[2];
		ret = pipe(pipe_fds);
		if (ret == -1) {
			return errno;
		}
		ev->wakeup_fd = pipe_fds[1];
		ev->wakeup_read_fd = pipe_fds[0];

		ev_set_blocking(ev->wakeup_fd, false);
		ev_set_blocking(ev->wakeup_read_fd, false);

		read_fd = ev->wakeup_read_fd;
	}
#endif

	ev->wakeup_fde = tevent_add_fd(ev, ev, read_fd, TEVENT_FD_READ,
				     wakeup_pipe_handler, NULL);
	if (ev->wakeup_fde == NULL) {
		close(ev->wakeup_fd);
#ifndef HAVE_EVENTFD
		close(ev->wakeup_read_fd);
#endif
		return ENOMEM;
	}

	return 0;
}

int tevent_common_wakeup_fd(int fd)
{
	ssize_t ret;

	do {
#ifdef HAVE_EVENTFD
		uint64_t val = 1;
		ret = write(fd, &val, sizeof(val));
#else
		char c = '\0';
		ret = write(fd, &c, 1);
#endif
	} while ((ret == -1) && (errno == EINTR));

	return 0;
}

int tevent_common_wakeup(struct tevent_context *ev)
{
	if (ev->wakeup_fde == NULL) {
		return ENOTCONN;
	}

	return tevent_common_wakeup_fd(ev->wakeup_fd);
}

static void tevent_common_wakeup_fini(struct tevent_context *ev)
{
	if (ev->wakeup_fde == NULL) {
		return;
	}

	TALLOC_FREE(ev->wakeup_fde);

	close(ev->wakeup_fd);
#ifndef HAVE_EVENTFD
	close(ev->wakeup_read_fd);
#endif
}