1 /*	$NetBSD: event-internal.h,v 1.5 2020/05/25 20:47:33 christos Exp $	*/
2 
3 /*
4  * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
5  * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. The name of the author may not be used to endorse or promote products
16  *    derived from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 #ifndef EVENT_INTERNAL_H_INCLUDED_
30 #define EVENT_INTERNAL_H_INCLUDED_
31 
32 #ifdef __cplusplus
33 extern "C" {
34 #endif
35 
36 #include "event2/event-config.h"
37 #include "evconfig-private.h"
38 
39 #include <time.h>
40 #include <sys/queue.h>
41 #include "event2/event_struct.h"
42 #include "minheap-internal.h"
43 #include "evsignal-internal.h"
44 #include "mm-internal.h"
45 #include "defer-internal.h"
46 
/* map union members back */

/* mutually exclusive: an event is either a signal event or an I/O event,
 * so these members share storage in the ev_ union of struct event. */
#define ev_signal_next	ev_.ev_signal.ev_signal_next
#define ev_io_next	ev_.ev_io.ev_io_next
#define ev_io_timeout	ev_.ev_io.ev_timeout

/* used only by signals */
#define ev_ncalls	ev_.ev_signal.ev_ncalls
#define ev_pncalls	ev_.ev_signal.ev_pncalls

/* Shorthand accessors for fields of the event_callback embedded in
 * struct event (the ev_evcallback member). */
#define ev_pri ev_evcallback.evcb_pri
#define ev_flags ev_evcallback.evcb_flags
#define ev_closure ev_evcallback.evcb_closure
#define ev_callback ev_evcallback.evcb_cb_union.evcb_callback
#define ev_arg ev_evcallback.evcb_arg

/** @name Event closure codes

    Possible values for evcb_closure in struct event_callback

    @{
 */
/** A regular event. Uses the evcb_callback callback */
#define EV_CLOSURE_EVENT 0
/** A signal event. Uses the evcb_callback callback */
#define EV_CLOSURE_EVENT_SIGNAL 1
/** A persistent non-signal event. Uses the evcb_callback callback */
#define EV_CLOSURE_EVENT_PERSIST 2
/** A simple callback. Uses the evcb_selfcb callback. */
#define EV_CLOSURE_CB_SELF 3
/** A finalizing callback. Uses the evcb_cbfinalize callback. */
#define EV_CLOSURE_CB_FINALIZE 4
/** A finalizing event. Uses the evcb_evfinalize callback. */
#define EV_CLOSURE_EVENT_FINALIZE 5
/** A finalizing event that should get freed after. Uses the evcb_evfinalize
 * callback. */
#define EV_CLOSURE_EVENT_FINALIZE_FREE 6
/** @} */
86 
/** Structure to define the backend of a given event_base. */
struct eventop {
	/** The name of this backend. */
	const char *name;
	/** Function to set up an event_base to use this backend.  It should
	 * create a new structure holding whatever information is needed to
	 * run the backend, and return it.  The returned pointer will get
	 * stored by event_init into the event_base.evbase field.  On failure,
	 * this function should return NULL. */
	void *(*init)(struct event_base *);
	/** Enable reading/writing on a given fd or signal.  'events' will be
	 * the events that we're trying to enable: one or more of EV_READ,
	 * EV_WRITE, EV_SIGNAL, and EV_ET.  'old' will be those events that
	 * were enabled on this fd previously.  'fdinfo' will be a structure
	 * associated with the fd by the evmap; its size is defined by the
	 * fdinfo_len field below.  It will be set to 0 the first time the fd
	 * is added.  The function should return 0 on success and -1 on error.
	 */
	int (*add)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
	/** As "add", except 'events' contains the events we mean to disable. */
	int (*del)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
	/** Function to implement the core of an event loop.  It must see which
	    added events are ready, and cause event_active to be called for each
	    active event (usually via event_io_active or such).  It should
	    return 0 on success and -1 on error.  NOTE(review): the timeval
	    argument is presumably the maximum time to block waiting for
	    events -- confirm against the backends' dispatch implementations.
	 */
	int (*dispatch)(struct event_base *, struct timeval *);
	/** Function to clean up and free our data from the event_base. */
	void (*dealloc)(struct event_base *);
	/** Flag: set if we need to reinitialize the event base after we fork.
	 */
	int need_reinit;
	/** Bit-array of supported event_method_features that this backend can
	 * provide. */
	enum event_method_feature features;
	/** Length of the extra information we should record for each fd that
	    has one or more active events.  This information is recorded
	    as part of the evmap entry for each fd, and passed as an argument
	    to the add and del functions above.
	 */
	size_t fdinfo_len;
};
129 
#ifdef _WIN32
/* If we're on win32, then file descriptors are not nice low densely packed
   integers.  Instead, they are pointer-like windows handles, and we want to
   use a hashtable instead of an array to map fds to events.
*/
#define EVMAP_USE_HT
#endif

/* NOTE(review): dormant tuning knob; appears related to the
 * HT_NO_CACHE_HASH_VALUES setting below -- confirm in ht-internal.h. */
/* #define HT_CACHE_HASH_VALS */

#ifdef EVMAP_USE_HT
/* Tell ht-internal.h not to store a cached hash value in each entry. */
#define HT_NO_CACHE_HASH_VALUES
#include "ht-internal.h"
struct event_map_entry;
/* Declare the hash-table type used as the fd -> events map on win32. */
HT_HEAD(event_io_map, event_map_entry);
#else
/* Elsewhere, fds are small dense integers, so the fd map can share the
 * array-based layout of the signal map below. */
#define event_io_map event_signal_map
#endif
148 
/* Used to map signal numbers to a list of events.  If EVMAP_USE_HT is not
   defined, this structure is also used as event_io_map, which maps fds to a
   list of events.
*/
struct event_signal_map {
	/* An array of evmap_io * or of evmap_signal *, indexed by fd or
	 * signal number; empty entries are set to NULL. */
	void **entries;
	/* The number of entries available in entries */
	int nentries;
};
160 
/* A list of events waiting on a given 'common' timeout value.  Ordinarily,
 * events waiting for a timeout wait on a minheap.  Sometimes, however, a
 * queue can be faster.
 */
struct common_timeout_list {
	/* List of events currently waiting in the queue. */
	struct event_list events;
	/* 'magic' timeval used to indicate the duration of events in this
	 * queue. */
	struct timeval duration;
	/* Event that triggers whenever one of the events in the queue is
	 * ready to activate */
	struct event timeout_event;
	/* The event_base that this timeout list is part of */
	struct event_base *base;
};

/** Mask used to get the real tv_usec value from a common timeout; the bits
 * above this mask hold the 'magic' marker and queue index.  NOTE(review):
 * the exact encoding lives in event.c -- confirm there. */
#define COMMON_TIMEOUT_MICROSECONDS_MASK       0x000fffff
180 
/* Record of one pending change to an fd or signal; defined elsewhere. */
struct event_change;

/* List of 'changes' since the last call to eventop.dispatch.  Only maintained
 * if the backend is using changesets. */
struct event_changelist {
	/* Array of pending changes. */
	struct event_change *changes;
	/* Number of entries of 'changes' currently in use. */
	int n_changes;
	/* Allocated capacity of the 'changes' array.  NOTE(review):
	 * inferred from naming -- confirm in the changelist code. */
	int changes_size;
};
190 
#ifndef EVENT__DISABLE_DEBUG_MODE
/* Global internal flag: set to one if debug mode is on. */
extern int event_debug_mode_on_;
/** True iff debug mode is enabled at runtime. */
#define EVENT_DEBUG_MODE_IS_ON() (event_debug_mode_on_)
#else
/* Debug support compiled out: the check is a constant false. */
#define EVENT_DEBUG_MODE_IS_ON() (0)
#endif
198 
/* A tail-queue of event_callbacks; used for the base's active queues. */
TAILQ_HEAD(evcallback_list, event_callback);

/* Sets up an event for processing once */
struct event_once {
	/* Linkage in the owning base's once_events list. */
	LIST_ENTRY(event_once) next_once;
	/* The underlying event. */
	struct event ev;

	/* User callback and its argument. */
	void (*cb)(evutil_socket_t, short, void *);
	void *arg;
};
209 
struct event_base {
	/** Function pointers and other data to describe this event_base's
	 * backend. */
	const struct eventop *evsel;
	/** Pointer to backend-specific data. */
	void *evbase;

	/** List of changes to tell backend about at next dispatch.  Only used
	 * by the O(1) backends. */
	struct event_changelist changelist;

	/** Function pointers used to describe the backend that this event_base
	 * uses for signals */
	const struct eventop *evsigsel;
	/** Data to implement the common signal handler code. */
	struct evsig_info sig;

	/** Number of virtual events */
	int virtual_event_count;
	/** Maximum number of virtual events active */
	int virtual_event_count_max;
	/** Number of total events added to this event_base */
	int event_count;
	/** Maximum number of total events added to this event_base */
	int event_count_max;
	/** Number of total events active in this event_base */
	int event_count_active;
	/** Maximum number of total events active in this event_base */
	int event_count_active_max;

	/** Set if we should terminate the loop once we're done processing
	 * events. */
	int event_gotterm;
	/** Set if we should terminate the loop immediately */
	int event_break;
	/** Set if we should start a new instance of the loop immediately. */
	int event_continue;

	/** The currently running priority of events */
	int event_running_priority;

	/** Set if we're running the event_base_loop function, to prevent
	 * reentrant invocation. */
	int running_loop;

	/** Set to the number of deferred_cbs we've made 'active' in the
	 * loop.  This is a hack to prevent starvation; it would be smarter
	 * to just use event_config_set_max_dispatch_interval's max_callbacks
	 * feature */
	int n_deferreds_queued;

	/* Active event management. */
	/** An array of nactivequeues queues for active event_callbacks (ones
	 * that have triggered, and whose callbacks need to be called).  Low
	 * priority numbers are more important, and stall higher ones.
	 */
	struct evcallback_list *activequeues;
	/** The length of the activequeues array */
	int nactivequeues;
	/** A list of event_callbacks that should become active the next time
	 * we process events, but not this time. */
	struct evcallback_list active_later_queue;

	/* common timeout logic */

	/** An array of common_timeout_list* for all of the common timeout
	 * values we know. */
	struct common_timeout_list **common_timeout_queues;
	/** The number of entries used in common_timeout_queues */
	int n_common_timeouts;
	/** The total size of common_timeout_queues. */
	int n_common_timeouts_allocated;

	/** Mapping from file descriptors to enabled (added) events */
	struct event_io_map io;

	/** Mapping from signal numbers to enabled (added) events. */
	struct event_signal_map sigmap;

	/** Priority queue of events with timeouts. */
	struct min_heap timeheap;

	/** Stored timeval: used to avoid calling gettimeofday/clock_gettime
	 * too often. */
	struct timeval tv_cache;

	/** Monotonic timer state used when computing the current time. */
	struct evutil_monotonic_timer monotonic_timer;

	/** Difference between internal time (maybe from clock_gettime) and
	 * gettimeofday. */
	struct timeval tv_clock_diff;
	/** Second in which we last updated tv_clock_diff, in monotonic time. */
	time_t last_updated_clock_diff;

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	/* threading support */
	/** The thread currently running the event_loop for this base */
	unsigned long th_owner_id;
	/** A lock to prevent conflicting accesses to this event_base */
	void *th_base_lock;
	/** A condition that gets signalled when we're done processing an
	 * event with waiters on it. */
	void *current_event_cond;
	/** Number of threads blocking on current_event_cond. */
	int current_event_waiters;
#endif
	/** The event whose callback is executing right now */
	struct event_callback *current_event;

#ifdef _WIN32
	/** IOCP support structure, if IOCP is enabled. */
	struct event_iocp_port *iocp;
#endif

	/** Flags that this base was configured with */
	enum event_base_config_flag flags;

	/** Limits on how long/how many callbacks one dispatch pass may run;
	 * copied from the same-named fields of struct event_config. */
	struct timeval max_dispatch_time;
	int max_dispatch_callbacks;
	int limit_callbacks_after_prio;

	/* Notify main thread to wake up break, etc. */
	/** True if the base already has a pending notify, and we don't need
	 * to add any more. */
	int is_notify_pending;
	/** A socketpair used by some th_notify functions to wake up the main
	 * thread. */
	evutil_socket_t th_notify_fd[2];
	/** An event used by some th_notify functions to wake up the main
	 * thread. */
	struct event th_notify;
	/** A function used to wake up the main thread from another thread. */
	int (*th_notify_fn)(struct event_base *base);

	/** Saved seed for weak random number generator. Some backends use
	 * this to produce fairness among sockets. Protected by th_base_lock. */
	struct evutil_weakrand_state weakrand_seed;

	/** List of event_onces that have not yet fired. */
	LIST_HEAD(once_event_list, event_once) once_events;

};
352 
struct event_config_entry {
	TAILQ_ENTRY(event_config_entry) next;

	/** Name of a backend method the configured base should avoid. */
	const char *avoid_method;
};

/** Internal structure: describes the configuration we want for an event_base
 * that we're about to allocate. */
struct event_config {
	/** List of backend names to avoid (see event_config_entry). */
	TAILQ_HEAD(event_configq, event_config_entry) entries;

	/** Hint about the number of CPUs available. */
	int n_cpus_hint;
	/** Dispatch-pass limits; copied into the same-named max_dispatch_*
	 * fields of the new struct event_base. */
	struct timeval max_dispatch_interval;
	int max_dispatch_callbacks;
	int limit_callbacks_after_prio;
	/** event_method_feature bits that the chosen backend must provide. */
	enum event_method_feature require_features;
	/** event_base_config_flag bits to configure the base with. */
	enum event_base_config_flag flags;
};
371 
/* Internal use only: Functions that might be missing from <sys/queue.h> */
#if defined(EVENT__HAVE_SYS_QUEUE_H) && !defined(EVENT__HAVE_TAILQFOREACH)
#ifndef TAILQ_FIRST
#define	TAILQ_FIRST(head)		((head)->tqh_first)
#endif
#ifndef TAILQ_END
#define	TAILQ_END(head)			NULL
#endif
#ifndef TAILQ_NEXT
#define	TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)
#endif

#ifndef TAILQ_FOREACH
#define TAILQ_FOREACH(var, head, field)					\
	for ((var) = TAILQ_FIRST(head);					\
	     (var) != TAILQ_END(head);					\
	     (var) = TAILQ_NEXT(var, field))
#endif

#ifndef TAILQ_INSERT_BEFORE
/* Splice 'elm' into the tail queue immediately before 'listelm'. */
#define	TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
	(elm)->field.tqe_next = (listelm);				\
	*(listelm)->field.tqe_prev = (elm);				\
	(listelm)->field.tqe_prev = &(elm)->field.tqe_next;		\
} while (0)
#endif
#endif /* EVENT__HAVE_SYS_QUEUE_H && !EVENT__HAVE_TAILQFOREACH */

/** Number of active event_callbacks in 'base' (reads the cached
 * event_count_active counter). */
#define N_ACTIVE_CALLBACKS(base)					\
	((base)->event_count_active)
403 
/* Install 'fn' as the handler for signal 'evsignal' on 'base'.
 * NOTE(review): presumably saves the previous handler for
 * evsig_restore_handler_ below -- confirm in signal.c. */
int evsig_set_handler_(struct event_base *base, int evsignal,
			  void (*fn)(int));
/* Restore the handler for 'evsignal' that evsig_set_handler_ replaced. */
int evsig_restore_handler_(struct event_base *base, int evsignal);

/* As event_add(), but for use when the base lock is already held (the
 * '_nolock_' convention in this header).  If 'tv_is_absolute' is nonzero,
 * 'tv' is an absolute time rather than a relative timeout. */
int event_add_nolock_(struct event *ev,
    const struct timeval *tv, int tv_is_absolute);
/** Argument for event_del_nolock_. Tells event_del not to block on the event
 * if it's running in another thread. */
#define EVENT_DEL_NOBLOCK 0
/** Argument for event_del_nolock_. Tells event_del to block on the event
 * if it's running in another thread, regardless of its value for EV_FINALIZE
 */
#define EVENT_DEL_BLOCK 1
/** Argument for event_del_nolock_. Tells event_del to block on the event
 * if it is running in another thread and it doesn't have EV_FINALIZE set.
 */
#define EVENT_DEL_AUTOBLOCK 2
/** Argument for event_del_nolock_. Tells event_del to proceed even if the
 * event is set up for finalization rather than for regular use.*/
#define EVENT_DEL_EVEN_IF_FINALIZING 3
/* As event_del(), base lock held; 'blocking' is one of the EVENT_DEL_*
 * codes above. */
int event_del_nolock_(struct event *ev, int blocking);
/* Remove a pending timer from 'ev' without deleting the event; base lock
 * held. */
int event_remove_timer_nolock_(struct event *ev);

/* Make 'ev' active with result code 'res'; base lock held. */
void event_active_nolock_(struct event *ev, int res, short count);
/* Add 'evcb' to its base's active queues (locking / lock-held variants). */
int event_callback_activate_(struct event_base *, struct event_callback *);
int event_callback_activate_nolock_(struct event_base *, struct event_callback *);
/* Cancel a pending event_callback. */
int event_callback_cancel_(struct event_base *base,
    struct event_callback *evcb);

/* Schedule 'cb' as the finalizer for 'evcb'; see the EV_CLOSURE_*_FINALIZE
 * closure codes above. */
void event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *));
void event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *));
/* As event_callback_finalize_, for 'n_cbs' callbacks sharing one finalizer. */
int event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcb, void (*cb)(struct event_callback *, void *));


/* Make an event or callback active on the NEXT pass through the loop, not
 * this one; see active_later_queue in struct event_base. */
void event_active_later_(struct event *ev, int res);
void event_active_later_nolock_(struct event *ev, int res);
void event_callback_activate_later_nolock_(struct event_base *base,
    struct event_callback *evcb);
int event_callback_cancel_nolock_(struct event_base *base,
    struct event_callback *evcb, int even_if_finalizing);
/* Initialize 'cb' for use with 'base'. */
void event_callback_init_(struct event_base *base,
    struct event_callback *cb);

/* FIXME document.  NOTE(review): presumably adjust the base's
 * virtual_event_count (see struct event_base) -- confirm in event.c. */
void event_base_add_virtual_(struct event_base *base);
void event_base_del_virtual_(struct event_base *base);

/** For debugging: unless assertions are disabled, verify the referential
    integrity of the internal data structures of 'base'.  This operation can
    be expensive.

    Returns on success; aborts on failure.
*/
void event_base_assert_ok_(struct event_base *base);
void event_base_assert_ok_nolock_(struct event_base *base);


/* Helper function: Call 'cb' exactly once for every inserted or active event
 * in the event_base 'base'.
 *
 * If cb returns 0, continue on to the next event. Otherwise, return the same
 * value that cb returned.
 *
 * Requires that 'base' be locked.
 */
int event_base_foreach_event_nolock_(struct event_base *base,
    event_base_foreach_event_cb cb, void *arg);
471 
472 #ifdef __cplusplus
473 }
474 #endif
475 
476 #endif /* EVENT_INTERNAL_H_INCLUDED_ */
477