/*++
/* NAME
/*	events 3
/* SUMMARY
/*	event manager
/* SYNOPSIS
/*	#include <events.h>
/*
/*	time_t	event_time()
/*
/*	void	event_loop(delay)
/*	int	delay;
/*
/*	time_t	event_request_timer(callback, context, delay)
/*	void	(*callback)(int event, void *context);
/*	void	*context;
/*	int	delay;
/*
/*	int	event_cancel_timer(callback, context)
/*	void	(*callback)(int event, void *context);
/*	void	*context;
/*
/*	void	event_enable_read(fd, callback, context)
/*	int	fd;
/*	void	(*callback)(int event, void *context);
/*	void	*context;
/*
/*	void	event_enable_write(fd, callback, context)
/*	int	fd;
/*	void	(*callback)(int event, void *context);
/*	void	*context;
/*
/*	void	event_disable_readwrite(fd)
/*	int	fd;
/*
/*	void	event_drain(time_limit)
/*	int	time_limit;
/*
/*	void	event_fork(void)
/* DESCRIPTION
/*	This module delivers I/O and timer events.
/*	Multiple I/O streams and timers can be monitored simultaneously.
/*	Events are delivered via callback routines provided by the
/*	application. When requesting an event, the application can provide
/*	private context that is passed back when the callback routine is
/*	executed.
/*
/*	event_time() returns a cached value of the current time.
/*
/*	event_loop() monitors all I/O channels for which the application has
/*	expressed interest, and monitors the timer request queue.
/*	It notifies the application whenever events of interest happen.
/*	A negative delay value causes the function to pause until something
/*	happens; a positive delay value causes event_loop() to return when
/*	the next event happens or when the delay time in seconds is over,
/*	whichever happens first. A zero delay effectuates a poll.
/*
/*	Note: in order to avoid race conditions, event_loop() must
/*	not be called recursively.
/*
/*	event_request_timer() causes the specified callback function to
/*	be called with the specified context argument after \fIdelay\fR
/*	seconds, or as soon as possible thereafter. The delay should
/*	not be negative (the manifest EVENT_NULL_DELAY provides for
/*	convenient zero-delay notification).
/*	The event argument is equal to EVENT_TIME.
/*	Only one timer request can be active per (callback, context) pair.
/*	Calling event_request_timer() with an existing (callback, context)
/*	pair does not schedule a new event, but updates the time of event
/*	delivery. The result is the absolute time at which the timer is
/*	scheduled to go off.
/*
/*	event_cancel_timer() cancels the specified (callback, context) request.
/*	The application is allowed to cancel non-existing requests. The result
/*	value is the amount of time left before the timer would have gone off,
/*	or -1 in case of no pending timer.
/*
/*	event_enable_read() (event_enable_write()) enables read (write) events
/*	on the named I/O channel. It is up to the application to assemble
/*	partial reads or writes.
/*	An I/O channel cannot handle more than one request at the
/*	same time. The application is allowed to enable an event that
/*	is already enabled (same channel, same read or write operation,
/*	but perhaps a different callback or context). On systems with
/*	kernel-based event filters this is preferred usage, because
/*	each disable and enable request would cost a system call.
/*
/*	The manifest constants EVENT_NULL_CONTEXT and EVENT_NULL_TYPE
/*	provide convenient null values.
/*
/*	The callback routine has the following arguments:
/* .IP fd
/*	The stream on which the event happened.
/* .IP event
/*	An indication of the event type:
/* .RS
/* .IP EVENT_READ
/*	read event,
/* .IP EVENT_WRITE
/*	write event,
/* .IP EVENT_XCPT
/*	exception (actually, any event other than read or write).
/* .RE
/* .IP context
/*	Application context given to event_enable_read() (event_enable_write()).
/* .PP
/*	event_disable_readwrite() disables further I/O events on the specified
/*	I/O channel. The application is allowed to cancel non-existing
/*	I/O event requests.
/*
/*	event_drain() repeatedly calls event_loop() until no more timer
/*	events or I/O events are pending or until the time limit is reached.
/*	This routine must not be called from an event_whatever() callback
/*	routine. Note: this function assumes that no new I/O events
/*	will be registered.
/*
/*	event_fork() must be called by a child process after it is
/*	created with fork(), to re-initialize event processing.
/* DIAGNOSTICS
/*	Panics: interface violations. Fatal errors: out of memory,
/*	system call failure. Warnings: the number of available
/*	file descriptors is much less than FD_SETSIZE.
/* BUGS
/*	This module is based on event selection. It assumes that the
/*	event_loop() routine is called frequently. This approach is
/*	not suitable for applications with compute-bound loops that
/*	take a significant amount of time.
/* LICENSE
/* .ad
/* .fi
/*	The Secure Mailer license must be distributed with this software.
/* AUTHOR(S)
/*	Wietse Venema
/*	IBM T.J. Watson Research
/*	P.O. Box 704
/*	Yorktown Heights, NY 10598, USA
/*--*/
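
 /*
  * Illustrative usage sketch (not part of this module): a typical client
  * registers a read callback for a socket and a watchdog timer, then runs
  * the event loop. The names my_read_event(), my_timeout() and my_fd are
  * hypothetical.
  *
  *	static void my_read_event(int event, void *context)
  *	{
  *	    ... event == EVENT_READ; read from my_fd ...
  *	}
  *
  *	static void my_timeout(int event, void *context)
  *	{
  *	    ... event == EVENT_TIME ...
  *	}
  *
  *	event_enable_read(my_fd, my_read_event, (void *) 0);
  *	event_request_timer(my_timeout, (void *) 0, 10);
  *	for (;;)
  *	    event_loop(-1);
  */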

/* System libraries. */

#include "sys_defs.h"
#include <sys/time.h>			/* XXX: 44BSD uses bzero() */
#include <time.h>
#include <errno.h>
#include <unistd.h>
#include <stddef.h>			/* offsetof() */
#include <string.h>			/* bzero() prototype for 44BSD */
#include <limits.h>			/* INT_MAX */

#ifdef USE_SYS_SELECT_H
#include <sys/select.h>
#endif

/* Application-specific. */

#include "mymalloc.h"
#include "msg.h"
#include "iostuff.h"
#include "ring.h"
#include "events.h"

#if !defined(EVENTS_STYLE)
#error "must define EVENTS_STYLE"
#endif

 /*
  * Traditional BSD-style select(2). Works everywhere, but has a built-in
  * upper bound on the number of file descriptors, and that limit is hard to
  * change on Linux. It is sometimes emulated with SYSV-style poll(2), which
  * doesn't have the file descriptor limit, but that unfortunately does not
  * improve the performance of servers with lots of connections.
  */
#define EVENT_ALLOC_INCR		10

#if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
typedef fd_set EVENT_MASK;

#define EVENT_MASK_BYTE_COUNT(mask)	sizeof(*(mask))
#define EVENT_MASK_ZERO(mask)		FD_ZERO(mask)
#define EVENT_MASK_SET(fd, mask)	FD_SET((fd), (mask))
#define EVENT_MASK_ISSET(fd, mask)	FD_ISSET((fd), (mask))
#define EVENT_MASK_CLR(fd, mask)	FD_CLR((fd), (mask))
#define EVENT_MASK_CMP(m1, m2) memcmp((m1), (m2), EVENT_MASK_BYTE_COUNT(m1))
#else

 /*
  * Kernel-based event filters (kqueue, /dev/poll, epoll). We use the
  * following file descriptor mask structure which is expanded on the fly.
  */
typedef struct {
    char   *data;			/* bit mask */
    size_t  data_len;			/* data byte count */
} EVENT_MASK;

 /* Bits per byte, byte in vector, bit offset in byte, bytes per set. */
#define EVENT_MASK_NBBY		(8)
#define EVENT_MASK_FD_BYTE(fd, mask) \
	(((unsigned char *) (mask)->data)[(fd) / EVENT_MASK_NBBY])
#define EVENT_MASK_FD_BIT(fd)	(1 << ((fd) % EVENT_MASK_NBBY))
#define EVENT_MASK_BYTES_NEEDED(len) \
	(((len) + (EVENT_MASK_NBBY - 1)) / EVENT_MASK_NBBY)
#define EVENT_MASK_BYTE_COUNT(mask)	((mask)->data_len)

 /* Memory management. */
#define EVENT_MASK_ALLOC(mask, bit_len) do { \
	size_t _byte_len = EVENT_MASK_BYTES_NEEDED(bit_len); \
	(mask)->data = mymalloc(_byte_len); \
	memset((mask)->data, 0, _byte_len); \
	(mask)->data_len = _byte_len; \
    } while (0)
#define EVENT_MASK_REALLOC(mask, bit_len) do { \
	size_t _byte_len = EVENT_MASK_BYTES_NEEDED(bit_len); \
	size_t _old_len = (mask)->data_len; \
	(mask)->data = myrealloc((mask)->data, _byte_len); \
	if (_byte_len > _old_len) \
	    memset((mask)->data + _old_len, 0, _byte_len - _old_len); \
	(mask)->data_len = _byte_len; \
    } while (0)
#define EVENT_MASK_FREE(mask)	myfree((mask)->data)

 /* Set operations, modeled after FD_ZERO/SET/ISSET/CLR. */
#define EVENT_MASK_ZERO(mask) \
	memset((mask)->data, 0, (mask)->data_len)
#define EVENT_MASK_SET(fd, mask) \
	(EVENT_MASK_FD_BYTE((fd), (mask)) |= EVENT_MASK_FD_BIT(fd))
#define EVENT_MASK_ISSET(fd, mask) \
	(EVENT_MASK_FD_BYTE((fd), (mask)) & EVENT_MASK_FD_BIT(fd))
#define EVENT_MASK_CLR(fd, mask) \
	(EVENT_MASK_FD_BYTE((fd), (mask)) &= ~EVENT_MASK_FD_BIT(fd))
#define EVENT_MASK_CMP(m1, m2) \
	memcmp((m1)->data, (m2)->data, EVENT_MASK_BYTE_COUNT(m1))
#endif
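
 /*
  * Worked example of the bit arithmetic above (illustration only, not used
  * by the code): with EVENT_MASK_NBBY == 8, file descriptor 10 maps to
  * EVENT_MASK_FD_BYTE() index 10 / 8 == 1 and to EVENT_MASK_FD_BIT() value
  * 1 << (10 % 8) == 0x04, i.e. bit 2 of the second byte. A mask that covers
  * 10 descriptors needs EVENT_MASK_BYTES_NEEDED(10) == (10 + 7) / 8 == 2
  * bytes.
  */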

 /*
  * I/O events.
  */
typedef struct EVENT_FDTABLE EVENT_FDTABLE;

struct EVENT_FDTABLE {
    EVENT_NOTIFY_RDWR_FN callback;
    char   *context;
};
static EVENT_MASK event_rmask;		/* enabled read events */
static EVENT_MASK event_wmask;		/* enabled write events */
static EVENT_MASK event_xmask;		/* for bad news mostly */
static int event_fdlimit;		/* per-process open file limit */
static EVENT_FDTABLE *event_fdtable;	/* one slot per file descriptor */
static int event_fdslots;		/* number of file descriptor slots */
static int event_max_fd = -1;		/* highest fd number seen */

 /*
  * FreeBSD kqueue supports no system call to find out what descriptors are
  * registered in the kernel-based filter. To implement our own sanity checks
  * we maintain our own descriptor bitmask.
  *
  * FreeBSD kqueue does support application context pointers. Unfortunately,
  * changing that information would cost a system call, and some of the
  * competitors don't support application context. To keep the implementation
  * simple we maintain our own table with call-back information.
  *
  * FreeBSD kqueue silently unregisters a descriptor from its filter when the
  * descriptor is closed, so our information could get out of sync with the
  * kernel. But that will never happen, because we have to meticulously
  * unregister a file descriptor before it is closed, to avoid errors on
  * systems that are built with EVENTS_STYLE == EVENTS_STYLE_SELECT.
  */
#if (EVENTS_STYLE == EVENTS_STYLE_KQUEUE)
#include <sys/event.h>

 /*
  * Some early FreeBSD implementations don't have the EV_SET macro.
  */
#ifndef EV_SET
#define EV_SET(kp, id, fi, fl, ffl, da, ud) do { \
	(kp)->ident = (id); \
	(kp)->filter = (fi); \
	(kp)->flags = (fl); \
	(kp)->fflags = (ffl); \
	(kp)->data = (da); \
	(kp)->udata = (ud); \
    } while(0)
#endif

 /*
  * Macros to initialize the kernel-based filter; see event_init().
  */
static int event_kq;			/* handle to event filter */

#define EVENT_REG_INIT_HANDLE(er, n) do { \
	er = event_kq = kqueue(); \
    } while (0)
#define EVENT_REG_INIT_TEXT	"kqueue"

#define EVENT_REG_FORK_HANDLE(er, n) do { \
	(void) close(event_kq); \
	EVENT_REG_INIT_HANDLE(er, (n)); \
    } while (0)

 /*
  * Macros to update the kernel-based filter; see event_enable_read(),
  * event_enable_write() and event_disable_readwrite().
  */
#define EVENT_REG_FD_OP(er, fh, ev, op) do { \
	struct kevent dummy; \
	EV_SET(&dummy, (fh), (ev), (op), 0, 0, 0); \
	(er) = kevent(event_kq, &dummy, 1, 0, 0, 0); \
    } while (0)

#define EVENT_REG_ADD_OP(e, f, ev) EVENT_REG_FD_OP((e), (f), (ev), EV_ADD)
#define EVENT_REG_ADD_READ(e, f)   EVENT_REG_ADD_OP((e), (f), EVFILT_READ)
#define EVENT_REG_ADD_WRITE(e, f)  EVENT_REG_ADD_OP((e), (f), EVFILT_WRITE)
#define EVENT_REG_ADD_TEXT         "kevent EV_ADD"

#define EVENT_REG_DEL_OP(e, f, ev) EVENT_REG_FD_OP((e), (f), (ev), EV_DELETE)
#define EVENT_REG_DEL_READ(e, f)   EVENT_REG_DEL_OP((e), (f), EVFILT_READ)
#define EVENT_REG_DEL_WRITE(e, f)  EVENT_REG_DEL_OP((e), (f), EVFILT_WRITE)
#define EVENT_REG_DEL_TEXT         "kevent EV_DELETE"

 /*
  * Macros to retrieve event buffers from the kernel; see event_loop().
  */
typedef struct kevent EVENT_BUFFER;

#define EVENT_BUFFER_READ(event_count, event_buf, buflen, delay) do { \
	struct timespec ts; \
	struct timespec *tsp; \
	if ((delay) < 0) { \
	    tsp = 0; \
	} else { \
	    tsp = &ts; \
	    ts.tv_nsec = 0; \
	    ts.tv_sec = (delay); \
	} \
	(event_count) = kevent(event_kq, (struct kevent *) 0, 0, (event_buf), \
			       (buflen), (tsp)); \
    } while (0)
#define EVENT_BUFFER_READ_TEXT	"kevent"

 /*
  * Macros to process event buffers from the kernel; see event_loop().
  */
#define EVENT_GET_FD(bp)	((bp)->ident)
#define EVENT_GET_TYPE(bp)	((bp)->filter)
#define EVENT_TEST_READ(bp)	(EVENT_GET_TYPE(bp) == EVFILT_READ)
#define EVENT_TEST_WRITE(bp)	(EVENT_GET_TYPE(bp) == EVFILT_WRITE)

#endif
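
 /*
  * For illustration, EVENT_REG_ADD_READ(err, fd) above expands to roughly
  * the following (hand-expanded sketch):
  *
  *	struct kevent dummy;
  *	EV_SET(&dummy, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
  *	err = kevent(event_kq, &dummy, 1, 0, 0, 0);
  *
  * i.e. one system call per registration change, which is why this module
  * avoids redundant register and unregister operations (see the comments
  * in event_enable_read() and event_enable_write()).
  */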

 /*
  * Solaris /dev/poll does not support application context, so we have to
  * maintain our own. This has the benefit of avoiding an expensive system
  * call just to change a call-back function or argument.
  *
  * Solaris /dev/poll does have a way to query if a specific descriptor is
  * registered. However, we maintain a descriptor mask anyway because a) it
  * avoids having to make an expensive system call to find out if something
  * is registered, b) some EVENTS_STYLE_MUMBLE implementations need a
  * descriptor bitmask anyway and c) we use the bitmask already to implement
  * sanity checks.
  */
#if (EVENTS_STYLE == EVENTS_STYLE_DEVPOLL)
#include <sys/devpoll.h>
#include <fcntl.h>

 /*
  * Macros to initialize the kernel-based filter; see event_init().
  */
static int event_pollfd;		/* handle to file descriptor set */

#define EVENT_REG_INIT_HANDLE(er, n) do { \
	er = event_pollfd = open("/dev/poll", O_RDWR); \
	if (event_pollfd >= 0) close_on_exec(event_pollfd, CLOSE_ON_EXEC); \
    } while (0)
#define EVENT_REG_INIT_TEXT	"open /dev/poll"

#define EVENT_REG_FORK_HANDLE(er, n) do { \
	(void) close(event_pollfd); \
	EVENT_REG_INIT_HANDLE(er, (n)); \
    } while (0)

 /*
  * Macros to update the kernel-based filter; see event_enable_read(),
  * event_enable_write() and event_disable_readwrite().
  */
#define EVENT_REG_FD_OP(er, fh, ev) do { \
	struct pollfd dummy; \
	dummy.fd = (fh); \
	dummy.events = (ev); \
	(er) = write(event_pollfd, (void *) &dummy, \
		     sizeof(dummy)) != sizeof(dummy) ? -1 : 0; \
    } while (0)

#define EVENT_REG_ADD_READ(e, f)  EVENT_REG_FD_OP((e), (f), POLLIN)
#define EVENT_REG_ADD_WRITE(e, f) EVENT_REG_FD_OP((e), (f), POLLOUT)
#define EVENT_REG_ADD_TEXT        "write /dev/poll"

#define EVENT_REG_DEL_BOTH(e, f)  EVENT_REG_FD_OP((e), (f), POLLREMOVE)
#define EVENT_REG_DEL_TEXT        "write /dev/poll"

 /*
  * Macros to retrieve event buffers from the kernel; see event_loop().
  */
typedef struct pollfd EVENT_BUFFER;

#define EVENT_BUFFER_READ(event_count, event_buf, buflen, delay) do { \
	struct dvpoll dvpoll; \
	dvpoll.dp_fds = (event_buf); \
	dvpoll.dp_nfds = (buflen); \
	dvpoll.dp_timeout = (delay) < 0 ? -1 : (delay) * 1000; \
	(event_count) = ioctl(event_pollfd, DP_POLL, &dvpoll); \
    } while (0)
#define EVENT_BUFFER_READ_TEXT	"ioctl DP_POLL"

 /*
  * Macros to process event buffers from the kernel; see event_loop().
  */
#define EVENT_GET_FD(bp)	((bp)->fd)
#define EVENT_GET_TYPE(bp)	((bp)->revents)
#define EVENT_TEST_READ(bp)	(EVENT_GET_TYPE(bp) & POLLIN)
#define EVENT_TEST_WRITE(bp)	(EVENT_GET_TYPE(bp) & POLLOUT)

#endif
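
 /*
  * Note for illustration: with /dev/poll, a registration change is a plain
  * write() of a struct pollfd to the event_pollfd handle, and POLLREMOVE
  * withdraws the descriptor from the monitored set entirely. That is why
  * this implementation provides only EVENT_REG_DEL_BOTH() and no macros to
  * delete read and write interest separately; see the XXX comment in
  * event_disable_readwrite() below.
  */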

 /*
  * Linux epoll supports no system call to find out what descriptors are
  * registered in the kernel-based filter. To implement our own sanity checks
  * we maintain our own descriptor bitmask.
  *
  * Linux epoll does support application context pointers. Unfortunately,
  * changing that information would cost a system call, and some of the
  * competitors don't support application context. To keep the implementation
  * simple we maintain our own table with call-back information.
  *
  * Linux epoll silently unregisters a descriptor from its filter when the
  * descriptor is closed, so our information could get out of sync with the
  * kernel. But that will never happen, because we have to meticulously
  * unregister a file descriptor before it is closed, to avoid errors on
  * systems that are built with EVENTS_STYLE == EVENTS_STYLE_SELECT.
  */
#if (EVENTS_STYLE == EVENTS_STYLE_EPOLL)
#include <sys/epoll.h>

 /*
  * Macros to initialize the kernel-based filter; see event_init().
  */
static int event_epollfd;		/* epoll handle */

#define EVENT_REG_INIT_HANDLE(er, n) do { \
	er = event_epollfd = epoll_create(n); \
	if (event_epollfd >= 0) close_on_exec(event_epollfd, CLOSE_ON_EXEC); \
    } while (0)
#define EVENT_REG_INIT_TEXT	"epoll_create"

#define EVENT_REG_FORK_HANDLE(er, n) do { \
	(void) close(event_epollfd); \
	EVENT_REG_INIT_HANDLE(er, (n)); \
    } while (0)

 /*
  * Macros to update the kernel-based filter; see event_enable_read(),
  * event_enable_write() and event_disable_readwrite().
  */
#define EVENT_REG_FD_OP(er, fh, ev, op) do { \
	struct epoll_event dummy; \
	dummy.events = (ev); \
	dummy.data.fd = (fh); \
	(er) = epoll_ctl(event_epollfd, (op), (fh), &dummy); \
    } while (0)

#define EVENT_REG_ADD_OP(e, f, ev) EVENT_REG_FD_OP((e), (f), (ev), EPOLL_CTL_ADD)
#define EVENT_REG_ADD_READ(e, f)   EVENT_REG_ADD_OP((e), (f), EPOLLIN)
#define EVENT_REG_ADD_WRITE(e, f)  EVENT_REG_ADD_OP((e), (f), EPOLLOUT)
#define EVENT_REG_ADD_TEXT         "epoll_ctl EPOLL_CTL_ADD"

#define EVENT_REG_DEL_OP(e, f, ev) EVENT_REG_FD_OP((e), (f), (ev), EPOLL_CTL_DEL)
#define EVENT_REG_DEL_READ(e, f)   EVENT_REG_DEL_OP((e), (f), EPOLLIN)
#define EVENT_REG_DEL_WRITE(e, f)  EVENT_REG_DEL_OP((e), (f), EPOLLOUT)
#define EVENT_REG_DEL_TEXT         "epoll_ctl EPOLL_CTL_DEL"

 /*
  * Macros to retrieve event buffers from the kernel; see event_loop().
  */
typedef struct epoll_event EVENT_BUFFER;

#define EVENT_BUFFER_READ(event_count, event_buf, buflen, delay) do { \
	(event_count) = epoll_wait(event_epollfd, (event_buf), (buflen), \
				   (delay) < 0 ? -1 : (delay) * 1000); \
    } while (0)
#define EVENT_BUFFER_READ_TEXT	"epoll_wait"

 /*
  * Macros to process event buffers from the kernel; see event_loop().
  */
#define EVENT_GET_FD(bp)	((bp)->data.fd)
#define EVENT_GET_TYPE(bp)	((bp)->events)
#define EVENT_TEST_READ(bp)	(EVENT_GET_TYPE(bp) & EPOLLIN)
#define EVENT_TEST_WRITE(bp)	(EVENT_GET_TYPE(bp) & EPOLLOUT)

#endif
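
 /*
  * For illustration, with epoll the EVENT_BUFFER_READ() macro above boils
  * down to:
  *
  *	event_count = epoll_wait(event_epollfd, event_buf, buflen,
  *				 delay < 0 ? -1 : delay * 1000);
  *
  * Note the conversion from this module's second-resolution delays to
  * epoll_wait()'s millisecond timeout argument; /dev/poll does the same
  * conversion in its dp_timeout field.
  */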

 /*
  * Timer events. Timer requests are kept sorted, in a circular list. We use
  * the RING abstraction, so we get to use a couple of ugly macros.
  *
  * When a call-back function adds a timer request, we label the request with
  * the event_loop() call instance that invoked the call-back. We use this to
  * prevent zero-delay timer requests from running in a tight loop and
  * starving I/O events.
  */
typedef struct EVENT_TIMER EVENT_TIMER;

struct EVENT_TIMER {
    time_t  when;			/* when event is wanted */
    EVENT_NOTIFY_TIME_FN callback;	/* callback function */
    char   *context;			/* callback context */
    long    loop_instance;		/* event_loop() call instance */
    RING    ring;			/* linkage */
};

static RING event_timer_head;		/* timer queue head */
static long event_loop_instance;	/* event_loop() call instance */

#define RING_TO_TIMER(r) \
	((EVENT_TIMER *) ((char *) (r) - offsetof(EVENT_TIMER, ring)))

#define FOREACH_QUEUE_ENTRY(entry, head) \
	for (entry = ring_succ(head); entry != (head); entry = ring_succ(entry))

#define FIRST_TIMER(head) \
	(ring_succ(head) != (head) ? RING_TO_TIMER(ring_succ(head)) : 0)
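
 /*
  * RING_TO_TIMER() is the classic "container-of" idiom: given a pointer to
  * the ring member embedded in an EVENT_TIMER, subtracting the member's
  * offset recovers a pointer to the enclosing structure. For example
  * (illustrative, and valid only while the queue is non-empty):
  *
  *	EVENT_TIMER *first = RING_TO_TIMER(ring_succ(&event_timer_head));
  *
  * yields the timer request that is scheduled to go off first, which is
  * exactly what FIRST_TIMER() computes after its emptiness check.
  */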

 /*
  * Other private data structures.
  */
static time_t event_present;		/* cached time of day */

#define EVENT_INIT_NEEDED()	(event_present == 0)

/* event_init - set up tables and such */

static void event_init(void)
{
    EVENT_FDTABLE *fdp;
    int     err;

    if (!EVENT_INIT_NEEDED())
	msg_panic("event_init: repeated call");

    /*
     * Initialize the file descriptor masks and the call-back table. Where
     * possible we extend these data structures on the fly. With select(2)
     * based implementations we can only handle FD_SETSIZE open files.
     */
#if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
    if ((event_fdlimit = open_limit(FD_SETSIZE)) < 0)
	msg_fatal("unable to determine open file limit");
#else
    if ((event_fdlimit = open_limit(INT_MAX)) < 0)
	msg_fatal("unable to determine open file limit");
#endif
    if (event_fdlimit < FD_SETSIZE / 2 && event_fdlimit < 256)
	msg_warn("could allocate space for only %d open files", event_fdlimit);
    event_fdslots = EVENT_ALLOC_INCR;
    event_fdtable = (EVENT_FDTABLE *)
	mymalloc(sizeof(EVENT_FDTABLE) * event_fdslots);
    for (fdp = event_fdtable; fdp < event_fdtable + event_fdslots; fdp++) {
	fdp->callback = 0;
	fdp->context = 0;
    }

    /*
     * Initialize the I/O event request masks.
     */
#if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
    EVENT_MASK_ZERO(&event_rmask);
    EVENT_MASK_ZERO(&event_wmask);
    EVENT_MASK_ZERO(&event_xmask);
#else
    EVENT_MASK_ALLOC(&event_rmask, event_fdslots);
    EVENT_MASK_ALLOC(&event_wmask, event_fdslots);
    EVENT_MASK_ALLOC(&event_xmask, event_fdslots);

    /*
     * Initialize the kernel-based filter.
     */
    EVENT_REG_INIT_HANDLE(err, event_fdslots);
    if (err < 0)
	msg_fatal("%s: %m", EVENT_REG_INIT_TEXT);
#endif

    /*
     * Initialize timer stuff.
     */
    ring_init(&event_timer_head);
    (void) time(&event_present);

    /*
     * Avoid an infinite initialization loop.
     */
    if (EVENT_INIT_NEEDED())
	msg_panic("event_init: unable to initialize");
}

/* event_extend - make room for more descriptor slots */

static void event_extend(int fd)
{
    const char *myname = "event_extend";
    int     old_slots = event_fdslots;
    int     new_slots = (event_fdslots > fd / 2 ?
			 2 * old_slots : fd + EVENT_ALLOC_INCR);
    EVENT_FDTABLE *fdp;

#ifdef EVENT_REG_UPD_HANDLE
    int     err;

#endif

    if (msg_verbose > 2)
	msg_info("%s: fd %d", myname, fd);
    event_fdtable = (EVENT_FDTABLE *)
	myrealloc((void *) event_fdtable, sizeof(EVENT_FDTABLE) * new_slots);
    event_fdslots = new_slots;
    for (fdp = event_fdtable + old_slots;
	 fdp < event_fdtable + new_slots; fdp++) {
	fdp->callback = 0;
	fdp->context = 0;
    }

    /*
     * Initialize the I/O event request masks.
     */
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
    EVENT_MASK_REALLOC(&event_rmask, new_slots);
    EVENT_MASK_REALLOC(&event_wmask, new_slots);
    EVENT_MASK_REALLOC(&event_xmask, new_slots);
#endif
#ifdef EVENT_REG_UPD_HANDLE
    EVENT_REG_UPD_HANDLE(err, new_slots);
    if (err < 0)
	msg_fatal("%s: %s: %m", myname, EVENT_REG_UPD_TEXT);
#endif
}
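
 /*
  * Illustration of the sizing rule in event_extend(): with event_fdslots
  * == 10, a request for fd 12 doubles the table (10 > 12 / 2, new size 20),
  * while a request for fd 50 jumps directly to fd + EVENT_ALLOC_INCR == 60
  * slots. Either way, a single call always yields a table large enough for
  * the requested descriptor.
  */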

/* event_time - look up cached time of day */

time_t  event_time(void)
{
    if (EVENT_INIT_NEEDED())
	event_init();

    return (event_present);
}

/* event_drain - loop until all pending events are done */

void    event_drain(int time_limit)
{
    EVENT_MASK zero_mask;
    time_t  max_time;

    if (EVENT_INIT_NEEDED())
	return;

#if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
    EVENT_MASK_ZERO(&zero_mask);
#else
    EVENT_MASK_ALLOC(&zero_mask, event_fdslots);
#endif
    (void) time(&event_present);
    max_time = event_present + time_limit;
    while (event_present < max_time
	   && (event_timer_head.pred != &event_timer_head
	       || EVENT_MASK_CMP(&zero_mask, &event_xmask) != 0)) {
	event_loop(1);
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
	if (EVENT_MASK_BYTE_COUNT(&zero_mask)
	    != EVENT_MASK_BYTES_NEEDED(event_fdslots))
	    EVENT_MASK_REALLOC(&zero_mask, event_fdslots);
#endif
    }
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
    EVENT_MASK_FREE(&zero_mask);
#endif
}
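
 /*
  * Typical event_drain() usage (illustrative): a process that wants to
  * finish pending work before exiting, waiting at most five seconds:
  *
  *	event_drain(5);
  *	exit(0);
  *
  * The test program at the end of this file uses the same pattern.
  */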

/* event_fork - resume event processing after fork() */

void    event_fork(void)
{
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
    EVENT_FDTABLE *fdp;
    int     err;
    int     fd;

    /*
     * No event was ever registered, so there's nothing to be done.
     */
    if (EVENT_INIT_NEEDED())
	return;

    /*
     * Close the existing filter handle and open a new kernel-based filter.
     */
    EVENT_REG_FORK_HANDLE(err, event_fdslots);
    if (err < 0)
	msg_fatal("%s: %m", EVENT_REG_INIT_TEXT);

    /*
     * Populate the new kernel-based filter with events that were registered
     * in the parent process.
     */
    for (fd = 0; fd <= event_max_fd; fd++) {
	if (EVENT_MASK_ISSET(fd, &event_wmask)) {
	    EVENT_MASK_CLR(fd, &event_wmask);
	    fdp = event_fdtable + fd;
	    event_enable_write(fd, fdp->callback, fdp->context);
	} else if (EVENT_MASK_ISSET(fd, &event_rmask)) {
	    EVENT_MASK_CLR(fd, &event_rmask);
	    fdp = event_fdtable + fd;
	    event_enable_read(fd, fdp->callback, fdp->context);
	}
    }
#endif
}

/* event_enable_read - enable read events */

void    event_enable_read(int fd, EVENT_NOTIFY_RDWR_FN callback, void *context)
{
    const char *myname = "event_enable_read";
    EVENT_FDTABLE *fdp;
    int     err;

    if (EVENT_INIT_NEEDED())
	event_init();

    /*
     * Sanity checks.
     */
    if (fd < 0 || fd >= event_fdlimit)
	msg_panic("%s: bad file descriptor: %d", myname, fd);

    if (msg_verbose > 2)
	msg_info("%s: fd %d", myname, fd);

    if (fd >= event_fdslots)
	event_extend(fd);

    /*
     * Disallow mixed (i.e. read and write) requests on the same descriptor.
     */
    if (EVENT_MASK_ISSET(fd, &event_wmask))
	msg_panic("%s: fd %d: read/write I/O request", myname, fd);

    /*
     * Postfix 2.4 allows multiple event_enable_read() calls on the same
     * descriptor without requiring event_disable_readwrite() calls between
     * them. With kernel-based filters (kqueue, /dev/poll, epoll) it's
     * wasteful to make system calls when we change only application
     * call-back information. It has a noticeable effect on smtp-source
     * performance.
     */
    if (EVENT_MASK_ISSET(fd, &event_rmask) == 0) {
	EVENT_MASK_SET(fd, &event_xmask);
	EVENT_MASK_SET(fd, &event_rmask);
	if (event_max_fd < fd)
	    event_max_fd = fd;
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
	EVENT_REG_ADD_READ(err, fd);
	if (err < 0)
	    msg_fatal("%s: %s: %m", myname, EVENT_REG_ADD_TEXT);
#endif
    }
    fdp = event_fdtable + fd;
    if (fdp->callback != callback || fdp->context != context) {
	fdp->callback = callback;
	fdp->context = context;
    }
}

/* event_enable_write - enable write events */

void    event_enable_write(int fd, EVENT_NOTIFY_RDWR_FN callback, void *context)
{
    const char *myname = "event_enable_write";
    EVENT_FDTABLE *fdp;
    int     err;

    if (EVENT_INIT_NEEDED())
	event_init();

    /*
     * Sanity checks.
     */
    if (fd < 0 || fd >= event_fdlimit)
	msg_panic("%s: bad file descriptor: %d", myname, fd);

    if (msg_verbose > 2)
	msg_info("%s: fd %d", myname, fd);

    if (fd >= event_fdslots)
	event_extend(fd);

    /*
     * Disallow mixed (i.e. read and write) requests on the same descriptor.
     */
    if (EVENT_MASK_ISSET(fd, &event_rmask))
	msg_panic("%s: fd %d: read/write I/O request", myname, fd);

    /*
     * Postfix 2.4 allows multiple event_enable_write() calls on the same
     * descriptor without requiring event_disable_readwrite() calls between
     * them. With kernel-based filters (kqueue, /dev/poll, epoll) it's
     * incredibly wasteful to make unregister and register system calls when
     * we change only application call-back information. It has a noticeable
     * effect on smtp-source performance.
     */
    if (EVENT_MASK_ISSET(fd, &event_wmask) == 0) {
	EVENT_MASK_SET(fd, &event_xmask);
	EVENT_MASK_SET(fd, &event_wmask);
	if (event_max_fd < fd)
	    event_max_fd = fd;
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
	EVENT_REG_ADD_WRITE(err, fd);
	if (err < 0)
	    msg_fatal("%s: %s: %m", myname, EVENT_REG_ADD_TEXT);
#endif
    }
    fdp = event_fdtable + fd;
    if (fdp->callback != callback || fdp->context != context) {
	fdp->callback = callback;
	fdp->context = context;
    }
}

/* event_disable_readwrite - disable request for read or write events */

void    event_disable_readwrite(int fd)
{
    const char *myname = "event_disable_readwrite";
    EVENT_FDTABLE *fdp;
    int     err;

    if (EVENT_INIT_NEEDED())
	event_init();

    /*
     * Sanity checks.
     */
    if (fd < 0 || fd >= event_fdlimit)
	msg_panic("%s: bad file descriptor: %d", myname, fd);

    if (msg_verbose > 2)
	msg_info("%s: fd %d", myname, fd);

    /*
     * Don't complain when there is nothing to cancel. The request may have
     * been canceled from another thread.
     */
    if (fd >= event_fdslots)
	return;
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
#ifdef EVENT_REG_DEL_BOTH
    /* XXX Can't seem to disable READ and WRITE events selectively. */
    if (EVENT_MASK_ISSET(fd, &event_rmask)
	|| EVENT_MASK_ISSET(fd, &event_wmask)) {
	EVENT_REG_DEL_BOTH(err, fd);
	if (err < 0)
	    msg_fatal("%s: %s: %m", myname, EVENT_REG_DEL_TEXT);
    }
#else
    if (EVENT_MASK_ISSET(fd, &event_rmask)) {
	EVENT_REG_DEL_READ(err, fd);
	if (err < 0)
	    msg_fatal("%s: %s: %m", myname, EVENT_REG_DEL_TEXT);
    } else if (EVENT_MASK_ISSET(fd, &event_wmask)) {
	EVENT_REG_DEL_WRITE(err, fd);
	if (err < 0)
	    msg_fatal("%s: %s: %m", myname, EVENT_REG_DEL_TEXT);
    }
#endif					/* EVENT_REG_DEL_BOTH */
#endif					/* != EVENTS_STYLE_SELECT */
    EVENT_MASK_CLR(fd, &event_xmask);
    EVENT_MASK_CLR(fd, &event_rmask);
    EVENT_MASK_CLR(fd, &event_wmask);
    fdp = event_fdtable + fd;
    fdp->callback = 0;
    fdp->context = 0;
}
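
 /*
  * Reminder, per the kqueue/epoll commentary earlier in this file: always
  * unregister a descriptor before closing it, e.g.
  *
  *	event_disable_readwrite(fd);
  *	(void) close(fd);
  *
  * so that our bookkeeping stays in sync with the kernel and so that
  * select()-based builds do not report errors on a closed descriptor.
  */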

/* event_request_timer - (re)set timer */

time_t  event_request_timer(EVENT_NOTIFY_TIME_FN callback, void *context, int delay)
{
    const char *myname = "event_request_timer";
    RING   *ring;
    EVENT_TIMER *timer;

    if (EVENT_INIT_NEEDED())
	event_init();

    /*
     * Sanity checks.
     */
    if (delay < 0)
	msg_panic("%s: invalid delay: %d", myname, delay);

    /*
     * Make sure we schedule this event at the right time.
     */
    time(&event_present);

    /*
     * See if they are resetting an existing timer request. If so, take the
     * request away from the timer queue so that it can be inserted at the
     * right place.
     */
    FOREACH_QUEUE_ENTRY(ring, &event_timer_head) {
	timer = RING_TO_TIMER(ring);
	if (timer->callback == callback && timer->context == context) {
	    timer->when = event_present + delay;
	    timer->loop_instance = event_loop_instance;
	    ring_detach(ring);
	    if (msg_verbose > 2)
		msg_info("%s: reset 0x%lx 0x%lx %d", myname,
			 (long) callback, (long) context, delay);
	    break;
	}
    }

    /*
     * If not found, schedule a new timer request.
     */
    if (ring == &event_timer_head) {
	timer = (EVENT_TIMER *) mymalloc(sizeof(EVENT_TIMER));
	timer->when = event_present + delay;
	timer->callback = callback;
	timer->context = context;
	timer->loop_instance = event_loop_instance;
	if (msg_verbose > 2)
	    msg_info("%s: set 0x%lx 0x%lx %d", myname,
		     (long) callback, (long) context, delay);
    }

    /*
     * Timer requests are kept sorted to reduce lookup overhead in the event
     * loop.
     *
     * XXX Append the new request after existing requests for the same time
     * slot. The event_loop() routine depends on this to avoid starving I/O
     * events when a call-back function schedules a zero-delay timer request.
     */
    FOREACH_QUEUE_ENTRY(ring, &event_timer_head) {
	if (timer->when < RING_TO_TIMER(ring)->when)
	    break;
    }
    ring_prepend(ring, &timer->ring);

    return (timer->when);
}
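
 /*
  * Ordering example (illustrative; cb_a, cb_b, ctx_a and ctx_b are
  * hypothetical): after the calls
  *
  *	event_request_timer(cb_a, ctx_a, 5);
  *	event_request_timer(cb_b, ctx_b, 5);
  *
  * both requests share the same time slot, and the insertion loop above
  * places cb_b after cb_a, so callbacks with equal deadlines fire in
  * registration order. event_loop() relies on this, together with the
  * loop_instance label, to defer zero-delay requests that are added from
  * within a timer callback until the next event_loop() call.
  */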

/* event_cancel_timer - cancel timer */

int     event_cancel_timer(EVENT_NOTIFY_TIME_FN callback, void *context)
{
    const char *myname = "event_cancel_timer";
    RING   *ring;
    EVENT_TIMER *timer;
    int     time_left = -1;

    if (EVENT_INIT_NEEDED())
	event_init();

    /*
     * See if they are canceling an existing timer request. Do not complain
     * when the request is not found. It might have been canceled from some
     * other thread.
     */
    FOREACH_QUEUE_ENTRY(ring, &event_timer_head) {
	timer = RING_TO_TIMER(ring);
	if (timer->callback == callback && timer->context == context) {
	    if ((time_left = timer->when - event_present) < 0)
		time_left = 0;
	    ring_detach(ring);
	    myfree((void *) timer);
	    break;
	}
    }
    if (msg_verbose > 2)
	msg_info("%s: 0x%lx 0x%lx %d", myname,
		 (long) callback, (long) context, time_left);
    return (time_left);
}

/* event_loop - wait for the next event */

void    event_loop(int delay)
{
    const char *myname = "event_loop";
    static int nested;

#if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
    fd_set  rmask;
    fd_set  wmask;
    fd_set  xmask;
    struct timeval tv;
    struct timeval *tvp;
    int     new_max_fd;

#else
    EVENT_BUFFER event_buf[100];
    EVENT_BUFFER *bp;

#endif
    int     event_count;
    EVENT_TIMER *timer;
    int     fd;
    EVENT_FDTABLE *fdp;
    int     select_delay;

    if (EVENT_INIT_NEEDED())
	event_init();

    /*
     * XXX Also print the select() masks?
     */
    if (msg_verbose > 2) {
	RING   *ring;

	FOREACH_QUEUE_ENTRY(ring, &event_timer_head) {
	    timer = RING_TO_TIMER(ring);
	    msg_info("%s: time left %3d for 0x%lx 0x%lx", myname,
		     (int) (timer->when - event_present),
		     (long) timer->callback, (long) timer->context);
	}
    }

    /*
     * Find out when the next timer would go off. Timer requests are sorted.
     * If any timer is scheduled, adjust the delay appropriately.
     */
    if ((timer = FIRST_TIMER(&event_timer_head)) != 0) {
	event_present = time((time_t *) 0);
	if ((select_delay = timer->when - event_present) < 0) {
	    select_delay = 0;
	} else if (delay >= 0 && select_delay > delay) {
	    select_delay = delay;
	}
    } else {
	select_delay = delay;
    }
    if (msg_verbose > 2)
	msg_info("event_loop: select_delay %d", select_delay);

    /*
     * Negative delay means: wait until something happens. Zero delay means:
     * poll. Positive delay means: wait at most this long.
     */
#if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
    if (select_delay < 0) {
	tvp = 0;
    } else {
	tvp = &tv;
	tv.tv_usec = 0;
	tv.tv_sec = select_delay;
    }

    /*
     * Pause until the next event happens. When select() has a problem, don't
     * go into a tight loop. Allow select() to be interrupted due to the
     * arrival of a signal.
     */
    rmask = event_rmask;
    wmask = event_wmask;
    xmask = event_xmask;

    event_count = select(event_max_fd + 1, &rmask, &wmask, &xmask, tvp);
    if (event_count < 0) {
	if (errno != EINTR)
	    msg_fatal("event_loop: select: %m");
	return;
    }
#else
    EVENT_BUFFER_READ(event_count, event_buf,
		      sizeof(event_buf) / sizeof(event_buf[0]),
		      select_delay);
    if (event_count < 0) {
	if (errno != EINTR)
	    msg_fatal("event_loop: " EVENT_BUFFER_READ_TEXT ": %m");
	return;
    }
#endif

    /*
     * Before entering the application call-back routines, make sure we
     * aren't being called from a call-back routine. Doing so would make us
     * vulnerable to all kinds of race conditions.
     */
    if (nested++ > 0)
	msg_panic("event_loop: recursive call");

    /*
     * Deliver timer events. Allow the application to add/delete timer queue
     * requests while it is being called back. Requests are sorted: we keep
     * running over the timer request queue from the start, and stop when we
     * reach the future or the list end. We also stop when we reach a timer
     * request that was added by a call-back that was invoked from this
     * event_loop() call instance, for reasons that are explained below.
     *
     * To avoid dangling pointer problems 1) we must remove a request from the
     * timer queue before delivering its event to the application and 2) we
     * must look up the next timer request *after* calling the application.
     * The latter complicates the handling of zero-delay timer requests that
     * are added by event_loop() call-back functions.
     *
     * XXX When a timer event call-back function adds a new timer request,
     * event_request_timer() labels the request with the event_loop() call
     * instance that invoked the timer event call-back. We use this instance
     * label here to prevent zero-delay timer requests from running in a
     * tight loop and starving I/O events. To make this solution work,
     * event_request_timer() appends a new request after existing requests
     * for the same time slot.
     */
    event_present = time((time_t *) 0);
    event_loop_instance += 1;

    while ((timer = FIRST_TIMER(&event_timer_head)) != 0) {
	if (timer->when > event_present)
	    break;
	if (timer->loop_instance == event_loop_instance)
	    break;
	ring_detach(&timer->ring);		/* first this */
	if (msg_verbose > 2)
	    msg_info("%s: timer 0x%lx 0x%lx", myname,
		     (long) timer->callback, (long) timer->context);
	timer->callback(EVENT_TIME, timer->context);	/* then this */
	myfree((void *) timer);
    }

    /*
     * Deliver I/O events. Allow the application to cancel event requests
     * while it is being called back. To this end, we keep an eye on the
     * contents of event_xmask, so that we deliver only events that are still
     * wanted. We do not change the event request masks. It is up to the
     * application to determine when a read or write is complete.
     */
#if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
    if (event_count > 0) {
	for (new_max_fd = 0, fd = 0; fd <= event_max_fd; fd++) {
	    if (FD_ISSET(fd, &event_xmask)) {
		new_max_fd = fd;
		/* In case event_fdtable is updated. */
		fdp = event_fdtable + fd;
		if (FD_ISSET(fd, &xmask)) {
		    if (msg_verbose > 2)
			msg_info("%s: exception fd=%d act=0x%lx 0x%lx", myname,
			     fd, (long) fdp->callback, (long) fdp->context);
		    fdp->callback(EVENT_XCPT, fdp->context);
		} else if (FD_ISSET(fd, &wmask)) {
		    if (msg_verbose > 2)
			msg_info("%s: write fd=%d act=0x%lx 0x%lx", myname,
			     fd, (long) fdp->callback, (long) fdp->context);
		    fdp->callback(EVENT_WRITE, fdp->context);
		} else if (FD_ISSET(fd, &rmask)) {
		    if (msg_verbose > 2)
			msg_info("%s: read fd=%d act=0x%lx 0x%lx", myname,
			     fd, (long) fdp->callback, (long) fdp->context);
		    fdp->callback(EVENT_READ, fdp->context);
		}
	    }
	}
	event_max_fd = new_max_fd;
    }
#else
    for (bp = event_buf; bp < event_buf + event_count; bp++) {
	fd = EVENT_GET_FD(bp);
	if (fd < 0 || fd > event_max_fd)
	    msg_panic("%s: bad file descriptor: %d", myname, fd);
	if (EVENT_MASK_ISSET(fd, &event_xmask)) {
	    fdp = event_fdtable + fd;
	    if (EVENT_TEST_READ(bp)) {
		if (msg_verbose > 2)
		    msg_info("%s: read fd=%d act=0x%lx 0x%lx", myname,
			     fd, (long) fdp->callback, (long) fdp->context);
		fdp->callback(EVENT_READ, fdp->context);
	    } else if (EVENT_TEST_WRITE(bp)) {
		if (msg_verbose > 2)
		    msg_info("%s: write fd=%d act=0x%lx 0x%lx", myname,
			     fd, (long) fdp->callback,
			     (long) fdp->context);
		fdp->callback(EVENT_WRITE, fdp->context);
	    } else {
		if (msg_verbose > 2)
		    msg_info("%s: other fd=%d act=0x%lx 0x%lx", myname,
			     fd, (long) fdp->callback, (long) fdp->context);
		fdp->callback(EVENT_XCPT, fdp->context);
	    }
	}
    }
#endif
    nested--;
}

#ifdef TEST

 /*
  * Proof-of-concept test program for the event manager. Schedule a series of
  * events at one-second intervals and let them happen, while echoing any
  * lines read from stdin.
  */
#include <stdio.h>
#include <ctype.h>
#include <stdlib.h>

/* timer_event - display event */

static void timer_event(int unused_event, void *context)
{
    printf("%ld: %s\n", (long) event_present, (char *) context);
    fflush(stdout);
}

/* echo - echo text received on stdin */

static void echo(int unused_event, void *unused_context)
{
    char    buf[BUFSIZ];

    if (fgets(buf, sizeof(buf), stdin) == 0)
	exit(0);
    printf("Result: %s", buf);
}

/* request - request a bunch of timer events */

static void request(int unused_event, void *unused_context)
{
    event_request_timer(timer_event, "3 first", 3);
    event_request_timer(timer_event, "3 second", 3);
    event_request_timer(timer_event, "4 first", 4);
    event_request_timer(timer_event, "4 second", 4);
    event_request_timer(timer_event, "2 first", 2);
    event_request_timer(timer_event, "2 second", 2);
    event_request_timer(timer_event, "1 first", 1);
    event_request_timer(timer_event, "1 second", 1);
    event_request_timer(timer_event, "0 first", 0);
    event_request_timer(timer_event, "0 second", 0);
}

int     main(int argc, char **argv)
{
    if (argv[1])
	msg_verbose = atoi(argv[1]);
    event_request_timer(request, (void *) 0, 0);
    event_enable_read(fileno(stdin), echo, (void *) 0);
    event_drain(10);
    exit(0);
}

#endif
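
 /*
  * Usage sketch for the test program (assumption: this file is compiled
  * with -DTEST and linked against the util library that provides msg(3),
  * mymalloc(3) and ring(3)): the program prints the ten timer events in
  * deadline order over roughly four seconds, echoes stdin lines as they
  * arrive, and exits after the 10-second event_drain() limit or at EOF on
  * stdin, whichever comes first. An optional numeric argument sets
  * msg_verbose.
  */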
1262