/* $NetBSD: events.c,v 1.1.1.2 2010/06/17 18:07:13 tron Exp $ */

/*++
/* NAME
/*	events 3
/* SUMMARY
/*	event manager
/* SYNOPSIS
/*	#include <events.h>
/*
/*	time_t	event_time()
/*
/*	void	event_loop(delay)
/*	int	delay;
/*
/*	time_t	event_request_timer(callback, context, delay)
/*	void	(*callback)(int event, char *context);
/*	char	*context;
/*	int	delay;
/*
/*	int	event_cancel_timer(callback, context)
/*	void	(*callback)(int event, char *context);
/*	char	*context;
/*
/*	void	event_enable_read(fd, callback, context)
/*	int	fd;
/*	void	(*callback)(int event, char *context);
/*	char	*context;
/*
/*	void	event_enable_write(fd, callback, context)
/*	int	fd;
/*	void	(*callback)(int event, char *context);
/*	char	*context;
/*
/*	void	event_disable_readwrite(fd)
/*	int	fd;
/*
/*	void	event_drain(time_limit)
/*	int	time_limit;
/*
/*	void	event_fork(void)
/* DESCRIPTION
/*	This module delivers I/O and timer events.
/*	Multiple I/O streams and timers can be monitored simultaneously.
/*	Events are delivered via callback routines provided by the
/*	application. When requesting an event, the application can provide
/*	private context that is passed back when the callback routine is
/*	executed.
/*
/*	event_time() returns a cached value of the current time.
/*
/*	event_loop() monitors all I/O channels for which the application has
/*	expressed interest, and monitors the timer request queue.
/*	It notifies the application whenever events of interest happen.
/*	A negative delay value causes the function to pause until something
/*	happens; a positive delay value causes event_loop() to return when
/*	the next event happens or when the delay time in seconds is over,
/*	whichever happens first. A zero delay effectuates a poll.
/*
/*	Note: in order to avoid race conditions, event_loop() cannot
/*	be called recursively.
/*
/*	event_request_timer() causes the specified callback function to
/*	be called with the specified context argument after \fIdelay\fR
/*	seconds, or as soon as possible thereafter. The delay must not
/*	be negative.
/*	The event argument is equal to EVENT_TIME.
/*	Only one timer request can be active per (callback, context) pair.
/*	Calling event_request_timer() with an existing (callback, context)
/*	pair does not schedule a new event, but updates the time of event
/*	delivery. The result is the absolute time at which the timer is
/*	scheduled to go off.
/*
/*	event_cancel_timer() cancels the specified (callback, context) request.
/*	The application is allowed to cancel non-existing requests. The result
/*	value is the amount of time left before the timer would have gone off,
/*	or -1 in case of no pending timer.
/*
/*	event_enable_read() (event_enable_write()) enables read (write) events
/*	on the named I/O channel. It is up to the application to assemble
/*	partial reads or writes.
/*	An I/O channel cannot handle more than one request at the
/*	same time. The application is allowed to enable an event that
/*	is already enabled (same channel, same read or write operation,
/*	but perhaps a different callback or context). On systems with
/*	kernel-based event filters this is preferred usage, because
/*	each disable and enable request would cost a system call.
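/*
/*	For illustration (the descriptor, callback, and context names here
/*	are hypothetical), a later call may switch an already enabled read
/*	channel to a different callback or context without an intervening
/*	event_disable_readwrite() call:
/* .nf
/*
/*	    event_enable_read(fd, read_greeting, (char *) session);
/*	    event_enable_read(fd, read_command, (char *) session);
/* .fi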
/*
/*	The manifest constants EVENT_NULL_CONTEXT and EVENT_NULL_TYPE
/*	provide convenient null values.
/*
/*	The callback routine has the following arguments:
/* .IP fd
/*	The stream on which the event happened.
/* .IP event
/*	An indication of the event type:
/* .RS
/* .IP EVENT_READ
/*	read event,
/* .IP EVENT_WRITE
/*	write event,
/* .IP EVENT_XCPT
/*	exception (actually, any event other than read or write).
/* .RE
/* .IP context
/*	Application context given to event_enable_read() (event_enable_write()).
/* .PP
/*	event_disable_readwrite() disables further I/O events on the specified
/*	I/O channel. The application is allowed to cancel non-existing
/*	I/O event requests.
/*
/*	event_drain() repeatedly calls event_loop() until no more timer
/*	events or I/O events are pending or until the time limit is reached.
/*	This routine must not be called from an event_whatever() callback
/*	routine. Note: this function assumes that no new I/O events
/*	will be registered.
/*
/*	event_fork() must be called by a child process after it is
/*	created with fork(), to re-initialize event processing.
/* DIAGNOSTICS
/*	Panics: interface violations. Fatal errors: out of memory,
/*	system call failure. Warnings: the number of available
/*	file descriptors is much less than FD_SETSIZE.
/* BUGS
/*	This module is based on event selection. It assumes that the
/*	event_loop() routine is called frequently. This approach is
/*	not suitable for applications with compute-bound loops that
/*	take a significant amount of time.
/* LICENSE
/* .ad
/* .fi
/*	The Secure Mailer license must be distributed with this software.
/* AUTHOR(S)
/*	Wietse Venema
/*	IBM T.J. Watson Research
/*	P.O. Box 704
/*	Yorktown Heights, NY 10598, USA
/*--*/

/* System libraries. */

#include "sys_defs.h"
#include <sys/time.h>			/* XXX: 44BSD uses bzero() */
#include <time.h>
#include <errno.h>
#include <unistd.h>
#include <stddef.h>			/* offsetof() */
#include <string.h>			/* bzero() prototype for 44BSD */
#include <limits.h>			/* INT_MAX */

#ifdef USE_SYS_SELECT_H
#include <sys/select.h>
#endif

/* Application-specific. */

#include "mymalloc.h"
#include "msg.h"
#include "iostuff.h"
#include "ring.h"
#include "events.h"

#if !defined(EVENTS_STYLE)
#error "must define EVENTS_STYLE"
#endif

 /*
  * Traditional BSD-style select(2). Works everywhere, but has a built-in
  * upper bound on the number of file descriptors, and that limit is hard to
  * change on Linux. It is sometimes emulated with SYSV-style poll(2), which
  * doesn't have the file descriptor limit, but unfortunately does not help
  * to improve the performance of servers with lots of connections.
  */
#define EVENT_ALLOC_INCR		10

#if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
typedef fd_set EVENT_MASK;

#define EVENT_MASK_BYTE_COUNT(mask)	sizeof(*(mask))
#define EVENT_MASK_ZERO(mask)		FD_ZERO(mask)
#define EVENT_MASK_SET(fd, mask)	FD_SET((fd), (mask))
#define EVENT_MASK_ISSET(fd, mask)	FD_ISSET((fd), (mask))
#define EVENT_MASK_CLR(fd, mask)	FD_CLR((fd), (mask))
#else

 /*
  * Kernel-based event filters (kqueue, /dev/poll, epoll). We use the
  * following file descriptor mask structure which is expanded on the fly.
  */
typedef struct {
    char   *data;			/* bit mask */
    size_t  data_len;			/* data byte count */
} EVENT_MASK;

/* Bits per byte, byte in vector, bit offset in byte, bytes per set. */
#define EVENT_MASK_NBBY		(8)
#define EVENT_MASK_FD_BYTE(fd, mask) \
	(((unsigned char *) (mask)->data)[(fd) / EVENT_MASK_NBBY])
#define EVENT_MASK_FD_BIT(fd)	(1 << ((fd) % EVENT_MASK_NBBY))
#define EVENT_MASK_BYTES_NEEDED(len) \
	(((len) + (EVENT_MASK_NBBY - 1)) / EVENT_MASK_NBBY)
#define EVENT_MASK_BYTE_COUNT(mask)	((mask)->data_len)
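
 /*
  * Worked example (illustration only, not used by the code): with
  * EVENT_MASK_NBBY == 8, file descriptor 10 lives in byte 10 / 8 == 1 at
  * bit 10 % 8 == 2, so EVENT_MASK_SET(10, mask) ORs 0x04 into
  * mask->data[1]. Likewise, EVENT_MASK_BYTES_NEEDED(10) rounds up to
  * (10 + 7) / 8 == 2 bytes.
  */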
/* Memory management. */
#define EVENT_MASK_ALLOC(mask, bit_len) do { \
	size_t _byte_len = EVENT_MASK_BYTES_NEEDED(bit_len); \
	(mask)->data = mymalloc(_byte_len); \
	memset((mask)->data, 0, _byte_len); \
	(mask)->data_len = _byte_len; \
    } while (0)
#define EVENT_MASK_REALLOC(mask, bit_len) do { \
	size_t _byte_len = EVENT_MASK_BYTES_NEEDED(bit_len); \
	size_t _old_len = (mask)->data_len; \
	(mask)->data = myrealloc((mask)->data, _byte_len); \
	if (_byte_len > _old_len) \
	    memset((mask)->data + _old_len, 0, _byte_len - _old_len); \
	(mask)->data_len = _byte_len; \
    } while (0)
#define EVENT_MASK_FREE(mask)	myfree((mask)->data)

/* Set operations, modeled after FD_ZERO/SET/ISSET/CLR. */
#define EVENT_MASK_ZERO(mask) \
	memset((mask)->data, 0, (mask)->data_len)
#define EVENT_MASK_SET(fd, mask) \
	(EVENT_MASK_FD_BYTE((fd), (mask)) |= EVENT_MASK_FD_BIT(fd))
#define EVENT_MASK_ISSET(fd, mask) \
	(EVENT_MASK_FD_BYTE((fd), (mask)) & EVENT_MASK_FD_BIT(fd))
#define EVENT_MASK_CLR(fd, mask) \
	(EVENT_MASK_FD_BYTE((fd), (mask)) &= ~EVENT_MASK_FD_BIT(fd))
#endif

 /*
  * I/O events.
  */
typedef struct EVENT_FDTABLE EVENT_FDTABLE;

struct EVENT_FDTABLE {
    EVENT_NOTIFY_RDWR callback;
    char   *context;
};
static EVENT_MASK event_rmask;		/* enabled read events */
static EVENT_MASK event_wmask;		/* enabled write events */
static EVENT_MASK event_xmask;		/* for bad news mostly */
static int event_fdlimit;		/* per-process open file limit */
static EVENT_FDTABLE *event_fdtable;	/* one slot per file descriptor */
static int event_fdslots;		/* number of file descriptor slots */
static int event_max_fd = -1;		/* highest fd number seen */

 /*
  * FreeBSD kqueue provides no system call to find out what descriptors are
  * registered in the kernel-based filter. To implement our own sanity checks
  * we maintain our own descriptor bitmask.
  *
  * FreeBSD kqueue does support application context pointers. Unfortunately,
  * changing that information would cost a system call, and some of the
  * competitors don't support application context. To keep the implementation
  * simple we maintain our own table with call-back information.
  *
  * FreeBSD kqueue silently unregisters a descriptor from its filter when the
  * descriptor is closed, so our information could get out of sync with the
  * kernel. But that will never happen, because we have to meticulously
  * unregister a file descriptor before it is closed, to avoid errors on
  * systems that are built with EVENTS_STYLE == EVENTS_STYLE_SELECT.
  */
#if (EVENTS_STYLE == EVENTS_STYLE_KQUEUE)
#include <sys/event.h>

 /*
  * Some early FreeBSD implementations don't have the EV_SET macro.
  */
#ifndef EV_SET
#define EV_SET(kp, id, fi, fl, ffl, da, ud) do { \
	(kp)->ident = (id); \
	(kp)->filter = (fi); \
	(kp)->flags = (fl); \
	(kp)->fflags = (ffl); \
	(kp)->data = (da); \
	(kp)->udata = (ud); \
    } while (0)
#endif

 /*
  * Macros to initialize the kernel-based filter; see event_init().
  */
static int event_kq;			/* handle to event filter */

#define EVENT_REG_INIT_HANDLE(er, n) do { \
	er = event_kq = kqueue(); \
    } while (0)
#define EVENT_REG_INIT_TEXT	"kqueue"

#define EVENT_REG_FORK_HANDLE(er, n) do { \
	(void) close(event_kq); \
	EVENT_REG_INIT_HANDLE(er, (n)); \
    } while (0)

 /*
  * Macros to update the kernel-based filter; see event_enable_read(),
  * event_enable_write() and event_disable_readwrite().
  */
#define EVENT_REG_FD_OP(er, fh, ev, op) do { \
	struct kevent dummy; \
	EV_SET(&dummy, (fh), (ev), (op), 0, 0, 0); \
	(er) = kevent(event_kq, &dummy, 1, 0, 0, 0); \
    } while (0)

#define EVENT_REG_ADD_OP(e, f, ev)	EVENT_REG_FD_OP((e), (f), (ev), EV_ADD)
#define EVENT_REG_ADD_READ(e, f)	EVENT_REG_ADD_OP((e), (f), EVFILT_READ)
#define EVENT_REG_ADD_WRITE(e, f)	EVENT_REG_ADD_OP((e), (f), EVFILT_WRITE)
#define EVENT_REG_ADD_TEXT	"kevent EV_ADD"

#define EVENT_REG_DEL_OP(e, f, ev)	EVENT_REG_FD_OP((e), (f), (ev), EV_DELETE)
#define EVENT_REG_DEL_READ(e, f)	EVENT_REG_DEL_OP((e), (f), EVFILT_READ)
#define EVENT_REG_DEL_WRITE(e, f)	EVENT_REG_DEL_OP((e), (f), EVFILT_WRITE)
#define EVENT_REG_DEL_TEXT	"kevent EV_DELETE"

 /*
  * Macros to retrieve event buffers from the kernel; see event_loop().
  */
typedef struct kevent EVENT_BUFFER;

#define EVENT_BUFFER_READ(event_count, event_buf, buflen, delay) do { \
	struct timespec ts; \
	struct timespec *tsp; \
	if ((delay) < 0) { \
	    tsp = 0; \
	} else { \
	    tsp = &ts; \
	    ts.tv_nsec = 0; \
	    ts.tv_sec = (delay); \
	} \
	(event_count) = kevent(event_kq, (struct kevent *) 0, 0, (event_buf), \
			       (buflen), (tsp)); \
    } while (0)
#define EVENT_BUFFER_READ_TEXT	"kevent"

 /*
  * Macros to process event buffers from the kernel; see event_loop().
  */
#define EVENT_GET_FD(bp)	((bp)->ident)
#define EVENT_GET_TYPE(bp)	((bp)->filter)
#define EVENT_TEST_READ(bp)	(EVENT_GET_TYPE(bp) == EVFILT_READ)
#define EVENT_TEST_WRITE(bp)	(EVENT_GET_TYPE(bp) == EVFILT_WRITE)

#endif
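
 /*
  * Illustration (not used by the code): on the kqueue platform,
  * EVENT_REG_ADD_READ(err, fd) expands to roughly
  *
  *	struct kevent dummy;
  *	EV_SET(&dummy, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
  *	err = kevent(event_kq, &dummy, 1, 0, 0, 0);
  *
  * i.e. one kevent() system call per registration change, which is why
  * event_enable_read() and friends avoid redundant disable/enable round
  * trips.
  */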
 /*
  * Solaris /dev/poll does not support application context, so we have to
  * maintain our own. This has the benefit of avoiding an expensive system
  * call just to change a call-back function or argument.
  *
  * Solaris /dev/poll does have a way to query if a specific descriptor is
  * registered. However, we maintain a descriptor mask anyway because a) it
  * avoids having to make an expensive system call to find out if something
  * is registered, b) some EVENTS_STYLE_MUMBLE implementations need a
  * descriptor bitmask anyway, and c) we already use the bitmask to
  * implement sanity checks.
  */
#if (EVENTS_STYLE == EVENTS_STYLE_DEVPOLL)
#include <sys/devpoll.h>
#include <fcntl.h>

 /*
  * Macros to initialize the kernel-based filter; see event_init().
  */
static int event_pollfd;		/* handle to file descriptor set */

#define EVENT_REG_INIT_HANDLE(er, n) do { \
	er = event_pollfd = open("/dev/poll", O_RDWR); \
	if (event_pollfd >= 0) close_on_exec(event_pollfd, CLOSE_ON_EXEC); \
    } while (0)
#define EVENT_REG_INIT_TEXT	"open /dev/poll"

#define EVENT_REG_FORK_HANDLE(er, n) do { \
	(void) close(event_pollfd); \
	EVENT_REG_INIT_HANDLE(er, (n)); \
    } while (0)

 /*
  * Macros to update the kernel-based filter; see event_enable_read(),
  * event_enable_write() and event_disable_readwrite().
  */
#define EVENT_REG_FD_OP(er, fh, ev) do { \
	struct pollfd dummy; \
	dummy.fd = (fh); \
	dummy.events = (ev); \
	(er) = write(event_pollfd, (char *) &dummy, \
		     sizeof(dummy)) != sizeof(dummy) ? -1 : 0; \
    } while (0)

#define EVENT_REG_ADD_READ(e, f)	EVENT_REG_FD_OP((e), (f), POLLIN)
#define EVENT_REG_ADD_WRITE(e, f)	EVENT_REG_FD_OP((e), (f), POLLOUT)
#define EVENT_REG_ADD_TEXT	"write /dev/poll"

#define EVENT_REG_DEL_BOTH(e, f)	EVENT_REG_FD_OP((e), (f), POLLREMOVE)
#define EVENT_REG_DEL_TEXT	"write /dev/poll"

 /*
  * Macros to retrieve event buffers from the kernel; see event_loop().
  */
typedef struct pollfd EVENT_BUFFER;

#define EVENT_BUFFER_READ(event_count, event_buf, buflen, delay) do { \
	struct dvpoll dvpoll; \
	dvpoll.dp_fds = (event_buf); \
	dvpoll.dp_nfds = (buflen); \
	dvpoll.dp_timeout = (delay) < 0 ? -1 : (delay) * 1000; \
	(event_count) = ioctl(event_pollfd, DP_POLL, &dvpoll); \
    } while (0)
#define EVENT_BUFFER_READ_TEXT	"ioctl DP_POLL"

 /*
  * Macros to process event buffers from the kernel; see event_loop().
  */
#define EVENT_GET_FD(bp)	((bp)->fd)
#define EVENT_GET_TYPE(bp)	((bp)->revents)
#define EVENT_TEST_READ(bp)	(EVENT_GET_TYPE(bp) & POLLIN)
#define EVENT_TEST_WRITE(bp)	(EVENT_GET_TYPE(bp) & POLLOUT)

#endif
 /*
  * Linux epoll provides no system call to find out what descriptors are
  * registered in the kernel-based filter. To implement our own sanity checks
  * we maintain our own descriptor bitmask.
  *
  * Linux epoll does support application context pointers. Unfortunately,
  * changing that information would cost a system call, and some of the
  * competitors don't support application context. To keep the implementation
  * simple we maintain our own table with call-back information.
  *
  * Linux epoll silently unregisters a descriptor from its filter when the
  * descriptor is closed, so our information could get out of sync with the
  * kernel. But that will never happen, because we have to meticulously
  * unregister a file descriptor before it is closed, to avoid errors on
  * systems that are built with EVENTS_STYLE == EVENTS_STYLE_SELECT.
  */
#if (EVENTS_STYLE == EVENTS_STYLE_EPOLL)
#include <sys/epoll.h>

 /*
  * Macros to initialize the kernel-based filter; see event_init().
  */
static int event_epollfd;		/* epoll handle */

#define EVENT_REG_INIT_HANDLE(er, n) do { \
	er = event_epollfd = epoll_create(n); \
	if (event_epollfd >= 0) close_on_exec(event_epollfd, CLOSE_ON_EXEC); \
    } while (0)
#define EVENT_REG_INIT_TEXT	"epoll_create"

#define EVENT_REG_FORK_HANDLE(er, n) do { \
	(void) close(event_epollfd); \
	EVENT_REG_INIT_HANDLE(er, (n)); \
    } while (0)

 /*
  * Macros to update the kernel-based filter; see event_enable_read(),
  * event_enable_write() and event_disable_readwrite().
  */
#define EVENT_REG_FD_OP(er, fh, ev, op) do { \
	struct epoll_event dummy; \
	dummy.events = (ev); \
	dummy.data.fd = (fh); \
	(er) = epoll_ctl(event_epollfd, (op), (fh), &dummy); \
    } while (0)

#define EVENT_REG_ADD_OP(e, f, ev)	EVENT_REG_FD_OP((e), (f), (ev), EPOLL_CTL_ADD)
#define EVENT_REG_ADD_READ(e, f)	EVENT_REG_ADD_OP((e), (f), EPOLLIN)
#define EVENT_REG_ADD_WRITE(e, f)	EVENT_REG_ADD_OP((e), (f), EPOLLOUT)
#define EVENT_REG_ADD_TEXT	"epoll_ctl EPOLL_CTL_ADD"

#define EVENT_REG_DEL_OP(e, f, ev)	EVENT_REG_FD_OP((e), (f), (ev), EPOLL_CTL_DEL)
#define EVENT_REG_DEL_READ(e, f)	EVENT_REG_DEL_OP((e), (f), EPOLLIN)
#define EVENT_REG_DEL_WRITE(e, f)	EVENT_REG_DEL_OP((e), (f), EPOLLOUT)
#define EVENT_REG_DEL_TEXT	"epoll_ctl EPOLL_CTL_DEL"

 /*
  * Macros to retrieve event buffers from the kernel; see event_loop().
  */
typedef struct epoll_event EVENT_BUFFER;

#define EVENT_BUFFER_READ(event_count, event_buf, buflen, delay) do { \
	(event_count) = epoll_wait(event_epollfd, (event_buf), (buflen), \
				   (delay) < 0 ? -1 : (delay) * 1000); \
    } while (0)
#define EVENT_BUFFER_READ_TEXT	"epoll_wait"

 /*
  * Macros to process event buffers from the kernel; see event_loop().
  */
#define EVENT_GET_FD(bp)	((bp)->data.fd)
#define EVENT_GET_TYPE(bp)	((bp)->events)
#define EVENT_TEST_READ(bp)	(EVENT_GET_TYPE(bp) & EPOLLIN)
#define EVENT_TEST_WRITE(bp)	(EVENT_GET_TYPE(bp) & EPOLLOUT)

#endif

 /*
  * Timer events. Timer requests are kept sorted, in a circular list. We use
  * the RING abstraction, so we get to use a couple of ugly macros.
  *
  * When a call-back function adds a timer request, we label the request with
  * the event_loop() call instance that invoked the call-back. We use this to
  * prevent zero-delay timer requests from running in a tight loop and
  * starving I/O events.
  */
typedef struct EVENT_TIMER EVENT_TIMER;

struct EVENT_TIMER {
    time_t  when;			/* when event is wanted */
    EVENT_NOTIFY_TIME callback;		/* callback function */
    char   *context;			/* callback context */
    long    loop_instance;		/* event_loop() call instance */
    RING    ring;			/* linkage */
};

static RING event_timer_head;		/* timer queue head */
static long event_loop_instance;	/* event_loop() call instance */

#define RING_TO_TIMER(r) \
	((EVENT_TIMER *) ((char *) (r) - offsetof(EVENT_TIMER, ring)))

#define FOREACH_QUEUE_ENTRY(entry, head) \
	for (entry = ring_succ(head); entry != (head); entry = ring_succ(entry))

#define FIRST_TIMER(head) \
	(ring_succ(head) != (head) ? RING_TO_TIMER(ring_succ(head)) : 0)
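
 /*
  * Illustration (not used by the code): RING_TO_TIMER() recovers the
  * enclosing timer record from its embedded ring link, so for any
  * EVENT_TIMER t, RING_TO_TIMER(&t.ring) == &t. This is what lets the
  * generic RING list link EVENT_TIMER structures without knowing
  * anything about them.
  */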
 /*
  * Other private data structures.
  */
static time_t event_present;		/* cached time of day */

#define EVENT_INIT_NEEDED()	(event_present == 0)

/* event_init - set up tables and such */

static void event_init(void)
{
    EVENT_FDTABLE *fdp;
    int     err;

    if (!EVENT_INIT_NEEDED())
	msg_panic("event_init: repeated call");

    /*
     * Initialize the file descriptor masks and the call-back table. Where
     * possible we extend these data structures on the fly. With select(2)
     * based implementations we can only handle FD_SETSIZE open files.
     */
#if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
    if ((event_fdlimit = open_limit(FD_SETSIZE)) < 0)
	msg_fatal("unable to determine open file limit");
#else
    if ((event_fdlimit = open_limit(INT_MAX)) < 0)
	msg_fatal("unable to determine open file limit");
#endif
    if (event_fdlimit < FD_SETSIZE / 2 && event_fdlimit < 256)
	msg_warn("could allocate space for only %d open files", event_fdlimit);
    event_fdslots = EVENT_ALLOC_INCR;
    event_fdtable = (EVENT_FDTABLE *)
	mymalloc(sizeof(EVENT_FDTABLE) * event_fdslots);
    for (fdp = event_fdtable; fdp < event_fdtable + event_fdslots; fdp++) {
	fdp->callback = 0;
	fdp->context = 0;
    }

    /*
     * Initialize the I/O event request masks.
     */
#if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
    EVENT_MASK_ZERO(&event_rmask);
    EVENT_MASK_ZERO(&event_wmask);
    EVENT_MASK_ZERO(&event_xmask);
#else
    EVENT_MASK_ALLOC(&event_rmask, event_fdslots);
    EVENT_MASK_ALLOC(&event_wmask, event_fdslots);
    EVENT_MASK_ALLOC(&event_xmask, event_fdslots);

    /*
     * Initialize the kernel-based filter.
     */
    EVENT_REG_INIT_HANDLE(err, event_fdslots);
    if (err < 0)
	msg_fatal("%s: %m", EVENT_REG_INIT_TEXT);
#endif

    /*
     * Initialize timer stuff.
     */
    ring_init(&event_timer_head);
    (void) time(&event_present);

    /*
     * Avoid an infinite initialization loop.
     */
    if (EVENT_INIT_NEEDED())
	msg_panic("event_init: unable to initialize");
}

/* event_extend - make room for more descriptor slots */

static void event_extend(int fd)
{
    const char *myname = "event_extend";
    int     old_slots = event_fdslots;
    int     new_slots = (event_fdslots > fd / 2 ?
			 2 * old_slots : fd + EVENT_ALLOC_INCR);
    EVENT_FDTABLE *fdp;
    int     err;

    if (msg_verbose > 2)
	msg_info("%s: fd %d", myname, fd);
    event_fdtable = (EVENT_FDTABLE *)
	myrealloc((char *) event_fdtable, sizeof(EVENT_FDTABLE) * new_slots);
    event_fdslots = new_slots;
    for (fdp = event_fdtable + old_slots;
	 fdp < event_fdtable + new_slots; fdp++) {
	fdp->callback = 0;
	fdp->context = 0;
    }

    /*
     * Initialize the I/O event request masks.
     */
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
    EVENT_MASK_REALLOC(&event_rmask, new_slots);
    EVENT_MASK_REALLOC(&event_wmask, new_slots);
    EVENT_MASK_REALLOC(&event_xmask, new_slots);
#endif
#ifdef EVENT_REG_UPD_HANDLE
    EVENT_REG_UPD_HANDLE(err, new_slots);
    if (err < 0)
	msg_fatal("%s: %s: %m", myname, EVENT_REG_UPD_TEXT);
#endif
}
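
 /*
  * Growth policy, by example (illustration only): with EVENT_ALLOC_INCR
  * of 10 and event_fdslots at 10, event_extend(12) doubles the table to
  * 20 slots because 10 > 12 / 2, while event_extend(100) jumps straight
  * to 100 + 10 == 110 slots because doubling from 10 would fall short.
  */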
/* event_time - look up cached time of day */

time_t  event_time(void)
{
    if (EVENT_INIT_NEEDED())
	event_init();

    return (event_present);
}

/* event_drain - loop until all pending events are done */

void    event_drain(int time_limit)
{
    EVENT_MASK zero_mask;
    time_t  max_time;

    if (EVENT_INIT_NEEDED())
	return;

#if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
    EVENT_MASK_ZERO(&zero_mask);
#else
    EVENT_MASK_ALLOC(&zero_mask, event_fdslots);
#endif
    (void) time(&event_present);
    max_time = event_present + time_limit;
    while (event_present < max_time
	   && (event_timer_head.pred != &event_timer_head
	       || memcmp(&zero_mask, &event_xmask,
			 EVENT_MASK_BYTE_COUNT(&zero_mask)) != 0)) {
	event_loop(1);
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
	if (EVENT_MASK_BYTE_COUNT(&zero_mask)
	    != EVENT_MASK_BYTES_NEEDED(event_fdslots))
	    EVENT_MASK_REALLOC(&zero_mask, event_fdslots);
#endif
    }
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
    EVENT_MASK_FREE(&zero_mask);
#endif
}

/* event_fork - resume event processing after fork() */

void    event_fork(void)
{
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
    EVENT_FDTABLE *fdp;
    int     err;
    int     fd;

    /*
     * No event was ever registered, so there's nothing to be done.
     */
    if (EVENT_INIT_NEEDED())
	return;

    /*
     * Close the existing filter handle and open a new kernel-based filter.
     */
    EVENT_REG_FORK_HANDLE(err, event_fdslots);
    if (err < 0)
	msg_fatal("%s: %m", EVENT_REG_INIT_TEXT);

    /*
     * Populate the new kernel-based filter with events that were registered
     * in the parent process.
     */
    for (fd = 0; fd <= event_max_fd; fd++) {
	if (EVENT_MASK_ISSET(fd, &event_wmask)) {
	    EVENT_MASK_CLR(fd, &event_wmask);
	    fdp = event_fdtable + fd;
	    event_enable_write(fd, fdp->callback, fdp->context);
	} else if (EVENT_MASK_ISSET(fd, &event_rmask)) {
	    EVENT_MASK_CLR(fd, &event_rmask);
	    fdp = event_fdtable + fd;
	    event_enable_read(fd, fdp->callback, fdp->context);
	}
    }
#endif
}
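
 /*
  * Illustrative call pattern (hypothetical child-process code, not part
  * of this module): after fork(), the child must call event_fork()
  * before it uses the event API again, so that a fresh kernel filter
  * handle replaces the one shared with the parent.
  *
  *	switch (fork()) {
  *	case 0:
  *	    event_fork();
  *	    child_main();
  *	    exit(0);
  *	}
  */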
/* event_enable_read - enable read events */

void    event_enable_read(int fd, EVENT_NOTIFY_RDWR callback, char *context)
{
    const char *myname = "event_enable_read";
    EVENT_FDTABLE *fdp;
    int     err;

    if (EVENT_INIT_NEEDED())
	event_init();

    /*
     * Sanity checks.
     */
    if (fd < 0 || fd >= event_fdlimit)
	msg_panic("%s: bad file descriptor: %d", myname, fd);

    if (msg_verbose > 2)
	msg_info("%s: fd %d", myname, fd);

    if (fd >= event_fdslots)
	event_extend(fd);

    /*
     * Disallow mixed (i.e. read and write) requests on the same descriptor.
     */
    if (EVENT_MASK_ISSET(fd, &event_wmask))
	msg_panic("%s: fd %d: read/write I/O request", myname, fd);

    /*
     * Postfix 2.4 allows multiple event_enable_read() calls on the same
     * descriptor without requiring event_disable_readwrite() calls between
     * them. With kernel-based filters (kqueue, /dev/poll, epoll) it's
     * wasteful to make system calls when we change only application
     * call-back information. It has a noticeable effect on smtp-source
     * performance.
     */
    if (EVENT_MASK_ISSET(fd, &event_rmask) == 0) {
	EVENT_MASK_SET(fd, &event_xmask);
	EVENT_MASK_SET(fd, &event_rmask);
	if (event_max_fd < fd)
	    event_max_fd = fd;
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
	EVENT_REG_ADD_READ(err, fd);
	if (err < 0)
	    msg_fatal("%s: %s: %m", myname, EVENT_REG_ADD_TEXT);
#endif
    }
    fdp = event_fdtable + fd;
    if (fdp->callback != callback || fdp->context != context) {
	fdp->callback = callback;
	fdp->context = context;
    }
}

/* event_enable_write - enable write events */

void    event_enable_write(int fd, EVENT_NOTIFY_RDWR callback, char *context)
{
    const char *myname = "event_enable_write";
    EVENT_FDTABLE *fdp;
    int     err;

    if (EVENT_INIT_NEEDED())
	event_init();

    /*
     * Sanity checks.
     */
    if (fd < 0 || fd >= event_fdlimit)
	msg_panic("%s: bad file descriptor: %d", myname, fd);

    if (msg_verbose > 2)
	msg_info("%s: fd %d", myname, fd);

    if (fd >= event_fdslots)
	event_extend(fd);

    /*
     * Disallow mixed (i.e. read and write) requests on the same descriptor.
     */
    if (EVENT_MASK_ISSET(fd, &event_rmask))
	msg_panic("%s: fd %d: read/write I/O request", myname, fd);

    /*
     * Postfix 2.4 allows multiple event_enable_write() calls on the same
     * descriptor without requiring event_disable_readwrite() calls between
     * them. With kernel-based filters (kqueue, /dev/poll, epoll) it's
     * incredibly wasteful to make unregister and register system calls when
     * we change only application call-back information. It has a noticeable
     * effect on smtp-source performance.
     */
    if (EVENT_MASK_ISSET(fd, &event_wmask) == 0) {
	EVENT_MASK_SET(fd, &event_xmask);
	EVENT_MASK_SET(fd, &event_wmask);
	if (event_max_fd < fd)
	    event_max_fd = fd;
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
	EVENT_REG_ADD_WRITE(err, fd);
	if (err < 0)
	    msg_fatal("%s: %s: %m", myname, EVENT_REG_ADD_TEXT);
#endif
    }
    fdp = event_fdtable + fd;
    if (fdp->callback != callback || fdp->context != context) {
	fdp->callback = callback;
	fdp->context = context;
    }
}
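
 /*
  * Illustrative call pattern (hypothetical callback and context names):
  * because mixed read/write requests on one descriptor are a panic,
  * switching direction requires an explicit disable first.
  *
  *	event_disable_readwrite(fd);
  *	event_enable_write(fd, send_reply, (char *) session);
  */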
/* event_disable_readwrite - disable request for read or write events */

void    event_disable_readwrite(int fd)
{
    const char *myname = "event_disable_readwrite";
    EVENT_FDTABLE *fdp;
    int     err;

    if (EVENT_INIT_NEEDED())
	event_init();

    /*
     * Sanity checks.
     */
    if (fd < 0 || fd >= event_fdlimit)
	msg_panic("%s: bad file descriptor: %d", myname, fd);

    if (msg_verbose > 2)
	msg_info("%s: fd %d", myname, fd);

    /*
     * Don't complain when there is nothing to cancel. The request may have
     * been canceled from another thread.
     */
    if (fd >= event_fdslots)
	return;
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
#ifdef EVENT_REG_DEL_BOTH
    /* XXX Can't seem to disable READ and WRITE events selectively. */
    if (EVENT_MASK_ISSET(fd, &event_rmask)
	|| EVENT_MASK_ISSET(fd, &event_wmask)) {
	EVENT_REG_DEL_BOTH(err, fd);
	if (err < 0)
	    msg_fatal("%s: %s: %m", myname, EVENT_REG_DEL_TEXT);
    }
#else
    if (EVENT_MASK_ISSET(fd, &event_rmask)) {
	EVENT_REG_DEL_READ(err, fd);
	if (err < 0)
	    msg_fatal("%s: %s: %m", myname, EVENT_REG_DEL_TEXT);
    } else if (EVENT_MASK_ISSET(fd, &event_wmask)) {
	EVENT_REG_DEL_WRITE(err, fd);
	if (err < 0)
	    msg_fatal("%s: %s: %m", myname, EVENT_REG_DEL_TEXT);
    }
#endif					/* EVENT_REG_DEL_BOTH */
#endif					/* != EVENTS_STYLE_SELECT */
    EVENT_MASK_CLR(fd, &event_xmask);
    EVENT_MASK_CLR(fd, &event_rmask);
    EVENT_MASK_CLR(fd, &event_wmask);
    fdp = event_fdtable + fd;
    fdp->callback = 0;
    fdp->context = 0;
}

/* event_request_timer - (re)set timer */

time_t  event_request_timer(EVENT_NOTIFY_TIME callback, char *context, int delay)
{
    const char *myname = "event_request_timer";
    RING   *ring;
    EVENT_TIMER *timer;

    if (EVENT_INIT_NEEDED())
	event_init();

    /*
     * Sanity checks.
     */
    if (delay < 0)
	msg_panic("%s: invalid delay: %d", myname, delay);

    /*
     * Make sure we schedule this event at the right time.
     */
    time(&event_present);

    /*
     * See if they are resetting an existing timer request. If so, take the
     * request away from the timer queue so that it can be inserted at the
     * right place.
     */
    FOREACH_QUEUE_ENTRY(ring, &event_timer_head) {
	timer = RING_TO_TIMER(ring);
	if (timer->callback == callback && timer->context == context) {
	    timer->when = event_present + delay;
	    timer->loop_instance = event_loop_instance;
	    ring_detach(ring);
	    if (msg_verbose > 2)
		msg_info("%s: reset 0x%lx 0x%lx %d", myname,
			 (long) callback, (long) context, delay);
	    break;
	}
    }

    /*
     * If not found, schedule a new timer request.
     */
    if (ring == &event_timer_head) {
	timer = (EVENT_TIMER *) mymalloc(sizeof(EVENT_TIMER));
	timer->when = event_present + delay;
	timer->callback = callback;
	timer->context = context;
	timer->loop_instance = event_loop_instance;
	if (msg_verbose > 2)
	    msg_info("%s: set 0x%lx 0x%lx %d", myname,
		     (long) callback, (long) context, delay);
    }

    /*
     * Timer requests are kept sorted to reduce lookup overhead in the event
     * loop.
     *
     * XXX Append the new request after existing requests for the same time
     * slot. The event_loop() routine depends on this to avoid starving I/O
     * events when a call-back function schedules a zero-delay timer request.
     */
    FOREACH_QUEUE_ENTRY(ring, &event_timer_head) {
	if (timer->when < RING_TO_TIMER(ring)->when)
	    break;
    }
    ring_prepend(ring, &timer->ring);

    return (timer->when);
}
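
 /*
  * Illustrative usage (hypothetical callback and context names): a second
  * request for the same (callback, context) pair moves the pending timer
  * instead of scheduling a duplicate, so at most one idle_timeout event
  * can be outstanding per session.
  *
  *	event_request_timer(idle_timeout, (char *) session, 10);
  *	event_request_timer(idle_timeout, (char *) session, 30);
  */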
974 */ 975 FOREACH_QUEUE_ENTRY(ring, &event_timer_head) { 976 timer = RING_TO_TIMER(ring); 977 if (timer->callback == callback && timer->context == context) { 978 if ((time_left = timer->when - event_present) < 0) 979 time_left = 0; 980 ring_detach(ring); 981 myfree((char *) timer); 982 break; 983 } 984 } 985 if (msg_verbose > 2) 986 msg_info("%s: 0x%lx 0x%lx %d", myname, 987 (long) callback, (long) context, time_left); 988 return (time_left); 989 } 990 991 /* event_loop - wait for the next event */ 992 993 void event_loop(int delay) 994 { 995 const char *myname = "event_loop"; 996 static int nested; 997 998 #if (EVENTS_STYLE == EVENTS_STYLE_SELECT) 999 fd_set rmask; 1000 fd_set wmask; 1001 fd_set xmask; 1002 struct timeval tv; 1003 struct timeval *tvp; 1004 int new_max_fd; 1005 1006 #else 1007 EVENT_BUFFER event_buf[100]; 1008 EVENT_BUFFER *bp; 1009 1010 #endif 1011 int event_count; 1012 EVENT_TIMER *timer; 1013 int fd; 1014 EVENT_FDTABLE *fdp; 1015 int select_delay; 1016 1017 if (EVENT_INIT_NEEDED()) 1018 event_init(); 1019 1020 /* 1021 * XXX Also print the select() masks? 1022 */ 1023 if (msg_verbose > 2) { 1024 RING *ring; 1025 1026 FOREACH_QUEUE_ENTRY(ring, &event_timer_head) { 1027 timer = RING_TO_TIMER(ring); 1028 msg_info("%s: time left %3d for 0x%lx 0x%lx", myname, 1029 (int) (timer->when - event_present), 1030 (long) timer->callback, (long) timer->context); 1031 } 1032 } 1033 1034 /* 1035 * Find out when the next timer would go off. Timer requests are sorted. 1036 * If any timer is scheduled, adjust the delay appropriately. 1037 */ 1038 if ((timer = FIRST_TIMER(&event_timer_head)) != 0) { 1039 event_present = time((time_t *) 0); 1040 if ((select_delay = timer->when - event_present) < 0) { 1041 select_delay = 0; 1042 } else if (delay >= 0 && select_delay > delay) { 1043 select_delay = delay; 1044 } 1045 } else { 1046 select_delay = delay; 1047 } 1048 if (msg_verbose > 2) 1049 msg_info("event_loop: select_delay %d", select_delay); 1050 1051 /* 1052 * Negative delay means: wait until something happens. Zero delay means: 1053 * poll. Positive delay means: wait at most this long. 1054 */ 1055 #if (EVENTS_STYLE == EVENTS_STYLE_SELECT) 1056 if (select_delay < 0) { 1057 tvp = 0; 1058 } else { 1059 tvp = &tv; 1060 tv.tv_usec = 0; 1061 tv.tv_sec = select_delay; 1062 } 1063 1064 /* 1065 * Pause until the next event happens. When select() has a problem, don't 1066 * go into a tight loop. Allow select() to be interrupted due to the 1067 * arrival of a signal. 1068 */ 1069 rmask = event_rmask; 1070 wmask = event_wmask; 1071 xmask = event_xmask; 1072 1073 event_count = select(event_max_fd + 1, &rmask, &wmask, &xmask, tvp); 1074 if (event_count < 0) { 1075 if (errno != EINTR) 1076 msg_fatal("event_loop: select: %m"); 1077 return; 1078 } 1079 #else 1080 EVENT_BUFFER_READ(event_count, event_buf, 1081 sizeof(event_buf) / sizeof(event_buf[0]), 1082 select_delay); 1083 if (event_count < 0) { 1084 if (errno != EINTR) 1085 msg_fatal("event_loop: " EVENT_BUFFER_READ_TEXT ": %m"); 1086 return; 1087 } 1088 #endif 1089 1090 /* 1091 * Before entering the application call-back routines, make sure we 1092 * aren't being called from a call-back routine. Doing so would make us 1093 * vulnerable to all kinds of race conditions. 1094 */ 1095 if (nested++ > 0) 1096 msg_panic("event_loop: recursive call"); 1097 1098 /* 1099 * Deliver timer events. Allow the application to add/delete timer queue 1100 * requests while it is being called back. 

    /*
     * Deliver timer events. Allow the application to add/delete timer queue
     * requests while it is being called back. Requests are sorted: we keep
     * running over the timer request queue from the start, and stop when we
     * reach the future or the list end. We also stop when we reach a timer
     * request that was added by a call-back that was invoked from this
     * event_loop() call instance, for reasons that are explained below.
     *
     * To avoid dangling pointer problems 1) we must remove a request from the
     * timer queue before delivering its event to the application and 2) we
     * must look up the next timer request *after* calling the application.
     * The latter complicates the handling of zero-delay timer requests that
     * are added by event_loop() call-back functions.
     *
     * XXX When a timer event call-back function adds a new timer request,
     * event_request_timer() labels the request with the event_loop() call
     * instance that invoked the timer event call-back. We use this instance
     * label here to prevent zero-delay timer requests from running in a
     * tight loop and starving I/O events. To make this solution work,
     * event_request_timer() appends a new request after existing requests
     * for the same time slot.
     */
    event_present = time((time_t *) 0);
    event_loop_instance += 1;

    while ((timer = FIRST_TIMER(&event_timer_head)) != 0) {
	if (timer->when > event_present)
	    break;
	if (timer->loop_instance == event_loop_instance)
	    break;
	ring_detach(&timer->ring);		/* first this */
	if (msg_verbose > 2)
	    msg_info("%s: timer 0x%lx 0x%lx", myname,
		     (long) timer->callback, (long) timer->context);
	timer->callback(EVENT_TIME, timer->context);	/* then this */
	myfree((char *) timer);
    }

    /*
     * Deliver I/O events. Allow the application to cancel event requests
     * while it is being called back. To this end, we keep an eye on the
     * contents of event_xmask, so that we deliver only events that are still
     * wanted. We do not change the event request masks. It is up to the
     * application to determine when a read or write is complete.
     */
#if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
    if (event_count > 0) {
	for (new_max_fd = 0, fd = 0; fd <= event_max_fd; fd++) {
	    if (FD_ISSET(fd, &event_xmask)) {
		new_max_fd = fd;
		/* In case event_fdtable is updated. */
		fdp = event_fdtable + fd;
		if (FD_ISSET(fd, &xmask)) {
		    if (msg_verbose > 2)
			msg_info("%s: exception fd=%d act=0x%lx 0x%lx", myname,
			     fd, (long) fdp->callback, (long) fdp->context);
		    fdp->callback(EVENT_XCPT, fdp->context);
		} else if (FD_ISSET(fd, &wmask)) {
		    if (msg_verbose > 2)
			msg_info("%s: write fd=%d act=0x%lx 0x%lx", myname,
			     fd, (long) fdp->callback, (long) fdp->context);
		    fdp->callback(EVENT_WRITE, fdp->context);
		} else if (FD_ISSET(fd, &rmask)) {
		    if (msg_verbose > 2)
			msg_info("%s: read fd=%d act=0x%lx 0x%lx", myname,
			     fd, (long) fdp->callback, (long) fdp->context);
		    fdp->callback(EVENT_READ, fdp->context);
		}
	    }
	}
	event_max_fd = new_max_fd;
    }
#else
    for (bp = event_buf; bp < event_buf + event_count; bp++) {
	fd = EVENT_GET_FD(bp);
	if (fd < 0 || fd > event_max_fd)
	    msg_panic("%s: bad file descriptor: %d", myname, fd);
	if (EVENT_MASK_ISSET(fd, &event_xmask)) {
	    fdp = event_fdtable + fd;
	    if (EVENT_TEST_READ(bp)) {
		if (msg_verbose > 2)
		    msg_info("%s: read fd=%d act=0x%lx 0x%lx", myname,
			     fd, (long) fdp->callback, (long) fdp->context);
		fdp->callback(EVENT_READ, fdp->context);
	    } else if (EVENT_TEST_WRITE(bp)) {
		if (msg_verbose > 2)
		    msg_info("%s: write fd=%d act=0x%lx 0x%lx", myname,
			     fd, (long) fdp->callback,
			     (long) fdp->context);
		fdp->callback(EVENT_WRITE, fdp->context);
	    } else {
		if (msg_verbose > 2)
		    msg_info("%s: other fd=%d act=0x%lx 0x%lx", myname,
			     fd, (long) fdp->callback, (long) fdp->context);
		fdp->callback(EVENT_XCPT, fdp->context);
	    }
	}
    }
#endif
    nested--;
}

#ifdef TEST

 /*
  * Proof-of-concept test program for the event manager. Schedule a series of
  * events at one-second intervals and let them happen, while echoing any
  * lines read from stdin.
  */
#include <stdio.h>
#include <ctype.h>
#include <stdlib.h>

/* timer_event - display event */

static void timer_event(int unused_event, char *context)
{
    printf("%ld: %s\n", (long) event_present, context);
    fflush(stdout);
}

/* echo - echo text received on stdin */

static void echo(int unused_event, char *unused_context)
{
    char    buf[BUFSIZ];

    if (fgets(buf, sizeof(buf), stdin) == 0)
	exit(0);
    printf("Result: %s", buf);
}

/* request - request a bunch of timer events */

static void request(int unused_event, char *unused_context)
{
    event_request_timer(timer_event, "3 first", 3);
    event_request_timer(timer_event, "3 second", 3);
    event_request_timer(timer_event, "4 first", 4);
    event_request_timer(timer_event, "4 second", 4);
    event_request_timer(timer_event, "2 first", 2);
    event_request_timer(timer_event, "2 second", 2);
    event_request_timer(timer_event, "1 first", 1);
    event_request_timer(timer_event, "1 second", 1);
    event_request_timer(timer_event, "0 first", 0);
    event_request_timer(timer_event, "0 second", 0);
}

int     main(int argc, char **argv)
{
    if (argv[1])
	msg_verbose = atoi(argv[1]);
    event_request_timer(request, (char *) 0, 0);
    event_enable_read(fileno(stdin), echo, (char *) 0);
    event_drain(10);
    exit(0);
}

#endif
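
 /*
  * Expected behavior, for orientation (compile with -DTEST to build the
  * driver above; the exact objects to link against depend on the
  * surrounding source tree): the timer lines print in increasing delay
  * order, "0 first", "0 second", "1 first", and so on, because requests
  * for the same time slot are appended in arrival order, while any text
  * typed on stdin is echoed back as it arrives.
  */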