/* $OpenBSD: event.c,v 1.23 2010/04/21 21:02:46 nicm Exp $ */

/*
 * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_libevent_time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef WIN32
#include <unistd.h>
#endif
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <assert.h>
#include <time.h>

#include "event.h"
#include "event-internal.h"
#include "evutil.h"
#include "log.h"

#ifdef HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef WIN32
extern const struct eventop win32ops;
#endif

/* In order of preference */
static const struct eventop *eventops[] = {
#ifdef HAVE_EVENT_PORTS
	&evportops,
#endif
#ifdef HAVE_WORKING_KQUEUE
	&kqops,
#endif
#ifdef HAVE_EPOLL
	&epollops,
#endif
#ifdef HAVE_DEVPOLL
	&devpollops,
#endif
#ifdef HAVE_POLL
	&pollops,
#endif
#ifdef HAVE_SELECT
	&selectops,
#endif
#ifdef WIN32
	&win32ops,
#endif
	NULL
};

/* Global state */
struct event_base *current_base = NULL;
extern struct event_base *evsignal_base;
static int use_monotonic;

/* Prototypes */
static void	event_queue_insert(struct event_base *, struct event *, int);
static void	event_queue_remove(struct event_base *, struct event *, int);
static int	event_haveevents(struct event_base *);

static void	event_process_active(struct event_base *);

static int	timeout_next(struct event_base *, struct timeval **);
static void	timeout_process(struct event_base *);
static void	timeout_correct(struct event_base *, struct timeval *);

static void
detect_monotonic(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	struct timespec	ts;

	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		use_monotonic = 1;
#endif
}

static int
gettime(struct event_base *base, struct timeval *tp)
{
	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return (0);
	}

#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	if (use_monotonic) {
		struct timespec	ts;

		if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
			return (-1);

		tp->tv_sec = ts.tv_sec;
		tp->tv_usec = ts.tv_nsec / 1000;
		return (0);
	}
#endif

	return (evutil_gettimeofday(tp, NULL));
}

struct event_base *
event_init(void)
{
	struct event_base *base = event_base_new();

	if (base != NULL)
		current_base = base;

	return (base);
}

struct event_base *
event_base_new(void)
{
	int i;
	struct event_base *base;

	if ((base = calloc(1, sizeof(struct event_base))) == NULL)
		event_err(1, "%s: calloc", __func__);

	detect_monotonic();
	gettime(base, &base->event_tv);

	min_heap_ctor(&base->timeheap);
	TAILQ_INIT(&base->eventqueue);
	base->sig.ev_signal_pair[0] = -1;
	base->sig.ev_signal_pair[1] = -1;

	base->evbase = NULL;
	for (i = 0; eventops[i] && !base->evbase; i++) {
		base->evsel = eventops[i];

		base->evbase = base->evsel->init(base);
	}

	if (base->evbase == NULL)
		event_errx(1, "%s: no event mechanism available", __func__);

	if (evutil_getenv("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s\n",
		    base->evsel->name);

	/* allocate a single active event queue */
	event_base_priority_init(base, 1);

	return (base);
}
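
/*
 * Example: typical setup and teardown around event_base_new().  This is an
 * illustrative sketch, not part of this file; error handling is abbreviated
 * and the "add events, run the loop" step is elided:
 *
 *	struct event_base *base = event_base_new();
 *	printf("backend: %s\n", event_base_get_method(base));
 *	... add events and run the loop ...
 *	event_base_free(base);
 */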

void
event_base_free(struct event_base *base)
{
	int i, n_deleted=0;
	struct event *ev;

	if (base == NULL && current_base)
		base = current_base;
	if (base == current_base)
		current_base = NULL;

	/* XXX(niels) - check for internal events first */
	assert(base);
	/* Delete all non-internal events. */
	for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
		struct event *next = TAILQ_NEXT(ev, ev_next);
		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
			event_del(ev);
			++n_deleted;
		}
		ev = next;
	}
	while ((ev = min_heap_top(&base->timeheap)) != NULL) {
		event_del(ev);
		++n_deleted;
	}

	for (i = 0; i < base->nactivequeues; ++i) {
		for (ev = TAILQ_FIRST(base->activequeues[i]); ev; ) {
			struct event *next = TAILQ_NEXT(ev, ev_active_next);
			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
				event_del(ev);
				++n_deleted;
			}
			ev = next;
		}
	}

	if (n_deleted)
		event_debug(("%s: %d events were still set in base",
		    __func__, n_deleted));

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);

	for (i = 0; i < base->nactivequeues; ++i)
		assert(TAILQ_EMPTY(base->activequeues[i]));

	assert(min_heap_empty(&base->timeheap));
	min_heap_dtor(&base->timeheap);

	for (i = 0; i < base->nactivequeues; ++i)
		free(base->activequeues[i]);
	free(base->activequeues);

	assert(TAILQ_EMPTY(&base->eventqueue));

	free(base);
}

/* reinitialize the event base after a fork */
int
event_reinit(struct event_base *base)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	int res = 0;
	struct event *ev;

	/* check if this event mechanism requires reinit */
	if (!evsel->need_reinit)
		return (0);

	/* prevent internal delete */
	if (base->sig.ev_signal_added) {
		/* we cannot call event_del here because the base has
		 * not been reinitialized yet. */
		event_queue_remove(base, &base->sig.ev_signal,
		    EVLIST_INSERTED);
		if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
			event_queue_remove(base, &base->sig.ev_signal,
			    EVLIST_ACTIVE);
		base->sig.ev_signal_added = 0;
	}

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);
	evbase = base->evbase = evsel->init(base);
	if (base->evbase == NULL)
		event_errx(1, "%s: could not reinitialize event mechanism",
		    __func__);

	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
		if (evsel->add(evbase, ev) == -1)
			res = -1;
	}

	return (res);
}

int
event_priority_init(int npriorities)
{
	return event_base_priority_init(current_base, npriorities);
}

int
event_base_priority_init(struct event_base *base, int npriorities)
{
	int i;

	if (base->event_count_active)
		return (-1);

	if (base->nactivequeues && npriorities != base->nactivequeues) {
		for (i = 0; i < base->nactivequeues; ++i) {
			free(base->activequeues[i]);
		}
		free(base->activequeues);
	}

	/* Allocate our priority queues */
	base->nactivequeues = npriorities;
	base->activequeues = (struct event_list **)
	    calloc(base->nactivequeues, sizeof(struct event_list *));
	if (base->activequeues == NULL)
		event_err(1, "%s: calloc", __func__);

	for (i = 0; i < base->nactivequeues; ++i) {
		base->activequeues[i] = malloc(sizeof(struct event_list));
		if (base->activequeues[i] == NULL)
			event_err(1, "%s: malloc", __func__);
		TAILQ_INIT(base->activequeues[i]);
	}

	return (0);
}

int
event_haveevents(struct event_base *base)
{
	return (base->event_count > 0);
}
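
/*
 * Example: give a freshly created base three priority levels (a sketch, not
 * part of this file).  Note that event_base_priority_init() must run before
 * any event becomes active; it returns -1 once event_count_active is
 * nonzero:
 *
 *	struct event_base *base = event_base_new();
 *	if (event_base_priority_init(base, 3) == -1)
 *		errx(1, "priority init failed");
 */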

/*
 * Active events are stored in priority queues.  Lower priorities are always
 * processed before higher priorities.  Low-priority events can starve
 * high-priority ones.
 */
static void
event_process_active(struct event_base *base)
{
	struct event *ev;
	struct event_list *activeq = NULL;
	int i;
	short ncalls;

	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
			activeq = base->activequeues[i];
			break;
		}
	}

	assert(activeq != NULL);

	for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
		if (ev->ev_events & EV_PERSIST)
			event_queue_remove(base, ev, EVLIST_ACTIVE);
		else
			event_del(ev);

		/* Allows deletes to work */
		ncalls = ev->ev_ncalls;
		ev->ev_pncalls = &ncalls;
		while (ncalls) {
			ncalls--;
			ev->ev_ncalls = ncalls;
			(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
			if (base->event_break)
				return;
		}
	}
}

/*
 * Wait continuously for events.  We exit only if no events are left.
 */
int
event_dispatch(void)
{
	return (event_loop(0));
}

int
event_base_dispatch(struct event_base *event_base)
{
	return (event_base_loop(event_base, 0));
}

const char *
event_base_get_method(struct event_base *base)
{
	assert(base);
	return (base->evsel->name);
}

static void
event_loopexit_cb(int fd, short what, void *arg)
{
	struct event_base *base = arg;
	base->event_gotterm = 1;
}

/* not thread safe */
int
event_loopexit(const struct timeval *tv)
{
	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
	    current_base, tv));
}

int
event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
{
	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
	    event_base, tv));
}

/* not thread safe */
int
event_loopbreak(void)
{
	return (event_base_loopbreak(current_base));
}

int
event_base_loopbreak(struct event_base *event_base)
{
	if (event_base == NULL)
		return (-1);

	event_base->event_break = 1;
	return (0);
}
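
/*
 * Example: two ways to leave the loop (a sketch; "base" and the callback
 * registration are assumed).  event_base_loopbreak() stops the loop as soon
 * as the current callback returns, while event_base_loopexit() schedules
 * termination through a one-shot timeout, here after five seconds:
 *
 *	static void
 *	stop_cb(int fd, short what, void *arg)
 *	{
 *		struct event_base *base = arg;
 *		event_base_loopbreak(base);
 *	}
 *
 *	struct timeval tv = { 5, 0 };
 *	event_base_loopexit(base, &tv);
 */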

/* not thread safe */
int
event_loop(int flags)
{
	return event_base_loop(current_base, flags);
}

int
event_base_loop(struct event_base *base, int flags)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	struct timeval tv;
	struct timeval *tv_p;
	int res, done;

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	if (base->sig.ev_signal_added)
		evsignal_base = base;
	done = 0;
	while (!done) {
		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			base->event_gotterm = 0;
			break;
		}

		if (base->event_break) {
			base->event_break = 0;
			break;
		}

		timeout_correct(base, &tv);

		tv_p = &tv;
		if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK)) {
			timeout_next(base, &tv_p);
		} else {
			/*
			 * if we have active events, we just poll new events
			 * without waiting.
			 */
			evutil_timerclear(&tv);
		}

		/* If we have no events, we just exit */
		if (!event_haveevents(base)) {
			event_debug(("%s: no events registered.", __func__));
			return (1);
		}

		/* update last old time */
		gettime(base, &base->event_tv);

		/* clear time cache */
		base->tv_cache.tv_sec = 0;

		res = evsel->dispatch(base, evbase, tv_p);

		if (res == -1)
			return (-1);
		gettime(base, &base->tv_cache);

		timeout_process(base);

		if (base->event_count_active) {
			event_process_active(base);
			if (!base->event_count_active && (flags & EVLOOP_ONCE))
				done = 1;
		} else if (flags & EVLOOP_NONBLOCK)
			done = 1;
	}

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	event_debug(("%s: asked to terminate loop.", __func__));
	return (0);
}

/* Sets up an event for processing once */
struct event_once {
	struct event ev;

	void (*cb)(int, short, void *);
	void *arg;
};

/* One-time callback; it deletes itself */
static void
event_once_cb(int fd, short events, void *arg)
{
	struct event_once *eonce = arg;

	(*eonce->cb)(fd, events, eonce->arg);
	free(eonce);
}

/* not threadsafe, event scheduled once. */
int
event_once(int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
	return event_base_once(current_base, fd, events, callback, arg, tv);
}

/* Schedules an event once */
int
event_base_once(struct event_base *base, int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
	struct event_once *eonce;
	struct timeval etv;
	int res;

	/* We cannot support signals that just fire once */
	if (events & EV_SIGNAL)
		return (-1);

	if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)
		return (-1);

	eonce->cb = callback;
	eonce->arg = arg;

	if (events == EV_TIMEOUT) {
		if (tv == NULL) {
			evutil_timerclear(&etv);
			tv = &etv;
		}

		evtimer_set(&eonce->ev, event_once_cb, eonce);
	} else if (events & (EV_READ|EV_WRITE)) {
		events &= EV_READ|EV_WRITE;

		event_set(&eonce->ev, fd, events, event_once_cb, eonce);
	} else {
		/* Bad event combination */
		free(eonce);
		return (-1);
	}

	res = event_base_set(base, &eonce->ev);
	if (res == 0)
		res = event_add(&eonce->ev, tv);
	if (res != 0) {
		free(eonce);
		return (res);
	}

	return (0);
}

void
event_set(struct event *ev, int fd, short events,
    void (*callback)(int, short, void *), void *arg)
{
	/* Take the current base - caller needs to set the real base later */
	ev->ev_base = current_base;

	ev->ev_callback = callback;
	ev->ev_arg = arg;
	ev->ev_fd = fd;
	ev->ev_events = events;
	ev->ev_res = 0;
	ev->ev_flags = EVLIST_INIT;
	ev->ev_ncalls = 0;
	ev->ev_pncalls = NULL;

	min_heap_elem_init(ev);

	/* by default, we put new events into the middle priority */
	if (current_base)
		ev->ev_pri = current_base->nactivequeues/2;
}

int
event_base_set(struct event_base *base, struct event *ev)
{
	/* Only innocent events may be assigned to a different base */
	if (ev->ev_flags != EVLIST_INIT)
		return (-1);

	ev->ev_base = base;
	ev->ev_pri = base->nactivequeues/2;

	return (0);
}
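
/*
 * Example: the usual add-an-event sequence against the functions above (a
 * sketch; "on_read", "fd" and "base" are hypothetical).  The callback's
 * "what" argument will be EV_READ if the descriptor became readable, or
 * EV_TIMEOUT if the ten-second timeout expired first:
 *
 *	static void
 *	on_read(int fd, short what, void *arg)
 *	{
 *		... read from fd, or handle EV_TIMEOUT ...
 *	}
 *
 *	struct event ev;
 *	struct timeval tv = { 10, 0 };
 *
 *	event_set(&ev, fd, EV_READ, on_read, NULL);
 *	event_base_set(base, &ev);
 *	event_add(&ev, &tv);
 *	event_base_dispatch(base);
 */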

/*
 * Sets the priority of an event - if the event is already scheduled,
 * changing the priority is going to fail.
 */
int
event_priority_set(struct event *ev, int pri)
{
	if (ev->ev_flags & EVLIST_ACTIVE)
		return (-1);
	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
		return (-1);

	ev->ev_pri = pri;

	return (0);
}

/*
 * Checks if a specific event is pending or scheduled.
 */
int
event_pending(struct event *ev, short event, struct timeval *tv)
{
	struct timeval now, res;
	int flags = 0;

	if (ev->ev_flags & EVLIST_INSERTED)
		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
	if (ev->ev_flags & EVLIST_ACTIVE)
		flags |= ev->ev_res;
	if (ev->ev_flags & EVLIST_TIMEOUT)
		flags |= EV_TIMEOUT;

	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);

	/* See if there is a timeout that we should report */
	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
		gettime(ev->ev_base, &now);
		evutil_timersub(&ev->ev_timeout, &now, &res);
		/* correctly remap to real time */
		evutil_gettimeofday(&now, NULL);
		evutil_timeradd(&now, &res, tv);
	}

	return (flags & event);
}
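
/*
 * Example: checking whether a timeout is still pending and when it will fire
 * (a sketch; "ev" was added with a timeout as in the earlier example).  Note
 * that event_pending() remaps the internal, possibly monotonic, deadline to
 * the real-time clock before reporting it:
 *
 *	struct timeval tv;
 *
 *	if (event_pending(&ev, EV_TIMEOUT, &tv))
 *		printf("timeout pending, fires near %ld\n", (long)tv.tv_sec);
 */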

int
event_add(struct event *ev, const struct timeval *tv)
{
	struct event_base *base = ev->ev_base;
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	int res = 0;

	event_debug((
	    "event_add: event: %p, %s%s%scall %p",
	    ev,
	    ev->ev_events & EV_READ ? "EV_READ " : " ",
	    ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
	    tv ? "EV_TIMEOUT " : " ",
	    ev->ev_callback));

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/*
	 * prepare for timeout insertion further below; if we get a
	 * failure on any step, we should not change any state.
	 */
	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
		if (min_heap_reserve(&base->timeheap,
		    1 + min_heap_size(&base->timeheap)) == -1)
			return (-1);  /* ENOMEM == errno */
	}

	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
		res = evsel->add(evbase, ev);
		if (res != -1)
			event_queue_insert(base, ev, EVLIST_INSERTED);
	}

	/*
	 * we should change the timeout state only if the previous event
	 * addition succeeded.
	 */
	if (res != -1 && tv != NULL) {
		struct timeval now;

		/*
		 * we already reserved memory above for the case where we
		 * are not replacing an existing timeout.
		 */
		if (ev->ev_flags & EVLIST_TIMEOUT)
			event_queue_remove(base, ev, EVLIST_TIMEOUT);

		/* Check if it is active due to a timeout.  Rescheduling
		 * this timeout before the callback can be executed
		 * removes it from the active list. */
		if ((ev->ev_flags & EVLIST_ACTIVE) &&
		    (ev->ev_res & EV_TIMEOUT)) {
			/* See if we are just active executing this
			 * event in a loop
			 */
			if (ev->ev_ncalls && ev->ev_pncalls) {
				/* Abort loop */
				*ev->ev_pncalls = 0;
			}

			event_queue_remove(base, ev, EVLIST_ACTIVE);
		}

		gettime(base, &now);
		evutil_timeradd(&now, tv, &ev->ev_timeout);

		event_debug((
		    "event_add: timeout in %ld seconds, call %p",
		    tv->tv_sec, ev->ev_callback));

		event_queue_insert(base, ev, EVLIST_TIMEOUT);
	}

	return (res);
}

int
event_del(struct event *ev)
{
	struct event_base *base;
	const struct eventop *evsel;
	void *evbase;

	event_debug(("event_del: %p, callback %p",
	    ev, ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)
		return (-1);

	base = ev->ev_base;
	evsel = base->evsel;
	evbase = base->evbase;

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if we are just active executing this event in a loop */
	if (ev->ev_ncalls && ev->ev_pncalls) {
		/* Abort loop */
		*ev->ev_pncalls = 0;
	}

	if (ev->ev_flags & EVLIST_TIMEOUT)
		event_queue_remove(base, ev, EVLIST_TIMEOUT);

	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove(base, ev, EVLIST_ACTIVE);

	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove(base, ev, EVLIST_INSERTED);
		return (evsel->del(evbase, ev));
	}

	return (0);
}

void
event_active(struct event *ev, int res, short ncalls)
{
	/* We get different kinds of events, add them together */
	if (ev->ev_flags & EVLIST_ACTIVE) {
		ev->ev_res |= res;
		return;
	}

	ev->ev_res = res;
	ev->ev_ncalls = ncalls;
	ev->ev_pncalls = NULL;
	event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
}

static int
timeout_next(struct event_base *base, struct timeval **tv_p)
{
	struct timeval now;
	struct event *ev;
	struct timeval *tv = *tv_p;

	if ((ev = min_heap_top(&base->timeheap)) == NULL) {
		/* if no time-based events are active, wait for I/O */
		*tv_p = NULL;
		return (0);
	}

	if (gettime(base, &now) == -1)
		return (-1);

	if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
		evutil_timerclear(tv);
		return (0);
	}

	evutil_timersub(&ev->ev_timeout, &now, tv);

	assert(tv->tv_sec >= 0);
	assert(tv->tv_usec >= 0);

	event_debug(("timeout_next: in %ld seconds", tv->tv_sec));
	return (0);
}
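
/*
 * Example: event_active() lets a caller run an event's callback without any
 * I/O or timeout having occurred, which is a common way to inject work into
 * the loop (a sketch; "ev" is an already set-up event).  The result flags
 * appear in the callback's "what" argument, and ncalls is usually 1:
 *
 *	event_active(&ev, EV_READ, 1);
 */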

/*
 * Determines if the time is running backwards by comparing the current
 * time against the last time we checked.  Not needed when using clock
 * monotonic.
 */
static void
timeout_correct(struct event_base *base, struct timeval *tv)
{
	struct event **pev;
	unsigned int size;
	struct timeval off;

	if (use_monotonic)
		return;

	/* Check if time is running backwards */
	gettime(base, tv);
	if (evutil_timercmp(tv, &base->event_tv, >=)) {
		base->event_tv = *tv;
		return;
	}

	event_debug(("%s: time is running backwards, corrected",
	    __func__));
	evutil_timersub(&base->event_tv, tv, &off);

	/*
	 * We can modify the key element of the node without destroying
	 * the key, because we apply it to all in the right order.
	 */
	pev = base->timeheap.p;
	size = base->timeheap.n;
	for (; size-- > 0; ++pev) {
		struct timeval *ev_tv = &(**pev).ev_timeout;
		evutil_timersub(ev_tv, &off, ev_tv);
	}
	/* Now remember what the new time turned out to be. */
	base->event_tv = *tv;
}

void
timeout_process(struct event_base *base)
{
	struct timeval now;
	struct event *ev;

	if (min_heap_empty(&base->timeheap))
		return;

	gettime(base, &now);

	while ((ev = min_heap_top(&base->timeheap))) {
		if (evutil_timercmp(&ev->ev_timeout, &now, >))
			break;

		/* delete this event from the I/O queues */
		event_del(ev);

		event_debug(("timeout_process: call %p",
		    ev->ev_callback));
		event_active(ev, EV_TIMEOUT, 1);
	}
}

void
event_queue_remove(struct event_base *base, struct event *ev, int queue)
{
	if (!(ev->ev_flags & queue))
		event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
		    ev, ev->ev_fd, queue);

	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count--;

	ev->ev_flags &= ~queue;
	switch (queue) {
	case EVLIST_INSERTED:
		TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
		break;
	case EVLIST_ACTIVE:
		base->event_count_active--;
		TAILQ_REMOVE(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_TIMEOUT:
		min_heap_erase(&base->timeheap, ev);
		break;
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}

void
event_queue_insert(struct event_base *base, struct event *ev, int queue)
{
	if (ev->ev_flags & queue) {
		/* Double insertion is possible for active events */
		if (queue & EVLIST_ACTIVE)
			return;

		event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
		    ev, ev->ev_fd, queue);
	}

	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count++;

	ev->ev_flags |= queue;
	switch (queue) {
	case EVLIST_INSERTED:
		TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
		break;
	case EVLIST_ACTIVE:
		base->event_count_active++;
		TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_TIMEOUT: {
		min_heap_push(&base->timeheap, ev);
		break;
	}
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}

/* Functions for debugging */

const char *
event_get_version(void)
{
	return ("1.4.13-stable");
}

/*
 * No thread-safe interface needed - the information should be the same
 * for all threads.
 */
const char *
event_get_method(void)
{
	return (current_base->evsel->name);
}
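
/*
 * Putting the pieces together: a minimal timer program built on this API (a
 * sketch of typical libevent 1.4 usage, compiled separately against
 * <event.h>; not part of this file).  evtimer_set() and evtimer_add() are
 * the timer convenience macros from event.h:
 *
 *	#include <event.h>
 *	#include <stdio.h>
 *
 *	static void
 *	on_timer(int fd, short what, void *arg)
 *	{
 *		printf("timer fired\n");
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		struct event ev;
 *		struct timeval tv = { 1, 0 };
 *
 *		event_init();
 *		evtimer_set(&ev, on_timer, NULL);
 *		evtimer_add(&ev, &tv);
 *		return (event_dispatch());
 *	}
 */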