/*
 * netio.c -- network I/O support.
 *
 * Copyright (c) 2001-2006, NLnet Labs. All rights reserved.
 *
 * See LICENSE for the license.
 *
 */
#include "config.h"

#include <assert.h>
#include <errno.h>
#include <sys/time.h>
#include <string.h>
#include <stdlib.h>
#include <poll.h>

#include "netio.h"
#include "util.h"

#define MAX_NETIO_FDS 1024

netio_type *
netio_create(region_type *region)
{
	netio_type *result;

	assert(region);

	result = (netio_type *) region_alloc(region, sizeof(netio_type));
	result->region = region;
	result->handlers = NULL;
	result->deallocated = NULL;
	result->dispatch_next = NULL;
	return result;
}

void
netio_add_handler(netio_type *netio, netio_handler_type *handler)
{
	netio_handler_list_type *elt;

	assert(netio);
	assert(handler);

	if (netio->deallocated) {
		/*
		 * If we have deallocated handler list elements, reuse
		 * the first one.
		 */
		elt = netio->deallocated;
		netio->deallocated = elt->next;
	} else {
		/*
		 * Allocate a new one.
		 */
		elt = (netio_handler_list_type *) region_alloc(
			netio->region, sizeof(netio_handler_list_type));
	}

	elt->next = netio->handlers;
	elt->handler = handler;
	elt->handler->pfd = -1;
	netio->handlers = elt;
}

void
netio_remove_handler(netio_type *netio, netio_handler_type *handler)
{
	netio_handler_list_type **elt_ptr;

	assert(netio);
	assert(handler);

	for (elt_ptr = &netio->handlers; *elt_ptr; elt_ptr = &(*elt_ptr)->next) {
		if ((*elt_ptr)->handler == handler) {
			netio_handler_list_type *next = (*elt_ptr)->next;
			if ((*elt_ptr) == netio->dispatch_next)
				netio->dispatch_next = next;
			(*elt_ptr)->handler = NULL;
			(*elt_ptr)->next = netio->deallocated;
			netio->deallocated = *elt_ptr;
			*elt_ptr = next;
			break;
		}
	}
}

const struct timespec *
netio_current_time(netio_type *netio)
{
	assert(netio);

	if (!netio->have_current_time) {
		struct timeval current_timeval;
		if (gettimeofday(&current_timeval, NULL) == -1) {
			log_msg(LOG_ERR, "gettimeofday: %s, aborting.", strerror(errno));
			abort();
		}
		timeval_to_timespec(&netio->cached_current_time, &current_timeval);
		netio->have_current_time = 1;
	}

	return &netio->cached_current_time;
}
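
/*
 * A minimal sketch of how handler timeouts are typically armed (the
 * handler variable and the ten second period are hypothetical): the
 * timeout field points at an absolute expiry time, which callers can
 * compute from netio_current_time() above, and which must stay valid
 * while the handler is registered:
 *
 *	static struct timespec expiry;
 *	expiry = *netio_current_time(netio);
 *	expiry.tv_sec += 10;
 *	handler.timeout = &expiry;
 *	handler.event_types |= NETIO_EVENT_TIMEOUT;
 *
 * netio_dispatch() below turns this absolute time into a relative poll
 * timeout and invokes the handler with NETIO_EVENT_TIMEOUT once the
 * expiry has passed; a handler that wants to fire periodically re-arms
 * the timeout from its callback.
 */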

int
netio_dispatch(netio_type *netio, const struct timespec *timeout, const sigset_t *sigmask)
{
	/* static array to avoid allocation on each dispatch */
	static struct pollfd fds[MAX_NETIO_FDS];
	int numfd;
	int have_timeout = 0;
	struct timespec minimum_timeout;
	netio_handler_type *timeout_handler = NULL;
	netio_handler_list_type *elt;
	int rc;
	int result = 0;
#ifndef HAVE_PPOLL
	sigset_t origmask;
#endif

	assert(netio);

	/*
	 * Clear the cached current time.
	 */
	netio->have_current_time = 0;

	/*
	 * Initialize the minimum timeout with the timeout parameter.
	 */
	if (timeout) {
		have_timeout = 1;
		memcpy(&minimum_timeout, timeout, sizeof(struct timespec));
	}

	/*
	 * Initialize the pollfd array and the timeout based on the
	 * handler information.
	 */
	numfd = 0;

	for (elt = netio->handlers; elt; elt = elt->next) {
		netio_handler_type *handler = elt->handler;
		if (handler->fd != -1 && numfd < MAX_NETIO_FDS) {
			fds[numfd].fd = handler->fd;
			fds[numfd].events = 0;
			fds[numfd].revents = 0;
			handler->pfd = numfd;
			if (handler->event_types & NETIO_EVENT_READ) {
				fds[numfd].events |= POLLIN;
			}
			if (handler->event_types & NETIO_EVENT_WRITE) {
				fds[numfd].events |= POLLOUT;
			}
			numfd++;
		} else {
			handler->pfd = -1;
		}
		if (handler->timeout && (handler->event_types & NETIO_EVENT_TIMEOUT)) {
			struct timespec relative;

			relative.tv_sec = handler->timeout->tv_sec;
			relative.tv_nsec = handler->timeout->tv_nsec;
			timespec_subtract(&relative, netio_current_time(netio));

			if (!have_timeout ||
			    timespec_compare(&relative, &minimum_timeout) < 0)
			{
				have_timeout = 1;
				minimum_timeout.tv_sec = relative.tv_sec;
				minimum_timeout.tv_nsec = relative.tv_nsec;
				timeout_handler = handler;
			}
		}
	}

	if (have_timeout && minimum_timeout.tv_sec < 0) {
		/*
		 * On negative timeout for a handler, immediately
		 * dispatch the timeout event without checking for
		 * other events.
		 */
		if (timeout_handler && (timeout_handler->event_types & NETIO_EVENT_TIMEOUT)) {
			timeout_handler->event_handler(netio, timeout_handler, NETIO_EVENT_TIMEOUT);
		}
		return result;
	}

	/* Check for events. */
#ifdef HAVE_PPOLL
	rc = ppoll(fds, numfd, (have_timeout?&minimum_timeout:NULL), sigmask);
#else
	/* Without ppoll(2), install sigmask around poll(2) and convert
	 * the timeout to milliseconds. */
	sigprocmask(SIG_SETMASK, sigmask, &origmask);
	rc = poll(fds, numfd, (have_timeout?minimum_timeout.tv_sec*1000+
		minimum_timeout.tv_nsec/1000000:-1));
	sigprocmask(SIG_SETMASK, &origmask, NULL);
#endif /* HAVE_PPOLL */
	if (rc == -1) {
		if(errno == EINVAL || errno == EACCES || errno == EBADF) {
			log_msg(LOG_ERR, "fatal error poll: %s.",
				strerror(errno));
			exit(1);
		}
		return -1;
	}

	/*
	 * Clear the cached current_time (poll(2) may block for
	 * some time so the cached value is likely to be old).
	 */
	netio->have_current_time = 0;

	if (rc == 0) {
		/*
		 * No events before the minimum timeout expired.
		 * Dispatch to handler if interested.
		 */
		if (timeout_handler && (timeout_handler->event_types & NETIO_EVENT_TIMEOUT)) {
			timeout_handler->event_handler(netio, timeout_handler, NETIO_EVENT_TIMEOUT);
		}
	} else {
		/*
		 * Dispatch all the events to interested handlers
		 * based on the poll results.  Note that a handler might
		 * deinstall itself, so store the next handler before
		 * calling the current handler!
		 */
		assert(netio->dispatch_next == NULL);
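
		/*
		 * For reference, each handler->event_handler invoked by the
		 * loop below is a callback with the shape used throughout
		 * this file (the name my_read_callback is hypothetical):
		 *
		 *	static void
		 *	my_read_callback(netio_type *netio, netio_handler_type *handler,
		 *		netio_event_types_type event_types)
		 *	{
		 *		if (event_types & NETIO_EVENT_READ) {
		 *			... read from handler->fd ...
		 *		}
		 *	}
		 *
		 * The event_types argument is masked with handler->event_types,
		 * so a callback only sees events it registered interest in.  A
		 * callback may call netio_remove_handler() on itself or on
		 * another handler; netio_remove_handler() patches dispatch_next
		 * so this loop remains valid.
		 */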

		for (elt = netio->handlers; elt && rc; ) {
			netio_handler_type *handler = elt->handler;
			netio->dispatch_next = elt->next;
			if (handler->fd != -1 && handler->pfd != -1) {
				netio_event_types_type event_types
					= NETIO_EVENT_NONE;
				if ((fds[handler->pfd].revents & POLLIN)) {
					event_types |= NETIO_EVENT_READ;
				}
				if ((fds[handler->pfd].revents & POLLOUT)) {
					event_types |= NETIO_EVENT_WRITE;
				}
				if ((fds[handler->pfd].revents &
					(POLLNVAL|POLLHUP|POLLERR))) {
					/* closed/error: give a read event,
					 * or otherwise, a write event */
					if((handler->event_types&NETIO_EVENT_READ))
						event_types |= NETIO_EVENT_READ;
					else if((handler->event_types&NETIO_EVENT_WRITE))
						event_types |= NETIO_EVENT_WRITE;
				}

				if (event_types & handler->event_types) {
					handler->event_handler(netio, handler, event_types & handler->event_types);
					++result;
				}
			}
			elt = netio->dispatch_next;
		}
		netio->dispatch_next = NULL;
	}

	return result;
}
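
/*
 * Putting the pieces together, a minimal usage sketch (not part of this
 * file; udp_socket, running, mask and my_read_callback are hypothetical
 * names): the caller creates a netio object, registers a handler for a
 * descriptor it wants to watch, and then calls netio_dispatch() in its
 * main loop.  The handler object is stored by reference, so it must
 * outlive its registration (hence static here):
 *
 *	netio_type *netio = netio_create(region);
 *	static netio_handler_type handler;
 *	handler.fd = udp_socket;
 *	handler.timeout = NULL;
 *	handler.event_types = NETIO_EVENT_READ;
 *	handler.event_handler = my_read_callback;
 *	netio_add_handler(netio, &handler);
 *	while (running) {
 *		if (netio_dispatch(netio, NULL, &mask) == -1 && errno != EINTR)
 *			break;
 *	}
 *
 * netio_dispatch() returns the number of handlers dispatched for
 * descriptor events, 0 when only a timeout fired, and -1 when poll
 * failed with a non-fatal error.
 */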