1 /***************************************************************************
2 * engine_poll.c -- poll(2) based IO engine. *
3 * *
4 ***********************IMPORTANT NSOCK LICENSE TERMS***********************
5 * *
6 * The nsock parallel socket event library is (C) 1999-2017 Insecure.Com *
7 * LLC This library is free software; you may redistribute and/or *
8 * modify it under the terms of the GNU General Public License as *
9 * published by the Free Software Foundation; Version 2. This guarantees *
10 * your right to use, modify, and redistribute this software under certain *
11 * conditions. If this license is unacceptable to you, Insecure.Com LLC *
12 * may be willing to sell alternative licenses (contact *
13 * sales@insecure.com ). *
14 * *
15 * As a special exception to the GPL terms, Insecure.Com LLC grants *
16 * permission to link the code of this program with any version of the *
17 * OpenSSL library which is distributed under a license identical to that *
18 * listed in the included docs/licenses/OpenSSL.txt file, and distribute *
19 * linked combinations including the two. You must obey the GNU GPL in all *
20 * respects for all of the code used other than OpenSSL. If you modify *
21 * this file, you may extend this exception to your version of the file, *
22 * but you are not obligated to do so. *
23 * *
24 * If you received these files with a written license agreement stating *
25 * terms other than the (GPL) terms above, then that alternative license *
26 * agreement takes precedence over this comment. *
27 * *
28 * Source is provided to this software because we believe users have a *
29 * right to know exactly what a program is going to do before they run it. *
30 * This also allows you to audit the software for security holes. *
31 * *
32 * Source code also allows you to port Nmap to new platforms, fix bugs, *
33 * and add new features. You are highly encouraged to send your changes *
34 * to the dev@nmap.org mailing list for possible incorporation into the *
35 * main distribution. By sending these changes to Fyodor or one of the *
36 * Insecure.Org development mailing lists, or checking them into the Nmap *
37 * source code repository, it is understood (unless you specify otherwise) *
38 * that you are offering the Nmap Project (Insecure.Com LLC) the *
39 * unlimited, non-exclusive right to reuse, modify, and relicense the *
40 * code. Nmap will always be available Open Source, but this is important *
41 * because the inability to relicense code has caused devastating problems *
42 * for other Free Software projects (such as KDE and NASM). We also *
43 * occasionally relicense the code to third parties as discussed above. *
44 * If you wish to specify special license conditions of your *
45 * contributions, just say so when you send them. *
46 * *
47 * This program is distributed in the hope that it will be useful, but *
48 * WITHOUT ANY WARRANTY; without even the implied warranty of *
49 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
50 * General Public License v2.0 for more details *
51 * (http://www.gnu.org/licenses/gpl-2.0.html). *
52 * *
53 ***************************************************************************/
54
55 /* $Id$ */
56
57 #ifndef WIN32
58 /* Allow the use of POLLRDHUP, if available. */
59 #define _GNU_SOURCE
60 #endif
61
62 #ifdef HAVE_CONFIG_H
63 #include "nsock_config.h"
64 #elif WIN32
65 #include "nsock_winconfig.h"
66 #endif
67
68 #if HAVE_POLL
69
70 #include <errno.h>
71
72 #ifndef WIN32
73 #include <poll.h>
74 #else
75 #include <Winsock2.h>
76 #endif /* ^WIN32 */
77
78 #include "nsock_internal.h"
79 #include "nsock_log.h"
80
81 #if HAVE_PCAP
82 #include "nsock_pcap.h"
83 #endif
84
85 #define EV_LIST_INIT_SIZE 1024
86
87 #ifdef WIN32
88 #define Poll WSAPoll
89 #define POLLFD WSAPOLLFD
90 #else
91 #define Poll poll
92 #define POLLFD struct pollfd
93 #endif
94
95 #ifdef WIN32
96 #define POLL_R_FLAGS (POLLIN)
97 #else
98 #define POLL_R_FLAGS (POLLIN | POLLPRI)
99 #endif /* WIN32 */
100
101 #define POLL_W_FLAGS POLLOUT
102 #ifdef POLLRDHUP
103 #define POLL_X_FLAGS (POLLERR | POLLHUP | POLLRDHUP)
104 #else
105 /* POLLRDHUP was introduced later and might be unavailable on older systems. */
106 #define POLL_X_FLAGS (POLLERR | POLLHUP)
107 #endif /* POLLRDHUP */
108
109 extern struct io_operations posix_io_operations;
110
111 /* --- ENGINE INTERFACE PROTOTYPES --- */
112 static int poll_init(struct npool *nsp);
113 static void poll_destroy(struct npool *nsp);
114 static int poll_iod_register(struct npool *nsp, struct niod *iod, struct nevent *nse, int ev);
115 static int poll_iod_unregister(struct npool *nsp, struct niod *iod);
116 static int poll_iod_modify(struct npool *nsp, struct niod *iod, struct nevent *nse, int ev_set, int ev_clr);
117 static int poll_loop(struct npool *nsp, int msec_timeout);
118
119
120 /* ---- ENGINE DEFINITION ---- */
/* Dispatch table binding the generic nsock engine interface to the
 * poll(2)-based implementation in this file.  Order is positional and
 * must match struct io_engine (declared in nsock_internal.h). */
struct io_engine engine_poll = {
  "poll",
  poll_init,
  poll_destroy,
  poll_iod_register,
  poll_iod_unregister,
  poll_iod_modify,
  poll_loop,
  &posix_io_operations
};
131
132
133 /* --- INTERNAL PROTOTYPES --- */
134 static void iterate_through_event_lists(struct npool *nsp);
135
136 /* defined in nsock_core.c */
137 void process_iod_events(struct npool *nsp, struct niod *nsi, int ev);
138 void process_event(struct npool *nsp, gh_list_t *evlist, struct nevent *nse, int ev);
139 void process_expired_events(struct npool *nsp);
140 #if HAVE_PCAP
141 #ifndef PCAP_CAN_DO_SELECT
142 int pcap_read_on_nonselect(struct npool *nsp);
143 #endif
144 #endif
145
146 /* defined in nsock_event.c */
147 void update_first_events(struct nevent *nse);
148
149
150 extern struct timeval nsock_tod;
151
152
/*
 * Engine specific data structure: a flat pollfd array indexed directly by
 * socket descriptor value (slot for descriptor sd is events[sd]).
 */
struct poll_engine_info {
  /* number of allocated slots in events[] */
  int capacity;
  /* index of the highest registered descriptor (-1 when none) */
  int max_fd;
  /* pollfd slots; unused slots carry fd == -1 */
  POLLFD *events;
};
162
163
164
lower_max_fd(struct poll_engine_info * pinfo)165 static inline int lower_max_fd(struct poll_engine_info *pinfo) {
166 do {
167 pinfo->max_fd--;
168 } while (pinfo->max_fd >= 0 && pinfo->events[pinfo->max_fd].fd == -1);
169
170 return pinfo->max_fd;
171 }
172
evlist_grow(struct poll_engine_info * pinfo)173 static inline int evlist_grow(struct poll_engine_info *pinfo) {
174 int i;
175
176 i = pinfo->capacity;
177
178 if (pinfo->capacity == 0) {
179 pinfo->capacity = EV_LIST_INIT_SIZE;
180 pinfo->events = (POLLFD *)safe_malloc(sizeof(POLLFD) * pinfo->capacity);
181 } else {
182 pinfo->capacity *= 2;
183 pinfo->events = (POLLFD *)safe_realloc(pinfo->events, sizeof(POLLFD) * pinfo->capacity);
184 }
185
186 while (i < pinfo->capacity) {
187 pinfo->events[i].fd = -1;
188 pinfo->events[i].events = 0;
189 pinfo->events[i].revents = 0;
190 i++;
191 }
192 return pinfo->capacity;
193 }
194
195
poll_init(struct npool * nsp)196 int poll_init(struct npool *nsp) {
197 struct poll_engine_info *pinfo;
198
199 pinfo = (struct poll_engine_info *)safe_malloc(sizeof(struct poll_engine_info));
200 pinfo->capacity = 0;
201 pinfo->max_fd = -1;
202 evlist_grow(pinfo);
203
204 nsp->engine_data = (void *)pinfo;
205
206 return 1;
207 }
208
poll_destroy(struct npool * nsp)209 void poll_destroy(struct npool *nsp) {
210 struct poll_engine_info *pinfo = (struct poll_engine_info *)nsp->engine_data;
211
212 assert(pinfo != NULL);
213 free(pinfo->events);
214 free(pinfo);
215 }
216
poll_iod_register(struct npool * nsp,struct niod * iod,struct nevent * nse,int ev)217 int poll_iod_register(struct npool *nsp, struct niod *iod, struct nevent *nse, int ev) {
218 struct poll_engine_info *pinfo = (struct poll_engine_info *)nsp->engine_data;
219 int sd;
220
221 assert(!IOD_PROPGET(iod, IOD_REGISTERED));
222
223 iod->watched_events = ev;
224
225 sd = nsock_iod_get_sd(iod);
226 while (pinfo->capacity < sd + 1)
227 evlist_grow(pinfo);
228
229 pinfo->events[sd].fd = sd;
230 pinfo->events[sd].events = 0;
231 pinfo->events[sd].revents = 0;
232
233 pinfo->max_fd = MAX(pinfo->max_fd, sd);
234
235 if (ev & EV_READ)
236 pinfo->events[sd].events |= POLL_R_FLAGS;
237 if (ev & EV_WRITE)
238 pinfo->events[sd].events |= POLL_W_FLAGS;
239 #ifndef WIN32
240 if (ev & EV_EXCEPT)
241 pinfo->events[sd].events |= POLL_X_FLAGS;
242 #endif
243
244 IOD_PROPSET(iod, IOD_REGISTERED);
245 return 1;
246 }
247
poll_iod_unregister(struct npool * nsp,struct niod * iod)248 int poll_iod_unregister(struct npool *nsp, struct niod *iod) {
249 iod->watched_events = EV_NONE;
250
251 /* some IODs can be unregistered here if they're associated to an event that was
252 * immediately completed */
253 if (IOD_PROPGET(iod, IOD_REGISTERED)) {
254 struct poll_engine_info *pinfo = (struct poll_engine_info *)nsp->engine_data;
255 int sd;
256
257 sd = nsock_iod_get_sd(iod);
258 pinfo->events[sd].fd = -1;
259 pinfo->events[sd].events = 0;
260 pinfo->events[sd].revents = 0;
261
262 if (pinfo->max_fd == sd)
263 lower_max_fd(pinfo);
264
265 IOD_PROPCLR(iod, IOD_REGISTERED);
266 }
267 return 1;
268 }
269
poll_iod_modify(struct npool * nsp,struct niod * iod,struct nevent * nse,int ev_set,int ev_clr)270 int poll_iod_modify(struct npool *nsp, struct niod *iod, struct nevent *nse, int ev_set, int ev_clr) {
271 int sd;
272 int new_events;
273 struct poll_engine_info *pinfo = (struct poll_engine_info *)nsp->engine_data;
274
275 assert((ev_set & ev_clr) == 0);
276 assert(IOD_PROPGET(iod, IOD_REGISTERED));
277
278 new_events = iod->watched_events;
279 new_events |= ev_set;
280 new_events &= ~ev_clr;
281
282 if (new_events == iod->watched_events)
283 return 1; /* nothing to do */
284
285 iod->watched_events = new_events;
286
287 sd = nsock_iod_get_sd(iod);
288
289 pinfo->events[sd].fd = sd;
290 pinfo->events[sd].events = 0;
291
292 /* regenerate the current set of events for this IOD */
293 if (iod->watched_events & EV_READ)
294 pinfo->events[sd].events |= POLL_R_FLAGS;
295 if (iod->watched_events & EV_WRITE)
296 pinfo->events[sd].events |= POLL_W_FLAGS;
297
298 return 1;
299 }
300
/* Engine event loop: block in poll(2) until I/O is ready or the next nsock
 * event expires (capped by msec_timeout; -1 means wait indefinitely), then
 * dispatch everything that completed.
 *
 * Returns 0 when no events are pending, -1 on a fatal poll error (with
 * nsp->errnum set to the socket errno), 1 otherwise. */
int poll_loop(struct npool *nsp, int msec_timeout) {
  int results_left = 0;
  int event_msecs; /* msecs before an event goes off */
  int combined_msecs;
  int sock_err = 0;
  struct poll_engine_info *pinfo = (struct poll_engine_info *)nsp->engine_data;

  assert(msec_timeout >= -1);

  if (nsp->events_pending == 0)
    return 0; /* No need to wait on 0 events ... */

  do {
    struct nevent *nse;

    nsock_log_debug_all("wait for events");

    /* Cap the poll timeout at the delay until the next expirable event. */
    nse = next_expirable_event(nsp);
    if (!nse)
      event_msecs = -1; /* None of the events specified a timeout */
    else
      event_msecs = MAX(0, TIMEVAL_MSEC_SUBTRACT(nse->timeout, nsock_tod));

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* Force a low timeout when capturing packets on systems where
     * the pcap descriptor is not select()able. */
    if (gh_list_count(&nsp->pcap_read_events) > 0)
      if (event_msecs > PCAP_POLL_INTERVAL)
        event_msecs = PCAP_POLL_INTERVAL;
#endif
#endif

    /* We cast to unsigned because we want -1 to be very high (since it means no
     * timeout) */
    combined_msecs = MIN((unsigned)event_msecs, (unsigned)msec_timeout);

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* do non-blocking read on pcap devices that doesn't support select()
     * If there is anything read, just leave this loop. */
    if (pcap_read_on_nonselect(nsp)) {
      /* okay, something was read. */
    } else
#endif
#endif
    {
      results_left = Poll(pinfo->events, pinfo->max_fd + 1, combined_msecs);
      if (results_left == -1)
        sock_err = socket_errno();
    }

    gettimeofday(&nsock_tod, NULL); /* Due to poll delay */
  } while (results_left == -1 && sock_err == EINTR); /* repeat only if signal occurred */

  if (results_left == -1 && sock_err != EINTR) {
#ifdef WIN32
    /* NOTE(review): presumably works around WSAPoll failing with EINVAL
     * when handed no valid sockets -- an EINVAL is only treated as fatal
     * if a live (fd != -1) slot exists; any other error is reported on
     * the first iteration.  Mind the #ifdef'd braces: on POSIX only the
     * three unconditional lines below remain. */
    for (int i = 0; sock_err != EINVAL || i <= pinfo->max_fd; i++) {
      if (sock_err != EINVAL || pinfo->events[i].fd != -1) {
#endif
        nsock_log_error("nsock_loop error %d: %s", sock_err, socket_strerror(sock_err));
        nsp->errnum = sock_err;
        return -1;
#ifdef WIN32
      }
    }
#endif
  }

  iterate_through_event_lists(nsp);

  return 1;
}
374
375
376 /* ---- INTERNAL FUNCTIONS ---- */
377
/* Translate the revents that poll(2) recorded for this IOD's descriptor
 * into an nsock EV_* mask.  Returns EV_NONE for IODs that are deleted,
 * have no pending events, or are not registered with this engine. */
static inline int get_evmask(struct npool *nsp, struct niod *nsi) {
  struct poll_engine_info *pinfo = (struct poll_engine_info *)nsp->engine_data;
  int sd, evmask = EV_NONE;
  POLLFD *pev;

  if (nsi->state != NSIOD_STATE_DELETED
      && nsi->events_pending
      && IOD_PROPGET(nsi, IOD_REGISTERED)) {

#if HAVE_PCAP
    /* pcap IODs are polled via the pcap descriptor, not nsi->sd */
    if (nsi->pcap)
      sd = ((mspcap *)nsi->pcap)->pcap_desc;
    else
#endif
      sd = nsi->sd;

    assert(sd < pinfo->capacity);
    pev = &pinfo->events[sd];

    if (pev->revents & POLL_R_FLAGS)
      evmask |= EV_READ;
    if (pev->revents & POLL_W_FLAGS)
      evmask |= EV_WRITE;
    /* NOTE(review): the `pev->events` guard suppresses EV_EXCEPT on slots
     * whose interest flags have been cleared -- POLLERR/POLLHUP can be
     * reported even when not requested.  Looks intentional, but confirm
     * it should not read `pev->events & POLL_X_FLAGS` instead. */
    if (pev->events && (pev->revents & POLL_X_FLAGS))
      evmask |= EV_EXCEPT;
  }
  return evmask;
}
406
/* Iterate through all the event lists (such as connect_events, read_events,
 * timer_events, etc) and take action for those that have completed (due to
 * timeout, i/o, etc) */
void iterate_through_event_lists(struct npool *nsp) {
  gh_lnode_t *current, *next, *last;

  /* Snapshot the current tail: the loop condition below stops once the
   * element that was last at entry has been processed, so IODs appended
   * during dispatch are not visited in this pass. */
  last = gh_list_last_elem(&nsp->active_iods);

  for (current = gh_list_first_elem(&nsp->active_iods);
       current != NULL && gh_lnode_prev(current) != last;
       current = next) {
    struct niod *nsi = container_of(current, struct niod, nodeq);

    /* dispatch whatever poll(2) reported for this IOD */
    process_iod_events(nsp, nsi, get_evmask(nsp, nsi));

    /* grab the successor before we potentially unlink `current` below */
    next = gh_lnode_next(current);
    if (nsi->state == NSIOD_STATE_DELETED) {
      /* recycle deleted IODs onto the free list */
      gh_list_remove(&nsp->active_iods, current);
      gh_list_prepend(&nsp->free_iods, current);
    }
  }

  /* iterate through timers and expired events */
  process_expired_events(nsp);
}
432
433 #endif /* HAVE_POLL */
434