1 /* -*- mode: c; c-file-style: "openbsd" -*- */
2 /*
3 * Copyright (c) 2012 Vincent Bernat <bernat@luffy.cx>
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18 #include "lldpd.h"
19 #include "trace.h"
20
21 #include <unistd.h>
22 #include <signal.h>
23 #include <errno.h>
24 #include <time.h>
25 #include <fcntl.h>
26 #if defined(__clang__)
27 #pragma clang diagnostic push
28 #pragma clang diagnostic ignored "-Wdocumentation"
29 #endif
30 #include <event2/event.h>
31 #include <event2/bufferevent.h>
32 #include <event2/buffer.h>
33 #if defined(__clang__)
34 #pragma clang diagnostic pop
35 #endif
36
37 #define EVENT_BUFFER 1024
38
39 static void
levent_log_cb(int severity,const char * msg)40 levent_log_cb(int severity, const char *msg)
41 {
42 switch (severity) {
43 case _EVENT_LOG_DEBUG: log_debug("libevent", "%s", msg); break;
44 case _EVENT_LOG_MSG: log_info ("libevent", "%s", msg); break;
45 case _EVENT_LOG_WARN: log_warnx("libevent", "%s", msg); break;
46 case _EVENT_LOG_ERR: log_warnx("libevent", "%s", msg); break;
47 }
48 }
49
/* One watched file descriptor: a list node owning a single libevent
 * event. Used both for SNMP FDs and per-interface receive FDs. */
struct lldpd_events {
	TAILQ_ENTRY(lldpd_events) next;
	struct event *ev;
};
TAILQ_HEAD(ev_l, lldpd_events);

/* Accessors for the opaque event-list pointers stored in the lldpd
 * configuration (SNMP FDs) and in each hardware port (receive FDs). */
#define levent_snmp_fds(cfg) ((struct ev_l*)(cfg)->g_snmp_fds)
#define levent_hardware_fds(hardware) ((struct ev_l*)(hardware)->h_recv)
58
59 #ifdef USE_SNMP
60 #include <net-snmp/net-snmp-config.h>
61 #include <net-snmp/net-snmp-includes.h>
62 #include <net-snmp/agent/net-snmp-agent-includes.h>
63 #include <net-snmp/agent/snmp_vars.h>
64
65 /* Compatibility with older versions of NetSNMP */
66 #ifndef HAVE_SNMP_SELECT_INFO2
67 # define netsnmp_large_fd_set fd_set
68 # define snmp_read2 snmp_read
69 # define snmp_select_info2 snmp_select_info
70 # define netsnmp_large_fd_set_init(...)
71 # define netsnmp_large_fd_set_cleanup(...)
72 # define NETSNMP_LARGE_FD_SET FD_SET
73 # define NETSNMP_LARGE_FD_CLR FD_CLR
74 # define NETSNMP_LARGE_FD_ZERO FD_ZERO
75 # define NETSNMP_LARGE_FD_ISSET FD_ISSET
76 #else
77 # include <net-snmp/library/large_fd_set.h>
78 #endif
79
80 static void levent_snmp_update(struct lldpd *);
81
82 /*
83 * Callback function when we have something to read from SNMP.
84 *
85 * This function is called because we have a read event on one SNMP
86 * file descriptor. When need to call snmp_read() on it.
87 */
88 static void
levent_snmp_read(evutil_socket_t fd,short what,void * arg)89 levent_snmp_read(evutil_socket_t fd, short what, void *arg)
90 {
91 struct lldpd *cfg = arg;
92 netsnmp_large_fd_set fdset;
93 (void)what;
94 netsnmp_large_fd_set_init(&fdset, FD_SETSIZE);
95 NETSNMP_LARGE_FD_ZERO(&fdset);
96 NETSNMP_LARGE_FD_SET(fd, &fdset);
97 snmp_read2(&fdset);
98 levent_snmp_update(cfg);
99 }
100
101 /*
102 * Callback function for a SNMP timeout.
103 *
104 * A SNMP timeout has occurred. Call `snmp_timeout()` to handle it.
105 */
106 static void
levent_snmp_timeout(evutil_socket_t fd,short what,void * arg)107 levent_snmp_timeout(evutil_socket_t fd, short what, void *arg)
108 {
109 struct lldpd *cfg = arg;
110 (void)what; (void)fd;
111 snmp_timeout();
112 run_alarms();
113 levent_snmp_update(cfg);
114 }
115
116 /*
117 * Watch a new SNMP FD.
118 *
119 * @param base The libevent base we are working on.
120 * @param fd The file descriptor we want to watch.
121 *
122 * The file descriptor is appended to the list of file descriptors we
123 * want to watch.
124 */
125 static void
levent_snmp_add_fd(struct lldpd * cfg,int fd)126 levent_snmp_add_fd(struct lldpd *cfg, int fd)
127 {
128 struct event_base *base = cfg->g_base;
129 struct lldpd_events *snmpfd = calloc(1, sizeof(struct lldpd_events));
130 if (!snmpfd) {
131 log_warn("event", "unable to allocate memory for new SNMP event");
132 return;
133 }
134 levent_make_socket_nonblocking(fd);
135 if ((snmpfd->ev = event_new(base, fd,
136 EV_READ | EV_PERSIST,
137 levent_snmp_read,
138 cfg)) == NULL) {
139 log_warnx("event", "unable to allocate a new SNMP event for FD %d", fd);
140 free(snmpfd);
141 return;
142 }
143 if (event_add(snmpfd->ev, NULL) == -1) {
144 log_warnx("event", "unable to schedule new SNMP event for FD %d", fd);
145 event_free(snmpfd->ev);
146 free(snmpfd);
147 return;
148 }
149 TAILQ_INSERT_TAIL(levent_snmp_fds(cfg), snmpfd, next);
150 }
151
152 /*
153 * Update SNMP event loop.
154 *
155 * New events are added and some other are removed. This function
156 * should be called every time a SNMP event happens: either when
157 * handling a SNMP packet, a SNMP timeout or when sending a SNMP
158 * packet. This function will keep libevent in sync with NetSNMP.
159 *
160 * @param base The libevent base we are working on.
161 */
162 static void
levent_snmp_update(struct lldpd * cfg)163 levent_snmp_update(struct lldpd *cfg)
164 {
165 int maxfd = 0;
166 int block = 1;
167 struct timeval timeout;
168 static int howmany = 0;
169 int added = 0, removed = 0, current = 0;
170 struct lldpd_events *snmpfd, *snmpfd_next;
171
172 /* snmp_select_info() can be tricky to understand. We set `block` to
173 1 to means that we don't request a timeout. snmp_select_info()
174 will reset `block` to 0 if it wants us to setup a timeout. In
175 this timeout, `snmp_timeout()` should be invoked.
176
177 Each FD in `fdset` will need to be watched for reading. If one of
178 them become active, `snmp_read()` should be called on it.
179 */
180
181 netsnmp_large_fd_set fdset;
182 netsnmp_large_fd_set_init(&fdset, FD_SETSIZE);
183 NETSNMP_LARGE_FD_ZERO(&fdset);
184 snmp_select_info2(&maxfd, &fdset, &timeout, &block);
185
186 /* We need to untrack any event whose FD is not in `fdset`
187 anymore */
188 for (snmpfd = TAILQ_FIRST(levent_snmp_fds(cfg));
189 snmpfd;
190 snmpfd = snmpfd_next) {
191 snmpfd_next = TAILQ_NEXT(snmpfd, next);
192 if (event_get_fd(snmpfd->ev) >= maxfd ||
193 (!NETSNMP_LARGE_FD_ISSET(event_get_fd(snmpfd->ev), &fdset))) {
194 event_free(snmpfd->ev);
195 TAILQ_REMOVE(levent_snmp_fds(cfg), snmpfd, next);
196 free(snmpfd);
197 removed++;
198 } else {
199 NETSNMP_LARGE_FD_CLR(event_get_fd(snmpfd->ev), &fdset);
200 current++;
201 }
202 }
203
204 /* Invariant: FD in `fdset` are not in list of FD */
205 for (int fd = 0; fd < maxfd; fd++) {
206 if (NETSNMP_LARGE_FD_ISSET(fd, &fdset)) {
207 levent_snmp_add_fd(cfg, fd);
208 added++;
209 }
210 }
211 current += added;
212 if (howmany != current) {
213 log_debug("event", "added %d events, removed %d events, total of %d events",
214 added, removed, current);
215 howmany = current;
216 }
217
218 /* If needed, handle timeout */
219 if (evtimer_add(cfg->g_snmp_timeout, block?NULL:&timeout) == -1)
220 log_warnx("event", "unable to schedule timeout function for SNMP");
221
222 netsnmp_large_fd_set_cleanup(&fdset);
223 }
224 #endif /* USE_SNMP */
225
/* State for one client connected to the Unix control socket. The
 * bufferevent owns the underlying socket (BEV_OPT_CLOSE_ON_FREE). */
struct lldpd_one_client {
	TAILQ_ENTRY(lldpd_one_client) next;
	struct lldpd *cfg;
	struct bufferevent *bev;
	int subscribed;		/* Is this client subscribed to changes? */
};
TAILQ_HEAD(, lldpd_one_client) lldpd_clients;
233
234 static void
levent_ctl_free_client(struct lldpd_one_client * client)235 levent_ctl_free_client(struct lldpd_one_client *client)
236 {
237 if (client && client->bev) bufferevent_free(client->bev);
238 if (client) {
239 TAILQ_REMOVE(&lldpd_clients, client, next);
240 free(client);
241 }
242 }
243
244 static void
levent_ctl_close_clients()245 levent_ctl_close_clients()
246 {
247 struct lldpd_one_client *client, *client_next;
248 for (client = TAILQ_FIRST(&lldpd_clients);
249 client;
250 client = client_next) {
251 client_next = TAILQ_NEXT(client, next);
252 levent_ctl_free_client(client);
253 }
254 }
255
/* Send one framed message (hmsg_header followed by `len` bytes of
 * payload) to a client.
 *
 * Returns `len` on success. On failure to queue the data, the client
 * is destroyed and -1 is returned — callers must not touch `client`
 * after a negative return. */
static ssize_t
levent_ctl_send(struct lldpd_one_client *client, int type, void *data, size_t len)
{
	struct bufferevent *bev = client->bev;
	struct hmsg_header hdr = { .len = len, .type = type };
	/* Disable writes while queuing so header and payload are flushed
	 * to the socket as one unit. */
	bufferevent_disable(bev, EV_WRITE);
	if (bufferevent_write(bev, &hdr, sizeof(struct hmsg_header)) == -1 ||
	    (len > 0 && bufferevent_write(bev, data, len) == -1)) {
		log_warnx("event", "unable to create answer to client");
		levent_ctl_free_client(client);
		return -1;
	}
	bufferevent_enable(bev, EV_WRITE);
	return len;
}
271
/* Notify all subscribed clients of a neighbor change on `ifname`.
 *
 * The change is serialized once (lazily, on the first subscribed
 * client found) and the same buffer is sent to every subscriber. */
void
levent_ctl_notify(char *ifname, int state, struct lldpd_port *neighbor)
{
	struct lldpd_one_client *client, *client_next;
	struct lldpd_neighbor_change neigh = {
		.ifname = ifname,
		.state = state,
		.neighbor = neighbor
	};
	void *output = NULL;
	ssize_t output_len = 0;

	/* Don't use TAILQ_FOREACH, the client may be deleted in case of errors. */
	log_debug("control", "notify clients of neighbor changes");
	for (client = TAILQ_FIRST(&lldpd_clients);
	     client;
	     client = client_next) {
		client_next = TAILQ_NEXT(client, next);
		if (!client->subscribed) continue;

		if (output == NULL) {
			/* Ugly hack: we don't want to transmit a list of
			 * ports. We patch the port to avoid this. The
			 * original list linkage is saved and restored
			 * around the serialization. */
			TAILQ_ENTRY(lldpd_port) backup_p_entries;
			memcpy(&backup_p_entries, &neighbor->p_entries,
			    sizeof(backup_p_entries));
			memset(&neighbor->p_entries, 0,
			    sizeof(backup_p_entries));
			output_len = lldpd_neighbor_change_serialize(&neigh, &output);
			memcpy(&neighbor->p_entries, &backup_p_entries,
			    sizeof(backup_p_entries));

			if (output_len <= 0) {
				log_warnx("event", "unable to serialize changed neighbor");
				return;
			}
		}

		/* May free the client on write failure; we already hold
		 * client_next, so iteration stays safe. */
		levent_ctl_send(client, NOTIFICATION, output, output_len);
	}

	free(output);
}
315
316 static ssize_t
levent_ctl_send_cb(void * out,int type,void * data,size_t len)317 levent_ctl_send_cb(void *out, int type, void *data, size_t len)
318 {
319 struct lldpd_one_client *client = out;
320 return levent_ctl_send(client, type, data, len);
321 }
322
/* Read callback for a control-socket client.
 *
 * Messages are framed as a struct hmsg_header followed by `hdr.len`
 * bytes of payload. The function returns early (without draining)
 * while a full frame has not yet arrived; libevent will call it again
 * when more data is available. On protocol or memory errors the
 * client is destroyed. */
static void
levent_ctl_recv(struct bufferevent *bev, void *ptr)
{
	struct lldpd_one_client *client = ptr;
	struct evbuffer *buffer = bufferevent_get_input(bev);
	size_t buffer_len = evbuffer_get_length(buffer);
	struct hmsg_header hdr;
	void *data = NULL;

	log_debug("control", "receive data on Unix socket");
	if (buffer_len < sizeof(struct hmsg_header))
		return;		/* Not enough data yet */
	/* Peek at the header without consuming it: we may still be
	 * waiting for the payload. */
	if (evbuffer_copyout(buffer, &hdr,
		sizeof(struct hmsg_header)) != sizeof(struct hmsg_header)) {
		log_warnx("event", "not able to read header");
		return;
	}
	if (hdr.len > HMSG_MAX_SIZE) {
		log_warnx("event", "message received is too large");
		goto recv_error;
	}

	if (buffer_len < hdr.len + sizeof(struct hmsg_header))
		return;		/* Not enough data yet */
	if (hdr.len > 0 && (data = malloc(hdr.len)) == NULL) {
		log_warnx("event", "not enough memory");
		goto recv_error;
	}
	/* Full frame available: consume header then payload. */
	evbuffer_drain(buffer, sizeof(struct hmsg_header));
	if (hdr.len > 0) evbuffer_remove(buffer, data, hdr.len);

	/* Currently, we should not receive notification acknowledgment. But if
	 * we receive one, we can discard it. */
	if (hdr.len == 0 && hdr.type == NOTIFICATION) return;
	if (client_handle_client(client->cfg,
		levent_ctl_send_cb, client,
		hdr.type, data, hdr.len,
		&client->subscribed) == -1) goto recv_error;
	free(data);
	return;

recv_error:
	free(data);
	levent_ctl_free_client(client);
}
368
369 static void
levent_ctl_event(struct bufferevent * bev,short events,void * ptr)370 levent_ctl_event(struct bufferevent *bev, short events, void *ptr)
371 {
372 struct lldpd_one_client *client = ptr;
373 if (events & BEV_EVENT_ERROR) {
374 log_warnx("event", "an error occurred with client: %s",
375 evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR()));
376 levent_ctl_free_client(client);
377 } else if (events & BEV_EVENT_EOF) {
378 log_debug("event", "client has been disconnected");
379 levent_ctl_free_client(client);
380 }
381 }
382
/* Accept callback on the Unix control socket: accept the connection,
 * allocate per-client state and wrap the new socket in a bufferevent
 * (which takes ownership of the FD via BEV_OPT_CLOSE_ON_FREE). */
static void
levent_ctl_accept(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	struct lldpd_one_client *client = NULL;
	int s;
	(void)what;

	log_debug("control", "accept a new connection");
	if ((s = accept(fd, NULL, NULL)) == -1) {
		log_warn("event", "unable to accept connection from socket");
		return;
	}
	client = calloc(1, sizeof(struct lldpd_one_client));
	if (!client) {
		log_warnx("event", "unable to allocate memory for new client");
		close(s);
		goto accept_failed;
	}
	client->cfg = cfg;
	levent_make_socket_nonblocking(s);
	/* Insert before creating the bufferevent: levent_ctl_free_client()
	 * unconditionally removes the client from this list. */
	TAILQ_INSERT_TAIL(&lldpd_clients, client, next);
	if ((client->bev = bufferevent_socket_new(cfg->g_base, s,
		    BEV_OPT_CLOSE_ON_FREE)) == NULL) {
		log_warnx("event", "unable to allocate a new buffer event for new client");
		close(s);
		goto accept_failed;
	}
	bufferevent_setcb(client->bev,
	    levent_ctl_recv, NULL, levent_ctl_event,
	    client);
	bufferevent_enable(client->bev, EV_READ | EV_WRITE);
	log_debug("event", "new client accepted");
	/* coverity[leaked_handle]
	   s has been saved by bufferevent_socket_new */
	return;
accept_failed:
	/* Safe with client == NULL (calloc failure path). */
	levent_ctl_free_client(client);
}
422
/* Watchdog on the pipe to the privileged monitor process. Fired when
 * the FD becomes readable; distinguishes "spurious wakeup" from
 * "monitor died" and breaks the event loop in the latter case. */
static void
levent_priv(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	ssize_t n;
	int err;
	char one;
	(void)what;
	/* Check if we have some data available. We need to pass the socket in
	 * non-blocking mode to be able to run the check without disruption. */
	levent_make_socket_nonblocking(fd);
	n = read(fd, &one, 0); err = errno;	/* save errno before it may be clobbered */
	levent_make_socket_blocking(fd);

	switch (n) {
	case -1:
		if (err == EAGAIN || err == EWOULDBLOCK)
			/* No data, all good */
			return;
		log_warnx("event", "unable to poll monitor process, exit");
		break;
	case 0:
		log_warnx("event", "monitor process has terminated, exit");
		break;
	default:
		/* Unfortunately, dead code, if we have data, we have requested
		 * 0 byte, so we will fall in the previous case. It seems safer
		 * to ask for 0 byte than asking for 1 byte. In the later case,
		 * if we have to speak with the monitor again before exiting, we
		 * would be out of sync. */
		log_warnx("event", "received unexpected data from monitor process, exit");
		break;
	}
	/* Any non-EAGAIN outcome means the monitor link is gone: stop. */
	event_base_loopbreak(base);
}
458
459 static void
levent_dump(evutil_socket_t fd,short what,void * arg)460 levent_dump(evutil_socket_t fd, short what, void *arg)
461 {
462 struct event_base *base = arg;
463 (void)fd; (void)what;
464 log_debug("event", "dumping all events");
465 event_base_dump_events(base, stderr);
466 }
467 static void
levent_stop(evutil_socket_t fd,short what,void * arg)468 levent_stop(evutil_socket_t fd, short what, void *arg)
469 {
470 struct event_base *base = arg;
471 (void)fd; (void)what;
472 event_base_loopbreak(base);
473 }
474
/* Main periodic loop: run lldpd_loop() then re-arm the timer.
 *
 * The interval is derived from the configured transmit interval
 * (treated as milliseconds: see the /1000 and %1000 below). When
 * interface-change notifications are available (g_iface_event set),
 * the loop can be much lazier (x20) since changes wake us up anyway.
 * A floor of 30000 ms is always enforced. */
static void
levent_update_and_send(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	struct timeval tv;
	long interval_ms = cfg->g_config.c_tx_interval;

	(void)fd; (void)what;
	lldpd_loop(cfg);
	if (cfg->g_iface_event != NULL)
		interval_ms *= 20;
	if (interval_ms < 30000)
		interval_ms = 30000;
	tv.tv_sec = interval_ms / 1000;
	tv.tv_usec = (interval_ms % 1000) * 1000;
	event_add(cfg->g_main_loop, &tv);
}
492
493 void
levent_update_now(struct lldpd * cfg)494 levent_update_now(struct lldpd *cfg)
495 {
496 if (cfg->g_main_loop)
497 event_active(cfg->g_main_loop, EV_TIMEOUT, 1);
498 }
499
500 void
levent_send_now(struct lldpd * cfg)501 levent_send_now(struct lldpd *cfg)
502 {
503 struct lldpd_hardware *hardware;
504 TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) {
505 if (hardware->h_timer)
506 event_active(hardware->h_timer, EV_TIMEOUT, 1);
507 else
508 log_warnx("event", "BUG: no timer present for interface %s",
509 hardware->h_ifname);
510 }
511 }
512
/* Initialize the libevent machinery: base, SNMP integration (when
 * enabled), the main periodic loop, the Unix control socket, the
 * monitor-process watchdog and signal handlers. Fatal on any failure
 * since lldpd cannot run without its event loop. */
static void
levent_init(struct lldpd *cfg)
{
	/* Setup libevent */
	log_debug("event", "initialize libevent");
	event_set_log_callback(levent_log_cb);
	if (!(cfg->g_base = event_base_new()))
		fatalx("event", "unable to create a new libevent base");
	log_info("event", "libevent %s initialized with %s method",
	    event_get_version(),
	    event_base_get_method(cfg->g_base));

	/* Setup SNMP */
#ifdef USE_SNMP
	if (cfg->g_snmp) {
		agent_init(cfg, cfg->g_snmp_agentx);
		cfg->g_snmp_timeout = evtimer_new(cfg->g_base,
		    levent_snmp_timeout,
		    cfg);
		if (!cfg->g_snmp_timeout)
			fatalx("event", "unable to setup timeout function for SNMP");
		if ((cfg->g_snmp_fds =
			malloc(sizeof(struct ev_l))) == NULL)
			fatalx("event", "unable to allocate memory for SNMP events");
		TAILQ_INIT(levent_snmp_fds(cfg));
	}
#endif

	/* Setup loop that will run every X seconds. */
	log_debug("event", "register loop timer");
	if (!(cfg->g_main_loop = event_new(cfg->g_base, -1, 0,
		    levent_update_and_send,
		    cfg)))
		fatalx("event", "unable to setup main timer");
	/* Fire the main loop once right away; it re-arms itself. */
	event_active(cfg->g_main_loop, EV_TIMEOUT, 1);

	/* Setup unix socket */
	struct event *ctl_event;
	log_debug("event", "register Unix socket");
	TAILQ_INIT(&lldpd_clients);
	levent_make_socket_nonblocking(cfg->g_ctl);
	if ((ctl_event = event_new(cfg->g_base, cfg->g_ctl,
		    EV_READ|EV_PERSIST, levent_ctl_accept, cfg)) == NULL)
		fatalx("event", "unable to setup control socket event");
	event_add(ctl_event, NULL);

	/* Somehow monitor the monitor process */
	struct event *monitor_event;
	log_debug("event", "monitor the monitor process");
	if ((monitor_event = event_new(cfg->g_base, priv_fd(PRIV_UNPRIVILEGED),
		    EV_READ|EV_PERSIST, levent_priv, cfg->g_base)) == NULL)
		fatalx("event", "unable to monitor monitor process");
	event_add(monitor_event, NULL);

	/* Signals */
	log_debug("event", "register signals");
	evsignal_add(evsignal_new(cfg->g_base, SIGUSR1,
		levent_dump, cfg->g_base),
	    NULL);
	evsignal_add(evsignal_new(cfg->g_base, SIGINT,
		levent_stop, cfg->g_base),
	    NULL);
	evsignal_add(evsignal_new(cfg->g_base, SIGTERM,
		levent_stop, cfg->g_base),
	    NULL);
}
579
/* Initialize libevent and start the event loop. Returns only once the
 * loop has been broken (signal or monitor death); then tears down the
 * interface timer, the SNMP agent and all control clients. */
void
levent_loop(struct lldpd *cfg)
{
	levent_init(cfg);
	lldpd_loop(cfg);
#ifdef USE_SNMP
	if (cfg->g_snmp) levent_snmp_update(cfg);
#endif

	/* libevent loop */
	do {
		TRACE(LLDPD_EVENT_LOOP());
		if (event_base_got_break(cfg->g_base) ||
		    event_base_got_exit(cfg->g_base))
			break;
	} while (event_base_loop(cfg->g_base, EVLOOP_ONCE) == 0);

	if (cfg->g_iface_timer_event != NULL)
		event_free(cfg->g_iface_timer_event);

#ifdef USE_SNMP
	if (cfg->g_snmp)
		agent_shutdown();
#endif /* USE_SNMP */

	levent_ctl_close_clients();
}
608
/* Release libevent resources still owned by the configuration:
 * interface-change event, cleanup timer and finally the base itself. */
void
levent_shutdown(struct lldpd *cfg)
{
	if (cfg->g_iface_event)
		event_free(cfg->g_iface_event);
	if (cfg->g_cleanup_timer)
		event_free(cfg->g_cleanup_timer);
	event_base_free(cfg->g_base);
}
619
/* Read callback for an interface FD: hand the frame to lldpd_recv()
 * and reschedule the cleanup timer (a new neighbor changes the next
 * TTL expiry). */
static void
levent_hardware_recv(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd_hardware *hardware = arg;
	struct lldpd *cfg = hardware->h_cfg;
	(void)what;
	log_debug("event", "received something for %s",
	    hardware->h_ifname);
	lldpd_recv(cfg, hardware, fd);
	levent_schedule_cleanup(cfg);
}
631
/* Allocate and initialize the list of receive events for a hardware
 * port. On allocation failure h_recv stays NULL and
 * levent_hardware_add_fd() becomes a no-op for this port. */
void
levent_hardware_init(struct lldpd_hardware *hardware)
{
	log_debug("event", "initialize events for %s", hardware->h_ifname);
	if ((hardware->h_recv =
		malloc(sizeof(struct ev_l))) == NULL) {
		log_warnx("event", "unable to allocate memory for %s",
		    hardware->h_ifname);
		return;
	}
	TAILQ_INIT(levent_hardware_fds(hardware));
}
644
/* Watch an additional receive FD for a hardware port. The FD is made
 * non-blocking and a persistent read event dispatching to
 * levent_hardware_recv() is registered. Failures are logged and the
 * FD is simply not watched. */
void
levent_hardware_add_fd(struct lldpd_hardware *hardware, int fd)
{
	struct lldpd_events *hfd = NULL;
	/* No event list (levent_hardware_init() failed): nothing to do. */
	if (!hardware->h_recv) return;

	hfd = calloc(1, sizeof(struct lldpd_events));
	if (!hfd) {
		log_warnx("event", "unable to allocate new event for %s",
		    hardware->h_ifname);
		return;
	}
	levent_make_socket_nonblocking(fd);
	if ((hfd->ev = event_new(hardware->h_cfg->g_base, fd,
		    EV_READ | EV_PERSIST,
		    levent_hardware_recv,
		    hardware)) == NULL) {
		log_warnx("event", "unable to allocate a new event for %s",
		    hardware->h_ifname);
		free(hfd);
		return;
	}
	if (event_add(hfd->ev, NULL) == -1) {
		log_warnx("event", "unable to schedule new event for %s",
		    hardware->h_ifname);
		event_free(hfd->ev);
		free(hfd);
		return;
	}
	TAILQ_INSERT_TAIL(levent_hardware_fds(hardware), hfd, next);
}
676
677 void
levent_hardware_release(struct lldpd_hardware * hardware)678 levent_hardware_release(struct lldpd_hardware *hardware)
679 {
680 struct lldpd_events *ev, *ev_next;
681 if (hardware->h_timer) {
682 event_free(hardware->h_timer);
683 hardware->h_timer = NULL;
684 }
685 if (!hardware->h_recv) return;
686
687 log_debug("event", "release events for %s", hardware->h_ifname);
688 for (ev = TAILQ_FIRST(levent_hardware_fds(hardware));
689 ev;
690 ev = ev_next) {
691 ev_next = TAILQ_NEXT(ev, next);
692 /* We may close several time the same FD. This is harmless. */
693 close(event_get_fd(ev->ev));
694 event_free(ev->ev);
695 TAILQ_REMOVE(levent_hardware_fds(hardware), ev, next);
696 free(ev);
697 }
698 free(levent_hardware_fds(hardware));
699 }
700
701 static void
levent_iface_trigger(evutil_socket_t fd,short what,void * arg)702 levent_iface_trigger(evutil_socket_t fd, short what, void *arg)
703 {
704 struct lldpd *cfg = arg;
705 log_debug("event",
706 "triggering update of all interfaces");
707 lldpd_update_localports(cfg);
708 }
709
/* Read callback on the interface-change notification socket.
 *
 * Either hand processing to the registered callback (g_iface_cb), or
 * — when there is none — drain and discard the pending messages. In
 * both cases, schedule a full local-port refresh one second later so
 * that a burst of notifications results in a single update. */
static void
levent_iface_recv(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	char buffer[EVENT_BUFFER];
	int n;

	if (cfg->g_iface_cb == NULL) {
		/* Discard the message */
		while (1) {
			n = read(fd, buffer, sizeof(buffer));
			/* EWOULDBLOCK/EAGAIN: socket drained, we are done. */
			if (n == -1 &&
			    (errno == EWOULDBLOCK ||
				errno == EAGAIN)) break;
			if (n == -1) {
				log_warn("event",
				    "unable to receive interface change notification message");
				return;
			}
			if (n == 0) {
				log_warnx("event",
				    "end of file reached while getting interface change notification message");
				return;
			}
		}
	} else {
		cfg->g_iface_cb(cfg);
	}

	/* Schedule local port update. We don't run it right away because we may
	 * receive a batch of events like this. */
	struct timeval one_sec = {1, 0};
	TRACE(LLDPD_INTERFACES_NOTIFICATION());
	log_debug("event",
	    "received notification change, schedule an update of all interfaces in one second");
	if (cfg->g_iface_timer_event == NULL) {
		/* Lazily created; freed in levent_loop() on shutdown. */
		if ((cfg->g_iface_timer_event = evtimer_new(cfg->g_base,
			    levent_iface_trigger, cfg)) == NULL) {
			log_warnx("event",
			    "unable to create a new event to trigger interface update");
			return;
		}
	}
	/* Re-adding an already pending evtimer restarts the one-second delay. */
	if (evtimer_add(cfg->g_iface_timer_event, &one_sec) == -1) {
		log_warnx("event",
		    "unable to schedule interface updates");
		return;
	}
}
759
/* Subscribe to interface-change notifications arriving on `socket`.
 *
 * The socket is made non-blocking and a persistent read event is
 * registered. Returns 0 on success, -1 on failure (in which case
 * g_iface_event is left NULL and no event is scheduled). */
int
levent_iface_subscribe(struct lldpd *cfg, int socket)
{
	log_debug("event", "subscribe to interface changes from socket %d",
	    socket);
	levent_make_socket_nonblocking(socket);
	cfg->g_iface_event = event_new(cfg->g_base, socket,
	    EV_READ | EV_PERSIST, levent_iface_recv, cfg);
	if (cfg->g_iface_event == NULL) {
		log_warnx("event",
		    "unable to allocate a new event for interface changes");
		return -1;
	}
	if (event_add(cfg->g_iface_event, NULL) == -1) {
		log_warnx("event",
		    "unable to schedule new interface changes event");
		event_free(cfg->g_iface_event);
		cfg->g_iface_event = NULL;
		return -1;
	}
	return 0;
}
782
783 static void
levent_trigger_cleanup(evutil_socket_t fd,short what,void * arg)784 levent_trigger_cleanup(evutil_socket_t fd, short what, void *arg)
785 {
786 struct lldpd *cfg = arg;
787 lldpd_cleanup(cfg);
788 }
789
/* (Re)schedule the cleanup timer so it fires when the next remote
 * port's TTL expires.
 *
 * Any previously scheduled timer is replaced. The delay starts at the
 * configured TTL and is lowered to the soonest expiry found among all
 * remote ports; an already-expired port forces an immediate (0 s)
 * cleanup. */
void
levent_schedule_cleanup(struct lldpd *cfg)
{
	log_debug("event", "schedule next cleanup");
	if (cfg->g_cleanup_timer != NULL) {
		event_free(cfg->g_cleanup_timer);
	}
	cfg->g_cleanup_timer = evtimer_new(cfg->g_base, levent_trigger_cleanup, cfg);
	if (cfg->g_cleanup_timer == NULL) {
		log_warnx("event",
		    "unable to allocate a new event for cleanup tasks");
		return;
	}

	/* Compute the next TTL event */
	struct timeval tv = { cfg->g_config.c_ttl, 0 };
	time_t now = time(NULL);
	time_t next;
	struct lldpd_hardware *hardware;
	struct lldpd_port *port;
	TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) {
		TAILQ_FOREACH(port, &hardware->h_rports, p_entries) {
			if (now >= port->p_lastupdate + port->p_ttl) {
				/* Already expired: clean up right away; no
				 * point in scanning further on this port. */
				tv.tv_sec = 0;
				log_debug("event", "immediate cleanup on port %s (%lld, %d, %lld)",
				    hardware->h_ifname,
				    (long long)now,
				    port->p_ttl,
				    (long long)port->p_lastupdate);
				break;
			}
			next = port->p_ttl - (now - port->p_lastupdate);
			if (next < tv.tv_sec)
				tv.tv_sec = next;
		}
	}

	log_debug("event", "next cleanup in %ld seconds",
	    (long)tv.tv_sec);
	if (event_add(cfg->g_cleanup_timer, &tv) == -1) {
		log_warnx("event",
		    "unable to schedule cleanup task");
		event_free(cfg->g_cleanup_timer);
		cfg->g_cleanup_timer = NULL;
		return;
	}
}
837
/* Timer callback transmitting a PDU on one port, then re-arming the
 * timer for the next transmission. The interval is the configured
 * transmit interval (in milliseconds, per the /1000 split below),
 * shortened to the LLDP-MED fast-transmit interval while a fast-tx
 * burst is in progress. */
static void
levent_send_pdu(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd_hardware *hardware = arg;
	int tx_interval = hardware->h_cfg->g_config.c_tx_interval;

	log_debug("event", "trigger sending PDU for port %s",
	    hardware->h_ifname);
	lldpd_send(hardware);

#ifdef ENABLE_LLDPMED
	/* Count down the fast-transmit burst; while it is still
	 * running, use the (seconds-based) fast interval instead. */
	if (hardware->h_tx_fast > 0)
		hardware->h_tx_fast--;

	if (hardware->h_tx_fast > 0)
		tx_interval = hardware->h_cfg->g_config.c_tx_fast_interval * 1000;
#endif

	struct timeval tv;
	tv.tv_sec = tx_interval / 1000;
	tv.tv_usec = (tx_interval % 1000) * 1000;
	if (event_add(hardware->h_timer, &tv) == -1) {
		log_warnx("event", "unable to re-register timer event for port %s",
		    hardware->h_ifname);
		event_free(hardware->h_timer);
		hardware->h_timer = NULL;
		return;
	}
}
867
/* Schedule an immediate PDU transmission on a port, creating the
 * per-port send timer on first use. The timer then re-arms itself
 * from levent_send_pdu(). */
void
levent_schedule_pdu(struct lldpd_hardware *hardware)
{
	log_debug("event", "schedule sending PDU on %s",
	    hardware->h_ifname);
	if (hardware->h_timer == NULL) {
		hardware->h_timer = evtimer_new(hardware->h_cfg->g_base,
		    levent_send_pdu, hardware);
		if (hardware->h_timer == NULL) {
			log_warnx("event", "unable to schedule PDU sending for port %s",
			    hardware->h_ifname);
			return;
		}
	}

	/* Zero timeout: fire on the next event-loop iteration. */
	struct timeval tv = { 0, 0 };
	if (event_add(hardware->h_timer, &tv) == -1) {
		log_warnx("event", "unable to register timer event for port %s",
		    hardware->h_ifname);
		event_free(hardware->h_timer);
		hardware->h_timer = NULL;
		return;
	}
}
892
893 int
levent_make_socket_nonblocking(int fd)894 levent_make_socket_nonblocking(int fd)
895 {
896 int flags;
897 if ((flags = fcntl(fd, F_GETFL, NULL)) < 0) {
898 log_warn("event", "fcntl(%d, F_GETFL)", fd);
899 return -1;
900 }
901 if (flags & O_NONBLOCK) return 0;
902 if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1) {
903 log_warn("event", "fcntl(%d, F_SETFL)", fd);
904 return -1;
905 }
906 return 0;
907 }
908
909 int
levent_make_socket_blocking(int fd)910 levent_make_socket_blocking(int fd)
911 {
912 int flags;
913 if ((flags = fcntl(fd, F_GETFL, NULL)) < 0) {
914 log_warn("event", "fcntl(%d, F_GETFL)", fd);
915 return -1;
916 }
917 if (!(flags & O_NONBLOCK)) return 0;
918 if (fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) == -1) {
919 log_warn("event", "fcntl(%d, F_SETFL)", fd);
920 return -1;
921 }
922 return 0;
923 }
924
#ifdef HOST_OS_LINUX
/* Receive and log error from a socket when there is suspicion of an error.
 *
 * Drains the socket's error queue (MSG_ERRQUEUE) without blocking,
 * logging the level/type of each control message found, until no more
 * errors are queued. `source` is only used to label log messages. */
void
levent_recv_error(int fd, const char *source)
{
	do {
		ssize_t n;
		char buf[1024] = {};
		struct msghdr msg = {
			.msg_control = buf,
			.msg_controllen = sizeof(buf)
		};
		/* <= 0 means the error queue is empty (or the call failed):
		 * either way, we are done. */
		if ((n = recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT)) <= 0) {
			return;
		}
		struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
		if (cmsg == NULL)
			log_warnx("event", "received unknown error on %s",
			    source);
		else
			log_warnx("event", "received error (level=%d/type=%d) on %s",
			    cmsg->cmsg_level, cmsg->cmsg_type, source);
	} while (1);
}
#endif
950