1 /*
2  *  OpenVPN -- An application to securely tunnel IP networks
3  *             over a single TCP/UDP port, with support for SSL/TLS-based
4  *             session authentication and key exchange,
5  *             packet encryption, packet authentication, and
6  *             packet compression.
7  *
8  *  Copyright (C) 2002-2018 OpenVPN Inc <sales@openvpn.net>
9  *
10  *  This program is free software; you can redistribute it and/or modify
11  *  it under the terms of the GNU General Public License version 2
12  *  as published by the Free Software Foundation.
13  *
14  *  This program is distributed in the hope that it will be useful,
15  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
16  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  *  GNU General Public License for more details.
18  *
19  *  You should have received a copy of the GNU General Public License along
20  *  with this program; if not, write to the Free Software Foundation, Inc.,
21  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22  */
23 
24 #ifdef HAVE_CONFIG_H
25 #include "config.h"
26 #elif defined(_MSC_VER)
27 #include "config-msvc.h"
28 #endif
29 
30 #include "syshead.h"
31 
32 #include "forward.h"
33 #include "init.h"
34 #include "push.h"
35 #include "gremlin.h"
36 #include "mss.h"
37 #include "event.h"
38 #include "occ.h"
39 #include "pf.h"
40 #include "ping.h"
41 #include "ps.h"
42 #include "dhcp.h"
43 #include "common.h"
44 #include "ssl_verify.h"
45 
46 #include "memdbg.h"
47 
48 #include "mstats.h"
49 
50 counter_type link_read_bytes_global;  /* GLOBAL */
51 counter_type link_write_bytes_global; /* GLOBAL */
52 
53 /* show event wait debugging info */
54 
55 #ifdef ENABLE_DEBUG
56 
57 static const char *
58 wait_status_string(struct context *c, struct gc_arena *gc)
59 {
60     struct buffer out = alloc_buf_gc(64, gc);
61     buf_printf(&out, "I/O WAIT %s|%s|%s|%s %s",
62                tun_stat(c->c1.tuntap, EVENT_READ, gc),
63                tun_stat(c->c1.tuntap, EVENT_WRITE, gc),
64                socket_stat(c->c2.link_socket, EVENT_READ, gc),
65                socket_stat(c->c2.link_socket, EVENT_WRITE, gc),
66                tv_string(&c->c2.timeval, gc));
67     return BSTR(&out);
68 }
69 
70 static void
71 show_wait_status(struct context *c)
72 {
73     struct gc_arena gc = gc_new();
74     dmsg(D_EVENT_WAIT, "%s", wait_status_string(c, &gc));
75     gc_free(&gc);
76 }
77 
78 #endif /* ifdef ENABLE_DEBUG */
79 
80 static void
81 check_tls_errors_co(struct context *c)
82 {
83     msg(D_STREAM_ERRORS, "Fatal TLS error (check_tls_errors_co), restarting");
84     register_signal(c, c->c2.tls_exit_signal, "tls-error"); /* SOFT-SIGUSR1 -- TLS error */
85 }
86 
87 static void
88 check_tls_errors_nco(struct context *c)
89 {
90     register_signal(c, c->c2.tls_exit_signal, "tls-error"); /* SOFT-SIGUSR1 -- TLS error */
91 }
92 
93 /*
94  * TLS errors are fatal in TCP mode.
95  * Also check for --tls-exit trigger.
96  */
97 static inline void
98 check_tls_errors(struct context *c)
99 {
100     if (c->c2.tls_multi && c->c2.tls_exit_signal)
101     {
102         if (link_socket_connection_oriented(c->c2.link_socket))
103         {
104             if (c->c2.tls_multi->n_soft_errors)
105             {
106                 check_tls_errors_co(c);
107             }
108         }
109         else
110         {
111             if (c->c2.tls_multi->n_hard_errors)
112             {
113                 check_tls_errors_nco(c);
114             }
115         }
116     }
117 }
118 
119 /*
120  * Set our wakeup to 0 seconds, so we will be rescheduled
121  * immediately.
122  */
123 static inline void
124 context_immediate_reschedule(struct context *c)
125 {
126     c->c2.timeval.tv_sec = 0;  /* ZERO-TIMEOUT */
127     c->c2.timeval.tv_usec = 0;
128 }
129 
130 static inline void
131 context_reschedule_sec(struct context *c, int sec)
132 {
133     if (sec < 0)
134     {
135         sec = 0;
136     }
137     if (sec < c->c2.timeval.tv_sec)
138     {
139         c->c2.timeval.tv_sec = sec;
140         c->c2.timeval.tv_usec = 0;
141     }
142 }
143 
144 /*
145  * In TLS mode, let TLS level respond to any control-channel
146  * packets which were received, or prepare any packets for
147  * transmission.
148  *
149  * tmp_int is purely an optimization that allows us to call
150  * tls_multi_process less frequently when there's not much
151  * traffic on the control-channel.
152  *
153  */
154 static void
155 check_tls(struct context *c)
156 {
157     interval_t wakeup = BIG_TIMEOUT;
158 
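    /* Call tls_multi_process() only when the tmp_int interval says it is
     * due; the wakeup hint it returns is folded into the main event-loop
     * timeout below via context_reschedule_sec(). */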
159     if (interval_test(&c->c2.tmp_int))
160     {
161         const int tmp_status = tls_multi_process
162                                    (c->c2.tls_multi, &c->c2.to_link, &c->c2.to_link_addr,
163                                    get_link_socket_info(c), &wakeup);
164         if (tmp_status == TLSMP_ACTIVE)
165         {
166             update_time();
167             interval_action(&c->c2.tmp_int);
168         }
169         else if (tmp_status == TLSMP_KILL)
170         {
171             if (c->options.mode == MODE_SERVER)
172             {
173                 send_auth_failed(c, c->c2.tls_multi->client_reason);
174             }
175             else
176             {
177                 register_signal(c, SIGTERM, "auth-control-exit");
178             }
179         }
180 
181         interval_future_trigger(&c->c2.tmp_int, wakeup);
182     }
183 
184     interval_schedule_wakeup(&c->c2.tmp_int, &wakeup);
185 
186     if (wakeup)
187     {
188         context_reschedule_sec(c, wakeup);
189     }
190 }
191 
192 /*
193  * Handle incoming configuration
194  * messages on the control channel.
195  */
196 static void
197 check_incoming_control_channel(struct context *c)
198 {
199     int len = tls_test_payload_len(c->c2.tls_multi);
200     /* We should only be called with len >0 */
201     ASSERT(len > 0);
202 
203     struct gc_arena gc = gc_new();
204     struct buffer buf = alloc_buf_gc(len, &gc);
205     if (tls_rec_payload(c->c2.tls_multi, &buf))
206     {
207         /* force null termination of message */
208         buf_null_terminate(&buf);
209 
210         /* enforce character class restrictions */
211         string_mod(BSTR(&buf), CC_PRINT, CC_CRLF, 0);
212 
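        /* Dispatch on the message keyword; the numeric arguments passed to
         * the handlers below are the lengths of the matched prefixes
         * (e.g. 7 for "RESTART", 4 for "HALT"). */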
213         if (buf_string_match_head_str(&buf, "AUTH_FAILED"))
214         {
215             receive_auth_failed(c, &buf);
216         }
217         else if (buf_string_match_head_str(&buf, "PUSH_"))
218         {
219             incoming_push_message(c, &buf);
220         }
221         else if (buf_string_match_head_str(&buf, "RESTART"))
222         {
223             server_pushed_signal(c, &buf, true, 7);
224         }
225         else if (buf_string_match_head_str(&buf, "HALT"))
226         {
227             server_pushed_signal(c, &buf, false, 4);
228         }
229         else if (buf_string_match_head_str(&buf, "INFO_PRE"))
230         {
231             server_pushed_info(c, &buf, 8);
232         }
233         else if (buf_string_match_head_str(&buf, "INFO"))
234         {
235             server_pushed_info(c, &buf, 4);
236         }
237         else if (buf_string_match_head_str(&buf, "CR_RESPONSE"))
238         {
239             receive_cr_response(c, &buf);
240         }
241         else if (buf_string_match_head_str(&buf, "AUTH_PENDING"))
242         {
243             receive_auth_pending(c, &buf);
244         }
245         else
246         {
247             msg(D_PUSH_ERRORS, "WARNING: Received unknown control message: %s", BSTR(&buf));
248         }
249     }
250     else
251     {
252         msg(D_PUSH_ERRORS, "WARNING: Receive control message failed");
253     }
254 
255     gc_free(&gc);
256 }
257 
258 /*
259  * Periodically resend PUSH_REQUEST until PUSH message received
260  */
261 static void
262 check_push_request(struct context *c)
263 {
264     send_push_request(c);
265 
266     /* if no response to first push_request, retry at PUSH_REQUEST_INTERVAL second intervals */
267     event_timeout_modify_wakeup(&c->c2.push_request_interval, PUSH_REQUEST_INTERVAL);
268 }
269 
270 /*
271  * Things that need to happen immediately after connection initiation should go here.
272  *
273  * Options like --up-delay need to be triggered by this function which
274  * checks for connection establishment.
275  *
276  * Note: process_incoming_push_reply() currently assumes that this function
277  * only sets up the pull request timer when pull is enabled.
278  */
279 static void
280 check_connection_established(struct context *c)
281 {
282 
283     if (CONNECTION_ESTABLISHED(c))
284     {
285         /* if --pull was specified, send a push request to server */
286         if (c->c2.tls_multi && c->options.pull)
287         {
288 #ifdef ENABLE_MANAGEMENT
289             if (management)
290             {
291                 management_set_state(management,
292                                      OPENVPN_STATE_GET_CONFIG,
293                                      NULL,
294                                      NULL,
295                                      NULL,
296                                      NULL,
297                                      NULL);
298             }
299 #endif
300             /* fire up push request right away (already 1s delayed) */
301             /* We might receive an AUTH_PENDING request before we armed this
302              * timer. In that case we don't change the value */
303             if (c->c2.push_request_timeout < now)
304             {
305                 c->c2.push_request_timeout = now + c->options.handshake_window;
306             }
307             event_timeout_init(&c->c2.push_request_interval, 0, now);
308             reset_coarse_timers(c);
309         }
310         else
311         {
312             do_up(c, false, 0);
313         }
314 
315         event_timeout_clear(&c->c2.wait_for_connect);
316     }
317 
318 }
319 
320 bool
321 send_control_channel_string_dowork(struct tls_multi *multi,
322                                    const char *str, int msglevel)
323 {
324     struct gc_arena gc = gc_new();
325     bool stat;
326 
327     /* buffered cleartext write onto TLS control channel */
328     stat = tls_send_payload(multi, (uint8_t *) str, strlen(str) + 1);
329 
330     msg(msglevel, "SENT CONTROL [%s]: '%s' (status=%d)",
331         tls_common_name(multi, false),
332         sanitize_control_message(str, &gc),
333         (int) stat);
334 
335     gc_free(&gc);
336     return stat;
337 }
338 
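/*
 * Force tls_multi_process() to run again as soon as possible, e.g. after
 * queuing data for the control channel.
 */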
339 void reschedule_multi_process(struct context *c)
340 {
341     interval_action(&c->c2.tmp_int);
342     context_immediate_reschedule(c); /* ZERO-TIMEOUT */
343 }
344 
345 bool
346 send_control_channel_string(struct context *c, const char *str, int msglevel)
347 {
348     if (c->c2.tls_multi)
349     {
350         bool ret = send_control_channel_string_dowork(c->c2.tls_multi,
351                                                       str, msglevel);
352         reschedule_multi_process(c);
353 
354         return ret;
355     }
356     return true;
357 }
358 /*
359  * Add routes.
360  */
361 
362 static void
363 check_add_routes_action(struct context *c, const bool errors)
364 {
365     do_route(&c->options, c->c1.route_list, c->c1.route_ipv6_list,
366              c->c1.tuntap, c->plugins, c->c2.es, &c->net_ctx);
367     update_time();
368     event_timeout_clear(&c->c2.route_wakeup);
369     event_timeout_clear(&c->c2.route_wakeup_expire);
370     initialization_sequence_completed(c, errors ? ISC_ERRORS : 0); /* client/p2p --route-delay was defined */
371 }
372 
373 static void
374 check_add_routes(struct context *c)
375 {
376     if (test_routes(c->c1.route_list, c->c1.tuntap))
377     {
378         check_add_routes_action(c, false);
379     }
380     else if (event_timeout_trigger(&c->c2.route_wakeup_expire, &c->c2.timeval, ETT_DEFAULT))
381     {
382         check_add_routes_action(c, true);
383     }
384     else
385     {
386         msg(D_ROUTE, "Route: Waiting for TUN/TAP interface to come up...");
387         if (c->c1.tuntap)
388         {
389             if (!tun_standby(c->c1.tuntap))
390             {
391                 register_signal(c, SIGHUP, "ip-fail");
392                 c->persist.restart_sleep_seconds = 10;
393 #ifdef _WIN32
394                 show_routes(M_INFO|M_NOPREFIX);
395                 show_adapters(M_INFO|M_NOPREFIX);
396 #endif
397             }
398         }
399         update_time();
400         if (c->c2.route_wakeup.n != 1)
401         {
402             event_timeout_init(&c->c2.route_wakeup, 1, now);
403         }
404         event_timeout_reset(&c->c2.ping_rec_interval);
405     }
406 }
407 
408 /*
409  * Should we exit due to inactivity timeout?
410  */
411 static void
412 check_inactivity_timeout(struct context *c)
413 {
414     msg(M_INFO, "Inactivity timeout (--inactive), exiting");
415     register_signal(c, SIGTERM, "inactive");
416 }
417 
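/*
 * Return the number of seconds remaining until the server poll
 * (connect) timeout expires, clamped at zero.
 */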
418 int
419 get_server_poll_remaining_time(struct event_timeout *server_poll_timeout)
420 {
421     update_time();
422     int remaining = event_timeout_remaining(server_poll_timeout);
423     return max_int(0, remaining);
424 }
425 
426 static void
427 check_server_poll_timeout(struct context *c)
428 {
429     event_timeout_reset(&c->c2.server_poll_interval);
430     ASSERT(c->c2.tls_multi);
431     if (!tls_initial_packet_received(c->c2.tls_multi))
432     {
433         msg(M_INFO, "Server poll timeout, restarting");
434         register_signal(c, SIGUSR1, "server_poll");
435         c->persist.restart_sleep_seconds = -1;
436     }
437 }
438 
439 /*
440  * Schedule a signal n_seconds from now.
441  */
442 void
443 schedule_exit(struct context *c, const int n_seconds, const int signal)
444 {
445     tls_set_single_session(c->c2.tls_multi);
446     update_time();
447     reset_coarse_timers(c);
448     event_timeout_init(&c->c2.scheduled_exit, n_seconds, now);
449     c->c2.scheduled_exit_signal = signal;
450     msg(D_SCHED_EXIT, "Delayed exit in %d seconds", n_seconds);
451 }
452 
453 /*
454  * Scheduled exit?
455  */
456 static void
457 check_scheduled_exit(struct context *c)
458 {
459     register_signal(c, c->c2.scheduled_exit_signal, "delayed-exit");
460 }
461 
462 /*
463  * Should we write timer-triggered status file.
464  */
465 static void
466 check_status_file(struct context *c)
467 {
468     if (c->c1.status_output)
469     {
470         print_status(c, c->c1.status_output);
471     }
472 }
473 
474 #ifdef ENABLE_FRAGMENT
475 /*
476  * Should we deliver a datagram fragment to remote?
477  */
478 static void
479 check_fragment(struct context *c)
480 {
481     struct link_socket_info *lsi = get_link_socket_info(c);
482 
483     /* OS MTU Hint? */
484     if (lsi->mtu_changed)
485     {
486         frame_adjust_path_mtu(&c->c2.frame_fragment, c->c2.link_socket->mtu,
487                               c->options.ce.proto);
488         lsi->mtu_changed = false;
489     }
490 
491     if (fragment_outgoing_defined(c->c2.fragment))
492     {
493         if (!c->c2.to_link.len)
494         {
495             /* encrypt a fragment for output to TCP/UDP port */
496             ASSERT(fragment_ready_to_send(c->c2.fragment, &c->c2.buf, &c->c2.frame_fragment));
497             encrypt_sign(c, false);
498         }
499     }
500 
501     fragment_housekeeping(c->c2.fragment, &c->c2.frame_fragment, &c->c2.timeval);
502 }
503 #endif /* ifdef ENABLE_FRAGMENT */
504 
505 /*
506  * Buffer reallocation, for use with null encryption.
507  */
508 static inline void
509 buffer_turnover(const uint8_t *orig_buf, struct buffer *dest_stub, struct buffer *src_stub, struct buffer *storage)
510 {
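    /*
     * If the packet still resides in its original buffer (i.e. no
     * crypto/compression stage copied it elsewhere) and that buffer is not
     * 'storage' itself, copy it into 'storage' so the original buffer can
     * be reused; otherwise just hand the current buffer through.
     */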
511     if (orig_buf == src_stub->data && src_stub->data != storage->data)
512     {
513         buf_assign(storage, src_stub);
514         *dest_stub = *storage;
515     }
516     else
517     {
518         *dest_stub = *src_stub;
519     }
520 }
521 
522 /*
523  * Compress, fragment, encrypt and HMAC-sign an outgoing packet.
524  * Input: c->c2.buf
525  * Output: c->c2.to_link
526  */
527 void
528 encrypt_sign(struct context *c, bool comp_frag)
529 {
530     struct context_buffers *b = c->c2.buffers;
531     const uint8_t *orig_buf = c->c2.buf.data;
532     struct crypto_options *co = NULL;
533 
534     /*
535      * Drop non-TLS outgoing packet if client-connect script/plugin
536      * has not yet succeeded. In non-TLS mode tls_multi is not defined
537      * and we always pass packets.
538      */
539     if (c->c2.tls_multi && c->c2.tls_multi->multi_state != CAS_SUCCEEDED)
540     {
541         c->c2.buf.len = 0;
542     }
543 
544     if (comp_frag)
545     {
546 #ifdef USE_COMP
547         /* Compress the packet. */
548         if (c->c2.comp_context)
549         {
550             (*c->c2.comp_context->alg.compress)(&c->c2.buf, b->compress_buf, c->c2.comp_context, &c->c2.frame);
551         }
552 #endif
553 #ifdef ENABLE_FRAGMENT
554         if (c->c2.fragment)
555         {
556             fragment_outgoing(c->c2.fragment, &c->c2.buf, &c->c2.frame_fragment);
557         }
558 #endif
559     }
560 
561     /* initialize work buffer with FRAME_HEADROOM bytes of prepend capacity */
562     ASSERT(buf_init(&b->encrypt_buf, FRAME_HEADROOM(&c->c2.frame)));
563 
564     if (c->c2.tls_multi)
565     {
566         /* Get the key we will use to encrypt the packet. */
567         tls_pre_encrypt(c->c2.tls_multi, &c->c2.buf, &co);
568         /* If using P_DATA_V2, prepend the 1-byte opcode and 3-byte peer-id to the
569          * packet before openvpn_encrypt(), so we can authenticate the opcode too.
570          */
571         if (c->c2.buf.len > 0 && c->c2.tls_multi->use_peer_id)
572         {
573             tls_prepend_opcode_v2(c->c2.tls_multi, &b->encrypt_buf);
574         }
575     }
576     else
577     {
578         co = &c->c2.crypto_options;
579     }
580 
581     /* Encrypt and authenticate the packet */
582     openvpn_encrypt(&c->c2.buf, b->encrypt_buf, co);
583 
584     /* Do packet administration */
585     if (c->c2.tls_multi)
586     {
587         if (c->c2.buf.len > 0 && !c->c2.tls_multi->use_peer_id)
588         {
589             tls_prepend_opcode_v1(c->c2.tls_multi, &c->c2.buf);
590         }
591         tls_post_encrypt(c->c2.tls_multi, &c->c2.buf);
592     }
593 
594     /*
595      * Get the address we will be sending the packet to.
596      */
597     link_socket_get_outgoing_addr(&c->c2.buf, get_link_socket_info(c),
598                                   &c->c2.to_link_addr);
599 
600     /* if null encryption, copy result to read_tun_buf */
601     buffer_turnover(orig_buf, &c->c2.to_link, &c->c2.buf, &b->read_tun_buf);
602 }
603 
604 /*
605  * Coarse timers work to 1 second resolution.
606  */
607 static void
608 process_coarse_timers(struct context *c)
609 {
610     /* flush current packet-id to file once per 60
611     * seconds if --replay-persist was specified */
612     if (packet_id_persist_enabled(&c->c1.pid_persist)
613         && event_timeout_trigger(&c->c2.packet_id_persist_interval, &c->c2.timeval, ETT_DEFAULT))
614     {
615         packet_id_persist_save(&c->c1.pid_persist);
616     }
617 
618     /* Should we write timer-triggered status file */
619     if (c->c1.status_output
620         && event_timeout_trigger(&c->c1.status_output->et, &c->c2.timeval, ETT_DEFAULT))
621     {
622         check_status_file(c);
623     }
624 
625     /* process connection establishment items */
626     if (event_timeout_trigger(&c->c2.wait_for_connect, &c->c2.timeval, ETT_DEFAULT))
627     {
628         check_connection_established(c);
629     }
630 
631     /* see if we should send a push_request (option --pull) */
632     if (event_timeout_trigger(&c->c2.push_request_interval, &c->c2.timeval, ETT_DEFAULT))
633     {
634         check_push_request(c);
635     }
636 
637 #ifdef PLUGIN_PF
638     if (c->c2.pf.enabled
639         && event_timeout_trigger(&c->c2.pf.reload, &c->c2.timeval, ETT_DEFAULT))
640     {
641         pf_check_reload(c);
642     }
643 #endif
644 
645     /* process --route options */
646     if (event_timeout_trigger(&c->c2.route_wakeup, &c->c2.timeval, ETT_DEFAULT))
647     {
648         check_add_routes(c);
649     }
650 
651     /* possibly exit due to --inactive */
652     if (c->options.inactivity_timeout
653         && event_timeout_trigger(&c->c2.inactivity_interval, &c->c2.timeval, ETT_DEFAULT))
654     {
655         check_inactivity_timeout(c);
656     }
657 
658     if (c->sig->signal_received)
659     {
660         return;
661     }
662 
663     /* restart if ping not received */
664     check_ping_restart(c);
665     if (c->sig->signal_received)
666     {
667         return;
668     }
669 
670     if (c->c2.tls_multi)
671     {
672         if (c->options.ce.connect_timeout
673             && event_timeout_trigger(&c->c2.server_poll_interval, &c->c2.timeval, ETT_DEFAULT))
674         {
675             check_server_poll_timeout(c);
676         }
677         if (c->sig->signal_received)
678         {
679             return;
680         }
681         if (event_timeout_trigger(&c->c2.scheduled_exit, &c->c2.timeval, ETT_DEFAULT))
682         {
683             check_scheduled_exit(c);
684         }
685         if (c->sig->signal_received)
686         {
687             return;
688         }
689     }
690 
691     /* Should we send an OCC_REQUEST message? */
692     check_send_occ_req(c);
693 
694     /* Should we send an MTU load test? */
695     check_send_occ_load_test(c);
696 
697     /* Should we send an OCC_EXIT message to remote? */
698     if (c->c2.explicit_exit_notification_time_wait)
699     {
700         process_explicit_exit_notification_timer_wakeup(c);
701     }
702 
703     /* Should we ping the remote? */
704     check_ping_send(c);
705 }
706 
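/*
 * Run the coarse timers only when their wakeup time has been reached.
 * While they run, c->c2.timeval is temporarily widened to BIG_TIMEOUT so
 * the individual checks can shrink it to the earliest coarse wakeup;
 * afterwards the previously computed timeout is restored if it was smaller.
 */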
707 static void
708 check_coarse_timers(struct context *c)
709 {
710     if (now < c->c2.coarse_timer_wakeup)
711     {
712         context_reschedule_sec(c, c->c2.coarse_timer_wakeup - now);
713         return;
714     }
715 
716     const struct timeval save = c->c2.timeval;
717     c->c2.timeval.tv_sec = BIG_TIMEOUT;
718     c->c2.timeval.tv_usec = 0;
719     process_coarse_timers(c);
720     c->c2.coarse_timer_wakeup = now + c->c2.timeval.tv_sec;
721 
722     dmsg(D_INTERVAL, "TIMER: coarse timer wakeup %" PRIi64 " seconds", (int64_t)c->c2.timeval.tv_sec);
723 
724     /* Is the coarse timeout NOT the earliest one? */
725     if (c->c2.timeval.tv_sec > save.tv_sec)
726     {
727         c->c2.timeval = save;
728     }
729 }
730 
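/*
 * Refresh the random timeout component: roughly every 10 seconds pick a
 * new random value of up to 0x3FFFF microseconds (~262 ms), which
 * check_timeout_random_component() adds to wakeups of one second or more,
 * presumably so that multiple instances do not wake up in lockstep.
 */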
731 static void
732 check_timeout_random_component_dowork(struct context *c)
733 {
734     const int update_interval = 10; /* seconds */
735     c->c2.update_timeout_random_component = now + update_interval;
736     c->c2.timeout_random_component.tv_usec = (time_t) get_random() & 0x0003FFFF;
737     c->c2.timeout_random_component.tv_sec = 0;
738 
739     dmsg(D_INTERVAL, "RANDOM USEC=%ld", (long) c->c2.timeout_random_component.tv_usec);
740 }
741 
742 static inline void
743 check_timeout_random_component(struct context *c)
744 {
745     if (now >= c->c2.update_timeout_random_component)
746     {
747         check_timeout_random_component_dowork(c);
748     }
749     if (c->c2.timeval.tv_sec >= 1)
750     {
751         tv_add(&c->c2.timeval, &c->c2.timeout_random_component);
752     }
753 }
754 
755 /*
756  * Handle addition and removal of the 10-byte Socks5 header
757  * in UDP packets.
758  */
759 
760 static inline void
761 socks_postprocess_incoming_link(struct context *c)
762 {
763     if (c->c2.link_socket->socks_proxy && c->c2.link_socket->info.proto == PROTO_UDP)
764     {
765         socks_process_incoming_udp(&c->c2.buf, &c->c2.from);
766     }
767 }
768 
769 static inline void
770 socks_preprocess_outgoing_link(struct context *c,
771                                struct link_socket_actual **to_addr,
772                                int *size_delta)
773 {
774     if (c->c2.link_socket->socks_proxy && c->c2.link_socket->info.proto == PROTO_UDP)
775     {
776         *size_delta += socks_process_outgoing_udp(&c->c2.to_link, c->c2.to_link_addr);
777         *to_addr = &c->c2.link_socket->socks_relay;
778     }
779 }
780 
781 /* undo effect of socks_preprocess_outgoing_link */
782 static inline void
783 link_socket_write_post_size_adjust(int *size,
784                                    int size_delta,
785                                    struct buffer *buf)
786 {
787     if (size_delta > 0 && *size > size_delta)
788     {
789         *size -= size_delta;
790         if (!buf_advance(buf, size_delta))
791         {
792             *size = 0;
793         }
794     }
795 }
796 
797 /*
798  * Output: c->c2.buf
799  */
800 
801 void
802 read_incoming_link(struct context *c)
803 {
804     /*
805      * Set up for recvfrom call to read datagram
806      * sent to our TCP/UDP port.
807      */
808     int status;
809 
810     /*ASSERT (!c->c2.to_tun.len);*/
811 
812     perf_push(PERF_READ_IN_LINK);
813 
814     c->c2.buf = c->c2.buffers->read_link_buf;
815     ASSERT(buf_init(&c->c2.buf, FRAME_HEADROOM_ADJ(&c->c2.frame, FRAME_HEADROOM_MARKER_READ_LINK)));
816 
817     status = link_socket_read(c->c2.link_socket,
818                               &c->c2.buf,
819                               &c->c2.from);
820 
821     if (socket_connection_reset(c->c2.link_socket, status))
822     {
823 #if PORT_SHARE
824         if (port_share && socket_foreign_protocol_detected(c->c2.link_socket))
825         {
826             const struct buffer *fbuf = socket_foreign_protocol_head(c->c2.link_socket);
827             const int sd = socket_foreign_protocol_sd(c->c2.link_socket);
828             port_share_redirect(port_share, fbuf, sd);
829             register_signal(c, SIGTERM, "port-share-redirect");
830         }
831         else
832 #endif
833         {
834             /* received a disconnect from a connection-oriented protocol */
835             if (event_timeout_defined(&c->c2.explicit_exit_notification_interval))
836             {
837                 msg(D_STREAM_ERRORS, "Connection reset during exit notification period, ignoring [%d]", status);
838                 management_sleep(1);
839             }
840             else
841             {
842                 register_signal(c, SIGUSR1, "connection-reset"); /* SOFT-SIGUSR1 -- TCP connection reset */
843                 msg(D_STREAM_ERRORS, "Connection reset, restarting [%d]", status);
844             }
845         }
846         perf_pop();
847         return;
848     }
849 
850     /* check recvfrom status */
851     check_status(status, "read", c->c2.link_socket, NULL);
852 
853     /* Remove socks header if applicable */
854     socks_postprocess_incoming_link(c);
855 
856     perf_pop();
857 }
858 
859 bool
860 process_incoming_link_part1(struct context *c, struct link_socket_info *lsi, bool floated)
861 {
862     struct gc_arena gc = gc_new();
863     bool decrypt_status = false;
864 
865     if (c->c2.buf.len > 0)
866     {
867         c->c2.link_read_bytes += c->c2.buf.len;
868         link_read_bytes_global += c->c2.buf.len;
869 #ifdef ENABLE_MEMSTATS
870         if (mmap_stats)
871         {
872             mmap_stats->link_read_bytes = link_read_bytes_global;
873         }
874 #endif
875         c->c2.original_recv_size = c->c2.buf.len;
876 #ifdef ENABLE_MANAGEMENT
877         if (management)
878         {
879             management_bytes_in(management, c->c2.buf.len);
880             management_bytes_server(management, &c->c2.link_read_bytes, &c->c2.link_write_bytes, &c->c2.mda_context);
881         }
882 #endif
883     }
884     else
885     {
886         c->c2.original_recv_size = 0;
887     }
888 
889 #ifdef ENABLE_DEBUG
890     /* take action to corrupt packet if we are in gremlin test mode */
891     if (c->options.gremlin)
892     {
893         if (!ask_gremlin(c->options.gremlin))
894         {
895             c->c2.buf.len = 0;
896         }
897         corrupt_gremlin(&c->c2.buf, c->options.gremlin);
898     }
899 #endif
900 
901     /* log incoming packet */
902 #ifdef LOG_RW
903     if (c->c2.log_rw && c->c2.buf.len > 0)
904     {
905         fprintf(stderr, "R");
906     }
907 #endif
908     msg(D_LINK_RW, "%s READ [%d] from %s: %s",
909         proto2ascii(lsi->proto, lsi->af, true),
910         BLEN(&c->c2.buf),
911         print_link_socket_actual(&c->c2.from, &gc),
912         PROTO_DUMP(&c->c2.buf, &gc));
913 
914     /*
915      * Good, non-zero length packet received.
916      * Commence multi-stage processing of packet,
917      * such as authenticate, decrypt, decompress.
918      * If any stage fails, it sets buf.len to 0 or -1,
919      * telling downstream stages to ignore the packet.
920      */
921     if (c->c2.buf.len > 0)
922     {
923         struct crypto_options *co = NULL;
924         const uint8_t *ad_start = NULL;
925         if (!link_socket_verify_incoming_addr(&c->c2.buf, lsi, &c->c2.from))
926         {
927             link_socket_bad_incoming_addr(&c->c2.buf, lsi, &c->c2.from);
928         }
929 
930         if (c->c2.tls_multi)
931         {
932             /*
933              * If tls_pre_decrypt returns true, it means the incoming
934              * packet was a good TLS control channel packet.  If so, TLS code
935              * will deal with the packet and set buf.len to 0 so downstream
936              * stages ignore it.
937              *
938              * If the packet is a data channel packet, tls_pre_decrypt
939              * will load crypto_options with the correct encryption key
940              * and return false.
941              */
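            /* Peek at the packet opcode (high bits of the first byte) before
             * tls_pre_decrypt() consumes the packet, so we can tell below
             * whether this was a method-2 HARD_RESET that should restore the
             * pre-NCP frame parameters. */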
942             uint8_t opcode = *BPTR(&c->c2.buf) >> P_OPCODE_SHIFT;
943             if (tls_pre_decrypt(c->c2.tls_multi, &c->c2.from, &c->c2.buf, &co,
944                                 floated, &ad_start))
945             {
946                 /* Restore pre-NCP frame parameters */
947                 if (is_hard_reset_method2(opcode))
948                 {
949                     c->c2.frame = c->c2.frame_initial;
950 #ifdef ENABLE_FRAGMENT
951                     c->c2.frame_fragment = c->c2.frame_fragment_initial;
952 #endif
953                 }
954 
955                 interval_action(&c->c2.tmp_int);
956 
957                 /* reset packet received timer if TLS packet */
958                 if (c->options.ping_rec_timeout)
959                 {
960                     event_timeout_reset(&c->c2.ping_rec_interval);
961                 }
962             }
963         }
964         else
965         {
966             co = &c->c2.crypto_options;
967         }
968 
969         /*
970          * Drop non-TLS packet if client-connect script/plugin and cipher selection
971          * have not yet succeeded. In non-TLS mode tls_multi is not defined
972          * and we always pass packets.
973          */
974         if (c->c2.tls_multi && c->c2.tls_multi->multi_state != CAS_SUCCEEDED)
975         {
976             c->c2.buf.len = 0;
977         }
978 
979         /* authenticate and decrypt the incoming packet */
980         decrypt_status = openvpn_decrypt(&c->c2.buf, c->c2.buffers->decrypt_buf,
981                                          co, &c->c2.frame, ad_start);
982 
983         if (!decrypt_status && link_socket_connection_oriented(c->c2.link_socket))
984         {
985             /* decryption errors are fatal in TCP mode */
986             register_signal(c, SIGUSR1, "decryption-error"); /* SOFT-SIGUSR1 -- decryption error in TCP mode */
987             msg(D_STREAM_ERRORS, "Fatal decryption error (process_incoming_link), restarting");
988         }
989     }
990     else
991     {
992         buf_reset(&c->c2.to_tun);
993     }
994     gc_free(&gc);
995 
996     return decrypt_status;
997 }
998 
999 void
1000 process_incoming_link_part2(struct context *c, struct link_socket_info *lsi, const uint8_t *orig_buf)
1001 {
1002     if (c->c2.buf.len > 0)
1003     {
1004 #ifdef ENABLE_FRAGMENT
1005         if (c->c2.fragment)
1006         {
1007             fragment_incoming(c->c2.fragment, &c->c2.buf, &c->c2.frame_fragment);
1008         }
1009 #endif
1010 
1011 #ifdef USE_COMP
1012         /* decompress the incoming packet */
1013         if (c->c2.comp_context)
1014         {
1015             (*c->c2.comp_context->alg.decompress)(&c->c2.buf, c->c2.buffers->decompress_buf, c->c2.comp_context, &c->c2.frame);
1016         }
1017 #endif
1018 
1019 #ifdef PACKET_TRUNCATION_CHECK
1020         /* if (c->c2.buf.len > 1) --c->c2.buf.len; */
1021         ipv4_packet_size_verify(BPTR(&c->c2.buf),
1022                                 BLEN(&c->c2.buf),
1023                                 TUNNEL_TYPE(c->c1.tuntap),
1024                                 "POST_DECRYPT",
1025                                 &c->c2.n_trunc_post_decrypt);
1026 #endif
1027 
1028         /*
1029          * Set our "official" outgoing address, since
1030          * if buf.len is non-zero, we know the packet
1031          * was authenticated.  In TLS mode we do nothing
1032          * because TLS mode takes care of source address
1033          * authentication.
1034          *
1035          * Also, update the persisted version of our packet-id.
1036          */
1037         if (!TLS_MODE(c) && c->c2.buf.len > 0)
1038         {
1039             link_socket_set_outgoing_addr(lsi, &c->c2.from, NULL, c->c2.es);
1040         }
1041 
1042         /* reset packet received timer */
1043         if (c->options.ping_rec_timeout && c->c2.buf.len > 0)
1044         {
1045             event_timeout_reset(&c->c2.ping_rec_interval);
1046         }
1047 
1048         /* increment authenticated receive byte count */
1049         if (c->c2.buf.len > 0)
1050         {
1051             c->c2.link_read_bytes_auth += c->c2.buf.len;
1052             c->c2.max_recv_size_local = max_int(c->c2.original_recv_size, c->c2.max_recv_size_local);
1053         }
1054 
1055         /* Did we just receive an openvpn ping packet? */
1056         if (is_ping_msg(&c->c2.buf))
1057         {
1058             dmsg(D_PING, "RECEIVED PING PACKET");
1059             c->c2.buf.len = 0; /* drop packet */
1060         }
1061 
1062         /* Did we just receive an OCC packet? */
1063         if (is_occ_msg(&c->c2.buf))
1064         {
1065             process_received_occ_msg(c);
1066         }
1067 
1068         buffer_turnover(orig_buf, &c->c2.to_tun, &c->c2.buf, &c->c2.buffers->read_link_buf);
1069 
1070         /* to_tun defined + unopened tuntap can cause deadlock */
1071         if (!tuntap_defined(c->c1.tuntap))
1072         {
1073             c->c2.to_tun.len = 0;
1074         }
1075     }
1076     else
1077     {
1078         buf_reset(&c->c2.to_tun);
1079     }
1080 }
1081 
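/*
 * Point-to-point path: read side of the link.  part1 authenticates and
 * decrypts the packet, part2 reassembles/decompresses it and queues the
 * result for the tun device in c->c2.to_tun.
 */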
1082 static void
1083 process_incoming_link(struct context *c)
1084 {
1085     perf_push(PERF_PROC_IN_LINK);
1086 
1087     struct link_socket_info *lsi = get_link_socket_info(c);
1088     const uint8_t *orig_buf = c->c2.buf.data;
1089 
1090     process_incoming_link_part1(c, lsi, false);
1091     process_incoming_link_part2(c, lsi, orig_buf);
1092 
1093     perf_pop();
1094 }
1095 
1096 /*
1097  * Output: c->c2.buf
1098  */
1099 
1100 void
1101 read_incoming_tun(struct context *c)
1102 {
1103     /*
1104      * Setup for read() call on TUN/TAP device.
1105      */
1106     /*ASSERT (!c->c2.to_link.len);*/
1107 
1108     perf_push(PERF_READ_IN_TUN);
1109 
1110     c->c2.buf = c->c2.buffers->read_tun_buf;
1111 
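    /* On Windows, read via the driver-specific buffered/Wintun paths;
     * elsewhere do a plain read() into a freshly initialized buffer. */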
1112 #ifdef _WIN32
1113     if (c->c1.tuntap->windows_driver == WINDOWS_DRIVER_WINTUN)
1114     {
1115         read_wintun(c->c1.tuntap, &c->c2.buf);
1116         if (c->c2.buf.len == -1)
1117         {
1118             register_signal(c, SIGHUP, "tun-abort");
1119             c->persist.restart_sleep_seconds = 1;
1120             msg(M_INFO, "Wintun read error, restarting");
1121             perf_pop();
1122             return;
1123         }
1124     }
1125     else
1126     {
1127         read_tun_buffered(c->c1.tuntap, &c->c2.buf);
1128     }
1129 #else  /* ifdef _WIN32 */
1130     ASSERT(buf_init(&c->c2.buf, FRAME_HEADROOM(&c->c2.frame)));
1131     ASSERT(buf_safe(&c->c2.buf, MAX_RW_SIZE_TUN(&c->c2.frame)));
1132     c->c2.buf.len = read_tun(c->c1.tuntap, BPTR(&c->c2.buf), MAX_RW_SIZE_TUN(&c->c2.frame));
1133 #endif /* ifdef _WIN32 */
1134 
1135 #ifdef PACKET_TRUNCATION_CHECK
1136     ipv4_packet_size_verify(BPTR(&c->c2.buf),
1137                             BLEN(&c->c2.buf),
1138                             TUNNEL_TYPE(c->c1.tuntap),
1139                             "READ_TUN",
1140                             &c->c2.n_trunc_tun_read);
1141 #endif
1142 
1143     /* Was TUN/TAP interface stopped? */
1144     if (tuntap_stop(c->c2.buf.len))
1145     {
1146         register_signal(c, SIGTERM, "tun-stop");
1147         msg(M_INFO, "TUN/TAP interface has been stopped, exiting");
1148         perf_pop();
1149         return;
1150     }
1151 
1152     /* Was TUN/TAP I/O operation aborted? */
1153     if (tuntap_abort(c->c2.buf.len))
1154     {
1155         register_signal(c, SIGHUP, "tun-abort");
1156         c->persist.restart_sleep_seconds = 10;
1157         msg(M_INFO, "TUN/TAP I/O operation aborted, restarting");
1158         perf_pop();
1159         return;
1160     }
1161 
1162     /* Check the status return from read() */
1163     check_status(c->c2.buf.len, "read from TUN/TAP", NULL, c->c1.tuntap);
1164 
1165     perf_pop();
1166 }
1167 
1168 /**
1169  * Drops UDP packets which the OS decided to route via the tun device.
1170  *
1171  * On Windows and OS X, when the network adapter is disabled or
1172  * disconnected, the platform starts to use tun as the external interface.
1173  * When a packet is sent to tun, it arrives at openvpn, gets encapsulated
1174  * and handed to the routing table, which sends it to tun again.
1175  */
1176 static void
1177 drop_if_recursive_routing(struct context *c, struct buffer *buf)
1178 {
1179     bool drop = false;
1180     struct openvpn_sockaddr tun_sa;
1181     int ip_hdr_offset = 0;
1182 
1183     if (c->c2.to_link_addr == NULL) /* no remote addr known */
1184     {
1185         return;
1186     }
1187 
1188     tun_sa = c->c2.to_link_addr->dest;
1189 
1190     int proto_ver = get_tun_ip_ver(TUNNEL_TYPE(c->c1.tuntap), &c->c2.buf, &ip_hdr_offset);
1191 
1192     if (proto_ver == 4)
1193     {
1194         const struct openvpn_iphdr *pip;
1195 
1196         /* make sure we got whole IP header */
1197         if (BLEN(buf) < ((int) sizeof(struct openvpn_iphdr) + ip_hdr_offset))
1198         {
1199             return;
1200         }
1201 
1202         /* skip ipv4 packets for ipv6 tun */
1203         if (tun_sa.addr.sa.sa_family != AF_INET)
1204         {
1205             return;
1206         }
1207 
1208         pip = (struct openvpn_iphdr *) (BPTR(buf) + ip_hdr_offset);
1209 
1210         /* drop packets with same dest addr as gateway */
1211         if (tun_sa.addr.in4.sin_addr.s_addr == pip->daddr)
1212         {
1213             drop = true;
1214         }
1215     }
1216     else if (proto_ver == 6)
1217     {
1218         const struct openvpn_ipv6hdr *pip6;
1219 
1220         /* make sure we got whole IPv6 header */
1221         if (BLEN(buf) < ((int) sizeof(struct openvpn_ipv6hdr) + ip_hdr_offset))
1222         {
1223             return;
1224         }
1225 
1226         /* skip ipv6 packets for ipv4 tun */
1227         if (tun_sa.addr.sa.sa_family != AF_INET6)
1228         {
1229             return;
1230         }
1231 
1232         /* drop packets with same dest addr as gateway */
1233         pip6 = (struct openvpn_ipv6hdr *) (BPTR(buf) + ip_hdr_offset);
1234         if (IN6_ARE_ADDR_EQUAL(&tun_sa.addr.in6.sin6_addr, &pip6->daddr))
1235         {
1236             drop = true;
1237         }
1238     }
1239 
1240     if (drop)
1241     {
1242         struct gc_arena gc = gc_new();
1243 
1244         c->c2.buf.len = 0;
1245 
1246         msg(D_LOW, "Recursive routing detected, drop tun packet to %s",
1247             print_link_socket_actual(c->c2.to_link_addr, &gc));
1248         gc_free(&gc);
1249     }
1250 }
1251 
1252 /*
1253  * Input:  c->c2.buf
1254  * Output: c->c2.to_link
1255  */
1256 
1257 void
1258 process_incoming_tun(struct context *c)
1259 {
1260     struct gc_arena gc = gc_new();
1261 
1262     perf_push(PERF_PROC_IN_TUN);
1263 
1264     if (c->c2.buf.len > 0)
1265     {
1266         c->c2.tun_read_bytes += c->c2.buf.len;
1267     }
1268 
1269 #ifdef LOG_RW
1270     if (c->c2.log_rw && c->c2.buf.len > 0)
1271     {
1272         fprintf(stderr, "r");
1273     }
1274 #endif
1275 
1276     /* Show packet content */
1277     dmsg(D_TUN_RW, "TUN READ [%d]", BLEN(&c->c2.buf));
1278 
1279     if (c->c2.buf.len > 0)
1280     {
1281         if ((c->options.mode == MODE_POINT_TO_POINT) && (!c->options.allow_recursive_routing))
1282         {
1283             drop_if_recursive_routing(c, &c->c2.buf);
1284         }
1285         /*
1286          * The --passtos and --mssfix options require
1287          * us to examine the IP header (IPv4 or IPv6).
1288          */
1289         unsigned int flags = PIPV4_PASSTOS | PIP_MSSFIX | PIPV4_CLIENT_NAT
1290                              | PIPV6_IMCP_NOHOST_CLIENT;
1291         process_ip_header(c, flags, &c->c2.buf);
1292 
1293 #ifdef PACKET_TRUNCATION_CHECK
1294         /* if (c->c2.buf.len > 1) --c->c2.buf.len; */
1295         ipv4_packet_size_verify(BPTR(&c->c2.buf),
1296                                 BLEN(&c->c2.buf),
1297                                 TUNNEL_TYPE(c->c1.tuntap),
1298                                 "PRE_ENCRYPT",
1299                                 &c->c2.n_trunc_pre_encrypt);
1300 #endif
1301 
1302     }
1303     if (c->c2.buf.len > 0)
1304     {
1305         encrypt_sign(c, true);
1306     }
1307     else
1308     {
1309         buf_reset(&c->c2.to_link);
1310     }
1311     perf_pop();
1312     gc_free(&gc);
1313 }
1314 
1315 /**
1316  * Forges an IPv6 ICMP packet with a "no route to host" error code from the
1317  * IPv6 packet in buf and sends it directly back to the client via the tun
1318  * device when used on a client and via the link if used on the server.
1319  *
1320  * @param buf       - The buf containing the packet for which the icmp6
1321  *                    unreachable should be constructed.
1322  *
1323  * @param client    - determines whether to send the packet back via tun or link
1324  */
1325 void
1326 ipv6_send_icmp_unreachable(struct context *c, struct buffer *buf, bool client)
1327 {
1328 #define MAX_ICMPV6LEN 1280
1329     struct openvpn_icmp6hdr icmp6out;
1330     CLEAR(icmp6out);
1331 
1332     /*
1333      * Get a buffer pointing at the IP packet; is_ipv6() automatically
1334      * advances the buffer past any Ethernet header to the IP packet itself
1335      */
1336     struct buffer inputipbuf = *buf;
1337 
1338     is_ipv6(TUNNEL_TYPE(c->c1.tuntap), &inputipbuf);
1339 
1340     if (BLEN(&inputipbuf) < (int)sizeof(struct openvpn_ipv6hdr))
1341     {
1342         return;
1343     }
1344 
1345     const struct openvpn_ipv6hdr *pip6 = (struct openvpn_ipv6hdr *)BPTR(&inputipbuf);
1346 
1347     /* Copy version, traffic class, flow label from input packet */
1348     struct openvpn_ipv6hdr pip6out = *pip6;
1349 
1350     pip6out.version_prio = pip6->version_prio;
1351     pip6out.daddr = pip6->saddr;
1352 
1353     /*
1354      * Use the IPv6 remote address if we have one, otherwise use a fake one.
1355      * Using the remote address is preferred since it makes debugging and
1356      * understanding where the ICMPv6 error originates easier.
1357      */
1358     if (c->options.ifconfig_ipv6_remote)
1359     {
1360         inet_pton(AF_INET6, c->options.ifconfig_ipv6_remote, &pip6out.saddr);
1361     }
1362     else
1363     {
1364         inet_pton(AF_INET6, "fe80::7", &pip6out.saddr);
1365     }
1366 
1367     pip6out.nexthdr = OPENVPN_IPPROTO_ICMPV6;
1368 
1369     /*
1370      * The ICMPv6 unreachable code worked best in my (arne) tests with Windows,
1371      * Linux and Android. Windows did not like the administratively prohibited
1372      * return code (no fast fail)
1373      */
1374     icmp6out.icmp6_type = OPENVPN_ICMP6_DESTINATION_UNREACHABLE;
1375     icmp6out.icmp6_code = OPENVPN_ICMP6_DU_NOROUTE;
1376 
1377     int icmpheader_len = sizeof(struct openvpn_ipv6hdr)
1378                          + sizeof(struct openvpn_icmp6hdr);
1379     int totalheader_len = icmpheader_len;
1380 
1381     if (TUNNEL_TYPE(c->c1.tuntap) == DEV_TYPE_TAP)
1382     {
1383         totalheader_len += sizeof(struct openvpn_ethhdr);
1384     }
1385 
1386     /*
1387      * Calculate the payload size. The standard requires the resulting
1388      * frame to be <= 1280 bytes while carrying as much of the original
1389      * packet as possible.
1390      */
1391     int max_payload_size = min_int(MAX_ICMPV6LEN,
1392                                    TUN_MTU_SIZE(&c->c2.frame) - icmpheader_len);
1393     int payload_len = min_int(max_payload_size, BLEN(&inputipbuf));
1394 
1395     pip6out.payload_len = htons(sizeof(struct openvpn_icmp6hdr) + payload_len);
1396 
1397     /* Construct the packet as outgoing packet back to the client */
1398     struct buffer *outbuf;
1399     if (client)
1400     {
1401         c->c2.to_tun = c->c2.buffers->aux_buf;
1402         outbuf = &(c->c2.to_tun);
1403     }
1404     else
1405     {
1406         c->c2.to_link = c->c2.buffers->aux_buf;
1407         outbuf = &(c->c2.to_link);
1408     }
1409     ASSERT(buf_init(outbuf, totalheader_len));
1410 
1411     /* Fill the end of the buffer with original packet */
1412     ASSERT(buf_safe(outbuf, payload_len));
1413     ASSERT(buf_copy_n(outbuf, &inputipbuf, payload_len));
1414 
1415     /* ICMP Header, copy into buffer to allow checksum calculation */
1416     ASSERT(buf_write_prepend(outbuf, &icmp6out, sizeof(struct openvpn_icmp6hdr)));
1417 
1418     /* Calculate checksum over the packet and write to header */
1419 
1420     uint16_t new_csum = ip_checksum(AF_INET6, BPTR(outbuf), BLEN(outbuf),
1421                                     (const uint8_t *)&pip6out.saddr,
1422                                     (uint8_t *)&pip6out.daddr, OPENVPN_IPPROTO_ICMPV6);
1423     ((struct openvpn_icmp6hdr *) BPTR(outbuf))->icmp6_cksum = htons(new_csum);
1424 
1425 
1426     /* IPv6 Header */
1427     ASSERT(buf_write_prepend(outbuf, &pip6out, sizeof(struct openvpn_ipv6hdr)));
1428 
1429     /*
1430      * In TAP mode we also need to create an Ethernet header.
1431      */
1432     if (TUNNEL_TYPE(c->c1.tuntap) == DEV_TYPE_TAP)
1433     {
1434         if (BLEN(buf) < (int)sizeof(struct openvpn_ethhdr))
1435         {
1436             return;
1437         }
1438 
1439         const struct openvpn_ethhdr *orig_ethhdr = (struct openvpn_ethhdr *) BPTR(buf);
1440 
1441         /* Copy frametype and reverse source/destination for the response */
1442         struct openvpn_ethhdr ethhdr;
1443         memcpy(ethhdr.source, orig_ethhdr->dest, OPENVPN_ETH_ALEN);
1444         memcpy(ethhdr.dest, orig_ethhdr->source, OPENVPN_ETH_ALEN);
1445         ethhdr.proto = htons(OPENVPN_ETH_P_IPV6);
1446         ASSERT(buf_write_prepend(outbuf, &ethhdr, sizeof(struct openvpn_ethhdr)));
1447     }
1448 #undef MAX_ICMPV6LEN
1449 }
1450 
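/*
 * Examine and possibly modify the IP header of a packet travelling
 * between tun and link: TOS pass-through (--passtos), TCP MSS clamping
 * (--mssfix), client-side NAT, DHCP router extraction and --block-ipv6
 * ICMPv6 handling, as selected by 'flags' and the corresponding options.
 */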
1451 void
1452 process_ip_header(struct context *c, unsigned int flags, struct buffer *buf)
1453 {
1454     if (!c->options.ce.mssfix)
1455     {
1456         flags &= ~PIP_MSSFIX;
1457     }
1458 #if PASSTOS_CAPABILITY
1459     if (!c->options.passtos)
1460     {
1461         flags &= ~PIPV4_PASSTOS;
1462     }
1463 #endif
1464     if (!c->options.client_nat)
1465     {
1466         flags &= ~PIPV4_CLIENT_NAT;
1467     }
1468     if (!c->options.route_gateway_via_dhcp)
1469     {
1470         flags &= ~PIPV4_EXTRACT_DHCP_ROUTER;
1471     }
1472     if (!c->options.block_ipv6)
1473     {
1474         flags &= ~(PIPV6_IMCP_NOHOST_CLIENT | PIPV6_IMCP_NOHOST_SERVER);
1475     }
1476 
1477     if (buf->len > 0)
1478     {
1479         /*
1480          * The --passtos and --mssfix options require
1481          * us to examine the IPv4 header.
1482          */
1483 
1484         if (flags & (PIP_MSSFIX
1485 #if PASSTOS_CAPABILITY
1486                      | PIPV4_PASSTOS
1487 #endif
1488                      | PIPV4_CLIENT_NAT
1489                      ))
1490         {
1491             struct buffer ipbuf = *buf;
1492             if (is_ipv4(TUNNEL_TYPE(c->c1.tuntap), &ipbuf))
1493             {
1494 #if PASSTOS_CAPABILITY
1495                 /* extract TOS from IP header */
1496                 if (flags & PIPV4_PASSTOS)
1497                 {
1498                     link_socket_extract_tos(c->c2.link_socket, &ipbuf);
1499                 }
1500 #endif
1501 
1502                 /* possibly alter the TCP MSS */
1503                 if (flags & PIP_MSSFIX)
1504                 {
1505                     mss_fixup_ipv4(&ipbuf, MTU_TO_MSS(TUN_MTU_SIZE_DYNAMIC(&c->c2.frame)));
1506                 }
1507 
1508                 /* possibly do NAT on packet */
1509                 if ((flags & PIPV4_CLIENT_NAT) && c->options.client_nat)
1510                 {
1511                     const int direction = (flags & PIP_OUTGOING) ? CN_INCOMING : CN_OUTGOING;
1512                     client_nat_transform(c->options.client_nat, &ipbuf, direction);
1513                 }
1514                 /* possibly extract a DHCP router message */
1515                 if (flags & PIPV4_EXTRACT_DHCP_ROUTER)
1516                 {
1517                     const in_addr_t dhcp_router = dhcp_extract_router_msg(&ipbuf);
1518                     if (dhcp_router)
1519                     {
1520                         route_list_add_vpn_gateway(c->c1.route_list, c->c2.es, dhcp_router);
1521                     }
1522                 }
1523             }
1524             else if (is_ipv6(TUNNEL_TYPE(c->c1.tuntap), &ipbuf))
1525             {
1526                 /* possibly alter the TCP MSS */
1527                 if (flags & PIP_MSSFIX)
1528                 {
1529                     mss_fixup_ipv6(&ipbuf,
1530                                    MTU_TO_MSS(TUN_MTU_SIZE_DYNAMIC(&c->c2.frame)));
1531                 }
1532                 if (!(flags & PIP_OUTGOING) && (flags
1533                                                 &(PIPV6_IMCP_NOHOST_CLIENT | PIPV6_IMCP_NOHOST_SERVER)))
1534                 {
1535                     ipv6_send_icmp_unreachable(c, buf,
1536                                                (bool)(flags & PIPV6_IMCP_NOHOST_CLIENT));
1537                     /* Drop the IPv6 packet */
1538                     buf->len = 0;
1539                 }
1540 
1541             }
1542         }
1543     }
1544 }
1545 
1546 /*
1547  * Input: c->c2.to_link
1548  */
1549 
1550 void
1551 process_outgoing_link(struct context *c)
1552 {
1553     struct gc_arena gc = gc_new();
1554     int error_code = 0;
1555 
1556     perf_push(PERF_PROC_OUT_LINK);
1557 
1558     if (c->c2.to_link.len > 0 && c->c2.to_link.len <= EXPANDED_SIZE(&c->c2.frame))
1559     {
1560         /*
1561          * Setup for call to send/sendto which will send
1562          * packet to remote over the TCP/UDP port.
1563          */
1564         int size = 0;
1565         ASSERT(link_socket_actual_defined(c->c2.to_link_addr));
1566 
1567 #ifdef ENABLE_DEBUG
1568         /* In gremlin-test mode, we may choose to drop this packet */
1569         if (!c->options.gremlin || ask_gremlin(c->options.gremlin))
1570 #endif
1571         {
1572             /*
1573              * Let the traffic shaper know how many bytes
1574              * we wrote.
1575              */
1576             if (c->options.shaper)
1577             {
1578                 shaper_wrote_bytes(&c->c2.shaper, BLEN(&c->c2.to_link)
1579                                    + datagram_overhead(c->options.ce.proto));
1580             }
1581 
1582             /*
1583              * Let the pinger know that we sent a packet.
1584              */
1585             if (c->options.ping_send_timeout)
1586             {
1587                 event_timeout_reset(&c->c2.ping_send_interval);
1588             }
1589 
1590 #if PASSTOS_CAPABILITY
1591             /* Set TOS */
1592             link_socket_set_tos(c->c2.link_socket);
1593 #endif
1594 
1595             /* Log packet send */
1596 #ifdef LOG_RW
1597             if (c->c2.log_rw)
1598             {
1599                 fprintf(stderr, "W");
1600             }
1601 #endif
1602             msg(D_LINK_RW, "%s WRITE [%d] to %s: %s",
1603                 proto2ascii(c->c2.link_socket->info.proto, c->c2.link_socket->info.af, true),
1604                 BLEN(&c->c2.to_link),
1605                 print_link_socket_actual(c->c2.to_link_addr, &gc),
1606                 PROTO_DUMP(&c->c2.to_link, &gc));
1607 
1608             /* Packet send is complicated by possible Socks5 usage */
1609             {
1610                 struct link_socket_actual *to_addr = c->c2.to_link_addr;
1611                 int size_delta = 0;
1612 
1613                 /* If Socks5 over UDP, prepend header */
1614                 socks_preprocess_outgoing_link(c, &to_addr, &size_delta);
1615 
1616                 /* Send packet */
1617                 size = link_socket_write(c->c2.link_socket,
1618                                          &c->c2.to_link,
1619                                          to_addr);
1620 
1621                 /* Undo effect of prepend */
1622                 link_socket_write_post_size_adjust(&size, size_delta, &c->c2.to_link);
1623             }
1624 
1625             if (size > 0)
1626             {
1627                 c->c2.max_send_size_local = max_int(size, c->c2.max_send_size_local);
1628                 c->c2.link_write_bytes += size;
1629                 link_write_bytes_global += size;
1630 #ifdef ENABLE_MEMSTATS
1631                 if (mmap_stats)
1632                 {
1633                     mmap_stats->link_write_bytes = link_write_bytes_global;
1634                 }
1635 #endif
1636 #ifdef ENABLE_MANAGEMENT
1637                 if (management)
1638                 {
1639                     management_bytes_out(management, size);
1640                     management_bytes_server(management, &c->c2.link_read_bytes, &c->c2.link_write_bytes, &c->c2.mda_context);
1641                 }
1642 #endif
1643             }
1644         }
1645 
1646         /* Check return status */
1647         error_code = openvpn_errno();
1648         check_status(size, "write", c->c2.link_socket, NULL);
1649 
1650         if (size > 0)
1651         {
1652             /* Did we write a different size packet than we intended? */
1653             if (size != BLEN(&c->c2.to_link))
1654             {
1655                 msg(D_LINK_ERRORS,
1656                     "TCP/UDP packet was truncated/expanded on write to %s (tried=%d,actual=%d)",
1657                     print_link_socket_actual(c->c2.to_link_addr, &gc),
1658                     BLEN(&c->c2.to_link),
1659                     size);
1660             }
1661         }
1662 
1663         /* if not a ping/control message, indicate activity regarding --inactive parameter */
1664         if (c->c2.buf.len > 0)
1665         {
1666             register_activity(c, size);
1667         }
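        /* register_activity() credits these bytes toward the --inactive
         * check; internally generated ping/control packets (buf.len <= 0
         * here) intentionally do not count as activity. */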
1668 
1669         /* if the network is unreachable while still in the "connecting" state, switch to the next host */
1670         if (size < 0 && ENETUNREACH == error_code && c->c2.tls_multi
1671             && !tls_initial_packet_received(c->c2.tls_multi) && c->options.mode == MODE_POINT_TO_POINT)
1672         {
1673             msg(M_INFO, "Network unreachable, restarting");
1674             register_signal(c, SIGUSR1, "network-unreachable");
1675         }
1676     }
1677     else
1678     {
1679         if (c->c2.to_link.len > 0)
1680         {
1681             msg(D_LINK_ERRORS, "TCP/UDP packet too large on write to %s (tried=%d,max=%d)",
1682                 print_link_socket_actual(c->c2.to_link_addr, &gc),
1683                 c->c2.to_link.len,
1684                 EXPANDED_SIZE(&c->c2.frame));
1685         }
1686     }
1687 
1688     buf_reset(&c->c2.to_link);
1689 
1690     perf_pop();
1691     gc_free(&gc);
1692 }
1693 
1694 /*
1695  * Input: c->c2.to_tun
1696  */
1697 
1698 void
1699 process_outgoing_tun(struct context *c)
1700 {
1701     struct gc_arena gc = gc_new();
1702 
1703     /*
1704      * Set up for write() call to TUN/TAP
1705      * device.
1706      */
1707     if (c->c2.to_tun.len <= 0)
1708     {
1709         return;
1710     }
1711 
1712     perf_push(PERF_PROC_OUT_TUN);
1713 
1714     /*
1715      * The --mssfix option requires
1716      * us to examine the IP header (IPv4 or IPv6).
1717      */
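    /* Besides MSS clamping, the flags below also request extraction of the
     * router address from DHCP replies (used with --route-gateway dhcp in
     * TAP mode) and --client-nat address translation; PIP_OUTGOING marks
     * the direction (here: towards the TUN/TAP device). */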
1718     process_ip_header(c,
1719                       PIP_MSSFIX | PIPV4_EXTRACT_DHCP_ROUTER | PIPV4_CLIENT_NAT | PIP_OUTGOING,
1720                       &c->c2.to_tun);
1721 
1722     if (c->c2.to_tun.len <= MAX_RW_SIZE_TUN(&c->c2.frame))
1723     {
1724         /*
1725          * Write to TUN/TAP device.
1726          */
1727         int size;
1728 
1729 #ifdef LOG_RW
1730         if (c->c2.log_rw)
1731         {
1732             fprintf(stderr, "w");
1733         }
1734 #endif
1735         dmsg(D_TUN_RW, "TUN WRITE [%d]", BLEN(&c->c2.to_tun));
1736 
1737 #ifdef PACKET_TRUNCATION_CHECK
1738         ipv4_packet_size_verify(BPTR(&c->c2.to_tun),
1739                                 BLEN(&c->c2.to_tun),
1740                                 TUNNEL_TYPE(c->c1.tuntap),
1741                                 "WRITE_TUN",
1742                                 &c->c2.n_trunc_tun_write);
1743 #endif
1744 
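        /* On Windows the write goes through the buffered overlapped-I/O
         * path; other platforms write directly to the TUN/TAP descriptor. */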
1745 #ifdef _WIN32
1746         size = write_tun_buffered(c->c1.tuntap, &c->c2.to_tun);
1747 #else
1748         size = write_tun(c->c1.tuntap, BPTR(&c->c2.to_tun), BLEN(&c->c2.to_tun));
1749 #endif
1750 
1751         if (size > 0)
1752         {
1753             c->c2.tun_write_bytes += size;
1754         }
1755         check_status(size, "write to TUN/TAP", NULL, c->c1.tuntap);
1756 
1757         /* check written packet size */
1758         if (size > 0)
1759         {
1760             /* Did we write a different size packet than we intended? */
1761             if (size != BLEN(&c->c2.to_tun))
1762             {
1763                 msg(D_LINK_ERRORS,
1764                     "TUN/TAP packet was destructively fragmented on write to %s (tried=%d,actual=%d)",
1765                     c->c1.tuntap->actual_name,
1766                     BLEN(&c->c2.to_tun),
1767                     size);
1768             }
1769 
1770             /* indicate activity regarding --inactive parameter */
1771             register_activity(c, size);
1772         }
1773     }
1774     else
1775     {
1776         /*
1777          * This should never happen, probably indicates some kind
1778          * of MTU mismatch.
1779          */
1780         msg(D_LINK_ERRORS, "tun packet too large on write (tried=%d,max=%d)",
1781             c->c2.to_tun.len,
1782             MAX_RW_SIZE_TUN(&c->c2.frame));
1783     }
1784 
1785     buf_reset(&c->c2.to_tun);
1786 
1787     perf_pop();
1788     gc_free(&gc);
1789 }
1790 
1791 void
1792 pre_select(struct context *c)
1793 {
1794     /* make sure current time (now) is updated on function entry */
1795 
1796     /*
1797      * Start with an effectively infinite timeout, then let it
1798      * reduce to a timeout that reflects the component which
1799      * needs the earliest service.
1800      */
1801     c->c2.timeval.tv_sec = BIG_TIMEOUT;
1802     c->c2.timeval.tv_usec = 0;
1803 
1804 #if defined(_WIN32)
1805     if (check_debug_level(D_TAP_WIN_DEBUG))
1806     {
1807         c->c2.timeval.tv_sec = 1;
1808         if (tuntap_defined(c->c1.tuntap))
1809         {
1810             tun_show_debug(c->c1.tuntap);
1811         }
1812     }
1813 #endif
1814 
1815     /* check coarse timers */
1816     check_coarse_timers(c);
1817     if (c->sig->signal_received)
1818     {
1819         return;
1820     }
1821 
1822     /* If tls is enabled, do tls control channel packet processing. */
1823     if (c->c2.tls_multi)
1824     {
1825         check_tls(c);
1826     }
1827 
1828     /* In certain cases, TLS errors will require a restart */
1829     check_tls_errors(c);
1830     if (c->sig->signal_received)
1831     {
1832         return;
1833     }
1834 
1835     /* check for incoming control messages on the control channel like
1836      * push request/reply, or authentication failure and 2FA messages */
1837     if (tls_test_payload_len(c->c2.tls_multi) > 0)
1838     {
1839         check_incoming_control_channel(c);
1840     }
1841 
1842     /* Should we send an OCC message? */
1843     check_send_occ_msg(c);
1844 
1845 #ifdef ENABLE_FRAGMENT
1846     /* Should we deliver a datagram fragment to remote? */
1847     if (c->c2.fragment)
1848     {
1849         check_fragment(c);
1850     }
1851 #endif
1852 
1853     /* Update random component of timeout */
1854     check_timeout_random_component(c);
1855 }
1856 
1857 /*
1858  * Wait for I/O events.  Used for both TCP & UDP sockets
1859  * in point-to-point mode and for UDP sockets in
1860  * point-to-multipoint mode.
1861  */
1862 
1863 void
1864 io_wait_dowork(struct context *c, const unsigned int flags)
1865 {
1866     unsigned int socket = 0;
1867     unsigned int tuntap = 0;
1868     struct event_set_return esr[4];
1869 
1870     /* These shift values depend on EVENT_READ (=1) and EVENT_WRITE (=2):
1871      * the rwflags returned by event_wait() are shifted left by them to
1872      * build c->c2.event_set_status. Check openvpn.h for more details. */
1873     static int socket_shift = SOCKET_SHIFT;
1874     static int tun_shift = TUN_SHIFT;
1875     static int err_shift = ERR_SHIFT;
1876 #ifdef ENABLE_MANAGEMENT
1877     static int management_shift = MANAGEMENT_SHIFT;
1878 #endif
1879 #ifdef ENABLE_ASYNC_PUSH
1880     static int file_shift = FILE_SHIFT;
1881 #endif
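    /*
     * Worked example (assuming SOCKET_SHIFT == 0 and TUN_SHIFT == 2 as
     * defined in openvpn.h): if event_wait() reports EVENT_READ (1) on the
     * link socket and EVENT_WRITE (2) on the tun device, the result loop
     * below computes
     *     event_set_status = (1 << 0) | (2 << 2) = SOCKET_READ | TUN_WRITE
     * which process_io() then decodes into the matching read/write actions.
     */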
1882 
1883     /*
1884      * Decide what kind of events we want to wait for.
1885      */
1886     event_reset(c->c2.event_set);
1887 
1888     /*
1889      * On win32 we use the keyboard or an event object as a source
1890      * of asynchronous signals.
1891      */
1892     if (flags & IOW_WAIT_SIGNAL)
1893     {
1894         wait_signal(c->c2.event_set, (void *)&err_shift);
1895     }
1896 
1897     /*
1898      * If outgoing data (for TCP/UDP port) pending, wait for ready-to-send
1899      * status from TCP/UDP port. Otherwise, wait for incoming data on
1900      * TUN/TAP device.
1901      */
1902     if (flags & IOW_TO_LINK)
1903     {
1904         if (flags & IOW_SHAPER)
1905         {
1906             /*
1907              * If sending this packet would put us over our traffic shaping
1908              * quota, don't send -- instead compute the delay we must wait
1909              * until it will be OK to send the packet.
1910              */
1911             int delay = 0;
1912 
1913             /* set traffic shaping delay in microseconds */
1914             if (c->options.shaper)
1915             {
1916                 delay = max_int(delay, shaper_delay(&c->c2.shaper));
1917             }
1918 
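            /* Delays below one millisecond are ignored and the packet is
             * sent right away; longer delays postpone the send by arming
             * the wakeup timeout instead of waiting for write readiness. */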
1919             if (delay < 1000)
1920             {
1921                 socket |= EVENT_WRITE;
1922             }
1923             else
1924             {
1925                 shaper_soonest_event(&c->c2.timeval, delay);
1926             }
1927         }
1928         else
1929         {
1930             socket |= EVENT_WRITE;
1931         }
1932     }
1933     else if (!((flags & IOW_FRAG) && TO_LINK_FRAG(c)))
1934     {
1935         if (flags & IOW_READ_TUN)
1936         {
1937             tuntap |= EVENT_READ;
1938         }
1939     }
1940 
1941     /*
1942      * If outgoing data (for TUN/TAP device) pending, wait for ready-to-send status
1943      * from device.  Otherwise, wait for incoming data on TCP/UDP port.
1944      */
1945     if (flags & IOW_TO_TUN)
1946     {
1947         tuntap |= EVENT_WRITE;
1948     }
1949     else
1950     {
1951         if (flags & IOW_READ_LINK)
1952         {
1953             socket |= EVENT_READ;
1954         }
1955     }
1956 
1957     /*
1958      * outgoing bcast buffer waiting to be sent?
1959      */
1960     if (flags & IOW_MBUF)
1961     {
1962         socket |= EVENT_WRITE;
1963     }
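    /* IOW_MBUF means the point-to-multipoint code has queued broadcast or
     * multicast packets; they also leave via the link socket, so we need
     * write readiness just as for regular to_link data. */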
1964 
1965     /*
1966      * Force wait on TUN input, even if also waiting on TCP/UDP output
1967      */
1968     if (flags & IOW_READ_TUN_FORCE)
1969     {
1970         tuntap |= EVENT_READ;
1971     }
1972 
1973 #ifdef _WIN32
1974     if (tuntap_is_wintun(c->c1.tuntap))
1975     {
1976         /*
1977          * With wintun we are only interested in the read event: the ring
1978          * buffer is always ready for write, so we never wait on it.
1979          */
1980         tuntap = EVENT_READ;
1981     }
1982 #endif
1983 
1984     /*
1985      * Configure event wait based on socket, tuntap flags.
1986      */
1987     socket_set(c->c2.link_socket, c->c2.event_set, socket, (void *)&socket_shift, NULL);
1988     tun_set(c->c1.tuntap, c->c2.event_set, tuntap, (void *)&tun_shift, NULL);
1989 
1990 #ifdef ENABLE_MANAGEMENT
1991     if (management)
1992     {
1993         management_socket_set(management, c->c2.event_set, (void *)&management_shift, NULL);
1994     }
1995 #endif
1996 
1997 #ifdef ENABLE_ASYNC_PUSH
1998     /* arm inotify watcher */
1999     if (c->options.mode == MODE_SERVER)
2000     {
2001         event_ctl(c->c2.event_set, c->c2.inotify_fd, EVENT_READ, (void *)&file_shift);
2002     }
2003 #endif
2004 
2005     /*
2006      * Possible scenarios:
2007      *  (1) tcp/udp port has data available to read
2008      *  (2) tcp/udp port is ready to accept more data to write
2009      *  (3) tun dev has data available to read
2010      *  (4) tun dev is ready to accept more data to write
2011      *  (5) we received a signal (handler sets signal_received)
2012      *  (6) timeout (tv) expired
2013      */
2014 
2015     c->c2.event_set_status = ES_ERROR;
2016 
2017     if (!c->sig->signal_received)
2018     {
2019         if (!(flags & IOW_CHECK_RESIDUAL) || !socket_read_residual(c->c2.link_socket))
2020         {
2021             int status;
2022 
2023 #ifdef ENABLE_DEBUG
2024             if (check_debug_level(D_EVENT_WAIT))
2025             {
2026                 show_wait_status(c);
2027             }
2028 #endif
2029 
2030             /*
2031              * Wait for something to happen.
2032              */
2033             status = event_wait(c->c2.event_set, &c->c2.timeval, esr, SIZE(esr));
2034 
2035             check_status(status, "event_wait", NULL, NULL);
2036 
2037             if (status > 0)
2038             {
2039                 int i;
2040                 c->c2.event_set_status = 0;
2041                 for (i = 0; i < status; ++i)
2042                 {
2043                     const struct event_set_return *e = &esr[i];
2044                     c->c2.event_set_status |= ((e->rwflags & 3) << *((int *)e->arg));
2045                 }
2046             }
2047             else if (status == 0)
2048             {
2049                 c->c2.event_set_status = ES_TIMEOUT;
2050             }
2051         }
2052         else
2053         {
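            /* Residual data (e.g. another packet already buffered from a
             * previous TCP stream read) lets us skip the event wait and
             * behave as if the socket were immediately readable. */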
2054             c->c2.event_set_status = SOCKET_READ;
2055         }
2056     }
2057 
2058     /* 'now' should always be a reasonably up-to-date timestamp */
2059     update_time();
2060 
2061     /* set signal_received if a signal was received */
2062     if (c->c2.event_set_status & ES_ERROR)
2063     {
2064         get_signal(&c->sig->signal_received);
2065     }
2066 
2067     dmsg(D_EVENT_WAIT, "I/O WAIT status=0x%04x", c->c2.event_set_status);
2068 }
2069 
2070 void
2071 process_io(struct context *c)
2072 {
2073     const unsigned int status = c->c2.event_set_status;
2074 
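    /* Note the else-if chain below: at most one I/O direction is serviced
     * per call, with pending writes (link first, then tun) taking priority
     * over reads so that buffered outgoing data drains before new data is
     * accepted. */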
2075 #ifdef ENABLE_MANAGEMENT
2076     if (status & (MANAGEMENT_READ|MANAGEMENT_WRITE))
2077     {
2078         ASSERT(management);
2079         management_io(management);
2080     }
2081 #endif
2082 
2083     /* TCP/UDP port ready to accept write */
2084     if (status & SOCKET_WRITE)
2085     {
2086         process_outgoing_link(c);
2087     }
2088     /* TUN device ready to accept write */
2089     else if (status & TUN_WRITE)
2090     {
2091         process_outgoing_tun(c);
2092     }
2093     /* Incoming data on TCP/UDP port */
2094     else if (status & SOCKET_READ)
2095     {
2096         read_incoming_link(c);
2097         if (!IS_SIG(c))
2098         {
2099             process_incoming_link(c);
2100         }
2101     }
2102     /* Incoming data on TUN device */
2103     else if (status & TUN_READ)
2104     {
2105         read_incoming_tun(c);
2106         if (!IS_SIG(c))
2107         {
2108             process_incoming_tun(c);
2109         }
2110     }
2111 }
2112