/*
 *  OpenVPN -- An application to securely tunnel IP networks
 *             over a single UDP port, with support for SSL/TLS-based
 *             session authentication and key exchange,
 *             packet encryption, packet authentication, and
 *             packet compression.
 *
 *  Copyright (C) 2002-2022 OpenVPN Inc <sales@openvpn.net>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#elif defined(_MSC_VER)
#include "config-msvc.h"
#endif

#include "syshead.h"

#if PORT_SHARE

#include "event.h"
#include "socket.h"
#include "fdmisc.h"
#include "crypto.h"
#include "ps.h"

#include "memdbg.h"

struct port_share *port_share = NULL; /* GLOBAL */

/* size of i/o buffers */
#define PROXY_CONNECTION_BUFFER_SIZE 1500

/* Command codes for foreground -> background communication */
#define COMMAND_REDIRECT 10
#define COMMAND_EXIT     11

/* Response codes for background -> foreground communication */
#define RESPONSE_INIT_SUCCEEDED   20
#define RESPONSE_INIT_FAILED      21

/*
 * Return values for proxy_connection_io functions
 */

#define IOSTAT_EAGAIN_ON_READ   0 /* recv returned EAGAIN */
#define IOSTAT_EAGAIN_ON_WRITE  1 /* send returned EAGAIN */
#define IOSTAT_READ_ERROR       2 /* the other end of our read socket (pc) was closed */
#define IOSTAT_WRITE_ERROR      3 /* the other end of our write socket (pc->counterpart) was closed */
#define IOSTAT_GOOD             4 /* nothing to report */

/*
 * A foreign (non-OpenVPN) connection we are proxying,
 * usually HTTPS
 */
struct proxy_connection {
    bool defined;
    struct proxy_connection *next;
    struct proxy_connection *counterpart;
    struct buffer buf;
    bool buffer_initial;
    int rwflags;
    int sd;
    char *jfn;
};

#if 0
static const char *
headc(const struct buffer *buf)
{
    static char foo[16];
    strncpy(foo, BSTR(buf), 15);
    foo[15] = 0;
    return foo;
}
#endif

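/*
 * Close a socket descriptor, but only if it is defined.
 */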
static inline void
close_socket_if_defined(const socket_descriptor_t sd)
{
    if (socket_defined(sd))
    {
        openvpn_close_socket(sd);
    }
}

/*
 * Close most of parent's fds.
 * Keep stdin/stdout/stderr, plus one
 * other fd which is presumed to be
 * our pipe back to the parent.
 * Admittedly, a bit of a kludge,
 * but POSIX doesn't give us a close-on-fork
 * counterpart to FD_CLOEXEC that would stop
 * fds from crossing a fork().
 */
static void
close_fds_except(int keep)
{
    socket_descriptor_t i;
    closelog();
    for (i = 3; i <= 100; ++i)
    {
        if (i != keep)
        {
            openvpn_close_socket(i);
        }
    }
}

/*
 * Usually we ignore signals, because our parent will
 * deal with them.
 */
static void
set_signals(void)
{
    signal(SIGTERM, SIG_DFL);

    signal(SIGINT, SIG_IGN);
    signal(SIGHUP, SIG_IGN);
    signal(SIGUSR1, SIG_IGN);
    signal(SIGUSR2, SIG_IGN);
    signal(SIGPIPE, SIG_IGN);
}

/*
 * Socket read/write functions.
 */

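/*
 * Read a one-byte command/response code from fd.
 * Return the code, or -1 on error.
 */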
static int
recv_control(const socket_descriptor_t fd)
{
    unsigned char c;
    const ssize_t size = read(fd, &c, sizeof(c));
    if (size == sizeof(c))
    {
        return c;
    }
    else
    {
        return -1;
    }
}

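/*
 * Write a one-byte command/response code to fd.
 * Return the number of bytes written (1), or -1 on error.
 */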
static int
send_control(const socket_descriptor_t fd, int code)
{
    unsigned char c = (unsigned char) code;
    const ssize_t size = write(fd, &c, sizeof(c));
    if (size == sizeof(c))
    {
        return (int) size;
    }
    else
    {
        return -1;
    }
}

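/*
 * Size of the ancillary data buffer needed to pass a single
 * file descriptor via SCM_RIGHTS.
 */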
static int
cmsg_size(void)
{
    return CMSG_SPACE(sizeof(socket_descriptor_t));
}

/*
 * Send a command (char), data (head), and a file descriptor (sd_send) to a local process
 * over unix socket sd.  Unfortunately, there's no portable way to send file descriptors
 * to other processes, so this code, as well as its analog (control_message_from_parent below),
 * is Linux-specific. This function runs in the context of the main process and is used to
 * send commands, data, and file descriptors to the background process.
 */
static void
port_share_sendmsg(const socket_descriptor_t sd,
                   const char command,
                   const struct buffer *head,
                   const socket_descriptor_t sd_send)
{
    if (socket_defined(sd))
    {
        struct msghdr mesg;
        struct cmsghdr *h;
        struct iovec iov[2];
        socket_descriptor_t sd_null[2] = { SOCKET_UNDEFINED, SOCKET_UNDEFINED };
        char cmd;
        ssize_t status;

        dmsg(D_PS_PROXY_DEBUG, "PORT SHARE: sendmsg sd=%d len=%d",
             (int)sd_send,
             head ? BLEN(head) : -1);

        CLEAR(mesg);

        cmd = command;

        iov[0].iov_base = &cmd;
        iov[0].iov_len = sizeof(cmd);
        mesg.msg_iovlen = 1;

        if (head)
        {
            iov[1].iov_base = BPTR(head);
            iov[1].iov_len = BLEN(head);
            mesg.msg_iovlen = 2;
        }

        mesg.msg_iov = iov;

        mesg.msg_controllen = cmsg_size();
        mesg.msg_control = (char *) malloc(mesg.msg_controllen);
        check_malloc_return(mesg.msg_control);
        mesg.msg_flags = 0;

        h = CMSG_FIRSTHDR(&mesg);
        h->cmsg_level = SOL_SOCKET;
        h->cmsg_type = SCM_RIGHTS;
        h->cmsg_len = CMSG_LEN(sizeof(socket_descriptor_t));

        if (socket_defined(sd_send))
        {
            memcpy(CMSG_DATA(h), &sd_send, sizeof(sd_send));
        }
        else
        {
            socketpair(PF_UNIX, SOCK_DGRAM, 0, sd_null);
            memcpy(CMSG_DATA(h), &sd_null[0], sizeof(sd_null[0]));
        }

        status = sendmsg(sd, &mesg, MSG_NOSIGNAL);
        if (status == -1)
        {
            msg(M_WARN|M_ERRNO, "PORT SHARE: sendmsg failed -- unable to communicate with background process (%d,%d,%d,%d)",
                sd, sd_send, sd_null[0], sd_null[1]
                );
        }

        close_socket_if_defined(sd_null[0]);
        close_socket_if_defined(sd_null[1]);
        free(mesg.msg_control);
    }
}

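/*
 * Close the socket of a proxy entry and remove it from the event set.
 */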
static void
proxy_entry_close_sd(struct proxy_connection *pc, struct event_set *es)
{
    if (pc->defined && socket_defined(pc->sd))
    {
        dmsg(D_PS_PROXY_DEBUG, "PORT SHARE PROXY: delete sd=%d", (int)pc->sd);
        if (es)
        {
            event_del(es, pc->sd);
        }
        openvpn_close_socket(pc->sd);
        pc->sd = SOCKET_UNDEFINED;
    }
}

/*
 * Mark a proxy entry and its counterpart for close.
 */
static void
proxy_entry_mark_for_close(struct proxy_connection *pc, struct event_set *es)
{
    if (pc->defined)
    {
        struct proxy_connection *cp = pc->counterpart;
        proxy_entry_close_sd(pc, es);
        free_buf(&pc->buf);
        pc->buffer_initial = false;
        pc->rwflags = 0;
        pc->defined = false;
        if (pc->jfn)
        {
            unlink(pc->jfn);
            free(pc->jfn);
            pc->jfn = NULL;
        }
        if (cp && cp->defined && cp->counterpart == pc)
        {
            proxy_entry_mark_for_close(cp, es);
        }
    }
}

/*
 * Run through the proxy entry list and delete all entries marked
 * for close.
 */
static void
proxy_list_housekeeping(struct proxy_connection **list)
{
    if (list)
    {
        struct proxy_connection *prev = NULL;
        struct proxy_connection *pc = *list;

        while (pc)
        {
            struct proxy_connection *next = pc->next;
            if (!pc->defined)
            {
                free(pc);
                if (prev)
                {
                    prev->next = next;
                }
                else
                {
                    *list = next;
                }
            }
            else
            {
                prev = pc;
            }
            pc = next;
        }
    }
}

/*
 * Record IP/port of client in filesystem, so that server receiving
 * the proxy can determine true client origin.
 */
static void
journal_add(const char *journal_dir, struct proxy_connection *pc, struct proxy_connection *cp)
{
    struct gc_arena gc = gc_new();
    struct openvpn_sockaddr from, to;
    socklen_t slen, dlen;
    int fnlen;
    char *jfn;
    int fd;

    slen = sizeof(from.addr.sa);
    dlen = sizeof(to.addr.sa);
    if (!getpeername(pc->sd, (struct sockaddr *) &from.addr.sa, &slen)
        && !getsockname(cp->sd, (struct sockaddr *) &to.addr.sa, &dlen))
    {
        const char *f = print_openvpn_sockaddr(&from, &gc);
        const char *t = print_openvpn_sockaddr(&to, &gc);
        fnlen = strlen(journal_dir) + strlen(t) + 2;
        jfn = (char *) malloc(fnlen);
        check_malloc_return(jfn);
        openvpn_snprintf(jfn, fnlen, "%s/%s", journal_dir, t);
        dmsg(D_PS_PROXY_DEBUG, "PORT SHARE PROXY: client origin %s -> %s", jfn, f);
        fd = platform_open(jfn, O_CREAT | O_TRUNC | O_WRONLY, S_IRUSR | S_IWUSR | S_IRGRP);
        if (fd != -1)
        {
            if (write(fd, f, strlen(f)) != strlen(f))
            {
                msg(M_WARN, "PORT SHARE: writing to journal file (%s) failed", jfn);
            }
            close(fd);
            cp->jfn = jfn;
        }
        else
        {
            msg(M_WARN|M_ERRNO, "PORT SHARE: unable to write journal file in %s", jfn);
            free(jfn);
        }
    }
    gc_free(&gc);
}

/*
 * Cleanup function, on proxy process exit.
 */
static void
proxy_list_close(struct proxy_connection **list)
{
    if (list)
    {
        struct proxy_connection *pc = *list;
        while (pc)
        {
            proxy_entry_mark_for_close(pc, NULL);
            pc = pc->next;
        }
        proxy_list_housekeeping(list);
    }
}

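/*
 * (Re)register the socket of a proxy entry with the event set,
 * but only if the requested read/write flags actually changed.
 */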
static inline void
proxy_connection_io_requeue(struct proxy_connection *pc, const int rwflags_new, struct event_set *es)
{
    if (socket_defined(pc->sd) && pc->rwflags != rwflags_new)
    {
        /*dmsg (D_PS_PROXY_DEBUG, "PORT SHARE PROXY: requeue[%d] rwflags=%d", (int)pc->sd, rwflags_new);*/
        event_ctl(es, pc->sd, rwflags_new, (void *)pc);
        pc->rwflags = rwflags_new;
    }
}

/*
 * Create a new pair of proxy_connection entries, one for each
 * socket file descriptor involved in the proxy.  We are given
 * the client fd, and we should derive our own server fd by connecting
 * to the server given by server_addr/server_port.  Return true
 * on success and false on failure to connect to server.
 */
static bool
proxy_entry_new(struct proxy_connection **list,
                struct event_set *es,
                const struct sockaddr_in server_addr,
                const socket_descriptor_t sd_client,
                struct buffer *initial_data,
                const char *journal_dir)
{
    socket_descriptor_t sd_server;
    int status;
    struct proxy_connection *pc;
    struct proxy_connection *cp;

    /* connect to port share server */
    if ((sd_server = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP)) < 0)
    {
        msg(M_WARN|M_ERRNO, "PORT SHARE PROXY: cannot create socket");
        return false;
    }
    status = openvpn_connect(sd_server, (const struct sockaddr *) &server_addr, 5, NULL);
    if (status)
    {
        msg(M_WARN, "PORT SHARE PROXY: connect to port-share server failed");
        openvpn_close_socket(sd_server);
        return false;
    }
    dmsg(D_PS_PROXY_DEBUG, "PORT SHARE PROXY: connect to port-share server succeeded");

    set_nonblock(sd_client);
    set_nonblock(sd_server);

    /* allocate 2 new proxy_connection objects */
    ALLOC_OBJ_CLEAR(pc, struct proxy_connection);
    ALLOC_OBJ_CLEAR(cp, struct proxy_connection);

    /* client object */
    pc->defined = true;
    pc->next = cp;
    pc->counterpart = cp;
    pc->buf = *initial_data;
    pc->buffer_initial = true;
    pc->rwflags = EVENT_UNDEF;
    pc->sd = sd_client;

    /* server object */
    cp->defined = true;
    cp->next = *list;
    cp->counterpart = pc;
    cp->buf = alloc_buf(PROXY_CONNECTION_BUFFER_SIZE);
    cp->buffer_initial = false;
    cp->rwflags = EVENT_UNDEF;
    cp->sd = sd_server;

    /* add to list */
    *list = pc;

    /* add journal entry */
    if (journal_dir)
    {
        journal_add(journal_dir, pc, cp);
    }

    dmsg(D_PS_PROXY_DEBUG, "PORT SHARE PROXY: NEW CONNECTION [c=%d s=%d]", (int)sd_client, (int)sd_server);

    /* set initial i/o states */
    proxy_connection_io_requeue(pc, EVENT_READ, es);
    proxy_connection_io_requeue(cp, EVENT_READ|EVENT_WRITE, es);

    return true;
}

/*
 * This function runs in the context of the background proxy process.
 * Receive a control message from the parent (sent by the port_share_sendmsg
 * function above) and act on it.  Return false if the proxy process should
 * exit, true otherwise.
 */
static bool
control_message_from_parent(const socket_descriptor_t sd_control,
                            struct proxy_connection **list,
                            struct event_set *es,
                            const struct sockaddr_in server_addr,
                            const int max_initial_buf,
                            const char *journal_dir)
{
    /* this buffer needs to be large enough to handle the largest buffer
     * that might be returned by the link_socket_read call in read_incoming_link. */
    struct buffer buf = alloc_buf(max_initial_buf);

    struct msghdr mesg;
    struct cmsghdr *h;
    struct iovec iov[2];
    char command = 0;
    ssize_t status;
    int ret = true;

    CLEAR(mesg);

    iov[0].iov_base = &command;
    iov[0].iov_len = sizeof(command);
    iov[1].iov_base = BPTR(&buf);
    iov[1].iov_len = BCAP(&buf);
    mesg.msg_iov = iov;
    mesg.msg_iovlen = 2;

    mesg.msg_controllen = cmsg_size();
    mesg.msg_control = (char *) malloc(mesg.msg_controllen);
    check_malloc_return(mesg.msg_control);
    mesg.msg_flags = 0;

    h = CMSG_FIRSTHDR(&mesg);
    h->cmsg_len = CMSG_LEN(sizeof(socket_descriptor_t));
    h->cmsg_level = SOL_SOCKET;
    h->cmsg_type = SCM_RIGHTS;
    static const socket_descriptor_t socket_undefined = SOCKET_UNDEFINED;
    memcpy(CMSG_DATA(h), &socket_undefined, sizeof(socket_undefined));

    status = recvmsg(sd_control, &mesg, MSG_NOSIGNAL);
    if (status != -1)
    {
        if (h == NULL
            || h->cmsg_len    != CMSG_LEN(sizeof(socket_descriptor_t))
            || h->cmsg_level  != SOL_SOCKET
            || h->cmsg_type   != SCM_RIGHTS)
        {
            msg(M_WARN, "PORT SHARE PROXY: received unknown message");
        }
        else
        {
            socket_descriptor_t received_fd;
            memcpy(&received_fd, CMSG_DATA(h), sizeof(received_fd));
            dmsg(D_PS_PROXY_DEBUG, "PORT SHARE PROXY: RECEIVED sd=%d", (int)received_fd);

            if (status >= 2 && command == COMMAND_REDIRECT)
            {
                buf.len = status - 1;
                if (proxy_entry_new(list,
                                    es,
                                    server_addr,
                                    received_fd,
                                    &buf,
                                    journal_dir))
                {
                    CLEAR(buf); /* we gave the buffer to proxy_entry_new */
                }
                else
                {
                    openvpn_close_socket(received_fd);
                }
            }
            else if (status >= 1 && command == COMMAND_EXIT)
            {
                dmsg(D_PS_PROXY_DEBUG, "PORT SHARE PROXY: RECEIVED COMMAND_EXIT");
                openvpn_close_socket(received_fd); /* null socket */
                ret = false;
            }
        }
    }
    free(mesg.msg_control);
    free_buf(&buf);
    return ret;
}

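/*
 * Receive data from the socket of a proxy entry into its buffer.
 */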
static int
proxy_connection_io_recv(struct proxy_connection *pc)
{
    /* recv data from socket */
    const int status = recv(pc->sd, BPTR(&pc->buf), BCAP(&pc->buf), MSG_NOSIGNAL);
    if (status < 0)
    {
        return (errno == EAGAIN) ? IOSTAT_EAGAIN_ON_READ : IOSTAT_READ_ERROR;
    }
    else
    {
        if (!status)
        {
            return IOSTAT_READ_ERROR;
        }
        dmsg(D_PS_PROXY_DEBUG, "PORT SHARE PROXY: read[%d] %d", (int)pc->sd, status);
        pc->buf.len = status;
    }
    return IOSTAT_GOOD;
}

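/*
 * Send buffered data from a proxy entry to its counterpart's socket,
 * handling partial writes by advancing the buffer.
 */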
static int
proxy_connection_io_send(struct proxy_connection *pc, int *bytes_sent)
{
    const socket_descriptor_t sd = pc->counterpart->sd;
    const int status = send(sd, BPTR(&pc->buf), BLEN(&pc->buf), MSG_NOSIGNAL);

    if (status < 0)
    {
        const int e = errno;
        return (e == EAGAIN) ? IOSTAT_EAGAIN_ON_WRITE : IOSTAT_WRITE_ERROR;
    }
    else
    {
        *bytes_sent += status;
        if (status != pc->buf.len)
        {
            dmsg(D_PS_PROXY_DEBUG, "PORT SHARE PROXY: partial write[%d], tried=%d got=%d", (int)sd, pc->buf.len, status);
            buf_advance(&pc->buf, status);
            return IOSTAT_EAGAIN_ON_WRITE;
        }
        else
        {
            dmsg(D_PS_PROXY_DEBUG, "PORT SHARE PROXY: wrote[%d] %d", (int)sd, status);
            pc->buf.len = 0;
            pc->buf.offset = 0;
        }
    }

    /* realloc send buffer after initial send */
    if (pc->buffer_initial)
    {
        free_buf(&pc->buf);
        pc->buf = alloc_buf(PROXY_CONNECTION_BUFFER_SIZE);
        pc->buffer_initial = false;
    }
    return IOSTAT_GOOD;
}

/*
 * Forward data from pc to pc->counterpart.
 */

static int
proxy_connection_io_xfer(struct proxy_connection *pc, const int max_transfer)
{
    int transferred = 0;
    while (transferred < max_transfer)
    {
        if (!BLEN(&pc->buf))
        {
            const int status = proxy_connection_io_recv(pc);
            if (status != IOSTAT_GOOD)
            {
                return status;
            }
        }

        if (BLEN(&pc->buf))
        {
            const int status = proxy_connection_io_send(pc, &transferred);
            if (status != IOSTAT_GOOD)
            {
                return status;
            }
        }
    }
    return IOSTAT_EAGAIN_ON_READ;
}

/*
 * Decide how the receipt of an EAGAIN status should affect our next IO queueing.
 */
static bool
proxy_connection_io_status(const int status, int *rwflags_pc, int *rwflags_cp)
{
    switch (status)
    {
        case IOSTAT_EAGAIN_ON_READ:
            *rwflags_pc |= EVENT_READ;
            *rwflags_cp &= ~EVENT_WRITE;
            return true;

        case IOSTAT_EAGAIN_ON_WRITE:
            *rwflags_pc &= ~EVENT_READ;
            *rwflags_cp |= EVENT_WRITE;
            return true;

        case IOSTAT_READ_ERROR:
            return false;

        case IOSTAT_WRITE_ERROR:
            return false;

        default:
            msg(M_FATAL, "PORT SHARE PROXY: unexpected status=%d", status);
    }
    return false; /* NOTREACHED */
}

/*
 * Dispatch function for forwarding data between the two socket fds involved
 * in the proxied connection.
 */
static int
proxy_connection_io_dispatch(struct proxy_connection *pc,
                             const int rwflags,
                             struct event_set *es)
{
    const int max_transfer_per_iteration = 10000;
    struct proxy_connection *cp = pc->counterpart;
    int rwflags_pc = pc->rwflags;
    int rwflags_cp = cp->rwflags;

    ASSERT(pc->defined && cp->defined && cp->counterpart == pc);

    if (rwflags & EVENT_READ)
    {
        const int status = proxy_connection_io_xfer(pc, max_transfer_per_iteration);
        if (!proxy_connection_io_status(status, &rwflags_pc, &rwflags_cp))
        {
            goto bad;
        }
    }
    if (rwflags & EVENT_WRITE)
    {
        const int status = proxy_connection_io_xfer(cp, max_transfer_per_iteration);
        if (!proxy_connection_io_status(status, &rwflags_cp, &rwflags_pc))
        {
            goto bad;
        }
    }
    proxy_connection_io_requeue(pc, rwflags_pc, es);
    proxy_connection_io_requeue(cp, rwflags_cp, es);

    return true;

bad:
    proxy_entry_mark_for_close(pc, es);
    return false;
}

/*
 * This is the main function for the port share proxy background process.
 */
static void
port_share_proxy(const struct sockaddr_in hostaddr,
                 const socket_descriptor_t sd_control,
                 const int max_initial_buf,
                 const char *journal_dir)
{
    if (send_control(sd_control, RESPONSE_INIT_SUCCEEDED) >= 0)
    {
        void *sd_control_marker = (void *)1;
        int maxevents = 256;
        struct event_set *es;
        struct event_set_return esr[64];
        struct proxy_connection *list = NULL;
        time_t last_housekeeping = 0;

        msg(D_PS_PROXY, "PORT SHARE PROXY: proxy starting");

        es = event_set_init(&maxevents, 0);
        event_ctl(es, sd_control, EVENT_READ, sd_control_marker);
        while (true)
        {
            int n_events;
            struct timeval tv;
            time_t current;

            tv.tv_sec = 10;
            tv.tv_usec = 0;
            n_events = event_wait(es, &tv, esr, SIZE(esr));
            /*dmsg (D_PS_PROXY_DEBUG, "PORT SHARE PROXY: event_wait returned %d", n_events);*/
            current = time(NULL);
            if (n_events > 0)
            {
                int i;
                for (i = 0; i < n_events; ++i)
                {
                    const struct event_set_return *e = &esr[i];
                    if (e->arg == sd_control_marker)
                    {
                        if (!control_message_from_parent(sd_control, &list, es, hostaddr, max_initial_buf, journal_dir))
                        {
                            goto done;
                        }
                    }
                    else
                    {
                        struct proxy_connection *pc = (struct proxy_connection *)e->arg;
                        if (pc->defined)
                        {
                            proxy_connection_io_dispatch(pc, e->rwflags, es);
                        }
                    }
                }
            }
            else if (n_events < 0)
            {
                dmsg(D_PS_PROXY_DEBUG, "PORT SHARE PROXY: event_wait failed");
            }
            if (current > last_housekeeping)
            {
                proxy_list_housekeeping(&list);
                last_housekeeping = current;
            }
        }

done:
        proxy_list_close(&list);
        event_free(es);
    }
    msg(M_INFO, "PORT SHARE PROXY: proxy exiting");
}

/*
 * Called from the main OpenVPN process to enable the port
 * share proxy.
 */
struct port_share *
port_share_open(const char *host,
                const char *port,
                const int max_initial_buf,
                const char *journal_dir)
{
    pid_t pid;
    socket_descriptor_t fd[2];
    struct sockaddr_in hostaddr;
    struct port_share *ps;
    int status;
    struct addrinfo *ai;

    ALLOC_OBJ_CLEAR(ps, struct port_share);
    ps->foreground_fd = -1;
    ps->background_pid = -1;

    /*
     * Get host's IP address
     */

    status = openvpn_getaddrinfo(GETADDR_RESOLVE|GETADDR_FATAL,
                                 host, port, 0, NULL, AF_INET, &ai);
    ASSERT(status == 0);
    hostaddr = *((struct sockaddr_in *) ai->ai_addr);
    freeaddrinfo(ai);

    /*
     * Make a socket for foreground and background processes
     * to communicate.
     */
    if (socketpair(PF_UNIX, SOCK_DGRAM, 0, fd) == -1)
    {
        msg(M_WARN, "PORT SHARE: socketpair call failed");
        goto error;
    }

    /*
     * Fork off background proxy process.
     */
    pid = fork();

    if (pid)
    {
        int status;

        /*
         * Foreground Process
         */

        ps->background_pid = pid;

        /* close our copy of child's socket */
        openvpn_close_socket(fd[1]);

        /* don't let future subprocesses inherit child socket */
        set_cloexec(fd[0]);

        /* wait for background child process to initialize */
        status = recv_control(fd[0]);
        if (status == RESPONSE_INIT_SUCCEEDED)
        {
            /* note that this will cause possible EAGAIN when writing to
             * control socket if proxy process is backlogged */
            set_nonblock(fd[0]);

            ps->foreground_fd = fd[0];
            return ps;
        }
        else
        {
            msg(M_ERR, "PORT SHARE: unexpected init recv_control status=%d", status);
        }
    }
    else
    {
        /*
         * Background Process
         */

        /* Ignore most signals (the parent will receive them) */
        set_signals();

        /* Let msg know that we forked */
        msg_forked();

#ifdef ENABLE_MANAGEMENT
        /* Don't interact with management interface */
        management = NULL;
#endif

        /* close all parent fds except our socket back to parent */
        close_fds_except(fd[1]);

        /* no blocking on control channel back to parent */
        set_nonblock(fd[1]);

        /* initialize prng */
        prng_init(NULL, 0);

        /* execute the event loop */
        port_share_proxy(hostaddr, fd[1], max_initial_buf, journal_dir);

        openvpn_close_socket(fd[1]);

        exit(0);
        return NULL; /* NOTREACHED */
    }

error:
    port_share_close(ps);
    return NULL;
}

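/*
 * Called from the main OpenVPN process on shutdown: tell the
 * background proxy process to exit and wait for it to terminate.
 */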
void
port_share_close(struct port_share *ps)
{
    if (ps)
    {
        if (ps->foreground_fd >= 0)
        {
            /* tell background process to exit */
            port_share_sendmsg(ps->foreground_fd, COMMAND_EXIT, NULL, SOCKET_UNDEFINED);

            /* wait for background process to exit */
            dmsg(D_PS_PROXY_DEBUG, "PORT SHARE: waiting for background process to exit");
            if (ps->background_pid > 0)
            {
                waitpid(ps->background_pid, NULL, 0);
            }
            dmsg(D_PS_PROXY_DEBUG, "PORT SHARE: background process exited");

            openvpn_close_socket(ps->foreground_fd);
            ps->foreground_fd = -1;
        }

        free(ps);
    }
}

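/*
 * Fast shutdown path: ask the background process to exit
 * without waiting for it to terminate.
 */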
void
port_share_abort(struct port_share *ps)
{
    if (ps)
    {
        /* tell background process to exit */
        if (ps->foreground_fd >= 0)
        {
            send_control(ps->foreground_fd, COMMAND_EXIT);
            openvpn_close_socket(ps->foreground_fd);
            ps->foreground_fd = -1;
        }
    }
}

/*
 * Given either the first 2 or 3 bytes of an initial client -> server
 * data payload, return true if the protocol is that of an OpenVPN
 * client attempting to connect with an OpenVPN server.
 */
bool
is_openvpn_protocol(const struct buffer *buf)
{
    const unsigned char *p = (const unsigned char *) BSTR(buf);
    const int len = BLEN(buf);
    if (len >= 3)
    {
        int plen = (p[0] << 8) | p[1];

        if (p[2] == (P_CONTROL_HARD_RESET_CLIENT_V3 << P_OPCODE_SHIFT))
        {
            /* WKc is at least 290 bytes (not including metadata):
             *
             * 16 bit len + 256 bit HMAC + 2048 bit Kc = 2320 bit
             *
             * This is increased by the normal length of the client handshake +
             * tls-crypt overhead (32)
             *
             * For metadata tls-crypt-v2.txt does not explicitly specify
             * an upper limit but we also have TLS_CRYPT_V2_MAX_WKC_LEN
             * as 1024 bytes. We err on the safe side with 255 bytes of extra overhead
             *
             * We don't do the 2 byte check for tls-crypt-v2 because it is very
             * unrealistic to have only 2 bytes available.
             */
            return (plen >= 336 && plen < (1024 + 255));
        }
        else
        {
            /* For non-tls-crypt-v2 we assume the packet length to be valid
             * between 14 and 255 */
            return plen >= 14 && plen <= 255
                   && (p[2] == (P_CONTROL_HARD_RESET_CLIENT_V2 << P_OPCODE_SHIFT));
        }
    }
    else if (len >= 2)
    {
        int plen = (p[0] << 8) | p[1];
        return plen >= 14 && plen <= 255;
    }
    else
    {
        return true;
    }
}

/*
 * Called from the foreground process.  Send a message to the background process that it
 * should proxy the TCP client on sd to the host/port defined in the initial port_share_open
 * call.
 */
void
port_share_redirect(struct port_share *ps, const struct buffer *head, socket_descriptor_t sd)
{
    if (ps)
    {
        port_share_sendmsg(ps->foreground_fd, COMMAND_REDIRECT, head, sd);
    }
}

#endif /* if PORT_SHARE */