1
2 /*
3 * Copyright (c) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
4 * 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2016, 2017, 2020
5 * Inferno Nettverk A/S, Norway. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. The above copyright notice, this list of conditions and the following
11 * disclaimer must appear in all copies of the software, derivative works
12 * or modified versions, and any portions thereof, aswell as in all
13 * supporting documentation.
14 * 2. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by
17 * Inferno Nettverk A/S, Norway.
18 * 3. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 * Inferno Nettverk A/S requests users of this software to return to
33 *
34 * Software Distribution Coordinator or sdc@inet.no
35 * Inferno Nettverk A/S
36 * Oslo Research Park
37 * Gaustadalléen 21
38 * NO-0349 Oslo
39 * Norway
40 *
41 * any improvements or extensions that they make and grant Inferno Nettverk A/S
42 * the rights to redistribute these changes.
43 *
44 */
45
46 #include "common.h"
47 #include "config_parse.h"
48
49 static const char rcsid[] =
50 "$Id: sockd_io.c,v 1.1229.4.4.2.5.4.2 2020/11/11 16:12:01 karls Exp $";
51
52 /*
53 * IO-child:
54 * Accept io objects from mother and do io on them. Unless
55 * Covenant, we never send back ancillary data, only ordinary data.
56 *
57 * XXX remove io_allocated()? Add some variables instead that we
58 * always keep updated.
59 */
60
61 #if HAVE_UDP_SUPPORT
62
63 #define UDP_INITIALCLIENTCOUNT (16) /*
64 * Number of UDP clients to allocate
65 * memory for initially.
66 */
67
68 #endif /* HAVE_UDP_SUPPORT */
69
70 #if BAREFOOTD
71 #define RAW_SOCKETBUFFER (100 * 1000) /* minsize for raw socket buffer. */
72 #endif /* BAREFOOTD */
73
74 static void siginfo(int sig, siginfo_t *si, void *sc);
75 static void proctitleupdate(void);
76
77 static int
78 io_connectisinprogress(const sockd_io_t *io);
79 /*
80 * Returns true if "io" belongs to a connect whos current state is marked
81 * as being in progress, but does not check whether it's status has changed
82 * since last time state was updated.
83 * Returns false otherwise.
84 */
85
86 static size_t
87 io_allocated(size_t *tcp_io, size_t *tcp_fd, size_t *udp_io, size_t *udp_fd);
88 /*
89 * If "tcp_io" is not NULL, on return it will contain the number of i/os
90 * allocated for tcp.
91 *
92 * If "udp_io" is not NULL, on return it will contain the number of i/os
93 * allocated for udp.
94 *
95 * Likewise for tcp_fd and udp_fd, though in this case, it's the number
96 * of fds in use for tcp and udp, rather than the number of i/o-objects.
97 *
98 * Returns the number of allocated (active) ios in total (udp and tcp).
99 */
100
101 static sockd_io_t *
102 io_getset(const int nfds, const fd_set *set);
103 /*
104 * Goes through our i/o list until it finds an io object where at least one
105 * of the descriptors in "set" is set. "nfds" gives the number of
106 * descriptors in "set" to check
107 *
108 * Returns NULL if none found.
109 *
110 * Special notes for Barefoot:
111 * Does not go through io.dstv, because that is never used by callers
112 * of this function.
113 */
114
115 static sockd_io_t *
116 io_finddescriptor(int d);
117 /*
118 * Finds the io object where one of the descriptors matches "fd".
119 */
120
121 static int
122 io_fillset(fd_set *set, int antiflags, fd_set *antiflags_set,
123 struct timeval *bwoverflowtil);
124 /*
125 * Sets all descriptors from our list, in "set".
126 *
127 * If "antiflags" is set, ios with any of the flags in "antiflags" set
128 * will be excluded from "set", but set in "antiflags_set" instead.
129 * If "antiflags" is not set, antiflags_set may be NULL.
130 *
131 * ios with state.fin_received set, ios that have not finished connecting,
132 * and ios that have overflown the bandwidth limit, will not be set in any
133 * set.
134 *
135 * If any ios were excluded due to having overflown the bandwidth limit,
136 * the earliest time we can again do i/o over one of the bandwidth-excluded
137 * ios will be stored in "bwoverflowtil", if not NULL.
138 *
139 * Returns the highest descriptor in our list, or -1 if we don't
140 * have any descriptors we want to select() on currently.
141 */
142
143 static int
144 io_fillset_connectinprogress(fd_set *set);
145 /*
146 * Similar to io_fillset(), but fills "set" only with descriptors belonging
147 * to connects that are marked as still being in progress.
148 *
149 * In addition, "set" may be NULL, in which case it can be used to simply
150 * check whether there are *any* connects in progress (return code will
151 * be the fd of the first connect in progress found).
152 */
153
154 static void
155 io_clearset(const sockd_io_t *io, const int clearalltargets, fd_set *set);
156 /*
157 * Clears all file descriptors in the i/o object "io" from set "set".
158 *
159 * If "clearalltargets" is set, the function also clears any fds
160 * from the array io->dst.dstv, rather than just io->dst.s.
161 */
162
163
#if HAVE_UDP_SUPPORT

/*
 * Raw ICMP socket, or -1 if it could not be created (creating it normally
 * requires elevated privileges; see run_io(), which logs and continues
 * without it on failure).
 *
 * In Dante's case, we only use this for forwarding icmp errors.
 *
 * In Barefoot's case we also use it to listen for icmp errors so that
 * we can more quickly remove sessions that are (presumably) no longer in
 * use.  We are much more aggressive about this in Barefoot's case because
 * we have no control-connection we can use to check whether the session
 * is still active or not, and we want to remove expired sessions at the
 * first error.
 */
int rawsocket = -1;

#endif /* HAVE_UDP_SUPPORT */
179
180 static int
181 io_timeoutispossible(const sockd_io_t *io);
182 /*
183 * Returns true if it's possible the io object "io" could time out, i.e.,
184 * the config and state of the io object is such that it is possible.
185 *
186 * Returns false if it is not possible for the i/o object to time out
187 * in it's current state with the current config.
188 */
189
190 static time_t
191 io_timeuntiltimeout(sockd_io_t *io, const struct timeval *tnow,
192 timeouttype_t *type, const int doudpsync);
193 /*
194 * "tnow" is the current time.
195 * "type", if not NULL, is filled in with the type of timeout that will
196 * occur at that time, if any.
197 *
198 * Returns the number of seconds til the io object "io" will timeout.
199 *
200 * 0 if the timeout has already been reached, or
201 * -1 if no timeout on the io is currently set.
202 *
203 * Special notes for Barefoot:
204 * If "io" belongs to a udp-session and "doudpsync" is set, the function
205 * will sync "io" with the udpclient struct belonging to the udp session
206 * that has timed out.
207 */
208
209 static struct timeval *
210 io_gettimeout(struct timeval *timeout);
211 /*
212 * If there is an applicable timeout on the current clients for how long
213 * we should wait for them to do i/o again, this function fills in "timeout"
214 * with the time remaining until then.
215 *
216 * Returns:
217 * If there is a timeout: timeout.
218 * If there is no applicable timeout currently: NULL.
219 */
220
221 static sockd_io_t *
222 io_gettimedout(void);
223 /*
224 * Scans all clients for one that has timed out according to sockscf
225 * settings.
226 *
227 * Returns:
228 * If timed out client found: pointer to timed out i/o object.
229 * Else: NULL.
230 */
231
232 static int
233 getnewios(void);
234 /*
235 * Receives new ios from mother.
236 * Returns the number of new ios received, or -1 on error.
237 */
238
239 static void
240 freebuffers(const sockd_io_t *io);
241 /*
242 * Frees buffers, if any, used by "io".
243 */
244
245 static int
246 connectstatus(sockd_io_t *io, int *badfd);
247 /*
248 * Checks if the socket on "io->dst" has finished connecting, and fills
249 * in status flags as appropriate. Note that this function should only
250 * be called once the connect has completed (successfully or otherwise).
251 *
252 * Note that this function must be called after the connect has completed,
253 * as in the socks case (and some covenant cases) we need to send a
254 * response back to the client before it will start sending us data.
255 * We can thus not delay calling this function til we get ordinary i/o
256 * from one side, as it's possible none will be coming til after we
257 * have sent the response to the client.
258 *
259 * Returns 0 if the socket connected successfully.
260 * Returns -1 if the socket was not connected successfully, or some other error
261 * occurred. In this case, "badfd" has the value of the "bad" fd,
262 * otherwise it will be -1.
263 */
264
265 #if SOCKS_SERVER
266
267 /*
268 * instead of including memory for this as part of the i/o object, we
269 * set up the pointer from the i/o object to the appropriate index into
270 * this array when we receive the i/o object. We can do that since
271 * we only use the io.sreplyrule object in this process.
272 * The only reason for going through with this hassle is so we can reduce
273 * the size of the i/o object. Since the i/o object is passed around between
274 * processes, we want it to be as small as possible, reducing the min-size
275 * of the socket buffers between mother and child.
276 *
277 * The rules are needed because while the original io.crule and io.rule
278 * are used to establish the session, we also need to do a rulespermit()
279 * on a per-packet basis (except in Dante when we have connected to the
280 * destination).
281 */
/*
 * Per-packet forward and reply rules, one slot per i/o object in iov[].
 * The i/o object is pointed at the matching index when received from
 * mother, rather than embedding the rules in the (frequently copied)
 * i/o object itself.  See the comment block above for the rationale.
 */
static rule_t fwdrulev[SOCKD_IOMAX];
static rule_t replyrulev[SOCKD_IOMAX];

/*
 * Each udpsession can have up to two target sockets.  One for IPv4 and
 * one for IPv6.  Indexed in parallel with iov[].
 */
udptarget_t udptargetv[SOCKD_IOMAX][2];
290
291 #endif /* SOCKS_SERVER */
292
/*
 * Calls io_clearset() on the i/o object "io" for every fd_set used by
 * run_io(), removing all of io's descriptors from each set.  If
 * "clearalltargets" is set, fds in io->dst.dstv are cleared too, not
 * just io->dst.s.
 *
 * NOTE: expands to references to run_io()'s local fd_set pointers
 * (buffwset, bufrset, ...), so it can only be used inside run_io().
 */
#define IO_CLEAR_ALL_SETS(io, clearalltargets)                                 \
do {                                                                           \
   io_clearset((io), (clearalltargets), buffwset);                             \
   io_clearset((io), (clearalltargets), bufrset);                              \
   io_clearset((io), (clearalltargets), newrset);                              \
   io_clearset((io), (clearalltargets), rset);                                 \
   io_clearset((io), (clearalltargets), tmpset);                               \
   io_clearset((io), (clearalltargets), udprset);                              \
   io_clearset((io), (clearalltargets), wset);                                 \
   io_clearset((io), (clearalltargets), xset);                                 \
} while (/* CONSTCOND */ 0)
305
sockd_io_t iov[SOCKD_IOMAX];   /* each child has these ios.                 */
const size_t ioc = ELEMENTS(iov); /* number of slots in iov[].              */
iostate_t iostate;             /* this process's i/o state; iostate.freefds
                                  is initialized in run_io().               */

/*
 * if not 0, we have "overflowed" according to max bandwidth configured.
 * We can not attribute it to any given client though, so we penalize
 * all by delaying a little.  This object gives the earliest time at which
 * we can again do i/o over one of the objects that has overflown its
 * bandwidth limit.
 */
static struct timeval bwoverflowtil;
318
319 void
320 run_io()
321 {
322 const char *function = "run_io()";
323 struct sigaction sigact;
324 fd_set *rset, *wset, *xset, *newrset, *tmpset, *bufrset, *buffwset, *udprset,
325 *zeroset;
326 int p, mayhavetimedout;
327 #if DIAGNOSTIC && 0 /* XXX not fully tested yet. */
328 size_t freefds_initially, logfds_initially;
329 #endif /* DIAGNOSTIC */
330
331 bzero(&sigact, sizeof(sigact));
332 sigact.sa_flags = SA_RESTART | SA_NOCLDSTOP | SA_SIGINFO;
333 sigact.sa_sigaction = siginfo;
334
335 #if HAVE_SIGNAL_SIGINFO
336 if (sigaction(SIGINFO, &sigact, NULL) != 0)
337 serr("%s: sigaction(SIGINFO)", function);
338 #endif /* HAVE_SIGNAL_SIGINFO */
339
340 /* same handler, for systems without SIGINFO. */
341 if (sigaction(SIGUSR1, &sigact, NULL) != 0)
342 serr("%s: sigaction(SIGUSR1)", function);
343
344 #if HAVE_UDP_SUPPORT
345 sockd_priv(SOCKD_PRIV_NET_ICMPACCESS, PRIV_ON);
346 if ((rawsocket = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP)) == -1)
347 slog(BAREFOOTD ? LOG_NOTICE : LOG_DEBUG,
348 "%s: could not open raw socket for improved UDP compatibility. "
349 "Usually root privileges are required for this",
350 function);
351 else
352 slog(LOG_DEBUG, "%s: created raw socket, fd %d", function, rawsocket);
353 sockd_priv(SOCKD_PRIV_NET_ICMPACCESS, PRIV_OFF);
354
355 if (rawsocket != -1)
356 if (setnonblocking(rawsocket, "rawsocket") == -1)
357 serr("%s: could not make rawsocket non-blocking", function);
358
359 #if HAVE_PRIVILEGES
360 /* don't need this privilege any more, permanently loose it. */
361
362 if (sockscf.state.haveprivs) {
363 priv_delset(sockscf.privileges.privileged, PRIV_NET_ICMPACCESS);
364 if (setppriv(PRIV_SET, PRIV_PERMITTED, sockscf.privileges.privileged)
365 != 0)
366 swarn("%s: setppriv() to relinquish PRIV_NET_ICMPACCESS failed",
367 function);
368 }
369 #endif /* HAVE_PRIVILEGES */
370
371 #if BAREFOOTD /* only Barefoot reads from the raw socket. */
372 if (rawsocket != -1) {
373 socklen_t optlen = sizeof(p);
374 if (getsockopt(rawsocket, SOL_SOCKET, SO_RCVBUF, &p, &optlen) != 0)
375 swarn("%s: getsockopt(SO_RCVBUF)", function);
376 else {
377 if (p < RAW_SOCKETBUFFER) {
378 p = RAW_SOCKETBUFFER;
379 if (setsockopt(rawsocket, SOL_SOCKET, SO_RCVBUF, &p, sizeof(p))
380 != 0)
381 swarn("%s: failed setsockopt(SO_RCVBUF, %d) on raw socket",
382 function, p);
383 else
384 slog(LOG_DEBUG, "%s: changed buffer size to %d bytes",
385 function, p);
386 }
387 else
388 slog(LOG_DEBUG, "%s: default buffer size is %d bytes, keeping it",
389 function, p);
390 }
391 }
392 #endif /* BAREFOOTD */
393 #endif /* HAVE_UDP_SUPPORT */
394
395 buffwset = allocate_maxsize_fdset();
396 bufrset = allocate_maxsize_fdset();
397 newrset = allocate_maxsize_fdset();
398 rset = allocate_maxsize_fdset();
399 tmpset = allocate_maxsize_fdset();
400 udprset = allocate_maxsize_fdset();
401 wset = allocate_maxsize_fdset();
402 xset = allocate_maxsize_fdset();
403 zeroset = allocate_maxsize_fdset();
404
405 FD_ZERO(zeroset);
406
407 proctitleupdate();
408
409 iostate.freefds = (size_t)freedescriptors(NULL, NULL);
410
411 mayhavetimedout = 0;
412
413 sockd_print_child_ready_message(iostate.freefds);
414
415 #if DIAGNOSTIC && 0 /* XXX not fully tested yet. */
416 freefds_initially = iostate.freefds;
417 logfds_initially
418 = sockscf.log.filenoc + (sockscf.log.type & LOGTYPE_SYSLOG)
419 + sockscf.errlog.filenoc + (sockscf.errlog.type & LOGTYPE_SYSLOG);
420 #endif /* DIAGNOSTIC */
421
422 /* CONSTCOND */
423 while (1) {
424 /*
425 * The heart and soul of the server. This is the loop where
426 * all i/o is done and involves some tricky stuff.
427 *
428 * We need to check for write separately to avoid busy-looping.
429 * The problem is that if the descriptor is ready for reading but
430 * the corresponding descriptor to write out on is not ready, we will
431 * be busy-looping; above select will keep returning descriptors set,
432 * but we will not be able to write (and thus won't read) them.
433 *
434 * Our solution to this is two select(2) calls. One to see
435 * what descriptors are readable, and another select(2) call to
436 * block until at least one of the descriptors on the corresponding
437 * write-side has become writable.
438 * We therefore only set in wset the descriptors that have the
439 * corresponding read descriptor readable, so that when the
440 * second select() returns, the io objects we get from wset will
441 * be both readable and writable.
442 *
443 * XXX Now that we have the iobuffers, perhaps we can improve on the
444 * above by not bothering with the select(2) for write as long as
445 * there is some room in the write buffer?
446 *
447 * Another problem is that if while we wait for writability, a new
448 * descriptor becomes readable, we thus can't block forever here.
449 * We solve this by in the second select() also checking for
450 * readability, but now only the descriptors that were not found
451 * to be readable in the previous select().
452 * This means that a positive return from the second select does not
453 * necessarily indicate we have i/o to do, but it does mean we
454 * either have it or a new descriptor became readable; in either
455 * case, something has happened.
456 * Reason we do not check for exceptions in this second select is that
457 * there is nothing we do about them until the descriptor becomes
458 * readable too, thus any new exceptions will be in newrset before
459 * we have reason to care about them.
460 */
461 iostatus_t iostatus;
462 sockd_io_t *io;
463 struct timeval timeout, *timeoutpointer;
464 int i, bits, first_rbits, rbits, wbits, udprbits,
465 newsocketsconnected, badfd;
466
467 #if DIAGNOSTIC && 0 /* XXX not fully tested yet. */
468 size_t tcpfd, udpfd, freefds_now, logfds_now;
469
470 io_allocated(NULL, &tcpfd, NULL, &udpfd);
471
472 logfds_now
473 = sockscf.log.filenoc + (sockscf.log.type & LOGTYPE_SYSLOG)
474 + sockscf.errlog.filenoc + (sockscf.errlog.type & LOGTYPE_SYSLOG);
475
476 freefds_now = (size_t)freedescriptors(NULL, NULL);
477
478 SASSERTX(freefds_now - logfds_now
479 >= freefds_initially - logfds_initially - (tcpfd + udpfd));
480 #endif /* DIAGNOSTIC */
481
482 errno = 0; /* reset for each iteration. */
483
484 /* look for timed-out clients and calculate the next timeout, if any. */
485 if (mayhavetimedout) {
486 while ((io = io_gettimedout()) != NULL) {
487 #if HAVE_NEGOTIATE_PHASE
488 if (io_connectisinprogress(io)
489 && (SOCKS_SERVER || io->reqflags.httpconnect)) {
490 response_t response;
491
492 create_response(NULL,
493 &io->src.auth,
494 io->state.proxyprotocol,
495 (int)sockscode(io->state.proxyprotocol,
496 SOCKS_TTLEXPIRED),
497 &response);
498
499 if (send_response(io->src.s, &response) != 0)
500 errno = 0; /* real error is the timeout. */
501 }
502 #endif /* HAVE_NEGOTIATE_PHASE */
503
504 io_delete(sockscf.state.mother.ack, io, -1, IO_TIMEOUT);
505 }
506 }
507
508 mayhavetimedout = 0;
509 rbits = io_fillset(rset, MSG_OOB, xset, &bwoverflowtil);
510
511 /*
512 * buffwset. What descriptors do we want to check for having data
513 * buffered for write? Having data buffered for write means we have
514 * data to write on them, thus we want to know if they are writable.
515 *
516 * Pretty much any client-related descriptor we want to check for
517 * having data buffered for write, except those specifically
518 * skipped (due to e.g., bw overflow).
519 */
520 FD_COPY(buffwset, rset);
521
522 /* likewise for having data buffered for read. */
523 FD_COPY(bufrset, rset);
524
525 if (sockscf.state.mother.s != -1) {
526 FD_SET(sockscf.state.mother.s, rset);
527 rbits = MAX(rbits, sockscf.state.mother.s);
528
529 /* checked so we know if mother goes away. */
530 FD_SET(sockscf.state.mother.ack, rset);
531 rbits = MAX(rbits, sockscf.state.mother.ack);
532 }
533 else { /* no mother. Do we have any other descriptors to work with? */
534 if (rbits == -1 && !timerisset(&bwoverflowtil)) {
535 /* no clients in fd_sets, and not due to bwoverflow ... */
536 SASSERTX(io_allocated(NULL, NULL, NULL, NULL) == 0);
537
538 slog(LOG_DEBUG, "%s: no connection to mother, no clients; exiting",
539 function);
540
541 #if HAVE_VALGRIND_VALGRIND_H
542 if (RUNNING_ON_VALGRIND) {
543 /* for some reason Valgrind complains the rset pointer is lost. */
544 free(rset);
545 }
546 #endif /* HAVE_VALGRIND_VALGRIND_H */
547
548 sockdexit(EXIT_SUCCESS);
549 }
550 }
551
552 #if BAREFOOTD
553 if (rawsocket != -1 && (io_udpclients(ioc, iov, 1) > 0)) {
554 /* raw socket is only of interest if we have udp clients. */
555
556 FD_SET(rawsocket, rset);
557 rbits = MAX(rbits, rawsocket);
558 }
559 #endif /* BAREFOOTD */
560
561 /*
562 * In the first select(2) we check for descriptors that are readable;
563 * we won't write if we can't read.
564 *
565 * Connects that are in progress is a special case that we also need
566 * to check for here. Once the connect(2) has completed, successfully
567 * or not, the socket will become writable and we may need to send a
568 * status response (if there is a negotiate phase) to the client.
569 *
570 * Also select(2) for exceptions so we later can tell the i/o function
571 * if there's one pending.
572 */
573
574 wbits = io_fillset_connectinprogress(wset);
575 newsocketsconnected = 0;
576 bits = MAX(rbits, wbits) + 1;
577
578 slog(LOG_DEBUG, "%s: first select; readable/connected?", function);
579 switch (selectn(bits,
580 rset,
581 bufrset,
582 buffwset,
583 wset,
584 xset,
585 io_gettimeout(&timeout))) {
586 case -1:
587 SASSERT(ERRNOISTMP(errno));
588 continue;
589
590 case 0:
591 mayhavetimedout = 1;
592 continue; /* restart the loop. */
593 }
594
595 if (sockscf.state.mother.ack != -1
596 && FD_ISSET(sockscf.state.mother.ack, rset)) { /* only eof expected. */
597 sockd_readmotherscontrolsocket(function, sockscf.state.mother.ack);
598 sockscf.state.mother.s = sockscf.state.mother.ack = -1;
599
600 #if BAREFOOTD
601 /*
602 * terminate all udp sessions as if we do not, a restart of mother
603 * will not be able to rebind the ports used.
604 * Not a problem for TCP, so those sessions can continue to run
605 * until the session ends for other reasons.
606 */
607 io_remove_session(ioc, iov, NULL, SOCKS_UDP, IO_ADMINTERMINATION);
608 proctitleupdate();
609
610 #else /* !BAREFOOTD */
611 /*
612 * this process can continue independent of mother as long as it
613 * has clients, because each client has it's own unique
614 * udp socket on the client-side also.
615 */
616 #endif /* !BAREFOOTD */
617
618 /*
619 * safest to regenerate the fd_sets for this once in a life-time
620 * event.
621 */
622 continue;
623 }
624
625 /*
626 * this needs to be after check of ack-pipe to limit error messages,
627 * because the ack-pipe is a stream pipe, so hopefully we will handle
628 * the EOF from mother on the ack-pipe before we get the error on
629 * the data-pipe.
630 */
631 if (sockscf.state.mother.s != -1
632 && FD_ISSET(sockscf.state.mother.s, rset)) {
633 if (getnewios() == -1)
634 continue; /* loop around and check control connection again. */
635 else {
636 proctitleupdate();
637 continue; /* need to scan rset again; should have a new client. */
638 /*
639 * XXX Or can we instead add it to newrset, and rescan as
640 * normal after that?
641 */
642 }
643 }
644
645 first_rbits = bits;
646
647 /*
648 * First check if any new connect(2)s to targets have finished.
649 */
650 if (FD_CMP(zeroset, wset) != 0) {
651 for (p = 0; p < bits; ++p) {
652 if (FD_ISSET(p, wset)) {
653
654 io = io_finddescriptor(p);
655 SASSERTX(io != NULL);
656 SASSERTX(p == io->dst.s);
657
658 if (connectstatus(io, &badfd) == 0)
659 ++newsocketsconnected;
660 else {
661 SASSERTX(badfd != -1);
662 SASSERTX(badfd == io->src.s
663 || badfd == io->dst.s
664 || badfd == io->control.s);
665
666 IO_CLEAR_ALL_SETS(io, 1);
667 io_delete(sockscf.state.mother.ack, io, badfd, IO_IOERROR);
668 }
669 }
670 }
671
672 if (newsocketsconnected > 0)
673 proctitleupdate();
674
675 slog(LOG_DEBUG, "%s: %d new socket%s finished connecting",
676 function,
677 newsocketsconnected,
678 newsocketsconnected == 1 ? "" : "s");
679 }
680
681 /*
682 * Add bufrset to rset, so rset now contains all sockets we can
683 * read from, whether it's from the socket or from our local buffer.
684 */
685 fdsetop(bits, '|', rset, bufrset, rset);
686
687 #if BAREFOOTD
688 if (rawsocket != -1 && FD_ISSET(rawsocket, rset)) {
689 FD_CLR(rawsocket, rset);
690
691 if (rawsocket_recv(rawsocket, ioc, iov) == RAWSOCKET_IO_DELETED)
692 /*
693 * one or more ios were deleted. Don't know which ones, so
694 * need to regenerate the descriptor sets for select.
695 */
696 continue;
697 }
698 #endif /* BAREFOOTD */
699
700 /*
701 * We now know what descriptors are readable; rset.
702 *
703 * Next prepare for the second select(2), where we want to
704 * know which of the descriptors, paired with the above readable
705 * descriptors, we can write to. In that select(2) we also need to
706 * check for read again, but only those descriptors that are not
707 * already readable, as that constitutes at least a status change
708 * which we should loop around for. Likewise, we again need to
709 * check whether some new sockets have finished connecting.
710 */
711
712 i = io_fillset(tmpset, 0, NULL, &bwoverflowtil);
713 rbits = fdsetop(i, '^', rset, tmpset, newrset);
714
715 if (sockscf.state.mother.s != -1) { /* mother status may change too. */
716 FD_SET(sockscf.state.mother.s, newrset);
717 rbits = MAX(rbits, sockscf.state.mother.s);
718
719 /* checked so we know if mother goes away. */
720 SASSERTX(sockscf.state.mother.ack != -1);
721 FD_SET(sockscf.state.mother.ack, newrset);
722 rbits = MAX(rbits, sockscf.state.mother.ack);
723 }
724
725 #if BAREFOOTD
726 /*
727 * Checked before, and checked again now, as it's status too may
728 * change and it may become readable (again).
729 */
730 if (rawsocket != -1) {
731 FD_SET(rawsocket, newrset);
732 rbits = MAX(rbits, rawsocket);
733 }
734 #endif /* BAREFOOTD */
735
736 /*
737 * Use a separate set to to store all udp fds that should be writable.
738 * We don't bother actually checking udp sockets for writability
739 * because if the udp write ends up failing, it wouldn't make any
740 * difference whether the socket was marked as writable or not; for
741 * all we know it's "writability" could have have been limited to a
742 * one byte write/packet, while the corresponding packet read was much
743 * larger, in which case our write could have failed anyway.
744 */
745 FD_ZERO(udprset);
746 udprbits = -1;
747
748 /*
749 * descriptors to check for writability:
750 * - those with the corresponding read-descriptor set.
751 * - those with data already buffered for write.
752 * - the connects that are still in progress.
753 *
754 * Initialize with the set of connects still in progress, and then add
755 * those fds that have the corresponding other side readable.
756 */
757 wbits = io_fillset_connectinprogress(wset);
758
759 for (p = 0; p < MAX(bits, first_rbits); ++p) {
760 int sockettoset;
761
762 if (FD_ISSET(p, buffwset)) {
763 /*
764 * Descriptor has data buffered for write. That means we should
765 * mark the other side as readable. Regardless of whether we
766 * can read from the other side or not at the moment, we have
767 * data that we previously read from it which which we need to
768 * forward to the other side.
769 */
770 int other_side;
771
772 io = io_finddescriptor(p);
773
774 SASSERTX(io != NULL);
775 SASSERTX(socks_bufferhasbytes(p, WRITE_BUF));
776 SASSERTX(io->state.protocol == SOCKS_TCP); /* we only buffer tcp. */
777
778 if (p == io->src.s)
779 other_side = io->dst.s;
780 else if (p == io->dst.s)
781 other_side = io->src.s;
782 else
783 SERRX(p);
784
785 slog(LOG_DEBUG,
786 "%s: fd %d has data buffered for write. Checking it for "
787 "writability and marking other side, fd %d, as readable",
788 function, p, other_side);
789
790 FD_SET(other_side, rset);
791 rbits = MAX(other_side, rbits);
792
793 /*
794 * ok, we know we have data buffered for write, but /can/
795 * we write? For TCP, need to check.
796 *
797 * XXX possible optimization target: if we have enough room
798 * in the writebuffer, we can pretend the fd is writable
799 * as long as we do not read too much (gssapi encapsulation
800 * included). As of now, we just use the buffer to even
801 * out differences between the two sides, but if one side
802 * stops reading completely, the fd will not be writable
803 * and we won't try to write anything, not even to our own
804 * buffer even though that might be possible. No big deal.
805 */
806
807 FD_SET(p, wset);
808 wbits = MAX(wbits, p);
809 }
810 else {
811 /*
812 * No data buffered for write. Check is the socket is readable,
813 * from the buffer or from the socket itself.
814 */
815 io = NULL;
816 }
817
818 if (!FD_ISSET(p, rset))
819 continue; /* socket is not readable. */
820
821 /*
822 * Have data to read. Figure out from what and to which socket.
823 */
824
825 if (io == NULL)
826 io = io_finddescriptor(p);
827
828 SASSERTX(io != NULL);
829
830 /*
831 * find out what corresponding socket we should check for writability.
832 */
833
834 sockettoset = -1;
835
836 /*
837 * In the case of udp, we have a one-to-many (many = 2 in the socks
838 * case) scenario, where packets received on "in" can go to many
839 * different "outs." and we don't know which out socket to use until
840 * we have read the packet from the client.
841 *
842 * UDP sockets shouldn't normally block though, so selecting
843 * for writability is not something we care about it this case.
844 *
845 * The reverse, when a packet comes in on one of the out sockets
846 * is slightly more complicated. To detect that we need to select(2)
847 * for readability on all the target/dst sockets. This is handled
848 * as usual in io_fillset().
849 */
850
851 if (p == io->src.s || p == io->dst.s) {
852 if (io->state.protocol == SOCKS_UDP) {
853 /*
854 * Just set this fd in udprset. We don't bother actually
855 * select(2)'ing on the corresponding udp socket for
856 * writability.
857 */
858
859 sockettoset = p;
860
861 FD_SET(sockettoset, udprset);
862 udprbits = MAX(udprbits, sockettoset);
863 }
864 else {
865 /*
866 * For TCP-sockets we need to know whether we can write or
867 * not to the other (non-read) side.
868 */
869
870 if (p == io->src.s) {
871 /*
872 * read from src (client) requires writable out (target).
873 */
874 sockettoset = io->dst.s;
875 }
876 else {
877 SASSERTX(p == io->dst.s);
878 /*
879 * read from dst (target) requires writable in (client).
880 */
881 sockettoset = io->src.s;
882 }
883
884 SASSERTX(sockettoset != -1);
885
886 FD_SET(sockettoset, wset);
887 wbits = MAX(wbits, sockettoset);
888 }
889 }
890 #if HAVE_CONTROL_CONNECTION
891 else {
892 /*
893 * control connection is also readable without matching
894 * writable and is used to signal session close in udp and
895 * bind extension cases.
896 * Since it doesn't have any side to forward the data to
897 * it is simpler to handle it here and now.
898 */
899 ssize_t r;
900 char buf[1024];
901
902 SASSERTX(io->control.s == p);
903 SASSERTX(io->control.s != io->src.s);
904 SASSERTX(io->control.s != io->dst.s);
905
906 sockettoset = io->control.s;
907
908 SASSERTX(io->state.command == SOCKS_UDPASSOCIATE
909 || (io->state.command == SOCKS_BINDREPLY
910 && io->state.extension.bind));
911
912 /*
913 * Only thing we expect from client's control connection is
914 * an eof.
915 */
916 r = socks_recvfrom(io->control.s,
917 buf,
918 sizeof(buf),
919 0,
920 NULL,
921 NULL,
922 NULL,
923 &io->control.auth);
924
925 if (r <= 0) {
926 slog(LOG_DEBUG,
927 "%s: TCP control connection from client %s closed: %s",
928 function,
929 sockaddr2string(&CONTROLIO(io)->raddr, NULL, 0),
930 r == 0 ? "EOF" : strerror(errno));
931
932 if (io->srule.mstats_shmid != 0
933 && (io->srule.alarmsconfigured & ALARM_DISCONNECT)) {
934 clientinfo_t cinfo;
935
936 cinfo.from = io->control.raddr;
937 HOSTIDCOPY(&io->state, &cinfo);
938
939 SASSERTX(!io->control.state.alarmdisconnectdone);
940 alarm_add_disconnect(0,
941 &io->srule,
942 ALARM_INTERNAL,
943 &cinfo,
944 r == 0 ? "EOF" : strerror(errno),
945 sockscf.shmemfd);
946
947 io->control.state.alarmdisconnectdone = 1;
948 }
949
950 IO_CLEAR_ALL_SETS(io, 1);
951
952 io_delete(sockscf.state.mother.ack,
953 io,
954 io->control.s,
955 r == 0 ? IO_CLOSE : IO_IOERROR);
956 }
957 else {
958 char visbuf[256];
959
960 slog(LOG_NOTICE, "%s: %ld unexpected byte%s over control "
961 "connection from client %s: %s",
962 function,
963 (long)r,
964 r == 1 ? "" : "s",
965 sockaddr2string(&CONTROLIO(io)->raddr, NULL, 0),
966 str2vis(buf,
967 (size_t)r,
968 visbuf,
969 sizeof(visbuf)));
970
971 FD_CLR(io->control.s, rset);
972
973 FD_SET(io->control.s, newrset);
974 rbits = MAX(rbits, io->control.s);
975 }
976 }
977 #endif /* HAVE_CONTROL_CONNECTION */
978
979 SASSERTX(sockettoset != -1);
980 }
981
982 if (newsocketsconnected || udprbits > -1) {
983 /*
984 * Don't wait. Handle what we can now and restart the loop,
985 * which will then include handling of any new sockets.
986 */
987 timeoutpointer = &timeout;
988 bzero(timeoutpointer, sizeof(*timeoutpointer));
989 }
990 else
991 timeoutpointer = io_gettimeout(&timeout);
992
993 ++rbits;
994 ++wbits;
995
996 bits = MAX(rbits, wbits);
997
998 if (bits == 0) {
999 slog(LOG_DEBUG,
1000 "%s: no fds to select(2) on ... restarting loop", function);
1001
1002 continue;
1003 }
1004
1005 if (FD_CMP(zeroset, newrset) == 0
1006 && FD_CMP(zeroset, wset) == 0) {
1007 slog(LOG_DEBUG,
1008 "%s: no fds to select(2) on ... restarting loop", function);
1009
1010 continue;
1011 }
1012
1013 slog(LOG_DEBUG, "%s: second select; what is writable?", function);
1014 switch (selectn(bits, newrset, NULL, NULL, wset, NULL, timeoutpointer)) {
1015 case -1:
1016 SASSERT(ERRNOISTMP(errno));
1017 continue;
1018
1019 case 0:
1020 mayhavetimedout = 1;
1021 }
1022
1023 if (sockscf.state.mother.ack != -1
1024 && FD_ISSET(sockscf.state.mother.ack, newrset))
1025 continue; /* eof presumably, but handle it in one place, above. */
1026
1027 if (sockscf.state.mother.s != -1
1028 && FD_ISSET(sockscf.state.mother.s, newrset)) {
1029 FD_CLR(sockscf.state.mother.s, newrset);
1030 getnewios();
1031 }
1032
1033 /*
1034 * If newrset has any udp i/os, add them to udprset.
1035 */
1036 for (p = 0; p < rbits; ++p) {
1037 if (!FD_ISSET(p, newrset))
1038 continue;
1039
1040 #if BAREFOOTD
1041 /* specialcased as it's not part of any i/o object. */
1042 if (rawsocket != -1 && p == rawsocket)
1043 continue;
1044 #endif /* BAREFOOTD */
1045
1046 io = io_finddescriptor(p);
1047 SASSERTX(io != NULL);
1048
1049 if (io->state.protocol == SOCKS_UDP
1050 && (p == io->src.s || p == io->dst.s)) {
1051 FD_SET(p, udprset);
1052 udprbits = MAX(udprbits, p);
1053
1054 /* enough to have it in udprset. */
1055 FD_CLR(p, newrset);
1056 }
1057 }
1058
1059 /*
1060 * Status now is as follows:
1061 *
1062 * newrset: tcp descriptors newly readable (from the second select).
1063 * We don't do anything with them here as we don't know if
1064 * the other side is writable, but instead loop around and
1065 * check for writability first.
1066 *
1067 * rset: descriptors readable, from buffer or from socket.
1068 *
1069 * udprset: udp sockets that are readable.
1070 *
1071 * wset: descriptors writable with at least one of:
1072 * a) a matching descriptor in rset.
1073 * b) data buffered for write.
1074 * c) a connect that was previously in progress, now completed.
1075 *
1076 * a) and b) we can do i/o over, c) we can't know for sure.
1077 *
1078 * xset: subset of rset with exceptions pending.
1079 *
1080 * The sockets in udprset and wset are the sockets we can possibly
1081 * do i/o over.
1082 *
1083 * For wset we need check which of a), b), or c) is the case.
1084 * If it's a) or b), do i/o, remove the socket from wset, and
1085 * get the next socket set in wset. If c), we can not do i/o now.
1086 *
1087 * For udprset, we have no corresponding write-side we bother checking
1088 * as we assume we can always write in the udp case.
1089 */
1090
1091 #if HAVE_UDP_SUPPORT
1092 /*
1093 * First handle UDP case; udprset.
1094 */
1095
1096 ++udprbits;
1097
1098 while ((io = io_getset(udprbits, udprset)) != NULL) {
1099 SASSERTX(io->state.protocol == SOCKS_UDP);
1100
1101 /*
1102 * Since udp is a "point-to-multipoint" case, the descriptor in
1103 * udprset could be:
1104 * a) the one socket (io->src.s) we use to read from all clients.
1105 * b) any of the sockets in io->dst.dstv, which we use for
1106 * sending/reading data to/from targets.
1107 *
1108 * In Barefoot's case, if it's a), there could be a lot of packets
1109 * queued up, since that one socket will be handling io->dst.dstc
       * number of clients. If so, we'll want to read from the socket
1111 * until there are no more packets, to reduce the chance of packets
1112 * overflowing the kernels socket buffer and becoming lost.
1113 * That we don't know how many packets there are in Barefoot's
1114 * case is no different from Dante's case, but in Barefoot's case
1115 * there is a good chance there will be many due to the
1116 * multipoint-to-point nature of things, so read packets until
       * there are no more to read to reduce the chance of packets
1118 * being discarded due to the socket buffer filling up.
1119 *
1120 * If it's b), we need to find out which one(s) it is and set
1121 * io->dst.s appropriately, so that doio() reads from the correct
1122 * socket. This is however done by the function that fetches us
1123 * the io above (io_getset()).
1124 *
1125 * The thing we need to be careful about is to not clear a
1126 * fd we have not read from. If we read from the client-fd,
1127 * the target-fd should be -1, so we can't read or clear that.
1128 * If we read from the target-fd however, the client-fd can also
1129 * be set. doio() handles this by reading from both sides,
1130 * if both sides are set, which is good enough for Dante.
1131 * In Barefoot we have the extra "read until we can read no more"
1132 * for the client-side however, to reduce the chance of packetloss,
1133 * which is the main reason for the difference here.
1134 *
1135 * XXX how to handle bwoverflow?
1136 */
1137
1138 #if BAREFOOTD
1139 if (FD_ISSET(io->src.s, udprset)) {
1140 /*
1141 * case a): packet from client to target.
1142 *
1143 * Don't know what the destination socket is until we've
1144 * read the packet and found the destination socket, based
1145 * on what client sent the packet. Set it to the dummy socket
1146 * for now and let the i/o function demux it when the client
1147 * packet has been read.
1148 *
1149 * Try to read as many packets as we can as presumably there is
          * a much greater risk of losing packets on the client side,
1151 * since all the clients send to one address/socket.
1152 */
1153
1154 do {
1155 io->dst.s = -1; /* don't yet know what dst will be. */
1156 iostatus = doio_udp(io, udprset, &badfd);
1157
1158 if (IOSTATUS_FATALERROR(iostatus) && badfd != -1) {
1159 if (io->dst.s != -1)
1160 FD_CLR(io->dst.s, udprset);
1161
1162 io_delete(sockscf.state.mother.ack, io, badfd, iostatus);
1163 }
1164 } while (iostatus != IO_EAGAIN
1165 && io->dst.dstc > 0);
1166
1167 if (io->src.s != -1)
1168 FD_CLR(io->src.s, udprset);
1169 }
1170 else {
1171 /*
1172 * case b): reply from target to client.
1173 */
1174 SASSERTX(io->dst.s != -1);
1175
1176 /*
1177 * can't be set as we first checked the client-side of this i/o
1178 * and then read till there were no more packets to be read.
1179 */
1180 SASSERTX(!FD_ISSET(io->src.s, udprset));
1181
1182 iostatus = doio_udp(io, udprset, &badfd);
1183
1184 if (io->dst.s != -1)
1185 FD_CLR(io->dst.s, udprset);
1186
1187 if (IOSTATUS_FATALERROR(iostatus) && badfd != -1)
1188 io_delete(sockscf.state.mother.ack, io, badfd, iostatus);
1189 }
1190
1191 #else /* Dante */
1192
1193 /*
1194 * With the advent of IPv6, Dante is also a p-to-mp now, not
1195 * completely unlike Barefoot, though in Dante's case the "mp"
1196 * refers to only up to two points per client-side socket,
1197 * one for ipv4 and one for ipv6.
1198 * Like for Barefoot, we know what client socket we're going to
1199 * read the packet from, as there is only one per i/o object,
1200 * but we don't know what target socket, if any, to write the
1201 * packet to until we've read the client packet.
1202 */
1203
1204 iostatus = doio_udp(io, udprset, &badfd);
1205
1206 io_clearset(io, IOSTATUS_FATALERROR(iostatus), udprset);
1207
1208 if (IOSTATUS_FATALERROR(iostatus))
1209 io_delete(sockscf.state.mother.ack, io, badfd, iostatus);
1210
1211 #endif /* Dante */
1212 }
1213 #endif /* HAVE_UDP_SUPPORT */
1214
1215 /*
1216 * Next; TCP-case in wset. In the tcp-case, we don't try to
1217 * read from one side unless we are sure we can write to the
1218 * other side.
1219 */
1220
1221 FD_ZERO(tmpset);
1222 while ((io = io_getset(bits, wset)) != NULL) {
1223 int flags;
1224
1225 SASSERTX(io->state.protocol == SOCKS_TCP);
1226
1227 if (io->state.command == SOCKS_CONNECT
1228 && !io->dst.state.isconnected) {
1229
1230 SASSERTX(FD_ISSET(io->dst.s, wset));
1231 FD_CLR(io->dst.s, wset);
1232
1233 if (connectstatus(io, &badfd) == -1) {
1234 SASSERTX(badfd == io->src.s || badfd == io->dst.s);
1235 io_delete(sockscf.state.mother.ack, io, badfd, IO_IOERROR);
1236 }
1237
1238 /*
1239 * regardless of whether the connect was successful or not we
1240 * can't do anything here as we don't know what the status of
1241 * src.s is, so have to loop around.
1242 * XXX we could check though --- if we coincidentally know that
1243 * src is readable, and the connect succeeded, we can do i/o, no?
1244 */
1245 continue;
1246 }
1247
1248 if (FD_CMP(tmpset, xset) != 0
1249 && (FD_ISSET(io->src.s, xset) || FD_ISSET(io->dst.s, xset)))
1250 flags = MSG_OOB;
1251 else
1252 flags = 0;
1253
1254 iostatus = doio_tcp(io, rset, wset, flags, &badfd);
1255
1256 io_clearset(io, IOSTATUS_FATALERROR(iostatus), rset);
1257 io_clearset(io, IOSTATUS_FATALERROR(iostatus), wset);
1258
1259 if (IOSTATUS_FATALERROR(iostatus))
1260 io_delete(sockscf.state.mother.ack, io, badfd, iostatus);
1261 }
1262
1263 #if BAREFOOTD
1264 /*
1265 * And lastly, raw socket. Only Barefoot reads from it.
1266 */
1267 if (rawsocket != -1 && FD_ISSET(rawsocket, newrset)) {
1268 FD_CLR(rawsocket, newrset);
1269 (void)rawsocket_recv(rawsocket, ioc, iov);
1270 }
1271 #endif /* BAREFOOTD */
1272
1273 /*
1274 * XXX optimization target.
1275 * At this time, newrset should contain mostly the same tcp-sockets
1276 * that would be returned as writable by the next select(2) ("first
1277 * select"), no? It would be very nice if we could somewhat "continue"
1278 * and use that, which would save us wasting a lot of time repeating
1279 * the same select(2) which we just performed.
1280 */
1281 }
1282 }
1283
1284 void
io_preconfigload(void)1285 io_preconfigload(void)
1286 {
1287 const char *function = "io_preconfigload()";
1288
1289 slog(LOG_DEBUG, "%s", function);
1290 }
1291
1292 void
io_postconfigload(void)1293 io_postconfigload(void)
1294 {
1295 const char *function = "io_postconfigload()";
1296 size_t i;
1297
1298 slog(LOG_DEBUG, "%s: ioc = %ld", function, (long)ioc);
1299
1300 for (i = 0; i < ioc; ++i) {
1301 if (!iov[i].allocated)
1302 continue;
1303
1304 io_updatemonitor(&iov[i]);
1305
1306 if (iov[i].state.protocol == SOCKS_TCP)
1307 continue;
1308
1309 SASSERTX(iov[i].state.protocol == SOCKS_UDP);
1310
1311 /*
1312 * In the UDP case we should check rules on each packet, so indicate
1313 * we should not reuse the saved rules after sighup, which may have
1314 * changed the ACLs.
1315 * XXX ideally we would also do a rulespermit() to remove clients that
1316 * no longer have any hope if being allowed to pass packets through.
1317 */
1318
1319 #if !BAREFOOTD
1320 iov[i].src.state.use_saved_srule = 0;
1321 iov[i].dst.state.use_saved_srule = 0;
1322
1323 #else /* BAREFOOTD */
1324 /*
1325 * Nothing to do. Those clients that were permitted we continue
1326 * to permit.
1327 */
1328
1329 slog(LOG_DEBUG, "%s: iov #%ld, dstc = %ld",
1330 function, (long)ioc, (long)iov[i].dst.dstc);
1331 #endif /* BAREFOOTD */
1332 }
1333
1334 #if BAREFOOTD
1335 #warning "missing code to remove unused internal udp sessions."
1336 /*
1337 * i.e. sessions belonging to internal addresses we should no longer
1338 * listen on. Clients belonging to those sessions must also be
1339 * removed. So perhaps all clients that are no longer permitted should
1340 * also be removed then?
1341 */
1342 #if 0
1343 io_remove_session(ioc,
1344 iov,
1345 &oldinternal.addrv[i].addr,
1346 oldinternal.addrv[i].protocol,
1347 IO_ADMINTERMINATION);
1348 proctitleupdate();
1349 #endif
1350 #endif /* BAREFOOTD */
1351 }
1352
1353 void
close_iodescriptors(io)1354 close_iodescriptors(io)
1355 const sockd_io_t *io;
1356 {
1357 const char *function = "close_iodescriptors()";
1358 size_t i;
1359 int errno_s, fdv[] = { io->control.s, io->src.s, io->dst.s, -1 };
1360
1361 #if SOCKS_SERVER
1362 if (io->state.protocol == SOCKS_UDP) {
1363 /*
1364 * dst socket may be up to two sockets (ipv4 and ipv6),
1365 * not just io->dst.s.
1366 */
1367 const size_t dstoffset = 2; /* control = 0, src = 1, dst = 2. */
1368
1369 SASSERTX(io->dst.dstc <= 2);
1370
1371 for (i = 0; i < io->dst.dstc; ++i)
1372 fdv[dstoffset + i] = io->dst.dstv[i].s;
1373
1374 for (i = dstoffset + i; i < ELEMENTS(fdv); ++i)
1375 fdv[i] = -1;
1376 }
1377 #endif /* SOCKS_SERVER */
1378
1379 errno_s = errno;
1380
1381 for (i = 0; i < ELEMENTS(fdv); ++i) {
1382 if (fdv[i] != -1) {
1383 if (close(fdv[i]) == 0) {
1384 ++iostate.freefds;
1385 fdv[i] = -1;
1386 }
1387 else
1388 SWARN(fdv[i]);
1389 }
1390 }
1391
1392 errno = errno_s;
1393 }
1394
1395 int
recv_io(s,io)1396 recv_io(s, io)
1397 int s;
1398 sockd_io_t *io;
1399 {
1400 const char *function = "recv_io()";
1401 #if HAVE_GSSAPI
1402 gss_buffer_desc gssapistate;
1403 char gssapistatemem[MAX_GSS_STATE];
1404 #endif /* HAVE_GSSAPI */
1405 struct iovec iovecv[2];
1406 struct timeval tnow;
1407 struct msghdr msg;
1408 sockd_io_t tmpio;
1409 ssize_t received;
1410 size_t ioi;
1411 int wearechild, fdexpect, fdreceived, iovecc;
1412 CMSG_AALLOC(cmsg, sizeof(int) * FDPASS_MAX);
1413
1414 bzero(iovecv, sizeof(iovecv));
1415 iovecc = 0;
1416
1417 iovecv[iovecc].iov_base = &tmpio;
1418 iovecv[iovecc].iov_len = sizeof(tmpio);
1419 ++iovecc;
1420
1421 #if HAVE_GSSAPI
1422 iovecv[iovecc].iov_base = gssapistatemem;
1423 iovecv[iovecc].iov_len = sizeof(gssapistatemem);
1424 ++iovecc;
1425 #endif /* HAVE_GSSAPI */
1426
1427 bzero(&msg, sizeof(msg));
1428 msg.msg_iov = iovecv;
1429 msg.msg_iovlen = iovecc;
1430 msg.msg_name = NULL;
1431 msg.msg_namelen = 0;
1432
1433 /* LINTED pointer casts may be troublesome */
1434 CMSG_SETHDR_RECV(msg, cmsg, CMSG_MEMSIZE(cmsg));
1435
1436 if (io == NULL) /* child semantics; find a free io ourselves. */
1437 wearechild = 1;
1438 else
1439 wearechild = 0;
1440
1441 slog(LOG_DEBUG, "%s: %s",
1442 function, wearechild ? "we are child" : "we are mother");
1443
1444 if ((received = recvmsgn(s, &msg, 0)) < (ssize_t)sizeof(*io)) {
1445 if (received == -1 && errno == EAGAIN)
1446 ;
1447 else
1448 slog(LOG_DEBUG, "%s: recvmsg(): unexpected short read on socket "
1449 "%d (%ld < %lu): %s",
1450 function,
1451 s,
1452 (long)received,
1453 (unsigned long)(sizeof(*io)),
1454 strerror(errno));
1455
1456 return -1;
1457 }
1458
1459 if (socks_msghaserrors(function, &msg))
1460 return -1;
1461
1462 /*
1463 * ok, received a io. Find out where to store it.
1464 */
1465 ioi = 0;
1466 if (wearechild) { /* child semantics; find a free io ourselves. */
1467 SASSERTX(io == NULL);
1468
1469 for (; ioi < ioc; ++ioi)
1470 if (!iov[ioi].allocated) {
1471 io = &iov[ioi];
1472 break;
1473 }
1474
1475 if (io == NULL) {
1476 /*
1477 * Only reason this should happen is if mother died/closed connection,
1478 * and what we are getting now is not actually an i/o, but just an
1479 * error indicator.
1480 */
1481 char buf;
1482
1483 if (recv(s, &buf, sizeof(buf), MSG_PEEK) > 0)
1484 /*
1485 * not an error indicator, but a mismatch between us and mother.
1486 * Should never happen.
1487 */
1488 SERRX(io_allocated(NULL, NULL, NULL, NULL));
1489
1490 return -1;
1491 }
1492 }
1493
1494 SASSERTX(tmpio.allocated == 0);
1495 *io = tmpio;
1496
1497 SASSERTX(io->crule.bw == NULL);
1498 SASSERTX(io->crule.ss == NULL);
1499 SASSERTX(io->srule.bw == NULL);
1500 SASSERTX(io->srule.ss == NULL);
1501
1502 /* figure out how many descriptors we are supposed to be passed. */
1503 switch (io->state.command) {
1504 case SOCKS_BIND:
1505 case SOCKS_BINDREPLY:
1506 if (io->state.extension.bind)
1507 fdexpect = 3; /* in, out, control. */
1508 else
1509 fdexpect = 2; /* in and out. */
1510 break;
1511
1512 case SOCKS_CONNECT:
1513 fdexpect = 2; /* in and out */
1514 break;
1515
1516 #if HAVE_UDP_SUPPORT
1517 case SOCKS_UDPASSOCIATE:
1518
1519 #if BAREFOOTD
1520 fdexpect = 1; /* in. */
1521
1522 #else /* SOCKS_SERVER */
1523 fdexpect = 2; /* in and control. */
1524
1525 #endif /* SOCKS_SERVER */
1526
1527 break;
1528 #endif /* HAVE_UDP_SUPPORT */
1529
1530 default:
1531 SERRX(io->state.command);
1532 }
1533
1534 if (!CMSG_RCPTLEN_ISOK(msg, sizeof(int) * fdexpect)) {
1535 swarnx("%s: received control message has the invalid len of %d",
1536 function, (int)CMSG_TOTLEN(msg));
1537
1538 return -1;
1539 }
1540
1541 /*
1542 * Get descriptors sent us. Should be at least two.
1543 */
1544
1545 SASSERTX(cmsg->cmsg_level == SOL_SOCKET);
1546 SASSERTX(cmsg->cmsg_type == SCM_RIGHTS);
1547
1548 fdreceived = 0;
1549
1550 CMSG_GETOBJECT(io->src.s, cmsg, sizeof(io->src.s) * fdreceived++);
1551
1552 if (io->state.protocol == SOCKS_TCP)
1553 CMSG_GETOBJECT(io->dst.s, cmsg, sizeof(io->dst.s) * fdreceived++);
1554
1555 #if DIAGNOSTIC
1556 checksockoptions(io->src.s,
1557 io->src.laddr.ss_family,
1558 io->state.protocol == SOCKS_TCP ? SOCK_STREAM : SOCK_DGRAM,
1559 1);
1560
1561 if (io->state.protocol == SOCKS_TCP)
1562 checksockoptions(io->dst.s, io->dst.laddr.ss_family, SOCK_STREAM, 0);
1563 #endif /* DIAGNOSTIC */
1564
1565 #if HAVE_GSSAPI
1566 gssapistate.value = gssapistatemem;
1567 gssapistate.length = received - sizeof(*io);
1568
1569 if (gssapistate.length > 0)
1570 if (sockscf.option.debug >= DEBUG_VERBOSE)
1571 slog(LOG_DEBUG, "%s: read gssapistate of size %ld",
1572 function, (unsigned long)gssapistate.length);
1573 #endif /* HAVE_GSSAPI */
1574
1575 /* any more descriptors to expect? */
1576 switch (io->state.command) {
1577 case SOCKS_BINDREPLY:
1578 #if HAVE_GSSAPI
1579 if (io->dst.auth.method == AUTHMETHOD_GSSAPI) {
1580 if (gssapi_import_state(&io->dst.auth.mdata.gssapi.state.id,
1581 &gssapistate) != 0)
1582 return -1;
1583 }
1584 #endif /* HAVE_GSSAPI */
1585
1586 if (io->state.extension.bind) {
1587 CMSG_GETOBJECT(io->control.s,
1588 cmsg,
1589 sizeof(io->control.s) * fdreceived++);
1590
1591 #if DIAGNOSTIC
1592 checksockoptions(io->control.s,
1593 io->control.laddr.ss_family,
1594 SOCK_STREAM,
1595 1);
1596 #endif /* DIAGNOSTIC */
1597
1598 }
1599 else
1600 SASSERTX(io->control.s == -1);
1601 break;
1602
1603 case SOCKS_CONNECT:
1604 #if HAVE_GSSAPI
1605 if (io->src.auth.method == AUTHMETHOD_GSSAPI) {
1606 if (gssapi_import_state(&io->src.auth.mdata.gssapi.state.id,
1607 &gssapistate) != 0)
1608 return -1;
1609 }
1610 #endif /* HAVE_GSSAPI */
1611
1612 SASSERTX(io->control.s == -1);
1613 break;
1614
1615 case SOCKS_UDPASSOCIATE:
1616 #if SOCKS_SERVER
1617 /* LINTED pointer casts may be troublesome */
1618 CMSG_GETOBJECT(io->control.s,
1619 cmsg,
1620 sizeof(io->control.s) * fdreceived++);
1621
1622 #if HAVE_GSSAPI
1623 if (io->src.auth.method == AUTHMETHOD_GSSAPI) {
1624 if (gssapi_import_state(&io->src.auth.mdata.gssapi.state.id,
1625 &gssapistate) != 0)
1626 return -1;
1627 }
1628 #endif /* HAVE_GSSAPI */
1629
1630 #else /* !SOCKS_SERVER */
1631 SASSERTX(io->control.s == -1);
1632
1633 #endif /* !SOCKS_SERVER */
1634
1635 SASSERTX(io->dst.s == -1); /* will be allocated when needed. */
1636 break;
1637
1638 default:
1639 SERRX(io->state.command);
1640 }
1641
1642 if (sockscf.option.debug >= DEBUG_VERBOSE) {
1643 #if DIAGNOSTIC
1644 struct sockaddr_storage addr;
1645 socklen_t len;
1646 #endif /* DIAGNOSTIC */
1647 size_t bufused;
1648 char buf[1024];
1649
1650
1651 bufused =
1652 snprintf(buf, sizeof(buf),
1653 "received %d descriptor(s) for command %d. Control: fd %d, "
1654 "src: fd %d, dst: fd %d. Allocated to iov #%lu.\n",
1655 fdreceived,
1656 io->state.command,
1657 io->control.s,
1658 io->src.s,
1659 io->dst.s,
1660 (unsigned long)ioi);
1661
1662 bufused +=
1663 snprintf(&buf[bufused], sizeof(buf) - bufused,
1664 "src fd %d (%s)", io->src.s, socket2string(io->src.s, NULL, 0));
1665
1666 bufused +=
1667 snprintf(&buf[bufused], sizeof(buf) - bufused,
1668 ", dst fd %d (%s)",
1669 io->dst.s,
1670 io->dst.s == -1 ? "N/A" : socket2string(io->dst.s, NULL, 0));
1671
1672 if (io->control.s != -1) {
1673 bufused +=
1674 snprintf(&buf[bufused], sizeof(buf) - bufused,
1675 ", control fd %d (%s)",
1676 io->control.s, socket2string(io->control.s, NULL, 0));
1677 }
1678
1679 slog(LOG_DEBUG, "%s: %s", function, buf);
1680
1681 #if DIAGNOSTIC
1682 len = sizeof(addr);
1683 if (getsockname(io->src.s, TOSA(&addr), &len) == 0
1684 && IPADDRISBOUND(&addr))
1685 SASSERTX(sockaddrareeq(&io->src.laddr, &addr, 0));
1686
1687 if (io->src.state.isconnected) {
1688 len = sizeof(addr);
1689 if (getpeername(io->src.s, TOSA(&addr), &len) == 0
1690 && IPADDRISBOUND(&addr))
1691 SASSERTX(sockaddrareeq(&io->src.raddr, &addr, 0));
1692 }
1693
1694 if (io->dst.s != -1) {
1695 len = sizeof(addr);
1696 if (getsockname(io->dst.s, TOSA(&addr), &len) == 0
1697 && IPADDRISBOUND(&addr))
1698 SASSERTX(sockaddrareeq(&io->dst.laddr, &addr, 0));
1699
1700 if (io->dst.state.isconnected) {
1701 len = sizeof(addr);
1702 if (getpeername(io->dst.s, TOSA(&addr), &len) == 0
1703 && IPADDRISBOUND(&addr))
1704 SASSERTX(sockaddrareeq(&io->dst.raddr, &addr, 0));
1705 }
1706 }
1707
1708 if (io->control.s != -1) {
1709 len = sizeof(addr);
1710 if (getsockname(io->control.s, TOSA(&addr), &len) == 0
1711 && IPADDRISBOUND(&addr))
1712 SASSERTX(sockaddrareeq(&io->control.laddr, &addr, 0));
1713
1714 len = sizeof(addr);
1715 if (getpeername(io->control.s, TOSA(&addr), &len) == 0)
1716 SASSERTX(sockaddrareeq(&io->control.raddr, &addr, 0));
1717 }
1718 #endif /* DIAGNOSTIC */
1719 }
1720
1721 if (wearechild) { /* only child does i/o, wait till then before initing. */
1722 size_t i;
1723
1724 for (i = 0; i < io->extsocketoptionc; ++i)
1725 io->extsocketoptionv[i].info
1726 = optval2sockopt(io->extsocketoptionv[i].level,
1727 io->extsocketoptionv[i].optname);
1728
1729 gettimeofday_monotonic(&tnow);
1730
1731 sockd_check_ipclatency("client object received from request process",
1732 &io->state.time.requestend,
1733 &tnow,
1734 &tnow);
1735
1736 /* needs to be set now for correct bandwidth calculation/limiting. */
1737 io->lastio = tnow;
1738
1739 switch (io->state.command) {
1740 #if SOCKS_SERVER
1741 case SOCKS_BINDREPLY:
1742 socks_allocbuffer(io->src.s, SOCK_STREAM);
1743 socks_allocbuffer(io->dst.s, SOCK_STREAM);
1744
1745 io->dst.isclientside = 1;
1746
1747 if (io->control.s != -1) {
1748 SASSERTX(io->state.extension.bind);
1749 socks_allocbuffer(io->control.s, SOCK_STREAM);
1750 }
1751
1752 break;
1753 #endif /* SOCKS_SERVER */
1754
1755 #if HAVE_UDP_SUPPORT
1756 case SOCKS_UDPASSOCIATE: {
1757
1758 io->src.isclientside = 1;
1759
1760 #if SOCKS_SERVER
1761 io->cmd.udp.sfwdrule = &fwdrulev[ioi];
1762 io->cmd.udp.sreplyrule = &replyrulev[ioi];
1763 io->dst.dstv = udptargetv[ioi];
1764 io->dst.dstcmax = ELEMENTS(udptargetv[ioi]);
1765 io->dst.dstc = 0;
1766
1767 if (ADDRISBOUND(&io->src.raddr))
1768 SASSERTX(io->src.state.isconnected);
1769 else
1770 SASSERTX(!io->src.state.isconnected);
1771
1772 SASSERTX(io->control.s != -1);
1773 socks_allocbuffer(io->control.s, SOCK_STREAM);
1774
1775 #else /* BAREFOOTD */
1776 const size_t mallocsize
1777 = UDP_INITIALCLIENTCOUNT * sizeof(*io->dst.dstv);
1778
1779 SASSERTX(io->src.auth.method == AUTHMETHOD_NONE);
1780
1781 SASSERTX(io->dst.s == -1);
1782
1783 /* only used for select(2) regarding writability. */
1784 io->dst.s = io->src.s;
1785
1786 if ((io->dst.dstv = malloc(mallocsize)) == NULL) {
1787 struct sockaddr_storage laddr;
1788 char buf[MAXSOCKADDRSTRING];
1789 socklen_t len = sizeof(laddr);
1790
1791 if (getsockname(io->src.s, TOSA(&laddr), &len) == 0)
1792 sockaddr2string(&laddr, buf, sizeof(buf));
1793 else {
1794 SWARN(errno);
1795 snprintf(buf, sizeof(buf), "<unknown>");
1796 }
1797
1798 swarn("%s: failed to allocate %lu bytes of memory for initial "
1799 "%d udp clients to accept on internal address %s",
1800 function,
1801 (unsigned long)(mallocsize),
1802 UDP_INITIALCLIENTCOUNT,
1803 buf);
1804
1805 close(io->src.s);
1806
1807 return -1;
1808 }
1809
1810 io->dst.dstc = 0;
1811 io->dst.dstcmax = UDP_INITIALCLIENTCOUNT;
1812 #endif /* BAREFOOTD */
1813
1814 /*
1815 * Each client will have it's own target/dst object, set once
1816 * we receive the first packet from it. The client/source socket
1817 * is however the same for all clients.
1818 */
1819 socks_allocbuffer(io->src.s, SOCK_DGRAM);
1820
1821 break;
1822 }
1823 #endif /* HAVE_UDP_SUPPORT */
1824
1825 case SOCKS_CONNECT:
1826 if (!io->dst.state.isconnected)
1827 iostate.haveconnectinprogress = 1;
1828
1829 io->src.isclientside = 1;
1830
1831 socks_allocbuffer(io->src.s, SOCK_STREAM);
1832 socks_allocbuffer(io->dst.s, SOCK_STREAM);
1833 break;
1834
1835 }
1836
1837 #if HAVE_NEGOTIATE_PHASE
1838 if (io->clientdatalen != 0) {
1839 slog(LOG_DEBUG,
1840 "%s: adding initial data of size %ld from client %s to iobuf",
1841 function,
1842 (long)io->clientdatalen,
1843 sockshost2string(&io->src.host, NULL, 0));
1844
1845 /*
1846 * XXX if covenant, this request has already been parsed and we
1847 * already know we need to forward it; should optimize away
1848 * re-parsing.
1849 */
1850
1851 socks_addtobuffer(CONTROLIO(io)->s,
1852 READ_BUF,
1853 0,
1854 io->clientdata,
1855 io->clientdatalen);
1856
1857 io->clientdatalen = 0;
1858 }
1859 #endif /* HAVE_NEGOTIATE_PHASE */
1860
1861 log_ruleinfo_shmid(CRULE_OR_HRULE(io), function, NULL);
1862
1863 if (io->srule.type == object_none) {
1864 SASSERTX(io->state.protocol == SOCKS_UDP);
1865 SASSERTX(!HAVE_SOCKS_RULES);
1866 }
1867 else
1868 log_ruleinfo_shmid(&io->srule, function, NULL);
1869
1870 /*
1871 * attach to shmem now and keep attached, so we don't have to
1872 * attach/detach for every i/o op later. Session is not important
1873 * to attach to as we won't need it until we delete the client,
1874 * though it would still be preferable to attach to that also to
1875 * avoid having to wait for pagein of sessionmemory upon disconnect.
1876 */
1877 (void)sockd_shmat(SHMEMRULE(io), SHMEM_ALL);
1878
1879 io_updatemonitor(io);
1880
1881 /*
1882 * only update now, as it's added to us (us, the i/o process)
1883 * without problems.
1884 */
1885 io->allocated = 1;
1886 }
1887
1888 iostate.freefds -= fdreceived;
1889
1890 return 0;
1891 }
1892
1893 static void
io_clearset(io,clearalltargets,set)1894 io_clearset(io, clearalltargets, set)
1895 const sockd_io_t *io;
1896 const int clearalltargets;
1897 fd_set *set;
1898 {
1899
1900 SASSERTX(io->src.s != -1);
1901 FD_CLR(io->src.s, set);
1902
1903 if (io->state.protocol == SOCKS_TCP)
1904 SASSERTX(io->dst.s != -1);
1905
1906 if (io->dst.s != -1)
1907 FD_CLR(io->dst.s, set);
1908
1909 switch (io->state.command) {
1910 case SOCKS_CONNECT:
1911 break;
1912
1913 case SOCKS_BIND:
1914 case SOCKS_BINDREPLY:
1915 if (io->state.extension.bind) {
1916 SASSERTX(io->control.s != -1);
1917 FD_CLR(io->control.s, set);
1918 }
1919 break;
1920
1921 #if HAVE_UDP_SUPPORT
1922 case SOCKS_UDPASSOCIATE: {
1923 size_t i;
1924
1925 #if HAVE_CONTROL_CONNECTION
1926 SASSERTX(io->control.s != -1);
1927 FD_CLR(io->control.s, set);
1928 #endif /* HAVE_CONTROL_CONNECTION */
1929
1930
1931 if (clearalltargets)
1932 for (i = 0; i < io->dst.dstc; ++i)
1933 FD_CLR(io->dst.dstv[i].s, set);
1934
1935 break;
1936 }
1937 #endif /* HAVE_UDP_SUPPORT */
1938
1939 default:
1940 SERRX(io->state.command);
1941 }
1942 }
1943
1944 static size_t
io_allocated(tcpio,tcpfd,udpio,udpfd)1945 io_allocated(tcpio, tcpfd, udpio, udpfd)
1946 size_t *tcpio;
1947 size_t *tcpfd;
1948 size_t *udpio;
1949 size_t *udpfd;
1950 {
1951 const char *function = "io_allocated()";
1952 size_t i, tcpio_mem, tcpfd_mem, udpio_mem, udpfd_mem;
1953
1954 if (tcpio == NULL)
1955 tcpio = &tcpio_mem;
1956
1957 if (tcpfd == NULL)
1958 tcpfd = &tcpfd_mem;
1959
1960 if (udpio == NULL)
1961 udpio = &udpio_mem;
1962
1963 if (udpfd == NULL)
1964 udpfd = &udpfd_mem;
1965
1966 *tcpio = *tcpfd = *udpio = *udpfd = 0;
1967
1968 for (i = 0; i < ioc; ++i) {
1969 if (!iov[i].allocated)
1970 continue;
1971
1972 switch (iov[i].state.protocol) {
1973 #if HAVE_UDP_SUPPORT
1974 case SOCKS_UDP:
1975 ++(*udpio);
1976
1977 ++(*udpfd); /* internal-side. Always one. */
1978 (*udpfd) += iov[i].dst.dstc; /* external side. Varies. */
1979
1980 #if HAVE_CONTROL_CONNECTION
1981 ++(*tcpfd);
1982 #endif /* HAVE_CONTROL_CONNECTION */
1983
1984 break;
1985 #endif /* HAVE_UDP_SUPPORT */
1986
1987 case SOCKS_TCP:
1988 ++(*tcpio);
1989
1990 (*tcpfd) += 1 /* internal side */ + 1 /* external side. */;
1991 break;
1992
1993 default:
1994 SERRX(iov[i].state.protocol);
1995 }
1996
1997 if (sockscf.option.debug >= DEBUG_VERBOSE)
1998 slog(LOG_DEBUG, "%s: iov #%lu allocated for %s",
1999 function, (unsigned long)i, protocol2string(iov[i].state.protocol));
2000 }
2001
2002 if (sockscf.option.debug >= DEBUG_VERBOSE)
2003 slog(LOG_DEBUG, "%s: allocated for tcp: %lu, udp: %lu",
2004 function, (unsigned long)*tcpio, (unsigned long)*udpio);
2005
2006 return *tcpio + *udpio;
2007 }
2008
2009 static void
proctitleupdate(void)2010 proctitleupdate(void)
2011 {
2012 size_t inprogress;
2013
2014 if (iostate.haveconnectinprogress) {
2015 size_t i;
2016
2017 for (i = inprogress = 0; i < ioc; ++i)
2018 if (io_connectisinprogress(&iov[i]))
2019 ++inprogress;
2020 }
2021 else
2022 inprogress = 0;
2023
2024 setproctitle("%s: %lu/%lu (%lu in progress)",
2025 childtype2string(sockscf.state.type),
2026 (unsigned long)io_allocated(NULL, NULL, NULL, NULL),
2027 (unsigned long)SOCKD_IOMAX,
2028 (unsigned long)inprogress);
2029 }
2030
/*
 * Returns the i/o object that has a descriptor set in "set", scanning
 * descriptors in the range [0, nfds).  If more than one i/o object has
 * a descriptor set, the one that least recently did i/o is returned.
 * Returns NULL if no descriptor in "set" belongs to any i/o object.
 *
 * Side effect for udp objects: iov[i].dst is synced to the matched
 * target socket via io_syncudp(), or dst.s is set to -1 if the match
 * was the shared client-side socket (the target is not known until the
 * packet has been read).
 */
static sockd_io_t *
io_getset(nfds, set)
   const int nfds;
   const fd_set *set;
{
   const char *function = "io_getset()";
   sockd_io_t *best, *evaluating;
   size_t i;
   int s;

   for (s = 0, best = NULL; s < nfds; ++s) {
      if (!FD_ISSET(s, set))
         continue;

      /*
       * find the io 's' is part of.
       */
      for (i = 0, evaluating = NULL; i < ioc; ++i) {
         if (!iov[i].allocated)
            continue;

         switch (iov[i].state.command) {
            case SOCKS_CONNECT:
               if (s == iov[i].src.s || s == iov[i].dst.s)
                  evaluating = &iov[i];

               break;

#if SOCKS_SERVER
            case SOCKS_BINDREPLY:
               if (s == iov[i].src.s || s == iov[i].dst.s)
                  evaluating = &iov[i];
               else if (iov[i].state.extension.bind && s == iov[i].control.s)
                  evaluating = &iov[i];   /* control exists only with bind extension. */

               break;
#endif /* SOCKS_SERVER */

#if HAVE_UDP_SUPPORT
            case SOCKS_UDPASSOCIATE: {
               udptarget_t *target;

               if (s == iov[i].src.s) {
                  /* will have to demux later based on packet read from src. */
                  iov[i].dst.s = -1;
                  evaluating = &iov[i];
               }
               else if ((target = clientofsocket(s,
                                                iov[i].dst.dstc,
                                                iov[i].dst.dstv)) != NULL) {
                  /* a target socket; make io->dst refer to this target. */
                  io_syncudp(&iov[i], target);

                  SASSERTX(iov[i].dst.s != -1);
                  SASSERTX(iov[i].dst.s == s);

                  evaluating = &iov[i];
               }
#if HAVE_CONTROL_CONNECTION
               else if (s == iov[i].control.s)
                  evaluating = &iov[i];
#endif /* HAVE_CONTROL_CONNECTION */

               break;
            }
#endif /* HAVE_UDP_SUPPORT */

            default:
               break;
         }

         if (evaluating != NULL)
            break;
      }

      /* every descriptor in "set" must belong to some i/o object. */
      SASSERTX(evaluating != NULL);

      /* want the i/o object that has least recently done i/o. */
      if (best == NULL || timercmp(&evaluating->lastio, &best->lastio, <))
         best = evaluating;
   }

   return best;
}
2114
static sockd_io_t *
io_finddescriptor(d)
   int d;
{
   size_t i;

   /*
    * Return the allocated i/o object that descriptor "d" belongs to
    * (src, dst, control, or, for UDP, one of the target sockets),
    * or NULL if "d" does not belong to any allocated i/o object.
    * For UDP, finding "d" among the target sockets has the side effect
    * of syncing iov[i].dst to that target via io_syncudp().
    */

   for (i = 0; i < ioc; ++i) {
      if (!iov[i].allocated)
         continue;

      switch (iov[i].state.command) {
         case SOCKS_BIND:
         case SOCKS_BINDREPLY:
            if (d == iov[i].src.s || d == iov[i].dst.s)
               return &iov[i];
            else if (!iov[i].state.extension.bind) {
               /*
                * NOTE(review): the control socket is checked here only when
                * the bind extension is NOT in use, while io_getset() checks
                * it only when the extension IS in use — the polarity
                * differs between the two functions.  Looks intentional for
                * the non-extension case (control is separate), but confirm.
                */
               if (d == iov[i].control.s)
                  return &iov[i];
            }
            break;

         case SOCKS_CONNECT:
            if (d == iov[i].src.s || d == iov[i].dst.s)
               return &iov[i];
            break;

#if HAVE_UDP_SUPPORT
         case SOCKS_UDPASSOCIATE: {
            udptarget_t *target;

            if (d == iov[i].src.s) {
               /* will have to demux later based on packet read from src. */
               iov[i].dst.s = -1;
               return &iov[i];
            }

            target = clientofsocket(d, iov[i].dst.dstc, iov[i].dst.dstv);
            if (target != NULL) {
               /* sync dst to the matching udp target before returning. */
               io_syncudp(&iov[i], target);

               SASSERTX(iov[i].dst.s != -1);
               SASSERTX(iov[i].dst.s == d);

               return &iov[i];
            }

#if HAVE_CONTROL_CONNECTION
            if (d == iov[i].control.s)
               return &iov[i];
#endif /* HAVE_CONTROL_CONNECTION */

            break;
         }
#endif /* HAVE_UDP_SUPPORT */

         default:
            /* unknown command in an allocated i/o object: internal error. */
            SERRX(iov[i].state.command);
      }
   }

   return NULL;
}
2177
static int
io_fillset(set, antiflags, antiflags_set, bwoverflowtil)
   fd_set *set;
   int antiflags;
   fd_set *antiflags_set;
   struct timeval *bwoverflowtil;
{
   const char *function = "io_fillset()";
   struct timeval tnow, firstbwoverflowok;
   size_t i;
   int max;

   /*
    * Fill "set" with the descriptors of all allocated i/o objects that we
    * should select(2) on for readability.  Descriptors belonging to an
    * object whose flags match "antiflags" go into "antiflags_set" instead
    * (antiflags_set must be non-NULL if antiflags is non-zero).
    * I/o objects whose bandwidth-limit has overflown are skipped, and on
    * return "*bwoverflowtil" (if non-NULL) holds the earliest time at
    * which one of the skipped objects may do i/o again ({0,0} if none
    * were skipped).
    * Returns the highest descriptor set in either fd_set, or -1 if none.
    */

   if (antiflags != 0)
      SASSERTX(antiflags_set != NULL);

   gettimeofday_monotonic(&tnow);
   timerclear(&firstbwoverflowok);

   FD_ZERO(set);

   if (antiflags_set != 0)
      FD_ZERO(antiflags_set);

   for (i = 0, max = -1; i < ioc; ++i) {
      sockd_io_t *io = &iov[i];

      if (!io->allocated)
         continue;

      /* should have been removed already if so. */
      SASSERTX(!(io->src.state.fin_received && io->dst.state.fin_received));

#if HAVE_CONTROL_CONNECTION
      /*
       * Don't care about bandwidth-limits on control-connections.
       */
      if (io->control.s != -1) {
         if (antiflags & io->control.flags)
            FD_SET(io->control.s, antiflags_set);
         else
            FD_SET(io->control.s, set);

         max = MAX(max, io->control.s);
      }
#endif /* HAVE_CONTROL_CONNECTION */


#if BAREFOOTD
      /*
       * udp-clients need special handling in barefootd regarding bw,
       * but the tcp case is the same.
       */
      if (io->state.protocol == SOCKS_TCP) {
#endif /* BAREFOOTD */

      if (SHMEMRULE(io)->bw_shmid != 0) {
         struct timeval bwoverflowok, howlongtil;

         /* skip this io until its bandwidth-allowance recovers. */
         if (bw_rulehasoverflown(SHMEMRULE(io), &tnow, &bwoverflowok)) {
            if (!timerisset(&firstbwoverflowok)
            ||  timercmp(&bwoverflowok, &firstbwoverflowok, <))
               firstbwoverflowok = bwoverflowok;

            SASSERTX(!timercmp(&bwoverflowok, &tnow, <));
            timersub(&bwoverflowok, &tnow, &howlongtil);
            slog(LOG_DEBUG,
                 "%s: skipping io #%lu belonging to rule #%lu/bw_shmid %lu "
                 "due to bwoverflow.  Have fd %d, fd %d, fd %d ctrl/src/dst. "
                 "Have to wait for %ld.%06lds, until %ld.%06ld",
                 function,
                 (unsigned long)i,
                 (unsigned long)SHMEMRULE(io)->number,
                 (unsigned long)SHMEMRULE(io)->bw_shmid,
                 io->control.s,
                 io->src.s,
                 io->dst.s,
                 (long)howlongtil.tv_sec,
                 (long)howlongtil.tv_usec,
                 (long)bwoverflowok.tv_sec,
                 (long)bwoverflowok.tv_usec);

            continue;
         }
      }

#if BAREFOOTD
      }
#endif /* BAREFOOTD */

      switch (io->state.command) {
         case SOCKS_BINDREPLY:
         case SOCKS_CONNECT:
            /* don't select on a side we have already received FIN from. */
            if (!io->src.state.fin_received) {
               if (antiflags & io->src.flags)
                  FD_SET(io->src.s, antiflags_set);
               else
                  FD_SET(io->src.s, set);

               max = MAX(max, io->src.s);
            }

            /* dst only once connect(2) has completed. */
            if (io->dst.state.isconnected
            && !io->dst.state.fin_received) {
               if (antiflags & io->dst.flags)
                  FD_SET(io->dst.s, antiflags_set);
               else
                  FD_SET(io->dst.s, set);

               max = MAX(max, io->dst.s);
            }

            break;

#if HAVE_UDP_SUPPORT
         case SOCKS_UDPASSOCIATE: {
            size_t j;

            /* no flags for udp so far. */
            SASSERTX(io->src.flags == 0);
            SASSERTX(io->dst.flags == 0);

            FD_SET(io->src.s, set);
            max = MAX(max, io->src.s);

#if BAREFOOTD
            /*
             * the client-socket is shared among many clients, so set it
             * regardless of bw-limits as we don't know from what
             * client the packet is til we've read the packet.
             *
             * XXX But what do we do if the bw overflows?  We can't know
             * that until we've read the packet and seen what client it's
             * from.  Should we then drop the packet?  Probably.
             */

            for (j = 0; j < io->dst.dstc; ++j) {
               /* per-client bandwidth-limit check for each udp target. */
               if (io->dst.dstv[j].crule.bw_shmid != 0) {
                  struct timeval bwoverflowok;

                  slog(LOG_DEBUG,
                       "%s: checking client %s for bw overflow "
                       "according to bw_shmid %lu ...",
                       function,
                       sockaddr2string(&io->dst.dstv[j].client, NULL, 0),
                       (unsigned long)io->dst.dstv[j].crule.bw_shmid);

                  SASSERTX(io->dst.dstv[j].crule.bw != NULL);
                  if (bw_rulehasoverflown(&io->dst.dstv[j].crule,
                                          &tnow,
                                          &bwoverflowok)) {
                     if (!timerisset(&firstbwoverflowok)
                     ||  timercmp(&bwoverflowok, &firstbwoverflowok, <))
                        firstbwoverflowok = bwoverflowok;

                     continue;
                  }
               }

               FD_SET(io->dst.dstv[j].s, set);
               max = MAX(max, io->dst.dstv[j].s);
            }

#else /* SOCKS_SERVER */
            /*
             * Each client can have up to two target sockets, one
             * IPv4 socket and one IPv6 socket.  Set all allocated.
             */

            if (io->src.state.isconnected) {
               for (j = 0; j < io->dst.dstc; ++j) {
                  SASSERTX(io->dst.dstv[j].s != -1);

                  FD_SET(io->dst.dstv[j].s, set);
                  max = MAX(max, io->dst.dstv[j].s);
               }
            }
            else {
               /*
                * means we don't yet know what address the client will send
                * us packets from, and it has not sent us any packets yet,
                * so we can hardly expect any reply.  Even if we got a
                * replypacket, we wouldn't know where to send it.
                */
               SASSERTX(io->dst.dstc == 0);
               SASSERTX(io->dst.s == -1);
            }
#endif /* SOCKS_SERVER */

            break;
         }
#endif /* HAVE_UDP_SUPPORT */
      }
   }

   if (bwoverflowtil != NULL)
      *bwoverflowtil = firstbwoverflowok;

   return max;
}
2377
2378 static int
io_fillset_connectinprogress(set)2379 io_fillset_connectinprogress(set)
2380 fd_set *set;
2381 {
2382 const char *function = "io_fillset_connectinprogress()";
2383 int i, bits, count;
2384
2385 if (set != NULL)
2386 FD_ZERO(set);
2387
2388 if (!iostate.haveconnectinprogress)
2389 return -1;
2390
2391 for (i = count = 0, bits = -1; (size_t)i < ioc; ++i) {
2392 if (io_connectisinprogress(&iov[i])) {
2393 if (set == NULL)
2394 return iov[i].dst.s;
2395
2396 FD_SET(iov[i].dst.s, set);
2397
2398 bits = MAX(bits, iov[i].dst.s);
2399
2400 slog(LOG_DEBUG, "%s: fd %d marked as still connecting",
2401 function, iov[i].dst.s);
2402
2403 ++count;
2404 }
2405 }
2406
2407 return bits;
2408 }
2409
static struct timeval *
io_gettimeout(timeout)
   struct timeval *timeout;
{
   const char *function = "io_gettimeout()";
   static struct timeval last_timeout;
   static time_t last_time;
   static int last_timeout_isset;
   struct timeval tnow, time_havebw;
   size_t i, tcpc, udpc;
   int havetimeout;

   /*
    * Calculate how long select(2) should wait before we need to handle
    * a timeout (i/o timeout, connect timeout, fin-wait, or bandwidth
    * recovery).  Returns "timeout" filled in, or NULL if there is no
    * timeout to enforce.  Caches the result for one second (last_time /
    * last_timeout) to avoid rescanning all clients on every call.
    */

   gettimeofday_monotonic(&tnow);

   if (timerisset(&bwoverflowtil)) {
      const struct timeval shortenough = { 1, 0 };

      timersub(&bwoverflowtil, &tnow, &time_havebw);

      slog(LOG_DEBUG,
           "%s: bwoverflowtil is set until %ld.%06ld (in %ld.%06lds)",
           function,
           (long)bwoverflowtil.tv_sec,
           (long)bwoverflowtil.tv_usec,
           (long)time_havebw.tv_sec,
           (long)time_havebw.tv_usec);

      if (time_havebw.tv_sec < 0) {
         /* bandwidth has already recovered; no bw-based wait needed. */
         timerclear(&bwoverflowtil);
         timerclear(timeout);

         return timeout;
      }

      *timeout = time_havebw;
      if (timercmp(timeout, &shortenough, <))
         return timeout; /* so short there is no point scanning further. */

      havetimeout = 1;
   }
   else
      havetimeout = 0;

   slog(LOG_DEBUG,
        "%s: last_time = %ld, tnow = %ld, last_timeout_isset = %ld, "
        "last_timeout = %ld.%06ld",
        function,
        (long)last_time,
        (long)tnow.tv_sec,
        (long)last_timeout_isset,
        (long)last_timeout.tv_sec,
        (long)last_timeout.tv_usec);

   /*
    * If last timeout scan was the same second as now, see if we
    * can reuse it without scanning all clients again.
    */
   if (last_timeout_isset && last_time == tnow.tv_sec) {
      /*
       * Don't know if the last timeout is still valid.  Could be it was a
       * temporary timeout that no longer is valid (e.g., a timeout waiting
       * for connect(2) to complete.  Since we don't know, make sure we
       * don't wait too long, and not too little either.
       */
      last_timeout.tv_sec  = 1;
      last_timeout.tv_usec = 0;

      if (!havetimeout
      || (havetimeout && timercmp(timeout, &last_timeout, <)))
         *timeout = last_timeout;

      return timeout;
   }

   /*
    * Could perhaps add a "timeoutispossible" object also by checking
    * each io object as we receive it (and each udp client as we
    * add it).  If we find one where timeout is possible, set the
    * global timeoutispossible, if not, don't set it.  Each time
    * we io_delete(), we change timeoutispossible to true, and
    * upon scanning through all i/o's here, we may possible set it
    * to false again.  Since the default is to not have any timeout
    * in the i/o phase (except for FIN_WAIT), this might save time in
    * the common cases.
    */
   if (io_allocated(&tcpc, NULL, &udpc, NULL) == 0) {
      /* no clients at all; nothing can time out. */
      last_timeout_isset = 0;
      return NULL;
   }

   last_time = tnow.tv_sec;

   /*
    * go through all i/o-objects, finding the one who has least left
    * until timeout, or the first with a timeout that is soon enough.
    */
   for (i = 0; i < ioc && (tcpc > 0 || udpc > 0); ++i) {
      struct timeval timeout_found;

      if (!iov[i].allocated)
         continue;

      /* count down so we can stop once all allocated objects are seen. */
      if (iov[i].state.protocol == SOCKS_TCP)
         --tcpc;
      else {
         SASSERTX(iov[i].state.protocol == SOCKS_UDP);
         --udpc;
      }

      timeout_found.tv_sec  = io_timeuntiltimeout(&iov[i], &tnow, NULL, 0);
      timeout_found.tv_usec = 0;

      slog(LOG_DEBUG, "%s: timeout for iov #%lu is in %lds",
           function, (unsigned long)i, (long)timeout_found.tv_sec);

      if (timeout_found.tv_sec != (time_t)-1) {
         if (!havetimeout || timercmp(&timeout_found, timeout, <)) {
            havetimeout = 1;
            *timeout    = timeout_found;
         }

         SASSERTX(havetimeout);
         if (timeout->tv_sec <= 0)
            break; /* timeout soon enough or already there. */
      }
   }

   if (havetimeout) {
      SASSERTX(timeout->tv_sec  >= 0);
      SASSERTX(timeout->tv_usec >= 0);

      /*
       * never mind sub-second accuracy, but do make sure we don't end up
       * with {0, 0} if there is less than one second till timeout.  If
       * there is more than one second, never mind if the timeout is a
       * little longer than necessary.
       */
      timeout->tv_usec = 999999;

      last_timeout = *timeout;
   }
   else
      timeout = NULL;

   last_timeout_isset = havetimeout;

   return timeout;
}
2558
2559 static sockd_io_t *
io_gettimedout(void)2560 io_gettimedout(void)
2561 {
2562 const char *function = "io_gettimedout()";
2563 struct timeval tnow;
2564 size_t i;
2565
2566 gettimeofday_monotonic(&tnow);
2567 for (i = 0; i < ioc; ++i) {
2568 struct timeval timeout;
2569
2570 if (!iov[i].allocated)
2571 continue;
2572
2573 if ((timeout.tv_sec = io_timeuntiltimeout(&iov[i], &tnow, NULL, 1))
2574 == (time_t)-1)
2575 continue; /* no timeout on this object. */
2576
2577 timeout.tv_usec = 0; /* whole seconds is good enough. */
2578 if (timeout.tv_sec <= 0) { /* has timed out already. */
2579 slog(LOG_DEBUG,
2580 "%s: io #%lu with control %d, src %d, dst %d, has reached the "
2581 "timeout point. I/O last done at %ld.%06ld",
2582 function,
2583 (unsigned long)i,
2584 iov[i].control.s,
2585 iov[i].src.s,
2586 iov[i].dst.s,
2587 (long)iov[i].lastio.tv_sec,
2588 (long)iov[i].lastio.tv_usec);
2589
2590 return &iov[i];
2591 }
2592 }
2593
2594 return NULL;
2595 }
2596
2597 static int
io_timeoutispossible(io)2598 io_timeoutispossible(io)
2599 const sockd_io_t *io;
2600 {
2601
2602 if (!io->allocated)
2603 return 0;
2604
2605 #if HAVE_UDP_SUPPORT
2606 if (io->state.protocol == SOCKS_UDP) {
2607
2608 #if BAREFOOTD
2609 size_t i;
2610
2611 for (i = 0; i < io->dst.dstc; ++i) {
2612 if (io->dst.dstv[i].crule.timeout.udpio != 0)
2613 return 1;
2614 }
2615
2616 return 0;
2617
2618 #else /* !BAREFOOTD */
2619
2620 return io->srule.timeout.udpio != 0;
2621
2622 #endif /* !BAREFOOTD */
2623
2624 }
2625 #endif /* HAVE_UDP_SUPPORT */
2626
2627 /*
2628 * TCP is the same for all.
2629 */
2630 SASSERTX(io->state.protocol == SOCKS_TCP);
2631
2632 if (io->dst.state.isconnected) {
2633 if (io->srule.timeout.tcp_fin_wait != 0
2634 || io->srule.timeout.tcpio != 0)
2635 return 1;
2636 else
2637 return 0;
2638 }
2639 else
2640 return io->srule.timeout.connect != 0;
2641
2642 /* NOTREACHED */
2643 }
2644
static time_t
io_timeuntiltimeout(io, tnow, timeouttype, doudpsync)
   sockd_io_t *io;
   const struct timeval *tnow;
   timeouttype_t *timeouttype;
   const int doudpsync;
{
   const char *function = "io_timeuntiltimeout()";
   timeouttype_t timeouttype_mem;
   time_t *lastio;
   long protocoltimeout;

   /*
    * Return the number of seconds until "io" times out (0 if it already
    * has), or -1 if no timeout applies to it.  "*timeouttype" (optional;
    * may be NULL) is set to the kind of timeout that applies.
    * In Barefoot's udp case, "doudpsync" being set means we sync dst to
    * the first timed-out udp client before returning.
    */

   if (timeouttype == NULL)
      timeouttype = &timeouttype_mem; /* caller doesn't care; use scratch. */

   *timeouttype = TIMEOUT_NOTSET;

   if (!io_timeoutispossible(io))
      return -1;

   /*
    * First find out what the correct timeoutobject to use for this
    * io at this time is, and then see if a timeout value has been
    * set in that object (i.e., is not 0).
    */
   if (io->state.protocol == SOCKS_UDP) {
#if BAREFOOTD
      size_t i;
      time_t timeout;

      slog(LOG_DEBUG, "%s: scanning %lu udp clients for nearest timeout",
           function, (unsigned long)io->dst.dstc);

      for (i = 0, timeout = -1; i < io->dst.dstc; ++i) {
         udptarget_t *udpclient = &io->dst.dstv[i];

         SASSERTX(tnow->tv_sec >= udpclient->lastio.tv_sec);

         /* time left = configured timeout - time since last i/o. */
         timeout
         = socks_difftime(udpclient->crule.timeout.udpio,
                          socks_difftime(tnow->tv_sec, udpclient->lastio.tv_sec));

         slog(LOG_DEBUG, "%s: time until timeout for udpclient %s is %ld",
              function,
              sockaddr2string(&udpclient->client, NULL, 0),
              (long)timeout);

         timeout = MAX(0, timeout);
         *timeouttype = TIMEOUT_IO;

         if (timeout <= 0) {
            SASSERTX(udpclient != NULL);

            if (doudpsync)
               io_syncudp(io, udpclient);

            break; /* timeout is now. */
         }
      }

      /*
       * NOTE(review): returns the last client's time-left, not the
       * minimum across clients (loop only stops early at 0) — confirm
       * this is intended; callers appear to re-poll each second anyway.
       */
      return timeout;
#else /* SOCKS_SERVER */

      *timeouttype    = TIMEOUT_IO; /* only type possible for an udp client. */
      protocoltimeout = io->srule.timeout.udpio;
      lastio          = (time_t *)&io->lastio.tv_sec;
#endif /* SOCKS_SERVER */
   }
   else {
      SASSERTX(io->state.protocol == SOCKS_TCP);

      if (io->dst.state.isconnected) {
         if (io->src.state.fin_received || io->dst.state.fin_received) {
            /* half-closed: use whichever of tcpio/tcp_fin_wait is sooner. */
            if (io->srule.timeout.tcp_fin_wait == 0)
               *timeouttype = TIMEOUT_IO;
            else {
               if (io->srule.timeout.tcpio == 0
               ||  io->srule.timeout.tcpio > io->srule.timeout.tcp_fin_wait) {
                  *timeouttype = TIMEOUT_TCP_FIN_WAIT;
               }
               else
                  *timeouttype = TIMEOUT_IO;
            }
         }
         else
            *timeouttype = TIMEOUT_IO;

         if (*timeouttype == TIMEOUT_IO)
            protocoltimeout = io->srule.timeout.tcpio;
         else {
            SASSERTX(*timeouttype == TIMEOUT_TCP_FIN_WAIT);
            protocoltimeout = io->srule.timeout.tcp_fin_wait;
         }

         lastio = (time_t *)&io->lastio.tv_sec;
      }
      else {
         /* not connected yet: the connect(2) timeout applies, measured
          * from the end of the negotiate phase. */
         *timeouttype    = TIMEOUT_CONNECT;
         protocoltimeout = io->srule.timeout.connect;
         lastio          = (time_t *)&io->state.time.negotiateend.tv_sec;
      }
   }

   if (protocoltimeout == 0)
      return -1; /* no timeout configured for this type. */

   SASSERTX(socks_difftime(*lastio, (time_t)tnow->tv_sec) <= 0);

   if (sockscf.option.debug)
      if (MAX(0, protocoltimeout - socks_difftime(tnow->tv_sec, *lastio)) == 0)
         slog(LOG_DEBUG,
              "%s: timeouttype = %d, protocoltimeout = %ld, tnow = %lu, "
              "lastio = %ld (%lds ago), timeout reached %lds ago",
              function,
              (int)*timeouttype,
              protocoltimeout,
              (unsigned long)tnow->tv_sec,
              (long)*lastio,
              (long)socks_difftime(tnow->tv_sec, *lastio),
              (long)(protocoltimeout - socks_difftime(tnow->tv_sec, *lastio)));

   return MAX(0, protocoltimeout - socks_difftime(tnow->tv_sec, *lastio));
}
2768
2769 static int
getnewios()2770 getnewios()
2771 {
2772 const char *function = "getnewios()";
2773 const size_t freec = SOCKD_IOMAX - io_allocated(NULL, NULL, NULL, NULL);
2774 size_t receivedc;
2775
2776 receivedc = errno = 0;
2777 while ( recv_io(sockscf.state.mother.s, NULL) == 0
2778 && receivedc < freec)
2779 ++receivedc;
2780
2781 slog(LOG_DEBUG, "%s: received %lu new io%s, errno = %d (%s)",
2782 function,
2783 (long)receivedc,
2784 receivedc == 1 ? "" : "s",
2785 errno,
2786 strerror(errno));
2787
2788 if (receivedc > 0) {
2789 errno = 0;
2790 return receivedc;
2791 }
2792 else {
2793 slog(LOG_DEBUG,
2794 "%s: strange ... we were called to receive a new client (%lu/%lu), "
2795 "but no new client was there to receive: %s",
2796 function,
2797 (unsigned long)(freec + 1),
2798 (unsigned long)SOCKD_IOMAX,
2799 strerror(errno));
2800
2801 return -1;
2802 }
2803 }
2804
/* ARGSUSED */
static void
siginfo(sig, si, sc)
   int sig;
   siginfo_t *si;
   void *sc;
{
   const char *function = "siginfo()";
   const int errno_s = errno;
#if HAVE_UDP_SUPPORT
   iostat_t *stats;
#endif /* HAVE_UDP_SUPPORT */
   unsigned long days, hours, minutes, seconds;
   time_t tnow;
   size_t i;

   /*
    * SIGINFO/SIGUSR1 handler: log process uptime, latency statistics, and
    * per-client session/transfer state for every allocated i/o object.
    * Saves and restores errno around the work (errno_s) as this runs in
    * signal context.
    */

   SIGNAL_PROLOGUE(sig, si, errno_s);

   seconds = (unsigned long)socks_difftime(time_monotonic(&tnow),
                                           sockscf.stat.boot);

   /* seconds is reduced in place to the remainder after days/hours/mins. */
   seconds2days(&seconds, &days, &hours, &minutes);

   slog(LOG_INFO, "io-child up %lu day%s, %lu:%.2lu:%.2lu",
        days, days == 1 ? "" : "s", hours, minutes, seconds);

#if HAVE_UDP_SUPPORT
   if ((stats = io_get_ro_stats()) == NULL)
      slog(LOG_INFO, "no read-only latency information available (yet)");
   else
      slog(LOG_INFO,
           "read-only latency statistics based on last %lu packets: "
           "min/max/median/average/last/stddev: "
           "%lu/%lu/%lu/%lu/%lu/%lu (us)",
           (unsigned long)stats->latencyc,
           stats->min_us,
           stats->max_us,
           stats->median_us,
           stats->average_us,
           stats->last_us,
           stats->stddev_us);

   if ((stats = io_get_io_stats()) == NULL)
      slog(LOG_INFO, "no i/o latency information available (yet)");
   else
      slog(LOG_INFO,
           "i/o latency statistics based on last %lu packets: "
           "min/max/median/average/last/stddev: "
           "%lu/%lu/%lu/%lu/%lu/%lu (us)",
           (unsigned long)stats->latencyc,
           stats->min_us,
           stats->max_us,
           stats->median_us,
           stats->average_us,
           stats->last_us,
           stats->stddev_us);
#endif /* HAVE_UDP_SUPPORT */

   for (i = 0; i < ioc; ++i) {
      /* for bindreply the logical direction is reversed: dst is "source". */
      const int isreversed = (iov[i].state.command == SOCKS_BINDREPLY ? 1 : 0);
      uint64_t src_written, dst_written;
      sockd_io_direction_t *src, *dst;
      sockshost_t a, b;
      char srcstring[MAX_IOLOGADDR], dststring[MAX_IOLOGADDR],
           timeinfo[64], idlestr[64];

      if (!iov[i].allocated)
         continue;

      if (isreversed) {
         src = &iov[i].dst;
         dst = &iov[i].src;
      }
      else {
         src = &iov[i].src;
         dst = &iov[i].dst;
      }

      /* "idle" is time since last i/o, or since the request ended if we
       * are still waiting for connect(2) to complete. */
      if (iov[i].state.protocol == SOCKS_UDP
      ||  dst->state.isconnected)
         snprintfn(idlestr, sizeof(idlestr), "%lds",
                   (long)socks_difftime(tnow, iov[i].lastio.tv_sec));
      else
         snprintfn(idlestr, sizeof(idlestr),
                   "%lds (waiting for connect to complete)",
                   (long)socks_difftime(tnow,
                                        iov[i].state.time.requestend.tv_sec));

      snprintf(timeinfo, sizeof(timeinfo),
               "age: %lds, idle: %s",
               (long)socks_difftime(tnow, iov[i].state.time.accepted.tv_sec),
               idlestr);


      /*
       * When printing current state display the IP-addresses in actual
       * use, rather than any hostnames the client may have provided.
       */

      if (iov[i].state.protocol == SOCKS_TCP) {
         size_t src_buffered, dst_buffered;
         char src_bufferinfo[64], dst_bufferinfo[sizeof(src_bufferinfo)],
              tcpinfo[MAXTCPINFOLEN];
         int havesocketinfo;

#if HAVE_RECVBUF_IOCTL
         int src_so_rcvbuf, dst_so_rcvbuf;
#endif /* HAVE_RECVBUF_IOCTL */

#if HAVE_SENDBUF_IOCTL
         int src_so_sndbuf, dst_so_sndbuf;
#endif /* HAVE_SENDBUF_IOCTL */

         build_addrstr_src(GET_HOSTIDV(&iov[i].state),
                           GET_HOSTIDC(&iov[i].state),
                           &src->host,
                           NULL,
                           NULL,
                           sockaddr2sockshost(&src->laddr, &b),
                           &src->auth,
                           NULL,
                           srcstring,
                           sizeof(srcstring));

         /* if proxied (not PROXY_DIRECT), include the upstream proxy
          * addresses in the dst string. */
         build_addrstr_dst(sockaddr2sockshost(&dst->laddr, NULL),
                           iov[i].state.proxychain.proxyprotocol == PROXY_DIRECT ?
                              NULL : sockaddr2sockshost(&dst->raddr, &a),
                           iov[i].state.proxychain.proxyprotocol == PROXY_DIRECT ?
                              NULL : &iov[i].state.proxychain.extaddr,
                           iov[i].state.proxychain.proxyprotocol == PROXY_DIRECT ?
                              sockaddr2sockshost(&dst->raddr, &a)
                            : &dst->host,
                           &dst->auth,
                           NULL,
                           (struct in_addr *)NULL,
                           0,
                           dststring,
                           sizeof(dststring));


         /* bytes buffered in our own iobuffer, not yet written to src. */
         src_buffered
         = socks_bytesinbuffer(src->s,
                               WRITE_BUF,
#if SOCKS_SERVER && HAVE_GSSAPI
                               src->auth.method == AUTHMETHOD_GSSAPI
                               && src->auth.mdata.gssapi.state.wrap ?
                                    1 : 0
#else /* !SOCKS_SERVER */
                               0
#endif /* !SOCKS_SERVER */
                               );

         dst_buffered = socks_bytesinbuffer(dst->s, WRITE_BUF, 0);

         *src_bufferinfo = NUL;
         *dst_bufferinfo = NUL;

#if HAVE_RECVBUF_IOCTL || HAVE_SENDBUF_IOCTL

         havesocketinfo = 1;

#else /* ! (HAVE_RECVBUF_IOCTL || HAVE_SENDBUF_IOCTL) */

         havesocketinfo = 0;

#endif /* ! (HAVE_RECVBUF_IOCTL || HAVE_SENDBUF_IOCTL) */

#if HAVE_RECVBUF_IOCTL
         if (ioctl(dst->s, RECVBUF_IOCTLVAL, &dst_so_rcvbuf) != 0) {
            swarn("%s: rcvbuf size ioctl() on dst-fd %d failed",
                  function, dst->s);
         }
         else
            havesocketinfo = 1;

         if (havesocketinfo) {
            if (ioctl(src->s, RECVBUF_IOCTLVAL, &src_so_rcvbuf) != 0) {
               swarn("%s: recvbuf size ioctl() on src-fd %d failed",
                     function, src->s);
               havesocketinfo = 0;
            }
         }
#endif /* HAVE_RECVBUF_IOCTL */

#if HAVE_SENDBUF_IOCTL
         if (havesocketinfo) {
            if (ioctl(src->s, SENDBUF_IOCTLVAL, &src_so_sndbuf) != 0) {
               swarn("%s: sendbuf size ioctl() on src-fd %d failed",
                     function, src->s);
               havesocketinfo = 0;
            }
         }

         if (havesocketinfo) {
            if (ioctl(dst->s, SENDBUF_IOCTLVAL, &dst_so_sndbuf) != 0) {
               swarn("%s: sendbuf size ioctl() on dst-fd %d failed",
                     function, dst->s);
               havesocketinfo = 0;
            }
         }
#endif /* HAVE_SENDBUF_IOCTL */

         /*
          * total buffered towards a side = peer's kernel rcvbuf
          * + our iobuffer + own kernel sndbuf; '?' marks components we
          * cannot query on this platform.
          */
#if HAVE_SENDBUF_IOCTL && HAVE_RECVBUF_IOCTL
         if (havesocketinfo) {
            snprintf(src_bufferinfo, sizeof(src_bufferinfo),
                     "%lu buffered (%lu + %lu + %lu)",
                     (unsigned long)(src_buffered
                                     + dst_so_rcvbuf
                                     + src_so_sndbuf),
                     (unsigned long)dst_so_rcvbuf,
                     (unsigned long)src_buffered,
                     (unsigned long)src_so_sndbuf);

            snprintf(dst_bufferinfo, sizeof(dst_bufferinfo),
                     "%lu buffered (%lu + %lu + %lu)",
                     (unsigned long)(dst_buffered
                                     + src_so_rcvbuf
                                     + dst_so_sndbuf),
                     (unsigned long)src_so_rcvbuf,
                     (unsigned long)dst_buffered,
                     (unsigned long)dst_so_sndbuf);
         }

#elif HAVE_SENDBUF_IOCTL && !HAVE_RECVBUF_IOCTL
         if (havesocketinfo) {
            snprintf(src_bufferinfo, sizeof(src_bufferinfo),
                     "%lu buffered (? + %lu + %lu)",
                     (unsigned long)(src_buffered + src_so_sndbuf),
                     (unsigned long)src_buffered,
                     (unsigned long)src_so_sndbuf);

            snprintf(dst_bufferinfo, sizeof(dst_bufferinfo),
                     "%lu buffered (? + %lu + %lu)",
                     (unsigned long)(dst_buffered + dst_so_sndbuf),
                     (unsigned long)dst_buffered,
                     (unsigned long)dst_so_sndbuf);
         }

#elif !HAVE_SENDBUF_IOCTL && HAVE_RECVBUF_IOCTL
         if (havesocketinfo) {
            snprintf(src_bufferinfo, sizeof(src_bufferinfo),
                     "%lu buffered (%lu + %lu + ?)",
                     (unsigned long)(src_buffered + dst_so_rcvbuf),
                     (unsigned long)dst_so_rcvbuf,
                     (unsigned long)src_buffered);

            snprintf(dst_bufferinfo, sizeof(dst_bufferinfo),
                     "%lu buffered (%lu + %lu + ?)",
                     (unsigned long)(dst_buffered + src_so_rcvbuf),
                     (unsigned long)src_so_rcvbuf,
                     (unsigned long)dst_buffered);
         }
#endif /* !HAVE_SENDBUF_IOCTL && !HAVE_RECVBUF_IOCTL */

         if (!havesocketinfo) {
            snprintf(src_bufferinfo, sizeof(src_bufferinfo),
                     "%lu buffered (? + %lu + ?)",
                     (unsigned long)src_buffered,
                     (unsigned long)src_buffered);

            snprintf(dst_bufferinfo, sizeof(dst_bufferinfo),
                     "%lu buffered (? + %lu + ?)",
                     (unsigned long)dst_buffered,
                     (unsigned long)dst_buffered);
         }

         src_written = src->written.bytes;
         dst_written = dst->written.bytes;

         if (iov[i].srule.log.tcpinfo) {
            const char *info;
            int fdv[] = { src->s, dst->s };

            if ((info = get_tcpinfo(ELEMENTS(fdv), fdv, NULL, 0)) == NULL)
               *tcpinfo = NUL;
            else {
#if DIAGNOSTIC
               if (strlen(info) >= sizeof(tcpinfo))
                  SWARNX(strlen(info));
#endif /* DIAGNOSTIC */

               snprintf(tcpinfo, sizeof(tcpinfo),
                        "\nTCP_INFO:\n"
                        "%s",
                        info);
            }
         }
         else
            *tcpinfo = NUL;

         slog(LOG_INFO,
              "%s: %s <-> %s: %s, bytes transferred: "
              "%"PRIu64" (+ %s) "
              "<-> "
              "%"PRIu64" (+ %s)"
              "%s",
              protocol2string(iov[i].state.protocol),
              srcstring,
              dststring,
              timeinfo,
              dst_written,
              dst_bufferinfo,
              src_written,
              src_bufferinfo,
              tcpinfo);
      }

#if HAVE_UDP_SUPPORT
      else if (iov[i].state.protocol == SOCKS_UDP) {
         uint64_t src_packetswritten, dst_packetswritten;
         size_t srci;

/* log one udp session; one-sided form when we have no target (dststring
 * empty), two-sided otherwise.  Uses the enclosing loop's locals. */
#define UDPLOG()                                                               \
do {                                                                           \
   if (*dststring == NUL) {                                                    \
      slog(LOG_INFO,                                                           \
           "%s: %s: %s, "                                                      \
           "bytes transferred: %"PRIu64" <-> %"PRIu64", "                      \
           "packets: %"PRIu64" <-> %"PRIu64"",                                 \
           protocol2string(iov[i].state.protocol),                             \
           srcstring,                                                          \
           timeinfo,                                                           \
           dst_written,                                                        \
           src_written,                                                        \
           dst_packetswritten,                                                 \
           src_packetswritten);                                                \
   }                                                                           \
   else {                                                                      \
      slog(LOG_INFO,                                                           \
           "%s: %s <-> %s: %s, "                                               \
           "bytes transferred: %"PRIu64" <-> %"PRIu64", "                      \
           "packets: %"PRIu64" <-> %"PRIu64"",                                 \
           protocol2string(iov[i].state.protocol),                             \
           srcstring,                                                          \
           dststring,                                                          \
           timeinfo,                                                           \
           dst_written,                                                        \
           src_written,                                                        \
           dst_packetswritten,                                                 \
           src_packetswritten);                                                \
   }                                                                           \
} while (/* CONSTCOND */ 0)

#if SOCKS_SERVER
         if (dst->dstc == 0) {
            /*
             * Special-case for Dante.  Even though we have no target yet,
             * do print out the addresses on the internal side, as
             * we do have a client.  In Barefoot's case we on the other
             * hand do not have any control-connection, so if dstc is 0,
             * we have no clients either.
             */
            build_addrstr_src(GET_HOSTIDV(&iov[i].state),
                              GET_HOSTIDC(&iov[i].state),
                              &src->host,
                              NULL,
                              NULL,
                              sockaddr2sockshost(&src->laddr, &b),
                              &src->auth,
                              NULL,
                              srcstring,
                              sizeof(srcstring));

            *dststring = NUL;

            src_written        = src->written.bytes;
            src_packetswritten = src->written.packets;

            dst_written        = dst->written.bytes;
            dst_packetswritten = dst->written.packets;

            UDPLOG();
            continue;
         }
#endif /* SOCKS_SERVER */

         /* one log entry per udp target socket. */
         for (srci = 0; srci < dst->dstc; ++srci) {
            const udptarget_t *client = &dst->dstv[srci];

            src_written        = client->client_written.bytes,
            src_packetswritten = client->client_written.packets;

            dst_written        = client->target_written.bytes,
            dst_packetswritten = client->target_written.packets;

            build_addrstr_src(GET_HOSTIDV(&iov[i].state),
                              GET_HOSTIDC(&iov[i].state),
#if BAREFOOTD
                              sockaddr2sockshost(&client->client, &a),
#else /* Dante */
                              sockaddr2sockshost(&iov[i].src.raddr, &a),
#endif /* Dante */
                              NULL,
                              NULL,
                              sockaddr2sockshost(&src->laddr, &b),
                              &src->auth,
                              NULL,
                              srcstring,
                              sizeof(srcstring));

            build_addrstr_dst(sockaddr2sockshost(&client->laddr, &a),
                              iov[i].state.proxychain.proxyprotocol
                              == PROXY_DIRECT ?
                                 NULL : sockaddr2sockshost(&client->raddr, &b),
                              iov[i].state.proxychain.proxyprotocol
                              == PROXY_DIRECT ?
                                 NULL : &iov[i].state.proxychain.extaddr,
                              sockaddr2sockshost(&client->raddr, NULL),
                              &dst->auth,
                              NULL,
                              (struct in_addr *)NULL,
                              0,
                              dststring,
                              sizeof(dststring));

            /* NOTE(review): "%lus" with (long) casts — mismatched
             * signedness for %lu; harmless on common ABIs but should be
             * (unsigned long).  Confirm before changing. */
            snprintf(timeinfo, sizeof(timeinfo), "age: %lus, idle: %lus",
                     (long)socks_difftime(tnow, client->firstio.tv_sec),
                     (long)socks_difftime(tnow, client->lastio.tv_sec));

            UDPLOG();
         }
      }
#endif /* HAVE_UDP_SUPPORT */
   }

   SIGNAL_EPILOGUE(sig, si, errno_s);
}
3232
3233 static void
freebuffers(io)3234 freebuffers(io)
3235 const sockd_io_t *io;
3236 {
3237 if (io->control.s != -1)
3238 socks_freebuffer(io->control.s);
3239
3240 socks_freebuffer(io->src.s);
3241
3242 switch (io->state.protocol) {
3243 case SOCKS_TCP:
3244 socks_freebuffer(io->dst.s);
3245 break;
3246
3247 #if HAVE_UDP_SUPPORT
3248 case SOCKS_UDP: {
3249 size_t i;
3250
3251 SASSERTX(SOCKS_SERVER); /* only called for UDP in Dante. */
3252
3253 for (i = 0; i < io->dst.dstc; ++i)
3254 socks_freebuffer(io->dst.dstv[i].s);
3255 break;
3256 }
3257 #endif /* HAVE_UDP_SUPPORT */
3258
3259 default:
3260 SERRX(io->state.protocol);
3261 }
3262
3263 }
3264
/*
 * Checks the status of the connect(2) earlier initiated on io->dst.s,
 * on behalf of the client in "io".
 *
 * Returns:
 *    0:  the connect(2) completed successfully.
 *    -1: the connect(2) failed, or we failed while informing the client
 *        of the connect-status.  "*badfd" is then set to the descriptor
 *        related to the failure (io->src.s or io->dst.s).
 */
static int
connectstatus(io, badfd)
   sockd_io_t *io;
   int *badfd;
{
   const char *function = "connectstatus()";
   clientinfo_t cinfo;
   socklen_t len;
   char src[MAXSOCKSHOSTSTRING], dst[MAXSOCKSHOSTSTRING], buf[2048];

   SASSERTX(io_connectisinprogress(io));
   SASSERTX(io->dst.state.err == 0);

   *badfd = -1;

   cinfo.from = CONTROLIO(io)->raddr;
   HOSTIDCOPY(&io->state, &cinfo);

   /*
    * Check if the socket connected successfully.
    */
   len = sizeof(io->dst.raddr);
   if (getpeername(io->dst.s, TOSA(&io->dst.raddr), &len) == 0) {
      /* NOTE(review): shadows the char-buffers "src"/"dst" declared above. */
      iologaddr_t src, dst;

      gettimeofday_monotonic(&io->state.time.established);

      slog(LOG_DEBUG, "%s: connect to %s on fd %d completed successfully",
           function, sockshost2string(&io->dst.host, NULL, 0), io->dst.s);

      io->dst.state.isconnected = 1;

#if HAVE_NEGOTIATE_PHASE
      if (SOCKS_SERVER || io->reqflags.httpconnect) {
         errno = 0; /* make sure we don't reuse some old junk. */

         if (send_connectresponse(io->src.s, 0, io) != 0) {
            /*
             * failed sending the connect-response to the client.
             * Register a disconnect-alarm for the internal side if the
             * session-rule has one configured, and fail with io->src.s
             * as the bad descriptor.
             */
            if (io->srule.mstats_shmid != 0
            && (io->srule.alarmsconfigured & ALARM_DISCONNECT)) {
               SASSERTX(!io->src.state.alarmdisconnectdone);

               alarm_add_disconnect(0,
                                    &io->srule,
                                    ALARM_INTERNAL,
                                    &cinfo,
                                    strerror(errno),
                                    sockscf.shmemfd);

               io->src.state.alarmdisconnectdone = 1;
            }

            *badfd = io->src.s;
            return -1;
         }
      }
#endif /* HAVE_NEGOTIATE_PHASE */

      /* apply the socket-options belonging to the post-connect phase. */
      setconfsockoptions(io->dst.s,
                         io->control.s,
                         io->state.protocol,
                         0,
                         io->extsocketoptionc,
                         io->extsocketoptionv,
                         SOCKETOPT_POST,
                         SOCKETOPT_POST);

      init_iologaddr(&src,
                     object_sockaddr,
                     &io->src.laddr,
                     object_sockshost,
                     &io->src.host,
                     &io->src.auth,
                     GET_HOSTIDV(&io->state),
                     GET_HOSTIDC(&io->state));

      init_iologaddr(&dst,
                     object_sockaddr,
                     &io->dst.laddr,
                     object_sockaddr,
                     &io->dst.raddr,
                     &io->dst.auth,
                     NULL,
                     0);

      /* include TCP_INFO in the log-message if the rule requests it. */
      if (io->srule.log.tcpinfo) {
         int fdv[] = { io->src.s, io->dst.s };
         const char *tcpinfo = get_tcpinfo(ELEMENTS(fdv), fdv, NULL, 0);

         if (tcpinfo != NULL) {
            snprintf(buf, sizeof(buf),
                     "\nTCP_INFO:\n"
                     "%s",
                     tcpinfo);
         }
         else
            *buf = NUL;
      }
      else
         *buf = NUL;

      iolog(&io->srule,
            &io->state,
            OPERATION_CONNECT,
            &src,
            &dst,
            NULL,
            NULL,
            buf,
            strlen(buf));

      /*
       * NOTE(review): -1 from io_fillset_connectinprogress() presumably
       * means no connects remain in progress; clear the flag then.
       */
      if (io_fillset_connectinprogress(NULL) == -1)
         iostate.haveconnectinprogress = 0;

      return 0;
   }

   /*
    * else: connect(2) failed.
    */

   slog(LOG_DEBUG, "%s: getpeername(2) on fd %d failed: %s",
        function, io->dst.s, strerror(errno));

   if (io->srule.mstats_shmid != 0
   && (io->srule.alarmsconfigured & ALARM_DISCONNECT)) {
      SASSERTX(!io->dst.state.alarmdisconnectdone);

      alarm_add_disconnect(0,
                           &io->srule,
                           ALARM_EXTERNAL,
                           &cinfo,
                           strerror(errno),
                           sockscf.shmemfd);

      io->dst.state.alarmdisconnectdone = 1;
   }

   /* fetch the pending socket error from connect(2) directly into errno. */
   len = sizeof(errno);
   (void)getsockopt(io->dst.s, SOL_SOCKET, SO_ERROR, &errno, &len);

   if (errno == 0) {
      swarnx("%s: strange ... getpeername(2) failed, but getsockopt(2) "
             "still says errno is 0",
             function);

      io->dst.state.err = errno = ECONNREFUSED; /* no idea. */
   }
   else
      io->dst.state.err = errno;

   slog(LOG_DEBUG,
        "%s: connect(2) to %s on fd %d, on behalf of client %s, failed: %s",
        function,
        sockshost2string(&io->dst.host, dst, sizeof(dst)),
        io->dst.s,
        sockshost2string(&io->src.host, src, sizeof(src)),
        strerror(errno));

   log_connectfailed(EXTERNALIF, dst); /* special-cased for customer. */

#if HAVE_NEGOTIATE_PHASE
   if (SOCKS_SERVER || io->reqflags.httpconnect) {
      SASSERTX(errno != 0);

      if (send_connectresponse(io->src.s, errno, io) != 0)
         errno = io->dst.state.err; /* let errno be errno from connect(2). */
   }
#endif /* HAVE_NEGOTIATE_PHASE */

   *badfd = io->dst.s;
   return -1;
}
3437
/*
 * Updates shared-memory based i/o statistics after a transfer:
 *   - "bwused" bytes are charged against the bandwidth-object of "rule",
 *     if "rule" has one.
 *   - the byte-counts in "i_read"/"i_written" (internal side) and
 *     "e_read"/"e_written" (external side) are added to the corresponding
 *     data-alarms configured in the monitor-object of "packetrule", if
 *     any.  Each of these iocount-pointers may be NULL.
 * "timenow" is the time the i/o was done, and "lock" is the descriptor
 * passed on to bw_update() for locking.
 */
void
io_update(timenow, bwused, i_read, i_written,
          e_read, e_written, rule, packetrule, lock)
   const struct timeval *timenow;
   const size_t bwused;
   const iocount_t *i_read;
   const iocount_t *i_written;
   const iocount_t *e_read;
   const iocount_t *e_written;
   rule_t *rule;
   rule_t *packetrule;
   const int lock;
{
   const char *function = "io_update()";
   const iocount_t zero = { 0 };
   monitor_stats_t *monitor;
   int didattach;

   slog(LOG_DEBUG, "%s: bwused %lu, bw_shmid %lu, mstats_shmid %lu",
        function,
        (unsigned long)bwused,
        (rule == NULL || rule->bw_shmid == 0) ?
           0 : (unsigned long)rule->bw_shmid,
        (unsigned long)packetrule->mstats_shmid);

   /* charge the bandwidth used against the rule's bw-object, if set. */
   if (rule != NULL && rule->bw_shmid != 0 && bwused != 0) {
      SASSERTX(rule->bw != NULL);
      bw_update(rule->bw, bwused, timenow, lock);
   }

   /* nothing more to do unless data-alarms are configured. */
   if (packetrule->mstats_shmid == 0
   || !(packetrule->alarmsconfigured & ALARM_DATA))
      return;

   if (packetrule->mstats == NULL) {
      /*
       * Must be a Dante UDP session.
       */
      SASSERTX(packetrule != rule);
      SASSERTX(SOCKS_SERVER);

      /* attach temporarily; detached again before returning. */
      if (sockd_shmat(packetrule, SHMEM_MONITOR) != 0)
         return;

      didattach = 1;
   }
   else
      didattach = 0;

   SASSERTX(packetrule->mstats != NULL);
   monitor = &packetrule->mstats->object.monitor;

   /* lock out other processes while updating the shared counters. */
   socks_lock(sockscf.shmemfd, (off_t)packetrule->mstats_shmid, 1, 1, 1);

   MUNPROTECT_SHMEMHEADER(packetrule->mstats);

   /*
    * add the i/o done to each configured data-alarm, but only if some
    * i/o was actually done (count differs from "zero").
    */
   if (monitor->internal.alarm.data.recv.isconfigured
   && i_read != NULL
   && memcmp(&zero, i_read, sizeof(zero)) != 0) {
      monitor->internal.alarm.data.recv.bytes += i_read->bytes;
      monitor->internal.alarm.data.recv.lastio = *timenow;
   }

   if (monitor->internal.alarm.data.send.isconfigured
   && i_written != NULL
   && memcmp(&zero, i_written, sizeof(zero)) != 0) {
      monitor->internal.alarm.data.send.bytes += i_written->bytes;
      monitor->internal.alarm.data.send.lastio = *timenow;
   }

   if (monitor->external.alarm.data.recv.isconfigured
   && e_read != NULL
   && memcmp(&zero, e_read, sizeof(zero)) != 0) {
      monitor->external.alarm.data.recv.bytes += e_read->bytes;
      monitor->external.alarm.data.recv.lastio = *timenow;
   }

   if (monitor->external.alarm.data.send.isconfigured
   && e_written != NULL
   && memcmp(&zero, e_written, sizeof(zero)) != 0) {
      monitor->external.alarm.data.send.bytes += e_written->bytes;
      monitor->external.alarm.data.send.lastio = *timenow;
   }

   MPROTECT_SHMEMHEADER(packetrule->mstats);

   socks_unlock(sockscf.shmemfd, (off_t)packetrule->mstats_shmid, 1);

   slog(LOG_DEBUG,
        "%s: data sides configured in monitor with shmid: %lu. "
        "Data added: i_recv/i_send/e_recv/e_send: %lu/%lu/%lu/%lu bytes",
        function,
        (unsigned long)packetrule->mstats_shmid,
        i_read != NULL && monitor->internal.alarm.data.recv.isconfigured ?
           (unsigned long)i_read->bytes : 0,
        i_written != NULL && monitor->internal.alarm.data.send.isconfigured ?
           (unsigned long)i_written->bytes : 0,
        e_read != NULL && monitor->external.alarm.data.recv.isconfigured ?
           (unsigned long)e_read->bytes : 0,
        e_written != NULL && monitor->external.alarm.data.send.isconfigured ?
           (unsigned long)e_written->bytes : 0);

   if (didattach)
      sockd_shmdt(packetrule, SHMEM_MONITOR);
}
3543
3544 void
io_delete(mother,io,badfd,status)3545 io_delete(mother, io, badfd, status)
3546 int mother;
3547 sockd_io_t *io;
3548 int badfd;
3549 const iostatus_t status;
3550 {
3551 const char *function = "io_delete()";
3552 const int isreversed = (io->state.command == SOCKS_BINDREPLY ? 1 : 0);
3553 const int errno_s = errno;
3554 #if HAVE_GSSAPI
3555 OM_uint32 major_status, minor_status;
3556 #endif /* HAVE_GSSAPI */
3557 struct timeval tnow;
3558 rule_t *rulev[] = {
3559 &io->srule,
3560 #if HAVE_SOCKS_HOSTID
3561 io->hrule_isset ? &io->hrule : NULL,
3562 #endif /* HAVE_SOCKS_HOSTID */
3563 &io->crule,
3564 };
3565 clientinfo_t cinfo;
3566 size_t rulei;
3567 char buf[512], tcpinfo[MAXTCPINFOLEN];
3568 int command, protocol;
3569
3570 slog(LOG_DEBUG,
3571 "%s: command %s, bad-fd %d, controlfd %d, src-fd %d, dst-fd %d"
3572 #if HAVE_UDP_SUPPORT
3573 " dstc = %lu"
3574 #endif /* HAVE_UDP_SUPPORT */
3575 ,
3576 function,
3577 command2string(io->state.command),
3578 badfd,
3579 io->control.s,
3580 io->src.s,
3581 io->dst.s
3582 #if HAVE_UDP_SUPPORT
3583 ,
3584 (unsigned long)io->dst.dstc
3585 #endif /* HAVE_UDP_SUPPORT */
3586 );
3587
3588 SASSERTX( badfd == -1
3589 || badfd == io->src.s
3590 || badfd == io->control.s
3591 || badfd == io->dst.s);
3592
3593 SASSERTX(io->allocated);
3594
3595 gettimeofday_monotonic(&tnow);
3596
3597 #if SOCKS_SERVER
3598 /*
3599 * UDP in Dante's case needs some special handling here because each
3600 * udp client can have two target sockets, one for ipv4 and one for ipv6,
3601 * and we want to print both of them when logging the session-close, but
3602 * log the client/hostid-rule close only once. We use an ugly hack
3603 * for this involving dsti.
3604 */
3605 size_t dsti = 0;
3606 #endif /* SOCKS_SERVER */
3607
3608 /* only log the disconnect if the rule says so. */
3609 for (rulei = 0; rulei < ELEMENTS(rulev); ++rulei) {
3610 const rule_t *rule = rulev[rulei];
3611 sockshost_t a, b;
3612 uint64_t src_read, src_written, dst_read, dst_written;
3613 size_t bufused;
3614 char in[MAX_IOLOGADDR], out[MAX_IOLOGADDR],
3615 timeinfo[512],
3616 logmsg[sizeof(in) + sizeof(out) + 1024 + sizeof(timeinfo)];
3617
3618 if (rule == NULL)
3619 continue;
3620
3621 #if !HAVE_SOCKS_RULES
3622 if (rule->type == object_crule && !sockscf.option.debug)
3623 continue; /* normally inherited by the auto-created socks-rule. */
3624 #endif /* HAVE_SOCKS_RULES */
3625
3626 if (rule->log.disconnect
3627 || (rule->log.error && (status == IO_IOERROR)))
3628 /* LINTED */ /* EMPTY */;
3629 else
3630 continue;
3631
3632 protocol = io->state.protocol;
3633
3634 if (protocol == SOCKS_TCP && rule->log.tcpinfo) {
3635 const char *info;
3636 int fdv[] = { CLIENTIO(io)->s, EXTERNALIO(io)->s };
3637
3638 if ((info = get_tcpinfo(ELEMENTS(fdv), fdv, NULL, 0)) == NULL)
3639 *tcpinfo = NUL;
3640 else {
3641 #if DIAGNOSTIC
3642 if (strlen(info) >= sizeof(tcpinfo))
3643 SWARNX(strlen(info));
3644 #endif /* DIAGNOSTIC */
3645
3646 snprintf(tcpinfo, sizeof(tcpinfo),
3647 "\nTCP_INFO:\n"
3648 "%s",
3649 info);
3650 }
3651 }
3652 else
3653 *tcpinfo = NUL;
3654
3655 if (rule->type == object_srule) {
3656 build_addrstr_src(GET_HOSTIDV(&io->state),
3657 GET_HOSTIDC(&io->state),
3658 &io->src.host,
3659 NULL,
3660 NULL,
3661 sockaddr2sockshost(&io->src.laddr, NULL),
3662 &io->src.auth,
3663 NULL,
3664 in,
3665 sizeof(in));
3666
3667 switch (io->state.command) {
3668 case SOCKS_BIND:
3669 case SOCKS_BINDREPLY:
3670 case SOCKS_CONNECT:
3671 build_addrstr_dst(sockaddr2sockshost(&io->dst.laddr, &a),
3672 io->state.proxychain.proxyprotocol
3673 == PROXY_DIRECT ?
3674 NULL : sockaddr2sockshost(&io->dst.raddr, &b),
3675 io->state.proxychain.proxyprotocol
3676 == PROXY_DIRECT ?
3677 NULL : &io->state.proxychain.extaddr,
3678 io->state.proxychain.proxyprotocol
3679 == PROXY_DIRECT ?
3680 sockaddr2sockshost(&io->dst.raddr, NULL)
3681 : &io->dst.host,
3682 &io->dst.auth,
3683 NULL,
3684 (struct in_addr *)NULL,
3685 0,
3686 out,
3687 sizeof(out));
3688 break;
3689
3690 #if HAVE_UDP_SUPPORT
3691 case SOCKS_UDPASSOCIATE: {
3692 sockshost_t a, b;
3693
3694 if (io->dst.dstc == 0) /* no targets created for this session. */
3695 *out = NUL;
3696 else {
3697 udptarget_t *udptarget;
3698
3699 #if SOCKS_SERVER
3700 /*
3701 * Can have up to two targets. Make sure we log both.
3702 */
3703
3704 SASSERTX(dsti < io->dst.dstc);
3705 udptarget = &io->dst.dstv[dsti];
3706
3707 #else /* BAREFOOTD */
3708
3709 SASSERTX(io->dst.s != -1);
3710
3711 udptarget = clientofsocket(io->dst.s,
3712 io->dst.dstc,
3713 io->dst.dstv);
3714
3715 SASSERTX(udptarget != NULL);
3716
3717 #endif /* BAREFOOTD */
3718
3719 io_syncudp(io, udptarget);
3720
3721 build_addrstr_dst(sockaddr2sockshost(&io->dst.laddr, &a),
3722 io->state.proxychain.proxyprotocol
3723 == PROXY_DIRECT ?
3724 NULL : sockaddr2sockshost(&io->dst.raddr, &b),
3725 io->state.proxychain.proxyprotocol
3726 == PROXY_DIRECT ?
3727 NULL : &io->state.proxychain.extaddr,
3728 io->dst.state.isconnected ?
3729 sockaddr2sockshost(&io->dst.raddr, &b) : NULL,
3730 &io->dst.auth,
3731 NULL,
3732 NULL,
3733 0,
3734 out,
3735 sizeof(out));
3736
3737 #if SOCKS_SERVER
3738 if (++dsti < io->dst.dstc) {
3739 /*
3740 * re-log using the same rule but with next dsti.
3741 */
3742 --rulei;
3743 }
3744 #endif /* SOCKS_SERVER */
3745
3746 }
3747 break;
3748 }
3749 #endif /* HAVE_UDP_SUPPORT */
3750
3751 default:
3752 SERRX(io->state.command);
3753 }
3754
3755 command = io->state.command;
3756 }
3757 else {
3758 /*
3759 * XXX if support for server chaining is added to bind, the
3760 * bindreply might involve a proxy on the src side.
3761 */
3762
3763 #if !HAVE_SOCKS_RULES
3764 /*
3765 * we don't want to log the crule close of a udp session,
3766 * but only the individual clients closing the srule.
3767 */
3768 if (protocol == SOCKS_UDP)
3769 continue;
3770 #endif /* !HAVE_SOCKS_RULES. */
3771
3772 build_addrstr_src(GET_HOSTIDV(&io->state),
3773 GET_HOSTIDC(&io->state),
3774 &CONTROLIO(io)->host,
3775 NULL,
3776 NULL,
3777 sockaddr2sockshost(&CONTROLIO(io)->laddr, NULL),
3778 &io->cauth,
3779 NULL,
3780 in,
3781 sizeof(in));
3782
3783 #if HAVE_SOCKS_RULES
3784 *out = NUL; /* client-rule is from client to socks-server, and stop. */
3785
3786 #else /* !HAVE_SOCKS_RULES; destination address is know upon accepting client.*/
3787 build_addrstr_dst(NULL, /* now known, but was not upon accepting. */
3788 NULL,
3789 NULL,
3790 &io->dst.host,
3791 &io->dst.auth,
3792 NULL,
3793 (struct in_addr *)NULL,
3794 0,
3795 out,
3796 sizeof(out));
3797 #endif /* HAVE_SOCKS_RULES */
3798
3799 SASSERTX(rule->type != object_srule);
3800
3801 #if HAVE_SOCKS_RULES
3802 command = (rule->type == object_crule ? SOCKS_ACCEPT: SOCKS_HOSTID);
3803 protocol = SOCKS_TCP; /* always tcp before socks-rules. */
3804
3805 #else /* !HAVE_SOCKS_RULES */
3806 switch (rule->type) {
3807 case object_crule:
3808 if (protocol == SOCKS_TCP)
3809 command = SOCKS_ACCEPT;
3810 else
3811 command = SOCKS_UDPASSOCIATE;
3812 break;
3813
3814 #if HAVE_SOCKS_HOSTID
3815 case object_hrule:
3816 command = SOCKS_HOSTID;
3817 break;
3818 #endif /* HAVE_SOCKS_HOSTID */
3819
3820 default:
3821 SERRX(rule->type);
3822 }
3823
3824 #endif /* !HAVE_SOCKS_RULES */
3825 }
3826
3827 bufused = snprintf(logmsg, sizeof(logmsg), "%s(%lu): %s/%s ]: ",
3828 rule->verdict == VERDICT_PASS ?
3829 VERDICT_PASSs : VERDICT_BLOCKs,
3830 #if !HAVE_SOCKS_RULES
3831 /* use the number from the user-created rule. */
3832 io->state.protocol == SOCKS_UDP ?
3833 (unsigned long)io->crule.number
3834 : (unsigned long)rule->number,
3835 #else /* HAVE_SOCKS_RULES */
3836 (unsigned long)rule->number,
3837 #endif /* HAVE_SOCKS_RULES */
3838 protocol2string(protocol),
3839 command2string(command));
3840
3841 src_read = io->src.read.bytes;
3842 src_written = io->src.written.bytes;
3843 dst_read = io->dst.read.bytes;
3844 dst_written = io->dst.written.bytes;
3845
3846 if (protocol == SOCKS_TCP) {
3847 if (*out == NUL) {
3848 bufused += snprintf(&logmsg[bufused], sizeof(logmsg) - bufused,
3849 "%"PRIu64" -> %s -> %"PRIu64"",
3850 (isreversed ? dst_written : src_written),
3851 in,
3852 (isreversed ? src_written : dst_written));
3853 }
3854 else
3855 bufused += snprintf(&logmsg[bufused], sizeof(logmsg) - bufused,
3856 "%"PRIu64" -> %s -> %"PRIu64", "
3857 "%"PRIu64" -> %s -> %"PRIu64"",
3858 src_written, in, src_read,
3859 dst_written, out, dst_read);
3860 }
3861 else {
3862 SASSERTX(protocol == SOCKS_UDP);
3863
3864 if (rule->type == object_srule) {
3865 bufused +=
3866 snprintf(&logmsg[bufused], sizeof(logmsg) - bufused,
3867 "%"PRIu64"/%"PRIu64" -> %s -> %"PRIu64"/%"PRIu64"",
3868 src_written,
3869 io->src.written.packets,
3870 in,
3871 src_read,
3872 io->src.read.packets);
3873
3874
3875 if (*out != NUL) { /* have a target address also. */
3876 bufused +=
3877 snprintf(&logmsg[bufused], sizeof(logmsg) - bufused,
3878 ", %"PRIu64"/%"PRIu64" -> %s -> %"PRIu64"/%"PRIu64"",
3879 dst_written,
3880 io->dst.written.packets,
3881 out,
3882 dst_read,
3883 io->dst.read.packets);
3884 }
3885 }
3886 else
3887 SASSERTX(*out == NUL);
3888 }
3889
3890 bufused = snprintf(timeinfo, sizeof(timeinfo),
3891 "Session duration: %lds",
3892 (long)( tnow.tv_sec
3893 - io->state.time.accepted.tv_sec));
3894
3895 /*
3896 * XXX probably better to add another log-option, "stats" or similar,
3897 * that can be used to log some extra information, including this
3898 * and buffer-usage perhaps?
3899 */
3900
3901 if (sockscf.option.debug
3902 #if BAREFOOTD
3903 && protocol == SOCKS_TCP
3904 #endif /* BAREFOOTD */
3905 ) {
3906 struct timeval accept2neg, negstart2negfinish, sessionduration;
3907 char established2io_str[16], negfinish2established_str[16];
3908
3909 timersub(&io->state.time.negotiatestart,
3910 &io->state.time.accepted,
3911 &accept2neg);
3912
3913 timersub(&io->state.time.negotiateend,
3914 &io->state.time.negotiatestart,
3915 &negstart2negfinish);
3916
3917 if (io->state.time.established.tv_sec == 0)
3918 STRCPY_ASSERTSIZE(negfinish2established_str, "N/A");
3919 else {
3920 struct timeval tdiff;
3921
3922 timersub(&io->state.time.established,
3923 &io->state.time.negotiateend,
3924 &tdiff);
3925
3926 snprintf(negfinish2established_str, sizeof(established2io_str),
3927 "%ld.%06lds", (long)tdiff.tv_sec, (long)tdiff.tv_usec);
3928 }
3929
3930 if (io->state.time.firstio.tv_sec == 0)
3931 STRCPY_ASSERTSIZE(established2io_str, "N/A");
3932 else {
3933 struct timeval tdiff;
3934
3935 timersub(&io->state.time.firstio, &io->state.time.established,
3936 &tdiff);
3937
3938 snprintf(established2io_str, sizeof(established2io_str),
3939 "%ld.%06lds", (long)tdiff.tv_sec, (long)tdiff.tv_usec);
3940 }
3941
3942 timersub(&tnow, &io->state.time.accepted, &sessionduration);
3943
3944 bufused += snprintf(&timeinfo[bufused], sizeof(timeinfo) - bufused,
3945 "\n"
3946 "accept to negotiate start : %ld.%06lds\n"
3947 "negotiate duration : %ld.%06lds\n"
3948 "negotiate finish to established : %s\n"
3949 "session establish to first i/o : %s\n"
3950 "total session duration : %ld.%06lds\n",
3951 (long)accept2neg.tv_sec,
3952 (long)accept2neg.tv_usec,
3953 (long)negstart2negfinish.tv_sec,
3954 (long)negstart2negfinish.tv_usec,
3955 negfinish2established_str,
3956 established2io_str,
3957 (long)sessionduration.tv_sec,
3958 (long)sessionduration.tv_usec);
3959 }
3960
3961 errno = errno_s;
3962 switch (status) {
3963 case IO_BLOCK:
3964 slog(LOG_INFO, "%s: blocked. %s%s", logmsg, timeinfo, tcpinfo);
3965 break;
3966
3967 case IO_IOERROR:
3968 case IO_ERROR:
3969 if (errno != 0)
3970 snprintf(buf, sizeof(buf), " (%s)", strerror(errno));
3971 else
3972 *buf = NUL;
3973
3974 slog(LOG_INFO, "%s: %s error%s. %s%s",
3975 logmsg,
3976 badfd < 0 ? "session"
3977 : (badfd == io->dst.s && !isreversed) ?
3978 "remote peer" : "local client",
3979 buf,
3980 timeinfo,
3981 tcpinfo);
3982
3983 if (badfd >= 0 && (ERRNOISRST(errno))) {
3984 if (io->dst.s != -1 && badfd == io->dst.s)
3985 sockd_rstonclose(io->src.s);
3986 else if (badfd == io->src.s && io->dst.s != -1)
3987 sockd_rstonclose(io->dst.s);
3988 }
3989 break;
3990
3991 case IO_CLOSE:
3992 slog(LOG_INFO, "%s: %s closed. %s%s",
3993 logmsg,
3994 badfd < 0 ? "session" : badfd == io->dst.s ?
3995 "remote peer" : "local client",
3996 timeinfo,
3997 tcpinfo);
3998 break;
3999
4000 case IO_TIMEOUT: {
4001 const char *timeoutinfo = NULL;
4002 timeouttype_t timeouttype;
4003 time_t timeuntiltimeout;
4004
4005 timeuntiltimeout = io_timeuntiltimeout(io, &tnow, &timeouttype, 0);
4006 SASSERTX(timeuntiltimeout <= 0);
4007 SASSERTX(timeouttype != TIMEOUT_NOTSET);
4008
4009 if (timeouttype == TIMEOUT_TCP_FIN_WAIT) {
4010 SASSERTX(io->src.state.fin_received
4011 || io->dst.state.fin_received);
4012
4013 if (io->dst.state.fin_received)
4014 timeoutinfo = " (waiting for client to close)";
4015 else
4016 timeoutinfo = " (waiting for remote peer to close)";
4017
4018 }
4019
4020 slog(LOG_INFO, "%s: %s%s. %s%s",
4021 logmsg,
4022 timeouttype2string(timeouttype),
4023 timeoutinfo == NULL ? "" : timeoutinfo,
4024 timeinfo,
4025 tcpinfo);
4026
4027 break;
4028 }
4029
4030 case IO_ADMINTERMINATION:
4031 slog(LOG_INFO, "%s: administrative termination. %s%s",
4032 logmsg, timeinfo, tcpinfo);
4033 break;
4034
4035 default:
4036 SERRX(status);
4037 }
4038
4039 #if SOCKS_SERVER
4040 if (io->state.command == SOCKS_BINDREPLY && rule->type == object_srule) {
4041 /*
4042 * log the close of the open'd bind session also.
4043 */
4044 const int original_command = io->state.command;
4045 iologaddr_t src, dst;
4046
4047 init_iologaddr(&src,
4048 object_sockaddr,
4049 &io->src.laddr,
4050 object_sockshost,
4051 io->state.extension.bind ? NULL : &io->cmd.bind.host,
4052 &io->src.auth,
4053 GET_HOSTIDV(&io->state),
4054 GET_HOSTIDC(&io->state));
4055
4056 init_iologaddr(&dst,
4057 object_sockaddr,
4058 &io->dst.laddr,
4059 object_sockaddr,
4060 &io->dst.raddr,
4061 &io->dst.auth,
4062 NULL,
4063 0);
4064
4065 io->state.command = SOCKS_BIND;
4066 /*
4067 * The bindreply src/dst order is reversed compared to that of the
4068 * bind as the src for bindreply is the client that connects to the
4069 * address bound.
4070 */
4071 iolog(&io->cmd.bind.rule,
4072 &io->state,
4073 OPERATION_DISCONNECT,
4074 &dst,
4075 &src,
4076 NULL,
4077 NULL,
4078 tcpinfo,
4079 strlen(tcpinfo));
4080
4081 io->state.command = original_command;
4082 }
4083 #endif /* SOCKS_SERVER */
4084 }
4085
4086 #if HAVE_GSSAPI
4087 if (io->src.auth.method == AUTHMETHOD_GSSAPI) {
4088 if ((major_status
4089 = gss_delete_sec_context(&minor_status,
4090 &io->src.auth.mdata.gssapi.state.id,
4091 GSS_C_NO_BUFFER)) != GSS_S_COMPLETE) {
4092 if (!gss_err_isset(major_status, minor_status, buf, sizeof(buf)))
4093 *buf = NUL;
4094
4095 swarnx("%s: gss_delete_sec_context() of src failed%s%s",
4096 function,
4097 *buf == NUL ? "" : ": ",
4098 *buf == NUL ? "" : buf);
4099 }
4100 }
4101
4102 if (io->dst.auth.method == AUTHMETHOD_GSSAPI) {
4103 if ((major_status
4104 = gss_delete_sec_context(&minor_status,
4105 &io->dst.auth.mdata.gssapi.state.id,
4106 GSS_C_NO_BUFFER)) != GSS_S_COMPLETE) {
4107 if (!gss_err_isset(major_status, minor_status, buf, sizeof(buf)))
4108 *buf = NUL;
4109
4110 swarnx("%s: gss_delete_sec_context() of dst failed%s%s",
4111 function,
4112 *buf == NUL ? "" : ": ",
4113 *buf == NUL ? "" : buf);
4114 }
4115 }
4116 #endif /* HAVE_GSSAPI */
4117
4118 #if BAREFOOTD
4119 if (io->state.protocol == SOCKS_UDP) {
4120 /*
4121 * The io itself is normally not freed in the udp-case, as we can
4122 * always get new clients; only this one client is removed.
4123 */
4124
4125 removeclient(io->dst.s, &io->dst.dstc, io->dst.dstv);
4126 socks_freebuffer(io->dst.s);
4127 io->dst.s = -1;
4128
4129 return;
4130 }
4131 #endif /* BAREFOOTD */
4132
4133 freebuffers(io);
4134 io_add_alarmdisconnects(io, "session delete");
4135 close_iodescriptors(io);
4136
4137 if (mother != -1) {
4138 const char info = (io->state.command == SOCKS_UDPASSOCIATE ?
4139 SOCKD_FREESLOT_UDP : SOCKD_FREESLOT_TCP);
4140
4141 /* ack io slot free. */
4142 if (socks_sendton(mother,
4143 &info,
4144 sizeof(info),
4145 sizeof(info),
4146 0,
4147 NULL,
4148 0,
4149 NULL,
4150 NULL) != sizeof(info))
4151 slog(sockd_motherexists() ? LOG_WARNING : LOG_DEBUG,
4152 "%s: sending ack to mother failed: %s",
4153 function, strerror(errno));
4154 }
4155
4156 if (io->state.command == SOCKS_CONNECT)
4157 if (io_fillset_connectinprogress(NULL) == -1)
4158 iostate.haveconnectinprogress = 0;
4159
4160 HOSTIDCOPY(&io->state, &cinfo);
4161
4162 log_ruleinfo_shmid(CRULE_OR_HRULE(io), function, "before SHMEM_UNUSE()");
4163
4164 if (io->srule.type == object_none) {
4165 SASSERTX(io->state.protocol == SOCKS_UDP);
4166 SASSERTX(!HAVE_SOCKS_RULES);
4167 }
4168 else
4169 log_ruleinfo_shmid(&io->srule, function, "before SHMEM_UNUSE()");
4170
4171 SASSERTX(!(SHMID_ISSET(CRULE_OR_HRULE(io)) && SHMID_ISSET(&io->srule)));
4172
4173 cinfo.from = CONTROLIO(io)->raddr;
4174
4175 #if HAVE_CONTROL_CONNECTION
4176
4177 SHMEM_UNUSE(SHMEMRULE(io), &cinfo, sockscf.shmemfd, SHMEM_ALL);
4178
4179 #else /* !HAVE_CONTROL_CONNECTION */
4180
4181 if (io->state.protocol == SOCKS_TCP) /* UDP is free'd by removeclient(). */
4182 SHMEM_UNUSE(SHMEMRULE(io), &cinfo, sockscf.shmemfd, SHMEM_ALL);
4183
4184 #endif /* !HAVE_CONTROL_CONNECTION */
4185
4186 #if SOCKS_SERVER
4187 bzero(io->dst.dstv, sizeof(*io->dst.dstv) * io->dst.dstc);
4188 #endif /* SOCKS_SERVER */
4189
4190 bzero(io, sizeof(*io));
4191
4192 proctitleupdate();
4193 }
4194
4195
4196 static int
io_connectisinprogress(io)4197 io_connectisinprogress(io)
4198 const sockd_io_t *io;
4199 {
4200 if (io->allocated
4201 && io->state.command == SOCKS_CONNECT
4202 && !io->dst.state.isconnected)
4203 return 1;
4204 else
4205 return 0;
4206 }
4207
4208
4209 #if COVENANT
4210
/*
 * Receives a client-object resent to us over the socket "s".
 * The object itself is received into "client", while the client's own
 * socket is received as ancillary data (SCM_RIGHTS) and stored in
 * client->s.
 *
 * Returns 0 on success, -1 on error.
 */
int
recv_resentclient(s, client)
   int s;
   sockd_client_t *client;
{
   const char *function = "recv_resentclient()";
   struct iovec iov[2];
   struct msghdr msg;
   int ioc, fdexpect, fdreceived, r;
   CMSG_AALLOC(cmsg, sizeof(int));

   /* receive the client-object itself via the iovec. */
   ioc = 0;
   bzero(iov, sizeof(iov));
   iov[ioc].iov_base = client;
   iov[ioc].iov_len = sizeof(*client);
   ++ioc;

   bzero(&msg, sizeof(msg));
   msg.msg_iov = iov;
   msg.msg_iovlen = ioc;
   msg.msg_name = NULL;
   msg.msg_namelen = 0;

   /* LINTED pointer casts may be troublesome */
   CMSG_SETHDR_RECV(msg, cmsg, CMSG_MEMSIZE(cmsg));

   /* anything less than the full object is an error. */
   if ((r = recvmsgn(s, &msg, 0)) < (ssize_t)sizeof(*client)) {
      switch (r) {
         case -1:
            swarn("%s: recvmsg() failed", function);
            break;

         case 0:
            slog(LOG_DEBUG, "%s: recvmsg(): other side closed connection",
                 function);
            break;

         default:
            swarnx("%s: recvmsg(): unexpected short read: %d/%ld",
                   function, r, (long)sizeof(*client));
      }

      return -1;
   }

   if (socks_msghaserrors(function, &msg))
      return -1;

   /* bytes received beyond the object itself; currently unused. */
   r -= sizeof(*client);
   fdexpect = 1; /* client. */

   /* verify we received exactly the number of descriptors expected. */
   if (!CMSG_RCPTLEN_ISOK(msg, sizeof(int) * fdexpect)) {
      swarnx("%s: received control message has the invalid len of %d",
             function, (int)CMSG_TOTLEN(msg));

      return -1;
   }

   SASSERTX(cmsg->cmsg_level == SOL_SOCKET);
   SASSERTX(cmsg->cmsg_type == SCM_RIGHTS);

   /* extract the client's socket from the ancillary data. */
   fdreceived = 0;
   if (fdexpect > 0) {
      CMSG_GETOBJECT(client->s, cmsg, sizeof(client->s) * fdreceived++);

      if (sockscf.option.debug >= DEBUG_VERBOSE)
         slog(LOG_DEBUG, "%s: received fd %d (%s) ...",
              function, client->s, socket2string(client->s, NULL, 0));
   }

   if (sockscf.option.debug >= DEBUG_VERBOSE)
      slog(LOG_DEBUG, "%s: received %d descriptors for client",
           function, fdreceived);

   return 0;
}
4287
4288 #endif /* COVENANT */
4289