1 /*
2  * Copyright (C) 1996-2021 The Squid Software Foundation and contributors
3  *
4  * Squid software is distributed under GPLv2+ license and includes
5  * contributions from numerous individuals and organizations.
6  * Please see the COPYING and CONTRIBUTORS files for details.
7  */
8 
9 /* DEBUG: section 05    Socket Functions */
10 
11 #include "squid.h"
12 #include "ClientInfo.h"
13 #include "comm/AcceptLimiter.h"
14 #include "comm/comm_internal.h"
15 #include "comm/Connection.h"
16 #include "comm/IoCallback.h"
17 #include "comm/Loops.h"
18 #include "comm/Read.h"
19 #include "comm/TcpAcceptor.h"
20 #include "comm/Write.h"
21 #include "CommRead.h"
22 #include "compat/cmsg.h"
23 #include "DescriptorSet.h"
24 #include "event.h"
25 #include "fd.h"
26 #include "fde.h"
27 #include "globals.h"
28 #include "icmp/net_db.h"
29 #include "ip/Intercept.h"
30 #include "ip/QosConfig.h"
31 #include "ip/tools.h"
32 #include "pconn.h"
33 #include "profiler/Profiler.h"
34 #include "sbuf/SBuf.h"
35 #include "SquidConfig.h"
36 #include "StatCounters.h"
37 #include "StoreIOBuffer.h"
38 #include "tools.h"
39 
40 #if USE_OPENSSL
41 #include "ssl/support.h"
42 #endif
43 
44 #include <cerrno>
45 #include <cmath>
46 #if _SQUID_CYGWIN_
47 #include <sys/ioctl.h>
48 #endif
49 #if HAVE_NETINET_TCP_H
50 #include <netinet/tcp.h>
51 #endif
52 #if HAVE_SYS_UN_H
53 #include <sys/un.h>
54 #endif
55 
56 /*
57  * New C-like simple comm code. This stuff is a mess and doesn't really buy us anything.
58  */
59 
60 static IOCB commHalfClosedReader;
61 static void comm_init_opened(const Comm::ConnectionPointer &conn, const char *note, struct addrinfo *AI);
62 static int comm_apply_flags(int new_socket, Ip::Address &addr, int flags, struct addrinfo *AI);
63 
64 #if USE_DELAY_POOLS
65 CBDATA_CLASS_INIT(CommQuotaQueue);
66 
67 static void commHandleWriteHelper(void * data);
68 #endif
69 
70 /* STATIC */
71 
72 static DescriptorSet *TheHalfClosed = NULL; /// the set of half-closed FDs
73 static bool WillCheckHalfClosed = false; /// true if check is scheduled
74 static EVH commHalfClosedCheck;
75 static void commPlanHalfClosedCheck();
76 
77 static Comm::Flag commBind(int s, struct addrinfo &);
78 static void commSetReuseAddr(int);
79 static void commSetNoLinger(int);
80 #ifdef TCP_NODELAY
81 static void commSetTcpNoDelay(int);
82 #endif
83 static void commSetTcpRcvbuf(int, int);
84 
85 fd_debug_t *fdd_table = NULL;
86 
87 bool
88 isOpen(const int fd)
89 {
90     return fd >= 0 && fd_table && fd_table[fd].flags.open != 0;
91 }
92 
93 /**
94  * Empty the read buffers
95  *
96  * This routine drains any data pending in the socket's OS read buffers.
97  * On some platforms (Linux), if a buffer still holds data when you
98  * call close(), the socket can hang and take quite a while
99  * to time out.
100  */
101 static void
102 comm_empty_os_read_buffers(int fd)
103 {
104 #if _SQUID_LINUX_
105 #if USE_OPENSSL
106     // Bug 4146: SSL-Bump BIO does not release sockets on close.
107     if (fd_table[fd].ssl)
108         return;
109 #endif
110 
111     /* prevent those nasty RST packets */
112     char buf[SQUID_TCP_SO_RCVBUF];
113     if (fd_table[fd].flags.nonblocking && fd_table[fd].type != FD_MSGHDR) {
114         while (FD_READ_METHOD(fd, buf, SQUID_TCP_SO_RCVBUF) > 0) {};
115     }
116 #endif
117 }
118 
119 /**
120  * synchronous wrapper around udp socket functions
121  */
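/*
 * Illustrative usage sketch (hypothetical caller, not part of the original
 * file): receive one datagram and learn the sender address. The buffer size
 * and error handling here are assumptions made for the example only.
 *
 *   Ip::Address from;
 *   char pkt[4096];
 *   const int n = comm_udp_recvfrom(fd, pkt, sizeof(pkt), 0, from);
 *   if (n < 0 && !ignoreErrno(errno))
 *       debugs(5, DBG_IMPORTANT, "UDP receive error: " << xstrerr(errno));
 */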
122 int
123 comm_udp_recvfrom(int fd, void *buf, size_t len, int flags, Ip::Address &from)
124 {
125     ++ statCounter.syscalls.sock.recvfroms;
126     debugs(5,8, "comm_udp_recvfrom: FD " << fd << " from " << from);
127     struct addrinfo *AI = NULL;
128     Ip::Address::InitAddr(AI);
129     int x = recvfrom(fd, buf, len, flags, AI->ai_addr, &AI->ai_addrlen);
130     from = *AI;
131     Ip::Address::FreeAddr(AI);
132     return x;
133 }
134 
135 int
136 comm_udp_recv(int fd, void *buf, size_t len, int flags)
137 {
138     Ip::Address nul;
139     return comm_udp_recvfrom(fd, buf, len, flags, nul);
140 }
141 
142 ssize_t
143 comm_udp_send(int s, const void *buf, size_t len, int flags)
144 {
145     return send(s, buf, len, flags);
146 }
147 
148 bool
149 comm_has_incomplete_write(int fd)
150 {
151     assert(isOpen(fd) && COMMIO_FD_WRITECB(fd) != NULL);
152     return COMMIO_FD_WRITECB(fd)->active();
153 }
154 
155 /**
156  * Queue a write. handler/handler_data are called when the write fully
157  * completes, on error, or on file descriptor close.
158  */
159 
160 /* Return the local port associated with fd. */
161 unsigned short
162 comm_local_port(int fd)
163 {
164     Ip::Address temp;
165     struct addrinfo *addr = NULL;
166     fde *F = &fd_table[fd];
167 
168     /* If the fd is closed already, just return */
169 
170     if (!F->flags.open) {
171         debugs(5, 0, "comm_local_port: FD " << fd << " has been closed.");
172         return 0;
173     }
174 
175     if (F->local_addr.port())
176         return F->local_addr.port();
177 
178     if (F->sock_family == AF_INET)
179         temp.setIPv4();
180 
181     Ip::Address::InitAddr(addr);
182 
183     if (getsockname(fd, addr->ai_addr, &(addr->ai_addrlen)) ) {
184         int xerrno = errno;
185         debugs(50, DBG_IMPORTANT, MYNAME << "Failed to retrieve TCP/UDP port number for socket: FD " << fd << ": " << xstrerr(xerrno));
186         Ip::Address::FreeAddr(addr);
187         return 0;
188     }
189     temp = *addr;
190 
191     Ip::Address::FreeAddr(addr);
192 
193     if (F->local_addr.isAnyAddr()) {
194         /* save the whole local address, not just the port. */
195         F->local_addr = temp;
196     } else {
197         F->local_addr.port(temp.port());
198     }
199 
200     debugs(5, 6, "comm_local_port: FD " << fd << ": port " << F->local_addr.port() << "(family=" << F->sock_family << ")");
201     return F->local_addr.port();
202 }
203 
204 static Comm::Flag
205 commBind(int s, struct addrinfo &inaddr)
206 {
207     ++ statCounter.syscalls.sock.binds;
208 
209     if (bind(s, inaddr.ai_addr, inaddr.ai_addrlen) == 0) {
210         debugs(50, 6, "bind socket FD " << s << " to " << fd_table[s].local_addr);
211         return Comm::OK;
212     }
213     int xerrno = errno;
214     debugs(50, DBG_CRITICAL, MYNAME << "Cannot bind socket FD " << s << " to " << fd_table[s].local_addr << ": " << xstrerr(xerrno));
215 
216     return Comm::COMM_ERROR;
217 }
218 
219 /**
220  * Create a socket. The default is a blocking, stream (TCP) socket.
221  * The flags argument is a bitwise OR of the COMM_* flags defined in comm.h.
222  */
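/*
 * Illustrative usage sketch (hypothetical caller, for example only): open a
 * non-blocking TCP socket bound to a configured local address. The variable
 * names and the note string are assumptions for the example.
 *
 *   Ip::Address local; // assumed to be filled in from configuration
 *   const int fd = comm_open(SOCK_STREAM, IPPROTO_TCP, local,
 *                            COMM_NONBLOCKING | COMM_DOBIND, "example socket");
 *   if (fd < 0)
 *       debugs(5, DBG_IMPORTANT, "comm_open failed: " << xstrerr(errno));
 */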
223 int
224 comm_open(int sock_type,
225           int proto,
226           Ip::Address &addr,
227           int flags,
228           const char *note)
229 {
230     return comm_openex(sock_type, proto, addr, flags, note);
231 }
232 
233 void
234 comm_open_listener(int sock_type,
235                    int proto,
236                    Comm::ConnectionPointer &conn,
237                    const char *note)
238 {
239     /* all listener sockets require bind() */
240     conn->flags |= COMM_DOBIND;
241 
242     /* attempt native enabled port. */
243     conn->fd = comm_openex(sock_type, proto, conn->local, conn->flags, note);
244 }
245 
246 int
247 comm_open_listener(int sock_type,
248                    int proto,
249                    Ip::Address &addr,
250                    int flags,
251                    const char *note)
252 {
253     int sock = -1;
254 
255     /* all listener sockets require bind() */
256     flags |= COMM_DOBIND;
257 
258     /* attempt native enabled port. */
259     sock = comm_openex(sock_type, proto, addr, flags, note);
260 
261     return sock;
262 }
263 
264 static bool
265 limitError(int const anErrno)
266 {
267     return anErrno == ENFILE || anErrno == EMFILE;
268 }
269 
270 void
271 comm_set_v6only(int fd, int tos)
272 {
273 #ifdef IPV6_V6ONLY
274     if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, (char *) &tos, sizeof(int)) < 0) {
275         int xerrno = errno;
276         debugs(50, DBG_IMPORTANT, MYNAME << "setsockopt(IPV6_V6ONLY) " << (tos?"ON":"OFF") << " for FD " << fd << ": " << xstrerr(xerrno));
277     }
278 #else
279     debugs(50, DBG_CRITICAL, MYNAME << "WARNING: setsockopt(IPV6_V6ONLY) not supported on this platform");
280 #endif /* sockopt */
281 }
282 
283 /**
284  * Set the socket option required for TPROXY spoofing for:
285  * - Linux TPROXY v4 support,
286  * - OpenBSD divert-to support,
287  * - FreeBSD IPFW TPROXY v4 support.
288  */
289 void
290 comm_set_transparent(int fd)
291 {
292 #if _SQUID_LINUX_ && defined(IP_TRANSPARENT) // Linux
293 # define soLevel SOL_IP
294 # define soFlag  IP_TRANSPARENT
295     bool doneSuid = false;
296 
297 #elif defined(SO_BINDANY) // OpenBSD 4.7+ and NetBSD with PF
298 # define soLevel SOL_SOCKET
299 # define soFlag  SO_BINDANY
300     enter_suid();
301     bool doneSuid = true;
302 
303 #elif defined(IP_BINDANY) // FreeBSD with IPFW
304 # define soLevel IPPROTO_IP
305 # define soFlag  IP_BINDANY
306     enter_suid();
307     bool doneSuid = true;
308 
309 #else
310     debugs(50, DBG_CRITICAL, "WARNING: comm_open: setsockopt(TPROXY) not supported on this platform");
311 #endif /* sockopt */
312 
313 #if defined(soLevel) && defined(soFlag)
314     int tos = 1;
315     if (setsockopt(fd, soLevel, soFlag, (char *) &tos, sizeof(int)) < 0) {
316         int xerrno = errno;
317         debugs(50, DBG_IMPORTANT, MYNAME << "setsockopt(TPROXY) on FD " << fd << ": " << xstrerr(xerrno));
318     } else {
319         /* mark the socket as having transparent options */
320         fd_table[fd].flags.transparent = true;
321     }
322     if (doneSuid)
323         leave_suid();
324 #endif
325 }
326 
327 /**
328  * Create a socket. The default is a blocking, stream (TCP) socket.
329  * The flags argument is a bitwise OR of the COMM_* flags.
330  */
331 int
332 comm_openex(int sock_type,
333             int proto,
334             Ip::Address &addr,
335             int flags,
336             const char *note)
337 {
338     int new_socket;
339     struct addrinfo *AI = NULL;
340 
341     PROF_start(comm_open);
342     /* Create socket for accepting new connections. */
343     ++ statCounter.syscalls.sock.sockets;
344 
345     /* Setup the socket addrinfo details for use */
346     addr.getAddrInfo(AI);
347     AI->ai_socktype = sock_type;
348     AI->ai_protocol = proto;
349 
350     debugs(50, 3, "comm_openex: Attempt open socket for: " << addr );
351 
352     new_socket = socket(AI->ai_family, AI->ai_socktype, AI->ai_protocol);
353     int xerrno = errno;
354 
355     /* under IPv6 there is the possibility IPv6 is present but disabled. */
356     /* try again as IPv4-native if possible */
357     if ( new_socket < 0 && Ip::EnableIpv6 && addr.isIPv6() && addr.setIPv4() ) {
358         /* attempt to open this IPv4-only. */
359         Ip::Address::FreeAddr(AI);
360         /* Setup the socket addrinfo details for use */
361         addr.getAddrInfo(AI);
362         AI->ai_socktype = sock_type;
363         AI->ai_protocol = proto;
364         debugs(50, 3, "Attempt fallback open socket for: " << addr );
365         new_socket = socket(AI->ai_family, AI->ai_socktype, AI->ai_protocol);
366         debugs(50, 2, "attempt open " << note << " socket on: " << addr);
367     }
368 
369     if (new_socket < 0) {
370         /* Increase the number of reserved fd's if calls to socket()
371          * are failing because the open file table is full.  This
372          * limits the number of simultaneous clients */
373 
374         if (limitError(errno)) {
375             debugs(50, DBG_IMPORTANT, MYNAME << "socket failure: " << xstrerr(xerrno));
376             fdAdjustReserved();
377         } else {
378             debugs(50, DBG_CRITICAL, MYNAME << "socket failure: " << xstrerr(xerrno));
379         }
380 
381         Ip::Address::FreeAddr(AI);
382 
383         PROF_stop(comm_open);
384         errno = xerrno; // restore for caller
385         return -1;
386     }
387 
388     // XXX: temporary for the transition. comm_openex will eventually have a conn to play with.
389     Comm::ConnectionPointer conn = new Comm::Connection;
390     conn->local = addr;
391     conn->fd = new_socket;
392 
393     debugs(50, 3, "comm_openex: Opened socket " << conn << " : family=" << AI->ai_family << ", type=" << AI->ai_socktype << ", protocol=" << AI->ai_protocol );
394 
395     if ( Ip::EnableIpv6&IPV6_SPECIAL_SPLITSTACK && addr.isIPv6() )
396         comm_set_v6only(conn->fd, 1);
397 
398     /* Windows Vista supports Dual-Sockets. BUT defaults them to V6ONLY. Turn it OFF. */
399     /* Other OS may have this administratively disabled for general use. Same deal. */
400     if ( Ip::EnableIpv6&IPV6_SPECIAL_V4MAPPING && addr.isIPv6() )
401         comm_set_v6only(conn->fd, 0);
402 
403     comm_init_opened(conn, note, AI);
404     new_socket = comm_apply_flags(conn->fd, addr, flags, AI);
405 
406     Ip::Address::FreeAddr(AI);
407 
408     PROF_stop(comm_open);
409 
410     // XXX transition only. prevent conn from closing the new FD on function exit.
411     conn->fd = -1;
412     errno = xerrno; // restore for caller
413     return new_socket;
414 }
415 
416 /// update FD tables after a local or remote (IPC) comm_openex();
417 void
418 comm_init_opened(const Comm::ConnectionPointer &conn,
419                  const char *note,
420                  struct addrinfo *AI)
421 {
422     assert(Comm::IsConnOpen(conn));
423     assert(AI);
424 
425     /* update fdstat */
426     debugs(5, 5, HERE << conn << " is a new socket");
427 
428     assert(!isOpen(conn->fd)); // NP: global isOpen checks the fde entry for openness not the Comm::Connection
429     fd_open(conn->fd, FD_SOCKET, note);
430 
431     fdd_table[conn->fd].close_file = NULL;
432     fdd_table[conn->fd].close_line = 0;
433 
434     fde *F = &fd_table[conn->fd];
435     F->local_addr = conn->local;
436 
437     F->sock_family = AI->ai_family;
438 }
439 
440 /// apply flags after a local comm_open*() call;
441 /// returns new_socket or -1 on error
442 static int
443 comm_apply_flags(int new_socket,
444                  Ip::Address &addr,
445                  int flags,
446                  struct addrinfo *AI)
447 {
448     assert(new_socket >= 0);
449     assert(AI);
450     const int sock_type = AI->ai_socktype;
451 
452     if (!(flags & COMM_NOCLOEXEC))
453         commSetCloseOnExec(new_socket);
454 
455     if ((flags & COMM_REUSEADDR))
456         commSetReuseAddr(new_socket);
457 
458     if (addr.port() > (unsigned short) 0) {
459 #if _SQUID_WINDOWS_
460         if (sock_type != SOCK_DGRAM)
461 #endif
462             commSetNoLinger(new_socket);
463 
464         if (opt_reuseaddr)
465             commSetReuseAddr(new_socket);
466     }
467 
468     /* MUST be done before binding or face OS Error: "(99) Cannot assign requested address"... */
469     if ((flags & COMM_TRANSPARENT)) {
470         comm_set_transparent(new_socket);
471     }
472 
473     if ( (flags & COMM_DOBIND) || addr.port() > 0 || !addr.isAnyAddr() ) {
474         if ( !(flags & COMM_DOBIND) && addr.isAnyAddr() )
475             debugs(5, DBG_IMPORTANT, "WARNING: Squid is attempting to bind() port " << addr << " without being a listener.");
476         if ( addr.isNoAddr() )
477             debugs(5, DBG_CRITICAL, "CRITICAL: Squid is attempting to bind() port " << addr << "!!");
478 
479         if (commBind(new_socket, *AI) != Comm::OK) {
480             comm_close(new_socket);
481             return -1;
482         }
483     }
484 
485     if (flags & COMM_NONBLOCKING)
486         if (commSetNonBlocking(new_socket) == Comm::COMM_ERROR) {
487             comm_close(new_socket);
488             return -1;
489         }
490 
491 #ifdef TCP_NODELAY
492     if (sock_type == SOCK_STREAM)
493         commSetTcpNoDelay(new_socket);
494 
495 #endif
496 
497     if (Config.tcpRcvBufsz > 0 && sock_type == SOCK_STREAM)
498         commSetTcpRcvbuf(new_socket, Config.tcpRcvBufsz);
499 
500     return new_socket;
501 }
502 
503 void
504 comm_import_opened(const Comm::ConnectionPointer &conn,
505                    const char *note,
506                    struct addrinfo *AI)
507 {
508     debugs(5, 2, HERE << conn);
509     assert(Comm::IsConnOpen(conn));
510     assert(AI);
511 
512     comm_init_opened(conn, note, AI);
513 
514     if (!(conn->flags & COMM_NOCLOEXEC))
515         fd_table[conn->fd].flags.close_on_exec = true;
516 
517     if (conn->local.port() > (unsigned short) 0) {
518 #if _SQUID_WINDOWS_
519         if (AI->ai_socktype != SOCK_DGRAM)
520 #endif
521             fd_table[conn->fd].flags.nolinger = true;
522     }
523 
524     if ((conn->flags & COMM_TRANSPARENT))
525         fd_table[conn->fd].flags.transparent = true;
526 
527     if (conn->flags & COMM_NONBLOCKING)
528         fd_table[conn->fd].flags.nonblocking = true;
529 
530 #ifdef TCP_NODELAY
531     if (AI->ai_socktype == SOCK_STREAM)
532         fd_table[conn->fd].flags.nodelay = true;
533 #endif
534 
535     /* no fd_table[fd].flags. updates needed for these conditions:
536      * if ((flags & COMM_REUSEADDR)) ...
537      * if ((flags & COMM_DOBIND) ...) ...
538      */
539 }
540 
541 // XXX: now that raw-FD timeouts are only unset for pipes and files, this SHOULD be a no-op
542 // with the handler already unset. Leaving it in place until that can be verified for all code paths.
543 void
544 commUnsetFdTimeout(int fd)
545 {
546     debugs(5, 3, HERE << "Remove timeout for FD " << fd);
547     assert(fd >= 0);
548     assert(fd < Squid_MaxFD);
549     fde *F = &fd_table[fd];
550     assert(F->flags.open);
551 
552     F->timeoutHandler = NULL;
553     F->timeout = 0;
554 }
555 
556 int
557 commSetConnTimeout(const Comm::ConnectionPointer &conn, int timeout, AsyncCall::Pointer &callback)
558 {
559     debugs(5, 3, HERE << conn << " timeout " << timeout);
560     assert(Comm::IsConnOpen(conn));
561     assert(conn->fd < Squid_MaxFD);
562     fde *F = &fd_table[conn->fd];
563     assert(F->flags.open);
564 
565     if (timeout < 0) {
566         F->timeoutHandler = NULL;
567         F->timeout = 0;
568     } else {
569         if (callback != NULL) {
570             typedef CommTimeoutCbParams Params;
571             Params &params = GetCommParams<Params>(callback);
572             params.conn = conn;
573             F->timeoutHandler = callback;
574         }
575 
576         F->timeout = squid_curtime + (time_t) timeout;
577     }
578 
579     return F->timeout;
580 }
581 
582 int
583 commUnsetConnTimeout(const Comm::ConnectionPointer &conn)
584 {
585     debugs(5, 3, HERE << "Remove timeout for " << conn);
586     AsyncCall::Pointer nil;
587     return commSetConnTimeout(conn, -1, nil);
588 }
589 
590 /**
591  * Connect socket FD to given remote address.
592  * If return value is an error flag (COMM_ERROR, ERR_CONNECT, ERR_PROTOCOL, etc.),
593  * then error code will also be returned in errno.
594  */
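/*
 * Illustrative usage sketch (hypothetical caller, for example only): a
 * non-blocking connect is typically retried until it leaves the INPROGRESS
 * state; "peer" is an assumed Ip::Address with a non-zero port.
 *
 *   switch (comm_connect_addr(fd, peer)) {
 *   case Comm::OK:          // connected (or already connected)
 *       break;
 *   case Comm::INPROGRESS:  // wait for write readiness, then call again
 *       break;
 *   default:                // ERR_PROTOCOL or COMM_ERROR; errno has the cause
 *       break;
 *   }
 */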
595 int
596 comm_connect_addr(int sock, const Ip::Address &address)
597 {
598     Comm::Flag status = Comm::OK;
599     fde *F = &fd_table[sock];
600     int x = 0;
601     int err = 0;
602     socklen_t errlen;
603     struct addrinfo *AI = NULL;
604     PROF_start(comm_connect_addr);
605 
606     assert(address.port() != 0);
607 
608     debugs(5, 9, HERE << "connecting socket FD " << sock << " to " << address << " (want family: " << F->sock_family << ")");
609 
610     /* Handle the IPv6-over-IPv4-only-socket case.
611      * This case must presently be handled here since getAddrInfo() asserts on bad mappings.
612      * NP: because commResetFD is private to ConnStateData we have to return an error and
613      *     trust it is handled properly.
614      */
615     if (F->sock_family == AF_INET && !address.isIPv4()) {
616         errno = ENETUNREACH;
617         return Comm::ERR_PROTOCOL;
618     }
619 
620     /* Handle the IPv4-over-IPv6-only-socket case.
621      * This case is presently handled here as it is both a known case and it is
622      * uncertain what error the IPv6 stack will return in such a case. It is
623      * possible this will also be handled by the errno checks below after connect()
624      * but that needs careful cross-platform verification, and verifying the address
625      * condition here is simple.
626      */
627     if (!F->local_addr.isIPv4() && address.isIPv4()) {
628         errno = ENETUNREACH;
629         return Comm::ERR_PROTOCOL;
630     }
631 
632     address.getAddrInfo(AI, F->sock_family);
633 
634     /* Establish connection. */
635     int xerrno = 0;
636 
637     if (!F->flags.called_connect) {
638         F->flags.called_connect = true;
639         ++ statCounter.syscalls.sock.connects;
640 
641         errno = 0;
642         if ((x = connect(sock, AI->ai_addr, AI->ai_addrlen)) < 0) {
643             xerrno = errno;
644             debugs(5,5, "sock=" << sock << ", addrinfo(" <<
645                    " flags=" << AI->ai_flags <<
646                    ", family=" << AI->ai_family <<
647                    ", socktype=" << AI->ai_socktype <<
648                    ", protocol=" << AI->ai_protocol <<
649                    ", &addr=" << AI->ai_addr <<
650                    ", addrlen=" << AI->ai_addrlen << " )");
651             debugs(5, 9, "connect FD " << sock << ": (" << x << ") " << xstrerr(xerrno));
652             debugs(14,9, "connecting to: " << address);
653 
654         } else if (x == 0) {
655             // XXX: ICAP code refuses callbacks during a pending comm_ call
656             // Async calls development will fix this.
657             x = -1;
658             xerrno = EINPROGRESS;
659         }
660 
661     } else {
662         errno = 0;
663 #if _SQUID_NEWSOS6_
664         /* Makoto MATSUSHITA <matusita@ics.es.osaka-u.ac.jp> */
665         if (connect(sock, AI->ai_addr, AI->ai_addrlen) < 0)
666             xerrno = errno;
667 
668         if (xerrno == EINVAL) {
669             errlen = sizeof(err);
670             x = getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &errlen);
671             if (x >= 0)
672                 xerrno = x;
673         }
674 #else
675         errlen = sizeof(err);
676         x = getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &errlen);
677         if (x == 0)
678             xerrno = err;
679 
680 #if _SQUID_SOLARIS_
681         /*
682         * Solaris 2.4's socket emulation doesn't allow you
683         * to determine the error from a failed non-blocking
684         * connect and just returns EPIPE.  Create a fake
685         * error message for connect.   -- fenner@parc.xerox.com
686         */
687         if (x < 0 && xerrno == EPIPE)
688             xerrno = ENOTCONN;
689         else
690             xerrno = errno;
691 #endif
692 #endif
693     }
694 
695     Ip::Address::FreeAddr(AI);
696 
697     PROF_stop(comm_connect_addr);
698 
699     errno = xerrno;
700     if (xerrno == 0 || xerrno == EISCONN)
701         status = Comm::OK;
702     else if (ignoreErrno(xerrno))
703         status = Comm::INPROGRESS;
704     else if (xerrno == EAFNOSUPPORT || xerrno == EINVAL)
705         return Comm::ERR_PROTOCOL;
706     else
707         return Comm::COMM_ERROR;
708 
709     address.toStr(F->ipaddr, MAX_IPSTRLEN);
710 
711     F->remote_port = address.port(); /* remote_port is HS */
712 
713     if (status == Comm::OK) {
714         debugs(5, DBG_DATA, "comm_connect_addr: FD " << sock << " connected to " << address);
715     } else if (status == Comm::INPROGRESS) {
716         debugs(5, DBG_DATA, "comm_connect_addr: FD " << sock << " connection pending");
717     }
718 
719     errno = xerrno;
720     return status;
721 }
722 
723 void
724 commCallCloseHandlers(int fd)
725 {
726     fde *F = &fd_table[fd];
727     debugs(5, 5, "commCallCloseHandlers: FD " << fd);
728 
729     while (F->closeHandler != NULL) {
730         AsyncCall::Pointer call = F->closeHandler;
731         F->closeHandler = call->Next();
732         call->setNext(NULL);
733         // Schedule the call for execution unless it has been canceled
734         if (!call->canceled()) {
735             debugs(5, 5, "commCallCloseHandlers: ch->handler=" << call);
736             ScheduleCallHere(call);
737         }
738     }
739 }
740 
741 #if LINGERING_CLOSE
742 static void
743 commLingerClose(int fd, void *unused)
744 {
745     LOCAL_ARRAY(char, buf, 1024);
746     int n = FD_READ_METHOD(fd, buf, 1024);
747     if (n < 0) {
748         int xerrno = errno;
749         debugs(5, 3, "FD " << fd << " read: " << xstrerr(xerrno));
750     }
751     comm_close(fd);
752 }
753 
754 static void
755 commLingerTimeout(const FdeCbParams &params)
756 {
757     debugs(5, 3, "commLingerTimeout: FD " << params.fd);
758     comm_close(params.fd);
759 }
760 
761 /*
762  * Inspired by apache
763  */
764 void
765 comm_lingering_close(int fd)
766 {
767     Security::SessionSendGoodbye(fd_table[fd].ssl);
768 
769     if (shutdown(fd, 1) < 0) {
770         comm_close(fd);
771         return;
772     }
773 
774     fd_note(fd, "lingering close");
775     AsyncCall::Pointer call = commCbCall(5,4, "commLingerTimeout", FdeCbPtrFun(commLingerTimeout, NULL));
776 
777     debugs(5, 3, HERE << "FD " << fd << " timeout 10 seconds");
778     assert(fd_table[fd].flags.open);
779     if (call != NULL) {
780         typedef FdeCbParams Params;
781         Params &params = GetCommParams<Params>(call);
782         params.fd = fd;
783         fd_table[fd].timeoutHandler = call;
784         fd_table[fd].timeout = squid_curtime + static_cast<time_t>(10);
785     }
786 
787     Comm::SetSelect(fd, COMM_SELECT_READ, commLingerClose, NULL, 0);
788 }
789 
790 #endif
791 
792 /**
793  * enable linger with time of 0 so that when the socket is
794  * closed, TCP generates a RESET
795  */
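// With l_onoff=1 and l_linger=0, the later close() discards any unsent data
// and aborts the connection with a RST instead of the normal FIN handshake.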
796 void
797 comm_reset_close(const Comm::ConnectionPointer &conn)
798 {
799     struct linger L;
800     L.l_onoff = 1;
801     L.l_linger = 0;
802 
803     if (setsockopt(conn->fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0) {
804         int xerrno = errno;
805         debugs(50, DBG_CRITICAL, "ERROR: Closing " << conn << " with TCP RST: " << xstrerr(xerrno));
806     }
807     conn->close();
808 }
809 
810 // Legacy close function.
811 void
812 old_comm_reset_close(int fd)
813 {
814     struct linger L;
815     L.l_onoff = 1;
816     L.l_linger = 0;
817 
818     if (setsockopt(fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0) {
819         int xerrno = errno;
820         debugs(50, DBG_CRITICAL, "ERROR: Closing FD " << fd << " with TCP RST: " << xstrerr(xerrno));
821     }
822     comm_close(fd);
823 }
824 
825 void
826 commStartTlsClose(const FdeCbParams &params)
827 {
828     Security::SessionSendGoodbye(fd_table[params.fd].ssl);
829 }
830 
831 void
832 comm_close_complete(const FdeCbParams &params)
833 {
834     fde *F = &fd_table[params.fd];
835     F->ssl.reset();
836     F->dynamicTlsContext.reset();
837     fd_close(params.fd);        /* update fdstat */
838     close(params.fd);
839 
840     ++ statCounter.syscalls.sock.closes;
841 
842     /* When one connection closes, give accept() a chance, if need be */
843     Comm::AcceptLimiter::Instance().kick();
844 }
845 
846 /*
847  * Close the socket fd.
848  *
849  * + call write handlers with ERR_CLOSING
850  * + call read handlers with ERR_CLOSING
851  * + call closing handlers
852  *
853  * NOTE: Comm::ERR_CLOSING will NOT be called for CommReads sitting in a
854  * DeferredReadManager.
855  */
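// Note: comm_close(fd) is expected to be a macro (defined in comm.h) that
// forwards to _comm_close(fd, __FILE__, __LINE__), so that fdd_table below can
// record where each close was requested.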
856 void
857 _comm_close(int fd, char const *file, int line)
858 {
859     debugs(5, 3, "comm_close: start closing FD " << fd);
860     assert(fd >= 0);
861     assert(fd < Squid_MaxFD);
862 
863     fde *F = &fd_table[fd];
864     fdd_table[fd].close_file = file;
865     fdd_table[fd].close_line = line;
866 
867     if (F->closing())
868         return;
869 
870     /* XXX: is this obsolete behind F->closing() ? */
871     if ( (shutting_down || reconfiguring) && (!F->flags.open || F->type == FD_FILE))
872         return;
873 
874     /* The following fails because ipc.c is doing calls to pipe() to create sockets! */
875     if (!isOpen(fd)) {
876         debugs(50, DBG_IMPORTANT, HERE << "BUG 3556: FD " << fd << " is not an open socket.");
877         // XXX: do we need to run close(fd) or fd_close(fd) here?
878         return;
879     }
880 
881     assert(F->type != FD_FILE);
882 
883     PROF_start(comm_close);
884 
885     F->flags.close_request = true;
886 
887     if (F->ssl) {
888         AsyncCall::Pointer startCall=commCbCall(5,4, "commStartTlsClose",
889                                                 FdeCbPtrFun(commStartTlsClose, nullptr));
890         FdeCbParams &startParams = GetCommParams<FdeCbParams>(startCall);
891         startParams.fd = fd;
892         ScheduleCallHere(startCall);
893     }
894 
895     // a half-closed fd may lack a reader, so we stop monitoring explicitly
896     if (commHasHalfClosedMonitor(fd))
897         commStopHalfClosedMonitor(fd);
898     commUnsetFdTimeout(fd);
899 
900     // notify read/write handlers after canceling select reservations, if any
901     if (COMMIO_FD_WRITECB(fd)->active()) {
902         Comm::SetSelect(fd, COMM_SELECT_WRITE, NULL, NULL, 0);
903         COMMIO_FD_WRITECB(fd)->finish(Comm::ERR_CLOSING, errno);
904     }
905     if (COMMIO_FD_READCB(fd)->active()) {
906         Comm::SetSelect(fd, COMM_SELECT_READ, NULL, NULL, 0);
907         COMMIO_FD_READCB(fd)->finish(Comm::ERR_CLOSING, errno);
908     }
909 
910 #if USE_DELAY_POOLS
911     if (ClientInfo *clientInfo = F->clientInfo) {
912         if (clientInfo->selectWaiting) {
913             clientInfo->selectWaiting = false;
914             // kick the queue or it will get stuck because Comm::HandleWrite is not called
915             clientInfo->kickQuotaQueue();
916         }
917     }
918 #endif
919 
920     commCallCloseHandlers(fd);
921 
922     comm_empty_os_read_buffers(fd);
923 
924     AsyncCall::Pointer completeCall=commCbCall(5,4, "comm_close_complete",
925                                     FdeCbPtrFun(comm_close_complete, NULL));
926     FdeCbParams &completeParams = GetCommParams<FdeCbParams>(completeCall);
927     completeParams.fd = fd;
928     // must use async call to wait for all callbacks
929     // scheduled before comm_close() to finish
930     ScheduleCallHere(completeCall);
931 
932     PROF_stop(comm_close);
933 }
934 
935 /* Send a udp datagram to specified TO_ADDR. */
936 int
937 comm_udp_sendto(int fd,
938                 const Ip::Address &to_addr,
939                 const void *buf,
940                 int len)
941 {
942     PROF_start(comm_udp_sendto);
943     ++ statCounter.syscalls.sock.sendtos;
944 
945     debugs(50, 3, "comm_udp_sendto: Attempt to send UDP packet to " << to_addr <<
946            " using FD " << fd << " using Port " << comm_local_port(fd) );
947 
948     struct addrinfo *AI = NULL;
949     to_addr.getAddrInfo(AI, fd_table[fd].sock_family);
950     int x = sendto(fd, buf, len, 0, AI->ai_addr, AI->ai_addrlen);
951     int xerrno = errno;
952     Ip::Address::FreeAddr(AI);
953 
954     PROF_stop(comm_udp_sendto);
955 
956     if (x >= 0) {
957         errno = xerrno; // restore for caller to use
958         return x;
959     }
960 
961 #if _SQUID_LINUX_
962     if (ECONNREFUSED != xerrno)
963 #endif
964         debugs(50, DBG_IMPORTANT, MYNAME << "FD " << fd << ", (family=" << fd_table[fd].sock_family << ") " << to_addr << ": " << xstrerr(xerrno));
965 
966     errno = xerrno; // restore for caller to use
967     return Comm::COMM_ERROR;
968 }
969 
970 AsyncCall::Pointer
971 comm_add_close_handler(int fd, CLCB * handler, void *data)
972 {
973     debugs(5, 5, "comm_add_close_handler: FD " << fd << ", handler=" <<
974            handler << ", data=" << data);
975 
976     AsyncCall::Pointer call=commCbCall(5,4, "SomeCloseHandler",
977                                        CommCloseCbPtrFun(handler, data));
978     comm_add_close_handler(fd, call);
979     return call;
980 }
981 
982 void
983 comm_add_close_handler(int fd, AsyncCall::Pointer &call)
984 {
985     debugs(5, 5, "comm_add_close_handler: FD " << fd << ", AsyncCall=" << call);
986 
987     /*TODO:Check for a similar scheduled AsyncCall*/
988 //    for (c = fd_table[fd].closeHandler; c; c = c->next)
989 //        assert(c->handler != handler || c->data != data);
990 
991     call->setNext(fd_table[fd].closeHandler);
992 
993     fd_table[fd].closeHandler = call;
994 }
995 
996 // remove function-based close handler
997 void
998 comm_remove_close_handler(int fd, CLCB * handler, void *data)
999 {
1000     assert(isOpen(fd));
1001     /* Find handler in list */
1002     debugs(5, 5, "comm_remove_close_handler: FD " << fd << ", handler=" <<
1003            handler << ", data=" << data);
1004 
1005     AsyncCall::Pointer p, prev = NULL;
1006     for (p = fd_table[fd].closeHandler; p != NULL; prev = p, p = p->Next()) {
1007         typedef CommCbFunPtrCallT<CommCloseCbPtrFun> Call;
1008         const Call *call = dynamic_cast<const Call*>(p.getRaw());
1009         if (!call) // method callbacks have their own comm_remove_close_handler
1010             continue;
1011 
1012         typedef CommCloseCbParams Params;
1013         const Params &params = GetCommParams<Params>(p);
1014         if (call->dialer.handler == handler && params.data == data)
1015             break;      /* This is our handler */
1016     }
1017 
1018     // comm_close removes all close handlers so our handler may be gone
1019     if (p != NULL) {
1020         p->dequeue(fd_table[fd].closeHandler, prev);
1021         p->cancel("comm_remove_close_handler");
1022     }
1023 }
1024 
1025 // remove method-based close handler
1026 void
1027 comm_remove_close_handler(int fd, AsyncCall::Pointer &call)
1028 {
1029     assert(isOpen(fd));
1030     debugs(5, 5, "comm_remove_close_handler: FD " << fd << ", AsyncCall=" << call);
1031 
1032     // comm_close removes all close handlers so our handler may be gone
1033     AsyncCall::Pointer p, prev = NULL;
1034     for (p = fd_table[fd].closeHandler; p != NULL && p != call; prev = p, p = p->Next());
1035 
1036     if (p != NULL)
1037         p->dequeue(fd_table[fd].closeHandler, prev);
1038     call->cancel("comm_remove_close_handler");
1039 }
1040 
1041 static void
1042 commSetNoLinger(int fd)
1043 {
1044 
1045     struct linger L;
1046     L.l_onoff = 0;      /* off */
1047     L.l_linger = 0;
1048 
1049     if (setsockopt(fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0) {
1050         int xerrno = errno;
1051         debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1052     }
1053     fd_table[fd].flags.nolinger = true;
1054 }
1055 
1056 static void
1057 commSetReuseAddr(int fd)
1058 {
1059     int on = 1;
1060     if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *) &on, sizeof(on)) < 0) {
1061         int xerrno = errno;
1062         debugs(50, DBG_IMPORTANT, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1063     }
1064 }
1065 
1066 static void
1067 commSetTcpRcvbuf(int fd, int size)
1068 {
1069     if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, (char *) &size, sizeof(size)) < 0) {
1070         int xerrno = errno;
1071         debugs(50, DBG_IMPORTANT, MYNAME << "FD " << fd << ", SIZE " << size << ": " << xstrerr(xerrno));
1072     }
1073     if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, (char *) &size, sizeof(size)) < 0) {
1074         int xerrno = errno;
1075         debugs(50, DBG_IMPORTANT, MYNAME << "FD " << fd << ", SIZE " << size << ": " << xstrerr(xerrno));
1076     }
1077 #ifdef TCP_WINDOW_CLAMP
1078     if (setsockopt(fd, SOL_TCP, TCP_WINDOW_CLAMP, (char *) &size, sizeof(size)) < 0) {
1079         int xerrno = errno;
1080         debugs(50, DBG_IMPORTANT, MYNAME << "FD " << fd << ", SIZE " << size << ": " << xstrerr(xerrno));
1081     }
1082 #endif
1083 }
1084 
1085 int
1086 commSetNonBlocking(int fd)
1087 {
1088 #if _SQUID_WINDOWS_
1089     int nonblocking = TRUE;
1090 
1091     if (ioctl(fd, FIONBIO, &nonblocking) < 0) {
1092         int xerrno = errno;
1093         debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": " << xstrerr(xerrno) << " " << fd_table[fd].type);
1094         return Comm::COMM_ERROR;
1095     }
1096 
1097 #else
1098     int flags;
1099     int dummy = 0;
1100 
1101     if ((flags = fcntl(fd, F_GETFL, dummy)) < 0) {
1102         int xerrno = errno;
1103         debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": fcntl F_GETFL: " << xstrerr(xerrno));
1104         return Comm::COMM_ERROR;
1105     }
1106 
1107     if (fcntl(fd, F_SETFL, flags | SQUID_NONBLOCK) < 0) {
1108         int xerrno = errno;
1109         debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1110         return Comm::COMM_ERROR;
1111     }
1112 #endif
1113 
1114     fd_table[fd].flags.nonblocking = true;
1115     return 0;
1116 }
1117 
1118 int
1119 commUnsetNonBlocking(int fd)
1120 {
1121 #if _SQUID_WINDOWS_
1122     int nonblocking = FALSE;
1123 
1124     if (ioctlsocket(fd, FIONBIO, (unsigned long *) &nonblocking) < 0) {
1125 #else
1126     int flags;
1127     int dummy = 0;
1128 
1129     if ((flags = fcntl(fd, F_GETFL, dummy)) < 0) {
1130         int xerrno = errno;
1131         debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": fcntl F_GETFL: " << xstrerr(xerrno));
1132         return Comm::COMM_ERROR;
1133     }
1134 
1135     if (fcntl(fd, F_SETFL, flags & (~SQUID_NONBLOCK)) < 0) {
1136 #endif
1137         int xerrno = errno;
1138         debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1139         return Comm::COMM_ERROR;
1140     }
1141 
1142     fd_table[fd].flags.nonblocking = false;
1143     return 0;
1144 }
1145 
1146 void
1147 commSetCloseOnExec(int fd)
1148 {
1149 #ifdef FD_CLOEXEC
1150     int flags;
1151     int dummy = 0;
1152 
1153     if ((flags = fcntl(fd, F_GETFD, dummy)) < 0) {
1154         int xerrno = errno;
1155         debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": fcntl F_GETFD: " << xstrerr(xerrno));
1156         return;
1157     }
1158 
1159     if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) < 0) {
1160         int xerrno = errno;
1161         debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": set close-on-exec failed: " << xstrerr(xerrno));
1162     }
1163 
1164     fd_table[fd].flags.close_on_exec = true;
1165 
1166 #endif
1167 }
1168 
1169 #ifdef TCP_NODELAY
1170 static void
1171 commSetTcpNoDelay(int fd)
1172 {
1173     int on = 1;
1174 
1175     if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (char *) &on, sizeof(on)) < 0) {
1176         int xerrno = errno;
1177         debugs(50, DBG_IMPORTANT, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1178     }
1179 
1180     fd_table[fd].flags.nodelay = true;
1181 }
1182 
1183 #endif
1184 
1185 void
1186 commSetTcpKeepalive(int fd, int idle, int interval, int timeout)
1187 {
1188     int on = 1;
1189 #ifdef TCP_KEEPCNT
1190     if (timeout && interval) {
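        // Number of keepalive probes needed to cover the requested timeout,
        // rounded up: e.g. timeout=30s with interval=10s yields 3 probes.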
1191         int count = (timeout + interval - 1) / interval;
1192         if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &count, sizeof(count)) < 0) {
1193             int xerrno = errno;
1194             debugs(5, DBG_IMPORTANT, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1195         }
1196     }
1197 #endif
1198 #ifdef TCP_KEEPIDLE
1199     if (idle) {
1200         if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle)) < 0) {
1201             int xerrno = errno;
1202             debugs(5, DBG_IMPORTANT, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1203         }
1204     }
1205 #endif
1206 #ifdef TCP_KEEPINTVL
1207     if (interval) {
1208         if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &interval, sizeof(interval)) < 0) {
1209             int xerrno = errno;
1210             debugs(5, DBG_IMPORTANT, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1211         }
1212     }
1213 #endif
1214     if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (char *) &on, sizeof(on)) < 0) {
1215         int xerrno = errno;
1216         debugs(5, DBG_IMPORTANT, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1217     }
1218 }
1219 
1220 void
1221 comm_init(void)
1222 {
1223     fd_table =(fde *) xcalloc(Squid_MaxFD, sizeof(fde));
1224     fdd_table = (fd_debug_t *)xcalloc(Squid_MaxFD, sizeof(fd_debug_t));
1225 
1226     /* make sure the accept() socket FIFO delay queue exists */
1227     Comm::AcceptLimiter::Instance();
1228 
1229     // make sure the IO pending callback table exists
1230     Comm::CallbackTableInit();
1231 
1232     /* XXX account fd_table */
1233     /* Keep a few file descriptors free so that we don't run out of FD's
1234      * after accepting a client but before it opens a socket or a file.
1235      * Since Squid_MaxFD can be as high as several thousand, don't waste them */
1236     RESERVED_FD = min(100, Squid_MaxFD / 4);
1237 
1238     TheHalfClosed = new DescriptorSet;
1239 
1240     /* setup the select loop module */
1241     Comm::SelectLoopInit();
1242 }
1243 
1244 void
1245 comm_exit(void)
1246 {
1247     delete TheHalfClosed;
1248     TheHalfClosed = NULL;
1249 
1250     safe_free(fd_table);
1251     safe_free(fdd_table);
1252     Comm::CallbackTableDestruct();
1253 }
1254 
1255 #if USE_DELAY_POOLS
1256 // called when the queue is done waiting for the client bucket to fill
1257 void
1258 commHandleWriteHelper(void * data)
1259 {
1260     CommQuotaQueue *queue = static_cast<CommQuotaQueue*>(data);
1261     assert(queue);
1262 
1263     ClientInfo *clientInfo = queue->clientInfo;
1264     // ClientInfo invalidates the queue if freed, so if we got here through
1265     // eventAdd's cbdata protections, everything should be valid and consistent
1266     assert(clientInfo);
1267     assert(clientInfo->hasQueue());
1268     assert(clientInfo->hasQueue(queue));
1269     assert(!clientInfo->selectWaiting);
1270     assert(clientInfo->eventWaiting);
1271     clientInfo->eventWaiting = false;
1272 
1273     do {
1274         // check that the head descriptor is still relevant
1275         const int head = clientInfo->quotaPeekFd();
1276         Comm::IoCallback *ccb = COMMIO_FD_WRITECB(head);
1277 
1278         if (fd_table[head].clientInfo == clientInfo &&
1279                 clientInfo->quotaPeekReserv() == ccb->quotaQueueReserv &&
1280                 !fd_table[head].closing()) {
1281 
1282             // wait for the head descriptor to become ready for writing
1283             Comm::SetSelect(head, COMM_SELECT_WRITE, Comm::HandleWrite, ccb, 0);
1284             clientInfo->selectWaiting = true;
1285             return;
1286         }
1287 
1288         clientInfo->quotaDequeue(); // remove the no longer relevant descriptor
1289         // and continue looking for a relevant one
1290     } while (clientInfo->hasQueue());
1291 
1292     debugs(77,3, HERE << "emptied queue");
1293 }
1294 
1295 bool
1296 ClientInfo::hasQueue() const
1297 {
1298     assert(quotaQueue);
1299     return !quotaQueue->empty();
1300 }
1301 
1302 bool
1303 ClientInfo::hasQueue(const CommQuotaQueue *q) const
1304 {
1305     assert(quotaQueue);
1306     return quotaQueue == q;
1307 }
1308 
1309 /// returns the first descriptor to be dequeued
1310 int
1311 ClientInfo::quotaPeekFd() const
1312 {
1313     assert(quotaQueue);
1314     return quotaQueue->front();
1315 }
1316 
1317 /// returns the reservation ID of the first descriptor to be dequeued
1318 unsigned int
1319 ClientInfo::quotaPeekReserv() const
1320 {
1321     assert(quotaQueue);
1322     return quotaQueue->outs + 1;
1323 }
1324 
1325 /// queues a given fd on the already-existing per-client queue; returns reservation ID
1326 unsigned int
1327 ClientInfo::quotaEnqueue(int fd)
1328 {
1329     assert(quotaQueue);
1330     return quotaQueue->enqueue(fd);
1331 }
1332 
1333 /// removes queue head
1334 void
1335 ClientInfo::quotaDequeue()
1336 {
1337     assert(quotaQueue);
1338     quotaQueue->dequeue();
1339 }
1340 
1341 void
1342 ClientInfo::kickQuotaQueue()
1343 {
1344     if (!eventWaiting && !selectWaiting && hasQueue()) {
1345         // wait at least a second if the bucket is empty
1346         const double delay = (bucketSize < 1.0) ? 1.0 : 0.0;
1347         eventAdd("commHandleWriteHelper", &commHandleWriteHelper,
1348                  quotaQueue, delay, 0, true);
1349         eventWaiting = true;
1350     }
1351 }
1352 
1353 /// calculates how much to write for a single dequeued client
1354 int
1355 ClientInfo::quotaForDequed()
1356 {
1357     /* If we have multiple clients and give full bucketSize to each client then
1358      * clt1 may often get a lot more because clt1->clt2 time distance in the
1359      * select(2) callback order may be a lot smaller than cltN->clt1 distance.
1360      * We divide quota evenly to be more fair. */
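    // Illustrative numbers (not from the source): if bucketSize is 600 bytes
    // and the queue holds two more descriptors when the ration is recalculated,
    // rationedCount becomes 2 + 1 = 3 and each of the next three dequeues is
    // allowed floor(600/3) = 200 bytes.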
1361 
1362     if (!rationedCount) {
1363         rationedCount = quotaQueue->size() + 1;
1364 
1365         // The delay in ration recalculation _temporarily_ deprives clients of
1366         // bytes that should have trickled in while rationedCount was positive.
1367         refillBucket();
1368 
1369         // Rounding errors do not accumulate here, but we round down to avoid
1370         // negative bucket sizes after write with rationedCount=1.
1371         rationedQuota = static_cast<int>(floor(bucketSize/rationedCount));
1372         debugs(77,5, HERE << "new rationedQuota: " << rationedQuota <<
1373                '*' << rationedCount);
1374     }
1375 
1376     --rationedCount;
1377     debugs(77,7, HERE << "rationedQuota: " << rationedQuota <<
1378            " rations remaining: " << rationedCount);
1379 
1380     // update 'last seen' time to prevent clientdb GC from dropping us
1381     last_seen = squid_curtime;
1382     return rationedQuota;
1383 }
1384 
1385 ///< adds bytes to the quota bucket based on the rate and passed time
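/// Illustrative numbers: with writeSpeedLimit = 1000 bytes/s and 0.25s since
/// the previous refill, the bucket gains 0.25 * 1000 = 250 bytes, capped at
/// bucketSizeLimit.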
1386 void
1387 ClientInfo::refillBucket()
1388 {
1389     // all these times are in seconds, with double precision
1390     const double currTime = current_dtime;
1391     const double timePassed = currTime - prevTime;
1392 
1393     // Calculate allowance for the time passed. Use double to avoid
1394     // accumulating rounding errors for small intervals. For example, always
1395     // adding 1 byte instead of 1.4 results in 29% bandwidth allocation error.
1396     const double gain = timePassed * writeSpeedLimit;
1397 
1398     debugs(77,5, HERE << currTime << " clt" << (const char*)hash.key << ": " <<
1399            bucketSize << " + (" << timePassed << " * " << writeSpeedLimit <<
1400            " = " << gain << ')');
1401 
1402     // to further combat error accumulation during micro updates,
1403     // quit before updating time if we cannot add at least one byte
1404     if (gain < 1.0)
1405         return;
1406 
1407     prevTime = currTime;
1408 
1409     // for "first" connections, drain initial fat before refilling but keep
1410     // updating prevTime to avoid bursts after the fat is gone
1411     if (bucketSize > bucketSizeLimit) {
1412         debugs(77,4, HERE << "not refilling while draining initial fat");
1413         return;
1414     }
1415 
1416     bucketSize += gain;
1417 
1418     // obey quota limits
1419     if (bucketSize > bucketSizeLimit)
1420         bucketSize = bucketSizeLimit;
1421 }
1422 
1423 void
1424 ClientInfo::setWriteLimiter(const int aWriteSpeedLimit, const double anInitialBurst, const double aHighWatermark)
1425 {
1426     debugs(77,5, HERE << "Write limits for " << (const char*)hash.key <<
1427            " speed=" << aWriteSpeedLimit << " burst=" << anInitialBurst <<
1428            " highwatermark=" << aHighWatermark);
1429 
1430     // set or possibly update traffic shaping parameters
1431     writeLimitingActive = true;
1432     writeSpeedLimit = aWriteSpeedLimit;
1433     bucketSizeLimit = aHighWatermark;
1434 
1435     // but some members should only be set once for a newly activated bucket
1436     if (firstTimeConnection) {
1437         firstTimeConnection = false;
1438 
1439         assert(!selectWaiting);
1440         assert(!quotaQueue);
1441         quotaQueue = new CommQuotaQueue(this);
1442 
1443         bucketSize = anInitialBurst;
1444         prevTime = current_dtime;
1445     }
1446 }
1447 
1448 CommQuotaQueue::CommQuotaQueue(ClientInfo *info): clientInfo(info),
1449     ins(0), outs(0)
1450 {
1451     assert(clientInfo);
1452 }
1453 
1454 CommQuotaQueue::~CommQuotaQueue()
1455 {
1456     assert(!clientInfo); // ClientInfo should clear this before destroying us
1457 }
1458 
1459 /// places the given fd at the end of the queue; returns reservation ID
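/// Reservation IDs are plain monotonic counters: enqueue() returns ++ins and
/// the queue head always carries reservation (outs + 1), which is what
/// ClientInfo::quotaPeekReserv() reports.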
1460 unsigned int
1461 CommQuotaQueue::enqueue(int fd)
1462 {
1463     debugs(77,5, HERE << "clt" << (const char*)clientInfo->hash.key <<
1464            ": FD " << fd << " with qqid" << (ins+1) << ' ' << fds.size());
1465     fds.push_back(fd);
1466     return ++ins;
1467 }
1468 
1469 /// removes queue head
1470 void
1471 CommQuotaQueue::dequeue()
1472 {
1473     assert(!fds.empty());
1474     debugs(77,5, HERE << "clt" << (const char*)clientInfo->hash.key <<
1475            ": FD " << fds.front() << " with qqid" << (outs+1) << ' ' <<
1476            fds.size());
1477     fds.pop_front();
1478     ++outs;
1479 }
1480 #endif
1481 
1482 /*
1483  * hm, this might be too general-purpose for all the places we'd
1484  * like to use it.
1485  */
1486 int
1487 ignoreErrno(int ierrno)
1488 {
1489     switch (ierrno) {
1490 
1491     case EINPROGRESS:
1492 
1493     case EWOULDBLOCK:
1494 #if EAGAIN != EWOULDBLOCK
1495 
1496     case EAGAIN:
1497 #endif
1498 
1499     case EALREADY:
1500 
1501     case EINTR:
1502 #ifdef ERESTART
1503 
1504     case ERESTART:
1505 #endif
1506 
1507         return 1;
1508 
1509     default:
1510         return 0;
1511     }
1512 
1513     /* NOTREACHED */
1514 }
1515 
1516 void
1517 commCloseAllSockets(void)
1518 {
1519     int fd;
1520     fde *F = NULL;
1521 
1522     for (fd = 0; fd <= Biggest_FD; ++fd) {
1523         F = &fd_table[fd];
1524 
1525         if (!F->flags.open)
1526             continue;
1527 
1528         if (F->type != FD_SOCKET)
1529             continue;
1530 
1531         if (F->flags.ipc)   /* don't close inter-process sockets */
1532             continue;
1533 
1534         if (F->timeoutHandler != NULL) {
1535             AsyncCall::Pointer callback = F->timeoutHandler;
1536             F->timeoutHandler = NULL;
1537             debugs(5, 5, "commCloseAllSockets: FD " << fd << ": Calling timeout handler");
1538             ScheduleCallHere(callback);
1539         } else {
1540             debugs(5, 5, "commCloseAllSockets: FD " << fd << ": calling comm_reset_close()");
1541             old_comm_reset_close(fd);
1542         }
1543     }
1544 }
1545 
1546 static bool
1547 AlreadyTimedOut(fde *F)
1548 {
1549     if (!F->flags.open)
1550         return true;
1551 
1552     if (F->timeout == 0)
1553         return true;
1554 
1555     if (F->timeout > squid_curtime)
1556         return true;
1557 
1558     return false;
1559 }
1560 
1561 static bool
1562 writeTimedOut(int fd)
1563 {
1564     if (!COMMIO_FD_WRITECB(fd)->active())
1565         return false;
1566 
1567     if ((squid_curtime - fd_table[fd].writeStart) < Config.Timeout.write)
1568         return false;
1569 
1570     return true;
1571 }
1572 
1573 void
1574 checkTimeouts(void)
1575 {
1576     int fd;
1577     fde *F = NULL;
1578     AsyncCall::Pointer callback;
1579 
1580     for (fd = 0; fd <= Biggest_FD; ++fd) {
1581         F = &fd_table[fd];
1582 
1583         if (writeTimedOut(fd)) {
1584             // We have an active write callback and we are timed out
1585             debugs(5, 5, "checkTimeouts: FD " << fd << " auto write timeout");
1586             Comm::SetSelect(fd, COMM_SELECT_WRITE, NULL, NULL, 0);
1587             COMMIO_FD_WRITECB(fd)->finish(Comm::COMM_ERROR, ETIMEDOUT);
1588             continue;
1589         } else if (AlreadyTimedOut(F))
1590             continue;
1591 
1592         debugs(5, 5, "checkTimeouts: FD " << fd << " Expired");
1593 
1594         if (F->timeoutHandler != NULL) {
1595             debugs(5, 5, "checkTimeouts: FD " << fd << ": Call timeout handler");
1596             callback = F->timeoutHandler;
1597             F->timeoutHandler = NULL;
1598             ScheduleCallHere(callback);
1599         } else {
1600             debugs(5, 5, "checkTimeouts: FD " << fd << ": Forcing comm_close()");
1601             comm_close(fd);
1602         }
1603     }
1604 }
1605 
1606 /// Start waiting for a possibly half-closed connection to close
1607 // by scheduling a read callback to a monitoring handler that
1608 // will close the connection on read errors.
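// Illustrative usage sketch (hypothetical caller, for example only): a side
// that has finished sending but still owns the FD can watch for the peer
// closing it, and stop watching if it needs to read again:
//
//   commStartHalfClosedMonitor(conn->fd);
//   ...
//   if (commHasHalfClosedMonitor(conn->fd))
//       commStopHalfClosedMonitor(conn->fd);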
1609 void
1610 commStartHalfClosedMonitor(int fd)
1611 {
1612     debugs(5, 5, HERE << "adding FD " << fd << " to " << *TheHalfClosed);
1613     assert(isOpen(fd) && !commHasHalfClosedMonitor(fd));
1614     (void)TheHalfClosed->add(fd); // could also assert the result
1615     commPlanHalfClosedCheck(); // may schedule check if we added the first FD
1616 }
1617 
1618 static
1619 void
1620 commPlanHalfClosedCheck()
1621 {
1622     if (!WillCheckHalfClosed && !TheHalfClosed->empty()) {
1623         eventAdd("commHalfClosedCheck", &commHalfClosedCheck, NULL, 1.0, 1);
1624         WillCheckHalfClosed = true;
1625     }
1626 }
1627 
1628 /// iterates over all descriptors that may need half-closed tests and
1629 /// calls comm_read for those that do; re-schedules the check if needed
1630 static
1631 void
1632 commHalfClosedCheck(void *)
1633 {
1634     debugs(5, 5, HERE << "checking " << *TheHalfClosed);
1635 
1636     typedef DescriptorSet::const_iterator DSCI;
1637     const DSCI end = TheHalfClosed->end();
1638     for (DSCI i = TheHalfClosed->begin(); i != end; ++i) {
1639         Comm::ConnectionPointer c = new Comm::Connection; // XXX: temporary. make HalfClosed a list of these.
1640         c->fd = *i;
1641         if (!fd_table[c->fd].halfClosedReader) { // not reading already
1642             AsyncCall::Pointer call = commCbCall(5,4, "commHalfClosedReader",
1643                                                  CommIoCbPtrFun(&commHalfClosedReader, NULL));
1644             Comm::Read(c, call);
1645             fd_table[c->fd].halfClosedReader = call;
1646         } else
1647             c->fd = -1; // XXX: temporary. prevent the replaced c from closing the listed FD when destroyed
1648     }
1649 
1650     WillCheckHalfClosed = false; // as far as we know
1651     commPlanHalfClosedCheck(); // may need to check again
1652 }
1653 
1654 /// checks whether we are waiting for possibly half-closed connection to close
1655 // We are monitoring if the read handler for the fd is the monitoring handler.
1656 bool
1657 commHasHalfClosedMonitor(int fd)
1658 {
1659     return TheHalfClosed->has(fd);
1660 }
1661 
1662 /// stop waiting for possibly half-closed connection to close
1663 void
1664 commStopHalfClosedMonitor(int const fd)
1665 {
1666     debugs(5, 5, HERE << "removing FD " << fd << " from " << *TheHalfClosed);
1667 
1668     // cancel the read if one was scheduled
1669     AsyncCall::Pointer reader = fd_table[fd].halfClosedReader;
1670     if (reader != NULL)
1671         Comm::ReadCancel(fd, reader);
1672     fd_table[fd].halfClosedReader = NULL;
1673 
1674     TheHalfClosed->del(fd);
1675 }
1676 
1677 /// I/O handler for the possibly half-closed connection monitoring code
1678 static void
1679 commHalfClosedReader(const Comm::ConnectionPointer &conn, char *, size_t size, Comm::Flag flag, int, void *)
1680 {
1681     // there cannot be more data coming in on half-closed connections
1682     assert(size == 0);
1683     assert(conn != NULL);
1684     assert(commHasHalfClosedMonitor(conn->fd)); // or we would have canceled the read
1685 
1686     fd_table[conn->fd].halfClosedReader = NULL; // done reading, for now
1687 
1688     // nothing to do if fd is being closed
1689     if (flag == Comm::ERR_CLOSING)
1690         return;
1691 
1692     // if read failed, close the connection
1693     if (flag != Comm::OK) {
1694         debugs(5, 3, HERE << "closing " << conn);
1695         conn->close();
1696         return;
1697     }
1698 
1699     // continue waiting for close or error
1700     commPlanHalfClosedCheck(); // make sure this fd will be checked again
1701 }
1702 
1703 CommRead::CommRead() : conn(NULL), buf(NULL), len(0), callback(NULL) {}
1704 
1705 CommRead::CommRead(const Comm::ConnectionPointer &c, char *buf_, int len_, AsyncCall::Pointer &callback_)
1706     : conn(c), buf(buf_), len(len_), callback(callback_) {}
1707 
1708 DeferredRead::DeferredRead () : theReader(NULL), theContext(NULL), theRead(), cancelled(false) {}
1709 
1710 DeferredRead::DeferredRead (DeferrableRead *aReader, void *data, CommRead const &aRead) : theReader(aReader), theContext (data), theRead(aRead), cancelled(false) {}
1711 
1712 DeferredReadManager::~DeferredReadManager()
1713 {
1714     flushReads();
1715     assert (deferredReads.empty());
1716 }
1717 
1718 /* explicit instantiation required for some systems */
1719 
1720 /// \cond AUTODOCS_IGNORE
1721 template cbdata_type CbDataList<DeferredRead>::CBDATA_CbDataList;
1722 /// \endcond
1723 
1724 void
1725 DeferredReadManager::delayRead(DeferredRead const &aRead)
1726 {
1727     debugs(5, 3, "Adding deferred read on " << aRead.theRead.conn);
1728     CbDataList<DeferredRead> *temp = deferredReads.push_back(aRead);
1729 
1730     // We have to use a global function as a closer and point to temp
1731     // instead of "this" because DeferredReadManager is not a job and
1732     // is not even cbdata protected
1733     // XXX: and yet we use cbdata protection functions on it??
1734     AsyncCall::Pointer closer = commCbCall(5,4,
1735                                            "DeferredReadManager::CloseHandler",
1736                                            CommCloseCbPtrFun(&CloseHandler, temp));
1737     comm_add_close_handler(aRead.theRead.conn->fd, closer);
1738     temp->element.closer = closer; // remember so that we can cancel
1739 }
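
/*
 * Usage sketch (illustrative only): a reader that has run out of quota can park
 * its read here and let the quota owner resume it later. The names MyReaderRead,
 * delayMyRead and theManager are hypothetical; the DeferrableRead handler type
 * is assumed to come from CommRead.h.
 *
 * \code
 * static DeferrableRead MyReaderRead; // void MyReaderRead(void *ctx, CommRead const &)
 *
 * void
 * delayMyRead(DeferredReadManager &theManager, void *ctx, const Comm::ConnectionPointer &conn,
 *             char *buf, int size, AsyncCall::Pointer &readCall)
 * {
 *     const CommRead aRead(conn, buf, size, readCall);
 *     theManager.delayRead(DeferredRead(&MyReaderRead, ctx, aRead));
 *     // later, the quota owner calls theManager.kickReads(n) or flushReads(),
 *     // which invokes MyReaderRead(ctx, aRead) for still-open connections
 * }
 * \endcode
 */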
1740 
1741 void
1742 DeferredReadManager::CloseHandler(const CommCloseCbParams &params)
1743 {
1744     if (!cbdataReferenceValid(params.data))
1745         return;
1746 
1747     CbDataList<DeferredRead> *temp = (CbDataList<DeferredRead> *)params.data;
1748 
1749     temp->element.closer = NULL;
1750     temp->element.markCancelled();
1751 }
1752 
1753 DeferredRead
1754 DeferredReadManager::popHead(CbDataListContainer<DeferredRead> &deferredReads)
1755 {
1756     assert (!deferredReads.empty());
1757 
1758     DeferredRead &read = deferredReads.head->element;
1759 
1760     // NOTE: at this point the connection has been paused/stalled for an unknown
1761     //       amount of time. We must re-validate that it is active and usable.
1762 
1763     // If the connection has already been closed, cancel this read.
1764     if (!fd_table || !Comm::IsConnOpen(read.theRead.conn)) {
1765         if (read.closer != NULL) {
1766             read.closer->cancel("Connection closed before.");
1767             read.closer = NULL;
1768         }
1769         read.markCancelled();
1770     }
1771 
1772     if (!read.cancelled) {
1773         comm_remove_close_handler(read.theRead.conn->fd, read.closer);
1774         read.closer = NULL;
1775     }
1776 
1777     DeferredRead result = deferredReads.pop_front();
1778 
1779     return result;
1780 }
1781 
1782 void
1783 DeferredReadManager::kickReads(int const count)
1784 {
1785     /* if we had CbDataList::size() we could consolidate this and flushReads */
1786 
1787     if (count < 1) {
1788         flushReads();
1789         return;
1790     }
1791 
1792     size_t remaining = count;
1793 
1794     while (!deferredReads.empty() && remaining) {
1795         DeferredRead aRead = popHead(deferredReads);
1796         kickARead(aRead);
1797 
1798         if (!aRead.cancelled)
1799             --remaining;
1800     }
1801 }
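
/*
 * Usage sketch (illustrative only): a quota owner resuming parked readers as
 * capacity returns; nowAvailable, shuttingDown, and theManager are hypothetical.
 *
 * \code
 * if (nowAvailable > 0)
 *     theManager.kickReads(nowAvailable); // resume up to nowAvailable readers
 * else if (shuttingDown)
 *     theManager.kickReads(-1); // a count below 1 kicks every deferred read
 * \endcode
 */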
1802 
1803 void
1804 DeferredReadManager::flushReads()
1805 {
1806     CbDataListContainer<DeferredRead> reads;
1807     reads = deferredReads;
1808     deferredReads = CbDataListContainer<DeferredRead>();
1809 
1810     // XXX: For fairness this SHOULD randomize the order
1811     while (!reads.empty()) {
1812         DeferredRead aRead = popHead(reads);
1813         kickARead(aRead);
1814     }
1815 }
1816 
1817 void
1818 DeferredReadManager::kickARead(DeferredRead const &aRead)
1819 {
1820     if (aRead.cancelled)
1821         return;
1822 
1823     if (Comm::IsConnOpen(aRead.theRead.conn) && fd_table[aRead.theRead.conn->fd].closing())
1824         return;
1825 
1826     debugs(5, 3, "Kicking deferred read on " << aRead.theRead.conn);
1827 
1828     aRead.theReader(aRead.theContext, aRead.theRead);
1829 }
1830 
1831 void
1832 DeferredRead::markCancelled()
1833 {
1834     cancelled = true;
1835 }
1836 
1837 int
1838 CommSelectEngine::checkEvents(int timeout)
1839 {
1840     static time_t last_timeout = 0;
1841 
1842     /* No, this shouldn't be here. But it shouldn't be in each comm handler. -adrian */
1843     if (squid_curtime > last_timeout) {
1844         last_timeout = squid_curtime;
1845         checkTimeouts();
1846     }
1847 
1848     switch (Comm::DoSelect(timeout)) {
1849 
1850     case Comm::OK:
1851 
1852     case Comm::TIMEOUT:
1853         return 0;
1854 
1855     case Comm::IDLE:
1856 
1857     case Comm::SHUTDOWN:
1858         return EVENT_IDLE;
1859 
1860     case Comm::COMM_ERROR:
1861         return EVENT_ERROR;
1862 
1863     default:
1864         fatal_dump("comm.cc: Internal error -- this should never happen.");
1865         return EVENT_ERROR;
1866     }
1867 }
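
/*
 * Usage sketch (illustrative only): this engine is meant to be polled by the
 * main event loop, assuming the EventLoop::registerEngine(), setPrimaryEngine()
 * and run() APIs from EventLoop.h (as wired up in main.cc).
 *
 * \code
 * EventLoop mainLoop;
 * CommSelectEngine comm_engine;
 * mainLoop.registerEngine(&comm_engine);
 * mainLoop.setPrimaryEngine(&comm_engine); // its checkEvents() paces the loop
 * mainLoop.run();
 * \endcode
 */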
1868 
1869 /// Create a unix-domain socket (UDS) that only supports FD_MSGHDR I/O.
1870 int
1871 comm_open_uds(int sock_type,
1872               int proto,
1873               struct sockaddr_un* addr,
1874               int flags)
1875 {
1876     // TODO: merge with comm_openex() when Ip::Address becomes NetAddress
1877 
1878     int new_socket;
1879 
1880     PROF_start(comm_open);
1881     /* Create the unix-domain socket. */
1882     ++ statCounter.syscalls.sock.sockets;
1883 
1884     /* Set up the socket addrinfo details for use */
1885     struct addrinfo AI;
1886     AI.ai_flags = 0;
1887     AI.ai_family = PF_UNIX;
1888     AI.ai_socktype = sock_type;
1889     AI.ai_protocol = proto;
1890     AI.ai_addrlen = SUN_LEN(addr);
1891     AI.ai_addr = (sockaddr*)addr;
1892     AI.ai_canonname = NULL;
1893     AI.ai_next = NULL;
1894 
1895     debugs(50, 3, HERE << "Attempting to open socket for: " << addr->sun_path);
1896 
1897     if ((new_socket = socket(AI.ai_family, AI.ai_socktype, AI.ai_protocol)) < 0) {
1898         int xerrno = errno;
1899         /* Increase the number of reserved fd's if calls to socket()
1900          * are failing because the open file table is full.  This
1901          * limits the number of simultaneous clients */
1902 
1903         if (limitError(xerrno)) {
1904             debugs(50, DBG_IMPORTANT, MYNAME << "socket failure: " << xstrerr(xerrno));
1905             fdAdjustReserved();
1906         } else {
1907             debugs(50, DBG_CRITICAL, MYNAME << "socket failure: " << xstrerr(xerrno));
1908         }
1909 
1910         PROF_stop(comm_open);
1911         return -1;
1912     }
1913 
1914     debugs(50, 3, "Opened UDS FD " << new_socket << " : family=" << AI.ai_family << ", type=" << AI.ai_socktype << ", protocol=" << AI.ai_protocol);
1915 
1916     /* update fdstat */
1917     debugs(50, 5, HERE << "FD " << new_socket << " is a new socket");
1918 
1919     assert(!isOpen(new_socket));
1920     fd_open(new_socket, FD_MSGHDR, addr->sun_path);
1921 
1922     fdd_table[new_socket].close_file = NULL;
1923 
1924     fdd_table[new_socket].close_line = 0;
1925 
1926     fd_table[new_socket].sock_family = AI.ai_family;
1927 
1928     if (!(flags & COMM_NOCLOEXEC))
1929         commSetCloseOnExec(new_socket);
1930 
1931     if (flags & COMM_REUSEADDR)
1932         commSetReuseAddr(new_socket);
1933 
1934     if (flags & COMM_NONBLOCKING) {
1935         if (commSetNonBlocking(new_socket) != Comm::OK) {
1936             comm_close(new_socket);
1937             PROF_stop(comm_open);
1938             return -1;
1939         }
1940     }
1941 
1942     if (flags & COMM_DOBIND) {
1943         if (commBind(new_socket, AI) != Comm::OK) {
1944             comm_close(new_socket);
1945             PROF_stop(comm_open);
1946             return -1;
1947         }
1948     }
1949 
1950 #ifdef TCP_NODELAY
1951     if (sock_type == SOCK_STREAM)
1952         commSetTcpNoDelay(new_socket);
1953 
1954 #endif
1955 
1956     if (Config.tcpRcvBufsz > 0 && sock_type == SOCK_STREAM)
1957         commSetTcpRcvbuf(new_socket, Config.tcpRcvBufsz);
1958 
1959     PROF_stop(comm_open);
1960 
1961     return new_socket;
1962 }
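
/*
 * Usage sketch (illustrative only): opening a non-blocking datagram UDS bound
 * to a local path, roughly as the IPC code does. The path is hypothetical.
 *
 * \code
 * struct sockaddr_un addr;
 * memset(&addr, 0, sizeof(addr));
 * addr.sun_family = AF_UNIX;
 * xstrncpy(addr.sun_path, "/var/run/squid/example.ipc", sizeof(addr.sun_path));
 *
 * const int fd = comm_open_uds(SOCK_DGRAM, 0, &addr, COMM_NONBLOCKING | COMM_DOBIND);
 * if (fd < 0)
 *     debugs(5, DBG_IMPORTANT, "could not open example UDS socket");
 * \endcode
 */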
1963 
1964