1 /*
2 * Copyright (C) 1996-2021 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 33 Client-side Routines */
10
11 /**
12 \defgroup ClientSide Client-Side Logics
13 *
14 \section cserrors Errors and client side
15 *
16 \par Problem the first:
17 * the store entry is no longer authoritative on the
18 * reply status. EBITTEST (E_ABORT) is no longer a valid test outside
19 * of client_side_reply.c.
20 * Problem the second: resources are wasted if we delay in cleaning up.
21 * Problem the third: we can't depend on a connection close to clean up.
22 *
23 \par Nice thing the first:
24 * Any step in the stream can callback with data
25 * representing an error.
26 * Nice thing the second: once you stop requesting reads from upstream,
27 * upstream can be stopped too.
28 *
29 \par Solution #1:
30 * Error has a callback mechanism to hand over a membuf
31 * with the error content. The failing node pushes that back as the
32 * reply. Can this be generalised to reduce duplicate efforts?
33 * A: Possibly. For now, only one location uses this.
34 * How to deal with pre-stream errors?
35 * Tell client_side_reply that we *want* an error page before any
36 * stream calls occur. Then we simply read as normal.
37 *
38 *
39 \section pconn_logic Persistent connection logic:
40 *
41 \par
42 * requests (httpClientRequest structs) get added to the connection
43 * list, with the current one being chr
44 *
45 \par
46 * The request is *immediately* kicked off, and data flows through
47 * to clientSocketRecipient.
48 *
49 \par
50 * If the data that arrives at clientSocketRecipient is not for the current
51 * request, clientSocketRecipient simply returns, without requesting more
52 * data, or sending it.
53 *
54 \par
55 * ConnStateData::kick() will then detect the presence of data in
56 * the next ClientHttpRequest, and will send it, re-establishing the
57 * data flow.
58 */
59
60 #include "squid.h"
61 #include "acl/FilledChecklist.h"
62 #include "anyp/PortCfg.h"
63 #include "base/Subscription.h"
64 #include "base/TextException.h"
65 #include "CachePeer.h"
66 #include "client_db.h"
67 #include "client_side.h"
68 #include "client_side_reply.h"
69 #include "client_side_request.h"
70 #include "ClientRequestContext.h"
71 #include "clientStream.h"
72 #include "comm.h"
73 #include "comm/Connection.h"
74 #include "comm/Loops.h"
75 #include "comm/Read.h"
76 #include "comm/TcpAcceptor.h"
77 #include "comm/Write.h"
78 #include "CommCalls.h"
79 #include "errorpage.h"
80 #include "fd.h"
81 #include "fde.h"
82 #include "fqdncache.h"
83 #include "FwdState.h"
84 #include "globals.h"
85 #include "helper.h"
86 #include "helper/Reply.h"
87 #include "http.h"
88 #include "http/one/RequestParser.h"
89 #include "http/one/TeChunkedParser.h"
90 #include "http/Stream.h"
91 #include "HttpHdrContRange.h"
92 #include "HttpHeaderTools.h"
93 #include "HttpReply.h"
94 #include "HttpRequest.h"
95 #include "ident/Config.h"
96 #include "ident/Ident.h"
97 #include "internal.h"
98 #include "ipc/FdNotes.h"
99 #include "ipc/StartListening.h"
100 #include "log/access_log.h"
101 #include "MemBuf.h"
102 #include "MemObject.h"
103 #include "mime_header.h"
104 #include "parser/Tokenizer.h"
105 #include "profiler/Profiler.h"
106 #include "security/NegotiationHistory.h"
107 #include "servers/forward.h"
108 #include "SquidConfig.h"
109 #include "SquidTime.h"
110 #include "StatCounters.h"
111 #include "StatHist.h"
112 #include "Store.h"
113 #include "TimeOrTag.h"
114 #include "tools.h"
115
116 #if USE_AUTH
117 #include "auth/UserRequest.h"
118 #endif
119 #if USE_DELAY_POOLS
120 #include "ClientInfo.h"
121 #endif
122 #if USE_OPENSSL
123 #include "ssl/bio.h"
124 #include "ssl/context_storage.h"
125 #include "ssl/gadgets.h"
126 #include "ssl/helper.h"
127 #include "ssl/ProxyCerts.h"
128 #include "ssl/ServerBump.h"
129 #include "ssl/support.h"
130 #endif
131
132 // for tvSubUsec() which should be in SquidTime.h
133 #include "util.h"
134
135 #include <climits>
136 #include <cmath>
137 #include <limits>
138
139 #if HAVE_SYSTEMD_SD_DAEMON_H
140 #include <systemd/sd-daemon.h>
141 #endif
142
143 #if LINGERING_CLOSE
144 #define comm_close comm_lingering_close
145 #endif
146
/// dials clientListenerConnectionOpened call
class ListeningStartedDialer: public CallDialer, public Ipc::StartListeningCb
{
public:
    typedef void (*Handler)(AnyP::PortCfgPointer &portCfg, const Ipc::FdNoteId note, const Subscription::Pointer &sub);
    ListeningStartedDialer(Handler aHandler, AnyP::PortCfgPointer &aPortCfg, const Ipc::FdNoteId note, const Subscription::Pointer &aSub):
        handler(aHandler), portCfg(aPortCfg), portTypeNote(note), sub(aSub) {}

    /* CallDialer API */
    virtual void print(std::ostream &os) const {
        startPrint(os) <<
        ", " << FdNote(portTypeNote) << " port=" << (void*)&portCfg << ')';
    }

    virtual bool canDial(AsyncCall &) const { return true; }
    virtual void dial(AsyncCall &) { (handler)(portCfg, portTypeNote, sub); }

public:
    Handler handler; ///< the function to call when the listener is ready

private:
    AnyP::PortCfgPointer portCfg;   ///< from HttpPortList
    Ipc::FdNoteId portTypeNote;    ///< Type of IPC socket being opened
    Subscription::Pointer sub; ///< The handler to be subscribed for this connection listener
};
171
172 static void clientListenerConnectionOpened(AnyP::PortCfgPointer &s, const Ipc::FdNoteId portTypeNote, const Subscription::Pointer &sub);
173
174 static IOACB httpAccept;
175 static CTCB clientLifetimeTimeout;
176 #if USE_IDENT
177 static IDCB clientIdentDone;
178 #endif
179 static int clientIsContentLengthValid(HttpRequest * r);
180 static int clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength);
181
182 static void clientUpdateStatHistCounters(const LogTags &logType, int svc_time);
183 static void clientUpdateStatCounters(const LogTags &logType);
184 static void clientUpdateHierCounters(HierarchyLogEntry *);
185 static bool clientPingHasFinished(ping_data const *aPing);
186 void prepareLogWithRequestDetails(HttpRequest *, AccessLogEntry::Pointer &);
187 static void ClientSocketContextPushDeferredIfNeeded(Http::StreamPointer deferredRequest, ConnStateData * conn);
188
189 char *skipLeadingSpace(char *aString);
190
#if USE_IDENT
/// Ident lookup callback: records the ident result (or a dash when the
/// lookup produced nothing) as the RFC 931 user name of the connection.
static void
clientIdentDone(const char *ident, void *data)
{
    auto *conn = static_cast<ConnStateData *>(data);
    const char *user = ident ? ident : dash_str;
    xstrncpy(conn->clientConnection->rfc931, user, USER_IDENT_SZ);
}
#endif
199
200 void
clientUpdateStatCounters(const LogTags & logType)201 clientUpdateStatCounters(const LogTags &logType)
202 {
203 ++statCounter.client_http.requests;
204
205 if (logType.isTcpHit())
206 ++statCounter.client_http.hits;
207
208 if (logType.oldType == LOG_TCP_HIT)
209 ++statCounter.client_http.disk_hits;
210 else if (logType.oldType == LOG_TCP_MEM_HIT)
211 ++statCounter.client_http.mem_hits;
212 }
213
214 void
clientUpdateStatHistCounters(const LogTags & logType,int svc_time)215 clientUpdateStatHistCounters(const LogTags &logType, int svc_time)
216 {
217 statCounter.client_http.allSvcTime.count(svc_time);
218 /**
219 * The idea here is not to be complete, but to get service times
220 * for only well-defined types. For example, we don't include
221 * LOG_TCP_REFRESH_FAIL because its not really a cache hit
222 * (we *tried* to validate it, but failed).
223 */
224
225 switch (logType.oldType) {
226
227 case LOG_TCP_REFRESH_UNMODIFIED:
228 statCounter.client_http.nearHitSvcTime.count(svc_time);
229 break;
230
231 case LOG_TCP_INM_HIT:
232 case LOG_TCP_IMS_HIT:
233 statCounter.client_http.nearMissSvcTime.count(svc_time);
234 break;
235
236 case LOG_TCP_HIT:
237
238 case LOG_TCP_MEM_HIT:
239
240 case LOG_TCP_OFFLINE_HIT:
241 statCounter.client_http.hitSvcTime.count(svc_time);
242 break;
243
244 case LOG_TCP_MISS:
245
246 case LOG_TCP_CLIENT_REFRESH_MISS:
247 statCounter.client_http.missSvcTime.count(svc_time);
248 break;
249
250 default:
251 /* make compiler warnings go away */
252 break;
253 }
254 }
255
256 bool
clientPingHasFinished(ping_data const * aPing)257 clientPingHasFinished(ping_data const *aPing)
258 {
259 if (0 != aPing->stop.tv_sec && 0 != aPing->start.tv_sec)
260 return true;
261
262 return false;
263 }
264
265 void
clientUpdateHierCounters(HierarchyLogEntry * someEntry)266 clientUpdateHierCounters(HierarchyLogEntry * someEntry)
267 {
268 ping_data *i;
269
270 switch (someEntry->code) {
271 #if USE_CACHE_DIGESTS
272
273 case CD_PARENT_HIT:
274
275 case CD_SIBLING_HIT:
276 ++ statCounter.cd.times_used;
277 break;
278 #endif
279
280 case SIBLING_HIT:
281
282 case PARENT_HIT:
283
284 case FIRST_PARENT_MISS:
285
286 case CLOSEST_PARENT_MISS:
287 ++ statCounter.icp.times_used;
288 i = &someEntry->ping;
289
290 if (clientPingHasFinished(i))
291 statCounter.icp.querySvcTime.count(tvSubUsec(i->start, i->stop));
292
293 if (i->timeout)
294 ++ statCounter.icp.query_timeouts;
295
296 break;
297
298 case CLOSEST_PARENT:
299
300 case CLOSEST_DIRECT:
301 ++ statCounter.netdb.times_used;
302
303 break;
304
305 default:
306 break;
307 }
308 }
309
310 void
updateCounters()311 ClientHttpRequest::updateCounters()
312 {
313 clientUpdateStatCounters(logType);
314
315 if (request->errType != ERR_NONE)
316 ++ statCounter.client_http.errors;
317
318 clientUpdateStatHistCounters(logType,
319 tvSubMsec(al->cache.start_time, current_time));
320
321 clientUpdateHierCounters(&request->hier);
322 }
323
/// copies request-derived details (headers, method, version, hierarchy,
/// adaptation history) into the given access log entry before logging
void
prepareLogWithRequestDetails(HttpRequest * request, AccessLogEntry::Pointer &aLogEntry)
{
    assert(request);
    assert(aLogEntry != NULL);

    // optionally archive the raw HTTP header images for mime-header logging
    if (Config.onoff.log_mime_hdrs) {
        MemBuf mb;
        mb.init();
        request->header.packInto(&mb);
        //This is the request after adaptation or redirection
        aLogEntry->headers.adapted_request = xstrdup(mb.buf);

        // the virgin request is saved to aLogEntry->request
        if (aLogEntry->request) {
            mb.reset();
            aLogEntry->request->header.packInto(&mb);
            aLogEntry->headers.request = xstrdup(mb.buf);
        }

#if USE_ADAPTATION
        // also archive the last adaptation meta headers, when present
        const Adaptation::History::Pointer ah = request->adaptLogHistory();
        if (ah != NULL) {
            mb.reset();
            ah->lastMeta.packInto(&mb);
            aLogEntry->adapt.last_meta = xstrdup(mb.buf);
        }
#endif

        mb.clean();
    }

#if ICAP_CLIENT
    const Adaptation::Icap::History::Pointer ih = request->icapHistory();
    if (ih != NULL)
        ih->processingTime(aLogEntry->icap.processingTime);
#endif

    aLogEntry->http.method = request->method;
    aLogEntry->http.version = request->http_ver;
    aLogEntry->hier = request->hier;
    aLogEntry->cache.extuser = request->extacl_user.termedBuf();

    // Adapted request, if any, inherits and then collects all the stats, but
    // the virgin request gets logged instead; copy the stats to log them.
    // TODO: avoid losses by keeping these stats in a shared history object?
    if (aLogEntry->request) {
        aLogEntry->request->dnsWait = request->dnsWait;
        aLogEntry->request->errType = request->errType;
        aLogEntry->request->errDetail = request->errDetail;
    }
}
376
/// records this finished transaction in the access log and, unless denied
/// by the stats_collection ACLs, updates the performance counters
void
ClientHttpRequest::logRequest()
{
    // no bytes sent and no log tag: the transaction never produced a reply
    if (!out.size && logType.oldType == LOG_TAG_NONE)
        debugs(33, 5, "logging half-baked transaction: " << log_uri);

    al->icp.opcode = ICP_INVALID;
    al->url = log_uri;
    debugs(33, 9, "clientLogRequest: al.url='" << al->url << "'");

    // prefer the stored reply details; fall back to the logging entry copy
    if (al->reply) {
        al->http.code = al->reply->sline.status();
        al->http.content_type = al->reply->content_type.termedBuf();
    } else if (loggingEntry() && loggingEntry()->mem_obj) {
        al->http.code = loggingEntry()->mem_obj->getReply()->sline.status();
        al->http.content_type = loggingEntry()->mem_obj->getReply()->content_type.termedBuf();
    }

    debugs(33, 9, "clientLogRequest: http.code='" << al->http.code << "'");

    if (loggingEntry() && loggingEntry()->mem_obj && loggingEntry()->objectLen() >= 0)
        al->cache.objectSize = loggingEntry()->contentLen(); // payload duplicate ?? with or without TE ?

    al->http.clientRequestSz.header = req_sz;
    // the virgin request is saved to al->request
    if (al->request && al->request->body_pipe)
        al->http.clientRequestSz.payloadData = al->request->body_pipe->producedSize();
    al->http.clientReplySz.header = out.headers_sz;
    // XXX: calculate without payload encoding or headers !!
    al->http.clientReplySz.payloadData = out.size - out.headers_sz; // pretend its all un-encoded data for now.

    al->cache.highOffset = out.offset;

    al->cache.code = logType;

    // transaction response time
    tvSub(al->cache.trTime, al->cache.start_time, current_time);

    if (request)
        prepareLogWithRequestDetails(request, al);

#if USE_OPENSSL && 0

    /* This is broken. Fails if the connection has been closed. Needs
     * to snarf the ssl details some place earlier..
     */
    if (getConn() != NULL)
        al->cache.ssluser = sslGetUserEmail(fd_table[getConn()->fd].ssl);

#endif

    /* Add notes (if we have a request to annotate) */
    if (request) {
        // The al->notes and request->notes must point to the same object.
        (void)SyncNotes(*al, *request);
        // evaluate each configured annotation and add the ones that match
        for (auto i = Config.notes.begin(); i != Config.notes.end(); ++i) {
            if (const char *value = (*i)->match(request, al->reply, al)) {
                NotePairs &notes = SyncNotes(*al, *request);
                notes.add((*i)->key.termedBuf(), value);
                debugs(33, 3, (*i)->key.termedBuf() << " " << value);
            }
        }
    }

    // build the checklist used for access_log line formatting/filtering
    ACLFilledChecklist checklist(NULL, request, NULL);
    if (al->reply) {
        checklist.reply = al->reply;
        HTTPMSGLOCK(checklist.reply);
    }

    if (request) {
        HTTPMSGUNLOCK(al->adapted_request);
        al->adapted_request = request;
        HTTPMSGLOCK(al->adapted_request);
    }
    // no need checklist.syncAle(): already synced
    checklist.al = al;
    accessLogLog(al, &checklist);

    // stats_collection ACLs may exclude this transaction from the counters
    bool updatePerformanceCounters = true;
    if (Config.accessList.stats_collection) {
        ACLFilledChecklist statsCheck(Config.accessList.stats_collection, request, NULL);
        statsCheck.al = al;
        if (al->reply) {
            statsCheck.reply = al->reply;
            HTTPMSGLOCK(statsCheck.reply);
        }
        updatePerformanceCounters = statsCheck.fastCheck().allowed();
    }

    if (updatePerformanceCounters) {
        if (request)
            updateCounters();

        if (getConn() != NULL && getConn()->clientConnection != NULL)
            clientdbUpdate(getConn()->clientConnection->remote, logType, AnyP::PROTO_HTTP, out.size);
    }
}
474
475 void
freeResources()476 ClientHttpRequest::freeResources()
477 {
478 safe_free(uri);
479 safe_free(redirect.location);
480 range_iter.boundary.clean();
481 clearRequest();
482
483 if (client_stream.tail)
484 clientStreamAbort((clientStreamNode *)client_stream.tail->data, this);
485 }
486
487 void
httpRequestFree(void * data)488 httpRequestFree(void *data)
489 {
490 ClientHttpRequest *http = (ClientHttpRequest *)data;
491 assert(http != NULL);
492 delete http;
493 }
494
/* This is a handler normally called by comm_close() */
void ConnStateData::connStateClosed(const CommCloseCbParams &)
{
    // the client socket is gone: schedule our own (job-protected) deletion
    deleteThis("ConnStateData::connStateClosed");
}
500
#if USE_AUTH
/// Sets (or clears) the connection-level authentication credentials.
/// Changing or erasing previously established credentials is treated as a
/// security problem and terminates the connection (see the rationale below).
/// \param aur the new credentials, or NULL to erase the current ones
/// \param by  short description of the caller, used in log messages
void
ConnStateData::setAuth(const Auth::UserRequest::Pointer &aur, const char *by)
{
    // first credentials on this connection: just record them
    if (auth_ == NULL) {
        if (aur != NULL) {
            debugs(33, 2, "Adding connection-auth to " << clientConnection << " from " << by);
            auth_ = aur;
        }
        return;
    }

    // clobbered with self-pointer
    // NP: something nasty is going on in Squid, but harmless.
    if (aur == auth_) {
        debugs(33, 2, "WARNING: Ignoring duplicate connection-auth for " << clientConnection << " from " << by);
        return;
    }

    /*
     * Connection-auth relies on a single set of credentials being preserved
     * for all requests on a connection once they have been setup.
     * There are several things which need to happen to preserve security
     * when connection-auth credentials change unexpectedly or are unset.
     *
     * 1) auth helper released from any active state
     *
     *    They can only be reserved by a handshake process which this
     *    connection can now never complete.
     *    This prevents helpers hanging when their connections close.
     *
     * 2) pinning is expected to be removed and server conn closed
     *
     *    The upstream link is authenticated with the same credentials.
     *    Expecting the same level of consistency we should have received.
     *    This prevents upstream being faced with multiple or missing
     *    credentials after authentication.
     *    NP: un-pin is left to the cleanup in ConnStateData::swanSong()
     *        we just trigger that cleanup here via comm_reset_close() or
     *        ConnStateData::stopReceiving()
     *
     * 3) the connection needs to close.
     *
     *    This prevents attackers injecting requests into a connection,
     *    or gateways wrongly multiplexing users into a single connection.
     *
     *    When credentials are missing closure needs to follow an auth
     *    challenge for best recovery by the client.
     *
     *    When credentials change there is nothing we can do but abort as
     *    fast as possible. Sending TCP RST instead of an HTTP response
     *    is the best-case action.
     */

    // clobbered with nul-pointer
    if (aur == NULL) {
        debugs(33, 2, "WARNING: Graceful closure on " << clientConnection << " due to connection-auth erase from " << by);
        auth_->releaseAuthServer();
        auth_ = NULL;
        // XXX: need to test whether the connection re-auth challenge is sent. If not, how to trigger it from here.
        // NP: the current situation seems to fix challenge loops in Safari without visible issues in others.
        // we stop receiving more traffic but can leave the Job running to terminate after the error or challenge is delivered.
        stopReceiving("connection-auth removed");
        return;
    }

    // clobbered with alternative credentials
    if (aur != auth_) {
        debugs(33, 2, "ERROR: Closing " << clientConnection << " due to change of connection-auth from " << by);
        auth_->releaseAuthServer();
        auth_ = NULL;
        // this is a fatal type of problem.
        // Close the connection immediately with TCP RST to abort all traffic flow
        comm_reset_close(clientConnection);
        return;
    }

    /* NOT REACHABLE */
}
#endif
581
// cleans up before destructor is called
void
ConnStateData::swanSong()
{
    debugs(33, 2, HERE << clientConnection);

    // log any transaction that would otherwise escape the access log
    checkLogging();

    flags.readMore = false;
    clientdbEstablished(clientConnection->remote, -1); /* decrement */

    // abort all queued/in-progress requests on this connection
    pipeline.terminateAll(0);

    // XXX: Closing pinned conn is too harsh: The Client may want to continue!
    unpinConnection(true);

    Server::swanSong(); // closes the client connection

#if USE_AUTH
    // NP: do this bit after closing the connections to avoid side effects from unwanted TCP RST
    setAuth(NULL, "ConnStateData::SwanSong cleanup");
#endif

    // the destructor checks this flag to detect improper destruction
    flags.swanSang = true;
}
605
606 bool
isOpen() const607 ConnStateData::isOpen() const
608 {
609 return cbdataReferenceValid(this) && // XXX: checking "this" in a method
610 Comm::IsConnOpen(clientConnection) &&
611 !fd_table[clientConnection->fd].closing();
612 }
613
~ConnStateData()614 ConnStateData::~ConnStateData()
615 {
616 debugs(33, 3, HERE << clientConnection);
617
618 if (isOpen())
619 debugs(33, DBG_IMPORTANT, "BUG: ConnStateData did not close " << clientConnection);
620
621 if (!flags.swanSang)
622 debugs(33, DBG_IMPORTANT, "BUG: ConnStateData was not destroyed properly; " << clientConnection);
623
624 if (bodyPipe != NULL)
625 stopProducingFor(bodyPipe, false);
626
627 delete bodyParser; // TODO: pool
628
629 #if USE_OPENSSL
630 delete sslServerBump;
631 #endif
632 }
633
634 /**
635 * clientSetKeepaliveFlag() sets request->flags.proxyKeepalive.
636 * This is the client-side persistent connection flag. We need
637 * to set this relatively early in the request processing
638 * to handle hacks for broken servers and clients.
639 */
640 void
clientSetKeepaliveFlag(ClientHttpRequest * http)641 clientSetKeepaliveFlag(ClientHttpRequest * http)
642 {
643 HttpRequest *request = http->request;
644
645 debugs(33, 3, "http_ver = " << request->http_ver);
646 debugs(33, 3, "method = " << request->method);
647
648 // TODO: move to HttpRequest::hdrCacheInit, just like HttpReply.
649 request->flags.proxyKeepalive = request->persistent();
650 }
651
652 /// checks body length of non-chunked requests
653 static int
clientIsContentLengthValid(HttpRequest * r)654 clientIsContentLengthValid(HttpRequest * r)
655 {
656 // No Content-Length means this request just has no body, but conflicting
657 // Content-Lengths mean a message framing error (RFC 7230 Section 3.3.3 #4).
658 if (r->header.conflictingContentLength())
659 return 0;
660
661 switch (r->method.id()) {
662
663 case Http::METHOD_GET:
664
665 case Http::METHOD_HEAD:
666 /* We do not want to see a request entity on GET/HEAD requests */
667 return (r->content_length <= 0 || Config.onoff.request_entities);
668
669 default:
670 /* For other types of requests we don't care */
671 return 1;
672 }
673
674 /* NOT REACHED */
675 }
676
677 int
clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength)678 clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength)
679 {
680 if (Config.maxRequestBodySize &&
681 bodyLength > Config.maxRequestBodySize)
682 return 1; /* too large */
683
684 return 0;
685 }
686
/// whether this transaction requires a multipart (multi-range) reply;
/// delegates to the underlying HttpRequest
bool
ClientHttpRequest::multipartRangeRequest() const
{
    return request->multipartRangeRequest();
}
692
/// appends the final (terminating) multipart boundary line to mb
void
clientPackTermBound(String boundary, MemBuf *mb)
{
    // the closing delimiter carries a trailing "--"
    mb->appendf("\r\n--" SQUIDSTRINGPH "--\r\n", SQUIDSTRINGPRINT(boundary));
    debugs(33, 6, "buf offset: " << mb->size);
}
699
/// packs the boundary line and per-part headers for one range of a
/// multipart reply into mb
void
clientPackRangeHdr(const HttpReply * rep, const HttpHdrRangeSpec * spec, String boundary, MemBuf * mb)
{
    HttpHeader hdr(hoReply);
    assert(rep);
    assert(spec);

    /* put boundary */
    debugs(33, 5, "appending boundary: " << boundary);
    /* rfc2046 requires to _prepend_ boundary with <crlf>! */
    mb->appendf("\r\n--" SQUIDSTRINGPH "\r\n", SQUIDSTRINGPRINT(boundary));

    /* stuff the header with required entries and pack it */

    // each part repeats the Content-Type of the whole reply, if known
    if (rep->header.has(Http::HdrType::CONTENT_TYPE))
        hdr.putStr(Http::HdrType::CONTENT_TYPE, rep->header.getStr(Http::HdrType::CONTENT_TYPE));

    // Content-Range identifies which byte range this part covers
    httpHeaderAddContRange(&hdr, *spec, rep->content_length);

    hdr.packInto(mb);
    hdr.clean();

    /* append <crlf> (we packed a header, not a reply) */
    mb->append("\r\n", 2);
}
725
726 /** returns expected content length for multi-range replies
727 * note: assumes that httpHdrRangeCanonize has already been called
728 * warning: assumes that HTTP headers for individual ranges at the
729 * time of the actual assembly will be exactly the same as
730 * the headers when clientMRangeCLen() is called */
731 int64_t
mRangeCLen() const732 ClientHttpRequest::mRangeCLen() const
733 {
734 int64_t clen = 0;
735 MemBuf mb;
736
737 assert(memObject());
738
739 mb.init();
740 HttpHdrRange::iterator pos = request->range->begin();
741
742 while (pos != request->range->end()) {
743 /* account for headers for this range */
744 mb.reset();
745 clientPackRangeHdr(memObject()->getReply(),
746 *pos, range_iter.boundary, &mb);
747 clen += mb.size;
748
749 /* account for range content */
750 clen += (*pos)->length;
751
752 debugs(33, 6, "clientMRangeCLen: (clen += " << mb.size << " + " << (*pos)->length << ") == " << clen);
753 ++pos;
754 }
755
756 /* account for the terminating boundary */
757 mb.reset();
758
759 clientPackTermBound(range_iter.boundary, &mb);
760
761 clen += mb.size;
762
763 mb.clean();
764
765 return clen;
766 }
767
768 /**
769 * generates a "unique" boundary string for multipart responses
770 * the caller is responsible for cleaning the string */
771 String
rangeBoundaryStr() const772 ClientHttpRequest::rangeBoundaryStr() const
773 {
774 const char *key;
775 String b(APP_FULLNAME);
776 b.append(":",1);
777 key = storeEntry()->getMD5Text();
778 b.append(key, strlen(key));
779 return b;
780 }
781
782 /**
783 * Write a chunk of data to a client socket. If the reply is present,
784 * send the reply headers down the wire too, and clean them up when
785 * finished.
786 * Pre-condition:
787 * The request is one backed by a connection, not an internal request.
788 * data context is not NULL
789 * There are no more entries in the stream chain.
790 */
void
clientSocketRecipient(clientStreamNode * node, ClientHttpRequest * http,
                      HttpReply * rep, StoreIOBuffer receivedData)
{
    // do not try to deliver if client already ABORTED
    if (!http->getConn() || !cbdataReferenceValid(http->getConn()) || !Comm::IsConnOpen(http->getConn()->clientConnection))
        return;

    /* Test preconditions */
    assert(node != NULL);
    PROF_start(clientSocketRecipient);
    /* TODO: handle this rather than asserting
     * - it should only ever happen if we cause an abort and
     * the callback chain loops back to here, so we can simply return.
     * However, that itself shouldn't happen, so it stays as an assert for now.
     */
    assert(cbdataReferenceValid(node));
    assert(node->node.next == NULL);
    Http::StreamPointer context = dynamic_cast<Http::Stream *>(node->data.getRaw());
    assert(context != NULL);

    /* TODO: check offset is what we asked for */

    // deliver only when this stream is first in line; otherwise park the
    // response until ConnStateData::kick() gets to it
    // TODO: enforces HTTP/1 MUST on pipeline order, but is irrelevant to HTTP/2
    if (context != http->getConn()->pipeline.front())
        context->deferRecipientForLater(node, rep, receivedData);
    else if (http->getConn()->cbControlMsgSent) // 1xx to the user is pending
        context->deferRecipientForLater(node, rep, receivedData);
    else
        http->getConn()->handleReply(rep, receivedData);

    PROF_stop(clientSocketRecipient);
}
824
825 /**
826 * Called when a downstream node is no longer interested in
827 * our data. As we are a terminal node, this means on aborts
828 * only
829 */
void
clientSocketDetach(clientStreamNode * node, ClientHttpRequest * http)
{
    /* Test preconditions */
    assert(node != NULL);
    /* TODO: handle this rather than asserting
     * - it should only ever happen if we cause an abort and
     * the callback chain loops back to here, so we can simply return.
     * However, that itself shouldn't happen, so it stays as an assert for now.
     */
    assert(cbdataReferenceValid(node));
    /* Set null by ContextFree */
    assert(node->node.next == NULL);
    /* this is the assert discussed above */
    assert(NULL == dynamic_cast<Http::Stream *>(node->data.getRaw()));
    /* We are only called when the client socket shutsdown.
     * Tell the prev pipeline member we're finished
     */
    clientStreamDetach(node, http);
}
850
/// arms the idle-connection timeout and resumes reading, so the next
/// request on this persistent connection can be received
void
ConnStateData::readNextRequest()
{
    debugs(33, 5, HERE << clientConnection << " reading next req");

    fd_note(clientConnection->fd, "Idle client: Waiting for next request");
    /**
     * Set the timeout BEFORE calling readSomeData().
     */
    typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
    AsyncCall::Pointer timeoutCall = JobCallback(33, 5,
                                     TimeoutDialer, this, ConnStateData::requestTimeout);
    commSetConnTimeout(clientConnection, clientConnection->timeLeft(idleTimeout()), timeoutCall);

    readSomeData();
    /** Please don't do anything with the FD past here! */
}
868
/// re-delivers a response that clientSocketRecipient() parked earlier
/// because its stream was not at the front of the pipeline
static void
ClientSocketContextPushDeferredIfNeeded(Http::StreamPointer deferredRequest, ConnStateData * conn)
{
    debugs(33, 2, HERE << conn->clientConnection << " Sending next");

    /** If the client stream is waiting on a socket write to occur, then */

    if (deferredRequest->flags.deferred) {
        /** NO data is allowed to have been sent. */
        assert(deferredRequest->http->out.size == 0);
        /** defer now. */
        clientSocketRecipient(deferredRequest->deferredparams.node,
                              deferredRequest->http,
                              deferredRequest->deferredparams.rep,
                              deferredRequest->deferredparams.queuedBuffer);
    }

    /** otherwise, the request is still active in a callback somewhere,
     * and we are done
     */
}
890
/// Resumes request processing after the previous response completed:
/// parses buffered pipelined requests, pushes a deferred response, or
/// starts reading again — closing the connection when it cannot continue.
void
ConnStateData::kick()
{
    if (!Comm::IsConnOpen(clientConnection)) {
        debugs(33, 2, clientConnection << " Connection was closed");
        return;
    }

    // a pinned connection is useless once its server side is gone
    if (pinning.pinned && !Comm::IsConnOpen(pinning.serverConnection)) {
        debugs(33, 2, clientConnection << " Connection was pinned but server side gone. Terminating client connection");
        clientConnection->close();
        return;
    }

    /** \par
     * We are done with the response, and we are either still receiving request
     * body (early response!) or have already stopped receiving anything.
     *
     * If we are still receiving, then clientParseRequest() below will fail.
     * (XXX: but then we will call readNextRequest() which may succeed and
     * execute a smuggled request as we are not done with the current request).
     *
     * If we stopped because we got everything, then try the next request.
     *
     * If we stopped receiving because of an error, then close now to avoid
     * getting stuck and to prevent accidental request smuggling.
     */

    if (const char *reason = stoppedReceiving()) {
        debugs(33, 3, "closing for earlier request error: " << reason);
        clientConnection->close();
        return;
    }

    /** \par
     * Attempt to parse a request from the request buffer.
     * If we've been fed a pipelined request it may already
     * be in our read buffer.
     *
     \par
     * This needs to fall through - if we're unlucky and parse the _last_ request
     * from our read buffer we may never re-register for another client read.
     */

    if (clientParseRequests()) {
        debugs(33, 3, clientConnection << ": parsed next request from buffer");
    }

    /** \par
     * Either we need to kick-start another read or, if we have
     * a half-closed connection, kill it after the last request.
     * This saves waiting for half-closed connections to finish being
     * half-closed _AND_ then, sometimes, spending "Timeout" time in
     * the keepalive "Waiting for next request" state.
     */
    if (commIsHalfClosed(clientConnection->fd) && pipeline.empty()) {
        debugs(33, 3, "half-closed client with no pending requests, closing");
        clientConnection->close();
        return;
    }

    /** \par
     * At this point we either have a parsed request (which we've
     * kicked off the processing for) or not. If we have a deferred
     * request (parsed but deferred for pipelining processing reasons)
     * then look at processing it. If not, simply kickstart
     * another read.
     */
    Http::StreamPointer deferredRequest = pipeline.front();
    if (deferredRequest != nullptr) {
        debugs(33, 3, clientConnection << ": calling PushDeferredIfNeeded");
        ClientSocketContextPushDeferredIfNeeded(deferredRequest, this);
    } else if (flags.readMore) {
        debugs(33, 3, clientConnection << ": calling readNextRequest()");
        readNextRequest();
    } else {
        // XXX: Can this happen? CONNECT tunnels have deferredRequest set.
        debugs(33, DBG_IMPORTANT, MYNAME << "abandoning " << clientConnection);
    }
}
971
972 void
stopSending(const char * error)973 ConnStateData::stopSending(const char *error)
974 {
975 debugs(33, 4, HERE << "sending error (" << clientConnection << "): " << error <<
976 "; old receiving error: " <<
977 (stoppedReceiving() ? stoppedReceiving_ : "none"));
978
979 if (const char *oldError = stoppedSending()) {
980 debugs(33, 3, HERE << "already stopped sending: " << oldError);
981 return; // nothing has changed as far as this connection is concerned
982 }
983 stoppedSending_ = error;
984
985 if (!stoppedReceiving()) {
986 if (const int64_t expecting = mayNeedToReadMoreBody()) {
987 debugs(33, 5, HERE << "must still read " << expecting <<
988 " request body bytes with " << inBuf.length() << " unused");
989 return; // wait for the request receiver to finish reading
990 }
991 }
992
993 clientConnection->close();
994 }
995
996 void
afterClientWrite(size_t size)997 ConnStateData::afterClientWrite(size_t size)
998 {
999 if (pipeline.empty())
1000 return;
1001
1002 auto ctx = pipeline.front();
1003 if (size) {
1004 statCounter.client_http.kbytes_out += size;
1005 if (ctx->http->logType.isTcpHit())
1006 statCounter.client_http.hit_kbytes_out += size;
1007 }
1008 ctx->writeComplete(size);
1009 }
1010
1011 Http::Stream *
abortRequestParsing(const char * const uri)1012 ConnStateData::abortRequestParsing(const char *const uri)
1013 {
1014 ClientHttpRequest *http = new ClientHttpRequest(this);
1015 http->req_sz = inBuf.length();
1016 http->setErrorUri(uri);
1017 auto *context = new Http::Stream(clientConnection, http);
1018 StoreIOBuffer tempBuffer;
1019 tempBuffer.data = context->reqbuf;
1020 tempBuffer.length = HTTP_REQBUF_SZ;
1021 clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
1022 clientReplyStatus, new clientReplyContext(http), clientSocketRecipient,
1023 clientSocketDetach, context, tempBuffer);
1024 return context;
1025 }
1026
1027 void
startShutdown()1028 ConnStateData::startShutdown()
1029 {
1030 // RegisteredRunner API callback - Squid has been shut down
1031
1032 // if connection is idle terminate it now,
1033 // otherwise wait for grace period to end
1034 if (pipeline.empty())
1035 endingShutdown();
1036 }
1037
1038 void
endingShutdown()1039 ConnStateData::endingShutdown()
1040 {
1041 // RegisteredRunner API callback - Squid shutdown grace period is over
1042
1043 // force the client connection to close immediately
1044 // swanSong() in the close handler will cleanup.
1045 if (Comm::IsConnOpen(clientConnection))
1046 clientConnection->close();
1047 }
1048
1049 char *
skipLeadingSpace(char * aString)1050 skipLeadingSpace(char *aString)
1051 {
1052 char *result = aString;
1053
1054 while (xisspace(*aString))
1055 ++aString;
1056
1057 return result;
1058 }
1059
1060 /**
1061 * 'end' defaults to NULL for backwards compatibility
1062 * remove default value if we ever get rid of NULL-terminated
1063 * request buffers.
1064 */
1065 const char *
findTrailingHTTPVersion(const char * uriAndHTTPVersion,const char * end)1066 findTrailingHTTPVersion(const char *uriAndHTTPVersion, const char *end)
1067 {
1068 if (NULL == end) {
1069 end = uriAndHTTPVersion + strcspn(uriAndHTTPVersion, "\r\n");
1070 assert(end);
1071 }
1072
1073 for (; end > uriAndHTTPVersion; --end) {
1074 if (*end == '\n' || *end == '\r')
1075 continue;
1076
1077 if (xisspace(*end)) {
1078 if (strncasecmp(end + 1, "HTTP/", 5) == 0)
1079 return end + 1;
1080 else
1081 break;
1082 }
1083 }
1084
1085 return NULL;
1086 }
1087
/// Rewrites a reverse-proxy (accelerated) request URI into an absolute URL.
/// Depending on the http(s)_port options, the authority comes from the Host
/// header (vhost), the configured defaultsite, or the local socket address
/// combined with vport.
/// \returns a malloc()ed absolute URL, or nil when the URI needs no rewrite
///          (or when no rewrite rule applies)
static char *
prepareAcceleratedURL(ConnStateData * conn, const Http1::RequestParserPointer &hp)
{
    int vhost = conn->port->vhost;
    int vport = conn->port->vport;
    // NOTE: static buffer makes this function non-reentrant; fine for
    // Squid's single-threaded event loop but do not call recursively.
    static char ipbuf[MAX_IPSTRLEN];

    /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */

    static const SBuf cache_object("cache_object://");
    if (hp->requestUri().startsWith(cache_object))
        return nullptr; /* already in good shape */

    // XXX: re-use proper URL parser for this
    SBuf url = hp->requestUri(); // use full provided URI if we abort
    do { // use a loop so we can break out of it
        ::Parser::Tokenizer tok(url);
        if (tok.skip('/')) // origin-form URL already.
            break;

        // in vhost mode an absolute-form URI is acceptable as-is
        if (conn->port->vhost)
            return nullptr; /* already in good shape */

        // skip the URI scheme
        static const CharacterSet uriScheme = CharacterSet("URI-scheme","+-.") + CharacterSet::ALPHA + CharacterSet::DIGIT;
        static const SBuf uriSchemeEnd("://");
        if (!tok.skipAll(uriScheme) || !tok.skip(uriSchemeEnd))
            break;

        // skip the authority segment
        // RFC 3986 complex nested ABNF for "authority" boils down to this:
        static const CharacterSet authority = CharacterSet("authority","-._~%:@[]!$&'()*+,;=") +
                                              CharacterSet::HEXDIG + CharacterSet::ALPHA + CharacterSet::DIGIT;
        if (!tok.skipAll(authority))
            break;

        // reduce the absolute-form URI to its origin-form path component
        static const SBuf slashUri("/");
        const SBuf t = tok.remaining();
        if (t.isEmpty())
            url = slashUri;
        else if (t[0]=='/') // looks like path
            url = t;
        else if (t[0]=='?' || t[0]=='#') { // looks like query or fragment. fix '/'
            url = slashUri;
            url.append(t);
        } // else do nothing. invalid path

    } while(false);

#if SHOULD_REJECT_UNKNOWN_URLS
    // reject URI which are not well-formed even after the processing above
    if (url.isEmpty() || url[0] != '/') {
        hp->parseStatusCode = Http::scBadRequest;
        return conn->abortRequestParsing("error:invalid-request");
    }
#endif

    // vport < 0 means "use the port the client actually connected to"
    if (vport < 0)
        vport = conn->clientConnection->local.port();

    char *receivedHost = nullptr;
    if (vhost && (receivedHost = hp->getHostHeaderField())) {
        // vhost mode: authority comes from the client's Host header
        SBuf host(receivedHost);
        debugs(33, 5, "ACCEL VHOST REWRITE: vhost=" << host << " + vport=" << vport);
        if (vport > 0) {
            // remove existing :port (if any), cope with IPv6+ without port
            const auto lastColonPos = host.rfind(':');
            if (lastColonPos != SBuf::npos && *host.rbegin() != ']') {
                host.chop(0, lastColonPos); // truncate until the last colon
            }
            host.appendf(":%d", vport);
        } // else nothing to alter port-wise.
        const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
        const auto url_sz = scheme.length() + host.length() + url.length() + 32;
        char *uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://" SQUIDSBUFPH SQUIDSBUFPH, SQUIDSBUFPRINT(scheme), SQUIDSBUFPRINT(host), SQUIDSBUFPRINT(url));
        debugs(33, 5, "ACCEL VHOST REWRITE: " << uri);
        return uri;
    } else if (conn->port->defaultsite /* && !vhost */) {
        // defaultsite mode: authority is the configured default origin
        debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: defaultsite=" << conn->port->defaultsite << " + vport=" << vport);
        char vportStr[32];
        vportStr[0] = '\0';
        if (vport > 0) {
            snprintf(vportStr, sizeof(vportStr),":%d",vport);
        }
        const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
        const int url_sz = scheme.length() + strlen(conn->port->defaultsite) + sizeof(vportStr) + url.length() + 32;
        char *uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://%s%s" SQUIDSBUFPH,
                 SQUIDSBUFPRINT(scheme), conn->port->defaultsite, vportStr, SQUIDSBUFPRINT(url));
        debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: " << uri);
        return uri;
    } else if (vport > 0 /* && (!vhost || no Host:) */) {
        debugs(33, 5, "ACCEL VPORT REWRITE: *_port IP + vport=" << vport);
        /* Put the local socket IP address as the hostname, with whatever vport we found */
        conn->clientConnection->local.toHostStr(ipbuf,MAX_IPSTRLEN);
        const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
        const int url_sz = scheme.length() + sizeof(ipbuf) + url.length() + 32;
        char *uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://%s:%d" SQUIDSBUFPH,
                 SQUIDSBUFPRINT(scheme), ipbuf, vport, SQUIDSBUFPRINT(url));
        debugs(33, 5, "ACCEL VPORT REWRITE: " << uri);
        return uri;
    }

    return nullptr;
}
1195
1196 static char *
buildUrlFromHost(ConnStateData * conn,const Http1::RequestParserPointer & hp)1197 buildUrlFromHost(ConnStateData * conn, const Http1::RequestParserPointer &hp)
1198 {
1199 char *uri = nullptr;
1200 /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */
1201 if (const char *host = hp->getHostHeaderField()) {
1202 const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
1203 const int url_sz = scheme.length() + strlen(host) + hp->requestUri().length() + 32;
1204 uri = static_cast<char *>(xcalloc(url_sz, 1));
1205 snprintf(uri, url_sz, SQUIDSBUFPH "://%s" SQUIDSBUFPH,
1206 SQUIDSBUFPRINT(scheme),
1207 host,
1208 SQUIDSBUFPRINT(hp->requestUri()));
1209 }
1210 return uri;
1211 }
1212
/// Rewrites an origin-form request URI received inside a bumped CONNECT
/// tunnel into an absolute URL, using the Host header when present and
/// falling back (OpenSSL builds) to the SNI or the original CONNECT target.
/// \returns a malloc()ed absolute URL or nil when the URI needs no rewrite
char *
ConnStateData::prepareTlsSwitchingURL(const Http1::RequestParserPointer &hp)
{
    Must(switchedToHttps());

    // absolute-form URIs are already usable as-is
    if (!hp->requestUri().isEmpty() && hp->requestUri()[0] != '/')
        return nullptr; /* already in good shape */

    char *uri = buildUrlFromHost(this, hp);
#if USE_OPENSSL
    if (!uri) {
        // no Host header; reconstruct the authority from TLS/CONNECT state
        Must(tlsConnectPort);
        Must(!tlsConnectHostOrIp.isEmpty());
        SBuf useHost;
        if (!tlsClientSni().isEmpty())
            useHost = tlsClientSni();
        else
            useHost = tlsConnectHostOrIp;

        const SBuf &scheme = AnyP::UriScheme(transferProtocol.protocol).image();
        const int url_sz = scheme.length() + useHost.length() + hp->requestUri().length() + 32;
        uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://" SQUIDSBUFPH ":%d" SQUIDSBUFPH,
                 SQUIDSBUFPRINT(scheme),
                 SQUIDSBUFPRINT(useHost),
                 tlsConnectPort,
                 SQUIDSBUFPRINT(hp->requestUri()));
    }
#endif
    if (uri)
        debugs(33, 5, "TLS switching host rewrite: " << uri);
    return uri;
}
1246
1247 static char *
prepareTransparentURL(ConnStateData * conn,const Http1::RequestParserPointer & hp)1248 prepareTransparentURL(ConnStateData * conn, const Http1::RequestParserPointer &hp)
1249 {
1250 // TODO Must() on URI !empty when the parser supports throw. For now avoid assert().
1251 if (!hp->requestUri().isEmpty() && hp->requestUri()[0] != '/')
1252 return nullptr; /* already in good shape */
1253
1254 char *uri = buildUrlFromHost(conn, hp);
1255 if (!uri) {
1256 /* Put the local socket IP address as the hostname. */
1257 static char ipbuf[MAX_IPSTRLEN];
1258 conn->clientConnection->local.toHostStr(ipbuf,MAX_IPSTRLEN);
1259 const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
1260 const int url_sz = sizeof(ipbuf) + hp->requestUri().length() + 32;
1261 uri = static_cast<char *>(xcalloc(url_sz, 1));
1262 snprintf(uri, url_sz, SQUIDSBUFPH "://%s:%d" SQUIDSBUFPH,
1263 SQUIDSBUFPRINT(scheme),
1264 ipbuf, conn->clientConnection->local.port(), SQUIDSBUFPRINT(hp->requestUri()));
1265 }
1266
1267 if (uri)
1268 debugs(33, 5, "TRANSPARENT REWRITE: " << uri);
1269 return uri;
1270 }
1271
/// Parses one HTTP request from inBuf and prepares its processing context.
/// On parse errors or forbidden methods, returns an error-serving stream via
/// abortRequestParsing(). Rewrites the request URI into absolute form for
/// bumped, intercepted, internal, and accelerated requests.
/// \returns a new stream context, or nil when more request bytes are needed
Http::Stream *
ConnStateData::parseHttpRequest(const Http1::RequestParserPointer &hp)
{
    /* Attempt to parse the first line; this will define where the method, url, version and header begin */
    {
        Must(hp);

        // keep a replayable copy of the raw bytes in case this request ends
        // up being tunneled via on_unsupported_protocol
        if (preservingClientData_)
            preservedClientData = inBuf;

        const bool parsedOk = hp->parse(inBuf);

        // sync the buffers after parsing.
        inBuf = hp->remaining();

        if (hp->needsMoreData()) {
            debugs(33, 5, "Incomplete request, waiting for end of request line");
            return NULL;
        }

        if (!parsedOk) {
            // distinguish oversized requests from malformed ones for the error page
            const bool tooBig =
                hp->parseStatusCode == Http::scRequestHeaderFieldsTooLarge ||
                hp->parseStatusCode == Http::scUriTooLong;
            auto result = abortRequestParsing(
                              tooBig ? "error:request-too-large" : "error:invalid-request");
            // assume that remaining leftovers belong to this bad request
            if (!inBuf.isEmpty())
                consumeInput(inBuf.length());
            return result;
        }
    }

    /* We know the whole request is in parser now */
    debugs(11, 2, "HTTP Client " << clientConnection);
    debugs(11, 2, "HTTP Client REQUEST:\n---------\n" <<
           hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol() << "\n" <<
           hp->mimeHeader() <<
           "\n----------");

    /* deny CONNECT via accelerated ports */
    if (hp->method() == Http::METHOD_CONNECT && port != NULL && port->flags.accelSurrogate) {
        debugs(33, DBG_IMPORTANT, "WARNING: CONNECT method received on " << transferProtocol << " Accelerator port " << port->s.port());
        debugs(33, DBG_IMPORTANT, "WARNING: for request: " << hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol());
        hp->parseStatusCode = Http::scMethodNotAllowed;
        return abortRequestParsing("error:method-not-allowed");
    }

    /* RFC 7540 section 11.6 registers the method PRI as HTTP/2 specific
     * Deny "PRI" method if used in HTTP/1.x or 0.9 versions.
     * If seen it signals a broken client or proxy has corrupted the traffic.
     */
    if (hp->method() == Http::METHOD_PRI && hp->messageProtocol() < Http::ProtocolVersion(2,0)) {
        debugs(33, DBG_IMPORTANT, "WARNING: PRI method received on " << transferProtocol << " port " << port->s.port());
        debugs(33, DBG_IMPORTANT, "WARNING: for request: " << hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol());
        hp->parseStatusCode = Http::scMethodNotAllowed;
        return abortRequestParsing("error:method-not-allowed");
    }

    if (hp->method() == Http::METHOD_NONE) {
        debugs(33, DBG_IMPORTANT, "WARNING: Unsupported method: " << hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol());
        hp->parseStatusCode = Http::scMethodNotAllowed;
        return abortRequestParsing("error:unsupported-request-method");
    }

    // Process headers after request line
    debugs(33, 3, "complete request received. " <<
           "prefix_sz = " << hp->messageHeaderSize() <<
           ", request-line-size=" << hp->firstLineSize() <<
           ", mime-header-size=" << hp->headerBlockSize() <<
           ", mime header block:\n" << hp->mimeHeader() << "\n----------");

    /* Ok, all headers are received */
    ClientHttpRequest *http = new ClientHttpRequest(this);

    http->req_sz = hp->messageHeaderSize();
    Http::Stream *result = new Http::Stream(clientConnection, http);

    // wire the stream context into the client-side stream chain
    StoreIOBuffer tempBuffer;
    tempBuffer.data = result->reqbuf;
    tempBuffer.length = HTTP_REQBUF_SZ;

    ClientStreamData newServer = new clientReplyContext(http);
    ClientStreamData newClient = result;
    clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
                     clientReplyStatus, newServer, clientSocketRecipient,
                     clientSocketDetach, newClient, tempBuffer);

    /* set url */
    debugs(33,5, "Prepare absolute URL from " <<
           (transparent()?"intercept":(port->flags.accelSurrogate ? "accel":"")));
    /* Rewrite the URL in transparent or accelerator mode */
    /* NP: there are several cases to traverse here:
     *  - standard mode (forward proxy)
     *  - transparent mode (TPROXY)
     *  - transparent mode with failures
     *  - intercept mode (NAT)
     *  - intercept mode with failures
     *  - accelerator mode (reverse proxy)
     *  - internal relative-URL
     *  - mixed combos of the above with internal URL
     *  - remote interception with PROXY protocol
     *  - remote reverse-proxy with PROXY protocol
     */
    if (switchedToHttps()) {
        http->uri = prepareTlsSwitchingURL(hp);
    } else if (transparent()) {
        /* intercept or transparent mode, properly working with no failures */
        http->uri = prepareTransparentURL(this, hp);

    } else if (internalCheck(hp->requestUri())) { // NP: only matches relative-URI
        /* internal URL mode */
        /* prepend our name & port */
        http->uri = xstrdup(internalLocalUri(NULL, hp->requestUri()));
        // We just re-wrote the URL. Must replace the Host: header.
        //  But have not parsed there yet!! flag for local-only handling.
        http->flags.internal = true;

    } else if (port->flags.accelSurrogate) {
        /* accelerator mode */
        http->uri = prepareAcceleratedURL(this, hp);
        http->flags.accel = true;
    }

    if (!http->uri) {
        /* No special rewrites have been applied above, use the
         * requested url. may be rewritten later, so make extra room */
        int url_sz = hp->requestUri().length() + Config.appendDomainLen + 5;
        http->uri = (char *)xcalloc(url_sz, 1);
        SBufToCstring(http->uri, hp->requestUri());
    }

    result->flags.parsed_ok = 1;
    return result;
}
1407
1408 bool
connFinishedWithConn(int size)1409 ConnStateData::connFinishedWithConn(int size)
1410 {
1411 if (size == 0) {
1412 if (pipeline.empty() && inBuf.isEmpty()) {
1413 /* no current or pending requests */
1414 debugs(33, 4, HERE << clientConnection << " closed");
1415 return true;
1416 } else if (!Config.onoff.half_closed_clients) {
1417 /* admin doesn't want to support half-closed client sockets */
1418 debugs(33, 3, HERE << clientConnection << " aborted (half_closed_clients disabled)");
1419 pipeline.terminateAll(0);
1420 return true;
1421 }
1422 }
1423
1424 return false;
1425 }
1426
/// Discards byteCount already-processed bytes from the front of the inBuf
/// request buffer.
/// \param byteCount must be positive and no larger than inBuf.length()
void
ConnStateData::consumeInput(const size_t byteCount)
{
    // a zero-length or oversized consumption indicates a caller logic bug
    assert(byteCount > 0 && byteCount <= inBuf.length());
    inBuf.consume(byteCount);
    debugs(33, 5, "inBuf has " << inBuf.length() << " unused bytes");
}
1434
1435 void
clientAfterReadingRequests()1436 ConnStateData::clientAfterReadingRequests()
1437 {
1438 // Were we expecting to read more request body from half-closed connection?
1439 if (mayNeedToReadMoreBody() && commIsHalfClosed(clientConnection->fd)) {
1440 debugs(33, 3, HERE << "truncated body: closing half-closed " << clientConnection);
1441 clientConnection->close();
1442 return;
1443 }
1444
1445 if (flags.readMore)
1446 readSomeData();
1447 }
1448
1449 void
quitAfterError(HttpRequest * request)1450 ConnStateData::quitAfterError(HttpRequest *request)
1451 {
1452 // From HTTP p.o.v., we do not have to close after every error detected
1453 // at the client-side, but many such errors do require closure and the
1454 // client-side code is bad at handling errors so we play it safe.
1455 if (request)
1456 request->flags.proxyKeepalive = false;
1457 flags.readMore = false;
1458 debugs(33,4, HERE << "Will close after error: " << clientConnection);
1459 }
1460
#if USE_OPENSSL
/// Serves a delayed SslBump error (saved while processing CONNECT) or a
/// certificate domain-mismatch error, instead of processing the bumped
/// request normally.
/// \returns true when an error response has been committed to the stream
bool ConnStateData::serveDelayedError(Http::Stream *context)
{
    ClientHttpRequest *http = context->http;

    // only SslBump-ed connections can carry a delayed error
    if (!sslServerBump)
        return false;

    assert(sslServerBump->entry);
    // Did we create an error entry while processing CONNECT?
    if (!sslServerBump->entry->isEmpty()) {
        quitAfterError(http->request);

        // Get the saved error entry and send it to the client by replacing the
        // ClientHttpRequest store entry with it.
        clientStreamNode *node = context->getClientReplyContext();
        clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
        assert(repContext);
        // Bug fix: corrected the "delated" typo in this log message.
        debugs(33, 5, "Responding with delayed error for " << http->uri);
        repContext->setReplyToStoreEntry(sslServerBump->entry, "delayed SslBump error");

        // Get error details from the fake certificate-peeking request.
        http->request->detailError(sslServerBump->request->errType, sslServerBump->request->errDetail);
        context->pullData();
        return true;
    }

    // In bump-server-first mode, we have not necessarily seen the intended
    // server name at certificate-peeking time. Check for domain mismatch now,
    // when we can extract the intended name from the bumped HTTP request.
    if (const Security::CertPointer &srvCert = sslServerBump->serverCert) {
        HttpRequest *request = http->request;
        if (!Ssl::checkX509ServerValidity(srvCert.get(), request->url.host())) {
            debugs(33, 2, "SQUID_X509_V_ERR_DOMAIN_MISMATCH: Certificate " <<
                   "does not match domainname " << request->url.host());

            // sslproxy_cert_error may whitelist the mismatch
            bool allowDomainMismatch = false;
            if (Config.ssl_client.cert_error) {
                ACLFilledChecklist check(Config.ssl_client.cert_error, request, dash_str);
                check.al = http->al;
                check.sslErrors = new Security::CertErrors(Security::CertError(SQUID_X509_V_ERR_DOMAIN_MISMATCH, srvCert));
                check.syncAle(request, http->log_uri);
                allowDomainMismatch = check.fastCheck().allowed();
                delete check.sslErrors;
                check.sslErrors = NULL;
            }

            if (!allowDomainMismatch) {
                quitAfterError(request);

                clientStreamNode *node = context->getClientReplyContext();
                clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
                assert (repContext);

                request->hier = sslServerBump->request->hier;

                // Create an error object and fill it
                ErrorState *err = new ErrorState(ERR_SECURE_CONNECT_FAIL, Http::scServiceUnavailable, request);
                err->src_addr = clientConnection->remote;
                Ssl::ErrorDetail *errDetail = new Ssl::ErrorDetail(
                    SQUID_X509_V_ERR_DOMAIN_MISMATCH,
                    srvCert.get(), nullptr);
                err->detail = errDetail;
                repContext->setReplyToError(request->method, err);
                assert(context->http->out.offset == 0);
                context->pullData();
                return true;
            }
        }
    }

    return false;
}
#endif // USE_OPENSSL
1535
/// ConnStateData::tunnelOnError() wrapper. Reduces code changes. TODO: Remove.
/// \param conn the connection that received the failed request (must not be nil)
/// \param context must be the stream at the front of conn's pipeline
/// \note only method and requestError are forwarded; the context and request
///       parameters are kept for caller API compatibility
bool
clientTunnelOnError(ConnStateData *conn, Http::StreamPointer &context, HttpRequest::Pointer &request, const HttpRequestMethod& method, err_type requestError)
{
    assert(conn);
    assert(conn->pipeline.front() == context);
    return conn->tunnelOnError(method, requestError);
}
1544
1545 /// initiate tunneling if possible or return false otherwise
1546 bool
tunnelOnError(const HttpRequestMethod & method,const err_type requestError)1547 ConnStateData::tunnelOnError(const HttpRequestMethod &method, const err_type requestError)
1548 {
1549 if (!Config.accessList.on_unsupported_protocol) {
1550 debugs(33, 5, "disabled; send error: " << requestError);
1551 return false;
1552 }
1553
1554 if (!preservingClientData_) {
1555 debugs(33, 3, "may have forgotten client data; send error: " << requestError);
1556 return false;
1557 }
1558
1559 const auto context = pipeline.front();
1560 const auto http = context ? context->http : nullptr;
1561 const auto request = http ? http->request : nullptr;
1562
1563 ACLFilledChecklist checklist(Config.accessList.on_unsupported_protocol, request, nullptr);
1564 checklist.al = http ? http->al : nullptr;
1565 checklist.requestErrorType = requestError;
1566 checklist.src_addr = clientConnection->remote;
1567 checklist.my_addr = clientConnection->local;
1568 checklist.conn(this);
1569 const char *log_uri = http ? http->log_uri : nullptr;
1570 checklist.syncAle(request, log_uri);
1571 auto answer = checklist.fastCheck();
1572 if (answer.allowed() && answer.kind == 1) {
1573 debugs(33, 3, "Request will be tunneled to server");
1574 if (context)
1575 context->finished(); // Will remove from pipeline queue
1576 Comm::SetSelect(clientConnection->fd, COMM_SELECT_READ, NULL, NULL, 0);
1577 return initiateTunneledRequest(request, Http::METHOD_NONE, "unknown-protocol", preservedClientData);
1578 }
1579 debugs(33, 3, "denied; send error: " << requestError);
1580 return false;
1581 }
1582
1583 void
clientProcessRequestFinished(ConnStateData * conn,const HttpRequest::Pointer & request)1584 clientProcessRequestFinished(ConnStateData *conn, const HttpRequest::Pointer &request)
1585 {
1586 /*
1587 * DPW 2007-05-18
1588 * Moved the TCP_RESET feature from clientReplyContext::sendMoreData
1589 * to here because calling comm_reset_close() causes http to
1590 * be freed before accessing.
1591 */
1592 if (request != NULL && request->flags.resetTcp && Comm::IsConnOpen(conn->clientConnection)) {
1593 debugs(33, 3, HERE << "Sending TCP RST on " << conn->clientConnection);
1594 conn->flags.readMore = false;
1595 comm_reset_close(conn->clientConnection);
1596 }
1597 }
1598
/// Finalizes an already parsed-and-validated request: binds it to its
/// connection manager, applies port-mode flags, validates the method and
/// message framing, sets up request-body plumbing, and starts the
/// doCallouts() processing chain. Serves an immediate error response (and
/// returns early) when any validation step fails.
/// \param hp nil for FTP-originated requests (see isFtp below)
void
clientProcessRequest(ConnStateData *conn, const Http1::RequestParserPointer &hp, Http::Stream *context)
{
    ClientHttpRequest *http = context->http;
    bool mustReplyToOptions = false;
    bool expectBody = false;

    // We already have the request parsed and checked, so we
    // only need to go through the final body/conn setup to doCallouts().
    assert(http->request);
    HttpRequest::Pointer request = http->request;

    // temporary hack to avoid splitting this huge function with sensitive code
    const bool isFtp = !hp;

    // Some blobs below are still HTTP-specific, but we would have to rewrite
    // this entire function to remove them from the FTP code path. Connection
    // setup and body_pipe preparation blobs are needed for FTP.

    request->manager(conn, http->al);

    // record how this request reached us (accelerated, bumped, FTP/HTTP/HTTPS)
    request->flags.accelerated = http->flags.accel;
    request->flags.sslBumped=conn->switchedToHttps();
    // TODO: decouple http->flags.accel from request->flags.sslBumped
    request->flags.noDirect = (request->flags.accelerated && !request->flags.sslBumped) ?
                              !conn->port->allow_direct : 0;
    request->sources |= isFtp ? HttpMsg::srcFtp :
                        ((request->flags.sslBumped || conn->port->transport.protocol == AnyP::PROTO_HTTPS) ? HttpMsg::srcHttps : HttpMsg::srcHttp);
#if USE_AUTH
    if (request->flags.sslBumped) {
        // reuse the credentials established on this bumped connection
        if (conn->getAuth() != NULL)
            request->auth_user_request = conn->getAuth();
    }
#endif

    // detect requests for Squid-internal objects; possibly rewrite them to
    // point at this instance when global_internal_static allows it
    if (internalCheck(request->url.path())) {
        if (internalHostnameIs(request->url.host()) && request->url.port() == getMyPort()) {
            debugs(33, 2, "internal URL found: " << request->url.getScheme() << "://" << request->url.authority(true));
            http->flags.internal = true;
        } else if (Config.onoff.global_internal_static && internalStaticCheck(request->url.path())) {
            debugs(33, 2, "internal URL found: " << request->url.getScheme() << "://" << request->url.authority(true) << " (global_internal_static on)");
            request->url.setScheme(AnyP::PROTO_HTTP, "http");
            request->url.host(internalHostname());
            request->url.port(getMyPort());
            http->flags.internal = true;
            http->setLogUriToRequestUri();
        } else
            debugs(33, 2, "internal URL found: " << request->url.getScheme() << "://" << request->url.authority(true) << " (not this proxy)");
    }

    request->flags.internal = http->flags.internal;

    if (!isFtp) {
        // XXX: for non-HTTP messages instantiate a different HttpMsg child type
        // for now Squid only supports HTTP requests
        const AnyP::ProtocolVersion &http_ver = hp->messageProtocol();
        assert(request->http_ver.protocol == http_ver.protocol);
        request->http_ver.major = http_ver.major;
        request->http_ver.minor = http_ver.minor;
    }

    const auto unsupportedTe = request->header.unsupportedTe();

    // RFC 7231: OPTIONS with Max-Forwards: 0 must be answered by this hop
    mustReplyToOptions = (request->method == Http::METHOD_OPTIONS) &&
                         (request->header.getInt64(Http::HdrType::MAX_FORWARDS) == 0);
    if (!urlCheckRequest(request.getRaw()) || mustReplyToOptions || unsupportedTe) {
        // reply with 501 Not Implemented and stop processing this request
        clientStreamNode *node = context->getClientReplyContext();
        conn->quitAfterError(request.getRaw());
        clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
        assert (repContext);
        repContext->setReplyToError(ERR_UNSUP_REQ, Http::scNotImplemented, request->method, NULL,
                                    conn->clientConnection->remote, request.getRaw(), NULL, NULL);
        assert(context->http->out.offset == 0);
        context->pullData();
        clientProcessRequestFinished(conn, request);
        return;
    }

    // non-chunked requests need an acceptable Content-Length (411 otherwise)
    const auto chunked = request->header.chunked();
    if (!chunked && !clientIsContentLengthValid(request.getRaw())) {
        clientStreamNode *node = context->getClientReplyContext();
        clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
        assert (repContext);
        conn->quitAfterError(request.getRaw());
        repContext->setReplyToError(ERR_INVALID_REQ,
                                    Http::scLengthRequired, request->method, NULL,
                                    conn->clientConnection->remote, request.getRaw(), NULL, NULL);
        assert(context->http->out.offset == 0);
        context->pullData();
        clientProcessRequestFinished(conn, request);
        return;
    }

    clientSetKeepaliveFlag(http);
    // Let tunneling code be fully responsible for CONNECT requests
    if (http->request->method == Http::METHOD_CONNECT) {
        context->mayUseConnection(true);
        conn->flags.readMore = false;
    }

#if USE_OPENSSL
    // a delayed SslBump error preempts normal processing of this request
    if (conn->switchedToHttps() && conn->serveDelayedError(context)) {
        clientProcessRequestFinished(conn, request);
        return;
    }
#endif

    /* Do we expect a request-body? */
    expectBody = chunked || request->content_length > 0;
    if (!context->mayUseConnection() && expectBody) {
        // chunked bodies have unknown length; signal that with -1
        request->body_pipe = conn->expectRequestBody(
                                 chunked ? -1 : request->content_length);

        /* Is it too large? */
        if (!chunked && // if chunked, we will check as we accumulate
                clientIsRequestBodyTooLargeForPolicy(request->content_length)) {
            clientStreamNode *node = context->getClientReplyContext();
            clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
            assert (repContext);
            conn->quitAfterError(request.getRaw());
            repContext->setReplyToError(ERR_TOO_BIG,
                                        Http::scPayloadTooLarge, Http::METHOD_NONE, NULL,
                                        conn->clientConnection->remote, http->request, NULL, NULL);
            assert(context->http->out.offset == 0);
            context->pullData();
            clientProcessRequestFinished(conn, request);
            return;
        }

        if (!isFtp) {
            // We may stop producing, comm_close, and/or call setReplyToError()
            // below, so quit on errors to avoid http->doCallouts()
            if (!conn->handleRequestBodyData()) {
                clientProcessRequestFinished(conn, request);
                return;
            }

            if (!request->body_pipe->productionEnded()) {
                debugs(33, 5, "need more request body");
                context->mayUseConnection(true);
                assert(conn->flags.readMore);
            }
        }
    }

    http->calloutContext = new ClientRequestContext(http);

    http->doCallouts();

    clientProcessRequestFinished(conn, request);
}
1750
1751 int
pipelinePrefetchMax() const1752 ConnStateData::pipelinePrefetchMax() const
1753 {
1754 // TODO: Support pipelined requests through pinned connections.
1755 if (pinning.pinned)
1756 return 0;
1757 return Config.pipeline_max_prefetch;
1758 }
1759
1760 /**
1761 * Limit the number of concurrent requests.
1762 * \return true when there are available position(s) in the pipeline queue for another request.
1763 * \return false when the pipeline queue is full or disabled.
1764 */
1765 bool
concurrentRequestQueueFilled() const1766 ConnStateData::concurrentRequestQueueFilled() const
1767 {
1768 const int existingRequestCount = pipeline.count();
1769
1770 // default to the configured pipeline size.
1771 // add 1 because the head of pipeline is counted in concurrent requests and not prefetch queue
1772 #if USE_OPENSSL
1773 const int internalRequest = (transparent() && sslBumpMode == Ssl::bumpSplice) ? 1 : 0;
1774 #else
1775 const int internalRequest = 0;
1776 #endif
1777 const int concurrentRequestLimit = pipelinePrefetchMax() + 1 + internalRequest;
1778
1779 // when queue filled already we cant add more.
1780 if (existingRequestCount >= concurrentRequestLimit) {
1781 debugs(33, 3, clientConnection << " max concurrent requests reached (" << concurrentRequestLimit << ")");
1782 debugs(33, 5, clientConnection << " deferring new request until one is done");
1783 return true;
1784 }
1785
1786 return false;
1787 }
1788
1789 /**
1790 * Perform proxy_protocol_access ACL tests on the client which
1791 * connected to PROXY protocol port to see if we trust the
1792 * sender enough to accept their PROXY header claim.
1793 */
1794 bool
proxyProtocolValidateClient()1795 ConnStateData::proxyProtocolValidateClient()
1796 {
1797 if (!Config.accessList.proxyProtocol)
1798 return proxyProtocolError("PROXY client not permitted by default ACL");
1799
1800 ACLFilledChecklist ch(Config.accessList.proxyProtocol, NULL, clientConnection->rfc931);
1801 ch.src_addr = clientConnection->remote;
1802 ch.my_addr = clientConnection->local;
1803 ch.conn(this);
1804
1805 if (!ch.fastCheck().allowed())
1806 return proxyProtocolError("PROXY client not permitted by ACLs");
1807
1808 return true;
1809 }
1810
1811 /**
1812 * Perform cleanup on PROXY protocol errors.
1813 * If header parsing hits a fatal error terminate the connection,
1814 * otherwise wait for more data.
1815 */
1816 bool
proxyProtocolError(const char * msg)1817 ConnStateData::proxyProtocolError(const char *msg)
1818 {
1819 if (msg) {
1820 // This is important to know, but maybe not so much that flooding the log is okay.
1821 #if QUIET_PROXY_PROTOCOL
1822 // display the first of every 32 occurances at level 1, the others at level 2.
1823 static uint8_t hide = 0;
1824 debugs(33, (hide++ % 32 == 0 ? DBG_IMPORTANT : 2), msg << " from " << clientConnection);
1825 #else
1826 debugs(33, DBG_IMPORTANT, msg << " from " << clientConnection);
1827 #endif
1828 mustStop(msg);
1829 }
1830 return false;
1831 }
1832
/// magic octet prefix for PROXY protocol version 1 (the human-readable variant)
static const SBuf Proxy1p0magic("PROXY ", 6);

/// magic octet prefix for PROXY protocol version 2 (the binary variant);
/// the fixed 12-octet signature that starts every v2 header
static const SBuf Proxy2p0magic("\x0D\x0A\x0D\x0A\x00\x0D\x0A\x51\x55\x49\x54\x0A", 12);
1838
1839 /**
1840 * Test the connection read buffer for PROXY protocol header.
1841 * Version 1 and 2 header currently supported.
1842 */
1843 bool
parseProxyProtocolHeader()1844 ConnStateData::parseProxyProtocolHeader()
1845 {
1846 // http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt
1847
1848 // detect and parse PROXY/2.0 protocol header
1849 if (inBuf.startsWith(Proxy2p0magic))
1850 return parseProxy2p0();
1851
1852 // detect and parse PROXY/1.0 protocol header
1853 if (inBuf.startsWith(Proxy1p0magic))
1854 return parseProxy1p0();
1855
1856 // detect and terminate other protocols
1857 if (inBuf.length() >= Proxy2p0magic.length()) {
1858 // PROXY/1.0 magic is shorter, so we know that
1859 // the input does not start with any PROXY magic
1860 return proxyProtocolError("PROXY protocol error: invalid header");
1861 }
1862
1863 // TODO: detect short non-magic prefixes earlier to avoid
1864 // waiting for more data which may never come
1865
1866 // not enough bytes to parse yet.
1867 return false;
1868 }
1869
/// parse the PROXY/1.0 protocol header from the connection read buffer
/// \retval true a complete valid (or valid-but-UNKNOWN) header was consumed
/// \retval false need more data, or a fatal error was flagged via proxyProtocolError()
bool
ConnStateData::parseProxy1p0()
{
    ::Parser::Tokenizer tok(inBuf);
    tok.skip(Proxy1p0magic);

    // skip to first LF (assumes it is part of CRLF)
    static const CharacterSet lineContent = CharacterSet::LF.complement("non-LF");
    SBuf line;
    // 107 is the limit this code enforces for the whole v1 header, magic included
    if (tok.prefix(line, lineContent, 107-Proxy1p0magic.length())) {
        if (tok.skip('\n')) {
            // found valid header
            inBuf = tok.remaining(); // remove the header from the read buffer
            needProxyProtocolHeader_ = false;
            // reset the tokenizer to work on found line only.
            tok.reset(line);
        } else
            return false; // no LF yet

    } else // protocol error only if there are more than 107 bytes prefix header
        return proxyProtocolError(inBuf.length() > 107? "PROXY/1.0 error: missing CRLF" : NULL);

    static const SBuf unknown("UNKNOWN"), tcpName("TCP");
    if (tok.skip(tcpName)) {

        // skip TCP/IP version number
        static const CharacterSet tcpVersions("TCP-version","46");
        if (!tok.skipOne(tcpVersions))
            return proxyProtocolError("PROXY/1.0 error: missing TCP version");

        // skip SP after protocol version
        if (!tok.skip(' '))
            return proxyProtocolError("PROXY/1.0 error: missing SP");

        SBuf ipa, ipb;
        int64_t porta, portb;
        // characters valid in both IPv4 (dotted) and IPv6 (hex:colon) forms
        static const CharacterSet ipChars = CharacterSet("IP Address",".:") + CharacterSet::HEXDIG;

        // parse: src-IP SP dst-IP SP src-port SP dst-port CR
        // leave the LF until later.
        const bool correct = tok.prefix(ipa, ipChars) && tok.skip(' ') &&
                             tok.prefix(ipb, ipChars) && tok.skip(' ') &&
                             tok.int64(porta) && tok.skip(' ') &&
                             tok.int64(portb) &&
                             tok.skip('\r');
        if (!correct)
            return proxyProtocolError("PROXY/1.0 error: invalid syntax");

        // parse IP and port strings
        Ip::Address originalClient, originalDest;

        if (!originalClient.GetHostByName(ipa.c_str()))
            return proxyProtocolError("PROXY/1.0 error: invalid src-IP address");

        if (!originalDest.GetHostByName(ipb.c_str()))
            return proxyProtocolError("PROXY/1.0 error: invalid dst-IP address");

        if (porta > 0 && porta <= 0xFFFF) // max uint16_t
            originalClient.port(static_cast<uint16_t>(porta));
        else
            return proxyProtocolError("PROXY/1.0 error: invalid src port");

        if (portb > 0 && portb <= 0xFFFF) // max uint16_t
            originalDest.port(static_cast<uint16_t>(portb));
        else
            return proxyProtocolError("PROXY/1.0 error: invalid dst port");

        // we have original client and destination details now
        // replace the client connection values
        debugs(33, 5, "PROXY/1.0 protocol on connection " << clientConnection);
        clientConnection->local = originalDest;
        clientConnection->remote = originalClient;
        if ((clientConnection->flags & COMM_TRANSPARENT))
            clientConnection->flags ^= COMM_TRANSPARENT; // prevent TPROXY spoofing of this new IP.
        debugs(33, 5, "PROXY/1.0 upgrade: " << clientConnection);
        return true;

    } else if (tok.skip(unknown)) {
        // found valid but unusable header; keep current connection addresses
        return true;

    } else
        return proxyProtocolError("PROXY/1.0 error: invalid protocol family");

    return false;
}
1957
/// parse the PROXY/2.0 protocol header from the connection read buffer
/// \retval true a complete v2 header was consumed (addresses updated for INET families)
/// \retval false need more bytes, or a fatal error was flagged via proxyProtocolError()
bool
ConnStateData::parseProxy2p0()
{
    static const SBuf::size_type prefixLen = Proxy2p0magic.length();
    // 4 octets follow the magic: version/command, family/protocol, 16-bit length
    if (inBuf.length() < prefixLen + 4)
        return false; // need more bytes

    if ((inBuf[prefixLen] & 0xF0) != 0x20) // version == 2 is mandatory
        return proxyProtocolError("PROXY/2.0 error: invalid version");

    const char command = (inBuf[prefixLen] & 0x0F);
    if ((command & 0xFE) != 0x00) // values other than 0x0-0x1 are invalid
        return proxyProtocolError("PROXY/2.0 error: invalid command");

    const char family = (inBuf[prefixLen+1] & 0xF0) >>4;
    if (family > 0x3) // values other than 0x0-0x3 are invalid
        return proxyProtocolError("PROXY/2.0 error: invalid family");

    const char proto = (inBuf[prefixLen+1] & 0x0F);
    if (proto > 0x2) // values other than 0x0-0x2 are invalid
        return proxyProtocolError("PROXY/2.0 error: invalid protocol type");

    // the two octets after family/protocol carry the network-order
    // length of the variable-size address block that follows
    const char *clen = inBuf.rawContent() + prefixLen + 2;
    uint16_t len;
    memcpy(&len, clen, sizeof(len));
    len = ntohs(len);

    if (inBuf.length() < prefixLen + 4 + len)
        return false; // need more bytes

    inBuf.consume(prefixLen + 4); // 4 being the extra bytes
    const SBuf extra = inBuf.consume(len);
    needProxyProtocolHeader_ = false; // found successfully

    // LOCAL connections do nothing with the extras
    if (command == 0x00/* LOCAL*/)
        return true;

    // layout of the v2 address block for the supported families
    union pax {
        struct { /* for TCP/UDP over IPv4, len = 12 */
            struct in_addr src_addr;
            struct in_addr dst_addr;
            uint16_t src_port;
            uint16_t dst_port;
        } ipv4_addr;
        struct { /* for TCP/UDP over IPv6, len = 36 */
            struct in6_addr src_addr;
            struct in6_addr dst_addr;
            uint16_t src_port;
            uint16_t dst_port;
        } ipv6_addr;
#if NOT_SUPPORTED
        struct { /* for AF_UNIX sockets, len = 216 */
            uint8_t src_addr[108];
            uint8_t dst_addr[108];
        } unix_addr;
#endif
    };

    pax ipu;
    // NOTE(review): this copies sizeof(pax) bytes regardless of the
    // advertised len; when len < sizeof(pax) (e.g., an IPv4 block of 12
    // bytes) the tail of the copy reads beyond the octets the client sent —
    // confirm the SBuf backing store makes this over-read harmless
    memcpy(&ipu, extra.rawContent(), sizeof(pax));

    // replace the client connection values with the PROXY-supplied addresses
    debugs(33, 5, "PROXY/2.0 protocol on connection " << clientConnection);
    switch (family) {
    case 0x1: // IPv4
        clientConnection->local = ipu.ipv4_addr.dst_addr;
        clientConnection->local.port(ntohs(ipu.ipv4_addr.dst_port));
        clientConnection->remote = ipu.ipv4_addr.src_addr;
        clientConnection->remote.port(ntohs(ipu.ipv4_addr.src_port));
        if ((clientConnection->flags & COMM_TRANSPARENT))
            clientConnection->flags ^= COMM_TRANSPARENT; // prevent TPROXY spoofing of this new IP.
        break;
    case 0x2: // IPv6
        clientConnection->local = ipu.ipv6_addr.dst_addr;
        clientConnection->local.port(ntohs(ipu.ipv6_addr.dst_port));
        clientConnection->remote = ipu.ipv6_addr.src_addr;
        clientConnection->remote.port(ntohs(ipu.ipv6_addr.src_port));
        if ((clientConnection->flags & COMM_TRANSPARENT))
            clientConnection->flags ^= COMM_TRANSPARENT; // prevent TPROXY spoofing of this new IP.
        break;
    default: // do nothing
        break;
    }
    debugs(33, 5, "PROXY/2.0 upgrade: " << clientConnection);
    return true;
}
2046
2047 void
receivedFirstByte()2048 ConnStateData::receivedFirstByte()
2049 {
2050 if (receivedFirstByte_)
2051 return;
2052
2053 receivedFirstByte_ = true;
2054 // Set timeout to Config.Timeout.request
2055 typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
2056 AsyncCall::Pointer timeoutCall = JobCallback(33, 5,
2057 TimeoutDialer, this, ConnStateData::requestTimeout);
2058 commSetConnTimeout(clientConnection, Config.Timeout.request, timeoutCall);
2059 }
2060
2061 /**
2062 * Attempt to parse one or more requests from the input buffer.
2063 * Returns true after completing parsing of at least one request [header]. That
2064 * includes cases where parsing ended with an error (e.g., a huge request).
2065 */
2066 bool
clientParseRequests()2067 ConnStateData::clientParseRequests()
2068 {
2069 bool parsed_req = false;
2070
2071 debugs(33, 5, HERE << clientConnection << ": attempting to parse");
2072
2073 // Loop while we have read bytes that are not needed for producing the body
2074 // On errors, bodyPipe may become nil, but readMore will be cleared
2075 while (!inBuf.isEmpty() && !bodyPipe && flags.readMore) {
2076
2077 // Prohibit concurrent requests when using a pinned to-server connection
2078 // because our Client classes do not support request pipelining.
2079 if (pinning.pinned && !pinning.readHandler) {
2080 debugs(33, 3, clientConnection << " waits for busy " << pinning.serverConnection);
2081 break;
2082 }
2083
2084 /* Limit the number of concurrent requests */
2085 if (concurrentRequestQueueFilled())
2086 break;
2087
2088 // try to parse the PROXY protocol header magic bytes
2089 if (needProxyProtocolHeader_) {
2090 if (!parseProxyProtocolHeader())
2091 break;
2092
2093 // we have been waiting for PROXY to provide client-IP
2094 // for some lookups, ie rDNS and IDENT.
2095 whenClientIpKnown();
2096
2097 // Done with PROXY protocol which has cleared preservingClientData_.
2098 // If the next protocol supports on_unsupported_protocol, then its
2099 // parseOneRequest() must reset preservingClientData_.
2100 assert(!preservingClientData_);
2101 }
2102
2103 if (Http::StreamPointer context = parseOneRequest()) {
2104 debugs(33, 5, clientConnection << ": done parsing a request");
2105
2106 AsyncCall::Pointer timeoutCall = commCbCall(5, 4, "clientLifetimeTimeout",
2107 CommTimeoutCbPtrFun(clientLifetimeTimeout, context->http));
2108 commSetConnTimeout(clientConnection, Config.Timeout.lifetime, timeoutCall);
2109
2110 context->registerWithConn();
2111
2112 #if USE_OPENSSL
2113 if (switchedToHttps())
2114 parsedBumpedRequestCount++;
2115 #endif
2116
2117 processParsedRequest(context);
2118
2119 parsed_req = true; // XXX: do we really need to parse everything right NOW ?
2120
2121 if (context->mayUseConnection()) {
2122 debugs(33, 3, HERE << "Not parsing new requests, as this request may need the connection");
2123 break;
2124 }
2125 } else {
2126 debugs(33, 5, clientConnection << ": not enough request data: " <<
2127 inBuf.length() << " < " << Config.maxRequestHeaderSize);
2128 Must(inBuf.length() < Config.maxRequestHeaderSize);
2129 break;
2130 }
2131 }
2132
2133 /* XXX where to 'finish' the parsing pass? */
2134 return parsed_req;
2135 }
2136
2137 void
afterClientRead()2138 ConnStateData::afterClientRead()
2139 {
2140 #if USE_OPENSSL
2141 if (parsingTlsHandshake) {
2142 parseTlsHandshake();
2143 return;
2144 }
2145 #endif
2146
2147 /* Process next request */
2148 if (pipeline.empty())
2149 fd_note(clientConnection->fd, "Reading next request");
2150
2151 if (!clientParseRequests()) {
2152 if (!isOpen())
2153 return;
2154 /*
2155 * If the client here is half closed and we failed
2156 * to parse a request, close the connection.
2157 * The above check with connFinishedWithConn() only
2158 * succeeds _if_ the buffer is empty which it won't
2159 * be if we have an incomplete request.
2160 * XXX: This duplicates ConnStateData::kick
2161 */
2162 if (pipeline.empty() && commIsHalfClosed(clientConnection->fd)) {
2163 debugs(33, 5, clientConnection << ": half-closed connection, no completed request parsed, connection closing.");
2164 clientConnection->close();
2165 return;
2166 }
2167 }
2168
2169 if (!isOpen())
2170 return;
2171
2172 clientAfterReadingRequests();
2173 }
2174
2175 /**
2176 * called when new request data has been read from the socket
2177 *
2178 * \retval false called comm_close or setReplyToError (the caller should bail)
2179 * \retval true we did not call comm_close or setReplyToError
2180 */
2181 bool
handleReadData()2182 ConnStateData::handleReadData()
2183 {
2184 // if we are reading a body, stuff data into the body pipe
2185 if (bodyPipe != NULL)
2186 return handleRequestBodyData();
2187 return true;
2188 }
2189
2190 /**
2191 * called when new request body data has been buffered in inBuf
2192 * may close the connection if we were closing and piped everything out
2193 *
2194 * \retval false called comm_close or setReplyToError (the caller should bail)
2195 * \retval true we did not call comm_close or setReplyToError
2196 */
2197 bool
handleRequestBodyData()2198 ConnStateData::handleRequestBodyData()
2199 {
2200 assert(bodyPipe != NULL);
2201
2202 if (bodyParser) { // chunked encoding
2203 if (const err_type error = handleChunkedRequestBody()) {
2204 abortChunkedRequestBody(error);
2205 return false;
2206 }
2207 } else { // identity encoding
2208 debugs(33,5, HERE << "handling plain request body for " << clientConnection);
2209 const size_t putSize = bodyPipe->putMoreData(inBuf.c_str(), inBuf.length());
2210 if (putSize > 0)
2211 consumeInput(putSize);
2212
2213 if (!bodyPipe->mayNeedMoreData()) {
2214 // BodyPipe will clear us automagically when we produced everything
2215 bodyPipe = NULL;
2216 }
2217 }
2218
2219 if (!bodyPipe) {
2220 debugs(33,5, HERE << "produced entire request body for " << clientConnection);
2221
2222 if (const char *reason = stoppedSending()) {
2223 /* we've finished reading like good clients,
2224 * now do the close that initiateClose initiated.
2225 */
2226 debugs(33, 3, HERE << "closing for earlier sending error: " << reason);
2227 clientConnection->close();
2228 return false;
2229 }
2230 }
2231
2232 return true;
2233 }
2234
2235 /// parses available chunked encoded body bytes, checks size, returns errors
2236 err_type
handleChunkedRequestBody()2237 ConnStateData::handleChunkedRequestBody()
2238 {
2239 debugs(33, 7, "chunked from " << clientConnection << ": " << inBuf.length());
2240
2241 try { // the parser will throw on errors
2242
2243 if (inBuf.isEmpty()) // nothing to do
2244 return ERR_NONE;
2245
2246 BodyPipeCheckout bpc(*bodyPipe);
2247 bodyParser->setPayloadBuffer(&bpc.buf);
2248 const bool parsed = bodyParser->parse(inBuf);
2249 inBuf = bodyParser->remaining(); // sync buffers
2250 bpc.checkIn();
2251
2252 // dechunk then check: the size limit applies to _dechunked_ content
2253 if (clientIsRequestBodyTooLargeForPolicy(bodyPipe->producedSize()))
2254 return ERR_TOO_BIG;
2255
2256 if (parsed) {
2257 finishDechunkingRequest(true);
2258 Must(!bodyPipe);
2259 return ERR_NONE; // nil bodyPipe implies body end for the caller
2260 }
2261
2262 // if chunk parser needs data, then the body pipe must need it too
2263 Must(!bodyParser->needsMoreData() || bodyPipe->mayNeedMoreData());
2264
2265 // if parser needs more space and we can consume nothing, we will stall
2266 Must(!bodyParser->needsMoreSpace() || bodyPipe->buf().hasContent());
2267 } catch (...) { // TODO: be more specific
2268 debugs(33, 3, HERE << "malformed chunks" << bodyPipe->status());
2269 return ERR_INVALID_REQ;
2270 }
2271
2272 debugs(33, 7, HERE << "need more chunked data" << *bodyPipe->status());
2273 return ERR_NONE;
2274 }
2275
/// quit on errors related to chunked request body handling
/// \param error the dechunking failure category (e.g., ERR_TOO_BIG)
void
ConnStateData::abortChunkedRequestBody(const err_type error)
{
    finishDechunkingRequest(false); // tell the dechunking machinery we failed

    // XXX: The code below works if we fail during initial request parsing,
    // but if we fail when the server connection is used already, the server may send
    // us its response too, causing various assertions. How to prevent that?
#if WE_KNOW_HOW_TO_SEND_ERRORS
    // NOTE(review): this disabled branch appears bit-rotted (e.g., the bare
    // `CachePeer,` argument and HTTP_BAD_REQUEST would not compile as-is) —
    // audit before ever enabling WE_KNOW_HOW_TO_SEND_ERRORS
    Http::StreamPointer context = pipeline.front();
    if (context != NULL && !context->http->out.offset) { // output nothing yet
        clientStreamNode *node = context->getClientReplyContext();
        clientReplyContext *repContext = dynamic_cast<clientReplyContext*>(node->data.getRaw());
        assert(repContext);
        const Http::StatusCode scode = (error == ERR_TOO_BIG) ?
                                       Http::scPayloadTooLarge : HTTP_BAD_REQUEST;
        repContext->setReplyToError(error, scode,
                                    repContext->http->request->method,
                                    repContext->http->uri,
                                    CachePeer,
                                    repContext->http->request,
                                    inBuf, NULL);
        context->pullData();
    } else {
        // close or otherwise we may get stuck as nobody will notice the error?
        comm_reset_close(clientConnection);
    }
#else
    debugs(33, 3, HERE << "aborting chunked request without error " << error);
    comm_reset_close(clientConnection);
#endif
    flags.readMore = false; // stop reading from this client
}
2310
2311 void
noteBodyConsumerAborted(BodyPipe::Pointer)2312 ConnStateData::noteBodyConsumerAborted(BodyPipe::Pointer )
2313 {
2314 // request reader may get stuck waiting for space if nobody consumes body
2315 if (bodyPipe != NULL)
2316 bodyPipe->enableAutoConsumption();
2317
2318 // kids extend
2319 }
2320
2321 /** general lifetime handler for HTTP requests */
2322 void
requestTimeout(const CommTimeoutCbParams & io)2323 ConnStateData::requestTimeout(const CommTimeoutCbParams &io)
2324 {
2325 if (!Comm::IsConnOpen(io.conn))
2326 return;
2327
2328 const err_type error = receivedFirstByte_ ? ERR_REQUEST_PARSE_TIMEOUT : ERR_REQUEST_START_TIMEOUT;
2329 if (tunnelOnError(HttpRequestMethod(), error))
2330 return;
2331
2332 /*
2333 * Just close the connection to not confuse browsers
2334 * using persistent connections. Some browsers open
2335 * a connection and then do not use it until much
2336 * later (presumeably because the request triggering
2337 * the open has already been completed on another
2338 * connection)
2339 */
2340 debugs(33, 3, "requestTimeout: FD " << io.fd << ": lifetime is expired.");
2341 io.conn->close();
2342 }
2343
2344 static void
clientLifetimeTimeout(const CommTimeoutCbParams & io)2345 clientLifetimeTimeout(const CommTimeoutCbParams &io)
2346 {
2347 ClientHttpRequest *http = static_cast<ClientHttpRequest *>(io.data);
2348 debugs(33, DBG_IMPORTANT, "WARNING: Closing client connection due to lifetime timeout");
2349 debugs(33, DBG_IMPORTANT, "\t" << http->uri);
2350 if (const auto conn = http->getConn())
2351 conn->pipeline.terminateAll(ETIMEDOUT);
2352 if (Comm::IsConnOpen(io.conn))
2353 io.conn->close();
2354 }
2355
/// Creates a client connection manager for the accepted transaction.
/// Kid classes customize further (see the "kids overwrite" notes below);
/// actual work starts in start().
ConnStateData::ConnStateData(const MasterXaction::Pointer &xact) :
    AsyncJob("ConnStateData"), // kids overwrite
    Server(xact),
    bodyParser(nullptr),
#if USE_OPENSSL
    sslBumpMode(Ssl::bumpEnd),
    tlsParser(Security::HandshakeParser::fromClient),
#endif
    needProxyProtocolHeader_(false),
#if USE_OPENSSL
    switchedToHttps_(false),
    parsingTlsHandshake(false),
    parsedBumpedRequestCount(0),
    tlsConnectPort(0),
    sslServerBump(NULL),
    signAlgorithm(Ssl::algSignTrusted),
#endif
    stoppedSending_(NULL),
    stoppedReceiving_(NULL)
{
    flags.readMore = true; // kids may overwrite
    flags.swanSang = false;

    // no to-server connection is pinned yet
    pinning.host = NULL;
    pinning.port = -1;
    pinning.pinned = false;
    pinning.auth = false;
    pinning.zeroReply = false;
    pinning.peer = NULL;

    // store the details required for creating more MasterXaction objects as new requests come in
    log_addr = xact->tcpClient->remote;
    log_addr.applyMask(Config.Addrs.client_netmask);

    // register to receive notice of Squid signal events
    // which may affect long persisting client connections
    registerRunner();
}
2394
/// AsyncJob API: starts servicing the accepted client connection
void
ConnStateData::start()
{
    BodyProducer::start();
    HttpControlMsgSink::start();

    // disable Path MTU discovery when the port configuration asks for it
    // (always, or only for transparently intercepted connections)
    if (port->disable_pmtu_discovery != DISABLE_PMTU_OFF &&
            (transparent() || port->disable_pmtu_discovery == DISABLE_PMTU_ALWAYS)) {
#if defined(IP_MTU_DISCOVER) && defined(IP_PMTUDISC_DONT)
        int i = IP_PMTUDISC_DONT;
        if (setsockopt(clientConnection->fd, SOL_IP, IP_MTU_DISCOVER, &i, sizeof(i)) < 0) {
            int xerrno = errno;
            debugs(33, 2, "WARNING: Path MTU discovery disabling failed on " << clientConnection << " : " << xstrerr(xerrno));
        }
#else
        static bool reported = false;

        // warn once per process, not per connection
        if (!reported) {
            debugs(33, DBG_IMPORTANT, "NOTICE: Path MTU discovery disabling is not supported on your platform.");
            reported = true;
        }
#endif
    }

    // clean up this job if/when the descriptor is closed out from under us
    typedef CommCbMemFunT<ConnStateData, CommCloseCbParams> Dialer;
    AsyncCall::Pointer call = JobCallback(33, 5, Dialer, this, ConnStateData::connStateClosed);
    comm_add_close_handler(clientConnection->fd, call);

    // require_proxy_header ports must see a PROXY header before anything else
    needProxyProtocolHeader_ = port->flags.proxySurrogate;
    if (needProxyProtocolHeader_) {
        if (!proxyProtocolValidateClient()) // will close the connection on failure
            return;
    } else
        whenClientIpKnown();

    // requires needProxyProtocolHeader_ which is initialized above
    preservingClientData_ = shouldPreserveClientData();
}
2433
/// Performs lookups and checks that need the (possibly PROXY-supplied)
/// client IP address: rDNS, IDENT, client db, and client delay pools.
void
ConnStateData::whenClientIpKnown()
{
    // start a reverse DNS lookup so access logs can show the client FQDN
    if (Config.onoff.log_fqdn)
        fqdncache_gethostbyaddr(clientConnection->remote, FQDN_LOOKUP_IF_MISS);

#if USE_IDENT
    // start an RFC 931/1413 ident lookup when ident_lookup_access allows it
    if (Ident::TheConfig.identLookup) {
        ACLFilledChecklist identChecklist(Ident::TheConfig.identLookup, NULL, NULL);
        identChecklist.src_addr = clientConnection->remote;
        identChecklist.my_addr = clientConnection->local;
        if (identChecklist.fastCheck().allowed())
            Ident::Start(clientConnection, clientIdentDone, this);
    }
#endif

    clientdbEstablished(clientConnection->remote, 1);

#if USE_DELAY_POOLS
    fd_table[clientConnection->fd].clientInfo = NULL;

    if (!Config.onoff.client_db)
        return; // client delay pools require client_db

    ClientDelayPools& pools(Config.ClientDelay.pools);
    if (pools.size()) {
        ACLFilledChecklist ch(NULL, NULL, NULL);

        // TODO: we check early to limit error response bandwith but we
        // should recheck when we can honor delay_pool_uses_indirect
        // TODO: we should also pass the port details for myportname here.
        ch.src_addr = clientConnection->remote;
        ch.my_addr = clientConnection->local;

        // assign this client to the first delay pool whose ACLs allow it
        for (unsigned int pool = 0; pool < pools.size(); ++pool) {

            /* pools require explicit 'allow' to assign a client into them */
            if (pools[pool].access) {
                ch.changeAcl(pools[pool].access);
                allow_t answer = ch.fastCheck();
                if (answer.allowed()) {

                    /* request client information from db after we did all checks
                       this will save hash lookup if client failed checks */
                    ClientInfo * cli = clientdbGetInfo(clientConnection->remote);
                    assert(cli);

                    /* put client info in FDE */
                    fd_table[clientConnection->fd].clientInfo = cli;

                    /* setup write limiter for this request */
                    const double burst = floor(0.5 +
                                               (pools[pool].highwatermark * Config.ClientDelay.initial)/100.0);
                    cli->setWriteLimiter(pools[pool].rate, burst, pools[pool].highwatermark);
                    break;
                } else {
                    debugs(83, 4, HERE << "Delay pool " << pool << " skipped because ACL " << answer);
                }
            }
        }
    }
#endif

    // kids must extend to actually start doing something (e.g., reading)
}
2499
2500 /** Handle a new connection on an HTTP socket. */
2501 void
httpAccept(const CommAcceptCbParams & params)2502 httpAccept(const CommAcceptCbParams ¶ms)
2503 {
2504 MasterXaction::Pointer xact = params.xaction;
2505 AnyP::PortCfgPointer s = xact->squidPort;
2506
2507 // NP: it is possible the port was reconfigured when the call or accept() was queued.
2508
2509 if (params.flag != Comm::OK) {
2510 // Its possible the call was still queued when the client disconnected
2511 debugs(33, 2, s->listenConn << ": accept failure: " << xstrerr(params.xerrno));
2512 return;
2513 }
2514
2515 debugs(33, 4, params.conn << ": accepted");
2516 fd_note(params.conn->fd, "client http connect");
2517
2518 if (s->tcp_keepalive.enabled)
2519 commSetTcpKeepalive(params.conn->fd, s->tcp_keepalive.idle, s->tcp_keepalive.interval, s->tcp_keepalive.timeout);
2520
2521 ++incoming_sockets_accepted;
2522
2523 // Socket is ready, setup the connection manager to start using it
2524 auto *srv = Http::NewServer(xact);
2525 AsyncJob::Start(srv); // usually async-calls readSomeData()
2526 }
2527
2528 /// Create TLS connection structure and update fd_table
2529 static bool
httpsCreate(const ConnStateData * connState,const Security::ContextPointer & ctx)2530 httpsCreate(const ConnStateData *connState, const Security::ContextPointer &ctx)
2531 {
2532 const auto conn = connState->clientConnection;
2533 if (Security::CreateServerSession(ctx, conn, connState->port->secure, "client https start")) {
2534 debugs(33, 5, "will negotiate TLS on " << conn);
2535 return true;
2536 }
2537
2538 debugs(33, DBG_IMPORTANT, "ERROR: could not create TLS server context for " << conn);
2539 conn->close();
2540 return false;
2541 }
2542
/**
 * Attempt one step of the server-side TLS handshake on the client socket.
 *
 * \param conn the client connection manager owning the socket
 * \param callback I/O handler to re-arm when the handshake wants more I/O
 * \retval 1 on success
 * \retval 0 when needs more data
 * \retval -1 on error
 */
static int
tlsAttemptHandshake(ConnStateData *conn, PF *callback)
{
    // TODO: maybe throw instead of returning -1
    // see https://github.com/squid-cache/squid/pull/81#discussion_r153053278
    int fd = conn->clientConnection->fd;
    auto session = fd_table[fd].ssl.get();

    errno = 0; // so a post-call errno is attributable to the handshake

#if USE_OPENSSL
    const auto ret = SSL_accept(session);
    if (ret > 0)
        return 1; // handshake completed

    const int xerrno = errno;
    const auto ssl_error = SSL_get_error(session, ret);

    switch (ssl_error) {

    case SSL_ERROR_WANT_READ:
        // wait for more client bytes, then retry via callback
        Comm::SetSelect(fd, COMM_SELECT_READ, callback, (callback ? conn : nullptr), 0);
        return 0;

    case SSL_ERROR_WANT_WRITE:
        // wait until we can write, then retry via callback
        Comm::SetSelect(fd, COMM_SELECT_WRITE, callback, (callback ? conn : nullptr), 0);
        return 0;

    case SSL_ERROR_SYSCALL:
        if (ret == 0) {
            debugs(83, 2, "Error negotiating SSL connection on FD " << fd << ": Aborted by client: " << ssl_error);
        } else {
            debugs(83, (xerrno == ECONNRESET) ? 1 : 2, "Error negotiating SSL connection on FD " << fd << ": " <<
                   (xerrno == 0 ? Security::ErrorString(ssl_error) : xstrerr(xerrno)));
        }
        break;

    case SSL_ERROR_ZERO_RETURN:
        debugs(83, DBG_IMPORTANT, "Error negotiating SSL connection on FD " << fd << ": Closed by client");
        break;

    default:
        debugs(83, DBG_IMPORTANT, "Error negotiating SSL connection on FD " <<
               fd << ": " << Security::ErrorString(ssl_error) <<
               " (" << ssl_error << "/" << ret << ")");
    }

#elif USE_GNUTLS

    const auto x = gnutls_handshake(session);
    if (x == GNUTLS_E_SUCCESS)
        return 1; // handshake completed

    if (gnutls_error_is_fatal(x)) {
        debugs(83, 2, "Error negotiating TLS on " << conn->clientConnection << ": Aborted by client: " << Security::ErrorString(x));

    } else if (x == GNUTLS_E_INTERRUPTED || x == GNUTLS_E_AGAIN) {
        // retry in whichever direction GnuTLS says it was blocked
        const auto ioAction = (gnutls_record_get_direction(session)==0 ? COMM_SELECT_READ : COMM_SELECT_WRITE);
        Comm::SetSelect(fd, ioAction, callback, (callback ? conn : nullptr), 0);
        return 0;
    }

#else
    // Performing TLS handshake should never be reachable without a TLS/SSL library.
    (void)session; // avoid compiler and static analysis complaints
    fatal("FATAL: HTTPS not supported by this Squid.");
#endif

    return -1;
}
2619
/** negotiate an SSL connection */
static void
clientNegotiateSSL(int fd, void *data)
{
    ConnStateData *conn = (ConnStateData *)data;

    // tlsAttemptHandshake() re-registers this function as the I/O callback
    // when it needs more data, so a zero return means "still negotiating".
    const int ret = tlsAttemptHandshake(conn, clientNegotiateSSL);
    if (ret <= 0) {
        if (ret < 0) // An error: give up on this client connection
            conn->clientConnection->close();
        return; // ret == 0: handshake continues asynchronously
    }

    // handshake completed successfully; inspect the negotiated session
    Security::SessionPointer session(fd_table[fd].ssl);

#if USE_OPENSSL
    if (Security::SessionIsResumed(session)) {
        debugs(83, 2, "Session " << SSL_get_session(session.get()) <<
               " reused on FD " << fd << " (" << fd_table[fd].ipaddr <<
               ":" << (int)fd_table[fd].remote_port << ")");
    } else {
        if (Debug::Enabled(83, 4)) {
            /* Write out the SSL session details.. actually the call below, but
             * OpenSSL headers do strange typecasts confusing GCC.. */
            /* PEM_write_SSL_SESSION(debug_log, SSL_get_session(ssl)); */
#if defined(OPENSSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x00908000L
            PEM_ASN1_write(reinterpret_cast<i2d_of_void *>(i2d_SSL_SESSION),
                           PEM_STRING_SSL_SESSION, debug_log,
                           reinterpret_cast<char *>(SSL_get_session(session.get())),
                           nullptr, nullptr, 0, nullptr, nullptr);

#elif (ALLOW_ALWAYS_SSL_SESSION_DETAIL == 1)

            /* When using gcc 3.3.x and OpenSSL 0.9.7x sometimes a compile error can occur here.
             * This is caused by an unpredictable gcc behaviour on a cast of the first argument
             * of PEM_ASN1_write(). For this reason this code section is disabled. To enable it,
             * define ALLOW_ALWAYS_SSL_SESSION_DETAIL=1.
             * Because there are two possible usable cast, if you get an error here, try the other
             * commented line. */

            PEM_ASN1_write((int(*)())i2d_SSL_SESSION, PEM_STRING_SSL_SESSION,
                           debug_log,
                           reinterpret_cast<char *>(SSL_get_session(session.get())),
                           nullptr, nullptr, 0, nullptr, nullptr);
            /* PEM_ASN1_write((int(*)(...))i2d_SSL_SESSION, PEM_STRING_SSL_SESSION,
                           debug_log,
                           reinterpret_cast<char *>(SSL_get_session(session.get())),
                           nullptr, nullptr, 0, nullptr, nullptr);
             */
#else
            debugs(83, 4, "With " OPENSSL_VERSION_TEXT ", session details are available only defining ALLOW_ALWAYS_SSL_SESSION_DETAIL=1 in the source.");

#endif
            /* Note: This does not automatically fflush the log file.. */
        }

        debugs(83, 2, "New session " << SSL_get_session(session.get()) <<
               " on FD " << fd << " (" << fd_table[fd].ipaddr << ":" <<
               fd_table[fd].remote_port << ")");
    }
#else
    debugs(83, 2, "TLS session reuse not yet implemented.");
#endif

    // Connection established. Retrieve TLS connection parameters for logging.
    conn->clientConnection->tlsNegotiations()->retrieveNegotiatedInfo(session);

#if USE_OPENSSL
    // log the client certificate (if any) presented during the handshake
    X509 *client_cert = SSL_get_peer_certificate(session.get());

    if (client_cert) {
        debugs(83, 3, "FD " << fd << " client certificate: subject: " <<
               X509_NAME_oneline(X509_get_subject_name(client_cert), 0, 0));

        debugs(83, 3, "FD " << fd << " client certificate: issuer: " <<
               X509_NAME_oneline(X509_get_issuer_name(client_cert), 0, 0));

        // SSL_get_peer_certificate() increments the reference count
        X509_free(client_cert);
    } else {
        debugs(83, 5, "FD " << fd << " has no client certificate.");
    }
#else
    debugs(83, 2, "Client certificate requesting not yet implemented.");
#endif

    // TLS layer ready; start reading the encapsulated (HTTP) requests
    conn->readSomeData();
}
2707
2708 /**
2709 * If Security::ContextPointer is given, starts reading the TLS handshake.
2710 * Otherwise, calls switchToHttps to generate a dynamic Security::ContextPointer.
2711 */
2712 static void
httpsEstablish(ConnStateData * connState,const Security::ContextPointer & ctx)2713 httpsEstablish(ConnStateData *connState, const Security::ContextPointer &ctx)
2714 {
2715 assert(connState);
2716 const Comm::ConnectionPointer &details = connState->clientConnection;
2717
2718 if (!ctx || !httpsCreate(connState, ctx))
2719 return;
2720
2721 typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
2722 AsyncCall::Pointer timeoutCall = JobCallback(33, 5, TimeoutDialer,
2723 connState, ConnStateData::requestTimeout);
2724 commSetConnTimeout(details, Config.Timeout.request, timeoutCall);
2725
2726 Comm::SetSelect(details->fd, COMM_SELECT_READ, clientNegotiateSSL, connState, 0);
2727 }
2728
2729 #if USE_OPENSSL
2730 /**
2731 * A callback function to use with the ACLFilledChecklist callback.
2732 */
2733 static void
httpsSslBumpAccessCheckDone(allow_t answer,void * data)2734 httpsSslBumpAccessCheckDone(allow_t answer, void *data)
2735 {
2736 ConnStateData *connState = (ConnStateData *) data;
2737
2738 // if the connection is closed or closing, just return.
2739 if (!connState->isOpen())
2740 return;
2741
2742 if (answer.allowed()) {
2743 debugs(33, 2, "sslBump action " << Ssl::bumpMode(answer.kind) << "needed for " << connState->clientConnection);
2744 connState->sslBumpMode = static_cast<Ssl::BumpMode>(answer.kind);
2745 } else {
2746 debugs(33, 3, "sslBump not needed for " << connState->clientConnection);
2747 connState->sslBumpMode = Ssl::bumpSplice;
2748 }
2749
2750 if (connState->sslBumpMode == Ssl::bumpTerminate) {
2751 connState->clientConnection->close();
2752 return;
2753 }
2754
2755 if (!connState->fakeAConnectRequest("ssl-bump", connState->inBuf))
2756 connState->clientConnection->close();
2757 }
2758 #endif
2759
2760 /** handle a new HTTPS connection */
2761 static void
httpsAccept(const CommAcceptCbParams & params)2762 httpsAccept(const CommAcceptCbParams ¶ms)
2763 {
2764 MasterXaction::Pointer xact = params.xaction;
2765 const AnyP::PortCfgPointer s = xact->squidPort;
2766
2767 // NP: it is possible the port was reconfigured when the call or accept() was queued.
2768
2769 if (params.flag != Comm::OK) {
2770 // Its possible the call was still queued when the client disconnected
2771 debugs(33, 2, "httpsAccept: " << s->listenConn << ": accept failure: " << xstrerr(params.xerrno));
2772 return;
2773 }
2774
2775 debugs(33, 4, HERE << params.conn << " accepted, starting SSL negotiation.");
2776 fd_note(params.conn->fd, "client https connect");
2777
2778 if (s->tcp_keepalive.enabled) {
2779 commSetTcpKeepalive(params.conn->fd, s->tcp_keepalive.idle, s->tcp_keepalive.interval, s->tcp_keepalive.timeout);
2780 }
2781 ++incoming_sockets_accepted;
2782
2783 // Socket is ready, setup the connection manager to start using it
2784 auto *srv = Https::NewServer(xact);
2785 AsyncJob::Start(srv); // usually async-calls postHttpsAccept()
2786 }
2787
/// Handles a freshly accepted HTTPS connection: on SslBump ports, starts a
/// non-blocking ssl_bump ACL check (answered by httpsSslBumpAccessCheckDone);
/// otherwise begins TLS negotiation using the port's static context.
void
ConnStateData::postHttpsAccept()
{
    if (port->flags.tunnelSslBumping) {
#if USE_OPENSSL
        debugs(33, 5, "accept transparent connection: " << clientConnection);

        // no ssl_bump rules configured: treat as a denied check
        if (!Config.accessList.ssl_bump) {
            httpsSslBumpAccessCheckDone(ACCESS_DENIED, this);
            return;
        }

        MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initClient);
        mx->tcpClient = clientConnection;
        // Create a fake HTTP request for ssl_bump ACL check,
        // using tproxy/intercept provided destination IP and port.
        HttpRequest *request = new HttpRequest(mx);
        static char ip[MAX_IPSTRLEN];
        // only transparent/intercepted connections reach this code path
        assert(clientConnection->flags & (COMM_TRANSPARENT | COMM_INTERCEPTION));
        request->url.host(clientConnection->local.toStr(ip, sizeof(ip)));
        request->url.port(clientConnection->local.port());
        request->myportname = port->name;

        ACLFilledChecklist *acl_checklist = new ACLFilledChecklist(Config.accessList.ssl_bump, request, NULL);
        acl_checklist->src_addr = clientConnection->remote;
        acl_checklist->my_addr = port->s;
        // Build a local AccessLogEntry to allow requiresAle() acls work
        acl_checklist->al = new AccessLogEntry;
        acl_checklist->al->cache.start_time = current_time;
        acl_checklist->al->tcpClient = clientConnection;
        acl_checklist->al->cache.port = port;
        acl_checklist->al->cache.caddr = log_addr;
        // replace any previous ALE request with the fake one (ref-counted)
        HTTPMSGUNLOCK(acl_checklist->al->request);
        acl_checklist->al->request = request;
        HTTPMSGLOCK(acl_checklist->al->request);
        Http::StreamPointer context = pipeline.front();
        ClientHttpRequest *http = context ? context->http : nullptr;
        const char *log_uri = http ? http->log_uri : nullptr;
        acl_checklist->syncAle(request, log_uri);
        // answer arrives asynchronously via httpsSslBumpAccessCheckDone()
        acl_checklist->nonBlockingCheck(httpsSslBumpAccessCheckDone, this);
#else
        fatal("FATAL: SSL-Bump requires --with-openssl");
#endif
        return;
    } else {
        // non-bumping HTTPS port: negotiate with the static TLS context
        httpsEstablish(this, port->secure.staticContext);
    }
}
2836
2837 #if USE_OPENSSL
2838 void
sslCrtdHandleReplyWrapper(void * data,const Helper::Reply & reply)2839 ConnStateData::sslCrtdHandleReplyWrapper(void *data, const Helper::Reply &reply)
2840 {
2841 ConnStateData * state_data = (ConnStateData *)(data);
2842 state_data->sslCrtdHandleReply(reply);
2843 }
2844
2845 void
sslCrtdHandleReply(const Helper::Reply & reply)2846 ConnStateData::sslCrtdHandleReply(const Helper::Reply &reply)
2847 {
2848 if (!isOpen()) {
2849 debugs(33, 3, "Connection gone while waiting for ssl_crtd helper reply; helper reply:" << reply);
2850 return;
2851 }
2852
2853 if (reply.result == Helper::BrokenHelper) {
2854 debugs(33, 5, "Certificate for " << tlsConnectHostOrIp << " cannot be generated. ssl_crtd response: " << reply);
2855 } else if (!reply.other().hasContent()) {
2856 debugs(1, DBG_IMPORTANT, HERE << "\"ssl_crtd\" helper returned <NULL> reply.");
2857 } else {
2858 Ssl::CrtdMessage reply_message(Ssl::CrtdMessage::REPLY);
2859 if (reply_message.parse(reply.other().content(), reply.other().contentSize()) != Ssl::CrtdMessage::OK) {
2860 debugs(33, 5, "Reply from ssl_crtd for " << tlsConnectHostOrIp << " is incorrect");
2861 } else {
2862 if (reply.result != Helper::Okay) {
2863 debugs(33, 5, "Certificate for " << tlsConnectHostOrIp << " cannot be generated. ssl_crtd response: " << reply_message.getBody());
2864 } else {
2865 debugs(33, 5, "Certificate for " << tlsConnectHostOrIp << " was successfully recieved from ssl_crtd");
2866 if (sslServerBump && (sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare)) {
2867 doPeekAndSpliceStep();
2868 auto ssl = fd_table[clientConnection->fd].ssl.get();
2869 bool ret = Ssl::configureSSLUsingPkeyAndCertFromMemory(ssl, reply_message.getBody().c_str(), *port);
2870 if (!ret)
2871 debugs(33, 5, "Failed to set certificates to ssl object for PeekAndSplice mode");
2872
2873 Security::ContextPointer ctx(Security::GetFrom(fd_table[clientConnection->fd].ssl));
2874 Ssl::configureUnconfiguredSslContext(ctx, signAlgorithm, *port);
2875 } else {
2876 Security::ContextPointer ctx(Ssl::GenerateSslContextUsingPkeyAndCertFromMemory(reply_message.getBody().c_str(), port->secure, (signAlgorithm == Ssl::algSignTrusted)));
2877 if (ctx && !sslBumpCertKey.isEmpty())
2878 storeTlsContextToCache(sslBumpCertKey, ctx);
2879 getSslContextDone(ctx);
2880 }
2881 return;
2882 }
2883 }
2884 }
2885 Security::ContextPointer nil;
2886 getSslContextDone(nil);
2887 }
2888
buildSslCertGenerationParams(Ssl::CertificateProperties & certProperties)2889 void ConnStateData::buildSslCertGenerationParams(Ssl::CertificateProperties &certProperties)
2890 {
2891 certProperties.commonName = sslCommonName_.isEmpty() ? tlsConnectHostOrIp.c_str() : sslCommonName_.c_str();
2892
2893 const bool connectedOk = sslServerBump && sslServerBump->connectedOk();
2894 if (connectedOk) {
2895 if (X509 *mimicCert = sslServerBump->serverCert.get())
2896 certProperties.mimicCert.resetAndLock(mimicCert);
2897
2898 ACLFilledChecklist checklist(NULL, sslServerBump->request.getRaw(),
2899 clientConnection != NULL ? clientConnection->rfc931 : dash_str);
2900 checklist.sslErrors = cbdataReference(sslServerBump->sslErrors());
2901
2902 for (sslproxy_cert_adapt *ca = Config.ssl_client.cert_adapt; ca != NULL; ca = ca->next) {
2903 // If the algorithm already set, then ignore it.
2904 if ((ca->alg == Ssl::algSetCommonName && certProperties.setCommonName) ||
2905 (ca->alg == Ssl::algSetValidAfter && certProperties.setValidAfter) ||
2906 (ca->alg == Ssl::algSetValidBefore && certProperties.setValidBefore) )
2907 continue;
2908
2909 if (ca->aclList && checklist.fastCheck(ca->aclList).allowed()) {
2910 const char *alg = Ssl::CertAdaptAlgorithmStr[ca->alg];
2911 const char *param = ca->param;
2912
2913 // For parameterless CN adaptation, use hostname from the
2914 // CONNECT request.
2915 if (ca->alg == Ssl::algSetCommonName) {
2916 if (!param)
2917 param = tlsConnectHostOrIp.c_str();
2918 certProperties.commonName = param;
2919 certProperties.setCommonName = true;
2920 } else if (ca->alg == Ssl::algSetValidAfter)
2921 certProperties.setValidAfter = true;
2922 else if (ca->alg == Ssl::algSetValidBefore)
2923 certProperties.setValidBefore = true;
2924
2925 debugs(33, 5, HERE << "Matches certificate adaptation aglorithm: " <<
2926 alg << " param: " << (param ? param : "-"));
2927 }
2928 }
2929
2930 certProperties.signAlgorithm = Ssl::algSignEnd;
2931 for (sslproxy_cert_sign *sg = Config.ssl_client.cert_sign; sg != NULL; sg = sg->next) {
2932 if (sg->aclList && checklist.fastCheck(sg->aclList).allowed()) {
2933 certProperties.signAlgorithm = (Ssl::CertSignAlgorithm)sg->alg;
2934 break;
2935 }
2936 }
2937 } else {// did not try to connect (e.g. client-first) or failed to connect
2938 // In case of an error while connecting to the secure server, use a
2939 // trusted certificate, with no mimicked fields and no adaptation
2940 // algorithms. There is nothing we can mimic, so we want to minimize the
2941 // number of warnings the user will have to see to get to the error page.
2942 // We will close the connection, so that the trust is not extended to
2943 // non-Squid content.
2944 certProperties.signAlgorithm = Ssl::algSignTrusted;
2945 }
2946
2947 assert(certProperties.signAlgorithm != Ssl::algSignEnd);
2948
2949 if (certProperties.signAlgorithm == Ssl::algSignUntrusted) {
2950 assert(port->secure.untrustedSigningCa.cert);
2951 certProperties.signWithX509.resetAndLock(port->secure.untrustedSigningCa.cert.get());
2952 certProperties.signWithPkey.resetAndLock(port->secure.untrustedSigningCa.pkey.get());
2953 } else {
2954 assert(port->secure.signingCa.cert.get());
2955 certProperties.signWithX509.resetAndLock(port->secure.signingCa.cert.get());
2956
2957 if (port->secure.signingCa.pkey)
2958 certProperties.signWithPkey.resetAndLock(port->secure.signingCa.pkey.get());
2959 }
2960 signAlgorithm = certProperties.signAlgorithm;
2961
2962 certProperties.signHash = Ssl::DefaultSignHash;
2963 }
2964
2965 Security::ContextPointer
getTlsContextFromCache(const SBuf & cacheKey,const Ssl::CertificateProperties & certProperties)2966 ConnStateData::getTlsContextFromCache(const SBuf &cacheKey, const Ssl::CertificateProperties &certProperties)
2967 {
2968 debugs(33, 5, "Finding SSL certificate for " << cacheKey << " in cache");
2969 Ssl::LocalContextStorage * ssl_ctx_cache = Ssl::TheGlobalContextStorage.getLocalStorage(port->s);
2970 if (Security::ContextPointer *ctx = ssl_ctx_cache ? ssl_ctx_cache->get(cacheKey) : nullptr) {
2971 if (Ssl::verifySslCertificate(*ctx, certProperties)) {
2972 debugs(33, 5, "Cached SSL certificate for " << certProperties.commonName << " is valid");
2973 return *ctx;
2974 } else {
2975 debugs(33, 5, "Cached SSL certificate for " << certProperties.commonName << " is out of date. Delete this certificate from cache");
2976 if (ssl_ctx_cache)
2977 ssl_ctx_cache->del(cacheKey);
2978 }
2979 }
2980 return Security::ContextPointer(nullptr);
2981 }
2982
2983 void
storeTlsContextToCache(const SBuf & cacheKey,Security::ContextPointer & ctx)2984 ConnStateData::storeTlsContextToCache(const SBuf &cacheKey, Security::ContextPointer &ctx)
2985 {
2986 Ssl::LocalContextStorage *ssl_ctx_cache = Ssl::TheGlobalContextStorage.getLocalStorage(port->s);
2987 if (!ssl_ctx_cache || !ssl_ctx_cache->add(cacheKey, new Security::ContextPointer(ctx))) {
2988 // If it is not in storage delete after using. Else storage deleted it.
2989 fd_table[clientConnection->fd].dynamicTlsContext = ctx;
2990 }
2991 }
2992
/// Begins obtaining a TLS context for bumping this connection: first checks
/// the in-memory certificate cache, then the ssl_crtd helper (if built), and
/// finally falls back to blocking in-process certificate generation.
/// Ends by calling getSslContextDone() directly or via a helper callback.
void
ConnStateData::getSslContextStart()
{
    // If we are called, then CONNECT has succeeded. Finalize it.
    if (auto xact = pipeline.front()) {
        if (xact->http && xact->http->request && xact->http->request->method == Http::METHOD_CONNECT)
            xact->finished();
        // cannot proceed with encryption if requests wait for plain responses
        Must(pipeline.empty());
    }
    /* careful: finished() above frees request, host, etc. */

    if (port->secure.generateHostCertificates) {
        Ssl::CertificateProperties certProperties;
        buildSslCertGenerationParams(certProperties);

        // Disable caching for bumpPeekAndSplice mode
        if (!(sslServerBump && (sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare))) {
            sslBumpCertKey.clear();
            Ssl::InRamCertificateDbKey(certProperties, sslBumpCertKey);
            assert(!sslBumpCertKey.isEmpty());

            // a cache hit avoids both helper round-trip and local generation
            Security::ContextPointer ctx(getTlsContextFromCache(sslBumpCertKey, certProperties));
            if (ctx) {
                getSslContextDone(ctx);
                return;
            }
        }

#if USE_SSL_CRTD
        try {
            debugs(33, 5, HERE << "Generating SSL certificate for " << certProperties.commonName << " using ssl_crtd.");
            Ssl::CrtdMessage request_message(Ssl::CrtdMessage::REQUEST);
            request_message.setCode(Ssl::CrtdMessage::code_new_certificate);
            request_message.composeRequest(certProperties);
            debugs(33, 5, HERE << "SSL crtd request: " << request_message.compose().c_str());
            // helper reply arrives asynchronously via sslCrtdHandleReplyWrapper()
            Ssl::Helper::Submit(request_message, sslCrtdHandleReplyWrapper, this);
            return;
        } catch (const std::exception &e) {
            debugs(33, DBG_IMPORTANT, "ERROR: Failed to compose ssl_crtd " <<
                   "request for " << certProperties.commonName <<
                   " certificate: " << e.what() << "; will now block to " <<
                   "generate that certificate.");
            // fall through to do blocking in-process generation.
        }
#endif // USE_SSL_CRTD

        debugs(33, 5, HERE << "Generating SSL certificate for " << certProperties.commonName);
        if (sslServerBump && (sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare)) {
            // peek/stare: attach the certificate to the existing SSL object
            // and resume the held client handshake
            doPeekAndSpliceStep();
            auto ssl = fd_table[clientConnection->fd].ssl.get();
            if (!Ssl::configureSSL(ssl, certProperties, *port))
                debugs(33, 5, "Failed to set certificates to ssl object for PeekAndSplice mode");

            Security::ContextPointer ctx(Security::GetFrom(fd_table[clientConnection->fd].ssl));
            Ssl::configureUnconfiguredSslContext(ctx, certProperties.signAlgorithm, *port);
        } else {
            // generate a fresh context locally and cache it for reuse
            Security::ContextPointer dynCtx(Ssl::GenerateSslContext(certProperties, port->secure, (signAlgorithm == Ssl::algSignTrusted)));
            if (dynCtx && !sslBumpCertKey.isEmpty())
                storeTlsContextToCache(sslBumpCertKey, dynCtx);
            getSslContextDone(dynCtx);
        }
        return;
    }

    // certificate generation disabled: getSslContextDone() will try the
    // port's static context instead
    Security::ContextPointer nil;
    getSslContextDone(nil);
}
3061
/// Completes TLS context acquisition for a bumped connection: falls back to
/// the port's static context when generation failed, installs the TLS layer,
/// [re]sets the request timeout, and starts negotiating with the client
/// using any already-buffered client bytes.
void
ConnStateData::getSslContextDone(Security::ContextPointer &ctx)
{
    if (port->secure.generateHostCertificates && !ctx) {
        debugs(33, 2, "Failed to generate TLS context for " << tlsConnectHostOrIp);
    }

    // If generated ssl context = NULL, try to use static ssl context.
    if (!ctx) {
        if (!port->secure.staticContext) {
            debugs(83, DBG_IMPORTANT, "Closing " << clientConnection->remote << " as lacking TLS context");
            clientConnection->close();
            return;
        } else {
            debugs(33, 5, "Using static TLS context.");
            ctx = port->secure.staticContext;
        }
    }

    if (!httpsCreate(this, ctx))
        return;

    // bumped intercepted conns should already have Config.Timeout.request set
    // but forwarded connections may only have Config.Timeout.lifetime. [Re]set
    // to make sure the connection does not get stuck on non-SSL clients.
    typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
    AsyncCall::Pointer timeoutCall = JobCallback(33, 5, TimeoutDialer,
                                     this, ConnStateData::requestTimeout);
    commSetConnTimeout(clientConnection, Config.Timeout.request, timeoutCall);

    switchedToHttps_ = true;

    // feed bytes we already read from the client into the TLS read BIO,
    // then kick off the server-side of the handshake
    auto ssl = fd_table[clientConnection->fd].ssl.get();
    BIO *b = SSL_get_rbio(ssl);
    Ssl::ClientBio *bio = static_cast<Ssl::ClientBio *>(BIO_get_data(b));
    bio->setReadBufData(inBuf);
    inBuf.clear();
    clientNegotiateSSL(clientConnection->fd, this);
}
3101
/// Switches this connection from plain HTTP to TLS handling (SslBump):
/// records the CONNECT target, prepares the ServerBump state for the chosen
/// bump mode, resets timeouts, and starts reading the TLS client handshake.
void
ConnStateData::switchToHttps(HttpRequest *request, Ssl::BumpMode bumpServerMode)
{
    assert(!switchedToHttps_);

    // Depending on receivedFirstByte_, we are at the start of either an
    // established CONNECT tunnel with the client or an intercepted TCP (and
    // presumably TLS) connection from the client. Expect TLS Client Hello.
    const auto insideConnectTunnel = receivedFirstByte_;
    debugs(33, 5, (insideConnectTunnel ? "post-CONNECT " : "raw TLS ") << clientConnection);

    tlsConnectHostOrIp = request->url.hostOrIp();
    tlsConnectPort = request->url.port();
    resetSslCommonName(request->url.host());

    // We are going to read new request
    flags.readMore = true;

    // keep version major.minor details the same.
    // but we are now performing the HTTPS handshake traffic
    transferProtocol.protocol = AnyP::PROTO_HTTPS;

    // If sslServerBump is set, then we have decided to deny CONNECT
    // and now want to switch to SSL to send the error to the client
    // without even peeking at the origin server certificate.
    if (bumpServerMode == Ssl::bumpServerFirst && !sslServerBump) {
        request->flags.sslPeek = true;
        sslServerBump = new Ssl::ServerBump(request);
    } else if (bumpServerMode == Ssl::bumpPeek || bumpServerMode == Ssl::bumpStare) {
        request->flags.sslPeek = true;
        sslServerBump = new Ssl::ServerBump(request, NULL, bumpServerMode);
    }

    // commSetConnTimeout() was called for this request before we switched.
    // Fix timeout to request_start_timeout
    typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
    AsyncCall::Pointer timeoutCall = JobCallback(33, 5,
                                     TimeoutDialer, this, ConnStateData::requestTimeout);
    commSetConnTimeout(clientConnection, Config.Timeout.request_start_timeout, timeoutCall);
    // Also reset receivedFirstByte_ flag to allow this timeout work in the case we have
    // a bumped "connect" request on non transparent port.
    receivedFirstByte_ = false;
    // Get more data to peek at Tls
    parsingTlsHandshake = true;

    // If the protocol has changed, then reset preservingClientData_.
    // Otherwise, its value initially set in start() is still valid/fresh.
    // shouldPreserveClientData() uses parsingTlsHandshake which is reset above.
    if (insideConnectTunnel)
        preservingClientData_ = shouldPreserveClientData();

    readSomeData();
}
3155
/// Incrementally parses the buffered TLS client handshake. Reads more data
/// when the hello is incomplete; on a parse error, attempts to tunnel the
/// unsupported protocol. Once parsed, extracts SNI and dispatches to the
/// appropriate next bumping step for the configured step1 action.
void
ConnStateData::parseTlsHandshake()
{
    Must(parsingTlsHandshake);

    assert(!inBuf.isEmpty());
    receivedFirstByte();
    fd_note(clientConnection->fd, "Parsing TLS handshake");

    bool unsupportedProtocol = false;
    try {
        if (!tlsParser.parseHello(inBuf)) {
            // need more data to finish parsing
            readSomeData();
            return;
        }
    }
    catch (const std::exception &ex) {
        // not a TLS Client Hello (or a malformed one)
        debugs(83, 2, "error on FD " << clientConnection->fd << ": " << ex.what());
        unsupportedProtocol = true;
    }

    parsingTlsHandshake = false;

    // client data may be needed for splicing and for
    // tunneling unsupportedProtocol after an error
    preservedClientData = inBuf;

    // Even if the parser failed, each TLS detail should either be set
    // correctly or still be "unknown"; copying unknown detail is a no-op.
    Security::TlsDetails::Pointer const &details = tlsParser.details;
    clientConnection->tlsNegotiations()->retrieveParsedInfo(details);
    if (details && !details->serverName.isEmpty()) {
        // the SNI name becomes the certificate common name and is remembered
        // for fake CONNECT generation
        resetSslCommonName(details->serverName.c_str());
        tlsClientSni_ = details->serverName;
    }

    // We should disable read/write handlers
    Comm::ResetSelect(clientConnection->fd);

    if (unsupportedProtocol) {
        Http::StreamPointer context = pipeline.front();
        Must(context && context->http);
        HttpRequest::Pointer request = context->http->request;
        debugs(83, 5, "Got something other than TLS Client Hello. Cannot SslBump.");
        sslBumpMode = Ssl::bumpSplice;
        context->http->al->ssl.bumpMode = Ssl::bumpSplice;
        // tunnel the raw bytes if configuration allows; otherwise give up
        if (!clientTunnelOnError(this, context, request, HttpRequestMethod(), ERR_PROTOCOL_UNKNOWN))
            clientConnection->close();
        return;
    }

    // dispatch based on the step1 ssl_bump decision
    if (!sslServerBump || sslServerBump->act.step1 == Ssl::bumpClientFirst) { // Either means client-first.
        getSslContextStart();
        return;
    } else if (sslServerBump->act.step1 == Ssl::bumpServerFirst) {
        // will call httpsPeeked() with certificate and connection, eventually
        FwdState::fwdStart(clientConnection, sslServerBump->entry, sslServerBump->request.getRaw());
    } else {
        Must(sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare);
        startPeekAndSplice();
    }
}
3219
httpsSslBumpStep2AccessCheckDone(allow_t answer,void * data)3220 void httpsSslBumpStep2AccessCheckDone(allow_t answer, void *data)
3221 {
3222 ConnStateData *connState = (ConnStateData *) data;
3223
3224 // if the connection is closed or closing, just return.
3225 if (!connState->isOpen())
3226 return;
3227
3228 debugs(33, 5, "Answer: " << answer << " kind:" << answer.kind);
3229 assert(connState->serverBump());
3230 Ssl::BumpMode bumpAction;
3231 if (answer.allowed()) {
3232 bumpAction = (Ssl::BumpMode)answer.kind;
3233 } else
3234 bumpAction = Ssl::bumpSplice;
3235
3236 connState->serverBump()->act.step2 = bumpAction;
3237 connState->sslBumpMode = bumpAction;
3238 Http::StreamPointer context = connState->pipeline.front();
3239 if (ClientHttpRequest *http = (context ? context->http : nullptr))
3240 http->al->ssl.bumpMode = bumpAction;
3241
3242 if (bumpAction == Ssl::bumpTerminate) {
3243 connState->clientConnection->close();
3244 } else if (bumpAction != Ssl::bumpSplice) {
3245 connState->startPeekAndSplice();
3246 } else if (!connState->splice())
3247 connState->clientConnection->close();
3248 }
3249
/// Stops bumping and splices the client connection to the intended server:
/// tears down the TLS layer, finishes the current (CONNECT) transaction, and
/// fakes a new CONNECT (with SNI as target, when known) so access controls
/// and adaptations still apply. Returns false when the fake request could
/// not be created.
bool
ConnStateData::splice()
{
    // normally we can splice here, because we just got client hello message

    if (fd_table[clientConnection->fd].ssl.get()) {
        // Restore default read methods
        fd_table[clientConnection->fd].read_method = &default_read_method;
        fd_table[clientConnection->fd].write_method = &default_write_method;
    }

    // XXX: assuming that there was an HTTP/1.1 CONNECT to begin with...
    // reset the current protocol to HTTP/1.1 (was "HTTPS" for the bumping process)
    transferProtocol = Http::ProtocolVersion();
    assert(!pipeline.empty());
    Http::StreamPointer context = pipeline.front();
    Must(context);
    Must(context->http);
    ClientHttpRequest *http = context->http;
    HttpRequest::Pointer request = http->request;
    // finished() may free http and its members; request is ref-held above
    context->finished();
    if (transparent()) {
        // For transparent connections, make a new fake CONNECT request, now
        // with SNI as target. doCallout() checks, adaptations may need that.
        return fakeAConnectRequest("splice", preservedClientData);
    } else {
        // For non transparent connections make a new tunneled CONNECT, which
        // also sets the HttpRequest::flags::forceTunnel flag to avoid
        // respond with "Connection Established" to the client.
        // This fake CONNECT request required to allow use of SNI in
        // doCallout() checks and adaptations.
        return initiateTunneledRequest(request, Http::METHOD_CONNECT, "splice", preservedClientData);
    }
}
3284
/// Step2 of SSL bumping for peek/stare: on first entry, runs the step2
/// ssl_bump ACL check (answered by httpsSslBumpStep2AccessCheckDone).
/// On re-entry, installs an unconfigured TLS context, feeds the buffered
/// client hello to OpenSSL with the BIO held, and starts forwarding the
/// request to peek at the server.
void
ConnStateData::startPeekAndSplice()
{
    // This is the Step2 of the SSL bumping
    assert(sslServerBump);
    Http::StreamPointer context = pipeline.front();
    ClientHttpRequest *http = context ? context->http : nullptr;

    if (sslServerBump->step == Ssl::bumpStep1) {
        sslServerBump->step = Ssl::bumpStep2;
        // Run a accessList check to check if want to splice or continue bumping

        ACLFilledChecklist *acl_checklist = new ACLFilledChecklist(Config.accessList.ssl_bump, sslServerBump->request.getRaw(), nullptr);
        acl_checklist->al = http ? http->al : nullptr;
        //acl_checklist->src_addr = params.conn->remote;
        //acl_checklist->my_addr = s->s;
        // step1-only actions make no sense at step2; ban them from matching
        acl_checklist->banAction(allow_t(ACCESS_ALLOWED, Ssl::bumpNone));
        acl_checklist->banAction(allow_t(ACCESS_ALLOWED, Ssl::bumpClientFirst));
        acl_checklist->banAction(allow_t(ACCESS_ALLOWED, Ssl::bumpServerFirst));
        const char *log_uri = http ? http->log_uri : nullptr;
        acl_checklist->syncAle(sslServerBump->request.getRaw(), log_uri);
        acl_checklist->nonBlockingCheck(httpsSslBumpStep2AccessCheckDone, this);
        return;
    }

    // will call httpsPeeked() with certificate and connection, eventually
    Security::ContextPointer unConfiguredCTX(Ssl::createSSLContext(port->secure.signingCa.cert, port->secure.signingCa.pkey, port->secure));
    fd_table[clientConnection->fd].dynamicTlsContext = unConfiguredCTX;

    if (!httpsCreate(this, unConfiguredCTX))
        return;

    switchedToHttps_ = true;

    // feed the buffered client hello into the TLS read BIO, but hold the
    // handshake until doPeekAndSpliceStep() releases it
    auto ssl = fd_table[clientConnection->fd].ssl.get();
    BIO *b = SSL_get_rbio(ssl);
    Ssl::ClientBio *bio = static_cast<Ssl::ClientBio *>(BIO_get_data(b));
    bio->setReadBufData(inBuf);
    bio->hold(true);

    // Here squid should have all of the client hello message so the
    // tlsAttemptHandshake() should return 0.
    // This block exist only to force openSSL parse client hello and detect
    // ERR_SECURE_ACCEPT_FAIL error, which should be checked and splice if required.
    if (tlsAttemptHandshake(this, nullptr) < 0) {
        debugs(83, 2, "TLS handshake failed.");
        HttpRequest::Pointer request(http ? http->request : nullptr);
        if (!clientTunnelOnError(this, context, request, HttpRequestMethod(), ERR_SECURE_ACCEPT_FAIL))
            clientConnection->close();
        return;
    }

    // We need to reset inBuf here, to be used by incoming requests in the case
    // of SSL bump
    inBuf.clear();

    debugs(83, 5, "Peek and splice at step2 done. Start forwarding the request!!! ");
    FwdState::Start(clientConnection, sslServerBump->entry, sslServerBump->request.getRaw(), http ? http->al : NULL);
}
3344
3345 void
doPeekAndSpliceStep()3346 ConnStateData::doPeekAndSpliceStep()
3347 {
3348 auto ssl = fd_table[clientConnection->fd].ssl.get();
3349 BIO *b = SSL_get_rbio(ssl);
3350 assert(b);
3351 Ssl::ClientBio *bio = static_cast<Ssl::ClientBio *>(BIO_get_data(b));
3352
3353 debugs(33, 5, "PeekAndSplice mode, proceed with client negotiation. Currrent state:" << SSL_state_string_long(ssl));
3354 bio->hold(false);
3355
3356 Comm::SetSelect(clientConnection->fd, COMM_SELECT_WRITE, clientNegotiateSSL, this, 0);
3357 switchedToHttps_ = true;
3358 }
3359
3360 void
httpsPeeked(PinnedIdleContext pic)3361 ConnStateData::httpsPeeked(PinnedIdleContext pic)
3362 {
3363 Must(sslServerBump != NULL);
3364 Must(sslServerBump->request == pic.request);
3365 Must(pipeline.empty() || pipeline.front()->http == nullptr || pipeline.front()->http->request == pic.request.getRaw());
3366
3367 if (Comm::IsConnOpen(pic.connection)) {
3368 notePinnedConnectionBecameIdle(pic);
3369 debugs(33, 5, "bumped HTTPS server: " << tlsConnectHostOrIp);
3370 } else
3371 debugs(33, 5, "Error while bumping: " << tlsConnectHostOrIp);
3372
3373 getSslContextStart();
3374 }
3375
3376 #endif /* USE_OPENSSL */
3377
/// Builds and dispatches a fake CONNECT request (with forceTunnel set) so
/// this connection's traffic is tunneled. The tunnel target is derived, in
/// order of preference, from the pinned server connection, the causing
/// request, the remembered TLS CONNECT target, or (for transparent
/// connections) the intercepted destination. Returns false when no target
/// can be computed.
bool
ConnStateData::initiateTunneledRequest(HttpRequest::Pointer const &cause, Http::MethodType const method, const char *reason, const SBuf &payload)
{
    // fake a CONNECT request to force connState to tunnel
    SBuf connectHost;
    unsigned short connectPort = 0;

    if (pinning.serverConnection != nullptr) {
        static char ip[MAX_IPSTRLEN];
        connectHost = pinning.serverConnection->remote.toStr(ip, sizeof(ip));
        connectPort = pinning.serverConnection->remote.port();
    } else if (cause) {
        connectHost = cause->url.hostOrIp();
        connectPort = cause->url.port();
#if USE_OPENSSL
    } else if (!tlsConnectHostOrIp.isEmpty()) {
        connectHost = tlsConnectHostOrIp;
        connectPort = tlsConnectPort;
#endif
    } else if (transparent()) {
        static char ip[MAX_IPSTRLEN];
        connectHost = clientConnection->local.toStr(ip, sizeof(ip));
        connectPort = clientConnection->local.port();
    } else {
        debugs(33, 2, "Not able to compute URL, abort request tunneling for " << reason);
        return false;
    }

    debugs(33, 2, "Request tunneling for " << reason);
    ClientHttpRequest *http = buildFakeRequest(method, connectHost, connectPort, payload);
    HttpRequest::Pointer request = http->request;
    request->flags.forceTunnel = true;
    http->calloutContext = new ClientRequestContext(http);
    // doCallouts() may free http; request is kept alive by the Pointer above
    http->doCallouts();
    clientProcessRequestFinished(this, request);
    return true;
}
3415
3416 bool
fakeAConnectRequest(const char * reason,const SBuf & payload)3417 ConnStateData::fakeAConnectRequest(const char *reason, const SBuf &payload)
3418 {
3419 debugs(33, 2, "fake a CONNECT request to force connState to tunnel for " << reason);
3420
3421 SBuf connectHost;
3422 assert(transparent());
3423 const unsigned short connectPort = clientConnection->local.port();
3424
3425 #if USE_OPENSSL
3426 if (!tlsClientSni_.isEmpty())
3427 connectHost.assign(tlsClientSni_);
3428 else
3429 #endif
3430 {
3431 static char ip[MAX_IPSTRLEN];
3432 clientConnection->local.toHostStr(ip, sizeof(ip));
3433 connectHost.assign(ip);
3434 }
3435
3436 ClientHttpRequest *http = buildFakeRequest(Http::METHOD_CONNECT, connectHost, connectPort, payload);
3437
3438 http->calloutContext = new ClientRequestContext(http);
3439 HttpRequest::Pointer request = http->request;
3440 http->doCallouts();
3441 clientProcessRequestFinished(this, request);
3442 return true;
3443 }
3444
/// Builds a fake client transaction for the given method and destination,
/// wires it into the usual client stream, and seeds this connection's input
/// buffer with the given payload bytes. Used by fakeAConnectRequest() and
/// initiateTunneledRequest(). Returns the new transaction (never nil).
ClientHttpRequest *
ConnStateData::buildFakeRequest(Http::MethodType const method, SBuf &useHost, unsigned short usePort, const SBuf &payload)
{
    // create the transaction and its client-side stream context
    ClientHttpRequest *http = new ClientHttpRequest(this);
    Http::Stream *stream = new Http::Stream(clientConnection, http);

    // reply buffer used by the client stream
    StoreIOBuffer tempBuffer;
    tempBuffer.data = stream->reqbuf;
    tempBuffer.length = HTTP_REQBUF_SZ;

    // connect the reply generator (clientReplyContext) to the socket-side
    // recipient, as for a normally parsed request
    ClientStreamData newServer = new clientReplyContext(http);
    ClientStreamData newClient = stream;
    clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
                     clientReplyStatus, newServer, clientSocketRecipient,
                     clientSocketDetach, newClient, tempBuffer);

    stream->flags.parsed_ok = 1; // Do we need it?
    stream->mayUseConnection(true);

    // limit the lifetime of this client connection
    AsyncCall::Pointer timeoutCall = commCbCall(5, 4, "clientLifetimeTimeout",
                                     CommTimeoutCbPtrFun(clientLifetimeTimeout, stream->http));
    commSetConnTimeout(clientConnection, Config.Timeout.lifetime, timeoutCall);

    stream->registerWithConn();

    MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initClient);
    mx->tcpClient = clientConnection;
    // Setup Http::Request object. Maybe should be replaced by a call to (modified)
    // clientProcessRequest
    HttpRequest::Pointer request = new HttpRequest(mx);
    // METHOD_NONE requests get an authority-form URL; others get http://
    AnyP::ProtocolType proto = (method == Http::METHOD_NONE) ? AnyP::PROTO_AUTHORITY_FORM : AnyP::PROTO_HTTP;
    request->url.setScheme(proto, nullptr);
    request->method = method;
    request->url.host(useHost.c_str());
    request->url.port(usePort);

    http->uri = SBufToCstring(request->effectiveRequestUri());
    http->initRequest(request.getRaw());

    request->manager(this, http->al);

    if (proto == AnyP::PROTO_HTTP)
        request->header.putStr(Http::HOST, useHost.c_str());

    // record whether the client bytes arrived over TLS
    request->sources |= ((switchedToHttps() || port->transport.protocol == AnyP::PROTO_HTTPS) ? HttpMsg::srcHttps : HttpMsg::srcHttp);
#if USE_AUTH
    if (getAuth())
        request->auth_user_request = getAuth();
#endif

    // preserve the already-received client bytes for the fake transaction
    inBuf = payload;
    flags.readMore = false;

    return http;
}
3500
3501 /// check FD after clientHttp[s]ConnectionOpened, adjust HttpSockets as needed
3502 static bool
OpenedHttpSocket(const Comm::ConnectionPointer & c,const Ipc::FdNoteId portType)3503 OpenedHttpSocket(const Comm::ConnectionPointer &c, const Ipc::FdNoteId portType)
3504 {
3505 if (!Comm::IsConnOpen(c)) {
3506 Must(NHttpSockets > 0); // we tried to open some
3507 --NHttpSockets; // there will be fewer sockets than planned
3508 Must(HttpSockets[NHttpSockets] < 0); // no extra fds received
3509
3510 if (!NHttpSockets) // we could not open any listen sockets at all
3511 fatalf("Unable to open %s",FdNote(portType));
3512
3513 return false;
3514 }
3515 return true;
3516 }
3517
3518 /// find any unused HttpSockets[] slot and store fd there or return false
3519 static bool
AddOpenedHttpSocket(const Comm::ConnectionPointer & conn)3520 AddOpenedHttpSocket(const Comm::ConnectionPointer &conn)
3521 {
3522 bool found = false;
3523 for (int i = 0; i < NHttpSockets && !found; ++i) {
3524 if ((found = HttpSockets[i] < 0))
3525 HttpSockets[i] = conn->fd;
3526 }
3527 return found;
3528 }
3529
/// Opens one listening socket per configured http(s)_port line, up to
/// MAXTCPLISTENPORTS, delegating the actual open to IPC and resuming in
/// clientListenerConnectionOpened() when each socket becomes active.
static void
clientHttpConnectionsOpen(void)
{
    for (AnyP::PortCfgPointer s = HttpPortList; s != NULL; s = s->next) {
        const SBuf &scheme = AnyP::UriScheme(s->transport.protocol).image();

        // enforce the compile-time cap on the number of listening ports
        if (MAXTCPLISTENPORTS == NHttpSockets) {
            debugs(1, DBG_IMPORTANT, "WARNING: You have too many '" << scheme << "_port' lines.");
            debugs(1, DBG_IMPORTANT, " The limit is " << MAXTCPLISTENPORTS << " HTTP ports.");
            continue;
        }

#if USE_OPENSSL
        if (s->flags.tunnelSslBumping) {
            // bumping requires an ssl_bump ACL to decide what to bump
            if (!Config.accessList.ssl_bump) {
                debugs(33, DBG_IMPORTANT, "WARNING: No ssl_bump configured. Disabling ssl-bump on " << scheme << "_port " << s->s);
                s->flags.tunnelSslBumping = false;
            }
            // bumping needs either a static TLS context or on-the-fly certificates
            if (!s->secure.staticContext && !s->secure.generateHostCertificates) {
                debugs(1, DBG_IMPORTANT, "Will not bump SSL at " << scheme << "_port " << s->s << " due to TLS initialization failure.");
                s->flags.tunnelSslBumping = false;
                if (s->transport.protocol == AnyP::PROTO_HTTP)
                    s->secure.encryptTransport = false;
            }
            if (s->flags.tunnelSslBumping) {
                // Create ssl_ctx cache for this port.
                Ssl::TheGlobalContextStorage.addLocalStorage(s->s, s->secure.dynamicCertMemCacheSize);
            }
        }
#endif

        // a TLS port without a usable TLS context cannot serve anybody
        if (s->secure.encryptTransport && !s->secure.staticContext) {
            debugs(1, DBG_CRITICAL, "ERROR: Ignoring " << scheme << "_port " << s->s << " due to TLS context initialization failure.");
            continue;
        }

        // Fill out a Comm::Connection which IPC will open as a listener for us
        // then pass back when active so we can start a TcpAcceptor subscription.
        s->listenConn = new Comm::Connection;
        s->listenConn->local = s->s;

        s->listenConn->flags = COMM_NONBLOCKING | (s->flags.tproxyIntercept ? COMM_TRANSPARENT : 0) |
                               (s->flags.natIntercept ? COMM_INTERCEPTION : 0);

        typedef CommCbFunPtrCallT<CommAcceptCbPtrFun> AcceptCall;
        if (s->transport.protocol == AnyP::PROTO_HTTP) {
            // setup the subscriptions such that new connections accepted by listenConn are handled by HTTP
            RefCount<AcceptCall> subCall = commCbCall(5, 5, "httpAccept", CommAcceptCbPtrFun(httpAccept, CommAcceptCbParams(NULL)));
            Subscription::Pointer sub = new CallSubscription<AcceptCall>(subCall);

            AsyncCall::Pointer listenCall = asyncCall(33,2, "clientListenerConnectionOpened",
                                            ListeningStartedDialer(&clientListenerConnectionOpened, s, Ipc::fdnHttpSocket, sub));
            Ipc::StartListening(SOCK_STREAM, IPPROTO_TCP, s->listenConn, Ipc::fdnHttpSocket, listenCall);

        } else if (s->transport.protocol == AnyP::PROTO_HTTPS) {
            // setup the subscriptions such that new connections accepted by listenConn are handled by HTTPS
            RefCount<AcceptCall> subCall = commCbCall(5, 5, "httpsAccept", CommAcceptCbPtrFun(httpsAccept, CommAcceptCbParams(NULL)));
            Subscription::Pointer sub = new CallSubscription<AcceptCall>(subCall);

            AsyncCall::Pointer listenCall = asyncCall(33, 2, "clientListenerConnectionOpened",
                                            ListeningStartedDialer(&clientListenerConnectionOpened,
                                                    s, Ipc::fdnHttpsSocket, sub));
            Ipc::StartListening(SOCK_STREAM, IPPROTO_TCP, s->listenConn, Ipc::fdnHttpsSocket, listenCall);
        }

        HttpSockets[NHttpSockets] = -1; // set in clientListenerConnectionOpened
        ++NHttpSockets;
    }
}
3599
3600 void
clientStartListeningOn(AnyP::PortCfgPointer & port,const RefCount<CommCbFunPtrCallT<CommAcceptCbPtrFun>> & subCall,const Ipc::FdNoteId fdNote)3601 clientStartListeningOn(AnyP::PortCfgPointer &port, const RefCount< CommCbFunPtrCallT<CommAcceptCbPtrFun> > &subCall, const Ipc::FdNoteId fdNote)
3602 {
3603 // Fill out a Comm::Connection which IPC will open as a listener for us
3604 port->listenConn = new Comm::Connection;
3605 port->listenConn->local = port->s;
3606 port->listenConn->flags =
3607 COMM_NONBLOCKING |
3608 (port->flags.tproxyIntercept ? COMM_TRANSPARENT : 0) |
3609 (port->flags.natIntercept ? COMM_INTERCEPTION : 0);
3610
3611 // route new connections to subCall
3612 typedef CommCbFunPtrCallT<CommAcceptCbPtrFun> AcceptCall;
3613 Subscription::Pointer sub = new CallSubscription<AcceptCall>(subCall);
3614 AsyncCall::Pointer listenCall =
3615 asyncCall(33, 2, "clientListenerConnectionOpened",
3616 ListeningStartedDialer(&clientListenerConnectionOpened,
3617 port, fdNote, sub));
3618 Ipc::StartListening(SOCK_STREAM, IPPROTO_TCP, port->listenConn, fdNote, listenCall);
3619
3620 assert(NHttpSockets < MAXTCPLISTENPORTS);
3621 HttpSockets[NHttpSockets] = -1;
3622 ++NHttpSockets;
3623 }
3624
/// process clientHttpConnectionsOpen result
/// \param s the port configuration whose listener IPC attempted to open
/// \param portTypeNote identifies the listener kind for logging and fd notes
/// \param sub the accept handler to run for each newly accepted connection
static void
clientListenerConnectionOpened(AnyP::PortCfgPointer &s, const Ipc::FdNoteId portTypeNote, const Subscription::Pointer &sub)
{
    Must(s != NULL);

    // bail if the socket did not open (fatal when no port could open at all)
    if (!OpenedHttpSocket(s->listenConn, portTypeNote))
        return;

    Must(Comm::IsConnOpen(s->listenConn));

    // TCP: setup a job to handle accept() with subscribed handler
    AsyncJob::Start(new Comm::TcpAcceptor(s, FdNote(portTypeNote), sub));

    debugs(1, DBG_IMPORTANT, "Accepting " <<
           (s->flags.natIntercept ? "NAT intercepted " : "") <<
           (s->flags.tproxyIntercept ? "TPROXY intercepted " : "") <<
           (s->flags.tunnelSslBumping ? "SSL bumped " : "") <<
           (s->flags.accelSurrogate ? "reverse-proxy " : "")
           << FdNote(portTypeNote) << " connections at "
           << s->listenConn);

    Must(AddOpenedHttpSocket(s->listenConn)); // otherwise, we have received a fd we did not ask for

#if USE_SYSTEMD
    // When the very first port opens, tell systemd we are able to serve connections.
    // Subsequent sd_notify() calls, including calls during reconfiguration,
    // do nothing because the first call parameter is 1.
    // XXX: Send the notification only after opening all configured ports.
    if (opt_foreground || opt_no_daemon) {
        const auto result = sd_notify(1, "READY=1");
        if (result < 0) {
            debugs(1, DBG_IMPORTANT, "WARNING: failed to send start-up notification to systemd" <<
                   Debug::Extra << "sd_notify() error: " << xstrerr(-result));
        }
    }
#endif
}
3663
/// Opens all configured client-facing listening ports (HTTP, HTTPS, and FTP).
void
clientOpenListenSockets(void)
{
    clientHttpConnectionsOpen();
    Ftp::StartListening();

    // refuse to run with nothing to listen on
    if (NHttpSockets < 1)
        fatal("No HTTP, HTTPS, or FTP ports configured");
}
3673
3674 void
clientConnectionsClose()3675 clientConnectionsClose()
3676 {
3677 for (AnyP::PortCfgPointer s = HttpPortList; s != NULL; s = s->next) {
3678 if (s->listenConn != NULL) {
3679 debugs(1, DBG_IMPORTANT, "Closing HTTP(S) port " << s->listenConn->local);
3680 s->listenConn->close();
3681 s->listenConn = NULL;
3682 }
3683 }
3684
3685 Ftp::StopListening();
3686
3687 // TODO see if we can drop HttpSockets array entirely */
3688 for (int i = 0; i < NHttpSockets; ++i) {
3689 HttpSockets[i] = -1;
3690 }
3691
3692 NHttpSockets = 0;
3693 }
3694
/// Matches the request against a cached object that may vary on request
/// headers. Returns VARY_NONE (object does not vary), VARY_MATCH (the
/// request's vary key matches the stored one), VARY_OTHER (a new vary key was
/// computed; continue the lookup), or VARY_CANCEL (variance cannot be handled).
int
varyEvaluateMatch(StoreEntry * entry, HttpRequest * request)
{
    SBuf vary(request->vary_headers);
    int has_vary = entry->getReply()->header.has(Http::HdrType::VARY);
#if X_ACCELERATOR_VARY

    // also honor the accelerator-specific variant of the Vary header
    has_vary |=
        entry->getReply()->header.has(Http::HdrType::HDR_X_ACCELERATOR_VARY);
#endif

    if (!has_vary || entry->mem_obj->vary_headers.isEmpty()) {
        if (!vary.isEmpty()) {
            /* Oops... something odd is going on here.. */
            // we computed a vary key earlier, but the object found now does not vary
            debugs(33, DBG_IMPORTANT, "varyEvaluateMatch: Oops. Not a Vary object on second attempt, '" <<
                   entry->mem_obj->urlXXX() << "' '" << vary << "'");
            request->vary_headers.clear();
            return VARY_CANCEL;
        }

        if (!has_vary) {
            /* This is not a varying object */
            return VARY_NONE;
        }

        /* virtual "vary" object found. Calculate the vary key and
         * continue the search
         */
        vary = httpMakeVaryMark(request, entry->getReply());

        if (!vary.isEmpty()) {
            request->vary_headers = vary;
            return VARY_OTHER;
        } else {
            /* Ouch.. we cannot handle this kind of variance */
            /* XXX This cannot really happen, but just to be complete */
            return VARY_CANCEL;
        }
    } else {
        // the stored object carries a vary key; compute ours if not done yet
        if (vary.isEmpty()) {
            vary = httpMakeVaryMark(request, entry->getReply());

            if (!vary.isEmpty())
                request->vary_headers = vary;
        }

        if (vary.isEmpty()) {
            /* Ouch.. we cannot handle this kind of variance */
            /* XXX This cannot really happen, but just to be complete */
            return VARY_CANCEL;
        } else if (vary.cmp(entry->mem_obj->vary_headers) == 0) {
            return VARY_MATCH;
        } else {
            /* Oops.. we have already been here and still haven't
             * found the requested variant. Bail out
             */
            debugs(33, DBG_IMPORTANT, "varyEvaluateMatch: Oops. Not a Vary match on second attempt, '" <<
                   entry->mem_obj->urlXXX() << "' '" << vary << "'");
            return VARY_CANCEL;
        }
    }
}
3757
3758 ACLFilledChecklist *
clientAclChecklistCreate(const acl_access * acl,ClientHttpRequest * http)3759 clientAclChecklistCreate(const acl_access * acl, ClientHttpRequest * http)
3760 {
3761 ConnStateData * conn = http->getConn();
3762 ACLFilledChecklist *ch = new ACLFilledChecklist(acl, http->request,
3763 cbdataReferenceValid(conn) && conn != NULL && conn->clientConnection != NULL ? conn->clientConnection->rfc931 : dash_str);
3764 ch->al = http->al;
3765 ch->syncAle(http->request, http->log_uri);
3766 /*
3767 * hack for ident ACL. It needs to get full addresses, and a place to store
3768 * the ident result on persistent connections...
3769 */
3770 /* connection oriented auth also needs these two lines for it's operation. */
3771 return ch;
3772 }
3773
3774 bool
transparent() const3775 ConnStateData::transparent() const
3776 {
3777 return clientConnection != NULL && (clientConnection->flags & (COMM_TRANSPARENT|COMM_INTERCEPTION));
3778 }
3779
3780 BodyPipe::Pointer
expectRequestBody(int64_t size)3781 ConnStateData::expectRequestBody(int64_t size)
3782 {
3783 bodyPipe = new BodyPipe(this);
3784 if (size >= 0)
3785 bodyPipe->setBodySize(size);
3786 else
3787 startDechunkingRequest();
3788 return bodyPipe;
3789 }
3790
3791 int64_t
mayNeedToReadMoreBody() const3792 ConnStateData::mayNeedToReadMoreBody() const
3793 {
3794 if (!bodyPipe)
3795 return 0; // request without a body or read/produced all body bytes
3796
3797 if (!bodyPipe->bodySizeKnown())
3798 return -1; // probably need to read more, but we cannot be sure
3799
3800 const int64_t needToProduce = bodyPipe->unproducedSize();
3801 const int64_t haveAvailable = static_cast<int64_t>(inBuf.length());
3802
3803 if (needToProduce <= haveAvailable)
3804 return 0; // we have read what we need (but are waiting for pipe space)
3805
3806 return needToProduce - haveAvailable;
3807 }
3808
3809 void
stopReceiving(const char * error)3810 ConnStateData::stopReceiving(const char *error)
3811 {
3812 debugs(33, 4, HERE << "receiving error (" << clientConnection << "): " << error <<
3813 "; old sending error: " <<
3814 (stoppedSending() ? stoppedSending_ : "none"));
3815
3816 if (const char *oldError = stoppedReceiving()) {
3817 debugs(33, 3, HERE << "already stopped receiving: " << oldError);
3818 return; // nothing has changed as far as this connection is concerned
3819 }
3820
3821 stoppedReceiving_ = error;
3822
3823 if (const char *sendError = stoppedSending()) {
3824 debugs(33, 3, HERE << "closing because also stopped sending: " << sendError);
3825 clientConnection->close();
3826 }
3827 }
3828
3829 void
expectNoForwarding()3830 ConnStateData::expectNoForwarding()
3831 {
3832 if (bodyPipe != NULL) {
3833 debugs(33, 4, HERE << "no consumer for virgin body " << bodyPipe->status());
3834 bodyPipe->expectNoConsumption();
3835 }
3836 }
3837
3838 /// initialize dechunking state
3839 void
startDechunkingRequest()3840 ConnStateData::startDechunkingRequest()
3841 {
3842 Must(bodyPipe != NULL);
3843 debugs(33, 5, HERE << "start dechunking" << bodyPipe->status());
3844 assert(!bodyParser);
3845 bodyParser = new Http1::TeChunkedParser;
3846 }
3847
3848 /// put parsed content into input buffer and clean up
3849 void
finishDechunkingRequest(bool withSuccess)3850 ConnStateData::finishDechunkingRequest(bool withSuccess)
3851 {
3852 debugs(33, 5, HERE << "finish dechunking: " << withSuccess);
3853
3854 if (bodyPipe != NULL) {
3855 debugs(33, 7, HERE << "dechunked tail: " << bodyPipe->status());
3856 BodyPipe::Pointer myPipe = bodyPipe;
3857 stopProducingFor(bodyPipe, withSuccess); // sets bodyPipe->bodySize()
3858 Must(!bodyPipe); // we rely on it being nil after we are done with body
3859 if (withSuccess) {
3860 Must(myPipe->bodySizeKnown());
3861 Http::StreamPointer context = pipeline.front();
3862 if (context != NULL && context->http && context->http->request)
3863 context->http->request->setContentLength(myPipe->bodySize());
3864 }
3865 }
3866
3867 delete bodyParser;
3868 bodyParser = NULL;
3869 }
3870
3871 // XXX: this is an HTTP/1-only operation
3872 void
sendControlMsg(HttpControlMsg msg)3873 ConnStateData::sendControlMsg(HttpControlMsg msg)
3874 {
3875 if (!isOpen()) {
3876 debugs(33, 3, HERE << "ignoring 1xx due to earlier closure");
3877 return;
3878 }
3879
3880 // HTTP/1 1xx status messages are only valid when there is a transaction to trigger them
3881 if (!pipeline.empty()) {
3882 HttpReply::Pointer rep(msg.reply);
3883 Must(rep);
3884 // remember the callback
3885 cbControlMsgSent = msg.cbSuccess;
3886
3887 typedef CommCbMemFunT<HttpControlMsgSink, CommIoCbParams> Dialer;
3888 AsyncCall::Pointer call = JobCallback(33, 5, Dialer, this, HttpControlMsgSink::wroteControlMsg);
3889
3890 if (!writeControlMsgAndCall(rep.getRaw(), call)) {
3891 // but still inform the caller (so it may resume its operation)
3892 doneWithControlMsg();
3893 }
3894 return;
3895 }
3896
3897 debugs(33, 3, HERE << " closing due to missing context for 1xx");
3898 clientConnection->close();
3899 }
3900
3901 void
doneWithControlMsg()3902 ConnStateData::doneWithControlMsg()
3903 {
3904 HttpControlMsgSink::doneWithControlMsg();
3905
3906 if (Http::StreamPointer deferredRequest = pipeline.front()) {
3907 debugs(33, 3, clientConnection << ": calling PushDeferredIfNeeded after control msg wrote");
3908 ClientSocketContextPushDeferredIfNeeded(deferredRequest, this);
3909 }
3910 }
3911
/// Our close handler called by Comm when the pinned connection is closed
void
ConnStateData::clientPinnedConnectionClosed(const CommCloseCbParams &io)
{
    // FwdState might repin a failed connection sooner than this close
    // callback is called for the failed connection.
    assert(pinning.serverConnection == io.conn);
    pinning.closeHandler = NULL; // Comm unregisters handlers before calling
    const bool sawZeroReply = pinning.zeroReply; // reset when unpinning
    pinning.serverConnection->noteClosure();
    unpinConnection(false); // do not re-close the already-closing connection

    // also close the client side when the pinned server sent a "zero reply"
    if (sawZeroReply && clientConnection != NULL) {
        debugs(33, 3, "Closing client connection on pinned zero reply.");
        clientConnection->close();
    }

}
3930
/// Pins a connection that is still in use by a transaction. Unlike
/// notePinnedConnectionBecameIdle(), does not start idle-read monitoring
/// (that monitoring is for idle pinned connections only).
void
ConnStateData::pinBusyConnection(const Comm::ConnectionPointer &pinServer, const HttpRequest::Pointer &request)
{
    pinConnection(pinServer, *request);
}
3936
/// Pins the given (now idle) server connection and resumes processing of any
/// queued client requests that were waiting for the connection to free up.
void
ConnStateData::notePinnedConnectionBecameIdle(PinnedIdleContext pic)
{
    Must(pic.connection);
    Must(pic.request);
    pinConnection(pic.connection, *pic.request);

    // monitor pinned server connection for remote-end closures.
    startPinnedConnectionMonitoring();

    if (pipeline.empty())
        kick(); // in case clientParseRequests() was blocked by a busy pic.connection
}
3950
3951 /// Forward future client requests using the given server connection.
3952 void
pinConnection(const Comm::ConnectionPointer & pinServer,const HttpRequest & request)3953 ConnStateData::pinConnection(const Comm::ConnectionPointer &pinServer, const HttpRequest &request)
3954 {
3955 if (Comm::IsConnOpen(pinning.serverConnection) &&
3956 pinning.serverConnection->fd == pinServer->fd) {
3957 debugs(33, 3, "already pinned" << pinServer);
3958 return;
3959 }
3960
3961 unpinConnection(true); // closes pinned connection, if any, and resets fields
3962
3963 pinning.serverConnection = pinServer;
3964
3965 debugs(33, 3, HERE << pinning.serverConnection);
3966
3967 Must(pinning.serverConnection != NULL);
3968
3969 const char *pinnedHost = "[unknown]";
3970 pinning.host = xstrdup(request.url.host());
3971 pinning.port = request.url.port();
3972 pinnedHost = pinning.host;
3973 pinning.pinned = true;
3974 if (CachePeer *aPeer = pinServer->getPeer())
3975 pinning.peer = cbdataReference(aPeer);
3976 pinning.auth = request.flags.connectionAuth;
3977 char stmp[MAX_IPSTRLEN];
3978 char desc[FD_DESC_SZ];
3979 snprintf(desc, FD_DESC_SZ, "%s pinned connection for %s (%d)",
3980 (pinning.auth || !pinning.peer) ? pinnedHost : pinning.peer->name,
3981 clientConnection->remote.toUrl(stmp,MAX_IPSTRLEN),
3982 clientConnection->fd);
3983 fd_note(pinning.serverConnection->fd, desc);
3984
3985 typedef CommCbMemFunT<ConnStateData, CommCloseCbParams> Dialer;
3986 pinning.closeHandler = JobCallback(33, 5,
3987 Dialer, this, ConnStateData::clientPinnedConnectionClosed);
3988 // remember the pinned connection so that cb does not unpin a fresher one
3989 typedef CommCloseCbParams Params;
3990 Params ¶ms = GetCommParams<Params>(pinning.closeHandler);
3991 params.conn = pinning.serverConnection;
3992 comm_add_close_handler(pinning.serverConnection->fd, pinning.closeHandler);
3993 }
3994
/// [re]start monitoring pinned connection for peer closures so that we can
/// propagate them to an _idle_ client pinned to that peer
void
ConnStateData::startPinnedConnectionMonitoring()
{
    if (pinning.readHandler != NULL)
        return; // already monitoring

    // schedule a read; clientPinnedConnectionRead() handles whatever arrives
    typedef CommCbMemFunT<ConnStateData, CommIoCbParams> Dialer;
    pinning.readHandler = JobCallback(33, 3,
                                      Dialer, this, ConnStateData::clientPinnedConnectionRead);
    Comm::Read(pinning.serverConnection, pinning.readHandler);
}
4008
4009 void
stopPinnedConnectionMonitoring()4010 ConnStateData::stopPinnedConnectionMonitoring()
4011 {
4012 if (pinning.readHandler != NULL) {
4013 Comm::ReadCancel(pinning.serverConnection->fd, pinning.readHandler);
4014 pinning.readHandler = NULL;
4015 }
4016 }
4017
#if USE_OPENSSL
/// Handles a read-ready event on an idle, pinned, TLS server connection.
/// Returns true when monitoring was restarted (the event was absorbed);
/// returns false when the caller should treat the connection as unusable.
bool
ConnStateData::handleIdleClientPinnedTlsRead()
{
    // A ready-for-reading connection means that the TLS server either closed
    // the connection, sent us some unexpected HTTP data, or started TLS
    // renegotiations. We should close the connection except for the last case.

    Must(pinning.serverConnection != nullptr);
    auto ssl = fd_table[pinning.serverConnection->fd].ssl.get();
    if (!ssl)
        return false; // not a TLS connection after all

    // probe with a one-byte read to learn what the server sent
    char buf[1];
    const int readResult = SSL_read(ssl, buf, sizeof(buf));

    if (readResult > 0 || SSL_pending(ssl) > 0) {
        // unexpected application data on a connection we consider idle
        debugs(83, 2, pinning.serverConnection << " TLS application data read");
        return false;
    }

    switch(const int error = SSL_get_error(ssl, readResult)) {
    case SSL_ERROR_WANT_WRITE:
        debugs(83, DBG_IMPORTANT, pinning.serverConnection << " TLS SSL_ERROR_WANT_WRITE request for idle pinned connection");
        // fall through to restart monitoring, for now
    case SSL_ERROR_NONE:
    case SSL_ERROR_WANT_READ:
        // TLS-level activity (e.g., a renegotiation) with no application
        // data for us; keep watching the connection
        startPinnedConnectionMonitoring();
        return true;

    default:
        // read failure or connection closure
        debugs(83, 2, pinning.serverConnection << " TLS error: " << error);
        return false;
    }

    // not reached
    return true;
}
#endif
4057
/// Our read handler called by Comm when the server either closes an idle pinned connection or
/// perhaps unexpectedly sends something on that idle (from Squid p.o.v.) connection.
void
ConnStateData::clientPinnedConnectionRead(const CommIoCbParams &io)
{
    pinning.readHandler = NULL; // Comm unregisters handlers before calling

    if (io.flag == Comm::ERR_CLOSING)
        return; // close handler will clean up

    Must(pinning.serverConnection == io.conn);

#if USE_OPENSSL
    // TLS-level activity (e.g., renegotiation) may be absorbed without
    // invalidating the pinned connection
    if (handleIdleClientPinnedTlsRead())
        return;
#endif

    const bool clientIsIdle = pipeline.empty();

    debugs(33, 3, "idle pinned " << pinning.serverConnection << " read " <<
           io.size << (clientIsIdle ? " with idle client" : ""));

    pinning.serverConnection->close();

    // If we are still sending data to the client, do not close now. When we are done sending,
    // ConnStateData::kick() checks pinning.serverConnection and will close.
    // However, if we are idle, then we must close to inform the idle client and minimize races.
    if (clientIsIdle && clientConnection != NULL)
        clientConnection->close();
}
4088
4089 const Comm::ConnectionPointer
validatePinnedConnection(HttpRequest * request,const CachePeer * aPeer)4090 ConnStateData::validatePinnedConnection(HttpRequest *request, const CachePeer *aPeer)
4091 {
4092 debugs(33, 7, HERE << pinning.serverConnection);
4093
4094 bool valid = true;
4095 if (!Comm::IsConnOpen(pinning.serverConnection))
4096 valid = false;
4097 else if (pinning.auth && pinning.host && request && strcasecmp(pinning.host, request->url.host()) != 0)
4098 valid = false;
4099 else if (request && pinning.port != request->url.port())
4100 valid = false;
4101 else if (pinning.peer && !cbdataReferenceValid(pinning.peer))
4102 valid = false;
4103 else if (aPeer != pinning.peer)
4104 valid = false;
4105
4106 if (!valid) {
4107 /* The pinning info is not safe, remove any pinning info */
4108 unpinConnection(true);
4109 }
4110
4111 return pinning.serverConnection;
4112 }
4113
4114 Comm::ConnectionPointer
borrowPinnedConnection(HttpRequest * request,const CachePeer * aPeer)4115 ConnStateData::borrowPinnedConnection(HttpRequest *request, const CachePeer *aPeer)
4116 {
4117 debugs(33, 7, pinning.serverConnection);
4118 if (validatePinnedConnection(request, aPeer) != NULL)
4119 stopPinnedConnectionMonitoring();
4120
4121 return pinning.serverConnection; // closed if validation failed
4122 }
4123
/// Undoes pinConnection(): forgets the pinned server connection, cancels its
/// close/read handlers, and optionally closes its socket.
/// \param andClose whether to close the server-side socket as well
void
ConnStateData::unpinConnection(const bool andClose)
{
    debugs(33, 3, HERE << pinning.serverConnection);

    if (pinning.peer)
        cbdataReferenceDone(pinning.peer);

    if (Comm::IsConnOpen(pinning.serverConnection)) {
        // stop reacting to closures of the connection we are letting go
        if (pinning.closeHandler != NULL) {
            comm_remove_close_handler(pinning.serverConnection->fd, pinning.closeHandler);
            pinning.closeHandler = NULL;
        }

        stopPinnedConnectionMonitoring();

        // close the server side socket if requested
        if (andClose)
            pinning.serverConnection->close();
        pinning.serverConnection = NULL;
    }

    safe_free(pinning.host);

    pinning.zeroReply = false;

    /* NOTE: pinning.pinned should be kept. This combined with fd == -1 at the end of a request indicates that the host
     * connection has gone away */
}
4153
/// Arranges for this connection to be logged when no request object is going
/// to log it (e.g., the client sent some bytes but no parsable request).
void
ConnStateData::checkLogging()
{
    // if we are parsing request body, its request is responsible for logging
    if (bodyPipe)
        return;

    // a request currently using this connection is responsible for logging
    if (!pipeline.empty() && pipeline.back()->mayUseConnection())
        return;

    /* Either we are waiting for the very first transaction, or
     * we are done with the Nth transaction and are waiting for N+1st.
     * XXX: We assume that if anything was added to inBuf, then it could
     * only be consumed by actions already covered by the above checks.
     */

    // do not log connections that closed after a transaction (it is normal)
    // TODO: access_log needs ACLs to match received-no-bytes connections
    if (pipeline.nrequests && inBuf.isEmpty())
        return;

    /* Create a temporary ClientHttpRequest object. Its destructor will log. */
    ClientHttpRequest http(this);
    http.req_sz = inBuf.length();
    // XXX: Or we died while waiting for the pinned connection to become idle.
    http.setErrorUri("error:transaction-end-before-headers");
}
4182
/// Whether the bytes already received from the client should be preserved
/// so that clientTunnelOnError() may later tunnel them on parsing failures.
bool
ConnStateData::shouldPreserveClientData() const
{
    // PROXY protocol bytes are meant for us and, hence, cannot be tunneled
    if (needProxyProtocolHeader_)
        return false;

    // If our decision here is negative, configuration changes are irrelevant.
    // Otherwise, clientTunnelOnError() rechecks configuration before tunneling.
    if (!Config.accessList.on_unsupported_protocol)
        return false;

    // TODO: Figure out whether/how we can support FTP tunneling.
    if (port->transport.protocol == AnyP::PROTO_FTP)
        return false;

#if USE_OPENSSL
    if (parsingTlsHandshake)
        return true;

    // the 1st HTTP request on a bumped connection
    if (!parsedBumpedRequestCount && switchedToHttps())
        return true;
#endif

    // the 1st HTTP(S) request on a connection to an intercepting port
    if (!pipeline.nrequests && transparent())
        return true;

    return false;
}
4214
4215 std::ostream &
operator <<(std::ostream & os,const ConnStateData::PinnedIdleContext & pic)4216 operator <<(std::ostream &os, const ConnStateData::PinnedIdleContext &pic)
4217 {
4218 return os << pic.connection << ", request=" << pic.request;
4219 }
4220
4221