1 /*
2 * Copyright (C) 1996-2021 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 11 Hypertext Transfer Protocol (HTTP) */
10
11 /*
12 * Anonymizing patch by lutz@as-node.jena.thur.de
 * have a look into http-anon.c to get more information.
14 */
15
16 #include "squid.h"
17 #include "acl/FilledChecklist.h"
18 #include "base/AsyncJobCalls.h"
19 #include "base/TextException.h"
20 #include "base64.h"
21 #include "CachePeer.h"
22 #include "client_side.h"
23 #include "comm/Connection.h"
24 #include "comm/Read.h"
25 #include "comm/Write.h"
26 #include "CommRead.h"
27 #include "err_detail_type.h"
28 #include "errorpage.h"
29 #include "fd.h"
30 #include "fde.h"
31 #include "globals.h"
32 #include "http.h"
33 #include "http/one/ResponseParser.h"
34 #include "http/one/TeChunkedParser.h"
35 #include "http/Stream.h"
36 #include "HttpControlMsg.h"
37 #include "HttpHdrCc.h"
38 #include "HttpHdrContRange.h"
39 #include "HttpHdrSc.h"
40 #include "HttpHdrScTarget.h"
41 #include "HttpHeaderTools.h"
42 #include "HttpReply.h"
43 #include "HttpRequest.h"
44 #include "log/access_log.h"
45 #include "MemBuf.h"
46 #include "MemObject.h"
47 #include "neighbors.h"
48 #include "peer_proxy_negotiate_auth.h"
49 #include "profiler/Profiler.h"
50 #include "refresh.h"
51 #include "RefreshPattern.h"
52 #include "rfc1738.h"
53 #include "SquidConfig.h"
54 #include "SquidTime.h"
55 #include "StatCounters.h"
56 #include "Store.h"
57 #include "StrList.h"
58 #include "tools.h"
59 #include "util.h"
60
61 #if USE_AUTH
62 #include "auth/UserRequest.h"
63 #endif
64 #if USE_DELAY_POOLS
65 #include "DelayPools.h"
66 #endif
67
// register HttpStateData with the cbdata reference-counting machinery
CBDATA_CLASS_INIT(HttpStateData);

// canonical HTTP line terminator used when assembling requests
static const char *const crlf = "\r\n";

static void httpMaybeRemovePublic(StoreEntry *, Http::StatusCode);
static void copyOneHeaderFromClientsideRequestToUpstreamRequest(const HttpHeaderEntry *e, const String strConnection, const HttpRequest * request,
        HttpHeader * hdr_out, const int we_do_ranges, const Http::StateFlags &);
75
/// Starts an HTTP transaction on the server connection selected by FwdState.
/// Registers a close handler so this job stops when that connection closes.
HttpStateData::HttpStateData(FwdState *theFwdState) :
    AsyncJob("HttpStateData"),
    Client(theFwdState),
    lastChunk(0),
    httpChunkDecoder(NULL),
    payloadSeen(0),
    payloadTruncated(0),
    sawDateGoBack(false)
{
    debugs(11,5,HERE << "HttpStateData " << this << " created");
    ignoreCacheControl = false;
    surrogateNoStore = false;
    serverConnection = fwd->serverConnection();

    // keep a counted cbdata reference to the cache_peer (if any) so that
    // stats updates and option checks below remain safe
    if (fwd->serverConnection() != NULL)
        _peer = cbdataReference(fwd->serverConnection()->getPeer()); /* might be NULL */

    if (_peer) {
        request->flags.proxying = true;
        /*
         * This NEIGHBOR_PROXY_ONLY check probably shouldn't be here.
         * We might end up getting the object from somewhere else if,
         * for example, the request to this neighbor fails.
         */
        if (_peer->options.proxy_only)
            entry->releaseRequest(true);

#if USE_DELAY_POOLS
        entry->setNoDelay(_peer->options.no_delay);
#endif
    }

    /*
     * register the handler to free HTTP state data when the FD closes
     */
    typedef CommCbMemFunT<HttpStateData, CommCloseCbParams> Dialer;
    closeHandler = JobCallback(9, 5, Dialer, this, HttpStateData::httpStateConnClosed);
    comm_add_close_handler(serverConnection->fd, closeHandler);
}
115
~HttpStateData()116 HttpStateData::~HttpStateData()
117 {
118 /*
119 * don't forget that ~Client() gets called automatically
120 */
121
122 if (httpChunkDecoder)
123 delete httpChunkDecoder;
124
125 cbdataReferenceDone(_peer);
126
127 debugs(11,5, HERE << "HttpStateData " << this << " destroyed; " << serverConnection);
128 }
129
/// the connection this transaction reads the server response from
const Comm::ConnectionPointer &
HttpStateData::dataConnection() const
{
    return serverConnection;
}
135
/// Comm close handler: the server connection is gone, so this job must end.
void
HttpStateData::httpStateConnClosed(const CommCloseCbParams &params)
{
    debugs(11, 5, "httpStateFree: FD " << params.fd << ", httpState=" << params.data);
    // do not notify FwdState on our way out; it watches the same closure
    doneWithFwd = "httpStateConnClosed()"; // assume FwdState is monitoring too
    mustStop("HttpStateData::httpStateConnClosed");
}
143
/// Comm timeout handler: reading the server response took too long.
/// Fails the transaction (if nothing was sent to the client yet) and quits.
void
HttpStateData::httpTimeout(const CommTimeoutCbParams &)
{
    debugs(11, 4, serverConnection << ": '" << entry->url() << "'");

    // report an error only while the entry is still being filled;
    // otherwise the client already has (part of) a response
    if (entry->store_status == STORE_PENDING) {
        fwd->fail(new ErrorState(ERR_READ_TIMEOUT, Http::scGatewayTimeout, fwd->request));
    }

    closeServer();
    mustStop("HttpStateData::httpTimeout");
}
156
157 static StoreEntry *
findPreviouslyCachedEntry(StoreEntry * newEntry)158 findPreviouslyCachedEntry(StoreEntry *newEntry) {
159 assert(newEntry->mem_obj);
160 return newEntry->mem_obj->request ?
161 storeGetPublicByRequest(newEntry->mem_obj->request) :
162 storeGetPublic(newEntry->mem_obj->storeId(), newEntry->mem_obj->method);
163 }
164
165 /// Remove an existing public store entry if the incoming response (to be
166 /// stored in a currently private entry) is going to invalidate it.
167 static void
httpMaybeRemovePublic(StoreEntry * e,Http::StatusCode status)168 httpMaybeRemovePublic(StoreEntry * e, Http::StatusCode status)
169 {
170 int remove = 0;
171 int forbidden = 0;
172
173 // If the incoming response already goes into a public entry, then there is
174 // nothing to remove. This protects ready-for-collapsing entries as well.
175 if (!EBIT_TEST(e->flags, KEY_PRIVATE))
176 return;
177
178 // If the new/incoming response cannot be stored, then it does not
179 // compete with the old stored response for the public key, and the
180 // old stored response should be left as is.
181 if (e->mem_obj->request && !e->mem_obj->request->flags.cachable)
182 return;
183
184 switch (status) {
185
186 case Http::scOkay:
187
188 case Http::scNonAuthoritativeInformation:
189
190 case Http::scMultipleChoices:
191
192 case Http::scMovedPermanently:
193
194 case Http::scFound:
195
196 case Http::scSeeOther:
197
198 case Http::scGone:
199
200 case Http::scNotFound:
201 remove = 1;
202
203 break;
204
205 case Http::scForbidden:
206
207 case Http::scMethodNotAllowed:
208 forbidden = 1;
209
210 break;
211
212 #if WORK_IN_PROGRESS
213
214 case Http::scUnauthorized:
215 forbidden = 1;
216
217 break;
218
219 #endif
220
221 default:
222 #if QUESTIONABLE
223 /*
224 * Any 2xx response should eject previously cached entities...
225 */
226
227 if (status >= 200 && status < 300)
228 remove = 1;
229
230 #endif
231
232 break;
233 }
234
235 if (!remove && !forbidden)
236 return;
237
238 StoreEntry *pe = findPreviouslyCachedEntry(e);
239
240 if (pe != NULL) {
241 assert(e != pe);
242 #if USE_HTCP
243 neighborsHtcpClear(e, e->mem_obj->request, e->mem_obj->method, HTCP_CLR_INVALIDATION);
244 #endif
245 pe->release(true);
246 }
247
248 /** \par
249 * Also remove any cached HEAD response in case the object has
250 * changed.
251 */
252 if (e->mem_obj->request)
253 pe = storeGetPublicByRequestMethod(e->mem_obj->request, Http::METHOD_HEAD);
254 else
255 pe = storeGetPublic(e->mem_obj->storeId(), Http::METHOD_HEAD);
256
257 if (pe != NULL) {
258 assert(e != pe);
259 #if USE_HTCP
260 neighborsHtcpClear(e, e->mem_obj->request, HttpRequestMethod(Http::METHOD_HEAD), HTCP_CLR_INVALIDATION);
261 #endif
262 pe->release(true);
263 }
264 }
265
/// Applies Surrogate-Control reply directives when this proxy acts as an
/// accelerator: may mark the entry non-cacheable, adjust reply expiry, and
/// disable regular Cache-Control processing for this reply.
void
HttpStateData::processSurrogateControl(HttpReply *reply)
{
    if (request->flags.accelerated && reply->surrogate_control) {
        // merge the directives that target our configured surrogate_id
        HttpHdrScTarget *sctusable = reply->surrogate_control->getMergedTarget(Config.Accel.surrogate_id);

        if (sctusable) {
            if (sctusable->hasNoStore() ||
                    (Config.onoff.surrogate_is_remote
                     && sctusable->noStoreRemote())) {
                surrogateNoStore = true;
                // Be conservative for now and make it non-shareable because
                // there is not enough information here to make the decision.
                entry->makePrivate(false);
            }

            /* The HttpHeader logic cannot tell if the header it's parsing is a reply to an
             * accelerated request or not...
             * Still, this is an abstraction breach. - RC
             */
            if (sctusable->hasMaxAge()) {
                // derive expiry from the smaller of max-age and max-stale
                if (sctusable->maxAge() < sctusable->maxStale())
                    reply->expires = reply->date + sctusable->maxAge();
                else
                    reply->expires = reply->date + sctusable->maxStale();

                /* And update the timestamps */
                entry->timestampsSet();
            }

            /* We ignore cache-control directives as per the Surrogate specification */
            ignoreCacheControl = true;

            delete sctusable;
        }
    }
}
303
/// Decides whether the final (possibly adapted) reply may be cached and/or
/// shared among collapsed clients, recording the winning reason inside the
/// given ReuseDecision. Consults Surrogate-Control results, request and
/// reply Cache-Control, authentication flags, Content-Type, and finally the
/// reply status code.
HttpStateData::ReuseDecision::Answers
HttpStateData::reusableReply(HttpStateData::ReuseDecision &decision)
{
    HttpReply const *rep = finalReply();
    HttpHeader const *hdr = &rep->header;
    const char *v;
#if USE_HTTP_VIOLATIONS

    const RefreshPattern *R = NULL;

    /* This strange looking define first looks up the refresh pattern
     * and then checks if the specified flag is set. The main purpose
     * of this is to simplify the refresh pattern lookup and USE_HTTP_VIOLATIONS
     * condition.
     * NOTE: it caches the lookup result in R, so the first use wins.
     */
#define REFRESH_OVERRIDE(flag) \
    ((R = (R ? R : refreshLimits(entry->mem_obj->storeId()))) , \
     (R && R->flags.flag))
#else
#define REFRESH_OVERRIDE(flag) 0
#endif

    if (EBIT_TEST(entry->flags, RELEASE_REQUEST))
        return decision.make(ReuseDecision::doNotCacheButShare, "the entry has been released");

    // RFC 7234 section 4: a cache MUST use the most recent response
    // (as determined by the Date header field)
    // TODO: whether such responses could be shareable?
    if (sawDateGoBack)
        return decision.make(ReuseDecision::reuseNot, "the response has an older date header");

    // Check for Surrogate/1.0 protocol conditions
    // NP: reverse-proxy traffic our parent server has instructed us never to cache
    if (surrogateNoStore)
        return decision.make(ReuseDecision::reuseNot, "Surrogate-Control:no-store");

    // RFC 2616: HTTP/1.1 Cache-Control conditions
    if (!ignoreCacheControl) {
        // XXX: check to see if the request headers alone were enough to prevent caching earlier
        // (ie no-store request header) no need to check those all again here if so.
        // for now we are not reliably doing that so we waste CPU re-checking request CC

        // RFC 2616 section 14.9.2 - MUST NOT cache any response with request CC:no-store
        if (request && request->cache_control && request->cache_control->hasNoStore() &&
                !REFRESH_OVERRIDE(ignore_no_store))
            return decision.make(ReuseDecision::reuseNot,
                                 "client request Cache-Control:no-store");

        // NP: request CC:no-cache only means cache READ is forbidden. STORE is permitted.
        if (rep->cache_control && rep->cache_control->hasNoCacheWithParameters()) {
            /* TODO: we are allowed to cache when no-cache= has parameters.
             * Provided we strip away any of the listed headers unless they are revalidated
             * successfully (ie, must revalidate AND these headers are prohibited on stale replies).
             * That is a bit tricky for squid right now so we avoid caching entirely.
             */
            return decision.make(ReuseDecision::reuseNot,
                                 "server reply Cache-Control:no-cache has parameters");
        }

        // NP: request CC:private is undefined. We ignore.
        // NP: other request CC flags are limiters on HIT/MISS. We don't care about here.

        // RFC 2616 section 14.9.2 - MUST NOT cache any response with CC:no-store
        if (rep->cache_control && rep->cache_control->hasNoStore() &&
                !REFRESH_OVERRIDE(ignore_no_store))
            return decision.make(ReuseDecision::reuseNot,
                                 "server reply Cache-Control:no-store");

        // RFC 2616 section 14.9.1 - MUST NOT cache any response with CC:private in a shared cache like Squid.
        // CC:private overrides CC:public when both are present in a response.
        // TODO: add a shared/private cache configuration possibility.
        if (rep->cache_control &&
                rep->cache_control->hasPrivate() &&
                !REFRESH_OVERRIDE(ignore_private)) {
            /* TODO: we are allowed to cache when private= has parameters.
             * Provided we strip away any of the listed headers unless they are revalidated
             * successfully (ie, must revalidate AND these headers are prohibited on stale replies).
             * That is a bit tricky for squid right now so we avoid caching entirely.
             */
            return decision.make(ReuseDecision::reuseNot,
                                 "server reply Cache-Control:private");
        }
    }

    // RFC 2068, sec 14.9.4 - MUST NOT cache any response with Authentication UNLESS certain CC controls are present
    // allow HTTP violations to IGNORE those controls (ie re-block caching Auth)
    if (request && (request->flags.auth || request->flags.authSent)) {
        if (!rep->cache_control)
            return decision.make(ReuseDecision::reuseNot,
                                 "authenticated and server reply missing Cache-Control");

        if (ignoreCacheControl)
            return decision.make(ReuseDecision::reuseNot,
                                 "authenticated and ignoring Cache-Control");

        bool mayStore = false;
        // HTTPbis pt6 section 3.2: a response CC:public is present
        if (rep->cache_control->hasPublic()) {
            debugs(22, 3, HERE << "Authenticated but server reply Cache-Control:public");
            mayStore = true;

            // HTTPbis pt6 section 3.2: a response CC:must-revalidate is present
        } else if (rep->cache_control->hasMustRevalidate()) {
            debugs(22, 3, HERE << "Authenticated but server reply Cache-Control:must-revalidate");
            mayStore = true;

#if USE_HTTP_VIOLATIONS
            // NP: given the must-revalidate exception we should also be able to exempt no-cache.
            // HTTPbis WG verdict on this is that it is omitted from the spec due to being 'unexpected' by
            // some. The caching+revalidate is not exactly unsafe though with Squids interpretation of no-cache
            // (without parameters) as equivalent to must-revalidate in the reply.
        } else if (rep->cache_control->hasNoCacheWithoutParameters()) {
            debugs(22, 3, HERE << "Authenticated but server reply Cache-Control:no-cache (equivalent to must-revalidate)");
            mayStore = true;
#endif

            // HTTPbis pt6 section 3.2: a response CC:s-maxage is present
        } else if (rep->cache_control->hasSMaxAge()) {
            debugs(22, 3, HERE << "Authenticated but server reply Cache-Control:s-maxage");
            mayStore = true;
        }

        if (!mayStore)
            return decision.make(ReuseDecision::reuseNot, "authenticated transaction");

        // NP: response CC:no-cache is equivalent to CC:must-revalidate,max-age=0. We MAY cache, and do so.
        // NP: other request CC flags are limiters on HIT/MISS/REFRESH. We don't care about here.
    }

    /* HACK: The "multipart/x-mixed-replace" content type is used for
     * continuous push replies. These are generally dynamic and
     * probably should not be cachable
     */
    if ((v = hdr->getStr(Http::HdrType::CONTENT_TYPE)))
        if (!strncasecmp(v, "multipart/x-mixed-replace", 25))
            return decision.make(ReuseDecision::reuseNot, "Content-Type:multipart/x-mixed-replace");

    // TODO: if possible, provide more specific message for each status code
    static const char *shareableError = "shareable error status code";
    static const char *nonShareableError = "non-shareable error status code";
    ReuseDecision::Answers statusAnswer = ReuseDecision::reuseNot;
    const char *statusReason = nonShareableError;

    // finally, decide based on the response status code alone
    switch (rep->sline.status()) {

    /* There are several situations when a non-cacheable response may be
     * still shareable (e.g., among collapsed clients). We assume that these
     * are 3xx and 5xx responses, indicating server problems and some of
     * 4xx responses, common for all clients with a given cache key (e.g.,
     * 404 Not Found or 414 URI Too Long). On the other hand, we should not
     * share non-cacheable client-specific errors, such as 400 Bad Request
     * or 406 Not Acceptable.
     */

    /* Responses that are cacheable */

    case Http::scOkay:

    case Http::scNonAuthoritativeInformation:

    case Http::scMultipleChoices:

    case Http::scMovedPermanently:
    case Http::scPermanentRedirect:

    case Http::scGone:
        /*
         * Don't cache objects that need to be refreshed on next request,
         * unless we know how to refresh it.
         */

        if (refreshIsCachable(entry) || REFRESH_OVERRIDE(store_stale))
            decision.make(ReuseDecision::cachePositively, "refresh check returned cacheable");
        else
            decision.make(ReuseDecision::doNotCacheButShare, "refresh check returned non-cacheable");
        break;

    /* Responses that only are cacheable if the server says so */

    case Http::scFound:
    case Http::scTemporaryRedirect:
        if (rep->date <= 0)
            decision.make(ReuseDecision::doNotCacheButShare, "Date is missing/invalid");
        else if (rep->expires > rep->date)
            decision.make(ReuseDecision::cachePositively, "Expires > Date");
        else
            decision.make(ReuseDecision::doNotCacheButShare, "Expires <= Date");
        break;

    /* These responses can be negatively cached. Most can also be shared. */
    case Http::scNoContent:
    case Http::scUseProxy:
    case Http::scForbidden:
    case Http::scNotFound:
    case Http::scMethodNotAllowed:
    case Http::scUriTooLong:
    case Http::scInternalServerError:
    case Http::scNotImplemented:
    case Http::scBadGateway:
    case Http::scServiceUnavailable:
    case Http::scGatewayTimeout:
    case Http::scMisdirectedRequest:
        statusAnswer = ReuseDecision::doNotCacheButShare;
        statusReason = shareableError;
    // fall through to the actual decision making below

    case Http::scBadRequest: // no sharing; perhaps the server did not like something specific to this request
#if USE_HTTP_VIOLATIONS
        if (Config.negativeTtl > 0)
            decision.make(ReuseDecision::cacheNegatively, "Config.negativeTtl > 0");
        else
#endif
            decision.make(statusAnswer, statusReason);
        break;

    /* these responses can never be cached, some
       of them can be shared though */
    case Http::scSeeOther:
    case Http::scNotModified:
    case Http::scUnauthorized:
    case Http::scProxyAuthenticationRequired:
    case Http::scPaymentRequired:
    case Http::scInsufficientStorage:
        // TODO: use more specific reason for non-error status codes
        decision.make(ReuseDecision::doNotCacheButShare, shareableError);
        break;

    case Http::scPartialContent: /* Not yet supported. TODO: make shareable for suitable ranges */
    case Http::scNotAcceptable:
    case Http::scRequestTimeout: // TODO: is this shareable?
    case Http::scConflict: // TODO: is this shareable?
    case Http::scLengthRequired:
    case Http::scPreconditionFailed:
    case Http::scPayloadTooLarge:
    case Http::scUnsupportedMediaType:
    case Http::scUnprocessableEntity:
    case Http::scLocked: // TODO: is this shareable?
    case Http::scFailedDependency:
    case Http::scRequestedRangeNotSatisfied:
    case Http::scExpectationFailed:
    case Http::scInvalidHeader: /* Squid header parsing error */
    case Http::scHeaderTooLarge:
        decision.make(ReuseDecision::reuseNot, nonShareableError);
        break;

    default:
        /* RFC 2616 section 6.1.1: an unrecognized response MUST NOT be cached. */
        decision.make(ReuseDecision::reuseNot, "unknown status code");
        break;
    }

    return decision.answer;
}
557
558 /// assemble a variant key (vary-mark) from the given Vary header and HTTP request
559 static void
assembleVaryKey(String & vary,SBuf & vstr,const HttpRequest & request)560 assembleVaryKey(String &vary, SBuf &vstr, const HttpRequest &request)
561 {
562 static const SBuf asterisk("*");
563 const char *pos = nullptr;
564 const char *item = nullptr;
565 int ilen = 0;
566
567 while (strListGetItem(&vary, ',', &item, &ilen, &pos)) {
568 SBuf name(item, ilen);
569 if (name == asterisk) {
570 vstr = asterisk;
571 break;
572 }
573 name.toLower();
574 if (!vstr.isEmpty())
575 vstr.append(", ", 2);
576 vstr.append(name);
577 String hdr(request.header.getByName(name));
578 const char *value = hdr.termedBuf();
579 if (value) {
580 value = rfc1738_escape_part(value);
581 vstr.append("=\"", 2);
582 vstr.append(value);
583 vstr.append("\"", 1);
584 }
585
586 hdr.clean();
587 }
588 }
589
/*
 * For Vary, store the relevant request headers as
 * virtual headers in the reply
 * Returns an empty SBuf if the variance cannot be stored
 */
SBuf
httpMakeVaryMark(HttpRequest * request, HttpReply const * reply)
{
    SBuf vstr;
    String vary;

    // start with the standard Vary header members
    vary = reply->header.getList(Http::HdrType::VARY);
    assembleVaryKey(vary, vstr, *request);

#if X_ACCELERATOR_VARY
    // also fold in Squid's X-Accelerator-Vary extension members
    vary.clean();
    vary = reply->header.getList(Http::HdrType::HDR_X_ACCELERATOR_VARY);
    assembleVaryKey(vary, vstr, *request);
#endif

    debugs(11, 3, vstr);
    return vstr;
}
613
614 void
keepaliveAccounting(HttpReply * reply)615 HttpStateData::keepaliveAccounting(HttpReply *reply)
616 {
617 if (flags.keepalive)
618 if (_peer)
619 ++ _peer->stats.n_keepalives_sent;
620
621 if (reply->keep_alive) {
622 if (_peer)
623 ++ _peer->stats.n_keepalives_recv;
624
625 if (Config.onoff.detect_broken_server_pconns
626 && reply->bodySize(request->method) == -1 && !flags.chunked) {
627 debugs(11, DBG_IMPORTANT, "keepaliveAccounting: Impossible keep-alive header from '" << entry->url() << "'" );
628 // debugs(11, 2, "GOT HTTP REPLY HDR:\n---------\n" << readBuf->content() << "\n----------" );
629 flags.keepalive_broken = true;
630 }
631 }
632 }
633
634 void
checkDateSkew(HttpReply * reply)635 HttpStateData::checkDateSkew(HttpReply *reply)
636 {
637 if (reply->date > -1 && !_peer) {
638 int skew = abs((int)(reply->date - squid_curtime));
639
640 if (skew > 86400)
641 debugs(11, 3, "" << request->url.host() << "'s clock is skewed by " << skew << " seconds!");
642 }
643 }
644
/**
 * Parses the buffered server response status line and headers, builds the
 * virgin HttpReply, and dispatches 1xx control messages.
 *
 * This creates the error page itself.. its likely
 * that the forward ported reply header max size patch
 * generates non http conformant error pages - in which
 * case the errors where should be 'BAD_GATEWAY' etc
 */
void
HttpStateData::processReplyHeader()
{
    /** Creates a blank header. If this routine is made incremental, this will not do */

    /* NP: all exit points to this function MUST call ctx_exit(ctx) */
    Ctx ctx = ctx_enter(entry->mem_obj->urlXXX());

    debugs(11, 3, "processReplyHeader: key '" << entry->getMD5Text() << "'");

    assert(!flags.headers_parsed);

    // nothing buffered yet; wait for more server data
    if (!inBuf.length()) {
        ctx_exit(ctx);
        return;
    }

    /* Attempt to parse the first line; this will define where the protocol, status, reason-phrase and header begin */
    {
        if (hp == NULL)
            hp = new Http1::ResponseParser;

        bool parsedOk = hp->parse(inBuf);

        // sync the buffers after parsing.
        inBuf = hp->remaining();

        if (hp->needsMoreData()) {
            if (eof) { // no more data coming
                /* Bug 2879: Replies may terminate with \r\n then EOF instead of \r\n\r\n.
                 * We also may receive truncated responses.
                 * Ensure here that we have at minimum two \r\n when EOF is seen.
                 */
                inBuf.append("\r\n\r\n", 4);
                // retry the parse
                parsedOk = hp->parse(inBuf);
                // sync the buffers after parsing.
                inBuf = hp->remaining();
            } else {
                debugs(33, 5, "Incomplete response, waiting for end of response headers");
                ctx_exit(ctx);
                return;
            }
        }

        if (!parsedOk) {
            // unrecoverable parsing error
            // TODO: Use Raw! XXX: inBuf no longer has the [beginning of the] malformed header.
            debugs(11, 3, "Non-HTTP-compliant header:\n---------\n" << inBuf << "\n----------");
            flags.headers_parsed = true;
            HttpReply *newrep = new HttpReply;
            newrep->sline.set(Http::ProtocolVersion(), hp->parseStatusCode);
            setVirginReply(newrep);
            ctx_exit(ctx);
            return;
        }
    }

    /* We know the whole response is in parser now */
    debugs(11, 2, "HTTP Server " << serverConnection);
    debugs(11, 2, "HTTP Server RESPONSE:\n---------\n" <<
           hp->messageProtocol() << " " << hp->messageStatus() << " " << hp->reasonPhrase() << "\n" <<
           hp->mimeHeader() <<
           "----------");

    // reset payload tracking to begin after message headers
    payloadSeen = inBuf.length();

    HttpReply *newrep = new HttpReply;
    // XXX: RFC 7230 indicates we MAY ignore the reason phrase,
    // and use an empty string on unknown status.
    // We do that now to avoid performance regression from using SBuf::c_str()
    newrep->sline.set(Http::ProtocolVersion(1,1), hp->messageStatus() /* , hp->reasonPhrase() */);
    newrep->sline.protocol = newrep->sline.version.protocol = hp->messageProtocol().protocol;
    newrep->sline.version.major = hp->messageProtocol().major;
    newrep->sline.version.minor = hp->messageProtocol().minor;

    // parse headers
    if (!newrep->parseHeader(*hp)) {
        // XXX: when Http::ProtocolVersion is a function, remove this hack. just set with messageProtocol()
        newrep->sline.set(Http::ProtocolVersion(), Http::scInvalidHeader);
        newrep->sline.version.protocol = hp->messageProtocol().protocol;
        newrep->sline.version.major = hp->messageProtocol().major;
        newrep->sline.version.minor = hp->messageProtocol().minor;
        debugs(11, 2, "error parsing response headers mime block");
    }

    // done with Parser, now process using the HttpReply
    hp = NULL;

    newrep->sources |= request->url.getScheme() == AnyP::PROTO_HTTPS ? HttpMsg::srcHttps : HttpMsg::srcHttp;

    newrep->removeStaleWarnings();

    // 1xx control messages are forwarded or ignored separately; the final
    // response headers will arrive (and be parsed) later
    if (newrep->sline.protocol == AnyP::PROTO_HTTP && newrep->sline.status() >= 100 && newrep->sline.status() < 200) {
        handle1xx(newrep);
        ctx_exit(ctx);
        return;
    }

    // set up chunked decoding when the reply advertises Transfer-Encoding:chunked
    flags.chunked = false;
    if (newrep->sline.protocol == AnyP::PROTO_HTTP && newrep->header.chunked()) {
        flags.chunked = true;
        httpChunkDecoder = new Http1::TeChunkedParser;
    }

    if (!peerSupportsConnectionPinning())
        request->flags.connectionAuthDisabled = true;

    HttpReply *vrep = setVirginReply(newrep);
    flags.headers_parsed = true;

    keepaliveAccounting(vrep);

    checkDateSkew(vrep);

    processSurrogateControl (vrep);

    request->hier.peer_reply_status = newrep->sline.status();

    ctx_exit(ctx);
}
773
/// ignore or start forwarding the 1xx response (a.k.a., control message)
/// \param reply the parsed 1xx reply; ownership is taken via msg below
void
HttpStateData::handle1xx(HttpReply *reply)
{
    HttpReply::Pointer msg(reply); // will destroy reply if unused

    // one 1xx at a time: we must not be called while waiting for previous 1xx
    Must(!flags.handling1xx);
    flags.handling1xx = true;

    // skip forwarding when the client cannot handle 1xx or the body was forced
    if (!request->canHandle1xx() || request->forcedBodyContinuation) {
        debugs(11, 2, "ignoring 1xx because it is " << (request->forcedBodyContinuation ? "already sent" : "not supported by client"));
        proceedAfter1xx();
        return;
    }

#if USE_HTTP_VIOLATIONS
    // check whether the 1xx response forwarding is allowed by squid.conf
    if (Config.accessList.reply) {
        ACLFilledChecklist ch(Config.accessList.reply, originalRequest(), NULL);
        ch.al = fwd->al;
        ch.reply = reply;
        ch.syncAle(originalRequest(), nullptr);
        HTTPMSGLOCK(ch.reply);
        if (!ch.fastCheck().allowed()) { // TODO: support slow lookups?
            debugs(11, 3, HERE << "ignoring denied 1xx");
            proceedAfter1xx();
            return;
        }
    }
#endif // USE_HTTP_VIOLATIONS

    debugs(11, 2, HERE << "forwarding 1xx to client");

    // the Sink will use this to call us back after writing 1xx to the client
    typedef NullaryMemFunT<HttpStateData> CbDialer;
    const AsyncCall::Pointer cb = JobCallback(11, 3, CbDialer, this,
                                  HttpStateData::proceedAfter1xx);
    CallJobHere1(11, 4, request->clientConnectionManager, ConnStateData,
                 ConnStateData::sendControlMsg, HttpControlMsg(msg, cb));
    // If the call is not fired, then the Sink is gone, and HttpStateData
    // will terminate due to an aborted store entry or another similar error.
    // If we get stuck, it is not handle1xx fault if we could get stuck
    // for similar reasons without a 1xx response.
}
819
/// restores state and resumes processing after 1xx is ignored or forwarded
void
HttpStateData::proceedAfter1xx()
{
    Must(flags.handling1xx);
    debugs(11, 2, "continuing with " << payloadSeen << " bytes in buffer after 1xx");
    // resume response processing asynchronously, via the job call queue
    CallJobHere(11, 3, this, HttpStateData, HttpStateData::processReply);
}
828
829 /**
830 * returns true if the peer can support connection pinning
831 */
832 bool
peerSupportsConnectionPinning() const833 HttpStateData::peerSupportsConnectionPinning() const
834 {
835 if (!_peer)
836 return true;
837
838 /*If this peer does not support connection pinning (authenticated
839 connections) return false
840 */
841 if (!_peer->connection_auth)
842 return false;
843
844 const HttpReply *rep = entry->mem_obj->getReply();
845
846 /*The peer supports connection pinning and the http reply status
847 is not unauthorized, so the related connection can be pinned
848 */
849 if (rep->sline.status() != Http::scUnauthorized)
850 return true;
851
852 /*The server respond with Http::scUnauthorized and the peer configured
853 with "connection-auth=on" we know that the peer supports pinned
854 connections
855 */
856 if (_peer->connection_auth == 1)
857 return true;
858
859 /*At this point peer has configured with "connection-auth=auto"
860 parameter so we need some extra checks to decide if we are going
861 to allow pinned connections or not
862 */
863
864 /*if the peer configured with originserver just allow connection
865 pinning (squid 2.6 behaviour)
866 */
867 if (_peer->options.originserver)
868 return true;
869
870 /*if the connections it is already pinned it is OK*/
871 if (request->flags.pinned)
872 return true;
873
874 /*Allow pinned connections only if the Proxy-support header exists in
875 reply and has in its list the "Session-Based-Authentication"
876 which means that the peer supports connection pinning.
877 */
878 if (rep->header.hasListMember(Http::HdrType::PROXY_SUPPORT, "Session-Based-Authentication", ','))
879 return true;
880
881 return false;
882 }
883
884 // Called when we parsed (and possibly adapted) the headers but
885 // had not starting storing (a.k.a., sending) the body yet.
void
HttpStateData::haveParsedReplyHeaders()
{
    Client::haveParsedReplyHeaders();

    Ctx ctx = ctx_enter(entry->mem_obj->urlXXX());
    HttpReply *rep = finalReply();
    const Http::StatusCode statusCode = rep->sline.status();

    entry->timestampsSet();

    /* Check if object is cacheable or not based on reply code */
    debugs(11, 3, "HTTP CODE: " << statusCode);

    // Detect a server Date going backwards by comparing this reply with any
    // previously cached instance of the same object.
    if (StoreEntry *oldEntry = findPreviouslyCachedEntry(entry)) {
        oldEntry->lock("HttpStateData::haveParsedReplyHeaders");
        sawDateGoBack = rep->olderThan(oldEntry->getReply());
        oldEntry->unlock("HttpStateData::haveParsedReplyHeaders");
    }

    if (neighbors_do_private_keys && !sawDateGoBack)
        httpMaybeRemovePublic(entry, rep->sline.status());

    // Handle Vary (and the optional X-Accelerator-Vary extension) before
    // making the general reuse decision below.
    bool varyFailure = false;
    if (rep->header.has(Http::HdrType::VARY)
#if X_ACCELERATOR_VARY
            || rep->header.has(Http::HdrType::HDR_X_ACCELERATOR_VARY)
#endif
       ) {
        const SBuf vary(httpMakeVaryMark(request, rep));

        if (vary.isEmpty()) {
            // TODO: check whether such responses are shareable.
            // Do not share for now.
            entry->makePrivate(false);
            if (fwd->reforwardableStatus(rep->sline.status()))
                EBIT_SET(entry->flags, ENTRY_FWD_HDR_WAIT);
            varyFailure = true;
        } else {
            entry->mem_obj->vary_headers = vary;

            // RFC 7231 section 7.1.4
            // Vary:* can be cached, but has mandatory revalidation
            static const SBuf asterisk("*");
            if (vary == asterisk)
                EBIT_SET(entry->flags, ENTRY_REVALIDATE_ALWAYS);
        }
    }

    if (!varyFailure) {
        /*
         * If its not a reply that we will re-forward, then
         * allow the client to get it.
         */
        if (fwd->reforwardableStatus(rep->sline.status()))
            EBIT_SET(entry->flags, ENTRY_FWD_HDR_WAIT);

        ReuseDecision decision(entry, statusCode);

        // Apply the reusability classification to the store entry. When a
        // public/negative cache key cannot be created, fall back to sharing
        // the response without caching it.
        switch (reusableReply(decision)) {

        case ReuseDecision::reuseNot:
            entry->makePrivate(false);
            break;

        case ReuseDecision::cachePositively:
            if (!entry->makePublic()) {
                decision.make(ReuseDecision::doNotCacheButShare, "public key creation error");
                entry->makePrivate(true);
            }
            break;

        case ReuseDecision::cacheNegatively:
            if (!entry->cacheNegatively()) {
                decision.make(ReuseDecision::doNotCacheButShare, "public key creation error");
                entry->makePrivate(true);
            }
            break;

        case ReuseDecision::doNotCacheButShare:
            entry->makePrivate(true);
            break;

        default:
            assert(0);
            break;
        }
        debugs(11, 3, "decided: " << decision);
    }

    if (!ignoreCacheControl) {
        if (rep->cache_control) {
            // We are required to revalidate on many conditions.
            // For security reasons we do so even if storage was caused by refresh_pattern ignore-* option

            // CC:must-revalidate or CC:proxy-revalidate
            const bool ccMustRevalidate = (rep->cache_control->hasProxyRevalidate() || rep->cache_control->hasMustRevalidate());

            // CC:no-cache (only if there are no parameters)
            const bool ccNoCacheNoParams = rep->cache_control->hasNoCacheWithoutParameters();

            // CC:s-maxage=N
            const bool ccSMaxAge = rep->cache_control->hasSMaxAge();

            // CC:private (yes, these can sometimes be stored)
            const bool ccPrivate = rep->cache_control->hasPrivate();

            if (ccNoCacheNoParams || ccPrivate)
                EBIT_SET(entry->flags, ENTRY_REVALIDATE_ALWAYS);
            else if (ccMustRevalidate || ccSMaxAge)
                EBIT_SET(entry->flags, ENTRY_REVALIDATE_STALE);
        }
#if USE_HTTP_VIOLATIONS // response header Pragma::no-cache is undefined in HTTP
        else {
            // Expensive calculation. So only do it IF the CC: header is not present.

            /* HACK: Pragma: no-cache in _replies_ is not documented in HTTP,
             * but servers like "Active Imaging Webcast/2.0" sure do use it */
            if (rep->header.has(Http::HdrType::PRAGMA) &&
                    rep->header.hasListMember(Http::HdrType::PRAGMA,"no-cache",','))
                EBIT_SET(entry->flags, ENTRY_REVALIDATE_ALWAYS);
        }
#endif
    }

#if HEADERS_LOG
    headersLog(1, 0, request->method, rep);

#endif

    ctx_exit(ctx);
}
1018
1019 HttpStateData::ConnectionStatus
statusIfComplete() const1020 HttpStateData::statusIfComplete() const
1021 {
1022 const HttpReply *rep = virginReply();
1023 /** \par
1024 * If the reply wants to close the connection, it takes precedence */
1025
1026 if (httpHeaderHasConnDir(&rep->header, "close"))
1027 return COMPLETE_NONPERSISTENT_MSG;
1028
1029 /** \par
1030 * If we didn't send a keep-alive request header, then this
1031 * can not be a persistent connection.
1032 */
1033 if (!flags.keepalive)
1034 return COMPLETE_NONPERSISTENT_MSG;
1035
1036 /** \par
1037 * If we haven't sent the whole request then this can not be a persistent
1038 * connection.
1039 */
1040 if (!flags.request_sent) {
1041 debugs(11, 2, "Request not yet fully sent " << request->method << ' ' << entry->url());
1042 return COMPLETE_NONPERSISTENT_MSG;
1043 }
1044
1045 /** \par
1046 * What does the reply have to say about keep-alive?
1047 */
1048 /**
1049 \bug XXX BUG?
1050 * If the origin server (HTTP/1.0) does not send a keep-alive
1051 * header, but keeps the connection open anyway, what happens?
1052 * We'll return here and http.c waits for an EOF before changing
1053 * store_status to STORE_OK. Combine this with ENTRY_FWD_HDR_WAIT
1054 * and an error status code, and we might have to wait until
1055 * the server times out the socket.
1056 */
1057 if (!rep->keep_alive)
1058 return COMPLETE_NONPERSISTENT_MSG;
1059
1060 return COMPLETE_PERSISTENT_MSG;
1061 }
1062
1063 HttpStateData::ConnectionStatus
persistentConnStatus() const1064 HttpStateData::persistentConnStatus() const
1065 {
1066 debugs(11, 3, HERE << serverConnection << " eof=" << eof);
1067 if (eof) // already reached EOF
1068 return COMPLETE_NONPERSISTENT_MSG;
1069
1070 /* If server fd is closing (but we have not been notified yet), stop Comm
1071 I/O to avoid assertions. TODO: Change Comm API to handle callers that
1072 want more I/O after async closing (usually initiated by others). */
1073 // XXX: add canReceive or s/canSend/canTalkToServer/
1074 if (!Comm::IsConnOpen(serverConnection))
1075 return COMPLETE_NONPERSISTENT_MSG;
1076
1077 /** \par
1078 * In chunked response we do not know the content length but we are absolutely
1079 * sure about the end of response, so we are calling the statusIfComplete to
1080 * decide if we can be persistant
1081 */
1082 if (lastChunk && flags.chunked)
1083 return statusIfComplete();
1084
1085 const HttpReply *vrep = virginReply();
1086 debugs(11, 5, "persistentConnStatus: content_length=" << vrep->content_length);
1087
1088 const int64_t clen = vrep->bodySize(request->method);
1089
1090 debugs(11, 5, "persistentConnStatus: clen=" << clen);
1091
1092 /* If the body size is unknown we must wait for EOF */
1093 if (clen < 0)
1094 return INCOMPLETE_MSG;
1095
1096 /** \par
1097 * If the body size is known, we must wait until we've gotten all of it. */
1098 if (clen > 0) {
1099 debugs(11,5, "payloadSeen=" << payloadSeen << " content_length=" << vrep->content_length);
1100
1101 if (payloadSeen < vrep->content_length)
1102 return INCOMPLETE_MSG;
1103
1104 if (payloadTruncated > 0) // already read more than needed
1105 return COMPLETE_NONPERSISTENT_MSG; // disable pconns
1106 }
1107
1108 /** \par
1109 * If there is no message body or we got it all, we can be persistent */
1110 return statusIfComplete();
1111 }
1112
1113 static void
readDelayed(void * context,CommRead const &)1114 readDelayed(void *context, CommRead const &)
1115 {
1116 HttpStateData *state = static_cast<HttpStateData*>(context);
1117 state->flags.do_next_read = true;
1118 state->maybeReadVirginBody();
1119 }
1120
void
HttpStateData::readReply(const CommIoCbParams &io)
{
    Must(!flags.do_next_read); // XXX: should have been set false by mayReadVirginBody()
    flags.do_next_read = false;

    debugs(11, 5, io.conn);

    // Bail out early on Comm::ERR_CLOSING - close handlers will tidy up for us
    if (io.flag == Comm::ERR_CLOSING) {
        debugs(11, 3, "http socket closing");
        return;
    }

    if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
        abortTransaction("store entry aborted while reading reply");
        return;
    }

    Must(Comm::IsConnOpen(serverConnection));
    Must(io.conn->fd == serverConnection->fd);

    /*
     * Don't reset the timeout value here. The value should be
     * counting Config.Timeout.request and applies to the request
     * as a whole, not individual read() calls.
     * Plus, it breaks our lame *HalfClosed() detection
     */

    Must(maybeMakeSpaceAvailable(true));
    CommIoCbParams rd(this); // will be expanded with ReadNow results
    rd.conn = io.conn;
    rd.size = entry->bytesWanted(Range<size_t>(0, inBuf.spaceSize()));

    if (rd.size <= 0) {
        // the store entry allows no bytes right now; defer this read until
        // the MemObject wakes us up again (via readDelayed)
        assert(entry->mem_obj);
        AsyncCall::Pointer nilCall;
        entry->mem_obj->delayRead(DeferredRead(readDelayed, this, CommRead(io.conn, NULL, 0, nilCall)));
        return;
    }

    switch (Comm::ReadNow(rd, inBuf)) {
    case Comm::INPROGRESS:
        // read(2) would block; schedule another read attempt
        if (inBuf.isEmpty())
            debugs(33, 2, io.conn << ": no data to process, " << xstrerr(rd.xerrno));
        flags.do_next_read = true;
        maybeReadVirginBody();
        return;

    case Comm::OK:
    {
        payloadSeen += rd.size;
#if USE_DELAY_POOLS
        // charge the bytes against the applicable delay pool
        DelayId delayId = entry->mem_obj->mostBytesAllowed();
        delayId.bytesIn(rd.size);
#endif

        // update traffic counters and the read-size histogram
        statCounter.server.all.kbytes_in += rd.size;
        statCounter.server.http.kbytes_in += rd.size;
        ++ IOStats.Http.reads;

        // histogram bin is the bit width of (rd.size - 1)
        int bin = 0;
        for (int clen = rd.size - 1; clen; ++bin)
            clen >>= 1;

        ++ IOStats.Http.read_hist[bin];

        request->hier.notePeerRead();
    }

    /* Continue to process previously read data */
    break;

    case Comm::ENDFILE: // close detected by 0-byte read
        eof = 1;
        flags.do_next_read = false;

        /* Continue to process previously read data */
        break;

    // case Comm::COMM_ERROR:
    default: // no other flags should ever occur
        // report the read error to the client and stop this job
        debugs(11, 2, io.conn << ": read failure: " << xstrerr(rd.xerrno));
        ErrorState *err = new ErrorState(ERR_READ_ERROR, Http::scBadGateway, fwd->request);
        err->xerrno = rd.xerrno;
        fwd->fail(err);
        flags.do_next_read = false;
        closeServer();
        mustStop("HttpStateData::readReply");
        return;
    }

    /* Process next response from buffer */
    processReply();
}
1216
1217 /// processes the already read and buffered response data, possibly after
1218 /// waiting for asynchronous 1xx control message processing
void
HttpStateData::processReply()
{

    if (flags.handling1xx) { // we came back after handling a 1xx response
        debugs(11, 5, HERE << "done with 1xx handling");
        flags.handling1xx = false;
        Must(!flags.headers_parsed);
    }

    if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
        abortTransaction("store entry aborted while we were waiting for processReply()");
        return;
    }

    if (!flags.headers_parsed) { // have not parsed headers yet?
        PROF_start(HttpStateData_processReplyHeader);
        processReplyHeader();
        PROF_stop(HttpStateData_processReplyHeader);

        // stop here on a parse failure or when headers are still incomplete
        if (!continueAfterParsingHeader()) // parsing error or need more data
            return; // TODO: send errors to ICAP

        adaptOrFinalizeReply(); // may write to, abort, or "close" the entry
    }

    // kick more reads if needed and/or process the response body, if any
    PROF_start(HttpStateData_processReplyBody);
    processReplyBody(); // may call serverComplete()
    PROF_stop(HttpStateData_processReplyBody);
}
1250
1251 /**
1252 \retval true if we can continue with processing the body or doing ICAP.
1253 */
bool
HttpStateData::continueAfterParsingHeader()
{
    // still waiting for asynchronous 1xx control-message handling to finish
    if (flags.handling1xx) {
        debugs(11, 5, HERE << "wait for 1xx handling");
        Must(!flags.headers_parsed);
        return false;
    }

    if (!flags.headers_parsed && !eof) {
        debugs(11, 9, "needs more at " << inBuf.length());
        flags.do_next_read = true;
        /** \retval false If we have not finished parsing the headers and may get more data.
         * Schedules more reads to retrieve the missing data.
         */
        maybeReadVirginBody(); // schedules all kinds of reads; TODO: rename
        return false;
    }

    /** If we are done with parsing, check for errors */

    err_type error = ERR_NONE;

    if (flags.headers_parsed) { // parsed headers, possibly with errors
        // check for header parsing errors
        if (HttpReply *vrep = virginReply()) {
            const Http::StatusCode s = vrep->sline.status();
            const AnyP::ProtocolVersion &v = vrep->sline.version;
            if (s == Http::scInvalidHeader && v != Http::ProtocolVersion(0,9)) {
                debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: Bad header encountered from " << entry->url() << " AKA " << request->url);
                error = ERR_INVALID_RESP;
            } else if (s == Http::scHeaderTooLarge) {
                // oversized headers are not a retryable condition
                fwd->dontRetry(true);
                error = ERR_TOO_BIG;
            } else if (vrep->header.conflictingContentLength()) {
                fwd->dontRetry(true);
                error = ERR_INVALID_RESP;
            } else if (vrep->header.unsupportedTe()) {
                fwd->dontRetry(true);
                error = ERR_INVALID_RESP;
            } else {
                return true; // done parsing, got reply, and no error
            }
        } else {
            // parsed headers but got no reply
            debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: No reply at all for " << entry->url() << " AKA " << request->url);
            error = ERR_INVALID_RESP;
        }
    } else {
        // EOF arrived before the headers could be parsed
        assert(eof);
        if (inBuf.length()) {
            error = ERR_INVALID_RESP;
            debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: Headers did not parse at all for " << entry->url() << " AKA " << request->url);
        } else {
            error = ERR_ZERO_SIZE_OBJECT;
            debugs(11, (request->flags.accelerated?DBG_IMPORTANT:2), "WARNING: HTTP: Invalid Response: No object data received for " << entry->url() << " AKA " << request->url);
        }
    }

    // on error: discard any partial content, tell the client, stop this job
    assert(error != ERR_NONE);
    entry->reset();
    fwd->fail(new ErrorState(error, Http::scBadGateway, fwd->request));
    flags.do_next_read = false;
    closeServer();
    mustStop("HttpStateData::continueAfterParsingHeader");
    return false; // quit on error
}
1321
1322 /** truncate what we read if we read too much so that writeReplyBody()
1323 writes no more than what we should have read */
1324 void
truncateVirginBody()1325 HttpStateData::truncateVirginBody()
1326 {
1327 assert(flags.headers_parsed);
1328
1329 HttpReply *vrep = virginReply();
1330 int64_t clen = -1;
1331 if (!vrep->expectingBody(request->method, clen) || clen < 0)
1332 return; // no body or a body of unknown size, including chunked
1333
1334 if (payloadSeen - payloadTruncated <= clen)
1335 return; // we did not read too much or already took care of the extras
1336
1337 if (const int64_t extras = payloadSeen - payloadTruncated - clen) {
1338 // server sent more that the advertised content length
1339 debugs(11, 5, "payloadSeen=" << payloadSeen <<
1340 " clen=" << clen << '/' << vrep->content_length <<
1341 " trucated=" << payloadTruncated << '+' << extras);
1342
1343 inBuf.chop(0, inBuf.length() - extras);
1344 payloadTruncated += extras;
1345 }
1346 }
1347
1348 /**
1349 * Call this when there is data from the origin server
1350 * which should be sent to either StoreEntry, or to ICAP...
1351 */
1352 void
writeReplyBody()1353 HttpStateData::writeReplyBody()
1354 {
1355 truncateVirginBody(); // if needed
1356 const char *data = inBuf.rawContent();
1357 int len = inBuf.length();
1358 addVirginReplyBody(data, len);
1359 inBuf.consume(len);
1360 }
1361
1362 bool
decodeAndWriteReplyBody()1363 HttpStateData::decodeAndWriteReplyBody()
1364 {
1365 assert(flags.chunked);
1366 assert(httpChunkDecoder);
1367 try {
1368 MemBuf decodedData;
1369 decodedData.init();
1370 httpChunkDecoder->setPayloadBuffer(&decodedData);
1371 const bool doneParsing = httpChunkDecoder->parse(inBuf);
1372 inBuf = httpChunkDecoder->remaining(); // sync buffers after parse
1373 addVirginReplyBody(decodedData.content(), decodedData.contentSize());
1374 if (doneParsing) {
1375 lastChunk = 1;
1376 flags.do_next_read = false;
1377 }
1378 return true;
1379 }
1380 catch (...) {
1381 debugs (11, 2, "de-chunking failure: " << CurrentException);
1382 }
1383 return false;
1384 }
1385
1386 /**
1387 * processReplyBody has two purposes:
1388 * 1 - take the reply body data, if any, and put it into either
1389 * the StoreEntry, or give it over to ICAP.
1390 * 2 - see if we made it to the end of the response (persistent
1391 * connections and such)
1392 */
void
HttpStateData::processReplyBody()
{
    // body bytes cannot be processed until the reply headers are parsed
    if (!flags.headers_parsed) {
        flags.do_next_read = true;
        maybeReadVirginBody();
        return;
    }

#if USE_ADAPTATION
    debugs(11,5, HERE << "adaptationAccessCheckPending=" << adaptationAccessCheckPending);
    if (adaptationAccessCheckPending)
        return;

#endif

    /*
     * At this point the reply headers have been parsed and consumed.
     * That means header content has been removed from readBuf and
     * it contains only body data.
     */
    if (entry->isAccepting()) {
        if (flags.chunked) {
            // a de-chunking failure ends the exchange with this server
            if (!decodeAndWriteReplyBody()) {
                flags.do_next_read = false;
                serverComplete();
                return;
            }
        } else
            writeReplyBody();
    }

    // storing/sending methods like earlier adaptOrFinalizeReply() or
    // above writeReplyBody() may release/abort the store entry.
    if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
        // TODO: In some cases (e.g., 304), we should keep persistent conn open.
        // Detect end-of-reply (and, hence, pool our idle pconn) earlier (ASAP).
        abortTransaction("store entry aborted while storing reply");
        return;
    } else
        switch (persistentConnStatus()) {
        case INCOMPLETE_MSG: {
            debugs(11, 5, "processReplyBody: INCOMPLETE_MSG from " << serverConnection);
            /* Wait for more data or EOF condition */
            AsyncCall::Pointer nil;
            // a connection with broken keep-alive gets a short grace period
            if (flags.keepalive_broken) {
                commSetConnTimeout(serverConnection, 10, nil);
            } else {
                commSetConnTimeout(serverConnection, Config.Timeout.read, nil);
            }

            flags.do_next_read = true;
        }
        break;

        case COMPLETE_PERSISTENT_MSG: {
            debugs(11, 5, "processReplyBody: COMPLETE_PERSISTENT_MSG from " << serverConnection);

            // TODO: Remove serverConnectionSaved but preserve exception safety.

            // detach from the idle connection before handing it off for reuse
            commUnsetConnTimeout(serverConnection);
            flags.do_next_read = false;

            comm_remove_close_handler(serverConnection->fd, closeHandler);
            closeHandler = NULL;

            Ip::Address client_addr; // XXX: Remove as unused. Why was it added?
            if (request->flags.spoofClientIp)
                client_addr = request->client_addr;

            auto serverConnectionSaved = serverConnection;
            fwd->unregister(serverConnection);
            serverConnection = nullptr;

            bool ispinned = false; // TODO: Rename to isOrShouldBePinned
            if (request->flags.pinned) {
                ispinned = true;
            } else if (request->flags.connectionAuth && request->flags.authSent) {
                ispinned = true;
            }

            if (ispinned) {
                if (request->clientConnectionManager.valid()) {
                    // hand the idle pinned connection back to the client side
                    CallJobHere1(11, 4, request->clientConnectionManager,
                                 ConnStateData,
                                 notePinnedConnectionBecameIdle,
                                 ConnStateData::PinnedIdleContext(serverConnectionSaved, request));
                } else {
                    // must not pool/share ispinned connections, even orphaned ones
                    serverConnectionSaved->close();
                }
            } else {
                // stash the idle connection in the pconn pool for reuse
                fwd->pconnPush(serverConnectionSaved, request->url.host());
            }

            serverComplete();
            return;
        }

        case COMPLETE_NONPERSISTENT_MSG:
            debugs(11, 5, "processReplyBody: COMPLETE_NONPERSISTENT_MSG from " << serverConnection);
            if (flags.chunked && !lastChunk)
                entry->lengthWentBad("missing last-chunk");

            serverComplete();
            return;
        }

    maybeReadVirginBody();
}
1503
1504 bool
mayReadVirginReplyBody() const1505 HttpStateData::mayReadVirginReplyBody() const
1506 {
1507 // TODO: Be more precise here. For example, if/when reading trailer, we may
1508 // not be doneWithServer() yet, but we should return false. Similarly, we
1509 // could still be writing the request body after receiving the whole reply.
1510 return !doneWithServer();
1511 }
1512
1513 void
maybeReadVirginBody()1514 HttpStateData::maybeReadVirginBody()
1515 {
1516 // too late to read
1517 if (!Comm::IsConnOpen(serverConnection) || fd_table[serverConnection->fd].closing())
1518 return;
1519
1520 if (!maybeMakeSpaceAvailable(false))
1521 return;
1522
1523 // XXX: get rid of the do_next_read flag
1524 // check for the proper reasons preventing read(2)
1525 if (!flags.do_next_read)
1526 return;
1527
1528 flags.do_next_read = false;
1529
1530 // must not already be waiting for read(2) ...
1531 assert(!Comm::MonitorsRead(serverConnection->fd));
1532
1533 // wait for read(2) to be possible.
1534 typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
1535 AsyncCall::Pointer call = JobCallback(11, 5, Dialer, this, HttpStateData::readReply);
1536 Comm::Read(serverConnection, call);
1537 }
1538
/// Ensures (or, with doGrow, merely checks) that inBuf has room for more
/// reply bytes, within the configured buffering limit. Returns false when
/// reading more is not possible right now. May call processReply() as a
/// side effect when the buffer is already full.
bool
HttpStateData::maybeMakeSpaceAvailable(bool doGrow)
{
    // how much we are allowed to buffer
    const int limitBuffer = (flags.headers_parsed ? Config.readAheadGap : Config.maxReplyHeaderSize);

    if (limitBuffer < 0 || inBuf.length() >= (SBuf::size_type)limitBuffer) {
        // when buffer is at or over limit already
        debugs(11, 7, "will not read up to " << limitBuffer << ". buffer has (" << inBuf.length() << "/" << inBuf.spaceSize() << ") from " << serverConnection);
        debugs(11, DBG_DATA, "buffer has {" << inBuf << "}");
        // Process next response from buffer
        processReply();
        return false;
    }

    // how much we want to read
    const size_t read_size = calcBufferSpaceToReserve(inBuf.spaceSize(), (limitBuffer - inBuf.length()));

    if (!read_size) {
        debugs(11, 7, "will not read up to " << read_size << " into buffer (" << inBuf.length() << "/" << inBuf.spaceSize() << ") from " << serverConnection);
        return false;
    }

    // just report whether we could grow or not, do not actually do it
    if (doGrow)
        return (read_size >= 2);

    // we may need to grow the buffer
    inBuf.reserveSpace(read_size);
    debugs(11, 8, (!flags.do_next_read ? "will not" : "may") <<
           " read up to " << read_size << " bytes info buf(" << inBuf.length() << "/" << inBuf.spaceSize() <<
           ") from " << serverConnection);

    return (inBuf.spaceSize() >= 2); // only read if there is 1+ bytes of space available
}
1574
1575 /// called after writing the very last request byte (body, last-chunk, etc)
void
HttpStateData::wroteLast(const CommIoCbParams &io)
{
    debugs(11, 5, HERE << serverConnection << ": size " << io.size << ": errflag " << io.flag << ".");
#if URL_CHECKSUM_DEBUG

    entry->mem_obj->checkUrlChecksum();
#endif

    // XXX: Keep in sync with Client::sentRequestBody().
    // TODO: Extract common parts.

    // account for the bytes written, if any
    if (io.size > 0) {
        fd_bytes(io.fd, io.size, FD_WRITE);
        statCounter.server.all.kbytes_out += io.size;
        statCounter.server.http.kbytes_out += io.size;
    }

    // the connection is closing; its close handler will do the cleanup
    if (io.flag == Comm::ERR_CLOSING)
        return;

    // both successful and failed writes affect response times
    request->hier.notePeerWrite();

    // a write error fails the transaction and stops this job
    if (io.flag) {
        ErrorState *err = new ErrorState(ERR_WRITE_ERROR, Http::scBadGateway, fwd->request);
        err->xerrno = io.xerrno;
        fwd->fail(err);
        closeServer();
        mustStop("HttpStateData::wroteLast");
        return;
    }

    sendComplete();
}
1611
1612 /// successfully wrote the entire request (including body, last-chunk, etc.)
1613 void
sendComplete()1614 HttpStateData::sendComplete()
1615 {
1616 /*
1617 * Set the read timeout here because it hasn't been set yet.
1618 * We only set the read timeout after the request has been
1619 * fully written to the peer. If we start the timeout
1620 * after connection establishment, then we are likely to hit
1621 * the timeout for POST/PUT requests that have very large
1622 * request bodies.
1623 */
1624 typedef CommCbMemFunT<HttpStateData, CommTimeoutCbParams> TimeoutDialer;
1625 AsyncCall::Pointer timeoutCall = JobCallback(11, 5,
1626 TimeoutDialer, this, HttpStateData::httpTimeout);
1627
1628 commSetConnTimeout(serverConnection, Config.Timeout.read, timeoutCall);
1629 flags.request_sent = true;
1630 }
1631
1632 void
closeServer()1633 HttpStateData::closeServer()
1634 {
1635 debugs(11,5, HERE << "closing HTTP server " << serverConnection << " this " << this);
1636
1637 if (Comm::IsConnOpen(serverConnection)) {
1638 fwd->unregister(serverConnection);
1639 comm_remove_close_handler(serverConnection->fd, closeHandler);
1640 closeHandler = NULL;
1641 serverConnection->close();
1642 }
1643 }
1644
1645 bool
doneWithServer() const1646 HttpStateData::doneWithServer() const
1647 {
1648 return !Comm::IsConnOpen(serverConnection);
1649 }
1650
1651 /*
1652 * Fixup authentication request headers for special cases
1653 */
static void
httpFixupAuthentication(HttpRequest * request, const HttpHeader * hdr_in, HttpHeader * hdr_out, const Http::StateFlags &flags)
{
    // originpeer mode authenticates to an origin server (Authorization);
    // otherwise we authenticate to a proxy peer (Proxy-Authorization)
    Http::HdrType header = flags.originpeer ? Http::HdrType::AUTHORIZATION : Http::HdrType::PROXY_AUTHORIZATION;

    /* Nothing to do unless we are forwarding to a peer */
    if (!request->flags.proxying)
        return;

    /* Needs to be explicitly enabled */
    if (!request->peer_login)
        return;

    /* Maybe already dealt with? */
    if (hdr_out->has(header))
        return;

    /* Nothing to do here for PASSTHRU */
    if (strcmp(request->peer_login, "PASSTHRU") == 0)
        return;

    /* PROXYPASS is a special case, single-signon to servers with the proxy password (basic only) */
    if (flags.originpeer && strcmp(request->peer_login, "PROXYPASS") == 0 && hdr_in->has(Http::HdrType::PROXY_AUTHORIZATION)) {
        const char *auth = hdr_in->getStr(Http::HdrType::PROXY_AUTHORIZATION);

        if (auth && strncasecmp(auth, "basic ", 6) == 0) {
            // relay the client's Basic proxy credentials as-is
            hdr_out->putStr(header, auth);
            return;
        }
    }

    // scratch space for building base64-encoded Basic credentials
    char loginbuf[base64_encode_len(MAX_LOGIN_SZ)];
    size_t blen;
    struct base64_encode_ctx ctx;
    base64_encode_init(&ctx);

    /* Special mode to pass the username to the upstream cache */
    if (*request->peer_login == '*') {
        const char *username = "-";

        if (request->extacl_user.size())
            username = request->extacl_user.termedBuf();
#if USE_AUTH
        else if (request->auth_user_request != NULL)
            username = request->auth_user_request->username();
#endif

        // encode "<username>" followed by the login text after the '*'
        blen = base64_encode_update(&ctx, loginbuf, strlen(username), reinterpret_cast<const uint8_t*>(username));
        blen += base64_encode_update(&ctx, loginbuf+blen, strlen(request->peer_login +1), reinterpret_cast<const uint8_t*>(request->peer_login +1));
        blen += base64_encode_final(&ctx, loginbuf+blen);
        httpHeaderPutStrf(hdr_out, header, "Basic %.*s", (int)blen, loginbuf);
        return;
    }

    /* external_acl provided credentials */
    if (request->extacl_user.size() && request->extacl_passwd.size() &&
            (strcmp(request->peer_login, "PASS") == 0 ||
             strcmp(request->peer_login, "PROXYPASS") == 0)) {

        // encode "user:password" obtained from the external ACL lookup
        blen = base64_encode_update(&ctx, loginbuf, request->extacl_user.size(), reinterpret_cast<const uint8_t*>(request->extacl_user.rawBuf()));
        blen += base64_encode_update(&ctx, loginbuf+blen, 1, reinterpret_cast<const uint8_t*>(":"));
        blen += base64_encode_update(&ctx, loginbuf+blen, request->extacl_passwd.size(), reinterpret_cast<const uint8_t*>(request->extacl_passwd.rawBuf()));
        blen += base64_encode_final(&ctx, loginbuf+blen);
        httpHeaderPutStrf(hdr_out, header, "Basic %.*s", (int)blen, loginbuf);
        return;
    }
    // if no external user credentials are available to fake authentication with PASS acts like PASSTHRU
    if (strcmp(request->peer_login, "PASS") == 0)
        return;

    /* Kerberos login to peer */
#if HAVE_AUTH_MODULE_NEGOTIATE && HAVE_KRB5 && HAVE_GSSAPI
    if (strncmp(request->peer_login, "NEGOTIATE",strlen("NEGOTIATE")) == 0) {
        char *Token=NULL;
        char *PrincipalName=NULL,*p;
        int negotiate_flags = 0;

        // an optional principal name may follow "NEGOTIATE:"
        if ((p=strchr(request->peer_login,':')) != NULL ) {
            PrincipalName=++p;
        }
        if (request->flags.auth_no_keytab) {
            negotiate_flags |= PEER_PROXY_NEGOTIATE_NOKEYTAB;
        }
        Token = peer_proxy_negotiate_auth(PrincipalName, request->peer_host, negotiate_flags);
        if (Token) {
            httpHeaderPutStrf(hdr_out, header, "Negotiate %s",Token);
        }
        return;
    }
#endif /* HAVE_KRB5 && HAVE_GSSAPI */

    // default: peer_login holds fixed "user:password" text; send it as Basic
    blen = base64_encode_update(&ctx, loginbuf, strlen(request->peer_login), reinterpret_cast<const uint8_t*>(request->peer_login));
    blen += base64_encode_final(&ctx, loginbuf+blen);
    httpHeaderPutStrf(hdr_out, header, "Basic %.*s", (int)blen, loginbuf);
    return;
}
1750
1751 /*
1752 * build request headers and append them to a given MemBuf
1753 * used by buildRequestPrefix()
1754 * note: initialised the HttpHeader, the caller is responsible for Clean()-ing
1755 */
1756 void
httpBuildRequestHeader(HttpRequest * request,StoreEntry * entry,const AccessLogEntryPointer & al,HttpHeader * hdr_out,const Http::StateFlags & flags)1757 HttpStateData::httpBuildRequestHeader(HttpRequest * request,
1758 StoreEntry * entry,
1759 const AccessLogEntryPointer &al,
1760 HttpHeader * hdr_out,
1761 const Http::StateFlags &flags)
1762 {
1763 /* building buffer for complex strings */
1764 #define BBUF_SZ (MAX_URL+32)
1765 LOCAL_ARRAY(char, bbuf, BBUF_SZ);
1766 LOCAL_ARRAY(char, ntoabuf, MAX_IPSTRLEN);
1767 const HttpHeader *hdr_in = &request->header;
1768 const HttpHeaderEntry *e = NULL;
1769 HttpHeaderPos pos = HttpHeaderInitPos;
1770 assert (hdr_out->owner == hoRequest);
1771
1772 /* use our IMS header if the cached entry has Last-Modified time */
1773 if (request->lastmod > -1)
1774 hdr_out->putTime(Http::HdrType::IF_MODIFIED_SINCE, request->lastmod);
1775
1776 // Add our own If-None-Match field if the cached entry has a strong ETag.
1777 // copyOneHeaderFromClientsideRequestToUpstreamRequest() adds client ones.
1778 if (request->etag.size() > 0) {
1779 hdr_out->addEntry(new HttpHeaderEntry(Http::HdrType::IF_NONE_MATCH, NULL,
1780 request->etag.termedBuf()));
1781 }
1782
1783 bool we_do_ranges = decideIfWeDoRanges (request);
1784
1785 String strConnection (hdr_in->getList(Http::HdrType::CONNECTION));
1786
1787 while ((e = hdr_in->getEntry(&pos)))
1788 copyOneHeaderFromClientsideRequestToUpstreamRequest(e, strConnection, request, hdr_out, we_do_ranges, flags);
1789
1790 /* Abstraction break: We should interpret multipart/byterange responses
1791 * into offset-length data, and this works around our inability to do so.
1792 */
1793 if (!we_do_ranges && request->multipartRangeRequest()) {
1794 /* don't cache the result */
1795 request->flags.cachable = false;
1796 /* pretend it's not a range request */
1797 request->ignoreRange("want to request the whole object");
1798 request->flags.isRanged = false;
1799 }
1800
1801 hdr_out->addVia(request->http_ver, hdr_in);
1802
1803 if (request->flags.accelerated) {
1804 /* Append Surrogate-Capabilities */
1805 String strSurrogate(hdr_in->getList(Http::HdrType::SURROGATE_CAPABILITY));
1806 #if USE_SQUID_ESI
1807 snprintf(bbuf, BBUF_SZ, "%s=\"Surrogate/1.0 ESI/1.0\"", Config.Accel.surrogate_id);
1808 #else
1809 snprintf(bbuf, BBUF_SZ, "%s=\"Surrogate/1.0\"", Config.Accel.surrogate_id);
1810 #endif
1811 strListAdd(&strSurrogate, bbuf, ',');
1812 hdr_out->putStr(Http::HdrType::SURROGATE_CAPABILITY, strSurrogate.termedBuf());
1813 }
1814
1815 /** \pre Handle X-Forwarded-For */
1816 if (strcmp(opt_forwarded_for, "delete") != 0) {
1817
1818 String strFwd = hdr_in->getList(Http::HdrType::X_FORWARDED_FOR);
1819
1820 // if we cannot double strFwd size, then it grew past 50% of the limit
1821 if (!strFwd.canGrowBy(strFwd.size())) {
1822 // There is probably a forwarding loop with Via detection disabled.
1823 // If we do nothing, String will assert on overflow soon.
1824 // TODO: Terminate all transactions with huge XFF?
1825 strFwd = "error";
1826
1827 static int warnedCount = 0;
1828 if (warnedCount++ < 100) {
1829 const SBuf url(entry ? SBuf(entry->url()) : request->effectiveRequestUri());
1830 debugs(11, DBG_IMPORTANT, "Warning: likely forwarding loop with " << url);
1831 }
1832 }
1833
1834 if (strcmp(opt_forwarded_for, "on") == 0) {
1835 /** If set to ON - append client IP or 'unknown'. */
1836 if ( request->client_addr.isNoAddr() )
1837 strListAdd(&strFwd, "unknown", ',');
1838 else
1839 strListAdd(&strFwd, request->client_addr.toStr(ntoabuf, MAX_IPSTRLEN), ',');
1840 } else if (strcmp(opt_forwarded_for, "off") == 0) {
1841 /** If set to OFF - append 'unknown'. */
1842 strListAdd(&strFwd, "unknown", ',');
1843 } else if (strcmp(opt_forwarded_for, "transparent") == 0) {
1844 /** If set to TRANSPARENT - pass through unchanged. */
1845 } else if (strcmp(opt_forwarded_for, "truncate") == 0) {
1846 /** If set to TRUNCATE - drop existing list and replace with client IP or 'unknown'. */
1847 if ( request->client_addr.isNoAddr() )
1848 strFwd = "unknown";
1849 else
1850 strFwd = request->client_addr.toStr(ntoabuf, MAX_IPSTRLEN);
1851 }
1852 if (strFwd.size() > 0)
1853 hdr_out->putStr(Http::HdrType::X_FORWARDED_FOR, strFwd.termedBuf());
1854 }
1855 /** If set to DELETE - do not copy through. */
1856
1857 /* append Host if not there already */
1858 if (!hdr_out->has(Http::HdrType::HOST)) {
1859 if (request->peer_domain) {
1860 hdr_out->putStr(Http::HdrType::HOST, request->peer_domain);
1861 } else {
1862 SBuf authority = request->url.authority();
1863 hdr_out->putStr(Http::HdrType::HOST, authority.c_str());
1864 }
1865 }
1866
1867 /* append Authorization if known in URL, not in header and going direct */
1868 if (!hdr_out->has(Http::HdrType::AUTHORIZATION)) {
1869 if (!request->flags.proxying && !request->url.userInfo().isEmpty()) {
1870 static char result[base64_encode_len(MAX_URL*2)]; // should be big enough for a single URI segment
1871 struct base64_encode_ctx ctx;
1872 base64_encode_init(&ctx);
1873 size_t blen = base64_encode_update(&ctx, result, request->url.userInfo().length(), reinterpret_cast<const uint8_t*>(request->url.userInfo().rawContent()));
1874 blen += base64_encode_final(&ctx, result+blen);
1875 result[blen] = '\0';
1876 if (blen)
1877 httpHeaderPutStrf(hdr_out, Http::HdrType::AUTHORIZATION, "Basic %.*s", (int)blen, result);
1878 }
1879 }
1880
1881 /* Fixup (Proxy-)Authorization special cases. Plain relaying dealt with above */
1882 httpFixupAuthentication(request, hdr_in, hdr_out, flags);
1883
1884 /* append Cache-Control, add max-age if not there already */
1885 {
1886 HttpHdrCc *cc = hdr_in->getCc();
1887
1888 if (!cc)
1889 cc = new HttpHdrCc();
1890
1891 #if 0 /* see bug 2330 */
1892 /* Set no-cache if determined needed but not found */
1893 if (request->flags.nocache)
1894 EBIT_SET(cc->mask, HttpHdrCcType::CC_NO_CACHE);
1895 #endif
1896
1897 /* Add max-age only without no-cache */
1898 if (!cc->hasMaxAge() && !cc->hasNoCache()) {
1899 // XXX: performance regression. c_str() reallocates
1900 SBuf tmp(request->effectiveRequestUri());
1901 cc->maxAge(getMaxAge(entry ? entry->url() : tmp.c_str()));
1902 }
1903
1904 /* Enforce sibling relations */
1905 if (flags.only_if_cached)
1906 cc->onlyIfCached(true);
1907
1908 hdr_out->putCc(cc);
1909
1910 delete cc;
1911 }
1912
1913 // Always send Connection because HTTP/1.0 servers need explicit "keep-alive"
1914 // while HTTP/1.1 servers need explicit "close", and we do not always know
1915 // the server expectations.
1916 hdr_out->putStr(Http::HdrType::CONNECTION, flags.keepalive ? "keep-alive" : "close");
1917
1918 /* append Front-End-Https */
1919 if (flags.front_end_https) {
1920 if (flags.front_end_https == 1 || request->url.getScheme() == AnyP::PROTO_HTTPS)
1921 hdr_out->putStr(Http::HdrType::FRONT_END_HTTPS, "On");
1922 }
1923
1924 if (flags.chunked_request) {
1925 // Do not just copy the original value so that if the client-side
1926 // starts decode other encodings, this code may remain valid.
1927 hdr_out->putStr(Http::HdrType::TRANSFER_ENCODING, "chunked");
1928 }
1929
1930 /* Now mangle the headers. */
1931 httpHdrMangleList(hdr_out, request, al, ROR_REQUEST);
1932
1933 strConnection.clean();
1934 }
1935
1936 /**
1937 * Decides whether a particular header may be cloned from the received Clients request
1938 * to our outgoing fetch request.
1939 */
1940 void
copyOneHeaderFromClientsideRequestToUpstreamRequest(const HttpHeaderEntry * e,const String strConnection,const HttpRequest * request,HttpHeader * hdr_out,const int we_do_ranges,const Http::StateFlags & flags)1941 copyOneHeaderFromClientsideRequestToUpstreamRequest(const HttpHeaderEntry *e, const String strConnection, const HttpRequest * request, HttpHeader * hdr_out, const int we_do_ranges, const Http::StateFlags &flags)
1942 {
1943 debugs(11, 5, "httpBuildRequestHeader: " << e->name << ": " << e->value );
1944
1945 switch (e->id) {
1946
1947 /** \par RFC 2616 sect 13.5.1 - Hop-by-Hop headers which Squid should not pass on. */
1948
1949 case Http::HdrType::PROXY_AUTHORIZATION:
1950 /** \par Proxy-Authorization:
1951 * Only pass on proxy authentication to peers for which
1952 * authentication forwarding is explicitly enabled
1953 */
1954 if (!flags.originpeer && flags.proxying && request->peer_login &&
1955 (strcmp(request->peer_login, "PASS") == 0 ||
1956 strcmp(request->peer_login, "PROXYPASS") == 0 ||
1957 strcmp(request->peer_login, "PASSTHRU") == 0)) {
1958 hdr_out->addEntry(e->clone());
1959 }
1960 break;
1961
1962 /** \par RFC 2616 sect 13.5.1 - Hop-by-Hop headers which Squid does not pass on. */
1963
1964 case Http::HdrType::CONNECTION: /** \par Connection: */
1965 case Http::HdrType::TE: /** \par TE: */
1966 case Http::HdrType::KEEP_ALIVE: /** \par Keep-Alive: */
1967 case Http::HdrType::PROXY_AUTHENTICATE: /** \par Proxy-Authenticate: */
1968 case Http::HdrType::TRAILER: /** \par Trailer: */
1969 case Http::HdrType::UPGRADE: /** \par Upgrade: */
1970 case Http::HdrType::TRANSFER_ENCODING: /** \par Transfer-Encoding: */
1971 break;
1972
1973 /** \par OTHER headers I haven't bothered to track down yet. */
1974
1975 case Http::HdrType::AUTHORIZATION:
1976 /** \par WWW-Authorization:
1977 * Pass on WWW authentication */
1978
1979 if (!flags.originpeer) {
1980 hdr_out->addEntry(e->clone());
1981 } else {
1982 /** \note In accelerators, only forward authentication if enabled
1983 * (see also httpFixupAuthentication for special cases)
1984 */
1985 if (request->peer_login &&
1986 (strcmp(request->peer_login, "PASS") == 0 ||
1987 strcmp(request->peer_login, "PASSTHRU") == 0 ||
1988 strcmp(request->peer_login, "PROXYPASS") == 0)) {
1989 hdr_out->addEntry(e->clone());
1990 }
1991 }
1992
1993 break;
1994
1995 case Http::HdrType::HOST:
1996 /** \par Host:
1997 * Normally Squid rewrites the Host: header.
1998 * However, there is one case when we don't: If the URL
1999 * went through our redirector and the admin configured
2000 * 'redir_rewrites_host' to be off.
2001 */
2002 if (request->peer_domain)
2003 hdr_out->putStr(Http::HdrType::HOST, request->peer_domain);
2004 else if (request->flags.redirected && !Config.onoff.redir_rewrites_host)
2005 hdr_out->addEntry(e->clone());
2006 else {
2007 SBuf authority = request->url.authority();
2008 hdr_out->putStr(Http::HdrType::HOST, authority.c_str());
2009 }
2010
2011 break;
2012
2013 case Http::HdrType::IF_MODIFIED_SINCE:
2014 /** \par If-Modified-Since:
2015 * append unless we added our own,
2016 * but only if cache_miss_revalidate is enabled, or
2017 * the request is not cacheable, or
2018 * the request contains authentication credentials.
2019 * \note at most one client's If-Modified-Since header can pass through
2020 */
2021 // XXX: need to check and cleanup the auth case so cacheable auth requests get cached.
2022 if (hdr_out->has(Http::HdrType::IF_MODIFIED_SINCE))
2023 break;
2024 else if (Config.onoff.cache_miss_revalidate || !request->flags.cachable || request->flags.auth)
2025 hdr_out->addEntry(e->clone());
2026 break;
2027
2028 case Http::HdrType::IF_NONE_MATCH:
2029 /** \par If-None-Match:
2030 * append if the wildcard '*' special case value is present, or
2031 * cache_miss_revalidate is disabled, or
2032 * the request is not cacheable in this proxy, or
2033 * the request contains authentication credentials.
2034 * \note this header lists a set of responses for the server to elide sending. Squid added values are extending that set.
2035 */
2036 // XXX: need to check and cleanup the auth case so cacheable auth requests get cached.
2037 if (hdr_out->hasListMember(Http::HdrType::IF_MATCH, "*", ',') || Config.onoff.cache_miss_revalidate || !request->flags.cachable || request->flags.auth)
2038 hdr_out->addEntry(e->clone());
2039 break;
2040
2041 case Http::HdrType::MAX_FORWARDS:
2042 /** \par Max-Forwards:
2043 * pass only on TRACE or OPTIONS requests */
2044 if (request->method == Http::METHOD_TRACE || request->method == Http::METHOD_OPTIONS) {
2045 const int64_t hops = e->getInt64();
2046
2047 if (hops > 0)
2048 hdr_out->putInt64(Http::HdrType::MAX_FORWARDS, hops - 1);
2049 }
2050
2051 break;
2052
2053 case Http::HdrType::VIA:
2054 /** \par Via:
2055 * If Via is disabled then forward any received header as-is.
2056 * Otherwise leave for explicit updated addition later. */
2057
2058 if (!Config.onoff.via)
2059 hdr_out->addEntry(e->clone());
2060
2061 break;
2062
2063 case Http::HdrType::RANGE:
2064
2065 case Http::HdrType::IF_RANGE:
2066
2067 case Http::HdrType::REQUEST_RANGE:
2068 /** \par Range:, If-Range:, Request-Range:
2069 * Only pass if we accept ranges */
2070 if (!we_do_ranges)
2071 hdr_out->addEntry(e->clone());
2072
2073 break;
2074
2075 case Http::HdrType::PROXY_CONNECTION: // SHOULD ignore. But doing so breaks things.
2076 break;
2077
2078 case Http::HdrType::CONTENT_LENGTH:
2079 // pass through unless we chunk; also, keeping this away from default
2080 // prevents request smuggling via Connection: Content-Length tricks
2081 if (!flags.chunked_request)
2082 hdr_out->addEntry(e->clone());
2083 break;
2084
2085 case Http::HdrType::X_FORWARDED_FOR:
2086
2087 case Http::HdrType::CACHE_CONTROL:
2088 /** \par X-Forwarded-For:, Cache-Control:
2089 * handled specially by Squid, so leave off for now.
2090 * append these after the loop if needed */
2091 break;
2092
2093 case Http::HdrType::FRONT_END_HTTPS:
2094 /** \par Front-End-Https:
2095 * Pass thru only if peer is configured with front-end-https */
2096 if (!flags.front_end_https)
2097 hdr_out->addEntry(e->clone());
2098
2099 break;
2100
2101 default:
2102 /** \par default.
2103 * pass on all other header fields
2104 * which are NOT listed by the special Connection: header. */
2105
2106 if (strConnection.size()>0 && strListIsMember(&strConnection, e->name.termedBuf(), ',')) {
2107 debugs(11, 2, "'" << e->name << "' header cropped by Connection: definition");
2108 return;
2109 }
2110
2111 hdr_out->addEntry(e->clone());
2112 }
2113 }
2114
2115 bool
decideIfWeDoRanges(HttpRequest * request)2116 HttpStateData::decideIfWeDoRanges (HttpRequest * request)
2117 {
2118 bool result = true;
2119 /* decide if we want to do Ranges ourselves
2120 * and fetch the whole object now)
2121 * We want to handle Ranges ourselves iff
2122 * - we can actually parse client Range specs
2123 * - the specs are expected to be simple enough (e.g. no out-of-order ranges)
2124 * - reply will be cachable
2125 * (If the reply will be uncachable we have to throw it away after
2126 * serving this request, so it is better to forward ranges to
2127 * the server and fetch only the requested content)
2128 */
2129
2130 int64_t roffLimit = request->getRangeOffsetLimit();
2131
2132 if (NULL == request->range || !request->flags.cachable
2133 || request->range->offsetLimitExceeded(roffLimit) || request->flags.connectionAuth)
2134 result = false;
2135
2136 debugs(11, 8, "decideIfWeDoRanges: range specs: " <<
2137 request->range << ", cachable: " <<
2138 request->flags.cachable << "; we_do_ranges: " << result);
2139
2140 return result;
2141 }
2142
2143 /* build request prefix and append it to a given MemBuf;
2144 * return the length of the prefix */
2145 mb_size_t
buildRequestPrefix(MemBuf * mb)2146 HttpStateData::buildRequestPrefix(MemBuf * mb)
2147 {
2148 const int offset = mb->size;
2149 /* Uses a local httpver variable to print the HTTP label
2150 * since the HttpRequest may have an older version label.
2151 * XXX: This could create protocol bugs as the headers sent and
2152 * flow control should all be based on the HttpRequest version
2153 * not the one we are sending. Needs checking.
2154 */
2155 const AnyP::ProtocolVersion httpver = Http::ProtocolVersion();
2156 const SBuf url(_peer && !_peer->options.originserver ? request->effectiveRequestUri() : request->url.path());
2157 mb->appendf(SQUIDSBUFPH " " SQUIDSBUFPH " %s/%d.%d\r\n",
2158 SQUIDSBUFPRINT(request->method.image()),
2159 SQUIDSBUFPRINT(url),
2160 AnyP::ProtocolType_str[httpver.protocol],
2161 httpver.major,httpver.minor);
2162 /* build and pack headers */
2163 {
2164 HttpHeader hdr(hoRequest);
2165 httpBuildRequestHeader(request, entry, fwd->al, &hdr, flags);
2166
2167 if (request->flags.pinned && request->flags.connectionAuth)
2168 request->flags.authSent = true;
2169 else if (hdr.has(Http::HdrType::AUTHORIZATION))
2170 request->flags.authSent = true;
2171
2172 hdr.packInto(mb);
2173 hdr.clean();
2174 }
2175 /* append header terminator */
2176 mb->append(crlf, 2);
2177 return mb->size - offset;
2178 }
2179
/* This will be called when connect completes. Write request. */
/// Writes the HTTP request (and starts the request body flow, if any) on the
/// already-established server connection.
/// \retval true  the request write was scheduled
/// \retval false the connection is closing or the body flow could not start
bool
HttpStateData::sendRequest()
{
    MemBuf mb;

    debugs(11, 5, HERE << serverConnection << ", request " << request << ", this " << this << ".");

    if (!Comm::IsConnOpen(serverConnection)) {
        debugs(11,3, HERE << "cannot send request to closing " << serverConnection);
        assert(closeHandler != NULL);
        return false;
    }

    // arm the lifetime timeout for the whole server-side exchange
    typedef CommCbMemFunT<HttpStateData, CommTimeoutCbParams> TimeoutDialer;
    AsyncCall::Pointer timeoutCall = JobCallback(11, 5,
                                     TimeoutDialer, this, HttpStateData::httpTimeout);
    commSetConnTimeout(serverConnection, Config.Timeout.lifetime, timeoutCall);
    flags.do_next_read = true;
    maybeReadVirginBody();

    if (request->body_pipe != NULL) {
        if (!startRequestBodyFlow()) // register to receive body data
            return false;
        // headers written first; sentRequestBody then feeds the body
        typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
        requestSender = JobCallback(11,5,
                                    Dialer, this,  HttpStateData::sentRequestBody);

        Must(!flags.chunked_request);
        // use chunked encoding if we do not know the length
        if (request->content_length < 0)
            flags.chunked_request = true;
    } else {
        assert(!requestBodySource);
        // no body: wroteLast completes the request after the headers
        typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
        requestSender = JobCallback(11,5,
                                    Dialer, this, HttpStateData::wroteLast);
    }

    // must be set before buildRequestPrefix() consults them below
    flags.originpeer = (_peer != NULL && _peer->options.originserver);
    flags.proxying = (_peer != NULL && !flags.originpeer);

    /*
     * Is keep-alive okay for all request methods?
     */
    // priority order: forced keep-alive, pinned connections, global pconn
    // switch, direct connections, then per-peer keep-alive statistics
    if (request->flags.mustKeepalive)
        flags.keepalive = true;
    else if (request->flags.pinned)
        flags.keepalive = request->persistent();
    else if (!Config.onoff.server_pconns)
        flags.keepalive = false;
    else if (_peer == NULL)
        flags.keepalive = true;
    else if (_peer->stats.n_keepalives_sent < 10)
        flags.keepalive = true;
    else if ((double) _peer->stats.n_keepalives_recv /
             (double) _peer->stats.n_keepalives_sent > 0.50)
        flags.keepalive = true;

    if (_peer) {
        /*The old code here was
          if (neighborType(_peer, request->url) == PEER_SIBLING && ...
          which is equivalent to:
          if (neighborType(_peer, URL()) == PEER_SIBLING && ...
          or better:
          if (((_peer->type == PEER_MULTICAST && p->options.mcast_siblings) ||
               _peer->type == PEER_SIBLINGS ) && _peer->options.allow_miss)
             flags.only_if_cached = 1;

          But I suppose it was a bug
         */
        if (neighborType(_peer, request->url) == PEER_SIBLING && !_peer->options.allow_miss)
            flags.only_if_cached = true;

        flags.front_end_https = _peer->front_end_https;
    }

    mb.init();
    request->peer_host=_peer?_peer->host:NULL;
    buildRequestPrefix(&mb);

    debugs(11, 2, "HTTP Server " << serverConnection);
    debugs(11, 2, "HTTP Server REQUEST:\n---------\n" << mb.buf << "\n----------");

    Comm::Write(serverConnection, &mb, requestSender);
    return true;
}
2267
/// Supplies the next portion of the request body for writing to the server,
/// applying HTTP/1.1 chunked transfer coding when flags.chunked_request is
/// set; otherwise delegates to the parent (identity coding) implementation.
/// \retval true  buf was filled with body bytes ready to send
/// \retval false no request body bytes are available for chunking yet
bool
HttpStateData::getMoreRequestBody(MemBuf &buf)
{
    // parent's implementation can handle the no-encoding case
    if (!flags.chunked_request)
        return Client::getMoreRequestBody(buf);

    MemBuf raw;

    Must(requestBodySource != NULL);
    if (!requestBodySource->getMoreData(raw))
        return false; // no request body bytes to chunk yet

    // optimization: pre-allocate buffer size that should be enough
    const mb_size_t rawDataSize = raw.contentSize();
    // we may need to send: hex-chunk-size CRLF raw-data CRLF last-chunk
    buf.init(16 + 2 + rawDataSize + 2 + 5, raw.max_capacity);

    // one chunk: chunk-size CRLF chunk-data CRLF
    buf.appendf("%x\r\n", static_cast<unsigned int>(rawDataSize));
    buf.append(raw.content(), rawDataSize);
    buf.append("\r\n", 2);

    Must(rawDataSize > 0); // we did not accidentally create a last-chunk above

    // Do not send last-chunk unless we successfully received everything
    if (receivedWholeRequestBody) {
        Must(!flags.sentLastChunk);
        flags.sentLastChunk = true;
        buf.append("0\r\n\r\n", 5); // last-chunk plus final CRLF
    }

    return true;
}
2301
2302 void
httpStart(FwdState * fwd)2303 httpStart(FwdState *fwd)
2304 {
2305 debugs(11, 3, fwd->request->method << ' ' << fwd->entry->url());
2306 AsyncJob::Start(new HttpStateData(fwd));
2307 }
2308
2309 void
start()2310 HttpStateData::start()
2311 {
2312 if (!sendRequest()) {
2313 debugs(11, 3, "httpStart: aborted");
2314 mustStop("HttpStateData::start failed");
2315 return;
2316 }
2317
2318 ++ statCounter.server.all.requests;
2319 ++ statCounter.server.http.requests;
2320
2321 /*
2322 * We used to set the read timeout here, but not any more.
2323 * Now its set in httpSendComplete() after the full request,
2324 * including request body, has been written to the server.
2325 */
2326 }
2327
/// if broken posts are enabled for the request, try to fix and return true
/// The "fix" is writing an extra CRLF after the request body, which some
/// broken origin servers require; controlled by the broken_posts ACL.
/// \retval true  the caller must not proceed (extra CRLF scheduled, or the
///               connection is already closed)
/// \retval false broken-post handling does not apply to this request
bool
HttpStateData::finishingBrokenPost()
{
#if USE_HTTP_VIOLATIONS
    if (!Config.accessList.brokenPosts) {
        debugs(11, 5, HERE << "No brokenPosts list");
        return false;
    }

    // evaluate the broken_posts ACL against this request
    ACLFilledChecklist ch(Config.accessList.brokenPosts, originalRequest(), NULL);
    ch.al = fwd->al;
    ch.syncAle(originalRequest(), nullptr);
    if (!ch.fastCheck().allowed()) {
        debugs(11, 5, HERE << "didn't match brokenPosts");
        return false;
    }

    if (!Comm::IsConnOpen(serverConnection)) {
        debugs(11, 3, HERE << "ignoring broken POST for closed " << serverConnection);
        assert(closeHandler != NULL);
        return true; // prevent caller from proceeding as if nothing happened
    }

    debugs(11, 3, "finishingBrokenPost: fixing broken POST");
    typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
    requestSender = JobCallback(11,5,
                                Dialer, this, HttpStateData::wroteLast);
    Comm::Write(serverConnection, "\r\n", 2, requestSender, NULL);
    return true;
#else
    return false;
#endif /* USE_HTTP_VIOLATIONS */
}
2362
2363 /// if needed, write last-chunk to end the request body and return true
2364 bool
finishingChunkedRequest()2365 HttpStateData::finishingChunkedRequest()
2366 {
2367 if (flags.sentLastChunk) {
2368 debugs(11, 5, HERE << "already sent last-chunk");
2369 return false;
2370 }
2371
2372 Must(receivedWholeRequestBody); // or we should not be sending last-chunk
2373 flags.sentLastChunk = true;
2374
2375 typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
2376 requestSender = JobCallback(11,5, Dialer, this, HttpStateData::wroteLast);
2377 Comm::Write(serverConnection, "0\r\n\r\n", 5, requestSender, NULL);
2378 return true;
2379 }
2380
2381 void
doneSendingRequestBody()2382 HttpStateData::doneSendingRequestBody()
2383 {
2384 Client::doneSendingRequestBody();
2385 debugs(11,5, HERE << serverConnection);
2386
2387 // do we need to write something after the last body byte?
2388 if (flags.chunked_request && finishingChunkedRequest())
2389 return;
2390 if (!flags.chunked_request && finishingBrokenPost())
2391 return;
2392
2393 sendComplete();
2394 }
2395
2396 // more origin request body data is available
2397 void
handleMoreRequestBodyAvailable()2398 HttpStateData::handleMoreRequestBodyAvailable()
2399 {
2400 if (eof || !Comm::IsConnOpen(serverConnection)) {
2401 // XXX: we should check this condition in other callbacks then!
2402 // TODO: Check whether this can actually happen: We should unsubscribe
2403 // as a body consumer when the above condition(s) are detected.
2404 debugs(11, DBG_IMPORTANT, HERE << "Transaction aborted while reading HTTP body");
2405 return;
2406 }
2407
2408 assert(requestBodySource != NULL);
2409
2410 if (requestBodySource->buf().hasContent()) {
2411 // XXX: why does not this trigger a debug message on every request?
2412
2413 if (flags.headers_parsed && !flags.abuse_detected) {
2414 flags.abuse_detected = true;
2415 debugs(11, DBG_IMPORTANT, "http handleMoreRequestBodyAvailable: Likely proxy abuse detected '" << request->client_addr << "' -> '" << entry->url() << "'" );
2416
2417 if (virginReply()->sline.status() == Http::scInvalidHeader) {
2418 closeServer();
2419 mustStop("HttpStateData::handleMoreRequestBodyAvailable");
2420 return;
2421 }
2422 }
2423 }
2424
2425 HttpStateData::handleMoreRequestBodyAvailable();
2426 }
2427
2428 // premature end of the request body
2429 void
handleRequestBodyProducerAborted()2430 HttpStateData::handleRequestBodyProducerAborted()
2431 {
2432 Client::handleRequestBodyProducerAborted();
2433 if (entry->isEmpty()) {
2434 debugs(11, 3, "request body aborted: " << serverConnection);
2435 // We usually get here when ICAP REQMOD aborts during body processing.
2436 // We might also get here if client-side aborts, but then our response
2437 // should not matter because either client-side will provide its own or
2438 // there will be no response at all (e.g., if the the client has left).
2439 ErrorState *err = new ErrorState(ERR_ICAP_FAILURE, Http::scInternalServerError, fwd->request);
2440 err->detailError(ERR_DETAIL_SRV_REQMOD_REQ_BODY);
2441 fwd->fail(err);
2442 }
2443
2444 abortTransaction("request body producer aborted");
2445 }
2446
2447 // called when we wrote request headers(!) or a part of the body
2448 void
sentRequestBody(const CommIoCbParams & io)2449 HttpStateData::sentRequestBody(const CommIoCbParams &io)
2450 {
2451 if (io.size > 0)
2452 statCounter.server.http.kbytes_out += io.size;
2453
2454 Client::sentRequestBody(io);
2455 }
2456
2457 void
abortAll(const char * reason)2458 HttpStateData::abortAll(const char *reason)
2459 {
2460 debugs(11,5, HERE << "aborting transaction for " << reason <<
2461 "; " << serverConnection << ", this " << this);
2462 mustStop(reason);
2463 }
2464
ReuseDecision(const StoreEntry * e,const Http::StatusCode code)2465 HttpStateData::ReuseDecision::ReuseDecision(const StoreEntry *e, const Http::StatusCode code)
2466 : answer(HttpStateData::ReuseDecision::reuseNot), reason(nullptr), entry(e), statusCode(code) {}
2467
2468 HttpStateData::ReuseDecision::Answers
make(const HttpStateData::ReuseDecision::Answers ans,const char * why)2469 HttpStateData::ReuseDecision::make(const HttpStateData::ReuseDecision::Answers ans, const char *why)
2470 {
2471 answer = ans;
2472 reason = why;
2473 return answer;
2474 }
2475
operator <<(std::ostream & os,const HttpStateData::ReuseDecision & d)2476 std::ostream &operator <<(std::ostream &os, const HttpStateData::ReuseDecision &d)
2477 {
2478 static const char *ReuseMessages[] = {
2479 "do not cache and do not share", // reuseNot
2480 "cache positively and share", // cachePositively
2481 "cache negatively and share", // cacheNegatively
2482 "do not cache but share" // doNotCacheButShare
2483 };
2484
2485 assert(d.answer >= HttpStateData::ReuseDecision::reuseNot &&
2486 d.answer <= HttpStateData::ReuseDecision::doNotCacheButShare);
2487 return os << ReuseMessages[d.answer] << " because " << d.reason <<
2488 "; HTTP status " << d.statusCode << " " << *(d.entry);
2489 }
2490
2491