1 /*-
2  * Copyright (c) 2011 Varnish Software AS
3  * All rights reserved.
4  *
5  * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
6  *
7  * SPDX-License-Identifier: BSD-2-Clause
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  * VED - Varnish Esi Delivery
31  */
32 
33 #include "config.h"
34 
35 #include "cache_varnishd.h"
36 
37 #include <stdlib.h>
38 
39 #include "cache_transport.h"
40 #include "cache_filter.h"
41 #include "cache_vgz.h"
42 
43 #include "vtim.h"
44 #include "cache_esi.h"
45 #include "vend.h"
46 #include "vgz.h"
47 
48 static vtr_deliver_f ved_deliver;
49 static vtr_reembark_f ved_reembark;
50 
/*
 * Canonical 10-byte gzip member header (RFC 1952):
 * ID1 ID2 (0x1f 0x8b), CM=8 (deflate), FLG=0, MTIME=0,
 * XFL=2 (max compression), OS=3 (Unix).
 */
static const uint8_t gzip_hdr[] = {
	0x1f, 0x8b, 0x08,
	0x00, 0x00, 0x00, 0x00,
	0x00,
	0x02, 0x03
};
57 
/*
 * Per-request ESI delivery context.  One ecx exists for each request
 * being ESI-delivered; nested includes get their own ecx and link to
 * the parent's via 'pecx' when the parent emits gzip.
 */
struct ecx {
	unsigned	magic;
#define ECX_MAGIC	0x0b0f9163
	const uint8_t	*p;	/* Cursor into the OA_ESIDATA vector */
	const uint8_t	*e;	/* End of the OA_ESIDATA vector */
	int		state;	/* ved_vdp_esi_bytes() state machine */
	ssize_t		l;	/* Bytes left to emit/skip in state 3/4 */
	int		isgzip;	/* Non-zero when delivering gzip'ed output */
	int		woken;	/* Reembark wakeup flag, under sp->mtx */

	struct req	*preq;	/* The request we are delivering */
	struct ecx	*pecx;	/* Parent ecx iff the parent is gzip'ing */
	ssize_t		l_crc;	/* Uncompressed length covered by 'crc' */
	uint32_t	crc;	/* Running CRC32 of uncompressed output */
};
73 
/*
 * ESI sub-requests deliver into the middle of the parent's response
 * body, so a synthetic minimal response must never be attempted here.
 */
static int v_matchproto_(vtr_minimal_response_f)
ved_minimal_response(struct req *req, uint16_t status)
{
	(void)req;
	(void)status;
	WRONG("esi:includes should not try minimal responses");
}
81 
/* Transport used by ESI include (sub)requests */
static const struct transport VED_transport = {
	.magic =		TRANSPORT_MAGIC,
	.name =			"ESI_INCLUDE",
	.deliver =		ved_deliver,
	.reembark =		ved_reembark,
	.minimal_response =	ved_minimal_response,
};
89 
90 /*--------------------------------------------------------------------*/
91 
/*
 * Wake the parent delivery thread when a disembarked ESI sub-request
 * becomes runnable again; ved_include() waits on the worker's condvar
 * under sp->mtx, which also protects the 'woken' flag.
 */
static void v_matchproto_(vtr_reembark_f)
ved_reembark(struct worker *wrk, struct req *req)
{
	struct ecx *ecx;

	(void)wrk;
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CAST_OBJ_NOTNULL(ecx, req->transport_priv, ECX_MAGIC);
	Lck_Lock(&req->sp->mtx);
	ecx->woken = 1;
	AZ(pthread_cond_signal(&ecx->preq->wrk->cond));
	Lck_Unlock(&req->sp->mtx);
}
105 
106 /*--------------------------------------------------------------------*/
107 
/*
 * Synchronously execute one ESI include.
 *
 * Builds a child request from the parent's original request headers
 * (preq->http0), points it at 'src' (optionally replacing the Host
 * header with 'host'), and runs it through the request FSM on the
 * parent's worker thread until done.  The child delivers through
 * VED_transport, i.e. into the parent's VDP chain via 'ecx'.
 */
static void
ved_include(struct req *preq, const char *src, const char *host,
    struct ecx *ecx)
{
	struct worker *wrk;
	struct sess *sp;
	struct req *req;
	enum req_fsm_nxt s;

	CHECK_OBJ_NOTNULL(preq, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(preq->top, REQTOP_MAGIC);
	sp = preq->sp;
	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	CHECK_OBJ_NOTNULL(ecx, ECX_MAGIC);
	wrk = preq->wrk;

	/* Refuse to recurse past the configured depth */
	if (preq->esi_level >= cache_param->max_esi_depth) {
		VSLb(preq->vsl, SLT_VCL_Error,
		    "ESI depth limit reach (param max_esi_depth = %u)",
		    cache_param->max_esi_depth);
		return;
	}

	req = Req_New(wrk, sp);
	AN(req);
	THR_SetRequest(req);
	AZ(req->vsl->wid);
	req->vsl->wid = VXID_Get(wrk, VSL_CLIENTMARKER);

	/* Cross-link parent and child in the log */
	VSLb(req->vsl, SLT_Begin, "req %u esi", VXID(preq->vsl->wid));
	VSLb(preq->vsl, SLT_Link, "req %u esi", VXID(req->vsl->wid));

	VSLb_ts_req(req, "Start", W_TIM_real(wrk));

	wrk->stats->esi_req++;
	req->esi_level = preq->esi_level + 1;

	/* The child shares the parent's top request */
	memset(req->top, 0, sizeof *req->top);
	req->top = preq->top;

	HTTP_Setup(req->http, req->ws, req->vsl, SLT_ReqMethod);
	HTTP_Dup(req->http, preq->http0);

	http_SetH(req->http, HTTP_HDR_URL, src);
	if (host != NULL && *host != '\0')  {
		http_Unset(req->http, H_Host);
		http_SetHeader(req->http, host);
	}

	http_ForceField(req->http, HTTP_HDR_METHOD, "GET");
	http_ForceField(req->http, HTTP_HDR_PROTO, "HTTP/1.1");

	/* Don't allow conditionals, we can't use a 304 */
	http_Unset(req->http, H_If_Modified_Since);
	http_Unset(req->http, H_If_None_Match);

	/* Don't allow Range */
	http_Unset(req->http, H_Range);

	/* Set Accept-Encoding according to what we want */
	if (ecx->isgzip)
		http_ForceHeader(req->http, H_Accept_Encoding, "gzip");
	else
		http_Unset(req->http, H_Accept_Encoding);

	/* Client content already taken care of */
	http_Unset(req->http, H_Content_Length);
	req->req_body_status = BS_NONE;

	/* Run under the top request's original VCL when one is recorded */
	AZ(req->vcl);
	AN(req->top);
	if (req->top->vcl0)
		req->vcl = req->top->vcl0;
	else
		req->vcl = preq->vcl;
	VCL_Ref(req->vcl);

	assert(req->req_step == R_STP_TRANSPORT);
	req->t_req = preq->t_req;

	req->transport = &VED_transport;
	req->transport_priv = ecx;

	VCL_TaskEnter(req->privs);

	/*
	 * Drive the child FSM to completion; if it disembarks (e.g. on a
	 * waitinglist) sleep until ved_reembark() wakes us.  'woken' is
	 * rechecked under sp->mtx to close the race with the signal.
	 */
	while (1) {
		CNT_Embark(wrk, req);
		ecx->woken = 0;
		s = CNT_Request(req);
		if (s == REQ_FSM_DONE)
			break;
		DSL(DBG_WAITINGLIST, req->vsl->wid,
		    "loop waiting for ESI (%d)", (int)s);
		assert(s == REQ_FSM_DISEMBARK);
		Lck_Lock(&sp->mtx);
		if (!ecx->woken)
			(void)Lck_CondWait(
			    &ecx->preq->wrk->cond, &sp->mtx, 0);
		Lck_Unlock(&sp->mtx);
		AZ(req->wrk);
	}

	VCL_Rel(&req->vcl);

	req->wrk = NULL;
	THR_SetRequest(preq);

	Req_Cleanup(sp, wrk, req);
	Req_Release(req);
}
218 
219 /*--------------------------------------------------------------------*/
220 
221 //#define Debug(fmt, ...) printf(fmt, __VA_ARGS__)
222 #define Debug(fmt, ...) /**/
223 
224 static ssize_t
ved_decode_len(struct vsl_log * vsl,const uint8_t ** pp)225 ved_decode_len(struct vsl_log *vsl, const uint8_t **pp)
226 {
227 	const uint8_t *p;
228 	ssize_t l;
229 
230 	p = *pp;
231 	switch (*p & 15) {
232 	case 1:
233 		l = p[1];
234 		p += 2;
235 		break;
236 	case 2:
237 		l = vbe16dec(p + 1);
238 		p += 3;
239 		break;
240 	case 8:
241 		l = vbe64dec(p + 1);
242 		p += 9;
243 		break;
244 	default:
245 		VSLb(vsl, SLT_Error,
246 		    "ESI-corruption: Illegal Length %d %d\n", *p, (*p & 15));
247 		WRONG("ESI-codes: illegal length");
248 	}
249 	*pp = p;
250 	assert(l > 0);
251 	return (l);
252 }
253 
254 /*---------------------------------------------------------------------
255  */
256 
/*
 * Engage ESI delivery for an object that carries an OA_ESIDATA
 * attribute; returns 1 (do not push this VDP) otherwise.
 */
static int v_matchproto_(vdp_init_f)
ved_vdp_esi_init(struct vdp_ctx *vdc, void **priv, struct objcore *oc)
{
	struct ecx *ecx;
	struct req *req;

	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	CHECK_OBJ_ORNULL(oc, OBJCORE_MAGIC);
	if (oc == NULL || !ObjHasAttr(vdc->wrk, oc, OA_ESIDATA))
		return (1);

	req = vdc->req;
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	AN(priv);
	AZ(*priv);

	ALLOC_OBJ(ecx, ECX_MAGIC);
	AN(ecx);
	assert(sizeof gzip_hdr == 10);
	ecx->preq = req;
	*priv = ecx;
	/* ESI processing rewrites the body, so a strong ETag is no
	 * longer valid */
	RFC2616_Weaken_Etag(req->resp);

	req->res_mode |= RES_ESI;
	/* Final body length cannot be known up front */
	if (req->resp_len != 0)
		req->resp_len = -1;
	if (req->esi_level > 0) {
		/* Nested include: keep a link to the parent's ecx, but
		 * only if the parent emits gzip (for CRC propagation) */
		assert(req->transport == &VED_transport);
		CAST_OBJ_NOTNULL(ecx->pecx, req->transport_priv, ECX_MAGIC);
		if (!ecx->pecx->isgzip)
			ecx->pecx = NULL;
	}

	return (0);
}
292 
/* Release the ESI delivery context */
static int v_matchproto_(vdp_fini_f)
ved_vdp_esi_fini(struct vdp_ctx *vdc, void **priv)
{
	struct ecx *ecx;

	(void)vdc;
	AN(priv);
	CAST_OBJ_NOTNULL(ecx, *priv, ECX_MAGIC);
	FREE_OBJ(ecx);
	*priv = NULL;
	return (0);
}
305 
/*
 * Interpret the OA_ESIDATA byte-code vector against the object body,
 * interleaving verbatim chunks, skips and synchronous includes.
 *
 * States:
 *   0  - fetch the ESI vector, emit the gzip header if called for
 *   1  - decode the next VEC_* instruction
 *   2  - end of vector: emit gzip tail, or fold CRC into the parent
 *   3  - emit ecx->l verbatim bytes from the object body
 *   4  - skip ecx->l bytes of the object body
 *   99 - finished; swallow any trailing bytes
 */
static int v_matchproto_(vdp_bytes_f)
ved_vdp_esi_bytes(struct vdp_ctx *vdx, enum vdp_action act, void **priv,
    const void *ptr, ssize_t len)
{
	uint8_t *q, *r;
	ssize_t l = 0;
	uint32_t icrc = 0;
	uint8_t tailbuf[8 + 5];
	const uint8_t *pp;
	struct ecx *ecx;
	int retval = 0;

	/* The one and only VDP_END is sent from state 2 */
	if (act == VDP_END)
		act = VDP_FLUSH;

	AN(priv);
	CHECK_OBJ_NOTNULL(vdx, VDP_CTX_MAGIC);
	CAST_OBJ_NOTNULL(ecx, *priv, ECX_MAGIC);
	pp = ptr;

	while (1) {
		switch (ecx->state) {
		case 0:
			ecx->p = ObjGetAttr(vdx->wrk, ecx->preq->objcore,
			    OA_ESIDATA, &l);
			AN(ecx->p);
			assert(l > 0);
			ecx->e = ecx->p + l;

			if (*ecx->p == VEC_GZ) {
				/* Gzip'ed delivery; only the outermost
				 * request emits the gzip header */
				if (ecx->pecx == NULL)
					retval = VDP_bytes(vdx, VDP_NULL,
					    gzip_hdr, 10);
				ecx->l_crc = 0;
				ecx->crc = crc32(0L, Z_NULL, 0);
				ecx->isgzip = 1;
				ecx->p++;
			}
			ecx->state = 1;
			break;
		case 1:
			if (ecx->p >= ecx->e) {
				ecx->state = 2;
				break;
			}
			switch (*ecx->p) {
			case VEC_V1:
			case VEC_V2:
			case VEC_V8:
				/* Verbatim chunk; in gzip mode it is
				 * followed by its length and CRC32 */
				ecx->l = ved_decode_len(vdx->vsl, &ecx->p);
				if (ecx->l < 0)
					return (-1);
				if (ecx->isgzip) {
					assert(*ecx->p == VEC_C1 ||
					    *ecx->p == VEC_C2 ||
					    *ecx->p == VEC_C8);
					l = ved_decode_len(vdx->vsl, &ecx->p);
					if (l < 0)
						return (-1);
					icrc = vbe32dec(ecx->p);
					ecx->p += 4;
					ecx->crc = crc32_combine(
					    ecx->crc, icrc, l);
					ecx->l_crc += l;
				}
				ecx->state = 3;
				break;
			case VEC_S1:
			case VEC_S2:
			case VEC_S8:
				/* Skip chunk */
				ecx->l = ved_decode_len(vdx->vsl, &ecx->p);
				if (ecx->l < 0)
					return (-1);
				Debug("SKIP1(%d)\n", (int)ecx->l);
				ecx->state = 4;
				break;
			case VEC_INCL:
				/* Include: two NUL-terminated strings
				 * follow the opcode, host then URL */
				ecx->p++;
				q = (void*)strchr((const char*)ecx->p, '\0');
				AN(q);
				q++;
				r = (void*)strchr((const char*)q, '\0');
				AN(r);
				/* Flush what we have before the include
				 * starts pushing its own bytes */
				if (VDP_bytes(vdx, VDP_FLUSH, NULL, 0)) {
					ecx->p = ecx->e;
					break;
				}
				Debug("INCL [%s][%s] BEGIN\n", q, ecx->p);
				ved_include(ecx->preq,
				    (const char*)q, (const char*)ecx->p, ecx);
				Debug("INCL [%s][%s] END\n", q, ecx->p);
				ecx->p = r + 1;
				break;
			default:
				VSLb(vdx->vsl, SLT_Error,
				    "ESI corruption line %d 0x%02x [%s]\n",
				    __LINE__, *ecx->p, ecx->p);
				WRONG("ESI-codes: Illegal code");
			}
			break;
		case 2:
			ptr = NULL;
			len = 0;
			if (ecx->isgzip && ecx->pecx == NULL) {
				/*
				 * We are bytealigned here, so simply emit
				 * a gzip literal block with finish bit set.
				 */
				tailbuf[0] = 0x01;
				tailbuf[1] = 0x00;
				tailbuf[2] = 0x00;
				tailbuf[3] = 0xff;
				tailbuf[4] = 0xff;

				/* Emit CRC32 */
				vle32enc(tailbuf + 5, ecx->crc);

				/* MOD(2^32) length */
				vle32enc(tailbuf + 9, ecx->l_crc);

				ptr = tailbuf;
				len = 13;
			} else if (ecx->pecx != NULL) {
				/* Nested gzip include: fold our CRC and
				 * length into the parent's instead */
				ecx->pecx->crc = crc32_combine(ecx->pecx->crc,
				    ecx->crc, ecx->l_crc);
				ecx->pecx->l_crc += ecx->l_crc;
			}
			retval = VDP_bytes(vdx, VDP_END, ptr, len);
			ecx->state = 99;
			return (retval);
		case 3:
		case 4:
			/*
			 * There is no guarantee that the 'l' bytes are all
			 * in the same storage segment, so loop over storage
			 * until we have processed them all.
			 */
			if (ecx->l <= len) {
				if (ecx->state == 3)
					retval = VDP_bytes(vdx, act,
					    pp, ecx->l);
				len -= ecx->l;
				pp += ecx->l;
				ecx->state = 1;
				break;
			}
			if (ecx->state == 3 && len > 0)
				retval = VDP_bytes(vdx, act, pp, len);
			ecx->l -= len;
			return (retval);
		case 99:
			/*
			 * VEP does not account for the PAD+CRC+LEN
			 * so we can see up to approx 15 bytes here.
			 */
			return (retval);
		default:
			WRONG("FOO");
			break;
		}
		if (retval)
			return (retval);
	}
}
470 
/* The ESI-interpreting delivery processor */
const struct vdp VDP_esi = {
	.name =		"esi",
	.init =		ved_vdp_esi_init,
	.bytes =	ved_vdp_esi_bytes,
	.fini =		ved_vdp_esi_fini,
};
477 
478 /*
479  * Account body bytes on req
480  * Push bytes to preq
481  */
482 static inline int
ved_bytes(struct ecx * ecx,enum vdp_action act,const void * ptr,ssize_t len)483 ved_bytes(struct ecx *ecx, enum vdp_action act,
484     const void *ptr, ssize_t len)
485 {
486 	if (act == VDP_END)
487 		act = VDP_FLUSH;
488 	return (VDP_bytes(ecx->preq->vdc, act, ptr, len));
489 }
490 
491 /*---------------------------------------------------------------------
 * If a gzip'ed ESI object includes an ungzip'ed object, we need to make
 * it look like a gzip'ed data stream.  The official way to do so would
494  * be to fire up libvgz and gzip it, but we don't, we fake it.
495  *
496  * First, we cannot know if it is ungzip'ed on purpose, the admin may
497  * know something we don't.
498  *
499  * What do you mean "BS ?"
500  *
501  * All right then...
502  *
503  * The matter of the fact is that we simply will not fire up a gzip in
504  * the output path because it costs too much memory and CPU, so we simply
505  * wrap the data in very convenient "gzip copy-blocks" and send it down
506  * the stream with a bit more overhead.
507  */
508 
/* Nothing to free: priv is the parent's ecx, owned elsewhere */
static int v_matchproto_(vdp_fini_f)
ved_pretend_gzip_fini(struct vdp_ctx *vdc, void **priv)
{
	(void)vdc;
	*priv = NULL;
	return (0);
}
516 
v_matchproto_(vdp_bytes_f)517 static int v_matchproto_(vdp_bytes_f)
518 ved_pretend_gzip_bytes(struct vdp_ctx *vdx, enum vdp_action act, void **priv,
519     const void *pv, ssize_t l)
520 {
521 	uint8_t buf1[5], buf2[5];
522 	const uint8_t *p;
523 	uint16_t lx;
524 	struct ecx *ecx;
525 
526 	CHECK_OBJ_NOTNULL(vdx, VDP_CTX_MAGIC);
527 	CAST_OBJ_NOTNULL(ecx, *priv, ECX_MAGIC);
528 
529 	(void)priv;
530 	if (l == 0)
531 		return (ved_bytes(ecx, act, pv, l));
532 
533 	p = pv;
534 
535 	AN (ecx->isgzip);
536 	ecx->crc = crc32(ecx->crc, p, l);
537 	ecx->l_crc += l;
538 
539 	lx = 65535;
540 	buf1[0] = 0;
541 	vle16enc(buf1 + 1, lx);
542 	vle16enc(buf1 + 3, ~lx);
543 
544 	while (l > 0) {
545 		if (l >= 65535) {
546 			lx = 65535;
547 			if (ved_bytes(ecx, VDP_NULL, buf1, sizeof buf1))
548 				return (-1);
549 		} else {
550 			lx = (uint16_t)l;
551 			buf2[0] = 0;
552 			vle16enc(buf2 + 1, lx);
553 			vle16enc(buf2 + 3, ~lx);
554 			if (ved_bytes(ecx, VDP_NULL, buf2, sizeof buf2))
555 				return (-1);
556 		}
557 		if (ved_bytes(ecx, VDP_NULL, p, lx))
558 			return (-1);
559 		l -= lx;
560 		p += lx;
561 	}
562 	/* buf1 & buf2 is local, have to flush */
563 	return (ved_bytes(ecx, VDP_FLUSH, NULL, 0));
564 }
565 
/* Plain include into gzip'ed parent: fake-gzip wrapper VDP */
static const struct vdp ved_pretend_gz = {
	.name =		"PGZ",
	.bytes =	ved_pretend_gzip_bytes,
	.fini =		ved_pretend_gzip_fini,
};
571 
572 /*---------------------------------------------------------------------
573  * Include a gzip'ed object in a gzip'ed ESI object delivery
574  *
575  * This is the interesting case: Deliver all the deflate blocks, stripping
576  * the "LAST" bit of the last one and padding it, as necessary, to a byte
577  * boundary.
578  *
579  */
580 
/*
 * State for re-packing a gzip'ed include into a gzip'ed parent:
 * deflate block boundaries (bit offsets from OA_GZIPBITS) plus
 * scratch space for bit-fiddled bytes and the collected gzip tail.
 */
struct ved_foo {
	unsigned		magic;
#define VED_FOO_MAGIC		0x6a5a262d
	struct ecx		*ecx;		/* Parent delivery context */
	struct objcore		*objcore;	/* The gzip'ed include */
	uint64_t		start, last, stop, lpad;
						/* bit offsets of first block,
						 * last block and end of data;
						 * lpad = pad bytes to emit */
	ssize_t			ll;		/* Object bytes consumed */
	uint64_t		olen;		/* Total object length */
	uint8_t			dbits[8];	/* Modified/padding bytes */
	uint8_t			tailbuf[8];	/* Gzip tail: CRC32 + ISIZE */
};
592 
v_matchproto_(vdp_fini_f)593 static int v_matchproto_(vdp_fini_f)
594 ved_gzgz_init(struct vdp_ctx *vdc, void **priv, struct objcore *oc)
595 {
596 	ssize_t l;
597 	const char *p;
598 	struct ved_foo *foo;
599 	struct req *req;
600 
601 	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
602 	(void)oc;
603 	req = vdc->req;
604 	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
605 	CAST_OBJ_NOTNULL(foo, *priv, VED_FOO_MAGIC);
606 	CHECK_OBJ_NOTNULL(foo->objcore, OBJCORE_MAGIC);
607 
608 	memset(foo->tailbuf, 0xdd, sizeof foo->tailbuf);
609 
610 	AN(ObjCheckFlag(vdc->wrk, foo->objcore, OF_GZIPED));
611 
612 	p = ObjGetAttr(vdc->wrk, foo->objcore, OA_GZIPBITS, &l);
613 	AN(p);
614 	assert(l == 32);
615 	foo->start = vbe64dec(p);
616 	foo->last = vbe64dec(p + 8);
617 	foo->stop = vbe64dec(p + 16);
618 	foo->olen = ObjGetLen(vdc->wrk, foo->objcore);
619 	assert(foo->start > 0 && foo->start < foo->olen * 8);
620 	assert(foo->last > 0 && foo->last < foo->olen * 8);
621 	assert(foo->stop > 0 && foo->stop < foo->olen * 8);
622 	assert(foo->last >= foo->start);
623 	assert(foo->last < foo->stop);
624 
625 	/* The start bit must be byte aligned. */
626 	AZ(foo->start & 7);
627 	return (0);
628 }
629 
630 /*
631  * XXX: for act == VDP_END || act == VDP_FLUSH, we send a flush more often than
632  * we need. The VDP_END case would trip our "at most one VDP_END call" assertion
633  * in VDP_bytes(), but ved_bytes() covers it.
634  *
635  * To avoid unnecessary chunks downstream, it would be nice to re-structure the
 * code to identify the last block, send VDP_END/VDP_FLUSH for that one and
637  * VDP_NULL for anything before it.
638  */
639 
/*
 * Stream the deflate data of a gzip'ed include into the parent:
 * skip the gzip header (everything before 'start'), pass the deflate
 * blocks through, clear the BFINAL bit on the last block, pad the
 * stream to a byte boundary with empty deflate constructs, and stash
 * the 8-byte gzip tail (CRC32 + ISIZE) for ved_gzgz_fini().
 *
 * foo->ll counts object bytes consumed so far; each stage below fires
 * when the input window overlaps its [bit-offset / 8] range.
 */
static int v_matchproto_(vdp_bytes_f)
ved_gzgz_bytes(struct vdp_ctx *vdx, enum vdp_action act, void **priv,
    const void *ptr, ssize_t len)
{
	struct ved_foo *foo;
	const uint8_t *pp;
	ssize_t dl;
	ssize_t l;

	(void)vdx;
	CAST_OBJ_NOTNULL(foo, *priv, VED_FOO_MAGIC);
	pp = ptr;
	if (len > 0) {
		/* Skip over the GZIP header */
		dl = foo->start / 8 - foo->ll;
		if (dl > 0) {
			/* Before foo.start, skip */
			if (dl > len)
				dl = len;
			foo->ll += dl;
			len -= dl;
			pp += dl;
		}
	}
	if (len > 0) {
		/* The main body of the object */
		dl = foo->last / 8 - foo->ll;
		if (dl > 0) {
			if (dl > len)
				dl = len;
			if (ved_bytes(foo->ecx, act, pp, dl))
				return (-1);
			foo->ll += dl;
			len -= dl;
			pp += dl;
		}
	}
	if (len > 0 && foo->ll == foo->last / 8) {
		/* Remove the "LAST" bit */
		foo->dbits[0] = *pp;
		foo->dbits[0] &= ~(1U << (foo->last & 7));
		if (ved_bytes(foo->ecx, act, foo->dbits, 1))
			return (-1);
		foo->ll++;
		len--;
		pp++;
	}
	if (len > 0) {
		/* Last block */
		dl = foo->stop / 8 - foo->ll;
		if (dl > 0) {
			if (dl > len)
				dl = len;
			if (ved_bytes(foo->ecx, act, pp, dl))
				return (-1);
			foo->ll += dl;
			len -= dl;
			pp += dl;
		}
	}
	if (len > 0 && (foo->stop & 7) && foo->ll == foo->stop / 8) {
		/* Add alignment to byte boundary */
		foo->dbits[1] = *pp;
		foo->ll++;
		len--;
		pp++;
		/* Depending on how many bits of the last byte are used,
		 * pad with empty non-final deflate blocks */
		switch ((int)(foo->stop & 7)) {
		case 1: /*
			 * x000....
			 * 00000000 00000000 11111111 11111111
			 */
		case 3: /*
			 * xxx000..
			 * 00000000 00000000 11111111 11111111
			 */
		case 5: /*
			 * xxxxx000
			 * 00000000 00000000 11111111 11111111
			 */
			foo->dbits[2] = 0x00; foo->dbits[3] = 0x00;
			foo->dbits[4] = 0xff; foo->dbits[5] = 0xff;
			foo->lpad = 5;
			break;
		case 2: /* xx010000 00000100 00000001 00000000 */
			foo->dbits[1] |= 0x08;
			foo->dbits[2] = 0x20;
			foo->dbits[3] = 0x80;
			foo->dbits[4] = 0x00;
			foo->lpad = 4;
			break;
		case 4: /* xxxx0100 00000001 00000000 */
			foo->dbits[1] |= 0x20;
			foo->dbits[2] = 0x80;
			foo->dbits[3] = 0x00;
			foo->lpad = 3;
			break;
		case 6: /* xxxxxx01 00000000 */
			foo->dbits[1] |= 0x80;
			foo->dbits[2] = 0x00;
			foo->lpad = 2;
			break;
		case 7:	/*
			 * xxxxxxx0
			 * 00......
			 * 00000000 00000000 11111111 11111111
			 */
			foo->dbits[2] = 0x00;
			foo->dbits[3] = 0x00; foo->dbits[4] = 0x00;
			foo->dbits[5] = 0xff; foo->dbits[6] = 0xff;
			foo->lpad = 6;
			break;
		case 0: /* xxxxxxxx */
		default:
			WRONG("compiler must be broken");
		}
		if (ved_bytes(foo->ecx, act, foo->dbits + 1, foo->lpad))
			return (-1);
	}
	if (len > 0) {
		/* Recover GZIP tail */
		dl = foo->olen - foo->ll;
		assert(dl >= 0);
		if (dl > len)
			dl = len;
		if (dl > 0) {
			assert(dl <= 8);
			l = foo->ll - (foo->olen - 8);
			assert(l >= 0);
			assert(l <= 8);
			assert(l + dl <= 8);
			memcpy(foo->tailbuf + l, pp, dl);
			foo->ll += dl;
			len -= dl;
		}
	}
	assert(len == 0);
	return (0);
}
778 
/*
 * Flush pending bytes and fold the include's CRC32 and uncompressed
 * length (recovered from its gzip tail) into the parent's running CRC.
 */
static int v_matchproto_(vdp_fini_f)
ved_gzgz_fini(struct vdp_ctx *vdc, void **priv)
{
	uint32_t icrc;
	uint32_t ilen;
	struct ved_foo *foo;

	(void)vdc;
	CAST_OBJ_NOTNULL(foo, *priv, VED_FOO_MAGIC);
	*priv = NULL;

	/* XXX
	 * this works due to the esi layering, a VDP pushing bytes from _fini
	 * will otherwise have its own _bytes method called.
	 *
	 * Could rewrite use VDP_END
	 */
	(void)ved_bytes(foo->ecx, VDP_FLUSH, NULL, 0);

	icrc = vle32dec(foo->tailbuf);
	ilen = vle32dec(foo->tailbuf + 4);
	foo->ecx->crc = crc32_combine(foo->ecx->crc, icrc, ilen);
	foo->ecx->l_crc += ilen;

	return (0);
}
805 
/* Gzip'ed include into gzip'ed parent: deflate re-packing VDP */
static const struct vdp ved_gzgz = {
	.name =         "VZZ",
	.init =         ved_gzgz_init,
	.bytes =        ved_gzgz_bytes,
	.fini =         ved_gzgz_fini,
};
812 
813 /*--------------------------------------------------------------------
814  * Straight through without processing.
815  */
816 
/* Nothing to free: priv is the parent's ecx, owned elsewhere */
static int v_matchproto_(vdp_fini_f)
ved_vdp_fini(struct vdp_ctx *vdc, void **priv)
{
	(void)vdc;
	*priv = NULL;
	return (0);
}
824 
/* Forward bytes unmodified into the parent's VDP chain */
static int v_matchproto_(vdp_bytes_f)
ved_vdp_bytes(struct vdp_ctx *vdx, enum vdp_action act, void **priv,
    const void *ptr, ssize_t len)
{
	struct ecx *ecx;

	(void)vdx;
	CAST_OBJ_NOTNULL(ecx, *priv, ECX_MAGIC);
	return (ved_bytes(ecx, act, ptr, len));
}
835 
/* Pass-through VDP for includes needing no repackaging */
static const struct vdp ved_ved = {
	.name =		"VED",
	.bytes =	ved_vdp_bytes,
	.fini =		ved_vdp_fini,
};
841 
842 /*--------------------------------------------------------------------*/
843 
/*
 * Deliver the body of an ESI-included object into the parent response,
 * picking the appropriate repackaging VDP:
 *   - gzip'ed object into a gzip'ed parent (not itself ESI): ved_gzgz
 *   - plain object into a gzip'ed parent: ved_pretend_gz
 *   - anything else: pass-through ved_ved
 */
static void v_matchproto_(vtr_deliver_f)
ved_deliver(struct req *req, struct boc *boc, int wantbody)
{
	int i = 0;
	const char *p;
	struct ecx *ecx;
	struct ved_foo foo[1];

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_ORNULL(boc, BOC_MAGIC);
	CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC);

	CAST_OBJ_NOTNULL(ecx, req->transport_priv, ECX_MAGIC);

	if (wantbody == 0)
		return;

	/* Completed empty body: nothing to emit */
	if (boc == NULL && ObjGetLen(req->wrk, req->objcore) == 0)
		return;

	/* i: object is gzip'ed, both per header and per object flag */
	if (http_GetHdr(req->resp, H_Content_Encoding, &p))
		i = !strcasecmp(p, "gzip");
	if (i)
		i = ObjCheckFlag(req->wrk, req->objcore, OF_GZIPED);

	if (ecx->isgzip && i && !(req->res_mode & RES_ESI)) {
		/* A gzip'ed include which is not ESI processed */

		/* OA_GZIPBITS are not valid until BOS_FINISHED */
		if (boc != NULL)
			ObjWaitState(req->objcore, BOS_FINISHED);

		if (req->objcore->flags & OC_F_FAILED) {
			/* No way of signalling errors in the middle of
			   the ESI body. Omit this ESI fragment. */
			return;
		}

		INIT_OBJ(foo, VED_FOO_MAGIC);
		foo->ecx = ecx;
		foo->objcore = req->objcore;
		i = VDP_Push(req->vdc, req->ws, &ved_gzgz, foo);

	} else if (ecx->isgzip && !i) {
		/* Non-Gzip'ed include in gzip'ed parent */
		i = VDP_Push(req->vdc, req->ws, &ved_pretend_gz, ecx);
	} else {
		/* Anything else goes straight through */
		i = VDP_Push(req->vdc, req->ws, &ved_ved, ecx);
	}

	if (i == 0) {
		i = VDP_DeliverObj(req->vdc, req->objcore);
	} else {
		VSLb(req->vsl, SLT_Error, "Failure to push ESI processors");
		req->doclose = SC_OVERLOAD;
	}

	if (i && req->doclose == SC_NULL)
		req->doclose = SC_REM_CLOSE;

	req->acct.resp_bodybytes += VDP_Close(req->vdc);
}
907