1 /*-
2 * Copyright (c) 2006 Verdens Gang AS
3 * Copyright (c) 2006-2015 Varnish Software AS
4 * All rights reserved.
5 *
6 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
7 *
8 * SPDX-License-Identifier: BSD-2-Clause
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 */
32
33 #include "config.h"
34
35 #include <stdlib.h>
36
37 #include "cache_varnishd.h"
38 #include "cache_filter.h"
39 #include "cache_objhead.h"
40 #include "cache_transport.h"
41
42 #include "vtim.h"
43 #include "storage/storage.h"
44 #include "hash/hash_slinger.h"
45
46 /*----------------------------------------------------------------------
 * Pull the req.body in via/into an objcore
48 *
49 * This can be called only once per request
50 *
51 */
52
/*
 * Drain the request body from the client through the VFP (filter)
 * chain into a private objcore (req->body_oc).
 *
 * If "func" is non-NULL, each chunk is handed to func(priv, ...) as it
 * arrives and the objcore is discarded when done (one-shot streaming
 * consumption).  If "func" is NULL, the body is stored in the objcore
 * and the Content-Length headers are rewritten to the actual size.
 *
 * "maxsize" caps how many bytes are accepted; a negative maxsize means
 * unlimited (the streaming case passes -1).
 *
 * Returns the byte count on success, -1 (or func's return value) on
 * error, and updates req->req_body_status accordingly.
 */

static ssize_t
vrb_pull(struct req *req, ssize_t maxsize, objiterate_f *func, void *priv)
{
	ssize_t l, r = 0, yet;
	struct vfp_ctx *vfc;
	uint8_t *ptr;
	enum vfp_status vfps = VFP_ERROR;
	const struct stevedore *stv;
	ssize_t req_bodybytes = 0;

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

	CHECK_OBJ_NOTNULL(req->htc, HTTP_CONN_MAGIC);
	CHECK_OBJ_NOTNULL(req->vfc, VFP_CTX_MAGIC);
	vfc = req->vfc;

	/* Private objcore to receive the body */
	req->body_oc = HSH_Private(req->wrk);
	AN(req->body_oc);

	/* Use the VCL-selected storage if set, otherwise transient */
	if (req->storage != NULL)
		stv = req->storage;
	else
		stv = stv_transient;

	req->storage = NULL;

	if (STV_NewObject(req->wrk, req->body_oc, stv, 8) == 0) {
		req->req_body_status = BS_ERROR;
		HSH_DerefBoc(req->wrk, req->body_oc);
		AZ(HSH_DerefObjCore(req->wrk, &req->body_oc, 0));
		(void)VFP_Error(vfc, "Object allocation failed:"
		    " Ran out of space in %s", stv->vclname);
		return (-1);
	}

	vfc->oc = req->body_oc;

	if (VFP_Open(vfc) < 0) {
		req->req_body_status = BS_ERROR;
		HSH_DerefBoc(req->wrk, req->body_oc);
		AZ(HSH_DerefObjCore(req->wrk, &req->body_oc, 0));
		return (-1);
	}

	AN(req->htc);
	yet = req->htc->content_length;
	/* Client is waiting for "100 Continue": now we want the body */
	if (yet != 0 && req->want100cont) {
		req->want100cont = 0;
		(void)req->transport->minimal_response(req, 100);
	}
	/* Negative content_length = length not known up front; clamp hint */
	if (yet < 0)
		yet = 0;
	do {
		AZ(vfc->failed);
		if (maxsize >= 0 && req_bodybytes > maxsize) {
			(void)VFP_Error(vfc, "Request body too big to cache");
			break;
		}
		/* Hint the expected remainder to the storage allocator */
		l = yet;
		if (VFP_GetStorage(vfc, &l, &ptr) != VFP_OK)
			break;
		AZ(vfc->failed);
		AN(ptr);
		AN(l);
		vfps = VFP_Suck(vfc, ptr, &l);
		if (l > 0 && vfps != VFP_ERROR) {
			req_bodybytes += l;
			if (yet >= l)
				yet -= l;
			if (func != NULL) {
				/* Streaming: hand the chunk to the consumer */
				r = func(priv, 1, ptr, l);
				if (r)
					break;
			} else {
				/* Caching: commit the chunk to the objcore */
				ObjExtend(req->wrk, req->body_oc, l,
				    vfps == VFP_END ? 1 : 0);
			}
		}

	} while (vfps == VFP_OK);
	req->acct.req_bodybytes += VFP_Close(vfc);
	VSLb_ts_req(req, "ReqBody", VTIM_real());
	if (func != NULL) {
		/* Streaming consumers never keep the objcore around */
		HSH_DerefBoc(req->wrk, req->body_oc);
		AZ(HSH_DerefObjCore(req->wrk, &req->body_oc, 0));
		if (vfps != VFP_END) {
			req->req_body_status = BS_ERROR;
			if (r == 0)
				r = -1;
		}
		return (r);
	}

	AZ(ObjSetU64(req->wrk, req->body_oc, OA_LEN, req_bodybytes));
	HSH_DerefBoc(req->wrk, req->body_oc);

	if (vfps != VFP_END) {
		req->req_body_status = BS_ERROR;
		AZ(HSH_DerefObjCore(req->wrk, &req->body_oc, 0));
		return (-1);
	}

	assert(req_bodybytes >= 0);
	if (req_bodybytes != req->htc->content_length) {
		/* We must update also the "pristine" req.* copy */
		http_Unset(req->http0, H_Content_Length);
		http_Unset(req->http0, H_Transfer_Encoding);
		http_PrintfHeader(req->http0, "Content-Length: %ju",
		    (uintmax_t)req_bodybytes);

		http_Unset(req->http, H_Content_Length);
		http_Unset(req->http, H_Transfer_Encoding);
		http_PrintfHeader(req->http, "Content-Length: %ju",
		    (uintmax_t)req_bodybytes);
	}

	req->req_body_status = BS_CACHED;
	return (req_bodybytes);
}
172
173 /*----------------------------------------------------------------------
174 * Iterate over the req.body.
175 *
176 * This can be done exactly once if uncached, and multiple times if the
177 * req.body is cached.
178 *
179 * return length or -1 on error
180 */
181
182 ssize_t
VRB_Iterate(struct worker * wrk,struct vsl_log * vsl,struct req * req,objiterate_f * func,void * priv)183 VRB_Iterate(struct worker *wrk, struct vsl_log *vsl,
184 struct req *req, objiterate_f *func, void *priv)
185 {
186 int i;
187
188 CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
189 AN(func);
190
191 if (req->req_body_status == BS_CACHED) {
192 AN(req->body_oc);
193 if (ObjIterate(wrk, req->body_oc, priv, func, 0))
194 return (-1);
195 return (0);
196 }
197 if (req->req_body_status == BS_NONE)
198 return (0);
199 if (req->req_body_status == BS_TAKEN) {
200 VSLb(vsl, SLT_VCL_Error,
201 "Uncached req.body can only be consumed once.");
202 return (-1);
203 }
204 if (req->req_body_status == BS_ERROR) {
205 VSLb(vsl, SLT_FetchError,
206 "Had failed reading req.body before.");
207 return (-1);
208 }
209 Lck_Lock(&req->sp->mtx);
210 if (req->req_body_status->avail > 0) {
211 req->req_body_status = BS_TAKEN;
212 i = 0;
213 } else
214 i = -1;
215 Lck_Unlock(&req->sp->mtx);
216 if (i) {
217 VSLb(vsl, SLT_VCL_Error,
218 "Multiple attempts to access non-cached req.body");
219 return (i);
220 }
221 return (vrb_pull(req, -1, func, priv));
222 }
223
224 /*----------------------------------------------------------------------
225 * VRB_Ignore() is a dedicated function, because we might
 * be able to dissuade or terminate its transmission in some protocols.
227 *
228 * For HTTP1, we do nothing if we are going to close the connection anyway or
229 * just iterate it into oblivion.
230 */
231
v_matchproto_(objiterate_f)232 static int v_matchproto_(objiterate_f)
233 httpq_req_body_discard(void *priv, unsigned flush, const void *ptr, ssize_t len)
234 {
235
236 (void)priv;
237 (void)flush;
238 (void)ptr;
239 (void)len;
240 return (0);
241 }
242
243 int
VRB_Ignore(struct req * req)244 VRB_Ignore(struct req *req)
245 {
246
247 CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
248
249 if (req->doclose)
250 return (0);
251 if (req->req_body_status->avail > 0)
252 (void)VRB_Iterate(req->wrk, req->vsl, req,
253 httpq_req_body_discard, NULL);
254 if (req->req_body_status == BS_ERROR)
255 req->doclose = SC_RX_BODY;
256 return (0);
257 }
258
259 /*----------------------------------------------------------------------
260 */
261
262 void
VRB_Free(struct req * req)263 VRB_Free(struct req *req)
264 {
265 int r;
266
267 CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
268
269 if (req->body_oc == NULL)
270 return;
271
272 r = HSH_DerefObjCore(req->wrk, &req->body_oc, 0);
273
274 // each busyobj may have gained a reference
275 assert (r >= 0);
276 assert ((unsigned)r <= req->restarts + 1);
277 }
278
279 /*----------------------------------------------------------------------
280 * Cache the req.body if it is smaller than the given size
281 *
282 * This function must be called before any backend fetches are kicked
283 * off to prevent parallelism.
284 */
285
286 ssize_t
VRB_Cache(struct req * req,ssize_t maxsize)287 VRB_Cache(struct req *req, ssize_t maxsize)
288 {
289 uint64_t u;
290
291 CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
292 assert (req->req_step == R_STP_RECV);
293 assert(maxsize >= 0);
294
295 /*
296 * We only allow caching to happen the first time through vcl_recv{}
297 * where we know we will have no competition or conflicts for the
298 * updates to req.http.* etc.
299 */
300 if (req->restarts > 0 && req->req_body_status != BS_CACHED) {
301 VSLb(req->vsl, SLT_VCL_Error,
302 "req.body must be cached before restarts");
303 return (-1);
304 }
305
306 if (req->req_body_status == BS_CACHED) {
307 AZ(ObjGetU64(req->wrk, req->body_oc, OA_LEN, &u));
308 return (u);
309 }
310
311 if (req->req_body_status->avail <= 0)
312 return (req->req_body_status->avail);
313
314 if (req->htc->content_length > maxsize) {
315 req->req_body_status = BS_ERROR;
316 (void)VFP_Error(req->vfc, "Request body too big to cache");
317 return (-1);
318 }
319
320 return (vrb_pull(req, maxsize, NULL, NULL));
321 }
322