/*
 * HTTP/1 mux-demux for connections
 *
 * Copyright 2018 Christopher Faulet <cfaulet@haproxy.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
#include <common/cfgparse.h>
#include <common/config.h>
#include <common/h1.h>
#include <common/htx.h>
#include <common/initcall.h>

#include <types/pipe.h>
#include <types/proxy.h>
#include <types/session.h>

#include <proto/connection.h>
#include <proto/http_htx.h>
#include <proto/log.h>
#include <proto/session.h>
#include <proto/stream.h>
#include <proto/stream_interface.h>

/*
 *  H1 Connection flags (32 bits)
 */
#define H1C_F_NONE           0x00000000

/* Flags indicating why writing output data is blocked */
#define H1C_F_OUT_ALLOC      0x00000001 /* mux is blocked on lack of output buffer */
#define H1C_F_OUT_FULL       0x00000002 /* mux is blocked on output buffer full */
/* 0x00000004 - 0x00000008 unused */

/* Flags indicating why reading input data is blocked. */
#define H1C_F_IN_ALLOC       0x00000010 /* mux is blocked on lack of input buffer */
#define H1C_F_IN_FULL        0x00000020 /* mux is blocked on input buffer full */
#define H1C_F_IN_BUSY        0x00000040 /* mux is blocked on input waiting for the other side */
/* 0x00000080 - 0x00000400 unused */

#define H1C_F_CS_WAIT_CONN   0x00000800 /* waiting for the connection establishment */
#define H1C_F_CS_ERROR       0x00001000 /* connection must be closed ASAP because an error occurred */
#define H1C_F_CS_SHUTW_NOW   0x00002000 /* connection must be shut down for writes ASAP */
#define H1C_F_CS_SHUTDOWN    0x00004000 /* connection is shut down */
#define H1C_F_CS_IDLE        0x00008000 /* connection is idle and may be reused
					 * (exclusive to all H1C_F_CS flags and never set when an h1s is attached) */

#define H1C_F_WAIT_NEXT_REQ  0x00010000 /* waiting for the next request to start, use keep-alive timeout */

/*
 * H1 Stream flags (32 bits)
 */
#define H1S_F_NONE           0x00000000
#define H1S_F_ERROR          0x00000001 /* An error occurred on the H1 stream */
#define H1S_F_REQ_ERROR      0x00000002 /* An error occurred during the request parsing/xfer */
#define H1S_F_RES_ERROR      0x00000004 /* An error occurred during the response parsing/xfer */
/* 0x00000008 unused */
#define H1S_F_WANT_KAL       0x00000010
#define H1S_F_WANT_TUN       0x00000020
#define H1S_F_WANT_CLO       0x00000040
#define H1S_F_WANT_MSK       0x00000070
#define H1S_F_NOT_FIRST      0x00000080 /* The H1 stream is not the first one */
#define H1S_F_BUF_FLUSH      0x00000100 /* Flush input buffer and don't read more data */
#define H1S_F_SPLICED_DATA   0x00000200 /* Set when kernel splicing is in use */
#define H1S_F_HAVE_I_EOD     0x00000400 /* Set during input processing to know the last empty chunk was processed */
#define H1S_F_HAVE_I_TLR     0x00000800 /* Set during input processing to know the trailers were processed */
#define H1S_F_HAVE_O_EOD     0x00001000 /* Set during output processing to know the last empty chunk was processed */
#define H1S_F_HAVE_O_TLR     0x00002000 /* Set during output processing to know the trailers were processed */
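
/* Note on H1S_F_WANT_* ordering (illustrative): the values grow with the
 * "strength" of the mode (CLO 0x40 > TUN 0x20 > KAL 0x10), which is what
 * lets the mode-selection code below upgrade the mode with a simple
 * numeric comparison:
 *
 *     if ((h1s->flags & H1S_F_WANT_MSK) < flag)
 *             h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | flag;
 *
 * e.g. a stream currently in KAL may be switched to CLO, but a stream in
 * CLO is never silently downgraded back to KAL.
 */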

/* H1 connection descriptor */
struct h1c {
	struct connection *conn;
	struct proxy *px;
	uint32_t flags;                  /* Connection flags: H1C_F_* */

	struct buffer ibuf;              /* Input buffer to store data before parsing */
	struct buffer obuf;              /* Output buffer to store data after reformatting */

	struct buffer_wait buf_wait;     /* Wait list for buffer allocation */
	struct wait_event wait_event;    /* To be used if we're waiting for I/Os */

	struct h1s *h1s;                 /* H1 stream descriptor */
	struct task *task;               /* timeout management task */
	int timeout;                     /* idle timeout duration in ticks */
	int shut_timeout;                /* idle timeout duration in ticks after stream shutdown */
};

/* H1 stream descriptor */
struct h1s {
	struct h1c *h1c;
	struct conn_stream *cs;
	struct cs_info csinfo;         /* CS info, only used for client connections */
	uint32_t flags;                /* Stream flags: H1S_F_* */

	struct wait_event *recv_wait; /* Address of the wait_event the associated conn_stream is waiting on */
	struct wait_event *send_wait; /* Address of the wait_event the associated conn_stream is waiting on */

	struct session *sess;         /* Associated session */
	struct h1m req;
	struct h1m res;

	enum http_meth_t meth; /* HTTP request method */
	uint16_t status;       /* HTTP response status */
};

/* the h1c and h1s pools */
DECLARE_STATIC_POOL(pool_head_h1c, "h1c", sizeof(struct h1c));
DECLARE_STATIC_POOL(pool_head_h1s, "h1s", sizeof(struct h1s));

static int h1_recv(struct h1c *h1c);
static int h1_send(struct h1c *h1c);
static int h1_process(struct h1c *h1c);
static struct task *h1_io_cb(struct task *t, void *ctx, unsigned short state);
static void h1_shutw_conn(struct connection *conn, enum cs_shw_mode mode);
static struct task *h1_timeout_task(struct task *t, void *context, unsigned short state);

/*****************************************************/
/* functions below are for dynamic buffer management */
/*****************************************************/
/*
 * Indicates whether or not we may receive data. The rules are the following :
 *   - if an error or a shutdown for reads was detected on the connection we
 *     must not attempt to receive
 *   - if the input buffer failed to be allocated or is full, we must not try
 *     to receive
 *   - if the input processing is busy waiting for the output side, we must
 *     not attempt to receive
 *   - otherwise we may attempt to receive
 */
static inline int h1_recv_allowed(const struct h1c *h1c)
{
	if (h1c->flags & H1C_F_CS_ERROR)
		return 0;

	if (h1c->conn->flags & (CO_FL_ERROR|CO_FL_SOCK_RD_SH))
		return 0;

	if (!(h1c->flags & (H1C_F_IN_ALLOC|H1C_F_IN_FULL|H1C_F_IN_BUSY)))
		return 1;

	return 0;
}

/*
 * Tries to grab a buffer and to re-enable processing on mux <target>. The h1c
 * flags are used to figure out which buffer was requested. It returns 1 if the
 * allocation succeeds, in which case the connection is woken up, or 0 if it's
 * impossible to wake up and we prefer to be woken up later.
 */
static int h1_buf_available(void *target)
{
	struct h1c *h1c = target;

	if ((h1c->flags & H1C_F_IN_ALLOC) && b_alloc_margin(&h1c->ibuf, 0)) {
		h1c->flags &= ~H1C_F_IN_ALLOC;
		if (h1_recv_allowed(h1c))
			tasklet_wakeup(h1c->wait_event.task);
		return 1;
	}

	if ((h1c->flags & H1C_F_OUT_ALLOC) && b_alloc_margin(&h1c->obuf, 0)) {
		h1c->flags &= ~H1C_F_OUT_ALLOC;
		tasklet_wakeup(h1c->wait_event.task);
		return 1;
	}

	return 0;
}

/*
 * Allocate a buffer. If it fails, it adds the mux to the buffer wait queue.
 */
static inline struct buffer *h1_get_buf(struct h1c *h1c, struct buffer *bptr)
{
	struct buffer *buf = NULL;

	if (likely(LIST_ISEMPTY(&h1c->buf_wait.list)) &&
	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
		h1c->buf_wait.target = h1c;
		h1c->buf_wait.wakeup_cb = h1_buf_available;
		HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		LIST_ADDQ(&buffer_wq, &h1c->buf_wait.list);
		HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		__conn_xprt_stop_recv(h1c->conn);
	}
	return buf;
}
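
/* Typical caller pattern (sketch mirroring h1_process_output() below): on
 * allocation failure the caller records why it is blocked and bails out;
 * h1_buf_available() clears the flag and wakes the tasklet up once a
 * buffer is offered again:
 *
 *     if (!h1_get_buf(h1c, &h1c->obuf)) {
 *             h1c->flags |= H1C_F_OUT_ALLOC;
 *             goto end;
 *     }
 */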

/*
 * Release a buffer, if any, and try to wake up entities waiting in the buffer
 * wait queue.
 */
static inline void h1_release_buf(struct h1c *h1c, struct buffer *bptr)
{
	if (bptr->size) {
		b_free(bptr);
		offer_buffers(h1c->buf_wait.target, tasks_run_queue);
	}
}

/* returns the number of streams in use on a connection, to know whether it's
 * idle or not. We rely on H1C_F_CS_IDLE to know if the connection is in use or
 * not. This flag is only set when no H1S is attached and when the previous
 * stream, if any, was fully terminated without any error and in K/A mode.
 */
static int h1_used_streams(struct connection *conn)
{
	struct h1c *h1c = conn->ctx;

	return ((h1c->flags & H1C_F_CS_IDLE) ? 0 : 1);
}

/* returns the number of streams still available on a connection */
static int h1_avail_streams(struct connection *conn)
{
	return 1 - h1_used_streams(conn);
}
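
/* Example: an H1 connection carries at most one stream, so these two
 * functions only ever return 0 or 1. An idle connection (H1C_F_CS_IDLE
 * set) reports used=0/avail=1 and may thus be picked for reuse; once an
 * h1s is attached it reports used=1/avail=0.
 */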


/*****************************************************************/
/* functions below are dedicated to the mux setup and management */
/*****************************************************************/

/* returns non-zero if there are input data pending for stream h1s. */
static inline size_t h1s_data_pending(const struct h1s *h1s)
{
	const struct h1m *h1m;

	h1m = conn_is_back(h1s->h1c->conn) ? &h1s->res : &h1s->req;
	if (h1m->state == H1_MSG_DONE)
		return 0; // data not for this stream (e.g. pipelining)

	return b_data(&h1s->h1c->ibuf);
}

static struct conn_stream *h1s_new_cs(struct h1s *h1s)
{
	struct conn_stream *cs;

	cs = cs_new(h1s->h1c->conn);
	if (!cs)
		goto err;
	h1s->cs = cs;
	cs->ctx = h1s;

	if (h1s->flags & H1S_F_NOT_FIRST)
		cs->flags |= CS_FL_NOT_FIRST;

	if (stream_create_from_cs(cs) < 0)
		goto err;
	return cs;

  err:
	cs_free(cs);
	h1s->cs = NULL;
	return NULL;
}

static struct h1s *h1s_create(struct h1c *h1c, struct conn_stream *cs, struct session *sess)
{
	struct h1s *h1s;

	h1s = pool_alloc(pool_head_h1s);
	if (!h1s)
		goto fail;

	h1s->h1c = h1c;
	h1c->h1s = h1s;

	h1s->sess = sess;

	h1s->cs    = NULL;
	h1s->flags = H1S_F_NONE;

	h1s->recv_wait = NULL;
	h1s->send_wait = NULL;

	h1m_init_req(&h1s->req);
	h1s->req.flags |= H1_MF_NO_PHDR;

	h1m_init_res(&h1s->res);
	h1s->res.flags |= H1_MF_NO_PHDR;

	h1s->status = 0;
	h1s->meth   = HTTP_METH_OTHER;

	if (h1c->flags & H1C_F_WAIT_NEXT_REQ)
		h1s->flags |= H1S_F_NOT_FIRST;
	h1c->flags &= ~(H1C_F_CS_IDLE|H1C_F_WAIT_NEXT_REQ);

	if (!conn_is_back(h1c->conn)) {
		if (h1c->px->options2 & PR_O2_REQBUG_OK)
			h1s->req.err_pos = -1;
	}
	else {
		if (h1c->px->options2 & PR_O2_RSPBUG_OK)
			h1s->res.err_pos = -1;
	}

	/* If a conn_stream already exists, attach it to this H1S. Otherwise we
	 * create a new one.
	 */
	if (cs) {
		h1s->csinfo.create_date = date;
		h1s->csinfo.tv_create   = now;
		h1s->csinfo.t_handshake = 0;
		h1s->csinfo.t_idle      = -1;

		cs->ctx = h1s;
		h1s->cs = cs;
	}
	else {
		/* For frontend connections we should always have a session */
		sess = h1c->conn->owner;

		/* Timers for subsequent sessions on the same HTTP 1.x connection
		 * measure from `now`, not from the connection accept time */
		if (h1s->flags & H1S_F_NOT_FIRST) {
			h1s->csinfo.create_date = date;
			h1s->csinfo.tv_create   = now;
			h1s->csinfo.t_handshake = 0;
			h1s->csinfo.t_idle      = -1;
		}
		else {
			h1s->csinfo.create_date = sess->accept_date;
			h1s->csinfo.tv_create   = sess->tv_accept;
			h1s->csinfo.t_handshake = sess->t_handshake;
			h1s->csinfo.t_idle      = -1;
		}

		cs = h1s_new_cs(h1s);
		if (!cs)
			goto fail;
	}
	return h1s;

  fail:
	pool_free(pool_head_h1s, h1s);
	return NULL;
}

static void h1s_destroy(struct h1s *h1s)
{
	if (h1s) {
		struct h1c *h1c = h1s->h1c;

		h1c->h1s = NULL;

		if (h1s->recv_wait != NULL)
			h1s->recv_wait->events &= ~SUB_RETRY_RECV;
		if (h1s->send_wait != NULL)
			h1s->send_wait->events &= ~SUB_RETRY_SEND;

		h1c->flags &= ~H1C_F_IN_BUSY;
		if (h1s->flags & (H1S_F_REQ_ERROR|H1S_F_RES_ERROR))
			h1c->flags |= H1C_F_CS_ERROR;

		if (!(h1c->flags & (H1C_F_CS_ERROR|H1C_F_CS_SHUTW_NOW|H1C_F_CS_SHUTDOWN)) && /* No error/shutdown on h1c */
		    !(h1c->conn->flags & (CO_FL_ERROR|CO_FL_SOCK_RD_SH|CO_FL_SOCK_WR_SH)) && /* No error/shutdown on conn */
		    (h1s->flags & H1S_F_WANT_KAL) &&                                         /* K/A possible */
		    h1s->req.state == H1_MSG_DONE && h1s->res.state == H1_MSG_DONE) {        /* req/res in DONE state */
			h1c->flags |= (H1C_F_CS_IDLE|H1C_F_WAIT_NEXT_REQ);
		}

		cs_free(h1s->cs);
		pool_free(pool_head_h1s, h1s);
	}
}
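
/* Example of the idle transition above (sketch): a keep-alive exchange
 * that completes cleanly leaves both h1s->req.state and h1s->res.state in
 * H1_MSG_DONE with H1S_F_WANT_KAL still set, so destroying the h1s flags
 * the connection H1C_F_CS_IDLE|H1C_F_WAIT_NEXT_REQ and makes it reusable
 * for the next request. Any error, shutdown or "Connection: close" leaves
 * these flags unset and the connection will be released instead.
 */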

static const struct cs_info *h1_get_cs_info(struct conn_stream *cs)
{
	struct h1s *h1s = cs->ctx;

	if (h1s && !conn_is_back(cs->conn))
		return &h1s->csinfo;
	return NULL;
}

/*
 * Initialize the mux once it's attached. It is expected that conn->ctx
 * points to the existing conn_stream (for outgoing connections) or NULL (for
 * incoming ones). Returns < 0 on error.
 */
static int h1_init(struct connection *conn, struct proxy *proxy, struct session *sess)
{
	struct h1c *h1c;
	struct task *t = NULL;

	h1c = pool_alloc(pool_head_h1c);
	if (!h1c)
		goto fail_h1c;
	h1c->conn = conn;
	h1c->px   = proxy;

	h1c->flags = H1C_F_CS_IDLE;
	h1c->ibuf  = BUF_NULL;
	h1c->obuf  = BUF_NULL;
	h1c->h1s   = NULL;
	h1c->task = NULL;

	LIST_INIT(&h1c->buf_wait.list);
	h1c->wait_event.task = tasklet_new();
	if (!h1c->wait_event.task)
		goto fail;
	h1c->wait_event.task->process = h1_io_cb;
	h1c->wait_event.task->context = h1c;
	h1c->wait_event.events   = 0;

	if (conn->ctx) {
		h1c->shut_timeout = h1c->timeout = proxy->timeout.server;
		if (tick_isset(proxy->timeout.serverfin))
			h1c->shut_timeout = proxy->timeout.serverfin;
	} else {
		h1c->shut_timeout = h1c->timeout = proxy->timeout.client;
		if (tick_isset(proxy->timeout.clientfin))
			h1c->shut_timeout = proxy->timeout.clientfin;
	}
	if (tick_isset(h1c->timeout)) {
		t = task_new(tid_bit);
		if (!t)
			goto fail;

		h1c->task = t;
		t->process = h1_timeout_task;
		t->context = h1c;
		t->expire = tick_add(now_ms, h1c->timeout);
	}

	if (!(conn->flags & CO_FL_CONNECTED))
		h1c->flags |= H1C_F_CS_WAIT_CONN;

	/* Always create a new H1S */
	if (!h1s_create(h1c, conn->ctx, sess))
		goto fail;

	conn->ctx = h1c;

	if (t)
		task_queue(t);

	/* Try to read, if nothing is available yet we'll just subscribe */
	tasklet_wakeup(h1c->wait_event.task);

	/* mux->wake will be called soon to complete the operation */
	return 0;

  fail:
	if (t)
		task_free(t);
	if (h1c->wait_event.task)
		tasklet_free(h1c->wait_event.task);
	pool_free(pool_head_h1c, h1c);
 fail_h1c:
	return -1;
}


/* release function for a connection. This one should be called to free all
 * resources allocated to the mux.
 */
static void h1_release(struct connection *conn)
{
	struct h1c *h1c = conn->ctx;

	LIST_DEL(&conn->list);

	if (h1c) {
		if (!LIST_ISEMPTY(&h1c->buf_wait.list)) {
			HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
			LIST_DEL(&h1c->buf_wait.list);
			LIST_INIT(&h1c->buf_wait.list);
			HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		}

		h1_release_buf(h1c, &h1c->ibuf);
		h1_release_buf(h1c, &h1c->obuf);

		if (h1c->task) {
			h1c->task->context = NULL;
			task_wakeup(h1c->task, TASK_WOKEN_OTHER);
			h1c->task = NULL;
		}

		if (h1c->wait_event.task)
			tasklet_free(h1c->wait_event.task);

		h1s_destroy(h1c->h1s);
		if (h1c->wait_event.events != 0)
			conn->xprt->unsubscribe(conn, h1c->wait_event.events,
			    &h1c->wait_event);
		pool_free(pool_head_h1c, h1c);
	}

	conn->mux = NULL;
	conn->ctx = NULL;

	conn_stop_tracking(conn);
	conn_full_close(conn);
	if (conn->destroy_cb)
		conn->destroy_cb(conn);
	conn_free(conn);
}

/******************************************************/
/* functions below are for the H1 protocol processing */
/******************************************************/
/* Parse the request version and set H1_MF_VER_11 on <h1m> if the version is
 * greater than or equal to 1.1
 */
static void h1_parse_req_vsn(struct h1m *h1m, const struct htx_sl *sl)
{
	const char *p = HTX_SL_REQ_VPTR(sl);

	if ((HTX_SL_REQ_VLEN(sl) == 8) &&
	    (*(p + 5) > '1' ||
	     (*(p + 5) == '1' && *(p + 7) >= '1')))
		h1m->flags |= H1_MF_VER_11;
}
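
/* Worked example: for the 8-byte version string "HTTP/1.1", p[5] is the
 * major digit and p[7] the minor one, so "HTTP/1.1" (and any higher
 * version such as "HTTP/2.0") sets H1_MF_VER_11 while "HTTP/1.0" leaves
 * it clear.
 */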

/* Parse the response version and set H1_MF_VER_11 on <h1m> if the version is
 * greater than or equal to 1.1
 */
static void h1_parse_res_vsn(struct h1m *h1m, const struct htx_sl *sl)
{
	const char *p = HTX_SL_RES_VPTR(sl);

	if ((HTX_SL_RES_VLEN(sl) == 8) &&
	    (*(p + 5) > '1' ||
	     (*(p + 5) == '1' && *(p + 7) >= '1')))
		h1m->flags |= H1_MF_VER_11;
}

/*
 * Check the validity of the request version. If the version is valid, it
 * returns 1. Otherwise, it returns 0.
 */
static int h1_process_req_vsn(struct h1s *h1s, struct h1m *h1m, union h1_sl sl)
{
	struct h1c *h1c = h1s->h1c;

	/* RFC7230#2.6 enforces the format of the HTTP version string to be
	 * exactly one digit "." one digit. This check may be disabled using
	 * option accept-invalid-http-request.
	 */
	if (!(h1c->px->options2 & PR_O2_REQBUG_OK)) {
		if (sl.rq.v.len != 8)
			return 0;

		if (*(sl.rq.v.ptr + 4) != '/' ||
		    !isdigit((unsigned char)*(sl.rq.v.ptr + 5)) ||
		    *(sl.rq.v.ptr + 6) != '.' ||
		    !isdigit((unsigned char)*(sl.rq.v.ptr + 7)))
			return 0;
	}
	else if (!sl.rq.v.len) {
		/* try to convert HTTP/0.9 requests to HTTP/1.0 */

		/* RFC 1945 allows only GET for HTTP/0.9 requests */
		if (sl.rq.meth != HTTP_METH_GET)
			return 0;

		/* HTTP/0.9 requests *must* have a request URI, per RFC 1945 */
		if (!sl.rq.u.len)
			return 0;

		/* Add HTTP version */
		sl.rq.v = ist("HTTP/1.0");
		return 1;
	}

	if ((sl.rq.v.len == 8) &&
	    ((*(sl.rq.v.ptr + 5) > '1') ||
	     ((*(sl.rq.v.ptr + 5) == '1') && (*(sl.rq.v.ptr + 7) >= '1'))))
		h1m->flags |= H1_MF_VER_11;
	return 1;
}
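
/* Worked example (sketch): "GET /index.html" with no version token yields
 * sl.rq.v.len == 0; since the method is GET and a URI is present, the
 * version is rewritten to "HTTP/1.0" and the request is accepted, which
 * is how HTTP/0.9 requests are upgraded. A versionless "POST /foo" would
 * be rejected instead.
 */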

/*
 * Check the validity of the response version. If the version is valid, it
 * returns 1. Otherwise, it returns 0.
 */
static int h1_process_res_vsn(struct h1s *h1s, struct h1m *h1m, union h1_sl sl)
{
	struct h1c *h1c = h1s->h1c;

	/* RFC7230#2.6 enforces the format of the HTTP version string to be
	 * exactly one digit "." one digit. This check may be disabled using
	 * option accept-invalid-http-response.
	 */
	if (!(h1c->px->options2 & PR_O2_RSPBUG_OK)) {
		if (sl.st.v.len != 8)
			return 0;

		if (*(sl.st.v.ptr + 4) != '/' ||
		    !isdigit((unsigned char)*(sl.st.v.ptr + 5)) ||
		    *(sl.st.v.ptr + 6) != '.' ||
		    !isdigit((unsigned char)*(sl.st.v.ptr + 7)))
			return 0;
	}

	if ((sl.st.v.len == 8) &&
	    ((*(sl.st.v.ptr + 5) > '1') ||
	     ((*(sl.st.v.ptr + 5) == '1') && (*(sl.st.v.ptr + 7) >= '1'))))
		h1m->flags |= H1_MF_VER_11;

	return 1;
}

/* Remove all "Connection:" headers from the HTX message <htx> */
static void h1_remove_conn_hdrs(struct h1m *h1m, struct htx *htx)
{
	struct ist hdr = {.ptr = "Connection", .len = 10};
	struct http_hdr_ctx ctx;

	while (http_find_header(htx, hdr, &ctx, 1))
		http_remove_header(htx, &ctx);

	h1m->flags &= ~(H1_MF_CONN_KAL|H1_MF_CONN_CLO);
}

/* Add a "Connection:" header with the value <value> into the HTX message
 * <htx>.
 */
static void h1_add_conn_hdr(struct h1m *h1m, struct htx *htx, struct ist value)
{
	struct ist hdr = {.ptr = "Connection", .len = 10};

	http_add_header(htx, hdr, value);
}

/* Deduce the connection mode of the client connection, depending on the
 * configuration and the H1 message flags. This function is called twice, the
 * first time when the request is parsed and the second time when the response
 * is parsed.
 */
static void h1_set_cli_conn_mode(struct h1s *h1s, struct h1m *h1m)
{
	struct proxy *fe = h1s->h1c->px;
	int flag = H1S_F_WANT_KAL; /* For client connection: server-close == keepalive */

	if ((fe->options & PR_O_HTTP_MODE) == PR_O_HTTP_CLO)
		flag = H1S_F_WANT_CLO;

	/* flags order: CLO > SCL > TUN > KAL */
	if ((h1s->flags & H1S_F_WANT_MSK) < flag)
		h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | flag;

	if (h1m->flags & H1_MF_RESP) {
		/* Either we've established an explicit tunnel, or we're
		 * switching the protocol. In both cases, we're very unlikely to
		 * understand the next protocols. We have to switch to tunnel
		 * mode, so that we transfer the request and responses then let
		 * this protocol pass unmodified. When we later implement
		 * specific parsers for such protocols, we'll want to check the
		 * Upgrade header which contains information about that protocol
		 * for responses with status 101 (eg: see RFC2817 about TLS).
		 */
		if ((h1s->meth == HTTP_METH_CONNECT && h1s->status == 200) ||
		    h1s->status == 101)
			h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_TUN;
		else if (!(h1m->flags & H1_MF_XFER_LEN) || /* no length known => close */
			 (h1m->flags & H1_MF_CONN_CLO && h1s->req.state != H1_MSG_DONE)) /* explicit close and unfinished request */
			h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_CLO;
	}
	else {
		if (h1s->flags & H1S_F_WANT_KAL &&
		    (!(h1m->flags & (H1_MF_VER_11|H1_MF_CONN_KAL)) || /* no KA in HTTP/1.0 */
		     h1m->flags & H1_MF_CONN_CLO))                    /* explicit close */
			h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_CLO;
	}

	/* If KAL, check if the frontend is stopping. If yes, switch in CLO mode */
	if (h1s->flags & H1S_F_WANT_KAL && fe->state == PR_STSTOPPED)
		h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_CLO;
}
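
/* Worked example (sketch): an "HTTP/1.0" request without "Connection:
 * keep-alive" has neither H1_MF_VER_11 nor H1_MF_CONN_KAL set, so the
 * request branch above downgrades a stream wanting KAL to CLO. An
 * HTTP/1.1 request keeps KAL unless it carries "Connection: close" or the
 * frontend is configured with "option httpclose".
 */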

/* Deduce the connection mode of the server connection, depending on the
 * configuration and the H1 message flags. This function is called twice, the
 * first time when the request is parsed and the second time when the response
 * is parsed.
 */
static void h1_set_srv_conn_mode(struct h1s *h1s, struct h1m *h1m)
{
	struct h1c *h1c = h1s->h1c;
	struct session *sess = h1s->sess;
	struct proxy *be = h1c->px;
	int flag = H1S_F_WANT_KAL;
	int fe_flags = sess ? sess->fe->options : 0;

	/* For the server connection: server-close == httpclose */
	if ((fe_flags & PR_O_HTTP_MODE) == PR_O_HTTP_SCL ||
	    (be->options & PR_O_HTTP_MODE) == PR_O_HTTP_SCL ||
	    (fe_flags & PR_O_HTTP_MODE) == PR_O_HTTP_CLO ||
	    (be->options & PR_O_HTTP_MODE) == PR_O_HTTP_CLO)
		flag = H1S_F_WANT_CLO;

	/* flags order: CLO > SCL > TUN > KAL */
	if ((h1s->flags & H1S_F_WANT_MSK) < flag)
		h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | flag;

	if (h1m->flags & H1_MF_RESP) {
		/* Either we've established an explicit tunnel, or we're
		 * switching the protocol. In both cases, we're very unlikely to
		 * understand the next protocols. We have to switch to tunnel
		 * mode, so that we transfer the request and responses then let
		 * this protocol pass unmodified. When we later implement
		 * specific parsers for such protocols, we'll want to check the
		 * Upgrade header which contains information about that protocol
		 * for responses with status 101 (eg: see RFC2817 about TLS).
		 */
		if ((h1s->meth == HTTP_METH_CONNECT && h1s->status == 200) ||
		    h1s->status == 101)
			h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_TUN;
		else if (!(h1m->flags & H1_MF_XFER_LEN)) /* no length known => close */
			h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_CLO;
		else if (h1s->flags & H1S_F_WANT_KAL &&
			 (!(h1m->flags & (H1_MF_VER_11|H1_MF_CONN_KAL)) || /* no KA in HTTP/1.0 */
			  h1m->flags & H1_MF_CONN_CLO))                    /* explicit close */
			h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_CLO;
	}
	else {
		if (h1s->flags & H1S_F_WANT_KAL &&
		    (!(h1m->flags & (H1_MF_VER_11|H1_MF_CONN_KAL)) || /* no KA in HTTP/1.0 */
		     h1m->flags & H1_MF_CONN_CLO))                    /* explicit close */
			h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_CLO;
	}

	/* If KAL, check if the backend is stopping. If yes, switch in CLO mode */
	if (h1s->flags & H1S_F_WANT_KAL && be->state == PR_STSTOPPED)
		h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_CLO;
}

static void h1_update_req_conn_hdr(struct h1s *h1s, struct h1m *h1m,
				   struct htx *htx, struct ist *conn_val)
{
	struct proxy *px = h1s->h1c->px;

	/* Don't update the "Connection:" header in TUNNEL mode or if the
	 * "Upgrade" token is found
	 */
	if (h1s->flags & H1S_F_WANT_TUN || h1m->flags & H1_MF_CONN_UPG)
		return;

	if (h1s->flags & H1S_F_WANT_KAL || px->options2 & PR_O2_FAKE_KA) {
		if (h1m->flags & H1_MF_CONN_CLO) {
			if (conn_val)
				*conn_val = ist("");
			if (htx)
				h1_remove_conn_hdrs(h1m, htx);
		}
		if (!(h1m->flags & (H1_MF_VER_11|H1_MF_CONN_KAL))) {
			if (conn_val)
				*conn_val = ist("keep-alive");
			if (htx)
				h1_add_conn_hdr(h1m, htx, ist("keep-alive"));
		}
		if ((h1m->flags & (H1_MF_VER_11|H1_MF_CONN_KAL)) == (H1_MF_VER_11|H1_MF_CONN_KAL)) {
			if (conn_val)
				*conn_val = ist("");
			if (htx)
				h1_remove_conn_hdrs(h1m, htx);
		}
	}
	else { /* H1S_F_WANT_CLO && !PR_O2_FAKE_KA */
		if (h1m->flags & H1_MF_CONN_KAL) {
			if (conn_val)
				*conn_val = ist("");
			if (htx)
				h1_remove_conn_hdrs(h1m, htx);
		}
		if ((h1m->flags & (H1_MF_VER_11|H1_MF_CONN_CLO)) == H1_MF_VER_11) {
			if (conn_val)
				*conn_val = ist("close");
			if (htx)
				h1_add_conn_hdr(h1m, htx, ist("close"));
		}
		if ((h1m->flags & (H1_MF_VER_11|H1_MF_CONN_CLO)) == H1_MF_CONN_CLO) {
			if (conn_val)
				*conn_val = ist("");
			if (htx)
				h1_remove_conn_hdrs(h1m, htx);
		}
	}
}

static void h1_update_res_conn_hdr(struct h1s *h1s, struct h1m *h1m,
					 struct htx *htx, struct ist *conn_val)
{
	/* Don't update the "Connection:" header in TUNNEL mode or if the
	 * "Upgrade" token is found
	 */
	if (h1s->flags & H1S_F_WANT_TUN || h1m->flags & H1_MF_CONN_UPG)
		return;

	if (h1s->flags & H1S_F_WANT_KAL) {
		if (h1m->flags & H1_MF_CONN_CLO) {
			if (conn_val)
				*conn_val = ist("");
			if (htx)
				h1_remove_conn_hdrs(h1m, htx);
		}
		if (!(h1m->flags & H1_MF_CONN_KAL) &&
		    !((h1m->flags & h1s->req.flags) & H1_MF_VER_11)) {
			if (conn_val)
				*conn_val = ist("keep-alive");
			if (htx)
				h1_add_conn_hdr(h1m, htx, ist("keep-alive"));
		}
		else if ((h1m->flags & H1_MF_CONN_KAL) &&
		         ((h1m->flags & h1s->req.flags) & H1_MF_VER_11)) {
			if (conn_val)
				*conn_val = ist("");
			if (htx)
				h1_remove_conn_hdrs(h1m, htx);
		}
	}
	else { /* H1S_F_WANT_CLO */
		if (h1m->flags & H1_MF_CONN_KAL) {
			if (conn_val)
				*conn_val = ist("");
			if (htx)
				h1_remove_conn_hdrs(h1m, htx);
		}
		if ((h1m->flags & (H1_MF_VER_11|H1_MF_CONN_CLO)) == H1_MF_VER_11) {
			if (conn_val)
				*conn_val = ist("close");
			if (htx)
				h1_add_conn_hdr(h1m, htx, ist("close"));
		}
		if ((h1m->flags & (H1_MF_VER_11|H1_MF_CONN_CLO)) == H1_MF_CONN_CLO) {
			if (conn_val)
				*conn_val = ist("");
			if (htx)
				h1_remove_conn_hdrs(h1m, htx);
		}
	}
}

/* Set the right connection mode and update the "Connection:" header if
 * needed. <htx> and <conn_val> can be NULL. When <htx> is not NULL, the HTX
 * message is updated accordingly. When <conn_val> is not NULL, it is set with
 * the new header value.
 */
static void h1_process_conn_mode(struct h1s *h1s, struct h1m *h1m,
				 struct htx *htx, struct ist *conn_val)
{
	if (!conn_is_back(h1s->h1c->conn)) {
		h1_set_cli_conn_mode(h1s, h1m);
		if (h1m->flags & H1_MF_RESP)
			h1_update_res_conn_hdr(h1s, h1m, htx, conn_val);
	}
	else {
		h1_set_srv_conn_mode(h1s, h1m);
		if (!(h1m->flags & H1_MF_RESP))
			h1_update_req_conn_hdr(h1s, h1m, htx, conn_val);
	}
}


/* Append the description of what is present in error snapshot <es> into <out>.
 * The description must be small enough to always fit in a buffer. The output
 * buffer may be the trash so the trash must not be used inside this function.
 */
static void h1_show_error_snapshot(struct buffer *out, const struct error_snapshot *es)
{
	chunk_appendf(out,
		      "  H1 connection flags 0x%08x, H1 stream flags 0x%08x\n"
		      "  H1 msg state %s(%d), H1 msg flags 0x%08x\n"
		      "  H1 chunk len %lld bytes, H1 body len %lld bytes :\n",
		      es->ctx.h1.c_flags, es->ctx.h1.s_flags,
		      h1m_state_str(es->ctx.h1.state), es->ctx.h1.state,
		      es->ctx.h1.m_flags, es->ctx.h1.m_clen, es->ctx.h1.m_blen);
}

/*
 * Capture a bad request or response and archive it in the proxy's structure.
 * By default it tries to report the error position as h1m->err_pos. However if
 * this one is not set, it will then report h1m->next, which is the last known
 * parsing point. The function is able to deal with wrapping buffers. It always
 * displays buffers as a contiguous area starting at buf->p. The direction is
 * determined thanks to the h1m's flags.
 */
static void h1_capture_bad_message(struct h1c *h1c, struct h1s *h1s,
				   struct h1m *h1m, struct buffer *buf)
{
	struct session *sess = h1c->conn->owner;
	struct proxy *proxy = h1c->px;
	struct proxy *other_end = sess->fe;
	union error_snapshot_ctx ctx;

	if (h1s->cs->data && !(h1m->flags & H1_MF_RESP))
		other_end = si_strm(h1s->cs->data)->be;

	/* http-specific part now */
	ctx.h1.state   = h1m->state;
	ctx.h1.c_flags = h1c->flags;
	ctx.h1.s_flags = h1s->flags;
	ctx.h1.m_flags = h1m->flags;
	ctx.h1.m_clen  = h1m->curr_len;
	ctx.h1.m_blen  = h1m->body_len;

	proxy_capture_error(proxy, !!(h1m->flags & H1_MF_RESP), other_end,
			    h1c->conn->target, sess, buf, 0, 0,
			    (h1m->err_pos >= 0) ? h1m->err_pos : h1m->next,
			    &ctx, h1_show_error_snapshot);
}

/* Emit the chunk size followed by a CRLF in front of the data in buffer
 * <buf>. It goes backwards and starts with the byte before the buffer's
 * head. The caller is responsible for ensuring there is enough room left before
 * the buffer's head for the string.
 */
static void h1_emit_chunk_size(struct buffer *buf, size_t chksz)
{
	char *beg, *end;

	beg = end = b_head(buf);
	*--beg = '\n';
	*--beg = '\r';
	do {
		*--beg = hextab[chksz & 0xF];
	} while (chksz >>= 4);
	buf->head -= (end - beg);
	b_add(buf, end - beg);
}

/* Emit a CRLF after the data in buffer <buf>. The caller is responsible for
 * ensuring there is enough room left in the buffer for the string. */
static void h1_emit_chunk_crlf(struct buffer *buf)
{
	*(b_peek(buf, b_data(buf)))     = '\r';
	*(b_peek(buf, b_data(buf) + 1)) = '\n';
	b_add(buf, 2);
}
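
/* Worked example: in h1_process_output(), once the zero-copy swap has put
 * a 418-byte (0x1a2) DATA block into h1c->obuf, the chunk envelope is
 * emitted around it with :
 *
 *     h1_emit_chunk_size(&h1c->obuf, count); // prepends "1a2\r\n" before head
 *     h1_emit_chunk_crlf(&h1c->obuf);        // appends "\r\n" after the data
 *
 * yielding "1a2\r\n<418 bytes>\r\n" on the wire. Both helpers assume the
 * caller reserved the room (there, the space left by the struct htx
 * header).
 */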

/*
 * Switch the request to tunnel mode. This function must only be called for
 * CONNECT requests. On the client side, if the response is not finished, the
 * mux is marked as busy on input.
 */
static void h1_set_req_tunnel_mode(struct h1s *h1s)
{
	h1s->req.flags &= ~(H1_MF_XFER_LEN|H1_MF_CLEN|H1_MF_CHNK);
	h1s->req.state = H1_MSG_TUNNEL;
	if (!conn_is_back(h1s->h1c->conn) && h1s->res.state < H1_MSG_DONE)
		h1s->h1c->flags |= H1C_F_IN_BUSY;
	else if (h1s->h1c->flags & H1C_F_IN_BUSY) {
		h1s->h1c->flags &= ~H1C_F_IN_BUSY;
		tasklet_wakeup(h1s->h1c->wait_event.task);
	}
}

/*
 * Switch the response to tunnel mode. This function must only be called on
 * successful replies to CONNECT requests or on protocol switching. In this
 * last case, this function takes care to switch the request to tunnel mode if
 * possible. On the server side, if the request is not finished, the mux is
 * marked as busy on input.
 */
static void h1_set_res_tunnel_mode(struct h1s *h1s)
{
	/* On protocol switching, switch the request to tunnel mode if it is in
	 * DONE state. Otherwise we will wait for the end of the request to
	 * switch it to tunnel mode.
	 */
	if (h1s->status == 101 && h1s->req.state == H1_MSG_DONE) {
		h1s->req.flags &= ~(H1_MF_XFER_LEN|H1_MF_CLEN|H1_MF_CHNK);
		h1s->req.state = H1_MSG_TUNNEL;
	}

	h1s->res.flags &= ~(H1_MF_XFER_LEN|H1_MF_CLEN|H1_MF_CHNK);
	h1s->res.state = H1_MSG_TUNNEL;
	if (conn_is_back(h1s->h1c->conn) && h1s->req.state < H1_MSG_DONE)
		h1s->h1c->flags |= H1C_F_IN_BUSY;
	else if (h1s->h1c->flags & H1C_F_IN_BUSY) {
		h1s->h1c->flags &= ~H1C_F_IN_BUSY;
		tasklet_wakeup(h1s->h1c->wait_event.task);
	}
}

/*
 * Handle 100-Continue responses or any other informational 1xx response which
 * is non-final. In such a case, this function resets the response parser. It
 * is the caller's responsibility to call this function when appropriate.
 */
static void h1_handle_1xx_response(struct h1s *h1s, struct h1m *h1m)
{
	if ((h1m->flags & H1_MF_RESP) && h1m->state == H1_MSG_DONE &&
	    h1s->status < 200 && (h1s->status == 100 || h1s->status >= 102)) {
		h1m_init_res(&h1s->res);
		h1m->flags |= H1_MF_NO_PHDR;
		if (h1s->h1c->flags & H1C_F_IN_BUSY) {
			h1s->h1c->flags &= ~H1C_F_IN_BUSY;
			tasklet_wakeup(h1s->h1c->wait_event.task);
		}
	}
}
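
/* Example: after "HTTP/1.1 100 Continue\r\n\r\n" has been parsed, the
 * response parser is in H1_MSG_DONE with status 100, so the test above
 * matches and h1m_init_res() rearms the parser for the final response.
 * 101 is deliberately excluded (100 or >= 102 only) because Switching
 * Protocols is handled by the tunnel-mode functions above instead.
 */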

/*
 * Parse HTTP/1 headers. It returns the number of bytes parsed if > 0, or 0 if
 * it couldn't proceed. Parsing errors are reported by setting the H1S_F_*_ERROR
 * flag and filling h1s->err_pos and h1s->err_state fields. This function is
 * responsible for updating the parser state <h1m>.
 */
static size_t h1_process_headers(struct h1s *h1s, struct h1m *h1m, struct htx *htx,
				 struct buffer *buf, size_t *ofs, size_t max)
{
	struct http_hdr hdrs[global.tune.max_http_hdr];
	union h1_sl h1sl;
	unsigned int flags = HTX_SL_F_NONE;
	int ret = 0;

	if (!max)
		goto end;

	/* Realign the input buffer if necessary */
	if (b_head(buf) + b_data(buf) > b_wrap(buf))
		b_slow_realign(buf, trash.area, 0);

	ret = h1_headers_to_hdr_list(b_peek(buf, *ofs), b_peek(buf, *ofs) + max,
				     hdrs, sizeof(hdrs)/sizeof(hdrs[0]), h1m, &h1sl);
	if (ret <= 0) {
		/* Incomplete or invalid message. If the input buffer only
		 * contains headers and is full, which is detected by the
		 * buffer being full with the offset at zero, it's an error
		 * because headers are too large to be handled by the parser. */
		if (ret < 0 || (!ret && !*ofs && !buf_room_for_htx_data(buf)))
			goto error;
		goto end;
	}

	if (h1m->err_pos >= 0) {
		/* Maybe we found an error during the parsing while we were
		 * configured not to block on that, so we have to capture it
		 * now.
		 */
		h1_capture_bad_message(h1s->h1c, h1s, h1m, buf);
	}

	/* message headers fully parsed, do some checks to prepare the body
	 * parsing.
	 */

	/* Be sure to keep some space to do headers rewriting */
	if (ret > (b_size(buf) - global.tune.maxrewrite))
		goto error;

	/* Save the request's method or the response's status, check if the body
	 * length is known and check the VSN validity */
	if (!(h1m->flags & H1_MF_RESP)) {
		h1s->meth = h1sl.rq.meth;

		/* By default, requests always have a known length */
		h1m->flags |= H1_MF_XFER_LEN;

		if (h1s->meth == HTTP_METH_CONNECT) {
			/* Switch CONNECT requests to tunnel mode */
			h1_set_req_tunnel_mode(h1s);
		}
		else if (!(h1m->flags & H1_MF_CHNK) && !h1m->body_len) {
			/* Switch requests with no body to done. */
			h1m->state = H1_MSG_DONE;
		}

		if (!h1_process_req_vsn(h1s, h1m, h1sl)) {
			h1m->err_pos = h1sl.rq.v.ptr - b_head(buf);
			h1m->err_state = h1m->state;
			goto vsn_error;
		}
	}
	else {
		h1s->status = h1sl.st.status;

		if ((h1s->meth == HTTP_METH_CONNECT && h1s->status == 200) ||
		    h1s->status == 101) {
			/* Switch successful replies to CONNECT requests and
			 * protocol switching to tunnel mode. */
			h1_set_res_tunnel_mode(h1s);
		}
		else if ((h1s->meth == HTTP_METH_HEAD) ||
			 (h1s->status >= 100 && h1s->status < 200) ||
			 (h1s->status == 204) || (h1s->status == 304)) {
			/* Switch responses without body to done. */
			h1m->flags &= ~(H1_MF_CLEN|H1_MF_CHNK);
			h1m->flags |= H1_MF_XFER_LEN;
			h1m->curr_len = h1m->body_len = 0;
			h1m->state = H1_MSG_DONE;
		}
		else if (h1m->flags & (H1_MF_CLEN|H1_MF_CHNK)) {
			/* Responses with a known body length. Switch responses
			 * with no body to done. */
			h1m->flags |= H1_MF_XFER_LEN;
			if ((h1m->flags & H1_MF_CLEN) && !h1m->body_len)
				h1m->state = H1_MSG_DONE;
		}
		else {
			/* Responses with an unknown body length */
			h1m->state = H1_MSG_TUNNEL;
		}

		if (!h1_process_res_vsn(h1s, h1m, h1sl)) {
			h1m->err_pos = h1sl.st.v.ptr - b_head(buf);
			h1m->err_state = h1m->state;
			goto vsn_error;
		}
	}

	/* Set HTX start-line flags */
	if (h1m->flags & H1_MF_VER_11)
		flags |= HTX_SL_F_VER_11;
	if (h1m->flags & H1_MF_XFER_ENC)
		flags |= HTX_SL_F_XFER_ENC;
	if (h1m->flags & H1_MF_XFER_LEN) {
		flags |= HTX_SL_F_XFER_LEN;
		if (h1m->flags & H1_MF_CHNK)
			flags |= HTX_SL_F_CHNK;
		else if (h1m->flags & H1_MF_CLEN)
			flags |= HTX_SL_F_CLEN;
		if (h1m->state == H1_MSG_DONE)
			flags |= HTX_SL_F_BODYLESS;
	}

	if (!(h1m->flags & H1_MF_RESP)) {
		struct htx_sl *sl;

		sl = htx_add_stline(htx, HTX_BLK_REQ_SL, flags, h1sl.rq.m, h1sl.rq.u, h1sl.rq.v);
		if (!sl || !htx_add_all_headers(htx, hdrs))
			goto error;
		sl->info.req.meth = h1s->meth;
	}
	else {
		struct htx_sl *sl;

		flags |= HTX_SL_F_IS_RESP;
		sl = htx_add_stline(htx, HTX_BLK_RES_SL, flags, h1sl.st.v, h1sl.st.c, h1sl.st.r);
		if (!sl || !htx_add_all_headers(htx, hdrs))
			goto error;
		sl->info.res.status = h1s->status;
	}

	if (h1m->state == H1_MSG_DONE) {
		if (!htx_add_endof(htx, HTX_BLK_EOM))
			goto error;
		h1s->cs->flags |= CS_FL_EOI;
	}

	h1_process_conn_mode(h1s, h1m, htx, NULL);

	/* If body length cannot be determined, set htx->extra to
	 * ULLONG_MAX. This value is impossible in other cases.
	 */
	htx->extra = ((h1m->flags & H1_MF_XFER_LEN) ? h1m->curr_len : ULLONG_MAX);

	/* Recheck there is enough space to do headers rewriting */
	if (htx_used_space(htx) > b_size(buf) - global.tune.maxrewrite)
		goto error;

	*ofs += ret;
  end:
	return ret;

  error:
	h1m->err_state = h1m->state;
	h1m->err_pos = h1m->next;
  vsn_error:
	h1s->flags |= (!(h1m->flags & H1_MF_RESP) ? H1S_F_REQ_ERROR : H1S_F_RES_ERROR);
	h1_capture_bad_message(h1s->h1c, h1s, h1m, buf);
	ret = 0;
	goto end;
}

/*
 * Parse HTTP/1 body. It returns the number of bytes parsed if > 0, or 0 if it
 * couldn't proceed. Parsing errors are reported by setting the H1S_F_*_ERROR
 * flag and filling h1s->err_pos and h1s->err_state fields. This function is
 * responsible for updating the parser state <h1m>.
 */
static size_t h1_process_data(struct h1s *h1s, struct h1m *h1m, struct htx *htx,
			      struct buffer *buf, size_t *ofs, size_t max,
			      struct buffer *htxbuf, size_t reserve)
{
	uint32_t data_space;
	size_t total = 0;
	int ret = 0;

	data_space = htx_free_data_space(htx);
	if (data_space <= reserve)
		goto end;
	data_space -= reserve;

	if (h1m->flags & H1_MF_XFER_LEN) {
		if (h1m->flags & H1_MF_CLEN) {
			/* content-length: read only h1m->body_len */
			ret = max;
			if (ret > data_space)
				ret = data_space;
			if ((uint64_t)ret > h1m->curr_len)
				ret = h1m->curr_len;
			if (ret > b_contig_data(buf, *ofs))
				ret = b_contig_data(buf, *ofs);
			if (ret) {
				/* very often with large files we'll face the following
				 * situation :
				 *   - htx is empty and points to <htxbuf>
				 *   - ret == buf->data
				 *   - buf->head == sizeof(struct htx)
				 *   => we can swap the buffers and place an htx header into
				 *      the target buffer instead
				 */
				if (unlikely(htx_is_empty(htx) && ret == b_data(buf) &&
					     !*ofs && b_head_ofs(buf) == sizeof(struct htx))) {
					void *raw_area = buf->area;
					void *htx_area = htxbuf->area;
					struct htx_blk *blk;

					buf->area = htx_area;
					htxbuf->area = raw_area;
					htx = (struct htx *)htxbuf->area;
					htx->size = htxbuf->size - sizeof(*htx);
					htx_reset(htx);
					b_set_data(htxbuf, b_size(htxbuf));

					blk = htx_add_blk(htx, HTX_BLK_DATA, ret);
					blk->info += ret;
					/* nothing else to do, the old buffer now contains an
					 * empty pre-initialized HTX header
					 */
				}
				else if (!htx_add_data(htx, ist2(b_peek(buf, *ofs), ret)))
					goto end;
				h1m->curr_len -= ret;
				*ofs += ret;
				total += ret;
			}

			if (!h1m->curr_len) {
				if (!htx_add_endof(htx, HTX_BLK_EOM))
					goto end;
				h1m->state = H1_MSG_DONE;
				h1s->cs->flags |= CS_FL_EOI;
			}
		}
		else if (h1m->flags & H1_MF_CHNK) {
		  new_chunk:
			/* te:chunked : parse chunks */
			if (h1m->state == H1_MSG_CHUNK_CRLF) {
				ret = h1_skip_chunk_crlf(buf, *ofs, *ofs + max);
				if (ret <= 0)
					goto end;
				h1m->state = H1_MSG_CHUNK_SIZE;

				max -= ret;
				*ofs += ret;
				total += ret;
			}

			if (h1m->state == H1_MSG_CHUNK_SIZE) {
				unsigned int chksz;

				ret = h1_parse_chunk_size(buf, *ofs, *ofs + max, &chksz);
				if (ret <= 0)
					goto end;
				if (!chksz) {
					if (!htx_add_endof(htx, HTX_BLK_EOD))
						goto end;
					h1s->flags |= H1S_F_HAVE_I_EOD;
					h1m->state = H1_MSG_TRAILERS;
				}
				else
					h1m->state = H1_MSG_DATA;

				h1m->curr_len  = chksz;
				h1m->body_len += chksz;
				max -= ret;
				*ofs += ret;
				total += ret;
			}

			if (h1m->state == H1_MSG_DATA) {
				ret = max;
				if (ret > data_space)
					ret = data_space;
				if ((uint64_t)ret > h1m->curr_len)
					ret = h1m->curr_len;
				if (ret > b_contig_data(buf, *ofs))
					ret = b_contig_data(buf, *ofs);
				if (ret) {
					if (!htx_add_data(htx, ist2(b_peek(buf, *ofs), ret)))
						goto end;
					h1m->curr_len -= ret;
					max -= ret;
					*ofs += ret;
					total += ret;
				}
				if (!h1m->curr_len) {
					h1m->state = H1_MSG_CHUNK_CRLF;
					data_space = htx_free_data_space(htx);
					if (data_space <= reserve)
						goto end;
					data_space -= reserve;
					goto new_chunk;
				}
				goto end;
			}

			if (h1m->state == H1_MSG_TRAILERS) {
				/* Trailers were already parsed, only the EOM
				 * needs to be added */
				if (h1s->flags & H1S_F_HAVE_I_TLR)
					goto skip_tlr_parsing;

				ret = h1_measure_trailers(buf, *ofs, max);
				if (ret > data_space)
					ret = (htx_is_empty(htx) ? -1 : 0);
				if (ret <= 0)
					goto end;

				/* Realign the input buffer if trailers wrap. For now
				 * this is a workaround. Because trailers are
				 * not split on CRLF, like headers, there is no
				 * way to know where to split them when trailers
				 * wrap. This is a limitation of
				 * h1_measure_trailers.
				 */
				if (b_peek(buf, *ofs) > b_peek(buf, *ofs + ret))
					b_slow_realign(buf, trash.area, 0);

				if (!htx_add_trailer(htx, ist2(b_peek(buf, *ofs), ret)))
					goto end;
				h1s->flags |= H1S_F_HAVE_I_TLR;
				max -= ret;
				*ofs += ret;
				total += ret;

			  skip_tlr_parsing:
				if (!htx_add_endof(htx, HTX_BLK_EOM))
					goto end;
				h1m->state = H1_MSG_DONE;
				h1s->cs->flags |= CS_FL_EOI;
			}
		}
		else {
			/* XFER_LEN is set but not CLEN nor CHNK, it means there
			 * is no body. Switch the message to DONE state
			 */
			if (!htx_add_endof(htx, HTX_BLK_EOM))
				goto end;
			h1m->state = H1_MSG_DONE;
			h1s->cs->flags |= CS_FL_EOI;
		}
	}
	else {
		/* no content length, read till SHUTW */
		ret = max;
		if (ret > data_space)
			ret = data_space;
		if (ret > b_contig_data(buf, *ofs))
			ret = b_contig_data(buf, *ofs);
		if (ret) {
			if (!htx_add_data(htx, ist2(b_peek(buf, *ofs), ret)))
				goto end;

			*ofs += ret;
			total = ret;
		}
	}

  end:
	if (ret < 0) {
		h1s->flags |= (!(h1m->flags & H1_MF_RESP) ? H1S_F_REQ_ERROR : H1S_F_RES_ERROR);
		h1m->err_state = h1m->state;
		h1m->err_pos = *ofs + max + ret;
		h1_capture_bad_message(h1s->h1c, h1s, h1m, buf);
		return 0;
	}
	/* update htx->extra, only when the body length is known */
	if (h1m->flags & H1_MF_XFER_LEN)
		htx->extra = h1m->curr_len;
	return total;
}
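
/* Worked example (sketch): with "transfer-encoding: chunked", the input
 * "5\r\nhello\r\n0\r\n\r\n" walks the states above as CHUNK_SIZE ("5\r\n",
 * curr_len=5) -> DATA ("hello" emitted as an HTX DATA block) -> CHUNK_CRLF
 * -> CHUNK_SIZE ("0\r\n", emits EOD and sets H1S_F_HAVE_I_EOD) -> TRAILERS
 * (the final CRLF, emits EOM) and the message ends in H1_MSG_DONE.
 */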
1384 
1385 /*
1386  * Process incoming data. It parses data and transfer them from h1c->ibuf into
1387  * <buf>. It returns the number of bytes parsed and transferred if > 0, or 0 if
1388  * it couldn't proceed.
1389  */
h1_process_input(struct h1c * h1c,struct buffer * buf,int flags)1390 static size_t h1_process_input(struct h1c *h1c, struct buffer *buf, int flags)
1391 {
1392 	struct h1s *h1s = h1c->h1s;
1393 	struct h1m *h1m;
1394 	struct htx *htx;
1395 	size_t data = 0;
1396 	size_t total = 0;
1397 	size_t ret = 0;
1398 	size_t count, rsv;
1399 	int errflag;
1400 
1401 	htx = htx_from_buf(buf);
1402 
1403 	if (!conn_is_back(h1c->conn)) {
1404 		h1m = &h1s->req;
1405 		errflag = H1S_F_REQ_ERROR;
1406 	}
1407 	else {
1408 		h1m = &h1s->res;
1409 		errflag = H1S_F_RES_ERROR;
1410 	}
1411 
1412 	data = htx->data;
1413 	count = b_data(&h1c->ibuf);
1414 	rsv = ((flags & CO_RFL_KEEP_RSV) ? global.tune.maxrewrite : 0);
1415 
1416 	if (htx_is_empty(htx))
1417 		h1_handle_1xx_response(h1s, h1m);
1418 
1419 	do {
1420 		if (h1m->state <= H1_MSG_LAST_LF) {
1421 			/* Don't start to parse a new message if some part of
1422 			 * the previous one is still there
1423 			 */
1424 			if (!htx_is_empty(htx))
1425 				goto end;
1426 			ret = h1_process_headers(h1s, h1m, htx, &h1c->ibuf, &total, count);
1427 			if (!ret)
1428 				break;
1429 			h1_handle_1xx_response(h1s, h1m);
1430 		}
1431 		else if (h1m->state <= H1_MSG_TRAILERS) {
1432 			ret = h1_process_data(h1s, h1m, htx, &h1c->ibuf, &total, count, buf, rsv);
1433 			htx = htx_from_buf(buf);
1434 			if (!ret)
1435 				break;
1436 		}
1437 		else if (h1m->state == H1_MSG_DONE) {
1438 			if (!(h1m->flags & H1_MF_RESP) && h1s->status == 101)
1439 				h1_set_req_tunnel_mode(h1s);
1440 			else if (h1s->req.state < H1_MSG_DONE || h1s->res.state < H1_MSG_DONE) {
1441 				h1c->flags |= H1C_F_IN_BUSY;
1442 				break;
1443 			}
1444 			else
1445 				break;
1446 		}
1447 		else if (h1m->state == H1_MSG_TUNNEL) {
1448 			ret = h1_process_data(h1s, h1m, htx, &h1c->ibuf, &total, count, buf, rsv);
1449 			htx = htx_from_buf(buf);
1450 			if (!ret)
1451 				break;
1452 		}
1453 		else {
1454 			h1s->flags |= errflag;
1455 			break;
1456 		}
1457 
1458 		count -= ret;
1459 	} while (!(h1s->flags & errflag) && count);
1460 
1461 	if (h1s->flags & errflag)
1462 		goto parsing_err;
1463 
1464 	b_del(&h1c->ibuf, total);
1465 
1466   end:
1467 	htx_to_buf(htx, buf);
1468 	data = (htx->data - data);
1469 	if (h1c->flags & H1C_F_IN_FULL && buf_room_for_htx_data(&h1c->ibuf)) {
1470 		h1c->flags &= ~H1C_F_IN_FULL;
1471 		tasklet_wakeup(h1c->wait_event.task);
1472 	}
1473 
1474 	h1s->cs->flags &= ~(CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
1475 
1476 	if (!b_data(&h1c->ibuf))
1477 		h1_release_buf(h1c, &h1c->ibuf);
1478 	else if (h1s_data_pending(h1s) && !htx_is_empty(htx))
1479 		h1s->cs->flags |= CS_FL_RCV_MORE | CS_FL_WANT_ROOM;
1480 
1481 	if ((h1s->cs->flags & CS_FL_REOS) && (!h1s_data_pending(h1s) || htx_is_empty(htx))) {
1482 		h1s->cs->flags |= CS_FL_EOS;
1483 		if (h1m->state > H1_MSG_LAST_LF && h1m->state < H1_MSG_DONE)
1484 			h1s->cs->flags |= CS_FL_ERROR;
1485 	}
1486 
1487 	return data;
1488 
1489   parsing_err:
1490 	b_reset(&h1c->ibuf);
1491 	htx->flags |= HTX_FL_PARSING_ERROR;
1492 	htx_to_buf(htx, buf);
1493 	h1s->cs->flags |= CS_FL_EOI;
1494 	return 0;
1495 }
1496 
1497 /*
1498  * Process outgoing data. It parses data and transfer them from the channel buffer into
1499  * h1c->obuf. It returns the number of bytes parsed and transferred if > 0, or
1500  * 0 if it couldn't proceed.
1501  */
h1_process_output(struct h1c * h1c,struct buffer * buf,size_t count)1502 static size_t h1_process_output(struct h1c *h1c, struct buffer *buf, size_t count)
1503 {
1504 	struct h1s *h1s = h1c->h1s;
1505 	struct h1m *h1m;
1506 	struct htx *chn_htx;
1507 	struct htx_blk *blk;
1508 	struct buffer tmp;
1509 	size_t total = 0;
1510 	int process_conn_mode = 1; /* If still 1 on EOH, process the connection mode */
1511 	int errflag;
1512 
1513 	if (!count)
1514 		goto end;
1515 
1516 	chn_htx = htx_from_buf(buf);
1517 	if (htx_is_empty(chn_htx))
1518 		goto end;
1519 
1520 	if (!h1_get_buf(h1c, &h1c->obuf)) {
1521 		h1c->flags |= H1C_F_OUT_ALLOC;
1522 		goto end;
1523 	}
1524 
1525 	if (!conn_is_back(h1c->conn)) {
1526 		h1m = &h1s->res;
1527 		errflag = H1S_F_RES_ERROR;
1528 	}
1529 	else {
1530 		h1m = &h1s->req;
1531 		errflag = H1S_F_REQ_ERROR;
1532 	}
1533 
1534 	/* the htx is non-empty thus has at least one block */
1535 	blk = htx_get_head_blk(chn_htx);
1536 
1537 	/* Perform some optimizations to reduce the number of buffer copies.
1538 	 * First, if the mux's buffer is empty and the htx area contains
1539 	 * exactly one data block of the same size as the requested count,
1540 	 * then it's possible to simply swap the caller's buffer with the
1541 	 * mux's output buffer and adjust offsets and length to match the
1542 	 * entire DATA HTX block in the middle. In this case we perform a
1543 	 * true zero-copy operation from end-to-end. This is the situation
1544 	 * that happens all the time with large files. Second, if this is not
1545 	 * possible, but the mux's output buffer is empty, we still have an
1546 	 * opportunity to avoid the copy to the intermediary buffer, by making
1547 	 * the intermediary buffer's area point to the output buffer's area.
1548 	 * In this case we want to skip the HTX header to make sure that copies
1549 	 * remain aligned and that this operation remains possible all the
1550 	 * time. This goes for headers, data blocks and any data extracted from
1551 	 * the HTX blocks.
1552 	 */
1553 	if (!b_data(&h1c->obuf)) {
1554 		if (chn_htx->used == 1 &&
1555 		    htx_get_blk_type(blk) == HTX_BLK_DATA &&
1556 		    htx_get_blk_value(chn_htx, blk).len == count) {
1557 			void *old_area = h1c->obuf.area;
1558 
1559 			h1c->obuf.area = buf->area;
1560 			h1c->obuf.head = sizeof(struct htx) + blk->addr;
1561 			h1c->obuf.data = count;
1562 
1563 			buf->area = old_area;
1564 			buf->data = buf->head = 0;
1565 
1566 			/* The message is chunked. We need to emit the chunk
1567 			 * size. We have at least the size of the struct htx to
1568 			 * write the chunk envelope. It should be enough.
1569 			 */
1570 			if (h1m->flags & H1_MF_CHNK) {
1571 				h1_emit_chunk_size(&h1c->obuf, count);
1572 				h1_emit_chunk_crlf(&h1c->obuf);
1573 			}
			total += count;
			goto out;
		}
		tmp.area = h1c->obuf.area + h1c->obuf.head;
	}
	else
		tmp.area = trash.area;

	tmp.data = 0;
	tmp.size = b_room(&h1c->obuf);
	while (count && !(h1s->flags & errflag) && blk) {
		struct htx_sl *sl;
		struct ist n, v;
		enum htx_blk_type type = htx_get_blk_type(blk);
		uint32_t sz = htx_get_blksz(blk);
		uint32_t vlen;

		vlen = sz;
		if (vlen > count) {
			if (type != HTX_BLK_DATA && type != HTX_BLK_TLR)
				goto full;
			vlen = count;
		}

		switch (type) {
			case HTX_BLK_UNUSED:
				break;

			case HTX_BLK_REQ_SL:
				h1m_init_req(h1m);
				h1m->flags |= H1_MF_NO_PHDR;
				sl = htx_get_blk_ptr(chn_htx, blk);
				h1s->meth = sl->info.req.meth;
				h1_parse_req_vsn(h1m, sl);
				if (!htx_reqline_to_h1(sl, &tmp))
					goto full;
				h1m->flags |= H1_MF_XFER_LEN;
				if (sl->flags & HTX_SL_F_BODYLESS)
					h1m->flags |= H1_MF_CLEN;
				h1m->state = H1_MSG_HDR_FIRST;
				break;

			case HTX_BLK_RES_SL:
				h1m_init_res(h1m);
				h1m->flags |= H1_MF_NO_PHDR;
				sl = htx_get_blk_ptr(chn_htx, blk);
				h1s->status = sl->info.res.status;
				h1_parse_res_vsn(h1m, sl);
				if (!htx_stline_to_h1(sl, &tmp))
					goto full;
				if (sl->flags & HTX_SL_F_XFER_LEN)
					h1m->flags |= H1_MF_XFER_LEN;
				if (sl->info.res.status < 200 &&
				    (sl->info.res.status == 100 || sl->info.res.status >= 102))
					process_conn_mode = 0;
				h1m->state = H1_MSG_HDR_FIRST;
				break;

			case HTX_BLK_HDR:
				h1m->state = H1_MSG_HDR_NAME;
				n = htx_get_blk_name(chn_htx, blk);
				v = htx_get_blk_value(chn_htx, blk);

				if (isteqi(n, ist("transfer-encoding")))
					h1_parse_xfer_enc_header(h1m, v);
				else if (isteqi(n, ist("content-length"))) {
					/* Only skip C-L header with invalid value. */
					if (h1_parse_cont_len_header(h1m, &v) < 0)
						goto skip_hdr;
				}
				else if (isteqi(n, ist("connection"))) {
					h1_parse_connection_header(h1m, v);
					h1_process_conn_mode(h1s, h1m, NULL, &v);
					process_conn_mode = 0;
					if (!v.len)
						goto skip_hdr;
				}

				if (!htx_hdr_to_h1(n, v, &tmp))
					goto full;
			  skip_hdr:
				h1m->state = H1_MSG_HDR_L2_LWS;
				break;
			case HTX_BLK_PHDR:
				/* not implemented yet */
				h1s->flags |= errflag;
				break;

			case HTX_BLK_EOH:
				if (h1m->state != H1_MSG_LAST_LF && process_conn_mode) {
					/* There is no "Connection:" header and
					 * the conn_mode must still be
					 * processed. So do it now. */
					n = ist("connection");
					v = ist("");
					h1_process_conn_mode(h1s, h1m, NULL, &v);
					process_conn_mode = 0;
					if (v.len) {
						if (!htx_hdr_to_h1(n, v, &tmp))
							goto full;
					}
				}

				if ((h1s->meth != HTTP_METH_CONNECT &&
				     (h1m->flags & (H1_MF_VER_11|H1_MF_RESP|H1_MF_CLEN|H1_MF_CHNK|H1_MF_XFER_LEN)) ==
				     (H1_MF_VER_11|H1_MF_XFER_LEN)) ||
				    (h1s->status >= 200 && h1s->status != 204 && h1s->status != 304 &&
				     h1s->meth != HTTP_METH_HEAD && !(h1s->meth == HTTP_METH_CONNECT && h1s->status == 200) &&
				     (h1m->flags & (H1_MF_VER_11|H1_MF_RESP|H1_MF_CLEN|H1_MF_CHNK|H1_MF_XFER_LEN)) ==
				     (H1_MF_VER_11|H1_MF_RESP|H1_MF_XFER_LEN))) {
					/* chunking needed but header not seen */
					if (!chunk_memcat(&tmp, "transfer-encoding: chunked\r\n", 28))
						goto full;
					h1m->flags |= H1_MF_CHNK;
				}

				h1m->state = H1_MSG_LAST_LF;
				if (!chunk_memcat(&tmp, "\r\n", 2))
					goto full;

				if (!(h1m->flags & H1_MF_RESP) && h1s->meth == HTTP_METH_CONNECT) {
					/* a CONNECT request is sent to the server. Switch it to tunnel mode. */
					h1_set_req_tunnel_mode(h1s);
				}
				else if ((h1m->flags & H1_MF_RESP) &&
					 ((h1s->meth == HTTP_METH_CONNECT && h1s->status == 200) || h1s->status == 101)) {
					/* a successful reply to a CONNECT or a protocol switching is sent
					 * to the client. Switch the response to tunnel mode.
					 */
					h1_set_res_tunnel_mode(h1s);
				}
				else if ((h1m->flags & H1_MF_RESP) && h1s->meth == HTTP_METH_HEAD)
					h1m->state = H1_MSG_DONE;
				else
					h1m->state = H1_MSG_DATA;
				break;

			case HTX_BLK_DATA:
				v = htx_get_blk_value(chn_htx, blk);
				v.len = vlen;
				if (!htx_data_to_h1(v, &tmp, !!(h1m->flags & H1_MF_CHNK)))
					goto full;
				break;

			case HTX_BLK_EOD:
				/* If the message is not chunked, ignore
				 * trailers. It may happen with H2 messages. */
				if (!(h1m->flags & H1_MF_CHNK))
					break;

				if (!chunk_memcat(&tmp, "0\r\n", 3))
					goto full;
				h1s->flags |= H1S_F_HAVE_O_EOD;
				h1m->state = H1_MSG_TRAILERS;
				break;

			case HTX_BLK_TLR:
				/* If the message is not chunked, ignore
				 * trailers. It may happen with H2 messages. */
				if (!(h1m->flags & H1_MF_CHNK))
					break;

				if (!(h1s->flags & H1S_F_HAVE_O_EOD)) {
					if (!chunk_memcat(&tmp, "0\r\n", 3))
						goto full;
					h1s->flags |= H1S_F_HAVE_O_EOD;
				}
				v = htx_get_blk_value(chn_htx, blk);
				v.len = vlen;
				if (!htx_trailer_to_h1(v, &tmp))
					goto full;
				h1s->flags |= H1S_F_HAVE_O_TLR;
				break;

			case HTX_BLK_EOM:
				if ((h1m->flags & H1_MF_CHNK) && h1s->meth != HTTP_METH_HEAD) {
					if (!(h1s->flags & H1S_F_HAVE_O_EOD)) {
						if (!chunk_memcat(&tmp, "0\r\n", 3))
							goto full;
						h1s->flags |= H1S_F_HAVE_O_EOD;
					}
					if (!(h1s->flags & H1S_F_HAVE_O_TLR)) {
						if (!chunk_memcat(&tmp, "\r\n", 2))
							goto full;
						h1s->flags |= H1S_F_HAVE_O_TLR;
					}
				}
				h1m->state = H1_MSG_DONE;
				h1_handle_1xx_response(h1s, h1m);
				if (!(h1m->flags & H1_MF_RESP) && h1s->status == 101)
					h1_set_req_tunnel_mode(h1s);
				else if (h1s->h1c->flags & H1C_F_IN_BUSY) {
					h1s->h1c->flags &= ~H1C_F_IN_BUSY;
					tasklet_wakeup(h1s->h1c->wait_event.task);
				}
				break;

			case HTX_BLK_OOB:
				v = htx_get_blk_value(chn_htx, blk);
				if (!chunk_memcat(&tmp, v.ptr, v.len))
					goto full;
				break;

			default:
				/* Unexpected error during output processing */
				chn_htx->flags |= HTX_FL_PARSING_ERROR;
				h1s->flags |= errflag;
				h1c->flags |= H1C_F_CS_ERROR;
				break;
		}
		total += vlen;
		count -= vlen;
		if (sz == vlen)
			blk = htx_remove_blk(chn_htx, blk);
		else {
			htx_cut_data_blk(chn_htx, blk, vlen);
			break;
		}
	}
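
	/* If the loop stopped on a partially-consumed DATA block, the
	 * htx_cut_data_blk() call above already trimmed the <vlen> bytes
	 * that were transferred, so the remainder of the block will be
	 * picked up by the next call.
	 */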

  copy:
	/* when the output buffer is empty, tmp shares the same area so that we
	 * only have to update pointers and lengths.
	 */
	if (tmp.area == h1c->obuf.area + h1c->obuf.head)
		h1c->obuf.data = tmp.data;
	else
		b_putblk(&h1c->obuf, tmp.area, tmp.data);

	htx_to_buf(chn_htx, buf);
 out:
	if (!buf_room_for_htx_data(&h1c->obuf))
		h1c->flags |= H1C_F_OUT_FULL;
  end:
	return total;

  full:
	h1c->flags |= H1C_F_OUT_FULL;
	goto copy;
}
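
/* A minimal sketch of the driving loop (this is how h1_snd_buf() below
 * actually uses this function): keep feeding the channel buffer until all
 * bytes were consumed or the mux cannot make progress (buffer full, alloc
 * failure or error), in which case 0 is returned and the caller retries
 * once unblocked:
 *
 *	while (count) {
 *		size_t ret = h1_process_output(h1c, buf, count);
 *		if (!ret)
 *			break;
 *		count -= ret;
 *	}
 */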

/*********************************************************/
/* functions below are I/O callbacks from the connection */
/*********************************************************/
static void h1_wake_stream_for_recv(struct h1s *h1s)
{
	if (h1s && h1s->recv_wait) {
		h1s->recv_wait->events &= ~SUB_RETRY_RECV;
		tasklet_wakeup(h1s->recv_wait->task);
		h1s->recv_wait = NULL;
	}
}
static void h1_wake_stream_for_send(struct h1s *h1s)
{
	if (h1s && h1s->send_wait) {
		h1s->send_wait->events &= ~SUB_RETRY_SEND;
		tasklet_wakeup(h1s->send_wait->task);
		h1s->send_wait = NULL;
	}
}

/*
 * Attempt to read data, and subscribe if none available
 */
static int h1_recv(struct h1c *h1c)
{
	struct connection *conn = h1c->conn;
	struct h1s *h1s = h1c->h1s;
	size_t ret = 0, max;
	int rcvd = 0;

	if (h1c->wait_event.events & SUB_RETRY_RECV)
		return (b_data(&h1c->ibuf));

	if (!h1_recv_allowed(h1c)) {
		rcvd = 1;
		goto end;
	}

	if (!h1_get_buf(h1c, &h1c->ibuf)) {
		h1c->flags |= H1C_F_IN_ALLOC;
		goto end;
	}

	if (h1s && (h1s->flags & (H1S_F_BUF_FLUSH|H1S_F_SPLICED_DATA))) {
		if (!h1s_data_pending(h1s))
			h1_wake_stream_for_recv(h1s);
		rcvd = 1;
		goto end;
	}

	/*
	 * If we only have a small amount of data, realign it,
	 * it's probably cheaper than doing 2 recv() calls.
	 */
	if (b_data(&h1c->ibuf) > 0 && b_data(&h1c->ibuf) < 128)
		b_slow_realign(&h1c->ibuf, trash.area, 0);

	max = buf_room_for_htx_data(&h1c->ibuf);
	if (max) {
		h1c->flags &= ~H1C_F_IN_FULL;

		b_realign_if_empty(&h1c->ibuf);
		if (!b_data(&h1c->ibuf)) {
			/* try to pre-align the buffer like the rxbufs will be
			 * to optimize memory copies.
			 */
			h1c->ibuf.head  = sizeof(struct htx);
		}
		ret = conn->xprt->rcv_buf(conn, &h1c->ibuf, max, 0);
	}
	if (ret > 0) {
		rcvd = 1;
		if (h1s && h1s->cs) {
			h1s->cs->flags |= (CS_FL_READ_PARTIAL|CS_FL_RCV_MORE);
			if (h1s->csinfo.t_idle == -1)
				h1s->csinfo.t_idle = tv_ms_elapsed(&h1s->csinfo.tv_create, &now) - h1s->csinfo.t_handshake;
		}
	}

	if (!h1_recv_allowed(h1c) || !buf_room_for_htx_data(&h1c->ibuf)) {
		rcvd = 1;
		goto end;
	}

	conn->xprt->subscribe(conn, SUB_RETRY_RECV, &h1c->wait_event);

  end:
	if (ret > 0 || (conn->flags & CO_FL_ERROR) || conn_xprt_read0_pending(conn))
		h1_wake_stream_for_recv(h1s);

	if (conn_xprt_read0_pending(conn) && h1s && h1s->cs) {
		h1s->cs->flags |= CS_FL_REOS;
		rcvd = 1;
	}

	if (!b_data(&h1c->ibuf))
		h1_release_buf(h1c, &h1c->ibuf);
	else if (!buf_room_for_htx_data(&h1c->ibuf))
		h1c->flags |= H1C_F_IN_FULL;
	return rcvd;
}
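
/* Note on the realign heuristic in h1_recv(): when fewer than 128 pending
 * bytes sit near the end of the buffer, moving them back to the start lets
 * the next rcv_buf() fill one large contiguous area instead of performing
 * two smaller reads around the wrap point.
 */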


/*
 * Try to send data if possible
 */
static int h1_send(struct h1c *h1c)
{
	struct connection *conn = h1c->conn;
	unsigned int flags = 0;
	size_t ret;
	int sent = 0;

	if (conn->flags & CO_FL_ERROR)
		return 0;

	if (h1c->flags & H1C_F_CS_WAIT_CONN) {
		if (!(h1c->wait_event.events & SUB_RETRY_SEND))
			conn->xprt->subscribe(conn, SUB_RETRY_SEND, &h1c->wait_event);
		return 0;
	}

	if (!b_data(&h1c->obuf))
		goto end;

	if (h1c->flags & H1C_F_OUT_FULL)
		flags |= CO_SFL_MSG_MORE;

	ret = conn->xprt->snd_buf(conn, &h1c->obuf, b_data(&h1c->obuf), flags);
	if (ret > 0) {
		h1c->flags &= ~H1C_F_OUT_FULL;
		b_del(&h1c->obuf, ret);
		sent = 1;
	}

	if (conn->flags & (CO_FL_ERROR|CO_FL_SOCK_WR_SH)) {
		/* error or output closed, nothing to send, clear the buffer to release it */
		b_reset(&h1c->obuf);
	}

  end:
	if (!(h1c->flags & H1C_F_OUT_FULL))
		h1_wake_stream_for_send(h1c->h1s);

	/* We're done, no more to send */
	if (!b_data(&h1c->obuf)) {
		h1_release_buf(h1c, &h1c->obuf);
		if (h1c->flags & H1C_F_CS_SHUTW_NOW)
			h1_shutw_conn(conn, CS_SHW_NORMAL);
	}
	else if (!(h1c->wait_event.events & SUB_RETRY_SEND))
		conn->xprt->subscribe(conn, SUB_RETRY_SEND, &h1c->wait_event);

	return sent;
}
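
/* CO_SFL_MSG_MORE above hints the transport layer that more data follows
 * when the output buffer filled up, typically letting the TCP stack
 * coalesce segments (MSG_MORE on Linux) instead of flushing a partial
 * frame immediately.
 */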


/* callback called on any event by the connection handler.
 * It applies changes and returns zero, or < 0 if it wants immediate
 * destruction of the connection.
 */
static int h1_process(struct h1c *h1c)
{
	struct connection *conn = h1c->conn;
	struct h1s *h1s = h1c->h1s;

	if (!conn->ctx)
		return -1;

	if (h1c->flags & H1C_F_CS_WAIT_CONN) {
		if (!(conn->flags & (CO_FL_CONNECTED|CO_FL_ERROR)))
			goto end;
		h1c->flags &= ~H1C_F_CS_WAIT_CONN;
		h1_wake_stream_for_send(h1s);
	}

	if (!h1s) {
		if (h1c->flags & (H1C_F_CS_ERROR|H1C_F_CS_SHUTDOWN) ||
		    conn->flags & (CO_FL_ERROR|CO_FL_SOCK_RD_SH|CO_FL_SOCK_WR_SH))
			goto release;
		if (!conn_is_back(conn) && (h1c->flags & H1C_F_CS_IDLE)) {
			if (!h1s_create(h1c, NULL, NULL))
				goto release;
		}
		else
			goto end;
		h1s = h1c->h1s;
	}

	if (b_data(&h1c->ibuf) && h1s->csinfo.t_idle == -1)
		h1s->csinfo.t_idle = tv_ms_elapsed(&h1s->csinfo.tv_create, &now) - h1s->csinfo.t_handshake;

	if (h1s && h1s->cs && h1s->cs->data_cb->wake && !h1s_data_pending(h1s) &&
	    (conn_xprt_read0_pending(conn) || h1c->flags & H1C_F_CS_ERROR ||
	     conn->flags & (CO_FL_ERROR | CO_FL_SOCK_WR_SH))) {
		int flags = 0;

		if (h1c->flags & H1C_F_CS_ERROR || conn->flags & CO_FL_ERROR)
			flags |= CS_FL_ERROR;
		if (conn_xprt_read0_pending(conn))
			flags |= CS_FL_REOS;
		h1s->cs->flags |= flags;
		h1s->cs->data_cb->wake(h1s->cs);
	}
  end:
	if (h1c->task) {
		h1c->task->expire = TICK_ETERNITY;
		if (b_data(&h1c->obuf)) {
			h1c->task->expire = tick_add(now_ms, ((h1c->flags & (H1C_F_CS_SHUTW_NOW|H1C_F_CS_SHUTDOWN))
							      ? h1c->shut_timeout
							      : h1c->timeout));
			task_queue(h1c->task);
		}
	}
	return 0;

  release:
	h1_release(conn);
	return -1;
}

static struct task *h1_io_cb(struct task *t, void *ctx, unsigned short status)
{
	struct h1c *h1c = ctx;
	int ret = 0;

	if (!(h1c->wait_event.events & SUB_RETRY_SEND))
		ret = h1_send(h1c);
	if (!(h1c->wait_event.events & SUB_RETRY_RECV))
		ret |= h1_recv(h1c);
	if (ret || !h1c->h1s)
		h1_process(h1c);
	return NULL;
}

static void h1_reset(struct connection *conn)
{
	struct h1c *h1c = conn->ctx;

	/* Reset the flags, and let the mux know we're waiting for a connection */
	h1c->flags = H1C_F_CS_WAIT_CONN;
}

static int h1_wake(struct connection *conn)
{
	struct h1c *h1c = conn->ctx;
	int ret;

	h1_send(h1c);
	ret = h1_process(h1c);
	if (ret == 0) {
		struct h1s *h1s = h1c->h1s;

		if (h1s && h1s->cs && h1s->cs->data_cb->wake)
			ret = h1s->cs->data_cb->wake(h1s->cs);
	}
	return ret;
}

/* Connection timeout management. The principle is that if there is no
 * receive nor send activity for a certain amount of time, the connection
 * is closed.
 */
static struct task *h1_timeout_task(struct task *t, void *context, unsigned short state)
{
	struct h1c *h1c = context;
	int expired = tick_is_expired(t->expire, now_ms);

	if (!expired && h1c)
		return t;

	task_delete(t);
	task_free(t);

	if (!h1c) {
		/* resources were already deleted */
		return NULL;
	}

	h1c->task = NULL;
	/* If a stream is still attached to the mux, just set an error and wait
	 * for the stream's timeout. Otherwise, release the mux. This only
	 * works because the same timeouts are used.
	 */
	if (h1c->h1s && h1c->h1s->cs)
		h1c->flags |= H1C_F_CS_ERROR;
	else
		h1_release(h1c->conn);
	return NULL;
}
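
/* The timer itself is armed from h1_process() above (and from h1_detach()
 * below): the expiry is pushed back to now + the (shut_)timeout whenever
 * output data remains pending, so this task only ever fires after a real
 * period of inactivity.
 */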

/*******************************************/
/* functions below are used by the streams */
/*******************************************/
/*
 * Attach a new stream to a connection
 * (Used for outgoing connections)
 */
static struct conn_stream *h1_attach(struct connection *conn, struct session *sess)
{
	struct h1c *h1c = conn->ctx;
	struct conn_stream *cs = NULL;
	struct h1s *h1s;

	if (h1c->flags & H1C_F_CS_ERROR)
		goto end;

	cs = cs_new(h1c->conn);
	if (!cs)
		goto end;

	h1s = h1s_create(h1c, cs, sess);
	if (h1s == NULL)
		goto end;

	return cs;
  end:
	cs_free(cs);
	return NULL;
}

/* Retrieves a valid conn_stream from this connection, or returns NULL. For
 * this mux, it's easy as we can only store a single conn_stream.
 */
static const struct conn_stream *h1_get_first_cs(const struct connection *conn)
{
	struct h1c *h1c = conn->ctx;
	struct h1s *h1s = h1c->h1s;

	if (h1s)
		return h1s->cs;

	return NULL;
}

static void h1_destroy(struct connection *conn)
{
	struct h1c *h1c = conn->ctx;

	if (!h1c->h1s)
		h1_release(conn);
}

/*
 * Detach the stream from the connection and possibly release the connection.
 */
static void h1_detach(struct conn_stream *cs)
{
	struct h1s *h1s = cs->ctx;
	struct h1c *h1c;
	struct session *sess;
	int is_not_first;

	cs->ctx = NULL;
	if (!h1s)
		return;

	sess = h1s->sess;
	h1c = h1s->h1c;
	h1s->cs = NULL;

	is_not_first = h1s->flags & H1S_F_NOT_FIRST;
	h1s_destroy(h1s);

	if (conn_is_back(h1c->conn) && (h1c->flags & H1C_F_CS_IDLE)) {
		/* If there is any excess server data in the input buffer,
		 * release it and close the connection ASAP (some data may
		 * still remain in the output buffer). This happens if a
		 * server sends invalid responses, so in that case we don't
		 * want to reuse the connection.
		 */
		if (b_data(&h1c->ibuf)) {
			h1_release_buf(h1c, &h1c->ibuf);
			h1c->flags = (h1c->flags & ~H1C_F_CS_IDLE) | H1C_F_CS_SHUTW_NOW;
			goto release;
		}

		/* Never ever allow to reuse a connection from a non-reuse backend */
		if ((h1c->px->options & PR_O_REUSE_MASK) == PR_O_REUSE_NEVR)
			h1c->conn->flags |= CO_FL_PRIVATE;

		if (!(h1c->conn->owner)) {
			h1c->conn->owner = sess;
			if (!session_add_conn(sess, h1c->conn, h1c->conn->target)) {
				h1c->conn->owner = NULL;
				if (!srv_add_to_idle_list(objt_server(h1c->conn->target), h1c->conn))
					/* The server doesn't want it, let's kill the connection right away */
					h1c->conn->mux->destroy(h1c->conn);
				else
					tasklet_wakeup(h1c->wait_event.task);
				return;
			}
		}
		if (h1c->conn->owner == sess) {
			int ret = session_check_idle_conn(sess, h1c->conn);
			if (ret == -1)
				/* The connection got destroyed, let's leave */
				return;
			else if (ret == 1) {
				/* The connection was added to the server list,
				 * wake the task so we can subscribe to events
				 */
				tasklet_wakeup(h1c->wait_event.task);
				return;
			}
		}
		/* we're in keep-alive with an idle connection, monitor it if not already done */
		if (LIST_ISEMPTY(&h1c->conn->list)) {
			struct server *srv = objt_server(h1c->conn->target);

			if (srv) {
				if (h1c->conn->flags & CO_FL_PRIVATE)
					LIST_ADD(&srv->priv_conns[tid], &h1c->conn->list);
				else if (is_not_first)
					LIST_ADD(&srv->safe_conns[tid], &h1c->conn->list);
				else
					LIST_ADD(&srv->idle_conns[tid], &h1c->conn->list);
			}
		}
	}

  release:
	/* We don't want to close right now unless the connection is in error or shut down for writes */
	if ((h1c->flags & (H1C_F_CS_ERROR|H1C_F_CS_SHUTDOWN)) ||
	    (h1c->conn->flags & (CO_FL_ERROR|CO_FL_SOCK_WR_SH)) ||
	    ((h1c->flags & H1C_F_CS_SHUTW_NOW) && !b_data(&h1c->obuf)) ||
	    !h1c->conn->owner)
		h1_release(h1c->conn);
	else {
		tasklet_wakeup(h1c->wait_event.task);
		if (h1c->task) {
			h1c->task->expire = TICK_ETERNITY;
			if (b_data(&h1c->obuf)) {
				h1c->task->expire = tick_add(now_ms, ((h1c->flags & (H1C_F_CS_SHUTW_NOW|H1C_F_CS_SHUTDOWN))
								      ? h1c->shut_timeout
								      : h1c->timeout));
				task_queue(h1c->task);
			}
		}
	}
}
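
/* Note on the three lists above: CO_FL_PRIVATE connections are kept on the
 * server's private list (bound to their session), connections whose last
 * stream was not the first one (is_not_first, i.e. the connection was
 * already reused at least once) go to the "safe" list, and first-time ones
 * to the plain idle list, which is how the reuse policy tells them apart
 * afterwards.
 */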


static void h1_shutr(struct conn_stream *cs, enum cs_shr_mode mode)
{
	struct h1s *h1s = cs->ctx;
	struct h1c *h1c;

	if (!h1s)
		return;
	h1c = h1s->h1c;

	if ((cs->flags & CS_FL_KILL_CONN) || (h1c->conn->flags & (CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH)))
		goto do_shutr;

	if (h1s->flags & H1S_F_WANT_KAL)
		return;

  do_shutr:
	/* NOTE: Be sure to handle abort (cf. h2_shutr) */
	if (cs->flags & CS_FL_SHR)
		return;
	if (conn_xprt_ready(cs->conn) && cs->conn->xprt->shutr)
		cs->conn->xprt->shutr(cs->conn, (mode == CS_SHR_DRAIN));
}

static void h1_shutw(struct conn_stream *cs, enum cs_shw_mode mode)
{
	struct h1s *h1s = cs->ctx;
	struct h1c *h1c;

	if (!h1s)
		return;
	h1c = h1s->h1c;

	if ((cs->flags & CS_FL_KILL_CONN) || (h1c->conn->flags & (CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH)))
		goto do_shutw;

	if ((h1s->flags & H1S_F_WANT_KAL) && h1s->req.state == H1_MSG_DONE && h1s->res.state == H1_MSG_DONE)
		return;

  do_shutw:
	h1c->flags |= H1C_F_CS_SHUTW_NOW;
	if ((cs->flags & CS_FL_SHW) || b_data(&h1c->obuf))
		return;

	h1_shutw_conn(cs->conn, mode);
}

static void h1_shutw_conn(struct connection *conn, enum cs_shw_mode mode)
{
	struct h1c *h1c = conn->ctx;

	conn_xprt_shutw(conn);
	conn_sock_shutw(conn, (mode == CS_SHW_NORMAL));
	h1c->flags = (h1c->flags & ~H1C_F_CS_SHUTW_NOW) | H1C_F_CS_SHUTDOWN;
}

/* Called from the upper layer, to unsubscribe from events */
static int h1_unsubscribe(struct conn_stream *cs, int event_type, void *param)
{
	struct wait_event *sw;
	struct h1s *h1s = cs->ctx;

	if (!h1s)
		return 0;

	if (event_type & SUB_RETRY_RECV) {
		sw = param;
		if (h1s->recv_wait == sw) {
			sw->events &= ~SUB_RETRY_RECV;
			h1s->recv_wait = NULL;
		}
	}
	if (event_type & SUB_RETRY_SEND) {
		sw = param;
		if (h1s->send_wait == sw) {
			sw->events &= ~SUB_RETRY_SEND;
			h1s->send_wait = NULL;
		}
	}
	return 0;
}

/* Called from the upper layer, to subscribe to events, such as being able to send */
static int h1_subscribe(struct conn_stream *cs, int event_type, void *param)
{
	struct wait_event *sw;
	struct h1s *h1s = cs->ctx;

	if (!h1s)
		return -1;

	switch (event_type) {
		case SUB_RETRY_RECV:
			sw = param;
			if (!(sw->events & SUB_RETRY_RECV)) {
				sw->events |= SUB_RETRY_RECV;
				sw->handle = h1s;
				h1s->recv_wait = sw;
			}
			return 0;
		case SUB_RETRY_SEND:
			sw = param;
			if (!(sw->events & SUB_RETRY_SEND)) {
				sw->events |= SUB_RETRY_SEND;
				sw->handle = h1s;
				h1s->send_wait = sw;
			}
			return 0;
		default:
			break;
	}
	return -1;
}
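
/* Sketch of the expected caller side (hypothetical upper-layer code, for
 * illustration only): the caller owns a struct wait_event whose task is
 * woken by h1_wake_stream_for_send()/_recv() once progress is possible:
 *
 *	struct wait_event *sw = &si->wait_event;
 *
 *	if (cs->conn->mux->subscribe(cs, SUB_RETRY_SEND, sw) == 0) {
 *		// sw->events now contains SUB_RETRY_SEND and sw->task
 *		// will be scheduled when room becomes available
 *	}
 *
 * The same <sw> must be passed to unsubscribe() to cancel the subscription.
 */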

/* Called from the upper layer, to receive data */
static size_t h1_rcv_buf(struct conn_stream *cs, struct buffer *buf, size_t count, int flags)
{
	struct h1s *h1s = cs->ctx;
	struct h1c *h1c = h1s->h1c;
	size_t ret = 0;

	if (!(h1c->flags & H1C_F_IN_ALLOC))
		ret = h1_process_input(h1c, buf, flags);

	if (flags & CO_RFL_BUF_FLUSH) {
		struct h1m *h1m = (!conn_is_back(cs->conn) ? &h1s->req : &h1s->res);

		if (h1m->state == H1_MSG_TUNNEL || (h1m->state == H1_MSG_DATA && h1m->curr_len))
			h1s->flags |= H1S_F_BUF_FLUSH;
	}
	else if (ret > 0 || (h1s->flags & H1S_F_SPLICED_DATA)) {
		h1s->flags &= ~H1S_F_SPLICED_DATA;
		if (!(h1c->wait_event.events & SUB_RETRY_RECV))
			tasklet_wakeup(h1c->wait_event.task);
	}
	return ret;
}


/* Called from the upper layer, to send data */
static size_t h1_snd_buf(struct conn_stream *cs, struct buffer *buf, size_t count, int flags)
{
	struct h1s *h1s = cs->ctx;
	struct h1c *h1c;
	size_t total = 0;

	if (!h1s)
		return 0;

	h1c = h1s->h1c;
	if (h1c->flags & H1C_F_CS_WAIT_CONN)
		return 0;

	while (count) {
		size_t ret = 0;

		if (!(h1c->flags & (H1C_F_OUT_FULL|H1C_F_OUT_ALLOC)))
			ret = h1_process_output(h1c, buf, count);
		if (!ret)
			break;
		total += ret;
		count -= ret;
		if ((h1c->wait_event.events & SUB_RETRY_SEND) || !h1_send(h1c))
			break;
	}

	return total;
}

#if defined(CONFIG_HAP_LINUX_SPLICE)
/* Send and get, using splicing */
static int h1_rcv_pipe(struct conn_stream *cs, struct pipe *pipe, unsigned int count)
{
	struct h1s *h1s = cs->ctx;
	struct h1m *h1m = (!conn_is_back(cs->conn) ? &h1s->req : &h1s->res);
	int ret = 0;

	if ((h1m->flags & H1_MF_CHNK) || (h1m->state != H1_MSG_DATA && h1m->state != H1_MSG_TUNNEL)) {
		h1s->flags &= ~(H1S_F_BUF_FLUSH|H1S_F_SPLICED_DATA);
		if (!(h1s->h1c->wait_event.events & SUB_RETRY_RECV))
			cs->conn->xprt->subscribe(cs->conn, SUB_RETRY_RECV, &h1s->h1c->wait_event);
		goto end;
	}

	if (h1s_data_pending(h1s)) {
		h1s->flags |= H1S_F_BUF_FLUSH;
		goto end;
	}

	h1s->flags &= ~H1S_F_BUF_FLUSH;
	h1s->flags |= H1S_F_SPLICED_DATA;
	if (h1m->state == H1_MSG_DATA && count > h1m->curr_len)
		count = h1m->curr_len;
	ret = cs->conn->xprt->rcv_pipe(cs->conn, pipe, count);
	if (h1m->state == H1_MSG_DATA && ret >= 0) {
		h1m->curr_len -= ret;
		if (!h1m->curr_len)
			h1s->flags &= ~(H1S_F_BUF_FLUSH|H1S_F_SPLICED_DATA);
	}

  end:
	if (conn_xprt_read0_pending(cs->conn)) {
		cs->flags |= CS_FL_REOS;
		h1s->flags &= ~(H1S_F_BUF_FLUSH|H1S_F_SPLICED_DATA);
	}
	return ret;
}

static int h1_snd_pipe(struct conn_stream *cs, struct pipe *pipe)
{
	struct h1s *h1s = cs->ctx;
	int ret = 0;

	if (b_data(&h1s->h1c->obuf))
		goto end;

	ret = cs->conn->xprt->snd_pipe(cs->conn, pipe);
  end:
	if (pipe->data) {
		if (!(h1s->h1c->wait_event.events & SUB_RETRY_SEND))
			cs->conn->xprt->subscribe(cs->conn, SUB_RETRY_SEND, &h1s->h1c->wait_event);
	}
	return ret;
}
#endif
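
/* Note: the two splice callbacks above are only compiled in when
 * CONFIG_HAP_LINUX_SPLICE is defined (normally via USE_LINUX_SPLICE in the
 * Makefile); elsewhere the mux falls back to the rcv_buf()/snd_buf() copy
 * path. Splicing is also refused for chunked messages, since the chunk
 * envelope must be parsed and emitted by the mux itself.
 */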

static int h1_ctl(struct connection *conn, enum mux_ctl_type mux_ctl, void *output)
{
	int ret = 0;
	switch (mux_ctl) {
	case MUX_STATUS:
		if (conn->flags & CO_FL_CONNECTED)
			ret |= MUX_STATUS_READY;
		return ret;
	default:
		return -1;
	}
}

/* for debugging with CLI's "show fd" command */
static void h1_show_fd(struct buffer *msg, struct connection *conn)
{
	struct h1c *h1c = conn->ctx;
	struct h1s *h1s = h1c->h1s;

	chunk_appendf(msg, " h1c.flg=0x%x .sub=%d .ibuf=%u@%p+%u/%u .obuf=%u@%p+%u/%u",
		      h1c->flags, h1c->wait_event.events,
		      (unsigned int)b_data(&h1c->ibuf), b_orig(&h1c->ibuf),
		      (unsigned int)b_head_ofs(&h1c->ibuf), (unsigned int)b_size(&h1c->ibuf),
		      (unsigned int)b_data(&h1c->obuf), b_orig(&h1c->obuf),
		      (unsigned int)b_head_ofs(&h1c->obuf), (unsigned int)b_size(&h1c->obuf));

	if (h1s) {
		char *method;

		if (h1s->meth < HTTP_METH_OTHER)
			method = http_known_methods[h1s->meth].ptr;
		else
			method = "UNKNOWN";
		chunk_appendf(msg, " h1s=%p h1s.flg=0x%x .req.state=%s .res.state=%s"
			      " .meth=%s status=%d",
			      h1s, h1s->flags,
			      h1m_state_str(h1s->req.state),
			      h1m_state_str(h1s->res.state), method, h1s->status);
		if (h1s->cs)
			chunk_appendf(msg, " .cs.flg=0x%08x .cs.data=%p",
				      h1s->cs->flags, h1s->cs->data);
	}
}
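
/* Sample output appended by h1_show_fd() (values purely illustrative):
 *
 *   h1c.flg=0x10000 .sub=1 .ibuf=0@(nil)+0/0 .obuf=0@(nil)+0/0
 *    h1s=0x7f... h1s.flg=0x10 .req.state=MSG_DONE .res.state=MSG_DONE .meth=GET status=200
 */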

/****************************************/
/* MUX initialization and instantiation */
/****************************************/

/* The mux operations */
const struct mux_ops mux_h1_ops = {
	.init        = h1_init,
	.wake        = h1_wake,
	.attach      = h1_attach,
	.get_first_cs = h1_get_first_cs,
	.get_cs_info = h1_get_cs_info,
	.detach      = h1_detach,
	.destroy     = h1_destroy,
	.avail_streams = h1_avail_streams,
	.used_streams = h1_used_streams,
	.rcv_buf     = h1_rcv_buf,
	.snd_buf     = h1_snd_buf,
#if defined(CONFIG_HAP_LINUX_SPLICE)
	.rcv_pipe    = h1_rcv_pipe,
	.snd_pipe    = h1_snd_pipe,
#endif
	.subscribe   = h1_subscribe,
	.unsubscribe = h1_unsubscribe,
	.shutr       = h1_shutr,
	.shutw       = h1_shutw,
	.show_fd     = h1_show_fd,
	.reset       = h1_reset,
	.ctl         = h1_ctl,
	.flags       = MX_FL_NONE,
	.name        = "h1",
};


/* this mux registers default HTX proto */
static struct mux_proto_list mux_proto_htx =
{ .token = IST(""), .mode = PROTO_MODE_HTX, .side = PROTO_SIDE_BOTH, .mux = &mux_h1_ops };

INITCALL1(STG_REGISTER, register_mux_proto, &mux_proto_htx);
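
/* With this registration in place, the mux is picked automatically for HTX
 * proxies on both sides (PROTO_SIDE_BOTH; the empty token means "default").
 * For illustration, a minimal HAProxy 1.9-style configuration selecting the
 * HTX mode that routes traffic through this mux:
 *
 *	defaults
 *		mode http
 *		option http-use-htx
 */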

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */