1 /*
2 * Copyright (c) 2014-2016 DeNA Co., Ltd., Kazuho Oku, Tatsuhiro Tsujikawa
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a copy
5 * of this software and associated documentation files (the "Software"), to
6 * deal in the Software without restriction, including without limitation the
7 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
8 * sell copies of the Software, and to permit persons to whom the Software is
9 * furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
20 * IN THE SOFTWARE.
21 */
22 #include <limits.h>
23 #include <stdio.h>
24 #include <stdlib.h>
25 #include <sys/uio.h>
26 #include "h2o.h"
27 #include "h2o/socket.h"
28
29 #ifndef IOV_MAX
30 #define IOV_MAX UIO_MAXIOV
31 #endif
32
33 #define INITIAL_INBUFSZ 8192
34
/* common state for actions that must run on the next loop iteration, outside
 * the current call stack; allocated from the request's memory pool */
struct st_deferred_request_action_t {
    h2o_timer_t timeout; /* zero-delay timer that fires the action */
    h2o_req_t *req;      /* request the action operates on */
};
39
/* arguments captured for a deferred invocation of h2o_reprocess_request */
struct st_reprocess_request_deferred_t {
    struct st_deferred_request_action_t super; /* timer + request; callback recovers this struct via super.timeout */
    h2o_iovec_t method;
    const h2o_url_scheme_t *scheme;
    h2o_iovec_t authority;
    h2o_iovec_t path;
    h2o_req_overrides_t *overrides;
    int is_delegated;
};
49
/* arguments captured for a deferred invocation of h2o_send_error_generic
 * (see DECL_SEND_ERROR_DEFERRED) */
struct st_send_error_deferred_t {
    h2o_req_t *req;
    int status;
    const char *reason;
    const char *body;
    int flags;
    h2o_timer_t _timeout; /* zero-delay timer that fires the deferred send */
};
58
on_deferred_action_dispose(void * _action)59 static void on_deferred_action_dispose(void *_action)
60 {
61 struct st_deferred_request_action_t *action = _action;
62 h2o_timer_unlink(&action->timeout);
63 }
64
/* Allocates a deferred action of `sz` bytes (sz >= sizeof the base struct)
 * from the request pool, and schedules `cb` to run on the next loop iteration.
 * The pool's dispose callback unlinks the timer, so the action never outlives
 * the request. */
static struct st_deferred_request_action_t *create_deferred_action(h2o_req_t *req, size_t sz, h2o_timer_cb cb)
{
    struct st_deferred_request_action_t *deferred = h2o_mem_alloc_shared(&req->pool, sz, on_deferred_action_dispose);

    deferred->req = req;
    h2o_timer_init(&deferred->timeout, cb);
    h2o_timer_link(req->conn->ctx->loop, 0, &deferred->timeout);

    return deferred;
}
73
/* Looks up the host configuration that matches `authority` (a hostname with an
 * optional port) in the NULL-terminated list `hostconfs`. On a wildcard match
 * ("*.example.com"), `*wildcard_match` is set to the part of the hostname that
 * was consumed by the asterisk. Returns NULL when nothing matches or the
 * authority cannot be parsed. */
static h2o_hostconf_t *find_hostconf(h2o_hostconf_t **hostconfs, h2o_iovec_t authority, uint16_t default_port,
                                     h2o_iovec_t *wildcard_match)
{
    h2o_iovec_t hostname;
    uint16_t port;
    char *hostname_lc;

    /* safe-guard for alloca */
    if (authority.len >= 65536)
        return NULL;

    /* extract the specified hostname and port */
    if (h2o_url_parse_hostport(authority.base, authority.len, &hostname, &port) == NULL)
        return NULL;
    /* 65535 is used as the "no port specified" sentinel (presumably set by
     * h2o_url_parse_hostport — TODO confirm); substitute the scheme default */
    if (port == 65535)
        port = default_port;

    /* convert supplied hostname to lower-case (hostnames compare case-insensitively) */
    hostname_lc = alloca(hostname.len);
    h2o_strcopytolower(hostname_lc, hostname.base, hostname.len);

    do {
        h2o_hostconf_t *hostconf = *hostconfs;
        if (hostconf->authority.port == port || (hostconf->authority.port == 65535 && port == default_port)) {
            if (hostconf->authority.host.base[0] == '*') {
                /* matching against "*.foo.bar": the suffix after '*' must equal the
                 * tail of the supplied hostname, and the wildcard must consume at
                 * least one character (cmplen < hostname.len) */
                size_t cmplen = hostconf->authority.host.len - 1;
                if (cmplen < hostname.len &&
                    memcmp(hostconf->authority.host.base + 1, hostname_lc + hostname.len - cmplen, cmplen) == 0) {
                    *wildcard_match = h2o_iovec_init(hostname.base, hostname.len - cmplen);
                    return hostconf;
                }
            } else {
                /* exact match */
                if (h2o_memis(hostconf->authority.host.base, hostconf->authority.host.len, hostname_lc, hostname.len))
                    return hostconf;
            }
        }
    } while (*++hostconfs != NULL);

    return NULL;
}
116
/* Returns the host to be used when no configured host matches the request's
 * authority: the first entry that is not marked strict-match, or the global
 * fallback host when every entry is strict. */
static h2o_hostconf_t *find_default_hostconf(h2o_hostconf_t **hostconfs)
{
    h2o_hostconf_t **cur;

    for (cur = hostconfs; *cur != NULL; ++cur) {
        if (!(*cur)->strict_match)
            return *cur;
    }

    /* all hosts require a strict hostname match; fall back to the global default */
    return hostconfs[0]->global->fallback_host;
}
129
/* Derives the request properties (`req->scheme`, `req->path_normalized`, ...)
 * from the parsed input (`req->input.*`) and returns the host configuration
 * that should serve the request. */
h2o_hostconf_t *h2o_req_setup(h2o_req_t *req)
{
    h2o_context_t *ctx = req->conn->ctx;
    h2o_hostconf_t *hostconf;

    req->processed_at = h2o_get_timestamp(ctx, &req->pool);

    /* find the host context; if only one host is registered, or the lookup by
     * authority fails, use the default host */
    if (req->input.authority.base != NULL) {
        if (req->conn->hosts[1] == NULL ||
            (hostconf = find_hostconf(req->conn->hosts, req->input.authority, req->input.scheme->default_port,
                                      &req->authority_wildcard_match)) == NULL)
            hostconf = find_default_hostconf(req->conn->hosts);
    } else {
        /* set the authority name to the default one */
        hostconf = find_default_hostconf(req->conn->hosts);
        req->input.authority = hostconf->authority.hostport;
    }

    req->scheme = req->input.scheme;
    req->method = req->input.method;
    req->authority = req->input.authority;
    req->path = req->input.path;
    req->path_normalized =
        h2o_url_normalize_path(&req->pool, req->input.path.base, req->input.path.len, &req->query_at, &req->norm_indexes);
    req->input.query_at = req->query_at; /* we can do this since input.path == path */

    return hostconf;
}
159
/* Runs the handler chain starting at `handler`; a handler claims the request
 * by returning zero from on_req. If no handler claims it, a 404 is sent. */
static void call_handlers(h2o_req_t *req, h2o_handler_t **handler)
{
    h2o_handler_t **last = req->pathconf->handlers.entries + req->pathconf->handlers.size;

    while (handler != last) {
        req->handler = *handler;
        if ((*handler)->on_req(*handler, req) == 0)
            return;
        ++handler;
    }

    h2o_send_error_404(req, "File Not Found", "not found", 0);
}
172
/* Selects the pathconf whose registered prefix matches `req->path_normalized`
 * (first match wins) and binds it to the request; falls back to the host's
 * fallback path when none match. */
static void setup_pathconf(h2o_req_t *req, h2o_hostconf_t *hostconf)
{
    h2o_pathconf_t *selected_pathconf = &hostconf->fallback_path;
    size_t i;

    /* setup pathconf, or redirect to "path/"; a candidate prefix matches only
     * at a path-segment boundary: either the prefix itself ends with '/', the
     * path equals the prefix exactly, or the next character of the path is '/'
     * (prevents "/foobar" from matching prefix "/foo") */
    for (i = 0; i != hostconf->paths.size; ++i) {
        h2o_pathconf_t *candidate = hostconf->paths.entries[i];
        if (req->path_normalized.len >= candidate->path.len &&
            memcmp(req->path_normalized.base, candidate->path.base, candidate->path.len) == 0 &&
            (candidate->path.base[candidate->path.len - 1] == '/' || req->path_normalized.len == candidate->path.len ||
             req->path_normalized.base[candidate->path.len] == '/')) {
            selected_pathconf = candidate;
            break;
        }
    }
    h2o_req_bind_conf(req, hostconf, selected_pathconf);
}
191
/* timer callback used by h2o_proceed_response_deferred; resumes response
 * generation on the next loop iteration */
static void deferred_proceed_cb(h2o_timer_t *entry)
{
    h2o_req_t *req = H2O_STRUCT_FROM_MEMBER(h2o_req_t, _timeout_entry, entry);
    h2o_proceed_response(req);
}
197
/* Stops the response generator and all output streams that are still active;
 * called when a request is disposed, reprocessed, or replayed. */
static void close_generator_and_filters(h2o_req_t *req)
{
    /* close the generator if it is still open */
    if (req->_generator != NULL) {
        /* close generator */
        if (req->_generator->stop != NULL)
            req->_generator->stop(req->_generator, req);
        req->_generator = NULL;
    }
    /* close the ostreams still open, walking from the top of the chain down to
     * (but not including) the final ostream owned by the connection */
    while (req->_ostr_top->next != NULL) {
        if (req->_ostr_top->stop != NULL)
            req->_ostr_top->stop(req->_ostr_top, req);
        req->_ostr_top = req->_ostr_top->next;
    }
}
214
/* Resets the response state so that a fresh response can be generated (used
 * when reprocessing or replaying a request). */
static void reset_response(h2o_req_t *req)
{
    /* zero the response, leaving content_length "unknown" (SIZE_MAX) */
    req->res = (h2o_res_t){0, NULL, SIZE_MAX};
    req->res.reason = "OK";
    req->_next_filter_index = 0;
    req->bytes_sent = 0;
}
222
/* Saves a copy of the current status and headers into `req->res.original`
 * before the response is overwritten (e.g., by reprocessing); only the first
 * invocation takes effect. */
static void retain_original_response(h2o_req_t *req)
{
    /* already retained */
    if (req->res.original.status != 0)
        return;

    req->res.original.status = req->res.status;
    h2o_vector_reserve(&req->pool, &req->res.original.headers, req->res.headers.size);
    h2o_memcpy(req->res.original.headers.entries, req->res.headers.entries,
               sizeof(req->res.headers.entries[0]) * req->res.headers.size);
    req->res.original.headers.size = req->res.headers.size;
}
234
/* Writes "<prefix><msg>\n" to stderr in one system call, so concurrent writers
 * do not interleave within a single log line. */
void h2o_write_error_log(h2o_iovec_t prefix, h2o_iovec_t msg)
{
    /* use writev(2) to emit error atomically */
    struct iovec vecs[3] = {{prefix.base, prefix.len}, {msg.base, msg.len}, {"\n", 1}};

    H2O_BUILD_ASSERT(sizeof(vecs) / sizeof(vecs[0]) <= IOV_MAX);
    writev(2, vecs, (int)(sizeof(vecs) / sizeof(vecs[0])));
}
242
/* default error_log delegate: accumulates the message in req->error_logs (for
 * access loggers to pick up) and optionally echoes it to the process error log */
static void on_default_error_callback(void *data, h2o_iovec_t prefix, h2o_iovec_t msg)
{
    h2o_req_t *req = (void *)data;
    /* lazily create the buffer on first error */
    if (req->error_logs == NULL)
        h2o_buffer_init(&req->error_logs, &h2o_socket_buffer_prototype);
    h2o_buffer_append(&req->error_logs, prefix.base, prefix.len);
    h2o_buffer_append(&req->error_logs, msg.base, msg.len);

    if (req->pathconf->error_log.emit_request_errors) {
        h2o_write_error_log(prefix, msg);
    }
}
255
/* Initializes `req` for use on connection `conn`. When `src` is non-NULL, the
 * input-side properties (method, path, headers, env, ...) are deep-copied from
 * it into the new request's pool, so that `src` can be disposed independently. */
void h2o_init_request(h2o_req_t *req, h2o_conn_t *conn, h2o_req_t *src)
{
    /* clear all memory (except memory pool, since it is large) */
    memset(req, 0, offsetof(h2o_req_t, pool));

    /* init memory pool (before others, since it may be used) */
    h2o_mem_init_pool(&req->pool);

    /* init properties that should be initialized to non-zero */
    req->conn = conn;
    req->_timeout_entry.cb = deferred_proceed_cb;
    req->res.reason = "OK"; /* default to "OK" regardless of the status value, it's not important after all (never sent in HTTP2) */
    req->res.content_length = SIZE_MAX;
    req->preferred_chunk_size = SIZE_MAX;
    req->content_length = SIZE_MAX;
    req->remaining_delegations = conn == NULL ? 0 : conn->ctx->globalconf->max_delegations;
    req->remaining_reprocesses = 5;
    req->error_log_delegate.cb = on_default_error_callback;
    req->error_log_delegate.data = req;

    if (src != NULL) {
        size_t i;
/* deep-copy an iovec member from `src` into the new request's pool */
#define COPY(buf) \
    do { \
        req->buf.base = h2o_mem_alloc_pool(&req->pool, char, src->buf.len); \
        memcpy(req->buf.base, src->buf.base, src->buf.len); \
        req->buf.len = src->buf.len; \
    } while (0)
        COPY(input.authority);
        COPY(input.method);
        COPY(input.path);
        req->input.scheme = src->input.scheme;
        req->version = src->version;
        req->entity = src->entity;
        req->http1_is_persistent = src->http1_is_persistent;
        req->timestamps = src->timestamps;
        if (src->upgrade.base != NULL) {
            COPY(upgrade);
        } else {
            req->upgrade.base = NULL;
            req->upgrade.len = 0;
        }
#undef COPY
        /* copy the headers; token names are interned and can be shared, other
         * names and all values must be duplicated into the new pool */
        h2o_vector_reserve(&req->pool, &req->headers, src->headers.size);
        req->headers.size = src->headers.size;
        for (i = 0; i != src->headers.size; ++i) {
            h2o_header_t *dst_header = req->headers.entries + i, *src_header = src->headers.entries + i;
            if (h2o_iovec_is_token(src_header->name)) {
                dst_header->name = src_header->name;
            } else {
                dst_header->name = h2o_mem_alloc_pool(&req->pool, *dst_header->name, 1);
                *dst_header->name = h2o_strdup(&req->pool, src_header->name->base, src_header->name->len);
            }
            dst_header->value = h2o_strdup(&req->pool, src_header->value.base, src_header->value.len);
            dst_header->flags = src_header->flags;
            if (!src_header->orig_name)
                dst_header->orig_name = NULL;
            else
                dst_header->orig_name = h2o_strdup(&req->pool, src_header->orig_name, src_header->name->len).base;
        }
        /* copy the environment variables */
        if (src->env.size != 0) {
            h2o_vector_reserve(&req->pool, &req->env, src->env.size);
            req->env.size = src->env.size;
            for (i = 0; i != req->env.size; ++i)
                req->env.entries[i] = h2o_strdup(&req->pool, src->env.entries[i].base, src->env.entries[i].len);
        }
    }
}
324
/* Tears down the request: stops any active generator and output streams, runs
 * the access loggers, and releases the request's memory pool. */
void h2o_dispose_request(h2o_req_t *req)
{
    close_generator_and_filters(req);

    h2o_timer_unlink(&req->_timeout_entry);

    /* emit access logs (loggers exist only once a pathconf has been bound) */
    if (req->pathconf != NULL) {
        size_t i;
        for (i = 0; i != req->num_loggers; ++i)
            req->loggers[i]->log_access(req->loggers[i], req);
    }

    if (req->error_logs != NULL)
        h2o_buffer_dispose(&req->error_logs);

    h2o_mem_clear_pool(&req->pool);
}
343
/* Validates the combination of pseudo-headers: the masque scheme may be used
 * only with CONNECT-UDP, and CONNECT-UDP requires masque with a path of "/".
 * Returns non-zero when valid. */
int h2o_req_validate_pseudo_headers(h2o_req_t *req)
{
    if (h2o_memis(req->input.method.base, req->input.method.len, H2O_STRLIT("CONNECT-UDP"))) {
        return req->input.scheme == &H2O_URL_SCHEME_MASQUE &&
               h2o_memis(req->input.path.base, req->input.path.len, H2O_STRLIT("/"));
    }

    return req->input.scheme != &H2O_URL_SCHEME_MASQUE;
}
358
h2o_get_first_handler(h2o_req_t * req)359 h2o_handler_t *h2o_get_first_handler(h2o_req_t *req)
360 {
361 h2o_hostconf_t *hostconf = h2o_req_setup(req);
362 setup_pathconf(req, hostconf);
363 return req->pathconf->handlers.size != 0 ? req->pathconf->handlers.entries[0] : NULL;
364 }
365
/* Entry point for processing a request: binds the host/path configuration
 * (unless already bound by the caller) and runs the handler chain. Must be
 * invoked at most once per request. */
void h2o_process_request(h2o_req_t *req)
{
    assert(!req->process_called);
    req->process_called = 1;

    if (req->pathconf == NULL)
        setup_pathconf(req, h2o_req_setup(req));

    call_handlers(req, req->pathconf->handlers.entries);
}
377
/* Hands the request over to the handlers that follow the one currently
 * processing it (req->handler must belong to the bound pathconf). */
void h2o_delegate_request(h2o_req_t *req)
{
    h2o_handler_t **slot = req->pathconf->handlers.entries,
                  **last = req->pathconf->handlers.entries + req->pathconf->handlers.size;

    /* locate the current handler within the pathconf */
    for (;; ++slot) {
        assert(slot != last);
        if (*slot == req->handler)
            break;
    }

    /* continue with the handlers that follow it */
    call_handlers(req, slot + 1);
}
389
/* timer callback for h2o_delegate_request_deferred */
static void on_delegate_request_cb(h2o_timer_t *entry)
{
    struct st_deferred_request_action_t *args = H2O_STRUCT_FROM_MEMBER(struct st_deferred_request_action_t, timeout, entry);
    h2o_delegate_request(args->req);
}
395
/* Like h2o_delegate_request, but runs on the next loop iteration (outside the
 * current call stack). */
void h2o_delegate_request_deferred(h2o_req_t *req)
{
    create_deferred_action(req, sizeof(struct st_deferred_request_action_t), on_delegate_request_cb);
}
400
/* Dispatches a reprocessed/replayed request: if there are no overrides and the
 * authority maps to one of `hosts`, the request is handled internally;
 * otherwise it is forwarded by the proxy. */
static void process_resolved_request(h2o_req_t *req, h2o_hostconf_t **hosts)
{
    h2o_hostconf_t *hostconf;
    if (req->overrides == NULL &&
        (hostconf = find_hostconf(hosts, req->authority, req->scheme->default_port, &req->authority_wildcard_match)) != NULL) {
        setup_pathconf(req, hostconf);
        call_handlers(req, req->pathconf->handlers.entries);
        return;
    }

    /* uses the current pathconf, in other words, proxy uses the previous pathconf for building filters */
    h2o__proxy_process_request(req);
}
414
/* Restarts processing of the request with a new method/scheme/authority/path
 * (internal redirect or delegation). Closes the current generator/filters,
 * enforces the delegation/reprocess limits, then re-dispatches the request. */
void h2o_reprocess_request(h2o_req_t *req, h2o_iovec_t method, const h2o_url_scheme_t *scheme, h2o_iovec_t authority,
                           h2o_iovec_t path, h2o_req_overrides_t *overrides, int is_delegated)
{
    retain_original_response(req);

    /* close generators and filters that are already running */
    close_generator_and_filters(req);

    /* setup the request/response parameters */
    req->handler = NULL;
    req->method = method;
    req->scheme = scheme;
    req->authority = authority;
    req->path = path;
    req->path_normalized = h2o_url_normalize_path(&req->pool, req->path.base, req->path.len, &req->query_at, &req->norm_indexes);
    req->authority_wildcard_match = h2o_iovec_init(NULL, 0);
    req->overrides = overrides;
    req->res_is_delegated |= is_delegated; /* once delegated, the request stays delegated */
    req->reprocess_if_too_early = 0;
    reset_response(req);

    /* check the delegation (or reprocess) counter to avoid infinite internal loops */
    if (req->res_is_delegated) {
        if (req->remaining_delegations == 0) {
            /* TODO log */
            h2o_send_error_502(req, "Gateway Error", "too many internal delegations", 0);
            return;
        }
        --req->remaining_delegations;
    } else {
        if (req->remaining_reprocesses == 0) {
            /* TODO log */
            h2o_send_error_502(req, "Gateway Error", "too many internal reprocesses", 0);
            return;
        }
        --req->remaining_reprocesses;
    }

    /* reprocessing may target any host registered in the global config */
    process_resolved_request(req, req->conn->ctx->globalconf->hosts);
}
455
/* timer callback for h2o_reprocess_request_deferred; replays the captured
 * arguments */
static void on_reprocess_request_cb(h2o_timer_t *entry)
{
    struct st_reprocess_request_deferred_t *args =
        H2O_STRUCT_FROM_MEMBER(struct st_reprocess_request_deferred_t, super.timeout, entry);
    h2o_reprocess_request(args->super.req, args->method, args->scheme, args->authority, args->path, args->overrides,
                          args->is_delegated);
}
463
/* Like h2o_reprocess_request, but runs on the next loop iteration. The iovec
 * arguments are captured by value; the memory they reference must stay valid
 * until the deferred call fires (they normally live in the request pool). */
void h2o_reprocess_request_deferred(h2o_req_t *req, h2o_iovec_t method, const h2o_url_scheme_t *scheme, h2o_iovec_t authority,
                                    h2o_iovec_t path, h2o_req_overrides_t *overrides, int is_delegated)
{
    struct st_reprocess_request_deferred_t *deferred =
        (struct st_reprocess_request_deferred_t *)create_deferred_action(req, sizeof(*deferred), on_reprocess_request_cb);

    deferred->method = method;
    deferred->scheme = scheme;
    deferred->authority = authority;
    deferred->path = path;
    deferred->overrides = overrides;
    deferred->is_delegated = is_delegated;
}
476
/* Re-runs the request without changing its target: restarts from the handler
 * that was processing it (when known), otherwise re-resolves against the
 * connection's hosts. */
void h2o_replay_request(h2o_req_t *req)
{
    close_generator_and_filters(req);
    reset_response(req);

    if (req->handler != NULL) {
        /* locate the current handler within the pathconf and restart from it */
        h2o_handler_t **handler = req->pathconf->handlers.entries, **end = handler + req->pathconf->handlers.size;
        for (;; ++handler) {
            assert(handler != end);
            if (*handler == req->handler)
                break;
        }
        call_handlers(req, handler);
    } else {
        process_resolved_request(req, req->conn->hosts);
    }
}
494
/* timer callback for h2o_replay_request_deferred */
static void on_replay_request_cb(h2o_timer_t *entry)
{
    struct st_deferred_request_action_t *args = H2O_STRUCT_FROM_MEMBER(struct st_deferred_request_action_t, timeout, entry);
    h2o_replay_request(args->req);
}
500
/* Like h2o_replay_request, but runs on the next loop iteration. */
void h2o_replay_request_deferred(h2o_req_t *req)
{
    create_deferred_action(req, sizeof(struct st_deferred_request_action_t), on_replay_request_cb);
}
505
/* Registers `generator` as the response source and sets up the output-stream
 * (filter) chain; must be called exactly once per response, before h2o_send. */
void h2o_start_response(h2o_req_t *req, h2o_generator_t *generator)
{
    retain_original_response(req);

    /* set generator */
    assert(req->_generator == NULL);
    req->_generator = generator;

    if (req->is_tunnel_req && (req->res.status == 101 || req->res.status == 200)) {
        /* a tunnel has been established; forward response as is (no filters) */
    } else {
        /* setup response filters; prefilters get the first chance to install
         * their ostreams, otherwise start with the ordinary filter chain */
        if (req->prefilters != NULL) {
            req->prefilters->on_setup_ostream(req->prefilters, req, &req->_ostr_top);
        } else {
            h2o_setup_next_ostream(req, &req->_ostr_top);
        }
    }
}
525
/* Initializes `vec` to point at a plain memory region; raw vectors only need
 * the flatten callback (no reference counting). */
void h2o_sendvec_init_raw(h2o_sendvec_t *vec, const void *base, size_t len)
{
    static const h2o_sendvec_callbacks_t raw_callbacks = {h2o_sendvec_flatten_raw};

    vec->callbacks = &raw_callbacks;
    vec->len = len;
    vec->raw = (char *)base;
}
533
/* refcnt callback for immutable vectors; the underlying memory is never freed,
 * so there is nothing to count */
static void sendvec_immutable_update_refcnt(h2o_sendvec_t *vec, h2o_req_t *req, int is_incr)
{
    /* noop */
}
538
/* Initializes `vec` to point at immutable memory; like a raw vector, but with
 * a no-op refcnt callback marking the memory as safe to retain. */
void h2o_sendvec_init_immutable(h2o_sendvec_t *vec, const void *base, size_t len)
{
    static const h2o_sendvec_callbacks_t immutable_callbacks = {h2o_sendvec_flatten_raw, sendvec_immutable_update_refcnt};

    vec->callbacks = &immutable_callbacks;
    vec->len = len;
    vec->raw = (char *)base;
}
546
/* flatten callback for raw vectors: copies `dst.len` bytes starting at offset
 * `off` into `dst`; always succeeds (returns 1) */
int h2o_sendvec_flatten_raw(h2o_sendvec_t *src, h2o_req_t *req, h2o_iovec_t dst, size_t off)
{
    assert(off + dst.len <= src->len);
    memcpy(dst.base, src->raw + off, dst.len);
    return 1;
}
553
/* Pushes the vectors into the top of the output-stream chain; detaches the
 * generator first when this is the final (or errored) batch. */
static void do_sendvec(h2o_req_t *req, h2o_sendvec_t *bufs, size_t bufcnt, h2o_send_state_t state)
{
    assert(req->_generator != NULL);

    /* the generator is done once the stream is no longer in progress */
    if (!h2o_send_state_is_in_progress(state))
        req->_generator = NULL;

    req->_ostr_top->do_send(req->_ostr_top, req, bufs, bufcnt, state);
}
563
/* Sends a list of plain iovecs by wrapping each one in a raw sendvec. */
void h2o_send(h2o_req_t *req, h2o_iovec_t *bufs, size_t bufcnt, h2o_send_state_t state)
{
    /* NOTE(review): alloca assumes bufcnt is small (generators pass a handful
     * of vectors) — confirm no caller supplies an unbounded count */
    h2o_sendvec_t *vecs = alloca(sizeof(*vecs) * bufcnt);
    size_t i;

    for (i = 0; i != bufcnt; ++i)
        h2o_sendvec_init_raw(vecs + i, bufs[i].base, bufs[i].len);

    do_sendvec(req, vecs, bufcnt, state);
}
574
/* Sends a list of sendvecs. A non-raw (callback-based) vector may only be sent
 * alone; multiple vectors are permitted only when all are raw-flattenable. */
void h2o_sendvec(h2o_req_t *req, h2o_sendvec_t *bufs, size_t bufcnt, h2o_send_state_t state)
{
    assert(bufcnt == 0 || (bufs[0].callbacks->flatten == &h2o_sendvec_flatten_raw || bufcnt == 1));
    do_sendvec(req, bufs, bufcnt, state);
}
580
h2o_add_prefilter(h2o_req_t * req,size_t alignment,size_t sz)581 h2o_req_prefilter_t *h2o_add_prefilter(h2o_req_t *req, size_t alignment, size_t sz)
582 {
583 h2o_req_prefilter_t *prefilter = h2o_mem_alloc_pool_aligned(&req->pool, alignment, sz);
584 prefilter->next = req->prefilters;
585 req->prefilters = prefilter;
586 return prefilter;
587 }
588
/* Allocates an output stream of `sz` bytes from the request pool and links it
 * in front of `*slot`; callbacks are cleared for the caller to fill in. */
h2o_ostream_t *h2o_add_ostream(h2o_req_t *req, size_t alignment, size_t sz, h2o_ostream_t **slot)
{
    h2o_ostream_t *created = h2o_mem_alloc_pool_aligned(&req->pool, alignment, sz);

    created->do_send = NULL;
    created->stop = NULL;
    created->send_informational = NULL;
    created->next = *slot;
    *slot = created;

    return created;
}
601
/* Applies an environment configuration to the request, parent scopes first so
 * that inner scopes override outer ones. `sets` stores name/value pairs at
 * alternating indices (hence the stride of 2). */
static void apply_env(h2o_req_t *req, h2o_envconf_t *env)
{
    size_t i;

    if (env->parent != NULL)
        apply_env(req, env->parent);
    for (i = 0; i != env->unsets.size; ++i)
        h2o_req_unsetenv(req, env->unsets.entries[i].base, env->unsets.entries[i].len);
    for (i = 0; i != env->sets.size; i += 2)
        *h2o_req_getenv(req, env->sets.entries[i].base, env->sets.entries[i].len, 1) = env->sets.entries[i + 1];
}
613
/* Binds the selected host and path configuration to the request, caching the
 * pathconf's filter/logger lists and applying its environment settings. */
void h2o_req_bind_conf(h2o_req_t *req, h2o_hostconf_t *hostconf, h2o_pathconf_t *pathconf)
{
    req->hostconf = hostconf;
    req->pathconf = pathconf;

    /* cache filter and logger lists for direct access */
    req->num_filters = pathconf->_filters.size;
    req->filters = pathconf->_filters.entries;
    req->num_loggers = pathconf->_loggers.size;
    req->loggers = pathconf->_loggers.entries;

    if (pathconf->env != NULL)
        apply_env(req, pathconf->env);
}
628
/* Schedules h2o_proceed_response to run on the next loop iteration (the
 * request's timer is pre-initialized with deferred_proceed_cb). */
void h2o_proceed_response_deferred(h2o_req_t *req)
{
    h2o_timer_link(req->conn->ctx->loop, 0, &req->_timeout_entry);
}
633
/* Forwards the vectors to the next output stream in the chain; when the stream
 * is complete, the current ostream is unlinked from the top of the chain. */
void h2o_ostream_send_next(h2o_ostream_t *ostream, h2o_req_t *req, h2o_sendvec_t *bufs, size_t bufcnt, h2o_send_state_t state)
{
    if (!h2o_send_state_is_in_progress(state)) {
        assert(req->_ostr_top == ostream);
        req->_ostr_top = ostream->next;
    }
    ostream->next->do_send(ostream->next, req, bufs, bufcnt, state);
}
642
/* Populates `req->res.mime_attr` from the response's content-type header via
 * the pathconf's mimemap, defaulting to the as-is attributes; no-op when the
 * attributes are already set. */
void h2o_req_fill_mime_attributes(h2o_req_t *req)
{
    ssize_t content_type_index;
    h2o_mimemap_type_t *mime;

    if (req->res.mime_attr != NULL)
        return;

    if ((content_type_index = h2o_find_header(&req->res.headers, H2O_TOKEN_CONTENT_TYPE, -1)) != -1 &&
        (mime = h2o_mimemap_get_type_by_mimetype(req->pathconf->mimemap, req->res.headers.entries[content_type_index].value, 0)) !=
            NULL)
        req->res.mime_attr = &mime->data.attr;
    else
        req->res.mime_attr = &h2o_mime_attributes_as_is;
}
658
/* Sends `body` as the complete response in one shot (`len` may be SIZE_MAX to
 * use strlen); the body is copied into the request pool first. For HEAD
 * requests the body is omitted. */
void h2o_send_inline(h2o_req_t *req, const char *body, size_t len)
{
    static h2o_generator_t generator = {NULL, NULL};

    h2o_iovec_t buf = h2o_strdup(&req->pool, body, len);
    /* the function intentionally does not set the content length, since it may be used for generating 304 response, etc. */
    /* req->res.content_length = buf.len; */

    h2o_start_response(req, &generator);

    if (h2o_memis(req->input.method.base, req->input.method.len, H2O_STRLIT("HEAD")))
        h2o_send(req, NULL, 0, H2O_SEND_STATE_FINAL);
    else
        h2o_send(req, &buf, 1, H2O_SEND_STATE_FINAL);
}
674
/* Sends a plain-text error response with the given status/reason/body.
 * `flags` (H2O_SEND_ERROR_*) controls filter bypass, connection close, and
 * header retention. Safe to call before a pathconf is bound. */
void h2o_send_error_generic(h2o_req_t *req, int status, const char *reason, const char *body, int flags)
{
    /* bind the fallback path when the error occurs before path resolution */
    if (req->pathconf == NULL) {
        h2o_hostconf_t *hostconf = h2o_req_setup(req);
        h2o_req_bind_conf(req, hostconf, &hostconf->fallback_path);
    }

    /* If the request is broken or incomplete, do not apply filters, as it would be dangerous to do so. Legitimate clients would not
     * send broken requests, so we do not need to decorate error responses using errordoc handler or anything else. */
    if ((flags & H2O_SEND_ERROR_BROKEN_REQUEST) != 0)
        req->_next_filter_index = SIZE_MAX;

    if ((flags & H2O_SEND_ERROR_HTTP1_CLOSE_CONNECTION) != 0)
        req->http1_is_persistent = 0;

    req->res.status = status;
    req->res.reason = reason;
    req->res.content_length = strlen(body);

    /* drop any headers set so far unless the caller asked to keep them */
    if ((flags & H2O_SEND_ERROR_KEEP_HEADERS) == 0)
        memset(&req->res.headers, 0, sizeof(req->res.headers));

    h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_CONTENT_TYPE, NULL, H2O_STRLIT("text/plain; charset=utf-8"));

    h2o_send_inline(req, body, SIZE_MAX);
}
701
/* Declares a pair of functions for emitting an error response of the given
 * status on the next loop iteration: the timer callback, and the entry point
 * h2o_send_error_deferred_<status>. Deferral is used where the error is raised
 * from a context that must not start a response synchronously (e.g.,
 * h2o_send_redirect_internal). The callback also bumps the per-context
 * emitted-error counter. */
#define DECL_SEND_ERROR_DEFERRED(status_) \
    static void send_error_deferred_cb_##status_(h2o_timer_t *entry) \
    { \
        struct st_send_error_deferred_t *args = H2O_STRUCT_FROM_MEMBER(struct st_send_error_deferred_t, _timeout, entry); \
        reset_response(args->req); \
        args->req->conn->ctx->emitted_error_status[H2O_STATUS_ERROR_##status_]++; \
        h2o_send_error_generic(args->req, args->status, args->reason, args->body, args->flags); \
    } \
\
    static void h2o_send_error_deferred_##status_(h2o_req_t *req, const char *reason, const char *body, int flags) \
    { \
        struct st_send_error_deferred_t *args = h2o_mem_alloc_pool(&req->pool, *args, 1); \
        *args = (struct st_send_error_deferred_t){req, status_, reason, body, flags}; \
        h2o_timer_init(&args->_timeout, send_error_deferred_cb_##status_); \
        h2o_timer_link(req->conn->ctx->loop, 0, &args->_timeout); \
    }

DECL_SEND_ERROR_DEFERRED(502)

#undef DECL_SEND_ERROR_DEFERRED
722
/* Formats a printf-style error message, prefixes it with "[module] in
 * request:<path>:" (path truncated to 32 characters), and hands the pair to
 * the request's error-log delegate, which stores and/or emits it. */
void h2o_req_log_error(h2o_req_t *req, const char *module, const char *fmt, ...)
{
#define INITIAL_BUF_SIZE 256

    char *errbuf = h2o_mem_alloc_pool(&req->pool, char, INITIAL_BUF_SIZE);
    int errlen;
    va_list args;

    va_start(args, fmt);
    errlen = vsnprintf(errbuf, INITIAL_BUF_SIZE, fmt, args);
    va_end(args);

    if (errlen < 0) {
        /* vsnprintf failed (e.g., encoding error); emit an empty message rather
         * than passing the negative value on as a huge unsigned length */
        errlen = 0;
    } else if (errlen >= INITIAL_BUF_SIZE) {
        /* first attempt was truncated; retry with the exact required size */
        errbuf = h2o_mem_alloc_pool(&req->pool, char, (size_t)errlen + 1);
        va_start(args, fmt);
        errlen = vsnprintf(errbuf, (size_t)errlen + 1, fmt, args);
        va_end(args);
        if (errlen < 0)
            errlen = 0;
    }
    h2o_iovec_t msg = h2o_iovec_init(errbuf, errlen);

#undef INITIAL_BUF_SIZE

    /* build prefix: "[<module>] in request:<up to 32 chars of path>:" */
    char *pbuf = h2o_mem_alloc_pool(&req->pool, char, sizeof("[] in request::") + 32 + strlen(module)), *p = pbuf;
    p += sprintf(p, "[%s] in request:", module);
    if (req->path.len < 32) {
        memcpy(p, req->path.base, req->path.len);
        p += req->path.len;
    } else {
        /* truncate long paths to 29 characters plus "..." */
        memcpy(p, req->path.base, 29);
        p += 29;
        memcpy(p, "...", 3);
        p += 3;
    }
    *p++ = ':';
    h2o_iovec_t prefix = h2o_iovec_init(pbuf, p - pbuf);

    /* run error callback (save and emit the log if needed) */
    req->error_log_delegate.cb(req->error_log_delegate.data, prefix, msg);
}
763
/* Sends a redirect response pointing at `url`. When the response is being
 * delegated, the redirect is handled internally (reprocessed) instead of being
 * returned to the client. The HTML body includes the escaped target URL. */
void h2o_send_redirect(h2o_req_t *req, int status, const char *reason, const char *url, size_t url_len)
{
    /* delegated responses follow the redirect internally */
    if (req->res_is_delegated) {
        h2o_iovec_t method = h2o_get_redirect_method(req->method, status);
        h2o_send_redirect_internal(req, method, url, url_len, 0);
        return;
    }

    static h2o_generator_t generator = {NULL, NULL};
    static const h2o_iovec_t body_prefix = {H2O_STRLIT("<!DOCTYPE html><TITLE>Moved</TITLE><P>The document has moved <A HREF=\"")};
    static const h2o_iovec_t body_suffix = {H2O_STRLIT("\">here</A>")};

    /* build and send response; HEAD requests get no body */
    h2o_iovec_t bufs[3];
    size_t bufcnt;
    if (h2o_memis(req->input.method.base, req->input.method.len, H2O_STRLIT("HEAD"))) {
        req->res.content_length = SIZE_MAX;
        bufcnt = 0;
    } else {
        bufs[0] = body_prefix;
        bufs[1] = h2o_htmlescape(&req->pool, url, url_len); /* escape to avoid HTML injection via the URL */
        bufs[2] = body_suffix;
        bufcnt = 3;
        req->res.content_length = body_prefix.len + bufs[1].len + body_suffix.len;
    }
    req->res.status = status;
    req->res.reason = reason;
    req->res.headers = (h2o_headers_t){NULL};
    h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_LOCATION, NULL, url, url_len);
    h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_CONTENT_TYPE, NULL, H2O_STRLIT("text/html; charset=utf-8"));
    h2o_start_response(req, &generator);
    h2o_send(req, bufs, bufcnt, H2O_SEND_STATE_FINAL);
}
797
/* Follows a redirect internally: parses `url_str` (possibly relative),
 * resolves it against the current request, and schedules a deferred reprocess.
 * On an unparsable location a deferred 502 is sent instead. */
void h2o_send_redirect_internal(h2o_req_t *req, h2o_iovec_t method, const char *url_str, size_t url_len, int preserve_overrides)
{
    h2o_url_t url;

    /* parse the location URL */
    if (h2o_url_parse_relative(url_str, url_len, &url) != 0) {
        /* TODO log h2o_error_printf("[proxy] cannot handle location header: %.*s\n", (int)url_len, url); */
        h2o_send_error_deferred_502(req, "Gateway Error", "internal error", 0);
        return;
    }
    /* convert the location to absolute (while creating copies of the values passed to the deferred call) */
    if (url.scheme == NULL)
        url.scheme = req->scheme;
    if (url.authority.base == NULL) {
        if (req->hostconf != NULL)
            url.authority = req->hostconf->authority.hostport;
        else
            url.authority = req->authority;
    } else {
        if (h2o_lcstris(url.authority.base, url.authority.len, req->authority.base, req->authority.len)) {
            /* same authority; reuse the request's copy */
            url.authority = req->authority;
        } else {
            /* redirecting to a different authority; overrides no longer apply */
            url.authority = h2o_strdup(&req->pool, url.authority.base, url.authority.len);
            preserve_overrides = 0;
        }
    }
    /* resolve a relative path against the request path */
    h2o_iovec_t base_path = req->path;
    h2o_url_resolve_path(&base_path, &url.path);
    url.path = h2o_concat(&req->pool, base_path, url.path);

    h2o_reprocess_request_deferred(req, method, url.scheme, url.authority, url.path, preserve_overrides ? req->overrides : NULL, 1);
}
830
/* Returns the method to be used when following a redirect: 307 and 308
 * preserve the request method; other redirect statuses convert POST to GET
 * (matching de-facto browser behavior, RFC 7231). */
h2o_iovec_t h2o_get_redirect_method(h2o_iovec_t method, int status)
{
    int preserves_method = status == 307 || status == 308;

    if (!preserves_method && h2o_memis(method.base, method.len, H2O_STRLIT("POST")))
        return h2o_iovec_init(H2O_STRLIT("GET"));
    return method;
}
837
/* callback invoked by h2o_extract_push_path_from_link_header for each path to
 * be pushed; forwards to the connection's push_path callback when supported */
static void do_push_path(void *_req, const char *path, size_t path_len, int is_critical)
{
    h2o_req_t *req = _req;

    if (req->conn->callbacks->push_path != NULL)
        req->conn->callbacks->push_path(req, path, path_len, is_critical);
}
845
/* Scans a Link header value for preload targets and issues server pushes for
 * them; returns the (possibly rewritten) header value to be sent downstream. */
h2o_iovec_t h2o_push_path_in_link_header(h2o_req_t *req, const char *value, size_t value_len)
{
    h2o_iovec_t ret = h2o_iovec_init(value, value_len);

    /* for delegated responses, pass the effective scheme/authority so that
     * cross-origin checks are performed against the right base */
    h2o_extract_push_path_from_link_header(&req->pool, value, value_len, req->path_normalized, req->input.scheme,
                                           req->input.authority, req->res_is_delegated ? req->scheme : NULL,
                                           req->res_is_delegated ? &req->authority : NULL, do_push_path, req, &ret,
                                           req->hostconf->http2.allow_cross_origin_push);

    return ret;
}
857
/* Appends a Date header to the response, using the context's cached RFC 1123
 * timestamp string. */
void h2o_resp_add_date_header(h2o_req_t *req)
{
    h2o_timestamp_t now = h2o_get_timestamp(req->conn->ctx, &req->pool);

    h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_DATE, NULL, now.str->rfc1123, strlen(now.str->rfc1123));
}
863
/* Emits the 1xx (informational) response currently stored in `req->res`, then
 * clears the response state so that the final response starts clean. The
 * response is suppressed when the protocol layer does not support 1xx, when
 * the client sent "no-early-hints: 1", or when a 103 carries no headers. */
void h2o_send_informational(h2o_req_t *req)
{
    /* 1xx must be sent before h2o_start_response is called */
    assert(req->_generator == NULL);
    assert(req->_ostr_top->next == NULL);
    assert(100 <= req->res.status && req->res.status <= 199 && req->res.status != 101);

    if (req->_ostr_top->send_informational == NULL)
        goto Clear;

    /* honor the no-early-hints request header; use ssize_t for the -1 ("not
     * found") sentinel, consistent with h2o_req_fill_mime_attributes */
    ssize_t index;
    if ((index = h2o_find_header(&req->headers, H2O_TOKEN_NO_EARLY_HINTS, -1)) != -1) {
        h2o_iovec_t value = req->headers.entries[index].value;
        if (value.len == 1 && value.base[0] == '1')
            goto Clear;
    }

    /* give the filters a chance to adjust the informational response; use
     * size_t to match the type of req->num_filters */
    size_t i;
    for (i = 0; i != req->num_filters; ++i) {
        h2o_filter_t *filter = req->filters[i];
        if (filter->on_informational != NULL)
            filter->on_informational(filter, req);
    }

    /* skip an empty 103; it would carry no hints */
    if (req->res.status == 103 && req->res.headers.size == 0)
        goto Clear;

    req->_ostr_top->send_informational(req->_ostr_top, req);

Clear:
    /* clear status and headers */
    req->res.status = 0;
    req->res.headers = (h2o_headers_t){NULL, 0, 0};
}
898
/* Resolves `dest` (a possibly-relative redirect target) into an absolute URL
 * in `*resolved`, using the request's scheme/hostconf-authority/path as the
 * base. Returns 0 on success, -1 on parse/resolve failure. */
int h2o_req_resolve_internal_redirect_url(h2o_req_t *req, h2o_iovec_t dest, h2o_url_t *resolved)
{
    h2o_url_t input;

    /* resolve the URL */
    if (h2o_url_parse_relative(dest.base, dest.len, &input) != 0) {
        return -1;
    }
    if (input.scheme != NULL && input.authority.base != NULL) {
        /* already absolute */
        *resolved = input;
    } else {
        h2o_url_t base;
        /* we MUST set the authority to that of hostconf, or an internal redirect might create a TCP connection */
        if (h2o_url_init(&base, req->scheme, req->hostconf->authority.hostport, req->path) != 0) {
            return -1;
        }
        h2o_url_resolve(&req->pool, &base, &input, resolved);
    }

    return 0;
}
920