1 /*
2  * Copyright (c) 2016 DeNA Co., Ltd., Kazuho Oku
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a copy
5  * of this software and associated documentation files (the "Software"), to
6  * deal in the Software without restriction, including without limitation the
7  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
8  * sell copies of the Software, and to permit persons to whom the Software is
9  * furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
20  * IN THE SOFTWARE.
21  */
22 #include "h2o.h"
23 
24 extern h2o_status_handler_t h2o_events_status_handler;
25 extern h2o_status_handler_t h2o_requests_status_handler;
26 extern h2o_status_handler_t h2o_durations_status_handler;
27 extern h2o_status_handler_t h2o_ssl_status_handler;
28 
/* logger type associated with the status handler; carries no state beyond the base logger
 * (NOTE(review): not referenced in the visible portion of this file — possibly vestigial) */
struct st_h2o_status_logger_t {
    h2o_logger_t super;
};
32 
/* the handler registered on the path; shared across all threads */
struct st_h2o_root_status_handler_t {
    h2o_handler_t super;
    /* one receiver per worker context; appended by on_context_init, removed by on_context_dispose,
     * used by on_req_json to broadcast a collection request to every thread */
    H2O_VECTOR(h2o_multithread_receiver_t *) receivers;
};
37 
/* per-thread state of the handler, stored as the handler's context */
struct st_h2o_status_context_t {
    h2o_context_t *ctx;                  /* the owning per-thread context */
    h2o_multithread_receiver_t receiver; /* receives collection / completion messages (see on_collect_notify) */
};
42 
/* per-status-module slot used during one collection run */
struct st_status_ctx_t {
    int active; /* non-zero iff the module was selected (all modules, or listed in `?show=`) */
    void *ctx;  /* opaque state returned by the module's init callback (NULL if the module has none) */
};
/* refcounted object (h2o_mem_alloc_shared) that aggregates stats from all threads for one request */
struct st_h2o_status_collector_t {
    struct {
        h2o_req_t *req;                       /* originating request; set to NULL by on_req_close if it goes away */
        h2o_multithread_receiver_t *receiver; /* receiver of the thread that originated the request */
    } src;
    /* number of threads that have not yet contributed; decremented via __sync_sub_and_fetch */
    size_t num_remaining_threads_atomic;
    /* one entry per registered status module, indexed in registration order */
    H2O_VECTOR(struct st_status_ctx_t) status_ctx;
};
55 
/* message sent between threads; malloc'ed by the sender, freed by the receiving thread */
struct st_h2o_status_message_t {
    h2o_multithread_message_t super;
    struct st_h2o_status_collector_t *collector; /* the collection run this message belongs to */
};
60 
/**
 * Runs on each worker thread: lets every selected status module record this thread's stats into
 * its per-run context, then, if this is the last thread to finish, notifies the originating
 * thread (via its receiver) that the response can be built.
 */
static void collect_reqs_of_context(struct st_h2o_status_collector_t *collector, h2o_context_t *ctx)
{
    size_t i; /* size_t, to match statuses.size (the original `int` triggered a sign-compare) */

    for (i = 0; i < ctx->globalconf->statuses.size; i++) {
        struct st_status_ctx_t *sc = collector->status_ctx.entries + i;
        h2o_status_handler_t *sh = ctx->globalconf->statuses.entries[i];
        /* skip modules that were filtered out by `?show=` or that do no per-thread work */
        if (sc->active && sh->per_thread != NULL)
            sh->per_thread(sc->ctx, ctx);
    }

    /* last thread to finish wakes up the originating thread so it can send the response */
    if (__sync_sub_and_fetch(&collector->num_remaining_threads_atomic, 1) == 0) {
        struct st_h2o_status_message_t *message = h2o_mem_alloc(sizeof(*message));
        message->super = (h2o_multithread_message_t){{NULL}};
        message->collector = collector;
        h2o_multithread_send_message(collector->src.receiver, &message->super);
    }
}
79 
send_response(struct st_h2o_status_collector_t * collector)80 static void send_response(struct st_h2o_status_collector_t *collector)
81 {
82     static h2o_generator_t generator = {NULL, NULL};
83     h2o_req_t *req;
84     size_t nr_statuses;
85     int i;
86     int cur_resp = 0;
87 
88     req = collector->src.req;
89     if (!req) {
90         h2o_mem_release_shared(collector);
91         return;
92     }
93 
94     nr_statuses = req->conn->ctx->globalconf->statuses.size;
95     size_t nr_resp = nr_statuses + 2; // 2 for the footer and header
96     h2o_iovec_t resp[nr_resp];
97 
98     memset(resp, 0, sizeof(resp[0]) * nr_resp);
99     resp[cur_resp++] = (h2o_iovec_t){H2O_STRLIT("{\n")};
100 
101     int coma_removed = 0;
102     for (i = 0; i < req->conn->ctx->globalconf->statuses.size; i++) {
103         h2o_status_handler_t *sh = req->conn->ctx->globalconf->statuses.entries[i];
104         if (!collector->status_ctx.entries[i].active) {
105             continue;
106         }
107         resp[cur_resp++] = sh->final(collector->status_ctx.entries[i].ctx, req->conn->ctx->globalconf, req);
108         if (resp[cur_resp - 1].len > 0 && !coma_removed) {
109             /* requests come in with a leading coma, replace if with a space */
110             resp[cur_resp - 1].base[0] = ' ';
111             coma_removed = 1;
112         }
113     }
114     resp[cur_resp++] = (h2o_iovec_t){H2O_STRLIT("\n}\n")};
115 
116     req->res.status = 200;
117     h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_CONTENT_TYPE, NULL, H2O_STRLIT("text/plain; charset=utf-8"));
118     h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_CACHE_CONTROL, NULL, H2O_STRLIT("no-cache, no-store"));
119     h2o_start_response(req, &generator);
120     h2o_send(req, resp, h2o_memis(req->input.method.base, req->input.method.len, H2O_STRLIT("HEAD")) ? 0 : nr_resp,
121              H2O_SEND_STATE_FINAL);
122     h2o_mem_release_shared(collector);
123 }
124 
/* receiver callback, invoked on each worker thread's loop; drains the message queue.
 * A message with a non-zero remaining-thread count is a broadcast asking this thread to contribute
 * its stats; a zero count means every thread has contributed and this (originating) thread should
 * build and send the response. */
static void on_collect_notify(h2o_multithread_receiver_t *receiver, h2o_linklist_t *messages)
{
    struct st_h2o_status_context_t *status_ctx = H2O_STRUCT_FROM_MEMBER(struct st_h2o_status_context_t, receiver, receiver);

    while (!h2o_linklist_is_empty(messages)) {
        struct st_h2o_status_message_t *message = H2O_STRUCT_FROM_MEMBER(struct st_h2o_status_message_t, super, messages->next);
        struct st_h2o_status_collector_t *collector = message->collector;
        /* unlink and free before acting: collect_reqs_of_context / send_response may release the collector */
        h2o_linklist_unlink(&message->super.link);
        free(message);

        /* add-and-fetch of 0 is an atomic read of the counter */
        if (__sync_add_and_fetch(&collector->num_remaining_threads_atomic, 0) != 0) {
            collect_reqs_of_context(collector, status_ctx->ctx);
        } else {
            send_response(collector);
        }
    }
}
142 
/* dispose callback of the refcounted collector; intentionally empty — the status_ctx vector is
 * allocated from the request's memory pool, so there is nothing to free here
 * (NOTE(review): confirm the pool outlives all worker-thread accesses to the vector) */
static void on_collector_dispose(void *_collector)
{
}
146 
on_req_close(void * p)147 static void on_req_close(void *p)
148 {
149     struct st_h2o_status_collector_t *collector = *(void **)p;
150     collector->src.req = NULL;
151     h2o_mem_release_shared(collector);
152 }
153 
/* handles a request to the "/json" endpoint: builds a collector, selects the status modules to
 * run (all of them, or only those named in `status_list`), and broadcasts a collection request to
 * every worker thread. The response itself is sent later by send_response, once the last thread
 * reports in. Returns 0 (request accepted). */
static int on_req_json(struct st_h2o_root_status_handler_t *self, h2o_req_t *req, h2o_iovec_t status_list)
{
    { /* construct collector and send request to every thread */
        struct st_h2o_status_context_t *status_ctx = h2o_context_get_handler_context(req->conn->ctx, &self->super);
        struct st_h2o_status_collector_t *collector = h2o_mem_alloc_shared(NULL, sizeof(*collector), on_collector_dispose);
        size_t i;

        memset(collector, 0, sizeof(*collector));
        /* one slot per registered status module, in registration order (send_response relies on the
         * indices matching); NOTE(review): entries are allocated from req->pool yet read by other
         * threads — presumably the refcount + on_req_close keep this safe; verify */
        for (i = 0; i < req->conn->ctx->globalconf->statuses.size; i++) {
            h2o_status_handler_t *sh;

            h2o_vector_reserve(&req->pool, &collector->status_ctx, collector->status_ctx.size + 1);
            sh = req->conn->ctx->globalconf->statuses.entries[i];

            /* a non-NULL status_list means only the modules named in `?show=` are activated */
            if (status_list.base) {
                if (!h2o_contains_token(status_list.base, status_list.len, sh->name.base, sh->name.len, ',')) {
                    collector->status_ctx.entries[collector->status_ctx.size].active = 0;
                    goto Skip;
                }
            }
            if (sh->init) {
                collector->status_ctx.entries[collector->status_ctx.size].ctx = sh->init();
            }
            collector->status_ctx.entries[collector->status_ctx.size].active = 1;
        Skip:
            collector->status_ctx.size++;
        }
        collector->src.req = req;
        collector->src.receiver = &status_ctx->receiver;
        /* every worker thread (including this one) must report before the response is sent */
        collector->num_remaining_threads_atomic = self->receivers.size;

        for (i = 0; i != self->receivers.size; ++i) {
            struct st_h2o_status_message_t *message = h2o_mem_alloc(sizeof(*message));
            *message = (struct st_h2o_status_message_t){{{NULL}}, collector};
            h2o_multithread_send_message(self->receivers.entries[i], &message->super);
        }

        /* collector is also retained by the on_req_close callback */
        *(struct st_h2o_status_collector_t **)h2o_mem_alloc_shared(&req->pool, sizeof(collector), on_req_close) = collector;
        h2o_mem_addref_shared(collector);
    }

    return 0;
}
198 
/* entry point of the handler: dispatches on the sub-path below the configured prefix.
 * "" or "/" serves the HTML status page; "/json" serves the JSON API; anything else declines. */
static int on_req(h2o_handler_t *_self, h2o_req_t *req)
{
    struct st_h2o_root_status_handler_t *self = (void *)_self;

    /* strip the configured prefix (ignoring its trailing '/', if any) off the normalized path */
    size_t skip = req->pathconf->path.len;
    if (req->pathconf->path.base[skip - 1] == '/')
        --skip;
    h2o_iovec_t subpath = h2o_iovec_init(req->path_normalized.base + skip, req->path_normalized.len - skip);

    if (subpath.len == 0 || h2o_memis(subpath.base, subpath.len, H2O_STRLIT("/"))) {
        /* root of the handler returns HTML that renders the status */
        const char *root = getenv("H2O_ROOT");
        if (root == NULL)
            root = H2O_TO_STR(H2O_ROOT);
        h2o_iovec_t fn =
            h2o_concat(&req->pool, h2o_iovec_init(root, strlen(root)), h2o_iovec_init(H2O_STRLIT("/share/h2o/status/index.html")));
        h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_CACHE_CONTROL, NULL, H2O_STRLIT("no-cache"));
        return h2o_file_send(req, 200, "OK", fn.base, h2o_iovec_init(H2O_STRLIT("text/html; charset=utf-8")), 0);
    }

    if (h2o_memis(subpath.base, subpath.len, H2O_STRLIT("/json"))) {
        /* "/json" maps to the JSON API; an optional "?show=a,b,..." query restricts the modules shown */
        h2o_iovec_t status_list = {NULL, 0}; /* NULL means we'll show all statuses */
        if (req->query_at != SIZE_MAX && req->path.len - req->query_at > 6 &&
            h2o_memis(&req->path.base[req->query_at], 6, "?show=", 6))
            status_list = h2o_iovec_init(&req->path.base[req->query_at + 6], req->path.len - req->query_at - 6);
        return on_req_json(self, req, status_list);
    }

    return -1;
}
229 
/* per-thread setup: allocates this thread's status context, starts listening for collection
 * messages, and registers the receiver so on_req_json can broadcast to this thread */
static void on_context_init(h2o_handler_t *_self, h2o_context_t *ctx)
{
    struct st_h2o_root_status_handler_t *self = (void *)_self;
    struct st_h2o_status_context_t *tc = h2o_mem_alloc(sizeof(*tc));

    tc->ctx = ctx;
    h2o_multithread_register_receiver(ctx->queue, &tc->receiver, on_collect_notify);

    /* make the receiver visible to the broadcast loop in on_req_json */
    h2o_vector_reserve(NULL, &self->receivers, self->receivers.size + 1);
    self->receivers.entries[self->receivers.size++] = &tc->receiver;

    h2o_context_set_handler_context(ctx, &self->super, tc);
}
243 
/* per-thread teardown: removes this thread's receiver from the broadcast list, unregisters it
 * from the loop, and frees the per-thread context */
static void on_context_dispose(h2o_handler_t *_self, h2o_context_t *ctx)
{
    struct st_h2o_root_status_handler_t *self = (void *)_self;
    struct st_h2o_status_context_t *status_ctx = h2o_context_get_handler_context(ctx, &self->super);
    size_t i;

    /* locate this context's receiver in the list */
    for (i = 0; i != self->receivers.size; ++i)
        if (self->receivers.entries[i] == &status_ctx->receiver)
            break;
    assert(i != self->receivers.size);
    /* FIX: shift the remaining entries left over the removed slot; the previous code copied in the
     * wrong direction (duplicating the removed entry) and passed an element count where memmove
     * expects a byte count */
    memmove(self->receivers.entries + i, self->receivers.entries + i + 1,
            sizeof(self->receivers.entries[0]) * (self->receivers.size - i - 1));
    --self->receivers.size;

    h2o_multithread_unregister_receiver(ctx->queue, &status_ctx->receiver);

    free(status_ctx);
}
261 
/* public entry point: installs the status handler on `conf` and registers the built-in status
 * modules with the global configuration (order is preserved; it determines JSON output order) */
void h2o_status_register(h2o_pathconf_t *conf)
{
    struct st_h2o_root_status_handler_t *handler = (void *)h2o_create_handler(conf, sizeof(*handler));

    handler->super.on_context_init = on_context_init;
    handler->super.on_context_dispose = on_context_dispose;
    handler->super.on_req = on_req;

    h2o_config_register_status_handler(conf->global, &h2o_requests_status_handler);
    h2o_config_register_status_handler(conf->global, &h2o_events_status_handler);
    h2o_config_register_status_handler(conf->global, &h2o_ssl_status_handler);
    h2o_config_register_status_handler(conf->global, &h2o_durations_status_handler);
}
273