/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "private-lib-core.h"

void
__lws_wsi_remove_from_sul(struct lws *wsi)
{
	//struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];

	//lwsl_notice("%s: wsi %p, to %p, hr %p\n", __func__, wsi,
	//		&wsi->sul_timeout.list, &wsi->sul_hrtimer.list);

	// lws_dll2_describe(&pt->pt_sul_owner, "pre-remove");
	lws_dll2_remove(&wsi->sul_timeout.list);
	lws_dll2_remove(&wsi->sul_hrtimer.list);
	lws_dll2_remove(&wsi->sul_validity.list);
	// lws_dll2_describe(&pt->pt_sul_owner, "post-remove");
}
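
/*
 * A wsi can sit on up to three sorted usec lists at once: its wsi timeout,
 * its hrtimer and its validity timer all live on the pt's sul owner.  The
 * helper above detaches all three; following the usual lws convention, the
 * leading __ means the caller is expected to already hold the related pt
 * lock.
 */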

/*
 * hrtimer
 */

static void
lws_sul_hrtimer_cb(lws_sorted_usec_list_t *sul)
{
	struct lws *wsi = lws_container_of(sul, struct lws, sul_hrtimer);

	if (wsi->a.protocol &&
	    wsi->a.protocol->callback(wsi, LWS_CALLBACK_TIMER,
				    wsi->user_space, NULL, 0))
		__lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
				     "hrtimer cb errored");
}

void
__lws_set_timer_usecs(struct lws *wsi, lws_usec_t us)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];

	wsi->sul_hrtimer.cb = lws_sul_hrtimer_cb;
	__lws_sul_insert_us(&pt->pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
			    &wsi->sul_hrtimer, us);
}

void
lws_set_timer_usecs(struct lws *wsi, lws_usec_t usecs)
{
	__lws_set_timer_usecs(wsi, usecs);
}
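
/*
 * Usage sketch (illustrative only, not part of this file's logic): from a
 * protocol callback you can schedule a one-shot LWS_CALLBACK_TIMER, eg
 *
 *	lws_set_timer_usecs(wsi, 250 * LWS_US_PER_MS);
 *
 * and cancel a pending one with
 *
 *	lws_set_timer_usecs(wsi, LWS_SET_TIMER_USEC_CANCEL);
 *
 * As the cb above shows, returning nonzero from the LWS_CALLBACK_TIMER
 * handler causes the wsi to be closed.
 */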

/*
 * wsi timeout
 */

static void
lws_sul_wsitimeout_cb(lws_sorted_usec_list_t *sul)
{
	struct lws *wsi = lws_container_of(sul, struct lws, sul_timeout);
	struct lws_context *cx = wsi->a.context;
	struct lws_context_per_thread *pt = &cx->pt[(int)wsi->tsi];

	/* no need to log normal idle keepalive timeout */
//		if (wsi->pending_timeout != PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE)
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	if (wsi->pending_timeout != PENDING_TIMEOUT_USER_OK)
		lwsl_info("%s: %s: TIMEDOUT WAITING on %d "
			  "(did hdr %d, ah %p, wl %d)\n", __func__,
			  lws_wsi_tag(wsi), wsi->pending_timeout,
			  wsi->hdr_parsing_completed, wsi->http.ah,
			  pt->http.ah_wait_list_length);
#if defined(LWS_WITH_CGI)
	if (wsi->http.cgi)
		lwsl_notice("CGI timeout: %s\n", wsi->http.cgi->summary);
#endif
#else
	if (wsi->pending_timeout != PENDING_TIMEOUT_USER_OK)
		lwsl_info("%s: %s: TIMEDOUT WAITING on %d\n", __func__,
				lws_wsi_tag(wsi), wsi->pending_timeout);
#endif
	if (wsi->pending_timeout != PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE)
		/*
		 * Since the connection failed its timeout, it already had a
		 * chance to do something and was unable to... that includes
		 * situations like half closed connections.  So process this
		 * "failed timeout" close as a violent death and don't try to
		 * do protocol cleanup like flush partials.
		 */
		wsi->socket_is_permanently_unusable = 1;
#if defined(LWS_WITH_CLIENT)
	if (lwsi_state(wsi) == LRS_WAITING_SSL)
		lws_inform_client_conn_fail(wsi,
			(void *)"Timed out waiting SSL", 21);
	if (lwsi_state(wsi) == LRS_WAITING_SERVER_REPLY)
		lws_inform_client_conn_fail(wsi,
			(void *)"Timed out waiting server reply", 30);
#endif

	lws_context_lock(cx, __func__);
	lws_pt_lock(pt, __func__);
	__lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS, "timeout");
	lws_pt_unlock(pt);
	lws_context_unlock(cx);
}

void
__lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];

	wsi->sul_timeout.cb = lws_sul_wsitimeout_cb;
	__lws_sul_insert_us(&pt->pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
			    &wsi->sul_timeout,
			    ((lws_usec_t)secs) * LWS_US_PER_SEC);

	lwsl_debug("%s: %s: %d secs, reason %d\n", __func__, lws_wsi_tag(wsi),
			secs, reason);

	wsi->pending_timeout = (char)reason;
}

void
lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];

	lws_context_lock(pt->context, __func__);
	lws_pt_lock(pt, __func__);
	lws_dll2_remove(&wsi->sul_timeout.list);
	lws_pt_unlock(pt);

	if (!secs)
		goto bail;

	if (secs == LWS_TO_KILL_SYNC) {
		lwsl_debug("%s: TO_KILL_SYNC %s\n", __func__, lws_wsi_tag(wsi));
		lws_context_unlock(pt->context);
		lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
				   "to sync kill");
		return;
	}

	if (secs == LWS_TO_KILL_ASYNC)
		secs = 0;

	// assert(!secs || !wsi->mux_stream_immortal);
	if (secs && wsi->mux_stream_immortal)
		lwsl_err("%s: on immortal stream %d %d\n", __func__, reason, secs);

	lws_pt_lock(pt, __func__);
	__lws_set_timeout(wsi, reason, secs);
	lws_pt_unlock(pt);

bail:
	lws_context_unlock(pt->context);
}
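
/*
 * Usage sketch (illustrative only): guard a transaction with a 30s timeout
 *
 *	lws_set_timeout(wsi, PENDING_TIMEOUT_USER_OK, 30);
 *
 * clear any pending timeout once the transaction completed
 *
 *	lws_set_timeout(wsi, NO_PENDING_TIMEOUT, 0);
 *
 * or, as the code above shows, request the connection be closed on the next
 * event loop pass or immediately by passing LWS_TO_KILL_ASYNC or
 * LWS_TO_KILL_SYNC as "secs".
 */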

void
lws_set_timeout_us(struct lws *wsi, enum pending_timeout reason, lws_usec_t us)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];

	lws_pt_lock(pt, __func__);
	lws_dll2_remove(&wsi->sul_timeout.list);
	lws_pt_unlock(pt);

	if (!us)
		return;

	lws_pt_lock(pt, __func__);
	__lws_sul_insert_us(&pt->pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
			    &wsi->sul_timeout, us);

	lwsl_notice("%s: %s: %llu us, reason %d\n", __func__, lws_wsi_tag(wsi),
		   (unsigned long long)us, reason);

	wsi->pending_timeout = (char)reason;
	lws_pt_unlock(pt);
}

#if defined(LWS_WITH_DEPRECATED_THINGS)

/* requires context + vh lock */

int
__lws_timed_callback_remove(struct lws_vhost *vh, struct lws_timed_vh_protocol *p)
{
	lws_start_foreach_llp_safe(struct lws_timed_vh_protocol **, pt,
			      vh->timed_vh_protocol_list, next) {
		if (*pt == p) {
			*pt = p->next;
			lws_dll2_remove(&p->sul.list);
			lws_free(p);

			return 0;
		}
	} lws_end_foreach_llp_safe(pt);

	return 1;
}

void
lws_sul_timed_callback_vh_protocol_cb(lws_sorted_usec_list_t *sul)
{
	struct lws_timed_vh_protocol *tvp = lws_container_of(sul,
					struct lws_timed_vh_protocol, sul);
	lws_fakewsi_def_plwsa(&tvp->vhost->context->pt[0]);

	lws_fakewsi_prep_plwsa_ctx(tvp->vhost->context);
	plwsa->vhost = tvp->vhost; /* not a real bound wsi */
	plwsa->protocol = tvp->protocol;

	lwsl_debug("%s: timed cb: %s, protocol %s, reason %d\n", __func__,
		   lws_vh_tag(tvp->vhost), tvp->protocol->name, tvp->reason);

	tvp->protocol->callback((struct lws *)plwsa, tvp->reason, NULL, NULL, 0);

	__lws_timed_callback_remove(tvp->vhost, tvp);
}

int
lws_timed_callback_vh_protocol_us(struct lws_vhost *vh,
				  const struct lws_protocols *prot, int reason,
				  lws_usec_t us)
{
	struct lws_timed_vh_protocol *p = (struct lws_timed_vh_protocol *)
			lws_malloc(sizeof(*p), "timed_vh");

	if (!p)
		return 1;

	memset(p, 0, sizeof(*p));

	p->tsi_req = lws_pthread_self_to_tsi(vh->context);
	if (p->tsi_req < 0) /* not called from a service thread --> tsi 0 */
		p->tsi_req = 0;

	lws_context_lock(vh->context, __func__); /* context ----------------- */

	p->protocol = prot;
	p->reason = reason;
	p->vhost = vh;

	p->sul.cb = lws_sul_timed_callback_vh_protocol_cb;
	/* list is always at the very top of the sul */
	__lws_sul_insert(&vh->context->pt[p->tsi_req].pt_sul_owner,
			 (lws_sorted_usec_list_t *)&p->sul.list, us);

	// lwsl_notice("%s: %s.%s %d\n", __func__, vh->name, prot->name, secs);

	lws_vhost_lock(vh); /* vhost ---------------------------------------- */
	p->next = vh->timed_vh_protocol_list;
	vh->timed_vh_protocol_list = p;
	lws_vhost_unlock(vh); /* -------------------------------------- vhost */

	lws_context_unlock(vh->context); /* ------------------------- context */

	return 0;
}

int
lws_timed_callback_vh_protocol(struct lws_vhost *vh,
			       const struct lws_protocols *prot, int reason,
			       int secs)
{
	return lws_timed_callback_vh_protocol_us(vh, prot, reason,
					((lws_usec_t)secs) * LWS_US_PER_SEC);
}
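
/*
 * Usage sketch (illustrative only, deprecated API): ask for the protocol to
 * receive a LWS_CALLBACK_USER event on this vhost in 5s, without any wsi
 *
 *	lws_timed_callback_vh_protocol(vh, prot, LWS_CALLBACK_USER, 5);
 *
 * the event is delivered on a fake wsi bound only to the vhost and protocol,
 * as the cb above shows.  New code should schedule its own lws_sul instead.
 */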

#endif

static void
lws_validity_cb(lws_sorted_usec_list_t *sul)
{
	struct lws *wsi = lws_container_of(sul, struct lws, sul_validity);
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
	const lws_retry_bo_t *rbo = wsi->retry_policy;

	/* one of either the ping or hangup validity threshold was crossed */

	if (wsi->validity_hup) {
		lwsl_info("%s: %s: validity too old\n", __func__, lws_wsi_tag(wsi));

		lws_context_lock(wsi->a.context, __func__);
		lws_pt_lock(pt, __func__);
		__lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
				     "validity timeout");
		lws_pt_unlock(pt);
		lws_context_unlock(wsi->a.context);
		return;
	}

	/* schedule a protocol-dependent ping */

	lwsl_info("%s: %s: scheduling validity check\n", __func__, lws_wsi_tag(wsi));

	if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_issue_keepalive))
		lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_issue_keepalive).
							issue_keepalive(wsi, 0);

	/*
	 * We arrange to come back here after the additional ping to hangup time
	 * and do the hangup, unless we get validated (by, eg, a PONG) and
	 * reset the timer
	 */

	assert(rbo->secs_since_valid_hangup > rbo->secs_since_valid_ping);

	wsi->validity_hup = 1;
	__lws_sul_insert_us(&pt->pt_sul_owner[!!wsi->conn_validity_wakesuspend],
			    &wsi->sul_validity,
			    ((uint64_t)rbo->secs_since_valid_hangup -
				 rbo->secs_since_valid_ping) * LWS_US_PER_SEC);
}
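
/*
 * Validity checking is driven by the wsi's lws_retry_bo_t policy.  An
 * illustrative policy (values are examples only) asking lws to issue a
 * keepalive if nothing confirmed the connection for 30s, and to hang up
 * if it is still unvalidated 5s after that:
 *
 *	static const lws_retry_bo_t retry = {
 *		.secs_since_valid_ping	 = 30,
 *		.secs_since_valid_hangup = 35,
 *	};
 *
 * applied, eg, context-wide via info.retry_and_idle_policy at context
 * creation time.
 */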

/*
 * The role calls this back to actually confirm validity on a particular wsi
 * (which may not be the original wsi)
 */

void
_lws_validity_confirmed_role(struct lws *wsi)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
	const lws_retry_bo_t *rbo = wsi->retry_policy;

	if (!rbo || !rbo->secs_since_valid_hangup)
		return;

	wsi->sul_validity.cb = lws_validity_cb;

	wsi->validity_hup = rbo->secs_since_valid_ping >=
			    rbo->secs_since_valid_hangup;

	lwsl_info("%s: %s: setting validity timer %ds (hup %d)\n",
			__func__, lws_wsi_tag(wsi),
			wsi->validity_hup ? rbo->secs_since_valid_hangup :
					    rbo->secs_since_valid_ping,
			wsi->validity_hup);

	__lws_sul_insert_us(&pt->pt_sul_owner[!!wsi->conn_validity_wakesuspend],
			    &wsi->sul_validity,
			    ((uint64_t)(wsi->validity_hup ?
				rbo->secs_since_valid_hangup :
				rbo->secs_since_valid_ping)) * LWS_US_PER_SEC);
}

void
lws_validity_confirmed(struct lws *wsi)
{
	/*
	 * This may be a stream inside a muxed network connection... leave it
	 * to the role to figure out who actually needs to understand their
	 * validity was confirmed.
	 */
	if (!wsi->h2_stream_carries_ws && /* only if not encapsulated */
	    wsi->role_ops &&
	    lws_rops_fidx(wsi->role_ops, LWS_ROPS_issue_keepalive))
		lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_issue_keepalive).
							issue_keepalive(wsi, 1);
}
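
/*
 * Code that sees proof of life on the connection (for ws, eg, receipt of a
 * PONG) calls lws_validity_confirmed(); the role's issue_keepalive(wsi, 1)
 * path then decides which wsi actually gets its validity timer reset via
 * _lws_validity_confirmed_role() above.
 */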