1 /*
2  * libwebsockets - small server side websockets and web server implementation
3  *
4  * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22  * IN THE SOFTWARE.
23  */
24 
25 #include "private-lib-core.h"
26 
27 const char *
lws_wsi_tag(struct lws * wsi)28 lws_wsi_tag(struct lws *wsi)
29 {
30 	if (!wsi)
31 		return "[null wsi]";
32 	return lws_lc_tag(&wsi->lc);
33 }
34 
#if defined (_DEBUG)
/* debug builds only: replace the role bits of wsi->wsistate */
void lwsi_set_role(struct lws *wsi, lws_wsi_state_t role)
{
	wsi->wsistate = (wsi->wsistate & (~LWSI_ROLE_MASK)) | role;

	lwsl_debug("lwsi_set_role(%s, 0x%lx)\n", lws_wsi_tag(wsi),
					(unsigned long)wsi->wsistate);
}

/* debug builds only: replace the connection-state bits of wsi->wsistate */
void lwsi_set_state(struct lws *wsi, lws_wsi_state_t lrs)
{
	wsi->wsistate = (wsi->wsistate & (unsigned int)(~LRS_MASK)) | lrs;

	lwsl_debug("lwsi_set_state(%s, 0x%lx)\n", lws_wsi_tag(wsi),
					(unsigned long)wsi->wsistate);
}
#endif
52 
53 
/*
 * Bind wsi to vhost vh and account for it in vh->count_bound_wsi.
 * No-op if already bound to this vhost.  Takes the context lock around
 * the accounting, so the caller must not already hold it.
 */
void
lws_vhost_bind_wsi(struct lws_vhost *vh, struct lws *wsi)
{
	if (wsi->a.vhost == vh)
		return;
	lws_context_lock(vh->context, __func__); /* ---------- context { */
	wsi->a.vhost = vh;
	vh->count_bound_wsi++;
	lws_context_unlock(vh->context); /* } context ---------- */
	/* count read for logging outside the lock; informational only */
	lwsl_debug("%s: vh %s: wsi %s/%s, count_bound_wsi %d\n", __func__,
		   vh->name, wsi->role_ops ? wsi->role_ops->name : "none",
		   wsi->a.protocol ? wsi->a.protocol->name : "none",
		   vh->count_bound_wsi);
	assert(wsi->a.vhost->count_bound_wsi > 0);
}
69 
70 
/* req cx lock... acquires vh lock */

/*
 * Detach wsi from its vhost, decrementing vh->count_bound_wsi.  If this
 * was the last bound wsi and the vhost is marked being_destroyed, finish
 * the deferred vhost destruction via __lws_vhost_destroy2() (vh lock is
 * dropped first).  wsi->a.vhost is NULL on return.
 */
void
__lws_vhost_unbind_wsi(struct lws *wsi)
{
	if (!wsi->a.vhost)
		return;

	lws_context_assert_lock_held(wsi->a.context);

	lws_vhost_lock(wsi->a.vhost);

	assert(wsi->a.vhost->count_bound_wsi > 0);
	wsi->a.vhost->count_bound_wsi--;
	lwsl_debug("%s: vh %s: count_bound_wsi %d\n", __func__,
		   wsi->a.vhost->name, wsi->a.vhost->count_bound_wsi);

	if (!wsi->a.vhost->count_bound_wsi &&
	    wsi->a.vhost->being_destroyed) {
		/*
		 * We have closed all wsi that were bound to this vhost
		 * by any pt: nothing can be servicing any wsi belonging
		 * to it any more.
		 *
		 * Finalize the vh destruction... must drop vh lock
		 */
		lws_vhost_unlock(wsi->a.vhost);
		__lws_vhost_destroy2(wsi->a.vhost);
		wsi->a.vhost = NULL;
		return;
	}

	lws_vhost_unlock(wsi->a.vhost);
	wsi->a.vhost = NULL;
}
105 
/*
 * Return the wsi owning the actual network connection for wsi: for mux
 * substreams (h2 / mqtt) that is the topmost parent in the mux tree,
 * otherwise wsi itself.  NULL in, NULL out.
 */
struct lws *
lws_get_network_wsi(struct lws *wsi)
{
	if (!wsi)
		return NULL;

#if defined(LWS_WITH_HTTP2) || defined(LWS_ROLE_MQTT)
	if (!wsi->mux_substream
#if defined(LWS_WITH_CLIENT)
			&& !wsi->client_mux_substream
#endif
	)
		return wsi;

	/* walk up to the root of the mux tree: that is the network wsi */
	while (wsi->mux.parent_wsi)
		wsi = wsi->mux.parent_wsi;
#endif

	return wsi;
}
126 
127 
128 const struct lws_protocols *
lws_vhost_name_to_protocol(struct lws_vhost * vh,const char * name)129 lws_vhost_name_to_protocol(struct lws_vhost *vh, const char *name)
130 {
131 	int n;
132 
133 	for (n = 0; n < vh->count_protocols; n++)
134 		if (vh->protocols[n].name && !strcmp(name, vh->protocols[n].name))
135 			return &vh->protocols[n];
136 
137 	return NULL;
138 }
139 
140 int
lws_callback_all_protocol(struct lws_context * context,const struct lws_protocols * protocol,int reason)141 lws_callback_all_protocol(struct lws_context *context,
142 			  const struct lws_protocols *protocol, int reason)
143 {
144 	struct lws_context_per_thread *pt = &context->pt[0];
145 	unsigned int n, m = context->count_threads;
146 	struct lws *wsi;
147 
148 	while (m--) {
149 		for (n = 0; n < pt->fds_count; n++) {
150 			wsi = wsi_from_fd(context, pt->fds[n].fd);
151 			if (!wsi)
152 				continue;
153 			if (wsi->a.protocol == protocol)
154 				protocol->callback(wsi,
155 					(enum lws_callback_reasons)reason,
156 					wsi->user_space, NULL, 0);
157 		}
158 		pt++;
159 	}
160 
161 	return 0;
162 }
163 
164 int
lws_callback_all_protocol_vhost_args(struct lws_vhost * vh,const struct lws_protocols * protocol,int reason,void * argp,size_t len)165 lws_callback_all_protocol_vhost_args(struct lws_vhost *vh,
166 			  const struct lws_protocols *protocol, int reason,
167 			  void *argp, size_t len)
168 {
169 	struct lws_context *context = vh->context;
170 	struct lws_context_per_thread *pt = &context->pt[0];
171 	unsigned int n, m = context->count_threads;
172 	struct lws *wsi;
173 
174 	while (m--) {
175 		for (n = 0; n < pt->fds_count; n++) {
176 			wsi = wsi_from_fd(context, pt->fds[n].fd);
177 			if (!wsi)
178 				continue;
179 			if (wsi->a.vhost == vh && (wsi->a.protocol == protocol ||
180 						 !protocol))
181 				wsi->a.protocol->callback(wsi, (enum lws_callback_reasons)reason,
182 						wsi->user_space, argp, len);
183 		}
184 		pt++;
185 	}
186 
187 	return 0;
188 }
189 
190 int
lws_callback_all_protocol_vhost(struct lws_vhost * vh,const struct lws_protocols * protocol,int reason)191 lws_callback_all_protocol_vhost(struct lws_vhost *vh,
192 			  const struct lws_protocols *protocol, int reason)
193 {
194 	return lws_callback_all_protocol_vhost_args(vh, protocol, reason, NULL, 0);
195 }
196 
197 int
lws_callback_vhost_protocols(struct lws * wsi,int reason,void * in,size_t len)198 lws_callback_vhost_protocols(struct lws *wsi, int reason, void *in, size_t len)
199 {
200 	int n;
201 
202 	for (n = 0; n < wsi->a.vhost->count_protocols; n++)
203 		if (wsi->a.vhost->protocols[n].callback(wsi, (enum lws_callback_reasons)reason, NULL, in, len))
204 			return 1;
205 
206 	return 0;
207 }
208 
/*
 * We need the context lock
 */

/*
 * Allocate and minimally initialize a new wsi for service thread tsi,
 * transitioned into role "ops" (state LRS_UNCONNECTED; ops may be NULL).
 * Any event-lib per-wsi storage is carved out of the same allocation and
 * pointed to by evlib_wsi.  Returns NULL on OOM or on the "createfail"
 * fault injection.
 */
struct lws *
__lws_wsi_create_with_role(struct lws_context *context, int tsi,
			   const struct lws_role_ops *ops)
{
	size_t s = sizeof(struct lws);
	struct lws *wsi;

	assert(tsi >= 0 && tsi < LWS_MAX_SMP);

	lws_context_assert_lock_held(context);

#if defined(LWS_WITH_EVENT_LIBS)
	/* event lib's per-wsi object is appended to the wsi allocation */
	s += context->event_loop_ops->evlib_size_wsi;
#endif

	wsi = lws_zalloc(s, __func__);

	if (!wsi) {
		lwsl_err("%s: Out of mem\n", __func__);
		return NULL;
	}

#if defined(LWS_WITH_EVENT_LIBS)
	wsi->evlib_wsi = (uint8_t *)wsi + sizeof(*wsi);
#endif
	wsi->a.context = context;
	lws_role_transition(wsi, 0, LRS_UNCONNECTED, ops);
	wsi->pending_timeout = NO_PENDING_TIMEOUT;
	wsi->a.protocol = NULL;
	wsi->tsi = (char)tsi;
	wsi->a.vhost = NULL;
	wsi->desc.sockfd = LWS_SOCK_INVALID;
	wsi->position_in_fds_table = LWS_NO_FDS_POS;

//	lwsl_debug("%s: tsi %d: role: %s\n", __func__, tsi,
//			ops ? ops->name : "none");

#if defined(LWS_WITH_SYS_FAULT_INJECTION)
	/* give each wsi its own prng state seeded from the context's */
	lws_xos_init(&wsi->fic.xos, lws_xos(&context->fic.xos));
#endif

	lws_fi_inherit_copy(&wsi->fic, &context->fic, "wsi", NULL);

	if (lws_fi(&wsi->fic, "createfail")) {
		lws_fi_destroy(&wsi->fic);
		lws_free(wsi);
		return NULL;
	}

	return wsi;
}
264 
/*
 * Add wsi to pt's service loop: let the event lib adopt the socket first,
 * then insert it into the pt's fd table.  Returns 0 on success, nonzero
 * on failure.  Takes the pt lock.
 */
int
lws_wsi_inject_to_loop(struct lws_context_per_thread *pt, struct lws *wsi)
{
	int ret = 1;

	lws_pt_lock(pt, __func__); /* -------------- pt { */

	if (pt->context->event_loop_ops->sock_accept)
		if (pt->context->event_loop_ops->sock_accept(wsi))
			goto bail;

	if (__insert_wsi_socket_into_fds(pt->context, wsi))
		goto bail;

	ret = 0;

bail:
	lws_pt_unlock(pt);

	return ret;
}
286 
/*
 * Take a copy of wsi->desc.sockfd before calling this, then close it
 * afterwards
 */

/*
 * Detach wsi from the event loop / fd table.  Returns 1 if the event lib
 * only performs a logical close and will complete destruction
 * asynchronously, 0 if the wsi's event-lib side is destroyed on return.
 */
int
lws_wsi_extract_from_loop(struct lws *wsi)
{
	if (lws_socket_is_valid(wsi->desc.sockfd))
		__remove_wsi_socket_from_fds(wsi);

	if (!wsi->a.context->event_loop_ops->destroy_wsi &&
	    wsi->a.context->event_loop_ops->wsi_logical_close) {
		wsi->a.context->event_loop_ops->wsi_logical_close(wsi);
		return 1; /* close / destroy continues async */
	}

	if (wsi->a.context->event_loop_ops->destroy_wsi)
		wsi->a.context->event_loop_ops->destroy_wsi(wsi);

	return 0; /* he is destroyed */
}
309 
/*
 * Broadcast "reason" to every protocol on vh using a temporary heap wsi
 * bound to the vhost.  Returns 1 on OOM or if any protocol callback
 * returned nonzero, else 0.
 *
 * NOTE(review): the fake wsi is bound via lws_vhost_bind_wsi(), which
 * increments vh->count_bound_wsi, but it is freed here without a matching
 * unbind -- confirm the count is rebalanced elsewhere.
 */
int
lws_callback_vhost_protocols_vhost(struct lws_vhost *vh, int reason, void *in,
				   size_t len)
{
	int n;
	struct lws *wsi = lws_zalloc(sizeof(*wsi), "fake wsi");

	if (!wsi)
		return 1;

	wsi->a.context = vh->context;
	lws_vhost_bind_wsi(vh, wsi);

	for (n = 0; n < wsi->a.vhost->count_protocols; n++) {
		wsi->a.protocol = &vh->protocols[n];
		if (wsi->a.protocol->callback(wsi, (enum lws_callback_reasons)reason, NULL, in, len)) {
			lws_free(wsi);
			return 1;
		}
	}

	lws_free(wsi);

	return 0;
}
335 
336 
/*
 * Public rx flow control.  _enable is either a plain boolean (0 =
 * throttle rx, 1 = allow rx) or, when LWS_RXFLOW_REASON_APPLIES is set,
 * a bitmap of reason bits to set or clear; any set bit in
 * wsi->rxflow_bitmap disables rx.  The resulting POLLIN change may be
 * deferred until the current user callback returns, unless
 * LWS_RXFLOW_REASON_FLAG_PROCESS_NOW is given.  Mux-encapsulated (h2
 * etc) streams are ignored.  Takes the pt lock.
 */
int
lws_rx_flow_control(struct lws *wsi, int _enable)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
	int en = _enable;

	// h2 ignores rx flow control atm
	if (lwsi_role_h2(wsi) || wsi->mux_substream ||
	    lwsi_role_h2_ENCAPSULATION(wsi))
		return 0; // !!!

	lwsl_info("%s: %s 0x%x\n", __func__, lws_wsi_tag(wsi), _enable);

	if (!(_enable & LWS_RXFLOW_REASON_APPLIES)) {
		/*
		 * convert user bool style to bitmap style... in user simple
		 * bool style _enable = 0 = flow control it, = 1 = allow rx
		 */
		en = LWS_RXFLOW_REASON_APPLIES | LWS_RXFLOW_REASON_USER_BOOL;
		if (_enable & 1)
			en |= LWS_RXFLOW_REASON_APPLIES_ENABLE_BIT;
	}

	lws_pt_lock(pt, __func__);

	/* any bit set in rxflow_bitmap DISABLEs rxflow control */
	if (en & LWS_RXFLOW_REASON_APPLIES_ENABLE_BIT)
		wsi->rxflow_bitmap = (uint8_t)(wsi->rxflow_bitmap & ~(en & 0xff));
	else
		wsi->rxflow_bitmap = (uint8_t)(wsi->rxflow_bitmap | (en & 0xff));

	/* no net change to apply?  nothing to do */
	if ((LWS_RXFLOW_PENDING_CHANGE | (!wsi->rxflow_bitmap)) ==
	    wsi->rxflow_change_to)
		goto skip;

	wsi->rxflow_change_to = LWS_RXFLOW_PENDING_CHANGE |
				(!wsi->rxflow_bitmap);

	lwsl_info("%s: %s: bitmap 0x%x: en 0x%x, ch 0x%x\n", __func__,
		  lws_wsi_tag(wsi),
		  wsi->rxflow_bitmap, en, wsi->rxflow_change_to);

	if (_enable & LWS_RXFLOW_REASON_FLAG_PROCESS_NOW ||
	    !wsi->rxflow_will_be_applied) {
		/* apply the pending change immediately */
		en = __lws_rx_flow_control(wsi);
		lws_pt_unlock(pt);

		return en;
	}

skip:
	lws_pt_unlock(pt);

	return 0;
}
392 
393 void
lws_rx_flow_allow_all_protocol(const struct lws_context * context,const struct lws_protocols * protocol)394 lws_rx_flow_allow_all_protocol(const struct lws_context *context,
395 			       const struct lws_protocols *protocol)
396 {
397 	const struct lws_context_per_thread *pt = &context->pt[0];
398 	struct lws *wsi;
399 	unsigned int n, m = context->count_threads;
400 
401 	while (m--) {
402 		for (n = 0; n < pt->fds_count; n++) {
403 			wsi = wsi_from_fd(context, pt->fds[n].fd);
404 			if (!wsi)
405 				continue;
406 			if (wsi->a.protocol == protocol)
407 				lws_rx_flow_control(wsi, LWS_RXFLOW_ALLOW);
408 		}
409 		pt++;
410 	}
411 }
412 
int user_callback_handle_rxflow(lws_callback_function callback_function,
				struct lws *wsi,
				enum lws_callback_reasons reason, void *user,
				void *in, size_t len)
{
	/*
	 * Run the user callback with rxflow deferral flagged, so any
	 * lws_rx_flow_control() it performs is batched; if the callback
	 * succeeded, apply the pending rxflow change afterwards.
	 */
	int ret;

	wsi->rxflow_will_be_applied = 1;
	ret = callback_function(wsi, reason, user, in, len);
	wsi->rxflow_will_be_applied = 0;

	return ret ? ret : __lws_rx_flow_control(wsi);
}
428 
/*
 * Apply any pending rxflow change on wsi -- first recursing into children
 * with pending changes -- by adjusting POLLIN in the pollfd set.  Returns
 * 0 on success or no-op, -1 if the pollfd change failed.
 */
int
__lws_rx_flow_control(struct lws *wsi)
{
	struct lws *wsic = wsi->child_list;

	// h2 ignores rx flow control atm
	if (lwsi_role_h2(wsi) || wsi->mux_substream ||
	    lwsi_role_h2_ENCAPSULATION(wsi))
		return 0; // !!!

	/* if he has children, do those if they were changed */
	while (wsic) {
		if (wsic->rxflow_change_to & LWS_RXFLOW_PENDING_CHANGE)
			__lws_rx_flow_control(wsic);

		wsic = wsic->sibling_list;
	}

	/* there is no pending change */
	if (!(wsi->rxflow_change_to & LWS_RXFLOW_PENDING_CHANGE))
		return 0;

	/* stuff is still buffered, not ready to really accept new input */
	if (lws_buflist_next_segment_len(&wsi->buflist, NULL)) {
		/* get ourselves called back to deal with stashed buffer */
		lws_callback_on_writable(wsi);
		// return 0;
	}

	/* now the pending is cleared, we can change rxflow state */

	wsi->rxflow_change_to &= (~LWS_RXFLOW_PENDING_CHANGE) & 3;

	lwsl_info("rxflow: %s change_to %d\n", lws_wsi_tag(wsi),
		  wsi->rxflow_change_to & LWS_RXFLOW_ALLOW);

	/* adjust the pollfd for this wsi */

	if (wsi->rxflow_change_to & LWS_RXFLOW_ALLOW) {
		lwsl_info("%s: reenable POLLIN\n", __func__);
		// lws_buflist_describe(&wsi->buflist, NULL, __func__);
		if (__lws_change_pollfd(wsi, 0, LWS_POLLIN)) {
			lwsl_info("%s: fail\n", __func__);
			return -1;
		}
	} else
		if (__lws_change_pollfd(wsi, LWS_POLLIN, 0))
			return -1;

	return 0;
}
480 
481 
482 const struct lws_protocols *
lws_get_protocol(struct lws * wsi)483 lws_get_protocol(struct lws *wsi)
484 {
485 	return wsi->a.protocol;
486 }
487 
488 
489 int
lws_ensure_user_space(struct lws * wsi)490 lws_ensure_user_space(struct lws *wsi)
491 {
492 	if (!wsi->a.protocol)
493 		return 0;
494 
495 	/* allocate the per-connection user memory (if any) */
496 
497 	if (wsi->a.protocol->per_session_data_size && !wsi->user_space) {
498 		wsi->user_space = lws_zalloc(
499 			    wsi->a.protocol->per_session_data_size, "user space");
500 		if (wsi->user_space == NULL) {
501 			lwsl_err("%s: OOM\n", __func__);
502 			return 1;
503 		}
504 	} else
505 		lwsl_debug("%s: %s protocol pss %lu, user_space=%p\n", __func__,
506 			   lws_wsi_tag(wsi),
507 			   (long)wsi->a.protocol->per_session_data_size,
508 			   wsi->user_space);
509 	return 0;
510 }
511 
/*
 * Change the wsi's protocol's per_session_data_size at runtime and make
 * sure the wsi's user_space matches.  Returns the (possibly new)
 * user_space, or NULL on allocation failure.
 *
 * NOTE(review): this casts away const and mutates the shared
 * lws_protocols struct, affecting every connection on that protocol --
 * confirm callers only use it on protocol tables they own.
 */
void *
lws_adjust_protocol_psds(struct lws *wsi, size_t new_size)
{
	((struct lws_protocols *)lws_get_protocol(wsi))->per_session_data_size =
		new_size;

	if (lws_ensure_user_space(wsi))
			return NULL;

	return wsi->user_space;
}
523 
524 int
lws_get_tsi(struct lws * wsi)525 lws_get_tsi(struct lws *wsi)
526 {
527         return (int)wsi->tsi;
528 }
529 
int
lws_is_ssl(struct lws *wsi)
{
	/* nonzero if the connection is using tls */
#if defined(LWS_WITH_TLS)
	return wsi->tls.use_ssl & LCCSCF_USE_SSL;
#else
	(void)wsi;
	return 0;
#endif
}
540 
#if defined(LWS_WITH_TLS) && !defined(LWS_WITH_MBEDTLS)
lws_tls_conn*
lws_get_ssl(struct lws *wsi)
{
	/* the underlying tls library's connection object for this wsi */
	return wsi->tls.ssl;
}
#endif
548 
int
lws_partial_buffered(struct lws *wsi)
{
	/* nonzero while a partial send is still buffered on the wsi */
	return lws_has_buffered_out(wsi);
}
554 
/*
 * How much the peer will currently let us send (tx credit), or -1 when
 * the wsi's role has no concept of peer write allowance.
 */
lws_fileofs_t
lws_get_peer_write_allowance(struct lws *wsi)
{
	if (!lws_rops_fidx(wsi->role_ops, LWS_ROPS_tx_credit))
		return -1;

	return lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_tx_credit).
				   tx_credit(wsi, LWSTXCR_US_TO_PEER, 0);
}
564 
/*
 * Set the wsi's combined role | state word and optionally switch its
 * role_ops (ops == NULL keeps the current ops).
 */
void
lws_role_transition(struct lws *wsi, enum lwsi_role role, enum lwsi_state state,
		    const struct lws_role_ops *ops)
{
#if (_LWS_ENABLED_LOGS & LLL_DEBUG)
	const char *name = "(unset)";
#endif
	wsi->wsistate = (unsigned int)role | (unsigned int)state;
	if (ops)
		wsi->role_ops = ops;
#if (_LWS_ENABLED_LOGS & LLL_DEBUG)
	if (wsi->role_ops)
		name = wsi->role_ops->name;
	lwsl_debug("%s: %s: wsistate 0x%lx, ops %s\n", __func__,
		   lws_wsi_tag(wsi), (unsigned long)wsi->wsistate, name);
#endif
}
582 
/*
 * Destructively split the uri in p into scheme, address, port and path.
 * p is modified in place; *prot, *ads and *path point into it (or at
 * static "/").  *port is only written for http/ws/https/wss schemes or
 * an explicit :port.  The returned path omits its leading '/'.  Always
 * returns 0.
 */
int
lws_parse_uri(char *p, const char **prot, const char **ads, int *port,
	      const char **path)
{
	const char *e;
	char is_unix_skt = 0;

	/* scheme: everything before the first "://" */
	*prot = p;
	while (*p && !(p[0] == ':' && p[1] == '/' && p[2] == '/'))
		p++;

	if (*p) {
		*p = '\0';	/* terminate the scheme */
		p += 3;		/* step over "://" */
	} else {
		/* no "://": whole string is the address, scheme is empty */
		e = p;
		p = (char *)*prot;
		*prot = e;
	}

	if (*p == '+')		/* "+<path>" style unix domain socket */
		is_unix_skt = 1;

	*ads = p;

	/* default ports for the well-known schemes */
	if (!strcmp(*prot, "http") || !strcmp(*prot, "ws"))
		*port = 80;
	else if (!strcmp(*prot, "https") || !strcmp(*prot, "wss"))
		*port = 443;

	if (*p == '[') {	/* bracketed ipv6 literal */
		(*ads)++;
		while (*p && *p != ']')
			p++;
		if (*p)
			*p++ = '\0';
	} else
		while (*p && *p != ':' && (is_unix_skt || *p != '/'))
			p++;

	if (*p == ':') {	/* explicit port */
		*p++ = '\0';
		*port = atoi(p);
		while (*p && *p != '/')
			p++;
	}

	/* path: without the leading '/', or "/" when absent */
	*path = "/";
	if (*p) {
		*p++ = '\0';
		if (*p)
			*path = p;
	}

	return 0;
}
636 
637 /* ... */
638 
/*
 * Find the URI argument "name" (normally given including the trailing
 * '=') among the request's URI_ARGS fragments and copy its value into
 * buf.  Returns the value length on success or -1 when not found.  If
 * name was given without the trailing '=', that is trimmed from the
 * result too.
 */
int
lws_get_urlarg_by_name_safe(struct lws *wsi, const char *name, char *buf, int len)
{
	int n = 0, fraglen, sl = (int)strlen(name);

	do {
		fraglen = lws_hdr_copy_fragment(wsi, buf, len,
						WSI_TOKEN_HTTP_URI_ARGS, n);

		/* no more fragments to try */
		if (fraglen < 0)
			break;

		if (fraglen + 1 < len &&
		    fraglen >= sl &&
		    !strncmp(buf, name, (size_t)sl)) {
			/*
			 * If he left off the trailing =, trim it from the
			 * result
			 */

			if (name[sl - 1] != '=' &&
			    sl < fraglen &&
			    buf[sl] == '=')
				sl++;

			/* slide the value to the start of buf, terminate */
			memmove(buf, buf + sl, (size_t)(fraglen - sl));
			buf[fraglen - sl] = '\0';

			return fraglen - sl;
		}

		n++;
	} while (1);

	return -1;
}
675 
676 const char *
lws_get_urlarg_by_name(struct lws * wsi,const char * name,char * buf,int len)677 lws_get_urlarg_by_name(struct lws *wsi, const char *name, char *buf, int len)
678 {
679 	int n = lws_get_urlarg_by_name_safe(wsi, name, buf, len);
680 
681 	return n < 0 ? NULL : buf;
682 }
683 
684 
685 #if defined(LWS_WITHOUT_EXTENSIONS)
686 
687 /* we need to provide dummy callbacks for internal exts
688  * so user code runs when faced with a lib compiled with
689  * extensions disabled.
690  */
691 
692 int
lws_extension_callback_pm_deflate(struct lws_context * context,const struct lws_extension * ext,struct lws * wsi,enum lws_extension_callback_reasons reason,void * user,void * in,size_t len)693 lws_extension_callback_pm_deflate(struct lws_context *context,
694                                   const struct lws_extension *ext,
695                                   struct lws *wsi,
696                                   enum lws_extension_callback_reasons reason,
697                                   void *user, void *in, size_t len)
698 {
699 	(void)context;
700 	(void)ext;
701 	(void)wsi;
702 	(void)reason;
703 	(void)user;
704 	(void)in;
705 	(void)len;
706 
707 	return 0;
708 }
709 
int
lws_set_extension_option(struct lws *wsi, const char *ext_name,
			 const char *opt_name, const char *opt_val)
{
	/*
	 * Stub for extensions-disabled builds: there is never an extension
	 * whose option could be set, so always fail.  The (void) casts
	 * suppress unused-parameter warnings, matching the sibling stub
	 * lws_extension_callback_pm_deflate().
	 */
	(void)wsi;
	(void)ext_name;
	(void)opt_name;
	(void)opt_val;

	return -1;
}
716 #endif
717 
int
lws_is_cgi(struct lws *wsi) {
#ifdef LWS_WITH_CGI
	/* nonzero when a cgi child is attached to this wsi */
	return !!wsi->http.cgi;
#else
	/* cgi support not compiled in */
	(void)wsi;
	return 0;
#endif
}
726 
727 const struct lws_protocol_vhost_options *
lws_pvo_search(const struct lws_protocol_vhost_options * pvo,const char * name)728 lws_pvo_search(const struct lws_protocol_vhost_options *pvo, const char *name)
729 {
730 	while (pvo) {
731 		if (!strcmp(pvo->name, name))
732 			break;
733 
734 		pvo = pvo->next;
735 	}
736 
737 	return pvo;
738 }
739 
740 int
lws_pvo_get_str(void * in,const char * name,const char ** result)741 lws_pvo_get_str(void *in, const char *name, const char **result)
742 {
743 	const struct lws_protocol_vhost_options *pv =
744 		lws_pvo_search((const struct lws_protocol_vhost_options *)in,
745 				name);
746 
747 	if (!pv)
748 		return 1;
749 
750 	*result = (const char *)pv->value;
751 
752 	return 0;
753 }
754 
/*
 * Deliver "reason" to every protocol of every vhost in pt's context,
 * using a stack "fake wsi" so the protocol callbacks see the right
 * vhost / protocol without any real connection.  Returns nonzero if any
 * callback returned nonzero.
 */
int
lws_broadcast(struct lws_context_per_thread *pt, int reason, void *in, size_t len)
{
	struct lws_vhost *v = pt->context->vhost_list;
	lws_fakewsi_def_plwsa(pt);
	int n, ret = 0;

	lws_fakewsi_prep_plwsa_ctx(pt->context);
#if !defined(LWS_PLAT_FREERTOS) && LWS_MAX_SMP > 1
	/* match the fake wsi's service thread index to our pt */
	((struct lws *)plwsa)->tsi = (char)(int)(pt - &pt->context->pt[0]);
#endif

	while (v) {
		const struct lws_protocols *p = v->protocols;

		plwsa->vhost = v; /* not a real bound wsi */

		for (n = 0; n < v->count_protocols; n++) {
			plwsa->protocol = p;
			if (p->callback &&
			    p->callback((struct lws *)plwsa, (enum lws_callback_reasons)reason, NULL, in, len))
				ret |= 1;
			p++;
		}

		v = v->vhost_next;
	}

	return ret;
}
785 
786 void *
lws_wsi_user(struct lws * wsi)787 lws_wsi_user(struct lws *wsi)
788 {
789 	return wsi->user_space;
790 }
791 
792 int
lws_wsi_tsi(struct lws * wsi)793 lws_wsi_tsi(struct lws *wsi)
794 {
795 	return wsi->tsi;
796 }
797 
798 
799 void
lws_set_wsi_user(struct lws * wsi,void * data)800 lws_set_wsi_user(struct lws *wsi, void *data)
801 {
802 	if (!wsi->user_space_externally_allocated && wsi->user_space)
803 		lws_free(wsi->user_space);
804 
805 	wsi->user_space_externally_allocated = 1;
806 	wsi->user_space = data;
807 }
808 
809 struct lws *
lws_get_parent(const struct lws * wsi)810 lws_get_parent(const struct lws *wsi)
811 {
812 	return wsi->parent;
813 }
814 
815 struct lws *
lws_get_child(const struct lws * wsi)816 lws_get_child(const struct lws *wsi)
817 {
818 	return wsi->child_list;
819 }
820 
821 void *
lws_get_opaque_parent_data(const struct lws * wsi)822 lws_get_opaque_parent_data(const struct lws *wsi)
823 {
824 	return wsi->opaque_parent_data;
825 }
826 
827 void
lws_set_opaque_parent_data(struct lws * wsi,void * data)828 lws_set_opaque_parent_data(struct lws *wsi, void *data)
829 {
830 	wsi->opaque_parent_data = data;
831 }
832 
833 void *
lws_get_opaque_user_data(const struct lws * wsi)834 lws_get_opaque_user_data(const struct lws *wsi)
835 {
836 	return wsi->a.opaque_user_data;
837 }
838 
839 void
lws_set_opaque_user_data(struct lws * wsi,void * data)840 lws_set_opaque_user_data(struct lws *wsi, void *data)
841 {
842 	wsi->a.opaque_user_data = data;
843 }
844 
845 int
lws_get_child_pending_on_writable(const struct lws * wsi)846 lws_get_child_pending_on_writable(const struct lws *wsi)
847 {
848 	return wsi->parent_pending_cb_on_writable;
849 }
850 
851 void
lws_clear_child_pending_on_writable(struct lws * wsi)852 lws_clear_child_pending_on_writable(struct lws *wsi)
853 {
854 	wsi->parent_pending_cb_on_writable = 0;
855 }
856 
857 
858 
859 const char *
lws_get_vhost_name(struct lws_vhost * vhost)860 lws_get_vhost_name(struct lws_vhost *vhost)
861 {
862 	return vhost->name;
863 }
864 
865 int
lws_get_vhost_port(struct lws_vhost * vhost)866 lws_get_vhost_port(struct lws_vhost *vhost)
867 {
868 	return vhost->listen_port;
869 }
870 
871 void *
lws_get_vhost_user(struct lws_vhost * vhost)872 lws_get_vhost_user(struct lws_vhost *vhost)
873 {
874 	return vhost->user;
875 }
876 
877 const char *
lws_get_vhost_iface(struct lws_vhost * vhost)878 lws_get_vhost_iface(struct lws_vhost *vhost)
879 {
880 	return vhost->iface;
881 }
882 
883 lws_sockfd_type
lws_get_socket_fd(struct lws * wsi)884 lws_get_socket_fd(struct lws *wsi)
885 {
886 	if (!wsi)
887 		return -1;
888 	return wsi->desc.sockfd;
889 }
890 
891 
892 struct lws_vhost *
lws_vhost_get(struct lws * wsi)893 lws_vhost_get(struct lws *wsi)
894 {
895 	return wsi->a.vhost;
896 }
897 
898 struct lws_vhost *
lws_get_vhost(struct lws * wsi)899 lws_get_vhost(struct lws *wsi)
900 {
901 	return wsi->a.vhost;
902 }
903 
904 const struct lws_protocols *
lws_protocol_get(struct lws * wsi)905 lws_protocol_get(struct lws *wsi)
906 {
907 	return wsi->a.protocol;
908 }
909 
910 #if defined(LWS_WITH_UDP)
911 const struct lws_udp *
lws_get_udp(const struct lws * wsi)912 lws_get_udp(const struct lws *wsi)
913 {
914 	return wsi->udp;
915 }
916 #endif
917 
918 struct lws_context *
lws_get_context(const struct lws * wsi)919 lws_get_context(const struct lws *wsi)
920 {
921 	return wsi->a.context;
922 }
923 
#if defined(LWS_WITH_CLIENT)
/*
 * Called when wsi, the current "active client" on its vhost's active
 * client conns list, has finished its transaction: if another wsi is
 * queued on it (h1 pipelining), hand the socket, tls state, queue and
 * list membership over to that wsi and schedule the old one for async
 * close.  Returns 1 and swaps *_wsi when a queued wsi takes over, 0 when
 * nothing is queued (wsi idles on a timeout), -1 on fd-table failure.
 */
int
_lws_generic_transaction_completed_active_conn(struct lws **_wsi, char take_vh_lock)
{
	struct lws *wnew, *wsi = *_wsi;

	/*
	 * Are we constitutionally capable of having a queue, ie, we are on
	 * the "active client connections" list?
	 *
	 * If not, that's it for us.
	 */

	if (lws_dll2_is_detached(&wsi->dll_cli_active_conns))
		return 0; /* no new transaction */

	/*
	 * With h1 queuing, the original "active client" moves his attributes
	 * like fd, ssl, queue and active client list entry to the next guy in
	 * the queue before closing... it's because the user code knows the
	 * individual wsi and the action must take place in the correct wsi
	 * context.  Note this means we don't truly pipeline headers.
	 *
	 * Trying to keep the original "active client" in place to do the work
	 * of the wsi breaks down when dealing with queued POSTs otherwise; it's
	 * also competing with the real mux child arrangements and complicating
	 * the code.
	 *
	 * For that reason, see if we have any queued child now...
	 */

	if (!wsi->dll2_cli_txn_queue_owner.head) {
		/*
		 * Nothing pipelined... we should hang around a bit
		 * in case something turns up... otherwise we'll close
		 */
		lwsl_info("%s: nothing pipelined waiting\n", __func__);
		lwsi_set_state(wsi, LRS_IDLING);

		lws_set_timeout(wsi, PENDING_TIMEOUT_CLIENT_CONN_IDLE,
				wsi->keep_warm_secs);

		return 0; /* no new transaction right now */
	}

	/*
	 * We have a queued child wsi we should bequeath our assets to, before
	 * closing ourself
	 */

	if (take_vh_lock)
		lws_vhost_lock(wsi->a.vhost);

	wnew = lws_container_of(wsi->dll2_cli_txn_queue_owner.head, struct lws,
				dll2_cli_txn_queue);

	assert(wsi != wnew);

	lws_dll2_remove(&wnew->dll2_cli_txn_queue);

	assert(lws_socket_is_valid(wsi->desc.sockfd));

	__lws_change_pollfd(wsi, LWS_POLLOUT | LWS_POLLIN, 0);

	/* copy the fd */
	wnew->desc = wsi->desc;

	assert(lws_socket_is_valid(wnew->desc.sockfd));

	/* disconnect the fd from association with old wsi */

	if (__remove_wsi_socket_from_fds(wsi))
		/*
		 * NOTE(review): error returns from here on exit without
		 * dropping the vh lock when take_vh_lock was set -- confirm
		 * callers treat -1 as fatal.
		 */
		return -1;

	sanity_assert_no_wsi_traces(wsi->a.context, wsi);
	sanity_assert_no_sockfd_traces(wsi->a.context, wsi->desc.sockfd);
	wsi->desc.sockfd = LWS_SOCK_INVALID;

	__lws_wsi_remove_from_sul(wsi);

	/*
	 * ... we're doing some magic here in terms of handing off the socket
	 * that has been active to a wsi that has not yet itself been active...
	 * depending on the event lib we may need to give a magic spark to the
	 * new guy and snuff out the old guy's magic spark at that level as well
	 */

#if defined(LWS_WITH_EVENT_LIBS)
	if (wsi->a.context->event_loop_ops->destroy_wsi)
		wsi->a.context->event_loop_ops->destroy_wsi(wsi);
	if (wsi->a.context->event_loop_ops->sock_accept)
		wsi->a.context->event_loop_ops->sock_accept(wnew);
#endif

	/* point the fd table entry to new guy */

	assert(lws_socket_is_valid(wnew->desc.sockfd));

	if (__insert_wsi_socket_into_fds(wsi->a.context, wnew))
		return -1;

#if defined(LWS_WITH_TLS)
	/* pass on the tls */

	wnew->tls = wsi->tls;
	wsi->tls.client_bio = NULL;
	wsi->tls.ssl = NULL;
	wsi->tls.use_ssl = 0;
#endif

	/* take over his copy of his endpoint as an active connection */

	wnew->cli_hostname_copy = wsi->cli_hostname_copy;
	wsi->cli_hostname_copy = NULL;
	wnew->keep_warm_secs = wsi->keep_warm_secs;

	/*
	 * selected queued guy now replaces the original leader on the
	 * active client conn list
	 */

	lws_dll2_remove(&wsi->dll_cli_active_conns);
	lws_dll2_add_tail(&wnew->dll_cli_active_conns,
			  &wsi->a.vhost->dll_cli_active_conns_owner);

	/* move any queued guys to queue on new active conn */

	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   wsi->dll2_cli_txn_queue_owner.head) {
		struct lws *ww = lws_container_of(d, struct lws,
					  dll2_cli_txn_queue);

		lws_dll2_remove(&ww->dll2_cli_txn_queue);
		lws_dll2_add_tail(&ww->dll2_cli_txn_queue,
				  &wnew->dll2_cli_txn_queue_owner);

	} lws_end_foreach_dll_safe(d, d1);

	if (take_vh_lock)
		lws_vhost_unlock(wsi->a.vhost);

	/*
	 * The original leader who passed on all his powers already can die...
	 * in the call stack above us there are guys who still want to touch
	 * him, so have him die next time around the event loop, not now.
	 */

	wsi->already_did_cce = 1; /* so the close doesn't trigger a CCE */
	lws_set_timeout(wsi, 1, LWS_TO_KILL_ASYNC);

	/* after the first one, they can only be coming from the queue */
	wnew->transaction_from_pipeline_queue = 1;

	lwsl_notice("%s: pipeline queue passed %s on to queued %s\n",
			__func__, lws_wsi_tag(wsi), lws_wsi_tag(wnew));

	*_wsi = wnew; /* inform caller we swapped */

	return 1; /* new transaction */
}
#endif
1085 
1086 int LWS_WARN_UNUSED_RESULT
lws_raw_transaction_completed(struct lws * wsi)1087 lws_raw_transaction_completed(struct lws *wsi)
1088 {
1089 	if (lws_has_buffered_out(wsi)) {
1090 		/*
1091 		 * ...so he tried to send something large, but it went out
1092 		 * as a partial, but he immediately called us to say he wants
1093 		 * to close the connection.
1094 		 *
1095 		 * Defer the close until the last part of the partial is sent.
1096 		 *
1097 		 */
1098 		lwsl_debug("%s: %s: deferring due to partial\n", __func__,
1099 				lws_wsi_tag(wsi));
1100 		wsi->close_when_buffered_out_drained = 1;
1101 		lws_callback_on_writable(wsi);
1102 
1103 		return 0;
1104 	}
1105 
1106 	return -1;
1107 }
1108 
/*
 * Detach wsi from its currently-bound protocol (issuing the role's
 * protocol unbind callback if a bind is outstanding) and bind it to
 * protocol p, issuing the role's protocol bind callback.
 *
 * reason is passed through to the unbind callback as the "in" argument.
 * p may be NULL, meaning just unbind.
 *
 * Returns 0 on success, 1 if user_space allocation or the bind callback
 * itself failed.
 */
int
lws_bind_protocol(struct lws *wsi, const struct lws_protocols *p,
		  const char *reason)
{
//	if (wsi->a.protocol == p)
//		return 0;
	const struct lws_protocols *vp = wsi->a.vhost->protocols, *vpo;

	/* first unbind from the old protocol, if we were bound to one */
	if (wsi->a.protocol && wsi->protocol_bind_balance) {
		wsi->a.protocol->callback(wsi,
		       wsi->role_ops->protocol_unbind_cb[!!lwsi_role_server(wsi)],
					wsi->user_space, (void *)reason, 0);
		wsi->protocol_bind_balance = 0;
	}
	/* the user_space belonged to the old protocol binding */
	if (!wsi->user_space_externally_allocated)
		lws_free_set_NULL(wsi->user_space);

	lws_same_vh_protocol_remove(wsi);

	wsi->a.protocol = p;
	if (!p)
		/* unbind only: we are done */
		return 0;

	if (lws_ensure_user_space(wsi))
		return 1;

	/*
	 * If p points inside this vhost's own protocols array, the index is
	 * just the pointer difference; otherwise look for a vhost protocol
	 * with a matching name and use its index instead.
	 */
	if (p > vp && p < &vp[wsi->a.vhost->count_protocols])
		lws_same_vh_protocol_insert(wsi, (int)(p - vp));
	else {
		int n = wsi->a.vhost->count_protocols;
		int hit = 0;

		vpo = vp;

		while (n--) {
			if (p->name && vp->name && !strcmp(p->name, vp->name)) {
				hit = 1;
				lws_same_vh_protocol_insert(wsi, (int)(vp - vpo));
				break;
			}
			vp++;
		}
		if (!hit)
			lwsl_err("%s: %p is not in vhost '%s' protocols list\n",
				 __func__, p, wsi->a.vhost->name);
	}

	/* issue the bind callback on the new protocol */
	if (wsi->a.protocol->callback(wsi, wsi->role_ops->protocol_bind_cb[
				    !!lwsi_role_server(wsi)],
				    wsi->user_space, NULL, 0))
		return 1;

	/* remember we owe the new protocol an unbind later */
	wsi->protocol_bind_balance = 1;

	return 0;
}
1165 
1166 void
lws_http_close_immortal(struct lws * wsi)1167 lws_http_close_immortal(struct lws *wsi)
1168 {
1169 	struct lws *nwsi;
1170 
1171 	if (!wsi->mux_substream)
1172 		return;
1173 
1174 	assert(wsi->mux_stream_immortal);
1175 	wsi->mux_stream_immortal = 0;
1176 
1177 	nwsi = lws_get_network_wsi(wsi);
1178 	lwsl_debug("%s: %s %s %d\n", __func__, lws_wsi_tag(wsi), lws_wsi_tag(nwsi),
1179 				     nwsi->immortal_substream_count);
1180 	assert(nwsi->immortal_substream_count);
1181 	nwsi->immortal_substream_count--;
1182 	if (!nwsi->immortal_substream_count)
1183 		/*
1184 		 * since we closed the only immortal stream on this nwsi, we
1185 		 * need to reapply a normal timeout regime to the nwsi
1186 		 */
1187 		lws_set_timeout(nwsi, PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE,
1188 				wsi->a.vhost->keepalive_timeout ?
1189 				    wsi->a.vhost->keepalive_timeout : 31);
1190 }
1191 
1192 void
lws_mux_mark_immortal(struct lws * wsi)1193 lws_mux_mark_immortal(struct lws *wsi)
1194 {
1195 	struct lws *nwsi;
1196 
1197 	lws_set_timeout(wsi, NO_PENDING_TIMEOUT, 0);
1198 
1199 	if (!wsi->mux_substream
1200 #if defined(LWS_WITH_CLIENT)
1201 			&& !wsi->client_mux_substream
1202 #endif
1203 	) {
1204 		lwsl_err("%s: not h2 substream\n", __func__);
1205 		return;
1206 	}
1207 
1208 	nwsi = lws_get_network_wsi(wsi);
1209 	if (!nwsi)
1210 		return;
1211 
1212 	lwsl_debug("%s: %s %s %d\n", __func__, lws_wsi_tag(wsi), lws_wsi_tag(nwsi),
1213 				     nwsi->immortal_substream_count);
1214 
1215 	wsi->mux_stream_immortal = 1;
1216 	assert(nwsi->immortal_substream_count < 255); /* largest count */
1217 	nwsi->immortal_substream_count++;
1218 	if (nwsi->immortal_substream_count == 1)
1219 		lws_set_timeout(nwsi, NO_PENDING_TIMEOUT, 0);
1220 }
1221 
1222 int
lws_http_mark_sse(struct lws * wsi)1223 lws_http_mark_sse(struct lws *wsi)
1224 {
1225 	if (!wsi)
1226 		return 0;
1227 
1228 	lws_http_headers_detach(wsi);
1229 	lws_mux_mark_immortal(wsi);
1230 
1231 	if (wsi->mux_substream)
1232 		wsi->h2_stream_carries_sse = 1;
1233 
1234 	return 0;
1235 }
1236 
1237 #if defined(LWS_WITH_CLIENT)
1238 
/*
 * Return a client connection string: from the generic client stash
 * (index stash_idx) when one exists, otherwise from header token hdr_idx
 * in the ah, when header support is compiled in.  May return NULL.
 */
const char *
lws_wsi_client_stash_item(struct lws *wsi, int stash_idx, int hdr_idx)
{
	/* try the generic client stash */
	if (wsi->stash)
		return wsi->stash->cis[stash_idx];

#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	/* if not, use the ah stash if applicable */
	return lws_hdr_simple_ptr(wsi, (enum lws_token_indexes)hdr_idx);
#else
	/* no header support built in, and no stash */
	return NULL;
#endif
}
1253 #endif
1254 
1255 #if defined(LWS_ROLE_H2) || defined(LWS_ROLE_MQTT)
1256 
1257 void
lws_wsi_mux_insert(struct lws * wsi,struct lws * parent_wsi,unsigned int sid)1258 lws_wsi_mux_insert(struct lws *wsi, struct lws *parent_wsi, unsigned int sid)
1259 {
1260 	lwsl_info("%s: %s, par %s: assign sid %d (curr %d)\n", __func__,
1261 		  lws_wsi_tag(wsi), lws_wsi_tag(parent_wsi), sid, wsi->mux.my_sid);
1262 
1263 	if (wsi->mux.my_sid && wsi->mux.my_sid != (unsigned int)sid)
1264 		assert(0);
1265 
1266 	wsi->mux.my_sid = sid;
1267 	wsi->mux.parent_wsi = parent_wsi;
1268 	wsi->role_ops = parent_wsi->role_ops;
1269 
1270 	/* new guy's sibling is whoever was the first child before */
1271 	wsi->mux.sibling_list = parent_wsi->mux.child_list;
1272 
1273 	/* first child is now the new guy */
1274 	parent_wsi->mux.child_list = wsi;
1275 
1276 	parent_wsi->mux.child_count++;
1277 }
1278 
1279 struct lws *
lws_wsi_mux_from_id(struct lws * parent_wsi,unsigned int sid)1280 lws_wsi_mux_from_id(struct lws *parent_wsi, unsigned int sid)
1281 {
1282 	lws_start_foreach_ll(struct lws *, wsi, parent_wsi->mux.child_list) {
1283 		if (wsi->mux.my_sid == sid)
1284 			return wsi;
1285 	} lws_end_foreach_ll(wsi, mux.sibling_list);
1286 
1287 	return NULL;
1288 }
1289 
/*
 * Debug-build helper: log every child of wsi's parent at INFO level, and
 * assert no child points to itself as its own sibling.
 */
void
lws_wsi_mux_dump_children(struct lws *wsi)
{
#if defined(_DEBUG)
	struct lws **w;

	if (!wsi->mux.parent_wsi || !lwsl_visible(LLL_INFO))
		return;

	for (w = &wsi->mux.parent_wsi->mux.child_list; *w;
	     w = &(*w)->mux.sibling_list) {
		lwsl_info("   \\---- child %s %s\n",
			  (*w)->role_ops ? (*w)->role_ops->name : "?",
					  lws_wsi_tag(*w));
		assert(*w != (*w)->mux.sibling_list);
	}
#endif
}
1306 
/*
 * Close and free every mux child stream of wsi, eg, because the network
 * connection itself is going down.  reason is an enum lws_close_status.
 */
void
lws_wsi_mux_close_children(struct lws *wsi, int reason)
{
	struct lws *wsi2;
	struct lws **w;

	if (!wsi->mux.child_list)
		return;

	w = &wsi->mux.child_list;
	while (*w) {
		lwsl_info("   closing child %s\n", lws_wsi_tag(*w));
		/* disconnect from siblings */
		wsi2 = (*w)->mux.sibling_list;
		assert (wsi2 != *w);
		(*w)->mux.sibling_list = NULL;
		(*w)->socket_is_permanently_unusable = 1;
		/* the close frees *w, so his sibling was saved in wsi2 first */
		__lws_close_free_wsi(*w, (enum lws_close_status)reason, "mux child recurse");
		*w = wsi2;
	}
}
1328 
1329 
1330 void
lws_wsi_mux_sibling_disconnect(struct lws * wsi)1331 lws_wsi_mux_sibling_disconnect(struct lws *wsi)
1332 {
1333 	struct lws *wsi2;
1334 
1335 	lws_start_foreach_llp(struct lws **, w,
1336 			      wsi->mux.parent_wsi->mux.child_list) {
1337 
1338 		/* disconnect from siblings */
1339 		if (*w == wsi) {
1340 			wsi2 = (*w)->mux.sibling_list;
1341 			(*w)->mux.sibling_list = NULL;
1342 			*w = wsi2;
1343 			lwsl_debug("  %s disentangled from sibling %s\n",
1344 				  lws_wsi_tag(wsi), lws_wsi_tag(wsi2));
1345 			break;
1346 		}
1347 	} lws_end_foreach_llp(w, mux.sibling_list);
1348 	wsi->mux.parent_wsi->mux.child_count--;
1349 
1350 	wsi->mux.parent_wsi = NULL;
1351 }
1352 
/*
 * Debug-build helper: log each child of wsi at INFO level, flagging with
 * '*' the ones that requested POLLOUT service.
 */
void
lws_wsi_mux_dump_waiting_children(struct lws *wsi)
{
#if defined(_DEBUG)
	struct lws *w;

	lwsl_info("%s: %s: children waiting for POLLOUT service:\n",
		  __func__, lws_wsi_tag(wsi));

	for (w = wsi->mux.child_list; w; w = w->mux.sibling_list)
		lwsl_info("  %c %s: sid %u: 0x%x %s %s\n",
			  w->mux.requested_POLLOUT ? '*' : ' ',
			  lws_wsi_tag(w), w->mux.my_sid, lwsi_state(w),
			  w->role_ops->name,
			  w->a.protocol ? w->a.protocol->name : "noprotocol");
#endif
}
1372 
1373 int
lws_wsi_mux_mark_parents_needing_writeable(struct lws * wsi)1374 lws_wsi_mux_mark_parents_needing_writeable(struct lws *wsi)
1375 {
1376 	struct lws /* *network_wsi = lws_get_network_wsi(wsi), */ *wsi2;
1377 	//int already = network_wsi->mux.requested_POLLOUT;
1378 
1379 	/* mark everybody above him as requesting pollout */
1380 
1381 	wsi2 = wsi;
1382 	while (wsi2) {
1383 		wsi2->mux.requested_POLLOUT = 1;
1384 		lwsl_info("%s: mark: %s, sid %u, pending writable\n",
1385 			  __func__, lws_wsi_tag(wsi2), wsi2->mux.my_sid);
1386 		wsi2 = wsi2->mux.parent_wsi;
1387 	}
1388 
1389 	return 0; // already;
1390 }
1391 
/*
 * Rotate the child at *wsi2 to the tail of its sibling list, so that
 * POLLOUT service round-robins fairly among the children.  Returns the
 * child that ended up chosen (now the list tail) with its
 * requested_POLLOUT flag cleared, or NULL if the list was empty.
 */
struct lws *
lws_wsi_mux_move_child_to_tail(struct lws **wsi2)
{
	struct lws *w = *wsi2;

	while (w) {
		if (!w->mux.sibling_list) { /* w is the current last */
			lwsl_debug("w=%s, *wsi2 = %s\n", lws_wsi_tag(w),
					lws_wsi_tag(*wsi2));

			if (w == *wsi2) /* we are already last */
				break;

			/* last points to us as new last */
			w->mux.sibling_list = *wsi2;

			/* guy pointing to us until now points to
			 * our old next */
			*wsi2 = (*wsi2)->mux.sibling_list;

			/* we point to nothing because we are last */
			w->mux.sibling_list->mux.sibling_list = NULL;

			/* w becomes us */
			w = w->mux.sibling_list;
			break;
		}
		w = w->mux.sibling_list;
	}

	/* clear the waiting for POLLOUT on the guy that was chosen */

	if (w)
		w->mux.requested_POLLOUT = 0;

	return w;
}
1429 
1430 int
lws_wsi_mux_action_pending_writeable_reqs(struct lws * wsi)1431 lws_wsi_mux_action_pending_writeable_reqs(struct lws *wsi)
1432 {
1433 	struct lws *w = wsi->mux.child_list;
1434 
1435 	while (w) {
1436 		if (w->mux.requested_POLLOUT) {
1437 			if (lws_change_pollfd(wsi, 0, LWS_POLLOUT))
1438 				return -1;
1439 			return 0;
1440 		}
1441 		w = w->mux.sibling_list;
1442 	}
1443 
1444 	if (lws_change_pollfd(wsi, LWS_POLLOUT, 0))
1445 		return -1;
1446 
1447 	return 0;
1448 }
1449 
1450 int
lws_wsi_txc_check_skint(struct lws_tx_credit * txc,int32_t tx_cr)1451 lws_wsi_txc_check_skint(struct lws_tx_credit *txc, int32_t tx_cr)
1452 {
1453 	if (txc->tx_cr <= 0) {
1454 		/*
1455 		 * If other side is not able to cope with us sending any DATA
1456 		 * so no matter if we have POLLOUT on our side if it's DATA we
1457 		 * want to send.
1458 		 */
1459 
1460 		if (!txc->skint)
1461 			lwsl_info("%s: %p: skint (%d)\n", __func__, txc,
1462 				  (int)txc->tx_cr);
1463 
1464 		txc->skint = 1;
1465 
1466 		return 1;
1467 	}
1468 
1469 	if (txc->skint)
1470 		lwsl_info("%s: %p: unskint (%d)\n", __func__, txc,
1471 			  (int)txc->tx_cr);
1472 
1473 	txc->skint = 0;
1474 
1475 	return 0;
1476 }
1477 
#if defined(_DEBUG)
/*
 * Debug-build helper: log the tx credit state for stream sid at location
 * "at" — the peer-to-us estimate, our us-to-peer credit, and whether we
 * are currently SKINT (out of credit to send).
 */
void
lws_wsi_txc_describe(struct lws_tx_credit *txc, const char *at, uint32_t sid)
{
	lwsl_info("%s: %p: %s: sid %d: %speer-to-us: %d, us-to-peer: %d\n",
		  __func__, txc, at, (int)sid, txc->skint ? "SKINT, " : "",
		  (int)txc->peer_tx_cr_est, (int)txc->tx_cr);
}
#endif
1487 
1488 int
lws_wsi_tx_credit(struct lws * wsi,char peer_to_us,int add)1489 lws_wsi_tx_credit(struct lws *wsi, char peer_to_us, int add)
1490 {
1491 	if (wsi->role_ops && lws_rops_fidx(wsi->role_ops, LWS_ROPS_tx_credit))
1492 		return lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_tx_credit).
1493 				   tx_credit(wsi, peer_to_us, add);
1494 
1495 	return 0;
1496 }
1497 
1498 /*
1499  * Let the protocol know about incoming tx credit window updates if it's
1500  * managing the flow control manually (it may want to proxy this information)
1501  */
1502 
1503 int
lws_wsi_txc_report_manual_txcr_in(struct lws * wsi,int32_t bump)1504 lws_wsi_txc_report_manual_txcr_in(struct lws *wsi, int32_t bump)
1505 {
1506 	if (!wsi->txc.manual)
1507 		/*
1508 		 * If we don't care about managing it manually, no need to
1509 		 * report it
1510 		 */
1511 		return 0;
1512 
1513 	return user_callback_handle_rxflow(wsi->a.protocol->callback,
1514 					   wsi, LWS_CALLBACK_WSI_TX_CREDIT_GET,
1515 					   wsi->user_space, NULL, (size_t)bump);
1516 }
1517 
1518 #if defined(LWS_WITH_CLIENT)
1519 
/*
 * The established leader wsi has queued client transactions that wanted
 * to pipeline on it: adopt each queued wsi onto the connection as an h2
 * or mqtt stream, according to the leader's role.  Takes the context and
 * vhost locks around the queue walk.  Always returns 0.
 */
int
lws_wsi_mux_apply_queue(struct lws *wsi)
{
	/* we have a transaction queue that wants to pipeline */

	lws_context_lock(wsi->a.context, __func__); /* -------------- cx { */
	lws_vhost_lock(wsi->a.vhost);

	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   wsi->dll2_cli_txn_queue_owner.head) {
		struct lws *w = lws_container_of(d, struct lws,
						 dll2_cli_txn_queue);

#if defined(LWS_ROLE_H2)
		if (lwsi_role_http(wsi) &&
		    lwsi_state(w) == LRS_H2_WAITING_TO_SEND_HEADERS) {
			lwsl_info("%s: cli pipeq %s to be h2\n", __func__,
					lws_wsi_tag(w));

			lwsi_set_state(w, LRS_H1C_ISSUE_HANDSHAKE2);

			/* remove ourselves from client queue */
			lws_dll2_remove(&w->dll2_cli_txn_queue);

			/* attach ourselves as an h2 stream */
			lws_wsi_h2_adopt(wsi, w);
		}
#endif

#if defined(LWS_ROLE_MQTT)
		if (lwsi_role_mqtt(wsi) &&
		    lwsi_state(wsi) == LRS_ESTABLISHED) {
			lwsl_info("%s: cli pipeq %s to be mqtt\n", __func__,
					lws_wsi_tag(w));

			/* remove ourselves from client queue */
			lws_dll2_remove(&w->dll2_cli_txn_queue);

			/* attach ourselves as an mqtt stream */
			lws_wsi_mqtt_adopt(wsi, w);
		}
#endif

	} lws_end_foreach_dll_safe(d, d1);

	lws_vhost_unlock(wsi->a.vhost);
	lws_context_unlock(wsi->a.context); /* } cx --------------  */

	return 0;
}
1570 
1571 #endif
1572 
1573 #endif
1574