/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2019 - 2021 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 *
 * When the user code is in a different process, a non-tls unix domain socket
 * proxy is used to asynchronously transfer buffers in each direction via the
 * network stack, without explicit IPC
 *
 *     user_process{ [user code] | shim | socket-}------ lws_process{ lws }
 *
 * Lws exposes a listening unix domain socket in this case; the user processes
 * connect to it and pass just info.streamtype in an initial tx packet.  All
 * packets are prepended by a 1-byte type field when used in this mode.  See
 * lws-secure-streams.h for documentation and definitions.
 *
 * Proxying in either direction can face the situation that it cannot send the
 * onward packet immediately and is subject to separating the write request
 * from the write action.  To make the best use of memory, a single
 * preallocated buffer stashes pending packets in all four directions
 * (c->p, p->c, p->ss, ss->p).  This allows it to adapt to different traffic
 * patterns without areas wasted on traffic that isn't arriving in a
 * particular application.
 *
 * A shim is provided to monitor the process' unix domain socket and regenerate
 * the secure streams api there, with callbacks happening in the process thread
 * context.
 *
 * This file implements the listening unix domain socket proxy... this code is
 * only going to run on a Linux-class device, with the implications that has
 * for memory availability.
 */
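
/*
 * As used in this file, the 1-byte type prefixes seen in the proxy -> client
 * direction include (the definitive list and definitions live in
 * lws-secure-streams.h):
 *
 *   LWSSS_SER_RXPRE_CREATE_RESULT  result of the client's create request
 *   LWSSS_SER_RXPRE_METADATA       forwarded rx metadata item
 *   LWSSS_SER_RXPRE_PERF           connection performance JSON (conmon)
 *   LWSSS_SER_RXPRE_RX_PAYLOAD     serialized rx payload from the onward ss
 */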

#include <private-lib-core.h>

struct raw_pss {
	struct conn		*conn;
};

/*
 * Proxy - onward secure-stream handler
 */

typedef struct ss_proxy_onward {
	lws_ss_handle_t 	*ss;
	struct conn		*conn;
} ss_proxy_t;

void
lws_proxy_clean_conn_ss(struct lws *wsi)
{
#if 0
	lws_ss_handle_t *h;
	struct conn *conn;

	if (!wsi)
		return;

	h = (lws_ss_handle_t *)wsi->a.opaque_user_data;
	conn = h->conn_if_sspc_onw;

	if (conn && conn->ss)
		conn->ss->wsi = NULL;
#endif
}


void
ss_proxy_onward_link_req_writeable(lws_ss_handle_t *h_onward)
{
	ss_proxy_t *m = (ss_proxy_t *)&h_onward[1];

	if (m->conn->wsi) /* if possible, request client conn write */
		lws_callback_on_writable(m->conn->wsi);
}

int
__lws_ss_proxy_bind_ss_to_conn_wsi(void *parconn, size_t dsh_size)
{
	struct conn *conn = (struct conn *)parconn;
	struct lws_context_per_thread *pt;

	if (!conn || !conn->wsi || !conn->ss)
		return -1;

	pt = &conn->wsi->a.context->pt[(int)conn->wsi->tsi];

	if (lws_fi(&conn->ss->fic, "ssproxy_dsh_create_oom"))
		return -1;
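	/*
	 * The final argument asks lws_dsh_create() for two buffer "kinds"
	 * inside the single allocation; they correspond to the KIND_C_TO_P
	 * and KIND_SS_TO_P queues used elsewhere in this file.
	 */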
	conn->dsh = lws_dsh_create(&pt->ss_dsh_owner, dsh_size, 2);
	if (!conn->dsh)
		return -1;

	__lws_lc_tag_append(&conn->wsi->lc, lws_ss_tag(conn->ss));

	return 0;
}

/* Onward secure streams payload interface */

static lws_ss_state_return_t
ss_proxy_onward_rx(void *userobj, const uint8_t *buf, size_t len, int flags)
{
	ss_proxy_t *m = (ss_proxy_t *)userobj;
	const char *rsp = NULL;
	int n;

	// lwsl_notice("%s: len %d\n", __func__, (int)len);

	/*
	 * The onward secure stream connection has received something.
	 */

	if (m->ss->rideshare != m->ss->policy && m->ss->rideshare) {
		rsp = m->ss->rideshare->streamtype;
		flags |= LWSSS_FLAG_RIDESHARE;
	}

	/*
	 * Apply SSS framing around this chunk of RX and stash it in the dsh
	 * in ss -> proxy [ -> client] direction.  This can fail...
	 */

	if (lws_fi(&m->ss->fic, "ssproxy_dsh_rx_queue_oom"))
		n = 1;
	else
		n = lws_ss_serialize_rx_payload(m->conn->dsh, buf, len,
						flags, rsp);
	if (n)
		/*
		 * We couldn't buffer this rx, eg due to OOM, let's escalate it
		 * to be a "loss of connection", which it basically is...
		 */
		return LWSSSSRET_DISCONNECT_ME;

	/*
	 * Manage rx flow on the SS (onward) side according to our situation
	 * in the dsh holding proxy->client serialized forwarding rx
	 */
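
	/*
	 * For illustration, a streamtype policy would typically set the
	 * related limits something like this (key names are assumed to
	 * mirror the policy struct members used here; check the policy
	 * documentation):
	 *
	 *   "proxy_buflen":                  32768,
	 *   "proxy_buflen_rxflow_on_above":  24576,
	 *   "proxy_buflen_rxflow_off_below":  8192
	 *
	 * Above the high watermark we stop taking rx from the onward ss;
	 * below the low watermark (handled in the RAW_WRITEABLE path) we
	 * allow it again.
	 */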

	if (!m->conn->onward_in_flow_control && m->ss->wsi &&
	    m->ss->policy->proxy_buflen_rxflow_on_above &&
	    lws_dsh_get_size(m->conn->dsh, KIND_SS_TO_P) >=
				m->ss->policy->proxy_buflen_rxflow_on_above) {
		lwsl_info("%s: %s: rxflow disabling rx (%lu / %lu, hwm %lu)\n", __func__,
				lws_wsi_tag(m->ss->wsi),
				(unsigned long)lws_dsh_get_size(m->conn->dsh, KIND_SS_TO_P),
				(unsigned long)m->ss->policy->proxy_buflen,
				(unsigned long)m->ss->policy->proxy_buflen_rxflow_on_above);
		/*
		 * stop taking in rx once the onward wsi rx is above the
		 * high water mark
		 */
		lws_rx_flow_control(m->ss->wsi, 0);
		m->conn->onward_in_flow_control = 1;
	}

	if (m->conn->wsi) /* if possible, request client conn write */
		lws_callback_on_writable(m->conn->wsi);

	return LWSSSSRET_OK;
}

/*
 * we are transmitting buffered payload originally from the client on to the ss
 */
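
/*
 * The client's serialized payload was stashed in the dsh under KIND_C_TO_P
 * by the deserializer; when the onward ss becomes writeable we dredge it
 * back out here, and if more remains queued we immediately request another
 * onward tx opportunity.
 */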

static lws_ss_state_return_t
ss_proxy_onward_tx(void *userobj, lws_ss_tx_ordinal_t ord, uint8_t *buf,
		   size_t *len, int *flags)
{
	ss_proxy_t *m = (ss_proxy_t *)userobj;
	void *p;
	size_t si;

	if (!m->conn->ss || m->conn->state != LPCSPROX_OPERATIONAL) {
		lwsl_notice("%s: ss not ready\n", __func__);
		*len = 0;

		return LWSSSSRET_TX_DONT_SEND;
	}

	/*
	 * The onward secure stream says that we could send something to it
	 * (by putting it in buf, and setting *len and *flags)... dredge the
	 * next thing out of the dsh
	 */

	if (lws_ss_deserialize_tx_payload(m->conn->dsh, m->ss->wsi,
					  ord, buf, len, flags))
		return LWSSSSRET_TX_DONT_SEND;

	/* ... there's more we want to send? */
	if (!lws_dsh_get_head(m->conn->dsh, KIND_C_TO_P, (void **)&p, &si))
		_lws_ss_request_tx(m->conn->ss);

	if (!*len && !*flags)
		/* we don't actually want to send anything */
		return LWSSSSRET_TX_DONT_SEND;

	lwsl_info("%s: onward tx %d fl 0x%x\n", __func__, (int)*len, *flags);

#if 0
	{
		int ff = open("/tmp/z", O_RDWR | O_CREAT | O_APPEND, 0666);

		if (ff == -1)
			lwsl_err("%s: errno %d\n", __func__, errno);
		else {
			write(ff, buf, *len);
			close(ff);
		}
	}
#endif

	return LWSSSSRET_OK;
}

static lws_ss_state_return_t
ss_proxy_onward_state(void *userobj, void *sh,
		      lws_ss_constate_t state, lws_ss_tx_ordinal_t ack)
{
	ss_proxy_t *m = (ss_proxy_t *)userobj;
	size_t dsh_size;

	switch (state) {
	case LWSSSCS_CREATING:

		/*
		 * conn is private to -process.c, so call through to a) append
		 * the onward ss tag information to the accepted incoming
		 * proxy link wsi tag name now that we have it, and b) allocate
		 * the dsh buffer now that we can look up the streamtype's
		 * policy for it.
		 */

		dsh_size = m->ss->policy->proxy_buflen ?
				m->ss->policy->proxy_buflen : 32768;

		lwsl_notice("%s: %s: initializing dsh max len %lu\n",
				__func__, lws_ss_tag(m->ss),
				(unsigned long)dsh_size);

		/* this includes ssproxy_dsh_create_oom fault generation */

		if (__lws_ss_proxy_bind_ss_to_conn_wsi(m->conn, dsh_size)) {

			/* failed to allocate the dsh */

			lwsl_notice("%s: dsh init failed\n", __func__);

			return LWSSSSRET_DESTROY_ME;
		}
		break;

	case LWSSSCS_DESTROYING:
		if (!m->conn)
			break;
		if (!m->conn->wsi) {
			/*
			 * Our onward secure stream is closing and our client
			 * connection has already gone away... destroy the conn.
			 */
			lwsl_info("%s: Destroying conn\n", __func__);
			lws_dsh_destroy(&m->conn->dsh);
			free(m->conn);
			m->conn = NULL;
			return 0;
		} else
			lwsl_info("%s: ss DESTROYING, wsi up\n", __func__);
		break;

	default:
		break;
	}
	if (!m->conn) {
		lwsl_warn("%s: dropping state due to conn not up\n", __func__);

		return LWSSSSRET_OK;
	}

	if (lws_ss_serialize_state(m->conn->wsi, m->conn->dsh, state, ack))
		/*
		 * Failed to alloc state packet that we want to send in dsh,
		 * we will lose coherence and have to disconnect the link
		 */
		return LWSSSSRET_DISCONNECT_ME;

	if (m->conn->wsi) /* if possible, request client conn write */
		lws_callback_on_writable(m->conn->wsi);

	return LWSSSSRET_OK;
}

void
ss_proxy_onward_txcr(void *userobj, int bump)
{
	ss_proxy_t *m = (ss_proxy_t *)userobj;

	if (!m->conn)
		return;

	lws_ss_serialize_txcr(m->conn->dsh, bump);

	if (m->conn->wsi) /* if possible, request client conn write */
		lws_callback_on_writable(m->conn->wsi);
}

/*
 * Client <-> Proxy connection, usually on Unix Domain Socket
 */
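
/*
 * Overview of the conn states as used below (the definitions are private to
 * the proxy implementation):
 *
 *   LPCSPROX_WAIT_INITIAL_TX:  accepted the client UDS connection, waiting
 *                              for the serialized streamtype / create packet
 *   LPCSPROX_REPORTING_OK /
 *   LPCSPROX_REPORTING_FAIL:   onward ss creation result is pending being
 *                              written back to the client (FAIL then hangs up)
 *   LPCSPROX_OPERATIONAL:      create result sent; payload, metadata and
 *                              state forwarding in both directions
 *   LPCSPROX_DESTROYED:        conn is on its way out
 */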

static int
callback_ss_proxy(struct lws *wsi, enum lws_callback_reasons reason,
		  void *user, void *in, size_t len)
{
	struct raw_pss *pss = (struct raw_pss *)user;
	const lws_ss_policy_t *rsp;
	struct conn *conn = NULL;
	lws_ss_metadata_t *md;
	lws_ss_info_t ssi;
	const uint8_t *cp;
	char s[512];
	uint8_t *p;
	size_t si;
	char pay;
	int n;

	if (pss)
		conn = pss->conn;

	switch (reason) {
	case LWS_CALLBACK_PROTOCOL_INIT:
		break;

	case LWS_CALLBACK_PROTOCOL_DESTROY:
		break;

	/* callbacks related to raw socket descriptor "accepted side" */

	case LWS_CALLBACK_RAW_ADOPT:
		lwsl_info("LWS_CALLBACK_RAW_ADOPT\n");
		if (!pss)
			return -1;

		if (lws_fi(&wsi->fic, "ssproxy_client_adopt_oom"))
			pss->conn = NULL;
		else
			pss->conn = malloc(sizeof(struct conn));
		if (!pss->conn)
			return -1;

		memset(pss->conn, 0, sizeof(*pss->conn));

		/* dsh is allocated when the onward ss is done */

		pss->conn->wsi = wsi;
		wsi->bound_ss_proxy_conn = 1; /* opaque is conn */

		pss->conn->state = LPCSPROX_WAIT_INITIAL_TX;

		/*
		 * Client is expected to follow the unix domain socket
		 * acceptance up rapidly with an initial tx containing the
		 * streamtype name.  We can't create the stream until then.
		 */
		lws_set_timeout(wsi, PENDING_TIMEOUT_AWAITING_CLIENT_HS_SEND, 3);
		break;

	case LWS_CALLBACK_RAW_CLOSE:
		lwsl_info("LWS_CALLBACK_RAW_CLOSE:\n");

		if (!conn)
			break;

		/*
		 * the client unix domain socket connection (wsi / conn->wsi)
		 * has closed... eg, the client has exited or otherwise has
		 * definitively finished with the proxying and onward connection
		 *
		 * But right now, the SS and possibly the SS onward wsi are
		 * still live...
		 */

		assert(conn->wsi == wsi);
		conn->wsi = NULL;

		lwsl_notice("%s: cli->prox link %s closing\n", __func__,
				lws_wsi_tag(wsi));

		/* sever relationship with conn */
		lws_set_opaque_user_data(wsi, NULL);

		/*
		 * The current wsi is decoupled from the pss / conn and
		 * the conn no longer has a pointer on it.
		 *
		 * If there's an outgoing, proxied SS conn on our behalf, we
		 * have to destroy it
		 */

		if (conn->ss) {
			struct lws *cw = conn->ss->wsi;
			/*
			 * conn->ss is the onward connection SS
			 */

			lwsl_info("%s: destroying %s, wsi %s\n",
					__func__, lws_ss_tag(conn->ss),
					lws_wsi_tag(conn->ss->wsi));

			/* sever conn relationship with the ss about to be deleted */

			conn->ss->wsi = NULL;

			if (cw && wsi != cw) {

				/* disconnect onward SS from its wsi */

				lws_set_opaque_user_data(cw, NULL);

				/*
				 * The wsi doing the onward connection can no
				 * longer relate to the conn... otherwise when
				 * it gets callbacks it will try to bind to
				 * the ss we are about to delete
				 */
				lws_wsi_close(cw, LWS_TO_KILL_ASYNC);
			}

			lws_ss_destroy(&conn->ss);
			/*
			 * The conn may be gone already, freed by the ss
			 * destroy handler in ssi.state for the proxied ss
			 */
			break;
		}

		if (conn->state == LPCSPROX_DESTROYED || !conn->ss) {
			/*
			 * There's no onward secure stream and our client
			 * connection is closing.  Destroy the conn.
			 */
			lws_dsh_destroy(&conn->dsh);
			free(conn);
			pss->conn = NULL;
		} else
			lwsl_debug("%s: CLOSE; %s\n", __func__, lws_ss_tag(conn->ss));

		break;

	case LWS_CALLBACK_RAW_RX:
		/*
		 * ie, the proxy is receiving something from a client
		 */
		lwsl_info("%s: RX: rx %d\n", __func__, (int)len);

		if (!conn || !conn->wsi) {
			lwsl_err("%s: rx with bad conn state\n", __func__);

			return -1;
		}

		// lwsl_hexdump_info(in, len);

		if (conn->state == LPCSPROX_WAIT_INITIAL_TX) {
			memset(&ssi, 0, sizeof(ssi));
			ssi.user_alloc = sizeof(ss_proxy_t);
			ssi.handle_offset = offsetof(ss_proxy_t, ss);
			ssi.opaque_user_data_offset =
					offsetof(ss_proxy_t, conn);
			ssi.rx = ss_proxy_onward_rx;
			ssi.tx = ss_proxy_onward_tx;
		}
		ssi.state = ss_proxy_onward_state;
		ssi.flags = 0;

		n = lws_ss_deserialize_parse(&conn->parser,
				lws_get_context(wsi), conn->dsh, in, len,
				&conn->state, conn, &conn->ss, &ssi, 0);
		switch (n) {
		case LWSSSSRET_OK:
			break;
		case LWSSSSRET_DISCONNECT_ME:
			return -1;
		case LWSSSSRET_DESTROY_ME:
			if (conn->ss)
				lws_ss_destroy(&conn->ss);
			return -1;
		}

		if (conn->state == LPCSPROX_REPORTING_FAIL ||
		    conn->state == LPCSPROX_REPORTING_OK)
			lws_callback_on_writable(conn->wsi);

		break;

	case LWS_CALLBACK_RAW_WRITEABLE:

		lwsl_debug("%s: %s: LWS_CALLBACK_RAW_WRITEABLE, state 0x%x\n",
				__func__, lws_wsi_tag(wsi), lwsi_state(wsi));

		/*
		 * We can transmit something back to the client from the dsh
		 * of stuff we received on its behalf from the ss
		 */

		if (!conn || !conn->wsi)
			break;

		n = 0;
		pay = 0;

		s[3] = 0;
		cp = (const uint8_t *)s;
		switch (conn->state) {
		case LPCSPROX_REPORTING_FAIL:
			s[3] = 1;
			/* fallthru */
		case LPCSPROX_REPORTING_OK:
			s[0] = LWSSS_SER_RXPRE_CREATE_RESULT;
			s[1] = 0;
			s[2] = 1;

			n = 8;

			lws_ser_wu32be((uint8_t *)&s[4], conn->ss &&
							 conn->ss->policy ?
					conn->ss->policy->client_buflen : 0);

			/*
			 * If there's rideshare sequencing, the comma-separated
			 * list of streamtypes is appended after the result
			 * byte and the 4-byte client buflen above
			 */
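
			/*
			 * So the create result packet sent back to the client
			 * is laid out like this (s[2] is patched below to the
			 * final payload length, n - 3, once any rideshare
			 * list has been appended):
			 *
			 *   s[0]     LWSSS_SER_RXPRE_CREATE_RESULT
			 *   s[1..2]  payload length (s[1] stays 0 here)
			 *   s[3]     0 = created OK, 1 = failed
			 *   s[4..7]  32-bit BE client_buflen from the policy
			 *   s[8...]  comma-separated rideshare streamtypes
			 */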

			if (conn->ss) {
				rsp = conn->ss->policy;

				while (rsp) {
					if (n != 4 && n < (int)sizeof(s) - 2)
						s[n++] = ',';
					n += lws_snprintf(&s[n], sizeof(s) - (unsigned int)n,
							"%s", rsp->streamtype);
					rsp = lws_ss_policy_lookup(wsi->a.context,
						rsp->rideshare_streamtype);
				}
			}
			s[2] = (char)(n - 3);
			conn->state = LPCSPROX_OPERATIONAL;
			lws_set_timeout(wsi, 0, 0);
			break;

		case LPCSPROX_OPERATIONAL:

			/*
			 * returning [onward ->] proxy -> client
			 * rx metadata has priority 1
			 */
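
			/*
			 * Each forwarded metadata item is serialized below as:
			 *
			 *   [0]    LWSSS_SER_RXPRE_METADATA
			 *   [1..2] 16-bit BE: 1 + name length + value length
			 *   [3]    name length
			 *   [4...] name bytes, then the value bytes
			 */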

			md = conn->ss->metadata;
			while (md) {
				// lwsl_notice("%s: check %s: %d\n", __func__,
				// md->name, md->pending_onward);
				if (md->pending_onward) {
					size_t naml = strlen(md->name);

					// lwsl_notice("%s: proxy issuing rxmd\n", __func__);

					if (4 + naml + md->length > sizeof(s)) {
						lwsl_err("%s: rxmdata too big\n",
								__func__);
						goto hangup;
					}
					md->pending_onward = 0;
					p = (uint8_t *)s;
					p[0] = LWSSS_SER_RXPRE_METADATA;
					lws_ser_wu16be(&p[1], (uint16_t)(1 + naml +
							      md->length));
					p[3] = (uint8_t)naml;
					memcpy(&p[4], md->name, naml);
					p += 4 + naml;
					memcpy(p, md->value__may_own_heap,
					       md->length);
					p += md->length;

					n = lws_ptr_diff(p, cp);
					goto again;
				}

				md = md->next;
			}

			/*
			 * If we have performance data, render it in JSON
			 * and send that as LWSSS_SER_RXPRE_PERF... it has
			 * priority 2
			 */

#if defined(LWS_WITH_CONMON)
			if (conn->ss->conmon_json) {
				unsigned int xlen = conn->ss->conmon_len;

				if (xlen > sizeof(s) - 3)
					xlen = sizeof(s) - 3;
				cp = (uint8_t *)s;
				p = (uint8_t *)s;
				p[0] = LWSSS_SER_RXPRE_PERF;
				lws_ser_wu16be(&p[1], (uint16_t)xlen);
				memcpy(&p[3], conn->ss->conmon_json, xlen);

				lws_free_set_NULL(conn->ss->conmon_json);
				n = (int)(xlen + 3);

				pay = 0;
				goto again;
			}
#endif
			/*
			 * if no fresh rx metadata, just pass through incoming
			 * dsh
			 */

			if (lws_dsh_get_head(conn->dsh, KIND_SS_TO_P,
					     (void **)&p, &si))
				break;

			cp = p;

#if 0
			if (cp[0] == LWSSS_SER_RXPRE_RX_PAYLOAD &&
			    wsi->a.context->detailed_latency_cb) {

				/*
				 * we're fulfilling rx that came in on ss
				 * by sending it back out to the client on
				 * the Unix Domain Socket
				 *
				 * +  7  u32  write will compute latency here...
				 * + 11  u32  ust we received from ss
				 *
				 * lws_write will report it and fill in
				 * LAT_DUR_PROXY_CLIENT_REQ_TO_WRITE
				 */

				us = lws_now_usecs();
				lws_ser_wu32be(&p[7], us -
						      lws_ser_ru64be(&p[11]));
				lws_ser_wu64be(&p[11], us);

				wsi->detlat.acc_size =
					wsi->detlat.req_size = si - 19;
				/* time proxy held it */
				wsi->detlat.latencies[
				            LAT_DUR_PROXY_RX_TO_ONWARD_TX] =
							lws_ser_ru32be(&p[7]);
			}
#endif
			pay = 1;
			n = (int)si;
			break;
		default:
			break;
		}
again:
		if (!n)
			break;

		if (lws_fi(&wsi->fic, "ssproxy_client_write_fail"))
			n = -1;
		else
			n = lws_write(wsi, (uint8_t *)cp, (unsigned int)n, LWS_WRITE_RAW);
		if (n < 0) {
			lwsl_info("%s: WRITEABLE: %d\n", __func__, n);

			goto hangup;
		}

		switch (conn->state) {
		case LPCSPROX_REPORTING_FAIL:
			goto hangup;
		case LPCSPROX_OPERATIONAL:
			if (!conn)
				break;
			if (pay) {
				lws_dsh_free((void **)&p);

				/*
				 * Did we go below the rx flow threshold for
				 * this dsh?
				 */
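
				/*
				 * This is the other half of the rxflow
				 * hysteresis set up in ss_proxy_onward_rx():
				 * rx was disabled once the dsh went above
				 * ..._rxflow_on_above, and is re-enabled here
				 * once it drains below ..._rxflow_off_below.
				 */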

				if (conn->onward_in_flow_control &&
				    conn->ss->policy->proxy_buflen_rxflow_on_above &&
				    conn->ss->wsi &&
				    lws_dsh_get_size(conn->dsh, KIND_SS_TO_P) <
				      conn->ss->policy->proxy_buflen_rxflow_off_below) {
					lwsl_info("%s: %s: rxflow enabling rx (%lu / %lu, lwm %lu)\n", __func__,
							lws_wsi_tag(conn->ss->wsi),
							(unsigned long)lws_dsh_get_size(conn->dsh, KIND_SS_TO_P),
							(unsigned long)conn->ss->policy->proxy_buflen,
							(unsigned long)conn->ss->policy->proxy_buflen_rxflow_off_below);
					/*
					 * Resume taking in rx once we are
					 * below the low threshold
					 */
					lws_rx_flow_control(conn->ss->wsi,
							    LWS_RXFLOW_ALLOW);
					conn->onward_in_flow_control = 0;
				}
			}
			if (!lws_dsh_get_head(conn->dsh, KIND_SS_TO_P,
					     (void **)&p, &si)) {
				if (!lws_send_pipe_choked(wsi)) {
					cp = p;
					pay = 1;
					n = (int)si;
					goto again;
				}
				lws_callback_on_writable(wsi);
			}
			break;
		default:
			break;
		}
		break;

	default:
		break;
	}

	return lws_callback_http_dummy(wsi, reason, user, in, len);

hangup:
	/* hang up on him */

	return -1;
}

static const struct lws_protocols protocols[] = {
	{
		"ssproxy-protocol",
		callback_ss_proxy,
		sizeof(struct raw_pss),
		2048, 2048, NULL, 0
	},
	{ NULL, NULL, 0, 0, 0, NULL, 0 }
};

/*
 * called from create_context()
 */

int
lws_ss_proxy_create(struct lws_context *context, const char *bind, int port)
{
	struct lws_context_creation_info info;

	memset(&info, 0, sizeof(info));

	info.vhost_name			= "ssproxy";
	info.options = LWS_SERVER_OPTION_ADOPT_APPLY_LISTEN_ACCEPT_CONFIG |
			LWS_SERVER_OPTION_SS_PROXY;
	info.port = port;
	if (!port) {
		if (!bind)
#if defined(__linux__)
			bind = "@proxy.ss.lws";
#else
			bind = "/tmp/proxy.ss.lws";
#endif
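		/*
		 * The leading '@' selects the Linux abstract unix socket
		 * namespace, so no filesystem node is created for the
		 * default proxy address.
		 */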
		info.options |= LWS_SERVER_OPTION_UNIX_SOCK;
	}
	info.iface			= bind;
#if defined(__linux__)
	info.unix_socket_perms		= "root:root";
#else
#endif
	info.listen_accept_role		= "raw-skt";
	info.listen_accept_protocol	= "ssproxy-protocol";
	info.protocols			= protocols;

	if (!lws_create_vhost(context, &info)) {
		lwsl_err("%s: Failed to create ss proxy vhost\n", __func__);

		return 1;
	}

	return 0;
}
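
/*
 * For illustration only: a minimal sketch of how a proxy process might bring
 * this up, assuming a context has already been created with the wanted
 * policy.  Passing NULL / 0 selects the default unix domain socket address
 * shown above ("@proxy.ss.lws" on Linux, "/tmp/proxy.ss.lws" elsewhere).
 */
#if 0
static int
example_start_ss_proxy(struct lws_context *context)
{
	/* listen on the default unix domain socket for this platform */
	if (lws_ss_proxy_create(context, NULL, 0))
		return 1; /* failed to create the proxy vhost */

	/* ... then run the usual lws_service() event loop on the context */

	return 0;
}
#endif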