/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "private-lib-core.h"

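/*
 * Deliver the "writeable" notification to the user protocol callback: the
 * callback reason comes from the role ops' writeable_cb table, picking the
 * server- or client-side reason according to which side this wsi is.
 */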
int
lws_callback_as_writeable(struct lws *wsi)
{
	int n, m;

	n = wsi->role_ops->writeable_cb[lwsi_role_server(wsi)];
	m = user_callback_handle_rxflow(wsi->a.protocol->callback,
					wsi, (enum lws_callback_reasons) n,
					wsi->user_space, NULL, 0);

	return m;
}

int
lws_handle_POLLOUT_event(struct lws *wsi, struct lws_pollfd *pollfd)
{
	volatile struct lws *vwsi = (volatile struct lws *)wsi;
	int n;

	// lwsl_notice("%s: %s\n", __func__, lws_wsi_tag(wsi));

	if (wsi->socket_is_permanently_unusable)
		return 0;

	vwsi->leave_pollout_active = 0;
	vwsi->handling_pollout = 1;
	/*
	 * if another thread wants POLLOUT on us, from here on while
	 * handling_pollout is set, he will only set leave_pollout_active.
	 * If we are going to disable POLLOUT, we will check that first.
	 */
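
	/*
	 * Sketch of that handshake (an assumed second service thread asking
	 * for a writeable callback on this wsi while we are in here):
	 *
	 *  this thread                        other thread
	 *  handling_pollout = 1
	 *  ...issue pending writes...         sees handling_pollout set, so
	 *                                     only sets leave_pollout_active
	 *  samples leave_pollout_active
	 *  before disabling POLLOUT, keeps
	 *  POLLOUT enabled if it was set
	 *  handling_pollout = 0
	 */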
	wsi->could_have_pending = 0; /* clear back-to-back write detection */

	/*
	 * The user callback is the lowest priority to get these
	 * notifications; the other pending things below must go out first,
	 * since they cannot be reordered without corrupting the protocol.
	 *
	 * Priority 1: pending truncated sends are incomplete ws fragments
	 *	       If anything else sent first the protocol would be
	 *	       corrupted.
	 *
	 *	       These are post- any compression transform
	 */

	if (lws_has_buffered_out(wsi)) {
		//lwsl_notice("%s: completing partial\n", __func__);
		if (lws_issue_raw(wsi, NULL, 0) < 0) {
			lwsl_info("%s signalling to close\n", __func__);
			goto bail_die;
		}
		/* leave POLLOUT active either way */
		goto bail_ok;
	} else if (lwsi_state(wsi) == LRS_FLUSHING_BEFORE_CLOSE) {
		wsi->socket_is_permanently_unusable = 1;
		goto bail_die; /* retry closing now */
	}

	/* Priority 2: pre- compression transform */

#if defined(LWS_WITH_HTTP_STREAM_COMPRESSION)
	if (wsi->http.comp_ctx.buflist_comp ||
	    wsi->http.comp_ctx.may_have_more) {
		enum lws_write_protocol wp = LWS_WRITE_HTTP;

		lwsl_info("%s: completing comp partial (buflist_comp %p, may %d)\n",
				__func__, wsi->http.comp_ctx.buflist_comp,
				wsi->http.comp_ctx.may_have_more
				);

		if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_write_role_protocol) &&
		    lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_write_role_protocol).
					write_role_protocol(wsi, NULL, 0, &wp) < 0) {
			lwsl_info("%s signalling to close\n", __func__);
			goto bail_die;
		}
		lws_callback_on_writable(wsi);

		goto bail_ok;
	}
#endif

#ifdef LWS_WITH_CGI
	/*
	 * A cgi connection's wire protocol remains h1 or h2.  He is just
	 * getting his data from his child cgis.
	 */
	if (wsi->http.cgi) {
		/* also one shot */
		if (pollfd)
			if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
				lwsl_info("failed at set pollfd\n");
				return 1;
			}
		goto user_service_go_again;
	}
#endif

	/* if we got here, we should have wire protocol ops set on the wsi */
	assert(wsi->role_ops);

	if (!lws_rops_fidx(wsi->role_ops, LWS_ROPS_handle_POLLOUT))
		goto bail_ok;

	n = lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_handle_POLLOUT).
							handle_POLLOUT(wsi);
	switch (n) {
	case LWS_HP_RET_BAIL_OK:
		goto bail_ok;
	case LWS_HP_RET_BAIL_DIE:
		goto bail_die;
	case LWS_HP_RET_DROP_POLLOUT:
	case LWS_HP_RET_USER_SERVICE:
		break;
	default:
		assert(0);
	}
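
	/*
	 * How the role's verdict above is handled: BAIL_OK / BAIL_DIE return
	 * immediately via bail_ok / bail_die; DROP_POLLOUT only goes as far
	 * as clearing the one-shot POLLOUT below before bailing ok, without
	 * the user callback; USER_SERVICE carries on to the user writeable
	 * callback.
	 */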

	/* one shot */

	if (pollfd) {
		int eff = vwsi->leave_pollout_active;

		if (!eff) {
			if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
				lwsl_info("failed at set pollfd\n");
				goto bail_die;
			}
		}

		vwsi->handling_pollout = 0;

		/* cannot get leave_pollout_active set after the above */
		if (!eff && wsi->leave_pollout_active) {
			/*
			 * got set inbetween sampling eff and clearing
			 * handling_pollout, force POLLOUT on
			 */
			lwsl_debug("leave_pollout_active\n");
			if (lws_change_pollfd(wsi, 0, LWS_POLLOUT)) {
				lwsl_info("failed at set pollfd\n");
				goto bail_die;
			}
		}

		vwsi->leave_pollout_active = 0;
	}

	if (lwsi_role_client(wsi) && !wsi->hdr_parsing_completed &&
	     lwsi_state(wsi) != LRS_H2_WAITING_TO_SEND_HEADERS &&
	     lwsi_state(wsi) != LRS_ISSUE_HTTP_BODY)
		goto bail_ok;

	if (n == LWS_HP_RET_DROP_POLLOUT)
		goto bail_ok;

#ifdef LWS_WITH_CGI
user_service_go_again:
#endif

	if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_perform_user_POLLOUT)) {
		if (lws_rops_func_fidx(wsi->role_ops,
				       LWS_ROPS_perform_user_POLLOUT).
						perform_user_POLLOUT(wsi) == -1)
			goto bail_die;
		else
			goto bail_ok;
	}

	lwsl_debug("%s: %s: non mux: wsistate 0x%lx, ops %s\n", __func__,
		   lws_wsi_tag(wsi),
		   (unsigned long)wsi->wsistate, wsi->role_ops->name);

	vwsi = (volatile struct lws *)wsi;
	vwsi->leave_pollout_active = 0;

	n = lws_callback_as_writeable(wsi);
	vwsi->handling_pollout = 0;

	if (vwsi->leave_pollout_active)
		if (lws_change_pollfd(wsi, 0, LWS_POLLOUT))
			goto bail_die;

	return n;

	/*
	 * since these don't disable the POLLOUT, they are always doing the
	 * right thing for leave_pollout_active whether it was set or not.
	 */

bail_ok:
	vwsi->handling_pollout = 0;
	vwsi->leave_pollout_active = 0;

	return 0;

bail_die:
	vwsi->handling_pollout = 0;
	vwsi->leave_pollout_active = 0;

	return -1;
}

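/*
 * Stash rx that the connection cannot process right now (it is
 * flowcontrolled) on the wsi buflist.  Returns LWSRXFC_CACHED if a new
 * buflist entry was made, LWSRXFC_ADDITIONAL if appended behind
 * already-buffered rx, LWSRXFC_TRIMMED if the existing head segment was
 * trimmed to reflect the unused remainder, or LWSRXFC_ERROR if the append
 * failed (OOM).
 */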
int
lws_rxflow_cache(struct lws *wsi, unsigned char *buf, size_t n, size_t len)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
	uint8_t *buffered;
	size_t blen;
	int ret = LWSRXFC_CACHED, m;

	/* his RX is flowcontrolled, don't send remaining now */
	blen = lws_buflist_next_segment_len(&wsi->buflist, &buffered);
	if (blen) {
		if (buf >= buffered && buf + len <= buffered + blen &&
		    blen != (size_t)len) {
			/*
			 * rxflow arrived while we were still spilling the
			 * previous rxflow
			 *
			 * len indicates how much was left unused, so trim the
			 * head buflist segment to match that situation
			 */

			lws_buflist_use_segment(&wsi->buflist, blen - len);
			lwsl_debug("%s: trim existing rxflow %d -> %d\n",
					__func__, (int)blen, (int)len);

			return LWSRXFC_TRIMMED;
		}
		ret = LWSRXFC_ADDITIONAL;
	}

	/* a new rxflow, buffer it and warn caller */

	lwsl_debug("%s: rxflow append %d\n", __func__, (int)(len - n));
	m = lws_buflist_append_segment(&wsi->buflist, buf + n, len - n);

	if (m < 0)
		return LWSRXFC_ERROR;
	if (m) {
		lwsl_debug("%s: added %s to rxflow list\n", __func__, lws_wsi_tag(wsi));
		if (lws_dll2_is_detached(&wsi->dll_buflist))
			lws_dll2_add_head(&wsi->dll_buflist, &pt->dll_buflist_owner);
	}

	return ret;
}

/* this is used by the platform service code to stop us waiting for network
 * activity in poll() when we have something that already needs service
 */
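/*
 * A minimal sketch of how an external poll() loop could consume this,
 * assuming the application keeps its own pollfd array (fds / count_fds are
 * illustrative names, not lws api):
 *
 *	n = poll(fds, count_fds,
 *		 lws_service_adjust_timeout(context, 1000, 0));
 *	for (i = 0; i < count_fds; i++)
 *		if (fds[i].revents)
 *			lws_service_fd(context, &fds[i]);
 *
 * A return of 0 means something is already pending, so the wait should not
 * block on the network.
 */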

int
lws_service_adjust_timeout(struct lws_context *context, int timeout_ms, int tsi)
{
	struct lws_context_per_thread *pt;

	if (!context)
		return 1;

	if (!context->protocol_init_done)
		if (lws_protocol_init(context))
			return 1;

#if defined(LWS_WITH_SYS_SMD)
	if (!tsi && lws_smd_message_pending(context)) {
		lws_smd_msg_distribute(context);
		if (lws_smd_message_pending(context))
			return 0;
	}
#endif

	pt = &context->pt[tsi];

#if defined(LWS_WITH_EXTERNAL_POLL)
	{
		lws_usec_t u = __lws_sul_service_ripe(pt->pt_sul_owner,
				      LWS_COUNT_PT_SUL_OWNERS, lws_now_usecs());
		if (u < (lws_usec_t)timeout_ms * (lws_usec_t)1000)
			timeout_ms = (int)(u / 1000);
	}
#endif

	/*
	 * Figure out if we really want to wait in poll()... we only need to
	 * wait if there is really nothing already waiting to be serviced and
	 * we must wait for something to come from the network
	 */
#if defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)
	/* 1) if we know we are draining rx ext, do not wait in poll */
	if (pt->ws.rx_draining_ext_list)
		return 0;
#endif

#if defined(LWS_WITH_TLS)
	/* 2) if we know we have non-network pending data,
	 *    do not wait in poll */

	if (pt->context->tls_ops &&
	    pt->context->tls_ops->fake_POLLIN_for_buffered &&
	    pt->context->tls_ops->fake_POLLIN_for_buffered(pt))
		return 0;
#endif

	/*
	 * 4) If there is any wsi with rxflow buffered and in a state to process
	 *    it, we should not wait in poll
	 */

	lws_start_foreach_dll(struct lws_dll2 *, d, pt->dll_buflist_owner.head) {
		struct lws *wsi = lws_container_of(d, struct lws, dll_buflist);

		if (!lws_is_flowcontrolled(wsi) &&
		     lwsi_state(wsi) != LRS_DEFERRING_ACTION)
			return 0;

	/*
	 * 5) If any guys with http compression to spill, we shouldn't wait in
	 *    poll but hurry along and service them
	 */

	} lws_end_foreach_dll(d);

	return timeout_ms;
}

/*
 * POLLIN said there is something... we must read it, and either use it
 * directly; or, if other material is already on the buflist, append it there
 * and return the buflist head material instead.
 */
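/*
 * Returns -1 on unrecoverable read error, 0 when ebuf describes data freshly
 * read from the connection, or 1 when ebuf has instead been pointed at
 * buffered material from the head of the wsi buflist.
 */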
int
lws_buflist_aware_read(struct lws_context_per_thread *pt, struct lws *wsi,
		       struct lws_tokens *ebuf, char fr, const char *hint)
{
	int n, e, bns;
	uint8_t *ep, *b;

	// lwsl_debug("%s: %s: %s: prior %d\n", __func__, lws_wsi_tag(wsi), hint, prior);
	// lws_buflist_describe(&wsi->buflist, wsi, __func__);

	(void)hint;
	if (!ebuf->token)
		ebuf->token = pt->serv_buf + LWS_PRE;
	if (!ebuf->len ||
	    (unsigned int)ebuf->len > wsi->a.context->pt_serv_buf_size - LWS_PRE)
		ebuf->len = (int)(wsi->a.context->pt_serv_buf_size - LWS_PRE);

	e = ebuf->len;
	ep = ebuf->token;

	/* h2 or muxed stream... must force the read due to HOL blocking */

	if (wsi->mux_substream)
		fr = 1;

	/* there's something on the buflist? */

	bns = (int)lws_buflist_next_segment_len(&wsi->buflist, &ebuf->token);
	b = ebuf->token;

	if (!fr && bns)
		goto buflist_material;

	/* we're going to read something */

	ebuf->token = ep;
	ebuf->len = n = lws_ssl_capable_read(wsi, ep, (size_t)e);

	lwsl_debug("%s: %s: %s: ssl_capable_read %d\n", __func__,
			lws_wsi_tag(wsi), hint, ebuf->len);

	if (!bns && /* only acknowledge error if no buflist content to drain */
	    n == LWS_SSL_CAPABLE_ERROR) {
		lwsl_debug("%s: SSL_CAPABLE_ERROR\n", __func__);
		return -1;
	}

	if (n <= 0 && bns)
		/*
		 * There wasn't anything to read yet, but there's something
		 * on the buflist to give him
		 */
		goto buflist_material;

	/* we read something */

	if (fr && bns) {
		/*
		 * Stash what we read, since there's earlier buflist material
		 */

		n = lws_buflist_append_segment(&wsi->buflist, ebuf->token, (size_t)ebuf->len);
		if (n < 0)
			return -1;
		if (n && lws_dll2_is_detached(&wsi->dll_buflist))
			lws_dll2_add_head(&wsi->dll_buflist,
					  &pt->dll_buflist_owner);

		goto buflist_material;
	}

	/*
	 * directly return what we read
	 */

	return 0;

buflist_material:

	ebuf->token = b;
	if (e < bns)
		/* restrict to e, if more than e available */
		ebuf->len = e;
	else
		ebuf->len = bns;

	return 1; /* from buflist */
}

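/*
 * Companion to lws_buflist_aware_read() above: report how much of the ebuf
 * that was handed out actually got consumed.  Buffered material has the used
 * part marked as consumed on the buflist; a partially-consumed fresh read
 * has its unused remainder appended to the buflist so it is not lost.
 */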
int
lws_buflist_aware_finished_consuming(struct lws *wsi, struct lws_tokens *ebuf,
				     int used, int buffered, const char *hint)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
	int m;

	//lwsl_debug("%s %s consuming buffered %d used %zu / %zu\n", __func__, hint,
	//		buffered, (size_t)used, (size_t)ebuf->len);
	// lws_buflist_describe(&wsi->buflist, wsi, __func__);

	/* it's in the buflist; we didn't use any */

	if (!used && buffered)
		return 0;

	if (used && buffered) {
		if (wsi->buflist) {
			m = (int)lws_buflist_use_segment(&wsi->buflist, (size_t)used);
			// lwsl_notice("%s: used %d, next %d\n", __func__, used, m);
			// lws_buflist_describe(&wsi->buflist, wsi, __func__);
			if (m)
				return 0;
		}

		lwsl_info("%s: removed %p from dll_buflist\n", __func__, wsi);
		lws_dll2_remove(&wsi->dll_buflist);

		return 0;
	}

	/* any remainder goes on the buflist */

	if (used < ebuf->len && ebuf->len >= 0 && used >= 0) {
		// lwsl_notice("%s %s bac appending %d\n", __func__, hint,
		//		ebuf->len - used);
		m = lws_buflist_append_segment(&wsi->buflist,
					       ebuf->token + used,
					       (unsigned int)(ebuf->len - used));
		if (m < 0)
			return 1; /* OOM */
		if (m) {
			lwsl_debug("%s: added %s to rxflow list\n",
				   __func__, lws_wsi_tag(wsi));
			if (lws_dll2_is_detached(&wsi->dll_buflist))
				lws_dll2_add_head(&wsi->dll_buflist,
					 &pt->dll_buflist_owner);
		}
		// lws_buflist_describe(&wsi->buflist, wsi, __func__);
	}

	return 0;
}

void
lws_service_do_ripe_rxflow(struct lws_context_per_thread *pt)
{
	struct lws_pollfd pfd;

	if (!pt->dll_buflist_owner.head)
		return;

	/*
	 * service all guys with pending rxflow that reached a state they can
	 * accept the pending data
	 */

	lws_pt_lock(pt, __func__);

	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   pt->dll_buflist_owner.head) {
		struct lws *wsi = lws_container_of(d, struct lws, dll_buflist);

		pfd.events = LWS_POLLIN;
		pfd.revents = LWS_POLLIN;
		pfd.fd = -1;

		lwsl_debug("%s: rxflow processing: %s fc=%d, 0x%lx\n", __func__,
			   lws_wsi_tag(wsi), lws_is_flowcontrolled(wsi),
			   (unsigned long)wsi->wsistate);

		if (!lws_is_flowcontrolled(wsi) &&
		    lwsi_state(wsi) != LRS_DEFERRING_ACTION) {
			pt->inside_lws_service = 1;

			if (lws_rops_func_fidx(wsi->role_ops,
					       LWS_ROPS_handle_POLLIN).
						handle_POLLIN(pt, wsi, &pfd) ==
						   LWS_HPI_RET_PLEASE_CLOSE_ME)
				lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
						"close_and_handled");
			pt->inside_lws_service = 0;
		}

	} lws_end_foreach_dll_safe(d, d1);

	lws_pt_unlock(pt);
}

/*
 * guys that need POLLIN service again without waiting for network action
 * can force POLLIN here if not flowcontrolled, so they will get service.
 *
 * Return nonzero if anybody got their POLLIN faked
 */
int
lws_service_flag_pending(struct lws_context *context, int tsi)
{
	struct lws_context_per_thread *pt;
	int forced = 0;

	if (!context)
		return 1;

	pt = &context->pt[tsi];

	lws_pt_lock(pt, __func__);

	/*
	 * 1) If there is any wsi with a buflist and in a state to process
	 *    it, we should not wait in poll
	 */

	lws_start_foreach_dll(struct lws_dll2 *, d, pt->dll_buflist_owner.head) {
		struct lws *wsi = lws_container_of(d, struct lws, dll_buflist);

		if (!lws_is_flowcontrolled(wsi) &&
		     lwsi_state(wsi) != LRS_DEFERRING_ACTION) {
			forced = 1;
			break;
		}
	} lws_end_foreach_dll(d);

#if defined(LWS_ROLE_WS)
	forced |= lws_rops_func_fidx(&role_ops_ws,
				     LWS_ROPS_service_flag_pending).
					service_flag_pending(context, tsi);
#endif

#if defined(LWS_WITH_TLS)
	/*
	 * 2) For all guys with buffered SSL read data already saved up, if they
	 * are not flowcontrolled, fake their POLLIN status so they'll get
	 * service to use up the buffered incoming data, even though their
	 * network socket may have nothing
	 */
	lws_start_foreach_dll_safe(struct lws_dll2 *, p, p1,
			lws_dll2_get_head(&pt->tls.dll_pending_tls_owner)) {
		struct lws *wsi = lws_container_of(p, struct lws,
						   tls.dll_pending_tls);

		if (wsi->position_in_fds_table >= 0) {

			pt->fds[wsi->position_in_fds_table].revents = (short)(
					pt->fds[wsi->position_in_fds_table].revents |
				(pt->fds[wsi->position_in_fds_table].events &
								LWS_POLLIN));
			if (pt->fds[wsi->position_in_fds_table].revents &
								LWS_POLLIN)
				/*
				 * We're not going to remove the wsi from the
				 * pending tls list.  The processing will have
				 * to do it if he exhausts the pending tls.
				 */
				forced = 1;
		}

	} lws_end_foreach_dll_safe(p, p1);
#endif

	lws_pt_unlock(pt);

	return forced;
}

int
lws_service_fd_tsi(struct lws_context *context, struct lws_pollfd *pollfd,
		   int tsi)
{
	struct lws_context_per_thread *pt;
	struct lws *wsi;

	if (!context || context->service_no_longer_possible)
		return -1;

	pt = &context->pt[tsi];

	if (pt->event_loop_pt_unused)
		return -1;

	if (!pollfd) {
		/*
		 * calling with NULL pollfd for periodic background processing
		 * is no longer needed and is now illegal.
		 */
		assert(pollfd);
		return -1;
	}
	assert(lws_socket_is_valid(pollfd->fd));

	/* no, here to service a socket descriptor */
	wsi = wsi_from_fd(context, pollfd->fd);
	if (!wsi)
		/* not lws connection ... leave revents alone and return */
		return 0;

#if LWS_MAX_SMP > 1
	if (wsi->undergoing_init_from_other_pt)
		/*
		 * Temporary situation that other service thread is initializing
		 * this wsi right now for use on our service thread.
		 */
		return 0;
#endif

	/*
	 * So that the caller can tell we handled it, past here we need to
	 * zero down pollfd->revents after handling
	 */

	/*
	 * Whatever the situation with buffered rx packets, or rx explicitly
	 * read-and-buffered to be handled before we are willing to acknowledge
	 * the socket is gone, any sign of HUP always immediately means no more
	 * tx is possible.
	 */

	if ((pollfd->revents & LWS_POLLHUP) == LWS_POLLHUP) {
		wsi->socket_is_permanently_unusable = 1;

		if (!(pollfd->revents & pollfd->events & LWS_POLLIN)) {

			/* ... there are no pending rx packets waiting... */

			if (!lws_buflist_total_len(&wsi->buflist)) {

				/*
				 * ... nothing stashed in the buflist either,
				 * so acknowledge the wsi is done
				 */

				lwsl_debug("Session Socket %s (fd=%d) dead\n",
					   lws_wsi_tag(wsi), pollfd->fd);

				goto close_and_handled;
			}

			/*
			 * ... in fact we have some unread rx buffered in the
			 * input buflist.  Hold off the closing a bit...
			 */

			lws_set_timeout(wsi, PENDING_TIMEOUT_CLOSE_ACK, 3);
		}
	}

#ifdef _WIN32
	if (pollfd->revents & LWS_POLLOUT)
		wsi->sock_send_blocking = FALSE;
#endif

#if defined(LWS_WITH_TLS)
	if (lwsi_state(wsi) == LRS_SHUTDOWN &&
	    lws_is_ssl(wsi) && wsi->tls.ssl) {
		switch (__lws_tls_shutdown(wsi)) {
		case LWS_SSL_CAPABLE_DONE:
		case LWS_SSL_CAPABLE_ERROR:
			goto close_and_handled;

		case LWS_SSL_CAPABLE_MORE_SERVICE_READ:
		case LWS_SSL_CAPABLE_MORE_SERVICE_WRITE:
		case LWS_SSL_CAPABLE_MORE_SERVICE:
			goto handled;
		}
	}
#endif
	wsi->could_have_pending = 0; /* clear back-to-back write detection */
	pt->inside_lws_service = 1;

	/* okay, what we came here to do... */

	/* if we got here, we should have wire protocol ops set on the wsi */
	assert(wsi->role_ops);

	// lwsl_notice("%s: %s: wsistate 0x%x\n", __func__, wsi->role_ops->name,
	//	    wsi->wsistate);

	switch (lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_handle_POLLIN).
					       handle_POLLIN(pt, wsi, pollfd)) {
	case LWS_HPI_RET_WSI_ALREADY_DIED:
		pt->inside_lws_service = 0;
		return 1;
	case LWS_HPI_RET_HANDLED:
		break;
	case LWS_HPI_RET_PLEASE_CLOSE_ME:
		//lwsl_notice("%s: %s pollin says please close me\n", __func__,
		//		wsi->role_ops->name);
close_and_handled:
		lwsl_debug("%s: %s: Close and handled\n", __func__, lws_wsi_tag(wsi));
		lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
				   "close_and_handled");
#if defined(_DEBUG) && defined(LWS_WITH_LIBUV)
		/*
		 * confirm close has no problem being called again while
		 * it waits for libuv service to complete the first async
		 * close
		 */
		if (!strcmp(context->event_loop_ops->name, "libuv"))
			lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
					   "close_and_handled uv repeat test");
#endif
		/*
		 * pollfd may point to something else after the close
		 * due to pollfd swapping scheme on delete on some platforms
		 * we can't clear revents now because it'd be the wrong guy's
		 * revents
		 */
		pt->inside_lws_service = 0;
		return 1;
	default:
		assert(0);
	}
#if defined(LWS_WITH_TLS)
handled:
#endif
	pollfd->revents = 0;
	pt->inside_lws_service = 0;

	return 0;
}

int
lws_service_fd(struct lws_context *context, struct lws_pollfd *pollfd)
{
	return lws_service_fd_tsi(context, pollfd, 0);
}

int
lws_service(struct lws_context *context, int timeout_ms)
{
	struct lws_context_per_thread *pt;
	int n;

	if (!context)
		return 1;

	pt = &context->pt[0];
	pt->inside_service = 1;

	if (context->event_loop_ops->run_pt) {
		/* we are configured for an event loop */
		context->event_loop_ops->run_pt(context, 0);

		pt->inside_service = 0;

		return 1;
	}
	n = lws_plat_service(context, timeout_ms);

	if (n != -1)
		pt->inside_service = 0;

	return n;
}
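
/*
 * A minimal sketch of the default (no event library) service loop built on
 * the api above, as used by the minimal examples ("interrupted" stands for
 * whatever exit condition the application tracks):
 *
 *	while (n >= 0 && !interrupted)
 *		n = lws_service(context, 0);
 *
 *	lws_context_destroy(context);
 */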

int
lws_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
{
	struct lws_context_per_thread *pt;
	int n;

	if (!context)
		return 1;

	pt = &context->pt[tsi];
	pt->inside_service = 1;
#if LWS_MAX_SMP > 1
	pt->self = pthread_self();
#endif

	if (context->event_loop_ops->run_pt) {
		/* we are configured for an event loop */
		context->event_loop_ops->run_pt(context, tsi);

		pt->inside_service = 0;

		return 1;
	}

	n = _lws_plat_service_tsi(context, timeout_ms, tsi);

	pt->inside_service = 0;

	return n;
}