1 /*
2 * libwebsockets - small server side websockets and web server implementation
3 *
4 * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include "private-lib-core.h"
26
27 #if defined(LWS_WITH_CLIENT)
28 static int
lws_close_trans_q_leader(struct lws_dll2 * d,void * user)29 lws_close_trans_q_leader(struct lws_dll2 *d, void *user)
30 {
31 struct lws *w = lws_container_of(d, struct lws, dll2_cli_txn_queue);
32
33 __lws_close_free_wsi(w, (enum lws_close_status)-1, "trans q leader closing");
34
35 return 0;
36 }
37 #endif
38
/*
 * Detach a wsi from everything it is registered with and free per-connection
 * allocations, so the wsi can be destroyed (or, for H2 streams, reused)
 * without dangling references.  Safe to call with wsi == NULL.
 */
void
__lws_reset_wsi(struct lws *wsi)
{
	if (!wsi)
		return;

#if defined(LWS_WITH_CLIENT)

	lws_free_set_NULL(wsi->cli_hostname_copy);

#if defined(LWS_WITH_CONMON)

	/* drop any connection-monitoring DNS results and reset counters */
	if (wsi->conmon.dns_results_copy) {
		lws_conmon_addrinfo_destroy(wsi->conmon.dns_results_copy);
		wsi->conmon.dns_results_copy = NULL;
	}

	wsi->conmon.ciu_dns =
		wsi->conmon.ciu_sockconn =
		wsi->conmon.ciu_tls =
		wsi->conmon.ciu_txn_resp = 0;
#endif

	/*
	 * if we have wsi in our transaction queue, if we are closing we
	 * must go through and close all those first
	 */
	if (wsi->a.vhost) {

		/* we are no longer an active client connection that can piggyback */
		lws_dll2_remove(&wsi->dll_cli_active_conns);

		/* close every wsi queued behind us as pipeline followers */
		lws_dll2_foreach_safe(&wsi->dll2_cli_txn_queue_owner, NULL,
				      lws_close_trans_q_leader);

		/*
		 * !!! If we are closing, but we have pending pipelined
		 * transaction results we already sent headers for, that's going
		 * to destroy sync for HTTP/1 and leave H2 stream with no live
		 * swsi.
		 *
		 * However this is normal if we are being closed because the
		 * transaction queue leader is closing.
		 */
		lws_dll2_remove(&wsi->dll2_cli_txn_queue);
	}
#endif

	/* stop waiting for a vhost socket under the vhost lock */
	if (wsi->a.vhost) {
		lws_vhost_lock(wsi->a.vhost);
		lws_dll2_remove(&wsi->vh_awaiting_socket);
		lws_vhost_unlock(wsi->a.vhost);
	}

	/*
	 * Protocol user data may be allocated either internally by lws
	 * or by the user.  We should only free what we allocated.
	 */
	if (wsi->a.protocol && wsi->a.protocol->per_session_data_size &&
	    wsi->user_space && !wsi->user_space_externally_allocated) {
		/* confirm no sul left scheduled in user data itself */
		lws_sul_debug_zombies(wsi->a.context, wsi->user_space,
				      wsi->a.protocol->per_session_data_size, __func__);
		lws_free_set_NULL(wsi->user_space);
	}

	/*
	 * Don't let buflist content or state from the wsi's previous life
	 * carry over to the new life
	 */

	lws_buflist_destroy_all_segments(&wsi->buflist);
	lws_dll2_remove(&wsi->dll_buflist);
	lws_buflist_destroy_all_segments(&wsi->buflist_out);
#if defined(LWS_WITH_UDP)
	if (wsi->udp) {
		/* confirm no sul left scheduled in wsi->udp itself */
		lws_sul_debug_zombies(wsi->a.context, wsi->udp,
				      sizeof(*wsi->udp), "close udp wsi");
		lws_free_set_NULL(wsi->udp);
	}
#endif
	wsi->retry = 0;

#if defined(LWS_WITH_CLIENT)
	/* harmless if already removed above; lws_dll2_remove tolerates it */
	lws_dll2_remove(&wsi->dll2_cli_txn_queue);
	lws_dll2_remove(&wsi->dll_cli_active_conns);
#endif

#if defined(LWS_WITH_SYS_ASYNC_DNS)
	lws_async_dns_cancel(wsi);
#endif

#if defined(LWS_WITH_HTTP_PROXY)
	if (wsi->http.buflist_post_body)
		lws_buflist_destroy_all_segments(&wsi->http.buflist_post_body);
#endif

	/* if we were the vhost's listen wsi, the vhost loses it */
	if (wsi->a.vhost && wsi->a.vhost->lserv_wsi == wsi)
		wsi->a.vhost->lserv_wsi = NULL;
#if defined(LWS_WITH_CLIENT)
	if (wsi->a.vhost)
		lws_dll2_remove(&wsi->dll_cli_active_conns);
#endif

	__lws_same_vh_protocol_remove(wsi);
#if defined(LWS_WITH_CLIENT)
	lws_free_set_NULL(wsi->stash);
	lws_free_set_NULL(wsi->cli_hostname_copy);
#endif

#if defined(LWS_WITH_PEER_LIMITS)
	lws_peer_track_wsi_close(wsi->a.context, wsi->peer);
	wsi->peer = NULL;
#endif

	/* since we will destroy the wsi, make absolutely sure now */

#if defined(LWS_WITH_OPENSSL)
	__lws_ssl_remove_wsi_from_buffered_list(wsi);
#endif
	__lws_wsi_remove_from_sul(wsi);

	/* let the role tear down anything role-specific it holds */
	if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_destroy_role))
		lws_rops_func_fidx(wsi->role_ops,
				   LWS_ROPS_destroy_role).destroy_role(wsi);

#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	__lws_header_table_detach(wsi, 0);
#endif
}
170
171 /* req cx lock */
172
173 void
__lws_free_wsi(struct lws * wsi)174 __lws_free_wsi(struct lws *wsi)
175 {
176 struct lws_vhost *vh;
177
178 if (!wsi)
179 return;
180
181 lws_context_assert_lock_held(wsi->a.context);
182
183 #if defined(LWS_WITH_SECURE_STREAMS)
184 if (wsi->for_ss) {
185
186 #if defined(LWS_WITH_SECURE_STREAMS_PROXY_API)
187 if (wsi->client_bound_sspc) {
188 lws_sspc_handle_t *h = (lws_sspc_handle_t *)
189 wsi->a.opaque_user_data;
190 if (h) {
191 h->cwsi = NULL;
192 wsi->a.opaque_user_data = NULL;
193 }
194 } else
195 #endif
196 {
197 /*
198 * Make certain it is disconnected from the ss by now
199 */
200 lws_ss_handle_t *h = (lws_ss_handle_t *)
201 wsi->a.opaque_user_data;
202
203 if (h) {
204 h->wsi = NULL;
205 wsi->a.opaque_user_data = NULL;
206 }
207 }
208 }
209 #endif
210
211 __lws_reset_wsi(wsi);
212 __lws_wsi_remove_from_sul(wsi);
213
214 vh = wsi->a.vhost;
215
216 if (wsi->a.context->event_loop_ops->destroy_wsi)
217 wsi->a.context->event_loop_ops->destroy_wsi(wsi);
218
219 if (vh)
220 __lws_vhost_unbind_wsi(wsi); /* req cx + vh lock */
221
222 lwsl_debug("%s: %s, tsi fds count %d\n", __func__,
223 lws_wsi_tag(wsi),
224 wsi->a.context->pt[(int)wsi->tsi].fds_count);
225
226 /* confirm no sul left scheduled in wsi itself */
227 lws_sul_debug_zombies(wsi->a.context, wsi, sizeof(wsi), __func__);
228
229 __lws_lc_untag(&wsi->lc);
230 lws_free(wsi);
231 }
232
233
234 void
lws_remove_child_from_any_parent(struct lws * wsi)235 lws_remove_child_from_any_parent(struct lws *wsi)
236 {
237 struct lws **pwsi;
238 int seen = 0;
239
240 if (!wsi->parent)
241 return;
242
243 /* detach ourselves from parent's child list */
244 pwsi = &wsi->parent->child_list;
245 while (*pwsi) {
246 if (*pwsi == wsi) {
247 lwsl_info("%s: detach %s from parent %s\n", __func__,
248 lws_wsi_tag(wsi), lws_wsi_tag(wsi->parent));
249
250 if (wsi->parent->a.protocol)
251 wsi->parent->a.protocol->callback(wsi,
252 LWS_CALLBACK_CHILD_CLOSING,
253 wsi->parent->user_space, wsi, 0);
254
255 *pwsi = wsi->sibling_list;
256 seen = 1;
257 break;
258 }
259 pwsi = &(*pwsi)->sibling_list;
260 }
261 if (!seen)
262 lwsl_err("%s: failed to detach from parent\n", __func__);
263
264 wsi->parent = NULL;
265 }
266
267 #if defined(LWS_WITH_CLIENT)
268 void
lws_inform_client_conn_fail(struct lws * wsi,void * arg,size_t len)269 lws_inform_client_conn_fail(struct lws *wsi, void *arg, size_t len)
270 {
271 lws_addrinfo_clean(wsi);
272
273 if (wsi->already_did_cce)
274 return;
275
276 wsi->already_did_cce = 1;
277
278 if (!wsi->a.protocol)
279 return;
280
281 if (!wsi->client_suppress_CONNECTION_ERROR)
282 wsi->a.protocol->callback(wsi,
283 LWS_CALLBACK_CLIENT_CONNECTION_ERROR,
284 wsi->user_space, arg, len);
285 }
286 #endif
287
/*
 * Free every entry on the wsi's sorted DNS results list.
 * No-op unless built with client support.
 */
void
lws_addrinfo_clean(struct lws *wsi)
{
#if defined(LWS_WITH_CLIENT)
	struct lws_dll2 *d;

	/* keep popping the head until the list is empty */
	while ((d = lws_dll2_get_head(&wsi->dns_sorted_list))) {
		lws_dns_sort_t *sorted = lws_container_of(d, lws_dns_sort_t,
							  list);

		lws_dll2_remove(d);
		lws_free(sorted);
	}
#endif
}
305
306 /* requires cx and pt lock */
307
/*
 * Main close path for a wsi.  Caller must hold the context and pt locks.
 *
 * Depending on connection state this either starts a graceful shutdown
 * (flush pending output, TLS shutdown, half-close + timeout) and returns
 * early, or falls through just_kill_connection to tear the connection down
 * now: cancel timers/DNS, drop role/protocol bindings, remove from the fds
 * table, issue the user CLOSE (or CONNECTION_ERROR) callback, detach any
 * Secure Streams handle, and finally hand off to __lws_close_free_wsi_final().
 */
void
__lws_close_free_wsi(struct lws *wsi, enum lws_close_status reason,
		     const char *caller)
{
	struct lws_context_per_thread *pt;
	const struct lws_protocols *pro;
	struct lws_context *context;
	struct lws *wsi1, *wsi2;
	int n, ccb;

	/*
	 * NOTE(review): logged before the NULL check below; assumes
	 * lws_wsi_tag() tolerates a NULL wsi — confirm
	 */
	lwsl_info("%s: %s: caller: %s\n", __func__, lws_wsi_tag(wsi), caller);

	if (!wsi)
		return;

	lws_access_log(wsi);

	if (!lws_dll2_is_detached(&wsi->dll_buflist)) {
		lwsl_info("%s: %s: going down with stuff in buflist\n",
				__func__, lws_wsi_tag(wsi)); }

	context = wsi->a.context;
	pt = &context->pt[(int)wsi->tsi];

#if defined(LWS_WITH_SYS_METRICS) && \
    (defined(LWS_WITH_CLIENT) || defined(LWS_WITH_SERVER))
	/* wsi level: only reports if dangling caliper */
	if (wsi->cal_conn.mt && wsi->cal_conn.us_start) {
		if ((lws_metrics_priv_to_pub(wsi->cal_conn.mt)->flags) & LWSMTFL_REPORT_HIST) {
			lws_metrics_caliper_report_hist(wsi->cal_conn, (struct lws *)NULL);
		} else {
			lws_metrics_caliper_report(wsi->cal_conn, METRES_NOGO);
			lws_metrics_caliper_done(wsi->cal_conn);
		}
	} else
		lws_metrics_caliper_done(wsi->cal_conn);
#endif

#if defined(LWS_WITH_SYS_ASYNC_DNS)
	/* if we were the shared async DNS wsi, the context loses it */
	if (wsi == context->async_dns.wsi)
		context->async_dns.wsi = NULL;
#endif

	lws_pt_assert_lock_held(pt);

#if defined(LWS_WITH_CLIENT)

	lws_free_set_NULL(wsi->cli_hostname_copy);

	lws_addrinfo_clean(wsi);
#endif

#if defined(LWS_WITH_HTTP2)
	if (wsi->mux_stream_immortal)
		lws_http_close_immortal(wsi);
#endif

	/* if we have children, close them first */
	if (wsi->child_list) {
		wsi2 = wsi->child_list;
		while (wsi2) {
			/* save the sibling: the recursive close frees wsi2 */
			wsi1 = wsi2->sibling_list;
			// wsi2->parent = NULL;
			/* stop it doing shutdown processing */
			wsi2->socket_is_permanently_unusable = 1;
			__lws_close_free_wsi(wsi2, reason,
					     "general child recurse");
			wsi2 = wsi1;
		}
		wsi->child_list = NULL;
	}

#if defined(LWS_ROLE_RAW_FILE)
	/* raw file wsi: no network shutdown dance, close and bail */
	if (wsi->role_ops == &role_ops_raw_file) {
		lws_remove_child_from_any_parent(wsi);
		__remove_wsi_socket_from_fds(wsi);
		if (wsi->a.protocol)
			wsi->a.protocol->callback(wsi, wsi->role_ops->close_cb[0],
						  wsi->user_space, NULL, 0);
		goto async_close;
	}
#endif

	/* snapshot state before we start mutating it, for the est checks below */
	wsi->wsistate_pre_close = wsi->wsistate;

#ifdef LWS_WITH_CGI
	if (wsi->role_ops == &role_ops_cgi) {

		// lwsl_debug("%s: closing stdwsi index %d\n", __func__, (int)wsi->lsp_channel);

		/* we are not a network connection, but a handler for CGI io */
		if (wsi->parent && wsi->parent->http.cgi) {

			/*
			 * We need to keep the logical cgi around so we can
			 * drain it
			 */

			// if (wsi->parent->child_list == wsi && !wsi->sibling_list)
			//	lws_cgi_remove_and_kill(wsi->parent);

			/* end the binding between us and network connection */
			if (wsi->parent->http.cgi && wsi->parent->http.cgi->lsp)
				wsi->parent->http.cgi->lsp->stdwsi[(int)wsi->lsp_channel] =
									NULL;
		}
		wsi->socket_is_permanently_unusable = 1;

		goto just_kill_connection;
	}

	if (wsi->http.cgi)
		lws_cgi_remove_and_kill(wsi);
#endif

#if defined(LWS_WITH_CLIENT)
	lws_free_set_NULL(wsi->stash);
#endif

	/* raw sockets get no polite close protocol either */
	if (wsi->role_ops == &role_ops_raw_skt) {
		wsi->socket_is_permanently_unusable = 1;
		goto just_kill_connection;
	}
#if defined(LWS_WITH_FILE_OPS) && (defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2))
	if (lwsi_role_http(wsi) && lwsi_role_server(wsi) &&
	    wsi->http.fop_fd != NULL)
		lws_vfs_file_close(&wsi->http.fop_fd);
#endif

	if (lwsi_state(wsi) == LRS_DEAD_SOCKET)
		return;

	if (wsi->socket_is_permanently_unusable ||
	    reason == LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY ||
	    lwsi_state(wsi) == LRS_SHUTDOWN)
		goto just_kill_connection;

	/* decide between graceful flush-then-close and immediate kill */
	switch (lwsi_state_PRE_CLOSE(wsi)) {
	case LRS_DEAD_SOCKET:
		return;

	/* we tried the polite way... */
	case LRS_WAITING_TO_SEND_CLOSE:
	case LRS_AWAITING_CLOSE_ACK:
	case LRS_RETURNED_CLOSE:
		goto just_kill_connection;

	case LRS_FLUSHING_BEFORE_CLOSE:
		if (lws_has_buffered_out(wsi)
#if defined(LWS_WITH_HTTP_STREAM_COMPRESSION)
		    || wsi->http.comp_ctx.buflist_comp ||
		    wsi->http.comp_ctx.may_have_more
#endif
		) {
			/* still draining: come back on next writability */
			lws_callback_on_writable(wsi);
			return;
		}
		lwsl_info("%s: %s: end LRS_FLUSHING_BEFORE_CLOSE\n", __func__, lws_wsi_tag(wsi));
		goto just_kill_connection;
	default:
		if (lws_has_buffered_out(wsi)
#if defined(LWS_WITH_HTTP_STREAM_COMPRESSION)
		    || wsi->http.comp_ctx.buflist_comp ||
		    wsi->http.comp_ctx.may_have_more
#endif
		) {
			/* enter flush state, bounded by a 5s timeout */
			lwsl_info("%s: %s: LRS_FLUSHING_BEFORE_CLOSE\n", __func__, lws_wsi_tag(wsi));
			lwsi_set_state(wsi, LRS_FLUSHING_BEFORE_CLOSE);
			__lws_set_timeout(wsi,
				PENDING_FLUSH_STORED_SEND_BEFORE_CLOSE, 5);
			return;
		}
		break;
	}

	/* never got connected: nothing polite to do */
	if (lwsi_state(wsi) == LRS_WAITING_CONNECT ||
	    lwsi_state(wsi) == LRS_WAITING_DNS ||
	    lwsi_state(wsi) == LRS_H1C_ISSUE_HANDSHAKE)
		goto just_kill_connection;

	/* unbind the protocol if it was bound and not yet told */
	if (!wsi->told_user_closed && wsi->user_space && wsi->a.protocol &&
	    wsi->protocol_bind_balance) {
		wsi->a.protocol->callback(wsi,
					  wsi->role_ops->protocol_unbind_cb[
					       !!lwsi_role_server(wsi)],
					  wsi->user_space, (void *)__func__, 0);
		wsi->protocol_bind_balance = 0;
	}

	/*
	 * signal we are closing, lws_write will
	 * add any necessary version-specific stuff.  If the write fails,
	 * no worries we are closing anyway.  If we didn't initiate this
	 * close, then our state has been changed to
	 * LRS_RETURNED_CLOSE and we will skip this.
	 *
	 * Likewise if it's a second call to close this connection after we
	 * sent the close indication to the peer already, we are in state
	 * LRS_AWAITING_CLOSE_ACK and will skip doing this a second time.
	 */

	if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_close_via_role_protocol) &&
	    lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_close_via_role_protocol).
				    close_via_role_protocol(wsi, reason)) {
		/* ("clsoe" typo is in the shipped log string; left as-is) */
		lwsl_info("%s: clsoe_via_role took over: %s (sockfd %d)\n", __func__,
			  lws_wsi_tag(wsi), wsi->desc.sockfd);
		return;
	}

just_kill_connection:

	lwsl_debug("%s: real just_kill_connection A: %s (sockfd %d)\n", __func__,
		   lws_wsi_tag(wsi), wsi->desc.sockfd);

#if defined(LWS_WITH_THREADPOOL)
	lws_threadpool_wsi_closing(wsi);
#endif

#if defined(LWS_WITH_FILE_OPS) && (defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2))
	if (lwsi_role_http(wsi) && lwsi_role_server(wsi) &&
	    wsi->http.fop_fd != NULL)
		lws_vfs_file_close(&wsi->http.fop_fd);
#endif

	lws_sul_cancel(&wsi->sul_connect_timeout);
#if defined(LWS_WITH_SYS_ASYNC_DNS)
	lws_async_dns_cancel(wsi);
#endif

#if defined(LWS_WITH_HTTP_PROXY)
	if (wsi->http.buflist_post_body)
		lws_buflist_destroy_all_segments(&wsi->http.buflist_post_body);
#endif
#if defined(LWS_WITH_UDP)
	if (wsi->udp) {
		/* confirm no sul left scheduled in wsi->udp itself */
		lws_sul_debug_zombies(wsi->a.context, wsi->udp,
				      sizeof(*wsi->udp), "close udp wsi");

		lws_free_set_NULL(wsi->udp);
	}
#endif

	/* let the role do its own kill-time teardown */
	if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_close_kill_connection))
		lws_rops_func_fidx(wsi->role_ops,
				   LWS_ROPS_close_kill_connection).
					    close_kill_connection(wsi, reason);

	n = 0;

	if (!wsi->told_user_closed && wsi->user_space &&
	    wsi->protocol_bind_balance && wsi->a.protocol) {
		lwsl_debug("%s: %s: DROP_PROTOCOL %s\n", __func__, lws_wsi_tag(wsi),
			   wsi->a.protocol ? wsi->a.protocol->name: "NULL");
		if (wsi->a.protocol)
			wsi->a.protocol->callback(wsi,
				wsi->role_ops->protocol_unbind_cb[
				       !!lwsi_role_server(wsi)],
				wsi->user_space, (void *)__func__, 0);
		wsi->protocol_bind_balance = 0;
	}

#if defined(LWS_WITH_CLIENT)
	if ((
#if defined(LWS_ROLE_WS)
		/*
		 * If our goal is a ws upgrade, effectively we did not reach
		 * ESTABLISHED if we did not get the upgrade server reply
		 */
		(lwsi_state(wsi) == LRS_WAITING_SERVER_REPLY &&
		 wsi->role_ops == &role_ops_ws) ||
#endif
		lwsi_state(wsi) == LRS_WAITING_DNS ||
		lwsi_state(wsi) == LRS_WAITING_CONNECT) &&
	    !wsi->already_did_cce && wsi->a.protocol) {
		static const char _reason[] = "closed before established";

		lwsl_debug("%s: closing in unestablished state 0x%x\n",
				__func__, lwsi_state(wsi));
		wsi->socket_is_permanently_unusable = 1;

		lws_inform_client_conn_fail(wsi,
			(void *)_reason, sizeof(_reason));
	}
#endif

	/*
	 * Testing with ab shows that we have to stage the socket close when
	 * the system is under stress... shutdown any further TX, change the
	 * state to one that won't emit anything more, and wait with a timeout
	 * for the POLLIN to show a zero-size rx before coming back and doing
	 * the actual close.
	 */
	if (wsi->role_ops != &role_ops_raw_skt && !lwsi_role_client(wsi) &&
	    lwsi_state(wsi) != LRS_SHUTDOWN &&
	    lwsi_state(wsi) != LRS_UNCONNECTED &&
	    reason != LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY &&
	    !wsi->socket_is_permanently_unusable) {

#if defined(LWS_WITH_TLS)
		if (lws_is_ssl(wsi) && wsi->tls.ssl) {
			/* TLS-level shutdown; outcome doesn't change our path */
			n = 0;
			switch (__lws_tls_shutdown(wsi)) {
			case LWS_SSL_CAPABLE_DONE:
			case LWS_SSL_CAPABLE_ERROR:
			case LWS_SSL_CAPABLE_MORE_SERVICE_READ:
			case LWS_SSL_CAPABLE_MORE_SERVICE_WRITE:
			case LWS_SSL_CAPABLE_MORE_SERVICE:
				break;
			}
		} else
#endif
		{
			lwsl_info("%s: shutdown conn: %s (sk %d, state 0x%x)\n",
				  __func__, lws_wsi_tag(wsi), (int)(lws_intptr_t)wsi->desc.sockfd,
				  lwsi_state(wsi));
			if (!wsi->socket_is_permanently_unusable &&
			    lws_socket_is_valid(wsi->desc.sockfd)) {
				/* half-close: no more TX, wait for peer EOF */
				wsi->socket_is_permanently_unusable = 1;
				n = shutdown(wsi->desc.sockfd, SHUT_WR);
			}
		}
		if (n)
			lwsl_debug("closing: shutdown (state 0x%x) ret %d\n",
				   lwsi_state(wsi), LWS_ERRNO);

		/*
		 * This causes problems on WINCE / ESP32 with disconnection
		 * when the events are half closing connection
		 */
#if !defined(_WIN32_WCE) && !defined(LWS_PLAT_FREERTOS)
		/* libuv: no event available to guarantee completion */
		if (!wsi->socket_is_permanently_unusable &&
		    lws_socket_is_valid(wsi->desc.sockfd) &&
		    lwsi_state(wsi) != LRS_SHUTDOWN &&
		    (context->event_loop_ops->flags & LELOF_ISPOLL)) {
			/* wait for peer's zero-size rx before the real close */
			__lws_change_pollfd(wsi, LWS_POLLOUT, LWS_POLLIN);
			lwsi_set_state(wsi, LRS_SHUTDOWN);
			__lws_set_timeout(wsi, PENDING_TIMEOUT_SHUTDOWN_FLUSH,
					  (int)context->timeout_secs);

			return;
		}
#endif
	}

	lwsl_info("%s: real just_kill_connection: %s (sockfd %d)\n", __func__,
		  lws_wsi_tag(wsi), wsi->desc.sockfd);

#ifdef LWS_WITH_HUBBUB
	if (wsi->http.rw) {
		lws_rewrite_destroy(wsi->http.rw);
		wsi->http.rw = NULL;
	}
#endif

	if (wsi->http.pending_return_headers)
		lws_free_set_NULL(wsi->http.pending_return_headers);

	/*
	 * we won't be servicing or receiving anything further from this guy
	 * delete socket from the internal poll list if still present
	 */
	__lws_ssl_remove_wsi_from_buffered_list(wsi);
	__lws_wsi_remove_from_sul(wsi);

	//if (wsi->told_event_loop_closed) // cgi std close case (dummy-callback)
	//	return;

	/* checking return redundant since we anyway close */
	__remove_wsi_socket_from_fds(wsi);

	lwsi_set_state(wsi, LRS_DEAD_SOCKET);
	lws_buflist_destroy_all_segments(&wsi->buflist);
	lws_dll2_remove(&wsi->dll_buflist);

	if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_close_role))
		lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_close_role).
						  close_role(pt, wsi);

	/* tell the user it's all over for this guy */

	/* ccb: whether to issue the role's CLOSE callback to the user */
	ccb = 0;
	if ((lwsi_state_est_PRE_CLOSE(wsi) ||
	    /* raw skt adopted but didn't complete tls hs should CLOSE */
	    (wsi->role_ops == &role_ops_raw_skt && !lwsi_role_client(wsi)) ||
	     lwsi_state_PRE_CLOSE(wsi) == LRS_WAITING_SERVER_REPLY) &&
	    !wsi->told_user_closed &&
	    wsi->role_ops->close_cb[lwsi_role_server(wsi)]) {
		if (!wsi->upgraded_to_http2 || !lwsi_role_client(wsi))
			ccb = 1;
		/*
		 * The network wsi for a client h2 connection shouldn't
		 * call back for its role: the child stream connections
		 * own the role.  Otherwise h2 will call back closed
		 * one too many times as the children do it and then
		 * the closing network stream.
		 */
	}

	if (!wsi->told_user_closed &&
	    !lws_dll2_is_detached(&wsi->vh_awaiting_socket))
		/*
		 * He's a guy who got started with dns, but failed or is
		 * caught with a shutdown before he got the result.  We have
		 * to issue him a close cb
		 */
		ccb = 1;

	/* (log label says "cce" but this reports the ccb flag) */
	lwsl_info("%s: %s: cce=%d\n", __func__, lws_wsi_tag(wsi), ccb);

	pro = wsi->a.protocol;

	if (wsi->already_did_cce)
		/*
		 * If we handled this by CLIENT_CONNECTION_ERROR, it's
		 * mutually exclusive with CLOSE
		 */
		ccb = 0;

#if defined(LWS_WITH_CLIENT)
	if (!ccb && (lwsi_state_PRE_CLOSE(wsi) & LWSIFS_NOT_EST) &&
		     lwsi_role_client(wsi)) {
		lws_inform_client_conn_fail(wsi, "Closed before conn", 18);
	}
#endif
	if (ccb) {

		/* fall back to the vhost's protocols[0] if none bound */
		if (!wsi->a.protocol && wsi->a.vhost && wsi->a.vhost->protocols)
			pro = &wsi->a.vhost->protocols[0];

		if (pro)
			pro->callback(wsi,
				wsi->role_ops->close_cb[lwsi_role_server(wsi)],
				wsi->user_space, NULL, 0);
		wsi->told_user_closed = 1;
	}

#if defined(LWS_ROLE_RAW_FILE)
async_close:
#endif

#if defined(LWS_WITH_SECURE_STREAMS)
	if (wsi->for_ss) {
		lwsl_debug("%s: for_ss\n", __func__);
		/*
		 * We were adopted for a particular ss, but, eg, we may not
		 * have succeeded with the connection... we are closing which is
		 * good, but we have to invalidate any pointer the related ss
		 * handle may be holding on us
		 */
#if defined(LWS_WITH_SECURE_STREAMS_PROXY_API)

		if (wsi->client_proxy_onward) {
			/*
			 * We are an onward proxied wsi at the proxy,
			 * opaque is proxying "conn", we must remove its pointer
			 * to us since we are destroying
			 */
			lws_proxy_clean_conn_ss(wsi);
		} else

		if (wsi->client_bound_sspc) {
			lws_sspc_handle_t *h = (lws_sspc_handle_t *)wsi->a.opaque_user_data;

			if (h) { // && (h->info.flags & LWSSSINFLAGS_ACCEPTED)) {

#if defined(LWS_WITH_SYS_METRICS)
				/*
				 * If any hanging caliper measurement, dump it, and free any tags
				 */
				lws_metrics_caliper_report_hist(h->cal_txn, (struct lws *)NULL);
#endif

				h->cwsi = NULL;
				//wsi->a.opaque_user_data = NULL;
			}
		} else
#endif
		{
			lws_ss_handle_t *h = (lws_ss_handle_t *)wsi->a.opaque_user_data;

			if (h) { // && (h->info.flags & LWSSSINFLAGS_ACCEPTED)) {

				/*
				 * ss level: only reports if dangling caliper
				 * not already reported
				 */
				lws_metrics_caliper_report_hist(h->cal_txn, wsi);

				h->wsi = NULL;
				wsi->a.opaque_user_data = NULL;

				/* tell the ss it is disconnected; it may ask to die */
				if (h->ss_dangling_connected &&
				    lws_ss_event_helper(h, LWSSSCS_DISCONNECTED) ==
						LWSSSSRET_DESTROY_ME) {

					lws_ss_destroy(&h);
				}
			}
		}
	}
#endif


	lws_remove_child_from_any_parent(wsi);
	wsi->socket_is_permanently_unusable = 1;

	/* event loop may want an async logical close; it calls final later */
	if (wsi->a.context->event_loop_ops->wsi_logical_close)
		if (wsi->a.context->event_loop_ops->wsi_logical_close(wsi))
			return;

	__lws_close_free_wsi_final(wsi);
}
822
823
824 /* cx + vh lock */
825
/*
 * Final stage of closing: actually close the socket fd (unless this is a
 * shadow wsi or TLS asks us to defer via lws_ssl_close), issue the
 * LWS_CALLBACK_WSI_DESTROY notification, release CGI state, and free the
 * wsi.  Caller must hold the context and vhost locks.
 */
void
__lws_close_free_wsi_final(struct lws *wsi)
{
	int n;

	if (!wsi->shadow &&
	    lws_socket_is_valid(wsi->desc.sockfd) && !lws_ssl_close(wsi)) {
		lwsl_debug("%s: wsi %s: fd %d\n", __func__, lws_wsi_tag(wsi),
				wsi->desc.sockfd);
		n = compatible_close(wsi->desc.sockfd);
		if (n)
			lwsl_debug("closing: close ret %d\n", LWS_ERRNO);

		/* scrub every table that may still reference the fd */
		__remove_wsi_socket_from_fds(wsi);
		if (lws_socket_is_valid(wsi->desc.sockfd))
			delete_from_fd(wsi->a.context, wsi->desc.sockfd);

#if !defined(LWS_PLAT_FREERTOS) && !defined(WIN32) && !defined(LWS_PLAT_OPTEE)
		delete_from_fdwsi(wsi->a.context, wsi);
#endif

		sanity_assert_no_sockfd_traces(wsi->a.context, wsi->desc.sockfd);

		wsi->desc.sockfd = LWS_SOCK_INVALID;
	}

	/* outermost destroy notification for wsi (user_space still intact) */
	if (wsi->a.vhost)
		wsi->a.vhost->protocols[0].callback(wsi, LWS_CALLBACK_WSI_DESTROY,
						    wsi->user_space, NULL, 0);

#ifdef LWS_WITH_CGI
	if (wsi->http.cgi) {
		lws_spawn_piped_destroy(&wsi->http.cgi->lsp);
		lws_sul_cancel(&wsi->http.cgi->sul_grace);
		lws_free_set_NULL(wsi->http.cgi);
	}
#endif

#if defined(LWS_WITH_SYS_FAULT_INJECTION)
	lws_fi_destroy(&wsi->fic);
#endif

	__lws_wsi_remove_from_sul(wsi);
	sanity_assert_no_wsi_traces(wsi->a.context, wsi);
	__lws_free_wsi(wsi);
}
873
874
875 void
lws_close_free_wsi(struct lws * wsi,enum lws_close_status reason,const char * caller)876 lws_close_free_wsi(struct lws *wsi, enum lws_close_status reason, const char *caller)
877 {
878 struct lws_context *cx = wsi->a.context;
879 struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
880
881 lws_context_lock(cx, __func__);
882
883 lws_pt_lock(pt, __func__);
884 /* may destroy vhost, cannot hold vhost lock outside it */
885 __lws_close_free_wsi(wsi, reason, caller);
886 lws_pt_unlock(pt);
887
888 lws_context_unlock(cx);
889 }
890
891
892