1 /*
2  * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
3  *
4  * This Source Code Form is subject to the terms of the Mozilla Public
5  * License, v. 2.0. If a copy of the MPL was not distributed with this
6  * file, You can obtain one at http://mozilla.org/MPL/2.0/.
7  *
8  * See the COPYRIGHT file distributed with this work for additional
9  * information regarding copyright ownership.
10  */
11 
12 #pragma once
13 
14 #include <unistd.h>
15 #include <uv.h>
16 
17 #include <isc/astack.h>
18 #include <isc/atomic.h>
19 #include <isc/buffer.h>
20 #include <isc/condition.h>
21 #include <isc/magic.h>
22 #include <isc/mem.h>
23 #include <isc/netmgr.h>
24 #include <isc/queue.h>
25 #include <isc/quota.h>
26 #include <isc/random.h>
27 #include <isc/refcount.h>
28 #include <isc/region.h>
29 #include <isc/result.h>
30 #include <isc/sockaddr.h>
31 #include <isc/stats.h>
32 #include <isc/thread.h>
33 #include <isc/util.h>
34 
35 #include "uv-compat.h"
36 
37 #define ISC_NETMGR_TID_UNKNOWN -1
38 
39 #if !defined(WIN32)
40 /*
41  * New versions of libuv support recvmmsg on unices.
42  * Since recvbuf is only allocated per worker allocating a bigger one is not
43  * that wasteful.
44  * 20 here is UV__MMSG_MAXWIDTH taken from the current libuv source, nothing
45  * will break if the original value changes.
46  */
47 #define ISC_NETMGR_RECVBUF_SIZE (20 * 65536)
48 #else
49 #define ISC_NETMGR_RECVBUF_SIZE (65536)
50 #endif
51 
52 /*
53  * Single network event loop worker.
54  */
typedef struct isc__networker {
	isc_nm_t *mgr;	  /* the network manager owning this worker */
	int id;		  /* thread id */
	uv_loop_t loop;	  /* libuv loop structure */
	uv_async_t async; /* async channel to send
			   * data to this networker */
	isc_mutex_t lock;
	isc_condition_t cond;
	bool paused;   /* event processing is paused */
	bool finished; /* worker is shutting down */
	isc_thread_t thread;
	isc_queue_t *ievents;	   /* incoming async events */
	isc_queue_t *ievents_prio; /* priority async events
				    * used for listening etc.
				    * can be processed while
				    * worker is paused */
	isc_refcount_t references;
	atomic_int_fast64_t pktcount;
	char *recvbuf;	    /* per-worker receive buffer; see
			     * ISC_NETMGR_RECVBUF_SIZE and
			     * isc__nm_alloc_cb() */
	bool recvbuf_inuse; /* recvbuf is currently assigned
			     * to a socket */
} isc__networker_t;
76 
77 /*
78  * A general handle for a connection bound to a networker.  For UDP
79  * connections we have peer address here, so both TCP and UDP can be
80  * handled with a simple send-like function
81  */
82 #define NMHANDLE_MAGIC	  ISC_MAGIC('N', 'M', 'H', 'D')
83 #define VALID_NMHANDLE(t) ISC_MAGIC_VALID(t, NMHANDLE_MAGIC)
84 
85 typedef void (*isc__nm_closecb)(isc_nmhandle_t *);
86 
struct isc_nmhandle {
	int magic; /* NMHANDLE_MAGIC; see VALID_NMHANDLE() */
	isc_refcount_t references;

	/*
	 * The socket is not 'attached' in the traditional
	 * reference-counting sense. Instead, we keep all handles in an
	 * array in the socket object.  This way, we don't have circular
	 * dependencies and we can close all handles when we're destroying
	 * the socket.
	 */
	isc_nmsocket_t *sock;
	size_t ah_pos; /* Position in the socket's
			* 'active handles' array */

	/*
	 * The handle is 'inflight' if netmgr is not currently processing
	 * it in any way - it might mean that e.g. a recursive resolution
	 * is happening. For an inflight handle we must wait for the
	 * calling code to finish before we can free it.
	 */
	atomic_bool inflight;

	isc_sockaddr_t peer;	   /* peer address */
	isc_sockaddr_t local;	   /* local address */
	isc_nm_opaquecb_t doreset; /* reset extra callback, external */
	isc_nm_opaquecb_t dofree;  /* free extra callback, external */
	void *opaque;
	char extra[]; /* flexible array for caller data; its size
		       * comes from the socket's extrahandlesize */
};
117 
118 /*
119  * An interface - an address we can listen on.
120  */
struct isc_nmiface {
	isc_sockaddr_t addr; /* the address we can listen on */
};
124 
typedef enum isc__netievent_type {
	/* UDP events */
	netievent_udpsend,
	netievent_udprecv,
	netievent_udpstop,

	/* TCP events */
	netievent_tcpconnect,
	netievent_tcpsend,
	netievent_tcprecv,
	netievent_tcpstartread,
	netievent_tcppauseread,
	netievent_tcpchildaccept,
	netievent_tcpaccept,
	netievent_tcpstop,
	netievent_tcpclose,

	/* TCPDNS events */
	netievent_tcpdnsclose,
	netievent_tcpdnssend,

	/* general events */
	netievent_closecb,
	netievent_shutdown,
	netievent_stop,
	netievent_prio = 0xff, /* event type values higher than this
				* will be treated as high-priority
				* events, which can be processed
				* while the netmgr is paused.
				*/
	netievent_udplisten,
	netievent_tcplisten,
} isc__netievent_type;
154 
155 /*
156  * We have to split it because we can read and write on a socket
157  * simultaneously.
158  */
/* Read-side callbacks. */
typedef union {
	isc_nm_recv_cb_t recv;	   /* receive callback */
	isc_nm_accept_cb_t accept; /* accept callback */
} isc__nm_readcb_t;
163 
/* Write-side callbacks. */
typedef union {
	isc_nm_cb_t send;    /* send-completion callback */
	isc_nm_cb_t connect; /* connect-completion callback */
} isc__nm_writecb_t;
168 
/* Any of the callback types above, for code handling both directions. */
typedef union {
	isc_nm_recv_cb_t recv;
	isc_nm_accept_cb_t accept;
	isc_nm_cb_t send;
	isc_nm_cb_t connect;
} isc__nm_cb_t;
175 
176 /*
177  * Wrapper around uv_req_t with 'our' fields in it.  req->data should
178  * always point to its parent.  Note that we always allocate more than
179  * sizeof(struct) because we make room for different req types;
180  */
181 #define UVREQ_MAGIC    ISC_MAGIC('N', 'M', 'U', 'R')
182 #define VALID_UVREQ(t) ISC_MAGIC_VALID(t, UVREQ_MAGIC)
183 
typedef struct isc__nm_uvreq {
	int magic;		/* UVREQ_MAGIC; see VALID_UVREQ() */
	isc_nmsocket_t *sock;	/* socket this request operates on */
	isc_nmhandle_t *handle;
	uv_buf_t uvbuf;	      /* translated isc_region_t, to be
			       * sent or received */
	isc_sockaddr_t local; /* local address */
	isc_sockaddr_t peer;  /* peer address */
	isc__nm_cb_t cb;      /* callback */
	void *cbarg;	      /* callback argument */
	uv_pipe_t ipc;	      /* used for sending socket
			       * uv_handles to other threads */
	union {
		/* Whichever libuv request type this request is used
		 * as; req->data should point back to its parent
		 * isc__nm_uvreq_t (see the comment above UVREQ_MAGIC). */
		uv_req_t req;
		uv_getaddrinfo_t getaddrinfo;
		uv_getnameinfo_t getnameinfo;
		uv_shutdown_t shutdown;
		uv_write_t write;
		uv_connect_t connect;
		uv_udp_send_t udp_send;
		uv_fs_t fs;
		uv_work_t work;
	} uv_req;
} isc__nm_uvreq_t;
208 
/*
 * Event carrying only the target socket; aliased below for the
 * listen/stop/close/read-control operations that need nothing else.
 */
typedef struct isc__netievent__socket {
	isc__netievent_type type;
	isc_nmsocket_t *sock;
} isc__netievent__socket_t;

typedef isc__netievent__socket_t isc__netievent_udplisten_t;
typedef isc__netievent__socket_t isc__netievent_udpstop_t;
typedef isc__netievent__socket_t isc__netievent_tcpstop_t;
typedef isc__netievent__socket_t isc__netievent_tcpclose_t;
typedef isc__netievent__socket_t isc__netievent_tcpdnsclose_t;
typedef isc__netievent__socket_t isc__netievent_startread_t;
typedef isc__netievent__socket_t isc__netievent_pauseread_t;
typedef isc__netievent__socket_t isc__netievent_closecb_t;
222 
/*
 * Event carrying a socket plus a UV request (connect/listen/send).
 */
typedef struct isc__netievent__socket_req {
	isc__netievent_type type;
	isc_nmsocket_t *sock;
	isc__nm_uvreq_t *req;
} isc__netievent__socket_req_t;

typedef isc__netievent__socket_req_t isc__netievent_tcpconnect_t;
typedef isc__netievent__socket_req_t isc__netievent_tcplisten_t;
typedef isc__netievent__socket_req_t isc__netievent_tcpsend_t;
typedef isc__netievent__socket_req_t isc__netievent_tcpdnssend_t;
233 
/*
 * Event carrying a socket, stream information, and a TCP client
 * quota; used for accepting a child TCP connection.
 */
typedef struct isc__netievent__socket_streaminfo_quota {
	isc__netievent_type type;
	isc_nmsocket_t *sock;
	isc_uv_stream_info_t streaminfo;
	isc_quota_t *quota;
} isc__netievent__socket_streaminfo_quota_t;

typedef isc__netievent__socket_streaminfo_quota_t
	isc__netievent_tcpchildaccept_t;
243 
/*
 * Event carrying a socket and a handle.
 */
typedef struct isc__netievent__socket_handle {
	isc__netievent_type type;
	isc_nmsocket_t *sock;
	isc_nmhandle_t *handle;
} isc__netievent__socket_handle_t;
249 
/*
 * Event carrying a socket and a TCP client quota (accept).
 */
typedef struct isc__netievent__socket_quota {
	isc__netievent_type type;
	isc_nmsocket_t *sock;
	isc_quota_t *quota;
} isc__netievent__socket_quota_t;

typedef isc__netievent__socket_quota_t isc__netievent_tcpaccept_t;
257 
/*
 * UDP send event: the socket, the destination peer address, and
 * the request describing the data to send.
 */
typedef struct isc__netievent_udpsend {
	isc__netievent_type type;
	isc_nmsocket_t *sock;
	isc_sockaddr_t peer;
	isc__nm_uvreq_t *req;
} isc__netievent_udpsend_t;
264 
/*
 * Generic event with no arguments beyond its type (shutdown, stop).
 */
typedef struct isc__netievent {
	isc__netievent_type type;
} isc__netievent_t;

typedef isc__netievent_t isc__netievent_shutdown_t;
typedef isc__netievent_t isc__netievent_stop_t;
271 
/*
 * Union of all the event structures above; presumably used to size
 * event allocations (see isc__nm_get_ievent()) - NOTE(review): confirm
 * against netmgr.c.
 */
typedef union {
	isc__netievent_t ni;
	isc__netievent__socket_t nis;
	isc__netievent__socket_req_t nisr;
	isc__netievent_udpsend_t nius;
	isc__netievent__socket_quota_t nisq;
	isc__netievent__socket_streaminfo_quota_t nissq;
} isc__netievent_storage_t;
280 
281 /*
282  * Network manager
283  */
284 #define NM_MAGIC    ISC_MAGIC('N', 'E', 'T', 'M')
285 #define VALID_NM(t) ISC_MAGIC_VALID(t, NM_MAGIC)
286 
struct isc_nm {
	int magic; /* NM_MAGIC; see VALID_NM() */
	isc_refcount_t references;
	isc_mem_t *mctx;
	uint32_t nworkers;
	isc_mutex_t lock;
	isc_condition_t wkstatecond;
	isc__networker_t *workers; /* event loop workers */

	isc_stats_t *stats;

	/*
	 * NOTE(review): reqlock/evlock presumably guard the
	 * corresponding mempools - confirm in the implementation.
	 */
	isc_mempool_t *reqpool;
	isc_mutex_t reqlock;

	isc_mempool_t *evpool;
	isc_mutex_t evlock;

	atomic_uint_fast32_t workers_running;
	atomic_uint_fast32_t workers_paused;
	atomic_uint_fast32_t maxudp;
	atomic_bool paused;

	/*
	 * Active connections are being closed and new connections are
	 * no longer allowed.
	 */
	atomic_bool closing;

	/*
	 * A worker is actively waiting for other workers, for example to
	 * stop listening; that means no other thread can do the same thing
	 * or pause, or we'll deadlock. We have to either re-enqueue our
	 * event or wait for the other one to finish if we want to pause.
	 */
	atomic_bool interlocked;

	/*
	 * Timeout values for TCP connections, corresponding to
	 * tcp-initial-timeout, tcp-idle-timeout, tcp-keepalive-timeout,
	 * and tcp-advertised-timeout. Note that these are stored in
	 * milliseconds so they can be used directly with the libuv timer,
	 * but they are configured in tenths of seconds.
	 */
	uint32_t init;
	uint32_t idle;
	uint32_t keepalive;
	uint32_t advertised;
};
335 
/*
 * Socket types.  TCPDNS sockets are 'wrapped' sockets layered over
 * plain TCP (see the 'outer' member of isc_nmsocket).
 */
typedef enum isc_nmsocket_type {
	isc_nm_udpsocket,
	isc_nm_udplistener, /* Aggregate of nm_udpsocks */
	isc_nm_tcpsocket,
	isc_nm_tcplistener,
	isc_nm_tcpdnslistener,
	isc_nm_tcpdnssocket
} isc_nmsocket_type;
344 
345 /*%
346  * A universal structure for either a single socket or a group of
347  * dup'd/SO_REUSE_PORT-using sockets listening on the same interface.
348  */
349 #define NMSOCK_MAGIC	ISC_MAGIC('N', 'M', 'S', 'K')
350 #define VALID_NMSOCK(t) ISC_MAGIC_VALID(t, NMSOCK_MAGIC)
351 
352 /*%
353  * Index into socket stat counter arrays.
354  */
enum { STATID_OPEN = 0,
       STATID_OPENFAIL = 1,
       STATID_CLOSE = 2,
       STATID_BINDFAIL = 3,
       STATID_CONNECTFAIL = 4,
       STATID_CONNECT = 5,
       STATID_ACCEPTFAIL = 6,
       STATID_ACCEPT = 7,
       STATID_SENDFAIL = 8,
       STATID_RECVFAIL = 9,
       STATID_ACTIVE = 10 }; /* indexes into the statsindex array
			      * of isc_nmsocket */
366 
struct isc_nmsocket {
	/*% Unlocked, RO */
	int magic; /* NMSOCK_MAGIC; see VALID_NMSOCK() */
	int tid;   /* worker thread id; presumably
		    * ISC_NETMGR_TID_UNKNOWN when not bound
		    * to a worker - NOTE(review): confirm */
	isc_nmsocket_type type;
	isc_nm_t *mgr;
	/*% Parent socket for multithreaded listeners */
	isc_nmsocket_t *parent;
	/*% Listener socket this connection was accepted on */
	isc_nmsocket_t *listener;

	/*%
	 * quota is the TCP client, attached when a TCP connection
	 * is established. pquota is a non-attached pointer to the
	 * TCP client quota, stored in listening sockets but only
	 * attached in connected sockets.
	 */
	isc_quota_t *quota;
	isc_quota_t *pquota;
	isc_quota_cb_t quotacb;

	/*%
	 * Socket statistics
	 */
	const isc_statscounter_t *statsindex;

	/*%
	 * TCP read timeout timer.
	 */
	uv_timer_t timer;
	bool timer_initialized;
	uint64_t read_timeout;

	/*% outer socket is for 'wrapped' sockets - e.g. tcpdns in tcp */
	isc_nmsocket_t *outer;

	/*% server socket for connections */
	isc_nmsocket_t *server;

	/*% Child sockets for multi-socket setups */
	isc_nmsocket_t *children;
	int nchildren;
	isc_nmiface_t *iface;
	isc_nmhandle_t *tcphandle;

	/*% Extra data allocated at the end of each isc_nmhandle_t */
	size_t extrahandlesize;

	/*% TCP backlog */
	int backlog;

	/*% libuv data */
	uv_os_sock_t fd;
	union uv_any_handle uv_handle;

	/*% Peer address */
	isc_sockaddr_t peer;

	/* Atomic */
	/*% Number of running (e.g. listening) child sockets */
	atomic_int_fast32_t rchildren;

	/*%
	 * Socket is active if it's listening, working, etc. If it's
	 * closing, then it doesn't make sense, for example, to
	 * push handles or reqs for reuse.
	 */
	atomic_bool active;
	atomic_bool destroying;

	/*%
	 * Socket is closed if it's not active and all the possible
	 * callbacks were fired, there are no active handles, etc.
	 * If active==false but closed==false, that means the socket
	 * is closing.
	 */
	atomic_bool closed;
	atomic_bool listening;
	atomic_bool listen_error;
	isc_refcount_t references;

	/*%
	 * TCPDNS socket has been set not to pipeline.
	 */
	atomic_bool sequential;

	/*%
	 * TCPDNS socket has exceeded the maximum number of
	 * simultaneous requests per connection, so will be temporarily
	 * restricted from pipelining.
	 */
	atomic_bool overlimit;

	/*%
	 * TCPDNS socket in sequential mode is currently processing a packet,
	 * we need to wait until it finishes.
	 */
	atomic_bool processing;

	/*%
	 * A TCP socket has had isc_nm_pauseread() called.
	 */
	atomic_bool readpaused;

	/*%
	 * A TCP or TCPDNS socket has been set to use the keepalive
	 * timeout instead of the default idle timeout.
	 */
	atomic_bool keepalive;

	/*%
	 * 'spare' handles and requests that can be reused to avoid
	 * allocations, for UDP.
	 */
	isc_astack_t *inactivehandles;
	isc_astack_t *inactivereqs;

	/*%
	 * Used to wait for TCP listening events to complete, and
	 * for the number of running children to reach zero during
	 * shutdown.
	 */
	isc_mutex_t lock;
	isc_condition_t cond;

	/*%
	 * Used to pass a result back from TCP listening events.
	 */
	isc_result_t result;

	/*%
	 * List of active handles.
	 * ah - current position in 'ah_frees'; this represents the
	 *	current number of active handles;
	 * ah_size - size of the 'ah_frees' and 'ah_handles' arrays
	 * ah_handles - array pointers to active handles
	 *
	 * Adding a handle
	 *  - if ah == ah_size, reallocate
	 *  - x = ah_frees[ah]
	 *  - ah_frees[ah++] = 0;
	 *  - ah_handles[x] = handle
	 *  - x must be stored with the handle!
	 * Removing a handle:
	 *  - ah_frees[--ah] = x
	 *  - ah_handles[x] = NULL;
	 *
	 * XXX: for now this is locked with socket->lock, but we
	 * might want to change it to something lockless in the
	 * future.
	 */
	atomic_int_fast32_t ah;
	size_t ah_size;
	size_t *ah_frees;
	isc_nmhandle_t **ah_handles;

	/*% Buffer for TCPDNS processing */
	size_t buf_size; /* allocated size of buf */
	size_t buf_len;	 /* bytes currently stored in buf */
	unsigned char *buf;

	/*%
	 * This function will be called with handle->sock
	 * as the argument whenever a handle's references drop
	 * to zero, after its reset callback has been called.
	 */
	isc_nm_opaquecb_t closehandle_cb;

	/*% Read/accept callback and its argument. */
	isc__nm_readcb_t rcb;
	void *rcbarg;

	/*% Accept callback and its argument. */
	isc__nm_cb_t accept_cb;
	void *accept_cbarg;
};
541 
542 bool
543 isc__nm_in_netthread(void);
544 /*%
545  * Returns 'true' if we're in the network thread.
546  */
547 
548 void *
549 isc__nm_get_ievent(isc_nm_t *mgr, isc__netievent_type type);
550 /*%<
551  * Allocate an ievent and set the type.
552  */
553 void
554 isc__nm_put_ievent(isc_nm_t *mgr, void *ievent);
555 
556 void
557 isc__nm_enqueue_ievent(isc__networker_t *worker, isc__netievent_t *event);
558 /*%<
559  * Enqueue an ievent onto a specific worker queue. (This the only safe
560  * way to use an isc__networker_t from another thread.)
561  */
562 
563 void
564 isc__nm_alloc_cb(uv_handle_t *handle, size_t size, uv_buf_t *buf);
565 /*%<
566  * Allocator for recv operations.
567  *
568  * Note that as currently implemented, this doesn't actually
569  * allocate anything, it just assigns the the isc__networker's UDP
570  * receive buffer to a socket, and marks it as "in use".
571  */
572 
573 void
574 isc__nm_free_uvbuf(isc_nmsocket_t *sock, const uv_buf_t *buf);
575 /*%<
576  * Free a buffer allocated for a receive operation.
577  *
578  * Note that as currently implemented, this doesn't actually
579  * free anything, marks the isc__networker's UDP receive buffer
580  * as "not in use".
581  */
582 
583 isc_nmhandle_t *
584 isc__nmhandle_get(isc_nmsocket_t *sock, isc_sockaddr_t *peer,
585 		  isc_sockaddr_t *local);
586 /*%<
587  * Get a handle for the socket 'sock', allocating a new one
588  * if there isn't one available in 'sock->inactivehandles'.
589  *
590  * If 'peer' is not NULL, set the handle's peer address to 'peer',
591  * otherwise set it to 'sock->peer'.
592  *
593  * If 'local' is not NULL, set the handle's local address to 'local',
594  * otherwise set it to 'sock->iface->addr'.
595  */
596 
597 isc__nm_uvreq_t *
598 isc__nm_uvreq_get(isc_nm_t *mgr, isc_nmsocket_t *sock);
599 /*%<
600  * Get a UV request structure for the socket 'sock', allocating a
601  * new one if there isn't one available in 'sock->inactivereqs'.
602  */
603 
604 void
605 isc__nm_uvreq_put(isc__nm_uvreq_t **req, isc_nmsocket_t *sock);
606 /*%<
607  * Completes the use of a UV request structure, setting '*req' to NULL.
608  *
609  * The UV request is pushed onto the 'sock->inactivereqs' stack or,
610  * if that doesn't work, freed.
611  */
612 
613 void
614 isc__nmsocket_init(isc_nmsocket_t *sock, isc_nm_t *mgr, isc_nmsocket_type type,
615 		   isc_nmiface_t *iface);
616 /*%<
617  * Initialize socket 'sock', attach it to 'mgr', and set it to type 'type'
618  * and its interface to 'iface'.
619  */
620 
621 void
622 isc__nmsocket_prep_destroy(isc_nmsocket_t *sock);
623 /*%<
624  * Market 'sock' as inactive, close it if necessary, and destroy it
625  * if there are no remaining references or active handles.
626  */
627 
628 bool
629 isc__nmsocket_active(isc_nmsocket_t *sock);
630 /*%<
631  * Determine whether 'sock' is active by checking 'sock->active'
632  * or, for child sockets, 'sock->parent->active'.
633  */
634 
635 void
636 isc__nm_async_closecb(isc__networker_t *worker, isc__netievent_t *ev0);
637 /*%<
638  * Issue a 'handle closed' callback on the socket.
639  */
640 
641 void
642 isc__nm_async_shutdown(isc__networker_t *worker, isc__netievent_t *ev0);
643 /*%<
644  * Walk through all uv handles, get the underlying sockets and issue
645  * close on them.
646  */
647 
648 isc_result_t
649 isc__nm_udp_send(isc_nmhandle_t *handle, isc_region_t *region, isc_nm_cb_t cb,
650 		 void *cbarg);
651 /*%<
652  * Back-end implementation of isc_nm_send() for UDP handles.
653  */
654 
655 void
656 isc__nm_udp_stoplistening(isc_nmsocket_t *sock);
657 
658 void
659 isc__nm_async_udplisten(isc__networker_t *worker, isc__netievent_t *ev0);
660 
661 void
662 isc__nm_async_udpstop(isc__networker_t *worker, isc__netievent_t *ev0);
663 void
664 isc__nm_async_udpsend(isc__networker_t *worker, isc__netievent_t *ev0);
665 /*%<
666  * Callback handlers for asynchronous UDP events (listen, stoplisten, send).
667  */
668 
669 isc_result_t
670 isc__nm_tcp_send(isc_nmhandle_t *handle, isc_region_t *region, isc_nm_cb_t cb,
671 		 void *cbarg);
672 /*%<
673  * Back-end implementation of isc_nm_send() for TCP handles.
674  */
675 
676 isc_result_t
677 isc__nm_tcp_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg);
678 
679 void
680 isc__nm_tcp_close(isc_nmsocket_t *sock);
681 /*%<
682  * Close a TCP socket.
683  */
684 isc_result_t
685 isc__nm_tcp_pauseread(isc_nmsocket_t *sock);
686 /*%<
687  * Pause reading on this socket, while still remembering the callback.
688  */
689 
690 isc_result_t
691 isc__nm_tcp_resumeread(isc_nmsocket_t *sock);
692 /*%<
693  * Resume reading from socket.
694  *
695  */
696 
697 void
698 isc__nm_tcp_shutdown(isc_nmsocket_t *sock);
699 /*%<
700  * Called on shutdown to close and clean up a listening TCP socket.
701  */
702 
703 void
704 isc__nm_tcp_stoplistening(isc_nmsocket_t *sock);
705 
706 void
707 isc__nm_async_tcpconnect(isc__networker_t *worker, isc__netievent_t *ev0);
708 void
709 isc__nm_async_tcplisten(isc__networker_t *worker, isc__netievent_t *ev0);
710 void
711 isc__nm_async_tcpaccept(isc__networker_t *worker, isc__netievent_t *ev0);
712 void
713 isc__nm_async_tcpchildaccept(isc__networker_t *worker, isc__netievent_t *ev0);
714 void
715 isc__nm_async_tcpstop(isc__networker_t *worker, isc__netievent_t *ev0);
716 void
717 isc__nm_async_tcpsend(isc__networker_t *worker, isc__netievent_t *ev0);
718 void
719 isc__nm_async_startread(isc__networker_t *worker, isc__netievent_t *ev0);
720 void
721 isc__nm_async_pauseread(isc__networker_t *worker, isc__netievent_t *ev0);
722 void
723 isc__nm_async_tcp_startread(isc__networker_t *worker, isc__netievent_t *ev0);
724 void
725 isc__nm_async_tcp_pauseread(isc__networker_t *worker, isc__netievent_t *ev0);
726 void
727 isc__nm_async_tcpclose(isc__networker_t *worker, isc__netievent_t *ev0);
728 /*%<
729  * Callback handlers for asynchronous TCP events (connect, listen,
730  * stoplisten, send, read, pause, close).
731  */
732 
733 isc_result_t
734 isc__nm_tcpdns_send(isc_nmhandle_t *handle, isc_region_t *region,
735 		    isc_nm_cb_t cb, void *cbarg);
736 /*%<
737  * Back-end implementation of isc_nm_send() for TCPDNS handles.
738  */
739 
740 void
741 isc__nm_tcpdns_close(isc_nmsocket_t *sock);
742 /*%<
743  * Close a TCPDNS socket.
744  */
745 
746 void
747 isc__nm_tcpdns_stoplistening(isc_nmsocket_t *sock);
748 
749 void
750 isc__nm_async_tcpdnsclose(isc__networker_t *worker, isc__netievent_t *ev0);
751 
752 void
753 isc__nm_async_tcpdnssend(isc__networker_t *worker, isc__netievent_t *ev0);
754 
755 #define isc__nm_uverr2result(x) \
756 	isc___nm_uverr2result(x, true, __FILE__, __LINE__)
757 isc_result_t
758 isc___nm_uverr2result(int uverr, bool dolog, const char *file,
759 		      unsigned int line);
760 /*%<
761  * Convert a libuv error value into an isc_result_t.  The
762  * list of supported error values is not complete; new users
763  * of this function should add any expected errors that are
764  * not already there.
765  */
766 
767 bool
768 isc__nm_acquire_interlocked(isc_nm_t *mgr);
769 /*%<
770  * Try to acquire interlocked state; return true if successful.
771  */
772 
773 void
774 isc__nm_drop_interlocked(isc_nm_t *mgr);
775 /*%<
776  * Drop interlocked state; signal waiters.
777  */
778 
779 void
780 isc__nm_acquire_interlocked_force(isc_nm_t *mgr);
781 /*%<
782  * Actively wait for interlocked state.
783  */
784 
785 void
786 isc__nm_incstats(isc_nm_t *mgr, isc_statscounter_t counterid);
787 /*%<
788  * Increment socket-related statistics counters.
789  */
790 
791 void
792 isc__nm_decstats(isc_nm_t *mgr, isc_statscounter_t counterid);
793 /*%<
794  * Decrement socket-related statistics counters.
795  */
796