1 /*
2  * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
3  *
4  * This Source Code Form is subject to the terms of the Mozilla Public
5  * License, v. 2.0. If a copy of the MPL was not distributed with this
6  * file, you can obtain one at https://mozilla.org/MPL/2.0/.
7  *
8  * See the COPYRIGHT file distributed with this work for additional
9  * information regarding copyright ownership.
10  */
11 
12 #pragma once
13 
14 #include <unistd.h>
15 #include <uv.h>
16 
17 #include <openssl/err.h>
18 #include <openssl/ssl.h>
19 
20 #include <isc/astack.h>
21 #include <isc/atomic.h>
22 #include <isc/barrier.h>
23 #include <isc/buffer.h>
24 #include <isc/condition.h>
25 #include <isc/magic.h>
26 #include <isc/mem.h>
27 #include <isc/netmgr.h>
28 #include <isc/queue.h>
29 #include <isc/quota.h>
30 #include <isc/random.h>
31 #include <isc/refcount.h>
32 #include <isc/region.h>
33 #include <isc/result.h>
34 #include <isc/sockaddr.h>
35 #include <isc/stats.h>
36 #include <isc/thread.h>
37 #include <isc/tls.h>
38 #include <isc/util.h>
39 
40 #include "uv-compat.h"
41 
/* Thread id used before a socket/event is bound to a worker thread. */
#define ISC_NETMGR_TID_UNKNOWN -1

/* Must be different from ISC_NETMGR_TID_UNKNOWN */
#define ISC_NETMGR_NON_INTERLOCKED -2

/* Size of the buffer used for TLS reads/writes. */
#define ISC_NETMGR_TLSBUF_SIZE 65536

/*
 * New versions of libuv support recvmmsg on unices.
 * Since recvbuf is only allocated per worker allocating a bigger one is not
 * that wasteful.
 * 20 here is UV__MMSG_MAXWIDTH taken from the current libuv source, nothing
 * will break if the original value changes.
 */
#define ISC_NETMGR_RECVBUF_SIZE (20 * 65536)

/* Room for a 2-byte TCP DNS length prefix plus a maximum-size message. */
#define ISC_NETMGR_SENDBUF_SIZE (sizeof(uint16_t) + UINT16_MAX)
59 
/*%
 * Regular TCP buffer size.
 */
#define NM_REG_BUF 4096

/*%
 * Larger buffer for when the regular one isn't enough; this will
 * hold two full DNS packets with lengths.  netmgr receives 64k at
 * most in TCPDNS or TLSDNS connections, so there's no risk of overrun
 * when using a buffer this size.
 *
 * The replacement list is fully parenthesized so the macro expands
 * safely inside larger expressions (the previous form "(65535 + 2) * 2"
 * changed value when used e.g. as a divisor: "x / NM_BIG_BUF" parsed
 * as "x / (65535 + 2) * 2").
 */
#define NM_BIG_BUF ((65535 + 2) * 2)

/*
 * SO_REUSEPORT on Linux (or SO_REUSEPORT_LB on FreeBSD) load-balances
 * incoming connections across sockets bound to the same address.
 */
#if defined(SO_REUSEPORT_LB) || (defined(SO_REUSEPORT) && defined(__linux__))
#define HAVE_SO_REUSEPORT_LB 1
#endif
76 
/*
 * Define NETMGR_TRACE to activate tracing of handles and sockets.
 * This will impair performance but enables us to quickly determine,
 * if netmgr resources haven't been cleaned up on shutdown, which ones
 * are still in use.
 */
#ifdef NETMGR_TRACE
#define TRACE_SIZE 8

void
isc__nm_dump_active(isc_nm_t *nm);

#if defined(__linux__)
#include <syscall.h>
#define gettid() (uint32_t) syscall(SYS_gettid)
#else
#define gettid() (uint32_t) pthread_self()
#endif

#ifdef NETMGR_TRACE_VERBOSE
#define NETMGR_TRACE_LOG(format, ...)                                \
	fprintf(stderr, "%" PRIu32 ":%d:%s:%u:%s:" format, gettid(), \
		isc_nm_tid(), file, line, func, __VA_ARGS__)
#else
/*
 * Non-verbose variant: consume the file/line/func locals so they don't
 * trigger unused-variable warnings.  Wrapped in do/while(0) so the
 * macro behaves as a single statement (CERT PRE10-C); the previous
 * bare statement list would bind only "(void)file;" to an unbraced
 * if/else branch.  Note the format arguments are deliberately not
 * evaluated here.
 */
#define NETMGR_TRACE_LOG(format, ...) \
	do {                          \
		(void)file;           \
		(void)line;           \
		(void)func;           \
	} while (0)
#endif

/*
 * FLARG adds hidden file/line/func parameters to internal functions;
 * FLARG_PASS forwards them, and the *_IEVENT variants copy them
 * into/out of a netievent so the allocation site survives the queue.
 */
#define FLARG_PASS , file, line, func
#define FLARG                                              \
	, const char *file __attribute__((unused)),        \
		unsigned int line __attribute__((unused)), \
		const char *func __attribute__((unused))
#define FLARG_IEVENT(ievent)              \
	const char *file = ievent->file;  \
	unsigned int line = ievent->line; \
	const char *func = ievent->func;
#define FLARG_IEVENT_PASS(ievent) \
	ievent->file = file;      \
	ievent->line = line;      \
	ievent->func = func;
#define isc__nm_uvreq_get(req, sock) \
	isc___nm_uvreq_get(req, sock, __FILE__, __LINE__, __func__)
#define isc__nm_uvreq_put(req, sock) \
	isc___nm_uvreq_put(req, sock, __FILE__, __LINE__, __func__)
#define isc__nmsocket_init(sock, mgr, type, iface)                      \
	isc___nmsocket_init(sock, mgr, type, iface, __FILE__, __LINE__, \
			    __func__)
#define isc__nmsocket_put(sockp) \
	isc___nmsocket_put(sockp, __FILE__, __LINE__, __func__)
#define isc__nmsocket_attach(sock, target) \
	isc___nmsocket_attach(sock, target, __FILE__, __LINE__, __func__)
#define isc__nmsocket_detach(socketp) \
	isc___nmsocket_detach(socketp, __FILE__, __LINE__, __func__)
#define isc__nmsocket_close(socketp) \
	isc___nmsocket_close(socketp, __FILE__, __LINE__, __func__)
#define isc__nmhandle_get(sock, peer, local) \
	isc___nmhandle_get(sock, peer, local, __FILE__, __LINE__, __func__)
#define isc__nmsocket_prep_destroy(sock) \
	isc___nmsocket_prep_destroy(sock, __FILE__, __LINE__, __func__)
#else
/* Tracing disabled: all trace plumbing compiles away. */
#define NETMGR_TRACE_LOG(format, ...)

#define FLARG_PASS
#define FLARG
#define FLARG_IEVENT(ievent)
#define FLARG_IEVENT_PASS(ievent)
#define isc__nm_uvreq_get(req, sock) isc___nm_uvreq_get(req, sock)
#define isc__nm_uvreq_put(req, sock) isc___nm_uvreq_put(req, sock)
#define isc__nmsocket_init(sock, mgr, type, iface) \
	isc___nmsocket_init(sock, mgr, type, iface)
#define isc__nmsocket_put(sockp)	   isc___nmsocket_put(sockp)
#define isc__nmsocket_attach(sock, target) isc___nmsocket_attach(sock, target)
#define isc__nmsocket_detach(socketp)	   isc___nmsocket_detach(socketp)
#define isc__nmsocket_close(socketp)	   isc___nmsocket_close(socketp)
#define isc__nmhandle_get(sock, peer, local) \
	isc___nmhandle_get(sock, peer, local)
#define isc__nmsocket_prep_destroy(sock) isc___nmsocket_prep_destroy(sock)
#endif
158 
/*
 * Queue types in the order of processing priority.
 */
typedef enum {
	NETIEVENT_PRIORITY = 0,	  /* processed first */
	NETIEVENT_PRIVILEGED = 1,
	NETIEVENT_TASK = 2,
	NETIEVENT_NORMAL = 3,	  /* processed last */
	NETIEVENT_MAX = 4,	  /* number of queue types, not a queue */
} netievent_type_t;
169 
/*
 * Single network event loop worker.
 */
typedef struct isc__networker {
	isc_nm_t *mgr;	  /* owning network manager */
	int id;		  /* thread id */
	uv_loop_t loop;	  /* libuv loop structure */
	uv_async_t async; /* async channel to send
			   * data to this networker */
	isc_mutex_t lock;
	bool paused;
	bool finished;
	isc_thread_t thread;
	/* Incoming event queues, one per netievent_type_t priority class. */
	isc_queue_t *ievents[NETIEVENT_MAX];
	/* Count of queued events per priority class. */
	atomic_uint_fast32_t nievents[NETIEVENT_MAX];
	isc_condition_t cond_prio;

	isc_refcount_t references;
	atomic_int_fast64_t pktcount;
	char *recvbuf; /* per-worker receive buffer
			* (see ISC_NETMGR_RECVBUF_SIZE) */
	char *sendbuf; /* per-worker send buffer
			* (see ISC_NETMGR_SENDBUF_SIZE) */
	bool recvbuf_inuse;
} isc__networker_t;
193 
/*
 * A general handle for a connection bound to a networker.  For UDP
 * connections we have peer address here, so both TCP and UDP can be
 * handled with a simple send-like function
 */
#define NMHANDLE_MAGIC ISC_MAGIC('N', 'M', 'H', 'D')
#define VALID_NMHANDLE(t)                      \
	(ISC_MAGIC_VALID(t, NMHANDLE_MAGIC) && \
	 atomic_load(&(t)->references) > 0)

typedef void (*isc__nm_closecb)(isc_nmhandle_t *);
typedef struct isc_nm_http_session isc_nm_http_session_t;

struct isc_nmhandle {
	int magic; /* NMHANDLE_MAGIC while the handle is valid */
	isc_refcount_t references;

	/*
	 * The socket is not 'attached' in the traditional
	 * reference-counting sense. Instead, we keep all handles in an
	 * array in the socket object.  This way, we don't have circular
	 * dependencies and we can close all handles when we're destroying
	 * the socket.
	 */
	isc_nmsocket_t *sock;
	size_t ah_pos; /* Position in the socket's 'active handles' array */

	isc_nm_http_session_t *httpsession; /* HTTP session, if any */

	isc_sockaddr_t peer;	   /* remote address */
	isc_sockaddr_t local;	   /* local address */
	isc_nm_opaquecb_t doreset; /* reset extra callback, external */
	isc_nm_opaquecb_t dofree;  /* free extra callback, external */
#ifdef NETMGR_TRACE
	/* Captured call stack for leak tracing (NETMGR_TRACE builds only). */
	void *backtrace[TRACE_SIZE];
	int backtrace_size;
	LINK(isc_nmhandle_t) active_link;
#endif
	void *opaque; /* opaque user data */
	char extra[]; /* caller-requested extra space; sized by the
		       * owning socket's extrahandlesize */
};
235 
/*
 * Types of events processed on the per-worker event queues, grouped by
 * transport (UDP, TCP, TCPDNS, TLS, TLSDNS, HTTP) plus control events.
 * Values above netievent_prio are high-priority (see comment below).
 */
typedef enum isc__netievent_type {
	netievent_udpconnect,
	netievent_udpclose,
	netievent_udpsend,
	netievent_udpread,
	netievent_udpcancel,

	netievent_tcpconnect,
	netievent_tcpclose,
	netievent_tcpsend,
	netievent_tcpstartread,
	netievent_tcppauseread,
	netievent_tcpaccept,
	netievent_tcpcancel,

	netievent_tcpdnsaccept,
	netievent_tcpdnsconnect,
	netievent_tcpdnsclose,
	netievent_tcpdnssend,
	netievent_tcpdnsread,
	netievent_tcpdnscancel,

	netievent_tlsclose,
	netievent_tlssend,
	netievent_tlsstartread,
	netievent_tlsconnect,
	netievent_tlsdobio,
	netievent_tlscancel,

	netievent_tlsdnsaccept,
	netievent_tlsdnsconnect,
	netievent_tlsdnsclose,
	netievent_tlsdnssend,
	netievent_tlsdnsread,
	netievent_tlsdnscancel,
	netievent_tlsdnscycle,
	netievent_tlsdnsshutdown,

	netievent_httpclose,
	netievent_httpsend,

	netievent_shutdown,
	netievent_stop,
	netievent_pause,

	netievent_connectcb,
	netievent_readcb,
	netievent_sendcb,

	netievent_task,
	netievent_privilegedtask,

	/*
	 * event type values higher than this will be treated
	 * as high-priority events, which can be processed
	 * while the netmgr is pausing or paused.
	 */
	netievent_prio = 0xff,

	netievent_udplisten,
	netievent_udpstop,
	netievent_tcplisten,
	netievent_tcpstop,
	netievent_tcpdnslisten,
	netievent_tcpdnsstop,
	netievent_tlsdnslisten,
	netievent_tlsdnsstop,
	netievent_httpstop,

	netievent_resume,
	netievent_detach,
	netievent_close,
} isc__netievent_type;
309 
/*
 * Union of the per-operation callback types; which member is valid
 * depends on the operation the enclosing request represents.
 */
typedef union {
	isc_nm_recv_cb_t recv;	   /* read callback */
	isc_nm_cb_t send;	   /* send callback */
	isc_nm_cb_t connect;	   /* connect callback */
	isc_nm_accept_cb_t accept; /* accept callback */
} isc__nm_cb_t;
316 
/*
 * Wrapper around uv_req_t with 'our' fields in it.  req->data should
 * always point to its parent.  Note that we always allocate more than
 * sizeof(struct) because we make room for different req types;
 */
#define UVREQ_MAGIC    ISC_MAGIC('N', 'M', 'U', 'R')
#define VALID_UVREQ(t) ISC_MAGIC_VALID(t, UVREQ_MAGIC)

typedef struct isc__nm_uvreq isc__nm_uvreq_t;
struct isc__nm_uvreq {
	int magic;		/* UVREQ_MAGIC while valid */
	isc_nmsocket_t *sock;	/* socket this request belongs to */
	isc_nmhandle_t *handle; /* handle the operation was issued on */
	char tcplen[2];	      /* The TCP DNS message length */
	uv_buf_t uvbuf;	      /* translated isc_region_t, to be
			       * sent or received */
	isc_sockaddr_t local; /* local address */
	isc_sockaddr_t peer;  /* peer address */
	isc__nm_cb_t cb;      /* callback */
	void *cbarg;	      /* callback argument */
	uv_pipe_t ipc;	      /* used for sending socket
			       * uv_handles to other threads */
	/* One member per libuv request type this wrapper may carry. */
	union {
		uv_handle_t handle;
		uv_req_t req;
		uv_getaddrinfo_t getaddrinfo;
		uv_getnameinfo_t getnameinfo;
		uv_shutdown_t shutdown;
		uv_write_t write;
		uv_connect_t connect;
		uv_udp_send_t udp_send;
		uv_fs_t fs;
		uv_work_t work;
	} uv_req;
	ISC_LINK(isc__nm_uvreq_t) link; /* list linkage (owner-managed) */
};
353 
void *
isc__nm_get_netievent(isc_nm_t *mgr, isc__netievent_type type);
/*%<
 * Allocate an ievent and set the type.
 */
void
isc__nm_put_netievent(isc_nm_t *mgr, void *ievent);
/*%<
 * Release an ievent previously obtained from isc__nm_get_netievent().
 */
361 
/*
 * The macros here are used to simulate the "inheritance" in C, there's the base
 * netievent structure that contains just its own type and socket, and there are
 * extended netievent types that also have handles or requests or other data.
 *
 * The macros here ensure that:
 *
 *   1. every netievent type has matching definition, declaration and
 *      implementation
 *
 *   2. we handle all the netievent types of same subclass the same, e.g. if the
 *      extended netievent contains handle, we always attach to the handle in
 *      the ctor and detach from the handle in dtor.
 *
 * There are three macros here for each netievent subclass:
 *
 *   1. NETIEVENT_*_TYPE(type) creates the typedef for each type; used below in
 *   this header
 *
 *   2. NETIEVENT_*_DECL(type) generates the declaration of the get and put
 *      functions (isc__nm_get_netievent_* and isc__nm_put_netievent_*); used
 *      below in this header
 *
 *   3. NETIEVENT_*_DEF(type) generates the definition of the functions; used
 *   either in netmgr.c or matching protocol file (e.g. udp.c, tcp.c, etc.)
 */

/*
 * Common leading members shared by all socket-bearing netievents;
 * file/line/func record the allocation site for NETMGR_TRACE builds
 * (see FLARG_IEVENT/FLARG_IEVENT_PASS above).
 */
#define NETIEVENT__SOCKET         \
	isc__netievent_type type; \
	isc_nmsocket_t *sock;     \
	const char *file;         \
	unsigned int line;        \
	const char *func

/* Base "socket only" netievent. */
typedef struct isc__netievent__socket {
	NETIEVENT__SOCKET;
} isc__netievent__socket_t;

/* Typedef generator for socket-only event types. */
#define NETIEVENT_SOCKET_TYPE(type) \
	typedef isc__netievent__socket_t isc__netievent_##type##_t;

/* Declares the get/put pair for a socket-only event type. */
#define NETIEVENT_SOCKET_DECL(type)                              \
	isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
		isc_nm_t *nm, isc_nmsocket_t *sock);             \
	void isc__nm_put_netievent_##type(isc_nm_t *nm,          \
					  isc__netievent_##type##_t *ievent);

/*
 * Defines the get/put pair: get attaches the event to the socket,
 * put detaches from the socket and frees the event.
 */
#define NETIEVENT_SOCKET_DEF(type)                                             \
	isc__netievent_##type##_t *isc__nm_get_netievent_##type(               \
		isc_nm_t *nm, isc_nmsocket_t *sock) {                          \
		isc__netievent_##type##_t *ievent =                            \
			isc__nm_get_netievent(nm, netievent_##type);           \
		isc__nmsocket_attach(sock, &ievent->sock);                     \
                                                                               \
		return (ievent);                                               \
	}                                                                      \
                                                                               \
	void isc__nm_put_netievent_##type(isc_nm_t *nm,                        \
					  isc__netievent_##type##_t *ievent) { \
		isc__nmsocket_detach(&ievent->sock);                           \
		isc__nm_put_netievent(nm, ievent);                             \
	}
424 
/* Socket event that also carries a uvreq (send/connect/read requests). */
typedef struct isc__netievent__socket_req {
	NETIEVENT__SOCKET;
	isc__nm_uvreq_t *req;
} isc__netievent__socket_req_t;

/* Typedef generator for socket+request event types. */
#define NETIEVENT_SOCKET_REQ_TYPE(type) \
	typedef isc__netievent__socket_req_t isc__netievent_##type##_t;

/* Declares the get/put pair for a socket+request event type. */
#define NETIEVENT_SOCKET_REQ_DECL(type)                                    \
	isc__netievent_##type##_t *isc__nm_get_netievent_##type(           \
		isc_nm_t *nm, isc_nmsocket_t *sock, isc__nm_uvreq_t *req); \
	void isc__nm_put_netievent_##type(isc_nm_t *nm,                    \
					  isc__netievent_##type##_t *ievent);

/*
 * Defines the get/put pair.  The req pointer is stored as-is: the event
 * does not take a reference and put() does not free the request.
 */
#define NETIEVENT_SOCKET_REQ_DEF(type)                                         \
	isc__netievent_##type##_t *isc__nm_get_netievent_##type(               \
		isc_nm_t *nm, isc_nmsocket_t *sock, isc__nm_uvreq_t *req) {    \
		isc__netievent_##type##_t *ievent =                            \
			isc__nm_get_netievent(nm, netievent_##type);           \
		isc__nmsocket_attach(sock, &ievent->sock);                     \
		ievent->req = req;                                             \
                                                                               \
		return (ievent);                                               \
	}                                                                      \
                                                                               \
	void isc__nm_put_netievent_##type(isc_nm_t *nm,                        \
					  isc__netievent_##type##_t *ievent) { \
		isc__nmsocket_detach(&ievent->sock);                           \
		isc__nm_put_netievent(nm, ievent);                             \
	}
455 
/*
 * Socket event carrying a uvreq plus a result code (used for the
 * connect/read/send completion callbacks).
 *
 * NOTE(review): unlike the other socket event structs this one does not
 * embed NETIEVENT__SOCKET, so it has no file/line/func members and must
 * not be used with FLARG_IEVENT() -- confirm this is intended.
 */
typedef struct isc__netievent__socket_req_result {
	isc__netievent_type type;
	isc_nmsocket_t *sock;
	isc__nm_uvreq_t *req;
	isc_result_t result;
} isc__netievent__socket_req_result_t;

/* Typedef generator for socket+request+result event types. */
#define NETIEVENT_SOCKET_REQ_RESULT_TYPE(type) \
	typedef isc__netievent__socket_req_result_t isc__netievent_##type##_t;

/* Declares the get/put pair for a socket+request+result event type. */
#define NETIEVENT_SOCKET_REQ_RESULT_DECL(type)                            \
	isc__netievent_##type##_t *isc__nm_get_netievent_##type(          \
		isc_nm_t *nm, isc_nmsocket_t *sock, isc__nm_uvreq_t *req, \
		isc_result_t result);                                     \
	void isc__nm_put_netievent_##type(isc_nm_t *nm,                   \
					  isc__netievent_##type##_t *ievent);

/*
 * Defines the get/put pair; the req pointer and result are stored
 * verbatim, only the socket reference is attached/detached.
 */
#define NETIEVENT_SOCKET_REQ_RESULT_DEF(type)                                  \
	isc__netievent_##type##_t *isc__nm_get_netievent_##type(               \
		isc_nm_t *nm, isc_nmsocket_t *sock, isc__nm_uvreq_t *req,      \
		isc_result_t result) {                                         \
		isc__netievent_##type##_t *ievent =                            \
			isc__nm_get_netievent(nm, netievent_##type);           \
		isc__nmsocket_attach(sock, &ievent->sock);                     \
		ievent->req = req;                                             \
		ievent->result = result;                                       \
                                                                               \
		return (ievent);                                               \
	}                                                                      \
                                                                               \
	void isc__nm_put_netievent_##type(isc_nm_t *nm,                        \
					  isc__netievent_##type##_t *ievent) { \
		isc__nmsocket_detach(&ievent->sock);                           \
		isc__nm_put_netievent(nm, ievent);                             \
	}
491 
/* Socket event that also carries an nmhandle (e.g. read cancel). */
typedef struct isc__netievent__socket_handle {
	NETIEVENT__SOCKET;
	isc_nmhandle_t *handle;
} isc__netievent__socket_handle_t;

/* Typedef generator for socket+handle event types. */
#define NETIEVENT_SOCKET_HANDLE_TYPE(type) \
	typedef isc__netievent__socket_handle_t isc__netievent_##type##_t;

/* Declares the get/put pair for a socket+handle event type. */
#define NETIEVENT_SOCKET_HANDLE_DECL(type)                                   \
	isc__netievent_##type##_t *isc__nm_get_netievent_##type(             \
		isc_nm_t *nm, isc_nmsocket_t *sock, isc_nmhandle_t *handle); \
	void isc__nm_put_netievent_##type(isc_nm_t *nm,                      \
					  isc__netievent_##type##_t *ievent);

/*
 * Defines the get/put pair: both the socket and the handle are
 * reference-attached in get and detached in put.
 */
#define NETIEVENT_SOCKET_HANDLE_DEF(type)                                      \
	isc__netievent_##type##_t *isc__nm_get_netievent_##type(               \
		isc_nm_t *nm, isc_nmsocket_t *sock, isc_nmhandle_t *handle) {  \
		isc__netievent_##type##_t *ievent =                            \
			isc__nm_get_netievent(nm, netievent_##type);           \
		isc__nmsocket_attach(sock, &ievent->sock);                     \
		isc_nmhandle_attach(handle, &ievent->handle);                  \
                                                                               \
		return (ievent);                                               \
	}                                                                      \
                                                                               \
	void isc__nm_put_netievent_##type(isc_nm_t *nm,                        \
					  isc__netievent_##type##_t *ievent) { \
		isc__nmsocket_detach(&ievent->sock);                           \
		isc_nmhandle_detach(&ievent->handle);                          \
		isc__nm_put_netievent(nm, ievent);                             \
	}
523 
/* Socket event that also carries a quota pointer (TCP accept path). */
typedef struct isc__netievent__socket_quota {
	NETIEVENT__SOCKET;
	isc_quota_t *quota;
} isc__netievent__socket_quota_t;

/* Typedef generator for socket+quota event types. */
#define NETIEVENT_SOCKET_QUOTA_TYPE(type) \
	typedef isc__netievent__socket_quota_t isc__netievent_##type##_t;

/* Declares the get/put pair for a socket+quota event type. */
#define NETIEVENT_SOCKET_QUOTA_DECL(type)                                \
	isc__netievent_##type##_t *isc__nm_get_netievent_##type(         \
		isc_nm_t *nm, isc_nmsocket_t *sock, isc_quota_t *quota); \
	void isc__nm_put_netievent_##type(isc_nm_t *nm,                  \
					  isc__netievent_##type##_t *ievent);

/*
 * Defines the get/put pair; the quota pointer is stored as-is (no
 * attach/detach), only the socket reference is managed.
 */
#define NETIEVENT_SOCKET_QUOTA_DEF(type)                                       \
	isc__netievent_##type##_t *isc__nm_get_netievent_##type(               \
		isc_nm_t *nm, isc_nmsocket_t *sock, isc_quota_t *quota) {      \
		isc__netievent_##type##_t *ievent =                            \
			isc__nm_get_netievent(nm, netievent_##type);           \
		isc__nmsocket_attach(sock, &ievent->sock);                     \
		ievent->quota = quota;                                         \
                                                                               \
		return (ievent);                                               \
	}                                                                      \
                                                                               \
	void isc__nm_put_netievent_##type(isc_nm_t *nm,                        \
					  isc__netievent_##type##_t *ievent) { \
		isc__nmsocket_detach(&ievent->sock);                           \
		isc__nm_put_netievent(nm, ievent);                             \
	}
554 
/* Event that carries a task to run on the worker (no socket). */
typedef struct isc__netievent__task {
	isc__netievent_type type;
	isc_task_t *task;
} isc__netievent__task_t;

/* Typedef generator for task event types. */
#define NETIEVENT_TASK_TYPE(type) \
	typedef isc__netievent__task_t isc__netievent_##type##_t;

/* Declares the get/put pair for a task event type. */
#define NETIEVENT_TASK_DECL(type)                                \
	isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
		isc_nm_t *nm, isc_task_t *task);                 \
	void isc__nm_put_netievent_##type(isc_nm_t *nm,          \
					  isc__netievent_##type##_t *ievent);

/*
 * Defines the get/put pair; the task pointer is stored as-is and
 * cleared on put -- no reference counting is done here.
 */
#define NETIEVENT_TASK_DEF(type)                                               \
	isc__netievent_##type##_t *isc__nm_get_netievent_##type(               \
		isc_nm_t *nm, isc_task_t *task) {                              \
		isc__netievent_##type##_t *ievent =                            \
			isc__nm_get_netievent(nm, netievent_##type);           \
		ievent->task = task;                                           \
                                                                               \
		return (ievent);                                               \
	}                                                                      \
                                                                               \
	void isc__nm_put_netievent_##type(isc_nm_t *nm,                        \
					  isc__netievent_##type##_t *ievent) { \
		ievent->task = NULL;                                           \
		isc__nm_put_netievent(nm, ievent);                             \
	}
584 
/* udpsend: socket event plus the destination peer and the send request. */
typedef struct isc__netievent_udpsend {
	NETIEVENT__SOCKET;
	isc_sockaddr_t peer;
	isc__nm_uvreq_t *req;
} isc__netievent_udpsend_t;

/*
 * tlsconnect: carries the TLS context and addresses for an outgoing
 * TLS connection.
 *
 * NOTE(review): this struct does not embed NETIEVENT__SOCKET, so it has
 * no file/line/func trace members and must not be used with
 * FLARG_IEVENT() -- confirm this is intended.
 */
typedef struct isc__netievent_tlsconnect {
	isc__netievent_type type;
	isc_nmsocket_t *sock;
	SSL_CTX *ctx;
	isc_sockaddr_t local; /* local address */
	isc_sockaddr_t peer;  /* peer address */
} isc__netievent_tlsconnect_t;
598 
/* Base payload-less netievent (e.g. shutdown/stop/pause/resume). */
typedef struct isc__netievent {
	isc__netievent_type type;
} isc__netievent_t;

/* Typedef generator for payload-less event types. */
#define NETIEVENT_TYPE(type) typedef isc__netievent_t isc__netievent_##type##_t;

/* Declares the get/put pair for a payload-less event type. */
#define NETIEVENT_DECL(type)                                                   \
	isc__netievent_##type##_t *isc__nm_get_netievent_##type(isc_nm_t *nm); \
	void isc__nm_put_netievent_##type(isc_nm_t *nm,                        \
					  isc__netievent_##type##_t *ievent);

/* Defines the get/put pair: a straight wrapper around the allocator. */
#define NETIEVENT_DEF(type)                                                    \
	isc__netievent_##type##_t *isc__nm_get_netievent_##type(               \
		isc_nm_t *nm) {                                                \
		isc__netievent_##type##_t *ievent =                            \
			isc__nm_get_netievent(nm, netievent_##type);           \
                                                                               \
		return (ievent);                                               \
	}                                                                      \
                                                                               \
	void isc__nm_put_netievent_##type(isc_nm_t *nm,                        \
					  isc__netievent_##type##_t *ievent) { \
		isc__nm_put_netievent(nm, ievent);                             \
	}
623 
/*
 * Union of the netievent layouts defined above; presumably used to size
 * a single allocation that can hold any event type -- confirm against
 * the allocator in netmgr.c.
 */
typedef union {
	isc__netievent_t ni;
	isc__netievent__socket_t nis;
	isc__netievent__socket_req_t nisr;
	isc__netievent_udpsend_t nius;
	isc__netievent__socket_quota_t nisq;
	isc__netievent_tlsconnect_t nitc;
} isc__netievent_storage_t;
632 
/*
 * Work item for a uv_work threadpool.
 */
typedef struct isc__nm_work {
	isc_nm_t *netmgr;	      /* owning network manager */
	uv_work_t req;		      /* libuv work request */
	isc_nm_workcb_t cb;	      /* runs on a threadpool thread */
	isc_nm_after_workcb_t after_cb; /* runs on the loop after cb */
	void *data;		      /* caller-supplied context */
} isc__nm_work_t;
643 
/*
 * Network manager
 */
#define NM_MAGIC    ISC_MAGIC('N', 'E', 'T', 'M')
#define VALID_NM(t) ISC_MAGIC_VALID(t, NM_MAGIC)

struct isc_nm {
	int magic; /* NM_MAGIC while valid */
	isc_refcount_t references;
	isc_mem_t *mctx;
	int nworkers; /* number of worker threads */
	isc_mutex_t lock;
	isc_condition_t wkstatecond;
	isc_condition_t wkpausecond;
	isc__networker_t *workers; /* array of nworkers workers */

	isc_stats_t *stats;

	uint_fast32_t workers_running;
	atomic_uint_fast32_t workers_paused;
	atomic_uint_fast32_t maxudp;

	atomic_bool paused;

	/*
	 * Active connections are being closed and new connections are
	 * no longer allowed.
	 */
	atomic_bool closing;

	/*
	 * A worker is actively waiting for other workers, for example to
	 * stop listening; that means no other thread can do the same thing
	 * or pause, or we'll deadlock. We have to either re-enqueue our
	 * event or wait for the other one to finish if we want to pause.
	 */
	atomic_int interlocked;

	/*
	 * Timeout values for TCP connections, corresponding to
	 * tcp-initial-timeout, tcp-idle-timeout, tcp-keepalive-timeout,
	 * and tcp-advertised-timeout. Note that these are stored in
	 * milliseconds so they can be used directly with the libuv timer,
	 * but they are configured in tenths of seconds.
	 */
	atomic_uint_fast32_t init;
	atomic_uint_fast32_t idle;
	atomic_uint_fast32_t keepalive;
	atomic_uint_fast32_t advertised;

	/* Barriers synchronizing the workers during pause/resume. */
	isc_barrier_t pausing;
	isc_barrier_t resuming;

	/*
	 * Socket SO_RCVBUF and SO_SNDBUF values
	 */
	atomic_int_fast32_t recv_udp_buffer_size;
	atomic_int_fast32_t send_udp_buffer_size;
	atomic_int_fast32_t recv_tcp_buffer_size;
	atomic_int_fast32_t send_tcp_buffer_size;

#ifdef NETMGR_TRACE
	/* All live sockets, for leak reporting (see isc__nm_dump_active). */
	ISC_LIST(isc_nmsocket_t) active_sockets;
#endif
};
709 
/* Kinds of socket an isc_nmsocket_t can represent, per transport. */
typedef enum isc_nmsocket_type {
	isc_nm_udpsocket,
	isc_nm_udplistener, /* Aggregate of nm_udpsocks */
	isc_nm_tcpsocket,
	isc_nm_tcplistener,
	isc_nm_tcpdnslistener,
	isc_nm_tcpdnssocket,
	isc_nm_tlslistener,
	isc_nm_tlssocket,
	isc_nm_tlsdnslistener,
	isc_nm_tlsdnssocket,
	isc_nm_httplistener,
	isc_nm_httpsocket
} isc_nmsocket_type;
724 
/*%
 * A universal structure for either a single socket or a group of
 * dup'd/SO_REUSE_PORT-using sockets listening on the same interface.
 */
#define NMSOCK_MAGIC	ISC_MAGIC('N', 'M', 'S', 'K')
#define VALID_NMSOCK(t) ISC_MAGIC_VALID(t, NMSOCK_MAGIC)

/*%
 * Index into socket stat counter arrays.  Values are contiguous;
 * STATID_ACTIVE is the highest index.
 */
enum {
	STATID_OPEN = 0,
	STATID_OPENFAIL = 1,
	STATID_CLOSE = 2,
	STATID_BINDFAIL = 3,
	STATID_CONNECTFAIL = 4,
	STATID_CONNECT = 5,
	STATID_ACCEPTFAIL = 6,
	STATID_ACCEPT = 7,
	STATID_SENDFAIL = 8,
	STATID_RECVFAIL = 9,
	STATID_ACTIVE = 10
};
748 
#if HAVE_LIBNGHTTP2
/* Context for a single TLS send operation on a stream socket. */
typedef struct isc_nmsocket_tls_send_req {
	isc_nmsocket_t *tlssock; /* socket the data is written to */
	isc_region_t data;	 /* region being sent */
	isc_nm_cb_t cb;		 /* completion callback */
	void *cbarg;
	isc_nmhandle_t *handle;
	bool finish;
	uint8_t smallbuf[512]; /* inline buffer -- presumably to avoid
				* allocation for small payloads; confirm
				* against the TLS send code */
} isc_nmsocket_tls_send_req_t;

/* HTTP request methods we recognize. */
typedef enum isc_http_request_type {
	ISC_HTTP_REQ_GET,
	ISC_HTTP_REQ_POST,
	ISC_HTTP_REQ_UNSUPPORTED
} isc_http_request_type_t;

/* URI schemes we recognize. */
typedef enum isc_http_scheme_type {
	ISC_HTTP_SCHEME_HTTP,
	ISC_HTTP_SCHEME_HTTP_SECURE,
	ISC_HTTP_SCHEME_UNSUPPORTED
} isc_http_scheme_type_t;

/* Read callback plus argument, kept on a list by the endpoints object. */
typedef struct isc_nm_httpcbarg {
	isc_nm_recv_cb_t cb;
	void *cbarg;
	LINK(struct isc_nm_httpcbarg) link;
} isc_nm_httpcbarg_t;

/* Handler registered for a specific HTTP path. */
typedef struct isc_nm_httphandler {
	char *path; /* path this handler serves */
	isc_nm_recv_cb_t cb;
	void *cbarg;
	size_t extrahandlesize;
	LINK(struct isc_nm_httphandler) link;
} isc_nm_httphandler_t;

/* Reference-counted set of HTTP path handlers for a listener. */
struct isc_nm_http_endpoints {
	isc_mem_t *mctx;

	ISC_LIST(isc_nm_httphandler_t) handlers;
	ISC_LIST(isc_nm_httpcbarg_t) handler_cbargs;

	isc_refcount_t references;
	atomic_bool in_use;
};

/* Per-socket HTTP/2 state (client and server side). */
typedef struct isc_nmsocket_h2 {
	isc_nmsocket_t *psock; /* owner of the structure */
	char *request_path;
	char *query_data;
	size_t query_data_len;
	bool query_too_large;
	isc_nm_httphandler_t *handler; /* handler matched for request_path */

	isc_buffer_t rbuf; /* read buffer */
	isc_buffer_t wbuf; /* write buffer */

	int32_t stream_id; /* HTTP/2 stream identifier */
	isc_nm_http_session_t *session;

	isc_nmsocket_t *httpserver;

	/* maximum concurrent streams (server-side) */
	uint32_t max_concurrent_streams;

	isc_http_request_type_t request_type;
	isc_http_scheme_type_t request_scheme;

	size_t content_length; /* parsed Content-Length value */
	char clenbuf[128];     /* textual content-length scratch buffer */

	int headers_error_code;
	size_t headers_data_processed;

	isc_nm_recv_cb_t cb;
	void *cbarg;
	LINK(struct isc_nmsocket_h2) link;

	isc_nm_http_endpoints_t *listener_endpoints;

	bool response_submitted;
	/* Client-side connection parameters. */
	struct {
		char *uri;
		bool post; /* use POST (true) or GET (false) */
		isc_tlsctx_t *tlsctx;
		isc_sockaddr_t local_interface;
		void *cstream;
	} connect;
} isc_nmsocket_h2_t;
#endif /* HAVE_LIBNGHTTP2 */
840 
typedef void (*isc_nm_closehandlecb_t)(void *arg);
/*%<
 * Opaque callback function, used for isc_nmhandle 'reset' and 'free'
 * callbacks.  The argument is the handle's opaque data pointer.
 */
846 
struct isc_nmsocket {
	/*% Unlocked, RO */
	int magic;
	/*% ID of the netthread that owns this socket */
	int tid;
	isc_nmsocket_type type;
	isc_nm_t *mgr;

	/*% Parent socket for multithreaded listeners */
	isc_nmsocket_t *parent;
	/*% Listener socket this connection was accepted on */
	isc_nmsocket_t *listener;
	/*% Self socket */
	isc_nmsocket_t *self;

	/*
	 * Barriers used to synchronize the child sockets with the
	 * parent while listening starts and stops.
	 */
	isc_barrier_t startlistening;
	isc_barrier_t stoplistening;

	/*% TLS stuff (TLSDNS sockets) */
	struct tls {
		isc_tls_t *tls;
		isc_tlsctx_t *ctx;
		/* Application-side and SSL-side ends of the BIO pairs */
		BIO *app_rbio;
		BIO *app_wbio;
		BIO *ssl_rbio;
		BIO *ssl_wbio;
		enum {
			TLS_STATE_NONE,
			TLS_STATE_HANDSHAKE,
			TLS_STATE_IO,
			TLS_STATE_ERROR,
			TLS_STATE_CLOSING
		} state;
		isc_region_t senddata;
		bool cycle;
		isc_result_t pending_error;
		/* List of active send requests. */
		isc__nm_uvreq_t *pending_req;
		bool alpn_negotiated;
	} tls;

#if HAVE_LIBNGHTTP2
	/*% TLS stream stuff (plain TLS sockets) */
	struct tlsstream {
		bool server;
		BIO *bio_in;
		BIO *bio_out;
		isc_tls_t *tls;
		isc_tlsctx_t *ctx;
		isc_nmsocket_t *tlslistener;
		atomic_bool result_updated;
		enum {
			TLS_INIT,
			TLS_HANDSHAKE,
			TLS_IO,
			TLS_CLOSED
		} state; /*%< The order of these is significant */
		size_t nsending;
		bool reading;
	} tlsstream;

	isc_nmsocket_h2_t h2;
#endif /* HAVE_LIBNGHTTP2 */
	/*%
	 * quota is the TCP client, attached when a TCP connection
	 * is established. pquota is a non-attached pointer to the
	 * TCP client quota, stored in listening sockets but only
	 * attached in connected sockets.
	 */
	isc_quota_t *quota;
	isc_quota_t *pquota;
	isc_quota_cb_t quotacb;

	/*%
	 * Socket statistics
	 */
	const isc_statscounter_t *statsindex;

	/*%
	 * TCP read/connect timeout timers.
	 */
	uv_timer_t timer;
	uint64_t read_timeout;
	uint64_t connect_timeout;

	/*% outer socket is for 'wrapped' sockets - e.g. tcpdns in tcp */
	isc_nmsocket_t *outer;

	/*% server socket for connections */
	isc_nmsocket_t *server;

	/*% Child sockets for multi-socket setups */
	isc_nmsocket_t *children;
	uint_fast32_t nchildren;
	/*% Local address/interface this socket is bound to */
	isc_sockaddr_t iface;
	isc_nmhandle_t *statichandle;
	isc_nmhandle_t *outerhandle;

	/*% Extra data allocated at the end of each isc_nmhandle_t */
	size_t extrahandlesize;

	/*% TCP backlog */
	int backlog;

	/*% libuv data */
	uv_os_sock_t fd;
	union uv_any_handle uv_handle;

	/*% Peer address */
	isc_sockaddr_t peer;

	/* Atomic */
	/*% Number of running (e.g. listening) child sockets */
	atomic_uint_fast32_t rchildren;

	/*%
	 * Socket is active if it's listening, working, etc. If it's
	 * closing, then it doesn't make sense, for example, to
	 * push handles or reqs for reuse.
	 */
	atomic_bool active;
	atomic_bool destroying;

	/*%
	 * Socket is closed if it's not active and all the possible
	 * callbacks were fired, there are no active handles, etc.
	 * If active==false but closed==false, that means the socket
	 * is closing.
	 */
	atomic_bool closing;
	atomic_bool closed;
	atomic_bool listening;
	atomic_bool connecting;
	atomic_bool connected;
	atomic_bool accepting;
	atomic_bool reading;
	isc_refcount_t references;

	/*%
	 * Established an outgoing connection, as client not server.
	 */
	atomic_bool client;

	/*%
	 * TCPDNS socket has been set not to pipeline.
	 */
	atomic_bool sequential;

	/*%
	 * The socket is processing a read callback; this is a guard
	 * to not read more data before the readcb is back.
	 */
	bool processing;

	/*%
	 * A TCP socket has had isc_nm_pauseread() called.
	 */
	atomic_bool readpaused;

	/*%
	 * A TCP or TCPDNS socket has been set to use the keepalive
	 * timeout instead of the default idle timeout.
	 */
	atomic_bool keepalive;

	/*%
	 * 'spare' handles that can be reused to avoid allocations,
	 * for UDP.
	 */
	isc_astack_t *inactivehandles;
	isc_astack_t *inactivereqs;

	/*%
	 * Used to wait for TCP listening events to complete, and
	 * for the number of running children to reach zero during
	 * shutdown.
	 *
	 * We use two condition variables to prevent the race where the netmgr
	 * threads would be able to finish and destroy the socket before it's
	 * unlocked by the isc_nm_listen<proto>() function.  So, the flow is as
	 * follows:
	 *
	 *   1. parent thread creates all children sockets and passes them to
	 *      netthreads, looks at the signaling variable and WAIT(cond) until
	 *      the children are done initializing
	 *
	 *   2. the events get picked by netthreads, calls the libuv API (and
	 *      either succeeds or fails) and WAIT(scond) until all other
	 *      children sockets in netthreads are initialized and the listening
	 *      socket lock is unlocked
	 *
	 *   3. the control is given back to the parent thread which now either
	 *      returns success or shutdowns the listener if an error has
	 *      occurred in the children netthread
	 *
	 * NOTE: The other approach would be doing an extra attach to the parent
	 * listening socket, and then detach it in the parent thread, but that
	 * breaks the promise that once the libuv socket is initialized on the
	 * nmsocket, the nmsocket needs to be handled only by matching
	 * netthread, so in fact that would add a complexity in a way that
	 * isc__nmsocket_detach would have to be converted to use an
	 * asynchronous netievent.
	 */
	isc_mutex_t lock;
	isc_condition_t cond;
	isc_condition_t scond;

	/*%
	 * Used to pass a result back from listen or connect events.
	 */
	isc_result_t result;

	/*%
	 * List of active handles.
	 * ah - current position in 'ah_frees'; this represents the
	 *	current number of active handles;
	 * ah_size - size of the 'ah_frees' and 'ah_handles' arrays
	 * ah_handles - array pointers to active handles
	 *
	 * Adding a handle
	 *  - if ah == ah_size, reallocate
	 *  - x = ah_frees[ah]
	 *  - ah_frees[ah++] = 0;
	 *  - ah_handles[x] = handle
	 *  - x must be stored with the handle!
	 * Removing a handle:
	 *  - ah_frees[--ah] = x
	 *  - ah_handles[x] = NULL;
	 *
	 * XXX: for now this is locked with socket->lock, but we
	 * might want to change it to something lockless in the
	 * future.
	 */
	atomic_int_fast32_t ah;
	size_t ah_size;
	size_t *ah_frees;
	isc_nmhandle_t **ah_handles;

	/*% Buffer for TCPDNS processing */
	size_t buf_size;
	size_t buf_len;
	unsigned char *buf;

	/*%
	 * This function will be called with handle->sock
	 * as the argument whenever a handle's references drop
	 * to zero, after its reset callback has been called.
	 */
	isc_nm_closehandlecb_t closehandle_cb;

	isc_nmhandle_t *recv_handle;
	isc_nm_recv_cb_t recv_cb;
	void *recv_cbarg;
	bool recv_read;

	isc_nm_cb_t connect_cb;
	void *connect_cbarg;

	isc_nm_accept_cb_t accept_cb;
	void *accept_cbarg;

	atomic_int_fast32_t active_child_connections;

#ifdef NETMGR_TRACE
	void *backtrace[TRACE_SIZE];
	int backtrace_size;
	LINK(isc_nmsocket_t) active_link;
	ISC_LIST(isc_nmhandle_t) active_handles;
#endif
};
1116 
1117 bool
1118 isc__nm_in_netthread(void);
1119 /*%<
1120  * Returns 'true' if we're in the network thread.
1121  */
1122 
1123 void
1124 isc__nm_maybe_enqueue_ievent(isc__networker_t *worker, isc__netievent_t *event);
1125 /*%<
1126  * If the caller is already in the matching nmthread, process the netievent
1127  * directly, if not enqueue using isc__nm_enqueue_ievent().
1128  */
1129 
1130 void
1131 isc__nm_enqueue_ievent(isc__networker_t *worker, isc__netievent_t *event);
1132 /*%<
 * Enqueue an ievent onto a specific worker queue. (This is the only safe
 * way to use an isc__networker_t from another thread.)
1135  */
1136 
1137 void
1138 isc__nm_free_uvbuf(isc_nmsocket_t *sock, const uv_buf_t *buf);
1139 /*%<
1140  * Free a buffer allocated for a receive operation.
1141  *
 * Note that as currently implemented, this doesn't actually
 * free anything; it merely marks the isc__networker's UDP receive
 * buffer as "not in use".
1145  */
1146 
1147 isc_nmhandle_t *
1148 isc___nmhandle_get(isc_nmsocket_t *sock, isc_sockaddr_t *peer,
1149 		   isc_sockaddr_t *local FLARG);
1150 /*%<
1151  * Get a handle for the socket 'sock', allocating a new one
1152  * if there isn't one available in 'sock->inactivehandles'.
1153  *
1154  * If 'peer' is not NULL, set the handle's peer address to 'peer',
1155  * otherwise set it to 'sock->peer'.
1156  *
 * If 'local' is not NULL, set the handle's local address to 'local',
 * otherwise set it to 'sock->iface'.
1159  *
1160  * 'sock' will be attached to 'handle->sock'. The caller may need
1161  * to detach the socket afterward.
1162  */
1163 
1164 isc__nm_uvreq_t *
1165 isc___nm_uvreq_get(isc_nm_t *mgr, isc_nmsocket_t *sock FLARG);
1166 /*%<
1167  * Get a UV request structure for the socket 'sock', allocating a
1168  * new one if there isn't one available in 'sock->inactivereqs'.
1169  */
1170 
1171 void
1172 isc___nm_uvreq_put(isc__nm_uvreq_t **req, isc_nmsocket_t *sock FLARG);
1173 /*%<
1174  * Completes the use of a UV request structure, setting '*req' to NULL.
1175  *
1176  * The UV request is pushed onto the 'sock->inactivereqs' stack or,
1177  * if that doesn't work, freed.
1178  */
1179 
1180 void
1181 isc___nmsocket_init(isc_nmsocket_t *sock, isc_nm_t *mgr, isc_nmsocket_type type,
1182 		    isc_sockaddr_t *iface FLARG);
1183 /*%<
1184  * Initialize socket 'sock', attach it to 'mgr', and set it to type 'type'
1185  * and its interface to 'iface'.
1186  */
1187 
1188 void
1189 isc___nmsocket_attach(isc_nmsocket_t *sock, isc_nmsocket_t **target FLARG);
1190 /*%<
1191  * Attach to a socket, increasing refcount
1192  */
1193 
1194 void
1195 isc___nmsocket_detach(isc_nmsocket_t **socketp FLARG);
1196 /*%<
1197  * Detach from socket, decreasing refcount and possibly destroying the
1198  * socket if it's no longer referenced.
1199  */
1200 
1201 void
1202 isc___nmsocket_prep_destroy(isc_nmsocket_t *sock FLARG);
1203 /*%<
 * Mark 'sock' as inactive, close it if necessary, and destroy it
1205  * if there are no remaining references or active handles.
1206  */
1207 
1208 void
1209 isc__nmsocket_shutdown(isc_nmsocket_t *sock);
1210 /*%<
1211  * Initiate the socket shutdown which actively calls the active
1212  * callbacks.
1213  */
1214 
1215 bool
1216 isc__nmsocket_active(isc_nmsocket_t *sock);
1217 /*%<
1218  * Determine whether 'sock' is active by checking 'sock->active'
1219  * or, for child sockets, 'sock->parent->active'.
1220  */
1221 
1222 bool
1223 isc__nmsocket_deactivate(isc_nmsocket_t *sock);
1224 /*%<
1225  * @brief Deactivate active socket
1226  *
 * Atomically deactivate the socket by setting @p sock->active or, for child
 * sockets, @p sock->parent->active to @c false
1229  *
1230  * @param[in] sock - valid nmsocket
1231  * @return @c false if the socket was already inactive, @c true otherwise
1232  */
1233 
1234 void
1235 isc__nmsocket_clearcb(isc_nmsocket_t *sock);
1236 /*%<
1237  * Clear the recv and accept callbacks in 'sock'.
1238  */
1239 
1240 void
1241 isc__nmsocket_timer_stop(isc_nmsocket_t *sock);
1242 void
1243 isc__nmsocket_timer_start(isc_nmsocket_t *sock);
1244 void
1245 isc__nmsocket_timer_restart(isc_nmsocket_t *sock);
1246 bool
1247 isc__nmsocket_timer_running(isc_nmsocket_t *sock);
1248 /*%<
1249  * Start/stop/restart/check the timeout on the socket
1250  */
1251 
1252 void
1253 isc__nm_connectcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq,
1254 		  isc_result_t eresult, bool async);
1255 
1256 void
1257 isc__nm_async_connectcb(isc__networker_t *worker, isc__netievent_t *ev0);
1258 /*%<
1259  * Issue a connect callback on the socket, used to call the callback
1260  */
1261 
1262 void
1263 isc__nm_readcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq,
1264 	       isc_result_t eresult);
1265 void
1266 isc__nm_async_readcb(isc__networker_t *worker, isc__netievent_t *ev0);
1267 
1268 /*%<
1269  * Issue a read callback on the socket, used to call the callback
1270  * on failed conditions when the event can't be scheduled on the uv loop.
1271  *
1272  */
1273 
1274 void
1275 isc__nm_sendcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq,
1276 	       isc_result_t eresult, bool async);
1277 void
1278 isc__nm_async_sendcb(isc__networker_t *worker, isc__netievent_t *ev0);
1279 /*%<
1280  * Issue a write callback on the socket, used to call the callback
1281  * on failed conditions when the event can't be scheduled on the uv loop.
1282  */
1283 
1284 void
1285 isc__nm_async_shutdown(isc__networker_t *worker, isc__netievent_t *ev0);
1286 /*%<
1287  * Walk through all uv handles, get the underlying sockets and issue
1288  * close on them.
1289  */
1290 
1291 void
1292 isc__nm_udp_send(isc_nmhandle_t *handle, const isc_region_t *region,
1293 		 isc_nm_cb_t cb, void *cbarg);
1294 /*%<
1295  * Back-end implementation of isc_nm_send() for UDP handles.
1296  */
1297 
1298 void
1299 isc__nm_udp_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg);
1300 /*
1301  * Back-end implementation of isc_nm_read() for UDP handles.
1302  */
1303 
1304 void
1305 isc__nm_udp_close(isc_nmsocket_t *sock);
1306 /*%<
1307  * Close a UDP socket.
1308  */
1309 
1310 void
1311 isc__nm_udp_cancelread(isc_nmhandle_t *handle);
1312 /*%<
1313  * Stop reading on a connected UDP handle.
1314  */
1315 
1316 void
1317 isc__nm_udp_shutdown(isc_nmsocket_t *sock);
1318 /*%<
1319  * Called during the shutdown process to close and clean up connected
1320  * sockets.
1321  */
1322 
1323 void
1324 isc__nm_udp_stoplistening(isc_nmsocket_t *sock);
1325 /*%<
1326  * Stop listening on 'sock'.
1327  */
1328 
1329 void
1330 isc__nm_udp_settimeout(isc_nmhandle_t *handle, uint32_t timeout);
1331 /*%<
1332  * Set or clear the recv timeout for the UDP socket associated with 'handle'.
1333  */
1334 
1335 void
1336 isc__nm_async_udplisten(isc__networker_t *worker, isc__netievent_t *ev0);
1337 void
1338 isc__nm_async_udpconnect(isc__networker_t *worker, isc__netievent_t *ev0);
1339 void
1340 isc__nm_async_udpstop(isc__networker_t *worker, isc__netievent_t *ev0);
1341 void
1342 isc__nm_async_udpsend(isc__networker_t *worker, isc__netievent_t *ev0);
1343 void
1344 isc__nm_async_udpread(isc__networker_t *worker, isc__netievent_t *ev0);
1345 void
1346 isc__nm_async_udpcancel(isc__networker_t *worker, isc__netievent_t *ev0);
1347 void
1348 isc__nm_async_udpclose(isc__networker_t *worker, isc__netievent_t *ev0);
1349 /*%<
1350  * Callback handlers for asynchronous UDP events (listen, stoplisten, send).
1351  */
1352 
1353 void
1354 isc__nm_tcp_send(isc_nmhandle_t *handle, const isc_region_t *region,
1355 		 isc_nm_cb_t cb, void *cbarg);
1356 /*%<
1357  * Back-end implementation of isc_nm_send() for TCP handles.
1358  */
1359 
1360 void
1361 isc__nm_tcp_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg);
1362 /*
1363  * Back-end implementation of isc_nm_read() for TCP handles.
1364  */
1365 
1366 void
1367 isc__nm_tcp_close(isc_nmsocket_t *sock);
1368 /*%<
1369  * Close a TCP socket.
1370  */
1371 void
1372 isc__nm_tcp_pauseread(isc_nmhandle_t *handle);
1373 /*%<
1374  * Pause reading on this handle, while still remembering the callback.
1375  */
1376 
1377 void
1378 isc__nm_tcp_resumeread(isc_nmhandle_t *handle);
1379 /*%<
1380  * Resume reading from socket.
1381  *
1382  */
1383 
1384 void
1385 isc__nm_tcp_shutdown(isc_nmsocket_t *sock);
1386 /*%<
1387  * Called during the shutdown process to close and clean up connected
1388  * sockets.
1389  */
1390 
1391 void
1392 isc__nm_tcp_cancelread(isc_nmhandle_t *handle);
1393 /*%<
1394  * Stop reading on a connected TCP handle.
1395  */
1396 
1397 void
1398 isc__nm_tcp_stoplistening(isc_nmsocket_t *sock);
1399 /*%<
1400  * Stop listening on 'sock'.
1401  */
1402 
1403 int_fast32_t
1404 isc__nm_tcp_listener_nactive(isc_nmsocket_t *sock);
1405 /*%<
1406  * Returns the number of active connections for the TCP listener socket.
1407  */
1408 
1409 void
1410 isc__nm_tcp_settimeout(isc_nmhandle_t *handle, uint32_t timeout);
1411 /*%<
1412  * Set the read timeout for the TCP socket associated with 'handle'.
1413  */
1414 
1415 void
1416 isc__nm_async_tcpconnect(isc__networker_t *worker, isc__netievent_t *ev0);
1417 void
1418 isc__nm_async_tcplisten(isc__networker_t *worker, isc__netievent_t *ev0);
1419 void
1420 isc__nm_async_tcpaccept(isc__networker_t *worker, isc__netievent_t *ev0);
1421 void
1422 isc__nm_async_tcpstop(isc__networker_t *worker, isc__netievent_t *ev0);
1423 void
1424 isc__nm_async_tcpsend(isc__networker_t *worker, isc__netievent_t *ev0);
1425 void
1426 isc__nm_async_startread(isc__networker_t *worker, isc__netievent_t *ev0);
1427 void
1428 isc__nm_async_pauseread(isc__networker_t *worker, isc__netievent_t *ev0);
1429 void
1430 isc__nm_async_tcpstartread(isc__networker_t *worker, isc__netievent_t *ev0);
1431 void
1432 isc__nm_async_tcppauseread(isc__networker_t *worker, isc__netievent_t *ev0);
1433 void
1434 isc__nm_async_tcpcancel(isc__networker_t *worker, isc__netievent_t *ev0);
1435 void
1436 isc__nm_async_tcpclose(isc__networker_t *worker, isc__netievent_t *ev0);
1437 /*%<
1438  * Callback handlers for asynchronous TCP events (connect, listen,
1439  * stoplisten, send, read, pause, close).
1440  */
1441 
1442 void
1443 isc__nm_async_tlsclose(isc__networker_t *worker, isc__netievent_t *ev0);
1444 
1445 void
1446 isc__nm_async_tlssend(isc__networker_t *worker, isc__netievent_t *ev0);
1447 
1448 void
1449 isc__nm_async_tlsstartread(isc__networker_t *worker, isc__netievent_t *ev0);
1450 
1451 void
1452 isc__nm_async_tlsdobio(isc__networker_t *worker, isc__netievent_t *ev0);
1453 
1454 void
1455 isc__nm_async_tlscancel(isc__networker_t *worker, isc__netievent_t *ev0);
1456 /*%<
1457  * Callback handlers for asynchronous TLS events.
1458  */
1459 
1460 void
1461 isc__nm_tcpdns_send(isc_nmhandle_t *handle, isc_region_t *region,
1462 		    isc_nm_cb_t cb, void *cbarg);
1463 /*%<
1464  * Back-end implementation of isc_nm_send() for TCPDNS handles.
1465  */
1466 
1467 void
1468 isc__nm_tcpdns_shutdown(isc_nmsocket_t *sock);
1469 
1470 void
1471 isc__nm_tcpdns_close(isc_nmsocket_t *sock);
1472 /*%<
1473  * Close a TCPDNS socket.
1474  */
1475 
1476 void
1477 isc__nm_tcpdns_stoplistening(isc_nmsocket_t *sock);
1478 /*%<
1479  * Stop listening on 'sock'.
1480  */
1481 
1482 void
1483 isc__nm_tcpdns_settimeout(isc_nmhandle_t *handle, uint32_t timeout);
1484 /*%<
1485  * Set the read timeout and reset the timer for the TCPDNS socket
1486  * associated with 'handle', and the TCP socket it wraps around.
1487  */
1488 
1489 void
1490 isc__nm_async_tcpdnsaccept(isc__networker_t *worker, isc__netievent_t *ev0);
1491 void
1492 isc__nm_async_tcpdnsconnect(isc__networker_t *worker, isc__netievent_t *ev0);
1493 void
1494 isc__nm_async_tcpdnslisten(isc__networker_t *worker, isc__netievent_t *ev0);
1495 void
1496 isc__nm_async_tcpdnscancel(isc__networker_t *worker, isc__netievent_t *ev0);
1497 void
1498 isc__nm_async_tcpdnsclose(isc__networker_t *worker, isc__netievent_t *ev0);
1499 void
1500 isc__nm_async_tcpdnssend(isc__networker_t *worker, isc__netievent_t *ev0);
1501 void
1502 isc__nm_async_tcpdnsstop(isc__networker_t *worker, isc__netievent_t *ev0);
1503 void
1504 isc__nm_async_tcpdnsread(isc__networker_t *worker, isc__netievent_t *ev0);
1505 /*%<
1506  * Callback handlers for asynchronous TCPDNS events.
1507  */
1508 
1509 void
1510 isc__nm_tcpdns_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg);
1511 /*
1512  * Back-end implementation of isc_nm_read() for TCPDNS handles.
1513  */
1514 
1515 void
1516 isc__nm_tcpdns_cancelread(isc_nmhandle_t *handle);
1517 /*%<
1518  * Stop reading on a connected TCPDNS handle.
1519  */
1520 
1521 void
1522 isc__nm_tlsdns_send(isc_nmhandle_t *handle, isc_region_t *region,
1523 		    isc_nm_cb_t cb, void *cbarg);
1524 
1525 void
1526 isc__nm_tlsdns_shutdown(isc_nmsocket_t *sock);
1527 
1528 void
1529 isc__nm_tlsdns_close(isc_nmsocket_t *sock);
1530 /*%<
1531  * Close a TLSDNS socket.
1532  */
1533 
1534 void
1535 isc__nm_tlsdns_stoplistening(isc_nmsocket_t *sock);
1536 /*%<
1537  * Stop listening on 'sock'.
1538  */
1539 
1540 void
1541 isc__nm_tlsdns_settimeout(isc_nmhandle_t *handle, uint32_t timeout);
1542 /*%<
1543  * Set the read timeout and reset the timer for the TLSDNS socket
1544  * associated with 'handle', and the TCP socket it wraps around.
1545  */
1546 
1547 void
1548 isc__nm_tlsdns_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg);
1549 /*
1550  * Back-end implementation of isc_nm_read() for TLSDNS handles.
1551  */
1552 
1553 void
1554 isc__nm_tlsdns_cancelread(isc_nmhandle_t *handle);
1555 /*%<
1556  * Stop reading on a connected TLSDNS handle.
1557  */
1558 
1559 void
1560 isc__nm_async_tlsdnscycle(isc__networker_t *worker, isc__netievent_t *ev0);
1561 void
1562 isc__nm_async_tlsdnsaccept(isc__networker_t *worker, isc__netievent_t *ev0);
1563 void
1564 isc__nm_async_tlsdnsconnect(isc__networker_t *worker, isc__netievent_t *ev0);
1565 void
1566 isc__nm_async_tlsdnslisten(isc__networker_t *worker, isc__netievent_t *ev0);
1567 void
1568 isc__nm_async_tlsdnscancel(isc__networker_t *worker, isc__netievent_t *ev0);
1569 void
1570 isc__nm_async_tlsdnsclose(isc__networker_t *worker, isc__netievent_t *ev0);
1571 void
1572 isc__nm_async_tlsdnssend(isc__networker_t *worker, isc__netievent_t *ev0);
1573 void
1574 isc__nm_async_tlsdnsstop(isc__networker_t *worker, isc__netievent_t *ev0);
1575 void
1576 isc__nm_async_tlsdnsshutdown(isc__networker_t *worker, isc__netievent_t *ev0);
1577 void
1578 isc__nm_async_tlsdnsread(isc__networker_t *worker, isc__netievent_t *ev0);
1579 /*%<
1580  * Callback handlers for asynchronous TLSDNS events.
1581  */
1582 
1583 bool
1584 isc__nm_tlsdns_xfr_allowed(isc_nmsocket_t *sock);
1585 /*%<
1586  * Check if it is possible to do a zone transfer over the given TLSDNS
1587  * socket.
1588  *
1589  * Requires:
1590  * \li	'sock' is a valid TLSDNS socket.
1591  */
1592 
1593 #if HAVE_LIBNGHTTP2
1594 void
1595 isc__nm_tls_send(isc_nmhandle_t *handle, const isc_region_t *region,
1596 		 isc_nm_cb_t cb, void *cbarg);
1597 
1598 void
1599 isc__nm_tls_cancelread(isc_nmhandle_t *handle);
1600 
1601 /*%<
1602  * Back-end implementation of isc_nm_send() for TLSDNS handles.
1603  */
1604 
1605 void
1606 isc__nm_tls_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg);
1607 
1608 void
1609 isc__nm_tls_close(isc_nmsocket_t *sock);
1610 /*%<
1611  * Close a TLS socket.
1612  */
1613 
1614 void
1615 isc__nm_tls_pauseread(isc_nmhandle_t *handle);
1616 /*%<
1617  * Pause reading on this handle, while still remembering the callback.
1618  */
1619 
1620 void
1621 isc__nm_tls_resumeread(isc_nmhandle_t *handle);
1622 /*%<
1623  * Resume reading from the handle.
1624  *
1625  */
1626 
1627 void
1628 isc__nm_tls_cleanup_data(isc_nmsocket_t *sock);
1629 
1630 void
1631 isc__nm_tls_stoplistening(isc_nmsocket_t *sock);
1632 
1633 void
1634 isc__nm_tls_settimeout(isc_nmhandle_t *handle, uint32_t timeout);
1635 void
1636 isc__nm_tls_cleartimeout(isc_nmhandle_t *handle);
1637 /*%<
1638  * Set the read timeout and reset the timer for the socket
1639  * associated with 'handle', and the TCP socket it wraps
1640  * around.
1641  */
1642 
1643 void
1644 isc__nmhandle_tls_keepalive(isc_nmhandle_t *handle, bool value);
1645 /*%<
1646  * Set the keepalive value on the underlying TCP handle.
1647  */
1648 
1649 void
1650 isc__nm_http_stoplistening(isc_nmsocket_t *sock);
1651 
1652 void
1653 isc__nm_http_settimeout(isc_nmhandle_t *handle, uint32_t timeout);
1654 void
1655 isc__nm_http_cleartimeout(isc_nmhandle_t *handle);
1656 /*%<
1657  * Set the read timeout and reset the timer for the socket
1658  * associated with 'handle', and the TLS/TCP socket it wraps
1659  * around.
1660  */
1661 
1662 void
1663 isc__nmhandle_http_keepalive(isc_nmhandle_t *handle, bool value);
1664 /*%<
1665  * Set the keepalive value on the underlying session handle
1666  */
1667 
1668 void
1669 isc__nm_http_initsocket(isc_nmsocket_t *sock);
1670 
1671 void
1672 isc__nm_http_cleanup_data(isc_nmsocket_t *sock);
1673 
1674 isc_result_t
1675 isc__nm_http_request(isc_nmhandle_t *handle, isc_region_t *region,
1676 		     isc_nm_recv_cb_t reply_cb, void *cbarg);
1677 
1678 void
1679 isc__nm_http_send(isc_nmhandle_t *handle, const isc_region_t *region,
1680 		  isc_nm_cb_t cb, void *cbarg);
1681 
1682 void
1683 isc__nm_http_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg);
1684 
1685 void
1686 isc__nm_http_close(isc_nmsocket_t *sock);
1687 
1688 void
1689 isc__nm_http_bad_request(isc_nmhandle_t *handle);
1690 /*%<
1691  * Respond to the request with 400 "Bad Request" status.
1692  *
1693  * Requires:
1694  * \li 'handle' is a valid HTTP netmgr handle object, referencing a server-side
1695  * socket
1696  */
1697 
1698 void
1699 isc__nm_async_httpsend(isc__networker_t *worker, isc__netievent_t *ev0);
1700 
1701 void
1702 isc__nm_async_httpstop(isc__networker_t *worker, isc__netievent_t *ev0);
1703 
1704 void
1705 isc__nm_async_httpclose(isc__networker_t *worker, isc__netievent_t *ev0);
1706 
1707 bool
1708 isc__nm_parse_httpquery(const char *query_string, const char **start,
1709 			size_t *len);
1710 
1711 char *
1712 isc__nm_base64url_to_base64(isc_mem_t *mem, const char *base64url,
1713 			    const size_t base64url_len, size_t *res_len);
1714 
1715 char *
1716 isc__nm_base64_to_base64url(isc_mem_t *mem, const char *base64,
1717 			    const size_t base64_len, size_t *res_len);
1718 
1719 void
1720 isc__nm_httpsession_attach(isc_nm_http_session_t *source,
1721 			   isc_nm_http_session_t **targetp);
1722 void
1723 isc__nm_httpsession_detach(isc_nm_http_session_t **sessionp);
1724 
1725 #endif
1726 
/*
 * Convenience wrapper for isc___nm_uverr2result() that enables logging
 * and fills in the caller's source location automatically.
 */
#define isc__nm_uverr2result(x) \
	isc___nm_uverr2result(x, true, __FILE__, __LINE__, __func__)
1729 isc_result_t
1730 isc___nm_uverr2result(int uverr, bool dolog, const char *file,
1731 		      unsigned int line, const char *func);
1732 /*%<
1733  * Convert a libuv error value into an isc_result_t.  The
1734  * list of supported error values is not complete; new users
1735  * of this function should add any expected errors that are
1736  * not already there.
1737  */
1738 
1739 bool
1740 isc__nm_acquire_interlocked(isc_nm_t *mgr);
1741 /*%<
1742  * Try to acquire interlocked state; return true if successful.
1743  */
1744 
1745 void
1746 isc__nm_drop_interlocked(isc_nm_t *mgr);
1747 /*%<
1748  * Drop interlocked state; signal waiters.
1749  */
1750 
1751 void
1752 isc__nm_acquire_interlocked_force(isc_nm_t *mgr);
1753 /*%<
1754  * Actively wait for interlocked state.
1755  */
1756 
1757 void
1758 isc__nm_incstats(isc_nm_t *mgr, isc_statscounter_t counterid);
1759 /*%<
1760  * Increment socket-related statistics counters.
1761  */
1762 
1763 void
1764 isc__nm_decstats(isc_nm_t *mgr, isc_statscounter_t counterid);
1765 /*%<
1766  * Decrement socket-related statistics counters.
1767  */
1768 
1769 isc_result_t
1770 isc__nm_socket(int domain, int type, int protocol, uv_os_sock_t *sockp);
1771 /*%<
1772  * Platform independent socket() version
1773  */
1774 
1775 void
1776 isc__nm_closesocket(uv_os_sock_t sock);
1777 /*%<
1778  * Platform independent closesocket() version
1779  */
1780 
1781 isc_result_t
1782 isc__nm_socket_freebind(uv_os_sock_t fd, sa_family_t sa_family);
1783 /*%<
1784  * Set the IP_FREEBIND (or equivalent) socket option on the uv_handle
1785  */
1786 
1787 isc_result_t
1788 isc__nm_socket_reuse(uv_os_sock_t fd);
1789 /*%<
1790  * Set the SO_REUSEADDR or SO_REUSEPORT (or equivalent) socket option on the fd
1791  */
1792 
1793 isc_result_t
1794 isc__nm_socket_reuse_lb(uv_os_sock_t fd);
1795 /*%<
1796  * Set the SO_REUSEPORT_LB (or equivalent) socket option on the fd
1797  */
1798 
1799 isc_result_t
1800 isc__nm_socket_incoming_cpu(uv_os_sock_t fd);
1801 /*%<
1802  * Set the SO_INCOMING_CPU socket option on the fd if available
1803  */
1804 
1805 isc_result_t
1806 isc__nm_socket_disable_pmtud(uv_os_sock_t fd, sa_family_t sa_family);
1807 /*%<
1808  * Disable the Path MTU Discovery, either by disabling IP(V6)_DONTFRAG socket
1809  * option, or setting the IP(V6)_MTU_DISCOVER socket option to IP_PMTUDISC_OMIT
1810  */
1811 
1812 isc_result_t
1813 isc__nm_socket_connectiontimeout(uv_os_sock_t fd, int timeout_ms);
1814 /*%<
1815  * Set the connection timeout in milliseconds, on non-Linux platforms,
1816  * the minimum value must be at least 1000 (1 second).
1817  */
1818 
1819 isc_result_t
1820 isc__nm_socket_tcp_nodelay(uv_os_sock_t fd);
1821 /*%<
1822  * Disables Nagle's algorithm on a TCP socket (sets TCP_NODELAY).
1823  */
1824 
1825 void
1826 isc__nm_set_network_buffers(isc_nm_t *nm, uv_handle_t *handle);
1827 /*%>
1828  * Sets the pre-configured network buffers size on the handle.
1829  */
1830 
1831 /*
1832  * typedef all the netievent types
1833  */
1834 
1835 NETIEVENT_SOCKET_TYPE(close);
1836 NETIEVENT_SOCKET_TYPE(tcpclose);
1837 NETIEVENT_SOCKET_TYPE(tcplisten);
1838 NETIEVENT_SOCKET_TYPE(tcppauseread);
1839 NETIEVENT_SOCKET_TYPE(tcpstop);
1840 NETIEVENT_SOCKET_TYPE(tlsclose);
1841 /* NETIEVENT_SOCKET_TYPE(tlsconnect); */ /* unique type, defined independently
1842 					  */
1843 NETIEVENT_SOCKET_TYPE(tlsdobio);
1844 NETIEVENT_SOCKET_TYPE(tlsstartread);
1845 NETIEVENT_SOCKET_HANDLE_TYPE(tlscancel);
1846 NETIEVENT_SOCKET_TYPE(udpclose);
1847 NETIEVENT_SOCKET_TYPE(udplisten);
1848 NETIEVENT_SOCKET_TYPE(udpread);
1849 /* NETIEVENT_SOCKET_TYPE(udpsend); */ /* unique type, defined independently */
1850 NETIEVENT_SOCKET_TYPE(udpstop);
1851 
1852 NETIEVENT_SOCKET_TYPE(tcpdnsclose);
1853 NETIEVENT_SOCKET_TYPE(tcpdnsread);
1854 NETIEVENT_SOCKET_TYPE(tcpdnsstop);
1855 NETIEVENT_SOCKET_TYPE(tcpdnslisten);
1856 NETIEVENT_SOCKET_REQ_TYPE(tcpdnsconnect);
1857 NETIEVENT_SOCKET_REQ_TYPE(tcpdnssend);
1858 NETIEVENT_SOCKET_HANDLE_TYPE(tcpdnscancel);
1859 NETIEVENT_SOCKET_QUOTA_TYPE(tcpdnsaccept);
1860 
1861 NETIEVENT_SOCKET_TYPE(tlsdnsclose);
1862 NETIEVENT_SOCKET_TYPE(tlsdnsread);
1863 NETIEVENT_SOCKET_TYPE(tlsdnsstop);
1864 NETIEVENT_SOCKET_TYPE(tlsdnsshutdown);
1865 NETIEVENT_SOCKET_TYPE(tlsdnslisten);
1866 NETIEVENT_SOCKET_REQ_TYPE(tlsdnsconnect);
1867 NETIEVENT_SOCKET_REQ_TYPE(tlsdnssend);
1868 NETIEVENT_SOCKET_HANDLE_TYPE(tlsdnscancel);
1869 NETIEVENT_SOCKET_QUOTA_TYPE(tlsdnsaccept);
1870 NETIEVENT_SOCKET_TYPE(tlsdnscycle);
1871 
1872 NETIEVENT_SOCKET_TYPE(httpstop);
1873 NETIEVENT_SOCKET_REQ_TYPE(httpsend);
1874 NETIEVENT_SOCKET_TYPE(httpclose);
1875 
1876 NETIEVENT_SOCKET_REQ_TYPE(tcpconnect);
1877 NETIEVENT_SOCKET_REQ_TYPE(tcpsend);
1878 NETIEVENT_SOCKET_TYPE(tcpstartread);
1879 NETIEVENT_SOCKET_REQ_TYPE(tlssend);
1880 NETIEVENT_SOCKET_REQ_TYPE(udpconnect);
1881 
1882 NETIEVENT_SOCKET_REQ_RESULT_TYPE(connectcb);
1883 NETIEVENT_SOCKET_REQ_RESULT_TYPE(readcb);
1884 NETIEVENT_SOCKET_REQ_RESULT_TYPE(sendcb);
1885 
1886 NETIEVENT_SOCKET_HANDLE_TYPE(detach);
1887 NETIEVENT_SOCKET_HANDLE_TYPE(tcpcancel);
1888 NETIEVENT_SOCKET_HANDLE_TYPE(udpcancel);
1889 
1890 NETIEVENT_SOCKET_QUOTA_TYPE(tcpaccept);
1891 
1892 NETIEVENT_TYPE(pause);
1893 NETIEVENT_TYPE(resume);
1894 NETIEVENT_TYPE(shutdown);
1895 NETIEVENT_TYPE(stop);
1896 
1897 NETIEVENT_TASK_TYPE(task);
1898 NETIEVENT_TASK_TYPE(privilegedtask);
1899 
/* Now declare the helper functions */
1901 
/*
 * Helper-function declarations, expanded by the NETIEVENT_*_DECL macro
 * family defined earlier in this file (not visible in this chunk).
 * Presumably each invocation declares the get/put constructor and async
 * handler prototypes for the matching NETIEVENT_*_TYPE above -- confirm
 * against the macro definitions.
 */

/* Generic, TCP, TLS, and UDP socket event helpers. */
NETIEVENT_SOCKET_DECL(close);
NETIEVENT_SOCKET_DECL(tcpclose);
NETIEVENT_SOCKET_DECL(tcplisten);
NETIEVENT_SOCKET_DECL(tcppauseread);
NETIEVENT_SOCKET_DECL(tcpstartread);
NETIEVENT_SOCKET_DECL(tcpstop);
NETIEVENT_SOCKET_DECL(tlsclose);
NETIEVENT_SOCKET_DECL(tlsconnect);
NETIEVENT_SOCKET_DECL(tlsdobio);
NETIEVENT_SOCKET_DECL(tlsstartread);
NETIEVENT_SOCKET_HANDLE_DECL(tlscancel);
NETIEVENT_SOCKET_DECL(udpclose);
NETIEVENT_SOCKET_DECL(udplisten);
NETIEVENT_SOCKET_DECL(udpread);
NETIEVENT_SOCKET_DECL(udpsend);
NETIEVENT_SOCKET_DECL(udpstop);

/* TCPDNS (DNS-over-TCP) socket event helpers. */
NETIEVENT_SOCKET_DECL(tcpdnsclose);
NETIEVENT_SOCKET_DECL(tcpdnsread);
NETIEVENT_SOCKET_DECL(tcpdnsstop);
NETIEVENT_SOCKET_DECL(tcpdnslisten);
NETIEVENT_SOCKET_REQ_DECL(tcpdnsconnect);
NETIEVENT_SOCKET_REQ_DECL(tcpdnssend);
NETIEVENT_SOCKET_HANDLE_DECL(tcpdnscancel);
NETIEVENT_SOCKET_QUOTA_DECL(tcpdnsaccept);

/* TLSDNS (DNS-over-TLS) socket event helpers. */
NETIEVENT_SOCKET_DECL(tlsdnsclose);
NETIEVENT_SOCKET_DECL(tlsdnsread);
NETIEVENT_SOCKET_DECL(tlsdnsstop);
NETIEVENT_SOCKET_DECL(tlsdnsshutdown);
NETIEVENT_SOCKET_DECL(tlsdnslisten);
NETIEVENT_SOCKET_REQ_DECL(tlsdnsconnect);
NETIEVENT_SOCKET_REQ_DECL(tlsdnssend);
NETIEVENT_SOCKET_HANDLE_DECL(tlsdnscancel);
NETIEVENT_SOCKET_QUOTA_DECL(tlsdnsaccept);
NETIEVENT_SOCKET_DECL(tlsdnscycle);

/* HTTP (DNS-over-HTTPS) socket event helpers. */
NETIEVENT_SOCKET_DECL(httpstop);
NETIEVENT_SOCKET_REQ_DECL(httpsend);
NETIEVENT_SOCKET_DECL(httpclose);

/* Connect/send request event helpers. */
NETIEVENT_SOCKET_REQ_DECL(tcpconnect);
NETIEVENT_SOCKET_REQ_DECL(tcpsend);
NETIEVENT_SOCKET_REQ_DECL(tlssend);
NETIEVENT_SOCKET_REQ_DECL(udpconnect);

/* Completion-callback event helpers. */
NETIEVENT_SOCKET_REQ_RESULT_DECL(connectcb);
NETIEVENT_SOCKET_REQ_RESULT_DECL(readcb);
NETIEVENT_SOCKET_REQ_RESULT_DECL(sendcb);

/* Cancel/detach event helpers. */
NETIEVENT_SOCKET_HANDLE_DECL(udpcancel);
NETIEVENT_SOCKET_HANDLE_DECL(tcpcancel);
NETIEVENT_SOCKET_DECL(detach);

/* Quota-based accept event helper for plain TCP. */
NETIEVENT_SOCKET_QUOTA_DECL(tcpaccept);

/* Global control event helpers. */
NETIEVENT_DECL(pause);
NETIEVENT_DECL(resume);
NETIEVENT_DECL(shutdown);
NETIEVENT_DECL(stop);

/* Task-execution event helpers. */
NETIEVENT_TASK_DECL(task);
NETIEVENT_TASK_DECL(privilegedtask);
1965 
/*
 * Per-transport failed-read handlers: invoked with the failure 'result'
 * when a read on 'sock' cannot complete.  The TLSDNS variant also takes
 * 'async' -- presumably whether the failure is reported asynchronously
 * via an event rather than inline; confirm against the implementation.
 */
void
isc__nm_udp_failed_read_cb(isc_nmsocket_t *sock, isc_result_t result);
void
isc__nm_tcp_failed_read_cb(isc_nmsocket_t *sock, isc_result_t result);
void
isc__nm_tcpdns_failed_read_cb(isc_nmsocket_t *sock, isc_result_t result);
void
isc__nm_tlsdns_failed_read_cb(isc_nmsocket_t *sock, isc_result_t result,
			      bool async);

/*
 * Process buffered input on a TCPDNS/TLSDNS socket; returns an
 * isc_result_t indicating the outcome (exact semantics defined in the
 * corresponding .c files).
 */
isc_result_t
isc__nm_tcpdns_processbuffer(isc_nmsocket_t *sock);
isc_result_t
isc__nm_tlsdns_processbuffer(isc_nmsocket_t *sock);

/*
 * Obtain a uvreq for a read on 'sock'; 'sockaddr' is presumably the
 * peer address to associate with the request -- confirm at call sites.
 */
isc__nm_uvreq_t *
isc__nm_get_read_req(isc_nmsocket_t *sock, isc_sockaddr_t *sockaddr);

/*
 * libuv allocation callback (uv_alloc_cb signature): supplies 'buf'
 * of up to 'size' bytes for an incoming read on 'handle'.
 */
void
isc__nm_alloc_cb(uv_handle_t *handle, size_t size, uv_buf_t *buf);

/*
 * libuv read callbacks for each transport: UDP uses the uv_udp_recv_cb
 * signature; the stream-based transports use the uv_read_cb signature.
 */
void
isc__nm_udp_read_cb(uv_udp_t *handle, ssize_t nrecv, const uv_buf_t *buf,
		    const struct sockaddr *addr, unsigned flags);
void
isc__nm_tcp_read_cb(uv_stream_t *stream, ssize_t nread, const uv_buf_t *buf);
void
isc__nm_tcpdns_read_cb(uv_stream_t *stream, ssize_t nread, const uv_buf_t *buf);
void
isc__nm_tlsdns_read_cb(uv_stream_t *stream, ssize_t nread, const uv_buf_t *buf);

/*
 * Reading control and socket-state predicates.  The two bool
 * predicates report whether the socket (resp. its netmgr) is in the
 * process of closing/shutting down -- confirm exact conditions in the
 * implementation.
 */
void
isc__nm_start_reading(isc_nmsocket_t *sock);
void
isc__nm_stop_reading(isc_nmsocket_t *sock);
void
isc__nm_process_sock_buffer(isc_nmsocket_t *sock);
void
isc__nm_resume_processing(void *arg);
bool
isc__nmsocket_closing(isc_nmsocket_t *sock);
bool
isc__nm_closing(isc_nmsocket_t *sock);

/*
 * Ensure the socket's DNS message buffer can hold at least 'len' bytes
 * (presumably (re)allocating sock->buf -- confirm in implementation).
 */
void
isc__nm_alloc_dnsbuf(isc_nmsocket_t *sock, size_t len);

/*
 * Generic failure handlers for send, accept, connect, and read paths;
 * 'eresult'/'result' is the error to report to the pending callback.
 * 'async' -- see note on isc__nm_tlsdns_failed_read_cb() above.
 */
void
isc__nm_failed_send_cb(isc_nmsocket_t *sock, isc__nm_uvreq_t *req,
		       isc_result_t eresult);
void
isc__nm_failed_accept_cb(isc_nmsocket_t *sock, isc_result_t eresult);
void
isc__nm_failed_connect_cb(isc_nmsocket_t *sock, isc__nm_uvreq_t *req,
			  isc_result_t eresult, bool async);
void
isc__nm_failed_read_cb(isc_nmsocket_t *sock, isc_result_t result, bool async);

/*
 * libuv timer callback (uv_timer_cb signature) fired when a pending
 * connect times out.
 */
void
isc__nmsocket_connecttimeout_cb(uv_timer_t *timer);
2026 
2027 #define STREAM_CLIENTS_PER_CONN 23
2028