1 /*
2 * Copyright (c) 2013-2021 Joris Vink <joris@coders.se>
3 *
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include <sys/param.h>
18 #include <sys/socket.h>
19 #include <sys/types.h>
20
21 #if defined(__linux__)
22 #include <endian.h>
23 #elif defined(__MACH__)
24 #include <libkern/OSByteOrder.h>
25 #define htobe64(x) OSSwapHostToBigInt64(x)
26 #define be64toh(x) OSSwapBigToHostInt64(x)
27 #else
28 #include <sys/endian.h>
29 #endif
30
31 #include "kore.h"
32
33 struct kore_pool nb_pool;
34
35 void
net_init(void)36 net_init(void)
37 {
38 u_int32_t elm;
39
40 /* Add some overhead so we don't roll over for internal items. */
41 elm = worker_max_connections + 10;
42 kore_pool_init(&nb_pool, "nb_pool", sizeof(struct netbuf), elm);
43 }
44
45 void
net_cleanup(void)46 net_cleanup(void)
47 {
48 kore_debug("net_cleanup()");
49 kore_pool_cleanup(&nb_pool);
50 }
51
52 struct netbuf *
net_netbuf_get(void)53 net_netbuf_get(void)
54 {
55 struct netbuf *nb;
56
57 nb = kore_pool_get(&nb_pool);
58
59 nb->cb = NULL;
60 nb->buf = NULL;
61 nb->owner = NULL;
62 nb->extra = NULL;
63 nb->file_ref = NULL;
64
65 nb->type = 0;
66 nb->s_off = 0;
67 nb->b_len = 0;
68 nb->m_len = 0;
69 nb->flags = 0;
70
71 #if defined(KORE_USE_PLATFORM_SENDFILE)
72 nb->fd_off = -1;
73 nb->fd_len = -1;
74 #endif
75
76 return (nb);
77 }
78
79 void
net_send_queue(struct connection * c,const void * data,size_t len)80 net_send_queue(struct connection *c, const void *data, size_t len)
81 {
82 const u_int8_t *d;
83 struct netbuf *nb;
84 size_t avail;
85
86 kore_debug("net_send_queue(%p, %p, %zu)", c, data, len);
87
88 d = data;
89 nb = TAILQ_LAST(&(c->send_queue), netbuf_head);
90 if (nb != NULL && !(nb->flags & NETBUF_IS_STREAM) &&
91 nb->b_len < nb->m_len) {
92 avail = nb->m_len - nb->b_len;
93 if (len < avail) {
94 memcpy(nb->buf + nb->b_len, d, len);
95 nb->b_len += len;
96 return;
97 } else {
98 memcpy(nb->buf + nb->b_len, d, avail);
99 nb->b_len += avail;
100
101 len -= avail;
102 d += avail;
103 if (len == 0)
104 return;
105 }
106 }
107
108 nb = net_netbuf_get();
109
110 nb->owner = c;
111 nb->b_len = len;
112 nb->type = NETBUF_SEND;
113
114 if (nb->b_len < NETBUF_SEND_PAYLOAD_MAX)
115 nb->m_len = NETBUF_SEND_PAYLOAD_MAX;
116 else
117 nb->m_len = nb->b_len;
118
119 nb->buf = kore_malloc(nb->m_len);
120 memcpy(nb->buf, d, nb->b_len);
121
122 TAILQ_INSERT_TAIL(&(c->send_queue), nb, list);
123 }
124
125 void
net_send_stream(struct connection * c,void * data,size_t len,int (* cb)(struct netbuf *),struct netbuf ** out)126 net_send_stream(struct connection *c, void *data, size_t len,
127 int (*cb)(struct netbuf *), struct netbuf **out)
128 {
129 struct netbuf *nb;
130
131 kore_debug("net_send_stream(%p, %p, %zu)", c, data, len);
132
133 nb = net_netbuf_get();
134 nb->cb = cb;
135 nb->owner = c;
136 nb->buf = data;
137 nb->b_len = len;
138 nb->m_len = nb->b_len;
139 nb->type = NETBUF_SEND;
140 nb->flags = NETBUF_IS_STREAM;
141
142 TAILQ_INSERT_TAIL(&(c->send_queue), nb, list);
143
144 if (out != NULL)
145 *out = nb;
146 }
147
/*
 * Queue a file reference for sending on the connection. Ownership of
 * the reference passes to the netbuf; it is released via
 * kore_fileref_release() in net_remove_netbuf().
 */
void
net_send_fileref(struct connection *c, struct kore_fileref *ref)
{
	struct netbuf	*nb;

	nb = net_netbuf_get();
	nb->owner = c;
	nb->file_ref = ref;
	nb->type = NETBUF_SEND;
	nb->flags = NETBUF_IS_FILEREF;

#if defined(KORE_USE_PLATFORM_SENDFILE)
	if (c->owner->server->tls == 0) {
		/* Plain connection: let platform sendfile move the file,
		 * tracked through fd_off / fd_len. */
		nb->fd_off = 0;
		nb->fd_len = ref->size;
	} else {
		/* TLS: sendfile cannot be used; stream ref->base
		 * (presumably an in-memory mapping of the file — see
		 * kore_fileref) through the regular send path. */
		nb->buf = ref->base;
		nb->b_len = ref->size;
		nb->m_len = nb->b_len;
		nb->flags |= NETBUF_IS_STREAM;
	}
#else
	/* No platform sendfile support: always stream ref->base. */
	nb->buf = ref->base;
	nb->b_len = ref->size;
	nb->m_len = nb->b_len;
	nb->flags |= NETBUF_IS_STREAM;
#endif

	TAILQ_INSERT_TAIL(&(c->send_queue), nb, list);
}
178
/*
 * Rearm the connection's receive netbuf for a new read of len bytes,
 * resetting the offset and installing cb as the completion callback.
 */
void
net_recv_reset(struct connection *c, size_t len, int (*cb)(struct netbuf *))
{
	kore_debug("net_recv_reset(): %p %zu", c, len);

	c->rnb->cb = cb;
	c->rnb->s_off = 0;
	c->rnb->b_len = len;

	/*
	 * Reuse the existing buffer when it can hold len bytes and is
	 * not excessively large; otherwise replace it with a fresh
	 * allocation of exactly len bytes.
	 */
	if (c->rnb->buf != NULL && c->rnb->b_len <= c->rnb->m_len &&
	    c->rnb->m_len < (NETBUF_SEND_PAYLOAD_MAX / 2))
		return;

	kore_free(c->rnb->buf);
	c->rnb->m_len = len;
	c->rnb->buf = kore_malloc(c->rnb->m_len);
}
196
197 void
net_recv_queue(struct connection * c,size_t len,int flags,int (* cb)(struct netbuf *))198 net_recv_queue(struct connection *c, size_t len, int flags,
199 int (*cb)(struct netbuf *))
200 {
201 kore_debug("net_recv_queue(): %p %zu %d", c, len, flags);
202
203 if (c->rnb != NULL)
204 fatal("net_recv_queue(): called incorrectly");
205
206 c->rnb = net_netbuf_get();
207 c->rnb->cb = cb;
208 c->rnb->owner = c;
209 c->rnb->b_len = len;
210 c->rnb->m_len = len;
211 c->rnb->flags = flags;
212 c->rnb->type = NETBUF_RECV;
213 c->rnb->buf = kore_malloc(c->rnb->b_len);
214 }
215
216 void
net_recv_expand(struct connection * c,size_t len,int (* cb)(struct netbuf *))217 net_recv_expand(struct connection *c, size_t len, int (*cb)(struct netbuf *))
218 {
219 kore_debug("net_recv_expand(): %p %d", c, len);
220
221 c->rnb->cb = cb;
222 c->rnb->b_len += len;
223 c->rnb->m_len = c->rnb->b_len;
224 c->rnb->buf = kore_realloc(c->rnb->buf, c->rnb->b_len);
225 }
226
/*
 * Send (part of) the netbuf at the head of the send queue. Returns
 * KORE_RESULT_ERROR on a write failure, KORE_RESULT_OK otherwise —
 * including when the socket simply stopped accepting data.
 */
int
net_send(struct connection *c)
{
	size_t		r, len, smin;

	/* Always operate on the head of the send queue. */
	c->snb = TAILQ_FIRST(&(c->send_queue));

#if defined(KORE_USE_PLATFORM_SENDFILE)
	/* Pure file references (no stream fallback) go via sendfile. */
	if ((c->snb->flags & NETBUF_IS_FILEREF) &&
	    !(c->snb->flags & NETBUF_IS_STREAM)) {
		return (kore_platform_sendfile(c, c->snb));
	}
#endif

	if (c->snb->b_len != 0) {
		/* Cap each write at NETBUF_SEND_PAYLOAD_MAX bytes. */
		smin = c->snb->b_len - c->snb->s_off;
		len = MIN(NETBUF_SEND_PAYLOAD_MAX, smin);

		if (!c->write(c, len, &r))
			return (KORE_RESULT_ERROR);
		/* Write side went non-writable (e.g. EAGAIN): retry later. */
		if (!(c->evt.flags & KORE_EVENT_WRITE))
			return (KORE_RESULT_OK);

		c->snb->s_off += r;
		c->snb->flags &= ~NETBUF_MUST_RESEND;
	}

	/* Remove the netbuf once fully sent, or when forcibly flagged. */
	if (c->snb->s_off == c->snb->b_len ||
	    (c->snb->flags & NETBUF_FORCE_REMOVE)) {
		net_remove_netbuf(c, c->snb);
		c->snb = NULL;
	}

	return (KORE_RESULT_OK);
}
262
263 int
net_send_flush(struct connection * c)264 net_send_flush(struct connection *c)
265 {
266 kore_debug("net_send_flush(%p)", c);
267
268 while (!TAILQ_EMPTY(&(c->send_queue)) &&
269 (c->evt.flags & KORE_EVENT_WRITE)) {
270 if (!net_send(c))
271 return (KORE_RESULT_ERROR);
272 }
273
274 if ((c->flags & CONN_CLOSE_EMPTY) && TAILQ_EMPTY(&(c->send_queue))) {
275 kore_connection_disconnect(c);
276 }
277
278 return (KORE_RESULT_OK);
279 }
280
281 int
net_recv_flush(struct connection * c)282 net_recv_flush(struct connection *c)
283 {
284 size_t r;
285
286 kore_debug("net_recv_flush(%p)", c);
287
288 if (c->rnb == NULL)
289 return (KORE_RESULT_OK);
290
291 while (c->evt.flags & KORE_EVENT_READ) {
292 if (c->rnb->buf == NULL)
293 return (KORE_RESULT_OK);
294
295 if (!c->read(c, &r))
296 return (KORE_RESULT_ERROR);
297 if (!(c->evt.flags & KORE_EVENT_READ))
298 break;
299
300 c->rnb->s_off += r;
301 if (c->rnb->s_off == c->rnb->b_len ||
302 (c->rnb->flags & NETBUF_CALL_CB_ALWAYS)) {
303 r = c->rnb->cb(c->rnb);
304 if (r != KORE_RESULT_OK)
305 return (r);
306 }
307 }
308
309 return (KORE_RESULT_OK);
310 }
311
/*
 * Remove a send netbuf from the connection's queue, releasing whatever
 * it owns and returning it to the pool. Receive netbufs are never
 * removed through here.
 */
void
net_remove_netbuf(struct connection *c, struct netbuf *nb)
{
	kore_debug("net_remove_netbuf(%p, %p)", c, nb);

	if (nb->type == NETBUF_RECV)
		fatal("net_remove_netbuf(): cannot remove recv netbuf");

	/*
	 * A TLS write that returned WANT_READ/WANT_WRITE must be retried
	 * with the same buffer (see net_write_tls), so keep the netbuf
	 * queued and flag it for removal once the resend completes.
	 */
	if (nb->flags & NETBUF_MUST_RESEND) {
		kore_debug("retaining %p (MUST_RESEND)", nb);
		nb->flags |= NETBUF_FORCE_REMOVE;
		return;
	}

	/*
	 * Non-stream buffers are owned by the netbuf and freed here;
	 * stream buffers belong to the caller, which is notified via
	 * its callback instead.
	 */
	if (!(nb->flags & NETBUF_IS_STREAM)) {
		kore_free(nb->buf);
	} else if (nb->cb != NULL) {
		(void)nb->cb(nb);
	}

	if (nb->flags & NETBUF_IS_FILEREF)
		kore_fileref_release(nb->file_ref);

	TAILQ_REMOVE(&(c->send_queue), nb, list);

	kore_pool_put(&nb_pool, nb);
}
339
/*
 * Write up to len bytes from the current send netbuf over TLS.
 * *written receives the number of bytes accepted by SSL_write().
 * Returns KORE_RESULT_ERROR on fatal TLS/socket errors, otherwise
 * KORE_RESULT_OK (including the retry-later cases).
 */
int
net_write_tls(struct connection *c, size_t len, size_t *written)
{
	int		r;

	/* SSL_write() takes an int; refuse lengths that would overflow. */
	if (len > INT_MAX)
		return (KORE_RESULT_ERROR);

	ERR_clear_error();
	r = SSL_write(c->ssl, (c->snb->buf + c->snb->s_off), len);
	/* Too many renegotiations: treat the peer as hostile. */
	if (c->tls_reneg > 1)
		return (KORE_RESULT_ERROR);

	if (r <= 0) {
		r = SSL_get_error(c->ssl, r);
		switch (r) {
		case SSL_ERROR_WANT_READ:
		case SSL_ERROR_WANT_WRITE:
			/*
			 * Retry later; the retried SSL_write() must use the
			 * same buffer, hence NETBUF_MUST_RESEND.
			 */
			c->evt.flags &= ~KORE_EVENT_WRITE;
			c->snb->flags |= NETBUF_MUST_RESEND;
			return (KORE_RESULT_OK);
		case SSL_ERROR_SYSCALL:
			switch (errno) {
			case EINTR:
				/* Interrupted: nothing written, try again. */
				*written = 0;
				return (KORE_RESULT_OK);
			case EAGAIN:
				c->evt.flags &= ~KORE_EVENT_WRITE;
				c->snb->flags |= NETBUF_MUST_RESEND;
				return (KORE_RESULT_OK);
			default:
				break;
			}
			/* FALLTHROUGH */
		default:
			kore_debug("SSL_write(): %s", ssl_errno_s);
			if (c->flags & CONN_LOG_TLS_FAILURE) {
				kore_log(LOG_NOTICE,
				    "SSL_write(): %s", ssl_errno_s);
			}
			return (KORE_RESULT_ERROR);
		}
	}

	*written = (size_t)r;

	return (KORE_RESULT_OK);
}
388
389 int
net_read_tls(struct connection * c,size_t * bytes)390 net_read_tls(struct connection *c, size_t *bytes)
391 {
392 int r;
393
394 ERR_clear_error();
395 r = SSL_read(c->ssl, (c->rnb->buf + c->rnb->s_off),
396 (c->rnb->b_len - c->rnb->s_off));
397
398 if (c->tls_reneg > 1)
399 return (KORE_RESULT_ERROR);
400
401 if (r <= 0) {
402 r = SSL_get_error(c->ssl, r);
403 switch (r) {
404 case SSL_ERROR_WANT_READ:
405 case SSL_ERROR_WANT_WRITE:
406 c->evt.flags &= ~KORE_EVENT_READ;
407 return (KORE_RESULT_OK);
408 case SSL_ERROR_ZERO_RETURN:
409 return (KORE_RESULT_ERROR);
410 case SSL_ERROR_SYSCALL:
411 switch (errno) {
412 case EINTR:
413 *bytes = 0;
414 return (KORE_RESULT_OK);
415 case EAGAIN:
416 c->evt.flags &= ~KORE_EVENT_READ;
417 c->snb->flags |= NETBUF_MUST_RESEND;
418 return (KORE_RESULT_OK);
419 default:
420 break;
421 }
422 /* FALLTHROUGH */
423 default:
424 kore_debug("SSL_read(): %s", ssl_errno_s);
425 if (c->flags & CONN_LOG_TLS_FAILURE) {
426 kore_log(LOG_NOTICE,
427 "SSL_read(): %s", ssl_errno_s);
428 }
429 return (KORE_RESULT_ERROR);
430 }
431 }
432
433 *bytes = (size_t)r;
434
435 return (KORE_RESULT_OK);
436 }
437
438 int
net_write(struct connection * c,size_t len,size_t * written)439 net_write(struct connection *c, size_t len, size_t *written)
440 {
441 ssize_t r;
442
443 r = send(c->fd, (c->snb->buf + c->snb->s_off), len, 0);
444 if (r == -1) {
445 switch (errno) {
446 case EINTR:
447 *written = 0;
448 return (KORE_RESULT_OK);
449 case EAGAIN:
450 c->evt.flags &= ~KORE_EVENT_WRITE;
451 return (KORE_RESULT_OK);
452 default:
453 kore_debug("write: %s", errno_s);
454 return (KORE_RESULT_ERROR);
455 }
456 }
457
458 *written = (size_t)r;
459
460 return (KORE_RESULT_OK);
461 }
462
463 int
net_read(struct connection * c,size_t * bytes)464 net_read(struct connection *c, size_t *bytes)
465 {
466 ssize_t r;
467
468 r = recv(c->fd, (c->rnb->buf + c->rnb->s_off),
469 (c->rnb->b_len - c->rnb->s_off), 0);
470 if (r == -1) {
471 switch (errno) {
472 case EINTR:
473 *bytes = 0;
474 return (KORE_RESULT_OK);
475 case EAGAIN:
476 c->evt.flags &= ~KORE_EVENT_READ;
477 return (KORE_RESULT_OK);
478 default:
479 kore_debug("read(): %s", errno_s);
480 return (KORE_RESULT_ERROR);
481 }
482 }
483
484 if (r == 0) {
485 kore_connection_disconnect(c);
486 c->evt.flags &= ~KORE_EVENT_READ;
487 return (KORE_RESULT_OK);
488 }
489
490 *bytes = (size_t)r;
491
492 return (KORE_RESULT_OK);
493 }
494
/*
 * Decode a 16-bit big-endian value from b. The value is assembled byte
 * by byte: the previous implementation cast b to a u_int16_t pointer,
 * which is a strict-aliasing violation and an unaligned load on
 * platforms that require natural alignment.
 */
u_int16_t
net_read16(u_int8_t *b)
{
	return ((u_int16_t)(((u_int16_t)b[0] << 8) | b[1]));
}
503
/*
 * Decode a 32-bit big-endian value from b. Byte-wise assembly avoids
 * the strict-aliasing violation and unaligned load of the previous
 * pointer-cast implementation.
 */
u_int32_t
net_read32(u_int8_t *b)
{
	return (((u_int32_t)b[0] << 24) | ((u_int32_t)b[1] << 16) |
	    ((u_int32_t)b[2] << 8) | (u_int32_t)b[3]);
}
512
/*
 * Encode n as a 16-bit big-endian value at p. The memcpy() keeps the
 * store alignment-safe for arbitrary p.
 */
void
net_write16(u_int8_t *p, u_int16_t n)
{
	const u_int16_t		be = htons(n);

	memcpy(p, &be, sizeof(be));
}
521
/*
 * Encode n as a 32-bit big-endian value at p. The memcpy() keeps the
 * store alignment-safe for arbitrary p.
 */
void
net_write32(u_int8_t *p, u_int32_t n)
{
	const u_int32_t		be = htonl(n);

	memcpy(p, &be, sizeof(be));
}
530
/*
 * Decode a 64-bit big-endian value from b. Byte-wise assembly fixes
 * the strict-aliasing violation and unaligned load of the previous
 * pointer cast, and is endian-correct without the nonstandard
 * be64toh() platform shims.
 */
u_int64_t
net_read64(u_int8_t *b)
{
	return (((u_int64_t)b[0] << 56) | ((u_int64_t)b[1] << 48) |
	    ((u_int64_t)b[2] << 40) | ((u_int64_t)b[3] << 32) |
	    ((u_int64_t)b[4] << 24) | ((u_int64_t)b[5] << 16) |
	    ((u_int64_t)b[6] << 8) | (u_int64_t)b[7]);
}
539
/*
 * Encode n as a 64-bit big-endian value at p. Byte-wise stores produce
 * the same output as the previous htobe64()+memcpy() but are
 * alignment-safe and do not depend on the nonstandard htobe64()
 * platform shims.
 */
void
net_write64(u_int8_t *p, u_int64_t n)
{
	p[0] = (u_int8_t)(n >> 56);
	p[1] = (u_int8_t)(n >> 48);
	p[2] = (u_int8_t)(n >> 40);
	p[3] = (u_int8_t)(n >> 32);
	p[4] = (u_int8_t)(n >> 24);
	p[5] = (u_int8_t)(n >> 16);
	p[6] = (u_int8_t)(n >> 8);
	p[7] = (u_int8_t)n;
}
548