/*-
 * SPDX-License-Identifier: ISC
 *
 * Copyright (C) 2015-2021 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 * Copyright (C) 2019-2021 Matt Dunwoodie <ncon@noconroy.net>
 * Copyright (c) 2019-2020 Rubicon Communications, LLC (Netgate)
 * Copyright (c) 2021 Kyle Evans <kevans@FreeBSD.org>
 * Copyright (c) 2022 The FreeBSD Foundation
 * Copyright (c) 2023-2024 Aaron LI <aly@aaronly.me>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/caps.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/objcache.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/socketops.h>	/* so_pru_*() functions */
#include <sys/socketvar.h>
#include <sys/sockio.h>		/* SIOC* ioctl commands */
#include <sys/taskqueue.h>
#include <sys/time.h>

#include <machine/atomic.h>

#include <net/bpf.h>
#include <net/ethernet.h>	/* ETHERMTU */
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_types.h>	/* IFT_WIREGUARD */
#include <net/if_var.h>
#include <net/ifq_var.h>
#include <net/netisr.h>
#include <net/radix.h>
#include <net/route.h>		/* struct rtentry */

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/in6_var.h>	/* in6_mask2len() */
#include <netinet6/nd6.h>	/* ND_IFINFO() */

#include "wg_cookie.h"
#include "wg_noise.h"
#include "if_wg.h"

CTASSERT(WG_KEY_SIZE >= NOISE_PUBLIC_KEY_LEN);
CTASSERT(WG_KEY_SIZE >= NOISE_SYMMETRIC_KEY_LEN);

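/*
 * NOTE: The 80-byte reduction accounts for the worst-case encapsulation
 * overhead of a data packet: 40-byte IPv6 header + 8-byte UDP header +
 * 16-byte WireGuard data header + 16-byte authentication tag.
 */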
#define DEFAULT_MTU		(ETHERMTU - 80)
#define MAX_MTU			(IF_MAXMTU - 80)

#ifndef ENOKEY
#define ENOKEY			ENOENT
#endif

/*
 * mbuf flags to clear after in-place encryption/decryption, so that the
 * mbuf can be reused for re-entering the network stack or delivering to
 * the remote peer.
 *
 * For example, the M_HASH and M_LENCHECKED flags must be cleared for an
 * inbound packet; otherwise, a panic is to be expected.
 */
#define MBUF_CLEARFLAGS		(M_COPYFLAGS & ~(M_PKTHDR | M_EOR | M_PRIO))

#define MAX_LOOPS		8 /* 0 means no loop allowed */
#define MTAG_WGLOOP		0x77676c70 /* wglp; cookie for loop check */

#define MAX_STAGED_PKT		128
#define MAX_QUEUED_PKT		1024
#define MAX_QUEUED_PKT_MASK	(MAX_QUEUED_PKT - 1)
#define MAX_QUEUED_HANDSHAKES	4096

#define REKEY_TIMEOUT_JITTER	(karc4random() % 334) /* msec; 0..333 */
#define MAX_TIMER_HANDSHAKES	(90 / REKEY_TIMEOUT)
#define NEW_HANDSHAKE_TIMEOUT	(REKEY_TIMEOUT + KEEPALIVE_TIMEOUT)
#define UNDERLOAD_TIMEOUT	1

/* The first 4 bytes of a packet indicate its type on the wire. */
#define WG_PKT_INITIATION	htole32(1)
#define WG_PKT_RESPONSE		htole32(2)
#define WG_PKT_COOKIE		htole32(3)
#define WG_PKT_DATA		htole32(4)

#define WG_PKT_PADDING		16
#define WG_PKT_WITH_PADDING(n)	\
	(((n) + (WG_PKT_PADDING - 1)) & ~(WG_PKT_PADDING - 1))
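/*
 * E.g., WG_PKT_WITH_PADDING(0) == 0, WG_PKT_WITH_PADDING(1) == 16, and
 * WG_PKT_WITH_PADDING(1500) == 1504; i.e., round up to a multiple of 16.
 */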
#define WG_PKT_ENCRYPTED_LEN(n)	\
	(offsetof(struct wg_pkt_data, buf[(n)]) + NOISE_AUTHTAG_LEN)
#define WG_PKT_IS_INITIATION(m)	\
	(*mtod((m), uint32_t *) == WG_PKT_INITIATION &&	\
	 (size_t)(m)->m_pkthdr.len == sizeof(struct wg_pkt_initiation))
#define WG_PKT_IS_RESPONSE(m)	\
	(*mtod((m), uint32_t *) == WG_PKT_RESPONSE &&	\
	 (size_t)(m)->m_pkthdr.len == sizeof(struct wg_pkt_response))
#define WG_PKT_IS_COOKIE(m)	\
	(*mtod((m), uint32_t *) == WG_PKT_COOKIE &&	\
	 (size_t)(m)->m_pkthdr.len == sizeof(struct wg_pkt_cookie))
#define WG_PKT_IS_DATA(m)	\
	(*mtod((m), uint32_t *) == WG_PKT_DATA &&	\
	 (size_t)(m)->m_pkthdr.len >= WG_PKT_ENCRYPTED_LEN(0))


#define DPRINTF(sc, fmt, ...)	\
	if (sc->sc_ifp->if_flags & IFF_DEBUG)	\
		if_printf(sc->sc_ifp, fmt, ##__VA_ARGS__)


struct wg_pkt_initiation {
	uint32_t		t;
	uint32_t		s_idx;
	uint8_t			ue[NOISE_PUBLIC_KEY_LEN];
	uint8_t			es[NOISE_PUBLIC_KEY_LEN + NOISE_AUTHTAG_LEN];
	uint8_t			ets[NOISE_TIMESTAMP_LEN + NOISE_AUTHTAG_LEN];
	struct cookie_macs	m;
};

struct wg_pkt_response {
	uint32_t		t;
	uint32_t		s_idx;
	uint32_t		r_idx;
	uint8_t			ue[NOISE_PUBLIC_KEY_LEN];
	uint8_t			en[0 + NOISE_AUTHTAG_LEN];
	struct cookie_macs	m;
};

struct wg_pkt_cookie {
	uint32_t		t;
	uint32_t		r_idx;
	uint8_t			nonce[COOKIE_NONCE_SIZE];
	uint8_t			ec[COOKIE_ENCRYPTED_SIZE];
};

struct wg_pkt_data {
	uint32_t		t;
	uint32_t		r_idx;
	uint64_t		counter;
	uint8_t			buf[];
};

struct wg_endpoint {
	union {
		struct sockaddr		r_sa;
		struct sockaddr_in	r_sin;
#ifdef INET6
		struct sockaddr_in6	r_sin6;
#endif
	} e_remote;
	/*
	 * NOTE: No 'e_local' on DragonFly, because the socket upcall
	 *       and so_pru_soreceive() cannot provide the local
	 *       (i.e., destination) address of a received packet.
	 */
};

struct aip_addr {
	uint8_t		length; /* required by the radix code */
	union {
		uint8_t		bytes[16];
		uint32_t	ip;
		uint32_t	ip6[4];
		struct in_addr	in;
		struct in6_addr	in6;
	};
};

struct wg_aip {
	struct radix_node	 a_nodes[2]; /* the first node is used for casting */
	LIST_ENTRY(wg_aip)	 a_entry;
	struct aip_addr		 a_addr;
	struct aip_addr		 a_mask;
	struct wg_peer		*a_peer;
	sa_family_t		 a_af;
};

enum wg_packet_state {
	WG_PACKET_DEAD,		/* to be dropped */
	WG_PACKET_UNCRYPTED,	/* before encryption/decryption */
	WG_PACKET_CRYPTED,	/* after encryption/decryption */
};

struct wg_packet {
	STAILQ_ENTRY(wg_packet)	 p_serial;
	STAILQ_ENTRY(wg_packet)	 p_parallel;
	struct wg_endpoint	 p_endpoint;
	struct noise_keypair	*p_keypair;
	uint64_t		 p_counter;
	struct mbuf		*p_mbuf;
	int			 p_mtu;
	sa_family_t		 p_af;
	unsigned int		 p_state; /* atomic */
};
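/*
 * A packet is enqueued with p_state == WG_PACKET_UNCRYPTED; the crypto
 * worker later publishes either WG_PACKET_CRYPTED or WG_PACKET_DEAD with
 * a releasing atomic store, and the serial delivery path inspects
 * p_state to decide whether to deliver or free the packet.
 */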

STAILQ_HEAD(wg_packet_list, wg_packet);

struct wg_queue {
	struct lock		q_mtx;
	struct wg_packet_list	q_queue;
	size_t			q_len;
};

struct wg_peer {
	TAILQ_ENTRY(wg_peer)	 p_entry;
	unsigned long		 p_id;
	struct wg_softc		*p_sc;

	char			 p_description[WG_PEER_DESCR_SIZE];

	struct noise_remote	*p_remote;
	struct cookie_maker	*p_cookie;

	struct lock		 p_endpoint_lock;
	struct wg_endpoint	 p_endpoint;

	struct wg_queue		 p_stage_queue;
	struct wg_queue		 p_encrypt_serial;
	struct wg_queue		 p_decrypt_serial;

	bool			 p_enabled;
	bool			 p_need_another_keepalive;
	uint16_t		 p_persistent_keepalive_interval;
	struct callout		 p_new_handshake;
	struct callout		 p_send_keepalive;
	struct callout		 p_retry_handshake;
	struct callout		 p_zero_key_material;
	struct callout		 p_persistent_keepalive;

	struct lock		 p_handshake_mtx;
	struct timespec		 p_handshake_complete; /* nanotime */
	int			 p_handshake_retries;

	struct task		 p_send_task;
	struct task		 p_recv_task;
	struct taskqueue	*p_send_taskqueue;
	struct taskqueue	*p_recv_taskqueue;

	uint64_t		*p_tx_bytes;
	uint64_t		*p_rx_bytes;

	LIST_HEAD(, wg_aip)	 p_aips;
	size_t			 p_aips_num;
};

struct wg_socket {
	struct lock	 so_lock;
	struct socket	*so_so4;
	struct socket	*so_so6;
	uint32_t	 so_user_cookie;
	in_port_t	 so_port;
};

struct wg_softc {
	LIST_ENTRY(wg_softc)	 sc_entry;
	struct ifnet		*sc_ifp;
	int			 sc_flags;

	struct wg_socket	 sc_socket;

	TAILQ_HEAD(, wg_peer)	 sc_peers;
	size_t			 sc_peers_num;

	struct noise_local	*sc_local;
	struct cookie_checker	*sc_cookie;

	struct lock		 sc_aip_lock;
	struct radix_node_head	*sc_aip4;
	struct radix_node_head	*sc_aip6;

	struct taskqueue	*sc_handshake_taskqueue;
	struct task		 sc_handshake_task;
	struct wg_queue		 sc_handshake_queue;

	struct task		*sc_encrypt_tasks; /* one per CPU */
	struct task		*sc_decrypt_tasks; /* one per CPU */
	struct wg_queue		 sc_encrypt_parallel;
	struct wg_queue		 sc_decrypt_parallel;
	int			 sc_encrypt_last_cpu;
	int			 sc_decrypt_last_cpu;

	struct lock		 sc_lock;
};


static MALLOC_DEFINE(M_WG, "WG", "wireguard");
static MALLOC_DEFINE(M_WG_PACKET, "WG packet", "wireguard packet");

static const char wgname[] = "wg";

static struct objcache *wg_packet_zone;
static struct lock wg_mtx;
static struct taskqueue **wg_taskqueues; /* one taskqueue per CPU */
static struct radix_node_head *wg_maskhead; /* shared by all interfaces */
static LIST_HEAD(, wg_softc) wg_list = LIST_HEAD_INITIALIZER(wg_list);


/* Timers */
static void	wg_timers_enable(struct wg_peer *);
static void	wg_timers_disable(struct wg_peer *);

/* Allowed IP */
static int	wg_aip_add(struct wg_softc *, struct wg_peer *, sa_family_t,
			   const void *, uint8_t);
static struct wg_peer *
		wg_aip_lookup(struct wg_softc *, sa_family_t, const void *);
static void	wg_aip_remove_all(struct wg_softc *, struct wg_peer *);

/* Handshake */
static void	wg_send_initiation(struct wg_peer *);
static void	wg_send_response(struct wg_peer *);
static void	wg_send_cookie(struct wg_softc *, struct cookie_macs *,
			       uint32_t, struct wg_endpoint *);
static void	wg_send_keepalive(struct wg_peer *);

/* Transport Packet Functions */
static void	wg_peer_send_staged(struct wg_peer *);
static void	wg_deliver_out(void *, int);
static void	wg_deliver_in(void *, int);
static void	wg_upcall(struct socket *, void *, int);

/*----------------------------------------------------------------------------*/
/* Packet */

static struct wg_packet *
wg_packet_alloc(struct mbuf *m)
{
	struct wg_packet *pkt;

	if ((pkt = objcache_get(wg_packet_zone, M_NOWAIT)) == NULL)
		return (NULL);

	bzero(pkt, sizeof(*pkt)); /* objcache_get() doesn't ensure M_ZERO. */
	pkt->p_mbuf = m;

	return (pkt);
}

static void
wg_packet_free(struct wg_packet *pkt)
{
	if (pkt->p_keypair != NULL)
		noise_keypair_put(pkt->p_keypair);
	if (pkt->p_mbuf != NULL)
		m_freem(pkt->p_mbuf);
	objcache_put(wg_packet_zone, pkt);
}

/*----------------------------------------------------------------------------*/
/*
 * Packet Queue Functions
 *
 * WireGuard uses the following queues:
 * - per-interface handshake queue: track incoming handshake packets
 * - per-peer staged queue: track the outgoing packets sent by that peer
 * - per-interface parallel encrypt and decrypt queues
 * - per-peer serial encrypt and decrypt queues
 *
 * For one interface, the handshake packets are only tracked in the handshake
 * queue and are processed in serial.  However, all data packets are tracked
 * in two queues: a serial queue and a parallel queue.  Specifically, the
 * outgoing packets (from the staged queue) will be queued in both the
 * parallel encrypt and the serial encrypt queues; the incoming packets will
 * be queued in both the parallel decrypt and the serial decrypt queues.
 *
 * - The parallel queues are used to distribute the encryption/decryption work
 *   across all CPUs.  The per-CPU wg_{encrypt,decrypt}_worker() work on the
 *   parallel queues.
 * - The serial queues ensure that packets are not reordered and are
 *   delivered in sequence for each peer.  The per-peer wg_deliver_{in,out}()
 *   work on the serial queues.
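 *
 * An illustrative outbound flow: the interface output path pushes a
 * packet onto the peer's staged queue; wg_peer_send_staged() then
 * enqueues it on both the parallel encrypt queue (any CPU may encrypt
 * it) and the peer's serial encrypt queue (wg_deliver_out() sends the
 * results in order).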
 */

static void	wg_queue_purge(struct wg_queue *);

static void
wg_queue_init(struct wg_queue *queue, const char *name)
{
	lockinit(&queue->q_mtx, name, 0, 0);
	STAILQ_INIT(&queue->q_queue);
	queue->q_len = 0;
}

static void
wg_queue_deinit(struct wg_queue *queue)
{
	wg_queue_purge(queue);
	lockuninit(&queue->q_mtx);
}

static size_t
wg_queue_len(const struct wg_queue *queue)
{
	return (queue->q_len);
}

static bool
wg_queue_enqueue_handshake(struct wg_queue *hs, struct wg_packet *pkt)
{
	bool ok = false;

	lockmgr(&hs->q_mtx, LK_EXCLUSIVE);
	if (hs->q_len < MAX_QUEUED_HANDSHAKES) {
		STAILQ_INSERT_TAIL(&hs->q_queue, pkt, p_parallel);
		hs->q_len++;
		ok = true;
	}
	lockmgr(&hs->q_mtx, LK_RELEASE);

	if (!ok)
		wg_packet_free(pkt);

	return (ok);
}

static struct wg_packet *
wg_queue_dequeue_handshake(struct wg_queue *hs)
{
	struct wg_packet *pkt;

	lockmgr(&hs->q_mtx, LK_EXCLUSIVE);
	pkt = STAILQ_FIRST(&hs->q_queue);
	if (pkt != NULL) {
		STAILQ_REMOVE_HEAD(&hs->q_queue, p_parallel);
		hs->q_len--;
	}
	lockmgr(&hs->q_mtx, LK_RELEASE);

	return (pkt);
}

static void
wg_queue_push_staged(struct wg_queue *staged, struct wg_packet *pkt)
{
	struct wg_packet *old = NULL;

	lockmgr(&staged->q_mtx, LK_EXCLUSIVE);
	if (staged->q_len >= MAX_STAGED_PKT) {
		old = STAILQ_FIRST(&staged->q_queue);
		STAILQ_REMOVE_HEAD(&staged->q_queue, p_parallel);
		staged->q_len--;
	}
	STAILQ_INSERT_TAIL(&staged->q_queue, pkt, p_parallel);
	staged->q_len++;
	lockmgr(&staged->q_mtx, LK_RELEASE);

	if (old != NULL)
		wg_packet_free(old);
}

static void
wg_queue_enlist_staged(struct wg_queue *staged, struct wg_packet_list *list)
{
	struct wg_packet *pkt, *tpkt;

	STAILQ_FOREACH_MUTABLE(pkt, list, p_parallel, tpkt)
		wg_queue_push_staged(staged, pkt);
	STAILQ_INIT(list);
}

static void
wg_queue_delist_staged(struct wg_queue *staged, struct wg_packet_list *list)
{
	STAILQ_INIT(list);
	lockmgr(&staged->q_mtx, LK_EXCLUSIVE);
	STAILQ_CONCAT(list, &staged->q_queue);
	staged->q_len = 0;
	lockmgr(&staged->q_mtx, LK_RELEASE);
}

static void
wg_queue_purge(struct wg_queue *staged)
{
	struct wg_packet_list list;
	struct wg_packet *pkt, *tpkt;

	wg_queue_delist_staged(staged, &list);
	STAILQ_FOREACH_MUTABLE(pkt, &list, p_parallel, tpkt)
		wg_packet_free(pkt);
}

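/*
 * Enqueue a packet on both the per-peer serial queue and the shared
 * parallel queue.  On success the two queues share the packet; on
 * failure the packet is either freed here (serial queue full) or marked
 * DEAD for the serial worker to free (parallel queue full).
 */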
static bool
wg_queue_both(struct wg_queue *parallel, struct wg_queue *serial,
	      struct wg_packet *pkt)
{
	pkt->p_state = WG_PACKET_UNCRYPTED;

	lockmgr(&serial->q_mtx, LK_EXCLUSIVE);
	if (serial->q_len < MAX_QUEUED_PKT) {
		serial->q_len++;
		STAILQ_INSERT_TAIL(&serial->q_queue, pkt, p_serial);
	} else {
		lockmgr(&serial->q_mtx, LK_RELEASE);
		wg_packet_free(pkt);
		return (false);
	}
	lockmgr(&serial->q_mtx, LK_RELEASE);

	lockmgr(&parallel->q_mtx, LK_EXCLUSIVE);
	if (parallel->q_len < MAX_QUEUED_PKT) {
		parallel->q_len++;
		STAILQ_INSERT_TAIL(&parallel->q_queue, pkt, p_parallel);
	} else {
		lockmgr(&parallel->q_mtx, LK_RELEASE);
		/*
		 * Cannot just free the packet because it's already queued
		 * on the serial queue.  Instead, set its state to DEAD and
		 * let the serial worker free it.
		 */
		pkt->p_state = WG_PACKET_DEAD;
		return (false);
	}
	lockmgr(&parallel->q_mtx, LK_RELEASE);

	return (true);
}

static struct wg_packet *
wg_queue_dequeue_serial(struct wg_queue *serial)
{
	struct wg_packet *pkt = NULL;

	lockmgr(&serial->q_mtx, LK_EXCLUSIVE);
	if (serial->q_len > 0 &&
	    STAILQ_FIRST(&serial->q_queue)->p_state != WG_PACKET_UNCRYPTED) {
		/*
		 * Dequeue both CRYPTED packets (to be delivered) and
		 * DEAD packets (to be freed).
		 */
		serial->q_len--;
		pkt = STAILQ_FIRST(&serial->q_queue);
		STAILQ_REMOVE_HEAD(&serial->q_queue, p_serial);
	}
	lockmgr(&serial->q_mtx, LK_RELEASE);

	return (pkt);
}

static struct wg_packet *
wg_queue_dequeue_parallel(struct wg_queue *parallel)
{
	struct wg_packet *pkt = NULL;

	lockmgr(&parallel->q_mtx, LK_EXCLUSIVE);
	if (parallel->q_len > 0) {
		parallel->q_len--;
		pkt = STAILQ_FIRST(&parallel->q_queue);
		STAILQ_REMOVE_HEAD(&parallel->q_queue, p_parallel);
	}
	lockmgr(&parallel->q_mtx, LK_RELEASE);

	return (pkt);
}

/*----------------------------------------------------------------------------*/
/* Peer */

static struct wg_peer *
wg_peer_create(struct wg_softc *sc, const uint8_t pub_key[WG_KEY_SIZE])
{
	static unsigned long peer_counter = 0;
	struct wg_peer *peer;

	KKASSERT(lockstatus(&sc->sc_lock, curthread) == LK_EXCLUSIVE);

	peer = kmalloc(sizeof(*peer), M_WG, M_WAITOK | M_ZERO);

	peer->p_remote = noise_remote_alloc(sc->sc_local, pub_key, peer);
	if (noise_remote_enable(peer->p_remote) != 0) {
		kfree(peer, M_WG);
		return (NULL);
	}

	peer->p_cookie = cookie_maker_alloc(pub_key);

	peer->p_id = ++peer_counter;
	peer->p_sc = sc;
	peer->p_tx_bytes = kmalloc(sizeof(*peer->p_tx_bytes) * ncpus,
				   M_WG, M_WAITOK | M_ZERO);
	peer->p_rx_bytes = kmalloc(sizeof(*peer->p_rx_bytes) * ncpus,
				   M_WG, M_WAITOK | M_ZERO);

	lockinit(&peer->p_endpoint_lock, "wg_peer_endpoint", 0, 0);
	lockinit(&peer->p_handshake_mtx, "wg_peer_handshake", 0, 0);

	wg_queue_init(&peer->p_stage_queue, "stageq");
	wg_queue_init(&peer->p_encrypt_serial, "txq");
	wg_queue_init(&peer->p_decrypt_serial, "rxq");

	callout_init_mp(&peer->p_new_handshake);
	callout_init_mp(&peer->p_send_keepalive);
	callout_init_mp(&peer->p_retry_handshake);
	callout_init_mp(&peer->p_persistent_keepalive);
	callout_init_mp(&peer->p_zero_key_material);

	TASK_INIT(&peer->p_send_task, 0, wg_deliver_out, peer);
	TASK_INIT(&peer->p_recv_task, 0, wg_deliver_in, peer);

	/* Randomly choose the taskqueues to distribute the load. */
	peer->p_send_taskqueue = wg_taskqueues[karc4random() % ncpus];
	peer->p_recv_taskqueue = wg_taskqueues[karc4random() % ncpus];

	LIST_INIT(&peer->p_aips);

	TAILQ_INSERT_TAIL(&sc->sc_peers, peer, p_entry);
	sc->sc_peers_num++;

	if (sc->sc_ifp->if_link_state == LINK_STATE_UP)
		wg_timers_enable(peer);

	DPRINTF(sc, "Peer %ld created\n", peer->p_id);
	return (peer);
}

static void
wg_peer_destroy(struct wg_peer *peer)
{
	struct wg_softc *sc = peer->p_sc;

	KKASSERT(lockstatus(&sc->sc_lock, curthread) == LK_EXCLUSIVE);

	/*
	 * Disable remote and timers.  This will prevent any new handshakes
	 * from occurring.
	 */
	noise_remote_disable(peer->p_remote);
	wg_timers_disable(peer);

	/*
	 * Remove all allowed IPs, so no more packets will be routed to
	 * this peer.
	 */
	wg_aip_remove_all(sc, peer);

	/* Remove peer from the interface, then free. */
	sc->sc_peers_num--;
	TAILQ_REMOVE(&sc->sc_peers, peer, p_entry);

	/*
	 * While there are no references remaining, we may still have
	 * p_{send,recv}_task executing (think empty queue, but
	 * wg_deliver_{in,out} needs to check the queue).  We should wait
	 * for them and then free.
	 */
	taskqueue_drain(peer->p_recv_taskqueue, &peer->p_recv_task);
	taskqueue_drain(peer->p_send_taskqueue, &peer->p_send_task);

	wg_queue_deinit(&peer->p_decrypt_serial);
	wg_queue_deinit(&peer->p_encrypt_serial);
	wg_queue_deinit(&peer->p_stage_queue);

	kfree(peer->p_tx_bytes, M_WG);
	kfree(peer->p_rx_bytes, M_WG);

	lockuninit(&peer->p_endpoint_lock);
	lockuninit(&peer->p_handshake_mtx);

	noise_remote_free(peer->p_remote);
	cookie_maker_free(peer->p_cookie);

	DPRINTF(sc, "Peer %ld destroyed\n", peer->p_id);
	kfree(peer, M_WG);
}

static void
wg_peer_destroy_all(struct wg_softc *sc)
{
	struct wg_peer *peer, *tpeer;

	TAILQ_FOREACH_MUTABLE(peer, &sc->sc_peers, p_entry, tpeer)
		wg_peer_destroy(peer);
}

static int
wg_peer_set_sockaddr(struct wg_peer *peer, const struct sockaddr *remote)
{
	int ret = 0;

	lockmgr(&peer->p_endpoint_lock, LK_EXCLUSIVE);

	memcpy(&peer->p_endpoint.e_remote, remote,
	       sizeof(peer->p_endpoint.e_remote));
	if (remote->sa_family == AF_INET)
		memcpy(&peer->p_endpoint.e_remote.r_sin, remote,
		       sizeof(peer->p_endpoint.e_remote.r_sin));
#ifdef INET6
	else if (remote->sa_family == AF_INET6)
		memcpy(&peer->p_endpoint.e_remote.r_sin6, remote,
		       sizeof(peer->p_endpoint.e_remote.r_sin6));
#endif
	else
		ret = EAFNOSUPPORT;

	/* No 'e_local' to clear on DragonFly. */

	lockmgr(&peer->p_endpoint_lock, LK_RELEASE);
	return (ret);
}

static int
wg_peer_get_sockaddr(struct wg_peer *peer, struct sockaddr *remote)
{
	int ret = ENOENT;

	lockmgr(&peer->p_endpoint_lock, LK_SHARED);
	if (peer->p_endpoint.e_remote.r_sa.sa_family != AF_UNSPEC) {
		memcpy(remote, &peer->p_endpoint.e_remote,
		       sizeof(peer->p_endpoint.e_remote));
		ret = 0;
	}
	lockmgr(&peer->p_endpoint_lock, LK_RELEASE);
	return (ret);
}

static void
wg_peer_set_endpoint(struct wg_peer *peer, const struct wg_endpoint *e)
{
	KKASSERT(e->e_remote.r_sa.sa_family != AF_UNSPEC);

	if (__predict_true(memcmp(e, &peer->p_endpoint, sizeof(*e)) == 0))
		return;

	lockmgr(&peer->p_endpoint_lock, LK_EXCLUSIVE);
	peer->p_endpoint = *e;
	lockmgr(&peer->p_endpoint_lock, LK_RELEASE);
}

static void
wg_peer_get_endpoint(struct wg_peer *peer, struct wg_endpoint *e)
{
	if (__predict_true(memcmp(e, &peer->p_endpoint, sizeof(*e)) == 0))
		return;

	lockmgr(&peer->p_endpoint_lock, LK_SHARED);
	*e = peer->p_endpoint;
	lockmgr(&peer->p_endpoint_lock, LK_RELEASE);
}

/*----------------------------------------------------------------------------*/
/* Allowed IP */

static int
wg_aip_add(struct wg_softc *sc, struct wg_peer *peer, sa_family_t af,
	   const void *addr, uint8_t cidr)
{
	struct radix_node_head *head;
	struct radix_node *node;
	struct wg_aip *aip;
	int ret = 0;

	aip = kmalloc(sizeof(*aip), M_WG, M_WAITOK | M_ZERO);
	aip->a_peer = peer;
	aip->a_af = af;

	switch (af) {
	case AF_INET:
		if (cidr > 32)
			cidr = 32;
		head = sc->sc_aip4;
		aip->a_addr.in = *(const struct in_addr *)addr;
		aip->a_mask.ip =
		    htonl(~((1LL << (32 - cidr)) - 1) & 0xffffffff);
		aip->a_addr.ip &= aip->a_mask.ip;
		aip->a_addr.length = aip->a_mask.length =
		    offsetof(struct aip_addr, in) + sizeof(struct in_addr);
		break;
#ifdef INET6
	case AF_INET6:
		if (cidr > 128)
			cidr = 128;
		head = sc->sc_aip6;
		aip->a_addr.in6 = *(const struct in6_addr *)addr;
		in6_prefixlen2mask(&aip->a_mask.in6, cidr);
		aip->a_addr.ip6[0] &= aip->a_mask.ip6[0];
		aip->a_addr.ip6[1] &= aip->a_mask.ip6[1];
		aip->a_addr.ip6[2] &= aip->a_mask.ip6[2];
		aip->a_addr.ip6[3] &= aip->a_mask.ip6[3];
		aip->a_addr.length = aip->a_mask.length =
		    offsetof(struct aip_addr, in6) + sizeof(struct in6_addr);
		break;
#endif
	default:
		kfree(aip, M_WG);
		return (EAFNOSUPPORT);
	}

	lockmgr(&sc->sc_aip_lock, LK_EXCLUSIVE);
	node = head->rnh_addaddr(&aip->a_addr, &aip->a_mask, head,
				 aip->a_nodes);
	if (node != NULL) {
		KKASSERT(node == aip->a_nodes);
		LIST_INSERT_HEAD(&peer->p_aips, aip, a_entry);
		peer->p_aips_num++;
	} else {
		/*
		 * Two possibilities:
		 * - out of memory failure
		 * - entry already exists
		 */
		node = head->rnh_lookup(&aip->a_addr, &aip->a_mask, head);
		if (node == NULL) {
			kfree(aip, M_WG);
			ret = ENOMEM;
		} else {
			KKASSERT(node != aip->a_nodes);
			kfree(aip, M_WG);
			aip = (struct wg_aip *)node;
			if (aip->a_peer != peer) {
				/* Replace the peer. */
				LIST_REMOVE(aip, a_entry);
				aip->a_peer->p_aips_num--;
				aip->a_peer = peer;
				LIST_INSERT_HEAD(&peer->p_aips, aip, a_entry);
				aip->a_peer->p_aips_num++;
			}
		}
	}
	lockmgr(&sc->sc_aip_lock, LK_RELEASE);

	return (ret);
}

static struct wg_peer *
wg_aip_lookup(struct wg_softc *sc, sa_family_t af, const void *a)
{
	struct radix_node_head *head;
	struct radix_node *node;
	struct wg_peer *peer;
	struct aip_addr addr;

	switch (af) {
	case AF_INET:
		head = sc->sc_aip4;
		memcpy(&addr.in, a, sizeof(addr.in));
		addr.length = offsetof(struct aip_addr, in) + sizeof(addr.in);
		break;
	case AF_INET6:
		head = sc->sc_aip6;
		memcpy(&addr.in6, a, sizeof(addr.in6));
		addr.length = offsetof(struct aip_addr, in6) + sizeof(addr.in6);
		break;
	default:
		return (NULL);
	}

	lockmgr(&sc->sc_aip_lock, LK_SHARED);
	node = head->rnh_matchaddr(&addr, head);
	if (node != NULL) {
		peer = ((struct wg_aip *)node)->a_peer;
		noise_remote_ref(peer->p_remote);
	} else {
		peer = NULL;
	}
	lockmgr(&sc->sc_aip_lock, LK_RELEASE);

	return (peer);
}

static void
wg_aip_remove_all(struct wg_softc *sc, struct wg_peer *peer)
{
	struct radix_node_head *head;
	struct radix_node *node;
	struct wg_aip *aip, *taip;

	lockmgr(&sc->sc_aip_lock, LK_EXCLUSIVE);

	LIST_FOREACH_MUTABLE(aip, &peer->p_aips, a_entry, taip) {
		switch (aip->a_af) {
		case AF_INET:
			head = sc->sc_aip4;
			break;
		case AF_INET6:
			head = sc->sc_aip6;
			break;
		default:
			panic("%s: impossible aip %p", __func__, aip);
		}
		node = head->rnh_deladdr(&aip->a_addr, &aip->a_mask, head);
		if (node == NULL)
			panic("%s: failed to delete aip %p", __func__, aip);
		LIST_REMOVE(aip, a_entry);
		peer->p_aips_num--;
		kfree(aip, M_WG);
	}

	if (!LIST_EMPTY(&peer->p_aips) || peer->p_aips_num != 0)
		panic("%s: could not delete all aips for peer %ld",
		      __func__, peer->p_id);

	lockmgr(&sc->sc_aip_lock, LK_RELEASE);
}

/*----------------------------------------------------------------------------*/
/* Socket */

static int	wg_socket_open(struct socket **, sa_family_t, in_port_t *,
			       void *);
static int	wg_socket_set_sockopt(struct socket *, struct socket *,
				      int, void *, size_t);

static int
wg_socket_init(struct wg_softc *sc, in_port_t port)
{
	struct wg_socket *so = &sc->sc_socket;
	struct socket *so4 = NULL, *so6 = NULL;
	in_port_t bound_port = port;
	uint32_t cookie;
	int ret;

	/*
	 * When a host or a jail doesn't support the AF, sobind() would
	 * return EADDRNOTAVAIL.  Handle this case in order to support such
	 * IPv4-only or IPv6-only environments.
	 *
	 * However, in a dual-stack environment, both IPv4 and IPv6 sockets
	 * must bind the same port.
	 */
	ret = wg_socket_open(&so4, AF_INET, &bound_port, sc);
	if (ret != 0 && ret != EADDRNOTAVAIL)
		goto error;

#ifdef INET6
	ret = wg_socket_open(&so6, AF_INET6, &bound_port, sc);
	if (ret != 0 && ret != EADDRNOTAVAIL)
		goto error;
#endif

	if (so4 == NULL && so6 == NULL) {
		ret = EAFNOSUPPORT;
		goto error;
	}

	cookie = so->so_user_cookie;
	if (cookie != 0) {
		ret = wg_socket_set_sockopt(so4, so6, SO_USER_COOKIE,
					    &cookie, sizeof(cookie));
		if (ret != 0)
			goto error;
	}

	KKASSERT(lockstatus(&sc->sc_lock, curthread) == LK_EXCLUSIVE);

	lockinit(&so->so_lock, "wg socket lock", 0, 0);

	if (so->so_so4 != NULL)
		soclose(so->so_so4, 0);
	if (so->so_so6 != NULL)
		soclose(so->so_so6, 0);
	so->so_so4 = so4;
	so->so_so6 = so6;
	so->so_port = bound_port;

	return (0);

error:
	if (so4 != NULL)
		soclose(so4, 0);
	if (so6 != NULL)
		soclose(so6, 0);
	return (ret);
}

static int
wg_socket_open(struct socket **so, sa_family_t af, in_port_t *port,
	       void *upcall_arg)
{
	struct sockaddr_in sin;
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	struct sockaddr *sa, *bound_sa;
	int ret;

	if (af == AF_INET) {
		bzero(&sin, sizeof(sin));
		sin.sin_len = sizeof(struct sockaddr_in);
		sin.sin_family = AF_INET;
		sin.sin_port = htons(*port);
		sa = sintosa(&sin);
#ifdef INET6
	} else if (af == AF_INET6) {
		bzero(&sin6, sizeof(sin6));
		sin6.sin6_len = sizeof(struct sockaddr_in6);
		sin6.sin6_family = AF_INET6;
		sin6.sin6_port = htons(*port);
		sa = sintosa(&sin6);
#endif
	} else {
		return (EAFNOSUPPORT);
	}

	ret = socreate(af, so, SOCK_DGRAM, IPPROTO_UDP, curthread);
	if (ret != 0)
		return (ret);

	(*so)->so_upcall = wg_upcall;
	(*so)->so_upcallarg = upcall_arg;
	atomic_set_int(&(*so)->so_rcv.ssb_flags, SSB_UPCALL);

	ret = sobind(*so, sa, curthread);
	if (ret != 0)
		goto error;

	if (*port == 0) {
		ret = so_pru_sockaddr(*so, &bound_sa);
		if (ret != 0)
			goto error;
		if (bound_sa->sa_family == AF_INET)
			*port = ntohs(satosin(bound_sa)->sin_port);
		else
			*port = ntohs(satosin6(bound_sa)->sin6_port);
		kfree(bound_sa, M_SONAME);
	}

	return (0);

error:
	if (*so != NULL) {
		soclose(*so, 0);
		*so = NULL;
	}
	return (ret);
}

static void
wg_socket_uninit(struct wg_softc *sc)
{
	struct wg_socket *so = &sc->sc_socket;

	KKASSERT(lockstatus(&sc->sc_lock, curthread) == LK_EXCLUSIVE);

	lockmgr(&so->so_lock, LK_EXCLUSIVE);

	if (so->so_so4 != NULL) {
		soclose(so->so_so4, 0);
		so->so_so4 = NULL;
	}
	if (so->so_so6 != NULL) {
		soclose(so->so_so6, 0);
		so->so_so6 = NULL;
	}

	lockmgr(&so->so_lock, LK_RELEASE);
	lockuninit(&so->so_lock);
}

static int
wg_socket_set_sockopt(struct socket *so4, struct socket *so6,
		      int name, void *val, size_t len)
{
	struct sockopt sopt = {
		.sopt_dir = SOPT_SET,
		.sopt_level = SOL_SOCKET,
		.sopt_name = name,
		.sopt_val = val,
		.sopt_valsize = len,
	};
	int ret;

	if (so4 != NULL) {
		ret = sosetopt(so4, &sopt);
		if (ret != 0)
			return (ret);
	}
	if (so6 != NULL) {
		ret = sosetopt(so6, &sopt);
		if (ret != 0)
			return (ret);
	}

	return (0);
}

static int
wg_socket_set_cookie(struct wg_softc *sc, uint32_t user_cookie)
{
	struct wg_socket *so;
	int ret;

	KKASSERT(lockstatus(&sc->sc_lock, curthread) == LK_EXCLUSIVE);

	so = &sc->sc_socket;
	lockmgr(&so->so_lock, LK_EXCLUSIVE);

	ret = wg_socket_set_sockopt(so->so_so4, so->so_so6, SO_USER_COOKIE,
				    &user_cookie, sizeof(user_cookie));
	if (ret == 0)
		so->so_user_cookie = user_cookie;

	lockmgr(&so->so_lock, LK_RELEASE);
	return (ret);
}

static int
wg_send(struct wg_softc *sc, struct wg_endpoint *e, struct mbuf *m)
{
	struct wg_socket *so;
	struct sockaddr *sa;
	int len, ret;

	so = &sc->sc_socket;
	sa = &e->e_remote.r_sa;
	len = m->m_pkthdr.len;
	ret = 0;

	/*
	 * NOTE: DragonFly by default sends UDP packets asynchronously,
	 *       unless the 'net.inet.udp.sosend_async' sysctl MIB is set
	 *       to 0 or the 'MSG_SYNC' flag is set for so_pru_sosend().
	 *       In the async mode, an error code cannot really be
	 *       returned to the caller, so so_pru_sosend() may return 0
	 *       even if the packet fails to send.
	 */
	lockmgr(&so->so_lock, LK_SHARED);
	if (sa->sa_family == AF_INET && so->so_so4 != NULL) {
		ret = so_pru_sosend(so->so_so4, sa, NULL /* uio */,
				    m, NULL /* control */, 0 /* flags */,
				    curthread);
#ifdef INET6
	} else if (sa->sa_family == AF_INET6 && so->so_so6 != NULL) {
		ret = so_pru_sosend(so->so_so6, sa, NULL /* uio */,
				    m, NULL /* control */, 0 /* flags */,
				    curthread);
#endif
	} else {
		ret = ENOTCONN;
		m_freem(m);
	}
	lockmgr(&so->so_lock, LK_RELEASE);

	if (ret == 0) {
		IFNET_STAT_INC(sc->sc_ifp, opackets, 1);
		IFNET_STAT_INC(sc->sc_ifp, obytes, len);
	} else {
		IFNET_STAT_INC(sc->sc_ifp, oerrors, 1);
	}

	return (ret);
}

static void
wg_send_buf(struct wg_softc *sc, struct wg_endpoint *e, const void *buf,
	    size_t len)
{
	struct mbuf *m;
	int ret;

	/*
	 * This function only sends handshake packets of known lengths that
	 * are <= MHLEN, so it's safe to just use m_gethdr() and memcpy().
	 */
	KKASSERT(len <= MHLEN);

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		DPRINTF(sc, "Unable to allocate mbuf\n");
		return;
	}

	/* Just plain copy as it's a single mbuf. */
	memcpy(mtod(m, void *), buf, len);
	m->m_pkthdr.len = m->m_len = len;

	/* Give high priority to the handshake packets. */
	m->m_flags |= M_PRIO;

	ret = wg_send(sc, e, m);
	if (ret != 0)
		DPRINTF(sc, "Unable to send packet: %d\n", ret);
}

/*----------------------------------------------------------------------------*/
/*
 * Timers
 *
 * These functions handle the timeout callbacks for a WireGuard session, and
 * provide an "event-based" model for controlling WireGuard session timers.
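 *
 * Each peer has five callouts: p_retry_handshake, p_send_keepalive,
 * p_new_handshake, p_zero_key_material, and p_persistent_keepalive,
 * which are (re)scheduled by the wg_timers_event_*() functions below in
 * response to protocol events.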
 */

static void	wg_timers_run_send_initiation(struct wg_peer *, bool);
static void	wg_timers_run_retry_handshake(void *);
static void	wg_timers_run_send_keepalive(void *);
static void	wg_timers_run_new_handshake(void *);
static void	wg_timers_run_zero_key_material(void *);
static void	wg_timers_run_persistent_keepalive(void *);

static void
wg_timers_enable(struct wg_peer *peer)
{
	atomic_store_bool(&peer->p_enabled, true);
	wg_timers_run_persistent_keepalive(peer);
}

static void
wg_timers_disable(struct wg_peer *peer)
{
	atomic_store_bool(&peer->p_enabled, false);
	atomic_store_bool(&peer->p_need_another_keepalive, false);

	/* Cancel the callouts and wait for them to complete. */
	callout_drain(&peer->p_new_handshake);
	callout_drain(&peer->p_send_keepalive);
	callout_drain(&peer->p_retry_handshake);
	callout_drain(&peer->p_persistent_keepalive);
	callout_drain(&peer->p_zero_key_material);
}

static void
wg_timers_set_persistent_keepalive(struct wg_peer *peer, uint16_t interval)
{
	atomic_store_16(&peer->p_persistent_keepalive_interval, interval);
	if (atomic_load_bool(&peer->p_enabled))
		wg_timers_run_persistent_keepalive(peer);
}

static bool
wg_timers_get_persistent_keepalive(struct wg_peer *peer, uint16_t *interval)
{
	*interval = atomic_load_16(&peer->p_persistent_keepalive_interval);
	return (*interval > 0);
}

static void
wg_timers_get_last_handshake(struct wg_peer *peer, struct timespec *time)
{
	lockmgr(&peer->p_handshake_mtx, LK_EXCLUSIVE);
	*time = peer->p_handshake_complete;
	lockmgr(&peer->p_handshake_mtx, LK_RELEASE);
}

/*
 * Should be called after an authenticated data packet is sent.
 */
static void
wg_timers_event_data_sent(struct wg_peer *peer)
{
	int ticks;

	if (atomic_load_bool(&peer->p_enabled) &&
	    !callout_pending(&peer->p_new_handshake)) {
		ticks = NEW_HANDSHAKE_TIMEOUT * hz +
			REKEY_TIMEOUT_JITTER * hz / 1000;
		callout_reset(&peer->p_new_handshake, ticks,
			      wg_timers_run_new_handshake, peer);
	}
}

/*
 * Should be called after an authenticated data packet is received.
 */
static void
wg_timers_event_data_received(struct wg_peer *peer)
{
	if (atomic_load_bool(&peer->p_enabled)) {
		if (!callout_pending(&peer->p_send_keepalive)) {
			callout_reset(&peer->p_send_keepalive,
				      KEEPALIVE_TIMEOUT * hz,
				      wg_timers_run_send_keepalive, peer);
		} else {
			atomic_store_bool(&peer->p_need_another_keepalive,
					  true);
		}
	}
}

/*
 * Should be called before any type of authenticated packet is to be sent,
 * whether keepalive, data, or handshake.
 */
static void
wg_timers_event_any_authenticated_packet_sent(struct wg_peer *peer)
{
	callout_stop(&peer->p_send_keepalive);
}

/*
 * Should be called after any type of authenticated packet is received,
 * whether keepalive, data, or handshake.
 */
static void
wg_timers_event_any_authenticated_packet_received(struct wg_peer *peer)
{
	callout_stop(&peer->p_new_handshake);
}

/*
 * Should be called before a packet with authentication (whether keepalive,
 * data, or handshake) is sent, or after one is received.
 */
static void
wg_timers_event_any_authenticated_packet_traversal(struct wg_peer *peer)
{
	uint16_t interval;

	interval = atomic_load_16(&peer->p_persistent_keepalive_interval);
	if (atomic_load_bool(&peer->p_enabled) && interval > 0) {
		callout_reset(&peer->p_persistent_keepalive, interval * hz,
			      wg_timers_run_persistent_keepalive, peer);
	}
}

/*
 * Should be called after a handshake initiation message is sent.
 */
static void
wg_timers_event_handshake_initiated(struct wg_peer *peer)
{
	int ticks;

	if (atomic_load_bool(&peer->p_enabled)) {
		ticks = REKEY_TIMEOUT * hz + REKEY_TIMEOUT_JITTER * hz / 1000;
		callout_reset(&peer->p_retry_handshake, ticks,
			      wg_timers_run_retry_handshake, peer);
	}
}

/*
 * Should be called after a handshake response message is received and
 * processed, or when getting key confirmation via the first data message.
 */
static void
wg_timers_event_handshake_complete(struct wg_peer *peer)
{
	if (atomic_load_bool(&peer->p_enabled)) {
		lockmgr(&peer->p_handshake_mtx, LK_EXCLUSIVE);
		callout_stop(&peer->p_retry_handshake);
		peer->p_handshake_retries = 0;
		getnanotime(&peer->p_handshake_complete);
		lockmgr(&peer->p_handshake_mtx, LK_RELEASE);

		wg_timers_run_send_keepalive(peer);
	}
}

/*
 * Should be called after an ephemeral key is created, which is before sending
 * a handshake response or after receiving a handshake response.
 */
static void
wg_timers_event_session_derived(struct wg_peer *peer)
{
	if (atomic_load_bool(&peer->p_enabled)) {
		callout_reset(&peer->p_zero_key_material,
			      REJECT_AFTER_TIME * 3 * hz,
			      wg_timers_run_zero_key_material, peer);
	}
}

/*
 * Should be called after data packet sending failure, or after the old
 * keypairs expiring (or near expiring).
 */
static void
wg_timers_event_want_initiation(struct wg_peer *peer)
{
	if (atomic_load_bool(&peer->p_enabled))
		wg_timers_run_send_initiation(peer, false);
}

static void
wg_timers_run_send_initiation(struct wg_peer *peer, bool is_retry)
{
	if (!is_retry)
		peer->p_handshake_retries = 0;
	if (noise_remote_initiation_expired(peer->p_remote))
		wg_send_initiation(peer);
}

static void
wg_timers_run_retry_handshake(void *_peer)
{
	struct wg_peer *peer = _peer;

	lockmgr(&peer->p_handshake_mtx, LK_EXCLUSIVE);
	if (peer->p_handshake_retries <= MAX_TIMER_HANDSHAKES) {
		peer->p_handshake_retries++;
		lockmgr(&peer->p_handshake_mtx, LK_RELEASE);

		DPRINTF(peer->p_sc, "Handshake for peer %ld did not complete "
			"after %d seconds, retrying (try %d)\n", peer->p_id,
			REKEY_TIMEOUT, peer->p_handshake_retries + 1);
		wg_timers_run_send_initiation(peer, true);
	} else {
		lockmgr(&peer->p_handshake_mtx, LK_RELEASE);

		DPRINTF(peer->p_sc, "Handshake for peer %ld did not complete "
			"after %d retries, giving up\n", peer->p_id,
			MAX_TIMER_HANDSHAKES + 2);
		callout_stop(&peer->p_send_keepalive);
		wg_queue_purge(&peer->p_stage_queue);
		if (atomic_load_bool(&peer->p_enabled) &&
		    !callout_pending(&peer->p_zero_key_material)) {
			callout_reset(&peer->p_zero_key_material,
				      REJECT_AFTER_TIME * 3 * hz,
				      wg_timers_run_zero_key_material, peer);
		}
	}
}

static void
wg_timers_run_send_keepalive(void *_peer)
{
	struct wg_peer *peer = _peer;

	wg_send_keepalive(peer);

	if (atomic_load_bool(&peer->p_enabled) &&
	    atomic_load_bool(&peer->p_need_another_keepalive)) {
		atomic_store_bool(&peer->p_need_another_keepalive, false);
		callout_reset(&peer->p_send_keepalive, KEEPALIVE_TIMEOUT * hz,
			      wg_timers_run_send_keepalive, peer);
	}
}

static void
wg_timers_run_persistent_keepalive(void *_peer)
{
	struct wg_peer *peer = _peer;

	if (atomic_load_16(&peer->p_persistent_keepalive_interval) > 0)
		wg_send_keepalive(peer);
}

static void
wg_timers_run_new_handshake(void *_peer)
{
	struct wg_peer *peer = _peer;

	DPRINTF(peer->p_sc, "Retrying handshake with peer %ld, "
		"because we stopped hearing back after %d seconds\n",
		peer->p_id, NEW_HANDSHAKE_TIMEOUT);
	wg_timers_run_send_initiation(peer, false);
}

static void
wg_timers_run_zero_key_material(void *_peer)
{
	struct wg_peer *peer = _peer;

	DPRINTF(peer->p_sc, "Zeroing out keys for peer %ld, "
		"since we haven't received a new one in %d seconds\n",
		peer->p_id, REJECT_AFTER_TIME * 3);
	noise_remote_keypairs_clear(peer->p_remote);
}

/*----------------------------------------------------------------------------*/
/* Handshake */

static void
wg_peer_send_buf(struct wg_peer *peer, const void *buf, size_t len)
{
	struct wg_endpoint endpoint;

	peer->p_tx_bytes[mycpuid] += len;

	wg_timers_event_any_authenticated_packet_traversal(peer);
	wg_timers_event_any_authenticated_packet_sent(peer);

	wg_peer_get_endpoint(peer, &endpoint);
	wg_send_buf(peer->p_sc, &endpoint, buf, len);
}

static void
wg_send_initiation(struct wg_peer *peer)
{
	struct wg_pkt_initiation pkt;

	if (!noise_create_initiation(peer->p_remote, &pkt.s_idx, pkt.ue,
				     pkt.es, pkt.ets))
		return;

	DPRINTF(peer->p_sc, "Sending handshake initiation to peer %ld\n",
		peer->p_id);

	pkt.t = WG_PKT_INITIATION;
	cookie_maker_mac(peer->p_cookie, &pkt.m, &pkt,
			 sizeof(pkt) - sizeof(pkt.m));
	wg_peer_send_buf(peer, &pkt, sizeof(pkt));
	wg_timers_event_handshake_initiated(peer);
}

static void
wg_send_response(struct wg_peer *peer)
{
	struct wg_pkt_response pkt;

	if (!noise_create_response(peer->p_remote, &pkt.s_idx, &pkt.r_idx,
				   pkt.ue, pkt.en))
		return;

	DPRINTF(peer->p_sc, "Sending handshake response to peer %ld\n",
		peer->p_id);

	wg_timers_event_session_derived(peer);
	pkt.t = WG_PKT_RESPONSE;
	cookie_maker_mac(peer->p_cookie, &pkt.m, &pkt,
			 sizeof(pkt) - sizeof(pkt.m));
	wg_peer_send_buf(peer, &pkt, sizeof(pkt));
}

static void
wg_send_cookie(struct wg_softc *sc, struct cookie_macs *cm, uint32_t idx,
	       struct wg_endpoint *e)
{
	struct wg_pkt_cookie pkt;

	DPRINTF(sc, "Sending cookie response for denied handshake message\n");

	pkt.t = WG_PKT_COOKIE;
	pkt.r_idx = idx;

	cookie_checker_create_payload(sc->sc_cookie, cm, pkt.nonce,
				      pkt.ec, &e->e_remote.r_sa);
	wg_send_buf(sc, e, &pkt, sizeof(pkt));
}

static void
wg_send_keepalive(struct wg_peer *peer)
{
	struct wg_packet *pkt;
	struct mbuf *m;

	if (wg_queue_len(&peer->p_stage_queue) > 0)
		goto send;
	if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
		return;
	if ((pkt = wg_packet_alloc(m)) == NULL) {
		m_freem(m);
		return;
	}

	wg_queue_push_staged(&peer->p_stage_queue, pkt);
	DPRINTF(peer->p_sc, "Sending keepalive packet to peer %ld\n",
		peer->p_id);
send:
	wg_peer_send_staged(peer);
}

static bool
wg_is_underload(struct wg_softc *sc)
{
	/*
	 * This is global, so that the load calculation applies to the
	 * whole system.  Don't care about races with it at all.
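	 *
	 * The underload state is sticky: once entered, it is kept for
	 * UNDERLOAD_TIMEOUT seconds after the handshake queue has drained
	 * below the threshold, to avoid flapping.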
	 */
	static struct timespec last_underload; /* nanouptime */
	struct timespec now;
	bool underload;

	underload = (wg_queue_len(&sc->sc_handshake_queue) >=
		     MAX_QUEUED_HANDSHAKES / 8);
	if (underload) {
		getnanouptime(&last_underload);
	} else if (timespecisset(&last_underload)) {
		getnanouptime(&now);
		now.tv_sec -= UNDERLOAD_TIMEOUT;
		underload = timespeccmp(&last_underload, &now, >);
		if (!underload)
			timespecclear(&last_underload);
	}

	return (underload);
}

static void
wg_handshake(struct wg_softc *sc, struct wg_packet *pkt)
{
	struct wg_pkt_initiation *init;
	struct wg_pkt_response *resp;
	struct wg_pkt_cookie *cook;
	struct wg_endpoint *e;
	struct wg_peer *peer;
	struct mbuf *m;
	struct noise_remote *remote = NULL;
	bool underload;
	int ret;

	pkt->p_mbuf = m_pullup(pkt->p_mbuf, pkt->p_mbuf->m_pkthdr.len);
	if (pkt->p_mbuf == NULL)
		goto error;

	underload = wg_is_underload(sc);
	m = pkt->p_mbuf;
	e = &pkt->p_endpoint;

	switch (*mtod(m, uint32_t *)) {
	case WG_PKT_INITIATION:
		init = mtod(m, struct wg_pkt_initiation *);

		ret = cookie_checker_validate_macs(sc->sc_cookie, &init->m,
		    init, sizeof(*init) - sizeof(init->m), underload,
		    &e->e_remote.r_sa);
		if (ret != 0) {
			switch (ret) {
			case EINVAL:
				DPRINTF(sc, "Invalid initiation MAC\n");
				break;
			case ECONNREFUSED:
				DPRINTF(sc, "Handshake ratelimited\n");
				break;
			case EAGAIN:
				wg_send_cookie(sc, &init->m, init->s_idx, e);
				break;
			default:
				/*
				 * cookie_checker_validate_macs() could
				 * seemingly return EAFNOSUPPORT as well,
				 * but that is actually impossible, because
				 * packets of unsupported AFs have already
				 * been dropped.
				 */
				panic("%s: unexpected return: %d",
				      __func__, ret);
			}
			goto error;
		}

		remote = noise_consume_initiation(sc->sc_local, init->s_idx,
						  init->ue, init->es,
						  init->ets);
		if (remote == NULL) {
			DPRINTF(sc, "Invalid handshake initiation\n");
			goto error;
		}

		peer = noise_remote_arg(remote);
		DPRINTF(sc, "Receiving handshake initiation from peer %ld\n",
			peer->p_id);

		wg_peer_set_endpoint(peer, e);
		wg_send_response(peer);
		break;

	case WG_PKT_RESPONSE:
		resp = mtod(m, struct wg_pkt_response *);

		ret = cookie_checker_validate_macs(sc->sc_cookie, &resp->m,
		    resp, sizeof(*resp) - sizeof(resp->m), underload,
		    &e->e_remote.r_sa);
		if (ret != 0) {
			switch (ret) {
			case EINVAL:
				DPRINTF(sc, "Invalid response MAC\n");
				break;
			case ECONNREFUSED:
				DPRINTF(sc, "Handshake ratelimited\n");
				break;
			case EAGAIN:
				wg_send_cookie(sc, &resp->m, resp->s_idx, e);
				break;
			default:
				/* See also the comment above. */
				panic("%s: unexpected return: %d",
				      __func__, ret);
			}
			goto error;
		}

		remote = noise_consume_response(sc->sc_local, resp->s_idx,
						resp->r_idx, resp->ue,
						resp->en);
		if (remote == NULL) {
			DPRINTF(sc, "Invalid handshake response\n");
			goto error;
		}

		peer = noise_remote_arg(remote);
		DPRINTF(sc, "Receiving handshake response from peer %ld\n",
			peer->p_id);

		wg_peer_set_endpoint(peer, e);
		wg_timers_event_session_derived(peer);
		wg_timers_event_handshake_complete(peer);
		break;

	case WG_PKT_COOKIE:
		cook = mtod(m, struct wg_pkt_cookie *);

		/*
		 * A cookie message can be a reply to an initiation message
		 * or to a response message.  In the latter case, the noise
		 * index has been transformed from a remote entry to a
		 * keypair entry.  Therefore, we need to lookup the index
		 * for both remote and keypair entries.
		 */
		remote = noise_remote_index(sc->sc_local, cook->r_idx);
		if (remote == NULL) {
			DPRINTF(sc, "Unknown cookie index\n");
			goto error;
		}

		peer = noise_remote_arg(remote);
		if (cookie_maker_consume_payload(peer->p_cookie, cook->nonce,
						 cook->ec) == 0) {
			DPRINTF(sc, "Receiving cookie response\n");
		} else {
			DPRINTF(sc, "Could not decrypt cookie response\n");
			goto error;
		}

		goto not_authenticated;

	default:
		panic("%s: invalid packet in handshake queue", __func__);
	}

	wg_timers_event_any_authenticated_packet_received(peer);
	wg_timers_event_any_authenticated_packet_traversal(peer);

not_authenticated:
	IFNET_STAT_INC(sc->sc_ifp, ipackets, 1);
	IFNET_STAT_INC(sc->sc_ifp, ibytes, m->m_pkthdr.len);
	peer->p_rx_bytes[mycpuid] += m->m_pkthdr.len;
	noise_remote_put(remote);
	wg_packet_free(pkt);

	return;

error:
	IFNET_STAT_INC(sc->sc_ifp, ierrors, 1);
	if (remote != NULL)
		noise_remote_put(remote);
	wg_packet_free(pkt);
}

static void
wg_handshake_worker(void *arg, int pending __unused)
{
	struct wg_softc *sc = arg;
	struct wg_queue *queue = &sc->sc_handshake_queue;
	struct wg_packet *pkt;

	while ((pkt = wg_queue_dequeue_handshake(queue)) != NULL)
		wg_handshake(sc, pkt);
}

/*----------------------------------------------------------------------------*/
/* Transport Packet Functions */

static inline void
wg_bpf_ptap(struct ifnet *ifp, struct mbuf *m, sa_family_t af)
{
	uint32_t bpf_af;

	if (ifp->if_bpf == NULL)
		return;

	bpf_gettoken();
	/* Double check after obtaining the token. */
	if (ifp->if_bpf != NULL) {
		/* Prepend the AF as a 4-byte field for DLT_NULL. */
		bpf_af = (uint32_t)af;
		bpf_ptap(ifp->if_bpf, m, &bpf_af, sizeof(bpf_af));
	}
	bpf_reltoken();
}

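/*
 * Compute how many zero bytes to append so that the plaintext length
 * becomes a multiple of WG_PKT_PADDING, without exceeding the MTU.
 * E.g., a 100-byte packet with a 1420-byte MTU is padded by 12 bytes
 * to 112.
 */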
static inline unsigned int
calculate_padding(struct wg_packet *pkt)
{
	unsigned int padded_size, last_unit;

	last_unit = pkt->p_mbuf->m_pkthdr.len;

	/* Keepalive packets don't set p_mtu, but also have a length of zero. */
	if (__predict_false(pkt->p_mtu == 0))
		return (WG_PKT_WITH_PADDING(last_unit) - last_unit);

	/*
	 * Just in case the packet is bigger than the MTU and would cause
	 * the final subtraction to overflow.
	 */
	if (__predict_false(last_unit > pkt->p_mtu))
		last_unit %= pkt->p_mtu;

	padded_size = MIN(pkt->p_mtu, WG_PKT_WITH_PADDING(last_unit));
	return (padded_size - last_unit);
}

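/*
 * Determine the address family of an IP packet from its version field,
 * and make sure the fixed IP header is contiguous in the mbuf.
 */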
1802 static inline int
determine_af_and_pullup(struct mbuf ** m,sa_family_t * af)1803 determine_af_and_pullup(struct mbuf **m, sa_family_t *af)
1804 {
1805 const struct ip *ip;
1806 const struct ip6_hdr *ip6;
1807 int len;
1808
1809 ip = mtod(*m, const struct ip *);
1810 ip6 = mtod(*m, const struct ip6_hdr *);
1811 len = (*m)->m_pkthdr.len;
1812
1813 if (len >= sizeof(*ip) && ip->ip_v == IPVERSION)
1814 *af = AF_INET;
1815 #ifdef INET6
1816 else if (len >= sizeof(*ip6) &&
1817 (ip6->ip6_vfc & IPV6_VERSION_MASK) == IPV6_VERSION)
1818 *af = AF_INET6;
1819 #endif
1820 else
1821 return (EAFNOSUPPORT);
1822
1823 *m = m_pullup(*m, (*af == AF_INET ? sizeof(*ip) : sizeof(*ip6)));
1824 if (*m == NULL)
1825 return (ENOBUFS);
1826
1827 return (0);
1828 }
1829
1830 static void
wg_encrypt(struct wg_softc * sc,struct wg_packet * pkt)1831 wg_encrypt(struct wg_softc *sc, struct wg_packet *pkt)
1832 {
1833 static const uint8_t padding[WG_PKT_PADDING] = { 0 };
1834 struct wg_pkt_data *data;
1835 struct wg_peer *peer;
1836 struct noise_remote *remote;
1837 struct mbuf *m;
1838 unsigned int padlen, state = WG_PACKET_DEAD;
1839 uint32_t idx;
1840
1841 remote = noise_keypair_remote(pkt->p_keypair);
1842 peer = noise_remote_arg(remote);
1843 m = pkt->p_mbuf;
1844
1845 padlen = calculate_padding(pkt);
1846 if (padlen != 0 && !m_append(m, padlen, padding))
1847 goto out;
1848
1849 if (noise_keypair_encrypt(pkt->p_keypair, &idx, pkt->p_counter, m) != 0)
1850 goto out;
1851
1852 M_PREPEND(m, sizeof(struct wg_pkt_data), M_NOWAIT);
1853 if (m == NULL)
1854 goto out;
1855 data = mtod(m, struct wg_pkt_data *);
1856 data->t = WG_PKT_DATA;
1857 data->r_idx = idx;
1858 data->counter = htole64(pkt->p_counter);
1859
1860 state = WG_PACKET_CRYPTED;
1861
1862 out:
1863 pkt->p_mbuf = m;
1864 atomic_store_rel_int(&pkt->p_state, state);
1865 taskqueue_enqueue(peer->p_send_taskqueue, &peer->p_send_task);
1866 noise_remote_put(remote);
1867 }
1868
1869 static void
1870 wg_decrypt(struct wg_softc *sc, struct wg_packet *pkt)
1871 {
1872 struct wg_peer *peer, *allowed_peer;
1873 struct noise_remote *remote;
1874 struct mbuf *m;
1875 unsigned int state = WG_PACKET_DEAD;
1876 int len;
1877
1878 remote = noise_keypair_remote(pkt->p_keypair);
1879 peer = noise_remote_arg(remote);
1880 m = pkt->p_mbuf;
1881
1882 pkt->p_counter = le64toh(mtod(m, struct wg_pkt_data *)->counter);
1883 m_adj(m, sizeof(struct wg_pkt_data));
1884
1885 if (noise_keypair_decrypt(pkt->p_keypair, pkt->p_counter, m) != 0)
1886 goto out;
1887
1888 /* A packet with a length of zero is a keepalive packet. */
1889 if (__predict_false(m->m_pkthdr.len == 0)) {
1890 DPRINTF(sc, "Receiving keepalive packet from peer %ld\n",
1891 peer->p_id);
1892 state = WG_PACKET_CRYPTED;
1893 goto out;
1894 }
1895
1896 /*
1897 * Extract the source address for wg_aip_lookup(), and trim the
1898 * packet if it was padded before encryption.
1899 */
1900 if (determine_af_and_pullup(&m, &pkt->p_af) != 0)
1901 goto out;
1902 if (pkt->p_af == AF_INET) {
1903 const struct ip *ip = mtod(m, const struct ip *);
1904 allowed_peer = wg_aip_lookup(sc, AF_INET, &ip->ip_src);
1905 len = ntohs(ip->ip_len);
1906 if (len >= sizeof(struct ip) && len < m->m_pkthdr.len)
1907 m_adj(m, len - m->m_pkthdr.len);
1908 } else {
1909 const struct ip6_hdr *ip6 = mtod(m, const struct ip6_hdr *);
1910 allowed_peer = wg_aip_lookup(sc, AF_INET6, &ip6->ip6_src);
1911 len = ntohs(ip6->ip6_plen) + sizeof(struct ip6_hdr);
1912 if (len < m->m_pkthdr.len)
1913 m_adj(m, len - m->m_pkthdr.len);
1914 }
1915
1916 /* Drop the reference right away; the peer pointer is only compared below. */
1917 if (allowed_peer != NULL)
1918 noise_remote_put(allowed_peer->p_remote);
1919
1920 if (__predict_false(peer != allowed_peer)) {
1921 DPRINTF(sc, "Packet has disallowed src IP from peer %ld\n",
1922 peer->p_id);
1923 goto out;
1924 }
1925
1926 state = WG_PACKET_CRYPTED;
1927
1928 out:
1929 pkt->p_mbuf = m;
1930 atomic_store_rel_int(&pkt->p_state, state);
1931 taskqueue_enqueue(peer->p_recv_taskqueue, &peer->p_recv_task);
1932 noise_remote_put(remote);
1933 }
1934
1935 static void
1936 wg_encrypt_worker(void *arg, int pending __unused)
1937 {
1938 struct wg_softc *sc = arg;
1939 struct wg_queue *queue = &sc->sc_encrypt_parallel;
1940 struct wg_packet *pkt;
1941
1942 while ((pkt = wg_queue_dequeue_parallel(queue)) != NULL)
1943 wg_encrypt(sc, pkt);
1944 }
1945
1946 static void
1947 wg_decrypt_worker(void *arg, int pending __unused)
1948 {
1949 struct wg_softc *sc = arg;
1950 struct wg_queue *queue = &sc->sc_decrypt_parallel;
1951 struct wg_packet *pkt;
1952
1953 while ((pkt = wg_queue_dequeue_parallel(queue)) != NULL)
1954 wg_decrypt(sc, pkt);
1955 }
1956
1957 static void
1958 wg_encrypt_dispatch(struct wg_softc *sc)
1959 {
1960 int cpu;
1961
1962 /*
1963 * The update to encrypt_last_cpu is racy such that we may
1964 * reschedule the task for the same CPU multiple times, but
1965 * the race doesn't really matter.
1966 */
1967 cpu = (sc->sc_encrypt_last_cpu + 1) % ncpus;
1968 sc->sc_encrypt_last_cpu = cpu;
1969 taskqueue_enqueue(wg_taskqueues[cpu], &sc->sc_encrypt_tasks[cpu]);
1970 }
1971
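/* The race on sc_decrypt_last_cpu is benign, as on the encrypt side. */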
1972 static void
1973 wg_decrypt_dispatch(struct wg_softc *sc)
1974 {
1975 int cpu;
1976
1977 cpu = (sc->sc_decrypt_last_cpu + 1) % ncpus;
1978 sc->sc_decrypt_last_cpu = cpu;
1979 taskqueue_enqueue(wg_taskqueues[cpu], &sc->sc_decrypt_tasks[cpu]);
1980 }
1981
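/*
 * Serial transmit stage: drain the peer's encrypt-serial queue so that
 * packets leave in the order they were staged, even though encryption
 * itself ran on multiple CPUs in parallel.
 */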
1982 static void
1983 wg_deliver_out(void *arg, int pending __unused)
1984 {
1985 struct wg_peer *peer = arg;
1986 struct wg_softc *sc = peer->p_sc;
1987 struct wg_queue *queue = &peer->p_encrypt_serial;
1988 struct wg_endpoint endpoint;
1989 struct wg_packet *pkt;
1990 struct mbuf *m;
1991 int len, cpu;
1992
1993 cpu = mycpuid;
1994
1995 while ((pkt = wg_queue_dequeue_serial(queue)) != NULL) {
1996 if (atomic_load_acq_int(&pkt->p_state) != WG_PACKET_CRYPTED) {
1997 IFNET_STAT_INC(sc->sc_ifp, oerrors, 1);
1998 wg_packet_free(pkt);
1999 continue;
2000 }
2001
2002 m = pkt->p_mbuf;
2003 m->m_flags &= ~MBUF_CLEARFLAGS;
2004 len = m->m_pkthdr.len;
2005
2006 pkt->p_mbuf = NULL;
2007 wg_packet_free(pkt);
2008
2009 /*
2010 * The keepalive timers -- both persistent and mandatory --
2011 * are part of the internal state machine, which needs to be
2012 * cranked whether or not the packet was actually sent.
2013 */
2014 wg_timers_event_any_authenticated_packet_traversal(peer);
2015 wg_timers_event_any_authenticated_packet_sent(peer);
2016
2017 wg_peer_get_endpoint(peer, &endpoint);
2018 if (wg_send(sc, &endpoint, m) == 0) {
2019 peer->p_tx_bytes[cpu] += len;
2020 if (len > WG_PKT_ENCRYPTED_LEN(0))
2021 wg_timers_event_data_sent(peer);
2022 if (noise_keypair_should_refresh(peer->p_remote, true))
2023 wg_timers_event_want_initiation(peer);
2024 }
2025 }
2026 }
2027
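/*
 * Serial receive stage: enforce the replay counter, crank the timer
 * state machine, and hand decrypted packets to the network stack via
 * netisr, preserving per-peer ordering.
 */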
2028 static void
2029 wg_deliver_in(void *arg, int pending __unused)
2030 {
2031 struct wg_peer *peer = arg;
2032 struct wg_softc *sc = peer->p_sc;
2033 struct wg_queue *queue = &peer->p_decrypt_serial;
2034 struct wg_packet *pkt;
2035 struct ifnet *ifp;
2036 struct mbuf *m;
2037 size_t rx_bytes;
2038 int cpu;
2039
2040 cpu = mycpuid;
2041 ifp = sc->sc_ifp;
2042
2043 while ((pkt = wg_queue_dequeue_serial(queue)) != NULL) {
2044 if (atomic_load_acq_int(&pkt->p_state) != WG_PACKET_CRYPTED ||
2045 noise_keypair_counter_check(pkt->p_keypair, pkt->p_counter)
2046 != 0) {
2047 IFNET_STAT_INC(ifp, ierrors, 1);
2048 wg_packet_free(pkt);
2049 continue;
2050 }
2051
2052 if (noise_keypair_received_with(pkt->p_keypair))
2053 wg_timers_event_handshake_complete(peer);
2054
2055 wg_timers_event_any_authenticated_packet_received(peer);
2056 wg_timers_event_any_authenticated_packet_traversal(peer);
2057 wg_peer_set_endpoint(peer, &pkt->p_endpoint);
2058
2059 m = pkt->p_mbuf;
2060 rx_bytes = WG_PKT_ENCRYPTED_LEN(m->m_pkthdr.len);
2061 peer->p_rx_bytes[cpu] += rx_bytes;
2062 IFNET_STAT_INC(ifp, ipackets, 1);
2063 IFNET_STAT_INC(ifp, ibytes, rx_bytes);
2064
2065 if (m->m_pkthdr.len > 0) {
2066 if (ifp->if_capenable & IFCAP_RXCSUM) {
2067 /*
2068 * The packet's authenticity is ensured by the
2069 * AEAD tag, so we can tell the networking
2070 * stack that its checksums are valid and
2071 * need not be verified again.
2072 */
2073 if (m->m_pkthdr.csum_flags & CSUM_IP)
2074 m->m_pkthdr.csum_flags |=
2075 (CSUM_IP_CHECKED | CSUM_IP_VALID);
2076 if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
2077 m->m_pkthdr.csum_flags |=
2078 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2079 m->m_pkthdr.csum_data = 0xffff;
2080 }
2081 }
2082 m->m_flags &= ~MBUF_CLEARFLAGS;
2083 m->m_pkthdr.rcvif = ifp;
2084
2085 wg_bpf_ptap(ifp, m, pkt->p_af);
2086
2087 netisr_queue((pkt->p_af == AF_INET ?
2088 NETISR_IP : NETISR_IPV6), m);
2089 pkt->p_mbuf = NULL;
2090
2091 wg_timers_event_data_received(peer);
2092 }
2093
2094 wg_packet_free(pkt);
2095
2096 if (noise_keypair_should_refresh(peer->p_remote, false))
2097 wg_timers_event_want_initiation(peer);
2098 }
2099 }
2100
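/*
 * UDP receive path: handshake messages (initiation, response, cookie)
 * go to the single handshake queue, while data messages are matched
 * to a keypair by their receiver index and dispatched to the parallel
 * decryption queues.
 */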
2101 static void
2102 wg_input(struct wg_softc *sc, struct mbuf *m, const struct sockaddr *sa)
2103 {
2104 struct noise_remote *remote;
2105 struct wg_pkt_data *data;
2106 struct wg_packet *pkt;
2107 struct wg_peer *peer;
2108 struct mbuf *defragged;
2109
2110 /*
2111 * Defragment mbufs early on in order to:
2112 * - make the crypto a lot faster;
2113 * - make the subsequent m_pullup()'s no-ops.
2114 */
2115 defragged = m_defrag(m, M_NOWAIT);
2116 if (defragged != NULL)
2117 m = defragged; /* The original mbuf chain is freed. */
2118
2119 /* Ensure the packet is not shared before modifying it. */
2120 m = m_unshare(m, M_NOWAIT);
2121 if (m == NULL) {
2122 IFNET_STAT_INC(sc->sc_ifp, iqdrops, 1);
2123 return;
2124 }
2125
2126 /* Pullup enough to read packet type */
2127 if ((m = m_pullup(m, sizeof(uint32_t))) == NULL) {
2128 IFNET_STAT_INC(sc->sc_ifp, iqdrops, 1);
2129 return;
2130 }
2131
2132 if ((pkt = wg_packet_alloc(m)) == NULL) {
2133 IFNET_STAT_INC(sc->sc_ifp, iqdrops, 1);
2134 m_freem(m);
2135 return;
2136 }
2137
2138 /* Save the remote address and port for later use. */
2139 switch (sa->sa_family) {
2140 case AF_INET:
2141 pkt->p_endpoint.e_remote.r_sin =
2142 *(const struct sockaddr_in *)sa;
2143 break;
2144 #ifdef INET6
2145 case AF_INET6:
2146 pkt->p_endpoint.e_remote.r_sin6 =
2147 *(const struct sockaddr_in6 *)sa;
2148 break;
2149 #endif
2150 default:
2151 DPRINTF(sc, "Unsupported packet address family\n");
2152 goto error;
2153 }
2154
2155 if (WG_PKT_IS_INITIATION(m) ||
2156 WG_PKT_IS_RESPONSE(m) ||
2157 WG_PKT_IS_COOKIE(m)) {
2158 if (!wg_queue_enqueue_handshake(&sc->sc_handshake_queue, pkt)) {
2159 IFNET_STAT_INC(sc->sc_ifp, iqdrops, 1);
2160 DPRINTF(sc, "Dropping handshake packet\n");
2161 }
2162 taskqueue_enqueue(sc->sc_handshake_taskqueue,
2163 &sc->sc_handshake_task);
2164 return;
2165 }
2166
2167 if (WG_PKT_IS_DATA(m)) {
2168 /* Pullup the whole header to read r_idx below. */
2169 pkt->p_mbuf = m_pullup(m, sizeof(struct wg_pkt_data));
2170 if (pkt->p_mbuf == NULL)
2171 goto error;
2172
2173 data = mtod(pkt->p_mbuf, struct wg_pkt_data *);
2174 pkt->p_keypair = noise_keypair_lookup(sc->sc_local,
2175 data->r_idx);
2176 if (pkt->p_keypair == NULL)
2177 goto error;
2178
2179 remote = noise_keypair_remote(pkt->p_keypair);
2180 peer = noise_remote_arg(remote);
2181 if (!wg_queue_both(&sc->sc_decrypt_parallel,
2182 &peer->p_decrypt_serial, pkt))
2183 IFNET_STAT_INC(sc->sc_ifp, iqdrops, 1);
2184
2185 wg_decrypt_dispatch(sc);
2186 noise_remote_put(remote);
2187 return;
2188 }
2189
2190 error:
2191 IFNET_STAT_INC(sc->sc_ifp, ierrors, 1);
2192 wg_packet_free(pkt);
2193 }
2194
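/*
 * Socket upcall: the UDP socket has become readable, so drain every
 * queued datagram and feed each one to wg_input().
 */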
2195 static void
2196 wg_upcall(struct socket *so, void *arg, int waitflag __unused)
2197 {
2198 struct wg_softc *sc = arg;
2199 struct sockaddr *from;
2200 struct sockbuf sio;
2201 int ret, flags;
2202
2203 /*
2204 * For UDP, soreceive typically pulls just one packet,
2205 * so loop to get the whole batch.
2206 */
2207 do {
2208 sbinit(&sio, 1000000000); /* really large to receive all */
2209 flags = MSG_DONTWAIT;
2210 ret = so_pru_soreceive(so, &from, NULL, &sio, NULL, &flags);
2211 if (ret != 0 || sio.sb_mb == NULL) {
2212 if (from != NULL)
2213 kfree(from, M_SONAME);
2214 break;
2215 }
2216 wg_input(sc, sio.sb_mb, from);
2217 kfree(from, M_SONAME);
2218 } while (sio.sb_mb != NULL);
2219 }
2220
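/*
 * Flush a peer's staged queue: assign a nonce counter to every staged
 * packet under the current keypair and hand them to the parallel
 * encryption queues.  If any counter cannot be assigned, the whole
 * batch is put back and a new handshake is requested instead.
 */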
2221 static void
2222 wg_peer_send_staged(struct wg_peer *peer)
2223 {
2224 struct wg_softc *sc = peer->p_sc;
2225 struct wg_packet *pkt, *tpkt;
2226 struct wg_packet_list list;
2227 struct noise_keypair *keypair = NULL;
2228
2229 wg_queue_delist_staged(&peer->p_stage_queue, &list);
2230
2231 if (STAILQ_EMPTY(&list))
2232 return;
2233
2234 if ((keypair = noise_keypair_current(peer->p_remote)) == NULL)
2235 goto error;
2236
2237 /*
2238 * We now try to assign counters to all of the packets in the queue.
2239 * If we can't assign counters for all of them, we just consider it
2240 * a failure and wait for the next handshake.
2241 */
2242 STAILQ_FOREACH(pkt, &list, p_parallel) {
2243 if (!noise_keypair_counter_next(keypair, &pkt->p_counter))
2244 goto error;
2245 }
2246 STAILQ_FOREACH_MUTABLE(pkt, &list, p_parallel, tpkt) {
2247 pkt->p_keypair = noise_keypair_ref(keypair);
2248 if (!wg_queue_both(&sc->sc_encrypt_parallel,
2249 &peer->p_encrypt_serial, pkt))
2250 IFNET_STAT_INC(sc->sc_ifp, oqdrops, 1);
2251 }
2252
2253 wg_encrypt_dispatch(sc);
2254 noise_keypair_put(keypair);
2255 return;
2256
2257 error:
2258 if (keypair != NULL)
2259 noise_keypair_put(keypair);
2260 wg_queue_enlist_staged(&peer->p_stage_queue, &list);
2261 wg_timers_event_want_initiation(peer);
2262 }
2263
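/*
 * if_output handler: determine the address family, find the owning
 * peer via an allowed-IPs lookup on the destination address, and
 * stage the packet on that peer's queue for encryption.
 */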
2264 static int
2265 wg_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
2266 struct rtentry *rt)
2267 {
2268 struct wg_softc *sc = ifp->if_softc;
2269 struct wg_packet *pkt = NULL;
2270 struct wg_peer *peer = NULL;
2271 struct mbuf *defragged;
2272 sa_family_t af = AF_UNSPEC;
2273 int ret;
2274
2275 if (dst->sa_family == AF_UNSPEC) {
2276 /*
2277 * Specially handle packets written/injected by BPF.
2278 * The packets have the same DLT_NULL link-layer type
2279 * (i.e., 4-byte link-layer header in host byte order).
2280 */
2281 dst->sa_family = *(mtod(m, uint32_t *));
2282 m_adj(m, sizeof(uint32_t));
2283 }
2284 if (dst->sa_family == AF_UNSPEC) {
2285 ret = EAFNOSUPPORT;
2286 goto error;
2287 }
2288
2289 wg_bpf_ptap(ifp, m, dst->sa_family);
2290
2291 if (__predict_false(if_tunnel_check_nesting(ifp, m, MTAG_WGLOOP,
2292 MAX_LOOPS) != 0)) {
2293 DPRINTF(sc, "Packet looped\n");
2294 ret = ELOOP;
2295 goto error;
2296 }
2297
2298 defragged = m_defrag(m, M_NOWAIT);
2299 if (defragged != NULL)
2300 m = defragged;
2301
2302 m = m_unshare(m, M_NOWAIT);
2303 if (m == NULL) {
2304 ret = ENOBUFS;
2305 goto error;
2306 }
2307
2308 if ((ret = determine_af_and_pullup(&m, &af)) != 0)
2309 goto error;
2310 if (af != dst->sa_family) {
2311 ret = EAFNOSUPPORT;
2312 goto error;
2313 }
2314
2315 if ((pkt = wg_packet_alloc(m)) == NULL) {
2316 ret = ENOBUFS;
2317 goto error;
2318 }
2319
2320 pkt->p_af = af;
2321 pkt->p_mtu = ifp->if_mtu;
2322 if (rt != NULL && rt->rt_rmx.rmx_mtu > 0 &&
2323 rt->rt_rmx.rmx_mtu < pkt->p_mtu)
2324 pkt->p_mtu = rt->rt_rmx.rmx_mtu;
2325
2326 peer = wg_aip_lookup(sc, af,
2327 (af == AF_INET ?
2328 (void *)&mtod(m, struct ip *)->ip_dst :
2329 (void *)&mtod(m, struct ip6_hdr *)->ip6_dst));
2330 if (__predict_false(peer == NULL)) {
2331 ret = ENOKEY;
2332 goto error;
2333 }
2334 if (__predict_false(peer->p_endpoint.e_remote.r_sa.sa_family
2335 == AF_UNSPEC)) {
2336 DPRINTF(sc, "No valid endpoint has been configured or "
2337 "discovered for peer %ld\n", peer->p_id);
2338 ret = EHOSTUNREACH;
2339 goto error;
2340 }
2341
2342 wg_queue_push_staged(&peer->p_stage_queue, pkt);
2343 wg_peer_send_staged(peer);
2344 noise_remote_put(peer->p_remote);
2345
2346 return (0);
2347
2348 error:
2349 IFNET_STAT_INC(ifp, oerrors, 1);
2350 if (ret == ELOOP) {
2351 /* Skip ICMP error for ELOOP to avoid infinite loop. */
2352 m_freem(m); /* m cannot be NULL */
2353 m = NULL;
2354 }
2355 if (m != NULL) {
2356 if (af == AF_INET)
2357 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0);
2358 #ifdef INET6
2359 else if (af == AF_INET6)
2360 icmp6_error(m, ICMP6_DST_UNREACH, 0, 0);
2361 #endif
2362 else
2363 m_freem(m);
2364 }
2365 if (pkt != NULL) {
2366 pkt->p_mbuf = NULL; /* m already freed above */
2367 wg_packet_free(pkt);
2368 }
2369 if (peer != NULL)
2370 noise_remote_put(peer->p_remote);
2371 return (ret);
2372 }
2373
2374 /*----------------------------------------------------------------------------*/
2375 /* Interface Functions */
2376
2377 static int wg_up(struct wg_softc *);
2378 static void wg_down(struct wg_softc *);
2379
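/*
 * The wg_data_io buffer exchanged with userland has a packed,
 * variable-length layout: one wg_interface_io, followed by a
 * wg_peer_io per peer, each immediately followed by its own array of
 * wg_aip_io entries.  Both the get and set paths walk this layout
 * with pointer arithmetic.
 */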
2380 static int
2381 wg_ioctl_get(struct wg_softc *sc, struct wg_data_io *data, bool privileged)
2382 {
2383 struct wg_interface_io *iface_p, iface_o;
2384 struct wg_peer_io *peer_p, peer_o;
2385 struct wg_aip_io *aip_p, aip_o;
2386 struct wg_peer *peer;
2387 struct wg_aip *aip;
2388 size_t size, peer_count, aip_count;
2389 int cpu, ret = 0;
2390
2391 lockmgr(&sc->sc_lock, LK_SHARED);
2392
2393 /* Determine the required data size. */
2394 size = sizeof(struct wg_interface_io);
2395 size += sizeof(struct wg_peer_io) * sc->sc_peers_num;
2396 TAILQ_FOREACH(peer, &sc->sc_peers, p_entry)
2397 size += sizeof(struct wg_aip_io) * peer->p_aips_num;
2398
2399 /* Return the required size for userland allocation. */
2400 if (data->wgd_size < size) {
2401 data->wgd_size = size;
2402 lockmgr(&sc->sc_lock, LK_RELEASE);
2403 return (0);
2404 }
2405
2406 iface_p = data->wgd_interface;
2407 bzero(&iface_o, sizeof(iface_o));
2408 /*
2409 * No need to acquire the 'sc_socket.so_lock', because 'sc_lock'
2410 * is acquired and that's enough to prevent modifications to
2411 * 'sc_socket' members.
2412 */
2413 if (sc->sc_socket.so_port != 0) {
2414 iface_o.i_port = sc->sc_socket.so_port;
2415 iface_o.i_flags |= WG_INTERFACE_HAS_PORT;
2416 }
2417 if (sc->sc_socket.so_user_cookie != 0) {
2418 iface_o.i_cookie = sc->sc_socket.so_user_cookie;
2419 iface_o.i_flags |= WG_INTERFACE_HAS_COOKIE;
2420 }
2421 if (noise_local_keys(sc->sc_local, iface_o.i_public,
2422 iface_o.i_private)) {
2423 iface_o.i_flags |= WG_INTERFACE_HAS_PUBLIC;
2424 if (privileged)
2425 iface_o.i_flags |= WG_INTERFACE_HAS_PRIVATE;
2426 else
2427 bzero(iface_o.i_private, sizeof(iface_o.i_private));
2428 }
2429
2430 peer_count = 0;
2431 peer_p = &iface_p->i_peers[0];
2432 TAILQ_FOREACH(peer, &sc->sc_peers, p_entry) {
2433 bzero(&peer_o, sizeof(peer_o));
2434
2435 peer_o.p_flags |= WG_PEER_HAS_PUBLIC;
2436 if (noise_remote_keys(peer->p_remote, peer_o.p_public,
2437 peer_o.p_psk)) {
2438 if (privileged)
2439 peer_o.p_flags |= WG_PEER_HAS_PSK;
2440 else
2441 bzero(peer_o.p_psk, sizeof(peer_o.p_psk));
2442 }
2443 if (wg_timers_get_persistent_keepalive(peer, &peer_o.p_pka))
2444 peer_o.p_flags |= WG_PEER_HAS_PKA;
2445 if (wg_peer_get_sockaddr(peer, &peer_o.p_sa) == 0)
2446 peer_o.p_flags |= WG_PEER_HAS_ENDPOINT;
2447 for (cpu = 0; cpu < ncpus; cpu++) {
2448 peer_o.p_rxbytes += peer->p_rx_bytes[cpu];
2449 peer_o.p_txbytes += peer->p_tx_bytes[cpu];
2450 }
2451 wg_timers_get_last_handshake(peer, &peer_o.p_last_handshake);
2452 peer_o.p_id = (uint64_t)peer->p_id;
2453 strlcpy(peer_o.p_description, peer->p_description,
2454 sizeof(peer_o.p_description));
2455
2456 aip_count = 0;
2457 aip_p = &peer_p->p_aips[0];
2458 LIST_FOREACH(aip, &peer->p_aips, a_entry) {
2459 bzero(&aip_o, sizeof(aip_o));
2460 aip_o.a_af = aip->a_af;
2461 if (aip->a_af == AF_INET) {
2462 aip_o.a_cidr = bitcount32(aip->a_mask.ip);
2463 memcpy(&aip_o.a_ipv4, &aip->a_addr.in,
2464 sizeof(aip->a_addr.in));
2465 } else if (aip->a_af == AF_INET6) {
2466 aip_o.a_cidr = in6_mask2len(&aip->a_mask.in6,
2467 NULL);
2468 memcpy(&aip_o.a_ipv6, &aip->a_addr.in6,
2469 sizeof(aip->a_addr.in6));
2470 }
2471
2472 ret = copyout(&aip_o, aip_p, sizeof(aip_o));
2473 if (ret != 0)
2474 goto out;
2475
2476 aip_p++;
2477 aip_count++;
2478 }
2479 KKASSERT(aip_count == peer->p_aips_num);
2480 peer_o.p_aips_count = aip_count;
2481
2482 ret = copyout(&peer_o, peer_p, sizeof(peer_o));
2483 if (ret != 0)
2484 goto out;
2485
2486 peer_p = (struct wg_peer_io *)aip_p;
2487 peer_count++;
2488 }
2489 KKASSERT(peer_count == sc->sc_peers_num);
2490 iface_o.i_peers_count = peer_count;
2491
2492 ret = copyout(&iface_o, iface_p, sizeof(iface_o));
2493
2494 out:
2495 lockmgr(&sc->sc_lock, LK_RELEASE);
2496 explicit_bzero(&iface_o, sizeof(iface_o));
2497 explicit_bzero(&peer_o, sizeof(peer_o));
2498 return (ret);
2499 }
2500
2501 static int
2502 wg_ioctl_set(struct wg_softc *sc, struct wg_data_io *data)
2503 {
2504 struct wg_interface_io *iface_p, iface_o;
2505 struct wg_peer_io *peer_p, peer_o;
2506 struct wg_aip_io *aip_p, aip_o;
2507 struct wg_peer *peer;
2508 struct noise_remote *remote;
2509 uint8_t public[WG_KEY_SIZE], private[WG_KEY_SIZE];
2510 size_t i, j;
2511 int ret;
2512
2513 remote = NULL;
2514 lockmgr(&sc->sc_lock, LK_EXCLUSIVE);
2515
2516 iface_p = data->wgd_interface;
2517 if ((ret = copyin(iface_p, &iface_o, sizeof(iface_o))) != 0)
2518 goto error;
2519
2520 if (iface_o.i_flags & WG_INTERFACE_REPLACE_PEERS)
2521 wg_peer_destroy_all(sc);
2522
2523 if ((iface_o.i_flags & WG_INTERFACE_HAS_PRIVATE) &&
2524 (!noise_local_keys(sc->sc_local, NULL, private) ||
2525 timingsafe_bcmp(private, iface_o.i_private, WG_KEY_SIZE) != 0)) {
2526 if (curve25519_generate_public(public, iface_o.i_private)) {
2527 remote = noise_remote_lookup(sc->sc_local, public);
2528 if (remote != NULL) {
2529 /* Remove the conflicting peer. */
2530 peer = noise_remote_arg(remote);
2531 wg_peer_destroy(peer);
2532 noise_remote_put(remote);
2533 }
2534 }
2535
2536 /*
2537 * Set the private key.
2538 *
2539 * Note: we might be removing the private key.
2540 */
2541 if (noise_local_set_private(sc->sc_local, iface_o.i_private))
2542 cookie_checker_update(sc->sc_cookie, public);
2543 else
2544 cookie_checker_update(sc->sc_cookie, NULL);
2545 }
2546
2547 if ((iface_o.i_flags & WG_INTERFACE_HAS_PORT) &&
2548 iface_o.i_port != sc->sc_socket.so_port) {
2549 if (sc->sc_ifp->if_flags & IFF_RUNNING) {
2550 ret = wg_socket_init(sc, iface_o.i_port);
2551 if (ret != 0)
2552 goto error;
2553 } else {
2554 sc->sc_socket.so_port = iface_o.i_port;
2555 }
2556 }
2557
2558 if (iface_o.i_flags & WG_INTERFACE_HAS_COOKIE) {
2559 ret = wg_socket_set_cookie(sc, iface_o.i_cookie);
2560 if (ret != 0)
2561 goto error;
2562 }
2563
2564 peer_p = &iface_p->i_peers[0];
2565 for (i = 0; i < iface_o.i_peers_count; i++) {
2566 if ((ret = copyin(peer_p, &peer_o, sizeof(peer_o))) != 0)
2567 goto error;
2568
2569 /* A peer must have a public key. */
2570 if ((peer_o.p_flags & WG_PEER_HAS_PUBLIC) == 0)
2571 goto next_peer;
2572 /* Ignore a peer whose public key matches the local interface's. */
2573 if (noise_local_keys(sc->sc_local, public, NULL) &&
2574 memcmp(public, peer_o.p_public, WG_KEY_SIZE) == 0)
2575 goto next_peer;
2576
2577 /* Lookup peer, or create if it doesn't exist. */
2578 remote = noise_remote_lookup(sc->sc_local, peer_o.p_public);
2579 if (remote != NULL) {
2580 peer = noise_remote_arg(remote);
2581 } else {
2582 if (peer_o.p_flags & (WG_PEER_REMOVE | WG_PEER_UPDATE))
2583 goto next_peer;
2584
2585 peer = wg_peer_create(sc, peer_o.p_public);
2586 if (peer == NULL) {
2587 ret = ENOMEM;
2588 goto error;
2589 }
2590
2591 /* No allowed IPs to remove for a new peer. */
2592 peer_o.p_flags &= ~WG_PEER_REPLACE_AIPS;
2593 }
2594
2595 if (peer_o.p_flags & WG_PEER_REMOVE) {
2596 wg_peer_destroy(peer);
2597 goto next_peer;
2598 }
2599
2600 if (peer_o.p_flags & WG_PEER_HAS_ENDPOINT) {
2601 ret = wg_peer_set_sockaddr(peer, &peer_o.p_sa);
2602 if (ret != 0)
2603 goto error;
2604 }
2605 if (peer_o.p_flags & WG_PEER_HAS_PSK)
2606 noise_remote_set_psk(peer->p_remote, peer_o.p_psk);
2607 if (peer_o.p_flags & WG_PEER_HAS_PKA)
2608 wg_timers_set_persistent_keepalive(peer, peer_o.p_pka);
2609 if (peer_o.p_flags & WG_PEER_SET_DESCRIPTION)
2610 strlcpy(peer->p_description, peer_o.p_description,
2611 sizeof(peer->p_description));
2612
2613 if (peer_o.p_flags & WG_PEER_REPLACE_AIPS)
2614 wg_aip_remove_all(sc, peer);
2615
2616 for (j = 0; j < peer_o.p_aips_count; j++) {
2617 aip_p = &peer_p->p_aips[j];
2618 if ((ret = copyin(aip_p, &aip_o, sizeof(aip_o))) != 0)
2619 goto error;
2620 ret = wg_aip_add(sc, peer, aip_o.a_af, &aip_o.a_addr,
2621 aip_o.a_cidr);
2622 if (ret != 0)
2623 goto error;
2624 }
2625
2626 if (sc->sc_ifp->if_link_state == LINK_STATE_UP)
2627 wg_peer_send_staged(peer);
2628
2629 next_peer:
2630 if (remote != NULL) {
2631 noise_remote_put(remote);
2632 remote = NULL;
2633 }
2634 aip_p = &peer_p->p_aips[peer_o.p_aips_count];
2635 peer_p = (struct wg_peer_io *)aip_p;
2636 }
2637
2638 error:
2639 if (remote != NULL)
2640 noise_remote_put(remote);
2641 lockmgr(&sc->sc_lock, LK_RELEASE);
2642 explicit_bzero(&iface_o, sizeof(iface_o));
2643 explicit_bzero(&peer_o, sizeof(peer_o));
2644 explicit_bzero(&aip_o, sizeof(aip_o));
2645 explicit_bzero(public, sizeof(public));
2646 explicit_bzero(private, sizeof(private));
2647 return (ret);
2648 }
2649
2650 static int
2651 wg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cred)
2652 {
2653 struct wg_data_io *wgd;
2654 struct wg_softc *sc;
2655 struct ifreq *ifr;
2656 bool privileged;
2657 int ret, mask;
2658
2659 sc = ifp->if_softc;
2660 ifr = (struct ifreq *)data;
2661 ret = 0;
2662
2663 switch (cmd) {
2664 case SIOCSWG:
2665 ret = caps_priv_check(cred, SYSCAP_RESTRICTEDROOT);
2666 if (ret == 0) {
2667 wgd = (struct wg_data_io *)data;
2668 ret = wg_ioctl_set(sc, wgd);
2669 }
2670 break;
2671 case SIOCGWG:
2672 privileged =
2673 (caps_priv_check(cred, SYSCAP_RESTRICTEDROOT) == 0);
2674 wgd = (struct wg_data_io *)data;
2675 ret = wg_ioctl_get(sc, wgd, privileged);
2676 break;
2677 /* Interface IOCTLs */
2678 case SIOCSIFADDR:
2679 /*
2680 * This differs from *BSD norms, but is more uniform with how
2681 * WireGuard behaves elsewhere.
2682 */
2683 break;
2684 case SIOCSIFFLAGS:
2685 if (ifp->if_flags & IFF_UP)
2686 ret = wg_up(sc);
2687 else
2688 wg_down(sc);
2689 break;
2690 case SIOCSIFMTU:
2691 if (ifr->ifr_mtu <= 0 || ifr->ifr_mtu > MAX_MTU)
2692 ret = EINVAL;
2693 else
2694 ifp->if_mtu = ifr->ifr_mtu;
2695 break;
2696 case SIOCSIFCAP:
2697 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
2698 if (mask & IFCAP_RXCSUM)
2699 ifp->if_capenable ^= IFCAP_RXCSUM;
2700 break;
2701 case SIOCADDMULTI:
2702 case SIOCDELMULTI:
2703 break;
2704 default:
2705 ret = ENOTTY;
2706 }
2707
2708 return (ret);
2709 }
2710
2711 static int
2712 wg_up(struct wg_softc *sc)
2713 {
2714 struct ifnet *ifp = sc->sc_ifp;
2715 struct wg_peer *peer;
2716 int ret = 0;
2717
2718 lockmgr(&sc->sc_lock, LK_EXCLUSIVE);
2719
2720 /* Silent success if we're already running. */
2721 if (ifp->if_flags & IFF_RUNNING)
2722 goto out;
2723 ifp->if_flags |= IFF_RUNNING;
2724
2725 ret = wg_socket_init(sc, sc->sc_socket.so_port);
2726 if (ret == 0) {
2727 TAILQ_FOREACH(peer, &sc->sc_peers, p_entry)
2728 wg_timers_enable(peer);
2729 ifp->if_link_state = LINK_STATE_UP;
2730 if_link_state_change(ifp);
2731 } else {
2732 ifp->if_flags &= ~IFF_RUNNING;
2733 DPRINTF(sc, "Unable to initialize sockets: %d\n", ret);
2734 }
2735
2736 out:
2737 lockmgr(&sc->sc_lock, LK_RELEASE);
2738 return (ret);
2739 }
2740
2741 static void
2742 wg_down(struct wg_softc *sc)
2743 {
2744 struct ifnet *ifp = sc->sc_ifp;
2745 struct wg_peer *peer;
2746 int i;
2747
2748 lockmgr(&sc->sc_lock, LK_EXCLUSIVE);
2749
2750 if ((ifp->if_flags & IFF_RUNNING) == 0) {
2751 lockmgr(&sc->sc_lock, LK_RELEASE);
2752 return;
2753 }
2754 ifp->if_flags &= ~IFF_RUNNING;
2755
2756 /* Cancel all tasks. */
2757 while (taskqueue_cancel(sc->sc_handshake_taskqueue,
2758 &sc->sc_handshake_task, NULL) != 0) {
2759 taskqueue_drain(sc->sc_handshake_taskqueue,
2760 &sc->sc_handshake_task);
2761 }
2762 for (i = 0; i < ncpus; i++) {
2763 while (taskqueue_cancel(wg_taskqueues[i],
2764 &sc->sc_encrypt_tasks[i], NULL) != 0) {
2765 taskqueue_drain(wg_taskqueues[i],
2766 &sc->sc_encrypt_tasks[i]);
2767 }
2768 while (taskqueue_cancel(wg_taskqueues[i],
2769 &sc->sc_decrypt_tasks[i], NULL) != 0) {
2770 taskqueue_drain(wg_taskqueues[i],
2771 &sc->sc_decrypt_tasks[i]);
2772 }
2773 }
2774
2775 TAILQ_FOREACH(peer, &sc->sc_peers, p_entry) {
2776 wg_queue_purge(&peer->p_stage_queue);
2777 wg_timers_disable(peer);
2778 }
2779
2780 wg_queue_purge(&sc->sc_handshake_queue);
2781
2782 TAILQ_FOREACH(peer, &sc->sc_peers, p_entry) {
2783 noise_remote_handshake_clear(peer->p_remote);
2784 noise_remote_keypairs_clear(peer->p_remote);
2785 }
2786
2787 ifp->if_link_state = LINK_STATE_DOWN;
2788 if_link_state_change(ifp);
2789 wg_socket_uninit(sc);
2790
2791 lockmgr(&sc->sc_lock, LK_RELEASE);
2792 }
2793
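/*
 * Interface cloner: allocate the softc, the per-AF allowed-IPs radix
 * trees and the per-CPU crypto tasks, then attach the ifnet with a
 * DLT_NULL BPF header.
 */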
2794 static int
2795 wg_clone_create(struct if_clone *ifc __unused, int unit,
2796 caddr_t params __unused, caddr_t data __unused)
2797 {
2798 struct wg_softc *sc;
2799 struct ifnet *ifp;
2800 int i;
2801
2802 sc = kmalloc(sizeof(*sc), M_WG, M_WAITOK | M_ZERO);
2803
2804 if (!rn_inithead(&sc->sc_aip4, wg_maskhead,
2805 offsetof(struct aip_addr, in)) ||
2806 !rn_inithead(&sc->sc_aip6, wg_maskhead,
2807 offsetof(struct aip_addr, in6))) {
2808 if (sc->sc_aip4 != NULL)
2809 rn_freehead(sc->sc_aip4);
2810 if (sc->sc_aip6 != NULL)
2811 rn_freehead(sc->sc_aip6);
2812 kfree(sc, M_WG);
2813 return (ENOMEM);
2814 }
2815
2816 lockinit(&sc->sc_lock, "wg softc lock", 0, 0);
2817 lockinit(&sc->sc_aip_lock, "wg aip lock", 0, 0);
2818
2819 sc->sc_local = noise_local_alloc();
2820 sc->sc_cookie = cookie_checker_alloc();
2821
2822 TAILQ_INIT(&sc->sc_peers);
2823
2824 sc->sc_handshake_taskqueue = wg_taskqueues[karc4random() % ncpus];
2825 TASK_INIT(&sc->sc_handshake_task, 0, wg_handshake_worker, sc);
2826 wg_queue_init(&sc->sc_handshake_queue, "hsq");
2827
2828 sc->sc_encrypt_tasks = kmalloc(sizeof(*sc->sc_encrypt_tasks) * ncpus,
2829 M_WG, M_WAITOK | M_ZERO);
2830 sc->sc_decrypt_tasks = kmalloc(sizeof(*sc->sc_decrypt_tasks) * ncpus,
2831 M_WG, M_WAITOK | M_ZERO);
2832 for (i = 0; i < ncpus; i++) {
2833 TASK_INIT(&sc->sc_encrypt_tasks[i], 0, wg_encrypt_worker, sc);
2834 TASK_INIT(&sc->sc_decrypt_tasks[i], 0, wg_decrypt_worker, sc);
2835 }
2836 wg_queue_init(&sc->sc_encrypt_parallel, "encp");
2837 wg_queue_init(&sc->sc_decrypt_parallel, "decp");
2838
2839 ifp = sc->sc_ifp = if_alloc(IFT_WIREGUARD);
2840 if_initname(ifp, wgname, unit);
2841 ifp->if_softc = sc;
2842 ifp->if_mtu = DEFAULT_MTU;
2843 ifp->if_flags = IFF_NOARP | IFF_MULTICAST;
2844 ifp->if_capabilities = ifp->if_capenable = IFCAP_RXCSUM;
2845 ifp->if_output = wg_output;
2846 ifp->if_ioctl = wg_ioctl;
2847 ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
2848 ifq_set_ready(&ifp->if_snd);
2849
2850 if_attach(ifp, NULL);
2851
2852 /* DLT_NULL link-layer header: a 4-byte field in host byte order */
2853 bpfattach(ifp, DLT_NULL, sizeof(uint32_t));
2854
2855 #ifdef INET6
2856 /* NOTE: ND_IFINFO() is only available after if_attach(). */
2857 ND_IFINFO(ifp)->flags &= ~ND6_IFF_AUTO_LINKLOCAL;
2858 ND_IFINFO(ifp)->flags |= ND6_IFF_NO_DAD;
2859 #endif
2860
2861 lockmgr(&wg_mtx, LK_EXCLUSIVE);
2862 LIST_INSERT_HEAD(&wg_list, sc, sc_entry);
2863 lockmgr(&wg_mtx, LK_RELEASE);
2864
2865 return (0);
2866 }
2867
2868 static int
2869 wg_clone_destroy(struct ifnet *ifp)
2870 {
2871 struct wg_softc *sc = ifp->if_softc;
2872
2873 wg_down(sc);
2874
2875 lockmgr(&sc->sc_lock, LK_EXCLUSIVE);
2876
2877 kfree(sc->sc_encrypt_tasks, M_WG);
2878 kfree(sc->sc_decrypt_tasks, M_WG);
2879 wg_queue_deinit(&sc->sc_handshake_queue);
2880 wg_queue_deinit(&sc->sc_encrypt_parallel);
2881 wg_queue_deinit(&sc->sc_decrypt_parallel);
2882
2883 wg_peer_destroy_all(sc);
2884
2885 /*
2886 * Detach and free the interface before the sc_aip4 and sc_aip6 radix
2887 * trees, because purging the interface's IPv6 addresses can cause
2888 * packet transmission and thus wg_aip_lookup() calls.
2889 */
2890 bpfdetach(ifp);
2891 if_detach(ifp);
2892 if_free(ifp);
2893
2894 /*
2895 * All peers have been removed, so the sc_aip4 and sc_aip6 radix trees
2896 * must be empty now.
2897 */
2898 rn_freehead(sc->sc_aip4);
2899 rn_freehead(sc->sc_aip6);
2900 lockuninit(&sc->sc_aip_lock);
2901
2902 cookie_checker_free(sc->sc_cookie);
2903 noise_local_free(sc->sc_local);
2904
2905 lockmgr(&wg_mtx, LK_EXCLUSIVE);
2906 LIST_REMOVE(sc, sc_entry);
2907 lockmgr(&wg_mtx, LK_RELEASE);
2908
2909 lockmgr(&sc->sc_lock, LK_RELEASE);
2910 lockuninit(&sc->sc_lock);
2911 kfree(sc, M_WG);
2912
2913 return (0);
2914 }
2915
2916 /*----------------------------------------------------------------------------*/
2917 /* Module Interface */
2918
2919 #ifdef WG_SELFTESTS
2920 #include "selftest/allowedips.c"
2921 static bool
2922 wg_run_selftests(void)
2923 {
2924 bool ret = true;
2925
2926 ret &= wg_allowedips_selftest();
2927 ret &= noise_counter_selftest();
2928 ret &= cookie_selftest();
2929
2930 kprintf("%s: %s\n", __func__, ret ? "pass" : "FAIL");
2931 return (ret);
2932 }
2933 #else /* !WG_SELFTESTS */
2934 static inline bool
2935 wg_run_selftests(void)
2936 {
2937 return (true);
2938 }
2939 #endif /* WG_SELFTESTS */
2940
2941 static struct if_clone wg_cloner = IF_CLONE_INITIALIZER(
2942 wgname, wg_clone_create, wg_clone_destroy, 0, IF_MAXUNIT);
2943
2944 static int
2945 wg_module_init(void)
2946 {
2947 int i, ret;
2948
2949 lockinit(&wg_mtx, "wg mtx lock", 0, 0);
2950
2951 wg_packet_zone = objcache_create_simple(M_WG_PACKET,
2952 sizeof(struct wg_packet));
2953 if (wg_packet_zone == NULL)
2954 return (ENOMEM);
2955
2956 wg_taskqueues = kmalloc(sizeof(*wg_taskqueues) * ncpus, M_WG,
2957 M_WAITOK | M_ZERO);
2958 for (i = 0; i < ncpus; i++) {
2959 wg_taskqueues[i] = taskqueue_create("wg_taskq", M_WAITOK,
2960 taskqueue_thread_enqueue,
2961 &wg_taskqueues[i]);
2962 taskqueue_start_threads(&wg_taskqueues[i], 1,
2963 TDPRI_KERN_DAEMON, i,
2964 "wg_taskq_cpu_%d", i);
2965 }
2966
2967 if (!rn_inithead(&wg_maskhead, NULL, 0))
2968 return (ENOMEM);
2969
2970 ret = cookie_init();
2971 if (ret != 0)
2972 return (ret);
2973 ret = noise_init();
2974 if (ret != 0)
2975 return (ret);
2976
2977 ret = if_clone_attach(&wg_cloner);
2978 if (ret != 0)
2979 return (ret);
2980
2981 if (!wg_run_selftests())
2982 return (ENOTRECOVERABLE);
2983
2984 return (0);
2985 }
2986
2987 static int
2988 wg_module_deinit(void)
2989 {
2990 int i;
2991
2992 lockmgr(&wg_mtx, LK_EXCLUSIVE);
2993
2994 if (!LIST_EMPTY(&wg_list)) {
2995 lockmgr(&wg_mtx, LK_RELEASE);
2996 return (EBUSY);
2997 }
2998
2999 if_clone_detach(&wg_cloner);
3000
3001 noise_deinit();
3002 cookie_deinit();
3003
3004 for (i = 0; i < ncpus; i++)
3005 taskqueue_free(wg_taskqueues[i]);
3006 kfree(wg_taskqueues, M_WG);
3007
3008 rn_flush(wg_maskhead, rn_freemask);
3009 rn_freehead(wg_maskhead);
3010
3011 if (wg_packet_zone != NULL)
3012 objcache_destroy(wg_packet_zone);
3013
3014 lockmgr(&wg_mtx, LK_RELEASE);
3015 lockuninit(&wg_mtx);
3016
3017 return (0);
3018 }
3019
3020 static int
3021 wg_module_event_handler(module_t mod __unused, int what, void *arg __unused)
3022 {
3023 switch (what) {
3024 case MOD_LOAD:
3025 return wg_module_init();
3026 case MOD_UNLOAD:
3027 return wg_module_deinit();
3028 default:
3029 return (EOPNOTSUPP);
3030 }
3031 }
3032
3033 static moduledata_t wg_moduledata = {
3034 "if_wg",
3035 wg_module_event_handler,
3036 NULL
3037 };
3038
3039 DECLARE_MODULE(if_wg, wg_moduledata, SI_SUB_PSEUDO, SI_ORDER_ANY);
3040 MODULE_VERSION(if_wg, 1); /* WireGuard version */
3041 MODULE_DEPEND(if_wg, crypto, 1, 1, 1);
3042