#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
#
# This script tests the below topology:
#
# ┌─────────────────────┐   ┌──────────────────────────────────┐   ┌─────────────────────┐
# │   $ns1 namespace    │   │          $ns0 namespace          │   │   $ns2 namespace    │
# │                     │   │                                  │   │                     │
# │┌────────┐           │   │            ┌────────┐            │   │           ┌────────┐│
# ││  wg0   │───────────┼───┼────────────│   lo   │────────────┼───┼───────────│  wg0   ││
# │├────────┴──────────┐│   │    ┌───────┴────────┴────────┐   │   │┌──────────┴────────┤│
# ││192.168.241.1/24   ││   │    │(ns1)         (ns2)      │   │   ││192.168.241.2/24   ││
# ││fd00::1/24         ││   │    │127.0.0.1:1   127.0.0.1:2│   │   ││fd00::2/24         ││
# │└───────────────────┘│   │    │[::]:1        [::]:2     │   │   │└───────────────────┘│
# └─────────────────────┘   │    └─────────────────────────┘   │   └─────────────────────┘
#                           └──────────────────────────────────┘
#
# After the topology is prepared we run a series of TCP/UDP iperf3 tests between the
# wireguard peers in $ns1 and $ns2. Note that $ns0 is the endpoint for the wg0
# interfaces in $ns1 and $ns2. See https://www.wireguard.com/netns/ for further
# details on how this is accomplished.
set -e
shopt -s extglob

exec 3>&1
export LANG=C
export WG_HIDE_KEYS=never
NPROC=( /sys/devices/system/cpu/cpu+([0-9]) ); NPROC=${#NPROC[@]}
netns0="wg-test-$$-0"
netns1="wg-test-$$-1"
netns2="wg-test-$$-2"
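# n0/n1/n2 run a command inside the corresponding namespace; maybe_exec uses
# exec when called from a background subshell so that $! is the pid of the
# actual test program, which the wait helpers and kill calls below rely on.
# ip0/ip1/ip2 are the matching `ip -n` shorthands. sleep() is overridden to use
# read's timeout instead of forking an external binary, and the wait* helpers
# poll ss(8)/sysfs rather than sleeping for a fixed interval.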
pretty() { echo -e "\x1b[32m\x1b[1m[+] ${1:+NS$1: }${2}\x1b[0m" >&3; }
pp() { pretty "" "$*"; "$@"; }
maybe_exec() { if [[ $BASHPID -eq $$ ]]; then "$@"; else exec "$@"; fi; }
n0() { pretty 0 "$*"; maybe_exec ip netns exec $netns0 "$@"; }
n1() { pretty 1 "$*"; maybe_exec ip netns exec $netns1 "$@"; }
n2() { pretty 2 "$*"; maybe_exec ip netns exec $netns2 "$@"; }
ip0() { pretty 0 "ip $*"; ip -n $netns0 "$@"; }
ip1() { pretty 1 "ip $*"; ip -n $netns1 "$@"; }
ip2() { pretty 2 "ip $*"; ip -n $netns2 "$@"; }
sleep() { read -t "$1" -N 1 || true; }
waitiperf() { pretty "${1//*-}" "wait for iperf:${3:-5201} pid $2"; while [[ $(ss -N "$1" -tlpH "sport = ${3:-5201}") != *\"iperf3\",pid=$2,fd=* ]]; do sleep 0.1; done; }
waitncatudp() { pretty "${1//*-}" "wait for udp:1111 pid $2"; while [[ $(ss -N "$1" -ulpH 'sport = 1111') != *\"ncat\",pid=$2,fd=* ]]; do sleep 0.1; done; }
waitiface() { pretty "${1//*-}" "wait for $2 to come up"; ip netns exec "$1" bash -c "while [[ \$(< \"/sys/class/net/$2/operstate\") != up ]]; do read -t .1 -N 0 || true; done;"; }

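# cleanup() tears down whatever has been created so far; it is registered on
# EXIT below, so with `set -e` a single failed assertion still removes the
# devices, namespaces, and any leftover processes.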
cleanup() {
	set +e
	exec 2>/dev/null
	printf "$orig_message_cost" > /proc/sys/net/core/message_cost
	ip0 link del dev wg0
	ip0 link del dev wg1
	ip1 link del dev wg0
	ip1 link del dev wg1
	ip2 link del dev wg0
	ip2 link del dev wg1
	local to_kill="$(ip netns pids $netns0) $(ip netns pids $netns1) $(ip netns pids $netns2)"
	[[ -n $to_kill ]] && kill $to_kill
	pp ip netns del $netns1
	pp ip netns del $netns2
	pp ip netns del $netns0
	exit
}

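# printk rate limiting is disabled (message_cost=0) for the duration of the run
# and the original value is restored in cleanup(). Presumably this keeps the
# kernel from dropping the wgN created/destroyed messages that the final
# object-leak check parses out of /dev/kmsg.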
orig_message_cost="$(< /proc/sys/net/core/message_cost)"
trap cleanup EXIT
printf 0 > /proc/sys/net/core/message_cost

ip netns del $netns0 2>/dev/null || true
ip netns del $netns1 2>/dev/null || true
ip netns del $netns2 2>/dev/null || true
pp ip netns add $netns0
pp ip netns add $netns1
pp ip netns add $netns2
ip0 link set up dev lo

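# Each wg0 is created in $netns0 and only then moved into $netns1/$netns2, so
# its UDP transport sockets stay bound in $netns0 while the cleartext interface
# lives elsewhere; this is the namespace technique described at the
# https://www.wireguard.com/netns/ link referenced above.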
ip0 link add dev wg0 type wireguard
ip0 link set wg0 netns $netns1
ip0 link add dev wg0 type wireguard
ip0 link set wg0 netns $netns2
key1="$(pp wg genkey)"
key2="$(pp wg genkey)"
key3="$(pp wg genkey)"
key4="$(pp wg genkey)"
pub1="$(pp wg pubkey <<<"$key1")"
pub2="$(pp wg pubkey <<<"$key2")"
pub3="$(pp wg pubkey <<<"$key3")"
pub4="$(pp wg pubkey <<<"$key4")"
psk="$(pp wg genpsk)"
[[ -n $key1 && -n $key2 && -n $psk ]]

configure_peers() {
	ip1 addr add 192.168.241.1/24 dev wg0
	ip1 addr add fd00::1/112 dev wg0

	ip2 addr add 192.168.241.2/24 dev wg0
	ip2 addr add fd00::2/112 dev wg0

	n1 wg set wg0 \
		private-key <(echo "$key1") \
		listen-port 1 \
		peer "$pub2" \
			preshared-key <(echo "$psk") \
			allowed-ips 192.168.241.2/32,fd00::2/128
	n2 wg set wg0 \
		private-key <(echo "$key2") \
		listen-port 2 \
		peer "$pub1" \
			preshared-key <(echo "$psk") \
			allowed-ips 192.168.241.1/32,fd00::1/128

	ip1 link set up dev wg0
	ip2 link set up dev wg0
}
configure_peers

tests() {
	# Ping over IPv4
	n2 ping -c 10 -f -W 1 192.168.241.1
	n1 ping -c 10 -f -W 1 192.168.241.2

	# Ping over IPv6
	n2 ping6 -c 10 -f -W 1 fd00::1
	n1 ping6 -c 10 -f -W 1 fd00::2

	# TCP over IPv4
	n2 iperf3 -s -1 -B 192.168.241.2 &
	waitiperf $netns2 $!
	n1 iperf3 -Z -t 3 -c 192.168.241.2

	# TCP over IPv6
	n1 iperf3 -s -1 -B fd00::1 &
	waitiperf $netns1 $!
	n2 iperf3 -Z -t 3 -c fd00::1

	# UDP over IPv4
	n1 iperf3 -s -1 -B 192.168.241.1 &
	waitiperf $netns1 $!
	n2 iperf3 -Z -t 3 -b 0 -u -c 192.168.241.1

	# UDP over IPv6
	n2 iperf3 -s -1 -B fd00::2 &
	waitiperf $netns2 $!
	n1 iperf3 -Z -t 3 -b 0 -u -c fd00::2

	# TCP over IPv4, in parallel
	local pids=( ) i
	for ((i=0; i < NPROC; ++i)) do
		n2 iperf3 -p $(( 5200 + i )) -s -1 -B 192.168.241.2 &
		pids+=( $! ); waitiperf $netns2 $! $(( 5200 + i ))
	done
	for ((i=0; i < NPROC; ++i)) do
		n1 iperf3 -Z -t 3 -p $(( 5200 + i )) -c 192.168.241.2 &
	done
	wait "${pids[@]}"
}

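# Scrape wg0's current MTU out of `ip link` output and derive a much larger
# one; tests() is run below at both sizes so the oversized-packet path gets
# exercised too. (The 34816 constant is kept as-is; its derivation isn't
# explained here.)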
[[ $(ip1 link show dev wg0) =~ mtu\ ([0-9]+) ]] && orig_mtu="${BASH_REMATCH[1]}"
big_mtu=$(( 34816 - 1500 + $orig_mtu ))

# Test using IPv4 as outer transport
n1 wg set wg0 peer "$pub2" endpoint 127.0.0.1:2
n2 wg set wg0 peer "$pub1" endpoint 127.0.0.1:1
# Before calling tests, we first make sure that the stats counters and timestamper are working
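# The expected values below are exact on purpose. Working it out (an inference,
# not documented here): each 84-byte echo request/reply pads to 96 bytes and
# encrypts to a 128-byte transport message, so 10 pings make 1280 bytes, plus a
# 148-byte handshake initiation on the initiator's tx side (1428) and a 92-byte
# handshake response on its rx side (1372); the alternative 1460 allows for one
# extra 32-byte keepalive message.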
n2 ping -c 10 -f -W 1 192.168.241.1
{ read _; read _; read _; read rx_bytes _; read _; read tx_bytes _; } < <(ip2 -stats link show dev wg0)
(( rx_bytes == 1372 && (tx_bytes == 1428 || tx_bytes == 1460) ))
{ read _; read _; read _; read rx_bytes _; read _; read tx_bytes _; } < <(ip1 -stats link show dev wg0)
(( tx_bytes == 1372 && (rx_bytes == 1428 || rx_bytes == 1460) ))
read _ rx_bytes tx_bytes < <(n2 wg show wg0 transfer)
(( rx_bytes == 1372 && (tx_bytes == 1428 || tx_bytes == 1460) ))
read _ rx_bytes tx_bytes < <(n1 wg show wg0 transfer)
(( tx_bytes == 1372 && (rx_bytes == 1428 || rx_bytes == 1460) ))
read _ timestamp < <(n1 wg show wg0 latest-handshakes)
(( timestamp != 0 ))

tests
ip1 link set wg0 mtu $big_mtu
ip2 link set wg0 mtu $big_mtu
tests

ip1 link set wg0 mtu $orig_mtu
ip2 link set wg0 mtu $orig_mtu

# Test using IPv6 as outer transport
n1 wg set wg0 peer "$pub2" endpoint [::1]:2
n2 wg set wg0 peer "$pub1" endpoint [::1]:1
tests
ip1 link set wg0 mtu $big_mtu
ip2 link set wg0 mtu $big_mtu
tests

# Test that route MTUs work with the padding
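# WireGuard pads plaintext up to the applicable MTU. The iptables rule below
# drops outer packets of exactly 1360 bytes, which is what a 1297-byte ping
# padded to the 1300-byte device MTU would produce (1300 + 32 bytes of
# WireGuard framing + 28 bytes of IPv4/UDP headers); if the 1299-byte route MTU
# is honored instead, the packet stays below that size and the ping succeeds.
# (Arithmetic inferred from the rule, not stated here.)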
ip1 link set wg0 mtu 1300
ip2 link set wg0 mtu 1300
n1 wg set wg0 peer "$pub2" endpoint 127.0.0.1:2
n2 wg set wg0 peer "$pub1" endpoint 127.0.0.1:1
n0 iptables -A INPUT -m length --length 1360 -j DROP
n1 ip route add 192.168.241.2/32 dev wg0 mtu 1299
n2 ip route add 192.168.241.1/32 dev wg0 mtu 1299
n2 ping -c 1 -W 1 -s 1269 192.168.241.1
n2 ip route delete 192.168.241.1/32 dev wg0 mtu 1299
n1 ip route delete 192.168.241.2/32 dev wg0 mtu 1299
n0 iptables -F INPUT

ip1 link set wg0 mtu $orig_mtu
ip2 link set wg0 mtu $orig_mtu

# Test using IPv4 that roaming works
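# Roaming: n1's source address and listen port both change, and after a single
# packet reaches n2, n2 must have updated its stored endpoint for $pub1
# automatically.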
ip0 -4 addr del 127.0.0.1/8 dev lo
ip0 -4 addr add 127.212.121.99/8 dev lo
n1 wg set wg0 listen-port 9999
n1 wg set wg0 peer "$pub2" endpoint 127.0.0.1:2
n1 ping6 -W 1 -c 1 fd00::2
[[ $(n2 wg show wg0 endpoints) == "$pub1	127.212.121.99:9999" ]]

# Test using IPv6 that roaming works
n1 wg set wg0 listen-port 9998
n1 wg set wg0 peer "$pub2" endpoint [::1]:2
n1 ping -W 1 -c 1 192.168.241.2
[[ $(n2 wg show wg0 endpoints) == "$pub1	[::1]:9998" ]]

# Test that crypto-RP filter works
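# Cryptokey routing doubles as an ingress filter: while 192.168.241.0/24 is
# owned by $pub2, its datagram is accepted; once the more specific /32 is given
# to a different (nonexistent) peer below, the same datagram from n2 must be
# dropped on receive.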
n1 wg set wg0 peer "$pub2" allowed-ips 192.168.241.0/24
exec 4< <(n1 ncat -l -u -p 1111)
ncat_pid=$!
waitncatudp $netns1 $ncat_pid
n2 ncat -u 192.168.241.1 1111 <<<"X"
read -r -N 1 -t 1 out <&4 && [[ $out == "X" ]]
kill $ncat_pid
more_specific_key="$(pp wg genkey | pp wg pubkey)"
n1 wg set wg0 peer "$more_specific_key" allowed-ips 192.168.241.2/32
n2 wg set wg0 listen-port 9997
exec 4< <(n1 ncat -l -u -p 1111)
ncat_pid=$!
waitncatudp $netns1 $ncat_pid
n2 ncat -u 192.168.241.1 1111 <<<"X"
! read -r -N 1 -t 1 out <&4 || false
kill $ncat_pid
n1 wg set wg0 peer "$more_specific_key" remove
[[ $(n1 wg show wg0 endpoints) == "$pub2	[::1]:9997" ]]

# Test that we can change private keys and immediately handshake
n1 wg set wg0 private-key <(echo "$key1") peer "$pub2" preshared-key <(echo "$psk") allowed-ips 192.168.241.2/32 endpoint 127.0.0.1:2
n2 wg set wg0 private-key <(echo "$key2") listen-port 2 peer "$pub1" preshared-key <(echo "$psk") allowed-ips 192.168.241.1/32
n1 ping -W 1 -c 1 192.168.241.2
n1 wg set wg0 private-key <(echo "$key3")
n2 wg set wg0 peer "$pub3" preshared-key <(echo "$psk") allowed-ips 192.168.241.1/32 peer "$pub1" remove
n1 ping -W 1 -c 1 192.168.241.2
n2 wg set wg0 peer "$pub3" remove

# Test that we can route wg through wg
ip1 addr flush dev wg0
ip2 addr flush dev wg0
ip1 addr add fd00::5:1/112 dev wg0
ip2 addr add fd00::5:2/112 dev wg0
n1 wg set wg0 private-key <(echo "$key1") peer "$pub2" preshared-key <(echo "$psk") allowed-ips fd00::5:2/128 endpoint 127.0.0.1:2
n2 wg set wg0 private-key <(echo "$key2") listen-port 2 peer "$pub1" preshared-key <(echo "$psk") allowed-ips fd00::5:1/128 endpoint 127.212.121.99:9998
ip1 link add wg1 type wireguard
ip2 link add wg1 type wireguard
ip1 addr add 192.168.241.1/24 dev wg1
ip1 addr add fd00::1/112 dev wg1
ip2 addr add 192.168.241.2/24 dev wg1
ip2 addr add fd00::2/112 dev wg1
ip1 link set mtu 1340 up dev wg1
ip2 link set mtu 1340 up dev wg1
n1 wg set wg1 listen-port 5 private-key <(echo "$key3") peer "$pub4" allowed-ips 192.168.241.2/32,fd00::2/128 endpoint [fd00::5:2]:5
n2 wg set wg1 listen-port 5 private-key <(echo "$key4") peer "$pub3" allowed-ips 192.168.241.1/32,fd00::1/128 endpoint [fd00::5:1]:5
tests
# Try to set up a routing loop between the two namespaces
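# The idea, as read from the commands below: wg1 is moved into $ns0 and wg0's
# endpoint is pointed at an address that is only reachable back through wg1, so
# an encrypted packet could chase its own tail across namespaces. With $ns2's
# devices deleted nothing is ever delivered; the tx_bytes check verifies that
# the failed pings do not snowball into packets endlessly re-encapsulating
# around the loop.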
ip1 link set netns $netns0 dev wg1
ip0 addr add 192.168.241.1/24 dev wg1
ip0 link set up dev wg1
n0 ping -W 1 -c 1 192.168.241.2
n1 wg set wg0 peer "$pub2" endpoint 192.168.241.2:7
ip2 link del wg0
ip2 link del wg1
read _ _ tx_bytes_before < <(n0 wg show wg1 transfer)
! n0 ping -W 1 -c 10 -f 192.168.241.2 || false
sleep 1
read _ _ tx_bytes_after < <(n0 wg show wg1 transfer)
if ! (( tx_bytes_after - tx_bytes_before < 70000 )); then
	errstart=$'\x1b[37m\x1b[41m\x1b[1m'
	errend=$'\x1b[0m'
	echo "${errstart}                                                ${errend}"
	echo "${errstart}                   E  R  R  O  R                ${errend}"
	echo "${errstart}                                                ${errend}"
	echo "${errstart} This architecture does not do the right thing  ${errend}"
	echo "${errstart} with cross-namespace routing loops. This test  ${errend}"
	echo "${errstart} has thus technically failed but, as this issue ${errend}"
	echo "${errstart} is as yet unsolved, these tests will continue  ${errend}"
	echo "${errstart} onward. :(                                     ${errend}"
	echo "${errstart}                                                ${errend}"
fi

ip0 link del wg1
ip1 link del wg0

# Test using NAT. We now change the topology to this:
# ┌────────────────────────────────────────┐    ┌────────────────────────────────────────────────┐     ┌────────────────────────────────────────┐
# │             $ns1 namespace             │    │                 $ns0 namespace                 │     │             $ns2 namespace             │
# │                                        │    │                                                │     │                                        │
# │  ┌─────┐             ┌─────┐           │    │    ┌──────┐              ┌──────┐              │     │  ┌─────┐            ┌─────┐            │
# │  │ wg0 │─────────────│vethc│───────────┼────┼────│vethrc│              │vethrs│──────────────┼─────┼──│veths│────────────│ wg0 │            │
# │  ├─────┴──────────┐  ├─────┴──────────┐│    │    ├──────┴─────────┐    ├──────┴────────────┐ │     │  ├─────┴──────────┐ ├─────┴──────────┐ │
# │  │192.168.241.1/24│  │192.168.1.100/24││    │    │192.168.1.1/24  │    │10.0.0.1/24        │ │     │  │10.0.0.100/24   │ │192.168.241.2/24│ │
# │  │fd00::1/24      │  │                ││    │    │                │    │SNAT:192.168.1.0/24│ │     │  │                │ │fd00::2/24      │ │
# │  └────────────────┘  └────────────────┘│    │    └────────────────┘    └───────────────────┘ │     │  └────────────────┘ └────────────────┘ │
# └────────────────────────────────────────┘    └────────────────────────────────────────────────┘     └────────────────────────────────────────┘

ip1 link add dev wg0 type wireguard
ip2 link add dev wg0 type wireguard
configure_peers

ip0 link add vethrc type veth peer name vethc
ip0 link add vethrs type veth peer name veths
ip0 link set vethc netns $netns1
ip0 link set veths netns $netns2
ip0 link set vethrc up
ip0 link set vethrs up
ip0 addr add 192.168.1.1/24 dev vethrc
ip0 addr add 10.0.0.1/24 dev vethrs
ip1 addr add 192.168.1.100/24 dev vethc
ip1 link set vethc up
ip1 route add default via 192.168.1.1
ip2 addr add 10.0.0.100/24 dev veths
ip2 link set veths up
waitiface $netns0 vethrc
waitiface $netns0 vethrs
waitiface $netns1 vethc
waitiface $netns2 veths

n0 bash -c 'printf 1 > /proc/sys/net/ipv4/ip_forward'
n0 bash -c 'printf 2 > /proc/sys/net/netfilter/nf_conntrack_udp_timeout'
n0 bash -c 'printf 2 > /proc/sys/net/netfilter/nf_conntrack_udp_timeout_stream'
n0 iptables -t nat -A POSTROUTING -s 192.168.1.0/24 -d 10.0.0.0/24 -j SNAT --to 10.0.0.1

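# The UDP conntrack timeouts were forced down to 2 seconds above, so the
# 1-second persistent keepalive configured below is the only thing keeping the
# SNAT mapping alive; the `pp sleep 3` later shows the mapping survives well
# past the bare timeout.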
n1 wg set wg0 peer "$pub2" endpoint 10.0.0.100:2 persistent-keepalive 1
n1 ping -W 1 -c 1 192.168.241.2
n2 ping -W 1 -c 1 192.168.241.1
[[ $(n2 wg show wg0 endpoints) == "$pub1	10.0.0.1:1" ]]
# Demonstrate n2 can still send packets to n1, since persistent-keepalive will prevent connection tracking entry from expiring (to see entries: `n0 conntrack -L`).
pp sleep 3
n2 ping -W 1 -c 1 192.168.241.1
n1 wg set wg0 peer "$pub2" persistent-keepalive 0

# Test that sk_bound_dev_if works
n1 ping -I wg0 -c 1 -W 1 192.168.241.2
# What about when the mark changes and the packet must be rerouted?
n1 iptables -t mangle -I OUTPUT -j MARK --set-xmark 1
n1 ping -c 1 -W 1 192.168.241.2 # First the boring case
n1 ping -I wg0 -c 1 -W 1 192.168.241.2 # Then the sk_bound_dev_if case
n1 iptables -t mangle -D OUTPUT -j MARK --set-xmark 1

# Test that onion routing works, even when it loops
n1 wg set wg0 peer "$pub3" allowed-ips 192.168.242.2/32 endpoint 192.168.241.2:5
ip1 addr add 192.168.242.1/24 dev wg0
ip2 link add wg1 type wireguard
ip2 addr add 192.168.242.2/24 dev wg1
n2 wg set wg1 private-key <(echo "$key3") listen-port 5 peer "$pub1" allowed-ips 192.168.242.1/32
ip2 link set wg1 up
n1 ping -W 1 -c 1 192.168.242.2
ip2 link del wg1
n1 wg set wg0 peer "$pub3" endpoint 192.168.242.2:5
! n1 ping -W 1 -c 1 192.168.242.2 || false # Should not crash kernel
n1 wg set wg0 peer "$pub3" remove
ip1 addr del 192.168.242.1/24 dev wg0

# Do a wg-quick(8)-style policy routing for the default route, making sure vethc has a v6 address to tease out bugs.
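# This mirrors wg-quick's default-route handling: wg0's own encrypted packets
# carry fwmark 51820 and therefore skip table 51820 (avoiding a routing loop),
# everything unmarked is sent to the wg0 default routes in table 51820, and
# suppress_prefixlength 0 lets more specific routes in the main table (such as
# the veth subnet) still take precedence.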
ip1 -6 addr add fc00::9/96 dev vethc
ip1 -6 route add default via fc00::1
ip2 -4 addr add 192.168.99.7/32 dev wg0
ip2 -6 addr add abab::1111/128 dev wg0
n1 wg set wg0 fwmark 51820 peer "$pub2" allowed-ips 192.168.99.7,abab::1111
ip1 -6 route add default dev wg0 table 51820
ip1 -6 rule add not fwmark 51820 table 51820
ip1 -6 rule add table main suppress_prefixlength 0
ip1 -4 route add default dev wg0 table 51820
ip1 -4 rule add not fwmark 51820 table 51820
ip1 -4 rule add table main suppress_prefixlength 0
n1 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/vethc/rp_filter'
# Flood the pings instead of sending just one, to trigger routing table reference counting bugs.
n1 ping -W 1 -c 100 -f 192.168.99.7
n1 ping -W 1 -c 100 -f abab::1111

# Have ns2 NAT packets from ns0 into wg0, but return an ICMP error along the right route.
n2 iptables -t nat -A POSTROUTING -s 10.0.0.0/24 -d 192.168.241.0/24 -j SNAT --to 192.168.241.2
n0 iptables -t filter -A INPUT \! -s 10.0.0.0/24 -i vethrs -j DROP # Manual rpfilter just to be explicit.
n2 bash -c 'printf 1 > /proc/sys/net/ipv4/ip_forward'
ip0 -4 route add 192.168.241.1 via 10.0.0.100
n2 wg set wg0 peer "$pub1" remove
[[ $(! n0 ping -W 1 -c 1 192.168.241.1 || false) == *"From 10.0.0.100 icmp_seq=1 Destination Host Unreachable"* ]]

n0 iptables -t nat -F
n0 iptables -t filter -F
n2 iptables -t nat -F
ip0 link del vethrc
ip0 link del vethrs
ip1 link del wg0
ip2 link del wg0

# Test that saddr routing is sticky but not too sticky, changing to this topology:
# ┌────────────────────────────────────────┐    ┌────────────────────────────────────────┐
# │             $ns1 namespace             │    │             $ns2 namespace             │
# │                                        │    │                                        │
# │  ┌─────┐             ┌─────┐           │    │  ┌─────┐            ┌─────┐            │
# │  │ wg0 │─────────────│veth1│───────────┼────┼──│veth2│────────────│ wg0 │            │
# │  ├─────┴──────────┐  ├─────┴──────────┐│    │  ├─────┴──────────┐ ├─────┴──────────┐ │
# │  │192.168.241.1/24│  │10.0.0.1/24     ││    │  │10.0.0.2/24     │ │192.168.241.2/24│ │
# │  │fd00::1/24      │  │fd00:aa::1/96   ││    │  │fd00:aa::2/96   │ │fd00::2/24      │ │
# │  └────────────────┘  └────────────────┘│    │  └────────────────┘ └────────────────┘ │
# └────────────────────────────────────────┘    └────────────────────────────────────────┘

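# "Sticky" refers to the source address wg remembers per peer (as exercised by
# the checks below): it should not flap while unrelated addresses come and go,
# it must be abandoned when the address itself disappears, and replies should
# leave from whichever local address the peer most recently used to reach us.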
ip1 link add dev wg0 type wireguard
ip2 link add dev wg0 type wireguard
configure_peers
ip1 link add veth1 type veth peer name veth2
ip1 link set veth2 netns $netns2
n1 bash -c 'printf 0 > /proc/sys/net/ipv6/conf/all/accept_dad'
n2 bash -c 'printf 0 > /proc/sys/net/ipv6/conf/all/accept_dad'
n1 bash -c 'printf 0 > /proc/sys/net/ipv6/conf/veth1/accept_dad'
n2 bash -c 'printf 0 > /proc/sys/net/ipv6/conf/veth2/accept_dad'
n1 bash -c 'printf 1 > /proc/sys/net/ipv4/conf/veth1/promote_secondaries'

# First we check that we aren't overly sticky and can fail over to new IPs when old ones are removed
ip1 addr add 10.0.0.1/24 dev veth1
ip1 addr add fd00:aa::1/96 dev veth1
ip2 addr add 10.0.0.2/24 dev veth2
ip2 addr add fd00:aa::2/96 dev veth2
ip1 link set veth1 up
ip2 link set veth2 up
waitiface $netns1 veth1
waitiface $netns2 veth2
n1 wg set wg0 peer "$pub2" endpoint 10.0.0.2:2
n1 ping -W 1 -c 1 192.168.241.2
ip1 addr add 10.0.0.10/24 dev veth1
ip1 addr del 10.0.0.1/24 dev veth1
n1 ping -W 1 -c 1 192.168.241.2
n1 wg set wg0 peer "$pub2" endpoint [fd00:aa::2]:2
n1 ping -W 1 -c 1 192.168.241.2
ip1 addr add fd00:aa::10/96 dev veth1
ip1 addr del fd00:aa::1/96 dev veth1
n1 ping -W 1 -c 1 192.168.241.2

# Now we show that we can successfully do reply-to-sender routing
ip1 link set veth1 down
ip2 link set veth2 down
ip1 addr flush dev veth1
ip2 addr flush dev veth2
ip1 addr add 10.0.0.1/24 dev veth1
ip1 addr add 10.0.0.2/24 dev veth1
ip1 addr add fd00:aa::1/96 dev veth1
ip1 addr add fd00:aa::2/96 dev veth1
ip2 addr add 10.0.0.3/24 dev veth2
ip2 addr add fd00:aa::3/96 dev veth2
ip1 link set veth1 up
ip2 link set veth2 up
waitiface $netns1 veth1
waitiface $netns2 veth2
n2 wg set wg0 peer "$pub1" endpoint 10.0.0.1:1
n2 ping -W 1 -c 1 192.168.241.1
[[ $(n2 wg show wg0 endpoints) == "$pub1	10.0.0.1:1" ]]
n2 wg set wg0 peer "$pub1" endpoint [fd00:aa::1]:1
n2 ping -W 1 -c 1 192.168.241.1
[[ $(n2 wg show wg0 endpoints) == "$pub1	[fd00:aa::1]:1" ]]
n2 wg set wg0 peer "$pub1" endpoint 10.0.0.2:1
n2 ping -W 1 -c 1 192.168.241.1
[[ $(n2 wg show wg0 endpoints) == "$pub1	10.0.0.2:1" ]]
n2 wg set wg0 peer "$pub1" endpoint [fd00:aa::2]:1
n2 ping -W 1 -c 1 192.168.241.1
[[ $(n2 wg show wg0 endpoints) == "$pub1	[fd00:aa::2]:1" ]]

# What happens if the inbound destination address belongs to a different interface than the default route?
ip1 link add dummy0 type dummy
ip1 addr add 10.50.0.1/24 dev dummy0
ip1 link set dummy0 up
ip2 route add 10.50.0.0/24 dev veth2
n2 wg set wg0 peer "$pub1" endpoint 10.50.0.1:1
n2 ping -W 1 -c 1 192.168.241.1
[[ $(n2 wg show wg0 endpoints) == "$pub1	10.50.0.1:1" ]]

ip1 link del dummy0
ip1 addr flush dev veth1
ip2 addr flush dev veth2
ip1 route flush dev veth1
ip2 route flush dev veth2

# Now we see what happens if another interface route takes precedence over an ongoing one
ip1 link add veth3 type veth peer name veth4
ip1 link set veth4 netns $netns2
ip1 addr add 10.0.0.1/24 dev veth1
ip2 addr add 10.0.0.2/24 dev veth2
ip1 addr add 10.0.0.3/24 dev veth3
ip1 link set veth1 up
ip2 link set veth2 up
ip1 link set veth3 up
ip2 link set veth4 up
waitiface $netns1 veth1
waitiface $netns2 veth2
waitiface $netns1 veth3
waitiface $netns2 veth4
ip1 route flush dev veth1
ip1 route flush dev veth3
ip1 route add 10.0.0.0/24 dev veth1 src 10.0.0.1 metric 2
n1 wg set wg0 peer "$pub2" endpoint 10.0.0.2:2
n1 ping -W 1 -c 1 192.168.241.2
[[ $(n2 wg show wg0 endpoints) == "$pub1	10.0.0.1:1" ]]
ip1 route add 10.0.0.0/24 dev veth3 src 10.0.0.3 metric 1
n1 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/veth1/rp_filter'
n2 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/veth4/rp_filter'
n1 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/all/rp_filter'
n2 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/all/rp_filter'
n1 ping -W 1 -c 1 192.168.241.2
[[ $(n2 wg show wg0 endpoints) == "$pub1	10.0.0.3:1" ]]

ip1 link del dev veth3
ip1 link del dev wg0
ip2 link del dev wg0

# Make sure persistent keepalives are sent when an adapter comes up
ip1 link add dev wg0 type wireguard
n1 wg set wg0 private-key <(echo "$key1") peer "$pub2" endpoint 10.0.0.1:1 persistent-keepalive 1
read _ _ tx_bytes < <(n1 wg show wg0 transfer)
[[ $tx_bytes -eq 0 ]]
ip1 link set dev wg0 up
read _ _ tx_bytes < <(n1 wg show wg0 transfer)
[[ $tx_bytes -gt 0 ]]
ip1 link del dev wg0
# This should also happen even if the private key is set later
ip1 link add dev wg0 type wireguard
n1 wg set wg0 peer "$pub2" endpoint 10.0.0.1:1 persistent-keepalive 1
read _ _ tx_bytes < <(n1 wg show wg0 transfer)
[[ $tx_bytes -eq 0 ]]
ip1 link set dev wg0 up
read _ _ tx_bytes < <(n1 wg show wg0 transfer)
[[ $tx_bytes -eq 0 ]]
n1 wg set wg0 private-key <(echo "$key1")
read _ _ tx_bytes < <(n1 wg show wg0 transfer)
[[ $tx_bytes -gt 0 ]]
ip1 link del dev veth1
ip1 link del dev wg0

# We test that Netlink/IPC is working properly by doing things that usually cause split responses
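# A configuration with well over a hundred thousand allowed IPs (or dozens of
# peers) cannot fit in a single netlink message, so the kernel has to split its
# dump replies; the counting loops below verify that nothing is lost or
# duplicated at the message boundaries.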
ip0 link add dev wg0 type wireguard
config=( "[Interface]" "PrivateKey=$(wg genkey)" "[Peer]" "PublicKey=$(wg genkey)" )
for a in {1..255}; do
	for b in {0..255}; do
		config+=( "AllowedIPs=$a.$b.0.0/16,$a::$b/128" )
	done
done
n0 wg setconf wg0 <(printf '%s\n' "${config[@]}")
i=0
for ip in $(n0 wg show wg0 allowed-ips); do
	((++i))
done
((i == 255*256*2+1))
ip0 link del wg0
ip0 link add dev wg0 type wireguard
config=( "[Interface]" "PrivateKey=$(wg genkey)" )
for a in {1..40}; do
	config+=( "[Peer]" "PublicKey=$(wg genkey)" )
	for b in {1..52}; do
		config+=( "AllowedIPs=$a.$b.0.0/16" )
	done
done
n0 wg setconf wg0 <(printf '%s\n' "${config[@]}")
i=0
while read -r line; do
	j=0
	for ip in $line; do
		((++j))
	done
	((j == 53))
	((++i))
done < <(n0 wg show wg0 allowed-ips)
((i == 40))
ip0 link del wg0
ip0 link add wg0 type wireguard
config=( )
for i in {1..29}; do
	config+=( "[Peer]" "PublicKey=$(wg genkey)" )
done
config+=( "[Peer]" "PublicKey=$(wg genkey)" "AllowedIPs=255.2.3.4/32,abcd::255/128" )
n0 wg setconf wg0 <(printf '%s\n' "${config[@]}")
n0 wg showconf wg0 > /dev/null
ip0 link del wg0

allowedips=( )
for i in {1..197}; do
	allowedips+=( abcd::$i )
done
saved_ifs="$IFS"
IFS=,
allowedips="${allowedips[*]}"
IFS="$saved_ifs"
ip0 link add wg0 type wireguard
n0 wg set wg0 peer "$pub1"
n0 wg set wg0 peer "$pub2" allowed-ips "$allowedips"
{
	read -r pub allowedips
	[[ $pub == "$pub1" && $allowedips == "(none)" ]]
	read -r pub allowedips
	[[ $pub == "$pub2" ]]
	i=0
	for _ in $allowedips; do
		((++i))
	done
	((i == 197))
} < <(n0 wg show wg0 allowed-ips)
ip0 link del wg0

! n0 wg show doesnotexist || false

ip0 link add wg0 type wireguard
n0 wg set wg0 private-key <(echo "$key1") peer "$pub2" preshared-key <(echo "$psk")
[[ $(n0 wg show wg0 private-key) == "$key1" ]]
[[ $(n0 wg show wg0 preshared-keys) == "$pub2	$psk" ]]
n0 wg set wg0 private-key /dev/null peer "$pub2" preshared-key /dev/null
[[ $(n0 wg show wg0 private-key) == "(none)" ]]
[[ $(n0 wg show wg0 preshared-keys) == "$pub2	(none)" ]]
n0 wg set wg0 peer "$pub2"
n0 wg set wg0 private-key <(echo "$key2")
[[ $(n0 wg show wg0 public-key) == "$pub2" ]]
[[ -z $(n0 wg show wg0 peers) ]]
n0 wg set wg0 peer "$pub2"
[[ -z $(n0 wg show wg0 peers) ]]
n0 wg set wg0 private-key <(echo "$key1")
n0 wg set wg0 peer "$pub2"
[[ $(n0 wg show wg0 peers) == "$pub2" ]]
n0 wg set wg0 private-key <(echo "/${key1:1}")
[[ $(n0 wg show wg0 private-key) == "+${key1:1}" ]]
n0 wg set wg0 peer "$pub2" allowed-ips 0.0.0.0/0,10.0.0.0/8,100.0.0.0/10,172.16.0.0/12,192.168.0.0/16
n0 wg set wg0 peer "$pub2" allowed-ips 0.0.0.0/0
n0 wg set wg0 peer "$pub2" allowed-ips ::/0,1700::/111,5000::/4,e000::/37,9000::/75
n0 wg set wg0 peer "$pub2" allowed-ips ::/0
n0 wg set wg0 peer "$pub2" remove
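# The base64 blobs below appear to be the Curve25519 low-order points. Setting
# them as peer public keys must be accepted by the configuration interface, but
# no valid handshake can ever be computed with them, so bringing wg0 up must
# not cause a single packet to reach the listener (which is what the timed read
# on fd 4 checks).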
for low_order_point in AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= 4Ot6fDtBuK4WVuP68Z/EatoJjeucMrH9hmIFFl9JuAA= X5yVvKNQjCSx0LFVnIPvWwREXMRYHI6G2CJO3dCfEVc= 7P///////////////////////////////////////38= 7f///////////////////////////////////////38= 7v///////////////////////////////////////38=; do
	n0 wg set wg0 peer "$low_order_point" persistent-keepalive 1 endpoint 127.0.0.1:1111
done
[[ -n $(n0 wg show wg0 peers) ]]
exec 4< <(n0 ncat -l -u -p 1111)
ncat_pid=$!
waitncatudp $netns0 $ncat_pid
ip0 link set wg0 up
! read -r -n 1 -t 2 <&4 || false
kill $ncat_pid
ip0 link del wg0

# Ensure that dst_cache references don't outlive netns lifetime
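# WireGuard caches the route to each peer's endpoint (dst_cache); traffic is
# pushed across the veth pair and then both namespaces are deleted right away,
# so a cached route holding a reference past the netns teardown would show up
# here.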
ip1 link add dev wg0 type wireguard
ip2 link add dev wg0 type wireguard
configure_peers
ip1 link add veth1 type veth peer name veth2
ip1 link set veth2 netns $netns2
ip1 addr add fd00:aa::1/64 dev veth1
ip2 addr add fd00:aa::2/64 dev veth2
ip1 link set veth1 up
ip2 link set veth2 up
waitiface $netns1 veth1
waitiface $netns2 veth2
ip1 -6 route add default dev veth1 via fd00:aa::2
ip2 -6 route add default dev veth2 via fd00:aa::1
n1 wg set wg0 peer "$pub2" endpoint [fd00:aa::2]:2
n2 wg set wg0 peer "$pub1" endpoint [fd00:aa::1]:1
n1 ping6 -c 1 fd00::2
pp ip netns delete $netns1
pp ip netns delete $netns2
pp ip netns add $netns1
pp ip netns add $netns2

# Ensure there aren't circular reference loops
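# Each namespace ends up holding the other's device (wg1 created in $ns1 is
# moved to $ns2 and wg2 goes the opposite way), so namespace teardown must cope
# with the cross-reference without leaking or deadlocking.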
ip1 link add wg1 type wireguard
ip2 link add wg2 type wireguard
ip1 link set wg1 netns $netns2
ip2 link set wg2 netns $netns1
pp ip netns delete $netns1
pp ip netns delete $netns2
pp ip netns add $netns1
pp ip netns add $netns2

sleep 2 # Wait for cleanup and grace periods
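# The regex below picks kmsg lines of the form "wgN: <Object> [<number>] ...
# created/destroyed"; every object that was created must also have been
# destroyed. The doubled created/destroyed spelling is tolerated, presumably
# because a device name can legitimately be created twice during the run.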
declare -A objects
while read -t 0.1 -r line 2>/dev/null || [[ $? -ne 142 ]]; do
	[[ $line =~ .*(wg[0-9]+:\ [A-Z][a-z]+\ ?[0-9]*)\ .*(created|destroyed).* ]] || continue
	objects["${BASH_REMATCH[1]}"]+="${BASH_REMATCH[2]}"
done < /dev/kmsg
alldeleted=1
for object in "${!objects[@]}"; do
	if [[ ${objects["$object"]} != *createddestroyed && ${objects["$object"]} != *createdcreateddestroyeddestroyed ]]; then
		echo "Error: $object: merely ${objects["$object"]}" >&3
		alldeleted=0
	fi
done
[[ $alldeleted -eq 1 ]]
pretty "" "Objects that were created were also destroyed."