#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
#
# Setup/topology:
#
#    NS1             NS2             NS3
#   veth1 <---> veth2   veth3 <---> veth4 (the top route)
#   veth5 <---> veth6   veth7 <---> veth8 (the bottom route)
#
#   each vethN gets IPv[4|6]_N address
#
#   IPv*_SRC = IPv*_1
#   IPv*_DST = IPv*_4
#
#   all tests ping from IPv*_SRC to IPv*_DST
#
#   by default, routes are configured to allow packets to go
#   IP*_1 <=> IP*_2 <=> IP*_3 <=> IP*_4 (the top route)
#
#   a GRE device is installed in NS3 with IPv*_GRE, and
#   NS1/NS2 are configured to route packets to IPv*_GRE via IP*_8
#   (the bottom route)
#
# Tests:
#
#   1. routes NS2->IPv*_DST are brought down, so the only way a ping
#      from IP*_SRC to IP*_DST can work is via IPv*_GRE
#
#   2a. in an egress test, a bpf LWT_XMIT program is installed on veth1
#       that encaps the packets with an IP/GRE header to route to IPv*_GRE
#
#       ping: SRC->[encap at veth1:egress]->GRE:decap->DST
#       ping replies go DST->SRC directly
#
#   2b. in an ingress test, a bpf LWT_IN program is installed on veth2
#       that encaps the packets with an IP/GRE header to route to IPv*_GRE
#
#       ping: SRC->[encap at veth2:ingress]->GRE:decap->DST
#       ping replies go DST->SRC directly
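#
# For reference, the replacement routes installed by the tests below use
# iproute2's LWT BPF encap support and take roughly this form (section
# names refer to programs in test_lwt_ip_encap.o):
#
#   ip route add <IP*_DST> encap bpf xmit obj test_lwt_ip_encap.o \
#       sec encap_gre dev veth1     # egress (LWT_XMIT) in NS1
#   ip route add <IP*_DST> encap bpf in obj test_lwt_ip_encap.o \
#       sec encap_gre dev veth2     # ingress (LWT_IN) in NS2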

if [[ $EUID -ne 0 ]]; then
	echo "This script must be run as root"
	echo "FAIL"
	exit 1
fi

readonly NS1="ns1-$(mktemp -u XXXXXX)"
readonly NS2="ns2-$(mktemp -u XXXXXX)"
readonly NS3="ns3-$(mktemp -u XXXXXX)"

readonly IPv4_1="172.16.1.100"
readonly IPv4_2="172.16.2.100"
readonly IPv4_3="172.16.3.100"
readonly IPv4_4="172.16.4.100"
readonly IPv4_5="172.16.5.100"
readonly IPv4_6="172.16.6.100"
readonly IPv4_7="172.16.7.100"
readonly IPv4_8="172.16.8.100"
readonly IPv4_GRE="172.16.16.100"

readonly IPv4_SRC=$IPv4_1
readonly IPv4_DST=$IPv4_4

readonly IPv6_1="fb01::1"
readonly IPv6_2="fb02::1"
readonly IPv6_3="fb03::1"
readonly IPv6_4="fb04::1"
readonly IPv6_5="fb05::1"
readonly IPv6_6="fb06::1"
readonly IPv6_7="fb07::1"
readonly IPv6_8="fb08::1"
readonly IPv6_GRE="fb10::1"

readonly IPv6_SRC=$IPv6_1
readonly IPv6_DST=$IPv6_4

TEST_STATUS=0
TESTS_SUCCEEDED=0
TESTS_FAILED=0

TMPFILE=""

process_test_results()
{
	if [[ "${TEST_STATUS}" -eq 0 ]] ; then
		echo "PASS"
		TESTS_SUCCEEDED=$((TESTS_SUCCEEDED+1))
	else
		echo "FAIL"
		TESTS_FAILED=$((TESTS_FAILED+1))
	fi
}

print_test_summary_and_exit()
{
	echo "passed tests: ${TESTS_SUCCEEDED}"
	echo "failed tests: ${TESTS_FAILED}"
	if [ "${TESTS_FAILED}" -eq "0" ] ; then
		exit 0
	else
		exit 1
	fi
}

setup()
{
	set -e  # exit on error
	TEST_STATUS=0

	# create devices and namespaces
	ip netns add "${NS1}"
	ip netns add "${NS2}"
	ip netns add "${NS3}"

	# rp_filter gets confused by what these tests are doing, so disable it
	ip netns exec ${NS1} sysctl -wq net.ipv4.conf.all.rp_filter=0
	ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0
	ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0
	ip netns exec ${NS1} sysctl -wq net.ipv4.conf.default.rp_filter=0
	ip netns exec ${NS2} sysctl -wq net.ipv4.conf.default.rp_filter=0
	ip netns exec ${NS3} sysctl -wq net.ipv4.conf.default.rp_filter=0

	# disable IPv6 DAD because it sometimes takes too long and fails tests
	ip netns exec ${NS1} sysctl -wq net.ipv6.conf.all.accept_dad=0
	ip netns exec ${NS2} sysctl -wq net.ipv6.conf.all.accept_dad=0
	ip netns exec ${NS3} sysctl -wq net.ipv6.conf.all.accept_dad=0
	ip netns exec ${NS1} sysctl -wq net.ipv6.conf.default.accept_dad=0
	ip netns exec ${NS2} sysctl -wq net.ipv6.conf.default.accept_dad=0
	ip netns exec ${NS3} sysctl -wq net.ipv6.conf.default.accept_dad=0

	ip link add veth1 type veth peer name veth2
	ip link add veth3 type veth peer name veth4
	ip link add veth5 type veth peer name veth6
	ip link add veth7 type veth peer name veth8

	ip netns exec ${NS2} sysctl -wq net.ipv4.ip_forward=1
	ip netns exec ${NS2} sysctl -wq net.ipv6.conf.all.forwarding=1

	ip link set veth1 netns ${NS1}
	ip link set veth2 netns ${NS2}
	ip link set veth3 netns ${NS2}
	ip link set veth4 netns ${NS3}
	ip link set veth5 netns ${NS1}
	ip link set veth6 netns ${NS2}
	ip link set veth7 netns ${NS2}
	ip link set veth8 netns ${NS3}

	if [ ! -z "${VRF}" ] ; then
		ip -netns ${NS1} link add red type vrf table 1001
		ip -netns ${NS1} link set red up
		ip -netns ${NS1} route add table 1001 unreachable default metric 8192
		ip -netns ${NS1} -6 route add table 1001 unreachable default metric 8192
		ip -netns ${NS1} link set veth1 vrf red
		ip -netns ${NS1} link set veth5 vrf red

		ip -netns ${NS2} link add red type vrf table 1001
		ip -netns ${NS2} link set red up
		ip -netns ${NS2} route add table 1001 unreachable default metric 8192
		ip -netns ${NS2} -6 route add table 1001 unreachable default metric 8192
		ip -netns ${NS2} link set veth2 vrf red
		ip -netns ${NS2} link set veth3 vrf red
		ip -netns ${NS2} link set veth6 vrf red
		ip -netns ${NS2} link set veth7 vrf red
	fi

	# configure addresses: the top route (1-2-3-4)
	ip -netns ${NS1}    addr add ${IPv4_1}/24  dev veth1
	ip -netns ${NS2}    addr add ${IPv4_2}/24  dev veth2
	ip -netns ${NS2}    addr add ${IPv4_3}/24  dev veth3
	ip -netns ${NS3}    addr add ${IPv4_4}/24  dev veth4
	ip -netns ${NS1} -6 addr add ${IPv6_1}/128 nodad dev veth1
	ip -netns ${NS2} -6 addr add ${IPv6_2}/128 nodad dev veth2
	ip -netns ${NS2} -6 addr add ${IPv6_3}/128 nodad dev veth3
	ip -netns ${NS3} -6 addr add ${IPv6_4}/128 nodad dev veth4

	# configure addresses: the bottom route (5-6-7-8)
	ip -netns ${NS1}    addr add ${IPv4_5}/24  dev veth5
	ip -netns ${NS2}    addr add ${IPv4_6}/24  dev veth6
	ip -netns ${NS2}    addr add ${IPv4_7}/24  dev veth7
	ip -netns ${NS3}    addr add ${IPv4_8}/24  dev veth8
	ip -netns ${NS1} -6 addr add ${IPv6_5}/128 nodad dev veth5
	ip -netns ${NS2} -6 addr add ${IPv6_6}/128 nodad dev veth6
	ip -netns ${NS2} -6 addr add ${IPv6_7}/128 nodad dev veth7
	ip -netns ${NS3} -6 addr add ${IPv6_8}/128 nodad dev veth8

	ip -netns ${NS1} link set dev veth1 up
	ip -netns ${NS2} link set dev veth2 up
	ip -netns ${NS2} link set dev veth3 up
	ip -netns ${NS3} link set dev veth4 up
	ip -netns ${NS1} link set dev veth5 up
	ip -netns ${NS2} link set dev veth6 up
	ip -netns ${NS2} link set dev veth7 up
	ip -netns ${NS3} link set dev veth8 up

	# configure routes: IP*_SRC -> veth1/IP*_2 (= top route) default;
	# the bottom route to specific bottom addresses

	# NS1
	# top route
	ip -netns ${NS1}    route add ${IPv4_2}/32  dev veth1 ${VRF}
	ip -netns ${NS1}    route add default dev veth1 via ${IPv4_2} ${VRF}  # go top by default
	ip -netns ${NS1} -6 route add ${IPv6_2}/128 dev veth1 ${VRF}
	ip -netns ${NS1} -6 route add default dev veth1 via ${IPv6_2} ${VRF}  # go top by default
	# bottom route
	ip -netns ${NS1}    route add ${IPv4_6}/32  dev veth5 ${VRF}
	ip -netns ${NS1}    route add ${IPv4_7}/32  dev veth5 via ${IPv4_6} ${VRF}
	ip -netns ${NS1}    route add ${IPv4_8}/32  dev veth5 via ${IPv4_6} ${VRF}
	ip -netns ${NS1} -6 route add ${IPv6_6}/128 dev veth5 ${VRF}
	ip -netns ${NS1} -6 route add ${IPv6_7}/128 dev veth5 via ${IPv6_6} ${VRF}
	ip -netns ${NS1} -6 route add ${IPv6_8}/128 dev veth5 via ${IPv6_6} ${VRF}

	# NS2
	# top route
	ip -netns ${NS2}    route add ${IPv4_1}/32  dev veth2 ${VRF}
	ip -netns ${NS2}    route add ${IPv4_4}/32  dev veth3 ${VRF}
	ip -netns ${NS2} -6 route add ${IPv6_1}/128 dev veth2 ${VRF}
	ip -netns ${NS2} -6 route add ${IPv6_4}/128 dev veth3 ${VRF}
	# bottom route
	ip -netns ${NS2}    route add ${IPv4_5}/32  dev veth6 ${VRF}
	ip -netns ${NS2}    route add ${IPv4_8}/32  dev veth7 ${VRF}
	ip -netns ${NS2} -6 route add ${IPv6_5}/128 dev veth6 ${VRF}
	ip -netns ${NS2} -6 route add ${IPv6_8}/128 dev veth7 ${VRF}

	# NS3
	# top route
	ip -netns ${NS3}    route add ${IPv4_3}/32  dev veth4
	ip -netns ${NS3}    route add ${IPv4_1}/32  dev veth4 via ${IPv4_3}
	ip -netns ${NS3}    route add ${IPv4_2}/32  dev veth4 via ${IPv4_3}
	ip -netns ${NS3} -6 route add ${IPv6_3}/128 dev veth4
	ip -netns ${NS3} -6 route add ${IPv6_1}/128 dev veth4 via ${IPv6_3}
	ip -netns ${NS3} -6 route add ${IPv6_2}/128 dev veth4 via ${IPv6_3}
	# bottom route
	ip -netns ${NS3}    route add ${IPv4_7}/32  dev veth8
	ip -netns ${NS3}    route add ${IPv4_5}/32  dev veth8 via ${IPv4_7}
	ip -netns ${NS3}    route add ${IPv4_6}/32  dev veth8 via ${IPv4_7}
	ip -netns ${NS3} -6 route add ${IPv6_7}/128 dev veth8
	ip -netns ${NS3} -6 route add ${IPv6_5}/128 dev veth8 via ${IPv6_7}
	ip -netns ${NS3} -6 route add ${IPv6_6}/128 dev veth8 via ${IPv6_7}

	# configure IPv4 GRE device in NS3, and a route to it via the "bottom" route
	ip -netns ${NS3} tunnel add gre_dev mode gre remote ${IPv4_1} local ${IPv4_GRE} ttl 255
	ip -netns ${NS3} link set gre_dev up
	ip -netns ${NS3} addr add ${IPv4_GRE} dev gre_dev
	ip -netns ${NS1} route add ${IPv4_GRE}/32 dev veth5 via ${IPv4_6} ${VRF}
	ip -netns ${NS2} route add ${IPv4_GRE}/32 dev veth7 via ${IPv4_8} ${VRF}

	# configure IPv6 GRE device in NS3, and a route to it via the "bottom" route
	ip -netns ${NS3} -6 tunnel add name gre6_dev mode ip6gre remote ${IPv6_1} local ${IPv6_GRE} ttl 255
	ip -netns ${NS3} link set gre6_dev up
	ip -netns ${NS3} -6 addr add ${IPv6_GRE} nodad dev gre6_dev
	ip -netns ${NS1} -6 route add ${IPv6_GRE}/128 dev veth5 via ${IPv6_6} ${VRF}
	ip -netns ${NS2} -6 route add ${IPv6_GRE}/128 dev veth7 via ${IPv6_8} ${VRF}

	TMPFILE=$(mktemp /tmp/test_lwt_ip_encap.XXXXXX)

	sleep 1  # reduce flakiness
	set +e
}
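
# Optional debugging aid, not invoked by the tests themselves: dump the
# route tables of all three namespaces. The helper name and output format
# are illustrative only.
dump_routes()
{
	local ns
	for ns in "${NS1}" "${NS2}" "${NS3}" ; do
		echo "=== ${ns} IPv4 routes ==="
		ip -netns "${ns}" route show
		echo "=== ${ns} IPv6 routes ==="
		ip -netns "${ns}" -6 route show
	done
}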

cleanup()
{
	if [ -f "${TMPFILE}" ] ; then
		rm "${TMPFILE}"
	fi

	ip netns del ${NS1} 2> /dev/null
	ip netns del ${NS2} 2> /dev/null
	ip netns del ${NS3} 2> /dev/null
}

trap cleanup EXIT

remove_routes_to_gredev()
{
	ip -netns ${NS1} route del ${IPv4_GRE} dev veth5 ${VRF}
	ip -netns ${NS2} route del ${IPv4_GRE} dev veth7 ${VRF}
	ip -netns ${NS1} -6 route del ${IPv6_GRE}/128 dev veth5 ${VRF}
	ip -netns ${NS2} -6 route del ${IPv6_GRE}/128 dev veth7 ${VRF}
}

add_unreachable_routes_to_gredev()
{
	ip -netns ${NS1} route add unreachable ${IPv4_GRE}/32 ${VRF}
	ip -netns ${NS2} route add unreachable ${IPv4_GRE}/32 ${VRF}
	ip -netns ${NS1} -6 route add unreachable ${IPv6_GRE}/128 ${VRF}
	ip -netns ${NS2} -6 route add unreachable ${IPv6_GRE}/128 ${VRF}
}

test_ping()
{
	local -r PROTO=$1
	local -r EXPECTED=$2
	local RET=0

	if [ "${PROTO}" == "IPv4" ] ; then
		ip netns exec ${NS1} ping  -c 1 -W 1 -I veth1 ${IPv4_DST} > /dev/null 2>&1
		RET=$?
	elif [ "${PROTO}" == "IPv6" ] ; then
		ip netns exec ${NS1} ping6 -c 1 -W 1 -I veth1 ${IPv6_DST} > /dev/null 2>&1
		RET=$?
	else
		echo "    test_ping: unknown PROTO: ${PROTO}"
		TEST_STATUS=1
	fi

	if [ "0" != "${RET}" ]; then
		RET=1
	fi

	if [ "${EXPECTED}" != "${RET}" ] ; then
		echo "    test_ping failed: expected: ${EXPECTED}; got ${RET}"
		TEST_STATUS=1
	fi
}

test_gso()
{
	local -r PROTO=$1
	local -r PKT_SZ=5000
	local IP_DST=""
	: > ${TMPFILE}  # truncate the capture file

	# check that nc is present
	command -v nc >/dev/null 2>&1 || \
		{ echo >&2 "nc is not available: skipping GSO tests"; return; }

	# listen on port 9000, capture TCP into $TMPFILE
	if [ "${PROTO}" == "IPv4" ] ; then
		IP_DST=${IPv4_DST}
		ip netns exec ${NS3} bash -c \
			"nc -4 -l -p 9000 > ${TMPFILE} &"
	elif [ "${PROTO}" == "IPv6" ] ; then
		IP_DST=${IPv6_DST}
		ip netns exec ${NS3} bash -c \
			"nc -6 -l -p 9000 > ${TMPFILE} &"
	else
		echo "    test_gso: unknown PROTO: ${PROTO}"
		TEST_STATUS=1
	fi
	sleep 1  # let nc start listening

	# send a packet larger than MTU
	ip netns exec ${NS1} bash -c \
		"dd if=/dev/zero bs=$PKT_SZ count=1 > /dev/tcp/${IP_DST}/9000 2>/dev/null"
	sleep 2 # let the packet get delivered

	# verify we received all expected bytes
	SZ=$(stat -c %s ${TMPFILE})
	if [ "$SZ" != "$PKT_SZ" ] ; then
		echo "    test_gso failed: ${PROTO}"
		TEST_STATUS=1
	fi
}
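
# Whether the transfer in test_gso() actually leaves NS1 as GSO segments
# depends on the offload settings of the veth devices; as a rough manual
# check (not part of the test), one can inspect them with something like:
#   ip netns exec ${NS1} ethtool -k veth1 | grep segmentation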

test_egress()
{
	local -r ENCAP=$1
	echo "starting egress ${ENCAP} encap test ${VRF}"
	setup

	# by default, pings work
	test_ping IPv4 0
	test_ping IPv6 0

	# remove NS2->DST routes, ping fails
	ip -netns ${NS2}    route del ${IPv4_DST}/32  dev veth3 ${VRF}
	ip -netns ${NS2} -6 route del ${IPv6_DST}/128 dev veth3 ${VRF}
	test_ping IPv4 1
	test_ping IPv6 1

	# install replacement routes (LWT/eBPF), pings succeed
	if [ "${ENCAP}" == "IPv4" ] ; then
		ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj \
			test_lwt_ip_encap.o sec encap_gre dev veth1 ${VRF}
		ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj \
			test_lwt_ip_encap.o sec encap_gre dev veth1 ${VRF}
	elif [ "${ENCAP}" == "IPv6" ] ; then
		ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj \
			test_lwt_ip_encap.o sec encap_gre6 dev veth1 ${VRF}
		ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj \
			test_lwt_ip_encap.o sec encap_gre6 dev veth1 ${VRF}
	else
		echo "    unknown encap ${ENCAP}"
		TEST_STATUS=1
	fi
	test_ping IPv4 0
	test_ping IPv6 0

	# skip GSO tests with VRF: VRF routing needs properly assigned
	# source IP/device, which is easy to do with ping and hard with dd/nc.
	if [ -z "${VRF}" ] ; then
		test_gso IPv4
		test_gso IPv6
	fi

	# a negative test: remove routes to GRE devices, ping fails
	remove_routes_to_gredev
	test_ping IPv4 1
	test_ping IPv6 1

	# another negative test
	add_unreachable_routes_to_gredev
	test_ping IPv4 1
	test_ping IPv6 1

	cleanup
	process_test_results
}

test_ingress()
{
	local -r ENCAP=$1
	echo "starting ingress ${ENCAP} encap test ${VRF}"
	setup

	# need to wait a bit for IPv6 to autoconf, otherwise
	# ping6 sometimes fails with "unable to bind to address"

	# by default, pings work
	test_ping IPv4 0
	test_ping IPv6 0

	# remove NS2->DST routes, pings fail
	ip -netns ${NS2}    route del ${IPv4_DST}/32  dev veth3 ${VRF}
	ip -netns ${NS2} -6 route del ${IPv6_DST}/128 dev veth3 ${VRF}
	test_ping IPv4 1
	test_ping IPv6 1

	# install replacement routes (LWT/eBPF), pings succeed
	if [ "${ENCAP}" == "IPv4" ] ; then
		ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj \
			test_lwt_ip_encap.o sec encap_gre dev veth2 ${VRF}
		ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj \
			test_lwt_ip_encap.o sec encap_gre dev veth2 ${VRF}
	elif [ "${ENCAP}" == "IPv6" ] ; then
		ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj \
			test_lwt_ip_encap.o sec encap_gre6 dev veth2 ${VRF}
		ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj \
			test_lwt_ip_encap.o sec encap_gre6 dev veth2 ${VRF}
	else
		echo "FAIL: unknown encap ${ENCAP}"
		TEST_STATUS=1
	fi
	test_ping IPv4 0
	test_ping IPv6 0

	# a negative test: remove routes to GRE devices, ping fails
	remove_routes_to_gredev
	test_ping IPv4 1
	test_ping IPv6 1

	# another negative test
	add_unreachable_routes_to_gredev
	test_ping IPv4 1
	test_ping IPv6 1

	cleanup
	process_test_results
}

VRF=""
test_egress IPv4
test_egress IPv6
test_ingress IPv4
test_ingress IPv6

VRF="vrf red"
test_egress IPv4
test_egress IPv6
test_ingress IPv4
test_ingress IPv6

print_test_summary_and_exit