// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <error.h>
#include <linux/if.h>
#include <linux/if_tun.h>
#include <sys/uio.h>

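/* Compare a dissected bpf_flow_keys against the expected value and, on
 * mismatch, dump every field as got/expected pairs.
 */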
#define CHECK_FLOW_KEYS(desc, got, expected)				\
	CHECK_ATTR(memcmp(&got, &expected, sizeof(got)) != 0,		\
	      desc,							\
	      "nhoff=%u/%u "						\
	      "thoff=%u/%u "						\
	      "addr_proto=0x%x/0x%x "					\
	      "is_frag=%u/%u "						\
	      "is_first_frag=%u/%u "					\
	      "is_encap=%u/%u "						\
	      "ip_proto=0x%x/0x%x "					\
	      "n_proto=0x%x/0x%x "					\
	      "sport=%u/%u "						\
	      "dport=%u/%u\n",						\
	      got.nhoff, expected.nhoff,				\
	      got.thoff, expected.thoff,				\
	      got.addr_proto, expected.addr_proto,			\
	      got.is_frag, expected.is_frag,				\
	      got.is_first_frag, expected.is_first_frag,		\
	      got.is_encap, expected.is_encap,				\
	      got.ip_proto, expected.ip_proto,				\
	      got.n_proto, expected.n_proto,				\
	      got.sport, expected.sport,				\
	      got.dport, expected.dport)

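/* Synthetic test packets: an Ethernet header, optionally followed by one or
 * two VLAN tags, then an IPv4/IPv6 header and a TCP header. __packed keeps
 * the on-wire layout.
 */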
struct ipv4_pkt {
	struct ethhdr eth;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed;

struct svlan_ipv4_pkt {
	struct ethhdr eth;
	__u16 vlan_tci;
	__u16 vlan_proto;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed;

struct ipv6_pkt {
	struct ethhdr eth;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed;

struct dvlan_ipv6_pkt {
	struct ethhdr eth;
	__u16 vlan_tci;
	__u16 vlan_proto;
	__u16 vlan_tci2;
	__u16 vlan_proto2;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed;

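/* A test case pairs an input packet with the bpf_flow_keys the dissector is
 * expected to produce for it.
 */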
struct test {
	const char *name;
	union {
		struct ipv4_pkt ipv4;
		struct svlan_ipv4_pkt svlan_ipv4;
		struct ipv6_pkt ipv6;
		struct dvlan_ipv6_pkt dvlan_ipv6;
	} pkt;
	struct bpf_flow_keys keys;
};

#define VLAN_HLEN	4

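/* TCP over plain IPv4/IPv6 plus single-tagged (802.1Q) and double-tagged
 * (802.1AD) variants; the expected offsets in .keys account for the VLAN
 * headers.
 */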
struct test tests[] = {
	{
		.name = "ipv4",
		.pkt.ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
		},
	},
	{
		.name = "ipv6",
		.pkt.ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
		},
	},
	{
		.name = "802.1q-ipv4",
		.pkt.svlan_ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_8021Q),
			.vlan_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
		},
		.keys = {
			.nhoff = ETH_HLEN + VLAN_HLEN,
			.thoff = ETH_HLEN + VLAN_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
		},
	},
	{
		.name = "802.1ad-ipv6",
		.pkt.dvlan_ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_8021AD),
			.vlan_proto = __bpf_constant_htons(ETH_P_8021Q),
			.vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
		},
		.keys = {
			.nhoff = ETH_HLEN + VLAN_HLEN * 2,
			.thoff = ETH_HLEN + VLAN_HLEN * 2 +
				sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
		},
	},
};

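/* Open /dev/net/tun and create a TAP device whose NAPI frags receive path
 * ends up calling eth_get_headlen(), i.e. the skb-less flow dissector.
 */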
static int create_tap(const char *ifname)
{
	struct ifreq ifr = {
		.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NAPI | IFF_NAPI_FRAGS,
	};
	int fd, ret;

	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));

	fd = open("/dev/net/tun", O_RDWR);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, TUNSETIFF, &ifr);
	if (ret) {
		close(fd);
		return -1;
	}

	return fd;
}

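/* Inject one raw packet into the TAP device. */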
static int tx_tap(int fd, void *pkt, size_t len)
{
	struct iovec iov[] = {
		{
			.iov_len = len,
			.iov_base = pkt,
		},
	};
	return writev(fd, iov, ARRAY_SIZE(iov));
}

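/* Bring the interface up via SIOCGIFFLAGS/SIOCSIFFLAGS. */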
static int ifup(const char *ifname)
{
	struct ifreq ifr = {};
	int sk, ret;

	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));

	sk = socket(PF_INET, SOCK_DGRAM, 0);
	if (sk < 0)
		return -1;

	ret = ioctl(sk, SIOCGIFFLAGS, &ifr);
	if (ret) {
		close(sk);
		return -1;
	}

	ifr.ifr_flags |= IFF_UP;
	ret = ioctl(sk, SIOCSIFFLAGS, &ifr);
	if (ret) {
		close(sk);
		return -1;
	}

	close(sk);
	return 0;
}

void test_flow_dissector(void)
{
	int i, err, prog_fd, keys_fd = -1, tap_fd;
	struct bpf_object *obj;
	__u32 duration = 0;

	err = bpf_flow_load(&obj, "./bpf_flow.o", "flow_dissector",
			    "jmp_table", "last_dissection", &prog_fd, &keys_fd);
	if (err) {
		error_cnt++;
		return;
	}

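	/* First pass: run the dissector on each packet via
	 * BPF_PROG_TEST_RUN and compare the returned flow keys.
	 */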
	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		struct bpf_flow_keys flow_keys;
		struct bpf_prog_test_run_attr tattr = {
			.prog_fd = prog_fd,
			.data_in = &tests[i].pkt,
			.data_size_in = sizeof(tests[i].pkt),
			.data_out = &flow_keys,
		};

		err = bpf_prog_test_run_xattr(&tattr);
		CHECK_ATTR(tattr.data_size_out != sizeof(flow_keys) ||
			   err || tattr.retval != 1,
			   tests[i].name,
			   "err %d errno %d retval %d duration %d size %u/%lu\n",
			   err, errno, tattr.retval, tattr.duration,
			   tattr.data_size_out, sizeof(flow_keys));
		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
	}

	/* Do the same tests but for skb-less flow dissector.
	 * We use a known path in the net/tun driver that calls
	 * eth_get_headlen and we manually export bpf_flow_keys
	 * via BPF map in this case.
	 */

	err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
	CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno);

	tap_fd = create_tap("tap0");
	CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d\n", tap_fd, errno);
	err = ifup("tap0");
	CHECK(err, "ifup", "err %d errno %d\n", err, errno);

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		struct bpf_flow_keys flow_keys = {};
		__u32 key = 0;

		err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
		CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno);

		err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
		CHECK_ATTR(err, tests[i].name, "bpf_map_lookup_elem %d\n", err);

		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
	}

	bpf_prog_detach(prog_fd, BPF_FLOW_DISSECTOR);
	bpf_object__close(obj);
}