// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
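/*
 * tc ingress program that inspects IPv6/TCP packets destined to the test
 * server: it looks up the owning socket with bpf_skc_lookup_tcp(), casts it
 * to a request_sock or tcp_sock with the bpf_skc_to_*() helpers depending on
 * the socket state, assigns it to the skb and exercises the SYN cookie
 * helpers.  Results are reported back to user space through the global
 * variables below.
 */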

#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

#ifndef ENOENT
#define ENOENT 2
#endif

/* Server address, expected to be filled in by the user-space test. */
struct sockaddr_in6 srv_sa6 = {};
/* Values observed by the program, read back by the user-space test. */
__u16 listen_tp_sport = 0;
__u16 req_sk_sport = 0;
__u32 recv_cookie = 0;
__u32 gen_cookie = 0;
__u32 linum = 0;

/* Record the first line number where an unexpected condition is hit. */
#define LOG() ({ if (!linum) linum = __LINE__; })

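/* Exercise the SYN cookie helpers on packets reaching the listener:
 * generate a cookie for an incoming SYN and, once a cookie has been
 * generated, validate the cookie echoed back in the completing ACK.
 */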
static void test_syncookie_helper(struct ipv6hdr *ip6h, struct tcphdr *th,
				  struct tcp_sock *tp,
				  struct __sk_buff *skb)
{
	if (th->syn) {
		__s64 mss_cookie;
		void *data_end;

		data_end = (void *)(long)(skb->data_end);

		/* The test SYN is expected to carry a 40-byte TCP header
		 * (20-byte base header plus options).
		 */
		if (th->doff * 4 != 40) {
			LOG();
			return;
		}

		if ((void *)th + 40 > data_end) {
			LOG();
			return;
		}

		mss_cookie = bpf_tcp_gen_syncookie(tp, ip6h, sizeof(*ip6h),
						   th, 40);
		if (mss_cookie < 0) {
			if (mss_cookie != -ENOENT)
				LOG();
		} else {
			/* The low 32 bits of the return value hold the
			 * generated cookie.
			 */
			gen_cookie = (__u32)mss_cookie;
		}
	} else if (gen_cookie) {
		/* It was in cookie mode */
		int ret = bpf_tcp_check_syncookie(tp, ip6h, sizeof(*ip6h),
						  th, sizeof(*th));

		if (ret < 0) {
			if (ret != -ENOENT)
				LOG();
		} else {
			/* The ACK completing the handshake acknowledges
			 * cookie + 1.
			 */
			recv_cookie = bpf_ntohl(th->ack_seq) - 1;
		}
	}
}

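/* Handle an IPv6/TCP packet addressed to the test server: look up the
 * socket, record its local port (from either the request_sock or the
 * listening tcp_sock), assign it to the skb and run the syncookie test.
 */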
static int handle_ip6_tcp(struct ipv6hdr *ip6h, struct __sk_buff *skb)
{
	struct bpf_sock_tuple *tuple;
	struct bpf_sock *bpf_skc;
	unsigned int tuple_len;
	struct tcphdr *th;
	void *data_end;

	data_end = (void *)(long)(skb->data_end);

	th = (struct tcphdr *)(ip6h + 1);
	if (th + 1 > data_end)
		return TC_ACT_OK;

	/* Is it the testing traffic? */
	if (th->dest != srv_sa6.sin6_port)
		return TC_ACT_OK;

	/* The IPv6 saddr/daddr followed by the TCP ports has the same
	 * layout as bpf_sock_tuple.ipv6, so the packet headers can be
	 * used directly as the lookup tuple.
	 */
	tuple_len = sizeof(tuple->ipv6);
	tuple = (struct bpf_sock_tuple *)&ip6h->saddr;
	if ((void *)tuple + tuple_len > data_end) {
		LOG();
		return TC_ACT_OK;
	}

	bpf_skc = bpf_skc_lookup_tcp(skb, tuple, tuple_len,
				     BPF_F_CURRENT_NETNS, 0);
	if (!bpf_skc) {
		LOG();
		return TC_ACT_OK;
	}

	if (bpf_skc->state == BPF_TCP_NEW_SYN_RECV) {
		struct request_sock *req_sk;

		req_sk = (struct request_sock *)bpf_skc_to_tcp_request_sock(bpf_skc);
		if (!req_sk) {
			LOG();
			goto release;
		}

		if (bpf_sk_assign(skb, req_sk, 0)) {
			LOG();
			goto release;
		}

		/* Direct, BTF-typed field access into the request_sock. */
		req_sk_sport = req_sk->__req_common.skc_num;

		bpf_sk_release(req_sk);
		return TC_ACT_OK;
	} else if (bpf_skc->state == BPF_TCP_LISTEN) {
		struct tcp_sock *tp;

		tp = bpf_skc_to_tcp_sock(bpf_skc);
		if (!tp) {
			LOG();
			goto release;
		}

		if (bpf_sk_assign(skb, tp, 0)) {
			LOG();
			goto release;
		}

		/* Direct, BTF-typed field access into the tcp_sock. */
		listen_tp_sport = tp->inet_conn.icsk_inet.sk.__sk_common.skc_num;

		test_syncookie_helper(ip6h, th, tp, skb);
		bpf_sk_release(tp);
		return TC_ACT_OK;
	}

	if (bpf_sk_assign(skb, bpf_skc, 0))
		LOG();

release:
	bpf_sk_release(bpf_skc);
	return TC_ACT_OK;
}

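/* tc ingress entry point: parse Ethernet + IPv6 and hand TCP packets off. */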
SEC("tc")
int cls_ingress(struct __sk_buff *skb)
{
	struct ipv6hdr *ip6h;
	struct ethhdr *eth;
	void *data_end;

	data_end = (void *)(long)(skb->data_end);

	eth = (struct ethhdr *)(long)(skb->data);
	if (eth + 1 > data_end)
		return TC_ACT_OK;

	if (eth->h_proto != bpf_htons(ETH_P_IPV6))
		return TC_ACT_OK;

	ip6h = (struct ipv6hdr *)(eth + 1);
	if (ip6h + 1 > data_end)
		return TC_ACT_OK;

	if (ip6h->nexthdr == IPPROTO_TCP)
		return handle_ip6_tcp(ip6h, skb);

	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";