// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */

#include <linux/types.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <net/tcp.h>
#include <net/bpf_sk_storage.h>

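/* Function pointers that a bpf tcp_congestion_ops is allowed to leave
 * NULL.  Anything not listed here (and not in unsupported_ops) must be
 * backed by a bpf prog; see bpf_tcp_ca_init_member().
 */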
static u32 optional_ops[] = {
	offsetof(struct tcp_congestion_ops, init),
	offsetof(struct tcp_congestion_ops, release),
	offsetof(struct tcp_congestion_ops, set_state),
	offsetof(struct tcp_congestion_ops, cwnd_event),
	offsetof(struct tcp_congestion_ops, in_ack_event),
	offsetof(struct tcp_congestion_ops, pkts_acked),
	offsetof(struct tcp_congestion_ops, min_tso_segs),
	offsetof(struct tcp_congestion_ops, sndbuf_expand),
	offsetof(struct tcp_congestion_ops, cong_control),
};

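/* Function pointers that cannot be implemented in bpf at all.  Setting
 * them is rejected by bpf_tcp_ca_check_member().
 */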
static u32 unsupported_ops[] = {
	offsetof(struct tcp_congestion_ops, get_info),
};

static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;

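/* Resolve the BTF ids of "struct sock" and "struct tcp_sock" once at
 * struct_ops registration time, so the access checkers below can
 * compare against them cheaply.
 */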
static int bpf_tcp_ca_init(struct btf *btf)
{
	s32 type_id;

	type_id = btf_find_by_name_kind(btf, "sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	sock_id = type_id;

	type_id = btf_find_by_name_kind(btf, "tcp_sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	tcp_sock_id = type_id;
	tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);

	return 0;
}

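/* Linear scans are fine here: both arrays are tiny, and these helpers
 * only run when a struct_ops map is updated, not in the data path.
 */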
static bool is_optional(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(optional_ops); i++) {
		if (member_offset == optional_ops[i])
			return true;
	}

	return false;
}

static bool is_unsupported(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(unsupported_ops); i++) {
		if (member_offset == unsupported_ops[i])
			return true;
	}

	return false;
}

extern struct btf *btf_vmlinux;

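/* Validate a ctx (function argument) access by a tcp-cc bpf prog.  The
 * arguments are read-only, and a PTR_TO_BTF_ID to "struct sock" is
 * promoted to "struct tcp_sock" so the prog can read tcp-specific
 * fields directly.
 */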
static bool bpf_tcp_ca_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	if (!btf_ctx_access(off, size, type, prog, info))
		return false;

	if (info->reg_type == PTR_TO_BTF_ID && info->btf_id == sock_id)
		/* promote it to tcp_sock */
		info->btf_id = tcp_sock_id;

	return true;
}

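/* Gate direct access through a pointer to "struct tcp_sock".  Reads
 * fall through to the generic BTF checker; writes are only allowed to
 * the members whitelisted below, and only within the member's bounds.
 */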
static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
					const struct btf *btf,
					const struct btf_type *t, int off,
					int size, enum bpf_access_type atype,
					u32 *next_btf_id)
{
	size_t end;

	if (atype == BPF_READ)
		return btf_struct_access(log, btf, t, off, size, atype, next_btf_id);

	if (t != tcp_sock_type) {
		bpf_log(log, "only read is supported\n");
		return -EACCES;
	}

	switch (off) {
	case bpf_ctx_range(struct inet_connection_sock, icsk_ca_priv):
		end = offsetofend(struct inet_connection_sock, icsk_ca_priv);
		break;
	case offsetof(struct inet_connection_sock, icsk_ack.pending):
		end = offsetofend(struct inet_connection_sock,
				  icsk_ack.pending);
		break;
	case offsetof(struct tcp_sock, snd_cwnd):
		end = offsetofend(struct tcp_sock, snd_cwnd);
		break;
	case offsetof(struct tcp_sock, snd_cwnd_cnt):
		end = offsetofend(struct tcp_sock, snd_cwnd_cnt);
		break;
	case offsetof(struct tcp_sock, snd_ssthresh):
		end = offsetofend(struct tcp_sock, snd_ssthresh);
		break;
	case offsetof(struct tcp_sock, ecn_flags):
		end = offsetofend(struct tcp_sock, ecn_flags);
		break;
	default:
		bpf_log(log, "no write support to tcp_sock at off %d\n", off);
		return -EACCES;
	}

	if (off + size > end) {
		bpf_log(log,
			"write access at off %d with size %d beyond the member of tcp_sock ended at %zu\n",
			off, size, end);
		return -EACCES;
	}

	return NOT_INIT;
}

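/* Helper backing BPF_FUNC_tcp_send_ack: send an ACK carrying the given
 * rcv_nxt, e.g. to emulate dctcp-style delayed-ACK handling.
 */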
BPF_CALL_2(bpf_tcp_send_ack, struct tcp_sock *, tp, u32, rcv_nxt)
{
	/* bpf_tcp_ca prog cannot have NULL tp */
	__tcp_send_ack((struct sock *)tp, rcv_nxt);
	return 0;
}

static const struct bpf_func_proto bpf_tcp_send_ack_proto = {
	.func		= bpf_tcp_send_ack,
	.gpl_only	= false,
	/* In case we want to report error later */
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &tcp_sock_id,
	.arg2_type	= ARG_ANYTHING,
};

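/* Helpers callable from a tcp-cc bpf prog, on top of the base set that
 * every bpf prog type gets.
 */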
static const struct bpf_func_proto *
bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
			  const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_tcp_send_ack:
		return &bpf_tcp_send_ack_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

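/* Kernel functions a tcp-cc bpf prog may call directly (kfuncs).  The
 * cubic/dctcp/bbr entries are only exposed when the respective cc is
 * compiled in, and only under CONFIG_DYNAMIC_FTRACE.
 */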
BTF_SET_START(bpf_tcp_ca_kfunc_ids)
BTF_ID(func, tcp_reno_ssthresh)
BTF_ID(func, tcp_reno_cong_avoid)
BTF_ID(func, tcp_reno_undo_cwnd)
BTF_ID(func, tcp_slow_start)
BTF_ID(func, tcp_cong_avoid_ai)
#ifdef CONFIG_DYNAMIC_FTRACE
#if IS_BUILTIN(CONFIG_TCP_CONG_CUBIC)
BTF_ID(func, cubictcp_init)
BTF_ID(func, cubictcp_recalc_ssthresh)
BTF_ID(func, cubictcp_cong_avoid)
BTF_ID(func, cubictcp_state)
BTF_ID(func, cubictcp_cwnd_event)
BTF_ID(func, cubictcp_acked)
#endif
#if IS_BUILTIN(CONFIG_TCP_CONG_DCTCP)
BTF_ID(func, dctcp_init)
BTF_ID(func, dctcp_update_alpha)
BTF_ID(func, dctcp_cwnd_event)
BTF_ID(func, dctcp_ssthresh)
BTF_ID(func, dctcp_cwnd_undo)
BTF_ID(func, dctcp_state)
#endif
#if IS_BUILTIN(CONFIG_TCP_CONG_BBR)
BTF_ID(func, bbr_init)
BTF_ID(func, bbr_main)
BTF_ID(func, bbr_sndbuf_expand)
BTF_ID(func, bbr_undo_cwnd)
BTF_ID(func, bbr_cwnd_event)
BTF_ID(func, bbr_ssthresh)
BTF_ID(func, bbr_min_tso_segs)
BTF_ID(func, bbr_set_state)
#endif
#endif /* CONFIG_DYNAMIC_FTRACE */
BTF_SET_END(bpf_tcp_ca_kfunc_ids)

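/* Consulted by the verifier to decide whether a call to a kernel
 * function from a tcp-cc prog is allowed.
 */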
static bool bpf_tcp_ca_check_kfunc_call(u32 kfunc_btf_id)
{
	return btf_id_set_contains(&bpf_tcp_ca_kfunc_ids, kfunc_btf_id);
}

static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = {
	.get_func_proto		= bpf_tcp_ca_get_func_proto,
	.is_valid_access	= bpf_tcp_ca_is_valid_access,
	.btf_struct_access	= bpf_tcp_ca_btf_struct_access,
	.check_kfunc_call	= bpf_tcp_ca_check_kfunc_call,
};

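/* Copy and validate the non-func members (flags, name) from the
 * user-supplied struct_ops value, and make sure every compulsory func
 * ptr actually has a bpf prog behind it.
 */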
static int bpf_tcp_ca_init_member(const struct btf_type *t,
				  const struct btf_member *member,
				  void *kdata, const void *udata)
{
	const struct tcp_congestion_ops *utcp_ca;
	struct tcp_congestion_ops *tcp_ca;
	int prog_fd;
	u32 moff;

	utcp_ca = (const struct tcp_congestion_ops *)udata;
	tcp_ca = (struct tcp_congestion_ops *)kdata;

	moff = btf_member_bit_offset(t, member) / 8;
	switch (moff) {
	case offsetof(struct tcp_congestion_ops, flags):
		if (utcp_ca->flags & ~TCP_CONG_MASK)
			return -EINVAL;
		tcp_ca->flags = utcp_ca->flags;
		return 1;
	case offsetof(struct tcp_congestion_ops, name):
		if (bpf_obj_name_cpy(tcp_ca->name, utcp_ca->name,
				     sizeof(tcp_ca->name)) <= 0)
			return -EINVAL;
		if (tcp_ca_find(utcp_ca->name))
			return -EEXIST;
		return 1;
	}

	if (!btf_type_resolve_func_ptr(btf_vmlinux, member->type, NULL))
		return 0;

	/* Ensure bpf_prog is provided for compulsory func ptr */
	prog_fd = (int)(*(unsigned long *)(udata + moff));
	if (!prog_fd && !is_optional(moff) && !is_unsupported(moff))
		return -EINVAL;

	return 0;
}

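/* Reject any attempt to implement an op that bpf cannot support. */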
static int bpf_tcp_ca_check_member(const struct btf_type *t,
				   const struct btf_member *member)
{
	if (is_unsupported(btf_member_bit_offset(t, member) / 8))
		return -ENOTSUPP;
	return 0;
}

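/* Registration goes through the same path as any other kernel
 * congestion control module.
 */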
static int bpf_tcp_ca_reg(void *kdata)
{
	return tcp_register_congestion_control(kdata);
}

static void bpf_tcp_ca_unreg(void *kdata)
{
	tcp_unregister_congestion_control(kdata);
}

/* Avoid sparse warning.  It is only used in bpf_struct_ops.c. */
extern struct bpf_struct_ops bpf_tcp_congestion_ops;

struct bpf_struct_ops bpf_tcp_congestion_ops = {
	.verifier_ops = &bpf_tcp_ca_verifier_ops,
	.reg = bpf_tcp_ca_reg,
	.unreg = bpf_tcp_ca_unreg,
	.check_member = bpf_tcp_ca_check_member,
	.init_member = bpf_tcp_ca_init_member,
	.init = bpf_tcp_ca_init,
	.name = "tcp_congestion_ops",
};

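/* For reference, the bpf side that plugs into the ops above looks
 * roughly like the sketch below.  Illustrative only: "ca_sketch" and
 * its handlers are hypothetical names following the usual libbpf
 * struct_ops conventions, not part of this file.
 *
 *	SEC("struct_ops/ca_sketch_cong_avoid")
 *	void BPF_PROG(ca_sketch_cong_avoid, struct sock *sk,
 *		      __u32 ack, __u32 acked)
 *	{
 *		struct tcp_sock *tp = (struct tcp_sock *)sk;
 *
 *		// sk arrives as PTR_TO_BTF_ID already promoted to
 *		// tcp_sock by bpf_tcp_ca_is_valid_access().  Writes are
 *		// limited to the whitelist in
 *		// bpf_tcp_ca_btf_struct_access(), which covers snd_cwnd.
 *		if (tp->snd_cwnd < tp->snd_ssthresh)
 *			tp->snd_cwnd += acked;
 *	}
 *
 *	SEC(".struct_ops")
 *	struct tcp_congestion_ops ca_sketch = {
 *		.cong_avoid	= (void *)ca_sketch_cong_avoid,
 *		.ssthresh	= (void *)ca_sketch_ssthresh,	// compulsory,
 *		.undo_cwnd	= (void *)ca_sketch_undo_cwnd,	// defined elsewhere
 *		.name		= "ca_sketch",
 *	};
 */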