// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/atomic.h>
#include <linux/bpf_verifier.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/gfp.h>
#include <net/inet_common.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/skmsg.h>
#include <net/sock.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/dst.h>
#include <net/sock_reuseport.h>
#include <net/busy_poll.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/udp.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <linux/inetdevice.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>
#include <net/flow.h>
#include <net/arp.h>
#include <net/ipv6.h>
#include <net/net_namespace.h>
#include <linux/seg6_local.h>
#include <net/seg6.h>
#include <net/seg6_local.h>
#include <net/lwtunnel.h>
#include <net/ipv6_stubs.h>
#include <net/bpf_sk_storage.h>
#include <net/transp_v6.h>
#include <linux/btf_ids.h>
#include <net/tls.h>
#include <net/xdp.h>
#include <net/mptcp.h>

static const struct bpf_func_proto *
bpf_sk_base_func_proto(enum bpf_func_id func_id);

int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len)
{
	if (in_compat_syscall()) {
		struct compat_sock_fprog f32;

		if (len != sizeof(f32))
			return -EINVAL;
		if (copy_from_sockptr(&f32, src, sizeof(f32)))
			return -EFAULT;
		memset(dst, 0, sizeof(*dst));
		dst->len = f32.len;
		dst->filter = compat_ptr(f32.filter);
	} else {
		if (len != sizeof(*dst))
			return -EINVAL;
		if (copy_from_sockptr(dst, src, sizeof(*dst)))
			return -EFAULT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(copy_bpf_fprog_from_user);

/**
 * sk_filter_trim_cap - run a packet through a socket filter
 * @sk: sock associated with &sk_buff
 * @skb: buffer to filter
 * @cap: limit on how short the eBPF program may trim the packet
 *
 * Run the eBPF program and then cut skb->data to the correct size returned
 * by the program. If pkt_len is 0 we toss the packet. If skb->len is smaller
 * than pkt_len we keep the whole skb->data. This is the socket level
 * wrapper to bpf_prog_run.
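 *
 * A minimal usage sketch (hypothetical caller, not taken from this file):
 * a protocol handler that wants at least a UDP header preserved could do
 *
 *	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
 *		goto drop;
 *
 * where "drop" is a hypothetical error label in the caller, so any trimming
 * requested by the filter never cuts the packet below the cap.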
It returns 0 if the packet should 120 * be accepted or -EPERM if the packet should be tossed. 121 * 122 */ 123 int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap) 124 { 125 int err; 126 struct sk_filter *filter; 127 128 /* 129 * If the skb was allocated from pfmemalloc reserves, only 130 * allow SOCK_MEMALLOC sockets to use it as this socket is 131 * helping free memory 132 */ 133 if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) { 134 NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP); 135 return -ENOMEM; 136 } 137 err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb); 138 if (err) 139 return err; 140 141 err = security_sock_rcv_skb(sk, skb); 142 if (err) 143 return err; 144 145 rcu_read_lock(); 146 filter = rcu_dereference(sk->sk_filter); 147 if (filter) { 148 struct sock *save_sk = skb->sk; 149 unsigned int pkt_len; 150 151 skb->sk = sk; 152 pkt_len = bpf_prog_run_save_cb(filter->prog, skb); 153 skb->sk = save_sk; 154 err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM; 155 } 156 rcu_read_unlock(); 157 158 return err; 159 } 160 EXPORT_SYMBOL(sk_filter_trim_cap); 161 162 BPF_CALL_1(bpf_skb_get_pay_offset, struct sk_buff *, skb) 163 { 164 return skb_get_poff(skb); 165 } 166 167 BPF_CALL_3(bpf_skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x) 168 { 169 struct nlattr *nla; 170 171 if (skb_is_nonlinear(skb)) 172 return 0; 173 174 if (skb->len < sizeof(struct nlattr)) 175 return 0; 176 177 if (a > skb->len - sizeof(struct nlattr)) 178 return 0; 179 180 nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x); 181 if (nla) 182 return (void *) nla - (void *) skb->data; 183 184 return 0; 185 } 186 187 BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x) 188 { 189 struct nlattr *nla; 190 191 if (skb_is_nonlinear(skb)) 192 return 0; 193 194 if (skb->len < sizeof(struct nlattr)) 195 return 0; 196 197 if (a > skb->len - sizeof(struct nlattr)) 198 return 0; 199 200 nla = (struct nlattr *) &skb->data[a]; 201 if (nla->nla_len > skb->len - a) 202 return 0; 203 204 nla = nla_find_nested(nla, x); 205 if (nla) 206 return (void *) nla - (void *) skb->data; 207 208 return 0; 209 } 210 211 BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *, 212 data, int, headlen, int, offset) 213 { 214 u8 tmp, *ptr; 215 const int len = sizeof(tmp); 216 217 if (offset >= 0) { 218 if (headlen - offset >= len) 219 return *(u8 *)(data + offset); 220 if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) 221 return tmp; 222 } else { 223 ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len); 224 if (likely(ptr)) 225 return *(u8 *)ptr; 226 } 227 228 return -EFAULT; 229 } 230 231 BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb, 232 int, offset) 233 { 234 return ____bpf_skb_load_helper_8(skb, skb->data, skb->len - skb->data_len, 235 offset); 236 } 237 238 BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *, 239 data, int, headlen, int, offset) 240 { 241 __be16 tmp, *ptr; 242 const int len = sizeof(tmp); 243 244 if (offset >= 0) { 245 if (headlen - offset >= len) 246 return get_unaligned_be16(data + offset); 247 if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) 248 return be16_to_cpu(tmp); 249 } else { 250 ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len); 251 if (likely(ptr)) 252 return get_unaligned_be16(ptr); 253 } 254 255 return -EFAULT; 256 } 257 258 BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb, 259 int, offset) 260 { 261 return 
____bpf_skb_load_helper_16(skb, skb->data, skb->len - skb->data_len, 262 offset); 263 } 264 265 BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *, 266 data, int, headlen, int, offset) 267 { 268 __be32 tmp, *ptr; 269 const int len = sizeof(tmp); 270 271 if (likely(offset >= 0)) { 272 if (headlen - offset >= len) 273 return get_unaligned_be32(data + offset); 274 if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) 275 return be32_to_cpu(tmp); 276 } else { 277 ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len); 278 if (likely(ptr)) 279 return get_unaligned_be32(ptr); 280 } 281 282 return -EFAULT; 283 } 284 285 BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb, 286 int, offset) 287 { 288 return ____bpf_skb_load_helper_32(skb, skb->data, skb->len - skb->data_len, 289 offset); 290 } 291 292 static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg, 293 struct bpf_insn *insn_buf) 294 { 295 struct bpf_insn *insn = insn_buf; 296 297 switch (skb_field) { 298 case SKF_AD_MARK: 299 BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4); 300 301 *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, 302 offsetof(struct sk_buff, mark)); 303 break; 304 305 case SKF_AD_PKTTYPE: 306 *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET); 307 *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX); 308 #ifdef __BIG_ENDIAN_BITFIELD 309 *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5); 310 #endif 311 break; 312 313 case SKF_AD_QUEUE: 314 BUILD_BUG_ON(sizeof_field(struct sk_buff, queue_mapping) != 2); 315 316 *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, 317 offsetof(struct sk_buff, queue_mapping)); 318 break; 319 320 case SKF_AD_VLAN_TAG: 321 BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_tci) != 2); 322 323 /* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */ 324 *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, 325 offsetof(struct sk_buff, vlan_tci)); 326 break; 327 case SKF_AD_VLAN_TAG_PRESENT: 328 *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET); 329 if (PKT_VLAN_PRESENT_BIT) 330 *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT); 331 if (PKT_VLAN_PRESENT_BIT < 7) 332 *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1); 333 break; 334 } 335 336 return insn - insn_buf; 337 } 338 339 static bool convert_bpf_extensions(struct sock_filter *fp, 340 struct bpf_insn **insnp) 341 { 342 struct bpf_insn *insn = *insnp; 343 u32 cnt; 344 345 switch (fp->k) { 346 case SKF_AD_OFF + SKF_AD_PROTOCOL: 347 BUILD_BUG_ON(sizeof_field(struct sk_buff, protocol) != 2); 348 349 /* A = *(u16 *) (CTX + offsetof(protocol)) */ 350 *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, 351 offsetof(struct sk_buff, protocol)); 352 /* A = ntohs(A) [emitting a nop or swap16] */ 353 *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16); 354 break; 355 356 case SKF_AD_OFF + SKF_AD_PKTTYPE: 357 cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn); 358 insn += cnt - 1; 359 break; 360 361 case SKF_AD_OFF + SKF_AD_IFINDEX: 362 case SKF_AD_OFF + SKF_AD_HATYPE: 363 BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4); 364 BUILD_BUG_ON(sizeof_field(struct net_device, type) != 2); 365 366 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev), 367 BPF_REG_TMP, BPF_REG_CTX, 368 offsetof(struct sk_buff, dev)); 369 /* if (tmp != 0) goto pc + 1 */ 370 *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1); 371 *insn++ = BPF_EXIT_INSN(); 372 if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX) 373 *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP, 374 
offsetof(struct net_device, ifindex)); 375 else 376 *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP, 377 offsetof(struct net_device, type)); 378 break; 379 380 case SKF_AD_OFF + SKF_AD_MARK: 381 cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn); 382 insn += cnt - 1; 383 break; 384 385 case SKF_AD_OFF + SKF_AD_RXHASH: 386 BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4); 387 388 *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, 389 offsetof(struct sk_buff, hash)); 390 break; 391 392 case SKF_AD_OFF + SKF_AD_QUEUE: 393 cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn); 394 insn += cnt - 1; 395 break; 396 397 case SKF_AD_OFF + SKF_AD_VLAN_TAG: 398 cnt = convert_skb_access(SKF_AD_VLAN_TAG, 399 BPF_REG_A, BPF_REG_CTX, insn); 400 insn += cnt - 1; 401 break; 402 403 case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT: 404 cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT, 405 BPF_REG_A, BPF_REG_CTX, insn); 406 insn += cnt - 1; 407 break; 408 409 case SKF_AD_OFF + SKF_AD_VLAN_TPID: 410 BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_proto) != 2); 411 412 /* A = *(u16 *) (CTX + offsetof(vlan_proto)) */ 413 *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, 414 offsetof(struct sk_buff, vlan_proto)); 415 /* A = ntohs(A) [emitting a nop or swap16] */ 416 *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16); 417 break; 418 419 case SKF_AD_OFF + SKF_AD_PAY_OFFSET: 420 case SKF_AD_OFF + SKF_AD_NLATTR: 421 case SKF_AD_OFF + SKF_AD_NLATTR_NEST: 422 case SKF_AD_OFF + SKF_AD_CPU: 423 case SKF_AD_OFF + SKF_AD_RANDOM: 424 /* arg1 = CTX */ 425 *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX); 426 /* arg2 = A */ 427 *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A); 428 /* arg3 = X */ 429 *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X); 430 /* Emit call(arg1=CTX, arg2=A, arg3=X) */ 431 switch (fp->k) { 432 case SKF_AD_OFF + SKF_AD_PAY_OFFSET: 433 *insn = BPF_EMIT_CALL(bpf_skb_get_pay_offset); 434 break; 435 case SKF_AD_OFF + SKF_AD_NLATTR: 436 *insn = BPF_EMIT_CALL(bpf_skb_get_nlattr); 437 break; 438 case SKF_AD_OFF + SKF_AD_NLATTR_NEST: 439 *insn = BPF_EMIT_CALL(bpf_skb_get_nlattr_nest); 440 break; 441 case SKF_AD_OFF + SKF_AD_CPU: 442 *insn = BPF_EMIT_CALL(bpf_get_raw_cpu_id); 443 break; 444 case SKF_AD_OFF + SKF_AD_RANDOM: 445 *insn = BPF_EMIT_CALL(bpf_user_rnd_u32); 446 bpf_user_rnd_init_once(); 447 break; 448 } 449 break; 450 451 case SKF_AD_OFF + SKF_AD_ALU_XOR_X: 452 /* A ^= X */ 453 *insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X); 454 break; 455 456 default: 457 /* This is just a dummy call to avoid letting the compiler 458 * evict __bpf_call_base() as an optimization. Placed here 459 * where no-one bothers. 
460 */ 461 BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0); 462 return false; 463 } 464 465 *insnp = insn; 466 return true; 467 } 468 469 static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp) 470 { 471 const bool unaligned_ok = IS_BUILTIN(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS); 472 int size = bpf_size_to_bytes(BPF_SIZE(fp->code)); 473 bool endian = BPF_SIZE(fp->code) == BPF_H || 474 BPF_SIZE(fp->code) == BPF_W; 475 bool indirect = BPF_MODE(fp->code) == BPF_IND; 476 const int ip_align = NET_IP_ALIGN; 477 struct bpf_insn *insn = *insnp; 478 int offset = fp->k; 479 480 if (!indirect && 481 ((unaligned_ok && offset >= 0) || 482 (!unaligned_ok && offset >= 0 && 483 offset + ip_align >= 0 && 484 offset + ip_align % size == 0))) { 485 bool ldx_off_ok = offset <= S16_MAX; 486 487 *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H); 488 if (offset) 489 *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset); 490 *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP, 491 size, 2 + endian + (!ldx_off_ok * 2)); 492 if (ldx_off_ok) { 493 *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, 494 BPF_REG_D, offset); 495 } else { 496 *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D); 497 *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset); 498 *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, 499 BPF_REG_TMP, 0); 500 } 501 if (endian) 502 *insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8); 503 *insn++ = BPF_JMP_A(8); 504 } 505 506 *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX); 507 *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_D); 508 *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_H); 509 if (!indirect) { 510 *insn++ = BPF_MOV64_IMM(BPF_REG_ARG4, offset); 511 } else { 512 *insn++ = BPF_MOV64_REG(BPF_REG_ARG4, BPF_REG_X); 513 if (fp->k) 514 *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG4, offset); 515 } 516 517 switch (BPF_SIZE(fp->code)) { 518 case BPF_B: 519 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8); 520 break; 521 case BPF_H: 522 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16); 523 break; 524 case BPF_W: 525 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32); 526 break; 527 default: 528 return false; 529 } 530 531 *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_A, 0, 2); 532 *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A); 533 *insn = BPF_EXIT_INSN(); 534 535 *insnp = insn; 536 return true; 537 } 538 539 /** 540 * bpf_convert_filter - convert filter program 541 * @prog: the user passed filter program 542 * @len: the length of the user passed filter program 543 * @new_prog: allocated 'struct bpf_prog' or NULL 544 * @new_len: pointer to store length of converted program 545 * @seen_ld_abs: bool whether we've seen ld_abs/ind 546 * 547 * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn' 548 * style extended BPF (eBPF). 
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *	bpf_convert_filter(old_prog, old_len, NULL, &new_len, &seen_ld_abs)
 *
 * 2) 2nd call to remap in two passes: the 1st pass finds the new
 *    jump offsets, the 2nd pass does the remapping:
 *	bpf_convert_filter(old_prog, old_len, new_prog, &new_len, &seen_ld_abs)
 */
static int bpf_convert_filter(struct sock_filter *prog, int len,
			      struct bpf_prog *new_prog, int *new_len,
			      bool *seen_ld_abs)
{
	int new_flen = 0, pass = 0, target, i, stack_off;
	struct bpf_insn *new_insn, *first_insn = NULL;
	struct sock_filter *fp;
	int *addrs = NULL;
	u8 bpf_src;

	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

	if (len <= 0 || len > BPF_MAXINSNS)
		return -EINVAL;

	if (new_prog) {
		first_insn = new_prog->insnsi;
		addrs = kcalloc(len, sizeof(*addrs),
				GFP_KERNEL | __GFP_NOWARN);
		if (!addrs)
			return -ENOMEM;
	}

do_pass:
	new_insn = first_insn;
	fp = prog;

	/* Classic BPF related prologue emission. */
	if (new_prog) {
		/* Classic BPF expects A and X to be reset first. These need
		 * to be guaranteed to be the first two instructions.
		 */
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

		/* All programs must keep CTX in callee saved BPF_REG_CTX.
		 * In the eBPF case it's done by the compiler; here we need to
		 * do this ourselves. Initial CTX is present in BPF_REG_ARG1.
		 */
		*new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
		if (*seen_ld_abs) {
			/* For packet access in classic BPF, cache skb->data
			 * in callee-saved BPF R8 and skb->len - skb->data_len
			 * (headlen) in BPF R9. Since classic BPF is read-only
			 * on CTX, we only need to cache it once.
			 */
			*new_insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
						  BPF_REG_D, BPF_REG_CTX,
						  offsetof(struct sk_buff, data));
			*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_H, BPF_REG_CTX,
						  offsetof(struct sk_buff, len));
			*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_CTX,
						  offsetof(struct sk_buff, data_len));
			*new_insn++ = BPF_ALU32_REG(BPF_SUB, BPF_REG_H, BPF_REG_TMP);
		}
	} else {
		new_insn += 3;
	}

	for (i = 0; i < len; fp++, i++) {
		struct bpf_insn tmp_insns[32] = { };
		struct bpf_insn *insn = tmp_insns;

		if (addrs)
			addrs[i] = new_insn - first_insn;

		switch (fp->code) {
		/* All arithmetic insns and skb loads map as-is.
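		 *
		 * As an illustration (assumption-level, mirroring the
		 * fall-through emission below): a classic insn
		 * BPF_ALU | BPF_ADD | BPF_K with k = 4 is re-emitted
		 * unchanged via BPF_RAW_INSN(fp->code, BPF_REG_A,
		 * BPF_REG_X, 0, 4), i.e. "A += 4" with A living in
		 * BPF_REG_A.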
*/ 627 case BPF_ALU | BPF_ADD | BPF_X: 628 case BPF_ALU | BPF_ADD | BPF_K: 629 case BPF_ALU | BPF_SUB | BPF_X: 630 case BPF_ALU | BPF_SUB | BPF_K: 631 case BPF_ALU | BPF_AND | BPF_X: 632 case BPF_ALU | BPF_AND | BPF_K: 633 case BPF_ALU | BPF_OR | BPF_X: 634 case BPF_ALU | BPF_OR | BPF_K: 635 case BPF_ALU | BPF_LSH | BPF_X: 636 case BPF_ALU | BPF_LSH | BPF_K: 637 case BPF_ALU | BPF_RSH | BPF_X: 638 case BPF_ALU | BPF_RSH | BPF_K: 639 case BPF_ALU | BPF_XOR | BPF_X: 640 case BPF_ALU | BPF_XOR | BPF_K: 641 case BPF_ALU | BPF_MUL | BPF_X: 642 case BPF_ALU | BPF_MUL | BPF_K: 643 case BPF_ALU | BPF_DIV | BPF_X: 644 case BPF_ALU | BPF_DIV | BPF_K: 645 case BPF_ALU | BPF_MOD | BPF_X: 646 case BPF_ALU | BPF_MOD | BPF_K: 647 case BPF_ALU | BPF_NEG: 648 case BPF_LD | BPF_ABS | BPF_W: 649 case BPF_LD | BPF_ABS | BPF_H: 650 case BPF_LD | BPF_ABS | BPF_B: 651 case BPF_LD | BPF_IND | BPF_W: 652 case BPF_LD | BPF_IND | BPF_H: 653 case BPF_LD | BPF_IND | BPF_B: 654 /* Check for overloaded BPF extension and 655 * directly convert it if found, otherwise 656 * just move on with mapping. 657 */ 658 if (BPF_CLASS(fp->code) == BPF_LD && 659 BPF_MODE(fp->code) == BPF_ABS && 660 convert_bpf_extensions(fp, &insn)) 661 break; 662 if (BPF_CLASS(fp->code) == BPF_LD && 663 convert_bpf_ld_abs(fp, &insn)) { 664 *seen_ld_abs = true; 665 break; 666 } 667 668 if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) || 669 fp->code == (BPF_ALU | BPF_MOD | BPF_X)) { 670 *insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X); 671 /* Error with exception code on div/mod by 0. 672 * For cBPF programs, this was always return 0. 673 */ 674 *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2); 675 *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A); 676 *insn++ = BPF_EXIT_INSN(); 677 } 678 679 *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k); 680 break; 681 682 /* Jump transformation cannot use BPF block macros 683 * everywhere as offset calculation and target updates 684 * require a bit more work than the rest, i.e. jump 685 * opcodes map as-is, but offsets need adjustment. 686 */ 687 688 #define BPF_EMIT_JMP \ 689 do { \ 690 const s32 off_min = S16_MIN, off_max = S16_MAX; \ 691 s32 off; \ 692 \ 693 if (target >= len || target < 0) \ 694 goto err; \ 695 off = addrs ? addrs[target] - addrs[i] - 1 : 0; \ 696 /* Adjust pc relative offset for 2nd or 3rd insn. */ \ 697 off -= insn - tmp_insns; \ 698 /* Reject anything not fitting into insn->off. */ \ 699 if (off < off_min || off > off_max) \ 700 goto err; \ 701 insn->off = off; \ 702 } while (0) 703 704 case BPF_JMP | BPF_JA: 705 target = i + fp->k + 1; 706 insn->code = fp->code; 707 BPF_EMIT_JMP; 708 break; 709 710 case BPF_JMP | BPF_JEQ | BPF_K: 711 case BPF_JMP | BPF_JEQ | BPF_X: 712 case BPF_JMP | BPF_JSET | BPF_K: 713 case BPF_JMP | BPF_JSET | BPF_X: 714 case BPF_JMP | BPF_JGT | BPF_K: 715 case BPF_JMP | BPF_JGT | BPF_X: 716 case BPF_JMP | BPF_JGE | BPF_K: 717 case BPF_JMP | BPF_JGE | BPF_X: 718 if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) { 719 /* BPF immediates are signed, zero extend 720 * immediate into tmp register and use it 721 * in compare insn. 722 */ 723 *insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k); 724 725 insn->dst_reg = BPF_REG_A; 726 insn->src_reg = BPF_REG_TMP; 727 bpf_src = BPF_X; 728 } else { 729 insn->dst_reg = BPF_REG_A; 730 insn->imm = fp->k; 731 bpf_src = BPF_SRC(fp->code); 732 insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0; 733 } 734 735 /* Common case where 'jump_false' is next insn. 
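			 *
			 * For example (illustrative only): a cBPF insn
			 * "jeq #0x800, Lt, Lf" where Lf is the very next
			 * insn has fp->jf == 0, so it maps to a single
			 * eBPF JEQ whose offset encodes only the true
			 * target.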
			 */
			if (fp->jf == 0) {
				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
				target = i + fp->jt + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Convert some jumps when 'jump_true' is next insn. */
			if (fp->jt == 0) {
				switch (BPF_OP(fp->code)) {
				case BPF_JEQ:
					insn->code = BPF_JMP | BPF_JNE | bpf_src;
					break;
				case BPF_JGT:
					insn->code = BPF_JMP | BPF_JLE | bpf_src;
					break;
				case BPF_JGE:
					insn->code = BPF_JMP | BPF_JLT | bpf_src;
					break;
				default:
					goto jmp_rest;
				}

				target = i + fp->jf + 1;
				BPF_EMIT_JMP;
				break;
			}
jmp_rest:
			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
			BPF_EMIT_JMP;
			insn++;

			insn->code = BPF_JMP | BPF_JA;
			target = i + fp->jf + 1;
			BPF_EMIT_JMP;
			break;

		/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B: {
			struct sock_filter tmp = {
				.code	= BPF_LD | BPF_ABS | BPF_B,
				.k	= fp->k,
			};

			*seen_ld_abs = true;

			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
			convert_bpf_ld_abs(&tmp, &insn);
			insn++;
			/* A &= 0xf */
			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
			/* A <<= 2 */
			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
			/* tmp = X */
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_X);
			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = tmp */
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
			break;
		}
		/* RET_K is remapped into 2 insns. RET_A case doesn't need an
		 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
		 */
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
			if (BPF_RVAL(fp->code) == BPF_K)
				*insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
							0, fp->k);
			*insn = BPF_EXIT_INSN();
			break;

		/* Store to stack. */
		case BPF_ST:
		case BPF_STX:
			stack_off = fp->k * 4 + 4;
			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -stack_off);
			/* check_load_and_stores() verifies that classic BPF can
			 * load from stack only after write, so tracking
			 * stack_depth for ST|STX insns is enough
			 */
			if (new_prog && new_prog->aux->stack_depth < stack_off)
				new_prog->aux->stack_depth = stack_off;
			break;

		/* Load from stack. */
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			stack_off = fp->k * 4 + 4;
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
					    -stack_off);
			break;

		/* A = K or X = K */
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);
			break;

		/* X = A */
		case BPF_MISC | BPF_TAX:
			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			break;

		/* A = X */
		case BPF_MISC | BPF_TXA:
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
			break;

		/* A = skb->len or X = skb->len */
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct sk_buff, len));
			break;

		/* Access seccomp_data fields.
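		 *
		 * (Example, stated as an assumption about typical seccomp
		 *  filters: "ld [0]" in a seccomp cBPF program reads the
		 *  syscall number, since offset 0 of struct seccomp_data
		 *  is the nr field; it becomes a BPF_W load from CTX + 0
		 *  here.)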
		 */
		case BPF_LDX | BPF_ABS | BPF_W:
			/* A = *(u32 *) (ctx + K) */
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
			break;

		/* Unknown instruction. */
		default:
			goto err;
		}

		insn++;
		if (new_prog)
			memcpy(new_insn, tmp_insns,
			       sizeof(*insn) * (insn - tmp_insns));
		new_insn += insn - tmp_insns;
	}

	if (!new_prog) {
		/* Only calculating new length. */
		*new_len = new_insn - first_insn;
		if (*seen_ld_abs)
			*new_len += 4; /* Prologue bits. */
		return 0;
	}

	pass++;
	if (new_flen != new_insn - first_insn) {
		new_flen = new_insn - first_insn;
		if (pass > 2)
			goto err;
		goto do_pass;
	}

	kfree(addrs);
	BUG_ON(*new_len != new_flen);
	return 0;
err:
	kfree(addrs);
	return -EINVAL;
}

/* Security:
 *
 * As we don't want to clear the mem[] array for each packet going through
 * __bpf_prog_run(), we check that a filter loaded by the user never tries
 * to read a cell that was not previously written, and we check all branches
 * to be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);

	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;

	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_ST:
		case BPF_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_JMP | BPF_JA:
			/* A jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* A jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}

static bool chk_code_allowed(u16 code_to_probe)
{
	static const bool codes[] = {
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_K] = true,
		[BPF_ALU | BPF_ADD | BPF_X] = true,
		[BPF_ALU | BPF_SUB | BPF_K] = true,
		[BPF_ALU | BPF_SUB | BPF_X] = true,
		[BPF_ALU | BPF_MUL | BPF_K] = true,
		[BPF_ALU | BPF_MUL | BPF_X] = true,
		[BPF_ALU | BPF_DIV | BPF_K] = true,
		[BPF_ALU | BPF_DIV | BPF_X] = true,
		[BPF_ALU | BPF_MOD | BPF_K] = true,
		[BPF_ALU | BPF_MOD | BPF_X] = true,
		[BPF_ALU | BPF_AND | BPF_K] = true,
		[BPF_ALU | BPF_AND | BPF_X] = true,
		[BPF_ALU | BPF_OR | BPF_K] = true,
		[BPF_ALU | BPF_OR | BPF_X] = true,
		[BPF_ALU | BPF_XOR | BPF_K] = true,
		[BPF_ALU | BPF_XOR | BPF_X] = true,
		[BPF_ALU | BPF_LSH | BPF_K] = true,
		[BPF_ALU | BPF_LSH | BPF_X] = true,
		[BPF_ALU | BPF_RSH | BPF_K] = true,
		[BPF_ALU | BPF_RSH | BPF_X] = true,
		[BPF_ALU | BPF_NEG] = true,
		/* Load instructions */
		[BPF_LD | BPF_W | BPF_ABS] = true,
		[BPF_LD | BPF_H |
BPF_ABS] = true, 991 [BPF_LD | BPF_B | BPF_ABS] = true, 992 [BPF_LD | BPF_W | BPF_LEN] = true, 993 [BPF_LD | BPF_W | BPF_IND] = true, 994 [BPF_LD | BPF_H | BPF_IND] = true, 995 [BPF_LD | BPF_B | BPF_IND] = true, 996 [BPF_LD | BPF_IMM] = true, 997 [BPF_LD | BPF_MEM] = true, 998 [BPF_LDX | BPF_W | BPF_LEN] = true, 999 [BPF_LDX | BPF_B | BPF_MSH] = true, 1000 [BPF_LDX | BPF_IMM] = true, 1001 [BPF_LDX | BPF_MEM] = true, 1002 /* Store instructions */ 1003 [BPF_ST] = true, 1004 [BPF_STX] = true, 1005 /* Misc instructions */ 1006 [BPF_MISC | BPF_TAX] = true, 1007 [BPF_MISC | BPF_TXA] = true, 1008 /* Return instructions */ 1009 [BPF_RET | BPF_K] = true, 1010 [BPF_RET | BPF_A] = true, 1011 /* Jump instructions */ 1012 [BPF_JMP | BPF_JA] = true, 1013 [BPF_JMP | BPF_JEQ | BPF_K] = true, 1014 [BPF_JMP | BPF_JEQ | BPF_X] = true, 1015 [BPF_JMP | BPF_JGE | BPF_K] = true, 1016 [BPF_JMP | BPF_JGE | BPF_X] = true, 1017 [BPF_JMP | BPF_JGT | BPF_K] = true, 1018 [BPF_JMP | BPF_JGT | BPF_X] = true, 1019 [BPF_JMP | BPF_JSET | BPF_K] = true, 1020 [BPF_JMP | BPF_JSET | BPF_X] = true, 1021 }; 1022 1023 if (code_to_probe >= ARRAY_SIZE(codes)) 1024 return false; 1025 1026 return codes[code_to_probe]; 1027 } 1028 1029 static bool bpf_check_basics_ok(const struct sock_filter *filter, 1030 unsigned int flen) 1031 { 1032 if (filter == NULL) 1033 return false; 1034 if (flen == 0 || flen > BPF_MAXINSNS) 1035 return false; 1036 1037 return true; 1038 } 1039 1040 /** 1041 * bpf_check_classic - verify socket filter code 1042 * @filter: filter to verify 1043 * @flen: length of filter 1044 * 1045 * Check the user's filter code. If we let some ugly 1046 * filter code slip through kaboom! The filter must contain 1047 * no references or jumps that are out of range, no illegal 1048 * instructions, and must end with a RET instruction. 1049 * 1050 * All jumps are forward as they are not signed. 1051 * 1052 * Returns 0 if the rule set is legal or -EINVAL if not. 1053 */ 1054 static int bpf_check_classic(const struct sock_filter *filter, 1055 unsigned int flen) 1056 { 1057 bool anc_found; 1058 int pc; 1059 1060 /* Check the filter code now */ 1061 for (pc = 0; pc < flen; pc++) { 1062 const struct sock_filter *ftest = &filter[pc]; 1063 1064 /* May we actually operate on this code? */ 1065 if (!chk_code_allowed(ftest->code)) 1066 return -EINVAL; 1067 1068 /* Some instructions need special checks */ 1069 switch (ftest->code) { 1070 case BPF_ALU | BPF_DIV | BPF_K: 1071 case BPF_ALU | BPF_MOD | BPF_K: 1072 /* Check for division by zero */ 1073 if (ftest->k == 0) 1074 return -EINVAL; 1075 break; 1076 case BPF_ALU | BPF_LSH | BPF_K: 1077 case BPF_ALU | BPF_RSH | BPF_K: 1078 if (ftest->k >= 32) 1079 return -EINVAL; 1080 break; 1081 case BPF_LD | BPF_MEM: 1082 case BPF_LDX | BPF_MEM: 1083 case BPF_ST: 1084 case BPF_STX: 1085 /* Check for invalid memory addresses */ 1086 if (ftest->k >= BPF_MEMWORDS) 1087 return -EINVAL; 1088 break; 1089 case BPF_JMP | BPF_JA: 1090 /* Note, the large ftest->k might cause loops. 1091 * Compare this with conditional jumps below, 1092 * where offsets are limited. 
--ANK (981016) 1093 */ 1094 if (ftest->k >= (unsigned int)(flen - pc - 1)) 1095 return -EINVAL; 1096 break; 1097 case BPF_JMP | BPF_JEQ | BPF_K: 1098 case BPF_JMP | BPF_JEQ | BPF_X: 1099 case BPF_JMP | BPF_JGE | BPF_K: 1100 case BPF_JMP | BPF_JGE | BPF_X: 1101 case BPF_JMP | BPF_JGT | BPF_K: 1102 case BPF_JMP | BPF_JGT | BPF_X: 1103 case BPF_JMP | BPF_JSET | BPF_K: 1104 case BPF_JMP | BPF_JSET | BPF_X: 1105 /* Both conditionals must be safe */ 1106 if (pc + ftest->jt + 1 >= flen || 1107 pc + ftest->jf + 1 >= flen) 1108 return -EINVAL; 1109 break; 1110 case BPF_LD | BPF_W | BPF_ABS: 1111 case BPF_LD | BPF_H | BPF_ABS: 1112 case BPF_LD | BPF_B | BPF_ABS: 1113 anc_found = false; 1114 if (bpf_anc_helper(ftest) & BPF_ANC) 1115 anc_found = true; 1116 /* Ancillary operation unknown or unsupported */ 1117 if (anc_found == false && ftest->k >= SKF_AD_OFF) 1118 return -EINVAL; 1119 } 1120 } 1121 1122 /* Last instruction must be a RET code */ 1123 switch (filter[flen - 1].code) { 1124 case BPF_RET | BPF_K: 1125 case BPF_RET | BPF_A: 1126 return check_load_and_stores(filter, flen); 1127 } 1128 1129 return -EINVAL; 1130 } 1131 1132 static int bpf_prog_store_orig_filter(struct bpf_prog *fp, 1133 const struct sock_fprog *fprog) 1134 { 1135 unsigned int fsize = bpf_classic_proglen(fprog); 1136 struct sock_fprog_kern *fkprog; 1137 1138 fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL); 1139 if (!fp->orig_prog) 1140 return -ENOMEM; 1141 1142 fkprog = fp->orig_prog; 1143 fkprog->len = fprog->len; 1144 1145 fkprog->filter = kmemdup(fp->insns, fsize, 1146 GFP_KERNEL | __GFP_NOWARN); 1147 if (!fkprog->filter) { 1148 kfree(fp->orig_prog); 1149 return -ENOMEM; 1150 } 1151 1152 return 0; 1153 } 1154 1155 static void bpf_release_orig_filter(struct bpf_prog *fp) 1156 { 1157 struct sock_fprog_kern *fprog = fp->orig_prog; 1158 1159 if (fprog) { 1160 kfree(fprog->filter); 1161 kfree(fprog); 1162 } 1163 } 1164 1165 static void __bpf_prog_release(struct bpf_prog *prog) 1166 { 1167 if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) { 1168 bpf_prog_put(prog); 1169 } else { 1170 bpf_release_orig_filter(prog); 1171 bpf_prog_free(prog); 1172 } 1173 } 1174 1175 static void __sk_filter_release(struct sk_filter *fp) 1176 { 1177 __bpf_prog_release(fp->prog); 1178 kfree(fp); 1179 } 1180 1181 /** 1182 * sk_filter_release_rcu - Release a socket filter by rcu_head 1183 * @rcu: rcu_head that contains the sk_filter to free 1184 */ 1185 static void sk_filter_release_rcu(struct rcu_head *rcu) 1186 { 1187 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu); 1188 1189 __sk_filter_release(fp); 1190 } 1191 1192 /** 1193 * sk_filter_release - release a socket filter 1194 * @fp: filter to remove 1195 * 1196 * Remove a filter from a socket and release its resources. 
1197 */ 1198 static void sk_filter_release(struct sk_filter *fp) 1199 { 1200 if (refcount_dec_and_test(&fp->refcnt)) 1201 call_rcu(&fp->rcu, sk_filter_release_rcu); 1202 } 1203 1204 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp) 1205 { 1206 u32 filter_size = bpf_prog_size(fp->prog->len); 1207 1208 atomic_sub(filter_size, &sk->sk_omem_alloc); 1209 sk_filter_release(fp); 1210 } 1211 1212 /* try to charge the socket memory if there is space available 1213 * return true on success 1214 */ 1215 static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp) 1216 { 1217 u32 filter_size = bpf_prog_size(fp->prog->len); 1218 int optmem_max = READ_ONCE(sysctl_optmem_max); 1219 1220 /* same check as in sock_kmalloc() */ 1221 if (filter_size <= optmem_max && 1222 atomic_read(&sk->sk_omem_alloc) + filter_size < optmem_max) { 1223 atomic_add(filter_size, &sk->sk_omem_alloc); 1224 return true; 1225 } 1226 return false; 1227 } 1228 1229 bool sk_filter_charge(struct sock *sk, struct sk_filter *fp) 1230 { 1231 if (!refcount_inc_not_zero(&fp->refcnt)) 1232 return false; 1233 1234 if (!__sk_filter_charge(sk, fp)) { 1235 sk_filter_release(fp); 1236 return false; 1237 } 1238 return true; 1239 } 1240 1241 static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp) 1242 { 1243 struct sock_filter *old_prog; 1244 struct bpf_prog *old_fp; 1245 int err, new_len, old_len = fp->len; 1246 bool seen_ld_abs = false; 1247 1248 /* We are free to overwrite insns et al right here as it won't be used at 1249 * this point in time anymore internally after the migration to the eBPF 1250 * instruction representation. 1251 */ 1252 BUILD_BUG_ON(sizeof(struct sock_filter) != 1253 sizeof(struct bpf_insn)); 1254 1255 /* Conversion cannot happen on overlapping memory areas, 1256 * so we need to keep the user BPF around until the 2nd 1257 * pass. At this time, the user BPF is stored in fp->insns. 1258 */ 1259 old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter), 1260 GFP_KERNEL | __GFP_NOWARN); 1261 if (!old_prog) { 1262 err = -ENOMEM; 1263 goto out_err; 1264 } 1265 1266 /* 1st pass: calculate the new program length. */ 1267 err = bpf_convert_filter(old_prog, old_len, NULL, &new_len, 1268 &seen_ld_abs); 1269 if (err) 1270 goto out_err_free; 1271 1272 /* Expand fp for appending the new filter representation. */ 1273 old_fp = fp; 1274 fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0); 1275 if (!fp) { 1276 /* The old_fp is still around in case we couldn't 1277 * allocate new memory, so uncharge on that one. 1278 */ 1279 fp = old_fp; 1280 err = -ENOMEM; 1281 goto out_err_free; 1282 } 1283 1284 fp->len = new_len; 1285 1286 /* 2nd pass: remap sock_filter insns into bpf_insn insns. */ 1287 err = bpf_convert_filter(old_prog, old_len, fp, &new_len, 1288 &seen_ld_abs); 1289 if (err) 1290 /* 2nd bpf_convert_filter() can fail only if it fails 1291 * to allocate memory, remapping must succeed. Note, 1292 * that at this time old_fp has already been released 1293 * by krealloc(). 
1294 */ 1295 goto out_err_free; 1296 1297 fp = bpf_prog_select_runtime(fp, &err); 1298 if (err) 1299 goto out_err_free; 1300 1301 kfree(old_prog); 1302 return fp; 1303 1304 out_err_free: 1305 kfree(old_prog); 1306 out_err: 1307 __bpf_prog_release(fp); 1308 return ERR_PTR(err); 1309 } 1310 1311 static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp, 1312 bpf_aux_classic_check_t trans) 1313 { 1314 int err; 1315 1316 fp->bpf_func = NULL; 1317 fp->jited = 0; 1318 1319 err = bpf_check_classic(fp->insns, fp->len); 1320 if (err) { 1321 __bpf_prog_release(fp); 1322 return ERR_PTR(err); 1323 } 1324 1325 /* There might be additional checks and transformations 1326 * needed on classic filters, f.e. in case of seccomp. 1327 */ 1328 if (trans) { 1329 err = trans(fp->insns, fp->len); 1330 if (err) { 1331 __bpf_prog_release(fp); 1332 return ERR_PTR(err); 1333 } 1334 } 1335 1336 /* Probe if we can JIT compile the filter and if so, do 1337 * the compilation of the filter. 1338 */ 1339 bpf_jit_compile(fp); 1340 1341 /* JIT compiler couldn't process this filter, so do the eBPF translation 1342 * for the optimized interpreter. 1343 */ 1344 if (!fp->jited) 1345 fp = bpf_migrate_filter(fp); 1346 1347 return fp; 1348 } 1349 1350 /** 1351 * bpf_prog_create - create an unattached filter 1352 * @pfp: the unattached filter that is created 1353 * @fprog: the filter program 1354 * 1355 * Create a filter independent of any socket. We first run some 1356 * sanity checks on it to make sure it does not explode on us later. 1357 * If an error occurs or there is insufficient memory for the filter 1358 * a negative errno code is returned. On success the return is zero. 1359 */ 1360 int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog) 1361 { 1362 unsigned int fsize = bpf_classic_proglen(fprog); 1363 struct bpf_prog *fp; 1364 1365 /* Make sure new filter is there and in the right amounts. */ 1366 if (!bpf_check_basics_ok(fprog->filter, fprog->len)) 1367 return -EINVAL; 1368 1369 fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); 1370 if (!fp) 1371 return -ENOMEM; 1372 1373 memcpy(fp->insns, fprog->filter, fsize); 1374 1375 fp->len = fprog->len; 1376 /* Since unattached filters are not copied back to user 1377 * space through sk_get_filter(), we do not need to hold 1378 * a copy here, and can spare us the work. 1379 */ 1380 fp->orig_prog = NULL; 1381 1382 /* bpf_prepare_filter() already takes care of freeing 1383 * memory in case something goes wrong. 1384 */ 1385 fp = bpf_prepare_filter(fp, NULL); 1386 if (IS_ERR(fp)) 1387 return PTR_ERR(fp); 1388 1389 *pfp = fp; 1390 return 0; 1391 } 1392 EXPORT_SYMBOL_GPL(bpf_prog_create); 1393 1394 /** 1395 * bpf_prog_create_from_user - create an unattached filter from user buffer 1396 * @pfp: the unattached filter that is created 1397 * @fprog: the filter program 1398 * @trans: post-classic verifier transformation handler 1399 * @save_orig: save classic BPF program 1400 * 1401 * This function effectively does the same as bpf_prog_create(), only 1402 * that it builds up its insns buffer from user space provided buffer. 1403 * It also allows for passing a bpf_aux_classic_check_t handler. 1404 */ 1405 int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog, 1406 bpf_aux_classic_check_t trans, bool save_orig) 1407 { 1408 unsigned int fsize = bpf_classic_proglen(fprog); 1409 struct bpf_prog *fp; 1410 int err; 1411 1412 /* Make sure new filter is there and in the right amounts. 
*/ 1413 if (!bpf_check_basics_ok(fprog->filter, fprog->len)) 1414 return -EINVAL; 1415 1416 fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); 1417 if (!fp) 1418 return -ENOMEM; 1419 1420 if (copy_from_user(fp->insns, fprog->filter, fsize)) { 1421 __bpf_prog_free(fp); 1422 return -EFAULT; 1423 } 1424 1425 fp->len = fprog->len; 1426 fp->orig_prog = NULL; 1427 1428 if (save_orig) { 1429 err = bpf_prog_store_orig_filter(fp, fprog); 1430 if (err) { 1431 __bpf_prog_free(fp); 1432 return -ENOMEM; 1433 } 1434 } 1435 1436 /* bpf_prepare_filter() already takes care of freeing 1437 * memory in case something goes wrong. 1438 */ 1439 fp = bpf_prepare_filter(fp, trans); 1440 if (IS_ERR(fp)) 1441 return PTR_ERR(fp); 1442 1443 *pfp = fp; 1444 return 0; 1445 } 1446 EXPORT_SYMBOL_GPL(bpf_prog_create_from_user); 1447 1448 void bpf_prog_destroy(struct bpf_prog *fp) 1449 { 1450 __bpf_prog_release(fp); 1451 } 1452 EXPORT_SYMBOL_GPL(bpf_prog_destroy); 1453 1454 static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk) 1455 { 1456 struct sk_filter *fp, *old_fp; 1457 1458 fp = kmalloc(sizeof(*fp), GFP_KERNEL); 1459 if (!fp) 1460 return -ENOMEM; 1461 1462 fp->prog = prog; 1463 1464 if (!__sk_filter_charge(sk, fp)) { 1465 kfree(fp); 1466 return -ENOMEM; 1467 } 1468 refcount_set(&fp->refcnt, 1); 1469 1470 old_fp = rcu_dereference_protected(sk->sk_filter, 1471 lockdep_sock_is_held(sk)); 1472 rcu_assign_pointer(sk->sk_filter, fp); 1473 1474 if (old_fp) 1475 sk_filter_uncharge(sk, old_fp); 1476 1477 return 0; 1478 } 1479 1480 static 1481 struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk) 1482 { 1483 unsigned int fsize = bpf_classic_proglen(fprog); 1484 struct bpf_prog *prog; 1485 int err; 1486 1487 if (sock_flag(sk, SOCK_FILTER_LOCKED)) 1488 return ERR_PTR(-EPERM); 1489 1490 /* Make sure new filter is there and in the right amounts. */ 1491 if (!bpf_check_basics_ok(fprog->filter, fprog->len)) 1492 return ERR_PTR(-EINVAL); 1493 1494 prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); 1495 if (!prog) 1496 return ERR_PTR(-ENOMEM); 1497 1498 if (copy_from_user(prog->insns, fprog->filter, fsize)) { 1499 __bpf_prog_free(prog); 1500 return ERR_PTR(-EFAULT); 1501 } 1502 1503 prog->len = fprog->len; 1504 1505 err = bpf_prog_store_orig_filter(prog, fprog); 1506 if (err) { 1507 __bpf_prog_free(prog); 1508 return ERR_PTR(-ENOMEM); 1509 } 1510 1511 /* bpf_prepare_filter() already takes care of freeing 1512 * memory in case something goes wrong. 1513 */ 1514 return bpf_prepare_filter(prog, NULL); 1515 } 1516 1517 /** 1518 * sk_attach_filter - attach a socket filter 1519 * @fprog: the filter program 1520 * @sk: the socket to use 1521 * 1522 * Attach the user's filter code. We first run some sanity checks on 1523 * it to make sure it does not explode on us later. If an error 1524 * occurs or there is insufficient memory for the filter a negative 1525 * errno code is returned. On success the return is zero. 
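 *
 * Userspace normally reaches this path via setsockopt(SO_ATTACH_FILTER);
 * a minimal sketch (illustrative only, not an API defined in this file):
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0xffff },
 *	};
 *	struct sock_fprog fprog = { .len = 1, .filter = code };
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &fprog, sizeof(fprog));
 *
 * which accepts every packet (returns 0xffff bytes); the kernel copies the
 * program in __get_filter() and lands here.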
1526 */ 1527 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) 1528 { 1529 struct bpf_prog *prog = __get_filter(fprog, sk); 1530 int err; 1531 1532 if (IS_ERR(prog)) 1533 return PTR_ERR(prog); 1534 1535 err = __sk_attach_prog(prog, sk); 1536 if (err < 0) { 1537 __bpf_prog_release(prog); 1538 return err; 1539 } 1540 1541 return 0; 1542 } 1543 EXPORT_SYMBOL_GPL(sk_attach_filter); 1544 1545 int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk) 1546 { 1547 struct bpf_prog *prog = __get_filter(fprog, sk); 1548 int err; 1549 1550 if (IS_ERR(prog)) 1551 return PTR_ERR(prog); 1552 1553 if (bpf_prog_size(prog->len) > READ_ONCE(sysctl_optmem_max)) 1554 err = -ENOMEM; 1555 else 1556 err = reuseport_attach_prog(sk, prog); 1557 1558 if (err) 1559 __bpf_prog_release(prog); 1560 1561 return err; 1562 } 1563 1564 static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk) 1565 { 1566 if (sock_flag(sk, SOCK_FILTER_LOCKED)) 1567 return ERR_PTR(-EPERM); 1568 1569 return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER); 1570 } 1571 1572 int sk_attach_bpf(u32 ufd, struct sock *sk) 1573 { 1574 struct bpf_prog *prog = __get_bpf(ufd, sk); 1575 int err; 1576 1577 if (IS_ERR(prog)) 1578 return PTR_ERR(prog); 1579 1580 err = __sk_attach_prog(prog, sk); 1581 if (err < 0) { 1582 bpf_prog_put(prog); 1583 return err; 1584 } 1585 1586 return 0; 1587 } 1588 1589 int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk) 1590 { 1591 struct bpf_prog *prog; 1592 int err; 1593 1594 if (sock_flag(sk, SOCK_FILTER_LOCKED)) 1595 return -EPERM; 1596 1597 prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER); 1598 if (PTR_ERR(prog) == -EINVAL) 1599 prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SK_REUSEPORT); 1600 if (IS_ERR(prog)) 1601 return PTR_ERR(prog); 1602 1603 if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) { 1604 /* Like other non BPF_PROG_TYPE_SOCKET_FILTER 1605 * bpf prog (e.g. sockmap). It depends on the 1606 * limitation imposed by bpf_prog_load(). 1607 * Hence, sysctl_optmem_max is not checked. 
1608 */ 1609 if ((sk->sk_type != SOCK_STREAM && 1610 sk->sk_type != SOCK_DGRAM) || 1611 (sk->sk_protocol != IPPROTO_UDP && 1612 sk->sk_protocol != IPPROTO_TCP) || 1613 (sk->sk_family != AF_INET && 1614 sk->sk_family != AF_INET6)) { 1615 err = -ENOTSUPP; 1616 goto err_prog_put; 1617 } 1618 } else { 1619 /* BPF_PROG_TYPE_SOCKET_FILTER */ 1620 if (bpf_prog_size(prog->len) > READ_ONCE(sysctl_optmem_max)) { 1621 err = -ENOMEM; 1622 goto err_prog_put; 1623 } 1624 } 1625 1626 err = reuseport_attach_prog(sk, prog); 1627 err_prog_put: 1628 if (err) 1629 bpf_prog_put(prog); 1630 1631 return err; 1632 } 1633 1634 void sk_reuseport_prog_free(struct bpf_prog *prog) 1635 { 1636 if (!prog) 1637 return; 1638 1639 if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) 1640 bpf_prog_put(prog); 1641 else 1642 bpf_prog_destroy(prog); 1643 } 1644 1645 struct bpf_scratchpad { 1646 union { 1647 __be32 diff[MAX_BPF_STACK / sizeof(__be32)]; 1648 u8 buff[MAX_BPF_STACK]; 1649 }; 1650 }; 1651 1652 static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp); 1653 1654 static inline int __bpf_try_make_writable(struct sk_buff *skb, 1655 unsigned int write_len) 1656 { 1657 return skb_ensure_writable(skb, write_len); 1658 } 1659 1660 static inline int bpf_try_make_writable(struct sk_buff *skb, 1661 unsigned int write_len) 1662 { 1663 int err = __bpf_try_make_writable(skb, write_len); 1664 1665 bpf_compute_data_pointers(skb); 1666 return err; 1667 } 1668 1669 static int bpf_try_make_head_writable(struct sk_buff *skb) 1670 { 1671 return bpf_try_make_writable(skb, skb_headlen(skb)); 1672 } 1673 1674 static inline void bpf_push_mac_rcsum(struct sk_buff *skb) 1675 { 1676 if (skb_at_tc_ingress(skb)) 1677 skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len); 1678 } 1679 1680 static inline void bpf_pull_mac_rcsum(struct sk_buff *skb) 1681 { 1682 if (skb_at_tc_ingress(skb)) 1683 skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len); 1684 } 1685 1686 BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset, 1687 const void *, from, u32, len, u64, flags) 1688 { 1689 void *ptr; 1690 1691 if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH))) 1692 return -EINVAL; 1693 if (unlikely(offset > INT_MAX)) 1694 return -EFAULT; 1695 if (unlikely(bpf_try_make_writable(skb, offset + len))) 1696 return -EFAULT; 1697 1698 ptr = skb->data + offset; 1699 if (flags & BPF_F_RECOMPUTE_CSUM) 1700 __skb_postpull_rcsum(skb, ptr, len, offset); 1701 1702 memcpy(ptr, from, len); 1703 1704 if (flags & BPF_F_RECOMPUTE_CSUM) 1705 __skb_postpush_rcsum(skb, ptr, len, offset); 1706 if (flags & BPF_F_INVALIDATE_HASH) 1707 skb_clear_hash(skb); 1708 1709 return 0; 1710 } 1711 1712 static const struct bpf_func_proto bpf_skb_store_bytes_proto = { 1713 .func = bpf_skb_store_bytes, 1714 .gpl_only = false, 1715 .ret_type = RET_INTEGER, 1716 .arg1_type = ARG_PTR_TO_CTX, 1717 .arg2_type = ARG_ANYTHING, 1718 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, 1719 .arg4_type = ARG_CONST_SIZE, 1720 .arg5_type = ARG_ANYTHING, 1721 }; 1722 1723 BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset, 1724 void *, to, u32, len) 1725 { 1726 void *ptr; 1727 1728 if (unlikely(offset > INT_MAX)) 1729 goto err_clear; 1730 1731 ptr = skb_header_pointer(skb, offset, len, to); 1732 if (unlikely(!ptr)) 1733 goto err_clear; 1734 if (ptr != to) 1735 memcpy(to, ptr, len); 1736 1737 return 0; 1738 err_clear: 1739 memset(to, 0, len); 1740 return -EFAULT; 1741 } 1742 1743 static const struct bpf_func_proto bpf_skb_load_bytes_proto = { 1744 .func = 
bpf_skb_load_bytes, 1745 .gpl_only = false, 1746 .ret_type = RET_INTEGER, 1747 .arg1_type = ARG_PTR_TO_CTX, 1748 .arg2_type = ARG_ANYTHING, 1749 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 1750 .arg4_type = ARG_CONST_SIZE, 1751 }; 1752 1753 BPF_CALL_4(bpf_flow_dissector_load_bytes, 1754 const struct bpf_flow_dissector *, ctx, u32, offset, 1755 void *, to, u32, len) 1756 { 1757 void *ptr; 1758 1759 if (unlikely(offset > 0xffff)) 1760 goto err_clear; 1761 1762 if (unlikely(!ctx->skb)) 1763 goto err_clear; 1764 1765 ptr = skb_header_pointer(ctx->skb, offset, len, to); 1766 if (unlikely(!ptr)) 1767 goto err_clear; 1768 if (ptr != to) 1769 memcpy(to, ptr, len); 1770 1771 return 0; 1772 err_clear: 1773 memset(to, 0, len); 1774 return -EFAULT; 1775 } 1776 1777 static const struct bpf_func_proto bpf_flow_dissector_load_bytes_proto = { 1778 .func = bpf_flow_dissector_load_bytes, 1779 .gpl_only = false, 1780 .ret_type = RET_INTEGER, 1781 .arg1_type = ARG_PTR_TO_CTX, 1782 .arg2_type = ARG_ANYTHING, 1783 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 1784 .arg4_type = ARG_CONST_SIZE, 1785 }; 1786 1787 BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb, 1788 u32, offset, void *, to, u32, len, u32, start_header) 1789 { 1790 u8 *end = skb_tail_pointer(skb); 1791 u8 *start, *ptr; 1792 1793 if (unlikely(offset > 0xffff)) 1794 goto err_clear; 1795 1796 switch (start_header) { 1797 case BPF_HDR_START_MAC: 1798 if (unlikely(!skb_mac_header_was_set(skb))) 1799 goto err_clear; 1800 start = skb_mac_header(skb); 1801 break; 1802 case BPF_HDR_START_NET: 1803 start = skb_network_header(skb); 1804 break; 1805 default: 1806 goto err_clear; 1807 } 1808 1809 ptr = start + offset; 1810 1811 if (likely(ptr + len <= end)) { 1812 memcpy(to, ptr, len); 1813 return 0; 1814 } 1815 1816 err_clear: 1817 memset(to, 0, len); 1818 return -EFAULT; 1819 } 1820 1821 static const struct bpf_func_proto bpf_skb_load_bytes_relative_proto = { 1822 .func = bpf_skb_load_bytes_relative, 1823 .gpl_only = false, 1824 .ret_type = RET_INTEGER, 1825 .arg1_type = ARG_PTR_TO_CTX, 1826 .arg2_type = ARG_ANYTHING, 1827 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 1828 .arg4_type = ARG_CONST_SIZE, 1829 .arg5_type = ARG_ANYTHING, 1830 }; 1831 1832 BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len) 1833 { 1834 /* Idea is the following: should the needed direct read/write 1835 * test fail during runtime, we can pull in more data and redo 1836 * again, since implicitly, we invalidate previous checks here. 1837 * 1838 * Or, since we know how much we need to make read/writeable, 1839 * this can be done once at the program beginning for direct 1840 * access case. By this we overcome limitations of only current 1841 * headroom being accessible. 1842 */ 1843 return bpf_try_make_writable(skb, len ? : skb_headlen(skb)); 1844 } 1845 1846 static const struct bpf_func_proto bpf_skb_pull_data_proto = { 1847 .func = bpf_skb_pull_data, 1848 .gpl_only = false, 1849 .ret_type = RET_INTEGER, 1850 .arg1_type = ARG_PTR_TO_CTX, 1851 .arg2_type = ARG_ANYTHING, 1852 }; 1853 1854 BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk) 1855 { 1856 return sk_fullsock(sk) ? 
(unsigned long)sk : (unsigned long)NULL; 1857 } 1858 1859 static const struct bpf_func_proto bpf_sk_fullsock_proto = { 1860 .func = bpf_sk_fullsock, 1861 .gpl_only = false, 1862 .ret_type = RET_PTR_TO_SOCKET_OR_NULL, 1863 .arg1_type = ARG_PTR_TO_SOCK_COMMON, 1864 }; 1865 1866 static inline int sk_skb_try_make_writable(struct sk_buff *skb, 1867 unsigned int write_len) 1868 { 1869 return __bpf_try_make_writable(skb, write_len); 1870 } 1871 1872 BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len) 1873 { 1874 /* Idea is the following: should the needed direct read/write 1875 * test fail during runtime, we can pull in more data and redo 1876 * again, since implicitly, we invalidate previous checks here. 1877 * 1878 * Or, since we know how much we need to make read/writeable, 1879 * this can be done once at the program beginning for direct 1880 * access case. By this we overcome limitations of only current 1881 * headroom being accessible. 1882 */ 1883 return sk_skb_try_make_writable(skb, len ? : skb_headlen(skb)); 1884 } 1885 1886 static const struct bpf_func_proto sk_skb_pull_data_proto = { 1887 .func = sk_skb_pull_data, 1888 .gpl_only = false, 1889 .ret_type = RET_INTEGER, 1890 .arg1_type = ARG_PTR_TO_CTX, 1891 .arg2_type = ARG_ANYTHING, 1892 }; 1893 1894 BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset, 1895 u64, from, u64, to, u64, flags) 1896 { 1897 __sum16 *ptr; 1898 1899 if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK))) 1900 return -EINVAL; 1901 if (unlikely(offset > 0xffff || offset & 1)) 1902 return -EFAULT; 1903 if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr)))) 1904 return -EFAULT; 1905 1906 ptr = (__sum16 *)(skb->data + offset); 1907 switch (flags & BPF_F_HDR_FIELD_MASK) { 1908 case 0: 1909 if (unlikely(from != 0)) 1910 return -EINVAL; 1911 1912 csum_replace_by_diff(ptr, to); 1913 break; 1914 case 2: 1915 csum_replace2(ptr, from, to); 1916 break; 1917 case 4: 1918 csum_replace4(ptr, from, to); 1919 break; 1920 default: 1921 return -EINVAL; 1922 } 1923 1924 return 0; 1925 } 1926 1927 static const struct bpf_func_proto bpf_l3_csum_replace_proto = { 1928 .func = bpf_l3_csum_replace, 1929 .gpl_only = false, 1930 .ret_type = RET_INTEGER, 1931 .arg1_type = ARG_PTR_TO_CTX, 1932 .arg2_type = ARG_ANYTHING, 1933 .arg3_type = ARG_ANYTHING, 1934 .arg4_type = ARG_ANYTHING, 1935 .arg5_type = ARG_ANYTHING, 1936 }; 1937 1938 BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset, 1939 u64, from, u64, to, u64, flags) 1940 { 1941 bool is_pseudo = flags & BPF_F_PSEUDO_HDR; 1942 bool is_mmzero = flags & BPF_F_MARK_MANGLED_0; 1943 bool do_mforce = flags & BPF_F_MARK_ENFORCE; 1944 __sum16 *ptr; 1945 1946 if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE | 1947 BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK))) 1948 return -EINVAL; 1949 if (unlikely(offset > 0xffff || offset & 1)) 1950 return -EFAULT; 1951 if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr)))) 1952 return -EFAULT; 1953 1954 ptr = (__sum16 *)(skb->data + offset); 1955 if (is_mmzero && !do_mforce && !*ptr) 1956 return 0; 1957 1958 switch (flags & BPF_F_HDR_FIELD_MASK) { 1959 case 0: 1960 if (unlikely(from != 0)) 1961 return -EINVAL; 1962 1963 inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo); 1964 break; 1965 case 2: 1966 inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo); 1967 break; 1968 case 4: 1969 inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo); 1970 break; 1971 default: 1972 return -EINVAL; 1973 } 1974 1975 if (is_mmzero && !*ptr) 1976 *ptr = 
CSUM_MANGLED_0; 1977 return 0; 1978 } 1979 1980 static const struct bpf_func_proto bpf_l4_csum_replace_proto = { 1981 .func = bpf_l4_csum_replace, 1982 .gpl_only = false, 1983 .ret_type = RET_INTEGER, 1984 .arg1_type = ARG_PTR_TO_CTX, 1985 .arg2_type = ARG_ANYTHING, 1986 .arg3_type = ARG_ANYTHING, 1987 .arg4_type = ARG_ANYTHING, 1988 .arg5_type = ARG_ANYTHING, 1989 }; 1990 1991 BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size, 1992 __be32 *, to, u32, to_size, __wsum, seed) 1993 { 1994 struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp); 1995 u32 diff_size = from_size + to_size; 1996 int i, j = 0; 1997 1998 /* This is quite flexible, some examples: 1999 * 2000 * from_size == 0, to_size > 0, seed := csum --> pushing data 2001 * from_size > 0, to_size == 0, seed := csum --> pulling data 2002 * from_size > 0, to_size > 0, seed := 0 --> diffing data 2003 * 2004 * Even for diffing, from_size and to_size don't need to be equal. 2005 */ 2006 if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) || 2007 diff_size > sizeof(sp->diff))) 2008 return -EINVAL; 2009 2010 for (i = 0; i < from_size / sizeof(__be32); i++, j++) 2011 sp->diff[j] = ~from[i]; 2012 for (i = 0; i < to_size / sizeof(__be32); i++, j++) 2013 sp->diff[j] = to[i]; 2014 2015 return csum_partial(sp->diff, diff_size, seed); 2016 } 2017 2018 static const struct bpf_func_proto bpf_csum_diff_proto = { 2019 .func = bpf_csum_diff, 2020 .gpl_only = false, 2021 .pkt_access = true, 2022 .ret_type = RET_INTEGER, 2023 .arg1_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, 2024 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 2025 .arg3_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, 2026 .arg4_type = ARG_CONST_SIZE_OR_ZERO, 2027 .arg5_type = ARG_ANYTHING, 2028 }; 2029 2030 BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum) 2031 { 2032 /* The interface is to be used in combination with bpf_csum_diff() 2033 * for direct packet writes. csum rotation for alignment as well 2034 * as emulating csum_sub() can be done from the eBPF program. 2035 */ 2036 if (skb->ip_summed == CHECKSUM_COMPLETE) 2037 return (skb->csum = csum_add(skb->csum, csum)); 2038 2039 return -ENOTSUPP; 2040 } 2041 2042 static const struct bpf_func_proto bpf_csum_update_proto = { 2043 .func = bpf_csum_update, 2044 .gpl_only = false, 2045 .ret_type = RET_INTEGER, 2046 .arg1_type = ARG_PTR_TO_CTX, 2047 .arg2_type = ARG_ANYTHING, 2048 }; 2049 2050 BPF_CALL_2(bpf_csum_level, struct sk_buff *, skb, u64, level) 2051 { 2052 /* The interface is to be used in combination with bpf_skb_adjust_room() 2053 * for encap/decap of packet headers when BPF_F_ADJ_ROOM_NO_CSUM_RESET 2054 * is passed as flags, for example. 2055 */ 2056 switch (level) { 2057 case BPF_CSUM_LEVEL_INC: 2058 __skb_incr_checksum_unnecessary(skb); 2059 break; 2060 case BPF_CSUM_LEVEL_DEC: 2061 __skb_decr_checksum_unnecessary(skb); 2062 break; 2063 case BPF_CSUM_LEVEL_RESET: 2064 __skb_reset_checksum_unnecessary(skb); 2065 break; 2066 case BPF_CSUM_LEVEL_QUERY: 2067 return skb->ip_summed == CHECKSUM_UNNECESSARY ? 
2068 skb->csum_level : -EACCES; 2069 default: 2070 return -EINVAL; 2071 } 2072 2073 return 0; 2074 } 2075 2076 static const struct bpf_func_proto bpf_csum_level_proto = { 2077 .func = bpf_csum_level, 2078 .gpl_only = false, 2079 .ret_type = RET_INTEGER, 2080 .arg1_type = ARG_PTR_TO_CTX, 2081 .arg2_type = ARG_ANYTHING, 2082 }; 2083 2084 static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb) 2085 { 2086 return dev_forward_skb_nomtu(dev, skb); 2087 } 2088 2089 static inline int __bpf_rx_skb_no_mac(struct net_device *dev, 2090 struct sk_buff *skb) 2091 { 2092 int ret = ____dev_forward_skb(dev, skb, false); 2093 2094 if (likely(!ret)) { 2095 skb->dev = dev; 2096 ret = netif_rx(skb); 2097 } 2098 2099 return ret; 2100 } 2101 2102 static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) 2103 { 2104 int ret; 2105 2106 if (dev_xmit_recursion()) { 2107 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n"); 2108 kfree_skb(skb); 2109 return -ENETDOWN; 2110 } 2111 2112 skb->dev = dev; 2113 skb_clear_tstamp(skb); 2114 2115 dev_xmit_recursion_inc(); 2116 ret = dev_queue_xmit(skb); 2117 dev_xmit_recursion_dec(); 2118 2119 return ret; 2120 } 2121 2122 static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, 2123 u32 flags) 2124 { 2125 unsigned int mlen = skb_network_offset(skb); 2126 2127 if (mlen) { 2128 __skb_pull(skb, mlen); 2129 2130 /* At ingress, the mac header has already been pulled once. 2131 * At egress, skb_postpull_rcsum has to be done in case that 2132 * the skb originated from ingress (i.e. a forwarded skb) 2133 * to ensure that rcsum starts at net header. 2134 */ 2135 if (!skb_at_tc_ingress(skb)) 2136 skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); 2137 } 2138 skb_pop_mac_header(skb); 2139 skb_reset_mac_len(skb); 2140 return flags & BPF_F_INGRESS ? 2141 __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb); 2142 } 2143 2144 static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev, 2145 u32 flags) 2146 { 2147 /* Verify that a link layer header is carried */ 2148 if (unlikely(skb->mac_header >= skb->network_header)) { 2149 kfree_skb(skb); 2150 return -ERANGE; 2151 } 2152 2153 bpf_push_mac_rcsum(skb); 2154 return flags & BPF_F_INGRESS ?
2155 __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); 2156 } 2157 2158 static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev, 2159 u32 flags) 2160 { 2161 if (dev_is_mac_header_xmit(dev)) 2162 return __bpf_redirect_common(skb, dev, flags); 2163 else 2164 return __bpf_redirect_no_mac(skb, dev, flags); 2165 } 2166 2167 #if IS_ENABLED(CONFIG_IPV6) 2168 static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb, 2169 struct net_device *dev, struct bpf_nh_params *nh) 2170 { 2171 u32 hh_len = LL_RESERVED_SPACE(dev); 2172 const struct in6_addr *nexthop; 2173 struct dst_entry *dst = NULL; 2174 struct neighbour *neigh; 2175 2176 if (dev_xmit_recursion()) { 2177 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n"); 2178 goto out_drop; 2179 } 2180 2181 skb->dev = dev; 2182 skb_clear_tstamp(skb); 2183 2184 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { 2185 skb = skb_expand_head(skb, hh_len); 2186 if (!skb) 2187 return -ENOMEM; 2188 } 2189 2190 rcu_read_lock_bh(); 2191 if (!nh) { 2192 dst = skb_dst(skb); 2193 nexthop = rt6_nexthop(container_of(dst, struct rt6_info, dst), 2194 &ipv6_hdr(skb)->daddr); 2195 } else { 2196 nexthop = &nh->ipv6_nh; 2197 } 2198 neigh = ip_neigh_gw6(dev, nexthop); 2199 if (likely(!IS_ERR(neigh))) { 2200 int ret; 2201 2202 sock_confirm_neigh(skb, neigh); 2203 dev_xmit_recursion_inc(); 2204 ret = neigh_output(neigh, skb, false); 2205 dev_xmit_recursion_dec(); 2206 rcu_read_unlock_bh(); 2207 return ret; 2208 } 2209 rcu_read_unlock_bh(); 2210 if (dst) 2211 IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); 2212 out_drop: 2213 kfree_skb(skb); 2214 return -ENETDOWN; 2215 } 2216 2217 static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev, 2218 struct bpf_nh_params *nh) 2219 { 2220 const struct ipv6hdr *ip6h = ipv6_hdr(skb); 2221 struct net *net = dev_net(dev); 2222 int err, ret = NET_XMIT_DROP; 2223 2224 if (!nh) { 2225 struct dst_entry *dst; 2226 struct flowi6 fl6 = { 2227 .flowi6_flags = FLOWI_FLAG_ANYSRC, 2228 .flowi6_mark = skb->mark, 2229 .flowlabel = ip6_flowinfo(ip6h), 2230 .flowi6_oif = dev->ifindex, 2231 .flowi6_proto = ip6h->nexthdr, 2232 .daddr = ip6h->daddr, 2233 .saddr = ip6h->saddr, 2234 }; 2235 2236 dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL); 2237 if (IS_ERR(dst)) 2238 goto out_drop; 2239 2240 skb_dst_set(skb, dst); 2241 } else if (nh->nh_family != AF_INET6) { 2242 goto out_drop; 2243 } 2244 2245 err = bpf_out_neigh_v6(net, skb, dev, nh); 2246 if (unlikely(net_xmit_eval(err))) 2247 dev->stats.tx_errors++; 2248 else 2249 ret = NET_XMIT_SUCCESS; 2250 goto out_xmit; 2251 out_drop: 2252 dev->stats.tx_errors++; 2253 kfree_skb(skb); 2254 out_xmit: 2255 return ret; 2256 } 2257 #else 2258 static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev, 2259 struct bpf_nh_params *nh) 2260 { 2261 kfree_skb(skb); 2262 return NET_XMIT_DROP; 2263 } 2264 #endif /* CONFIG_IPV6 */ 2265 2266 #if IS_ENABLED(CONFIG_INET) 2267 static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb, 2268 struct net_device *dev, struct bpf_nh_params *nh) 2269 { 2270 u32 hh_len = LL_RESERVED_SPACE(dev); 2271 struct neighbour *neigh; 2272 bool is_v6gw = false; 2273 2274 if (dev_xmit_recursion()) { 2275 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n"); 2276 goto out_drop; 2277 } 2278 2279 skb->dev = dev; 2280 skb_clear_tstamp(skb); 2281 2282 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { 2283 skb = 
skb_expand_head(skb, hh_len); 2284 if (!skb) 2285 return -ENOMEM; 2286 } 2287 2288 rcu_read_lock_bh(); 2289 if (!nh) { 2290 struct dst_entry *dst = skb_dst(skb); 2291 struct rtable *rt = container_of(dst, struct rtable, dst); 2292 2293 neigh = ip_neigh_for_gw(rt, skb, &is_v6gw); 2294 } else if (nh->nh_family == AF_INET6) { 2295 neigh = ip_neigh_gw6(dev, &nh->ipv6_nh); 2296 is_v6gw = true; 2297 } else if (nh->nh_family == AF_INET) { 2298 neigh = ip_neigh_gw4(dev, nh->ipv4_nh); 2299 } else { 2300 rcu_read_unlock_bh(); 2301 goto out_drop; 2302 } 2303 2304 if (likely(!IS_ERR(neigh))) { 2305 int ret; 2306 2307 sock_confirm_neigh(skb, neigh); 2308 dev_xmit_recursion_inc(); 2309 ret = neigh_output(neigh, skb, is_v6gw); 2310 dev_xmit_recursion_dec(); 2311 rcu_read_unlock_bh(); 2312 return ret; 2313 } 2314 rcu_read_unlock_bh(); 2315 out_drop: 2316 kfree_skb(skb); 2317 return -ENETDOWN; 2318 } 2319 2320 static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev, 2321 struct bpf_nh_params *nh) 2322 { 2323 const struct iphdr *ip4h = ip_hdr(skb); 2324 struct net *net = dev_net(dev); 2325 int err, ret = NET_XMIT_DROP; 2326 2327 if (!nh) { 2328 struct flowi4 fl4 = { 2329 .flowi4_flags = FLOWI_FLAG_ANYSRC, 2330 .flowi4_mark = skb->mark, 2331 .flowi4_tos = RT_TOS(ip4h->tos), 2332 .flowi4_oif = dev->ifindex, 2333 .flowi4_proto = ip4h->protocol, 2334 .daddr = ip4h->daddr, 2335 .saddr = ip4h->saddr, 2336 }; 2337 struct rtable *rt; 2338 2339 rt = ip_route_output_flow(net, &fl4, NULL); 2340 if (IS_ERR(rt)) 2341 goto out_drop; 2342 if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) { 2343 ip_rt_put(rt); 2344 goto out_drop; 2345 } 2346 2347 skb_dst_set(skb, &rt->dst); 2348 } 2349 2350 err = bpf_out_neigh_v4(net, skb, dev, nh); 2351 if (unlikely(net_xmit_eval(err))) 2352 dev->stats.tx_errors++; 2353 else 2354 ret = NET_XMIT_SUCCESS; 2355 goto out_xmit; 2356 out_drop: 2357 dev->stats.tx_errors++; 2358 kfree_skb(skb); 2359 out_xmit: 2360 return ret; 2361 } 2362 #else 2363 static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev, 2364 struct bpf_nh_params *nh) 2365 { 2366 kfree_skb(skb); 2367 return NET_XMIT_DROP; 2368 } 2369 #endif /* CONFIG_INET */ 2370 2371 static int __bpf_redirect_neigh(struct sk_buff *skb, struct net_device *dev, 2372 struct bpf_nh_params *nh) 2373 { 2374 struct ethhdr *ethh = eth_hdr(skb); 2375 2376 if (unlikely(skb->mac_header >= skb->network_header)) 2377 goto out; 2378 bpf_push_mac_rcsum(skb); 2379 if (is_multicast_ether_addr(ethh->h_dest)) 2380 goto out; 2381 2382 skb_pull(skb, sizeof(*ethh)); 2383 skb_unset_mac_header(skb); 2384 skb_reset_network_header(skb); 2385 2386 if (skb->protocol == htons(ETH_P_IP)) 2387 return __bpf_redirect_neigh_v4(skb, dev, nh); 2388 else if (skb->protocol == htons(ETH_P_IPV6)) 2389 return __bpf_redirect_neigh_v6(skb, dev, nh); 2390 out: 2391 kfree_skb(skb); 2392 return -ENOTSUPP; 2393 } 2394 2395 /* Internal, non-exposed redirect flags. 
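* These values are not part of the uapi; they live in bits above
* BPF_F_INGRESS and are rejected by the flag checks in bpf_clone_redirect()
* and bpf_redirect() below, so a program can never set them directly.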
*/ 2396 enum { 2397 BPF_F_NEIGH = (1ULL << 1), 2398 BPF_F_PEER = (1ULL << 2), 2399 BPF_F_NEXTHOP = (1ULL << 3), 2400 #define BPF_F_REDIRECT_INTERNAL (BPF_F_NEIGH | BPF_F_PEER | BPF_F_NEXTHOP) 2401 }; 2402 2403 BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags) 2404 { 2405 struct net_device *dev; 2406 struct sk_buff *clone; 2407 int ret; 2408 2409 if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL))) 2410 return -EINVAL; 2411 2412 dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex); 2413 if (unlikely(!dev)) 2414 return -EINVAL; 2415 2416 clone = skb_clone(skb, GFP_ATOMIC); 2417 if (unlikely(!clone)) 2418 return -ENOMEM; 2419 2420 /* For direct write, we need to keep the invariant that the skbs 2421 * we're dealing with need to be uncloned. Should uncloning fail 2422 * here, we need to free the just generated clone to unclone once 2423 * again. 2424 */ 2425 ret = bpf_try_make_head_writable(skb); 2426 if (unlikely(ret)) { 2427 kfree_skb(clone); 2428 return -ENOMEM; 2429 } 2430 2431 return __bpf_redirect(clone, dev, flags); 2432 } 2433 2434 static const struct bpf_func_proto bpf_clone_redirect_proto = { 2435 .func = bpf_clone_redirect, 2436 .gpl_only = false, 2437 .ret_type = RET_INTEGER, 2438 .arg1_type = ARG_PTR_TO_CTX, 2439 .arg2_type = ARG_ANYTHING, 2440 .arg3_type = ARG_ANYTHING, 2441 }; 2442 2443 DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info); 2444 EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info); 2445 2446 int skb_do_redirect(struct sk_buff *skb) 2447 { 2448 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 2449 struct net *net = dev_net(skb->dev); 2450 struct net_device *dev; 2451 u32 flags = ri->flags; 2452 2453 dev = dev_get_by_index_rcu(net, ri->tgt_index); 2454 ri->tgt_index = 0; 2455 ri->flags = 0; 2456 if (unlikely(!dev)) 2457 goto out_drop; 2458 if (flags & BPF_F_PEER) { 2459 const struct net_device_ops *ops = dev->netdev_ops; 2460 2461 if (unlikely(!ops->ndo_get_peer_dev || 2462 !skb_at_tc_ingress(skb))) 2463 goto out_drop; 2464 dev = ops->ndo_get_peer_dev(dev); 2465 if (unlikely(!dev || 2466 !(dev->flags & IFF_UP) || 2467 net_eq(net, dev_net(dev)))) 2468 goto out_drop; 2469 skb->dev = dev; 2470 return -EAGAIN; 2471 } 2472 return flags & BPF_F_NEIGH ? 2473 __bpf_redirect_neigh(skb, dev, flags & BPF_F_NEXTHOP ? 
2474 &ri->nh : NULL) : 2475 __bpf_redirect(skb, dev, flags); 2476 out_drop: 2477 kfree_skb(skb); 2478 return -EINVAL; 2479 } 2480 2481 BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags) 2482 { 2483 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 2484 2485 if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL))) 2486 return TC_ACT_SHOT; 2487 2488 ri->flags = flags; 2489 ri->tgt_index = ifindex; 2490 2491 return TC_ACT_REDIRECT; 2492 } 2493 2494 static const struct bpf_func_proto bpf_redirect_proto = { 2495 .func = bpf_redirect, 2496 .gpl_only = false, 2497 .ret_type = RET_INTEGER, 2498 .arg1_type = ARG_ANYTHING, 2499 .arg2_type = ARG_ANYTHING, 2500 }; 2501 2502 BPF_CALL_2(bpf_redirect_peer, u32, ifindex, u64, flags) 2503 { 2504 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 2505 2506 if (unlikely(flags)) 2507 return TC_ACT_SHOT; 2508 2509 ri->flags = BPF_F_PEER; 2510 ri->tgt_index = ifindex; 2511 2512 return TC_ACT_REDIRECT; 2513 } 2514 2515 static const struct bpf_func_proto bpf_redirect_peer_proto = { 2516 .func = bpf_redirect_peer, 2517 .gpl_only = false, 2518 .ret_type = RET_INTEGER, 2519 .arg1_type = ARG_ANYTHING, 2520 .arg2_type = ARG_ANYTHING, 2521 }; 2522 2523 BPF_CALL_4(bpf_redirect_neigh, u32, ifindex, struct bpf_redir_neigh *, params, 2524 int, plen, u64, flags) 2525 { 2526 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 2527 2528 if (unlikely((plen && plen < sizeof(*params)) || flags)) 2529 return TC_ACT_SHOT; 2530 2531 ri->flags = BPF_F_NEIGH | (plen ? BPF_F_NEXTHOP : 0); 2532 ri->tgt_index = ifindex; 2533 2534 BUILD_BUG_ON(sizeof(struct bpf_redir_neigh) != sizeof(struct bpf_nh_params)); 2535 if (plen) 2536 memcpy(&ri->nh, params, sizeof(ri->nh)); 2537 2538 return TC_ACT_REDIRECT; 2539 } 2540 2541 static const struct bpf_func_proto bpf_redirect_neigh_proto = { 2542 .func = bpf_redirect_neigh, 2543 .gpl_only = false, 2544 .ret_type = RET_INTEGER, 2545 .arg1_type = ARG_ANYTHING, 2546 .arg2_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, 2547 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 2548 .arg4_type = ARG_ANYTHING, 2549 }; 2550 2551 BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg *, msg, u32, bytes) 2552 { 2553 msg->apply_bytes = bytes; 2554 return 0; 2555 } 2556 2557 static const struct bpf_func_proto bpf_msg_apply_bytes_proto = { 2558 .func = bpf_msg_apply_bytes, 2559 .gpl_only = false, 2560 .ret_type = RET_INTEGER, 2561 .arg1_type = ARG_PTR_TO_CTX, 2562 .arg2_type = ARG_ANYTHING, 2563 }; 2564 2565 BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes) 2566 { 2567 msg->cork_bytes = bytes; 2568 return 0; 2569 } 2570 2571 static const struct bpf_func_proto bpf_msg_cork_bytes_proto = { 2572 .func = bpf_msg_cork_bytes, 2573 .gpl_only = false, 2574 .ret_type = RET_INTEGER, 2575 .arg1_type = ARG_PTR_TO_CTX, 2576 .arg2_type = ARG_ANYTHING, 2577 }; 2578 2579 BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start, 2580 u32, end, u64, flags) 2581 { 2582 u32 len = 0, offset = 0, copy = 0, poffset = 0, bytes = end - start; 2583 u32 first_sge, last_sge, i, shift, bytes_sg_total; 2584 struct scatterlist *sge; 2585 u8 *raw, *to, *from; 2586 struct page *page; 2587 2588 if (unlikely(flags || end <= start)) 2589 return -EINVAL; 2590 2591 /* First find the starting scatterlist element */ 2592 i = msg->sg.start; 2593 do { 2594 offset += len; 2595 len = sk_msg_elem(msg, i)->length; 2596 if (start < offset + len) 2597 break; 2598 sk_msg_iter_var_next(i); 2599 } while (i != msg->sg.end); 2600 2601 if (unlikely(start >= 
offset + len)) 2602 return -EINVAL; 2603 2604 first_sge = i; 2605 /* The start may point into the sg element so we need to also 2606 * account for the headroom. 2607 */ 2608 bytes_sg_total = start - offset + bytes; 2609 if (!test_bit(i, msg->sg.copy) && bytes_sg_total <= len) 2610 goto out; 2611 2612 /* At this point we need to linearize multiple scatterlist 2613 * elements or a single shared page. Either way we need to 2614 * copy into a linear buffer exclusively owned by BPF. Then 2615 * place the buffer in the scatterlist and fixup the original 2616 * entries by removing the entries now in the linear buffer 2617 * and shifting the remaining entries. For now we do not try 2618 * to copy partial entries to avoid complexity of running out 2619 * of sg_entry slots. The downside is reading a single byte 2620 * will copy the entire sg entry. 2621 */ 2622 do { 2623 copy += sk_msg_elem(msg, i)->length; 2624 sk_msg_iter_var_next(i); 2625 if (bytes_sg_total <= copy) 2626 break; 2627 } while (i != msg->sg.end); 2628 last_sge = i; 2629 2630 if (unlikely(bytes_sg_total > copy)) 2631 return -EINVAL; 2632 2633 page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP, 2634 get_order(copy)); 2635 if (unlikely(!page)) 2636 return -ENOMEM; 2637 2638 raw = page_address(page); 2639 i = first_sge; 2640 do { 2641 sge = sk_msg_elem(msg, i); 2642 from = sg_virt(sge); 2643 len = sge->length; 2644 to = raw + poffset; 2645 2646 memcpy(to, from, len); 2647 poffset += len; 2648 sge->length = 0; 2649 put_page(sg_page(sge)); 2650 2651 sk_msg_iter_var_next(i); 2652 } while (i != last_sge); 2653 2654 sg_set_page(&msg->sg.data[first_sge], page, copy, 0); 2655 2656 /* To repair sg ring we need to shift entries. If we only 2657 * had a single entry though we can just replace it and 2658 * be done. Otherwise walk the ring and shift the entries. 2659 */ 2660 WARN_ON_ONCE(last_sge == first_sge); 2661 shift = last_sge > first_sge ? 2662 last_sge - first_sge - 1 : 2663 NR_MSG_FRAG_IDS - first_sge + last_sge - 1; 2664 if (!shift) 2665 goto out; 2666 2667 i = first_sge; 2668 sk_msg_iter_var_next(i); 2669 do { 2670 u32 move_from; 2671 2672 if (i + shift >= NR_MSG_FRAG_IDS) 2673 move_from = i + shift - NR_MSG_FRAG_IDS; 2674 else 2675 move_from = i + shift; 2676 if (move_from == msg->sg.end) 2677 break; 2678 2679 msg->sg.data[i] = msg->sg.data[move_from]; 2680 msg->sg.data[move_from].length = 0; 2681 msg->sg.data[move_from].page_link = 0; 2682 msg->sg.data[move_from].offset = 0; 2683 sk_msg_iter_var_next(i); 2684 } while (1); 2685 2686 msg->sg.end = msg->sg.end - shift > msg->sg.end ? 
2687 msg->sg.end - shift + NR_MSG_FRAG_IDS : 2688 msg->sg.end - shift; 2689 out: 2690 msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset; 2691 msg->data_end = msg->data + bytes; 2692 return 0; 2693 } 2694 2695 static const struct bpf_func_proto bpf_msg_pull_data_proto = { 2696 .func = bpf_msg_pull_data, 2697 .gpl_only = false, 2698 .ret_type = RET_INTEGER, 2699 .arg1_type = ARG_PTR_TO_CTX, 2700 .arg2_type = ARG_ANYTHING, 2701 .arg3_type = ARG_ANYTHING, 2702 .arg4_type = ARG_ANYTHING, 2703 }; 2704 2705 BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start, 2706 u32, len, u64, flags) 2707 { 2708 struct scatterlist sge, nsge, nnsge, rsge = {0}, *psge; 2709 u32 new, i = 0, l = 0, space, copy = 0, offset = 0; 2710 u8 *raw, *to, *from; 2711 struct page *page; 2712 2713 if (unlikely(flags)) 2714 return -EINVAL; 2715 2716 if (unlikely(len == 0)) 2717 return 0; 2718 2719 /* First find the starting scatterlist element */ 2720 i = msg->sg.start; 2721 do { 2722 offset += l; 2723 l = sk_msg_elem(msg, i)->length; 2724 2725 if (start < offset + l) 2726 break; 2727 sk_msg_iter_var_next(i); 2728 } while (i != msg->sg.end); 2729 2730 if (start >= offset + l) 2731 return -EINVAL; 2732 2733 space = MAX_MSG_FRAGS - sk_msg_elem_used(msg); 2734 2735 /* If no space available will fallback to copy, we need at 2736 * least one scatterlist elem available to push data into 2737 * when start aligns to the beginning of an element or two 2738 * when it falls inside an element. We handle the start equals 2739 * offset case because its the common case for inserting a 2740 * header. 2741 */ 2742 if (!space || (space == 1 && start != offset)) 2743 copy = msg->sg.data[i].length; 2744 2745 page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP, 2746 get_order(copy + len)); 2747 if (unlikely(!page)) 2748 return -ENOMEM; 2749 2750 if (copy) { 2751 int front, back; 2752 2753 raw = page_address(page); 2754 2755 psge = sk_msg_elem(msg, i); 2756 front = start - offset; 2757 back = psge->length - front; 2758 from = sg_virt(psge); 2759 2760 if (front) 2761 memcpy(raw, from, front); 2762 2763 if (back) { 2764 from += front; 2765 to = raw + front + len; 2766 2767 memcpy(to, from, back); 2768 } 2769 2770 put_page(sg_page(psge)); 2771 } else if (start - offset) { 2772 psge = sk_msg_elem(msg, i); 2773 rsge = sk_msg_elem_cpy(msg, i); 2774 2775 psge->length = start - offset; 2776 rsge.length -= psge->length; 2777 rsge.offset += start; 2778 2779 sk_msg_iter_var_next(i); 2780 sg_unmark_end(psge); 2781 sg_unmark_end(&rsge); 2782 sk_msg_iter_next(msg, end); 2783 } 2784 2785 /* Slot(s) to place newly allocated data */ 2786 new = i; 2787 2788 /* Shift one or two slots as needed */ 2789 if (!copy) { 2790 sge = sk_msg_elem_cpy(msg, i); 2791 2792 sk_msg_iter_var_next(i); 2793 sg_unmark_end(&sge); 2794 sk_msg_iter_next(msg, end); 2795 2796 nsge = sk_msg_elem_cpy(msg, i); 2797 if (rsge.length) { 2798 sk_msg_iter_var_next(i); 2799 nnsge = sk_msg_elem_cpy(msg, i); 2800 } 2801 2802 while (i != msg->sg.end) { 2803 msg->sg.data[i] = sge; 2804 sge = nsge; 2805 sk_msg_iter_var_next(i); 2806 if (rsge.length) { 2807 nsge = nnsge; 2808 nnsge = sk_msg_elem_cpy(msg, i); 2809 } else { 2810 nsge = sk_msg_elem_cpy(msg, i); 2811 } 2812 } 2813 } 2814 2815 /* Place newly allocated data buffer */ 2816 sk_mem_charge(msg->sk, len); 2817 msg->sg.size += len; 2818 __clear_bit(new, msg->sg.copy); 2819 sg_set_page(&msg->sg.data[new], page, len + copy, 0); 2820 if (rsge.length) { 2821 get_page(sg_page(&rsge)); 2822 sk_msg_iter_var_next(new); 2823 
msg->sg.data[new] = rsge; 2824 } 2825 2826 sk_msg_compute_data_pointers(msg); 2827 return 0; 2828 } 2829 2830 static const struct bpf_func_proto bpf_msg_push_data_proto = { 2831 .func = bpf_msg_push_data, 2832 .gpl_only = false, 2833 .ret_type = RET_INTEGER, 2834 .arg1_type = ARG_PTR_TO_CTX, 2835 .arg2_type = ARG_ANYTHING, 2836 .arg3_type = ARG_ANYTHING, 2837 .arg4_type = ARG_ANYTHING, 2838 }; 2839 2840 static void sk_msg_shift_left(struct sk_msg *msg, int i) 2841 { 2842 int prev; 2843 2844 do { 2845 prev = i; 2846 sk_msg_iter_var_next(i); 2847 msg->sg.data[prev] = msg->sg.data[i]; 2848 } while (i != msg->sg.end); 2849 2850 sk_msg_iter_prev(msg, end); 2851 } 2852 2853 static void sk_msg_shift_right(struct sk_msg *msg, int i) 2854 { 2855 struct scatterlist tmp, sge; 2856 2857 sk_msg_iter_next(msg, end); 2858 sge = sk_msg_elem_cpy(msg, i); 2859 sk_msg_iter_var_next(i); 2860 tmp = sk_msg_elem_cpy(msg, i); 2861 2862 while (i != msg->sg.end) { 2863 msg->sg.data[i] = sge; 2864 sk_msg_iter_var_next(i); 2865 sge = tmp; 2866 tmp = sk_msg_elem_cpy(msg, i); 2867 } 2868 } 2869 2870 BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start, 2871 u32, len, u64, flags) 2872 { 2873 u32 i = 0, l = 0, space, offset = 0; 2874 u64 last = start + len; 2875 int pop; 2876 2877 if (unlikely(flags)) 2878 return -EINVAL; 2879 2880 /* First find the starting scatterlist element */ 2881 i = msg->sg.start; 2882 do { 2883 offset += l; 2884 l = sk_msg_elem(msg, i)->length; 2885 2886 if (start < offset + l) 2887 break; 2888 sk_msg_iter_var_next(i); 2889 } while (i != msg->sg.end); 2890 2891 /* Bounds checks: start and pop must be inside message */ 2892 if (start >= offset + l || last >= msg->sg.size) 2893 return -EINVAL; 2894 2895 space = MAX_MSG_FRAGS - sk_msg_elem_used(msg); 2896 2897 pop = len; 2898 /* --------------| offset 2899 * -| start |-------- len -------| 2900 * 2901 * |----- a ----|-------- pop -------|----- b ----| 2902 * |______________________________________________| length 2903 * 2904 * 2905 * a: region at front of scatter element to save 2906 * b: region at back of scatter element to save when length > A + pop 2907 * pop: region to pop from element, same as input 'pop' here will be 2908 * decremented below per iteration. 2909 * 2910 * Two top-level cases to handle when start != offset, first B is non 2911 * zero and second B is zero corresponding to when a pop includes more 2912 * than one element. 2913 * 2914 * Then if B is non-zero AND there is no space allocate space and 2915 * compact A, B regions into page. If there is space shift ring to 2916 * the right, freeing the next element in the ring to place B, leaving 2917 * A untouched except to reduce length.
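*
* A worked example with illustrative numbers (not from the original
* comment): for a 100 byte element starting at 'offset', popping
* len = 30 bytes at start = offset + 20 gives a = 20 bytes kept at the
* front, pop = 30 bytes dropped and b = 100 - 20 - 30 = 50 bytes kept at
* the back, i.e. the non-zero B case where the element is either split
* into two slots or compacted into a freshly allocated page.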
2918 */ 2919 if (start != offset) { 2920 struct scatterlist *nsge, *sge = sk_msg_elem(msg, i); 2921 int a = start; 2922 int b = sge->length - pop - a; 2923 2924 sk_msg_iter_var_next(i); 2925 2926 if (pop < sge->length - a) { 2927 if (space) { 2928 sge->length = a; 2929 sk_msg_shift_right(msg, i); 2930 nsge = sk_msg_elem(msg, i); 2931 get_page(sg_page(sge)); 2932 sg_set_page(nsge, 2933 sg_page(sge), 2934 b, sge->offset + pop + a); 2935 } else { 2936 struct page *page, *orig; 2937 u8 *to, *from; 2938 2939 page = alloc_pages(__GFP_NOWARN | 2940 __GFP_COMP | GFP_ATOMIC, 2941 get_order(a + b)); 2942 if (unlikely(!page)) 2943 return -ENOMEM; 2944 2945 sge->length = a; 2946 orig = sg_page(sge); 2947 from = sg_virt(sge); 2948 to = page_address(page); 2949 memcpy(to, from, a); 2950 memcpy(to + a, from + a + pop, b); 2951 sg_set_page(sge, page, a + b, 0); 2952 put_page(orig); 2953 } 2954 pop = 0; 2955 } else if (pop >= sge->length - a) { 2956 pop -= (sge->length - a); 2957 sge->length = a; 2958 } 2959 } 2960 2961 /* From above the current layout _must_ be as follows, 2962 * 2963 * -| offset 2964 * -| start 2965 * 2966 * |---- pop ---|---------------- b ------------| 2967 * |____________________________________________| length 2968 * 2969 * Offset and start of the current msg elem are equal because in the 2970 * previous case we handled offset != start and either consumed the 2971 * entire element and advanced to the next element OR pop == 0. 2972 * 2973 * Two cases to handle here are first pop is less than the length 2974 * leaving some remainder b above. Simply adjust the element's layout 2975 * in this case. Or pop >= length of the element so that b = 0. In this 2976 * case advance to next element decrementing pop. 2977 */ 2978 while (pop) { 2979 struct scatterlist *sge = sk_msg_elem(msg, i); 2980 2981 if (pop < sge->length) { 2982 sge->length -= pop; 2983 sge->offset += pop; 2984 pop = 0; 2985 } else { 2986 pop -= sge->length; 2987 sk_msg_shift_left(msg, i); 2988 } 2989 sk_msg_iter_var_next(i); 2990 } 2991 2992 sk_mem_uncharge(msg->sk, len - pop); 2993 msg->sg.size -= (len - pop); 2994 sk_msg_compute_data_pointers(msg); 2995 return 0; 2996 } 2997 2998 static const struct bpf_func_proto bpf_msg_pop_data_proto = { 2999 .func = bpf_msg_pop_data, 3000 .gpl_only = false, 3001 .ret_type = RET_INTEGER, 3002 .arg1_type = ARG_PTR_TO_CTX, 3003 .arg2_type = ARG_ANYTHING, 3004 .arg3_type = ARG_ANYTHING, 3005 .arg4_type = ARG_ANYTHING, 3006 }; 3007 3008 #ifdef CONFIG_CGROUP_NET_CLASSID 3009 BPF_CALL_0(bpf_get_cgroup_classid_curr) 3010 { 3011 return __task_get_classid(current); 3012 } 3013 3014 const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto = { 3015 .func = bpf_get_cgroup_classid_curr, 3016 .gpl_only = false, 3017 .ret_type = RET_INTEGER, 3018 }; 3019 3020 BPF_CALL_1(bpf_skb_cgroup_classid, const struct sk_buff *, skb) 3021 { 3022 struct sock *sk = skb_to_full_sk(skb); 3023 3024 if (!sk || !sk_fullsock(sk)) 3025 return 0; 3026 3027 return sock_cgroup_classid(&sk->sk_cgrp_data); 3028 } 3029 3030 static const struct bpf_func_proto bpf_skb_cgroup_classid_proto = { 3031 .func = bpf_skb_cgroup_classid, 3032 .gpl_only = false, 3033 .ret_type = RET_INTEGER, 3034 .arg1_type = ARG_PTR_TO_CTX, 3035 }; 3036 #endif 3037 3038 BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb) 3039 { 3040 return task_get_classid(skb); 3041 } 3042 3043 static const struct bpf_func_proto bpf_get_cgroup_classid_proto = { 3044 .func = bpf_get_cgroup_classid, 3045 .gpl_only = false, 3046 .ret_type = RET_INTEGER, 3047 
.arg1_type = ARG_PTR_TO_CTX, 3048 }; 3049 3050 BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb) 3051 { 3052 return dst_tclassid(skb); 3053 } 3054 3055 static const struct bpf_func_proto bpf_get_route_realm_proto = { 3056 .func = bpf_get_route_realm, 3057 .gpl_only = false, 3058 .ret_type = RET_INTEGER, 3059 .arg1_type = ARG_PTR_TO_CTX, 3060 }; 3061 3062 BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb) 3063 { 3064 /* If skb_clear_hash() was called due to mangling, we can 3065 * trigger SW recalculation here. Later access to hash 3066 * can then use the inline skb->hash via context directly 3067 * instead of calling this helper again. 3068 */ 3069 return skb_get_hash(skb); 3070 } 3071 3072 static const struct bpf_func_proto bpf_get_hash_recalc_proto = { 3073 .func = bpf_get_hash_recalc, 3074 .gpl_only = false, 3075 .ret_type = RET_INTEGER, 3076 .arg1_type = ARG_PTR_TO_CTX, 3077 }; 3078 3079 BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb) 3080 { 3081 /* After all direct packet write, this can be used once for 3082 * triggering a lazy recalc on next skb_get_hash() invocation. 3083 */ 3084 skb_clear_hash(skb); 3085 return 0; 3086 } 3087 3088 static const struct bpf_func_proto bpf_set_hash_invalid_proto = { 3089 .func = bpf_set_hash_invalid, 3090 .gpl_only = false, 3091 .ret_type = RET_INTEGER, 3092 .arg1_type = ARG_PTR_TO_CTX, 3093 }; 3094 3095 BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash) 3096 { 3097 /* Set user specified hash as L4(+), so that it gets returned 3098 * on skb_get_hash() call unless BPF prog later on triggers a 3099 * skb_clear_hash(). 3100 */ 3101 __skb_set_sw_hash(skb, hash, true); 3102 return 0; 3103 } 3104 3105 static const struct bpf_func_proto bpf_set_hash_proto = { 3106 .func = bpf_set_hash, 3107 .gpl_only = false, 3108 .ret_type = RET_INTEGER, 3109 .arg1_type = ARG_PTR_TO_CTX, 3110 .arg2_type = ARG_ANYTHING, 3111 }; 3112 3113 BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto, 3114 u16, vlan_tci) 3115 { 3116 int ret; 3117 3118 if (unlikely(vlan_proto != htons(ETH_P_8021Q) && 3119 vlan_proto != htons(ETH_P_8021AD))) 3120 vlan_proto = htons(ETH_P_8021Q); 3121 3122 bpf_push_mac_rcsum(skb); 3123 ret = skb_vlan_push(skb, vlan_proto, vlan_tci); 3124 bpf_pull_mac_rcsum(skb); 3125 3126 bpf_compute_data_pointers(skb); 3127 return ret; 3128 } 3129 3130 static const struct bpf_func_proto bpf_skb_vlan_push_proto = { 3131 .func = bpf_skb_vlan_push, 3132 .gpl_only = false, 3133 .ret_type = RET_INTEGER, 3134 .arg1_type = ARG_PTR_TO_CTX, 3135 .arg2_type = ARG_ANYTHING, 3136 .arg3_type = ARG_ANYTHING, 3137 }; 3138 3139 BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb) 3140 { 3141 int ret; 3142 3143 bpf_push_mac_rcsum(skb); 3144 ret = skb_vlan_pop(skb); 3145 bpf_pull_mac_rcsum(skb); 3146 3147 bpf_compute_data_pointers(skb); 3148 return ret; 3149 } 3150 3151 static const struct bpf_func_proto bpf_skb_vlan_pop_proto = { 3152 .func = bpf_skb_vlan_pop, 3153 .gpl_only = false, 3154 .ret_type = RET_INTEGER, 3155 .arg1_type = ARG_PTR_TO_CTX, 3156 }; 3157 3158 static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len) 3159 { 3160 /* Caller already did skb_cow() with len as headroom, 3161 * so no need to do it here. 3162 */ 3163 skb_push(skb, len); 3164 memmove(skb->data, skb->data + len, off); 3165 memset(skb->data + off, 0, len); 3166 3167 /* No skb_postpush_rcsum(skb, skb->data + off, len) 3168 * needed here as it does not change the skb->csum 3169 * result for checksum complete when summing over 3170 * zeroed blocks. 
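* (csum_partial() over an all-zero block is 0, so folding it into
* skb->csum would not change the CHECKSUM_COMPLETE value anyway.)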
3171 */ 3172 return 0; 3173 } 3174 3175 static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len) 3176 { 3177 /* skb_ensure_writable() is not needed here, as we're 3178 * already working on an uncloned skb. 3179 */ 3180 if (unlikely(!pskb_may_pull(skb, off + len))) 3181 return -ENOMEM; 3182 3183 skb_postpull_rcsum(skb, skb->data + off, len); 3184 memmove(skb->data + len, skb->data, off); 3185 __skb_pull(skb, len); 3186 3187 return 0; 3188 } 3189 3190 static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len) 3191 { 3192 bool trans_same = skb->transport_header == skb->network_header; 3193 int ret; 3194 3195 /* There's no need for __skb_push()/__skb_pull() pair to 3196 * get to the start of the mac header as we're guaranteed 3197 * to always start from here under eBPF. 3198 */ 3199 ret = bpf_skb_generic_push(skb, off, len); 3200 if (likely(!ret)) { 3201 skb->mac_header -= len; 3202 skb->network_header -= len; 3203 if (trans_same) 3204 skb->transport_header = skb->network_header; 3205 } 3206 3207 return ret; 3208 } 3209 3210 static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len) 3211 { 3212 bool trans_same = skb->transport_header == skb->network_header; 3213 int ret; 3214 3215 /* Same here, __skb_push()/__skb_pull() pair not needed. */ 3216 ret = bpf_skb_generic_pop(skb, off, len); 3217 if (likely(!ret)) { 3218 skb->mac_header += len; 3219 skb->network_header += len; 3220 if (trans_same) 3221 skb->transport_header = skb->network_header; 3222 } 3223 3224 return ret; 3225 } 3226 3227 static int bpf_skb_proto_4_to_6(struct sk_buff *skb) 3228 { 3229 const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr); 3230 u32 off = skb_mac_header_len(skb); 3231 int ret; 3232 3233 ret = skb_cow(skb, len_diff); 3234 if (unlikely(ret < 0)) 3235 return ret; 3236 3237 ret = bpf_skb_net_hdr_push(skb, off, len_diff); 3238 if (unlikely(ret < 0)) 3239 return ret; 3240 3241 if (skb_is_gso(skb)) { 3242 struct skb_shared_info *shinfo = skb_shinfo(skb); 3243 3244 /* SKB_GSO_TCPV4 needs to be changed into SKB_GSO_TCPV6. */ 3245 if (shinfo->gso_type & SKB_GSO_TCPV4) { 3246 shinfo->gso_type &= ~SKB_GSO_TCPV4; 3247 shinfo->gso_type |= SKB_GSO_TCPV6; 3248 } 3249 } 3250 3251 skb->protocol = htons(ETH_P_IPV6); 3252 skb_clear_hash(skb); 3253 3254 return 0; 3255 } 3256 3257 static int bpf_skb_proto_6_to_4(struct sk_buff *skb) 3258 { 3259 const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr); 3260 u32 off = skb_mac_header_len(skb); 3261 int ret; 3262 3263 ret = skb_unclone(skb, GFP_ATOMIC); 3264 if (unlikely(ret < 0)) 3265 return ret; 3266 3267 ret = bpf_skb_net_hdr_pop(skb, off, len_diff); 3268 if (unlikely(ret < 0)) 3269 return ret; 3270 3271 if (skb_is_gso(skb)) { 3272 struct skb_shared_info *shinfo = skb_shinfo(skb); 3273 3274 /* SKB_GSO_TCPV6 needs to be changed into SKB_GSO_TCPV4. 
*/ 3275 if (shinfo->gso_type & SKB_GSO_TCPV6) { 3276 shinfo->gso_type &= ~SKB_GSO_TCPV6; 3277 shinfo->gso_type |= SKB_GSO_TCPV4; 3278 } 3279 } 3280 3281 skb->protocol = htons(ETH_P_IP); 3282 skb_clear_hash(skb); 3283 3284 return 0; 3285 } 3286 3287 static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto) 3288 { 3289 __be16 from_proto = skb->protocol; 3290 3291 if (from_proto == htons(ETH_P_IP) && 3292 to_proto == htons(ETH_P_IPV6)) 3293 return bpf_skb_proto_4_to_6(skb); 3294 3295 if (from_proto == htons(ETH_P_IPV6) && 3296 to_proto == htons(ETH_P_IP)) 3297 return bpf_skb_proto_6_to_4(skb); 3298 3299 return -ENOTSUPP; 3300 } 3301 3302 BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto, 3303 u64, flags) 3304 { 3305 int ret; 3306 3307 if (unlikely(flags)) 3308 return -EINVAL; 3309 3310 /* General idea is that this helper does the basic groundwork 3311 * needed for changing the protocol, and eBPF program fills the 3312 * rest through bpf_skb_store_bytes(), bpf_lX_csum_replace() 3313 * and other helpers, rather than passing a raw buffer here. 3314 * 3315 * The rationale is to keep this minimal and without a need to 3316 * deal with raw packet data. F.e. even if we would pass buffers 3317 * here, the program still needs to call the bpf_lX_csum_replace() 3318 * helpers anyway. Plus, this way we keep also separation of 3319 * concerns, since f.e. bpf_skb_store_bytes() should only take 3320 * care of stores. 3321 * 3322 * Currently, additional options and extension header space are 3323 * not supported, but flags register is reserved so we can adapt 3324 * that. For offloads, we mark packet as dodgy, so that headers 3325 * need to be verified first. 3326 */ 3327 ret = bpf_skb_proto_xlat(skb, proto); 3328 bpf_compute_data_pointers(skb); 3329 return ret; 3330 } 3331 3332 static const struct bpf_func_proto bpf_skb_change_proto_proto = { 3333 .func = bpf_skb_change_proto, 3334 .gpl_only = false, 3335 .ret_type = RET_INTEGER, 3336 .arg1_type = ARG_PTR_TO_CTX, 3337 .arg2_type = ARG_ANYTHING, 3338 .arg3_type = ARG_ANYTHING, 3339 }; 3340 3341 BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type) 3342 { 3343 /* We only allow a restricted subset to be changed for now. 
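* (skb_pkt_type_ok() limits both the current and the requested type to
* the PACKET_HOST..PACKET_OTHERHOST range.)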
*/ 3344 if (unlikely(!skb_pkt_type_ok(skb->pkt_type) || 3345 !skb_pkt_type_ok(pkt_type))) 3346 return -EINVAL; 3347 3348 skb->pkt_type = pkt_type; 3349 return 0; 3350 } 3351 3352 static const struct bpf_func_proto bpf_skb_change_type_proto = { 3353 .func = bpf_skb_change_type, 3354 .gpl_only = false, 3355 .ret_type = RET_INTEGER, 3356 .arg1_type = ARG_PTR_TO_CTX, 3357 .arg2_type = ARG_ANYTHING, 3358 }; 3359 3360 static u32 bpf_skb_net_base_len(const struct sk_buff *skb) 3361 { 3362 switch (skb->protocol) { 3363 case htons(ETH_P_IP): 3364 return sizeof(struct iphdr); 3365 case htons(ETH_P_IPV6): 3366 return sizeof(struct ipv6hdr); 3367 default: 3368 return ~0U; 3369 } 3370 } 3371 3372 #define BPF_F_ADJ_ROOM_ENCAP_L3_MASK (BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 | \ 3373 BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) 3374 3375 #define BPF_F_ADJ_ROOM_MASK (BPF_F_ADJ_ROOM_FIXED_GSO | \ 3376 BPF_F_ADJ_ROOM_ENCAP_L3_MASK | \ 3377 BPF_F_ADJ_ROOM_ENCAP_L4_GRE | \ 3378 BPF_F_ADJ_ROOM_ENCAP_L4_UDP | \ 3379 BPF_F_ADJ_ROOM_ENCAP_L2_ETH | \ 3380 BPF_F_ADJ_ROOM_ENCAP_L2( \ 3381 BPF_ADJ_ROOM_ENCAP_L2_MASK)) 3382 3383 static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff, 3384 u64 flags) 3385 { 3386 u8 inner_mac_len = flags >> BPF_ADJ_ROOM_ENCAP_L2_SHIFT; 3387 bool encap = flags & BPF_F_ADJ_ROOM_ENCAP_L3_MASK; 3388 u16 mac_len = 0, inner_net = 0, inner_trans = 0; 3389 unsigned int gso_type = SKB_GSO_DODGY; 3390 int ret; 3391 3392 if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) { 3393 /* udp gso_size delineates datagrams, only allow if fixed */ 3394 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) || 3395 !(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) 3396 return -ENOTSUPP; 3397 } 3398 3399 ret = skb_cow_head(skb, len_diff); 3400 if (unlikely(ret < 0)) 3401 return ret; 3402 3403 if (encap) { 3404 if (skb->protocol != htons(ETH_P_IP) && 3405 skb->protocol != htons(ETH_P_IPV6)) 3406 return -ENOTSUPP; 3407 3408 if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 && 3409 flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) 3410 return -EINVAL; 3411 3412 if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE && 3413 flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) 3414 return -EINVAL; 3415 3416 if (flags & BPF_F_ADJ_ROOM_ENCAP_L2_ETH && 3417 inner_mac_len < ETH_HLEN) 3418 return -EINVAL; 3419 3420 if (skb->encapsulation) 3421 return -EALREADY; 3422 3423 mac_len = skb->network_header - skb->mac_header; 3424 inner_net = skb->network_header; 3425 if (inner_mac_len > len_diff) 3426 return -EINVAL; 3427 inner_trans = skb->transport_header; 3428 } 3429 3430 ret = bpf_skb_net_hdr_push(skb, off, len_diff); 3431 if (unlikely(ret < 0)) 3432 return ret; 3433 3434 if (encap) { 3435 skb->inner_mac_header = inner_net - inner_mac_len; 3436 skb->inner_network_header = inner_net; 3437 skb->inner_transport_header = inner_trans; 3438 3439 if (flags & BPF_F_ADJ_ROOM_ENCAP_L2_ETH) 3440 skb_set_inner_protocol(skb, htons(ETH_P_TEB)); 3441 else 3442 skb_set_inner_protocol(skb, skb->protocol); 3443 3444 skb->encapsulation = 1; 3445 skb_set_network_header(skb, mac_len); 3446 3447 if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) 3448 gso_type |= SKB_GSO_UDP_TUNNEL; 3449 else if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE) 3450 gso_type |= SKB_GSO_GRE; 3451 else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) 3452 gso_type |= SKB_GSO_IPXIP6; 3453 else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4) 3454 gso_type |= SKB_GSO_IPXIP4; 3455 3456 if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE || 3457 flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) { 3458 int nh_len = flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 ? 
3459 sizeof(struct ipv6hdr) : 3460 sizeof(struct iphdr); 3461 3462 skb_set_transport_header(skb, mac_len + nh_len); 3463 } 3464 3465 /* Match skb->protocol to new outer l3 protocol */ 3466 if (skb->protocol == htons(ETH_P_IP) && 3467 flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) 3468 skb->protocol = htons(ETH_P_IPV6); 3469 else if (skb->protocol == htons(ETH_P_IPV6) && 3470 flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4) 3471 skb->protocol = htons(ETH_P_IP); 3472 } 3473 3474 if (skb_is_gso(skb)) { 3475 struct skb_shared_info *shinfo = skb_shinfo(skb); 3476 3477 /* Due to header grow, MSS needs to be downgraded. */ 3478 if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) 3479 skb_decrease_gso_size(shinfo, len_diff); 3480 3481 /* Header must be checked, and gso_segs recomputed. */ 3482 shinfo->gso_type |= gso_type; 3483 shinfo->gso_segs = 0; 3484 } 3485 3486 return 0; 3487 } 3488 3489 static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff, 3490 u64 flags) 3491 { 3492 int ret; 3493 3494 if (unlikely(flags & ~(BPF_F_ADJ_ROOM_FIXED_GSO | 3495 BPF_F_ADJ_ROOM_NO_CSUM_RESET))) 3496 return -EINVAL; 3497 3498 if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) { 3499 /* udp gso_size delineates datagrams, only allow if fixed */ 3500 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) || 3501 !(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) 3502 return -ENOTSUPP; 3503 } 3504 3505 ret = skb_unclone(skb, GFP_ATOMIC); 3506 if (unlikely(ret < 0)) 3507 return ret; 3508 3509 ret = bpf_skb_net_hdr_pop(skb, off, len_diff); 3510 if (unlikely(ret < 0)) 3511 return ret; 3512 3513 if (skb_is_gso(skb)) { 3514 struct skb_shared_info *shinfo = skb_shinfo(skb); 3515 3516 /* Due to header shrink, MSS can be upgraded. */ 3517 if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) 3518 skb_increase_gso_size(shinfo, len_diff); 3519 3520 /* Header must be checked, and gso_segs recomputed. 
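* (Zeroing gso_segs forces the stack to recompute it, and SKB_GSO_DODGY
* makes the GSO layer re-validate the headers before segmenting.)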
*/ 3521 shinfo->gso_type |= SKB_GSO_DODGY; 3522 shinfo->gso_segs = 0; 3523 } 3524 3525 return 0; 3526 } 3527 3528 #define BPF_SKB_MAX_LEN SKB_MAX_ALLOC 3529 3530 BPF_CALL_4(sk_skb_adjust_room, struct sk_buff *, skb, s32, len_diff, 3531 u32, mode, u64, flags) 3532 { 3533 u32 len_diff_abs = abs(len_diff); 3534 bool shrink = len_diff < 0; 3535 int ret = 0; 3536 3537 if (unlikely(flags || mode)) 3538 return -EINVAL; 3539 if (unlikely(len_diff_abs > 0xfffU)) 3540 return -EFAULT; 3541 3542 if (!shrink) { 3543 ret = skb_cow(skb, len_diff); 3544 if (unlikely(ret < 0)) 3545 return ret; 3546 __skb_push(skb, len_diff_abs); 3547 memset(skb->data, 0, len_diff_abs); 3548 } else { 3549 if (unlikely(!pskb_may_pull(skb, len_diff_abs))) 3550 return -ENOMEM; 3551 __skb_pull(skb, len_diff_abs); 3552 } 3553 if (tls_sw_has_ctx_rx(skb->sk)) { 3554 struct strp_msg *rxm = strp_msg(skb); 3555 3556 rxm->full_len += len_diff; 3557 } 3558 return ret; 3559 } 3560 3561 static const struct bpf_func_proto sk_skb_adjust_room_proto = { 3562 .func = sk_skb_adjust_room, 3563 .gpl_only = false, 3564 .ret_type = RET_INTEGER, 3565 .arg1_type = ARG_PTR_TO_CTX, 3566 .arg2_type = ARG_ANYTHING, 3567 .arg3_type = ARG_ANYTHING, 3568 .arg4_type = ARG_ANYTHING, 3569 }; 3570 3571 BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff, 3572 u32, mode, u64, flags) 3573 { 3574 u32 len_cur, len_diff_abs = abs(len_diff); 3575 u32 len_min = bpf_skb_net_base_len(skb); 3576 u32 len_max = BPF_SKB_MAX_LEN; 3577 __be16 proto = skb->protocol; 3578 bool shrink = len_diff < 0; 3579 u32 off; 3580 int ret; 3581 3582 if (unlikely(flags & ~(BPF_F_ADJ_ROOM_MASK | 3583 BPF_F_ADJ_ROOM_NO_CSUM_RESET))) 3584 return -EINVAL; 3585 if (unlikely(len_diff_abs > 0xfffU)) 3586 return -EFAULT; 3587 if (unlikely(proto != htons(ETH_P_IP) && 3588 proto != htons(ETH_P_IPV6))) 3589 return -ENOTSUPP; 3590 3591 off = skb_mac_header_len(skb); 3592 switch (mode) { 3593 case BPF_ADJ_ROOM_NET: 3594 off += bpf_skb_net_base_len(skb); 3595 break; 3596 case BPF_ADJ_ROOM_MAC: 3597 break; 3598 default: 3599 return -ENOTSUPP; 3600 } 3601 3602 len_cur = skb->len - skb_network_offset(skb); 3603 if ((shrink && (len_diff_abs >= len_cur || 3604 len_cur - len_diff_abs < len_min)) || 3605 (!shrink && (skb->len + len_diff_abs > len_max && 3606 !skb_is_gso(skb)))) 3607 return -ENOTSUPP; 3608 3609 ret = shrink ? 
bpf_skb_net_shrink(skb, off, len_diff_abs, flags) : 3610 bpf_skb_net_grow(skb, off, len_diff_abs, flags); 3611 if (!ret && !(flags & BPF_F_ADJ_ROOM_NO_CSUM_RESET)) 3612 __skb_reset_checksum_unnecessary(skb); 3613 3614 bpf_compute_data_pointers(skb); 3615 return ret; 3616 } 3617 3618 static const struct bpf_func_proto bpf_skb_adjust_room_proto = { 3619 .func = bpf_skb_adjust_room, 3620 .gpl_only = false, 3621 .ret_type = RET_INTEGER, 3622 .arg1_type = ARG_PTR_TO_CTX, 3623 .arg2_type = ARG_ANYTHING, 3624 .arg3_type = ARG_ANYTHING, 3625 .arg4_type = ARG_ANYTHING, 3626 }; 3627 3628 static u32 __bpf_skb_min_len(const struct sk_buff *skb) 3629 { 3630 u32 min_len = skb_network_offset(skb); 3631 3632 if (skb_transport_header_was_set(skb)) 3633 min_len = skb_transport_offset(skb); 3634 if (skb->ip_summed == CHECKSUM_PARTIAL) 3635 min_len = skb_checksum_start_offset(skb) + 3636 skb->csum_offset + sizeof(__sum16); 3637 return min_len; 3638 } 3639 3640 static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len) 3641 { 3642 unsigned int old_len = skb->len; 3643 int ret; 3644 3645 ret = __skb_grow_rcsum(skb, new_len); 3646 if (!ret) 3647 memset(skb->data + old_len, 0, new_len - old_len); 3648 return ret; 3649 } 3650 3651 static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len) 3652 { 3653 return __skb_trim_rcsum(skb, new_len); 3654 } 3655 3656 static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len, 3657 u64 flags) 3658 { 3659 u32 max_len = BPF_SKB_MAX_LEN; 3660 u32 min_len = __bpf_skb_min_len(skb); 3661 int ret; 3662 3663 if (unlikely(flags || new_len > max_len || new_len < min_len)) 3664 return -EINVAL; 3665 if (skb->encapsulation) 3666 return -ENOTSUPP; 3667 3668 /* The basic idea of this helper is that it's performing the 3669 * needed work to either grow or trim an skb, and eBPF program 3670 * rewrites the rest via helpers like bpf_skb_store_bytes(), 3671 * bpf_lX_csum_replace() and others rather than passing a raw 3672 * buffer here. This one is a slow path helper and intended 3673 * for replies with control messages. 3674 * 3675 * Like in bpf_skb_change_proto(), we want to keep this rather 3676 * minimal and without protocol specifics so that we are able 3677 * to separate concerns as in bpf_skb_store_bytes() should only 3678 * be the one responsible for writing buffers. 3679 * 3680 * It's really expected to be a slow path operation here for 3681 * control message replies, so we're implicitly linearizing, 3682 * uncloning and drop offloads from the skb by this. 
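*
* A usage sketch from the BPF program side (illustrative only, the 64 byte
* minimum is an arbitrary example value), padding short replies in a tc
* program via the uapi wrapper of this helper:
*
*	if (skb->len < 64 && bpf_skb_change_tail(skb, 64, 0))
*		return TC_ACT_SHOT;
*	return TC_ACT_OK;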
3683 */ 3684 ret = __bpf_try_make_writable(skb, skb->len); 3685 if (!ret) { 3686 if (new_len > skb->len) 3687 ret = bpf_skb_grow_rcsum(skb, new_len); 3688 else if (new_len < skb->len) 3689 ret = bpf_skb_trim_rcsum(skb, new_len); 3690 if (!ret && skb_is_gso(skb)) 3691 skb_gso_reset(skb); 3692 } 3693 return ret; 3694 } 3695 3696 BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len, 3697 u64, flags) 3698 { 3699 int ret = __bpf_skb_change_tail(skb, new_len, flags); 3700 3701 bpf_compute_data_pointers(skb); 3702 return ret; 3703 } 3704 3705 static const struct bpf_func_proto bpf_skb_change_tail_proto = { 3706 .func = bpf_skb_change_tail, 3707 .gpl_only = false, 3708 .ret_type = RET_INTEGER, 3709 .arg1_type = ARG_PTR_TO_CTX, 3710 .arg2_type = ARG_ANYTHING, 3711 .arg3_type = ARG_ANYTHING, 3712 }; 3713 3714 BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len, 3715 u64, flags) 3716 { 3717 return __bpf_skb_change_tail(skb, new_len, flags); 3718 } 3719 3720 static const struct bpf_func_proto sk_skb_change_tail_proto = { 3721 .func = sk_skb_change_tail, 3722 .gpl_only = false, 3723 .ret_type = RET_INTEGER, 3724 .arg1_type = ARG_PTR_TO_CTX, 3725 .arg2_type = ARG_ANYTHING, 3726 .arg3_type = ARG_ANYTHING, 3727 }; 3728 3729 static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room, 3730 u64 flags) 3731 { 3732 u32 max_len = BPF_SKB_MAX_LEN; 3733 u32 new_len = skb->len + head_room; 3734 int ret; 3735 3736 if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) || 3737 new_len < skb->len)) 3738 return -EINVAL; 3739 3740 ret = skb_cow(skb, head_room); 3741 if (likely(!ret)) { 3742 /* Idea for this helper is that we currently only 3743 * allow to expand on mac header. This means that 3744 * skb->protocol network header, etc, stay as is. 3745 * Compared to bpf_skb_change_tail(), we're more 3746 * flexible due to not needing to linearize or 3747 * reset GSO. Intention for this helper is to be 3748 * used by an L3 skb that needs to push mac header 3749 * for redirection into L2 device. 
3750 */ 3751 __skb_push(skb, head_room); 3752 memset(skb->data, 0, head_room); 3753 skb_reset_mac_header(skb); 3754 skb_reset_mac_len(skb); 3755 } 3756 3757 return ret; 3758 } 3759 3760 BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room, 3761 u64, flags) 3762 { 3763 int ret = __bpf_skb_change_head(skb, head_room, flags); 3764 3765 bpf_compute_data_pointers(skb); 3766 return ret; 3767 } 3768 3769 static const struct bpf_func_proto bpf_skb_change_head_proto = { 3770 .func = bpf_skb_change_head, 3771 .gpl_only = false, 3772 .ret_type = RET_INTEGER, 3773 .arg1_type = ARG_PTR_TO_CTX, 3774 .arg2_type = ARG_ANYTHING, 3775 .arg3_type = ARG_ANYTHING, 3776 }; 3777 3778 BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room, 3779 u64, flags) 3780 { 3781 return __bpf_skb_change_head(skb, head_room, flags); 3782 } 3783 3784 static const struct bpf_func_proto sk_skb_change_head_proto = { 3785 .func = sk_skb_change_head, 3786 .gpl_only = false, 3787 .ret_type = RET_INTEGER, 3788 .arg1_type = ARG_PTR_TO_CTX, 3789 .arg2_type = ARG_ANYTHING, 3790 .arg3_type = ARG_ANYTHING, 3791 }; 3792 3793 BPF_CALL_1(bpf_xdp_get_buff_len, struct xdp_buff*, xdp) 3794 { 3795 return xdp_get_buff_len(xdp); 3796 } 3797 3798 static const struct bpf_func_proto bpf_xdp_get_buff_len_proto = { 3799 .func = bpf_xdp_get_buff_len, 3800 .gpl_only = false, 3801 .ret_type = RET_INTEGER, 3802 .arg1_type = ARG_PTR_TO_CTX, 3803 }; 3804 3805 BTF_ID_LIST_SINGLE(bpf_xdp_get_buff_len_bpf_ids, struct, xdp_buff) 3806 3807 const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto = { 3808 .func = bpf_xdp_get_buff_len, 3809 .gpl_only = false, 3810 .arg1_type = ARG_PTR_TO_BTF_ID, 3811 .arg1_btf_id = &bpf_xdp_get_buff_len_bpf_ids[0], 3812 }; 3813 3814 static unsigned long xdp_get_metalen(const struct xdp_buff *xdp) 3815 { 3816 return xdp_data_meta_unsupported(xdp) ? 0 : 3817 xdp->data - xdp->data_meta; 3818 } 3819 3820 BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset) 3821 { 3822 void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame); 3823 unsigned long metalen = xdp_get_metalen(xdp); 3824 void *data_start = xdp_frame_end + metalen; 3825 void *data = xdp->data + offset; 3826 3827 if (unlikely(data < data_start || 3828 data > xdp->data_end - ETH_HLEN)) 3829 return -EINVAL; 3830 3831 if (metalen) 3832 memmove(xdp->data_meta + offset, 3833 xdp->data_meta, metalen); 3834 xdp->data_meta += offset; 3835 xdp->data = data; 3836 3837 return 0; 3838 } 3839 3840 static const struct bpf_func_proto bpf_xdp_adjust_head_proto = { 3841 .func = bpf_xdp_adjust_head, 3842 .gpl_only = false, 3843 .ret_type = RET_INTEGER, 3844 .arg1_type = ARG_PTR_TO_CTX, 3845 .arg2_type = ARG_ANYTHING, 3846 }; 3847 3848 static void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, 3849 void *buf, unsigned long len, bool flush) 3850 { 3851 unsigned long ptr_len, ptr_off = 0; 3852 skb_frag_t *next_frag, *end_frag; 3853 struct skb_shared_info *sinfo; 3854 void *src, *dst; 3855 u8 *ptr_buf; 3856 3857 if (likely(xdp->data_end - xdp->data >= off + len)) { 3858 src = flush ? buf : xdp->data + off; 3859 dst = flush ? 
xdp->data + off : buf; 3860 memcpy(dst, src, len); 3861 return; 3862 } 3863 3864 sinfo = xdp_get_shared_info_from_buff(xdp); 3865 end_frag = &sinfo->frags[sinfo->nr_frags]; 3866 next_frag = &sinfo->frags[0]; 3867 3868 ptr_len = xdp->data_end - xdp->data; 3869 ptr_buf = xdp->data; 3870 3871 while (true) { 3872 if (off < ptr_off + ptr_len) { 3873 unsigned long copy_off = off - ptr_off; 3874 unsigned long copy_len = min(len, ptr_len - copy_off); 3875 3876 src = flush ? buf : ptr_buf + copy_off; 3877 dst = flush ? ptr_buf + copy_off : buf; 3878 memcpy(dst, src, copy_len); 3879 3880 off += copy_len; 3881 len -= copy_len; 3882 buf += copy_len; 3883 } 3884 3885 if (!len || next_frag == end_frag) 3886 break; 3887 3888 ptr_off += ptr_len; 3889 ptr_buf = skb_frag_address(next_frag); 3890 ptr_len = skb_frag_size(next_frag); 3891 next_frag++; 3892 } 3893 } 3894 3895 static void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len) 3896 { 3897 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); 3898 u32 size = xdp->data_end - xdp->data; 3899 void *addr = xdp->data; 3900 int i; 3901 3902 if (unlikely(offset > 0xffff || len > 0xffff)) 3903 return ERR_PTR(-EFAULT); 3904 3905 if (offset + len > xdp_get_buff_len(xdp)) 3906 return ERR_PTR(-EINVAL); 3907 3908 if (offset < size) /* linear area */ 3909 goto out; 3910 3911 offset -= size; 3912 for (i = 0; i < sinfo->nr_frags; i++) { /* paged area */ 3913 u32 frag_size = skb_frag_size(&sinfo->frags[i]); 3914 3915 if (offset < frag_size) { 3916 addr = skb_frag_address(&sinfo->frags[i]); 3917 size = frag_size; 3918 break; 3919 } 3920 offset -= frag_size; 3921 } 3922 out: 3923 return offset + len <= size ? addr + offset : NULL; 3924 } 3925 3926 BPF_CALL_4(bpf_xdp_load_bytes, struct xdp_buff *, xdp, u32, offset, 3927 void *, buf, u32, len) 3928 { 3929 void *ptr; 3930 3931 ptr = bpf_xdp_pointer(xdp, offset, len); 3932 if (IS_ERR(ptr)) 3933 return PTR_ERR(ptr); 3934 3935 if (!ptr) 3936 bpf_xdp_copy_buf(xdp, offset, buf, len, false); 3937 else 3938 memcpy(buf, ptr, len); 3939 3940 return 0; 3941 } 3942 3943 static const struct bpf_func_proto bpf_xdp_load_bytes_proto = { 3944 .func = bpf_xdp_load_bytes, 3945 .gpl_only = false, 3946 .ret_type = RET_INTEGER, 3947 .arg1_type = ARG_PTR_TO_CTX, 3948 .arg2_type = ARG_ANYTHING, 3949 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 3950 .arg4_type = ARG_CONST_SIZE, 3951 }; 3952 3953 BPF_CALL_4(bpf_xdp_store_bytes, struct xdp_buff *, xdp, u32, offset, 3954 void *, buf, u32, len) 3955 { 3956 void *ptr; 3957 3958 ptr = bpf_xdp_pointer(xdp, offset, len); 3959 if (IS_ERR(ptr)) 3960 return PTR_ERR(ptr); 3961 3962 if (!ptr) 3963 bpf_xdp_copy_buf(xdp, offset, buf, len, true); 3964 else 3965 memcpy(ptr, buf, len); 3966 3967 return 0; 3968 } 3969 3970 static const struct bpf_func_proto bpf_xdp_store_bytes_proto = { 3971 .func = bpf_xdp_store_bytes, 3972 .gpl_only = false, 3973 .ret_type = RET_INTEGER, 3974 .arg1_type = ARG_PTR_TO_CTX, 3975 .arg2_type = ARG_ANYTHING, 3976 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 3977 .arg4_type = ARG_CONST_SIZE, 3978 }; 3979 3980 static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset) 3981 { 3982 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); 3983 skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags - 1]; 3984 struct xdp_rxq_info *rxq = xdp->rxq; 3985 unsigned int tailroom; 3986 3987 if (!rxq->frag_size || rxq->frag_size > xdp->frame_sz) 3988 return -EOPNOTSUPP; 3989 3990 tailroom = rxq->frag_size - skb_frag_size(frag) - skb_frag_off(frag); 3991 if (unlikely(offset > 
tailroom)) 3992 return -EINVAL; 3993 3994 memset(skb_frag_address(frag) + skb_frag_size(frag), 0, offset); 3995 skb_frag_size_add(frag, offset); 3996 sinfo->xdp_frags_size += offset; 3997 3998 return 0; 3999 } 4000 4001 static int bpf_xdp_frags_shrink_tail(struct xdp_buff *xdp, int offset) 4002 { 4003 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); 4004 int i, n_frags_free = 0, len_free = 0; 4005 4006 if (unlikely(offset > (int)xdp_get_buff_len(xdp) - ETH_HLEN)) 4007 return -EINVAL; 4008 4009 for (i = sinfo->nr_frags - 1; i >= 0 && offset > 0; i--) { 4010 skb_frag_t *frag = &sinfo->frags[i]; 4011 int shrink = min_t(int, offset, skb_frag_size(frag)); 4012 4013 len_free += shrink; 4014 offset -= shrink; 4015 4016 if (skb_frag_size(frag) == shrink) { 4017 struct page *page = skb_frag_page(frag); 4018 4019 __xdp_return(page_address(page), &xdp->rxq->mem, 4020 false, NULL); 4021 n_frags_free++; 4022 } else { 4023 skb_frag_size_sub(frag, shrink); 4024 break; 4025 } 4026 } 4027 sinfo->nr_frags -= n_frags_free; 4028 sinfo->xdp_frags_size -= len_free; 4029 4030 if (unlikely(!sinfo->nr_frags)) { 4031 xdp_buff_clear_frags_flag(xdp); 4032 xdp->data_end -= offset; 4033 } 4034 4035 return 0; 4036 } 4037 4038 BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset) 4039 { 4040 void *data_hard_end = xdp_data_hard_end(xdp); /* use xdp->frame_sz */ 4041 void *data_end = xdp->data_end + offset; 4042 4043 if (unlikely(xdp_buff_has_frags(xdp))) { /* non-linear xdp buff */ 4044 if (offset < 0) 4045 return bpf_xdp_frags_shrink_tail(xdp, -offset); 4046 4047 return bpf_xdp_frags_increase_tail(xdp, offset); 4048 } 4049 4050 /* Notice that xdp_data_hard_end have reserved some tailroom */ 4051 if (unlikely(data_end > data_hard_end)) 4052 return -EINVAL; 4053 4054 /* ALL drivers MUST init xdp->frame_sz, chicken check below */ 4055 if (unlikely(xdp->frame_sz > PAGE_SIZE)) { 4056 WARN_ONCE(1, "Too BIG xdp->frame_sz = %d\n", xdp->frame_sz); 4057 return -EINVAL; 4058 } 4059 4060 if (unlikely(data_end < xdp->data + ETH_HLEN)) 4061 return -EINVAL; 4062 4063 /* Clear memory area on grow, can contain uninit kernel memory */ 4064 if (offset > 0) 4065 memset(xdp->data_end, 0, offset); 4066 4067 xdp->data_end = data_end; 4068 4069 return 0; 4070 } 4071 4072 static const struct bpf_func_proto bpf_xdp_adjust_tail_proto = { 4073 .func = bpf_xdp_adjust_tail, 4074 .gpl_only = false, 4075 .ret_type = RET_INTEGER, 4076 .arg1_type = ARG_PTR_TO_CTX, 4077 .arg2_type = ARG_ANYTHING, 4078 }; 4079 4080 BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset) 4081 { 4082 void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame); 4083 void *meta = xdp->data_meta + offset; 4084 unsigned long metalen = xdp->data - meta; 4085 4086 if (xdp_data_meta_unsupported(xdp)) 4087 return -ENOTSUPP; 4088 if (unlikely(meta < xdp_frame_end || 4089 meta > xdp->data)) 4090 return -EINVAL; 4091 if (unlikely(xdp_metalen_invalid(metalen))) 4092 return -EACCES; 4093 4094 xdp->data_meta = meta; 4095 4096 return 0; 4097 } 4098 4099 static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = { 4100 .func = bpf_xdp_adjust_meta, 4101 .gpl_only = false, 4102 .ret_type = RET_INTEGER, 4103 .arg1_type = ARG_PTR_TO_CTX, 4104 .arg2_type = ARG_ANYTHING, 4105 }; 4106 4107 /* XDP_REDIRECT works by a three-step process, implemented in the functions 4108 * below: 4109 * 4110 * 1. 
The bpf_redirect() and bpf_redirect_map() helpers will look up the target 4111 * of the redirect and store it (along with some other metadata) in a per-CPU 4112 * struct bpf_redirect_info. 4113 * 4114 * 2. When the program returns the XDP_REDIRECT return code, the driver will 4115 * call xdp_do_redirect() which will use the information in struct 4116 * bpf_redirect_info to actually enqueue the frame into a map type-specific 4117 * bulk queue structure. 4118 * 4119 * 3. Before exiting its NAPI poll loop, the driver will call xdp_do_flush(), 4120 * which will flush all the different bulk queues, thus completing the 4121 * redirect. 4122 * 4123 * Pointers to the map entries will be kept around for this whole sequence of 4124 * steps, protected by RCU. However, there is no top-level rcu_read_lock() in 4125 * the core code; instead, the RCU protection relies on everything happening 4126 * inside a single NAPI poll sequence, which means it's between a pair of calls 4127 * to local_bh_disable()/local_bh_enable(). 4128 * 4129 * The map entries are marked as __rcu and the map code makes sure to 4130 * dereference those pointers with rcu_dereference_check() in a way that works 4131 * for both sections that hold an rcu_read_lock() and sections that are 4132 * called from NAPI without a separate rcu_read_lock(). The code below does not 4133 * use RCU annotations, but relies on those in the map code. 4134 */ 4135 void xdp_do_flush(void) 4136 { 4137 __dev_flush(); 4138 __cpu_map_flush(); 4139 __xsk_map_flush(); 4140 } 4141 EXPORT_SYMBOL_GPL(xdp_do_flush); 4142 4143 void bpf_clear_redirect_map(struct bpf_map *map) 4144 { 4145 struct bpf_redirect_info *ri; 4146 int cpu; 4147 4148 for_each_possible_cpu(cpu) { 4149 ri = per_cpu_ptr(&bpf_redirect_info, cpu); 4150 /* Avoid polluting remote cacheline due to writes if 4151 * not needed. Once we pass this test, we need the 4152 * cmpxchg() to make sure it hasn't been changed in 4153 * the meantime by remote CPU. 4154 */ 4155 if (unlikely(READ_ONCE(ri->map) == map)) 4156 cmpxchg(&ri->map, map, NULL); 4157 } 4158 } 4159 4160 DEFINE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key); 4161 EXPORT_SYMBOL_GPL(bpf_master_redirect_enabled_key); 4162 4163 u32 xdp_master_redirect(struct xdp_buff *xdp) 4164 { 4165 struct net_device *master, *slave; 4166 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 4167 4168 master = netdev_master_upper_dev_get_rcu(xdp->rxq->dev); 4169 slave = master->netdev_ops->ndo_xdp_get_xmit_slave(master, xdp); 4170 if (slave && slave != xdp->rxq->dev) { 4171 /* The target device is different from the receiving device, so 4172 * redirect it to the new device. 4173 * Using XDP_REDIRECT gets the correct behaviour from XDP enabled 4174 * drivers to unmap the packet from their rx ring.
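*
* The fields written below mirror what bpf_xdp_redirect() sets up for an
* ifindex based redirect (tgt_index = ifindex, map_id = INT_MAX,
* map_type = BPF_MAP_TYPE_UNSPEC), so xdp_do_redirect() handles this
* exactly as if the program itself had returned
* bpf_redirect(slave->ifindex, 0).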
4175 */ 4176 ri->tgt_index = slave->ifindex; 4177 ri->map_id = INT_MAX; 4178 ri->map_type = BPF_MAP_TYPE_UNSPEC; 4179 return XDP_REDIRECT; 4180 } 4181 return XDP_TX; 4182 } 4183 EXPORT_SYMBOL_GPL(xdp_master_redirect); 4184 4185 static inline int __xdp_do_redirect_xsk(struct bpf_redirect_info *ri, 4186 struct net_device *dev, 4187 struct xdp_buff *xdp, 4188 struct bpf_prog *xdp_prog) 4189 { 4190 enum bpf_map_type map_type = ri->map_type; 4191 void *fwd = ri->tgt_value; 4192 u32 map_id = ri->map_id; 4193 int err; 4194 4195 ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */ 4196 ri->map_type = BPF_MAP_TYPE_UNSPEC; 4197 4198 err = __xsk_map_redirect(fwd, xdp); 4199 if (unlikely(err)) 4200 goto err; 4201 4202 _trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index); 4203 return 0; 4204 err: 4205 _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err); 4206 return err; 4207 } 4208 4209 static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri, 4210 struct net_device *dev, 4211 struct xdp_frame *xdpf, 4212 struct bpf_prog *xdp_prog) 4213 { 4214 enum bpf_map_type map_type = ri->map_type; 4215 void *fwd = ri->tgt_value; 4216 u32 map_id = ri->map_id; 4217 struct bpf_map *map; 4218 int err; 4219 4220 ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */ 4221 ri->map_type = BPF_MAP_TYPE_UNSPEC; 4222 4223 if (unlikely(!xdpf)) { 4224 err = -EOVERFLOW; 4225 goto err; 4226 } 4227 4228 switch (map_type) { 4229 case BPF_MAP_TYPE_DEVMAP: 4230 fallthrough; 4231 case BPF_MAP_TYPE_DEVMAP_HASH: 4232 map = READ_ONCE(ri->map); 4233 if (unlikely(map)) { 4234 WRITE_ONCE(ri->map, NULL); 4235 err = dev_map_enqueue_multi(xdpf, dev, map, 4236 ri->flags & BPF_F_EXCLUDE_INGRESS); 4237 } else { 4238 err = dev_map_enqueue(fwd, xdpf, dev); 4239 } 4240 break; 4241 case BPF_MAP_TYPE_CPUMAP: 4242 err = cpu_map_enqueue(fwd, xdpf, dev); 4243 break; 4244 case BPF_MAP_TYPE_UNSPEC: 4245 if (map_id == INT_MAX) { 4246 fwd = dev_get_by_index_rcu(dev_net(dev), ri->tgt_index); 4247 if (unlikely(!fwd)) { 4248 err = -EINVAL; 4249 break; 4250 } 4251 err = dev_xdp_enqueue(fwd, xdpf, dev); 4252 break; 4253 } 4254 fallthrough; 4255 default: 4256 err = -EBADRQC; 4257 } 4258 4259 if (unlikely(err)) 4260 goto err; 4261 4262 _trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index); 4263 return 0; 4264 err: 4265 _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err); 4266 return err; 4267 } 4268 4269 int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp, 4270 struct bpf_prog *xdp_prog) 4271 { 4272 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 4273 enum bpf_map_type map_type = ri->map_type; 4274 4275 /* XDP_REDIRECT is not fully supported yet for xdp frags since 4276 * not all XDP capable drivers can map non-linear xdp_frame in 4277 * ndo_xdp_xmit. 
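*
* For reference, a minimal driver-side sequence implementing the three
* steps described in the comment above xdp_do_flush() could look roughly
* like this (illustrative sketch only; act, netdev, redirect_pending and
* the drop label are made-up driver locals, not part of this file):
*
*	act = bpf_prog_run_xdp(xdp_prog, &xdp);
*	switch (act) {
*	case XDP_REDIRECT:
*		if (xdp_do_redirect(netdev, &xdp, xdp_prog))
*			goto drop;
*		redirect_pending = true;
*		break;
*	...
*	}
*	... and once per NAPI poll, after the RX loop:
*	if (redirect_pending)
*		xdp_do_flush();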
4278 */ 4279 if (unlikely(xdp_buff_has_frags(xdp) && 4280 map_type != BPF_MAP_TYPE_CPUMAP)) 4281 return -EOPNOTSUPP; 4282 4283 if (map_type == BPF_MAP_TYPE_XSKMAP) 4284 return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog); 4285 4286 return __xdp_do_redirect_frame(ri, dev, xdp_convert_buff_to_frame(xdp), 4287 xdp_prog); 4288 } 4289 EXPORT_SYMBOL_GPL(xdp_do_redirect); 4290 4291 int xdp_do_redirect_frame(struct net_device *dev, struct xdp_buff *xdp, 4292 struct xdp_frame *xdpf, struct bpf_prog *xdp_prog) 4293 { 4294 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 4295 enum bpf_map_type map_type = ri->map_type; 4296 4297 if (map_type == BPF_MAP_TYPE_XSKMAP) 4298 return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog); 4299 4300 return __xdp_do_redirect_frame(ri, dev, xdpf, xdp_prog); 4301 } 4302 EXPORT_SYMBOL_GPL(xdp_do_redirect_frame); 4303 4304 static int xdp_do_generic_redirect_map(struct net_device *dev, 4305 struct sk_buff *skb, 4306 struct xdp_buff *xdp, 4307 struct bpf_prog *xdp_prog, 4308 void *fwd, 4309 enum bpf_map_type map_type, u32 map_id) 4310 { 4311 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 4312 struct bpf_map *map; 4313 int err; 4314 4315 switch (map_type) { 4316 case BPF_MAP_TYPE_DEVMAP: 4317 fallthrough; 4318 case BPF_MAP_TYPE_DEVMAP_HASH: 4319 map = READ_ONCE(ri->map); 4320 if (unlikely(map)) { 4321 WRITE_ONCE(ri->map, NULL); 4322 err = dev_map_redirect_multi(dev, skb, xdp_prog, map, 4323 ri->flags & BPF_F_EXCLUDE_INGRESS); 4324 } else { 4325 err = dev_map_generic_redirect(fwd, skb, xdp_prog); 4326 } 4327 if (unlikely(err)) 4328 goto err; 4329 break; 4330 case BPF_MAP_TYPE_XSKMAP: 4331 err = xsk_generic_rcv(fwd, xdp); 4332 if (err) 4333 goto err; 4334 consume_skb(skb); 4335 break; 4336 case BPF_MAP_TYPE_CPUMAP: 4337 err = cpu_map_generic_redirect(fwd, skb); 4338 if (unlikely(err)) 4339 goto err; 4340 break; 4341 default: 4342 err = -EBADRQC; 4343 goto err; 4344 } 4345 4346 _trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index); 4347 return 0; 4348 err: 4349 _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err); 4350 return err; 4351 } 4352 4353 int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb, 4354 struct xdp_buff *xdp, struct bpf_prog *xdp_prog) 4355 { 4356 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 4357 enum bpf_map_type map_type = ri->map_type; 4358 void *fwd = ri->tgt_value; 4359 u32 map_id = ri->map_id; 4360 int err; 4361 4362 ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */ 4363 ri->map_type = BPF_MAP_TYPE_UNSPEC; 4364 4365 if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) { 4366 fwd = dev_get_by_index_rcu(dev_net(dev), ri->tgt_index); 4367 if (unlikely(!fwd)) { 4368 err = -EINVAL; 4369 goto err; 4370 } 4371 4372 err = xdp_ok_fwd_dev(fwd, skb->len); 4373 if (unlikely(err)) 4374 goto err; 4375 4376 skb->dev = fwd; 4377 _trace_xdp_redirect(dev, xdp_prog, ri->tgt_index); 4378 generic_xdp_tx(skb, xdp_prog); 4379 return 0; 4380 } 4381 4382 return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id); 4383 err: 4384 _trace_xdp_redirect_err(dev, xdp_prog, ri->tgt_index, err); 4385 return err; 4386 } 4387 4388 BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags) 4389 { 4390 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 4391 4392 if (unlikely(flags)) 4393 return XDP_ABORTED; 4394 4395 /* NB! 
Map type UNSPEC and map_id == INT_MAX (never generated 4396 * by map_idr) is used for ifindex based XDP redirect. 4397 */ 4398 ri->tgt_index = ifindex; 4399 ri->map_id = INT_MAX; 4400 ri->map_type = BPF_MAP_TYPE_UNSPEC; 4401 4402 return XDP_REDIRECT; 4403 } 4404 4405 static const struct bpf_func_proto bpf_xdp_redirect_proto = { 4406 .func = bpf_xdp_redirect, 4407 .gpl_only = false, 4408 .ret_type = RET_INTEGER, 4409 .arg1_type = ARG_ANYTHING, 4410 .arg2_type = ARG_ANYTHING, 4411 }; 4412 4413 BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, 4414 u64, flags) 4415 { 4416 return map->ops->map_redirect(map, ifindex, flags); 4417 } 4418 4419 static const struct bpf_func_proto bpf_xdp_redirect_map_proto = { 4420 .func = bpf_xdp_redirect_map, 4421 .gpl_only = false, 4422 .ret_type = RET_INTEGER, 4423 .arg1_type = ARG_CONST_MAP_PTR, 4424 .arg2_type = ARG_ANYTHING, 4425 .arg3_type = ARG_ANYTHING, 4426 }; 4427 4428 static unsigned long bpf_skb_copy(void *dst_buff, const void *skb, 4429 unsigned long off, unsigned long len) 4430 { 4431 void *ptr = skb_header_pointer(skb, off, len, dst_buff); 4432 4433 if (unlikely(!ptr)) 4434 return len; 4435 if (ptr != dst_buff) 4436 memcpy(dst_buff, ptr, len); 4437 4438 return 0; 4439 } 4440 4441 BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map, 4442 u64, flags, void *, meta, u64, meta_size) 4443 { 4444 u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32; 4445 4446 if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK))) 4447 return -EINVAL; 4448 if (unlikely(!skb || skb_size > skb->len)) 4449 return -EFAULT; 4450 4451 return bpf_event_output(map, flags, meta, meta_size, skb, skb_size, 4452 bpf_skb_copy); 4453 } 4454 4455 static const struct bpf_func_proto bpf_skb_event_output_proto = { 4456 .func = bpf_skb_event_output, 4457 .gpl_only = true, 4458 .ret_type = RET_INTEGER, 4459 .arg1_type = ARG_PTR_TO_CTX, 4460 .arg2_type = ARG_CONST_MAP_PTR, 4461 .arg3_type = ARG_ANYTHING, 4462 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 4463 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 4464 }; 4465 4466 BTF_ID_LIST_SINGLE(bpf_skb_output_btf_ids, struct, sk_buff) 4467 4468 const struct bpf_func_proto bpf_skb_output_proto = { 4469 .func = bpf_skb_event_output, 4470 .gpl_only = true, 4471 .ret_type = RET_INTEGER, 4472 .arg1_type = ARG_PTR_TO_BTF_ID, 4473 .arg1_btf_id = &bpf_skb_output_btf_ids[0], 4474 .arg2_type = ARG_CONST_MAP_PTR, 4475 .arg3_type = ARG_ANYTHING, 4476 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 4477 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 4478 }; 4479 4480 static unsigned short bpf_tunnel_key_af(u64 flags) 4481 { 4482 return flags & BPF_F_TUNINFO_IPV6 ? 
AF_INET6 : AF_INET; 4483 } 4484 4485 BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to, 4486 u32, size, u64, flags) 4487 { 4488 const struct ip_tunnel_info *info = skb_tunnel_info(skb); 4489 u8 compat[sizeof(struct bpf_tunnel_key)]; 4490 void *to_orig = to; 4491 int err; 4492 4493 if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6 | 4494 BPF_F_TUNINFO_FLAGS)))) { 4495 err = -EINVAL; 4496 goto err_clear; 4497 } 4498 if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) { 4499 err = -EPROTO; 4500 goto err_clear; 4501 } 4502 if (unlikely(size != sizeof(struct bpf_tunnel_key))) { 4503 err = -EINVAL; 4504 switch (size) { 4505 case offsetof(struct bpf_tunnel_key, local_ipv6[0]): 4506 case offsetof(struct bpf_tunnel_key, tunnel_label): 4507 case offsetof(struct bpf_tunnel_key, tunnel_ext): 4508 goto set_compat; 4509 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): 4510 /* Fixup deprecated structure layouts here, so we have 4511 * a common path later on. 4512 */ 4513 if (ip_tunnel_info_af(info) != AF_INET) 4514 goto err_clear; 4515 set_compat: 4516 to = (struct bpf_tunnel_key *)compat; 4517 break; 4518 default: 4519 goto err_clear; 4520 } 4521 } 4522 4523 to->tunnel_id = be64_to_cpu(info->key.tun_id); 4524 to->tunnel_tos = info->key.tos; 4525 to->tunnel_ttl = info->key.ttl; 4526 if (flags & BPF_F_TUNINFO_FLAGS) 4527 to->tunnel_flags = info->key.tun_flags; 4528 else 4529 to->tunnel_ext = 0; 4530 4531 if (flags & BPF_F_TUNINFO_IPV6) { 4532 memcpy(to->remote_ipv6, &info->key.u.ipv6.src, 4533 sizeof(to->remote_ipv6)); 4534 memcpy(to->local_ipv6, &info->key.u.ipv6.dst, 4535 sizeof(to->local_ipv6)); 4536 to->tunnel_label = be32_to_cpu(info->key.label); 4537 } else { 4538 to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src); 4539 memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3); 4540 to->local_ipv4 = be32_to_cpu(info->key.u.ipv4.dst); 4541 memset(&to->local_ipv6[1], 0, sizeof(__u32) * 3); 4542 to->tunnel_label = 0; 4543 } 4544 4545 if (unlikely(size != sizeof(struct bpf_tunnel_key))) 4546 memcpy(to_orig, to, size); 4547 4548 return 0; 4549 err_clear: 4550 memset(to_orig, 0, size); 4551 return err; 4552 } 4553 4554 static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = { 4555 .func = bpf_skb_get_tunnel_key, 4556 .gpl_only = false, 4557 .ret_type = RET_INTEGER, 4558 .arg1_type = ARG_PTR_TO_CTX, 4559 .arg2_type = ARG_PTR_TO_UNINIT_MEM, 4560 .arg3_type = ARG_CONST_SIZE, 4561 .arg4_type = ARG_ANYTHING, 4562 }; 4563 4564 BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size) 4565 { 4566 const struct ip_tunnel_info *info = skb_tunnel_info(skb); 4567 int err; 4568 4569 if (unlikely(!info || 4570 !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) { 4571 err = -ENOENT; 4572 goto err_clear; 4573 } 4574 if (unlikely(size < info->options_len)) { 4575 err = -ENOMEM; 4576 goto err_clear; 4577 } 4578 4579 ip_tunnel_info_opts_get(to, info); 4580 if (size > info->options_len) 4581 memset(to + info->options_len, 0, size - info->options_len); 4582 4583 return info->options_len; 4584 err_clear: 4585 memset(to, 0, size); 4586 return err; 4587 } 4588 4589 static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = { 4590 .func = bpf_skb_get_tunnel_opt, 4591 .gpl_only = false, 4592 .ret_type = RET_INTEGER, 4593 .arg1_type = ARG_PTR_TO_CTX, 4594 .arg2_type = ARG_PTR_TO_UNINIT_MEM, 4595 .arg3_type = ARG_CONST_SIZE, 4596 }; 4597 4598 static struct metadata_dst __percpu *md_dst; 4599 4600 BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb, 
4601 const struct bpf_tunnel_key *, from, u32, size, u64, flags) 4602 { 4603 struct metadata_dst *md = this_cpu_ptr(md_dst); 4604 u8 compat[sizeof(struct bpf_tunnel_key)]; 4605 struct ip_tunnel_info *info; 4606 4607 if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX | 4608 BPF_F_DONT_FRAGMENT | BPF_F_SEQ_NUMBER))) 4609 return -EINVAL; 4610 if (unlikely(size != sizeof(struct bpf_tunnel_key))) { 4611 switch (size) { 4612 case offsetof(struct bpf_tunnel_key, local_ipv6[0]): 4613 case offsetof(struct bpf_tunnel_key, tunnel_label): 4614 case offsetof(struct bpf_tunnel_key, tunnel_ext): 4615 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): 4616 /* Fixup deprecated structure layouts here, so we have 4617 * a common path later on. 4618 */ 4619 memcpy(compat, from, size); 4620 memset(compat + size, 0, sizeof(compat) - size); 4621 from = (const struct bpf_tunnel_key *) compat; 4622 break; 4623 default: 4624 return -EINVAL; 4625 } 4626 } 4627 if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) || 4628 from->tunnel_ext)) 4629 return -EINVAL; 4630 4631 skb_dst_drop(skb); 4632 dst_hold((struct dst_entry *) md); 4633 skb_dst_set(skb, (struct dst_entry *) md); 4634 4635 info = &md->u.tun_info; 4636 memset(info, 0, sizeof(*info)); 4637 info->mode = IP_TUNNEL_INFO_TX; 4638 4639 info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE; 4640 if (flags & BPF_F_DONT_FRAGMENT) 4641 info->key.tun_flags |= TUNNEL_DONT_FRAGMENT; 4642 if (flags & BPF_F_ZERO_CSUM_TX) 4643 info->key.tun_flags &= ~TUNNEL_CSUM; 4644 if (flags & BPF_F_SEQ_NUMBER) 4645 info->key.tun_flags |= TUNNEL_SEQ; 4646 4647 info->key.tun_id = cpu_to_be64(from->tunnel_id); 4648 info->key.tos = from->tunnel_tos; 4649 info->key.ttl = from->tunnel_ttl; 4650 4651 if (flags & BPF_F_TUNINFO_IPV6) { 4652 info->mode |= IP_TUNNEL_INFO_IPV6; 4653 memcpy(&info->key.u.ipv6.dst, from->remote_ipv6, 4654 sizeof(from->remote_ipv6)); 4655 memcpy(&info->key.u.ipv6.src, from->local_ipv6, 4656 sizeof(from->local_ipv6)); 4657 info->key.label = cpu_to_be32(from->tunnel_label) & 4658 IPV6_FLOWLABEL_MASK; 4659 } else { 4660 info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4); 4661 info->key.u.ipv4.src = cpu_to_be32(from->local_ipv4); 4662 info->key.flow_flags = FLOWI_FLAG_ANYSRC; 4663 } 4664 4665 return 0; 4666 } 4667 4668 static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = { 4669 .func = bpf_skb_set_tunnel_key, 4670 .gpl_only = false, 4671 .ret_type = RET_INTEGER, 4672 .arg1_type = ARG_PTR_TO_CTX, 4673 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 4674 .arg3_type = ARG_CONST_SIZE, 4675 .arg4_type = ARG_ANYTHING, 4676 }; 4677 4678 BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb, 4679 const u8 *, from, u32, size) 4680 { 4681 struct ip_tunnel_info *info = skb_tunnel_info(skb); 4682 const struct metadata_dst *md = this_cpu_ptr(md_dst); 4683 4684 if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1)))) 4685 return -EINVAL; 4686 if (unlikely(size > IP_TUNNEL_OPTS_MAX)) 4687 return -ENOMEM; 4688 4689 ip_tunnel_info_opts_set(info, from, size, TUNNEL_OPTIONS_PRESENT); 4690 4691 return 0; 4692 } 4693 4694 static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = { 4695 .func = bpf_skb_set_tunnel_opt, 4696 .gpl_only = false, 4697 .ret_type = RET_INTEGER, 4698 .arg1_type = ARG_PTR_TO_CTX, 4699 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 4700 .arg3_type = ARG_CONST_SIZE, 4701 }; 4702 4703 static const struct bpf_func_proto * 4704 bpf_get_skb_set_tunnel_proto(enum bpf_func_id which) 4705 { 4706 if (!md_dst) { 
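/* md_dst is allocated lazily, the first time a program using one of
 * the set_tunnel helpers is loaded. If two such loads race, the
 * cmpxchg() below keeps whichever allocation was installed first and
 * frees the other one.
 */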
4707 struct metadata_dst __percpu *tmp; 4708 4709 tmp = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX, 4710 METADATA_IP_TUNNEL, 4711 GFP_KERNEL); 4712 if (!tmp) 4713 return NULL; 4714 if (cmpxchg(&md_dst, NULL, tmp)) 4715 metadata_dst_free_percpu(tmp); 4716 } 4717 4718 switch (which) { 4719 case BPF_FUNC_skb_set_tunnel_key: 4720 return &bpf_skb_set_tunnel_key_proto; 4721 case BPF_FUNC_skb_set_tunnel_opt: 4722 return &bpf_skb_set_tunnel_opt_proto; 4723 default: 4724 return NULL; 4725 } 4726 } 4727 4728 BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map, 4729 u32, idx) 4730 { 4731 struct bpf_array *array = container_of(map, struct bpf_array, map); 4732 struct cgroup *cgrp; 4733 struct sock *sk; 4734 4735 sk = skb_to_full_sk(skb); 4736 if (!sk || !sk_fullsock(sk)) 4737 return -ENOENT; 4738 if (unlikely(idx >= array->map.max_entries)) 4739 return -E2BIG; 4740 4741 cgrp = READ_ONCE(array->ptrs[idx]); 4742 if (unlikely(!cgrp)) 4743 return -EAGAIN; 4744 4745 return sk_under_cgroup_hierarchy(sk, cgrp); 4746 } 4747 4748 static const struct bpf_func_proto bpf_skb_under_cgroup_proto = { 4749 .func = bpf_skb_under_cgroup, 4750 .gpl_only = false, 4751 .ret_type = RET_INTEGER, 4752 .arg1_type = ARG_PTR_TO_CTX, 4753 .arg2_type = ARG_CONST_MAP_PTR, 4754 .arg3_type = ARG_ANYTHING, 4755 }; 4756 4757 #ifdef CONFIG_SOCK_CGROUP_DATA 4758 static inline u64 __bpf_sk_cgroup_id(struct sock *sk) 4759 { 4760 struct cgroup *cgrp; 4761 4762 sk = sk_to_full_sk(sk); 4763 if (!sk || !sk_fullsock(sk)) 4764 return 0; 4765 4766 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); 4767 return cgroup_id(cgrp); 4768 } 4769 4770 BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb) 4771 { 4772 return __bpf_sk_cgroup_id(skb->sk); 4773 } 4774 4775 static const struct bpf_func_proto bpf_skb_cgroup_id_proto = { 4776 .func = bpf_skb_cgroup_id, 4777 .gpl_only = false, 4778 .ret_type = RET_INTEGER, 4779 .arg1_type = ARG_PTR_TO_CTX, 4780 }; 4781 4782 static inline u64 __bpf_sk_ancestor_cgroup_id(struct sock *sk, 4783 int ancestor_level) 4784 { 4785 struct cgroup *ancestor; 4786 struct cgroup *cgrp; 4787 4788 sk = sk_to_full_sk(sk); 4789 if (!sk || !sk_fullsock(sk)) 4790 return 0; 4791 4792 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); 4793 ancestor = cgroup_ancestor(cgrp, ancestor_level); 4794 if (!ancestor) 4795 return 0; 4796 4797 return cgroup_id(ancestor); 4798 } 4799 4800 BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int, 4801 ancestor_level) 4802 { 4803 return __bpf_sk_ancestor_cgroup_id(skb->sk, ancestor_level); 4804 } 4805 4806 static const struct bpf_func_proto bpf_skb_ancestor_cgroup_id_proto = { 4807 .func = bpf_skb_ancestor_cgroup_id, 4808 .gpl_only = false, 4809 .ret_type = RET_INTEGER, 4810 .arg1_type = ARG_PTR_TO_CTX, 4811 .arg2_type = ARG_ANYTHING, 4812 }; 4813 4814 BPF_CALL_1(bpf_sk_cgroup_id, struct sock *, sk) 4815 { 4816 return __bpf_sk_cgroup_id(sk); 4817 } 4818 4819 static const struct bpf_func_proto bpf_sk_cgroup_id_proto = { 4820 .func = bpf_sk_cgroup_id, 4821 .gpl_only = false, 4822 .ret_type = RET_INTEGER, 4823 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 4824 }; 4825 4826 BPF_CALL_2(bpf_sk_ancestor_cgroup_id, struct sock *, sk, int, ancestor_level) 4827 { 4828 return __bpf_sk_ancestor_cgroup_id(sk, ancestor_level); 4829 } 4830 4831 static const struct bpf_func_proto bpf_sk_ancestor_cgroup_id_proto = { 4832 .func = bpf_sk_ancestor_cgroup_id, 4833 .gpl_only = false, 4834 .ret_type = RET_INTEGER, 4835 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 4836 .arg2_type = 
ARG_ANYTHING, 4837 }; 4838 #endif 4839 4840 static unsigned long bpf_xdp_copy(void *dst, const void *ctx, 4841 unsigned long off, unsigned long len) 4842 { 4843 struct xdp_buff *xdp = (struct xdp_buff *)ctx; 4844 4845 bpf_xdp_copy_buf(xdp, off, dst, len, false); 4846 return 0; 4847 } 4848 4849 BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map, 4850 u64, flags, void *, meta, u64, meta_size) 4851 { 4852 u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32; 4853 4854 if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK))) 4855 return -EINVAL; 4856 4857 if (unlikely(!xdp || xdp_size > xdp_get_buff_len(xdp))) 4858 return -EFAULT; 4859 4860 return bpf_event_output(map, flags, meta, meta_size, xdp, 4861 xdp_size, bpf_xdp_copy); 4862 } 4863 4864 static const struct bpf_func_proto bpf_xdp_event_output_proto = { 4865 .func = bpf_xdp_event_output, 4866 .gpl_only = true, 4867 .ret_type = RET_INTEGER, 4868 .arg1_type = ARG_PTR_TO_CTX, 4869 .arg2_type = ARG_CONST_MAP_PTR, 4870 .arg3_type = ARG_ANYTHING, 4871 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 4872 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 4873 }; 4874 4875 BTF_ID_LIST_SINGLE(bpf_xdp_output_btf_ids, struct, xdp_buff) 4876 4877 const struct bpf_func_proto bpf_xdp_output_proto = { 4878 .func = bpf_xdp_event_output, 4879 .gpl_only = true, 4880 .ret_type = RET_INTEGER, 4881 .arg1_type = ARG_PTR_TO_BTF_ID, 4882 .arg1_btf_id = &bpf_xdp_output_btf_ids[0], 4883 .arg2_type = ARG_CONST_MAP_PTR, 4884 .arg3_type = ARG_ANYTHING, 4885 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 4886 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 4887 }; 4888 4889 BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb) 4890 { 4891 return skb->sk ? __sock_gen_cookie(skb->sk) : 0; 4892 } 4893 4894 static const struct bpf_func_proto bpf_get_socket_cookie_proto = { 4895 .func = bpf_get_socket_cookie, 4896 .gpl_only = false, 4897 .ret_type = RET_INTEGER, 4898 .arg1_type = ARG_PTR_TO_CTX, 4899 }; 4900 4901 BPF_CALL_1(bpf_get_socket_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx) 4902 { 4903 return __sock_gen_cookie(ctx->sk); 4904 } 4905 4906 static const struct bpf_func_proto bpf_get_socket_cookie_sock_addr_proto = { 4907 .func = bpf_get_socket_cookie_sock_addr, 4908 .gpl_only = false, 4909 .ret_type = RET_INTEGER, 4910 .arg1_type = ARG_PTR_TO_CTX, 4911 }; 4912 4913 BPF_CALL_1(bpf_get_socket_cookie_sock, struct sock *, ctx) 4914 { 4915 return __sock_gen_cookie(ctx); 4916 } 4917 4918 static const struct bpf_func_proto bpf_get_socket_cookie_sock_proto = { 4919 .func = bpf_get_socket_cookie_sock, 4920 .gpl_only = false, 4921 .ret_type = RET_INTEGER, 4922 .arg1_type = ARG_PTR_TO_CTX, 4923 }; 4924 4925 BPF_CALL_1(bpf_get_socket_ptr_cookie, struct sock *, sk) 4926 { 4927 return sk ? sock_gen_cookie(sk) : 0; 4928 } 4929 4930 const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto = { 4931 .func = bpf_get_socket_ptr_cookie, 4932 .gpl_only = false, 4933 .ret_type = RET_INTEGER, 4934 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 4935 }; 4936 4937 BPF_CALL_1(bpf_get_socket_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx) 4938 { 4939 return __sock_gen_cookie(ctx->sk); 4940 } 4941 4942 static const struct bpf_func_proto bpf_get_socket_cookie_sock_ops_proto = { 4943 .func = bpf_get_socket_cookie_sock_ops, 4944 .gpl_only = false, 4945 .ret_type = RET_INTEGER, 4946 .arg1_type = ARG_PTR_TO_CTX, 4947 }; 4948 4949 static u64 __bpf_get_netns_cookie(struct sock *sk) 4950 { 4951 const struct net *net = sk ? 
sock_net(sk) : &init_net; 4952 4953 return net->net_cookie; 4954 } 4955 4956 BPF_CALL_1(bpf_get_netns_cookie_sock, struct sock *, ctx) 4957 { 4958 return __bpf_get_netns_cookie(ctx); 4959 } 4960 4961 static const struct bpf_func_proto bpf_get_netns_cookie_sock_proto = { 4962 .func = bpf_get_netns_cookie_sock, 4963 .gpl_only = false, 4964 .ret_type = RET_INTEGER, 4965 .arg1_type = ARG_PTR_TO_CTX_OR_NULL, 4966 }; 4967 4968 BPF_CALL_1(bpf_get_netns_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx) 4969 { 4970 return __bpf_get_netns_cookie(ctx ? ctx->sk : NULL); 4971 } 4972 4973 static const struct bpf_func_proto bpf_get_netns_cookie_sock_addr_proto = { 4974 .func = bpf_get_netns_cookie_sock_addr, 4975 .gpl_only = false, 4976 .ret_type = RET_INTEGER, 4977 .arg1_type = ARG_PTR_TO_CTX_OR_NULL, 4978 }; 4979 4980 BPF_CALL_1(bpf_get_netns_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx) 4981 { 4982 return __bpf_get_netns_cookie(ctx ? ctx->sk : NULL); 4983 } 4984 4985 static const struct bpf_func_proto bpf_get_netns_cookie_sock_ops_proto = { 4986 .func = bpf_get_netns_cookie_sock_ops, 4987 .gpl_only = false, 4988 .ret_type = RET_INTEGER, 4989 .arg1_type = ARG_PTR_TO_CTX_OR_NULL, 4990 }; 4991 4992 BPF_CALL_1(bpf_get_netns_cookie_sk_msg, struct sk_msg *, ctx) 4993 { 4994 return __bpf_get_netns_cookie(ctx ? ctx->sk : NULL); 4995 } 4996 4997 static const struct bpf_func_proto bpf_get_netns_cookie_sk_msg_proto = { 4998 .func = bpf_get_netns_cookie_sk_msg, 4999 .gpl_only = false, 5000 .ret_type = RET_INTEGER, 5001 .arg1_type = ARG_PTR_TO_CTX_OR_NULL, 5002 }; 5003 5004 BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb) 5005 { 5006 struct sock *sk = sk_to_full_sk(skb->sk); 5007 kuid_t kuid; 5008 5009 if (!sk || !sk_fullsock(sk)) 5010 return overflowuid; 5011 kuid = sock_net_uid(sock_net(sk), sk); 5012 return from_kuid_munged(sock_net(sk)->user_ns, kuid); 5013 } 5014 5015 static const struct bpf_func_proto bpf_get_socket_uid_proto = { 5016 .func = bpf_get_socket_uid, 5017 .gpl_only = false, 5018 .ret_type = RET_INTEGER, 5019 .arg1_type = ARG_PTR_TO_CTX, 5020 }; 5021 5022 static int sol_socket_sockopt(struct sock *sk, int optname, 5023 char *optval, int *optlen, 5024 bool getopt) 5025 { 5026 switch (optname) { 5027 case SO_REUSEADDR: 5028 case SO_SNDBUF: 5029 case SO_RCVBUF: 5030 case SO_KEEPALIVE: 5031 case SO_PRIORITY: 5032 case SO_REUSEPORT: 5033 case SO_RCVLOWAT: 5034 case SO_MARK: 5035 case SO_MAX_PACING_RATE: 5036 case SO_BINDTOIFINDEX: 5037 case SO_TXREHASH: 5038 if (*optlen != sizeof(int)) 5039 return -EINVAL; 5040 break; 5041 case SO_BINDTODEVICE: 5042 break; 5043 default: 5044 return -EINVAL; 5045 } 5046 5047 if (getopt) { 5048 if (optname == SO_BINDTODEVICE) 5049 return -EINVAL; 5050 return sk_getsockopt(sk, SOL_SOCKET, optname, 5051 KERNEL_SOCKPTR(optval), 5052 KERNEL_SOCKPTR(optlen)); 5053 } 5054 5055 return sk_setsockopt(sk, SOL_SOCKET, optname, 5056 KERNEL_SOCKPTR(optval), *optlen); 5057 } 5058 5059 static int bpf_sol_tcp_setsockopt(struct sock *sk, int optname, 5060 char *optval, int optlen) 5061 { 5062 struct tcp_sock *tp = tcp_sk(sk); 5063 unsigned long timeout; 5064 int val; 5065 5066 if (optlen != sizeof(int)) 5067 return -EINVAL; 5068 5069 val = *(int *)optval; 5070 5071 /* Only some options are supported */ 5072 switch (optname) { 5073 case TCP_BPF_IW: 5074 if (val <= 0 || tp->data_segs_out > tp->syn_data) 5075 return -EINVAL; 5076 tcp_snd_cwnd_set(tp, val); 5077 break; 5078 case TCP_BPF_SNDCWND_CLAMP: 5079 if (val <= 0) 5080 return -EINVAL; 5081 tp->snd_cwnd_clamp = val; 5082 
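/* ssthresh is set to the same value, presumably so that the socket
 * also leaves slow start once cwnd reaches the clamp.
 */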
tp->snd_ssthresh = val; 5083 break; 5084 case TCP_BPF_DELACK_MAX: 5085 timeout = usecs_to_jiffies(val); 5086 if (timeout > TCP_DELACK_MAX || 5087 timeout < TCP_TIMEOUT_MIN) 5088 return -EINVAL; 5089 inet_csk(sk)->icsk_delack_max = timeout; 5090 break; 5091 case TCP_BPF_RTO_MIN: 5092 timeout = usecs_to_jiffies(val); 5093 if (timeout > TCP_RTO_MIN || 5094 timeout < TCP_TIMEOUT_MIN) 5095 return -EINVAL; 5096 inet_csk(sk)->icsk_rto_min = timeout; 5097 break; 5098 default: 5099 return -EINVAL; 5100 } 5101 5102 return 0; 5103 } 5104 5105 static int sol_tcp_sockopt_congestion(struct sock *sk, char *optval, 5106 int *optlen, bool getopt) 5107 { 5108 struct tcp_sock *tp; 5109 int ret; 5110 5111 if (*optlen < 2) 5112 return -EINVAL; 5113 5114 if (getopt) { 5115 if (!inet_csk(sk)->icsk_ca_ops) 5116 return -EINVAL; 5117 /* BPF expects NULL-terminated tcp-cc string */ 5118 optval[--(*optlen)] = '\0'; 5119 return do_tcp_getsockopt(sk, SOL_TCP, TCP_CONGESTION, 5120 KERNEL_SOCKPTR(optval), 5121 KERNEL_SOCKPTR(optlen)); 5122 } 5123 5124 /* "cdg" is the only cc that allocates a ptr 5125 * in the inet_csk_ca area. The bpf-tcp-cc may 5126 * overwrite this ptr after switching to cdg. 5127 */ 5128 if (*optlen >= sizeof("cdg") - 1 && !strncmp("cdg", optval, *optlen)) 5129 return -ENOTSUPP; 5130 5131 /* This stops the following loop: 5132 * 5133 * .init => bpf_setsockopt(tcp_cc) => .init => 5134 * bpf_setsockopt(tcp_cc) => .init => .... 5135 * 5136 * The second bpf_setsockopt(tcp_cc) is not allowed, 5137 * in order to break the loop when both .init 5138 * are the same bpf prog. 5139 * 5140 * This applies even if the second bpf_setsockopt(tcp_cc) 5141 * does not cause a loop. It means that only the first 5142 * '.init' can call bpf_setsockopt(TCP_CONGESTION) to 5143 * pick a fallback cc (e.g. the peer does not support ECN), 5144 * and the second '.init' cannot fall back to 5145 * another. 5146 */ 5147 tp = tcp_sk(sk); 5148 if (tp->bpf_chg_cc_inprogress) 5149 return -EBUSY; 5150 5151 tp->bpf_chg_cc_inprogress = 1; 5152 ret = do_tcp_setsockopt(sk, SOL_TCP, TCP_CONGESTION, 5153 KERNEL_SOCKPTR(optval), *optlen); 5154 tp->bpf_chg_cc_inprogress = 0; 5155 return ret; 5156 } 5157 5158 static int sol_tcp_sockopt(struct sock *sk, int optname, 5159 char *optval, int *optlen, 5160 bool getopt) 5161 { 5162 if (sk->sk_prot->setsockopt != tcp_setsockopt) 5163 return -EINVAL; 5164 5165 switch (optname) { 5166 case TCP_NODELAY: 5167 case TCP_MAXSEG: 5168 case TCP_KEEPIDLE: 5169 case TCP_KEEPINTVL: 5170 case TCP_KEEPCNT: 5171 case TCP_SYNCNT: 5172 case TCP_WINDOW_CLAMP: 5173 case TCP_THIN_LINEAR_TIMEOUTS: 5174 case TCP_USER_TIMEOUT: 5175 case TCP_NOTSENT_LOWAT: 5176 case TCP_SAVE_SYN: 5177 if (*optlen != sizeof(int)) 5178 return -EINVAL; 5179 break; 5180 case TCP_CONGESTION: 5181 return sol_tcp_sockopt_congestion(sk, optval, optlen, getopt); 5182 case TCP_SAVED_SYN: 5183 if (*optlen < 1) 5184 return -EINVAL; 5185 break; 5186 default: 5187 if (getopt) 5188 return -EINVAL; 5189 return bpf_sol_tcp_setsockopt(sk, optname, optval, *optlen); 5190 } 5191 5192 if (getopt) { 5193 if (optname == TCP_SAVED_SYN) { 5194 struct tcp_sock *tp = tcp_sk(sk); 5195 5196 if (!tp->saved_syn || 5197 *optlen > tcp_saved_syn_len(tp->saved_syn)) 5198 return -EINVAL; 5199 memcpy(optval, tp->saved_syn->data, *optlen); 5200 /* It cannot free tp->saved_syn here because it 5201 * does not know if the user space still needs it.
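*
* Note that a caller may pass an optlen smaller than
* tcp_saved_syn_len() to read only a prefix of the saved
* headers; the copy above is then limited to that prefix.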
5202 */ 5203 return 0; 5204 } 5205 5206 return do_tcp_getsockopt(sk, SOL_TCP, optname, 5207 KERNEL_SOCKPTR(optval), 5208 KERNEL_SOCKPTR(optlen)); 5209 } 5210 5211 return do_tcp_setsockopt(sk, SOL_TCP, optname, 5212 KERNEL_SOCKPTR(optval), *optlen); 5213 } 5214 5215 static int sol_ip_sockopt(struct sock *sk, int optname, 5216 char *optval, int *optlen, 5217 bool getopt) 5218 { 5219 if (sk->sk_family != AF_INET) 5220 return -EINVAL; 5221 5222 switch (optname) { 5223 case IP_TOS: 5224 if (*optlen != sizeof(int)) 5225 return -EINVAL; 5226 break; 5227 default: 5228 return -EINVAL; 5229 } 5230 5231 if (getopt) 5232 return do_ip_getsockopt(sk, SOL_IP, optname, 5233 KERNEL_SOCKPTR(optval), 5234 KERNEL_SOCKPTR(optlen)); 5235 5236 return do_ip_setsockopt(sk, SOL_IP, optname, 5237 KERNEL_SOCKPTR(optval), *optlen); 5238 } 5239 5240 static int sol_ipv6_sockopt(struct sock *sk, int optname, 5241 char *optval, int *optlen, 5242 bool getopt) 5243 { 5244 if (sk->sk_family != AF_INET6) 5245 return -EINVAL; 5246 5247 switch (optname) { 5248 case IPV6_TCLASS: 5249 case IPV6_AUTOFLOWLABEL: 5250 if (*optlen != sizeof(int)) 5251 return -EINVAL; 5252 break; 5253 default: 5254 return -EINVAL; 5255 } 5256 5257 if (getopt) 5258 return ipv6_bpf_stub->ipv6_getsockopt(sk, SOL_IPV6, optname, 5259 KERNEL_SOCKPTR(optval), 5260 KERNEL_SOCKPTR(optlen)); 5261 5262 return ipv6_bpf_stub->ipv6_setsockopt(sk, SOL_IPV6, optname, 5263 KERNEL_SOCKPTR(optval), *optlen); 5264 } 5265 5266 static int __bpf_setsockopt(struct sock *sk, int level, int optname, 5267 char *optval, int optlen) 5268 { 5269 if (!sk_fullsock(sk)) 5270 return -EINVAL; 5271 5272 if (level == SOL_SOCKET) 5273 return sol_socket_sockopt(sk, optname, optval, &optlen, false); 5274 else if (IS_ENABLED(CONFIG_INET) && level == SOL_IP) 5275 return sol_ip_sockopt(sk, optname, optval, &optlen, false); 5276 else if (IS_ENABLED(CONFIG_IPV6) && level == SOL_IPV6) 5277 return sol_ipv6_sockopt(sk, optname, optval, &optlen, false); 5278 else if (IS_ENABLED(CONFIG_INET) && level == SOL_TCP) 5279 return sol_tcp_sockopt(sk, optname, optval, &optlen, false); 5280 5281 return -EINVAL; 5282 } 5283 5284 static int _bpf_setsockopt(struct sock *sk, int level, int optname, 5285 char *optval, int optlen) 5286 { 5287 if (sk_fullsock(sk)) 5288 sock_owned_by_me(sk); 5289 return __bpf_setsockopt(sk, level, optname, optval, optlen); 5290 } 5291 5292 static int __bpf_getsockopt(struct sock *sk, int level, int optname, 5293 char *optval, int optlen) 5294 { 5295 int err, saved_optlen = optlen; 5296 5297 if (!sk_fullsock(sk)) { 5298 err = -EINVAL; 5299 goto done; 5300 } 5301 5302 if (level == SOL_SOCKET) 5303 err = sol_socket_sockopt(sk, optname, optval, &optlen, true); 5304 else if (IS_ENABLED(CONFIG_INET) && level == SOL_TCP) 5305 err = sol_tcp_sockopt(sk, optname, optval, &optlen, true); 5306 else if (IS_ENABLED(CONFIG_INET) && level == SOL_IP) 5307 err = sol_ip_sockopt(sk, optname, optval, &optlen, true); 5308 else if (IS_ENABLED(CONFIG_IPV6) && level == SOL_IPV6) 5309 err = sol_ipv6_sockopt(sk, optname, optval, &optlen, true); 5310 else 5311 err = -EINVAL; 5312 5313 done: 5314 if (err) 5315 optlen = 0; 5316 if (optlen < saved_optlen) 5317 memset(optval + optlen, 0, saved_optlen - optlen); 5318 return err; 5319 } 5320 5321 static int _bpf_getsockopt(struct sock *sk, int level, int optname, 5322 char *optval, int optlen) 5323 { 5324 if (sk_fullsock(sk)) 5325 sock_owned_by_me(sk); 5326 return __bpf_getsockopt(sk, level, optname, optval, optlen); 5327 } 5328 5329 BPF_CALL_5(bpf_sk_setsockopt, 
struct sock *, sk, int, level, 5330 int, optname, char *, optval, int, optlen) 5331 { 5332 return _bpf_setsockopt(sk, level, optname, optval, optlen); 5333 } 5334 5335 const struct bpf_func_proto bpf_sk_setsockopt_proto = { 5336 .func = bpf_sk_setsockopt, 5337 .gpl_only = false, 5338 .ret_type = RET_INTEGER, 5339 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 5340 .arg2_type = ARG_ANYTHING, 5341 .arg3_type = ARG_ANYTHING, 5342 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 5343 .arg5_type = ARG_CONST_SIZE, 5344 }; 5345 5346 BPF_CALL_5(bpf_sk_getsockopt, struct sock *, sk, int, level, 5347 int, optname, char *, optval, int, optlen) 5348 { 5349 return _bpf_getsockopt(sk, level, optname, optval, optlen); 5350 } 5351 5352 const struct bpf_func_proto bpf_sk_getsockopt_proto = { 5353 .func = bpf_sk_getsockopt, 5354 .gpl_only = false, 5355 .ret_type = RET_INTEGER, 5356 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 5357 .arg2_type = ARG_ANYTHING, 5358 .arg3_type = ARG_ANYTHING, 5359 .arg4_type = ARG_PTR_TO_UNINIT_MEM, 5360 .arg5_type = ARG_CONST_SIZE, 5361 }; 5362 5363 BPF_CALL_5(bpf_unlocked_sk_setsockopt, struct sock *, sk, int, level, 5364 int, optname, char *, optval, int, optlen) 5365 { 5366 return __bpf_setsockopt(sk, level, optname, optval, optlen); 5367 } 5368 5369 const struct bpf_func_proto bpf_unlocked_sk_setsockopt_proto = { 5370 .func = bpf_unlocked_sk_setsockopt, 5371 .gpl_only = false, 5372 .ret_type = RET_INTEGER, 5373 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 5374 .arg2_type = ARG_ANYTHING, 5375 .arg3_type = ARG_ANYTHING, 5376 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 5377 .arg5_type = ARG_CONST_SIZE, 5378 }; 5379 5380 BPF_CALL_5(bpf_unlocked_sk_getsockopt, struct sock *, sk, int, level, 5381 int, optname, char *, optval, int, optlen) 5382 { 5383 return __bpf_getsockopt(sk, level, optname, optval, optlen); 5384 } 5385 5386 const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto = { 5387 .func = bpf_unlocked_sk_getsockopt, 5388 .gpl_only = false, 5389 .ret_type = RET_INTEGER, 5390 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 5391 .arg2_type = ARG_ANYTHING, 5392 .arg3_type = ARG_ANYTHING, 5393 .arg4_type = ARG_PTR_TO_UNINIT_MEM, 5394 .arg5_type = ARG_CONST_SIZE, 5395 }; 5396 5397 BPF_CALL_5(bpf_sock_addr_setsockopt, struct bpf_sock_addr_kern *, ctx, 5398 int, level, int, optname, char *, optval, int, optlen) 5399 { 5400 return _bpf_setsockopt(ctx->sk, level, optname, optval, optlen); 5401 } 5402 5403 static const struct bpf_func_proto bpf_sock_addr_setsockopt_proto = { 5404 .func = bpf_sock_addr_setsockopt, 5405 .gpl_only = false, 5406 .ret_type = RET_INTEGER, 5407 .arg1_type = ARG_PTR_TO_CTX, 5408 .arg2_type = ARG_ANYTHING, 5409 .arg3_type = ARG_ANYTHING, 5410 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 5411 .arg5_type = ARG_CONST_SIZE, 5412 }; 5413 5414 BPF_CALL_5(bpf_sock_addr_getsockopt, struct bpf_sock_addr_kern *, ctx, 5415 int, level, int, optname, char *, optval, int, optlen) 5416 { 5417 return _bpf_getsockopt(ctx->sk, level, optname, optval, optlen); 5418 } 5419 5420 static const struct bpf_func_proto bpf_sock_addr_getsockopt_proto = { 5421 .func = bpf_sock_addr_getsockopt, 5422 .gpl_only = false, 5423 .ret_type = RET_INTEGER, 5424 .arg1_type = ARG_PTR_TO_CTX, 5425 .arg2_type = ARG_ANYTHING, 5426 .arg3_type = ARG_ANYTHING, 5427 .arg4_type = ARG_PTR_TO_UNINIT_MEM, 5428 .arg5_type = ARG_CONST_SIZE, 5429 }; 5430 5431 BPF_CALL_5(bpf_sock_ops_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, 5432 int, level, int, optname, char *, optval, int, optlen) 5433 { 5434 return 
_bpf_setsockopt(bpf_sock->sk, level, optname, optval, optlen); 5435 } 5436 5437 static const struct bpf_func_proto bpf_sock_ops_setsockopt_proto = { 5438 .func = bpf_sock_ops_setsockopt, 5439 .gpl_only = false, 5440 .ret_type = RET_INTEGER, 5441 .arg1_type = ARG_PTR_TO_CTX, 5442 .arg2_type = ARG_ANYTHING, 5443 .arg3_type = ARG_ANYTHING, 5444 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 5445 .arg5_type = ARG_CONST_SIZE, 5446 }; 5447 5448 static int bpf_sock_ops_get_syn(struct bpf_sock_ops_kern *bpf_sock, 5449 int optname, const u8 **start) 5450 { 5451 struct sk_buff *syn_skb = bpf_sock->syn_skb; 5452 const u8 *hdr_start; 5453 int ret; 5454 5455 if (syn_skb) { 5456 /* sk is a request_sock here */ 5457 5458 if (optname == TCP_BPF_SYN) { 5459 hdr_start = syn_skb->data; 5460 ret = tcp_hdrlen(syn_skb); 5461 } else if (optname == TCP_BPF_SYN_IP) { 5462 hdr_start = skb_network_header(syn_skb); 5463 ret = skb_network_header_len(syn_skb) + 5464 tcp_hdrlen(syn_skb); 5465 } else { 5466 /* optname == TCP_BPF_SYN_MAC */ 5467 hdr_start = skb_mac_header(syn_skb); 5468 ret = skb_mac_header_len(syn_skb) + 5469 skb_network_header_len(syn_skb) + 5470 tcp_hdrlen(syn_skb); 5471 } 5472 } else { 5473 struct sock *sk = bpf_sock->sk; 5474 struct saved_syn *saved_syn; 5475 5476 if (sk->sk_state == TCP_NEW_SYN_RECV) 5477 /* synack retransmit. bpf_sock->syn_skb will 5478 * not be available. It has to resort to 5479 * saved_syn (if it is saved). 5480 */ 5481 saved_syn = inet_reqsk(sk)->saved_syn; 5482 else 5483 saved_syn = tcp_sk(sk)->saved_syn; 5484 5485 if (!saved_syn) 5486 return -ENOENT; 5487 5488 if (optname == TCP_BPF_SYN) { 5489 hdr_start = saved_syn->data + 5490 saved_syn->mac_hdrlen + 5491 saved_syn->network_hdrlen; 5492 ret = saved_syn->tcp_hdrlen; 5493 } else if (optname == TCP_BPF_SYN_IP) { 5494 hdr_start = saved_syn->data + 5495 saved_syn->mac_hdrlen; 5496 ret = saved_syn->network_hdrlen + 5497 saved_syn->tcp_hdrlen; 5498 } else { 5499 /* optname == TCP_BPF_SYN_MAC */ 5500 5501 /* TCP_SAVE_SYN may not have saved the mac hdr */ 5502 if (!saved_syn->mac_hdrlen) 5503 return -ENOENT; 5504 5505 hdr_start = saved_syn->data; 5506 ret = saved_syn->mac_hdrlen + 5507 saved_syn->network_hdrlen + 5508 saved_syn->tcp_hdrlen; 5509 } 5510 } 5511 5512 *start = hdr_start; 5513 return ret; 5514 } 5515 5516 BPF_CALL_5(bpf_sock_ops_getsockopt, struct bpf_sock_ops_kern *, bpf_sock, 5517 int, level, int, optname, char *, optval, int, optlen) 5518 { 5519 if (IS_ENABLED(CONFIG_INET) && level == SOL_TCP && 5520 optname >= TCP_BPF_SYN && optname <= TCP_BPF_SYN_MAC) { 5521 int ret, copy_len = 0; 5522 const u8 *start; 5523 5524 ret = bpf_sock_ops_get_syn(bpf_sock, optname, &start); 5525 if (ret > 0) { 5526 copy_len = ret; 5527 if (optlen < copy_len) { 5528 copy_len = optlen; 5529 ret = -ENOSPC; 5530 } 5531 5532 memcpy(optval, start, copy_len); 5533 } 5534 5535 /* Zero out unused buffer at the end */ 5536 memset(optval + copy_len, 0, optlen - copy_len); 5537 5538 return ret; 5539 } 5540 5541 return _bpf_getsockopt(bpf_sock->sk, level, optname, optval, optlen); 5542 } 5543 5544 static const struct bpf_func_proto bpf_sock_ops_getsockopt_proto = { 5545 .func = bpf_sock_ops_getsockopt, 5546 .gpl_only = false, 5547 .ret_type = RET_INTEGER, 5548 .arg1_type = ARG_PTR_TO_CTX, 5549 .arg2_type = ARG_ANYTHING, 5550 .arg3_type = ARG_ANYTHING, 5551 .arg4_type = ARG_PTR_TO_UNINIT_MEM, 5552 .arg5_type = ARG_CONST_SIZE, 5553 }; 5554 5555 BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock, 5556 int, argval) 5557 { 5558 struct sock 
*sk = bpf_sock->sk; 5559 int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS; 5560 5561 if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk)) 5562 return -EINVAL; 5563 5564 tcp_sk(sk)->bpf_sock_ops_cb_flags = val; 5565 5566 return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS); 5567 } 5568 5569 static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = { 5570 .func = bpf_sock_ops_cb_flags_set, 5571 .gpl_only = false, 5572 .ret_type = RET_INTEGER, 5573 .arg1_type = ARG_PTR_TO_CTX, 5574 .arg2_type = ARG_ANYTHING, 5575 }; 5576 5577 const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly; 5578 EXPORT_SYMBOL_GPL(ipv6_bpf_stub); 5579 5580 BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr, 5581 int, addr_len) 5582 { 5583 #ifdef CONFIG_INET 5584 struct sock *sk = ctx->sk; 5585 u32 flags = BIND_FROM_BPF; 5586 int err; 5587 5588 err = -EINVAL; 5589 if (addr_len < offsetofend(struct sockaddr, sa_family)) 5590 return err; 5591 if (addr->sa_family == AF_INET) { 5592 if (addr_len < sizeof(struct sockaddr_in)) 5593 return err; 5594 if (((struct sockaddr_in *)addr)->sin_port == htons(0)) 5595 flags |= BIND_FORCE_ADDRESS_NO_PORT; 5596 return __inet_bind(sk, addr, addr_len, flags); 5597 #if IS_ENABLED(CONFIG_IPV6) 5598 } else if (addr->sa_family == AF_INET6) { 5599 if (addr_len < SIN6_LEN_RFC2133) 5600 return err; 5601 if (((struct sockaddr_in6 *)addr)->sin6_port == htons(0)) 5602 flags |= BIND_FORCE_ADDRESS_NO_PORT; 5603 /* ipv6_bpf_stub cannot be NULL, since it's called from 5604 * bpf_cgroup_inet6_connect hook and ipv6 is already loaded 5605 */ 5606 return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, flags); 5607 #endif /* CONFIG_IPV6 */ 5608 } 5609 #endif /* CONFIG_INET */ 5610 5611 return -EAFNOSUPPORT; 5612 } 5613 5614 static const struct bpf_func_proto bpf_bind_proto = { 5615 .func = bpf_bind, 5616 .gpl_only = false, 5617 .ret_type = RET_INTEGER, 5618 .arg1_type = ARG_PTR_TO_CTX, 5619 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 5620 .arg3_type = ARG_CONST_SIZE, 5621 }; 5622 5623 #ifdef CONFIG_XFRM 5624 BPF_CALL_5(bpf_skb_get_xfrm_state, struct sk_buff *, skb, u32, index, 5625 struct bpf_xfrm_state *, to, u32, size, u64, flags) 5626 { 5627 const struct sec_path *sp = skb_sec_path(skb); 5628 const struct xfrm_state *x; 5629 5630 if (!sp || unlikely(index >= sp->len || flags)) 5631 goto err_clear; 5632 5633 x = sp->xvec[index]; 5634 5635 if (unlikely(size != sizeof(struct bpf_xfrm_state))) 5636 goto err_clear; 5637 5638 to->reqid = x->props.reqid; 5639 to->spi = x->id.spi; 5640 to->family = x->props.family; 5641 to->ext = 0; 5642 5643 if (to->family == AF_INET6) { 5644 memcpy(to->remote_ipv6, x->props.saddr.a6, 5645 sizeof(to->remote_ipv6)); 5646 } else { 5647 to->remote_ipv4 = x->props.saddr.a4; 5648 memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3); 5649 } 5650 5651 return 0; 5652 err_clear: 5653 memset(to, 0, size); 5654 return -EINVAL; 5655 } 5656 5657 static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = { 5658 .func = bpf_skb_get_xfrm_state, 5659 .gpl_only = false, 5660 .ret_type = RET_INTEGER, 5661 .arg1_type = ARG_PTR_TO_CTX, 5662 .arg2_type = ARG_ANYTHING, 5663 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 5664 .arg4_type = ARG_CONST_SIZE, 5665 .arg5_type = ARG_ANYTHING, 5666 }; 5667 #endif 5668 5669 #if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6) 5670 static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params, 5671 const struct neighbour *neigh, 5672 const struct net_device *dev, u32 mtu) 5673 { 5674 memcpy(params->dmac, neigh->ha, ETH_ALEN); 5675 
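/* dmac is the resolved L2 address of the nexthop; smac below comes
 * from the egress device chosen by the FIB lookup.
 */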
memcpy(params->smac, dev->dev_addr, ETH_ALEN); 5676 params->h_vlan_TCI = 0; 5677 params->h_vlan_proto = 0; 5678 if (mtu) 5679 params->mtu_result = mtu; /* union with tot_len */ 5680 5681 return 0; 5682 } 5683 #endif 5684 5685 #if IS_ENABLED(CONFIG_INET) 5686 static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, 5687 u32 flags, bool check_mtu) 5688 { 5689 struct fib_nh_common *nhc; 5690 struct in_device *in_dev; 5691 struct neighbour *neigh; 5692 struct net_device *dev; 5693 struct fib_result res; 5694 struct flowi4 fl4; 5695 u32 mtu = 0; 5696 int err; 5697 5698 dev = dev_get_by_index_rcu(net, params->ifindex); 5699 if (unlikely(!dev)) 5700 return -ENODEV; 5701 5702 /* verify forwarding is enabled on this interface */ 5703 in_dev = __in_dev_get_rcu(dev); 5704 if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev))) 5705 return BPF_FIB_LKUP_RET_FWD_DISABLED; 5706 5707 if (flags & BPF_FIB_LOOKUP_OUTPUT) { 5708 fl4.flowi4_iif = 1; 5709 fl4.flowi4_oif = params->ifindex; 5710 } else { 5711 fl4.flowi4_iif = params->ifindex; 5712 fl4.flowi4_oif = 0; 5713 } 5714 fl4.flowi4_tos = params->tos & IPTOS_RT_MASK; 5715 fl4.flowi4_scope = RT_SCOPE_UNIVERSE; 5716 fl4.flowi4_flags = 0; 5717 5718 fl4.flowi4_proto = params->l4_protocol; 5719 fl4.daddr = params->ipv4_dst; 5720 fl4.saddr = params->ipv4_src; 5721 fl4.fl4_sport = params->sport; 5722 fl4.fl4_dport = params->dport; 5723 fl4.flowi4_multipath_hash = 0; 5724 5725 if (flags & BPF_FIB_LOOKUP_DIRECT) { 5726 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN; 5727 struct fib_table *tb; 5728 5729 tb = fib_get_table(net, tbid); 5730 if (unlikely(!tb)) 5731 return BPF_FIB_LKUP_RET_NOT_FWDED; 5732 5733 err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF); 5734 } else { 5735 fl4.flowi4_mark = 0; 5736 fl4.flowi4_secid = 0; 5737 fl4.flowi4_tun_key.tun_id = 0; 5738 fl4.flowi4_uid = sock_net_uid(net, NULL); 5739 5740 err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF); 5741 } 5742 5743 if (err) { 5744 /* map fib lookup errors to RTN_ type */ 5745 if (err == -EINVAL) 5746 return BPF_FIB_LKUP_RET_BLACKHOLE; 5747 if (err == -EHOSTUNREACH) 5748 return BPF_FIB_LKUP_RET_UNREACHABLE; 5749 if (err == -EACCES) 5750 return BPF_FIB_LKUP_RET_PROHIBIT; 5751 5752 return BPF_FIB_LKUP_RET_NOT_FWDED; 5753 } 5754 5755 if (res.type != RTN_UNICAST) 5756 return BPF_FIB_LKUP_RET_NOT_FWDED; 5757 5758 if (fib_info_num_path(res.fi) > 1) 5759 fib_select_path(net, &res, &fl4, NULL); 5760 5761 if (check_mtu) { 5762 mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst); 5763 if (params->tot_len > mtu) { 5764 params->mtu_result = mtu; /* union with tot_len */ 5765 return BPF_FIB_LKUP_RET_FRAG_NEEDED; 5766 } 5767 } 5768 5769 nhc = res.nhc; 5770 5771 /* do not handle lwt encaps right now */ 5772 if (nhc->nhc_lwtstate) 5773 return BPF_FIB_LKUP_RET_UNSUPP_LWT; 5774 5775 dev = nhc->nhc_dev; 5776 5777 params->rt_metric = res.fi->fib_priority; 5778 params->ifindex = dev->ifindex; 5779 5780 /* xdp and cls_bpf programs are run in RCU-bh so 5781 * rcu_read_lock_bh is not needed here 5782 */ 5783 if (likely(nhc->nhc_gw_family != AF_INET6)) { 5784 if (nhc->nhc_gw_family) 5785 params->ipv4_dst = nhc->nhc_gw.ipv4; 5786 5787 neigh = __ipv4_neigh_lookup_noref(dev, 5788 (__force u32)params->ipv4_dst); 5789 } else { 5790 struct in6_addr *dst = (struct in6_addr *)params->ipv6_dst; 5791 5792 params->family = AF_INET6; 5793 *dst = nhc->nhc_gw.ipv6; 5794 neigh = __ipv6_neigh_lookup_noref_stub(dev, dst); 5795 } 5796 5797 if (!neigh) 5798 return BPF_FIB_LKUP_RET_NO_NEIGH; 5799 5800 return 
bpf_fib_set_fwd_params(params, neigh, dev, mtu); 5801 } 5802 #endif 5803 5804 #if IS_ENABLED(CONFIG_IPV6) 5805 static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, 5806 u32 flags, bool check_mtu) 5807 { 5808 struct in6_addr *src = (struct in6_addr *) params->ipv6_src; 5809 struct in6_addr *dst = (struct in6_addr *) params->ipv6_dst; 5810 struct fib6_result res = {}; 5811 struct neighbour *neigh; 5812 struct net_device *dev; 5813 struct inet6_dev *idev; 5814 struct flowi6 fl6; 5815 int strict = 0; 5816 int oif, err; 5817 u32 mtu = 0; 5818 5819 /* link local addresses are never forwarded */ 5820 if (rt6_need_strict(dst) || rt6_need_strict(src)) 5821 return BPF_FIB_LKUP_RET_NOT_FWDED; 5822 5823 dev = dev_get_by_index_rcu(net, params->ifindex); 5824 if (unlikely(!dev)) 5825 return -ENODEV; 5826 5827 idev = __in6_dev_get_safely(dev); 5828 if (unlikely(!idev || !idev->cnf.forwarding)) 5829 return BPF_FIB_LKUP_RET_FWD_DISABLED; 5830 5831 if (flags & BPF_FIB_LOOKUP_OUTPUT) { 5832 fl6.flowi6_iif = 1; 5833 oif = fl6.flowi6_oif = params->ifindex; 5834 } else { 5835 oif = fl6.flowi6_iif = params->ifindex; 5836 fl6.flowi6_oif = 0; 5837 strict = RT6_LOOKUP_F_HAS_SADDR; 5838 } 5839 fl6.flowlabel = params->flowinfo; 5840 fl6.flowi6_scope = 0; 5841 fl6.flowi6_flags = 0; 5842 fl6.mp_hash = 0; 5843 5844 fl6.flowi6_proto = params->l4_protocol; 5845 fl6.daddr = *dst; 5846 fl6.saddr = *src; 5847 fl6.fl6_sport = params->sport; 5848 fl6.fl6_dport = params->dport; 5849 5850 if (flags & BPF_FIB_LOOKUP_DIRECT) { 5851 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN; 5852 struct fib6_table *tb; 5853 5854 tb = ipv6_stub->fib6_get_table(net, tbid); 5855 if (unlikely(!tb)) 5856 return BPF_FIB_LKUP_RET_NOT_FWDED; 5857 5858 err = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, &res, 5859 strict); 5860 } else { 5861 fl6.flowi6_mark = 0; 5862 fl6.flowi6_secid = 0; 5863 fl6.flowi6_tun_key.tun_id = 0; 5864 fl6.flowi6_uid = sock_net_uid(net, NULL); 5865 5866 err = ipv6_stub->fib6_lookup(net, oif, &fl6, &res, strict); 5867 } 5868 5869 if (unlikely(err || IS_ERR_OR_NULL(res.f6i) || 5870 res.f6i == net->ipv6.fib6_null_entry)) 5871 return BPF_FIB_LKUP_RET_NOT_FWDED; 5872 5873 switch (res.fib6_type) { 5874 /* only unicast is forwarded */ 5875 case RTN_UNICAST: 5876 break; 5877 case RTN_BLACKHOLE: 5878 return BPF_FIB_LKUP_RET_BLACKHOLE; 5879 case RTN_UNREACHABLE: 5880 return BPF_FIB_LKUP_RET_UNREACHABLE; 5881 case RTN_PROHIBIT: 5882 return BPF_FIB_LKUP_RET_PROHIBIT; 5883 default: 5884 return BPF_FIB_LKUP_RET_NOT_FWDED; 5885 } 5886 5887 ipv6_stub->fib6_select_path(net, &res, &fl6, fl6.flowi6_oif, 5888 fl6.flowi6_oif != 0, NULL, strict); 5889 5890 if (check_mtu) { 5891 mtu = ipv6_stub->ip6_mtu_from_fib6(&res, dst, src); 5892 if (params->tot_len > mtu) { 5893 params->mtu_result = mtu; /* union with tot_len */ 5894 return BPF_FIB_LKUP_RET_FRAG_NEEDED; 5895 } 5896 } 5897 5898 if (res.nh->fib_nh_lws) 5899 return BPF_FIB_LKUP_RET_UNSUPP_LWT; 5900 5901 if (res.nh->fib_nh_gw_family) 5902 *dst = res.nh->fib_nh_gw6; 5903 5904 dev = res.nh->fib_nh_dev; 5905 params->rt_metric = res.f6i->fib6_metric; 5906 params->ifindex = dev->ifindex; 5907 5908 /* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is 5909 * not needed here. 
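*
* Once the neighbour below resolves, this function returns
* BPF_FIB_LKUP_RET_SUCCESS with params->{dmac,smac,ifindex} filled in by
* bpf_fib_set_fwd_params(). A typical XDP forwarder consumes that roughly
* as follows (illustrative sketch only; eth and params are program-side
* names assumed for the example):
*
*	if (bpf_fib_lookup(ctx, &params, sizeof(params), 0) ==
*	    BPF_FIB_LKUP_RET_SUCCESS) {
*		__builtin_memcpy(eth->h_dest, params.dmac, ETH_ALEN);
*		__builtin_memcpy(eth->h_source, params.smac, ETH_ALEN);
*		return bpf_redirect(params.ifindex, 0);
*	}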
5910 */ 5911 neigh = __ipv6_neigh_lookup_noref_stub(dev, dst); 5912 if (!neigh) 5913 return BPF_FIB_LKUP_RET_NO_NEIGH; 5914 5915 return bpf_fib_set_fwd_params(params, neigh, dev, mtu); 5916 } 5917 #endif 5918 5919 BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx, 5920 struct bpf_fib_lookup *, params, int, plen, u32, flags) 5921 { 5922 if (plen < sizeof(*params)) 5923 return -EINVAL; 5924 5925 if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT)) 5926 return -EINVAL; 5927 5928 switch (params->family) { 5929 #if IS_ENABLED(CONFIG_INET) 5930 case AF_INET: 5931 return bpf_ipv4_fib_lookup(dev_net(ctx->rxq->dev), params, 5932 flags, true); 5933 #endif 5934 #if IS_ENABLED(CONFIG_IPV6) 5935 case AF_INET6: 5936 return bpf_ipv6_fib_lookup(dev_net(ctx->rxq->dev), params, 5937 flags, true); 5938 #endif 5939 } 5940 return -EAFNOSUPPORT; 5941 } 5942 5943 static const struct bpf_func_proto bpf_xdp_fib_lookup_proto = { 5944 .func = bpf_xdp_fib_lookup, 5945 .gpl_only = true, 5946 .ret_type = RET_INTEGER, 5947 .arg1_type = ARG_PTR_TO_CTX, 5948 .arg2_type = ARG_PTR_TO_MEM, 5949 .arg3_type = ARG_CONST_SIZE, 5950 .arg4_type = ARG_ANYTHING, 5951 }; 5952 5953 BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb, 5954 struct bpf_fib_lookup *, params, int, plen, u32, flags) 5955 { 5956 struct net *net = dev_net(skb->dev); 5957 int rc = -EAFNOSUPPORT; 5958 bool check_mtu = false; 5959 5960 if (plen < sizeof(*params)) 5961 return -EINVAL; 5962 5963 if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT)) 5964 return -EINVAL; 5965 5966 if (params->tot_len) 5967 check_mtu = true; 5968 5969 switch (params->family) { 5970 #if IS_ENABLED(CONFIG_INET) 5971 case AF_INET: 5972 rc = bpf_ipv4_fib_lookup(net, params, flags, check_mtu); 5973 break; 5974 #endif 5975 #if IS_ENABLED(CONFIG_IPV6) 5976 case AF_INET6: 5977 rc = bpf_ipv6_fib_lookup(net, params, flags, check_mtu); 5978 break; 5979 #endif 5980 } 5981 5982 if (rc == BPF_FIB_LKUP_RET_SUCCESS && !check_mtu) { 5983 struct net_device *dev; 5984 5985 /* When tot_len isn't provided by user, check skb 5986 * against MTU of FIB lookup resulting net_device 5987 */ 5988 dev = dev_get_by_index_rcu(net, params->ifindex); 5989 if (!is_skb_forwardable(dev, skb)) 5990 rc = BPF_FIB_LKUP_RET_FRAG_NEEDED; 5991 5992 params->mtu_result = dev->mtu; /* union with tot_len */ 5993 } 5994 5995 return rc; 5996 } 5997 5998 static const struct bpf_func_proto bpf_skb_fib_lookup_proto = { 5999 .func = bpf_skb_fib_lookup, 6000 .gpl_only = true, 6001 .ret_type = RET_INTEGER, 6002 .arg1_type = ARG_PTR_TO_CTX, 6003 .arg2_type = ARG_PTR_TO_MEM, 6004 .arg3_type = ARG_CONST_SIZE, 6005 .arg4_type = ARG_ANYTHING, 6006 }; 6007 6008 static struct net_device *__dev_via_ifindex(struct net_device *dev_curr, 6009 u32 ifindex) 6010 { 6011 struct net *netns = dev_net(dev_curr); 6012 6013 /* Non-redirect use-cases can use ifindex=0 and save ifindex lookup */ 6014 if (ifindex == 0) 6015 return dev_curr; 6016 6017 return dev_get_by_index_rcu(netns, ifindex); 6018 } 6019 6020 BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb, 6021 u32, ifindex, u32 *, mtu_len, s32, len_diff, u64, flags) 6022 { 6023 int ret = BPF_MTU_CHK_RET_FRAG_NEEDED; 6024 struct net_device *dev = skb->dev; 6025 int skb_len, dev_len; 6026 int mtu; 6027 6028 if (unlikely(flags & ~(BPF_MTU_CHK_SEGS))) 6029 return -EINVAL; 6030 6031 if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len))) 6032 return -EINVAL; 6033 6034 dev = __dev_via_ifindex(dev, ifindex); 6035 if (unlikely(!dev)) 6036 return -ENODEV; 6037 6038 mtu = 
READ_ONCE(dev->mtu); 6039 6040 dev_len = mtu + dev->hard_header_len; 6041 6042 /* If set use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */ 6043 skb_len = *mtu_len ? *mtu_len + dev->hard_header_len : skb->len; 6044 6045 skb_len += len_diff; /* minus result pass check */ 6046 if (skb_len <= dev_len) { 6047 ret = BPF_MTU_CHK_RET_SUCCESS; 6048 goto out; 6049 } 6050 /* At this point, skb->len exceed MTU, but as it include length of all 6051 * segments, it can still be below MTU. The SKB can possibly get 6052 * re-segmented in transmit path (see validate_xmit_skb). Thus, user 6053 * must choose if segs are to be MTU checked. 6054 */ 6055 if (skb_is_gso(skb)) { 6056 ret = BPF_MTU_CHK_RET_SUCCESS; 6057 6058 if (flags & BPF_MTU_CHK_SEGS && 6059 !skb_gso_validate_network_len(skb, mtu)) 6060 ret = BPF_MTU_CHK_RET_SEGS_TOOBIG; 6061 } 6062 out: 6063 /* BPF verifier guarantees valid pointer */ 6064 *mtu_len = mtu; 6065 6066 return ret; 6067 } 6068 6069 BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp, 6070 u32, ifindex, u32 *, mtu_len, s32, len_diff, u64, flags) 6071 { 6072 struct net_device *dev = xdp->rxq->dev; 6073 int xdp_len = xdp->data_end - xdp->data; 6074 int ret = BPF_MTU_CHK_RET_SUCCESS; 6075 int mtu, dev_len; 6076 6077 /* XDP variant doesn't support multi-buffer segment check (yet) */ 6078 if (unlikely(flags)) 6079 return -EINVAL; 6080 6081 dev = __dev_via_ifindex(dev, ifindex); 6082 if (unlikely(!dev)) 6083 return -ENODEV; 6084 6085 mtu = READ_ONCE(dev->mtu); 6086 6087 /* Add L2-header as dev MTU is L3 size */ 6088 dev_len = mtu + dev->hard_header_len; 6089 6090 /* Use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */ 6091 if (*mtu_len) 6092 xdp_len = *mtu_len + dev->hard_header_len; 6093 6094 xdp_len += len_diff; /* minus result pass check */ 6095 if (xdp_len > dev_len) 6096 ret = BPF_MTU_CHK_RET_FRAG_NEEDED; 6097 6098 /* BPF verifier guarantees valid pointer */ 6099 *mtu_len = mtu; 6100 6101 return ret; 6102 } 6103 6104 static const struct bpf_func_proto bpf_skb_check_mtu_proto = { 6105 .func = bpf_skb_check_mtu, 6106 .gpl_only = true, 6107 .ret_type = RET_INTEGER, 6108 .arg1_type = ARG_PTR_TO_CTX, 6109 .arg2_type = ARG_ANYTHING, 6110 .arg3_type = ARG_PTR_TO_INT, 6111 .arg4_type = ARG_ANYTHING, 6112 .arg5_type = ARG_ANYTHING, 6113 }; 6114 6115 static const struct bpf_func_proto bpf_xdp_check_mtu_proto = { 6116 .func = bpf_xdp_check_mtu, 6117 .gpl_only = true, 6118 .ret_type = RET_INTEGER, 6119 .arg1_type = ARG_PTR_TO_CTX, 6120 .arg2_type = ARG_ANYTHING, 6121 .arg3_type = ARG_PTR_TO_INT, 6122 .arg4_type = ARG_ANYTHING, 6123 .arg5_type = ARG_ANYTHING, 6124 }; 6125 6126 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) 6127 static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) 6128 { 6129 int err; 6130 struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)hdr; 6131 6132 if (!seg6_validate_srh(srh, len, false)) 6133 return -EINVAL; 6134 6135 switch (type) { 6136 case BPF_LWT_ENCAP_SEG6_INLINE: 6137 if (skb->protocol != htons(ETH_P_IPV6)) 6138 return -EBADMSG; 6139 6140 err = seg6_do_srh_inline(skb, srh); 6141 break; 6142 case BPF_LWT_ENCAP_SEG6: 6143 skb_reset_inner_headers(skb); 6144 skb->encapsulation = 1; 6145 err = seg6_do_srh_encap(skb, srh, IPPROTO_IPV6); 6146 break; 6147 default: 6148 return -EINVAL; 6149 } 6150 6151 bpf_compute_data_pointers(skb); 6152 if (err) 6153 return err; 6154 6155 skb_set_transport_header(skb, sizeof(struct ipv6hdr)); 6156 6157 return seg6_lookup_nexthop(skb, NULL, 0); 6158 } 6159 #endif /* CONFIG_IPV6_SEG6_BPF */ 6160 6161 #if 
IS_ENABLED(CONFIG_LWTUNNEL_BPF) 6162 static int bpf_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len, 6163 bool ingress) 6164 { 6165 return bpf_lwt_push_ip_encap(skb, hdr, len, ingress); 6166 } 6167 #endif 6168 6169 BPF_CALL_4(bpf_lwt_in_push_encap, struct sk_buff *, skb, u32, type, void *, hdr, 6170 u32, len) 6171 { 6172 switch (type) { 6173 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) 6174 case BPF_LWT_ENCAP_SEG6: 6175 case BPF_LWT_ENCAP_SEG6_INLINE: 6176 return bpf_push_seg6_encap(skb, type, hdr, len); 6177 #endif 6178 #if IS_ENABLED(CONFIG_LWTUNNEL_BPF) 6179 case BPF_LWT_ENCAP_IP: 6180 return bpf_push_ip_encap(skb, hdr, len, true /* ingress */); 6181 #endif 6182 default: 6183 return -EINVAL; 6184 } 6185 } 6186 6187 BPF_CALL_4(bpf_lwt_xmit_push_encap, struct sk_buff *, skb, u32, type, 6188 void *, hdr, u32, len) 6189 { 6190 switch (type) { 6191 #if IS_ENABLED(CONFIG_LWTUNNEL_BPF) 6192 case BPF_LWT_ENCAP_IP: 6193 return bpf_push_ip_encap(skb, hdr, len, false /* egress */); 6194 #endif 6195 default: 6196 return -EINVAL; 6197 } 6198 } 6199 6200 static const struct bpf_func_proto bpf_lwt_in_push_encap_proto = { 6201 .func = bpf_lwt_in_push_encap, 6202 .gpl_only = false, 6203 .ret_type = RET_INTEGER, 6204 .arg1_type = ARG_PTR_TO_CTX, 6205 .arg2_type = ARG_ANYTHING, 6206 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, 6207 .arg4_type = ARG_CONST_SIZE 6208 }; 6209 6210 static const struct bpf_func_proto bpf_lwt_xmit_push_encap_proto = { 6211 .func = bpf_lwt_xmit_push_encap, 6212 .gpl_only = false, 6213 .ret_type = RET_INTEGER, 6214 .arg1_type = ARG_PTR_TO_CTX, 6215 .arg2_type = ARG_ANYTHING, 6216 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, 6217 .arg4_type = ARG_CONST_SIZE 6218 }; 6219 6220 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) 6221 BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset, 6222 const void *, from, u32, len) 6223 { 6224 struct seg6_bpf_srh_state *srh_state = 6225 this_cpu_ptr(&seg6_bpf_srh_states); 6226 struct ipv6_sr_hdr *srh = srh_state->srh; 6227 void *srh_tlvs, *srh_end, *ptr; 6228 int srhoff = 0; 6229 6230 if (srh == NULL) 6231 return -EINVAL; 6232 6233 srh_tlvs = (void *)((char *)srh + ((srh->first_segment + 1) << 4)); 6234 srh_end = (void *)((char *)srh + sizeof(*srh) + srh_state->hdrlen); 6235 6236 ptr = skb->data + offset; 6237 if (ptr >= srh_tlvs && ptr + len <= srh_end) 6238 srh_state->valid = false; 6239 else if (ptr < (void *)&srh->flags || 6240 ptr + len > (void *)&srh->segments) 6241 return -EFAULT; 6242 6243 if (unlikely(bpf_try_make_writable(skb, offset + len))) 6244 return -EFAULT; 6245 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) 6246 return -EINVAL; 6247 srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff); 6248 6249 memcpy(skb->data + offset, from, len); 6250 return 0; 6251 } 6252 6253 static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = { 6254 .func = bpf_lwt_seg6_store_bytes, 6255 .gpl_only = false, 6256 .ret_type = RET_INTEGER, 6257 .arg1_type = ARG_PTR_TO_CTX, 6258 .arg2_type = ARG_ANYTHING, 6259 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, 6260 .arg4_type = ARG_CONST_SIZE 6261 }; 6262 6263 static void bpf_update_srh_state(struct sk_buff *skb) 6264 { 6265 struct seg6_bpf_srh_state *srh_state = 6266 this_cpu_ptr(&seg6_bpf_srh_states); 6267 int srhoff = 0; 6268 6269 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) { 6270 srh_state->srh = NULL; 6271 } else { 6272 srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff); 6273 srh_state->hdrlen = srh_state->srh->hdrlen << 3; 6274 srh_state->valid 
= true; 6275 } 6276 } 6277 6278 BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb, 6279 u32, action, void *, param, u32, param_len) 6280 { 6281 struct seg6_bpf_srh_state *srh_state = 6282 this_cpu_ptr(&seg6_bpf_srh_states); 6283 int hdroff = 0; 6284 int err; 6285 6286 switch (action) { 6287 case SEG6_LOCAL_ACTION_END_X: 6288 if (!seg6_bpf_has_valid_srh(skb)) 6289 return -EBADMSG; 6290 if (param_len != sizeof(struct in6_addr)) 6291 return -EINVAL; 6292 return seg6_lookup_nexthop(skb, (struct in6_addr *)param, 0); 6293 case SEG6_LOCAL_ACTION_END_T: 6294 if (!seg6_bpf_has_valid_srh(skb)) 6295 return -EBADMSG; 6296 if (param_len != sizeof(int)) 6297 return -EINVAL; 6298 return seg6_lookup_nexthop(skb, NULL, *(int *)param); 6299 case SEG6_LOCAL_ACTION_END_DT6: 6300 if (!seg6_bpf_has_valid_srh(skb)) 6301 return -EBADMSG; 6302 if (param_len != sizeof(int)) 6303 return -EINVAL; 6304 6305 if (ipv6_find_hdr(skb, &hdroff, IPPROTO_IPV6, NULL, NULL) < 0) 6306 return -EBADMSG; 6307 if (!pskb_pull(skb, hdroff)) 6308 return -EBADMSG; 6309 6310 skb_postpull_rcsum(skb, skb_network_header(skb), hdroff); 6311 skb_reset_network_header(skb); 6312 skb_reset_transport_header(skb); 6313 skb->encapsulation = 0; 6314 6315 bpf_compute_data_pointers(skb); 6316 bpf_update_srh_state(skb); 6317 return seg6_lookup_nexthop(skb, NULL, *(int *)param); 6318 case SEG6_LOCAL_ACTION_END_B6: 6319 if (srh_state->srh && !seg6_bpf_has_valid_srh(skb)) 6320 return -EBADMSG; 6321 err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6_INLINE, 6322 param, param_len); 6323 if (!err) 6324 bpf_update_srh_state(skb); 6325 6326 return err; 6327 case SEG6_LOCAL_ACTION_END_B6_ENCAP: 6328 if (srh_state->srh && !seg6_bpf_has_valid_srh(skb)) 6329 return -EBADMSG; 6330 err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6, 6331 param, param_len); 6332 if (!err) 6333 bpf_update_srh_state(skb); 6334 6335 return err; 6336 default: 6337 return -EINVAL; 6338 } 6339 } 6340 6341 static const struct bpf_func_proto bpf_lwt_seg6_action_proto = { 6342 .func = bpf_lwt_seg6_action, 6343 .gpl_only = false, 6344 .ret_type = RET_INTEGER, 6345 .arg1_type = ARG_PTR_TO_CTX, 6346 .arg2_type = ARG_ANYTHING, 6347 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, 6348 .arg4_type = ARG_CONST_SIZE 6349 }; 6350 6351 BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset, 6352 s32, len) 6353 { 6354 struct seg6_bpf_srh_state *srh_state = 6355 this_cpu_ptr(&seg6_bpf_srh_states); 6356 struct ipv6_sr_hdr *srh = srh_state->srh; 6357 void *srh_end, *srh_tlvs, *ptr; 6358 struct ipv6hdr *hdr; 6359 int srhoff = 0; 6360 int ret; 6361 6362 if (unlikely(srh == NULL)) 6363 return -EINVAL; 6364 6365 srh_tlvs = (void *)((unsigned char *)srh + sizeof(*srh) + 6366 ((srh->first_segment + 1) << 4)); 6367 srh_end = (void *)((unsigned char *)srh + sizeof(*srh) + 6368 srh_state->hdrlen); 6369 ptr = skb->data + offset; 6370 6371 if (unlikely(ptr < srh_tlvs || ptr > srh_end)) 6372 return -EFAULT; 6373 if (unlikely(len < 0 && (void *)((char *)ptr - len) > srh_end)) 6374 return -EFAULT; 6375 6376 if (len > 0) { 6377 ret = skb_cow_head(skb, len); 6378 if (unlikely(ret < 0)) 6379 return ret; 6380 6381 ret = bpf_skb_net_hdr_push(skb, offset, len); 6382 } else { 6383 ret = bpf_skb_net_hdr_pop(skb, offset, -1 * len); 6384 } 6385 6386 bpf_compute_data_pointers(skb); 6387 if (unlikely(ret < 0)) 6388 return ret; 6389 6390 hdr = (struct ipv6hdr *)skb->data; 6391 hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); 6392 6393 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) 6394 
return -EINVAL; 6395 srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff); 6396 srh_state->hdrlen += len; 6397 srh_state->valid = false; 6398 return 0; 6399 } 6400 6401 static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = { 6402 .func = bpf_lwt_seg6_adjust_srh, 6403 .gpl_only = false, 6404 .ret_type = RET_INTEGER, 6405 .arg1_type = ARG_PTR_TO_CTX, 6406 .arg2_type = ARG_ANYTHING, 6407 .arg3_type = ARG_ANYTHING, 6408 }; 6409 #endif /* CONFIG_IPV6_SEG6_BPF */ 6410 6411 #ifdef CONFIG_INET 6412 static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple, 6413 int dif, int sdif, u8 family, u8 proto) 6414 { 6415 struct inet_hashinfo *hinfo = net->ipv4.tcp_death_row.hashinfo; 6416 bool refcounted = false; 6417 struct sock *sk = NULL; 6418 6419 if (family == AF_INET) { 6420 __be32 src4 = tuple->ipv4.saddr; 6421 __be32 dst4 = tuple->ipv4.daddr; 6422 6423 if (proto == IPPROTO_TCP) 6424 sk = __inet_lookup(net, hinfo, NULL, 0, 6425 src4, tuple->ipv4.sport, 6426 dst4, tuple->ipv4.dport, 6427 dif, sdif, &refcounted); 6428 else 6429 sk = __udp4_lib_lookup(net, src4, tuple->ipv4.sport, 6430 dst4, tuple->ipv4.dport, 6431 dif, sdif, &udp_table, NULL); 6432 #if IS_ENABLED(CONFIG_IPV6) 6433 } else { 6434 struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr; 6435 struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr; 6436 6437 if (proto == IPPROTO_TCP) 6438 sk = __inet6_lookup(net, hinfo, NULL, 0, 6439 src6, tuple->ipv6.sport, 6440 dst6, ntohs(tuple->ipv6.dport), 6441 dif, sdif, &refcounted); 6442 else if (likely(ipv6_bpf_stub)) 6443 sk = ipv6_bpf_stub->udp6_lib_lookup(net, 6444 src6, tuple->ipv6.sport, 6445 dst6, tuple->ipv6.dport, 6446 dif, sdif, 6447 &udp_table, NULL); 6448 #endif 6449 } 6450 6451 if (unlikely(sk && !refcounted && !sock_flag(sk, SOCK_RCU_FREE))) { 6452 WARN_ONCE(1, "Found non-RCU, unreferenced socket!"); 6453 sk = NULL; 6454 } 6455 return sk; 6456 } 6457 6458 /* bpf_skc_lookup performs the core lookup for different types of sockets, 6459 * taking a reference on the socket if it doesn't have the flag SOCK_RCU_FREE. 
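 *
 * From the BPF program side the lookup helpers built on top of this are
 * paired with bpf_sk_release(); a minimal tc (cls_act) sketch, where
 * iph/tcph are assumed to have already been parsed and bounds-checked
 * by the program:
 *
 *	struct bpf_sock_tuple tuple = {
 *		.ipv4.saddr = iph->saddr,
 *		.ipv4.daddr = iph->daddr,
 *		.ipv4.sport = tcph->source,
 *		.ipv4.dport = tcph->dest,
 *	};
 *	struct bpf_sock *sk;
 *
 *	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
 *			       BPF_F_CURRENT_NETNS, 0);
 *	if (sk)
 *		bpf_sk_release(sk);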
6460 */ 6461 static struct sock * 6462 __bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, 6463 struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id, 6464 u64 flags) 6465 { 6466 struct sock *sk = NULL; 6467 struct net *net; 6468 u8 family; 6469 int sdif; 6470 6471 if (len == sizeof(tuple->ipv4)) 6472 family = AF_INET; 6473 else if (len == sizeof(tuple->ipv6)) 6474 family = AF_INET6; 6475 else 6476 return NULL; 6477 6478 if (unlikely(flags || !((s32)netns_id < 0 || netns_id <= S32_MAX))) 6479 goto out; 6480 6481 if (family == AF_INET) 6482 sdif = inet_sdif(skb); 6483 else 6484 sdif = inet6_sdif(skb); 6485 6486 if ((s32)netns_id < 0) { 6487 net = caller_net; 6488 sk = sk_lookup(net, tuple, ifindex, sdif, family, proto); 6489 } else { 6490 net = get_net_ns_by_id(caller_net, netns_id); 6491 if (unlikely(!net)) 6492 goto out; 6493 sk = sk_lookup(net, tuple, ifindex, sdif, family, proto); 6494 put_net(net); 6495 } 6496 6497 out: 6498 return sk; 6499 } 6500 6501 static struct sock * 6502 __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, 6503 struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id, 6504 u64 flags) 6505 { 6506 struct sock *sk = __bpf_skc_lookup(skb, tuple, len, caller_net, 6507 ifindex, proto, netns_id, flags); 6508 6509 if (sk) { 6510 struct sock *sk2 = sk_to_full_sk(sk); 6511 6512 /* sk_to_full_sk() may return (sk)->rsk_listener, so make sure the original sk 6513 * sock refcnt is decremented to prevent a request_sock leak. 6514 */ 6515 if (!sk_fullsock(sk2)) 6516 sk2 = NULL; 6517 if (sk2 != sk) { 6518 sock_gen_put(sk); 6519 /* Ensure there is no need to bump sk2 refcnt */ 6520 if (unlikely(sk2 && !sock_flag(sk2, SOCK_RCU_FREE))) { 6521 WARN_ONCE(1, "Found non-RCU, unreferenced socket!"); 6522 return NULL; 6523 } 6524 sk = sk2; 6525 } 6526 } 6527 6528 return sk; 6529 } 6530 6531 static struct sock * 6532 bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, 6533 u8 proto, u64 netns_id, u64 flags) 6534 { 6535 struct net *caller_net; 6536 int ifindex; 6537 6538 if (skb->dev) { 6539 caller_net = dev_net(skb->dev); 6540 ifindex = skb->dev->ifindex; 6541 } else { 6542 caller_net = sock_net(skb->sk); 6543 ifindex = 0; 6544 } 6545 6546 return __bpf_skc_lookup(skb, tuple, len, caller_net, ifindex, proto, 6547 netns_id, flags); 6548 } 6549 6550 static struct sock * 6551 bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, 6552 u8 proto, u64 netns_id, u64 flags) 6553 { 6554 struct sock *sk = bpf_skc_lookup(skb, tuple, len, proto, netns_id, 6555 flags); 6556 6557 if (sk) { 6558 struct sock *sk2 = sk_to_full_sk(sk); 6559 6560 /* sk_to_full_sk() may return (sk)->rsk_listener, so make sure the original sk 6561 * sock refcnt is decremented to prevent a request_sock leak. 
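 * The net effect is that only full sockets are returned to the caller:
 * a request_sock is replaced by its listener, and anything else that is
 * not a full socket (e.g. a timewait socket) is dropped.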
6562 */ 6563 if (!sk_fullsock(sk2)) 6564 sk2 = NULL; 6565 if (sk2 != sk) { 6566 sock_gen_put(sk); 6567 /* Ensure there is no need to bump sk2 refcnt */ 6568 if (unlikely(sk2 && !sock_flag(sk2, SOCK_RCU_FREE))) { 6569 WARN_ONCE(1, "Found non-RCU, unreferenced socket!"); 6570 return NULL; 6571 } 6572 sk = sk2; 6573 } 6574 } 6575 6576 return sk; 6577 } 6578 6579 BPF_CALL_5(bpf_skc_lookup_tcp, struct sk_buff *, skb, 6580 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) 6581 { 6582 return (unsigned long)bpf_skc_lookup(skb, tuple, len, IPPROTO_TCP, 6583 netns_id, flags); 6584 } 6585 6586 static const struct bpf_func_proto bpf_skc_lookup_tcp_proto = { 6587 .func = bpf_skc_lookup_tcp, 6588 .gpl_only = false, 6589 .pkt_access = true, 6590 .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL, 6591 .arg1_type = ARG_PTR_TO_CTX, 6592 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 6593 .arg3_type = ARG_CONST_SIZE, 6594 .arg4_type = ARG_ANYTHING, 6595 .arg5_type = ARG_ANYTHING, 6596 }; 6597 6598 BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb, 6599 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) 6600 { 6601 return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP, 6602 netns_id, flags); 6603 } 6604 6605 static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = { 6606 .func = bpf_sk_lookup_tcp, 6607 .gpl_only = false, 6608 .pkt_access = true, 6609 .ret_type = RET_PTR_TO_SOCKET_OR_NULL, 6610 .arg1_type = ARG_PTR_TO_CTX, 6611 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 6612 .arg3_type = ARG_CONST_SIZE, 6613 .arg4_type = ARG_ANYTHING, 6614 .arg5_type = ARG_ANYTHING, 6615 }; 6616 6617 BPF_CALL_5(bpf_sk_lookup_udp, struct sk_buff *, skb, 6618 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) 6619 { 6620 return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP, 6621 netns_id, flags); 6622 } 6623 6624 static const struct bpf_func_proto bpf_sk_lookup_udp_proto = { 6625 .func = bpf_sk_lookup_udp, 6626 .gpl_only = false, 6627 .pkt_access = true, 6628 .ret_type = RET_PTR_TO_SOCKET_OR_NULL, 6629 .arg1_type = ARG_PTR_TO_CTX, 6630 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 6631 .arg3_type = ARG_CONST_SIZE, 6632 .arg4_type = ARG_ANYTHING, 6633 .arg5_type = ARG_ANYTHING, 6634 }; 6635 6636 BPF_CALL_1(bpf_sk_release, struct sock *, sk) 6637 { 6638 if (sk && sk_is_refcounted(sk)) 6639 sock_gen_put(sk); 6640 return 0; 6641 } 6642 6643 static const struct bpf_func_proto bpf_sk_release_proto = { 6644 .func = bpf_sk_release, 6645 .gpl_only = false, 6646 .ret_type = RET_INTEGER, 6647 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON | OBJ_RELEASE, 6648 }; 6649 6650 BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx, 6651 struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags) 6652 { 6653 struct net *caller_net = dev_net(ctx->rxq->dev); 6654 int ifindex = ctx->rxq->dev->ifindex; 6655 6656 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net, 6657 ifindex, IPPROTO_UDP, netns_id, 6658 flags); 6659 } 6660 6661 static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = { 6662 .func = bpf_xdp_sk_lookup_udp, 6663 .gpl_only = false, 6664 .pkt_access = true, 6665 .ret_type = RET_PTR_TO_SOCKET_OR_NULL, 6666 .arg1_type = ARG_PTR_TO_CTX, 6667 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 6668 .arg3_type = ARG_CONST_SIZE, 6669 .arg4_type = ARG_ANYTHING, 6670 .arg5_type = ARG_ANYTHING, 6671 }; 6672 6673 BPF_CALL_5(bpf_xdp_skc_lookup_tcp, struct xdp_buff *, ctx, 6674 struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags) 6675 { 6676 
struct net *caller_net = dev_net(ctx->rxq->dev); 6677 int ifindex = ctx->rxq->dev->ifindex; 6678 6679 return (unsigned long)__bpf_skc_lookup(NULL, tuple, len, caller_net, 6680 ifindex, IPPROTO_TCP, netns_id, 6681 flags); 6682 } 6683 6684 static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = { 6685 .func = bpf_xdp_skc_lookup_tcp, 6686 .gpl_only = false, 6687 .pkt_access = true, 6688 .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL, 6689 .arg1_type = ARG_PTR_TO_CTX, 6690 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 6691 .arg3_type = ARG_CONST_SIZE, 6692 .arg4_type = ARG_ANYTHING, 6693 .arg5_type = ARG_ANYTHING, 6694 }; 6695 6696 BPF_CALL_5(bpf_xdp_sk_lookup_tcp, struct xdp_buff *, ctx, 6697 struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags) 6698 { 6699 struct net *caller_net = dev_net(ctx->rxq->dev); 6700 int ifindex = ctx->rxq->dev->ifindex; 6701 6702 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net, 6703 ifindex, IPPROTO_TCP, netns_id, 6704 flags); 6705 } 6706 6707 static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = { 6708 .func = bpf_xdp_sk_lookup_tcp, 6709 .gpl_only = false, 6710 .pkt_access = true, 6711 .ret_type = RET_PTR_TO_SOCKET_OR_NULL, 6712 .arg1_type = ARG_PTR_TO_CTX, 6713 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 6714 .arg3_type = ARG_CONST_SIZE, 6715 .arg4_type = ARG_ANYTHING, 6716 .arg5_type = ARG_ANYTHING, 6717 }; 6718 6719 BPF_CALL_5(bpf_sock_addr_skc_lookup_tcp, struct bpf_sock_addr_kern *, ctx, 6720 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) 6721 { 6722 return (unsigned long)__bpf_skc_lookup(NULL, tuple, len, 6723 sock_net(ctx->sk), 0, 6724 IPPROTO_TCP, netns_id, flags); 6725 } 6726 6727 static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = { 6728 .func = bpf_sock_addr_skc_lookup_tcp, 6729 .gpl_only = false, 6730 .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL, 6731 .arg1_type = ARG_PTR_TO_CTX, 6732 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 6733 .arg3_type = ARG_CONST_SIZE, 6734 .arg4_type = ARG_ANYTHING, 6735 .arg5_type = ARG_ANYTHING, 6736 }; 6737 6738 BPF_CALL_5(bpf_sock_addr_sk_lookup_tcp, struct bpf_sock_addr_kern *, ctx, 6739 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) 6740 { 6741 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, 6742 sock_net(ctx->sk), 0, IPPROTO_TCP, 6743 netns_id, flags); 6744 } 6745 6746 static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = { 6747 .func = bpf_sock_addr_sk_lookup_tcp, 6748 .gpl_only = false, 6749 .ret_type = RET_PTR_TO_SOCKET_OR_NULL, 6750 .arg1_type = ARG_PTR_TO_CTX, 6751 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 6752 .arg3_type = ARG_CONST_SIZE, 6753 .arg4_type = ARG_ANYTHING, 6754 .arg5_type = ARG_ANYTHING, 6755 }; 6756 6757 BPF_CALL_5(bpf_sock_addr_sk_lookup_udp, struct bpf_sock_addr_kern *, ctx, 6758 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) 6759 { 6760 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, 6761 sock_net(ctx->sk), 0, IPPROTO_UDP, 6762 netns_id, flags); 6763 } 6764 6765 static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = { 6766 .func = bpf_sock_addr_sk_lookup_udp, 6767 .gpl_only = false, 6768 .ret_type = RET_PTR_TO_SOCKET_OR_NULL, 6769 .arg1_type = ARG_PTR_TO_CTX, 6770 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 6771 .arg3_type = ARG_CONST_SIZE, 6772 .arg4_type = ARG_ANYTHING, 6773 .arg5_type = ARG_ANYTHING, 6774 }; 6775 6776 bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, 6777 struct 
bpf_insn_access_aux *info) 6778 { 6779 if (off < 0 || off >= offsetofend(struct bpf_tcp_sock, 6780 icsk_retransmits)) 6781 return false; 6782 6783 if (off % size != 0) 6784 return false; 6785 6786 switch (off) { 6787 case offsetof(struct bpf_tcp_sock, bytes_received): 6788 case offsetof(struct bpf_tcp_sock, bytes_acked): 6789 return size == sizeof(__u64); 6790 default: 6791 return size == sizeof(__u32); 6792 } 6793 } 6794 6795 u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, 6796 const struct bpf_insn *si, 6797 struct bpf_insn *insn_buf, 6798 struct bpf_prog *prog, u32 *target_size) 6799 { 6800 struct bpf_insn *insn = insn_buf; 6801 6802 #define BPF_TCP_SOCK_GET_COMMON(FIELD) \ 6803 do { \ 6804 BUILD_BUG_ON(sizeof_field(struct tcp_sock, FIELD) > \ 6805 sizeof_field(struct bpf_tcp_sock, FIELD)); \ 6806 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_sock, FIELD),\ 6807 si->dst_reg, si->src_reg, \ 6808 offsetof(struct tcp_sock, FIELD)); \ 6809 } while (0) 6810 6811 #define BPF_INET_SOCK_GET_COMMON(FIELD) \ 6812 do { \ 6813 BUILD_BUG_ON(sizeof_field(struct inet_connection_sock, \ 6814 FIELD) > \ 6815 sizeof_field(struct bpf_tcp_sock, FIELD)); \ 6816 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ 6817 struct inet_connection_sock, \ 6818 FIELD), \ 6819 si->dst_reg, si->src_reg, \ 6820 offsetof( \ 6821 struct inet_connection_sock, \ 6822 FIELD)); \ 6823 } while (0) 6824 6825 if (insn > insn_buf) 6826 return insn - insn_buf; 6827 6828 switch (si->off) { 6829 case offsetof(struct bpf_tcp_sock, rtt_min): 6830 BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) != 6831 sizeof(struct minmax)); 6832 BUILD_BUG_ON(sizeof(struct minmax) < 6833 sizeof(struct minmax_sample)); 6834 6835 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 6836 offsetof(struct tcp_sock, rtt_min) + 6837 offsetof(struct minmax_sample, v)); 6838 break; 6839 case offsetof(struct bpf_tcp_sock, snd_cwnd): 6840 BPF_TCP_SOCK_GET_COMMON(snd_cwnd); 6841 break; 6842 case offsetof(struct bpf_tcp_sock, srtt_us): 6843 BPF_TCP_SOCK_GET_COMMON(srtt_us); 6844 break; 6845 case offsetof(struct bpf_tcp_sock, snd_ssthresh): 6846 BPF_TCP_SOCK_GET_COMMON(snd_ssthresh); 6847 break; 6848 case offsetof(struct bpf_tcp_sock, rcv_nxt): 6849 BPF_TCP_SOCK_GET_COMMON(rcv_nxt); 6850 break; 6851 case offsetof(struct bpf_tcp_sock, snd_nxt): 6852 BPF_TCP_SOCK_GET_COMMON(snd_nxt); 6853 break; 6854 case offsetof(struct bpf_tcp_sock, snd_una): 6855 BPF_TCP_SOCK_GET_COMMON(snd_una); 6856 break; 6857 case offsetof(struct bpf_tcp_sock, mss_cache): 6858 BPF_TCP_SOCK_GET_COMMON(mss_cache); 6859 break; 6860 case offsetof(struct bpf_tcp_sock, ecn_flags): 6861 BPF_TCP_SOCK_GET_COMMON(ecn_flags); 6862 break; 6863 case offsetof(struct bpf_tcp_sock, rate_delivered): 6864 BPF_TCP_SOCK_GET_COMMON(rate_delivered); 6865 break; 6866 case offsetof(struct bpf_tcp_sock, rate_interval_us): 6867 BPF_TCP_SOCK_GET_COMMON(rate_interval_us); 6868 break; 6869 case offsetof(struct bpf_tcp_sock, packets_out): 6870 BPF_TCP_SOCK_GET_COMMON(packets_out); 6871 break; 6872 case offsetof(struct bpf_tcp_sock, retrans_out): 6873 BPF_TCP_SOCK_GET_COMMON(retrans_out); 6874 break; 6875 case offsetof(struct bpf_tcp_sock, total_retrans): 6876 BPF_TCP_SOCK_GET_COMMON(total_retrans); 6877 break; 6878 case offsetof(struct bpf_tcp_sock, segs_in): 6879 BPF_TCP_SOCK_GET_COMMON(segs_in); 6880 break; 6881 case offsetof(struct bpf_tcp_sock, data_segs_in): 6882 BPF_TCP_SOCK_GET_COMMON(data_segs_in); 6883 break; 6884 case offsetof(struct bpf_tcp_sock, segs_out): 6885 BPF_TCP_SOCK_GET_COMMON(segs_out); 
6886 break; 6887 case offsetof(struct bpf_tcp_sock, data_segs_out): 6888 BPF_TCP_SOCK_GET_COMMON(data_segs_out); 6889 break; 6890 case offsetof(struct bpf_tcp_sock, lost_out): 6891 BPF_TCP_SOCK_GET_COMMON(lost_out); 6892 break; 6893 case offsetof(struct bpf_tcp_sock, sacked_out): 6894 BPF_TCP_SOCK_GET_COMMON(sacked_out); 6895 break; 6896 case offsetof(struct bpf_tcp_sock, bytes_received): 6897 BPF_TCP_SOCK_GET_COMMON(bytes_received); 6898 break; 6899 case offsetof(struct bpf_tcp_sock, bytes_acked): 6900 BPF_TCP_SOCK_GET_COMMON(bytes_acked); 6901 break; 6902 case offsetof(struct bpf_tcp_sock, dsack_dups): 6903 BPF_TCP_SOCK_GET_COMMON(dsack_dups); 6904 break; 6905 case offsetof(struct bpf_tcp_sock, delivered): 6906 BPF_TCP_SOCK_GET_COMMON(delivered); 6907 break; 6908 case offsetof(struct bpf_tcp_sock, delivered_ce): 6909 BPF_TCP_SOCK_GET_COMMON(delivered_ce); 6910 break; 6911 case offsetof(struct bpf_tcp_sock, icsk_retransmits): 6912 BPF_INET_SOCK_GET_COMMON(icsk_retransmits); 6913 break; 6914 } 6915 6916 return insn - insn_buf; 6917 } 6918 6919 BPF_CALL_1(bpf_tcp_sock, struct sock *, sk) 6920 { 6921 if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP) 6922 return (unsigned long)sk; 6923 6924 return (unsigned long)NULL; 6925 } 6926 6927 const struct bpf_func_proto bpf_tcp_sock_proto = { 6928 .func = bpf_tcp_sock, 6929 .gpl_only = false, 6930 .ret_type = RET_PTR_TO_TCP_SOCK_OR_NULL, 6931 .arg1_type = ARG_PTR_TO_SOCK_COMMON, 6932 }; 6933 6934 BPF_CALL_1(bpf_get_listener_sock, struct sock *, sk) 6935 { 6936 sk = sk_to_full_sk(sk); 6937 6938 if (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_RCU_FREE)) 6939 return (unsigned long)sk; 6940 6941 return (unsigned long)NULL; 6942 } 6943 6944 static const struct bpf_func_proto bpf_get_listener_sock_proto = { 6945 .func = bpf_get_listener_sock, 6946 .gpl_only = false, 6947 .ret_type = RET_PTR_TO_SOCKET_OR_NULL, 6948 .arg1_type = ARG_PTR_TO_SOCK_COMMON, 6949 }; 6950 6951 BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb) 6952 { 6953 unsigned int iphdr_len; 6954 6955 switch (skb_protocol(skb, true)) { 6956 case cpu_to_be16(ETH_P_IP): 6957 iphdr_len = sizeof(struct iphdr); 6958 break; 6959 case cpu_to_be16(ETH_P_IPV6): 6960 iphdr_len = sizeof(struct ipv6hdr); 6961 break; 6962 default: 6963 return 0; 6964 } 6965 6966 if (skb_headlen(skb) < iphdr_len) 6967 return 0; 6968 6969 if (skb_cloned(skb) && !skb_clone_writable(skb, iphdr_len)) 6970 return 0; 6971 6972 return INET_ECN_set_ce(skb); 6973 } 6974 6975 bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type, 6976 struct bpf_insn_access_aux *info) 6977 { 6978 if (off < 0 || off >= offsetofend(struct bpf_xdp_sock, queue_id)) 6979 return false; 6980 6981 if (off % size != 0) 6982 return false; 6983 6984 switch (off) { 6985 default: 6986 return size == sizeof(__u32); 6987 } 6988 } 6989 6990 u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, 6991 const struct bpf_insn *si, 6992 struct bpf_insn *insn_buf, 6993 struct bpf_prog *prog, u32 *target_size) 6994 { 6995 struct bpf_insn *insn = insn_buf; 6996 6997 #define BPF_XDP_SOCK_GET(FIELD) \ 6998 do { \ 6999 BUILD_BUG_ON(sizeof_field(struct xdp_sock, FIELD) > \ 7000 sizeof_field(struct bpf_xdp_sock, FIELD)); \ 7001 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_sock, FIELD),\ 7002 si->dst_reg, si->src_reg, \ 7003 offsetof(struct xdp_sock, FIELD)); \ 7004 } while (0) 7005 7006 switch (si->off) { 7007 case offsetof(struct bpf_xdp_sock, queue_id): 7008 BPF_XDP_SOCK_GET(queue_id); 7009 break; 7010 } 7011 7012 return insn - 
insn_buf; 7013 } 7014 7015 static const struct bpf_func_proto bpf_skb_ecn_set_ce_proto = { 7016 .func = bpf_skb_ecn_set_ce, 7017 .gpl_only = false, 7018 .ret_type = RET_INTEGER, 7019 .arg1_type = ARG_PTR_TO_CTX, 7020 }; 7021 7022 BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len, 7023 struct tcphdr *, th, u32, th_len) 7024 { 7025 #ifdef CONFIG_SYN_COOKIES 7026 u32 cookie; 7027 int ret; 7028 7029 if (unlikely(!sk || th_len < sizeof(*th))) 7030 return -EINVAL; 7031 7032 /* sk_listener() allows TCP_NEW_SYN_RECV, which makes no sense here. */ 7033 if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN) 7034 return -EINVAL; 7035 7036 if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies)) 7037 return -EINVAL; 7038 7039 if (!th->ack || th->rst || th->syn) 7040 return -ENOENT; 7041 7042 if (unlikely(iph_len < sizeof(struct iphdr))) 7043 return -EINVAL; 7044 7045 if (tcp_synq_no_recent_overflow(sk)) 7046 return -ENOENT; 7047 7048 cookie = ntohl(th->ack_seq) - 1; 7049 7050 /* Both struct iphdr and struct ipv6hdr have the version field at the 7051 * same offset so we can cast to the shorter header (struct iphdr). 7052 */ 7053 switch (((struct iphdr *)iph)->version) { 7054 case 4: 7055 if (sk->sk_family == AF_INET6 && ipv6_only_sock(sk)) 7056 return -EINVAL; 7057 7058 ret = __cookie_v4_check((struct iphdr *)iph, th, cookie); 7059 break; 7060 7061 #if IS_BUILTIN(CONFIG_IPV6) 7062 case 6: 7063 if (unlikely(iph_len < sizeof(struct ipv6hdr))) 7064 return -EINVAL; 7065 7066 if (sk->sk_family != AF_INET6) 7067 return -EINVAL; 7068 7069 ret = __cookie_v6_check((struct ipv6hdr *)iph, th, cookie); 7070 break; 7071 #endif /* CONFIG_IPV6 */ 7072 7073 default: 7074 return -EPROTONOSUPPORT; 7075 } 7076 7077 if (ret > 0) 7078 return 0; 7079 7080 return -ENOENT; 7081 #else 7082 return -ENOTSUPP; 7083 #endif 7084 } 7085 7086 static const struct bpf_func_proto bpf_tcp_check_syncookie_proto = { 7087 .func = bpf_tcp_check_syncookie, 7088 .gpl_only = true, 7089 .pkt_access = true, 7090 .ret_type = RET_INTEGER, 7091 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 7092 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 7093 .arg3_type = ARG_CONST_SIZE, 7094 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 7095 .arg5_type = ARG_CONST_SIZE, 7096 }; 7097 7098 BPF_CALL_5(bpf_tcp_gen_syncookie, struct sock *, sk, void *, iph, u32, iph_len, 7099 struct tcphdr *, th, u32, th_len) 7100 { 7101 #ifdef CONFIG_SYN_COOKIES 7102 u32 cookie; 7103 u16 mss; 7104 7105 if (unlikely(!sk || th_len < sizeof(*th) || th_len != th->doff * 4)) 7106 return -EINVAL; 7107 7108 if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN) 7109 return -EINVAL; 7110 7111 if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies)) 7112 return -ENOENT; 7113 7114 if (!th->syn || th->ack || th->fin || th->rst) 7115 return -EINVAL; 7116 7117 if (unlikely(iph_len < sizeof(struct iphdr))) 7118 return -EINVAL; 7119 7120 /* Both struct iphdr and struct ipv6hdr have the version field at the 7121 * same offset so we can cast to the shorter header (struct iphdr). 
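 *
 * On success the result is packed as (mss << 32) | cookie, so a BPF
 * caller typically splits it like this (a sketch; sk is a listening
 * socket obtained e.g. via bpf_skc_lookup_tcp(), and iph/th point at
 * headers the program has already validated):
 *
 *	s64 ret = bpf_tcp_gen_syncookie(sk, iph, sizeof(*iph),
 *					th, th->doff * 4);
 *	if (ret >= 0) {
 *		__u32 cookie = (__u32)ret;
 *		__u16 mss = ret >> 32;
 *	}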
7122 */ 7123 switch (((struct iphdr *)iph)->version) { 7124 case 4: 7125 if (sk->sk_family == AF_INET6 && ipv6_only_sock(sk)) 7126 return -EINVAL; 7127 7128 mss = tcp_v4_get_syncookie(sk, iph, th, &cookie); 7129 break; 7130 7131 #if IS_BUILTIN(CONFIG_IPV6) 7132 case 6: 7133 if (unlikely(iph_len < sizeof(struct ipv6hdr))) 7134 return -EINVAL; 7135 7136 if (sk->sk_family != AF_INET6) 7137 return -EINVAL; 7138 7139 mss = tcp_v6_get_syncookie(sk, iph, th, &cookie); 7140 break; 7141 #endif /* CONFIG_IPV6 */ 7142 7143 default: 7144 return -EPROTONOSUPPORT; 7145 } 7146 if (mss == 0) 7147 return -ENOENT; 7148 7149 return cookie | ((u64)mss << 32); 7150 #else 7151 return -EOPNOTSUPP; 7152 #endif /* CONFIG_SYN_COOKIES */ 7153 } 7154 7155 static const struct bpf_func_proto bpf_tcp_gen_syncookie_proto = { 7156 .func = bpf_tcp_gen_syncookie, 7157 .gpl_only = true, /* __cookie_v*_init_sequence() is GPL */ 7158 .pkt_access = true, 7159 .ret_type = RET_INTEGER, 7160 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 7161 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 7162 .arg3_type = ARG_CONST_SIZE, 7163 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 7164 .arg5_type = ARG_CONST_SIZE, 7165 }; 7166 7167 BPF_CALL_3(bpf_sk_assign, struct sk_buff *, skb, struct sock *, sk, u64, flags) 7168 { 7169 if (!sk || flags != 0) 7170 return -EINVAL; 7171 if (!skb_at_tc_ingress(skb)) 7172 return -EOPNOTSUPP; 7173 if (unlikely(dev_net(skb->dev) != sock_net(sk))) 7174 return -ENETUNREACH; 7175 if (unlikely(sk_fullsock(sk) && sk->sk_reuseport)) 7176 return -ESOCKTNOSUPPORT; 7177 if (sk_is_refcounted(sk) && 7178 unlikely(!refcount_inc_not_zero(&sk->sk_refcnt))) 7179 return -ENOENT; 7180 7181 skb_orphan(skb); 7182 skb->sk = sk; 7183 skb->destructor = sock_pfree; 7184 7185 return 0; 7186 } 7187 7188 static const struct bpf_func_proto bpf_sk_assign_proto = { 7189 .func = bpf_sk_assign, 7190 .gpl_only = false, 7191 .ret_type = RET_INTEGER, 7192 .arg1_type = ARG_PTR_TO_CTX, 7193 .arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 7194 .arg3_type = ARG_ANYTHING, 7195 }; 7196 7197 static const u8 *bpf_search_tcp_opt(const u8 *op, const u8 *opend, 7198 u8 search_kind, const u8 *magic, 7199 u8 magic_len, bool *eol) 7200 { 7201 u8 kind, kind_len; 7202 7203 *eol = false; 7204 7205 while (op < opend) { 7206 kind = op[0]; 7207 7208 if (kind == TCPOPT_EOL) { 7209 *eol = true; 7210 return ERR_PTR(-ENOMSG); 7211 } else if (kind == TCPOPT_NOP) { 7212 op++; 7213 continue; 7214 } 7215 7216 if (opend - op < 2 || opend - op < op[1] || op[1] < 2) 7217 /* Something is wrong in the received header. 7218 * Follow the TCP stack's tcp_parse_options() 7219 * and just bail here. 7220 */ 7221 return ERR_PTR(-EFAULT); 7222 7223 kind_len = op[1]; 7224 if (search_kind == kind) { 7225 if (!magic_len) 7226 return op; 7227 7228 if (magic_len > kind_len - 2) 7229 return ERR_PTR(-ENOMSG); 7230 7231 if (!memcmp(&op[2], magic, magic_len)) 7232 return op; 7233 } 7234 7235 op += kind_len; 7236 } 7237 7238 return ERR_PTR(-ENOMSG); 7239 } 7240 7241 BPF_CALL_4(bpf_sock_ops_load_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock, 7242 void *, search_res, u32, len, u64, flags) 7243 { 7244 bool eol, load_syn = flags & BPF_LOAD_HDR_OPT_TCP_SYN; 7245 const u8 *op, *opend, *magic, *search = search_res; 7246 u8 search_kind, search_len, copy_len, magic_len; 7247 int ret; 7248 7249 /* 2 byte is the minimal option len except TCPOPT_NOP and 7250 * TCPOPT_EOL which are useless for the bpf prog to learn 7251 * and this helper disallow loading them also. 
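 *
 * A sock_ops program searching for an experimental option (kind
 * TCPOPT_EXP, i.e. 254, followed by a 16 bit magic) typically builds
 * the search buffer like this (a sketch; 0xeB and 0x9F form an
 * arbitrary example magic):
 *
 *	u8 opt[8] = { TCPOPT_EXP, 4, 0xeB, 0x9F };
 *	int err = bpf_load_hdr_opt(skops, opt, sizeof(opt), 0);
 *
 * On success the matched option is copied back into the buffer and its
 * length is returned.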
7252 */ 7253 if (len < 2 || flags & ~BPF_LOAD_HDR_OPT_TCP_SYN) 7254 return -EINVAL; 7255 7256 search_kind = search[0]; 7257 search_len = search[1]; 7258 7259 if (search_len > len || search_kind == TCPOPT_NOP || 7260 search_kind == TCPOPT_EOL) 7261 return -EINVAL; 7262 7263 if (search_kind == TCPOPT_EXP || search_kind == 253) { 7264 /* 16 or 32 bit magic. +2 for kind and kind length */ 7265 if (search_len != 4 && search_len != 6) 7266 return -EINVAL; 7267 magic = &search[2]; 7268 magic_len = search_len - 2; 7269 } else { 7270 if (search_len) 7271 return -EINVAL; 7272 magic = NULL; 7273 magic_len = 0; 7274 } 7275 7276 if (load_syn) { 7277 ret = bpf_sock_ops_get_syn(bpf_sock, TCP_BPF_SYN, &op); 7278 if (ret < 0) 7279 return ret; 7280 7281 opend = op + ret; 7282 op += sizeof(struct tcphdr); 7283 } else { 7284 if (!bpf_sock->skb || 7285 bpf_sock->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB) 7286 /* This bpf_sock->op cannot call this helper */ 7287 return -EPERM; 7288 7289 opend = bpf_sock->skb_data_end; 7290 op = bpf_sock->skb->data + sizeof(struct tcphdr); 7291 } 7292 7293 op = bpf_search_tcp_opt(op, opend, search_kind, magic, magic_len, 7294 &eol); 7295 if (IS_ERR(op)) 7296 return PTR_ERR(op); 7297 7298 copy_len = op[1]; 7299 ret = copy_len; 7300 if (copy_len > len) { 7301 ret = -ENOSPC; 7302 copy_len = len; 7303 } 7304 7305 memcpy(search_res, op, copy_len); 7306 return ret; 7307 } 7308 7309 static const struct bpf_func_proto bpf_sock_ops_load_hdr_opt_proto = { 7310 .func = bpf_sock_ops_load_hdr_opt, 7311 .gpl_only = false, 7312 .ret_type = RET_INTEGER, 7313 .arg1_type = ARG_PTR_TO_CTX, 7314 .arg2_type = ARG_PTR_TO_MEM, 7315 .arg3_type = ARG_CONST_SIZE, 7316 .arg4_type = ARG_ANYTHING, 7317 }; 7318 7319 BPF_CALL_4(bpf_sock_ops_store_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock, 7320 const void *, from, u32, len, u64, flags) 7321 { 7322 u8 new_kind, new_kind_len, magic_len = 0, *opend; 7323 const u8 *op, *new_op, *magic = NULL; 7324 struct sk_buff *skb; 7325 bool eol; 7326 7327 if (bpf_sock->op != BPF_SOCK_OPS_WRITE_HDR_OPT_CB) 7328 return -EPERM; 7329 7330 if (len < 2 || flags) 7331 return -EINVAL; 7332 7333 new_op = from; 7334 new_kind = new_op[0]; 7335 new_kind_len = new_op[1]; 7336 7337 if (new_kind_len > len || new_kind == TCPOPT_NOP || 7338 new_kind == TCPOPT_EOL) 7339 return -EINVAL; 7340 7341 if (new_kind_len > bpf_sock->remaining_opt_len) 7342 return -ENOSPC; 7343 7344 /* 253 is another experimental kind */ 7345 if (new_kind == TCPOPT_EXP || new_kind == 253) { 7346 if (new_kind_len < 4) 7347 return -EINVAL; 7348 /* Match for the 2 byte magic also. 7349 * RFC 6994: the magic could be 2 or 4 bytes. 7350 * Hence, matching by 2 byte only is on the 7351 * conservative side but it is the right 7352 * thing to do for the 'search-for-duplication' 7353 * purpose. 7354 */ 7355 magic = &new_op[2]; 7356 magic_len = 2; 7357 } 7358 7359 /* Check for duplication */ 7360 skb = bpf_sock->skb; 7361 op = skb->data + sizeof(struct tcphdr); 7362 opend = bpf_sock->skb_data_end; 7363 7364 op = bpf_search_tcp_opt(op, opend, new_kind, magic, magic_len, 7365 &eol); 7366 if (!IS_ERR(op)) 7367 return -EEXIST; 7368 7369 if (PTR_ERR(op) != -ENOMSG) 7370 return PTR_ERR(op); 7371 7372 if (eol) 7373 /* The option has been ended. Treat it as no more 7374 * header option can be written. 7375 */ 7376 return -ENOSPC; 7377 7378 /* No duplication found. Store the header option. 
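 * The remaining_opt_len check above guarantees the space reserved during
 * BPF_SOCK_OPS_HDR_OPT_LEN_CB can hold new_kind_len bytes, so the
 * memcpy() below stays within the reserved option room.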
*/ 7379 memcpy(opend, from, new_kind_len); 7380 7381 bpf_sock->remaining_opt_len -= new_kind_len; 7382 bpf_sock->skb_data_end += new_kind_len; 7383 7384 return 0; 7385 } 7386 7387 static const struct bpf_func_proto bpf_sock_ops_store_hdr_opt_proto = { 7388 .func = bpf_sock_ops_store_hdr_opt, 7389 .gpl_only = false, 7390 .ret_type = RET_INTEGER, 7391 .arg1_type = ARG_PTR_TO_CTX, 7392 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 7393 .arg3_type = ARG_CONST_SIZE, 7394 .arg4_type = ARG_ANYTHING, 7395 }; 7396 7397 BPF_CALL_3(bpf_sock_ops_reserve_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock, 7398 u32, len, u64, flags) 7399 { 7400 if (bpf_sock->op != BPF_SOCK_OPS_HDR_OPT_LEN_CB) 7401 return -EPERM; 7402 7403 if (flags || len < 2) 7404 return -EINVAL; 7405 7406 if (len > bpf_sock->remaining_opt_len) 7407 return -ENOSPC; 7408 7409 bpf_sock->remaining_opt_len -= len; 7410 7411 return 0; 7412 } 7413 7414 static const struct bpf_func_proto bpf_sock_ops_reserve_hdr_opt_proto = { 7415 .func = bpf_sock_ops_reserve_hdr_opt, 7416 .gpl_only = false, 7417 .ret_type = RET_INTEGER, 7418 .arg1_type = ARG_PTR_TO_CTX, 7419 .arg2_type = ARG_ANYTHING, 7420 .arg3_type = ARG_ANYTHING, 7421 }; 7422 7423 BPF_CALL_3(bpf_skb_set_tstamp, struct sk_buff *, skb, 7424 u64, tstamp, u32, tstamp_type) 7425 { 7426 /* skb_clear_delivery_time() is done for inet protocol */ 7427 if (skb->protocol != htons(ETH_P_IP) && 7428 skb->protocol != htons(ETH_P_IPV6)) 7429 return -EOPNOTSUPP; 7430 7431 switch (tstamp_type) { 7432 case BPF_SKB_TSTAMP_DELIVERY_MONO: 7433 if (!tstamp) 7434 return -EINVAL; 7435 skb->tstamp = tstamp; 7436 skb->mono_delivery_time = 1; 7437 break; 7438 case BPF_SKB_TSTAMP_UNSPEC: 7439 if (tstamp) 7440 return -EINVAL; 7441 skb->tstamp = 0; 7442 skb->mono_delivery_time = 0; 7443 break; 7444 default: 7445 return -EINVAL; 7446 } 7447 7448 return 0; 7449 } 7450 7451 static const struct bpf_func_proto bpf_skb_set_tstamp_proto = { 7452 .func = bpf_skb_set_tstamp, 7453 .gpl_only = false, 7454 .ret_type = RET_INTEGER, 7455 .arg1_type = ARG_PTR_TO_CTX, 7456 .arg2_type = ARG_ANYTHING, 7457 .arg3_type = ARG_ANYTHING, 7458 }; 7459 7460 #ifdef CONFIG_SYN_COOKIES 7461 BPF_CALL_3(bpf_tcp_raw_gen_syncookie_ipv4, struct iphdr *, iph, 7462 struct tcphdr *, th, u32, th_len) 7463 { 7464 u32 cookie; 7465 u16 mss; 7466 7467 if (unlikely(th_len < sizeof(*th) || th_len != th->doff * 4)) 7468 return -EINVAL; 7469 7470 mss = tcp_parse_mss_option(th, 0) ?: TCP_MSS_DEFAULT; 7471 cookie = __cookie_v4_init_sequence(iph, th, &mss); 7472 7473 return cookie | ((u64)mss << 32); 7474 } 7475 7476 static const struct bpf_func_proto bpf_tcp_raw_gen_syncookie_ipv4_proto = { 7477 .func = bpf_tcp_raw_gen_syncookie_ipv4, 7478 .gpl_only = true, /* __cookie_v4_init_sequence() is GPL */ 7479 .pkt_access = true, 7480 .ret_type = RET_INTEGER, 7481 .arg1_type = ARG_PTR_TO_FIXED_SIZE_MEM, 7482 .arg1_size = sizeof(struct iphdr), 7483 .arg2_type = ARG_PTR_TO_MEM, 7484 .arg3_type = ARG_CONST_SIZE, 7485 }; 7486 7487 BPF_CALL_3(bpf_tcp_raw_gen_syncookie_ipv6, struct ipv6hdr *, iph, 7488 struct tcphdr *, th, u32, th_len) 7489 { 7490 #if IS_BUILTIN(CONFIG_IPV6) 7491 const u16 mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - 7492 sizeof(struct ipv6hdr); 7493 u32 cookie; 7494 u16 mss; 7495 7496 if (unlikely(th_len < sizeof(*th) || th_len != th->doff * 4)) 7497 return -EINVAL; 7498 7499 mss = tcp_parse_mss_option(th, 0) ?: mss_clamp; 7500 cookie = __cookie_v6_init_sequence(iph, th, &mss); 7501 7502 return cookie | ((u64)mss << 32); 7503 #else 7504 return -EPROTONOSUPPORT; 
;
7505 #endif 7506 } 7507 7508 static const struct bpf_func_proto bpf_tcp_raw_gen_syncookie_ipv6_proto = { 7509 .func = bpf_tcp_raw_gen_syncookie_ipv6, 7510 .gpl_only = true, /* __cookie_v6_init_sequence() is GPL */ 7511 .pkt_access = true, 7512 .ret_type = RET_INTEGER, 7513 .arg1_type = ARG_PTR_TO_FIXED_SIZE_MEM, 7514 .arg1_size = sizeof(struct ipv6hdr), 7515 .arg2_type = ARG_PTR_TO_MEM, 7516 .arg3_type = ARG_CONST_SIZE, 7517 }; 7518 7519 BPF_CALL_2(bpf_tcp_raw_check_syncookie_ipv4, struct iphdr *, iph, 7520 struct tcphdr *, th) 7521 { 7522 u32 cookie = ntohl(th->ack_seq) - 1; 7523 7524 if (__cookie_v4_check(iph, th, cookie) > 0) 7525 return 0; 7526 7527 return -EACCES; 7528 } 7529 7530 static const struct bpf_func_proto bpf_tcp_raw_check_syncookie_ipv4_proto = { 7531 .func = bpf_tcp_raw_check_syncookie_ipv4, 7532 .gpl_only = true, /* __cookie_v4_check is GPL */ 7533 .pkt_access = true, 7534 .ret_type = RET_INTEGER, 7535 .arg1_type = ARG_PTR_TO_FIXED_SIZE_MEM, 7536 .arg1_size = sizeof(struct iphdr), 7537 .arg2_type = ARG_PTR_TO_FIXED_SIZE_MEM, 7538 .arg2_size = sizeof(struct tcphdr), 7539 }; 7540 7541 BPF_CALL_2(bpf_tcp_raw_check_syncookie_ipv6, struct ipv6hdr *, iph, 7542 struct tcphdr *, th) 7543 { 7544 #if IS_BUILTIN(CONFIG_IPV6) 7545 u32 cookie = ntohl(th->ack_seq) - 1; 7546 7547 if (__cookie_v6_check(iph, th, cookie) > 0) 7548 return 0; 7549 7550 return -EACCES; 7551 #else 7552 return -EPROTONOSUPPORT; 7553 #endif 7554 } 7555 7556 static const struct bpf_func_proto bpf_tcp_raw_check_syncookie_ipv6_proto = { 7557 .func = bpf_tcp_raw_check_syncookie_ipv6, 7558 .gpl_only = true, /* __cookie_v6_check is GPL */ 7559 .pkt_access = true, 7560 .ret_type = RET_INTEGER, 7561 .arg1_type = ARG_PTR_TO_FIXED_SIZE_MEM, 7562 .arg1_size = sizeof(struct ipv6hdr), 7563 .arg2_type = ARG_PTR_TO_FIXED_SIZE_MEM, 7564 .arg2_size = sizeof(struct tcphdr), 7565 }; 7566 #endif /* CONFIG_SYN_COOKIES */ 7567 7568 #endif /* CONFIG_INET */ 7569 7570 bool bpf_helper_changes_pkt_data(void *func) 7571 { 7572 if (func == bpf_skb_vlan_push || 7573 func == bpf_skb_vlan_pop || 7574 func == bpf_skb_store_bytes || 7575 func == bpf_skb_change_proto || 7576 func == bpf_skb_change_head || 7577 func == sk_skb_change_head || 7578 func == bpf_skb_change_tail || 7579 func == sk_skb_change_tail || 7580 func == bpf_skb_adjust_room || 7581 func == sk_skb_adjust_room || 7582 func == bpf_skb_pull_data || 7583 func == sk_skb_pull_data || 7584 func == bpf_clone_redirect || 7585 func == bpf_l3_csum_replace || 7586 func == bpf_l4_csum_replace || 7587 func == bpf_xdp_adjust_head || 7588 func == bpf_xdp_adjust_meta || 7589 func == bpf_msg_pull_data || 7590 func == bpf_msg_push_data || 7591 func == bpf_msg_pop_data || 7592 func == bpf_xdp_adjust_tail || 7593 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) 7594 func == bpf_lwt_seg6_store_bytes || 7595 func == bpf_lwt_seg6_adjust_srh || 7596 func == bpf_lwt_seg6_action || 7597 #endif 7598 #ifdef CONFIG_INET 7599 func == bpf_sock_ops_store_hdr_opt || 7600 #endif 7601 func == bpf_lwt_in_push_encap || 7602 func == bpf_lwt_xmit_push_encap) 7603 return true; 7604 7605 return false; 7606 } 7607 7608 const struct bpf_func_proto bpf_event_output_data_proto __weak; 7609 const struct bpf_func_proto bpf_sk_storage_get_cg_sock_proto __weak; 7610 7611 static const struct bpf_func_proto * 7612 sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7613 { 7614 const struct bpf_func_proto *func_proto; 7615 7616 func_proto = cgroup_common_func_proto(func_id, prog); 7617 if (func_proto) 7618 return 
func_proto; 7619 7620 func_proto = cgroup_current_func_proto(func_id, prog); 7621 if (func_proto) 7622 return func_proto; 7623 7624 switch (func_id) { 7625 case BPF_FUNC_get_socket_cookie: 7626 return &bpf_get_socket_cookie_sock_proto; 7627 case BPF_FUNC_get_netns_cookie: 7628 return &bpf_get_netns_cookie_sock_proto; 7629 case BPF_FUNC_perf_event_output: 7630 return &bpf_event_output_data_proto; 7631 case BPF_FUNC_sk_storage_get: 7632 return &bpf_sk_storage_get_cg_sock_proto; 7633 case BPF_FUNC_ktime_get_coarse_ns: 7634 return &bpf_ktime_get_coarse_ns_proto; 7635 default: 7636 return bpf_base_func_proto(func_id); 7637 } 7638 } 7639 7640 static const struct bpf_func_proto * 7641 sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7642 { 7643 const struct bpf_func_proto *func_proto; 7644 7645 func_proto = cgroup_common_func_proto(func_id, prog); 7646 if (func_proto) 7647 return func_proto; 7648 7649 func_proto = cgroup_current_func_proto(func_id, prog); 7650 if (func_proto) 7651 return func_proto; 7652 7653 switch (func_id) { 7654 case BPF_FUNC_bind: 7655 switch (prog->expected_attach_type) { 7656 case BPF_CGROUP_INET4_CONNECT: 7657 case BPF_CGROUP_INET6_CONNECT: 7658 return &bpf_bind_proto; 7659 default: 7660 return NULL; 7661 } 7662 case BPF_FUNC_get_socket_cookie: 7663 return &bpf_get_socket_cookie_sock_addr_proto; 7664 case BPF_FUNC_get_netns_cookie: 7665 return &bpf_get_netns_cookie_sock_addr_proto; 7666 case BPF_FUNC_perf_event_output: 7667 return &bpf_event_output_data_proto; 7668 #ifdef CONFIG_INET 7669 case BPF_FUNC_sk_lookup_tcp: 7670 return &bpf_sock_addr_sk_lookup_tcp_proto; 7671 case BPF_FUNC_sk_lookup_udp: 7672 return &bpf_sock_addr_sk_lookup_udp_proto; 7673 case BPF_FUNC_sk_release: 7674 return &bpf_sk_release_proto; 7675 case BPF_FUNC_skc_lookup_tcp: 7676 return &bpf_sock_addr_skc_lookup_tcp_proto; 7677 #endif /* CONFIG_INET */ 7678 case BPF_FUNC_sk_storage_get: 7679 return &bpf_sk_storage_get_proto; 7680 case BPF_FUNC_sk_storage_delete: 7681 return &bpf_sk_storage_delete_proto; 7682 case BPF_FUNC_setsockopt: 7683 switch (prog->expected_attach_type) { 7684 case BPF_CGROUP_INET4_BIND: 7685 case BPF_CGROUP_INET6_BIND: 7686 case BPF_CGROUP_INET4_CONNECT: 7687 case BPF_CGROUP_INET6_CONNECT: 7688 case BPF_CGROUP_UDP4_RECVMSG: 7689 case BPF_CGROUP_UDP6_RECVMSG: 7690 case BPF_CGROUP_UDP4_SENDMSG: 7691 case BPF_CGROUP_UDP6_SENDMSG: 7692 case BPF_CGROUP_INET4_GETPEERNAME: 7693 case BPF_CGROUP_INET6_GETPEERNAME: 7694 case BPF_CGROUP_INET4_GETSOCKNAME: 7695 case BPF_CGROUP_INET6_GETSOCKNAME: 7696 return &bpf_sock_addr_setsockopt_proto; 7697 default: 7698 return NULL; 7699 } 7700 case BPF_FUNC_getsockopt: 7701 switch (prog->expected_attach_type) { 7702 case BPF_CGROUP_INET4_BIND: 7703 case BPF_CGROUP_INET6_BIND: 7704 case BPF_CGROUP_INET4_CONNECT: 7705 case BPF_CGROUP_INET6_CONNECT: 7706 case BPF_CGROUP_UDP4_RECVMSG: 7707 case BPF_CGROUP_UDP6_RECVMSG: 7708 case BPF_CGROUP_UDP4_SENDMSG: 7709 case BPF_CGROUP_UDP6_SENDMSG: 7710 case BPF_CGROUP_INET4_GETPEERNAME: 7711 case BPF_CGROUP_INET6_GETPEERNAME: 7712 case BPF_CGROUP_INET4_GETSOCKNAME: 7713 case BPF_CGROUP_INET6_GETSOCKNAME: 7714 return &bpf_sock_addr_getsockopt_proto; 7715 default: 7716 return NULL; 7717 } 7718 default: 7719 return bpf_sk_base_func_proto(func_id); 7720 } 7721 } 7722 7723 static const struct bpf_func_proto * 7724 sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7725 { 7726 switch (func_id) { 7727 case BPF_FUNC_skb_load_bytes: 7728 return &bpf_skb_load_bytes_proto; 7729 
case BPF_FUNC_skb_load_bytes_relative: 7730 return &bpf_skb_load_bytes_relative_proto; 7731 case BPF_FUNC_get_socket_cookie: 7732 return &bpf_get_socket_cookie_proto; 7733 case BPF_FUNC_get_socket_uid: 7734 return &bpf_get_socket_uid_proto; 7735 case BPF_FUNC_perf_event_output: 7736 return &bpf_skb_event_output_proto; 7737 default: 7738 return bpf_sk_base_func_proto(func_id); 7739 } 7740 } 7741 7742 const struct bpf_func_proto bpf_sk_storage_get_proto __weak; 7743 const struct bpf_func_proto bpf_sk_storage_delete_proto __weak; 7744 7745 static const struct bpf_func_proto * 7746 cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7747 { 7748 const struct bpf_func_proto *func_proto; 7749 7750 func_proto = cgroup_common_func_proto(func_id, prog); 7751 if (func_proto) 7752 return func_proto; 7753 7754 switch (func_id) { 7755 case BPF_FUNC_sk_fullsock: 7756 return &bpf_sk_fullsock_proto; 7757 case BPF_FUNC_sk_storage_get: 7758 return &bpf_sk_storage_get_proto; 7759 case BPF_FUNC_sk_storage_delete: 7760 return &bpf_sk_storage_delete_proto; 7761 case BPF_FUNC_perf_event_output: 7762 return &bpf_skb_event_output_proto; 7763 #ifdef CONFIG_SOCK_CGROUP_DATA 7764 case BPF_FUNC_skb_cgroup_id: 7765 return &bpf_skb_cgroup_id_proto; 7766 case BPF_FUNC_skb_ancestor_cgroup_id: 7767 return &bpf_skb_ancestor_cgroup_id_proto; 7768 case BPF_FUNC_sk_cgroup_id: 7769 return &bpf_sk_cgroup_id_proto; 7770 case BPF_FUNC_sk_ancestor_cgroup_id: 7771 return &bpf_sk_ancestor_cgroup_id_proto; 7772 #endif 7773 #ifdef CONFIG_INET 7774 case BPF_FUNC_sk_lookup_tcp: 7775 return &bpf_sk_lookup_tcp_proto; 7776 case BPF_FUNC_sk_lookup_udp: 7777 return &bpf_sk_lookup_udp_proto; 7778 case BPF_FUNC_sk_release: 7779 return &bpf_sk_release_proto; 7780 case BPF_FUNC_skc_lookup_tcp: 7781 return &bpf_skc_lookup_tcp_proto; 7782 case BPF_FUNC_tcp_sock: 7783 return &bpf_tcp_sock_proto; 7784 case BPF_FUNC_get_listener_sock: 7785 return &bpf_get_listener_sock_proto; 7786 case BPF_FUNC_skb_ecn_set_ce: 7787 return &bpf_skb_ecn_set_ce_proto; 7788 #endif 7789 default: 7790 return sk_filter_func_proto(func_id, prog); 7791 } 7792 } 7793 7794 static const struct bpf_func_proto * 7795 tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7796 { 7797 switch (func_id) { 7798 case BPF_FUNC_skb_store_bytes: 7799 return &bpf_skb_store_bytes_proto; 7800 case BPF_FUNC_skb_load_bytes: 7801 return &bpf_skb_load_bytes_proto; 7802 case BPF_FUNC_skb_load_bytes_relative: 7803 return &bpf_skb_load_bytes_relative_proto; 7804 case BPF_FUNC_skb_pull_data: 7805 return &bpf_skb_pull_data_proto; 7806 case BPF_FUNC_csum_diff: 7807 return &bpf_csum_diff_proto; 7808 case BPF_FUNC_csum_update: 7809 return &bpf_csum_update_proto; 7810 case BPF_FUNC_csum_level: 7811 return &bpf_csum_level_proto; 7812 case BPF_FUNC_l3_csum_replace: 7813 return &bpf_l3_csum_replace_proto; 7814 case BPF_FUNC_l4_csum_replace: 7815 return &bpf_l4_csum_replace_proto; 7816 case BPF_FUNC_clone_redirect: 7817 return &bpf_clone_redirect_proto; 7818 case BPF_FUNC_get_cgroup_classid: 7819 return &bpf_get_cgroup_classid_proto; 7820 case BPF_FUNC_skb_vlan_push: 7821 return &bpf_skb_vlan_push_proto; 7822 case BPF_FUNC_skb_vlan_pop: 7823 return &bpf_skb_vlan_pop_proto; 7824 case BPF_FUNC_skb_change_proto: 7825 return &bpf_skb_change_proto_proto; 7826 case BPF_FUNC_skb_change_type: 7827 return &bpf_skb_change_type_proto; 7828 case BPF_FUNC_skb_adjust_room: 7829 return &bpf_skb_adjust_room_proto; 7830 case BPF_FUNC_skb_change_tail: 7831 return 
&bpf_skb_change_tail_proto; 7832 case BPF_FUNC_skb_change_head: 7833 return &bpf_skb_change_head_proto; 7834 case BPF_FUNC_skb_get_tunnel_key: 7835 return &bpf_skb_get_tunnel_key_proto; 7836 case BPF_FUNC_skb_set_tunnel_key: 7837 return bpf_get_skb_set_tunnel_proto(func_id); 7838 case BPF_FUNC_skb_get_tunnel_opt: 7839 return &bpf_skb_get_tunnel_opt_proto; 7840 case BPF_FUNC_skb_set_tunnel_opt: 7841 return bpf_get_skb_set_tunnel_proto(func_id); 7842 case BPF_FUNC_redirect: 7843 return &bpf_redirect_proto; 7844 case BPF_FUNC_redirect_neigh: 7845 return &bpf_redirect_neigh_proto; 7846 case BPF_FUNC_redirect_peer: 7847 return &bpf_redirect_peer_proto; 7848 case BPF_FUNC_get_route_realm: 7849 return &bpf_get_route_realm_proto; 7850 case BPF_FUNC_get_hash_recalc: 7851 return &bpf_get_hash_recalc_proto; 7852 case BPF_FUNC_set_hash_invalid: 7853 return &bpf_set_hash_invalid_proto; 7854 case BPF_FUNC_set_hash: 7855 return &bpf_set_hash_proto; 7856 case BPF_FUNC_perf_event_output: 7857 return &bpf_skb_event_output_proto; 7858 case BPF_FUNC_get_smp_processor_id: 7859 return &bpf_get_smp_processor_id_proto; 7860 case BPF_FUNC_skb_under_cgroup: 7861 return &bpf_skb_under_cgroup_proto; 7862 case BPF_FUNC_get_socket_cookie: 7863 return &bpf_get_socket_cookie_proto; 7864 case BPF_FUNC_get_socket_uid: 7865 return &bpf_get_socket_uid_proto; 7866 case BPF_FUNC_fib_lookup: 7867 return &bpf_skb_fib_lookup_proto; 7868 case BPF_FUNC_check_mtu: 7869 return &bpf_skb_check_mtu_proto; 7870 case BPF_FUNC_sk_fullsock: 7871 return &bpf_sk_fullsock_proto; 7872 case BPF_FUNC_sk_storage_get: 7873 return &bpf_sk_storage_get_proto; 7874 case BPF_FUNC_sk_storage_delete: 7875 return &bpf_sk_storage_delete_proto; 7876 #ifdef CONFIG_XFRM 7877 case BPF_FUNC_skb_get_xfrm_state: 7878 return &bpf_skb_get_xfrm_state_proto; 7879 #endif 7880 #ifdef CONFIG_CGROUP_NET_CLASSID 7881 case BPF_FUNC_skb_cgroup_classid: 7882 return &bpf_skb_cgroup_classid_proto; 7883 #endif 7884 #ifdef CONFIG_SOCK_CGROUP_DATA 7885 case BPF_FUNC_skb_cgroup_id: 7886 return &bpf_skb_cgroup_id_proto; 7887 case BPF_FUNC_skb_ancestor_cgroup_id: 7888 return &bpf_skb_ancestor_cgroup_id_proto; 7889 #endif 7890 #ifdef CONFIG_INET 7891 case BPF_FUNC_sk_lookup_tcp: 7892 return &bpf_sk_lookup_tcp_proto; 7893 case BPF_FUNC_sk_lookup_udp: 7894 return &bpf_sk_lookup_udp_proto; 7895 case BPF_FUNC_sk_release: 7896 return &bpf_sk_release_proto; 7897 case BPF_FUNC_tcp_sock: 7898 return &bpf_tcp_sock_proto; 7899 case BPF_FUNC_get_listener_sock: 7900 return &bpf_get_listener_sock_proto; 7901 case BPF_FUNC_skc_lookup_tcp: 7902 return &bpf_skc_lookup_tcp_proto; 7903 case BPF_FUNC_tcp_check_syncookie: 7904 return &bpf_tcp_check_syncookie_proto; 7905 case BPF_FUNC_skb_ecn_set_ce: 7906 return &bpf_skb_ecn_set_ce_proto; 7907 case BPF_FUNC_tcp_gen_syncookie: 7908 return &bpf_tcp_gen_syncookie_proto; 7909 case BPF_FUNC_sk_assign: 7910 return &bpf_sk_assign_proto; 7911 case BPF_FUNC_skb_set_tstamp: 7912 return &bpf_skb_set_tstamp_proto; 7913 #ifdef CONFIG_SYN_COOKIES 7914 case BPF_FUNC_tcp_raw_gen_syncookie_ipv4: 7915 return &bpf_tcp_raw_gen_syncookie_ipv4_proto; 7916 case BPF_FUNC_tcp_raw_gen_syncookie_ipv6: 7917 return &bpf_tcp_raw_gen_syncookie_ipv6_proto; 7918 case BPF_FUNC_tcp_raw_check_syncookie_ipv4: 7919 return &bpf_tcp_raw_check_syncookie_ipv4_proto; 7920 case BPF_FUNC_tcp_raw_check_syncookie_ipv6: 7921 return &bpf_tcp_raw_check_syncookie_ipv6_proto; 7922 #endif 7923 #endif 7924 default: 7925 return bpf_sk_base_func_proto(func_id); 7926 } 7927 } 7928 7929 static const struct 
bpf_func_proto * 7930 xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7931 { 7932 switch (func_id) { 7933 case BPF_FUNC_perf_event_output: 7934 return &bpf_xdp_event_output_proto; 7935 case BPF_FUNC_get_smp_processor_id: 7936 return &bpf_get_smp_processor_id_proto; 7937 case BPF_FUNC_csum_diff: 7938 return &bpf_csum_diff_proto; 7939 case BPF_FUNC_xdp_adjust_head: 7940 return &bpf_xdp_adjust_head_proto; 7941 case BPF_FUNC_xdp_adjust_meta: 7942 return &bpf_xdp_adjust_meta_proto; 7943 case BPF_FUNC_redirect: 7944 return &bpf_xdp_redirect_proto; 7945 case BPF_FUNC_redirect_map: 7946 return &bpf_xdp_redirect_map_proto; 7947 case BPF_FUNC_xdp_adjust_tail: 7948 return &bpf_xdp_adjust_tail_proto; 7949 case BPF_FUNC_xdp_get_buff_len: 7950 return &bpf_xdp_get_buff_len_proto; 7951 case BPF_FUNC_xdp_load_bytes: 7952 return &bpf_xdp_load_bytes_proto; 7953 case BPF_FUNC_xdp_store_bytes: 7954 return &bpf_xdp_store_bytes_proto; 7955 case BPF_FUNC_fib_lookup: 7956 return &bpf_xdp_fib_lookup_proto; 7957 case BPF_FUNC_check_mtu: 7958 return &bpf_xdp_check_mtu_proto; 7959 #ifdef CONFIG_INET 7960 case BPF_FUNC_sk_lookup_udp: 7961 return &bpf_xdp_sk_lookup_udp_proto; 7962 case BPF_FUNC_sk_lookup_tcp: 7963 return &bpf_xdp_sk_lookup_tcp_proto; 7964 case BPF_FUNC_sk_release: 7965 return &bpf_sk_release_proto; 7966 case BPF_FUNC_skc_lookup_tcp: 7967 return &bpf_xdp_skc_lookup_tcp_proto; 7968 case BPF_FUNC_tcp_check_syncookie: 7969 return &bpf_tcp_check_syncookie_proto; 7970 case BPF_FUNC_tcp_gen_syncookie: 7971 return &bpf_tcp_gen_syncookie_proto; 7972 #ifdef CONFIG_SYN_COOKIES 7973 case BPF_FUNC_tcp_raw_gen_syncookie_ipv4: 7974 return &bpf_tcp_raw_gen_syncookie_ipv4_proto; 7975 case BPF_FUNC_tcp_raw_gen_syncookie_ipv6: 7976 return &bpf_tcp_raw_gen_syncookie_ipv6_proto; 7977 case BPF_FUNC_tcp_raw_check_syncookie_ipv4: 7978 return &bpf_tcp_raw_check_syncookie_ipv4_proto; 7979 case BPF_FUNC_tcp_raw_check_syncookie_ipv6: 7980 return &bpf_tcp_raw_check_syncookie_ipv6_proto; 7981 #endif 7982 #endif 7983 default: 7984 return bpf_sk_base_func_proto(func_id); 7985 } 7986 } 7987 7988 const struct bpf_func_proto bpf_sock_map_update_proto __weak; 7989 const struct bpf_func_proto bpf_sock_hash_update_proto __weak; 7990 7991 static const struct bpf_func_proto * 7992 sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 7993 { 7994 const struct bpf_func_proto *func_proto; 7995 7996 func_proto = cgroup_common_func_proto(func_id, prog); 7997 if (func_proto) 7998 return func_proto; 7999 8000 switch (func_id) { 8001 case BPF_FUNC_setsockopt: 8002 return &bpf_sock_ops_setsockopt_proto; 8003 case BPF_FUNC_getsockopt: 8004 return &bpf_sock_ops_getsockopt_proto; 8005 case BPF_FUNC_sock_ops_cb_flags_set: 8006 return &bpf_sock_ops_cb_flags_set_proto; 8007 case BPF_FUNC_sock_map_update: 8008 return &bpf_sock_map_update_proto; 8009 case BPF_FUNC_sock_hash_update: 8010 return &bpf_sock_hash_update_proto; 8011 case BPF_FUNC_get_socket_cookie: 8012 return &bpf_get_socket_cookie_sock_ops_proto; 8013 case BPF_FUNC_perf_event_output: 8014 return &bpf_event_output_data_proto; 8015 case BPF_FUNC_sk_storage_get: 8016 return &bpf_sk_storage_get_proto; 8017 case BPF_FUNC_sk_storage_delete: 8018 return &bpf_sk_storage_delete_proto; 8019 case BPF_FUNC_get_netns_cookie: 8020 return &bpf_get_netns_cookie_sock_ops_proto; 8021 #ifdef CONFIG_INET 8022 case BPF_FUNC_load_hdr_opt: 8023 return &bpf_sock_ops_load_hdr_opt_proto; 8024 case BPF_FUNC_store_hdr_opt: 8025 return &bpf_sock_ops_store_hdr_opt_proto; 8026 case 
BPF_FUNC_reserve_hdr_opt: 8027 return &bpf_sock_ops_reserve_hdr_opt_proto; 8028 case BPF_FUNC_tcp_sock: 8029 return &bpf_tcp_sock_proto; 8030 #endif /* CONFIG_INET */ 8031 default: 8032 return bpf_sk_base_func_proto(func_id); 8033 } 8034 } 8035 8036 const struct bpf_func_proto bpf_msg_redirect_map_proto __weak; 8037 const struct bpf_func_proto bpf_msg_redirect_hash_proto __weak; 8038 8039 static const struct bpf_func_proto * 8040 sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 8041 { 8042 switch (func_id) { 8043 case BPF_FUNC_msg_redirect_map: 8044 return &bpf_msg_redirect_map_proto; 8045 case BPF_FUNC_msg_redirect_hash: 8046 return &bpf_msg_redirect_hash_proto; 8047 case BPF_FUNC_msg_apply_bytes: 8048 return &bpf_msg_apply_bytes_proto; 8049 case BPF_FUNC_msg_cork_bytes: 8050 return &bpf_msg_cork_bytes_proto; 8051 case BPF_FUNC_msg_pull_data: 8052 return &bpf_msg_pull_data_proto; 8053 case BPF_FUNC_msg_push_data: 8054 return &bpf_msg_push_data_proto; 8055 case BPF_FUNC_msg_pop_data: 8056 return &bpf_msg_pop_data_proto; 8057 case BPF_FUNC_perf_event_output: 8058 return &bpf_event_output_data_proto; 8059 case BPF_FUNC_get_current_uid_gid: 8060 return &bpf_get_current_uid_gid_proto; 8061 case BPF_FUNC_get_current_pid_tgid: 8062 return &bpf_get_current_pid_tgid_proto; 8063 case BPF_FUNC_sk_storage_get: 8064 return &bpf_sk_storage_get_proto; 8065 case BPF_FUNC_sk_storage_delete: 8066 return &bpf_sk_storage_delete_proto; 8067 case BPF_FUNC_get_netns_cookie: 8068 return &bpf_get_netns_cookie_sk_msg_proto; 8069 #ifdef CONFIG_CGROUPS 8070 case BPF_FUNC_get_current_cgroup_id: 8071 return &bpf_get_current_cgroup_id_proto; 8072 case BPF_FUNC_get_current_ancestor_cgroup_id: 8073 return &bpf_get_current_ancestor_cgroup_id_proto; 8074 #endif 8075 #ifdef CONFIG_CGROUP_NET_CLASSID 8076 case BPF_FUNC_get_cgroup_classid: 8077 return &bpf_get_cgroup_classid_curr_proto; 8078 #endif 8079 default: 8080 return bpf_sk_base_func_proto(func_id); 8081 } 8082 } 8083 8084 const struct bpf_func_proto bpf_sk_redirect_map_proto __weak; 8085 const struct bpf_func_proto bpf_sk_redirect_hash_proto __weak; 8086 8087 static const struct bpf_func_proto * 8088 sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 8089 { 8090 switch (func_id) { 8091 case BPF_FUNC_skb_store_bytes: 8092 return &bpf_skb_store_bytes_proto; 8093 case BPF_FUNC_skb_load_bytes: 8094 return &bpf_skb_load_bytes_proto; 8095 case BPF_FUNC_skb_pull_data: 8096 return &sk_skb_pull_data_proto; 8097 case BPF_FUNC_skb_change_tail: 8098 return &sk_skb_change_tail_proto; 8099 case BPF_FUNC_skb_change_head: 8100 return &sk_skb_change_head_proto; 8101 case BPF_FUNC_skb_adjust_room: 8102 return &sk_skb_adjust_room_proto; 8103 case BPF_FUNC_get_socket_cookie: 8104 return &bpf_get_socket_cookie_proto; 8105 case BPF_FUNC_get_socket_uid: 8106 return &bpf_get_socket_uid_proto; 8107 case BPF_FUNC_sk_redirect_map: 8108 return &bpf_sk_redirect_map_proto; 8109 case BPF_FUNC_sk_redirect_hash: 8110 return &bpf_sk_redirect_hash_proto; 8111 case BPF_FUNC_perf_event_output: 8112 return &bpf_skb_event_output_proto; 8113 #ifdef CONFIG_INET 8114 case BPF_FUNC_sk_lookup_tcp: 8115 return &bpf_sk_lookup_tcp_proto; 8116 case BPF_FUNC_sk_lookup_udp: 8117 return &bpf_sk_lookup_udp_proto; 8118 case BPF_FUNC_sk_release: 8119 return &bpf_sk_release_proto; 8120 case BPF_FUNC_skc_lookup_tcp: 8121 return &bpf_skc_lookup_tcp_proto; 8122 #endif 8123 default: 8124 return bpf_sk_base_func_proto(func_id); 8125 } 8126 } 8127 8128 static const struct 
bpf_func_proto *
flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_skb_load_bytes:
		return &bpf_flow_dissector_load_bytes_proto;
	default:
		return bpf_sk_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_skb_pull_data:
		return &bpf_skb_pull_data_proto;
	case BPF_FUNC_csum_diff:
		return &bpf_csum_diff_proto;
	case BPF_FUNC_get_cgroup_classid:
		return &bpf_get_cgroup_classid_proto;
	case BPF_FUNC_get_route_realm:
		return &bpf_get_route_realm_proto;
	case BPF_FUNC_get_hash_recalc:
		return &bpf_get_hash_recalc_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_skb_event_output_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_skb_under_cgroup:
		return &bpf_skb_under_cgroup_proto;
	default:
		return bpf_sk_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
lwt_in_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_lwt_push_encap:
		return &bpf_lwt_in_push_encap_proto;
	default:
		return lwt_out_func_proto(func_id, prog);
	}
}

static const struct bpf_func_proto *
lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_skb_get_tunnel_key:
		return &bpf_skb_get_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_key:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_skb_get_tunnel_opt:
		return &bpf_skb_get_tunnel_opt_proto;
	case BPF_FUNC_skb_set_tunnel_opt:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_redirect:
		return &bpf_redirect_proto;
	case BPF_FUNC_clone_redirect:
		return &bpf_clone_redirect_proto;
	case BPF_FUNC_skb_change_tail:
		return &bpf_skb_change_tail_proto;
	case BPF_FUNC_skb_change_head:
		return &bpf_skb_change_head_proto;
	case BPF_FUNC_skb_store_bytes:
		return &bpf_skb_store_bytes_proto;
	case BPF_FUNC_csum_update:
		return &bpf_csum_update_proto;
	case BPF_FUNC_csum_level:
		return &bpf_csum_level_proto;
	case BPF_FUNC_l3_csum_replace:
		return &bpf_l3_csum_replace_proto;
	case BPF_FUNC_l4_csum_replace:
		return &bpf_l4_csum_replace_proto;
	case BPF_FUNC_set_hash_invalid:
		return &bpf_set_hash_invalid_proto;
	case BPF_FUNC_lwt_push_encap:
		return &bpf_lwt_xmit_push_encap_proto;
	default:
		return lwt_out_func_proto(func_id, prog);
	}
}

static const struct bpf_func_proto *
lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
	case BPF_FUNC_lwt_seg6_store_bytes:
		return &bpf_lwt_seg6_store_bytes_proto;
	case BPF_FUNC_lwt_seg6_action:
		return &bpf_lwt_seg6_action_proto;
	case BPF_FUNC_lwt_seg6_adjust_srh:
		return &bpf_lwt_seg6_adjust_srh_proto;
#endif
	default:
		return lwt_out_func_proto(func_id, prog);
	}
}

static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct
bpf_prog *prog, 8235 struct bpf_insn_access_aux *info) 8236 { 8237 const int size_default = sizeof(__u32); 8238 8239 if (off < 0 || off >= sizeof(struct __sk_buff)) 8240 return false; 8241 8242 /* The verifier guarantees that size > 0. */ 8243 if (off % size != 0) 8244 return false; 8245 8246 switch (off) { 8247 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): 8248 if (off + size > offsetofend(struct __sk_buff, cb[4])) 8249 return false; 8250 break; 8251 case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]): 8252 case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]): 8253 case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4): 8254 case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4): 8255 case bpf_ctx_range(struct __sk_buff, data): 8256 case bpf_ctx_range(struct __sk_buff, data_meta): 8257 case bpf_ctx_range(struct __sk_buff, data_end): 8258 if (size != size_default) 8259 return false; 8260 break; 8261 case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): 8262 return false; 8263 case bpf_ctx_range(struct __sk_buff, hwtstamp): 8264 if (type == BPF_WRITE || size != sizeof(__u64)) 8265 return false; 8266 break; 8267 case bpf_ctx_range(struct __sk_buff, tstamp): 8268 if (size != sizeof(__u64)) 8269 return false; 8270 break; 8271 case offsetof(struct __sk_buff, sk): 8272 if (type == BPF_WRITE || size != sizeof(__u64)) 8273 return false; 8274 info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL; 8275 break; 8276 case offsetof(struct __sk_buff, tstamp_type): 8277 return false; 8278 case offsetofend(struct __sk_buff, tstamp_type) ... offsetof(struct __sk_buff, hwtstamp) - 1: 8279 /* Explicitly prohibit access to padding in __sk_buff. */ 8280 return false; 8281 default: 8282 /* Only narrow read access allowed for now. 
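		 * A read may be narrower than the default 4-byte field
		 * width; a write must still use the full width.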
*/ 8283 if (type == BPF_WRITE) { 8284 if (size != size_default) 8285 return false; 8286 } else { 8287 bpf_ctx_record_field_size(info, size_default); 8288 if (!bpf_ctx_narrow_access_ok(off, size, size_default)) 8289 return false; 8290 } 8291 } 8292 8293 return true; 8294 } 8295 8296 static bool sk_filter_is_valid_access(int off, int size, 8297 enum bpf_access_type type, 8298 const struct bpf_prog *prog, 8299 struct bpf_insn_access_aux *info) 8300 { 8301 switch (off) { 8302 case bpf_ctx_range(struct __sk_buff, tc_classid): 8303 case bpf_ctx_range(struct __sk_buff, data): 8304 case bpf_ctx_range(struct __sk_buff, data_meta): 8305 case bpf_ctx_range(struct __sk_buff, data_end): 8306 case bpf_ctx_range_till(struct __sk_buff, family, local_port): 8307 case bpf_ctx_range(struct __sk_buff, tstamp): 8308 case bpf_ctx_range(struct __sk_buff, wire_len): 8309 case bpf_ctx_range(struct __sk_buff, hwtstamp): 8310 return false; 8311 } 8312 8313 if (type == BPF_WRITE) { 8314 switch (off) { 8315 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): 8316 break; 8317 default: 8318 return false; 8319 } 8320 } 8321 8322 return bpf_skb_is_valid_access(off, size, type, prog, info); 8323 } 8324 8325 static bool cg_skb_is_valid_access(int off, int size, 8326 enum bpf_access_type type, 8327 const struct bpf_prog *prog, 8328 struct bpf_insn_access_aux *info) 8329 { 8330 switch (off) { 8331 case bpf_ctx_range(struct __sk_buff, tc_classid): 8332 case bpf_ctx_range(struct __sk_buff, data_meta): 8333 case bpf_ctx_range(struct __sk_buff, wire_len): 8334 return false; 8335 case bpf_ctx_range(struct __sk_buff, data): 8336 case bpf_ctx_range(struct __sk_buff, data_end): 8337 if (!bpf_capable()) 8338 return false; 8339 break; 8340 } 8341 8342 if (type == BPF_WRITE) { 8343 switch (off) { 8344 case bpf_ctx_range(struct __sk_buff, mark): 8345 case bpf_ctx_range(struct __sk_buff, priority): 8346 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): 8347 break; 8348 case bpf_ctx_range(struct __sk_buff, tstamp): 8349 if (!bpf_capable()) 8350 return false; 8351 break; 8352 default: 8353 return false; 8354 } 8355 } 8356 8357 switch (off) { 8358 case bpf_ctx_range(struct __sk_buff, data): 8359 info->reg_type = PTR_TO_PACKET; 8360 break; 8361 case bpf_ctx_range(struct __sk_buff, data_end): 8362 info->reg_type = PTR_TO_PACKET_END; 8363 break; 8364 } 8365 8366 return bpf_skb_is_valid_access(off, size, type, prog, info); 8367 } 8368 8369 static bool lwt_is_valid_access(int off, int size, 8370 enum bpf_access_type type, 8371 const struct bpf_prog *prog, 8372 struct bpf_insn_access_aux *info) 8373 { 8374 switch (off) { 8375 case bpf_ctx_range(struct __sk_buff, tc_classid): 8376 case bpf_ctx_range_till(struct __sk_buff, family, local_port): 8377 case bpf_ctx_range(struct __sk_buff, data_meta): 8378 case bpf_ctx_range(struct __sk_buff, tstamp): 8379 case bpf_ctx_range(struct __sk_buff, wire_len): 8380 case bpf_ctx_range(struct __sk_buff, hwtstamp): 8381 return false; 8382 } 8383 8384 if (type == BPF_WRITE) { 8385 switch (off) { 8386 case bpf_ctx_range(struct __sk_buff, mark): 8387 case bpf_ctx_range(struct __sk_buff, priority): 8388 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): 8389 break; 8390 default: 8391 return false; 8392 } 8393 } 8394 8395 switch (off) { 8396 case bpf_ctx_range(struct __sk_buff, data): 8397 info->reg_type = PTR_TO_PACKET; 8398 break; 8399 case bpf_ctx_range(struct __sk_buff, data_end): 8400 info->reg_type = PTR_TO_PACKET_END; 8401 break; 8402 } 8403 8404 return bpf_skb_is_valid_access(off, size, type, 
prog, info); 8405 } 8406 8407 /* Attach type specific accesses */ 8408 static bool __sock_filter_check_attach_type(int off, 8409 enum bpf_access_type access_type, 8410 enum bpf_attach_type attach_type) 8411 { 8412 switch (off) { 8413 case offsetof(struct bpf_sock, bound_dev_if): 8414 case offsetof(struct bpf_sock, mark): 8415 case offsetof(struct bpf_sock, priority): 8416 switch (attach_type) { 8417 case BPF_CGROUP_INET_SOCK_CREATE: 8418 case BPF_CGROUP_INET_SOCK_RELEASE: 8419 goto full_access; 8420 default: 8421 return false; 8422 } 8423 case bpf_ctx_range(struct bpf_sock, src_ip4): 8424 switch (attach_type) { 8425 case BPF_CGROUP_INET4_POST_BIND: 8426 goto read_only; 8427 default: 8428 return false; 8429 } 8430 case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]): 8431 switch (attach_type) { 8432 case BPF_CGROUP_INET6_POST_BIND: 8433 goto read_only; 8434 default: 8435 return false; 8436 } 8437 case bpf_ctx_range(struct bpf_sock, src_port): 8438 switch (attach_type) { 8439 case BPF_CGROUP_INET4_POST_BIND: 8440 case BPF_CGROUP_INET6_POST_BIND: 8441 goto read_only; 8442 default: 8443 return false; 8444 } 8445 } 8446 read_only: 8447 return access_type == BPF_READ; 8448 full_access: 8449 return true; 8450 } 8451 8452 bool bpf_sock_common_is_valid_access(int off, int size, 8453 enum bpf_access_type type, 8454 struct bpf_insn_access_aux *info) 8455 { 8456 switch (off) { 8457 case bpf_ctx_range_till(struct bpf_sock, type, priority): 8458 return false; 8459 default: 8460 return bpf_sock_is_valid_access(off, size, type, info); 8461 } 8462 } 8463 8464 bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, 8465 struct bpf_insn_access_aux *info) 8466 { 8467 const int size_default = sizeof(__u32); 8468 int field_size; 8469 8470 if (off < 0 || off >= sizeof(struct bpf_sock)) 8471 return false; 8472 if (off % size != 0) 8473 return false; 8474 8475 switch (off) { 8476 case offsetof(struct bpf_sock, state): 8477 case offsetof(struct bpf_sock, family): 8478 case offsetof(struct bpf_sock, type): 8479 case offsetof(struct bpf_sock, protocol): 8480 case offsetof(struct bpf_sock, src_port): 8481 case offsetof(struct bpf_sock, rx_queue_mapping): 8482 case bpf_ctx_range(struct bpf_sock, src_ip4): 8483 case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]): 8484 case bpf_ctx_range(struct bpf_sock, dst_ip4): 8485 case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]): 8486 bpf_ctx_record_field_size(info, size_default); 8487 return bpf_ctx_narrow_access_ok(off, size, size_default); 8488 case bpf_ctx_range(struct bpf_sock, dst_port): 8489 field_size = size == size_default ? 8490 size_default : sizeof_field(struct bpf_sock, dst_port); 8491 bpf_ctx_record_field_size(info, field_size); 8492 return bpf_ctx_narrow_access_ok(off, size, field_size); 8493 case offsetofend(struct bpf_sock, dst_port) ... 
8494 offsetof(struct bpf_sock, dst_ip4) - 1: 8495 return false; 8496 } 8497 8498 return size == size_default; 8499 } 8500 8501 static bool sock_filter_is_valid_access(int off, int size, 8502 enum bpf_access_type type, 8503 const struct bpf_prog *prog, 8504 struct bpf_insn_access_aux *info) 8505 { 8506 if (!bpf_sock_is_valid_access(off, size, type, info)) 8507 return false; 8508 return __sock_filter_check_attach_type(off, type, 8509 prog->expected_attach_type); 8510 } 8511 8512 static int bpf_noop_prologue(struct bpf_insn *insn_buf, bool direct_write, 8513 const struct bpf_prog *prog) 8514 { 8515 /* Neither direct read nor direct write requires any preliminary 8516 * action. 8517 */ 8518 return 0; 8519 } 8520 8521 static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write, 8522 const struct bpf_prog *prog, int drop_verdict) 8523 { 8524 struct bpf_insn *insn = insn_buf; 8525 8526 if (!direct_write) 8527 return 0; 8528 8529 /* if (!skb->cloned) 8530 * goto start; 8531 * 8532 * (Fast-path, otherwise approximation that we might be 8533 * a clone, do the rest in helper.) 8534 */ 8535 *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET); 8536 *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK); 8537 *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7); 8538 8539 /* ret = bpf_skb_pull_data(skb, 0); */ 8540 *insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1); 8541 *insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2); 8542 *insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 8543 BPF_FUNC_skb_pull_data); 8544 /* if (!ret) 8545 * goto restore; 8546 * return TC_ACT_SHOT; 8547 */ 8548 *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2); 8549 *insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, drop_verdict); 8550 *insn++ = BPF_EXIT_INSN(); 8551 8552 /* restore: */ 8553 *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6); 8554 /* start: */ 8555 *insn++ = prog->insnsi[0]; 8556 8557 return insn - insn_buf; 8558 } 8559 8560 static int bpf_gen_ld_abs(const struct bpf_insn *orig, 8561 struct bpf_insn *insn_buf) 8562 { 8563 bool indirect = BPF_MODE(orig->code) == BPF_IND; 8564 struct bpf_insn *insn = insn_buf; 8565 8566 if (!indirect) { 8567 *insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm); 8568 } else { 8569 *insn++ = BPF_MOV64_REG(BPF_REG_2, orig->src_reg); 8570 if (orig->imm) 8571 *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm); 8572 } 8573 /* We're guaranteed here that CTX is in R6. 
*/ 8574 *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX); 8575 8576 switch (BPF_SIZE(orig->code)) { 8577 case BPF_B: 8578 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8_no_cache); 8579 break; 8580 case BPF_H: 8581 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16_no_cache); 8582 break; 8583 case BPF_W: 8584 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32_no_cache); 8585 break; 8586 } 8587 8588 *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 2); 8589 *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_0); 8590 *insn++ = BPF_EXIT_INSN(); 8591 8592 return insn - insn_buf; 8593 } 8594 8595 static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write, 8596 const struct bpf_prog *prog) 8597 { 8598 return bpf_unclone_prologue(insn_buf, direct_write, prog, TC_ACT_SHOT); 8599 } 8600 8601 static bool tc_cls_act_is_valid_access(int off, int size, 8602 enum bpf_access_type type, 8603 const struct bpf_prog *prog, 8604 struct bpf_insn_access_aux *info) 8605 { 8606 if (type == BPF_WRITE) { 8607 switch (off) { 8608 case bpf_ctx_range(struct __sk_buff, mark): 8609 case bpf_ctx_range(struct __sk_buff, tc_index): 8610 case bpf_ctx_range(struct __sk_buff, priority): 8611 case bpf_ctx_range(struct __sk_buff, tc_classid): 8612 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): 8613 case bpf_ctx_range(struct __sk_buff, tstamp): 8614 case bpf_ctx_range(struct __sk_buff, queue_mapping): 8615 break; 8616 default: 8617 return false; 8618 } 8619 } 8620 8621 switch (off) { 8622 case bpf_ctx_range(struct __sk_buff, data): 8623 info->reg_type = PTR_TO_PACKET; 8624 break; 8625 case bpf_ctx_range(struct __sk_buff, data_meta): 8626 info->reg_type = PTR_TO_PACKET_META; 8627 break; 8628 case bpf_ctx_range(struct __sk_buff, data_end): 8629 info->reg_type = PTR_TO_PACKET_END; 8630 break; 8631 case bpf_ctx_range_till(struct __sk_buff, family, local_port): 8632 return false; 8633 case offsetof(struct __sk_buff, tstamp_type): 8634 /* The convert_ctx_access() on reading and writing 8635 * __sk_buff->tstamp depends on whether the bpf prog 8636 * has used __sk_buff->tstamp_type or not. 8637 * Thus, we need to set prog->tstamp_type_access 8638 * earlier during is_valid_access() here. 
8639 */ 8640 ((struct bpf_prog *)prog)->tstamp_type_access = 1; 8641 return size == sizeof(__u8); 8642 } 8643 8644 return bpf_skb_is_valid_access(off, size, type, prog, info); 8645 } 8646 8647 DEFINE_MUTEX(nf_conn_btf_access_lock); 8648 EXPORT_SYMBOL_GPL(nf_conn_btf_access_lock); 8649 8650 int (*nfct_btf_struct_access)(struct bpf_verifier_log *log, const struct btf *btf, 8651 const struct btf_type *t, int off, int size, 8652 enum bpf_access_type atype, u32 *next_btf_id, 8653 enum bpf_type_flag *flag); 8654 EXPORT_SYMBOL_GPL(nfct_btf_struct_access); 8655 8656 static int tc_cls_act_btf_struct_access(struct bpf_verifier_log *log, 8657 const struct btf *btf, 8658 const struct btf_type *t, int off, 8659 int size, enum bpf_access_type atype, 8660 u32 *next_btf_id, 8661 enum bpf_type_flag *flag) 8662 { 8663 int ret = -EACCES; 8664 8665 if (atype == BPF_READ) 8666 return btf_struct_access(log, btf, t, off, size, atype, next_btf_id, 8667 flag); 8668 8669 mutex_lock(&nf_conn_btf_access_lock); 8670 if (nfct_btf_struct_access) 8671 ret = nfct_btf_struct_access(log, btf, t, off, size, atype, next_btf_id, flag); 8672 mutex_unlock(&nf_conn_btf_access_lock); 8673 8674 return ret; 8675 } 8676 8677 static bool __is_valid_xdp_access(int off, int size) 8678 { 8679 if (off < 0 || off >= sizeof(struct xdp_md)) 8680 return false; 8681 if (off % size != 0) 8682 return false; 8683 if (size != sizeof(__u32)) 8684 return false; 8685 8686 return true; 8687 } 8688 8689 static bool xdp_is_valid_access(int off, int size, 8690 enum bpf_access_type type, 8691 const struct bpf_prog *prog, 8692 struct bpf_insn_access_aux *info) 8693 { 8694 if (prog->expected_attach_type != BPF_XDP_DEVMAP) { 8695 switch (off) { 8696 case offsetof(struct xdp_md, egress_ifindex): 8697 return false; 8698 } 8699 } 8700 8701 if (type == BPF_WRITE) { 8702 if (bpf_prog_is_dev_bound(prog->aux)) { 8703 switch (off) { 8704 case offsetof(struct xdp_md, rx_queue_index): 8705 return __is_valid_xdp_access(off, size); 8706 } 8707 } 8708 return false; 8709 } 8710 8711 switch (off) { 8712 case offsetof(struct xdp_md, data): 8713 info->reg_type = PTR_TO_PACKET; 8714 break; 8715 case offsetof(struct xdp_md, data_meta): 8716 info->reg_type = PTR_TO_PACKET_META; 8717 break; 8718 case offsetof(struct xdp_md, data_end): 8719 info->reg_type = PTR_TO_PACKET_END; 8720 break; 8721 } 8722 8723 return __is_valid_xdp_access(off, size); 8724 } 8725 8726 void bpf_warn_invalid_xdp_action(struct net_device *dev, struct bpf_prog *prog, u32 act) 8727 { 8728 const u32 act_max = XDP_REDIRECT; 8729 8730 pr_warn_once("%s XDP return value %u on prog %s (id %d) dev %s, expect packet loss!\n", 8731 act > act_max ? "Illegal" : "Driver unsupported", 8732 act, prog->aux->name, prog->aux->id, dev ? 
dev->name : "N/A"); 8733 } 8734 EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action); 8735 8736 static int xdp_btf_struct_access(struct bpf_verifier_log *log, 8737 const struct btf *btf, 8738 const struct btf_type *t, int off, 8739 int size, enum bpf_access_type atype, 8740 u32 *next_btf_id, 8741 enum bpf_type_flag *flag) 8742 { 8743 int ret = -EACCES; 8744 8745 if (atype == BPF_READ) 8746 return btf_struct_access(log, btf, t, off, size, atype, next_btf_id, 8747 flag); 8748 8749 mutex_lock(&nf_conn_btf_access_lock); 8750 if (nfct_btf_struct_access) 8751 ret = nfct_btf_struct_access(log, btf, t, off, size, atype, next_btf_id, flag); 8752 mutex_unlock(&nf_conn_btf_access_lock); 8753 8754 return ret; 8755 } 8756 8757 static bool sock_addr_is_valid_access(int off, int size, 8758 enum bpf_access_type type, 8759 const struct bpf_prog *prog, 8760 struct bpf_insn_access_aux *info) 8761 { 8762 const int size_default = sizeof(__u32); 8763 8764 if (off < 0 || off >= sizeof(struct bpf_sock_addr)) 8765 return false; 8766 if (off % size != 0) 8767 return false; 8768 8769 /* Disallow access to IPv6 fields from IPv4 contex and vise 8770 * versa. 8771 */ 8772 switch (off) { 8773 case bpf_ctx_range(struct bpf_sock_addr, user_ip4): 8774 switch (prog->expected_attach_type) { 8775 case BPF_CGROUP_INET4_BIND: 8776 case BPF_CGROUP_INET4_CONNECT: 8777 case BPF_CGROUP_INET4_GETPEERNAME: 8778 case BPF_CGROUP_INET4_GETSOCKNAME: 8779 case BPF_CGROUP_UDP4_SENDMSG: 8780 case BPF_CGROUP_UDP4_RECVMSG: 8781 break; 8782 default: 8783 return false; 8784 } 8785 break; 8786 case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]): 8787 switch (prog->expected_attach_type) { 8788 case BPF_CGROUP_INET6_BIND: 8789 case BPF_CGROUP_INET6_CONNECT: 8790 case BPF_CGROUP_INET6_GETPEERNAME: 8791 case BPF_CGROUP_INET6_GETSOCKNAME: 8792 case BPF_CGROUP_UDP6_SENDMSG: 8793 case BPF_CGROUP_UDP6_RECVMSG: 8794 break; 8795 default: 8796 return false; 8797 } 8798 break; 8799 case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4): 8800 switch (prog->expected_attach_type) { 8801 case BPF_CGROUP_UDP4_SENDMSG: 8802 break; 8803 default: 8804 return false; 8805 } 8806 break; 8807 case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0], 8808 msg_src_ip6[3]): 8809 switch (prog->expected_attach_type) { 8810 case BPF_CGROUP_UDP6_SENDMSG: 8811 break; 8812 default: 8813 return false; 8814 } 8815 break; 8816 } 8817 8818 switch (off) { 8819 case bpf_ctx_range(struct bpf_sock_addr, user_ip4): 8820 case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]): 8821 case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4): 8822 case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0], 8823 msg_src_ip6[3]): 8824 case bpf_ctx_range(struct bpf_sock_addr, user_port): 8825 if (type == BPF_READ) { 8826 bpf_ctx_record_field_size(info, size_default); 8827 8828 if (bpf_ctx_wide_access_ok(off, size, 8829 struct bpf_sock_addr, 8830 user_ip6)) 8831 return true; 8832 8833 if (bpf_ctx_wide_access_ok(off, size, 8834 struct bpf_sock_addr, 8835 msg_src_ip6)) 8836 return true; 8837 8838 if (!bpf_ctx_narrow_access_ok(off, size, size_default)) 8839 return false; 8840 } else { 8841 if (bpf_ctx_wide_access_ok(off, size, 8842 struct bpf_sock_addr, 8843 user_ip6)) 8844 return true; 8845 8846 if (bpf_ctx_wide_access_ok(off, size, 8847 struct bpf_sock_addr, 8848 msg_src_ip6)) 8849 return true; 8850 8851 if (size != size_default) 8852 return false; 8853 } 8854 break; 8855 case offsetof(struct bpf_sock_addr, sk): 8856 if (type != BPF_READ) 8857 return false; 8858 if 
(size != sizeof(__u64)) 8859 return false; 8860 info->reg_type = PTR_TO_SOCKET; 8861 break; 8862 default: 8863 if (type == BPF_READ) { 8864 if (size != size_default) 8865 return false; 8866 } else { 8867 return false; 8868 } 8869 } 8870 8871 return true; 8872 } 8873 8874 static bool sock_ops_is_valid_access(int off, int size, 8875 enum bpf_access_type type, 8876 const struct bpf_prog *prog, 8877 struct bpf_insn_access_aux *info) 8878 { 8879 const int size_default = sizeof(__u32); 8880 8881 if (off < 0 || off >= sizeof(struct bpf_sock_ops)) 8882 return false; 8883 8884 /* The verifier guarantees that size > 0. */ 8885 if (off % size != 0) 8886 return false; 8887 8888 if (type == BPF_WRITE) { 8889 switch (off) { 8890 case offsetof(struct bpf_sock_ops, reply): 8891 case offsetof(struct bpf_sock_ops, sk_txhash): 8892 if (size != size_default) 8893 return false; 8894 break; 8895 default: 8896 return false; 8897 } 8898 } else { 8899 switch (off) { 8900 case bpf_ctx_range_till(struct bpf_sock_ops, bytes_received, 8901 bytes_acked): 8902 if (size != sizeof(__u64)) 8903 return false; 8904 break; 8905 case offsetof(struct bpf_sock_ops, sk): 8906 if (size != sizeof(__u64)) 8907 return false; 8908 info->reg_type = PTR_TO_SOCKET_OR_NULL; 8909 break; 8910 case offsetof(struct bpf_sock_ops, skb_data): 8911 if (size != sizeof(__u64)) 8912 return false; 8913 info->reg_type = PTR_TO_PACKET; 8914 break; 8915 case offsetof(struct bpf_sock_ops, skb_data_end): 8916 if (size != sizeof(__u64)) 8917 return false; 8918 info->reg_type = PTR_TO_PACKET_END; 8919 break; 8920 case offsetof(struct bpf_sock_ops, skb_tcp_flags): 8921 bpf_ctx_record_field_size(info, size_default); 8922 return bpf_ctx_narrow_access_ok(off, size, 8923 size_default); 8924 default: 8925 if (size != size_default) 8926 return false; 8927 break; 8928 } 8929 } 8930 8931 return true; 8932 } 8933 8934 static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write, 8935 const struct bpf_prog *prog) 8936 { 8937 return bpf_unclone_prologue(insn_buf, direct_write, prog, SK_DROP); 8938 } 8939 8940 static bool sk_skb_is_valid_access(int off, int size, 8941 enum bpf_access_type type, 8942 const struct bpf_prog *prog, 8943 struct bpf_insn_access_aux *info) 8944 { 8945 switch (off) { 8946 case bpf_ctx_range(struct __sk_buff, tc_classid): 8947 case bpf_ctx_range(struct __sk_buff, data_meta): 8948 case bpf_ctx_range(struct __sk_buff, tstamp): 8949 case bpf_ctx_range(struct __sk_buff, wire_len): 8950 case bpf_ctx_range(struct __sk_buff, hwtstamp): 8951 return false; 8952 } 8953 8954 if (type == BPF_WRITE) { 8955 switch (off) { 8956 case bpf_ctx_range(struct __sk_buff, tc_index): 8957 case bpf_ctx_range(struct __sk_buff, priority): 8958 break; 8959 default: 8960 return false; 8961 } 8962 } 8963 8964 switch (off) { 8965 case bpf_ctx_range(struct __sk_buff, mark): 8966 return false; 8967 case bpf_ctx_range(struct __sk_buff, data): 8968 info->reg_type = PTR_TO_PACKET; 8969 break; 8970 case bpf_ctx_range(struct __sk_buff, data_end): 8971 info->reg_type = PTR_TO_PACKET_END; 8972 break; 8973 } 8974 8975 return bpf_skb_is_valid_access(off, size, type, prog, info); 8976 } 8977 8978 static bool sk_msg_is_valid_access(int off, int size, 8979 enum bpf_access_type type, 8980 const struct bpf_prog *prog, 8981 struct bpf_insn_access_aux *info) 8982 { 8983 if (type == BPF_WRITE) 8984 return false; 8985 8986 if (off % size != 0) 8987 return false; 8988 8989 switch (off) { 8990 case offsetof(struct sk_msg_md, data): 8991 info->reg_type = PTR_TO_PACKET; 8992 if (size != 
sizeof(__u64)) 8993 return false; 8994 break; 8995 case offsetof(struct sk_msg_md, data_end): 8996 info->reg_type = PTR_TO_PACKET_END; 8997 if (size != sizeof(__u64)) 8998 return false; 8999 break; 9000 case offsetof(struct sk_msg_md, sk): 9001 if (size != sizeof(__u64)) 9002 return false; 9003 info->reg_type = PTR_TO_SOCKET; 9004 break; 9005 case bpf_ctx_range(struct sk_msg_md, family): 9006 case bpf_ctx_range(struct sk_msg_md, remote_ip4): 9007 case bpf_ctx_range(struct sk_msg_md, local_ip4): 9008 case bpf_ctx_range_till(struct sk_msg_md, remote_ip6[0], remote_ip6[3]): 9009 case bpf_ctx_range_till(struct sk_msg_md, local_ip6[0], local_ip6[3]): 9010 case bpf_ctx_range(struct sk_msg_md, remote_port): 9011 case bpf_ctx_range(struct sk_msg_md, local_port): 9012 case bpf_ctx_range(struct sk_msg_md, size): 9013 if (size != sizeof(__u32)) 9014 return false; 9015 break; 9016 default: 9017 return false; 9018 } 9019 return true; 9020 } 9021 9022 static bool flow_dissector_is_valid_access(int off, int size, 9023 enum bpf_access_type type, 9024 const struct bpf_prog *prog, 9025 struct bpf_insn_access_aux *info) 9026 { 9027 const int size_default = sizeof(__u32); 9028 9029 if (off < 0 || off >= sizeof(struct __sk_buff)) 9030 return false; 9031 9032 if (type == BPF_WRITE) 9033 return false; 9034 9035 switch (off) { 9036 case bpf_ctx_range(struct __sk_buff, data): 9037 if (size != size_default) 9038 return false; 9039 info->reg_type = PTR_TO_PACKET; 9040 return true; 9041 case bpf_ctx_range(struct __sk_buff, data_end): 9042 if (size != size_default) 9043 return false; 9044 info->reg_type = PTR_TO_PACKET_END; 9045 return true; 9046 case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): 9047 if (size != sizeof(__u64)) 9048 return false; 9049 info->reg_type = PTR_TO_FLOW_KEYS; 9050 return true; 9051 default: 9052 return false; 9053 } 9054 } 9055 9056 static u32 flow_dissector_convert_ctx_access(enum bpf_access_type type, 9057 const struct bpf_insn *si, 9058 struct bpf_insn *insn_buf, 9059 struct bpf_prog *prog, 9060 u32 *target_size) 9061 9062 { 9063 struct bpf_insn *insn = insn_buf; 9064 9065 switch (si->off) { 9066 case offsetof(struct __sk_buff, data): 9067 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data), 9068 si->dst_reg, si->src_reg, 9069 offsetof(struct bpf_flow_dissector, data)); 9070 break; 9071 9072 case offsetof(struct __sk_buff, data_end): 9073 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data_end), 9074 si->dst_reg, si->src_reg, 9075 offsetof(struct bpf_flow_dissector, data_end)); 9076 break; 9077 9078 case offsetof(struct __sk_buff, flow_keys): 9079 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, flow_keys), 9080 si->dst_reg, si->src_reg, 9081 offsetof(struct bpf_flow_dissector, flow_keys)); 9082 break; 9083 } 9084 9085 return insn - insn_buf; 9086 } 9087 9088 static struct bpf_insn *bpf_convert_tstamp_type_read(const struct bpf_insn *si, 9089 struct bpf_insn *insn) 9090 { 9091 __u8 value_reg = si->dst_reg; 9092 __u8 skb_reg = si->src_reg; 9093 /* AX is needed because src_reg and dst_reg could be the same */ 9094 __u8 tmp_reg = BPF_REG_AX; 9095 9096 *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, 9097 PKT_VLAN_PRESENT_OFFSET); 9098 *insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg, 9099 SKB_MONO_DELIVERY_TIME_MASK, 2); 9100 *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_TSTAMP_UNSPEC); 9101 *insn++ = BPF_JMP_A(1); 9102 *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_TSTAMP_DELIVERY_MONO); 9103 9104 return insn; 9105 } 9106 9107 static struct bpf_insn 
*bpf_convert_shinfo_access(const struct bpf_insn *si, 9108 struct bpf_insn *insn) 9109 { 9110 /* si->dst_reg = skb_shinfo(SKB); */ 9111 #ifdef NET_SKBUFF_DATA_USES_OFFSET 9112 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end), 9113 BPF_REG_AX, si->src_reg, 9114 offsetof(struct sk_buff, end)); 9115 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head), 9116 si->dst_reg, si->src_reg, 9117 offsetof(struct sk_buff, head)); 9118 *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX); 9119 #else 9120 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end), 9121 si->dst_reg, si->src_reg, 9122 offsetof(struct sk_buff, end)); 9123 #endif 9124 9125 return insn; 9126 } 9127 9128 static struct bpf_insn *bpf_convert_tstamp_read(const struct bpf_prog *prog, 9129 const struct bpf_insn *si, 9130 struct bpf_insn *insn) 9131 { 9132 __u8 value_reg = si->dst_reg; 9133 __u8 skb_reg = si->src_reg; 9134 9135 #ifdef CONFIG_NET_CLS_ACT 9136 /* If the tstamp_type is read, 9137 * the bpf prog is aware the tstamp could have delivery time. 9138 * Thus, read skb->tstamp as is if tstamp_type_access is true. 9139 */ 9140 if (!prog->tstamp_type_access) { 9141 /* AX is needed because src_reg and dst_reg could be the same */ 9142 __u8 tmp_reg = BPF_REG_AX; 9143 9144 *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, PKT_VLAN_PRESENT_OFFSET); 9145 *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, 9146 TC_AT_INGRESS_MASK | SKB_MONO_DELIVERY_TIME_MASK); 9147 *insn++ = BPF_JMP32_IMM(BPF_JNE, tmp_reg, 9148 TC_AT_INGRESS_MASK | SKB_MONO_DELIVERY_TIME_MASK, 2); 9149 /* skb->tc_at_ingress && skb->mono_delivery_time, 9150 * read 0 as the (rcv) timestamp. 9151 */ 9152 *insn++ = BPF_MOV64_IMM(value_reg, 0); 9153 *insn++ = BPF_JMP_A(1); 9154 } 9155 #endif 9156 9157 *insn++ = BPF_LDX_MEM(BPF_DW, value_reg, skb_reg, 9158 offsetof(struct sk_buff, tstamp)); 9159 return insn; 9160 } 9161 9162 static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog, 9163 const struct bpf_insn *si, 9164 struct bpf_insn *insn) 9165 { 9166 __u8 value_reg = si->src_reg; 9167 __u8 skb_reg = si->dst_reg; 9168 9169 #ifdef CONFIG_NET_CLS_ACT 9170 /* If the tstamp_type is read, 9171 * the bpf prog is aware the tstamp could have delivery time. 9172 * Thus, write skb->tstamp as is if tstamp_type_access is true. 9173 * Otherwise, writing at ingress will have to clear the 9174 * mono_delivery_time bit also. 
9175 */ 9176 if (!prog->tstamp_type_access) { 9177 __u8 tmp_reg = BPF_REG_AX; 9178 9179 *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, PKT_VLAN_PRESENT_OFFSET); 9180 /* Writing __sk_buff->tstamp as ingress, goto <clear> */ 9181 *insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg, TC_AT_INGRESS_MASK, 1); 9182 /* goto <store> */ 9183 *insn++ = BPF_JMP_A(2); 9184 /* <clear>: mono_delivery_time */ 9185 *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, ~SKB_MONO_DELIVERY_TIME_MASK); 9186 *insn++ = BPF_STX_MEM(BPF_B, skb_reg, tmp_reg, PKT_VLAN_PRESENT_OFFSET); 9187 } 9188 #endif 9189 9190 /* <store>: skb->tstamp = tstamp */ 9191 *insn++ = BPF_STX_MEM(BPF_DW, skb_reg, value_reg, 9192 offsetof(struct sk_buff, tstamp)); 9193 return insn; 9194 } 9195 9196 static u32 bpf_convert_ctx_access(enum bpf_access_type type, 9197 const struct bpf_insn *si, 9198 struct bpf_insn *insn_buf, 9199 struct bpf_prog *prog, u32 *target_size) 9200 { 9201 struct bpf_insn *insn = insn_buf; 9202 int off; 9203 9204 switch (si->off) { 9205 case offsetof(struct __sk_buff, len): 9206 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 9207 bpf_target_off(struct sk_buff, len, 4, 9208 target_size)); 9209 break; 9210 9211 case offsetof(struct __sk_buff, protocol): 9212 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, 9213 bpf_target_off(struct sk_buff, protocol, 2, 9214 target_size)); 9215 break; 9216 9217 case offsetof(struct __sk_buff, vlan_proto): 9218 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, 9219 bpf_target_off(struct sk_buff, vlan_proto, 2, 9220 target_size)); 9221 break; 9222 9223 case offsetof(struct __sk_buff, priority): 9224 if (type == BPF_WRITE) 9225 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, 9226 bpf_target_off(struct sk_buff, priority, 4, 9227 target_size)); 9228 else 9229 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 9230 bpf_target_off(struct sk_buff, priority, 4, 9231 target_size)); 9232 break; 9233 9234 case offsetof(struct __sk_buff, ingress_ifindex): 9235 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 9236 bpf_target_off(struct sk_buff, skb_iif, 4, 9237 target_size)); 9238 break; 9239 9240 case offsetof(struct __sk_buff, ifindex): 9241 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev), 9242 si->dst_reg, si->src_reg, 9243 offsetof(struct sk_buff, dev)); 9244 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); 9245 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 9246 bpf_target_off(struct net_device, ifindex, 4, 9247 target_size)); 9248 break; 9249 9250 case offsetof(struct __sk_buff, hash): 9251 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 9252 bpf_target_off(struct sk_buff, hash, 4, 9253 target_size)); 9254 break; 9255 9256 case offsetof(struct __sk_buff, mark): 9257 if (type == BPF_WRITE) 9258 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, 9259 bpf_target_off(struct sk_buff, mark, 4, 9260 target_size)); 9261 else 9262 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 9263 bpf_target_off(struct sk_buff, mark, 4, 9264 target_size)); 9265 break; 9266 9267 case offsetof(struct __sk_buff, pkt_type): 9268 *target_size = 1; 9269 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg, 9270 PKT_TYPE_OFFSET); 9271 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX); 9272 #ifdef __BIG_ENDIAN_BITFIELD 9273 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5); 9274 #endif 9275 break; 9276 9277 case offsetof(struct __sk_buff, queue_mapping): 9278 if (type == BPF_WRITE) { 9279 *insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, NO_QUEUE_MAPPING, 1); 9280 
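			/* A write of NO_QUEUE_MAPPING or above is out of
			 * range: the JGE guard above skips the store below,
			 * leaving skb->queue_mapping unchanged.
			 */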
*insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg, 9281 bpf_target_off(struct sk_buff, 9282 queue_mapping, 9283 2, target_size)); 9284 } else { 9285 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, 9286 bpf_target_off(struct sk_buff, 9287 queue_mapping, 9288 2, target_size)); 9289 } 9290 break; 9291 9292 case offsetof(struct __sk_buff, vlan_present): 9293 *target_size = 1; 9294 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg, 9295 PKT_VLAN_PRESENT_OFFSET); 9296 if (PKT_VLAN_PRESENT_BIT) 9297 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, PKT_VLAN_PRESENT_BIT); 9298 if (PKT_VLAN_PRESENT_BIT < 7) 9299 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1); 9300 break; 9301 9302 case offsetof(struct __sk_buff, vlan_tci): 9303 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, 9304 bpf_target_off(struct sk_buff, vlan_tci, 2, 9305 target_size)); 9306 break; 9307 9308 case offsetof(struct __sk_buff, cb[0]) ... 9309 offsetofend(struct __sk_buff, cb[4]) - 1: 9310 BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, data) < 20); 9311 BUILD_BUG_ON((offsetof(struct sk_buff, cb) + 9312 offsetof(struct qdisc_skb_cb, data)) % 9313 sizeof(__u64)); 9314 9315 prog->cb_access = 1; 9316 off = si->off; 9317 off -= offsetof(struct __sk_buff, cb[0]); 9318 off += offsetof(struct sk_buff, cb); 9319 off += offsetof(struct qdisc_skb_cb, data); 9320 if (type == BPF_WRITE) 9321 *insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg, 9322 si->src_reg, off); 9323 else 9324 *insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg, 9325 si->src_reg, off); 9326 break; 9327 9328 case offsetof(struct __sk_buff, tc_classid): 9329 BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, tc_classid) != 2); 9330 9331 off = si->off; 9332 off -= offsetof(struct __sk_buff, tc_classid); 9333 off += offsetof(struct sk_buff, cb); 9334 off += offsetof(struct qdisc_skb_cb, tc_classid); 9335 *target_size = 2; 9336 if (type == BPF_WRITE) 9337 *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, 9338 si->src_reg, off); 9339 else 9340 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, 9341 si->src_reg, off); 9342 break; 9343 9344 case offsetof(struct __sk_buff, data): 9345 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data), 9346 si->dst_reg, si->src_reg, 9347 offsetof(struct sk_buff, data)); 9348 break; 9349 9350 case offsetof(struct __sk_buff, data_meta): 9351 off = si->off; 9352 off -= offsetof(struct __sk_buff, data_meta); 9353 off += offsetof(struct sk_buff, cb); 9354 off += offsetof(struct bpf_skb_data_end, data_meta); 9355 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, 9356 si->src_reg, off); 9357 break; 9358 9359 case offsetof(struct __sk_buff, data_end): 9360 off = si->off; 9361 off -= offsetof(struct __sk_buff, data_end); 9362 off += offsetof(struct sk_buff, cb); 9363 off += offsetof(struct bpf_skb_data_end, data_end); 9364 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, 9365 si->src_reg, off); 9366 break; 9367 9368 case offsetof(struct __sk_buff, tc_index): 9369 #ifdef CONFIG_NET_SCHED 9370 if (type == BPF_WRITE) 9371 *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg, 9372 bpf_target_off(struct sk_buff, tc_index, 2, 9373 target_size)); 9374 else 9375 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, 9376 bpf_target_off(struct sk_buff, tc_index, 2, 9377 target_size)); 9378 #else 9379 *target_size = 2; 9380 if (type == BPF_WRITE) 9381 *insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg); 9382 else 9383 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); 9384 #endif 9385 break; 9386 9387 case offsetof(struct __sk_buff, napi_id): 9388 #if 
defined(CONFIG_NET_RX_BUSY_POLL) 9389 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 9390 bpf_target_off(struct sk_buff, napi_id, 4, 9391 target_size)); 9392 *insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1); 9393 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); 9394 #else 9395 *target_size = 4; 9396 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); 9397 #endif 9398 break; 9399 case offsetof(struct __sk_buff, family): 9400 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2); 9401 9402 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 9403 si->dst_reg, si->src_reg, 9404 offsetof(struct sk_buff, sk)); 9405 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 9406 bpf_target_off(struct sock_common, 9407 skc_family, 9408 2, target_size)); 9409 break; 9410 case offsetof(struct __sk_buff, remote_ip4): 9411 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4); 9412 9413 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 9414 si->dst_reg, si->src_reg, 9415 offsetof(struct sk_buff, sk)); 9416 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 9417 bpf_target_off(struct sock_common, 9418 skc_daddr, 9419 4, target_size)); 9420 break; 9421 case offsetof(struct __sk_buff, local_ip4): 9422 BUILD_BUG_ON(sizeof_field(struct sock_common, 9423 skc_rcv_saddr) != 4); 9424 9425 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 9426 si->dst_reg, si->src_reg, 9427 offsetof(struct sk_buff, sk)); 9428 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 9429 bpf_target_off(struct sock_common, 9430 skc_rcv_saddr, 9431 4, target_size)); 9432 break; 9433 case offsetof(struct __sk_buff, remote_ip6[0]) ... 9434 offsetof(struct __sk_buff, remote_ip6[3]): 9435 #if IS_ENABLED(CONFIG_IPV6) 9436 BUILD_BUG_ON(sizeof_field(struct sock_common, 9437 skc_v6_daddr.s6_addr32[0]) != 4); 9438 9439 off = si->off; 9440 off -= offsetof(struct __sk_buff, remote_ip6[0]); 9441 9442 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 9443 si->dst_reg, si->src_reg, 9444 offsetof(struct sk_buff, sk)); 9445 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 9446 offsetof(struct sock_common, 9447 skc_v6_daddr.s6_addr32[0]) + 9448 off); 9449 #else 9450 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 9451 #endif 9452 break; 9453 case offsetof(struct __sk_buff, local_ip6[0]) ... 
9454 offsetof(struct __sk_buff, local_ip6[3]): 9455 #if IS_ENABLED(CONFIG_IPV6) 9456 BUILD_BUG_ON(sizeof_field(struct sock_common, 9457 skc_v6_rcv_saddr.s6_addr32[0]) != 4); 9458 9459 off = si->off; 9460 off -= offsetof(struct __sk_buff, local_ip6[0]); 9461 9462 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 9463 si->dst_reg, si->src_reg, 9464 offsetof(struct sk_buff, sk)); 9465 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 9466 offsetof(struct sock_common, 9467 skc_v6_rcv_saddr.s6_addr32[0]) + 9468 off); 9469 #else 9470 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 9471 #endif 9472 break; 9473 9474 case offsetof(struct __sk_buff, remote_port): 9475 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2); 9476 9477 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 9478 si->dst_reg, si->src_reg, 9479 offsetof(struct sk_buff, sk)); 9480 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 9481 bpf_target_off(struct sock_common, 9482 skc_dport, 9483 2, target_size)); 9484 #ifndef __BIG_ENDIAN_BITFIELD 9485 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16); 9486 #endif 9487 break; 9488 9489 case offsetof(struct __sk_buff, local_port): 9490 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2); 9491 9492 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 9493 si->dst_reg, si->src_reg, 9494 offsetof(struct sk_buff, sk)); 9495 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 9496 bpf_target_off(struct sock_common, 9497 skc_num, 2, target_size)); 9498 break; 9499 9500 case offsetof(struct __sk_buff, tstamp): 9501 BUILD_BUG_ON(sizeof_field(struct sk_buff, tstamp) != 8); 9502 9503 if (type == BPF_WRITE) 9504 insn = bpf_convert_tstamp_write(prog, si, insn); 9505 else 9506 insn = bpf_convert_tstamp_read(prog, si, insn); 9507 break; 9508 9509 case offsetof(struct __sk_buff, tstamp_type): 9510 insn = bpf_convert_tstamp_type_read(si, insn); 9511 break; 9512 9513 case offsetof(struct __sk_buff, gso_segs): 9514 insn = bpf_convert_shinfo_access(si, insn); 9515 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_segs), 9516 si->dst_reg, si->dst_reg, 9517 bpf_target_off(struct skb_shared_info, 9518 gso_segs, 2, 9519 target_size)); 9520 break; 9521 case offsetof(struct __sk_buff, gso_size): 9522 insn = bpf_convert_shinfo_access(si, insn); 9523 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_size), 9524 si->dst_reg, si->dst_reg, 9525 bpf_target_off(struct skb_shared_info, 9526 gso_size, 2, 9527 target_size)); 9528 break; 9529 case offsetof(struct __sk_buff, wire_len): 9530 BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, pkt_len) != 4); 9531 9532 off = si->off; 9533 off -= offsetof(struct __sk_buff, wire_len); 9534 off += offsetof(struct sk_buff, cb); 9535 off += offsetof(struct qdisc_skb_cb, pkt_len); 9536 *target_size = 4; 9537 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, off); 9538 break; 9539 9540 case offsetof(struct __sk_buff, sk): 9541 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 9542 si->dst_reg, si->src_reg, 9543 offsetof(struct sk_buff, sk)); 9544 break; 9545 case offsetof(struct __sk_buff, hwtstamp): 9546 BUILD_BUG_ON(sizeof_field(struct skb_shared_hwtstamps, hwtstamp) != 8); 9547 BUILD_BUG_ON(offsetof(struct skb_shared_hwtstamps, hwtstamp) != 0); 9548 9549 insn = bpf_convert_shinfo_access(si, insn); 9550 *insn++ = BPF_LDX_MEM(BPF_DW, 9551 si->dst_reg, si->dst_reg, 9552 bpf_target_off(struct skb_shared_info, 9553 hwtstamps, 8, 9554 target_size)); 9555 break; 9556 } 9557 9558 return insn 
- insn_buf; 9559 } 9560 9561 u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, 9562 const struct bpf_insn *si, 9563 struct bpf_insn *insn_buf, 9564 struct bpf_prog *prog, u32 *target_size) 9565 { 9566 struct bpf_insn *insn = insn_buf; 9567 int off; 9568 9569 switch (si->off) { 9570 case offsetof(struct bpf_sock, bound_dev_if): 9571 BUILD_BUG_ON(sizeof_field(struct sock, sk_bound_dev_if) != 4); 9572 9573 if (type == BPF_WRITE) 9574 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, 9575 offsetof(struct sock, sk_bound_dev_if)); 9576 else 9577 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 9578 offsetof(struct sock, sk_bound_dev_if)); 9579 break; 9580 9581 case offsetof(struct bpf_sock, mark): 9582 BUILD_BUG_ON(sizeof_field(struct sock, sk_mark) != 4); 9583 9584 if (type == BPF_WRITE) 9585 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, 9586 offsetof(struct sock, sk_mark)); 9587 else 9588 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 9589 offsetof(struct sock, sk_mark)); 9590 break; 9591 9592 case offsetof(struct bpf_sock, priority): 9593 BUILD_BUG_ON(sizeof_field(struct sock, sk_priority) != 4); 9594 9595 if (type == BPF_WRITE) 9596 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, 9597 offsetof(struct sock, sk_priority)); 9598 else 9599 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 9600 offsetof(struct sock, sk_priority)); 9601 break; 9602 9603 case offsetof(struct bpf_sock, family): 9604 *insn++ = BPF_LDX_MEM( 9605 BPF_FIELD_SIZEOF(struct sock_common, skc_family), 9606 si->dst_reg, si->src_reg, 9607 bpf_target_off(struct sock_common, 9608 skc_family, 9609 sizeof_field(struct sock_common, 9610 skc_family), 9611 target_size)); 9612 break; 9613 9614 case offsetof(struct bpf_sock, type): 9615 *insn++ = BPF_LDX_MEM( 9616 BPF_FIELD_SIZEOF(struct sock, sk_type), 9617 si->dst_reg, si->src_reg, 9618 bpf_target_off(struct sock, sk_type, 9619 sizeof_field(struct sock, sk_type), 9620 target_size)); 9621 break; 9622 9623 case offsetof(struct bpf_sock, protocol): 9624 *insn++ = BPF_LDX_MEM( 9625 BPF_FIELD_SIZEOF(struct sock, sk_protocol), 9626 si->dst_reg, si->src_reg, 9627 bpf_target_off(struct sock, sk_protocol, 9628 sizeof_field(struct sock, sk_protocol), 9629 target_size)); 9630 break; 9631 9632 case offsetof(struct bpf_sock, src_ip4): 9633 *insn++ = BPF_LDX_MEM( 9634 BPF_SIZE(si->code), si->dst_reg, si->src_reg, 9635 bpf_target_off(struct sock_common, skc_rcv_saddr, 9636 sizeof_field(struct sock_common, 9637 skc_rcv_saddr), 9638 target_size)); 9639 break; 9640 9641 case offsetof(struct bpf_sock, dst_ip4): 9642 *insn++ = BPF_LDX_MEM( 9643 BPF_SIZE(si->code), si->dst_reg, si->src_reg, 9644 bpf_target_off(struct sock_common, skc_daddr, 9645 sizeof_field(struct sock_common, 9646 skc_daddr), 9647 target_size)); 9648 break; 9649 9650 case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]): 9651 #if IS_ENABLED(CONFIG_IPV6) 9652 off = si->off; 9653 off -= offsetof(struct bpf_sock, src_ip6[0]); 9654 *insn++ = BPF_LDX_MEM( 9655 BPF_SIZE(si->code), si->dst_reg, si->src_reg, 9656 bpf_target_off( 9657 struct sock_common, 9658 skc_v6_rcv_saddr.s6_addr32[0], 9659 sizeof_field(struct sock_common, 9660 skc_v6_rcv_saddr.s6_addr32[0]), 9661 target_size) + off); 9662 #else 9663 (void)off; 9664 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 9665 #endif 9666 break; 9667 9668 case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]): 9669 #if IS_ENABLED(CONFIG_IPV6) 9670 off = si->off; 9671 off -= offsetof(struct bpf_sock, dst_ip6[0]); 9672 *insn++ = 
BPF_LDX_MEM( 9673 BPF_SIZE(si->code), si->dst_reg, si->src_reg, 9674 bpf_target_off(struct sock_common, 9675 skc_v6_daddr.s6_addr32[0], 9676 sizeof_field(struct sock_common, 9677 skc_v6_daddr.s6_addr32[0]), 9678 target_size) + off); 9679 #else 9680 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 9681 *target_size = 4; 9682 #endif 9683 break; 9684 9685 case offsetof(struct bpf_sock, src_port): 9686 *insn++ = BPF_LDX_MEM( 9687 BPF_FIELD_SIZEOF(struct sock_common, skc_num), 9688 si->dst_reg, si->src_reg, 9689 bpf_target_off(struct sock_common, skc_num, 9690 sizeof_field(struct sock_common, 9691 skc_num), 9692 target_size)); 9693 break; 9694 9695 case offsetof(struct bpf_sock, dst_port): 9696 *insn++ = BPF_LDX_MEM( 9697 BPF_FIELD_SIZEOF(struct sock_common, skc_dport), 9698 si->dst_reg, si->src_reg, 9699 bpf_target_off(struct sock_common, skc_dport, 9700 sizeof_field(struct sock_common, 9701 skc_dport), 9702 target_size)); 9703 break; 9704 9705 case offsetof(struct bpf_sock, state): 9706 *insn++ = BPF_LDX_MEM( 9707 BPF_FIELD_SIZEOF(struct sock_common, skc_state), 9708 si->dst_reg, si->src_reg, 9709 bpf_target_off(struct sock_common, skc_state, 9710 sizeof_field(struct sock_common, 9711 skc_state), 9712 target_size)); 9713 break; 9714 case offsetof(struct bpf_sock, rx_queue_mapping): 9715 #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING 9716 *insn++ = BPF_LDX_MEM( 9717 BPF_FIELD_SIZEOF(struct sock, sk_rx_queue_mapping), 9718 si->dst_reg, si->src_reg, 9719 bpf_target_off(struct sock, sk_rx_queue_mapping, 9720 sizeof_field(struct sock, 9721 sk_rx_queue_mapping), 9722 target_size)); 9723 *insn++ = BPF_JMP_IMM(BPF_JNE, si->dst_reg, NO_QUEUE_MAPPING, 9724 1); 9725 *insn++ = BPF_MOV64_IMM(si->dst_reg, -1); 9726 #else 9727 *insn++ = BPF_MOV64_IMM(si->dst_reg, -1); 9728 *target_size = 2; 9729 #endif 9730 break; 9731 } 9732 9733 return insn - insn_buf; 9734 } 9735 9736 static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type, 9737 const struct bpf_insn *si, 9738 struct bpf_insn *insn_buf, 9739 struct bpf_prog *prog, u32 *target_size) 9740 { 9741 struct bpf_insn *insn = insn_buf; 9742 9743 switch (si->off) { 9744 case offsetof(struct __sk_buff, ifindex): 9745 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev), 9746 si->dst_reg, si->src_reg, 9747 offsetof(struct sk_buff, dev)); 9748 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 9749 bpf_target_off(struct net_device, ifindex, 4, 9750 target_size)); 9751 break; 9752 default: 9753 return bpf_convert_ctx_access(type, si, insn_buf, prog, 9754 target_size); 9755 } 9756 9757 return insn - insn_buf; 9758 } 9759 9760 static u32 xdp_convert_ctx_access(enum bpf_access_type type, 9761 const struct bpf_insn *si, 9762 struct bpf_insn *insn_buf, 9763 struct bpf_prog *prog, u32 *target_size) 9764 { 9765 struct bpf_insn *insn = insn_buf; 9766 9767 switch (si->off) { 9768 case offsetof(struct xdp_md, data): 9769 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data), 9770 si->dst_reg, si->src_reg, 9771 offsetof(struct xdp_buff, data)); 9772 break; 9773 case offsetof(struct xdp_md, data_meta): 9774 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_meta), 9775 si->dst_reg, si->src_reg, 9776 offsetof(struct xdp_buff, data_meta)); 9777 break; 9778 case offsetof(struct xdp_md, data_end): 9779 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end), 9780 si->dst_reg, si->src_reg, 9781 offsetof(struct xdp_buff, data_end)); 9782 break; 9783 case offsetof(struct xdp_md, ingress_ifindex): 9784 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, 
							      rxq),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, rxq));
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev),
				      si->dst_reg, si->dst_reg,
				      offsetof(struct xdp_rxq_info, dev));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct net_device, ifindex));
		break;
	case offsetof(struct xdp_md, rx_queue_index):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, rxq));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct xdp_rxq_info,
					       queue_index));
		break;
	case offsetof(struct xdp_md, egress_ifindex):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, txq),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, txq));
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_txq_info, dev),
				      si->dst_reg, si->dst_reg,
				      offsetof(struct xdp_txq_info, dev));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct net_device, ifindex));
		break;
	}

	return insn - insn_buf;
}

/* SOCK_ADDR_LOAD_NESTED_FIELD() loads Nested Field S.F.NF, where S is the
 * type of the context Structure, F is a Field in the context structure that
 * contains a pointer to a Nested Structure of type NS that has the field NF.
 *
 * SIZE encodes the load size (BPF_B, BPF_H, etc). It's up to the caller to
 * make sure that SIZE is not greater than the actual size of S.F.NF.
 *
 * If offset OFF is provided, the load happens from that offset relative to
 * the offset of NF.
 */
#define SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF)	\
	do {								\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), si->dst_reg, \
				      si->src_reg, offsetof(S, F));	\
		*insn++ = BPF_LDX_MEM(					\
			SIZE, si->dst_reg, si->dst_reg,			\
			bpf_target_off(NS, NF, sizeof_field(NS, NF),	\
				       target_size)			\
				+ OFF);					\
	} while (0)

#define SOCK_ADDR_LOAD_NESTED_FIELD(S, NS, F, NF)			\
	SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF,		\
					     BPF_FIELD_SIZEOF(NS, NF), 0)

/* SOCK_ADDR_STORE_NESTED_FIELD_OFF() has semantics similar to
 * SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF() but for a store operation.
 *
 * In addition it uses Temporary Field TF (a member of struct S) as a third
 * "register", since the two registers available in convert_ctx_access are
 * not enough: we can't overwrite SRC, since it contains the value to store,
 * nor DST, since it contains the pointer to the context that may be used by
 * later instructions. So we need a temporary place to save the pointer to
 * the nested structure whose field we want to store to.
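 *
 * As a rough, illustrative sketch (not a literal expansion), with DST
 * holding the context pointer "ctx" (of type S) and SRC holding the
 * value to be stored, the emitted sequence behaves like:
 *
 *	tmp = <scratch register distinct from SRC and DST>;
 *	ctx->TF = tmp;			(spill the borrowed register)
 *	tmp = ctx->F;			(pointer to the nested struct NS)
 *	*(SIZE *)((void *)tmp + offsetof(NS, NF) + OFF) = SRC;
 *	tmp = ctx->TF;			(restore the borrowed register)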
9850 */ 9851 #define SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, SIZE, OFF, TF) \ 9852 do { \ 9853 int tmp_reg = BPF_REG_9; \ 9854 if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg) \ 9855 --tmp_reg; \ 9856 if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg) \ 9857 --tmp_reg; \ 9858 *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, tmp_reg, \ 9859 offsetof(S, TF)); \ 9860 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg, \ 9861 si->dst_reg, offsetof(S, F)); \ 9862 *insn++ = BPF_STX_MEM(SIZE, tmp_reg, si->src_reg, \ 9863 bpf_target_off(NS, NF, sizeof_field(NS, NF), \ 9864 target_size) \ 9865 + OFF); \ 9866 *insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg, \ 9867 offsetof(S, TF)); \ 9868 } while (0) 9869 9870 #define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF, \ 9871 TF) \ 9872 do { \ 9873 if (type == BPF_WRITE) { \ 9874 SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, SIZE, \ 9875 OFF, TF); \ 9876 } else { \ 9877 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF( \ 9878 S, NS, F, NF, SIZE, OFF); \ 9879 } \ 9880 } while (0) 9881 9882 #define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(S, NS, F, NF, TF) \ 9883 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( \ 9884 S, NS, F, NF, BPF_FIELD_SIZEOF(NS, NF), 0, TF) 9885 9886 static u32 sock_addr_convert_ctx_access(enum bpf_access_type type, 9887 const struct bpf_insn *si, 9888 struct bpf_insn *insn_buf, 9889 struct bpf_prog *prog, u32 *target_size) 9890 { 9891 int off, port_size = sizeof_field(struct sockaddr_in6, sin6_port); 9892 struct bpf_insn *insn = insn_buf; 9893 9894 switch (si->off) { 9895 case offsetof(struct bpf_sock_addr, user_family): 9896 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern, 9897 struct sockaddr, uaddr, sa_family); 9898 break; 9899 9900 case offsetof(struct bpf_sock_addr, user_ip4): 9901 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( 9902 struct bpf_sock_addr_kern, struct sockaddr_in, uaddr, 9903 sin_addr, BPF_SIZE(si->code), 0, tmp_reg); 9904 break; 9905 9906 case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]): 9907 off = si->off; 9908 off -= offsetof(struct bpf_sock_addr, user_ip6[0]); 9909 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( 9910 struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr, 9911 sin6_addr.s6_addr32[0], BPF_SIZE(si->code), off, 9912 tmp_reg); 9913 break; 9914 9915 case offsetof(struct bpf_sock_addr, user_port): 9916 /* To get port we need to know sa_family first and then treat 9917 * sockaddr as either sockaddr_in or sockaddr_in6. 9918 * Though we can simplify since port field has same offset and 9919 * size in both structures. 9920 * Here we check this invariant and use just one of the 9921 * structures if it's true. 9922 */ 9923 BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_port) != 9924 offsetof(struct sockaddr_in6, sin6_port)); 9925 BUILD_BUG_ON(sizeof_field(struct sockaddr_in, sin_port) != 9926 sizeof_field(struct sockaddr_in6, sin6_port)); 9927 /* Account for sin6_port being smaller than user_port. 
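 * user_port is exposed to programs as a 4-byte field of struct
 * bpf_sock_addr, while sin6_port is only 2 bytes wide, so clamp the
 * emitted access to the smaller of the program's access size and the
 * kernel field size: e.g. a BPF_W access to user_port becomes a BPF_H
 * load/store on sin6_port.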
*/ 9928 port_size = min(port_size, BPF_LDST_BYTES(si)); 9929 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( 9930 struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr, 9931 sin6_port, bytes_to_bpf_size(port_size), 0, tmp_reg); 9932 break; 9933 9934 case offsetof(struct bpf_sock_addr, family): 9935 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern, 9936 struct sock, sk, sk_family); 9937 break; 9938 9939 case offsetof(struct bpf_sock_addr, type): 9940 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern, 9941 struct sock, sk, sk_type); 9942 break; 9943 9944 case offsetof(struct bpf_sock_addr, protocol): 9945 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern, 9946 struct sock, sk, sk_protocol); 9947 break; 9948 9949 case offsetof(struct bpf_sock_addr, msg_src_ip4): 9950 /* Treat t_ctx as struct in_addr for msg_src_ip4. */ 9951 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( 9952 struct bpf_sock_addr_kern, struct in_addr, t_ctx, 9953 s_addr, BPF_SIZE(si->code), 0, tmp_reg); 9954 break; 9955 9956 case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0], 9957 msg_src_ip6[3]): 9958 off = si->off; 9959 off -= offsetof(struct bpf_sock_addr, msg_src_ip6[0]); 9960 /* Treat t_ctx as struct in6_addr for msg_src_ip6. */ 9961 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( 9962 struct bpf_sock_addr_kern, struct in6_addr, t_ctx, 9963 s6_addr32[0], BPF_SIZE(si->code), off, tmp_reg); 9964 break; 9965 case offsetof(struct bpf_sock_addr, sk): 9966 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_addr_kern, sk), 9967 si->dst_reg, si->src_reg, 9968 offsetof(struct bpf_sock_addr_kern, sk)); 9969 break; 9970 } 9971 9972 return insn - insn_buf; 9973 } 9974 9975 static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, 9976 const struct bpf_insn *si, 9977 struct bpf_insn *insn_buf, 9978 struct bpf_prog *prog, 9979 u32 *target_size) 9980 { 9981 struct bpf_insn *insn = insn_buf; 9982 int off; 9983 9984 /* Helper macro for adding read access to tcp_sock or sock fields. 
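 *
 * Illustrative behaviour of SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)
 * (a sketch, not a literal expansion):
 *
 *	if (ctx->is_fullsock) {
 *		sk  = ctx->sk;
 *		dst = ((OBJ *)sk)->OBJ_FIELD;
 *	}
 *
 * with an extra spill/fill of a scratch register through
 * bpf_sock_ops_kern::temp when dst_reg == src_reg, since the context
 * pointer would otherwise be clobbered before it is dereferenced again.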
*/ 9985 #define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \ 9986 do { \ 9987 int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 2; \ 9988 BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) > \ 9989 sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \ 9990 if (si->dst_reg == reg || si->src_reg == reg) \ 9991 reg--; \ 9992 if (si->dst_reg == reg || si->src_reg == reg) \ 9993 reg--; \ 9994 if (si->dst_reg == si->src_reg) { \ 9995 *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \ 9996 offsetof(struct bpf_sock_ops_kern, \ 9997 temp)); \ 9998 fullsock_reg = reg; \ 9999 jmp += 2; \ 10000 } \ 10001 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ 10002 struct bpf_sock_ops_kern, \ 10003 is_fullsock), \ 10004 fullsock_reg, si->src_reg, \ 10005 offsetof(struct bpf_sock_ops_kern, \ 10006 is_fullsock)); \ 10007 *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \ 10008 if (si->dst_reg == si->src_reg) \ 10009 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \ 10010 offsetof(struct bpf_sock_ops_kern, \ 10011 temp)); \ 10012 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ 10013 struct bpf_sock_ops_kern, sk),\ 10014 si->dst_reg, si->src_reg, \ 10015 offsetof(struct bpf_sock_ops_kern, sk));\ 10016 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ, \ 10017 OBJ_FIELD), \ 10018 si->dst_reg, si->dst_reg, \ 10019 offsetof(OBJ, OBJ_FIELD)); \ 10020 if (si->dst_reg == si->src_reg) { \ 10021 *insn++ = BPF_JMP_A(1); \ 10022 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \ 10023 offsetof(struct bpf_sock_ops_kern, \ 10024 temp)); \ 10025 } \ 10026 } while (0) 10027 10028 #define SOCK_OPS_GET_SK() \ 10029 do { \ 10030 int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 1; \ 10031 if (si->dst_reg == reg || si->src_reg == reg) \ 10032 reg--; \ 10033 if (si->dst_reg == reg || si->src_reg == reg) \ 10034 reg--; \ 10035 if (si->dst_reg == si->src_reg) { \ 10036 *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \ 10037 offsetof(struct bpf_sock_ops_kern, \ 10038 temp)); \ 10039 fullsock_reg = reg; \ 10040 jmp += 2; \ 10041 } \ 10042 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ 10043 struct bpf_sock_ops_kern, \ 10044 is_fullsock), \ 10045 fullsock_reg, si->src_reg, \ 10046 offsetof(struct bpf_sock_ops_kern, \ 10047 is_fullsock)); \ 10048 *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \ 10049 if (si->dst_reg == si->src_reg) \ 10050 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \ 10051 offsetof(struct bpf_sock_ops_kern, \ 10052 temp)); \ 10053 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ 10054 struct bpf_sock_ops_kern, sk),\ 10055 si->dst_reg, si->src_reg, \ 10056 offsetof(struct bpf_sock_ops_kern, sk));\ 10057 if (si->dst_reg == si->src_reg) { \ 10058 *insn++ = BPF_JMP_A(1); \ 10059 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \ 10060 offsetof(struct bpf_sock_ops_kern, \ 10061 temp)); \ 10062 } \ 10063 } while (0) 10064 10065 #define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \ 10066 SOCK_OPS_GET_FIELD(FIELD, FIELD, struct tcp_sock) 10067 10068 /* Helper macro for adding write access to tcp_sock or sock fields. 10069 * The macro is called with two registers, dst_reg which contains a pointer 10070 * to ctx (context) and src_reg which contains the value that should be 10071 * stored. However, we need an additional register since we cannot overwrite 10072 * dst_reg because it may be used later in the program. 10073 * Instead we "borrow" one of the other register. We first save its value 10074 * into a new (temp) field in bpf_sock_ops_kern, use it, and then restore 10075 * it at the end of the macro. 
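 *
 * An illustrative sketch (not a literal expansion) of the emitted
 * sequence, with "reg" being the borrowed register:
 *
 *	ctx->temp = reg;			(spill)
 *	reg = ctx->is_fullsock;
 *	if (reg) {
 *		reg = ctx->sk;
 *		((OBJ *)reg)->OBJ_FIELD = src;
 *	}
 *	reg = ctx->temp;			(restore)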
10076 */ 10077 #define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \ 10078 do { \ 10079 int reg = BPF_REG_9; \ 10080 BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) > \ 10081 sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \ 10082 if (si->dst_reg == reg || si->src_reg == reg) \ 10083 reg--; \ 10084 if (si->dst_reg == reg || si->src_reg == reg) \ 10085 reg--; \ 10086 *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, reg, \ 10087 offsetof(struct bpf_sock_ops_kern, \ 10088 temp)); \ 10089 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ 10090 struct bpf_sock_ops_kern, \ 10091 is_fullsock), \ 10092 reg, si->dst_reg, \ 10093 offsetof(struct bpf_sock_ops_kern, \ 10094 is_fullsock)); \ 10095 *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2); \ 10096 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ 10097 struct bpf_sock_ops_kern, sk),\ 10098 reg, si->dst_reg, \ 10099 offsetof(struct bpf_sock_ops_kern, sk));\ 10100 *insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD), \ 10101 reg, si->src_reg, \ 10102 offsetof(OBJ, OBJ_FIELD)); \ 10103 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg, \ 10104 offsetof(struct bpf_sock_ops_kern, \ 10105 temp)); \ 10106 } while (0) 10107 10108 #define SOCK_OPS_GET_OR_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ, TYPE) \ 10109 do { \ 10110 if (TYPE == BPF_WRITE) \ 10111 SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \ 10112 else \ 10113 SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \ 10114 } while (0) 10115 10116 if (insn > insn_buf) 10117 return insn - insn_buf; 10118 10119 switch (si->off) { 10120 case offsetof(struct bpf_sock_ops, op): 10121 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern, 10122 op), 10123 si->dst_reg, si->src_reg, 10124 offsetof(struct bpf_sock_ops_kern, op)); 10125 break; 10126 10127 case offsetof(struct bpf_sock_ops, replylong[0]) ... 
10128 offsetof(struct bpf_sock_ops, replylong[3]): 10129 BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, reply) != 10130 sizeof_field(struct bpf_sock_ops_kern, reply)); 10131 BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, replylong) != 10132 sizeof_field(struct bpf_sock_ops_kern, replylong)); 10133 off = si->off; 10134 off -= offsetof(struct bpf_sock_ops, replylong[0]); 10135 off += offsetof(struct bpf_sock_ops_kern, replylong[0]); 10136 if (type == BPF_WRITE) 10137 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, 10138 off); 10139 else 10140 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 10141 off); 10142 break; 10143 10144 case offsetof(struct bpf_sock_ops, family): 10145 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2); 10146 10147 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10148 struct bpf_sock_ops_kern, sk), 10149 si->dst_reg, si->src_reg, 10150 offsetof(struct bpf_sock_ops_kern, sk)); 10151 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 10152 offsetof(struct sock_common, skc_family)); 10153 break; 10154 10155 case offsetof(struct bpf_sock_ops, remote_ip4): 10156 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4); 10157 10158 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10159 struct bpf_sock_ops_kern, sk), 10160 si->dst_reg, si->src_reg, 10161 offsetof(struct bpf_sock_ops_kern, sk)); 10162 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 10163 offsetof(struct sock_common, skc_daddr)); 10164 break; 10165 10166 case offsetof(struct bpf_sock_ops, local_ip4): 10167 BUILD_BUG_ON(sizeof_field(struct sock_common, 10168 skc_rcv_saddr) != 4); 10169 10170 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10171 struct bpf_sock_ops_kern, sk), 10172 si->dst_reg, si->src_reg, 10173 offsetof(struct bpf_sock_ops_kern, sk)); 10174 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 10175 offsetof(struct sock_common, 10176 skc_rcv_saddr)); 10177 break; 10178 10179 case offsetof(struct bpf_sock_ops, remote_ip6[0]) ... 10180 offsetof(struct bpf_sock_ops, remote_ip6[3]): 10181 #if IS_ENABLED(CONFIG_IPV6) 10182 BUILD_BUG_ON(sizeof_field(struct sock_common, 10183 skc_v6_daddr.s6_addr32[0]) != 4); 10184 10185 off = si->off; 10186 off -= offsetof(struct bpf_sock_ops, remote_ip6[0]); 10187 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10188 struct bpf_sock_ops_kern, sk), 10189 si->dst_reg, si->src_reg, 10190 offsetof(struct bpf_sock_ops_kern, sk)); 10191 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 10192 offsetof(struct sock_common, 10193 skc_v6_daddr.s6_addr32[0]) + 10194 off); 10195 #else 10196 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 10197 #endif 10198 break; 10199 10200 case offsetof(struct bpf_sock_ops, local_ip6[0]) ... 
10201 offsetof(struct bpf_sock_ops, local_ip6[3]): 10202 #if IS_ENABLED(CONFIG_IPV6) 10203 BUILD_BUG_ON(sizeof_field(struct sock_common, 10204 skc_v6_rcv_saddr.s6_addr32[0]) != 4); 10205 10206 off = si->off; 10207 off -= offsetof(struct bpf_sock_ops, local_ip6[0]); 10208 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10209 struct bpf_sock_ops_kern, sk), 10210 si->dst_reg, si->src_reg, 10211 offsetof(struct bpf_sock_ops_kern, sk)); 10212 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 10213 offsetof(struct sock_common, 10214 skc_v6_rcv_saddr.s6_addr32[0]) + 10215 off); 10216 #else 10217 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 10218 #endif 10219 break; 10220 10221 case offsetof(struct bpf_sock_ops, remote_port): 10222 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2); 10223 10224 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10225 struct bpf_sock_ops_kern, sk), 10226 si->dst_reg, si->src_reg, 10227 offsetof(struct bpf_sock_ops_kern, sk)); 10228 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 10229 offsetof(struct sock_common, skc_dport)); 10230 #ifndef __BIG_ENDIAN_BITFIELD 10231 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16); 10232 #endif 10233 break; 10234 10235 case offsetof(struct bpf_sock_ops, local_port): 10236 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2); 10237 10238 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10239 struct bpf_sock_ops_kern, sk), 10240 si->dst_reg, si->src_reg, 10241 offsetof(struct bpf_sock_ops_kern, sk)); 10242 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 10243 offsetof(struct sock_common, skc_num)); 10244 break; 10245 10246 case offsetof(struct bpf_sock_ops, is_fullsock): 10247 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10248 struct bpf_sock_ops_kern, 10249 is_fullsock), 10250 si->dst_reg, si->src_reg, 10251 offsetof(struct bpf_sock_ops_kern, 10252 is_fullsock)); 10253 break; 10254 10255 case offsetof(struct bpf_sock_ops, state): 10256 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_state) != 1); 10257 10258 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10259 struct bpf_sock_ops_kern, sk), 10260 si->dst_reg, si->src_reg, 10261 offsetof(struct bpf_sock_ops_kern, sk)); 10262 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->dst_reg, 10263 offsetof(struct sock_common, skc_state)); 10264 break; 10265 10266 case offsetof(struct bpf_sock_ops, rtt_min): 10267 BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) != 10268 sizeof(struct minmax)); 10269 BUILD_BUG_ON(sizeof(struct minmax) < 10270 sizeof(struct minmax_sample)); 10271 10272 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10273 struct bpf_sock_ops_kern, sk), 10274 si->dst_reg, si->src_reg, 10275 offsetof(struct bpf_sock_ops_kern, sk)); 10276 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 10277 offsetof(struct tcp_sock, rtt_min) + 10278 sizeof_field(struct minmax_sample, t)); 10279 break; 10280 10281 case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags): 10282 SOCK_OPS_GET_FIELD(bpf_sock_ops_cb_flags, bpf_sock_ops_cb_flags, 10283 struct tcp_sock); 10284 break; 10285 10286 case offsetof(struct bpf_sock_ops, sk_txhash): 10287 SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash, 10288 struct sock, type); 10289 break; 10290 case offsetof(struct bpf_sock_ops, snd_cwnd): 10291 SOCK_OPS_GET_TCP_SOCK_FIELD(snd_cwnd); 10292 break; 10293 case offsetof(struct bpf_sock_ops, srtt_us): 10294 SOCK_OPS_GET_TCP_SOCK_FIELD(srtt_us); 10295 break; 10296 case offsetof(struct bpf_sock_ops, snd_ssthresh): 10297 SOCK_OPS_GET_TCP_SOCK_FIELD(snd_ssthresh); 10298 break; 10299 case offsetof(struct bpf_sock_ops, 
rcv_nxt): 10300 SOCK_OPS_GET_TCP_SOCK_FIELD(rcv_nxt); 10301 break; 10302 case offsetof(struct bpf_sock_ops, snd_nxt): 10303 SOCK_OPS_GET_TCP_SOCK_FIELD(snd_nxt); 10304 break; 10305 case offsetof(struct bpf_sock_ops, snd_una): 10306 SOCK_OPS_GET_TCP_SOCK_FIELD(snd_una); 10307 break; 10308 case offsetof(struct bpf_sock_ops, mss_cache): 10309 SOCK_OPS_GET_TCP_SOCK_FIELD(mss_cache); 10310 break; 10311 case offsetof(struct bpf_sock_ops, ecn_flags): 10312 SOCK_OPS_GET_TCP_SOCK_FIELD(ecn_flags); 10313 break; 10314 case offsetof(struct bpf_sock_ops, rate_delivered): 10315 SOCK_OPS_GET_TCP_SOCK_FIELD(rate_delivered); 10316 break; 10317 case offsetof(struct bpf_sock_ops, rate_interval_us): 10318 SOCK_OPS_GET_TCP_SOCK_FIELD(rate_interval_us); 10319 break; 10320 case offsetof(struct bpf_sock_ops, packets_out): 10321 SOCK_OPS_GET_TCP_SOCK_FIELD(packets_out); 10322 break; 10323 case offsetof(struct bpf_sock_ops, retrans_out): 10324 SOCK_OPS_GET_TCP_SOCK_FIELD(retrans_out); 10325 break; 10326 case offsetof(struct bpf_sock_ops, total_retrans): 10327 SOCK_OPS_GET_TCP_SOCK_FIELD(total_retrans); 10328 break; 10329 case offsetof(struct bpf_sock_ops, segs_in): 10330 SOCK_OPS_GET_TCP_SOCK_FIELD(segs_in); 10331 break; 10332 case offsetof(struct bpf_sock_ops, data_segs_in): 10333 SOCK_OPS_GET_TCP_SOCK_FIELD(data_segs_in); 10334 break; 10335 case offsetof(struct bpf_sock_ops, segs_out): 10336 SOCK_OPS_GET_TCP_SOCK_FIELD(segs_out); 10337 break; 10338 case offsetof(struct bpf_sock_ops, data_segs_out): 10339 SOCK_OPS_GET_TCP_SOCK_FIELD(data_segs_out); 10340 break; 10341 case offsetof(struct bpf_sock_ops, lost_out): 10342 SOCK_OPS_GET_TCP_SOCK_FIELD(lost_out); 10343 break; 10344 case offsetof(struct bpf_sock_ops, sacked_out): 10345 SOCK_OPS_GET_TCP_SOCK_FIELD(sacked_out); 10346 break; 10347 case offsetof(struct bpf_sock_ops, bytes_received): 10348 SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_received); 10349 break; 10350 case offsetof(struct bpf_sock_ops, bytes_acked): 10351 SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_acked); 10352 break; 10353 case offsetof(struct bpf_sock_ops, sk): 10354 SOCK_OPS_GET_SK(); 10355 break; 10356 case offsetof(struct bpf_sock_ops, skb_data_end): 10357 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern, 10358 skb_data_end), 10359 si->dst_reg, si->src_reg, 10360 offsetof(struct bpf_sock_ops_kern, 10361 skb_data_end)); 10362 break; 10363 case offsetof(struct bpf_sock_ops, skb_data): 10364 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern, 10365 skb), 10366 si->dst_reg, si->src_reg, 10367 offsetof(struct bpf_sock_ops_kern, 10368 skb)); 10369 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); 10370 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data), 10371 si->dst_reg, si->dst_reg, 10372 offsetof(struct sk_buff, data)); 10373 break; 10374 case offsetof(struct bpf_sock_ops, skb_len): 10375 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern, 10376 skb), 10377 si->dst_reg, si->src_reg, 10378 offsetof(struct bpf_sock_ops_kern, 10379 skb)); 10380 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); 10381 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, len), 10382 si->dst_reg, si->dst_reg, 10383 offsetof(struct sk_buff, len)); 10384 break; 10385 case offsetof(struct bpf_sock_ops, skb_tcp_flags): 10386 off = offsetof(struct sk_buff, cb); 10387 off += offsetof(struct tcp_skb_cb, tcp_flags); 10388 *target_size = sizeof_field(struct tcp_skb_cb, tcp_flags); 10389 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern, 10390 skb), 10391 si->dst_reg, si->src_reg, 
10392 offsetof(struct bpf_sock_ops_kern, 10393 skb)); 10394 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); 10395 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_skb_cb, 10396 tcp_flags), 10397 si->dst_reg, si->dst_reg, off); 10398 break; 10399 } 10400 return insn - insn_buf; 10401 } 10402 10403 /* data_end = skb->data + skb_headlen() */ 10404 static struct bpf_insn *bpf_convert_data_end_access(const struct bpf_insn *si, 10405 struct bpf_insn *insn) 10406 { 10407 int reg; 10408 int temp_reg_off = offsetof(struct sk_buff, cb) + 10409 offsetof(struct sk_skb_cb, temp_reg); 10410 10411 if (si->src_reg == si->dst_reg) { 10412 /* We need an extra register, choose and save a register. */ 10413 reg = BPF_REG_9; 10414 if (si->src_reg == reg || si->dst_reg == reg) 10415 reg--; 10416 if (si->src_reg == reg || si->dst_reg == reg) 10417 reg--; 10418 *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, temp_reg_off); 10419 } else { 10420 reg = si->dst_reg; 10421 } 10422 10423 /* reg = skb->data */ 10424 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data), 10425 reg, si->src_reg, 10426 offsetof(struct sk_buff, data)); 10427 /* AX = skb->len */ 10428 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, len), 10429 BPF_REG_AX, si->src_reg, 10430 offsetof(struct sk_buff, len)); 10431 /* reg = skb->data + skb->len */ 10432 *insn++ = BPF_ALU64_REG(BPF_ADD, reg, BPF_REG_AX); 10433 /* AX = skb->data_len */ 10434 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data_len), 10435 BPF_REG_AX, si->src_reg, 10436 offsetof(struct sk_buff, data_len)); 10437 10438 /* reg = skb->data + skb->len - skb->data_len */ 10439 *insn++ = BPF_ALU64_REG(BPF_SUB, reg, BPF_REG_AX); 10440 10441 if (si->src_reg == si->dst_reg) { 10442 /* Restore the saved register */ 10443 *insn++ = BPF_MOV64_REG(BPF_REG_AX, si->src_reg); 10444 *insn++ = BPF_MOV64_REG(si->dst_reg, reg); 10445 *insn++ = BPF_LDX_MEM(BPF_DW, reg, BPF_REG_AX, temp_reg_off); 10446 } 10447 10448 return insn; 10449 } 10450 10451 static u32 sk_skb_convert_ctx_access(enum bpf_access_type type, 10452 const struct bpf_insn *si, 10453 struct bpf_insn *insn_buf, 10454 struct bpf_prog *prog, u32 *target_size) 10455 { 10456 struct bpf_insn *insn = insn_buf; 10457 int off; 10458 10459 switch (si->off) { 10460 case offsetof(struct __sk_buff, data_end): 10461 insn = bpf_convert_data_end_access(si, insn); 10462 break; 10463 case offsetof(struct __sk_buff, cb[0]) ... 
10464 offsetofend(struct __sk_buff, cb[4]) - 1: 10465 BUILD_BUG_ON(sizeof_field(struct sk_skb_cb, data) < 20); 10466 BUILD_BUG_ON((offsetof(struct sk_buff, cb) + 10467 offsetof(struct sk_skb_cb, data)) % 10468 sizeof(__u64)); 10469 10470 prog->cb_access = 1; 10471 off = si->off; 10472 off -= offsetof(struct __sk_buff, cb[0]); 10473 off += offsetof(struct sk_buff, cb); 10474 off += offsetof(struct sk_skb_cb, data); 10475 if (type == BPF_WRITE) 10476 *insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg, 10477 si->src_reg, off); 10478 else 10479 *insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg, 10480 si->src_reg, off); 10481 break; 10482 10483 10484 default: 10485 return bpf_convert_ctx_access(type, si, insn_buf, prog, 10486 target_size); 10487 } 10488 10489 return insn - insn_buf; 10490 } 10491 10492 static u32 sk_msg_convert_ctx_access(enum bpf_access_type type, 10493 const struct bpf_insn *si, 10494 struct bpf_insn *insn_buf, 10495 struct bpf_prog *prog, u32 *target_size) 10496 { 10497 struct bpf_insn *insn = insn_buf; 10498 #if IS_ENABLED(CONFIG_IPV6) 10499 int off; 10500 #endif 10501 10502 /* convert ctx uses the fact sg element is first in struct */ 10503 BUILD_BUG_ON(offsetof(struct sk_msg, sg) != 0); 10504 10505 switch (si->off) { 10506 case offsetof(struct sk_msg_md, data): 10507 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data), 10508 si->dst_reg, si->src_reg, 10509 offsetof(struct sk_msg, data)); 10510 break; 10511 case offsetof(struct sk_msg_md, data_end): 10512 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data_end), 10513 si->dst_reg, si->src_reg, 10514 offsetof(struct sk_msg, data_end)); 10515 break; 10516 case offsetof(struct sk_msg_md, family): 10517 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2); 10518 10519 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10520 struct sk_msg, sk), 10521 si->dst_reg, si->src_reg, 10522 offsetof(struct sk_msg, sk)); 10523 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 10524 offsetof(struct sock_common, skc_family)); 10525 break; 10526 10527 case offsetof(struct sk_msg_md, remote_ip4): 10528 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4); 10529 10530 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10531 struct sk_msg, sk), 10532 si->dst_reg, si->src_reg, 10533 offsetof(struct sk_msg, sk)); 10534 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 10535 offsetof(struct sock_common, skc_daddr)); 10536 break; 10537 10538 case offsetof(struct sk_msg_md, local_ip4): 10539 BUILD_BUG_ON(sizeof_field(struct sock_common, 10540 skc_rcv_saddr) != 4); 10541 10542 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10543 struct sk_msg, sk), 10544 si->dst_reg, si->src_reg, 10545 offsetof(struct sk_msg, sk)); 10546 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 10547 offsetof(struct sock_common, 10548 skc_rcv_saddr)); 10549 break; 10550 10551 case offsetof(struct sk_msg_md, remote_ip6[0]) ... 
10552 offsetof(struct sk_msg_md, remote_ip6[3]): 10553 #if IS_ENABLED(CONFIG_IPV6) 10554 BUILD_BUG_ON(sizeof_field(struct sock_common, 10555 skc_v6_daddr.s6_addr32[0]) != 4); 10556 10557 off = si->off; 10558 off -= offsetof(struct sk_msg_md, remote_ip6[0]); 10559 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10560 struct sk_msg, sk), 10561 si->dst_reg, si->src_reg, 10562 offsetof(struct sk_msg, sk)); 10563 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 10564 offsetof(struct sock_common, 10565 skc_v6_daddr.s6_addr32[0]) + 10566 off); 10567 #else 10568 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 10569 #endif 10570 break; 10571 10572 case offsetof(struct sk_msg_md, local_ip6[0]) ... 10573 offsetof(struct sk_msg_md, local_ip6[3]): 10574 #if IS_ENABLED(CONFIG_IPV6) 10575 BUILD_BUG_ON(sizeof_field(struct sock_common, 10576 skc_v6_rcv_saddr.s6_addr32[0]) != 4); 10577 10578 off = si->off; 10579 off -= offsetof(struct sk_msg_md, local_ip6[0]); 10580 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10581 struct sk_msg, sk), 10582 si->dst_reg, si->src_reg, 10583 offsetof(struct sk_msg, sk)); 10584 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 10585 offsetof(struct sock_common, 10586 skc_v6_rcv_saddr.s6_addr32[0]) + 10587 off); 10588 #else 10589 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 10590 #endif 10591 break; 10592 10593 case offsetof(struct sk_msg_md, remote_port): 10594 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2); 10595 10596 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10597 struct sk_msg, sk), 10598 si->dst_reg, si->src_reg, 10599 offsetof(struct sk_msg, sk)); 10600 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 10601 offsetof(struct sock_common, skc_dport)); 10602 #ifndef __BIG_ENDIAN_BITFIELD 10603 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16); 10604 #endif 10605 break; 10606 10607 case offsetof(struct sk_msg_md, local_port): 10608 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2); 10609 10610 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 10611 struct sk_msg, sk), 10612 si->dst_reg, si->src_reg, 10613 offsetof(struct sk_msg, sk)); 10614 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 10615 offsetof(struct sock_common, skc_num)); 10616 break; 10617 10618 case offsetof(struct sk_msg_md, size): 10619 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_sg, size), 10620 si->dst_reg, si->src_reg, 10621 offsetof(struct sk_msg_sg, size)); 10622 break; 10623 10624 case offsetof(struct sk_msg_md, sk): 10625 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, sk), 10626 si->dst_reg, si->src_reg, 10627 offsetof(struct sk_msg, sk)); 10628 break; 10629 } 10630 10631 return insn - insn_buf; 10632 } 10633 10634 const struct bpf_verifier_ops sk_filter_verifier_ops = { 10635 .get_func_proto = sk_filter_func_proto, 10636 .is_valid_access = sk_filter_is_valid_access, 10637 .convert_ctx_access = bpf_convert_ctx_access, 10638 .gen_ld_abs = bpf_gen_ld_abs, 10639 }; 10640 10641 const struct bpf_prog_ops sk_filter_prog_ops = { 10642 .test_run = bpf_prog_test_run_skb, 10643 }; 10644 10645 const struct bpf_verifier_ops tc_cls_act_verifier_ops = { 10646 .get_func_proto = tc_cls_act_func_proto, 10647 .is_valid_access = tc_cls_act_is_valid_access, 10648 .convert_ctx_access = tc_cls_act_convert_ctx_access, 10649 .gen_prologue = tc_cls_act_prologue, 10650 .gen_ld_abs = bpf_gen_ld_abs, 10651 .btf_struct_access = tc_cls_act_btf_struct_access, 10652 }; 10653 10654 const struct bpf_prog_ops tc_cls_act_prog_ops = { 10655 .test_run = bpf_prog_test_run_skb, 10656 }; 10657 10658 const struct 
bpf_verifier_ops xdp_verifier_ops = { 10659 .get_func_proto = xdp_func_proto, 10660 .is_valid_access = xdp_is_valid_access, 10661 .convert_ctx_access = xdp_convert_ctx_access, 10662 .gen_prologue = bpf_noop_prologue, 10663 .btf_struct_access = xdp_btf_struct_access, 10664 }; 10665 10666 const struct bpf_prog_ops xdp_prog_ops = { 10667 .test_run = bpf_prog_test_run_xdp, 10668 }; 10669 10670 const struct bpf_verifier_ops cg_skb_verifier_ops = { 10671 .get_func_proto = cg_skb_func_proto, 10672 .is_valid_access = cg_skb_is_valid_access, 10673 .convert_ctx_access = bpf_convert_ctx_access, 10674 }; 10675 10676 const struct bpf_prog_ops cg_skb_prog_ops = { 10677 .test_run = bpf_prog_test_run_skb, 10678 }; 10679 10680 const struct bpf_verifier_ops lwt_in_verifier_ops = { 10681 .get_func_proto = lwt_in_func_proto, 10682 .is_valid_access = lwt_is_valid_access, 10683 .convert_ctx_access = bpf_convert_ctx_access, 10684 }; 10685 10686 const struct bpf_prog_ops lwt_in_prog_ops = { 10687 .test_run = bpf_prog_test_run_skb, 10688 }; 10689 10690 const struct bpf_verifier_ops lwt_out_verifier_ops = { 10691 .get_func_proto = lwt_out_func_proto, 10692 .is_valid_access = lwt_is_valid_access, 10693 .convert_ctx_access = bpf_convert_ctx_access, 10694 }; 10695 10696 const struct bpf_prog_ops lwt_out_prog_ops = { 10697 .test_run = bpf_prog_test_run_skb, 10698 }; 10699 10700 const struct bpf_verifier_ops lwt_xmit_verifier_ops = { 10701 .get_func_proto = lwt_xmit_func_proto, 10702 .is_valid_access = lwt_is_valid_access, 10703 .convert_ctx_access = bpf_convert_ctx_access, 10704 .gen_prologue = tc_cls_act_prologue, 10705 }; 10706 10707 const struct bpf_prog_ops lwt_xmit_prog_ops = { 10708 .test_run = bpf_prog_test_run_skb, 10709 }; 10710 10711 const struct bpf_verifier_ops lwt_seg6local_verifier_ops = { 10712 .get_func_proto = lwt_seg6local_func_proto, 10713 .is_valid_access = lwt_is_valid_access, 10714 .convert_ctx_access = bpf_convert_ctx_access, 10715 }; 10716 10717 const struct bpf_prog_ops lwt_seg6local_prog_ops = { 10718 .test_run = bpf_prog_test_run_skb, 10719 }; 10720 10721 const struct bpf_verifier_ops cg_sock_verifier_ops = { 10722 .get_func_proto = sock_filter_func_proto, 10723 .is_valid_access = sock_filter_is_valid_access, 10724 .convert_ctx_access = bpf_sock_convert_ctx_access, 10725 }; 10726 10727 const struct bpf_prog_ops cg_sock_prog_ops = { 10728 }; 10729 10730 const struct bpf_verifier_ops cg_sock_addr_verifier_ops = { 10731 .get_func_proto = sock_addr_func_proto, 10732 .is_valid_access = sock_addr_is_valid_access, 10733 .convert_ctx_access = sock_addr_convert_ctx_access, 10734 }; 10735 10736 const struct bpf_prog_ops cg_sock_addr_prog_ops = { 10737 }; 10738 10739 const struct bpf_verifier_ops sock_ops_verifier_ops = { 10740 .get_func_proto = sock_ops_func_proto, 10741 .is_valid_access = sock_ops_is_valid_access, 10742 .convert_ctx_access = sock_ops_convert_ctx_access, 10743 }; 10744 10745 const struct bpf_prog_ops sock_ops_prog_ops = { 10746 }; 10747 10748 const struct bpf_verifier_ops sk_skb_verifier_ops = { 10749 .get_func_proto = sk_skb_func_proto, 10750 .is_valid_access = sk_skb_is_valid_access, 10751 .convert_ctx_access = sk_skb_convert_ctx_access, 10752 .gen_prologue = sk_skb_prologue, 10753 }; 10754 10755 const struct bpf_prog_ops sk_skb_prog_ops = { 10756 }; 10757 10758 const struct bpf_verifier_ops sk_msg_verifier_ops = { 10759 .get_func_proto = sk_msg_func_proto, 10760 .is_valid_access = sk_msg_is_valid_access, 10761 .convert_ctx_access = sk_msg_convert_ctx_access, 10762 .gen_prologue = 
bpf_noop_prologue, 10763 }; 10764 10765 const struct bpf_prog_ops sk_msg_prog_ops = { 10766 }; 10767 10768 const struct bpf_verifier_ops flow_dissector_verifier_ops = { 10769 .get_func_proto = flow_dissector_func_proto, 10770 .is_valid_access = flow_dissector_is_valid_access, 10771 .convert_ctx_access = flow_dissector_convert_ctx_access, 10772 }; 10773 10774 const struct bpf_prog_ops flow_dissector_prog_ops = { 10775 .test_run = bpf_prog_test_run_flow_dissector, 10776 }; 10777 10778 int sk_detach_filter(struct sock *sk) 10779 { 10780 int ret = -ENOENT; 10781 struct sk_filter *filter; 10782 10783 if (sock_flag(sk, SOCK_FILTER_LOCKED)) 10784 return -EPERM; 10785 10786 filter = rcu_dereference_protected(sk->sk_filter, 10787 lockdep_sock_is_held(sk)); 10788 if (filter) { 10789 RCU_INIT_POINTER(sk->sk_filter, NULL); 10790 sk_filter_uncharge(sk, filter); 10791 ret = 0; 10792 } 10793 10794 return ret; 10795 } 10796 EXPORT_SYMBOL_GPL(sk_detach_filter); 10797 10798 int sk_get_filter(struct sock *sk, sockptr_t optval, unsigned int len) 10799 { 10800 struct sock_fprog_kern *fprog; 10801 struct sk_filter *filter; 10802 int ret = 0; 10803 10804 sockopt_lock_sock(sk); 10805 filter = rcu_dereference_protected(sk->sk_filter, 10806 lockdep_sock_is_held(sk)); 10807 if (!filter) 10808 goto out; 10809 10810 /* We're copying the filter that has been originally attached, 10811 * so no conversion/decode needed anymore. eBPF programs that 10812 * have no original program cannot be dumped through this. 10813 */ 10814 ret = -EACCES; 10815 fprog = filter->prog->orig_prog; 10816 if (!fprog) 10817 goto out; 10818 10819 ret = fprog->len; 10820 if (!len) 10821 /* User space only enquires number of filter blocks. */ 10822 goto out; 10823 10824 ret = -EINVAL; 10825 if (len < fprog->len) 10826 goto out; 10827 10828 ret = -EFAULT; 10829 if (copy_to_sockptr(optval, fprog->filter, bpf_classic_proglen(fprog))) 10830 goto out; 10831 10832 /* Instead of bytes, the API requests to return the number 10833 * of filter blocks. 
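 * This mirrors the classic SO_ATTACH_FILTER/SO_GET_FILTER convention:
 * both the optlen passed in and the value returned are counted in
 * struct sock_filter blocks, while the copy above converts blocks to
 * bytes via bpf_classic_proglen(). User space typically calls
 * getsockopt(SO_GET_FILTER) first with a zero length to learn the
 * block count, then again with a suitably sized buffer.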
10834 */ 10835 ret = fprog->len; 10836 out: 10837 sockopt_release_sock(sk); 10838 return ret; 10839 } 10840 10841 #ifdef CONFIG_INET 10842 static void bpf_init_reuseport_kern(struct sk_reuseport_kern *reuse_kern, 10843 struct sock_reuseport *reuse, 10844 struct sock *sk, struct sk_buff *skb, 10845 struct sock *migrating_sk, 10846 u32 hash) 10847 { 10848 reuse_kern->skb = skb; 10849 reuse_kern->sk = sk; 10850 reuse_kern->selected_sk = NULL; 10851 reuse_kern->migrating_sk = migrating_sk; 10852 reuse_kern->data_end = skb->data + skb_headlen(skb); 10853 reuse_kern->hash = hash; 10854 reuse_kern->reuseport_id = reuse->reuseport_id; 10855 reuse_kern->bind_inany = reuse->bind_inany; 10856 } 10857 10858 struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, 10859 struct bpf_prog *prog, struct sk_buff *skb, 10860 struct sock *migrating_sk, 10861 u32 hash) 10862 { 10863 struct sk_reuseport_kern reuse_kern; 10864 enum sk_action action; 10865 10866 bpf_init_reuseport_kern(&reuse_kern, reuse, sk, skb, migrating_sk, hash); 10867 action = bpf_prog_run(prog, &reuse_kern); 10868 10869 if (action == SK_PASS) 10870 return reuse_kern.selected_sk; 10871 else 10872 return ERR_PTR(-ECONNREFUSED); 10873 } 10874 10875 BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern, 10876 struct bpf_map *, map, void *, key, u32, flags) 10877 { 10878 bool is_sockarray = map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY; 10879 struct sock_reuseport *reuse; 10880 struct sock *selected_sk; 10881 10882 selected_sk = map->ops->map_lookup_elem(map, key); 10883 if (!selected_sk) 10884 return -ENOENT; 10885 10886 reuse = rcu_dereference(selected_sk->sk_reuseport_cb); 10887 if (!reuse) { 10888 /* Lookup in sock_map can return TCP ESTABLISHED sockets. */ 10889 if (sk_is_refcounted(selected_sk)) 10890 sock_put(selected_sk); 10891 10892 /* reuseport_array has only sk with non NULL sk_reuseport_cb. 10893 * The only (!reuse) case here is - the sk has already been 10894 * unhashed (e.g. by close()), so treat it as -ENOENT. 10895 * 10896 * Other maps (e.g. sock_map) do not provide this guarantee and 10897 * the sk may never be in the reuseport group to begin with. 10898 */ 10899 return is_sockarray ? -ENOENT : -EINVAL; 10900 } 10901 10902 if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) { 10903 struct sock *sk = reuse_kern->sk; 10904 10905 if (sk->sk_protocol != selected_sk->sk_protocol) 10906 return -EPROTOTYPE; 10907 else if (sk->sk_family != selected_sk->sk_family) 10908 return -EAFNOSUPPORT; 10909 10910 /* Catch all. Likely bound to a different sockaddr. 
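 * The protocol and family of the two sockets already match at this
 * point, so the remaining plausible cause for the reuseport_id mismatch
 * is that the selected socket belongs to a reuseport group bound to a
 * different local address and/or port, which is not a valid target.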
*/ 10911 return -EBADFD; 10912 } 10913 10914 reuse_kern->selected_sk = selected_sk; 10915 10916 return 0; 10917 } 10918 10919 static const struct bpf_func_proto sk_select_reuseport_proto = { 10920 .func = sk_select_reuseport, 10921 .gpl_only = false, 10922 .ret_type = RET_INTEGER, 10923 .arg1_type = ARG_PTR_TO_CTX, 10924 .arg2_type = ARG_CONST_MAP_PTR, 10925 .arg3_type = ARG_PTR_TO_MAP_KEY, 10926 .arg4_type = ARG_ANYTHING, 10927 }; 10928 10929 BPF_CALL_4(sk_reuseport_load_bytes, 10930 const struct sk_reuseport_kern *, reuse_kern, u32, offset, 10931 void *, to, u32, len) 10932 { 10933 return ____bpf_skb_load_bytes(reuse_kern->skb, offset, to, len); 10934 } 10935 10936 static const struct bpf_func_proto sk_reuseport_load_bytes_proto = { 10937 .func = sk_reuseport_load_bytes, 10938 .gpl_only = false, 10939 .ret_type = RET_INTEGER, 10940 .arg1_type = ARG_PTR_TO_CTX, 10941 .arg2_type = ARG_ANYTHING, 10942 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 10943 .arg4_type = ARG_CONST_SIZE, 10944 }; 10945 10946 BPF_CALL_5(sk_reuseport_load_bytes_relative, 10947 const struct sk_reuseport_kern *, reuse_kern, u32, offset, 10948 void *, to, u32, len, u32, start_header) 10949 { 10950 return ____bpf_skb_load_bytes_relative(reuse_kern->skb, offset, to, 10951 len, start_header); 10952 } 10953 10954 static const struct bpf_func_proto sk_reuseport_load_bytes_relative_proto = { 10955 .func = sk_reuseport_load_bytes_relative, 10956 .gpl_only = false, 10957 .ret_type = RET_INTEGER, 10958 .arg1_type = ARG_PTR_TO_CTX, 10959 .arg2_type = ARG_ANYTHING, 10960 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 10961 .arg4_type = ARG_CONST_SIZE, 10962 .arg5_type = ARG_ANYTHING, 10963 }; 10964 10965 static const struct bpf_func_proto * 10966 sk_reuseport_func_proto(enum bpf_func_id func_id, 10967 const struct bpf_prog *prog) 10968 { 10969 switch (func_id) { 10970 case BPF_FUNC_sk_select_reuseport: 10971 return &sk_select_reuseport_proto; 10972 case BPF_FUNC_skb_load_bytes: 10973 return &sk_reuseport_load_bytes_proto; 10974 case BPF_FUNC_skb_load_bytes_relative: 10975 return &sk_reuseport_load_bytes_relative_proto; 10976 case BPF_FUNC_get_socket_cookie: 10977 return &bpf_get_socket_ptr_cookie_proto; 10978 case BPF_FUNC_ktime_get_coarse_ns: 10979 return &bpf_ktime_get_coarse_ns_proto; 10980 default: 10981 return bpf_base_func_proto(func_id); 10982 } 10983 } 10984 10985 static bool 10986 sk_reuseport_is_valid_access(int off, int size, 10987 enum bpf_access_type type, 10988 const struct bpf_prog *prog, 10989 struct bpf_insn_access_aux *info) 10990 { 10991 const u32 size_default = sizeof(__u32); 10992 10993 if (off < 0 || off >= sizeof(struct sk_reuseport_md) || 10994 off % size || type != BPF_READ) 10995 return false; 10996 10997 switch (off) { 10998 case offsetof(struct sk_reuseport_md, data): 10999 info->reg_type = PTR_TO_PACKET; 11000 return size == sizeof(__u64); 11001 11002 case offsetof(struct sk_reuseport_md, data_end): 11003 info->reg_type = PTR_TO_PACKET_END; 11004 return size == sizeof(__u64); 11005 11006 case offsetof(struct sk_reuseport_md, hash): 11007 return size == size_default; 11008 11009 case offsetof(struct sk_reuseport_md, sk): 11010 info->reg_type = PTR_TO_SOCKET; 11011 return size == sizeof(__u64); 11012 11013 case offsetof(struct sk_reuseport_md, migrating_sk): 11014 info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL; 11015 return size == sizeof(__u64); 11016 11017 /* Fields that allow narrowing */ 11018 case bpf_ctx_range(struct sk_reuseport_md, eth_protocol): 11019 if (size < sizeof_field(struct sk_buff, protocol)) 11020 return 
false; 11021 fallthrough; 11022 case bpf_ctx_range(struct sk_reuseport_md, ip_protocol): 11023 case bpf_ctx_range(struct sk_reuseport_md, bind_inany): 11024 case bpf_ctx_range(struct sk_reuseport_md, len): 11025 bpf_ctx_record_field_size(info, size_default); 11026 return bpf_ctx_narrow_access_ok(off, size, size_default); 11027 11028 default: 11029 return false; 11030 } 11031 } 11032 11033 #define SK_REUSEPORT_LOAD_FIELD(F) ({ \ 11034 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_reuseport_kern, F), \ 11035 si->dst_reg, si->src_reg, \ 11036 bpf_target_off(struct sk_reuseport_kern, F, \ 11037 sizeof_field(struct sk_reuseport_kern, F), \ 11038 target_size)); \ 11039 }) 11040 11041 #define SK_REUSEPORT_LOAD_SKB_FIELD(SKB_FIELD) \ 11042 SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern, \ 11043 struct sk_buff, \ 11044 skb, \ 11045 SKB_FIELD) 11046 11047 #define SK_REUSEPORT_LOAD_SK_FIELD(SK_FIELD) \ 11048 SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern, \ 11049 struct sock, \ 11050 sk, \ 11051 SK_FIELD) 11052 11053 static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type, 11054 const struct bpf_insn *si, 11055 struct bpf_insn *insn_buf, 11056 struct bpf_prog *prog, 11057 u32 *target_size) 11058 { 11059 struct bpf_insn *insn = insn_buf; 11060 11061 switch (si->off) { 11062 case offsetof(struct sk_reuseport_md, data): 11063 SK_REUSEPORT_LOAD_SKB_FIELD(data); 11064 break; 11065 11066 case offsetof(struct sk_reuseport_md, len): 11067 SK_REUSEPORT_LOAD_SKB_FIELD(len); 11068 break; 11069 11070 case offsetof(struct sk_reuseport_md, eth_protocol): 11071 SK_REUSEPORT_LOAD_SKB_FIELD(protocol); 11072 break; 11073 11074 case offsetof(struct sk_reuseport_md, ip_protocol): 11075 SK_REUSEPORT_LOAD_SK_FIELD(sk_protocol); 11076 break; 11077 11078 case offsetof(struct sk_reuseport_md, data_end): 11079 SK_REUSEPORT_LOAD_FIELD(data_end); 11080 break; 11081 11082 case offsetof(struct sk_reuseport_md, hash): 11083 SK_REUSEPORT_LOAD_FIELD(hash); 11084 break; 11085 11086 case offsetof(struct sk_reuseport_md, bind_inany): 11087 SK_REUSEPORT_LOAD_FIELD(bind_inany); 11088 break; 11089 11090 case offsetof(struct sk_reuseport_md, sk): 11091 SK_REUSEPORT_LOAD_FIELD(sk); 11092 break; 11093 11094 case offsetof(struct sk_reuseport_md, migrating_sk): 11095 SK_REUSEPORT_LOAD_FIELD(migrating_sk); 11096 break; 11097 } 11098 11099 return insn - insn_buf; 11100 } 11101 11102 const struct bpf_verifier_ops sk_reuseport_verifier_ops = { 11103 .get_func_proto = sk_reuseport_func_proto, 11104 .is_valid_access = sk_reuseport_is_valid_access, 11105 .convert_ctx_access = sk_reuseport_convert_ctx_access, 11106 }; 11107 11108 const struct bpf_prog_ops sk_reuseport_prog_ops = { 11109 }; 11110 11111 DEFINE_STATIC_KEY_FALSE(bpf_sk_lookup_enabled); 11112 EXPORT_SYMBOL(bpf_sk_lookup_enabled); 11113 11114 BPF_CALL_3(bpf_sk_lookup_assign, struct bpf_sk_lookup_kern *, ctx, 11115 struct sock *, sk, u64, flags) 11116 { 11117 if (unlikely(flags & ~(BPF_SK_LOOKUP_F_REPLACE | 11118 BPF_SK_LOOKUP_F_NO_REUSEPORT))) 11119 return -EINVAL; 11120 if (unlikely(sk && sk_is_refcounted(sk))) 11121 return -ESOCKTNOSUPPORT; /* reject non-RCU freed sockets */ 11122 if (unlikely(sk && sk_is_tcp(sk) && sk->sk_state != TCP_LISTEN)) 11123 return -ESOCKTNOSUPPORT; /* only accept TCP socket in LISTEN */ 11124 if (unlikely(sk && sk_is_udp(sk) && sk->sk_state != TCP_CLOSE)) 11125 return -ESOCKTNOSUPPORT; /* only accept UDP socket in CLOSE */ 11126 11127 /* Check if socket is suitable for packet L3/L4 protocol */ 11128 if (sk && sk->sk_protocol != 
ctx->protocol) 11129 return -EPROTOTYPE; 11130 if (sk && sk->sk_family != ctx->family && 11131 (sk->sk_family == AF_INET || ipv6_only_sock(sk))) 11132 return -EAFNOSUPPORT; 11133 11134 if (ctx->selected_sk && !(flags & BPF_SK_LOOKUP_F_REPLACE)) 11135 return -EEXIST; 11136 11137 /* Select socket as lookup result */ 11138 ctx->selected_sk = sk; 11139 ctx->no_reuseport = flags & BPF_SK_LOOKUP_F_NO_REUSEPORT; 11140 return 0; 11141 } 11142 11143 static const struct bpf_func_proto bpf_sk_lookup_assign_proto = { 11144 .func = bpf_sk_lookup_assign, 11145 .gpl_only = false, 11146 .ret_type = RET_INTEGER, 11147 .arg1_type = ARG_PTR_TO_CTX, 11148 .arg2_type = ARG_PTR_TO_SOCKET_OR_NULL, 11149 .arg3_type = ARG_ANYTHING, 11150 }; 11151 11152 static const struct bpf_func_proto * 11153 sk_lookup_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 11154 { 11155 switch (func_id) { 11156 case BPF_FUNC_perf_event_output: 11157 return &bpf_event_output_data_proto; 11158 case BPF_FUNC_sk_assign: 11159 return &bpf_sk_lookup_assign_proto; 11160 case BPF_FUNC_sk_release: 11161 return &bpf_sk_release_proto; 11162 default: 11163 return bpf_sk_base_func_proto(func_id); 11164 } 11165 } 11166 11167 static bool sk_lookup_is_valid_access(int off, int size, 11168 enum bpf_access_type type, 11169 const struct bpf_prog *prog, 11170 struct bpf_insn_access_aux *info) 11171 { 11172 if (off < 0 || off >= sizeof(struct bpf_sk_lookup)) 11173 return false; 11174 if (off % size != 0) 11175 return false; 11176 if (type != BPF_READ) 11177 return false; 11178 11179 switch (off) { 11180 case offsetof(struct bpf_sk_lookup, sk): 11181 info->reg_type = PTR_TO_SOCKET_OR_NULL; 11182 return size == sizeof(__u64); 11183 11184 case bpf_ctx_range(struct bpf_sk_lookup, family): 11185 case bpf_ctx_range(struct bpf_sk_lookup, protocol): 11186 case bpf_ctx_range(struct bpf_sk_lookup, remote_ip4): 11187 case bpf_ctx_range(struct bpf_sk_lookup, local_ip4): 11188 case bpf_ctx_range_till(struct bpf_sk_lookup, remote_ip6[0], remote_ip6[3]): 11189 case bpf_ctx_range_till(struct bpf_sk_lookup, local_ip6[0], local_ip6[3]): 11190 case bpf_ctx_range(struct bpf_sk_lookup, local_port): 11191 case bpf_ctx_range(struct bpf_sk_lookup, ingress_ifindex): 11192 bpf_ctx_record_field_size(info, sizeof(__u32)); 11193 return bpf_ctx_narrow_access_ok(off, size, sizeof(__u32)); 11194 11195 case bpf_ctx_range(struct bpf_sk_lookup, remote_port): 11196 /* Allow 4-byte access to 2-byte field for backward compatibility */ 11197 if (size == sizeof(__u32)) 11198 return true; 11199 bpf_ctx_record_field_size(info, sizeof(__be16)); 11200 return bpf_ctx_narrow_access_ok(off, size, sizeof(__be16)); 11201 11202 case offsetofend(struct bpf_sk_lookup, remote_port) ... 
11203 offsetof(struct bpf_sk_lookup, local_ip4) - 1: 11204 /* Allow access to zero padding for backward compatibility */ 11205 bpf_ctx_record_field_size(info, sizeof(__u16)); 11206 return bpf_ctx_narrow_access_ok(off, size, sizeof(__u16)); 11207 11208 default: 11209 return false; 11210 } 11211 } 11212 11213 static u32 sk_lookup_convert_ctx_access(enum bpf_access_type type, 11214 const struct bpf_insn *si, 11215 struct bpf_insn *insn_buf, 11216 struct bpf_prog *prog, 11217 u32 *target_size) 11218 { 11219 struct bpf_insn *insn = insn_buf; 11220 11221 switch (si->off) { 11222 case offsetof(struct bpf_sk_lookup, sk): 11223 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg, 11224 offsetof(struct bpf_sk_lookup_kern, selected_sk)); 11225 break; 11226 11227 case offsetof(struct bpf_sk_lookup, family): 11228 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, 11229 bpf_target_off(struct bpf_sk_lookup_kern, 11230 family, 2, target_size)); 11231 break; 11232 11233 case offsetof(struct bpf_sk_lookup, protocol): 11234 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, 11235 bpf_target_off(struct bpf_sk_lookup_kern, 11236 protocol, 2, target_size)); 11237 break; 11238 11239 case offsetof(struct bpf_sk_lookup, remote_ip4): 11240 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 11241 bpf_target_off(struct bpf_sk_lookup_kern, 11242 v4.saddr, 4, target_size)); 11243 break; 11244 11245 case offsetof(struct bpf_sk_lookup, local_ip4): 11246 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 11247 bpf_target_off(struct bpf_sk_lookup_kern, 11248 v4.daddr, 4, target_size)); 11249 break; 11250 11251 case bpf_ctx_range_till(struct bpf_sk_lookup, 11252 remote_ip6[0], remote_ip6[3]): { 11253 #if IS_ENABLED(CONFIG_IPV6) 11254 int off = si->off; 11255 11256 off -= offsetof(struct bpf_sk_lookup, remote_ip6[0]); 11257 off += bpf_target_off(struct in6_addr, s6_addr32[0], 4, target_size); 11258 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg, 11259 offsetof(struct bpf_sk_lookup_kern, v6.saddr)); 11260 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); 11261 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, off); 11262 #else 11263 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 11264 #endif 11265 break; 11266 } 11267 case bpf_ctx_range_till(struct bpf_sk_lookup, 11268 local_ip6[0], local_ip6[3]): { 11269 #if IS_ENABLED(CONFIG_IPV6) 11270 int off = si->off; 11271 11272 off -= offsetof(struct bpf_sk_lookup, local_ip6[0]); 11273 off += bpf_target_off(struct in6_addr, s6_addr32[0], 4, target_size); 11274 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg, 11275 offsetof(struct bpf_sk_lookup_kern, v6.daddr)); 11276 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); 11277 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, off); 11278 #else 11279 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 11280 #endif 11281 break; 11282 } 11283 case offsetof(struct bpf_sk_lookup, remote_port): 11284 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, 11285 bpf_target_off(struct bpf_sk_lookup_kern, 11286 sport, 2, target_size)); 11287 break; 11288 11289 case offsetofend(struct bpf_sk_lookup, remote_port): 11290 *target_size = 2; 11291 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); 11292 break; 11293 11294 case offsetof(struct bpf_sk_lookup, local_port): 11295 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, 11296 bpf_target_off(struct bpf_sk_lookup_kern, 11297 dport, 2, target_size)); 11298 break; 11299 11300 case offsetof(struct bpf_sk_lookup, ingress_ifindex): 11301 *insn++ = 
BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, 11302 bpf_target_off(struct bpf_sk_lookup_kern, 11303 ingress_ifindex, 4, target_size)); 11304 break; 11305 } 11306 11307 return insn - insn_buf; 11308 } 11309 11310 const struct bpf_prog_ops sk_lookup_prog_ops = { 11311 .test_run = bpf_prog_test_run_sk_lookup, 11312 }; 11313 11314 const struct bpf_verifier_ops sk_lookup_verifier_ops = { 11315 .get_func_proto = sk_lookup_func_proto, 11316 .is_valid_access = sk_lookup_is_valid_access, 11317 .convert_ctx_access = sk_lookup_convert_ctx_access, 11318 }; 11319 11320 #endif /* CONFIG_INET */ 11321 11322 DEFINE_BPF_DISPATCHER(xdp) 11323 11324 void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog) 11325 { 11326 bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, prog); 11327 } 11328 11329 BTF_ID_LIST_GLOBAL(btf_sock_ids, MAX_BTF_SOCK_TYPE) 11330 #define BTF_SOCK_TYPE(name, type) BTF_ID(struct, type) 11331 BTF_SOCK_TYPE_xxx 11332 #undef BTF_SOCK_TYPE 11333 11334 BPF_CALL_1(bpf_skc_to_tcp6_sock, struct sock *, sk) 11335 { 11336 /* tcp6_sock type is not generated in dwarf and hence btf, 11337 * trigger an explicit type generation here. 11338 */ 11339 BTF_TYPE_EMIT(struct tcp6_sock); 11340 if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP && 11341 sk->sk_family == AF_INET6) 11342 return (unsigned long)sk; 11343 11344 return (unsigned long)NULL; 11345 } 11346 11347 const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto = { 11348 .func = bpf_skc_to_tcp6_sock, 11349 .gpl_only = false, 11350 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, 11351 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 11352 .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP6], 11353 }; 11354 11355 BPF_CALL_1(bpf_skc_to_tcp_sock, struct sock *, sk) 11356 { 11357 if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP) 11358 return (unsigned long)sk; 11359 11360 return (unsigned long)NULL; 11361 } 11362 11363 const struct bpf_func_proto bpf_skc_to_tcp_sock_proto = { 11364 .func = bpf_skc_to_tcp_sock, 11365 .gpl_only = false, 11366 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, 11367 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 11368 .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP], 11369 }; 11370 11371 BPF_CALL_1(bpf_skc_to_tcp_timewait_sock, struct sock *, sk) 11372 { 11373 /* BTF types for tcp_timewait_sock and inet_timewait_sock are not 11374 * generated if CONFIG_INET=n. Trigger an explicit generation here. 
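 * (BTF_TYPE_EMIT() only references the type so that DWARF, and hence
 * BTF, is generated for it even when nothing else uses the type
 * directly; it does not emit any code at run time.)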
11375 */ 11376 BTF_TYPE_EMIT(struct inet_timewait_sock); 11377 BTF_TYPE_EMIT(struct tcp_timewait_sock); 11378 11379 #ifdef CONFIG_INET 11380 if (sk && sk->sk_prot == &tcp_prot && sk->sk_state == TCP_TIME_WAIT) 11381 return (unsigned long)sk; 11382 #endif 11383 11384 #if IS_BUILTIN(CONFIG_IPV6) 11385 if (sk && sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_TIME_WAIT) 11386 return (unsigned long)sk; 11387 #endif 11388 11389 return (unsigned long)NULL; 11390 } 11391 11392 const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto = { 11393 .func = bpf_skc_to_tcp_timewait_sock, 11394 .gpl_only = false, 11395 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, 11396 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 11397 .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_TW], 11398 }; 11399 11400 BPF_CALL_1(bpf_skc_to_tcp_request_sock, struct sock *, sk) 11401 { 11402 #ifdef CONFIG_INET 11403 if (sk && sk->sk_prot == &tcp_prot && sk->sk_state == TCP_NEW_SYN_RECV) 11404 return (unsigned long)sk; 11405 #endif 11406 11407 #if IS_BUILTIN(CONFIG_IPV6) 11408 if (sk && sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_NEW_SYN_RECV) 11409 return (unsigned long)sk; 11410 #endif 11411 11412 return (unsigned long)NULL; 11413 } 11414 11415 const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto = { 11416 .func = bpf_skc_to_tcp_request_sock, 11417 .gpl_only = false, 11418 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, 11419 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 11420 .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_REQ], 11421 }; 11422 11423 BPF_CALL_1(bpf_skc_to_udp6_sock, struct sock *, sk) 11424 { 11425 /* udp6_sock type is not generated in dwarf and hence btf, 11426 * trigger an explicit type generation here. 11427 */ 11428 BTF_TYPE_EMIT(struct udp6_sock); 11429 if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_UDP && 11430 sk->sk_type == SOCK_DGRAM && sk->sk_family == AF_INET6) 11431 return (unsigned long)sk; 11432 11433 return (unsigned long)NULL; 11434 } 11435 11436 const struct bpf_func_proto bpf_skc_to_udp6_sock_proto = { 11437 .func = bpf_skc_to_udp6_sock, 11438 .gpl_only = false, 11439 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, 11440 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 11441 .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_UDP6], 11442 }; 11443 11444 BPF_CALL_1(bpf_skc_to_unix_sock, struct sock *, sk) 11445 { 11446 /* unix_sock type is not generated in dwarf and hence btf, 11447 * trigger an explicit type generation here. 
11448 */ 11449 BTF_TYPE_EMIT(struct unix_sock); 11450 if (sk && sk_fullsock(sk) && sk->sk_family == AF_UNIX) 11451 return (unsigned long)sk; 11452 11453 return (unsigned long)NULL; 11454 } 11455 11456 const struct bpf_func_proto bpf_skc_to_unix_sock_proto = { 11457 .func = bpf_skc_to_unix_sock, 11458 .gpl_only = false, 11459 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, 11460 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 11461 .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_UNIX], 11462 }; 11463 11464 BPF_CALL_1(bpf_skc_to_mptcp_sock, struct sock *, sk) 11465 { 11466 BTF_TYPE_EMIT(struct mptcp_sock); 11467 return (unsigned long)bpf_mptcp_sock_from_subflow(sk); 11468 } 11469 11470 const struct bpf_func_proto bpf_skc_to_mptcp_sock_proto = { 11471 .func = bpf_skc_to_mptcp_sock, 11472 .gpl_only = false, 11473 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, 11474 .arg1_type = ARG_PTR_TO_SOCK_COMMON, 11475 .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_MPTCP], 11476 }; 11477 11478 BPF_CALL_1(bpf_sock_from_file, struct file *, file) 11479 { 11480 return (unsigned long)sock_from_file(file); 11481 } 11482 11483 BTF_ID_LIST(bpf_sock_from_file_btf_ids) 11484 BTF_ID(struct, socket) 11485 BTF_ID(struct, file) 11486 11487 const struct bpf_func_proto bpf_sock_from_file_proto = { 11488 .func = bpf_sock_from_file, 11489 .gpl_only = false, 11490 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, 11491 .ret_btf_id = &bpf_sock_from_file_btf_ids[0], 11492 .arg1_type = ARG_PTR_TO_BTF_ID, 11493 .arg1_btf_id = &bpf_sock_from_file_btf_ids[1], 11494 }; 11495 11496 static const struct bpf_func_proto * 11497 bpf_sk_base_func_proto(enum bpf_func_id func_id) 11498 { 11499 const struct bpf_func_proto *func; 11500 11501 switch (func_id) { 11502 case BPF_FUNC_skc_to_tcp6_sock: 11503 func = &bpf_skc_to_tcp6_sock_proto; 11504 break; 11505 case BPF_FUNC_skc_to_tcp_sock: 11506 func = &bpf_skc_to_tcp_sock_proto; 11507 break; 11508 case BPF_FUNC_skc_to_tcp_timewait_sock: 11509 func = &bpf_skc_to_tcp_timewait_sock_proto; 11510 break; 11511 case BPF_FUNC_skc_to_tcp_request_sock: 11512 func = &bpf_skc_to_tcp_request_sock_proto; 11513 break; 11514 case BPF_FUNC_skc_to_udp6_sock: 11515 func = &bpf_skc_to_udp6_sock_proto; 11516 break; 11517 case BPF_FUNC_skc_to_unix_sock: 11518 func = &bpf_skc_to_unix_sock_proto; 11519 break; 11520 case BPF_FUNC_skc_to_mptcp_sock: 11521 func = &bpf_skc_to_mptcp_sock_proto; 11522 break; 11523 case BPF_FUNC_ktime_get_coarse_ns: 11524 return &bpf_ktime_get_coarse_ns_proto; 11525 default: 11526 return bpf_base_func_proto(func_id); 11527 } 11528 11529 if (!perfmon_capable()) 11530 return NULL; 11531 11532 return func; 11533 } 11534