// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/skmsg.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/sock_diag.h>
#include <net/udp.h>

struct bpf_stab {
	struct bpf_map map;
	struct sock **sks;
	struct sk_psock_progs progs;
	raw_spinlock_t lock;
};

#define SOCK_CREATE_FLAG_MASK				\
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
				struct bpf_prog *old, u32 which);
static struct sk_psock_progs *sock_map_progs(struct bpf_map *map);

static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
{
	struct bpf_stab *stab;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->max_entries == 0 ||
	    attr->key_size != 4 ||
	    (attr->value_size != sizeof(u32) &&
	     attr->value_size != sizeof(u64)) ||
	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	stab = kzalloc(sizeof(*stab), GFP_USER | __GFP_ACCOUNT);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&stab->map, attr);
	raw_spin_lock_init(&stab->lock);

	stab->sks = bpf_map_area_alloc(stab->map.max_entries *
				       sizeof(struct sock *),
				       stab->map.numa_node);
	if (!stab->sks) {
		kfree(stab);
		return ERR_PTR(-ENOMEM);
	}

	return &stab->map;
}

int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
{
	u32 ufd = attr->target_fd;
	struct bpf_map *map;
	struct fd f;
	int ret;

	if (attr->attach_flags || attr->replace_bpf_fd)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	ret = sock_map_prog_update(map, prog, NULL, attr->attach_type);
	fdput(f);
	return ret;
}

int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	u32 ufd = attr->target_fd;
	struct bpf_prog *prog;
	struct bpf_map *map;
	struct fd f;
	int ret;

	if (attr->attach_flags || attr->replace_bpf_fd)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	prog = bpf_prog_get(attr->attach_bpf_fd);
	if (IS_ERR(prog)) {
		ret = PTR_ERR(prog);
		goto put_map;
	}

	if (prog->type != ptype) {
		ret = -EINVAL;
		goto put_prog;
	}

	ret = sock_map_prog_update(map, NULL, prog, attr->attach_type);
put_prog:
	bpf_prog_put(prog);
put_map:
	fdput(f);
	return ret;
}

static void sock_map_sk_acquire(struct sock *sk)
	__acquires(&sk->sk_lock.slock)
{
	lock_sock(sk);
	preempt_disable();
	rcu_read_lock();
}

static void sock_map_sk_release(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{
	rcu_read_unlock();
	preempt_enable();
	release_sock(sk);
}

static void sock_map_add_link(struct sk_psock *psock,
			      struct sk_psock_link *link,
			      struct bpf_map *map, void *link_raw)
{
	link->link_raw = link_raw;
	link->map = map;
	spin_lock_bh(&psock->link_lock);
	list_add_tail(&link->list, &psock->link);
	spin_unlock_bh(&psock->link_lock);
}
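
/* Drop the psock link(s) that point at the given map slot (link_raw). If the
 * unlinked map had a stream parser or verdict program installed and the
 * socket's data_ready callback was overridden, stop the corresponding
 * handler under sk_callback_lock.
 */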
static void sock_map_del_link(struct sock *sk,
			      struct sk_psock *psock, void *link_raw)
{
	bool strp_stop = false, verdict_stop = false;
	struct sk_psock_link *link, *tmp;

	spin_lock_bh(&psock->link_lock);
	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		if (link->link_raw == link_raw) {
			struct bpf_map *map = link->map;
			struct bpf_stab *stab = container_of(map, struct bpf_stab,
							     map);
			if (psock->saved_data_ready && stab->progs.stream_parser)
				strp_stop = true;
			if (psock->saved_data_ready && stab->progs.stream_verdict)
				verdict_stop = true;
			list_del(&link->list);
			sk_psock_free_link(link);
		}
	}
	spin_unlock_bh(&psock->link_lock);
	if (strp_stop || verdict_stop) {
		write_lock_bh(&sk->sk_callback_lock);
		if (strp_stop)
			sk_psock_stop_strp(sk, psock);
		else
			sk_psock_stop_verdict(sk, psock);
		write_unlock_bh(&sk->sk_callback_lock);
	}
}

static void sock_map_unref(struct sock *sk, void *link_raw)
{
	struct sk_psock *psock = sk_psock(sk);

	if (likely(psock)) {
		sock_map_del_link(sk, psock, link_raw);
		sk_psock_put(sk, psock);
	}
}

static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock)
{
	struct proto *prot;

	switch (sk->sk_type) {
	case SOCK_STREAM:
		prot = tcp_bpf_get_proto(sk, psock);
		break;

	case SOCK_DGRAM:
		prot = udp_bpf_get_proto(sk, psock);
		break;

	default:
		return -EINVAL;
	}

	if (IS_ERR(prot))
		return PTR_ERR(prot);

	sk_psock_update_proto(sk, psock, prot);
	return 0;
}

static struct sk_psock *sock_map_psock_get_checked(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock) {
		if (sk->sk_prot->close != sock_map_close) {
			psock = ERR_PTR(-EBUSY);
			goto out;
		}

		if (!refcount_inc_not_zero(&psock->refcnt))
			psock = ERR_PTR(-EBUSY);
	}
out:
	rcu_read_unlock();
	return psock;
}
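
/* Take references on the map's msg/stream programs, attach (or reuse) a
 * psock on the socket, switch its proto ops to the BPF variants and start
 * the strparser or verdict data_ready handler as needed.
 */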
static int sock_map_link(struct bpf_map *map, struct sock *sk)
{
	struct bpf_prog *msg_parser, *stream_parser, *stream_verdict;
	struct sk_psock_progs *progs = sock_map_progs(map);
	struct sk_psock *psock;
	int ret;

	stream_verdict = READ_ONCE(progs->stream_verdict);
	if (stream_verdict) {
		stream_verdict = bpf_prog_inc_not_zero(stream_verdict);
		if (IS_ERR(stream_verdict))
			return PTR_ERR(stream_verdict);
	}

	stream_parser = READ_ONCE(progs->stream_parser);
	if (stream_parser) {
		stream_parser = bpf_prog_inc_not_zero(stream_parser);
		if (IS_ERR(stream_parser)) {
			ret = PTR_ERR(stream_parser);
			goto out_put_stream_verdict;
		}
	}

	msg_parser = READ_ONCE(progs->msg_parser);
	if (msg_parser) {
		msg_parser = bpf_prog_inc_not_zero(msg_parser);
		if (IS_ERR(msg_parser)) {
			ret = PTR_ERR(msg_parser);
			goto out_put_stream_parser;
		}
	}

	psock = sock_map_psock_get_checked(sk);
	if (IS_ERR(psock)) {
		ret = PTR_ERR(psock);
		goto out_progs;
	}

	if (psock) {
		if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) ||
		    (stream_parser && READ_ONCE(psock->progs.stream_parser)) ||
		    (stream_verdict && READ_ONCE(psock->progs.stream_verdict))) {
			sk_psock_put(sk, psock);
			ret = -EBUSY;
			goto out_progs;
		}
	} else {
		psock = sk_psock_init(sk, map->numa_node);
		if (IS_ERR(psock)) {
			ret = PTR_ERR(psock);
			goto out_progs;
		}
	}

	if (msg_parser)
		psock_set_prog(&psock->progs.msg_parser, msg_parser);

	ret = sock_map_init_proto(sk, psock);
	if (ret < 0)
		goto out_drop;

	write_lock_bh(&sk->sk_callback_lock);
	if (stream_parser && stream_verdict && !psock->saved_data_ready) {
		ret = sk_psock_init_strp(sk, psock);
		if (ret)
			goto out_unlock_drop;
		psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
		psock_set_prog(&psock->progs.stream_parser, stream_parser);
		sk_psock_start_strp(sk, psock);
	} else if (!stream_parser && stream_verdict && !psock->saved_data_ready) {
		psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
		sk_psock_start_verdict(sk, psock);
	}
	write_unlock_bh(&sk->sk_callback_lock);
	return 0;
out_unlock_drop:
	write_unlock_bh(&sk->sk_callback_lock);
out_drop:
	sk_psock_put(sk, psock);
out_progs:
	if (msg_parser)
		bpf_prog_put(msg_parser);
out_put_stream_parser:
	if (stream_parser)
		bpf_prog_put(stream_parser);
out_put_stream_verdict:
	if (stream_verdict)
		bpf_prog_put(stream_verdict);
	return ret;
}

static int sock_map_link_no_progs(struct bpf_map *map, struct sock *sk)
{
	struct sk_psock *psock;
	int ret;

	psock = sock_map_psock_get_checked(sk);
	if (IS_ERR(psock))
		return PTR_ERR(psock);

	if (!psock) {
		psock = sk_psock_init(sk, map->numa_node);
		if (IS_ERR(psock))
			return PTR_ERR(psock);
	}

	ret = sock_map_init_proto(sk, psock);
	if (ret < 0)
		sk_psock_put(sk, psock);
	return ret;
}

static void sock_map_free(struct bpf_map *map)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	int i;

	/* After the sync no updates or deletes will be in-flight so it
	 * is safe to walk map and remove entries without risking a race
	 * in EEXIST update case.
	 */
	synchronize_rcu();
	for (i = 0; i < stab->map.max_entries; i++) {
		struct sock **psk = &stab->sks[i];
		struct sock *sk;

		sk = xchg(psk, NULL);
		if (sk) {
			lock_sock(sk);
			rcu_read_lock();
			sock_map_unref(sk, psk);
			rcu_read_unlock();
			release_sock(sk);
		}
	}

	/* wait for psock readers accessing its map link */
	synchronize_rcu();

	bpf_map_area_free(stab->sks);
	kfree(stab);
}

static void sock_map_release_progs(struct bpf_map *map)
{
	psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs);
}

static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(key >= map->max_entries))
		return NULL;
	return READ_ONCE(stab->sks[key]);
}

static void *sock_map_lookup(struct bpf_map *map, void *key)
{
	struct sock *sk;

	sk = __sock_map_lookup_elem(map, *(u32 *)key);
	if (!sk)
		return NULL;
	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
		return NULL;
	return sk;
}

static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
{
	struct sock *sk;

	if (map->value_size != sizeof(u64))
		return ERR_PTR(-ENOSPC);

	sk = __sock_map_lookup_elem(map, *(u32 *)key);
	if (!sk)
		return ERR_PTR(-ENOENT);

	__sock_gen_cookie(sk);
	return &sk->sk_cookie;
}

static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
			     struct sock **psk)
{
	struct sock *sk;
	int err = 0;

	raw_spin_lock_bh(&stab->lock);
	sk = *psk;
	if (!sk_test || sk_test == sk)
		sk = xchg(psk, NULL);

	if (likely(sk))
		sock_map_unref(sk, psk);
	else
		err = -EINVAL;

	raw_spin_unlock_bh(&stab->lock);
	return err;
}

static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
				      void *link_raw)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	__sock_map_delete(stab, sk, link_raw);
}

static int sock_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = *(u32 *)key;
	struct sock **psk;

	if (unlikely(i >= map->max_entries))
		return -EINVAL;

	psk = &stab->sks[i];
	return __sock_map_delete(stab, NULL, psk);
}

static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = key ? *(u32 *)key : U32_MAX;
	u32 *key_next = next;

	if (i == stab->map.max_entries - 1)
		return -ENOENT;
	if (i >= stab->map.max_entries)
		*key_next = 0;
	else
		*key_next = i + 1;
	return 0;
}

static bool sock_map_redirect_allowed(const struct sock *sk);

static int sock_map_update_common(struct bpf_map *map, u32 idx,
				  struct sock *sk, u64 flags)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct sk_psock_link *link;
	struct sk_psock *psock;
	struct sock *osk;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(idx >= map->max_entries))
		return -E2BIG;

	link = sk_psock_init_link();
	if (!link)
		return -ENOMEM;

	/* Only sockets we can redirect into/from in BPF need to hold
	 * refs to parser/verdict progs and have their sk_data_ready
	 * and sk_write_space callbacks overridden.
	 */
	if (sock_map_redirect_allowed(sk))
		ret = sock_map_link(map, sk);
	else
		ret = sock_map_link_no_progs(map, sk);
	if (ret < 0)
		goto out_free;

	psock = sk_psock(sk);
	WARN_ON_ONCE(!psock);

	raw_spin_lock_bh(&stab->lock);
	osk = stab->sks[idx];
	if (osk && flags == BPF_NOEXIST) {
		ret = -EEXIST;
		goto out_unlock;
	} else if (!osk && flags == BPF_EXIST) {
		ret = -ENOENT;
		goto out_unlock;
	}

	sock_map_add_link(psock, link, map, &stab->sks[idx]);
	stab->sks[idx] = sk;
	if (osk)
		sock_map_unref(osk, &stab->sks[idx]);
	raw_spin_unlock_bh(&stab->lock);
	return 0;
out_unlock:
	raw_spin_unlock_bh(&stab->lock);
	if (psock)
		sk_psock_put(sk, psock);
out_free:
	sk_psock_free_link(link);
	return ret;
}

static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
{
	return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
	       ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
	       ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB;
}

static bool sk_is_tcp(const struct sock *sk)
{
	return sk->sk_type == SOCK_STREAM &&
	       sk->sk_protocol == IPPROTO_TCP;
}

static bool sk_is_udp(const struct sock *sk)
{
	return sk->sk_type == SOCK_DGRAM &&
	       sk->sk_protocol == IPPROTO_UDP;
}

static bool sock_map_redirect_allowed(const struct sock *sk)
{
	return sk_is_tcp(sk) && sk->sk_state != TCP_LISTEN;
}

static bool sock_map_sk_is_suitable(const struct sock *sk)
{
	return sk_is_tcp(sk) || sk_is_udp(sk);
}

static bool sock_map_sk_state_allowed(const struct sock *sk)
{
	if (sk_is_tcp(sk))
		return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
	else if (sk_is_udp(sk))
		return sk_hashed(sk);

	return false;
}

static int sock_hash_update_common(struct bpf_map *map, void *key,
				   struct sock *sk, u64 flags);
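
/* Update path for the bpf() syscall: the map value supplied by user space is
 * a socket file descriptor (u32 or u64 sized), not a struct sock pointer.
 * Illustrative user-space sketch (assumes libbpf and an established TCP
 * socket fd "sock_fd"; not part of this file):
 *
 *	__u32 key = 0, value = sock_fd;
 *	bpf_map_update_elem(map_fd, &key, &value, BPF_ANY);
 */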
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
			     u64 flags)
{
	struct socket *sock;
	struct sock *sk;
	int ret;
	u64 ufd;

	if (map->value_size == sizeof(u64))
		ufd = *(u64 *)value;
	else
		ufd = *(u32 *)value;
	if (ufd > S32_MAX)
		return -EINVAL;

	sock = sockfd_lookup(ufd, &ret);
	if (!sock)
		return ret;
	sk = sock->sk;
	if (!sk) {
		ret = -EINVAL;
		goto out;
	}
	if (!sock_map_sk_is_suitable(sk)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	sock_map_sk_acquire(sk);
	if (!sock_map_sk_state_allowed(sk))
		ret = -EOPNOTSUPP;
	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
	else
		ret = sock_hash_update_common(map, key, sk, flags);
	sock_map_sk_release(sk);
out:
	sockfd_put(sock);
	return ret;
}

static int sock_map_update_elem(struct bpf_map *map, void *key,
				void *value, u64 flags)
{
	struct sock *sk = (struct sock *)value;
	int ret;

	if (unlikely(!sk || !sk_fullsock(sk)))
		return -EINVAL;

	if (!sock_map_sk_is_suitable(sk))
		return -EOPNOTSUPP;

	local_bh_disable();
	bh_lock_sock(sk);
	if (!sock_map_sk_state_allowed(sk))
		ret = -EOPNOTSUPP;
	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
	else
		ret = sock_hash_update_common(map, key, sk, flags);
	bh_unlock_sock(sk);
	local_bh_enable();
	return ret;
}

BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (likely(sock_map_sk_is_suitable(sops->sk) &&
		   sock_map_op_okay(sops)))
		return sock_map_update_common(map, *(u32 *)key, sops->sk,
					      flags);
	return -EOPNOTSUPP;
}

const struct bpf_func_proto bpf_sock_map_update_proto = {
	.func		= bpf_sock_map_update,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};
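
/* Helper backing bpf_sk_redirect_map(): looks up the target socket by index
 * and marks the skb for redirect on the ingress or egress path. Illustrative
 * sk_skb verdict program sketch (assumes a SOCKMAP named "sock_map"; not
 * part of this file):
 *
 *	SEC("sk_skb/stream_verdict")
 *	int prog_verdict(struct __sk_buff *skb)
 *	{
 *		__u32 idx = 0;
 *
 *		return bpf_sk_redirect_map(skb, &sock_map, idx, BPF_F_INGRESS);
 *	}
 */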
BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
	   struct bpf_map *, map, u32, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_map_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;

	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
	return SK_PASS;
}

const struct bpf_func_proto bpf_sk_redirect_map_proto = {
	.func		= bpf_sk_redirect_map,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
	   struct bpf_map *, map, u32, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_map_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;

	msg->flags = flags;
	msg->sk_redir = sk;
	return SK_PASS;
}

const struct bpf_func_proto bpf_msg_redirect_map_proto = {
	.func		= bpf_msg_redirect_map,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};

struct sock_map_seq_info {
	struct bpf_map *map;
	struct sock *sk;
	u32 index;
};

struct bpf_iter__sockmap {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(void *, key);
	__bpf_md_ptr(struct sock *, sk);
};

DEFINE_BPF_ITER_FUNC(sockmap, struct bpf_iter_meta *meta,
		     struct bpf_map *map, void *key,
		     struct sock *sk)

static void *sock_map_seq_lookup_elem(struct sock_map_seq_info *info)
{
	if (unlikely(info->index >= info->map->max_entries))
		return NULL;

	info->sk = __sock_map_lookup_elem(info->map, info->index);

	/* can't return sk directly, since that might be NULL */
	return info;
}

static void *sock_map_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	struct sock_map_seq_info *info = seq->private;

	if (*pos == 0)
		++*pos;

	/* pairs with sock_map_seq_stop */
	rcu_read_lock();
	return sock_map_seq_lookup_elem(info);
}

static void *sock_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	__must_hold(rcu)
{
	struct sock_map_seq_info *info = seq->private;

	++*pos;
	++info->index;

	return sock_map_seq_lookup_elem(info);
}

static int sock_map_seq_show(struct seq_file *seq, void *v)
	__must_hold(rcu)
{
	struct sock_map_seq_info *info = seq->private;
	struct bpf_iter__sockmap ctx = {};
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, !v);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;
		ctx.sk = info->sk;
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static void sock_map_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	if (!v)
		(void)sock_map_seq_show(seq, NULL);

	/* pairs with sock_map_seq_start */
	rcu_read_unlock();
}

static const struct seq_operations sock_map_seq_ops = {
	.start	= sock_map_seq_start,
	.next	= sock_map_seq_next,
	.stop	= sock_map_seq_stop,
	.show	= sock_map_seq_show,
};

static int sock_map_init_seq_private(void *priv_data,
				     struct bpf_iter_aux_info *aux)
{
	struct sock_map_seq_info *info = priv_data;

	info->map = aux->map;
	return 0;
}

static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
	.seq_ops		= &sock_map_seq_ops,
	.init_seq_private	= sock_map_init_seq_private,
	.seq_priv_size		= sizeof(struct sock_map_seq_info),
};

static int sock_map_btf_id;
const struct bpf_map_ops sock_map_ops = {
	.map_meta_equal		= bpf_map_meta_equal,
	.map_alloc		= sock_map_alloc,
	.map_free		= sock_map_free,
	.map_get_next_key	= sock_map_get_next_key,
	.map_lookup_elem_sys_only = sock_map_lookup_sys,
	.map_update_elem	= sock_map_update_elem,
	.map_delete_elem	= sock_map_delete_elem,
	.map_lookup_elem	= sock_map_lookup,
	.map_release_uref	= sock_map_release_progs,
	.map_check_btf		= map_check_no_btf,
	.map_btf_name		= "bpf_stab",
	.map_btf_id		= &sock_map_btf_id,
	.iter_seq_info		= &sock_map_iter_seq_info,
};

struct bpf_shtab_elem {
	struct rcu_head rcu;
	u32 hash;
	struct sock *sk;
	struct hlist_node node;
	u8 key[];
};

struct bpf_shtab_bucket {
	struct hlist_head head;
	raw_spinlock_t lock;
};

struct bpf_shtab {
	struct bpf_map map;
	struct bpf_shtab_bucket *buckets;
	u32 buckets_num;
	u32 elem_size;
	struct sk_psock_progs progs;
	atomic_t count;
};

static inline u32 sock_hash_bucket_hash(const void *key, u32 len)
{
	return jhash(key, len, 0);
}

static struct bpf_shtab_bucket *sock_hash_select_bucket(struct bpf_shtab *htab,
							 u32 hash)
{
	return &htab->buckets[hash & (htab->buckets_num - 1)];
}
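
/* RCU walk of a bucket's hlist looking for an element with a matching hash
 * and key. Called under rcu_read_lock() or with the bucket lock held.
 */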
static struct bpf_shtab_elem *
sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key,
			  u32 key_size)
{
	struct bpf_shtab_elem *elem;

	hlist_for_each_entry_rcu(elem, head, node) {
		if (elem->hash == hash &&
		    !memcmp(&elem->key, key, key_size))
			return elem;
	}

	return NULL;
}

static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u32 key_size = map->key_size, hash;
	struct bpf_shtab_bucket *bucket;
	struct bpf_shtab_elem *elem;

	WARN_ON_ONCE(!rcu_read_lock_held());

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);

	return elem ? elem->sk : NULL;
}

static void sock_hash_free_elem(struct bpf_shtab *htab,
				struct bpf_shtab_elem *elem)
{
	atomic_dec(&htab->count);
	kfree_rcu(elem, rcu);
}

static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
				       void *link_raw)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	struct bpf_shtab_elem *elem_probe, *elem = link_raw;
	struct bpf_shtab_bucket *bucket;

	WARN_ON_ONCE(!rcu_read_lock_held());
	bucket = sock_hash_select_bucket(htab, elem->hash);

	/* elem may be deleted in parallel from the map, but access here
	 * is okay since it's going away only after RCU grace period.
	 * However, we need to check whether it's still present.
	 */
	raw_spin_lock_bh(&bucket->lock);
	elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
					       elem->key, map->key_size);
	if (elem_probe && elem_probe == elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
	}
	raw_spin_unlock_bh(&bucket->lock);
}

static int sock_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u32 hash, key_size = map->key_size;
	struct bpf_shtab_bucket *bucket;
	struct bpf_shtab_elem *elem;
	int ret = -ENOENT;

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);

	raw_spin_lock_bh(&bucket->lock);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
	if (elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
		ret = 0;
	}
	raw_spin_unlock_bh(&bucket->lock);
	return ret;
}

static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab,
						   void *key, u32 key_size,
						   u32 hash, struct sock *sk,
						   struct bpf_shtab_elem *old)
{
	struct bpf_shtab_elem *new;

	if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
		if (!old) {
			atomic_dec(&htab->count);
			return ERR_PTR(-E2BIG);
		}
	}

	new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
				   GFP_ATOMIC | __GFP_NOWARN,
				   htab->map.numa_node);
	if (!new) {
		atomic_dec(&htab->count);
		return ERR_PTR(-ENOMEM);
	}
	memcpy(new->key, key, key_size);
	new->sk = sk;
	new->hash = hash;
	return new;
}

static int sock_hash_update_common(struct bpf_map *map, void *key,
				   struct sock *sk, u64 flags)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u32 key_size = map->key_size, hash;
	struct bpf_shtab_elem *elem, *elem_new;
	struct bpf_shtab_bucket *bucket;
	struct sk_psock_link *link;
	struct sk_psock *psock;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	link = sk_psock_init_link();
	if (!link)
		return -ENOMEM;

	/* Only sockets we can redirect into/from in BPF need to hold
	 * refs to parser/verdict progs and have their sk_data_ready
	 * and sk_write_space callbacks overridden.
	 */
	if (sock_map_redirect_allowed(sk))
		ret = sock_map_link(map, sk);
	else
		ret = sock_map_link_no_progs(map, sk);
	if (ret < 0)
		goto out_free;

	psock = sk_psock(sk);
	WARN_ON_ONCE(!psock);

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);

	raw_spin_lock_bh(&bucket->lock);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
	if (elem && flags == BPF_NOEXIST) {
		ret = -EEXIST;
		goto out_unlock;
	} else if (!elem && flags == BPF_EXIST) {
		ret = -ENOENT;
		goto out_unlock;
	}

	elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem);
	if (IS_ERR(elem_new)) {
		ret = PTR_ERR(elem_new);
		goto out_unlock;
	}

	sock_map_add_link(psock, link, map, elem_new);
	/* Add new element to the head of the list, so that
	 * concurrent search will find it before old elem.
	 */
	hlist_add_head_rcu(&elem_new->node, &bucket->head);
	if (elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
	}
	raw_spin_unlock_bh(&bucket->lock);
	return 0;
out_unlock:
	raw_spin_unlock_bh(&bucket->lock);
	sk_psock_put(sk, psock);
out_free:
	sk_psock_free_link(link);
	return ret;
}

static int sock_hash_get_next_key(struct bpf_map *map, void *key,
				  void *key_next)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	struct bpf_shtab_elem *elem, *elem_next;
	u32 hash, key_size = map->key_size;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first_elem;
	hash = sock_hash_bucket_hash(key, key_size);
	head = &sock_hash_select_bucket(htab, hash)->head;
	elem = sock_hash_lookup_elem_raw(head, hash, key, key_size);
	if (!elem)
		goto find_first_elem;

	elem_next = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&elem->node)),
				     struct bpf_shtab_elem, node);
	if (elem_next) {
		memcpy(key_next, elem_next->key, key_size);
		return 0;
	}

	i = hash & (htab->buckets_num - 1);
	i++;
find_first_elem:
	for (; i < htab->buckets_num; i++) {
		head = &sock_hash_select_bucket(htab, i)->head;
		elem_next = hlist_entry_safe(rcu_dereference(hlist_first_rcu(head)),
					     struct bpf_shtab_elem, node);
		if (elem_next) {
			memcpy(key_next, elem_next->key, key_size);
			return 0;
		}
	}

	return -ENOENT;
}
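
/* The bucket count is rounded up to a power of two so that bucket selection
 * can simply mask the hash; roundup_pow_of_two() may wrap to zero for very
 * large max_entries, which the checks below reject along with oversized
 * bucket allocations.
 */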
static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
{
	struct bpf_shtab *htab;
	int i, err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->max_entries == 0 ||
	    attr->key_size == 0 ||
	    (attr->value_size != sizeof(u32) &&
	     attr->value_size != sizeof(u64)) ||
	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);
	if (attr->key_size > MAX_BPF_STACK)
		return ERR_PTR(-E2BIG);

	htab = kzalloc(sizeof(*htab), GFP_USER | __GFP_ACCOUNT);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&htab->map, attr);

	htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
	htab->elem_size = sizeof(struct bpf_shtab_elem) +
			  round_up(htab->map.key_size, 8);
	if (htab->buckets_num == 0 ||
	    htab->buckets_num > U32_MAX / sizeof(struct bpf_shtab_bucket)) {
		err = -EINVAL;
		goto free_htab;
	}

	htab->buckets = bpf_map_area_alloc(htab->buckets_num *
					   sizeof(struct bpf_shtab_bucket),
					   htab->map.numa_node);
	if (!htab->buckets) {
		err = -ENOMEM;
		goto free_htab;
	}

	for (i = 0; i < htab->buckets_num; i++) {
		INIT_HLIST_HEAD(&htab->buckets[i].head);
		raw_spin_lock_init(&htab->buckets[i].lock);
	}

	return &htab->map;
free_htab:
	kfree(htab);
	return ERR_PTR(err);
}

static void sock_hash_free(struct bpf_map *map)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	struct bpf_shtab_bucket *bucket;
	struct hlist_head unlink_list;
	struct bpf_shtab_elem *elem;
	struct hlist_node *node;
	int i;

	/* After the sync no updates or deletes will be in-flight so it
	 * is safe to walk map and remove entries without risking a race
	 * in EEXIST update case.
	 */
	synchronize_rcu();
	for (i = 0; i < htab->buckets_num; i++) {
		bucket = sock_hash_select_bucket(htab, i);

		/* We are racing with sock_hash_delete_from_link to
		 * enter the spin-lock critical section. Every socket on
		 * the list is still linked to sockhash. Since link
		 * exists, psock exists and holds a ref to socket. That
		 * lets us grab a socket ref too.
		 */
		raw_spin_lock_bh(&bucket->lock);
		hlist_for_each_entry(elem, &bucket->head, node)
			sock_hold(elem->sk);
		hlist_move_list(&bucket->head, &unlink_list);
		raw_spin_unlock_bh(&bucket->lock);

		/* Process removed entries out of atomic context to
		 * block for socket lock before deleting the psock's
		 * link to sockhash.
		 */
		hlist_for_each_entry_safe(elem, node, &unlink_list, node) {
			hlist_del(&elem->node);
			lock_sock(elem->sk);
			rcu_read_lock();
			sock_map_unref(elem->sk, elem);
			rcu_read_unlock();
			release_sock(elem->sk);
			sock_put(elem->sk);
			sock_hash_free_elem(htab, elem);
		}
	}

	/* wait for psock readers accessing its map link */
	synchronize_rcu();

	bpf_map_area_free(htab->buckets);
	kfree(htab);
}

static void *sock_hash_lookup_sys(struct bpf_map *map, void *key)
{
	struct sock *sk;

	if (map->value_size != sizeof(u64))
		return ERR_PTR(-ENOSPC);

	sk = __sock_hash_lookup_elem(map, key);
	if (!sk)
		return ERR_PTR(-ENOENT);

	__sock_gen_cookie(sk);
	return &sk->sk_cookie;
}

static void *sock_hash_lookup(struct bpf_map *map, void *key)
{
	struct sock *sk;

	sk = __sock_hash_lookup_elem(map, key);
	if (!sk)
		return NULL;
	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
		return NULL;
	return sk;
}

static void sock_hash_release_progs(struct bpf_map *map)
{
	psock_progs_drop(&container_of(map, struct bpf_shtab, map)->progs);
}

BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (likely(sock_map_sk_is_suitable(sops->sk) &&
		   sock_map_op_okay(sops)))
		return sock_hash_update_common(map, key, sops->sk, flags);
	return -EOPNOTSUPP;
}

const struct bpf_func_proto bpf_sock_hash_update_proto = {
	.func		= bpf_sock_hash_update,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_hash_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;

	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
	return SK_PASS;
}

const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
	.func		= bpf_sk_redirect_hash,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_hash_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;

	msg->flags = flags;
	msg->sk_redir = sk;
	return SK_PASS;
}

const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
	.func		= bpf_msg_redirect_hash,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

struct sock_hash_seq_info {
	struct bpf_map *map;
	struct bpf_shtab *htab;
	u32 bucket_id;
};

static void *sock_hash_seq_find_next(struct sock_hash_seq_info *info,
				     struct bpf_shtab_elem *prev_elem)
{
	const struct bpf_shtab *htab = info->htab;
	struct bpf_shtab_bucket *bucket;
	struct bpf_shtab_elem *elem;
	struct hlist_node *node;

	/* try to find next elem in the same bucket */
	if (prev_elem) {
		node = rcu_dereference(hlist_next_rcu(&prev_elem->node));
		elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
		if (elem)
			return elem;

		/* no more elements, continue in the next bucket */
		info->bucket_id++;
	}

	for (; info->bucket_id < htab->buckets_num; info->bucket_id++) {
		bucket = &htab->buckets[info->bucket_id];
		node = rcu_dereference(hlist_first_rcu(&bucket->head));
		elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
		if (elem)
			return elem;
	}

	return NULL;
}

static void *sock_hash_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	struct sock_hash_seq_info *info = seq->private;

	if (*pos == 0)
		++*pos;

	/* pairs with sock_hash_seq_stop */
	rcu_read_lock();
	return sock_hash_seq_find_next(info, NULL);
}

static void *sock_hash_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	__must_hold(rcu)
{
	struct sock_hash_seq_info *info = seq->private;

	++*pos;
	return sock_hash_seq_find_next(info, v);
}

static int sock_hash_seq_show(struct seq_file *seq, void *v)
	__must_hold(rcu)
{
	struct sock_hash_seq_info *info = seq->private;
	struct bpf_iter__sockmap ctx = {};
	struct bpf_shtab_elem *elem = v;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, !elem);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (elem) {
		ctx.key = elem->key;
		ctx.sk = elem->sk;
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static void sock_hash_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	if (!v)
		(void)sock_hash_seq_show(seq, NULL);

	/* pairs with sock_hash_seq_start */
	rcu_read_unlock();
}

static const struct seq_operations sock_hash_seq_ops = {
	.start	= sock_hash_seq_start,
	.next	= sock_hash_seq_next,
	.stop	= sock_hash_seq_stop,
	.show	= sock_hash_seq_show,
};

static int sock_hash_init_seq_private(void *priv_data,
				      struct bpf_iter_aux_info *aux)
{
	struct sock_hash_seq_info *info = priv_data;

	info->map = aux->map;
	info->htab = container_of(aux->map, struct bpf_shtab, map);
	return 0;
}

static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
	.seq_ops		= &sock_hash_seq_ops,
	.init_seq_private	= sock_hash_init_seq_private,
	.seq_priv_size		= sizeof(struct sock_hash_seq_info),
};

static int sock_hash_map_btf_id;
const struct bpf_map_ops sock_hash_ops = {
	.map_meta_equal		= bpf_map_meta_equal,
	.map_alloc		= sock_hash_alloc,
	.map_free		= sock_hash_free,
	.map_get_next_key	= sock_hash_get_next_key,
	.map_update_elem	= sock_map_update_elem,
	.map_delete_elem	= sock_hash_delete_elem,
	.map_lookup_elem	= sock_hash_lookup,
	.map_lookup_elem_sys_only = sock_hash_lookup_sys,
	.map_release_uref	= sock_hash_release_progs,
	.map_check_btf		= map_check_no_btf,
	.map_btf_name		= "bpf_shtab",
	.map_btf_id		= &sock_hash_map_btf_id,
	.iter_seq_info		= &sock_hash_iter_seq_info,
};

static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
{
	switch (map->map_type) {
	case BPF_MAP_TYPE_SOCKMAP:
		return &container_of(map, struct bpf_stab, map)->progs;
	case BPF_MAP_TYPE_SOCKHASH:
		return &container_of(map, struct bpf_shtab, map)->progs;
	default:
		break;
	}

	return NULL;
}

static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
				struct bpf_prog *old, u32 which)
{
	struct sk_psock_progs *progs = sock_map_progs(map);
	struct bpf_prog **pprog;

	if (!progs)
		return -EOPNOTSUPP;

	switch (which) {
	case BPF_SK_MSG_VERDICT:
		pprog = &progs->msg_parser;
		break;
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
	case BPF_SK_SKB_STREAM_PARSER:
		pprog = &progs->stream_parser;
		break;
#endif
	case BPF_SK_SKB_STREAM_VERDICT:
		pprog = &progs->stream_verdict;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (old)
		return psock_replace_prog(pprog, prog, old);

	psock_set_prog(pprog, prog);
	return 0;
}

static void sock_map_unlink(struct sock *sk, struct sk_psock_link *link)
{
	switch (link->map->map_type) {
	case BPF_MAP_TYPE_SOCKMAP:
		return sock_map_delete_from_link(link->map, sk,
						 link->link_raw);
	case BPF_MAP_TYPE_SOCKHASH:
		return sock_hash_delete_from_link(link->map, sk,
						  link->link_raw);
	default:
		break;
	}
}

static void sock_map_remove_links(struct sock *sk, struct sk_psock *psock)
{
	struct sk_psock_link *link;

	while ((link = sk_psock_link_pop(psock))) {
		sock_map_unlink(sk, link);
		sk_psock_free_link(link);
	}
}

void sock_map_unhash(struct sock *sk)
{
	void (*saved_unhash)(struct sock *sk);
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		if (sk->sk_prot->unhash)
			sk->sk_prot->unhash(sk);
		return;
	}

	saved_unhash = psock->saved_unhash;
	sock_map_remove_links(sk, psock);
	rcu_read_unlock();
	saved_unhash(sk);
}

void sock_map_close(struct sock *sk, long timeout)
{
	void (*saved_close)(struct sock *sk, long timeout);
	struct sk_psock *psock;

	lock_sock(sk);
	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		release_sock(sk);
		return sk->sk_prot->close(sk, timeout);
	}

	saved_close = psock->saved_close;
	sock_map_remove_links(sk, psock);
	rcu_read_unlock();
	sk_psock_stop(psock, true);
	release_sock(sk);
	saved_close(sk, timeout);
}
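
/* Attach a sockmap/sockhash iterator to its target map: takes a uref on the
 * map, rejects other map types and programs whose read-only key access
 * exceeds the map's key size.
 */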
static int sock_map_iter_attach_target(struct bpf_prog *prog,
				       union bpf_iter_link_info *linfo,
				       struct bpf_iter_aux_info *aux)
{
	struct bpf_map *map;
	int err = -EINVAL;

	if (!linfo->map.map_fd)
		return -EBADF;

	map = bpf_map_get_with_uref(linfo->map.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type != BPF_MAP_TYPE_SOCKMAP &&
	    map->map_type != BPF_MAP_TYPE_SOCKHASH)
		goto put_map;

	if (prog->aux->max_rdonly_access > map->key_size) {
		err = -EACCES;
		goto put_map;
	}

	aux->map = map;
	return 0;

put_map:
	bpf_map_put_with_uref(map);
	return err;
}

static void sock_map_iter_detach_target(struct bpf_iter_aux_info *aux)
{
	bpf_map_put_with_uref(aux->map);
}

static struct bpf_iter_reg sock_map_iter_reg = {
	.target			= "sockmap",
	.attach_target		= sock_map_iter_attach_target,
	.detach_target		= sock_map_iter_detach_target,
	.show_fdinfo		= bpf_iter_map_show_fdinfo,
	.fill_link_info		= bpf_iter_map_fill_link_info,
	.ctx_arg_info_size	= 2,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__sockmap, key),
		  PTR_TO_RDONLY_BUF_OR_NULL },
		{ offsetof(struct bpf_iter__sockmap, sk),
		  PTR_TO_BTF_ID_OR_NULL },
	},
};

static int __init bpf_sockmap_iter_init(void)
{
	sock_map_iter_reg.ctx_arg_info[1].btf_id =
		btf_sock_ids[BTF_SOCK_TYPE_SOCK];
	return bpf_iter_reg_target(&sock_map_iter_reg);
}
late_initcall(bpf_sockmap_iter_init);