/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>
#include <uapi/linux/filter.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/capability.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/stddef.h>
#include <linux/bpfptr.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/static_call.h>
#include <linux/memcontrol.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;
struct seq_operations;
struct bpf_iter_aux_info;
struct bpf_local_storage;
struct bpf_local_storage_map;
struct kobject;
struct mem_cgroup;
struct module;
struct bpf_func_state;
struct ftrace_ops;
struct cgroup;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;
extern struct bpf_mem_alloc bpf_global_ma;
extern bool bpf_global_ma_set;

typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
typedef unsigned int (*bpf_func_t)(const void *,
				   const struct bpf_insn *);
struct bpf_iter_seq_info {
	const struct seq_operations *seq_ops;
	bpf_iter_init_seq_priv_t init_seq_private;
	bpf_iter_fini_seq_priv_t fini_seq_private;
	u32 seq_priv_size;
};

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
					  void *value, u64 flags);
	int (*map_lookup_and_delete_batch)(struct bpf_map *map,
					   const union bpf_attr *attr,
					   union bpf_attr __user *uattr);
	int (*map_update_batch)(struct bpf_map *map, struct file *map_file,
				const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);
	void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
			     struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
	__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
			     struct poll_table_struct *pts);

	/* Functions called by bpf_local_storage maps */
	int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
					void *owner, u32 size);
	void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
					   void *owner, u32 size);
	struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);

	/* Misc helpers. */
	int (*map_redirect)(struct bpf_map *map, u64 key, u64 flags);

	/* map_meta_equal must be implemented for maps that can be
	 * used as an inner map. It is a runtime check to ensure
	 * an inner map can be inserted into an outer map.
	 *
	 * Some properties of the inner map have been used during
	 * verification. When inserting an inner map at runtime,
	 * map_meta_equal has to ensure the map being inserted has the same
	 * properties that the verifier has used earlier.
	 */
	bool (*map_meta_equal)(const struct bpf_map *meta0,
			       const struct bpf_map *meta1);

	int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
					      struct bpf_func_state *caller,
					      struct bpf_func_state *callee);
	int (*map_for_each_callback)(struct bpf_map *map,
				     bpf_callback_t callback_fn,
				     void *callback_ctx, u64 flags);

	/* BTF id of struct allocated by map_alloc */
	int *map_btf_id;

	/* bpf_iter info used to open a seq_file */
	const struct bpf_iter_seq_info *iter_seq_info;
};
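
/* Illustrative sketch (not from this header): a minimal, hypothetical
 * bpf_map_ops only needs the alloc/free and element callbacks wired up;
 * every other hook above is optional. The example_* names are assumptions
 * for illustration only:
 *
 *	static struct bpf_map *example_map_alloc(union bpf_attr *attr);
 *	static void example_map_free(struct bpf_map *map);
 *	static void *example_map_lookup_elem(struct bpf_map *map, void *key);
 *	static int example_map_update_elem(struct bpf_map *map, void *key,
 *					   void *value, u64 flags);
 *	static int example_map_delete_elem(struct bpf_map *map, void *key);
 *
 *	const struct bpf_map_ops example_map_ops = {
 *		.map_meta_equal		= bpf_map_meta_equal,
 *		.map_alloc		= example_map_alloc,
 *		.map_free		= example_map_free,
 *		.map_lookup_elem	= example_map_lookup_elem,
 *		.map_update_elem	= example_map_update_elem,
 *		.map_delete_elem	= example_map_delete_elem,
 *	};
 */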

enum {
	/* Support at most 10 fields in a BTF type */
	BTF_FIELDS_MAX = 10,
};

enum btf_field_type {
	BPF_SPIN_LOCK  = (1 << 0),
	BPF_TIMER      = (1 << 1),
	BPF_KPTR_UNREF = (1 << 2),
	BPF_KPTR_REF   = (1 << 3),
	BPF_KPTR       = BPF_KPTR_UNREF | BPF_KPTR_REF,
	BPF_LIST_HEAD  = (1 << 4),
	BPF_LIST_NODE  = (1 << 5),
	BPF_RB_ROOT    = (1 << 6),
	BPF_RB_NODE    = (1 << 7),
	BPF_GRAPH_NODE_OR_ROOT = BPF_LIST_NODE | BPF_LIST_HEAD |
				 BPF_RB_NODE | BPF_RB_ROOT,
};

struct btf_field_kptr {
	struct btf *btf;
	struct module *module;
	btf_dtor_kfunc_t dtor;
	u32 btf_id;
};

struct btf_field_graph_root {
	struct btf *btf;
	u32 value_btf_id;
	u32 node_offset;
	struct btf_record *value_rec;
};

struct btf_field {
	u32 offset;
	enum btf_field_type type;
	union {
		struct btf_field_kptr kptr;
		struct btf_field_graph_root graph_root;
	};
};

struct btf_record {
	u32 cnt;
	u32 field_mask;
	int spin_lock_off;
	int timer_off;
	struct btf_field fields[];
};

struct btf_field_offs {
	u32 cnt;
	u32 field_off[BTF_FIELDS_MAX];
	u8 field_sz[BTF_FIELDS_MAX];
};
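
/* Worked example (hypothetical map value, not from this header): for
 *
 *	struct val {
 *		u64 counter;
 *		struct bpf_spin_lock lock;	<- offset 8, size 4
 *		struct bpf_timer timer;		<- offset 16, size 16
 *	};
 *
 * the parsed btf_record would have cnt == 2, field_mask ==
 * BPF_SPIN_LOCK | BPF_TIMER, spin_lock_off == 8 and timer_off == 16,
 * while btf_field_offs would carry {8, 16} in field_off[] and {4, 16} in
 * field_sz[], so the copy/zero helpers below can skip these fields.
 */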

struct bpf_map {
	/* The first two cachelines with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u64 map_extra; /* any per-map-type extra fields */
	u32 map_flags;
	u32 id;
	struct btf_record *record;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	u32 btf_vmlinux_value_type_id;
	struct btf *btf;
#ifdef CONFIG_MEMCG_KMEM
	struct obj_cgroup *objcg;
#endif
	char name[BPF_OBJ_NAME_LEN];
	struct btf_field_offs *field_offs;
	/* The 3rd and 4th cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	atomic64_t refcnt ____cacheline_aligned;
	atomic64_t usercnt;
	struct work_struct work;
	struct mutex freeze_mutex;
	atomic64_t writecnt;
	/* 'Ownership' of program-containing map is claimed by the first program
	 * that is going to use this map or by the first program whose FD is
	 * stored in the map to make sure that all callers and callees have the
	 * same prog type, JITed flag and xdp_has_frags flag.
	 */
	struct {
		spinlock_t lock;
		enum bpf_prog_type type;
		bool jited;
		bool xdp_has_frags;
	} owner;
	bool bypass_spec_v1;
	bool frozen; /* write-once; write-protected by freeze_mutex */
};

static inline const char *btf_field_type_name(enum btf_field_type type)
{
	switch (type) {
	case BPF_SPIN_LOCK:
		return "bpf_spin_lock";
	case BPF_TIMER:
		return "bpf_timer";
	case BPF_KPTR_UNREF:
	case BPF_KPTR_REF:
		return "kptr";
	case BPF_LIST_HEAD:
		return "bpf_list_head";
	case BPF_LIST_NODE:
		return "bpf_list_node";
	case BPF_RB_ROOT:
		return "bpf_rb_root";
	case BPF_RB_NODE:
		return "bpf_rb_node";
	default:
		WARN_ON_ONCE(1);
		return "unknown";
	}
}

static inline u32 btf_field_type_size(enum btf_field_type type)
{
	switch (type) {
	case BPF_SPIN_LOCK:
		return sizeof(struct bpf_spin_lock);
	case BPF_TIMER:
		return sizeof(struct bpf_timer);
	case BPF_KPTR_UNREF:
	case BPF_KPTR_REF:
		return sizeof(u64);
	case BPF_LIST_HEAD:
		return sizeof(struct bpf_list_head);
	case BPF_LIST_NODE:
		return sizeof(struct bpf_list_node);
	case BPF_RB_ROOT:
		return sizeof(struct bpf_rb_root);
	case BPF_RB_NODE:
		return sizeof(struct bpf_rb_node);
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

static inline u32 btf_field_type_align(enum btf_field_type type)
{
	switch (type) {
	case BPF_SPIN_LOCK:
		return __alignof__(struct bpf_spin_lock);
	case BPF_TIMER:
		return __alignof__(struct bpf_timer);
	case BPF_KPTR_UNREF:
	case BPF_KPTR_REF:
		return __alignof__(u64);
	case BPF_LIST_HEAD:
		return __alignof__(struct bpf_list_head);
	case BPF_LIST_NODE:
		return __alignof__(struct bpf_list_node);
	case BPF_RB_ROOT:
		return __alignof__(struct bpf_rb_root);
	case BPF_RB_NODE:
		return __alignof__(struct bpf_rb_node);
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

static inline bool btf_record_has_field(const struct btf_record *rec, enum btf_field_type type)
{
	if (IS_ERR_OR_NULL(rec))
		return false;
	return rec->field_mask & type;
}

static inline void bpf_obj_init(const struct btf_field_offs *foffs, void *obj)
{
	int i;

	if (!foffs)
		return;
	for (i = 0; i < foffs->cnt; i++)
		memset(obj + foffs->field_off[i], 0, foffs->field_sz[i]);
}

/* 'dst' must be a temporary buffer and should not point to memory that is being
 * used in parallel by a bpf program or bpf syscall, otherwise the access from
 * the bpf program or bpf syscall may be corrupted by the reinitialization,
 * leading to weird problems. Even if 'dst' is newly-allocated from the bpf
 * memory allocator, it is still possible for 'dst' to be used in parallel by a
 * bpf program or bpf syscall.
 */
static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
	bpf_obj_init(map->field_offs, dst);
}

/* memcpy that is used with 8-byte aligned pointers, a size that is a multiple
 * of 8, and forced to use 'long' read/writes to try to atomically copy long
 * counters. Best-effort only. No barriers here, since it _will_ race with
 * concurrent updates from BPF programs. Called from bpf syscall and mostly
 * used with size 8 or 16 bytes, so ask compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		*ldst++ = *lsrc++;
}

/* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each. */
static inline void bpf_obj_memcpy(struct btf_field_offs *foffs,
				  void *dst, void *src, u32 size,
				  bool long_memcpy)
{
	u32 curr_off = 0;
	int i;

	if (likely(!foffs)) {
		if (long_memcpy)
			bpf_long_memcpy(dst, src, round_up(size, 8));
		else
			memcpy(dst, src, size);
		return;
	}

	for (i = 0; i < foffs->cnt; i++) {
		u32 next_off = foffs->field_off[i];
		u32 sz = next_off - curr_off;

		memcpy(dst + curr_off, src + curr_off, sz);
		curr_off += foffs->field_sz[i] + sz;
	}
	memcpy(dst + curr_off, src + curr_off, size - curr_off);
}
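
/* Worked example for the loop above (hypothetical layout): with one
 * bpf_spin_lock at offset 16 (field_off[0] == 16, field_sz[0] == 4) and
 * size == 32, bpf_obj_memcpy() copies bytes [0, 16), skips the lock by
 * advancing curr_off to 20, and finishes with bytes [20, 32), so the
 * in-kernel lock state is never overwritten by a value copy.
 */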

static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	bpf_obj_memcpy(map->field_offs, dst, src, map->value_size, false);
}

static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src)
{
	bpf_obj_memcpy(map->field_offs, dst, src, map->value_size, true);
}

static inline void bpf_obj_memzero(struct btf_field_offs *foffs, void *dst, u32 size)
{
	u32 curr_off = 0;
	int i;

	if (likely(!foffs)) {
		memset(dst, 0, size);
		return;
	}

	for (i = 0; i < foffs->cnt; i++) {
		u32 next_off = foffs->field_off[i];
		u32 sz = next_off - curr_off;

		memset(dst + curr_off, 0, sz);
		curr_off += foffs->field_sz[i] + sz;
	}
	memset(dst + curr_off, 0, size - curr_off);
}

static inline void zero_map_value(struct bpf_map *map, void *dst)
{
	bpf_obj_memzero(map->field_offs, dst, map->value_size);
}

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
void bpf_timer_cancel_and_free(void *timer);
void bpf_list_head_free(const struct btf_field *field, void *list_head,
			struct bpf_spin_lock *spin_lock);
void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
		      struct bpf_spin_lock *spin_lock);

int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
		map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

bool bpf_map_meta_equal(const struct bpf_map *meta0,
			const struct bpf_map *meta1);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* bpf_type_flag contains a set of flags that are applicable to the values of
 * arg_type, ret_type and reg_type. For example, a pointer value may be null,
 * or a memory region may be read-only. We classify types into two categories:
 * base types and extended types. Extended types are base types combined with
 * a type flag.
 *
 * Currently there are no more than 32 base types in arg_type, ret_type and
 * reg_types.
 */
#define BPF_BASE_TYPE_BITS	8

enum bpf_type_flag {
	/* PTR may be NULL. */
	PTR_MAYBE_NULL		= BIT(0 + BPF_BASE_TYPE_BITS),

	/* MEM is read-only. When applied on bpf_arg, it indicates the arg is
	 * compatible with both mutable and immutable memory.
	 */
	MEM_RDONLY		= BIT(1 + BPF_BASE_TYPE_BITS),

	/* MEM points to BPF ring buffer reservation. */
	MEM_RINGBUF		= BIT(2 + BPF_BASE_TYPE_BITS),

	/* MEM is in user address space. */
	MEM_USER		= BIT(3 + BPF_BASE_TYPE_BITS),

	/* MEM is a percpu memory. MEM_PERCPU tags PTR_TO_BTF_ID. When tagged
	 * with MEM_PERCPU, PTR_TO_BTF_ID _cannot_ be directly accessed. In
	 * order to drop this tag, it must be passed into bpf_per_cpu_ptr()
	 * or bpf_this_cpu_ptr(), which will return the pointer corresponding
	 * to the specified cpu.
	 */
	MEM_PERCPU		= BIT(4 + BPF_BASE_TYPE_BITS),

	/* Indicates that the argument will be released. */
	OBJ_RELEASE		= BIT(5 + BPF_BASE_TYPE_BITS),

	/* PTR is not trusted. This is only used with PTR_TO_BTF_ID, to mark
	 * unreferenced and referenced kptrs loaded from map values using a
	 * load instruction, so that they can only be dereferenced but not
	 * escape the BPF program into the kernel (i.e. cannot be passed as
	 * arguments to kfuncs or bpf helpers).
	 */
	PTR_UNTRUSTED		= BIT(6 + BPF_BASE_TYPE_BITS),

	MEM_UNINIT		= BIT(7 + BPF_BASE_TYPE_BITS),

	/* DYNPTR points to memory local to the bpf program. */
	DYNPTR_TYPE_LOCAL	= BIT(8 + BPF_BASE_TYPE_BITS),

	/* DYNPTR points to a kernel-produced ringbuf record. */
	DYNPTR_TYPE_RINGBUF	= BIT(9 + BPF_BASE_TYPE_BITS),

	/* Size is known at compile time. */
	MEM_FIXED_SIZE		= BIT(10 + BPF_BASE_TYPE_BITS),

	/* MEM is of an allocated object of type in program BTF. This is used to
	 * tag PTR_TO_BTF_ID allocated using bpf_obj_new.
	 */
	MEM_ALLOC		= BIT(11 + BPF_BASE_TYPE_BITS),

	/* PTR was passed from the kernel in a trusted context, and may be
	 * passed to KF_TRUSTED_ARGS kfuncs or BPF helper functions.
	 * Confusingly, this is _not_ the opposite of PTR_UNTRUSTED above.
	 * PTR_UNTRUSTED refers to a kptr that was read directly from a map
	 * without invoking bpf_kptr_xchg(). What we really need to know is
	 * whether a pointer is safe to pass to a kfunc or BPF helper function.
	 * While PTR_UNTRUSTED pointers are unsafe to pass to kfuncs and BPF
	 * helpers, they do not cover all possible instances of unsafe
	 * pointers. For example, a pointer that was obtained from walking a
	 * struct will _not_ get the PTR_UNTRUSTED type modifier, despite the
	 * fact that it may be NULL, invalid, etc. This is due to backwards
	 * compatibility requirements, as this was the behavior that was first
	 * introduced when kptrs were added. The behavior is now considered
	 * deprecated, and PTR_UNTRUSTED will eventually be removed.
	 *
	 * PTR_TRUSTED, on the other hand, is a pointer that the kernel
	 * guarantees to be valid and safe to pass to kfuncs and BPF helpers.
	 * For example, pointers passed to tracepoint arguments are considered
	 * PTR_TRUSTED, as are pointers that are passed to struct_ops
	 * callbacks. As alluded to above, pointers that are obtained from
	 * walking PTR_TRUSTED pointers are _not_ trusted. For example, if a
	 * struct task_struct *task is PTR_TRUSTED, then accessing
	 * task->last_wakee will lose the PTR_TRUSTED modifier when it's stored
	 * in a BPF register. Similarly, pointers passed to certain program
	 * types such as kretprobes are not guaranteed to be valid, as they may
	 * for example contain an object that was recently freed.
	 */
	PTR_TRUSTED		= BIT(12 + BPF_BASE_TYPE_BITS),

	/* MEM is tagged with rcu and memory access needs rcu_read_lock protection. */
	MEM_RCU			= BIT(13 + BPF_BASE_TYPE_BITS),

	/* Used to tag PTR_TO_BTF_ID | MEM_ALLOC references which are non-owning.
	 * Currently only valid for linked-list and rbtree nodes.
	 */
	NON_OWN_REF		= BIT(14 + BPF_BASE_TYPE_BITS),

	__BPF_TYPE_FLAG_MAX,
	__BPF_TYPE_LAST_FLAG	= __BPF_TYPE_FLAG_MAX - 1,
};

#define DYNPTR_TYPE_FLAG_MASK	(DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF)

/* Max number of base types. */
#define BPF_BASE_TYPE_LIMIT	(1UL << BPF_BASE_TYPE_BITS)

/* Max number of all types. */
#define BPF_TYPE_LIMIT		(__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1))
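
/* Worked example: an extended type is just a base type OR'ed with flags.
 * ARG_PTR_TO_MAP_VALUE_OR_NULL below is PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE,
 * i.e. BIT(8) set on top of the base value, so the low BPF_BASE_TYPE_BITS
 * bits still decode to ARG_PTR_TO_MAP_VALUE while the flag bits describe
 * the extra property (here: the pointer may be NULL).
 */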

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */

	/* Used to prototype bpf_memcmp() and other functions that access data
	 * on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
	ARG_PTR_TO_RINGBUF_MEM,	/* pointer to dynamically reserved ringbuf memory */
	ARG_CONST_ALLOC_SIZE_OR_ZERO,	/* number of allocated bytes requested */
	ARG_PTR_TO_BTF_ID_SOCK_COMMON,	/* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
	ARG_PTR_TO_PERCPU_BTF_ID,	/* pointer to in-kernel percpu type */
	ARG_PTR_TO_FUNC,	/* pointer to a bpf program function */
	ARG_PTR_TO_STACK,	/* pointer to stack */
	ARG_PTR_TO_CONST_STR,	/* pointer to a null terminated read-only string */
	ARG_PTR_TO_TIMER,	/* pointer to bpf_timer */
	ARG_PTR_TO_KPTR,	/* pointer to referenced kptr */
	ARG_PTR_TO_DYNPTR,	/* pointer to bpf_dynptr. See bpf_type_flag for dynptr type */
	__BPF_ARG_TYPE_MAX,

	/* Extended arg_types. */
	ARG_PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE,
	ARG_PTR_TO_MEM_OR_NULL		= PTR_MAYBE_NULL | ARG_PTR_TO_MEM,
	ARG_PTR_TO_CTX_OR_NULL		= PTR_MAYBE_NULL | ARG_PTR_TO_CTX,
	ARG_PTR_TO_SOCKET_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
	ARG_PTR_TO_STACK_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_STACK,
	ARG_PTR_TO_BTF_ID_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID,
	/* pointer to memory does not need to be initialized, helper function must fill
	 * all bytes or clear them in error case.
	 */
	ARG_PTR_TO_UNINIT_MEM		= MEM_UNINIT | ARG_PTR_TO_MEM,
	/* Pointer to valid memory of size known at compile time. */
	ARG_PTR_TO_FIXED_SIZE_MEM	= MEM_FIXED_SIZE | ARG_PTR_TO_MEM,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_ARG_TYPE_LIMIT		= BPF_TYPE_LIMIT,
};
static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_SOCKET,		/* returns a pointer to a socket */
	RET_PTR_TO_TCP_SOCK,		/* returns a pointer to a tcp_sock */
	RET_PTR_TO_SOCK_COMMON,		/* returns a pointer to a sock_common */
	RET_PTR_TO_MEM,			/* returns a pointer to memory */
	RET_PTR_TO_MEM_OR_BTF_ID,	/* returns a pointer to a valid memory or a btf_id */
	RET_PTR_TO_BTF_ID,		/* returns a pointer to a btf_id */
	__BPF_RET_TYPE_MAX,

	/* Extended ret_types. */
	RET_PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE,
	RET_PTR_TO_SOCKET_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
	RET_PTR_TO_TCP_SOCK_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
	RET_PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
	RET_PTR_TO_RINGBUF_MEM_OR_NULL	= PTR_MAYBE_NULL | MEM_RINGBUF | RET_PTR_TO_MEM,
	RET_PTR_TO_DYNPTR_MEM_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_MEM,
	RET_PTR_TO_BTF_ID_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,
	RET_PTR_TO_BTF_ID_TRUSTED	= PTR_TRUSTED	 | RET_PTR_TO_BTF_ID,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_RET_TYPE_LIMIT	= BPF_TYPE_LIMIT,
};
static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	bool might_sleep;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	union {
		struct {
			u32 *arg1_btf_id;
			u32 *arg2_btf_id;
			u32 *arg3_btf_id;
			u32 *arg4_btf_id;
			u32 *arg5_btf_id;
		};
		u32 *arg_btf_id[5];
		struct {
			size_t arg1_size;
			size_t arg2_size;
			size_t arg3_size;
			size_t arg4_size;
			size_t arg5_size;
		};
		size_t arg_size[5];
	};
	int *ret_btf_id; /* return value btf_id */
	bool (*allowed)(const struct bpf_prog *prog);
};
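
/* Illustrative sketch: a helper's prototype wires its BPF_CALL
 * implementation to the argument/return constraints the verifier enforces.
 * This mirrors the in-tree bpf_map_lookup_elem proto (quoted from memory,
 * abbreviated):
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.pkt_access	= true,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */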

/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ  = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_KEY,		 /* reg points to a map element key */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
	/* PTR_TO_BTF_ID points to a kernel struct that does not need
	 * to be null checked by the BPF program. This does not imply the
	 * pointer is _not_ null and in practice this can easily be a null
	 * pointer when reading pointer chains. The assumption is program
	 * context will handle null pointer dereference typically via fault
	 * handling. The verifier must keep this in mind and can make no
	 * assumptions about null or non-null when doing branch analysis.
	 * Further, when passed into helpers the helpers can not, without
	 * additional context, assume the value is non-null.
	 */
	PTR_TO_BTF_ID,
	/* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
	 * been checked for null. Used primarily to inform the verifier
	 * an explicit null check is required for this struct.
	 */
	PTR_TO_MEM,		 /* reg points to valid memory region */
	PTR_TO_BUF,		 /* reg points to a read/write buffer */
	PTR_TO_FUNC,		 /* reg points to a bpf program function */
	CONST_PTR_TO_DYNPTR,	 /* reg points to a const struct bpf_dynptr */
	__BPF_REG_TYPE_MAX,

	/* Extended reg_types. */
	PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_MAP_VALUE,
	PTR_TO_SOCKET_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_SOCKET,
	PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
	PTR_TO_TCP_SOCK_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
	PTR_TO_BTF_ID_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_BTF_ID,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_REG_TYPE_LIMIT	= BPF_TYPE_LIMIT,
};
static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	union {
		int ctx_field_size;
		struct {
			struct btf *btf;
			u32 btf_id;
		};
	};
	struct bpf_verifier_log *log; /* for verbose logs */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
{
	return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
	       insn->src_reg == BPF_PSEUDO_FUNC;
}

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_reg_state;
struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
	int (*btf_struct_access)(struct bpf_verifier_log *log,
				 const struct bpf_reg_state *reg,
				 int off, int size, enum bpf_access_type atype,
				 u32 *next_btf_id, enum bpf_type_flag *flag);
};
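
/* Illustrative sketch (hypothetical program type, not from this header): a
 * minimal is_valid_access implementation typically bounds-checks the ctx
 * offset, restricts writes, and rejects misaligned accesses:
 *
 *	static bool example_is_valid_access(int off, int size,
 *					    enum bpf_access_type type,
 *					    const struct bpf_prog *prog,
 *					    struct bpf_insn_access_aux *info)
 *	{
 *		if (off < 0 || off + size > sizeof(struct example_ctx))
 *			return false;
 *		if (type == BPF_WRITE)
 *			return false;
 *		if (off % size != 0)
 *			return false;
 *		return true;
 *	}
 */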

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog		*prog;
	struct net_device	*netdev;
	struct bpf_offload_dev	*offdev;
	void			*dev_priv;
	struct list_head	offloads;
	bool			dev_state;
	bool			opt_failed;
	void			*jited_image;
	u32			jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

/* The maximum number of arguments passed through registers
 * a single function may have.
 */
#define MAX_BPF_FUNC_REG_ARGS 5

/* The argument is a structure. */
#define BTF_FMODEL_STRUCT_ARG		BIT(0)

/* The argument is signed. */
#define BTF_FMODEL_SIGNED_ARG		BIT(1)

struct btf_func_model {
	u8 ret_size;
	u8 ret_flags;
	u8 nr_args;
	u8 arg_size[MAX_BPF_FUNC_ARGS];
	u8 arg_flags[MAX_BPF_FUNC_ARGS];
};

/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * fexit progs.
 */
#define BPF_TRAMP_F_RESTORE_REGS	BIT(0)
/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG		BIT(1)
/* Skip current frame and return to parent. Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME		BIT(2)
/* Store IP address of the caller on the trampoline stack,
 * so it's available for trampoline's programs.
 */
#define BPF_TRAMP_F_IP_ARG		BIT(3)
/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
#define BPF_TRAMP_F_RET_FENTRY_RET	BIT(4)

/* Get original function from stack instead of from provided direct address.
 * Makes sense for trampolines with fexit or fmod_ret programs.
 */
#define BPF_TRAMP_F_ORIG_STACK		BIT(5)

/* This trampoline is on a function with another ftrace_ops with IPMODIFY,
 * e.g., a live patch. This flag is set and cleared by ftrace callbacks.
 */
#define BPF_TRAMP_F_SHARE_IPMODIFY	BIT(6)

/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86.
 */
enum {
#if defined(__s390x__)
	BPF_MAX_TRAMP_LINKS = 27,
#else
	BPF_MAX_TRAMP_LINKS = 38,
#endif
};

struct bpf_tramp_links {
	struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS];
	int nr_links;
};

struct bpf_tramp_run_ctx;

/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling original function
 *    fexit = a set of programs to run after original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    With flags = 0
 *      fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *      orig_call = original callback addr or direct function addr
 *      fentry = a set of programs to run before calling original function
 *      fexit = a set of programs to run after original function
 */
struct bpf_tramp_image;
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_links *tlinks,
				void *orig_call);
u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
					     struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
					     struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *prog,
				      struct bpf_tramp_run_ctx *run_ctx);
typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *prog, u64 start,
				      struct bpf_tramp_run_ctx *run_ctx);
bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog);
bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog);
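
/* Illustrative sketch: the generated trampoline code brackets each program
 * invocation with the enter/exit pair resolved by bpf_trampoline_enter()
 * and bpf_trampoline_exit(); in pseudocode of the emitted sequence:
 *
 *	start = enter(prog, &run_ctx);	// 0 means recursion was detected
 *	if (start)
 *		prog->bpf_func(ctx, prog->insnsi);
 *	exit(prog, start, &run_ctx);	// accounts runtime, restores run_ctx
 */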

struct bpf_ksym {
	unsigned long		 start;
	unsigned long		 end;
	char			 name[KSYM_NAME_LEN];
	struct list_head	 lnode;
	struct latch_tree_node	 tnode;
	bool			 prog;
};

enum bpf_tramp_prog_type {
	BPF_TRAMP_FENTRY,
	BPF_TRAMP_FEXIT,
	BPF_TRAMP_MODIFY_RETURN,
	BPF_TRAMP_MAX,
	BPF_TRAMP_REPLACE, /* more than MAX */
};

struct bpf_tramp_image {
	void *image;
	struct bpf_ksym ksym;
	struct percpu_ref pcref;
	void *ip_after_call;
	void *ip_epilogue;
	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};

struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	struct ftrace_ops *fops;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	refcount_t refcnt;
	u32 flags;
	u64 key;
	struct {
		struct btf_func_model model;
		void *addr;
		bool ftrace_managed;
	} func;
	/* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
	 * program by replacing one of its functions. func.addr is the address
	 * of the function it replaced.
	 */
	struct bpf_prog *extension_prog;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	struct bpf_tramp_image *cur_image;
	u64 selector;
	struct module *mod;
};

struct bpf_attach_target_info {
	struct btf_func_model fmodel;
	long tgt_addr;
	const char *tgt_name;
	const struct btf_type *tgt_type;
};

#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */

struct bpf_dispatcher_prog {
	struct bpf_prog *prog;
	refcount_t users;
};

struct bpf_dispatcher {
	/* dispatcher mutex */
	struct mutex mutex;
	void *func;
	struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
	int num_progs;
	void *image;
	void *rw_image;
	u32 image_off;
	struct bpf_ksym ksym;
#ifdef CONFIG_HAVE_STATIC_CALL
	struct static_call_key *sc_key;
	void *sc_tramp;
#endif
};

static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
	const void *ctx,
	const struct bpf_insn *insnsi,
	bpf_func_t bpf_func)
{
	return bpf_func(ctx, insnsi);
}

#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);

/*
 * When the architecture supports STATIC_CALL replace the bpf_dispatcher_fn
 * indirection with a direct call to the bpf program. If the architecture does
 * not have STATIC_CALL, avoid a double-indirection.
 */
#ifdef CONFIG_HAVE_STATIC_CALL

#define __BPF_DISPATCHER_SC_INIT(_name)				\
	.sc_key = &STATIC_CALL_KEY(_name),			\
	.sc_tramp = STATIC_CALL_TRAMP_ADDR(_name),

#define __BPF_DISPATCHER_SC(name)				\
	DEFINE_STATIC_CALL(bpf_dispatcher_##name##_call, bpf_dispatcher_nop_func)

#define __BPF_DISPATCHER_CALL(name)				\
	static_call(bpf_dispatcher_##name##_call)(ctx, insnsi, bpf_func)

#define __BPF_DISPATCHER_UPDATE(_d, _new)			\
	__static_call_update((_d)->sc_key, (_d)->sc_tramp, (_new))

#else
#define __BPF_DISPATCHER_SC_INIT(name)
#define __BPF_DISPATCHER_SC(name)
#define __BPF_DISPATCHER_CALL(name)	bpf_func(ctx, insnsi)
#define __BPF_DISPATCHER_UPDATE(_d, _new)
#endif

#define BPF_DISPATCHER_INIT(_name) {				\
	.mutex = __MUTEX_INITIALIZER(_name.mutex),		\
	.func = &_name##_func,					\
	.progs = {},						\
	.num_progs = 0,						\
	.image = NULL,						\
	.image_off = 0,						\
	.ksym = {						\
		.name  = #_name,				\
		.lnode = LIST_HEAD_INIT(_name.ksym.lnode),	\
	},							\
	__BPF_DISPATCHER_SC_INIT(_name##_call)			\
}

#define DEFINE_BPF_DISPATCHER(name)					\
	__BPF_DISPATCHER_SC(name);					\
	noinline __nocfi unsigned int bpf_dispatcher_##name##_func(	\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		bpf_func_t bpf_func)					\
	{								\
		return __BPF_DISPATCHER_CALL(name);			\
	}								\
	EXPORT_SYMBOL(bpf_dispatcher_##name##_func);			\
	struct bpf_dispatcher bpf_dispatcher_##name =			\
		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);

#define DECLARE_BPF_DISPATCHER(name)					\
	unsigned int bpf_dispatcher_##name##_func(			\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		bpf_func_t bpf_func);					\
	extern struct bpf_dispatcher bpf_dispatcher_##name;

#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
				struct bpf_prog *to);
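
/* Illustrative usage (hypothetical subsystem, patterned on the in-tree XDP
 * dispatcher): declare the dispatcher once, then patch programs in and out
 * at attach/detach time and dispatch through the generated stub:
 *
 *	DEFINE_BPF_DISPATCHER(example)		// in one .c file
 *	DECLARE_BPF_DISPATCHER(example)		// in a header
 *
 *	// swap 'from' for 'to' in the dispatcher's jump table:
 *	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(example), from, to);
 *
 *	// fast path, avoiding the retpoline-prone indirect call:
 *	ret = BPF_DISPATCHER_FUNC(example)(ctx, prog->insnsi, prog->bpf_func);
 */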
/* Called only from JIT-enabled code, so there's no need for stubs. */
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
int bpf_jit_charge_modmem(u32 size);
void bpf_jit_uncharge_modmem(u32 size);
bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
#else
static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
					   struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
					     struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
							struct bpf_attach_target_info *tgt_info)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
					      struct bpf_prog *from,
					      struct bpf_prog *to) {}
static inline bool is_bpf_image_address(unsigned long address)
{
	return false;
}
static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
	return false;
}
#endif

struct bpf_func_info_aux {
	u16 linkage;
	bool unreliable;
};

enum bpf_jit_poke_reason {
	BPF_POKE_REASON_TAIL_CALL,
};

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *tailcall_target;
	void *tailcall_bypass;
	void *bypass_addr;
	void *aux;
	union {
		struct {
			struct bpf_map *map;
			u32 key;
		} tail_call;
	};
	bool tailcall_target_stable;
	u8 adj_off;
	u16 reason;
	u32 insn_idx;
};

/* reg_type info for ctx arguments */
struct bpf_ctx_arg_aux {
	u32 offset;
	enum bpf_reg_type reg_type;
	u32 btf_id;
};

struct btf_mod_pair {
	struct btf *btf;
	struct module *module;
};

struct bpf_kfunc_desc_tab;

struct bpf_prog_aux {
	atomic64_t refcnt;
	u32 used_map_cnt;
	u32 used_btf_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	u32 ctx_arg_info_size;
	u32 max_rdonly_access;
	u32 max_rdwr_access;
	struct btf *attach_btf;
	const struct bpf_ctx_arg_aux *ctx_arg_info;
	struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
	struct bpf_prog *dst_prog;
	struct bpf_trampoline *dst_trampoline;
	enum bpf_prog_type saved_dst_prog_type;
	enum bpf_attach_type saved_dst_attach_type;
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool dev_bound; /* Program is bound to the netdev. */
	bool offload_requested; /* Program is bound and offloaded to the netdev. */
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	bool func_proto_unreliable;
	bool sleepable;
	bool tail_call_reachable;
	bool xdp_has_frags;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct bpf_jit_poke_descriptor *poke_tab;
	struct bpf_kfunc_desc_tab *kfunc_tab;
	struct bpf_kfunc_btf_tab *kfunc_btf_tab;
	u32 size_poke_tab;
	struct bpf_ksym ksym;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
	struct btf_mod_pair *used_btfs;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	u32 verified_insns;
	int cgroup_atype; /* enum cgroup_bpf_attach_type */
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	struct bpf_func_info_aux *func_info_aux;
	/* bpf_line_info loaded from userspace. linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo. It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	u32 num_exentries;
	struct exception_table_entry *extable;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

struct bpf_prog {
	u16			pages;		/* Number of allocated pages */
	u16			jited:1,	/* Is our filter JIT'ed? */
				jit_requested:1,/* archs need to JIT the prog */
				gpl_compatible:1, /* Is filter GPL compatible? */
				cb_access:1,	/* Is control block accessed? */
				dst_needed:1,	/* Do we need dst entry? */
				blinding_requested:1, /* needs constant blinding */
				blinded:1,	/* Was blinded */
				is_func:1,	/* program is a bpf function */
				kprobe_override:1, /* Do we override a kprobe? */
				has_callchain_buf:1, /* callchain buffer allocated? */
				enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
				call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
				call_get_func_ip:1, /* Do we call get_func_ip() */
				tstamp_type_access:1; /* Accessed __sk_buff->tstamp_type */
	enum bpf_prog_type	type;		/* Type of BPF program */
	enum bpf_attach_type	expected_attach_type; /* For some prog types */
	u32			len;		/* Number of filter blocks */
	u32			jited_len;	/* Size of jited insns in bytes */
	u8			tag[BPF_TAG_SIZE];
	struct bpf_prog_stats __percpu *stats;
	int __percpu		*active;
	unsigned int		(*bpf_func)(const void *ctx,
					    const struct bpf_insn *insn);
	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
	struct sock_fprog_kern	*orig_prog;	/* Original BPF program */
	/* Instructions for interpreter */
	union {
		DECLARE_FLEX_ARRAY(struct sock_filter, insns);
		DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi);
	};
};

struct bpf_array_aux {
	/* Programs with direct jumps into programs part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;
	struct mutex poke_mutex;
	struct work_struct work;
};

struct bpf_link {
	atomic64_t refcnt;
	u32 id;
	enum bpf_link_type type;
	const struct bpf_link_ops *ops;
	struct bpf_prog *prog;
	struct work_struct work;
};

struct bpf_link_ops {
	void (*release)(struct bpf_link *link);
	void (*dealloc)(struct bpf_link *link);
	int (*detach)(struct bpf_link *link);
	int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
			   struct bpf_prog *old_prog);
	void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
	int (*fill_link_info)(const struct bpf_link *link,
			      struct bpf_link_info *info);
};

struct bpf_tramp_link {
	struct bpf_link link;
	struct hlist_node tramp_hlist;
	u64 cookie;
};

struct bpf_shim_tramp_link {
	struct bpf_tramp_link link;
	struct bpf_trampoline *trampoline;
};

struct bpf_tracing_link {
	struct bpf_tramp_link link;
	enum bpf_attach_type attach_type;
	struct bpf_trampoline *trampoline;
	struct bpf_prog *tgt_prog;
};

struct bpf_link_primer {
	struct bpf_link *link;
	struct file *file;
	int fd;
	u32 id;
};

struct bpf_struct_ops_value;
struct btf_member;

#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
struct bpf_struct_ops {
	const struct bpf_verifier_ops *verifier_ops;
	int (*init)(struct btf *btf);
	int (*check_member)(const struct btf_type *t,
			    const struct btf_member *member,
			    const struct bpf_prog *prog);
	int (*init_member)(const struct btf_type *t,
			   const struct btf_member *member,
			   void *kdata, const void *udata);
	int (*reg)(void *kdata);
	void (*unreg)(void *kdata);
	const struct btf_type *type;
	const struct btf_type *value_type;
	const char *name;
	struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
	u32 type_id;
	u32 value_id;
};
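
/* Illustrative sketch (hypothetical subsystem, not from this header): a
 * subsystem exposes a kernel ops struct to BPF by registering a
 * bpf_struct_ops instance whose name matches the struct, e.g.:
 *
 *	static struct bpf_struct_ops bpf_example_ops = {
 *		.verifier_ops	= &bpf_example_verifier_ops,
 *		.init		= bpf_example_ops_init,
 *		.check_member	= bpf_example_ops_check_member,
 *		.init_member	= bpf_example_ops_init_member,
 *		.reg		= bpf_example_ops_reg,
 *		.unreg		= bpf_example_ops_unreg,
 *		.name		= "example_ops",
 *	};
 *
 * This mirrors how tcp_congestion_ops is wired up for BPF in
 * net/ipv4/bpf_tcp_ca.c.
 */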

#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
				      struct bpf_tramp_link *link,
				      const struct btf_func_model *model,
				      void *image, void *image_end);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		return bpf_struct_ops_get(data);
	else
		return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		bpf_struct_ops_put(data);
	else
		module_put(owner);
}

#ifdef CONFIG_NET
/* Define it here to avoid the use of forward declaration */
struct bpf_dummy_ops_state {
	int val;
};

struct bpf_dummy_ops {
	int (*test_1)(struct bpf_dummy_ops_state *cb);
	int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
		      char a3, unsigned long a4);
	int (*test_sleepable)(struct bpf_dummy_ops_state *cb);
};

int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
			    union bpf_attr __user *uattr);
#endif
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	return NULL;
}
static inline void bpf_struct_ops_init(struct btf *btf,
				       struct bpf_verifier_log *log)
{
}
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	module_put(owner);
}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
						     void *key,
						     void *value)
{
	return -EINVAL;
}
#endif

#if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM)
int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
				    int cgroup_atype);
void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog);
#else
static inline int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
						  int cgroup_atype)
{
	return -EOPNOTSUPP;
}
static inline void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog)
{
}
#endif

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	struct bpf_array_aux *aux;
	union {
		DECLARE_FLEX_ARRAY(char, value) __aligned(8);
		DECLARE_FLEX_ARRAY(void *, ptrs) __aligned(8);
		DECLARE_FLEX_ARRAY(void __percpu *, pptrs) __aligned(8);
	};
};

#define BPF_COMPLEXITY_LIMIT_INSNS	1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 33

/* Maximum number of loops for bpf_loop */
#define BPF_MAX_LOOPS	BIT(23)

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ  BIT(0)
#define BPF_MAP_CAN_WRITE BIT(1)

/* Maximum number of user-producer ring buffer samples that can be drained in
 * a call to bpf_user_ringbuf_drain().
 */
#define BPF_MAX_USER_RINGBUF_SAMPLES (128 * 1024)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}
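
/* Worked example: a map created with BPF_F_RDONLY_PROG maps to
 * BPF_MAP_CAN_READ only, BPF_F_WRONLY_PROG to BPF_MAP_CAN_WRITE only, and
 * no prog-side flag at all to both capabilities. bpf_map_flags_access_ok()
 * rejects only the contradictory BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG
 * combination.
 */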
1659  */
1660 struct bpf_prog_array_item {
1661 	struct bpf_prog *prog;
1662 	union {
1663 		struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
1664 		u64 bpf_cookie;
1665 	};
1666 };
1667 
1668 struct bpf_prog_array {
1669 	struct rcu_head rcu;
1670 	struct bpf_prog_array_item items[];
1671 };
1672 
1673 struct bpf_empty_prog_array {
1674 	struct bpf_prog_array hdr;
1675 	struct bpf_prog *null_prog;
1676 };
1677 
1678 /* To avoid allocating an empty bpf_prog_array for cgroups that have
1679  * no bpf program attached, use the single global 'bpf_empty_prog_array'.
1680  * It will not be modified by the caller of bpf_prog_array_alloc()
1681  * (since the caller requested prog_cnt == 0); that pointer should still
1682  * be 'freed' via bpf_prog_array_free().
1683  */
1684 extern struct bpf_empty_prog_array bpf_empty_prog_array;
1685 
1686 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
1687 void bpf_prog_array_free(struct bpf_prog_array *progs);
1688 /* Use when traversal over the bpf_prog_array uses tasks_trace rcu */
1689 void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs);
1690 int bpf_prog_array_length(struct bpf_prog_array *progs);
1691 bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
1692 int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
1693 				__u32 __user *prog_ids, u32 cnt);
1694 
1695 void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
1696 				struct bpf_prog *old_prog);
1697 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
1698 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
1699 			     struct bpf_prog *prog);
1700 int bpf_prog_array_copy_info(struct bpf_prog_array *array,
1701 			     u32 *prog_ids, u32 request_cnt,
1702 			     u32 *prog_cnt);
1703 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
1704 			struct bpf_prog *exclude_prog,
1705 			struct bpf_prog *include_prog,
1706 			u64 bpf_cookie,
1707 			struct bpf_prog_array **new_array);
1708 
1709 struct bpf_run_ctx {};
1710 
1711 struct bpf_cg_run_ctx {
1712 	struct bpf_run_ctx run_ctx;
1713 	const struct bpf_prog_array_item *prog_item;
1714 	int retval;
1715 };
1716 
1717 struct bpf_trace_run_ctx {
1718 	struct bpf_run_ctx run_ctx;
1719 	u64 bpf_cookie;
1720 };
1721 
1722 struct bpf_tramp_run_ctx {
1723 	struct bpf_run_ctx run_ctx;
1724 	u64 bpf_cookie;
1725 	struct bpf_run_ctx *saved_run_ctx;
1726 };
1727 
1728 static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
1729 {
1730 	struct bpf_run_ctx *old_ctx = NULL;
1731 
1732 #ifdef CONFIG_BPF_SYSCALL
1733 	old_ctx = current->bpf_ctx;
1734 	current->bpf_ctx = new_ctx;
1735 #endif
1736 	return old_ctx;
1737 }
1738 
1739 static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
1740 {
1741 #ifdef CONFIG_BPF_SYSCALL
1742 	current->bpf_ctx = old_ctx;
1743 #endif
1744 }
1745 
1746 /* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
1747 #define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE (1 << 0)
1748 /* BPF program asks to set CN on the packet.
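 * ("CN" here is congestion notification: a cgroup egress program can use
 * this return bit to request that the packet be marked, e.g. with an ECN
 * CE mark; the interpretation is up to the caller running the prog array.)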
*/ 1749 #define BPF_RET_SET_CN (1 << 0) 1750 1751 typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx); 1752 1753 static __always_inline u32 1754 bpf_prog_run_array(const struct bpf_prog_array *array, 1755 const void *ctx, bpf_prog_run_fn run_prog) 1756 { 1757 const struct bpf_prog_array_item *item; 1758 const struct bpf_prog *prog; 1759 struct bpf_run_ctx *old_run_ctx; 1760 struct bpf_trace_run_ctx run_ctx; 1761 u32 ret = 1; 1762 1763 RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held"); 1764 1765 if (unlikely(!array)) 1766 return ret; 1767 1768 migrate_disable(); 1769 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); 1770 item = &array->items[0]; 1771 while ((prog = READ_ONCE(item->prog))) { 1772 run_ctx.bpf_cookie = item->bpf_cookie; 1773 ret &= run_prog(prog, ctx); 1774 item++; 1775 } 1776 bpf_reset_run_ctx(old_run_ctx); 1777 migrate_enable(); 1778 return ret; 1779 } 1780 1781 /* Notes on RCU design for bpf_prog_arrays containing sleepable programs: 1782 * 1783 * We use the tasks_trace rcu flavor read section to protect the bpf_prog_array 1784 * overall. As a result, we must use the bpf_prog_array_free_sleepable 1785 * in order to use the tasks_trace rcu grace period. 1786 * 1787 * When a non-sleepable program is inside the array, we take the rcu read 1788 * section and disable preemption for that program alone, so it can access 1789 * rcu-protected dynamically sized maps. 1790 */ 1791 static __always_inline u32 1792 bpf_prog_run_array_sleepable(const struct bpf_prog_array __rcu *array_rcu, 1793 const void *ctx, bpf_prog_run_fn run_prog) 1794 { 1795 const struct bpf_prog_array_item *item; 1796 const struct bpf_prog *prog; 1797 const struct bpf_prog_array *array; 1798 struct bpf_run_ctx *old_run_ctx; 1799 struct bpf_trace_run_ctx run_ctx; 1800 u32 ret = 1; 1801 1802 might_fault(); 1803 1804 rcu_read_lock_trace(); 1805 migrate_disable(); 1806 1807 array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held()); 1808 if (unlikely(!array)) 1809 goto out; 1810 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); 1811 item = &array->items[0]; 1812 while ((prog = READ_ONCE(item->prog))) { 1813 if (!prog->aux->sleepable) 1814 rcu_read_lock(); 1815 1816 run_ctx.bpf_cookie = item->bpf_cookie; 1817 ret &= run_prog(prog, ctx); 1818 item++; 1819 1820 if (!prog->aux->sleepable) 1821 rcu_read_unlock(); 1822 } 1823 bpf_reset_run_ctx(old_run_ctx); 1824 out: 1825 migrate_enable(); 1826 rcu_read_unlock_trace(); 1827 return ret; 1828 } 1829 1830 #ifdef CONFIG_BPF_SYSCALL 1831 DECLARE_PER_CPU(int, bpf_prog_active); 1832 extern struct mutex bpf_stats_enabled_mutex; 1833 1834 /* 1835 * Block execution of BPF programs attached to instrumentation (perf, 1836 * kprobes, tracepoints) to prevent deadlocks on map operations as any of 1837 * these events can happen inside a region which holds a map bucket lock 1838 * and can deadlock on it. 
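 *
 * A sketch of the guard around such a bucket-locked region (the names
 * below are hypothetical):
 *
 *	bpf_disable_instrumentation();
 *	raw_spin_lock_irqsave(&bucket->lock, flags);
 *	... update the map bucket ...
 *	raw_spin_unlock_irqrestore(&bucket->lock, flags);
 *	bpf_enable_instrumentation();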
1839 */ 1840 static inline void bpf_disable_instrumentation(void) 1841 { 1842 migrate_disable(); 1843 this_cpu_inc(bpf_prog_active); 1844 } 1845 1846 static inline void bpf_enable_instrumentation(void) 1847 { 1848 this_cpu_dec(bpf_prog_active); 1849 migrate_enable(); 1850 } 1851 1852 extern const struct file_operations bpf_map_fops; 1853 extern const struct file_operations bpf_prog_fops; 1854 extern const struct file_operations bpf_iter_fops; 1855 1856 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 1857 extern const struct bpf_prog_ops _name ## _prog_ops; \ 1858 extern const struct bpf_verifier_ops _name ## _verifier_ops; 1859 #define BPF_MAP_TYPE(_id, _ops) \ 1860 extern const struct bpf_map_ops _ops; 1861 #define BPF_LINK_TYPE(_id, _name) 1862 #include <linux/bpf_types.h> 1863 #undef BPF_PROG_TYPE 1864 #undef BPF_MAP_TYPE 1865 #undef BPF_LINK_TYPE 1866 1867 extern const struct bpf_prog_ops bpf_offload_prog_ops; 1868 extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops; 1869 extern const struct bpf_verifier_ops xdp_analyzer_ops; 1870 1871 struct bpf_prog *bpf_prog_get(u32 ufd); 1872 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, 1873 bool attach_drv); 1874 void bpf_prog_add(struct bpf_prog *prog, int i); 1875 void bpf_prog_sub(struct bpf_prog *prog, int i); 1876 void bpf_prog_inc(struct bpf_prog *prog); 1877 struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog); 1878 void bpf_prog_put(struct bpf_prog *prog); 1879 1880 void bpf_prog_free_id(struct bpf_prog *prog); 1881 void bpf_map_free_id(struct bpf_map *map); 1882 1883 struct btf_field *btf_record_find(const struct btf_record *rec, 1884 u32 offset, enum btf_field_type type); 1885 void btf_record_free(struct btf_record *rec); 1886 void bpf_map_free_record(struct bpf_map *map); 1887 struct btf_record *btf_record_dup(const struct btf_record *rec); 1888 bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b); 1889 void bpf_obj_free_timer(const struct btf_record *rec, void *obj); 1890 void bpf_obj_free_fields(const struct btf_record *rec, void *obj); 1891 1892 struct bpf_map *bpf_map_get(u32 ufd); 1893 struct bpf_map *bpf_map_get_with_uref(u32 ufd); 1894 struct bpf_map *__bpf_map_get(struct fd f); 1895 void bpf_map_inc(struct bpf_map *map); 1896 void bpf_map_inc_with_uref(struct bpf_map *map); 1897 struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map); 1898 void bpf_map_put_with_uref(struct bpf_map *map); 1899 void bpf_map_put(struct bpf_map *map); 1900 void *bpf_map_area_alloc(u64 size, int numa_node); 1901 void *bpf_map_area_mmapable_alloc(u64 size, int numa_node); 1902 void bpf_map_area_free(void *base); 1903 bool bpf_map_write_active(const struct bpf_map *map); 1904 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); 1905 int generic_map_lookup_batch(struct bpf_map *map, 1906 const union bpf_attr *attr, 1907 union bpf_attr __user *uattr); 1908 int generic_map_update_batch(struct bpf_map *map, struct file *map_file, 1909 const union bpf_attr *attr, 1910 union bpf_attr __user *uattr); 1911 int generic_map_delete_batch(struct bpf_map *map, 1912 const union bpf_attr *attr, 1913 union bpf_attr __user *uattr); 1914 struct bpf_map *bpf_map_get_curr_or_next(u32 *id); 1915 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id); 1916 1917 #ifdef CONFIG_MEMCG_KMEM 1918 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags, 1919 int node); 1920 void *bpf_map_kzalloc(const struct bpf_map 
*map, size_t size, gfp_t flags);
1921 void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
1922 		       gfp_t flags);
1923 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
1924 				    size_t align, gfp_t flags);
1925 #else
1926 static inline void *
1927 bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
1928 		     int node)
1929 {
1930 	return kmalloc_node(size, flags, node);
1931 }
1932 
1933 static inline void *
1934 bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
1935 {
1936 	return kzalloc(size, flags);
1937 }
1938 
1939 static inline void *
1940 bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, gfp_t flags)
1941 {
1942 	return kvcalloc(n, size, flags);
1943 }
1944 
1945 static inline void __percpu *
1946 bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
1947 		     gfp_t flags)
1948 {
1949 	return __alloc_percpu_gfp(size, align, flags);
1950 }
1951 #endif
1952 
1953 extern int sysctl_unprivileged_bpf_disabled;
1954 
1955 static inline bool bpf_allow_ptr_leaks(void)
1956 {
1957 	return perfmon_capable();
1958 }
1959 
1960 static inline bool bpf_allow_uninit_stack(void)
1961 {
1962 	return perfmon_capable();
1963 }
1964 
1965 static inline bool bpf_bypass_spec_v1(void)
1966 {
1967 	return perfmon_capable();
1968 }
1969 
1970 static inline bool bpf_bypass_spec_v4(void)
1971 {
1972 	return perfmon_capable();
1973 }
1974 
1975 int bpf_map_new_fd(struct bpf_map *map, int flags);
1976 int bpf_prog_new_fd(struct bpf_prog *prog);
1977 
1978 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
1979 		   const struct bpf_link_ops *ops, struct bpf_prog *prog);
1980 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
1981 int bpf_link_settle(struct bpf_link_primer *primer);
1982 void bpf_link_cleanup(struct bpf_link_primer *primer);
1983 void bpf_link_inc(struct bpf_link *link);
1984 void bpf_link_put(struct bpf_link *link);
1985 int bpf_link_new_fd(struct bpf_link *link);
1986 struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
1987 struct bpf_link *bpf_link_get_from_fd(u32 ufd);
1988 struct bpf_link *bpf_link_get_curr_or_next(u32 *id);
1989 
1990 int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
1991 int bpf_obj_get_user(const char __user *pathname, int flags);
1992 
1993 #define BPF_ITER_FUNC_PREFIX "bpf_iter_"
1994 #define DEFINE_BPF_ITER_FUNC(target, args...)			\
1995 	extern int bpf_iter_ ## target(args);			\
1996 	int __init bpf_iter_ ## target(args) { return 0; }
1997 
1998 /*
1999  * The task type of a BPF task iterator.
2000  *
2001  * BPF task iterators can be parameterized to visit only a subset of
2002  * the tasks in the system.
2003  *
2004  * BPF_TASK_ITER_ALL (default)
2005  *	Iterate over resources of every task.
2006  *
2007  * BPF_TASK_ITER_TID
2008  *	Iterate over resources of a single task/tid.
2009  *
2010  * BPF_TASK_ITER_TGID
2011  *	Iterate over resources of every task of a process / task group.
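 *
 * For example, an iterator parameterized with BPF_TASK_ITER_TID and
 * pid == 1234 visits only the task whose thread id is 1234, while
 * BPF_TASK_ITER_TGID with the same pid visits every task of that
 * process.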
2012 */ 2013 enum bpf_iter_task_type { 2014 BPF_TASK_ITER_ALL = 0, 2015 BPF_TASK_ITER_TID, 2016 BPF_TASK_ITER_TGID, 2017 }; 2018 2019 struct bpf_iter_aux_info { 2020 /* for map_elem iter */ 2021 struct bpf_map *map; 2022 2023 /* for cgroup iter */ 2024 struct { 2025 struct cgroup *start; /* starting cgroup */ 2026 enum bpf_cgroup_iter_order order; 2027 } cgroup; 2028 struct { 2029 enum bpf_iter_task_type type; 2030 u32 pid; 2031 } task; 2032 }; 2033 2034 typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog, 2035 union bpf_iter_link_info *linfo, 2036 struct bpf_iter_aux_info *aux); 2037 typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux); 2038 typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux, 2039 struct seq_file *seq); 2040 typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux, 2041 struct bpf_link_info *info); 2042 typedef const struct bpf_func_proto * 2043 (*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id, 2044 const struct bpf_prog *prog); 2045 2046 enum bpf_iter_feature { 2047 BPF_ITER_RESCHED = BIT(0), 2048 }; 2049 2050 #define BPF_ITER_CTX_ARG_MAX 2 2051 struct bpf_iter_reg { 2052 const char *target; 2053 bpf_iter_attach_target_t attach_target; 2054 bpf_iter_detach_target_t detach_target; 2055 bpf_iter_show_fdinfo_t show_fdinfo; 2056 bpf_iter_fill_link_info_t fill_link_info; 2057 bpf_iter_get_func_proto_t get_func_proto; 2058 u32 ctx_arg_info_size; 2059 u32 feature; 2060 struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX]; 2061 const struct bpf_iter_seq_info *seq_info; 2062 }; 2063 2064 struct bpf_iter_meta { 2065 __bpf_md_ptr(struct seq_file *, seq); 2066 u64 session_id; 2067 u64 seq_num; 2068 }; 2069 2070 struct bpf_iter__bpf_map_elem { 2071 __bpf_md_ptr(struct bpf_iter_meta *, meta); 2072 __bpf_md_ptr(struct bpf_map *, map); 2073 __bpf_md_ptr(void *, key); 2074 __bpf_md_ptr(void *, value); 2075 }; 2076 2077 int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info); 2078 void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info); 2079 bool bpf_iter_prog_supported(struct bpf_prog *prog); 2080 const struct bpf_func_proto * 2081 bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog); 2082 int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog); 2083 int bpf_iter_new_fd(struct bpf_link *link); 2084 bool bpf_link_is_iter(struct bpf_link *link); 2085 struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop); 2086 int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx); 2087 void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux, 2088 struct seq_file *seq); 2089 int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux, 2090 struct bpf_link_info *info); 2091 2092 int map_set_for_each_callback_args(struct bpf_verifier_env *env, 2093 struct bpf_func_state *caller, 2094 struct bpf_func_state *callee); 2095 2096 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value); 2097 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value); 2098 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, 2099 u64 flags); 2100 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, 2101 u64 flags); 2102 2103 int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value); 2104 2105 int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, 2106 void *key, void *value, u64 map_flags); 2107 int bpf_fd_array_map_lookup_elem(struct 
bpf_map *map, void *key, u32 *value);
2108 int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
2109 				void *key, void *value, u64 map_flags);
2110 int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
2111 
2112 int bpf_get_file_flag(int flags);
2113 int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
2114 			     size_t actual_size);
2115 
2116 /* verify the correctness of an eBPF program */
2117 int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr);
2118 
2119 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
2120 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
2121 #endif
2122 
2123 struct btf *bpf_get_btf_vmlinux(void);
2124 
2125 /* Map specifics */
2126 struct xdp_frame;
2127 struct sk_buff;
2128 struct bpf_dtab_netdev;
2129 struct bpf_cpu_map_entry;
2130 
2131 void __dev_flush(void);
2132 int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
2133 		    struct net_device *dev_rx);
2134 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
2135 		    struct net_device *dev_rx);
2136 int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
2137 			  struct bpf_map *map, bool exclude_ingress);
2138 int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
2139 			     struct bpf_prog *xdp_prog);
2140 int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
2141 			   struct bpf_prog *xdp_prog, struct bpf_map *map,
2142 			   bool exclude_ingress);
2143 
2144 void __cpu_map_flush(void);
2145 int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
2146 		    struct net_device *dev_rx);
2147 int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
2148 			     struct sk_buff *skb);
2149 
2150 /* Return the map's NUMA node as specified by userspace */
2151 static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
2152 {
2153 	return (attr->map_flags & BPF_F_NUMA_NODE) ?
2154 attr->numa_node : NUMA_NO_NODE; 2155 } 2156 2157 struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type); 2158 int array_map_alloc_check(union bpf_attr *attr); 2159 2160 int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, 2161 union bpf_attr __user *uattr); 2162 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, 2163 union bpf_attr __user *uattr); 2164 int bpf_prog_test_run_tracing(struct bpf_prog *prog, 2165 const union bpf_attr *kattr, 2166 union bpf_attr __user *uattr); 2167 int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, 2168 const union bpf_attr *kattr, 2169 union bpf_attr __user *uattr); 2170 int bpf_prog_test_run_raw_tp(struct bpf_prog *prog, 2171 const union bpf_attr *kattr, 2172 union bpf_attr __user *uattr); 2173 int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, 2174 const union bpf_attr *kattr, 2175 union bpf_attr __user *uattr); 2176 bool btf_ctx_access(int off, int size, enum bpf_access_type type, 2177 const struct bpf_prog *prog, 2178 struct bpf_insn_access_aux *info); 2179 2180 static inline bool bpf_tracing_ctx_access(int off, int size, 2181 enum bpf_access_type type) 2182 { 2183 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) 2184 return false; 2185 if (type != BPF_READ) 2186 return false; 2187 if (off % size != 0) 2188 return false; 2189 return true; 2190 } 2191 2192 static inline bool bpf_tracing_btf_ctx_access(int off, int size, 2193 enum bpf_access_type type, 2194 const struct bpf_prog *prog, 2195 struct bpf_insn_access_aux *info) 2196 { 2197 if (!bpf_tracing_ctx_access(off, size, type)) 2198 return false; 2199 return btf_ctx_access(off, size, type, prog, info); 2200 } 2201 2202 int btf_struct_access(struct bpf_verifier_log *log, 2203 const struct bpf_reg_state *reg, 2204 int off, int size, enum bpf_access_type atype, 2205 u32 *next_btf_id, enum bpf_type_flag *flag); 2206 bool btf_struct_ids_match(struct bpf_verifier_log *log, 2207 const struct btf *btf, u32 id, int off, 2208 const struct btf *need_btf, u32 need_type_id, 2209 bool strict); 2210 2211 int btf_distill_func_proto(struct bpf_verifier_log *log, 2212 struct btf *btf, 2213 const struct btf_type *func_proto, 2214 const char *func_name, 2215 struct btf_func_model *m); 2216 2217 struct bpf_reg_state; 2218 int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog, 2219 struct bpf_reg_state *regs); 2220 int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog, 2221 struct bpf_reg_state *regs); 2222 int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog, 2223 struct bpf_reg_state *reg); 2224 int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog, 2225 struct btf *btf, const struct btf_type *t); 2226 2227 struct bpf_prog *bpf_prog_by_id(u32 id); 2228 struct bpf_link *bpf_link_by_id(u32 id); 2229 2230 const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id); 2231 void bpf_task_storage_free(struct task_struct *task); 2232 void bpf_cgrp_storage_free(struct cgroup *cgroup); 2233 bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog); 2234 const struct btf_func_model * 2235 bpf_jit_find_kfunc_model(const struct bpf_prog *prog, 2236 const struct bpf_insn *insn); 2237 struct bpf_core_ctx { 2238 struct bpf_verifier_log *log; 2239 const struct btf *btf; 2240 }; 2241 2242 bool btf_nested_type_is_trusted(struct bpf_verifier_log *log, 2243 const struct bpf_reg_state *reg, 2244 int off); 2245 2246 bool 
btf_type_ids_nocast_alias(struct bpf_verifier_log *log,
2247 			  const struct btf *reg_btf, u32 reg_id,
2248 			  const struct btf *arg_btf, u32 arg_id);
2249 
2250 int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
2251 		   int relo_idx, void *insn);
2252 
2253 static inline bool unprivileged_ebpf_enabled(void)
2254 {
2255 	return !sysctl_unprivileged_bpf_disabled;
2256 }
2257 
2258 /* Not all bpf prog types have a bpf_ctx.
2259  * For the prog types that do initialize the bpf_ctx,
2260  * this function can be used to decide if a kernel function
2261  * is currently being called by a bpf program.
2262  */
2263 static inline bool has_current_bpf_ctx(void)
2264 {
2265 	return !!current->bpf_ctx;
2266 }
2267 
2268 void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog);
2269 #else /* !CONFIG_BPF_SYSCALL */
2270 static inline struct bpf_prog *bpf_prog_get(u32 ufd)
2271 {
2272 	return ERR_PTR(-EOPNOTSUPP);
2273 }
2274 
2275 static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
2276 						     enum bpf_prog_type type,
2277 						     bool attach_drv)
2278 {
2279 	return ERR_PTR(-EOPNOTSUPP);
2280 }
2281 
2282 static inline void bpf_prog_add(struct bpf_prog *prog, int i)
2283 {
2284 }
2285 
2286 static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
2287 {
2288 }
2289 
2290 static inline void bpf_prog_put(struct bpf_prog *prog)
2291 {
2292 }
2293 
2294 static inline void bpf_prog_inc(struct bpf_prog *prog)
2295 {
2296 }
2297 
2298 static inline struct bpf_prog *__must_check
2299 bpf_prog_inc_not_zero(struct bpf_prog *prog)
2300 {
2301 	return ERR_PTR(-EOPNOTSUPP);
2302 }
2303 
2304 static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2305 				 const struct bpf_link_ops *ops,
2306 				 struct bpf_prog *prog)
2307 {
2308 }
2309 
2310 static inline int bpf_link_prime(struct bpf_link *link,
2311 				 struct bpf_link_primer *primer)
2312 {
2313 	return -EOPNOTSUPP;
2314 }
2315 
2316 static inline int bpf_link_settle(struct bpf_link_primer *primer)
2317 {
2318 	return -EOPNOTSUPP;
2319 }
2320 
2321 static inline void bpf_link_cleanup(struct bpf_link_primer *primer)
2322 {
2323 }
2324 
2325 static inline void bpf_link_inc(struct bpf_link *link)
2326 {
2327 }
2328 
2329 static inline void bpf_link_put(struct bpf_link *link)
2330 {
2331 }
2332 
2333 static inline int bpf_obj_get_user(const char __user *pathname, int flags)
2334 {
2335 	return -EOPNOTSUPP;
2336 }
2337 
2338 static inline void __dev_flush(void)
2339 {
2340 }
2341 
2342 struct xdp_frame;
2343 struct bpf_dtab_netdev;
2344 struct bpf_cpu_map_entry;
2345 
2346 static inline
2347 int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
2348 		    struct net_device *dev_rx)
2349 {
2350 	return 0;
2351 }
2352 
2353 static inline
2354 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
2355 		    struct net_device *dev_rx)
2356 {
2357 	return 0;
2358 }
2359 
2360 static inline
2361 int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
2362 			  struct bpf_map *map, bool exclude_ingress)
2363 {
2364 	return 0;
2365 }
2366 
2367 struct sk_buff;
2368 
2369 static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
2370 					   struct sk_buff *skb,
2371 					   struct bpf_prog *xdp_prog)
2372 {
2373 	return 0;
2374 }
2375 
2376 static inline
2377 int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
2378 			   struct bpf_prog *xdp_prog, struct bpf_map *map,
2379 			   bool exclude_ingress)
2380 {
2381 	return 0;
2382 }
2383 
2384 static inline void __cpu_map_flush(void)
2385 {
2386 }
2387 
2388 static inline int cpu_map_enqueue(struct
bpf_cpu_map_entry *rcpu, 2389 struct xdp_frame *xdpf, 2390 struct net_device *dev_rx) 2391 { 2392 return 0; 2393 } 2394 2395 static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu, 2396 struct sk_buff *skb) 2397 { 2398 return -EOPNOTSUPP; 2399 } 2400 2401 static inline struct bpf_prog *bpf_prog_get_type_path(const char *name, 2402 enum bpf_prog_type type) 2403 { 2404 return ERR_PTR(-EOPNOTSUPP); 2405 } 2406 2407 static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog, 2408 const union bpf_attr *kattr, 2409 union bpf_attr __user *uattr) 2410 { 2411 return -ENOTSUPP; 2412 } 2413 2414 static inline int bpf_prog_test_run_skb(struct bpf_prog *prog, 2415 const union bpf_attr *kattr, 2416 union bpf_attr __user *uattr) 2417 { 2418 return -ENOTSUPP; 2419 } 2420 2421 static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog, 2422 const union bpf_attr *kattr, 2423 union bpf_attr __user *uattr) 2424 { 2425 return -ENOTSUPP; 2426 } 2427 2428 static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, 2429 const union bpf_attr *kattr, 2430 union bpf_attr __user *uattr) 2431 { 2432 return -ENOTSUPP; 2433 } 2434 2435 static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, 2436 const union bpf_attr *kattr, 2437 union bpf_attr __user *uattr) 2438 { 2439 return -ENOTSUPP; 2440 } 2441 2442 static inline void bpf_map_put(struct bpf_map *map) 2443 { 2444 } 2445 2446 static inline struct bpf_prog *bpf_prog_by_id(u32 id) 2447 { 2448 return ERR_PTR(-ENOTSUPP); 2449 } 2450 2451 static inline int btf_struct_access(struct bpf_verifier_log *log, 2452 const struct bpf_reg_state *reg, 2453 int off, int size, enum bpf_access_type atype, 2454 u32 *next_btf_id, enum bpf_type_flag *flag) 2455 { 2456 return -EACCES; 2457 } 2458 2459 static inline const struct bpf_func_proto * 2460 bpf_base_func_proto(enum bpf_func_id func_id) 2461 { 2462 return NULL; 2463 } 2464 2465 static inline void bpf_task_storage_free(struct task_struct *task) 2466 { 2467 } 2468 2469 static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog) 2470 { 2471 return false; 2472 } 2473 2474 static inline const struct btf_func_model * 2475 bpf_jit_find_kfunc_model(const struct bpf_prog *prog, 2476 const struct bpf_insn *insn) 2477 { 2478 return NULL; 2479 } 2480 2481 static inline bool unprivileged_ebpf_enabled(void) 2482 { 2483 return false; 2484 } 2485 2486 static inline bool has_current_bpf_ctx(void) 2487 { 2488 return false; 2489 } 2490 2491 static inline void bpf_prog_inc_misses_counter(struct bpf_prog *prog) 2492 { 2493 } 2494 2495 static inline void bpf_cgrp_storage_free(struct cgroup *cgroup) 2496 { 2497 } 2498 #endif /* CONFIG_BPF_SYSCALL */ 2499 2500 void __bpf_free_used_btfs(struct bpf_prog_aux *aux, 2501 struct btf_mod_pair *used_btfs, u32 len); 2502 2503 static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, 2504 enum bpf_prog_type type) 2505 { 2506 return bpf_prog_get_type_dev(ufd, type, false); 2507 } 2508 2509 void __bpf_free_used_maps(struct bpf_prog_aux *aux, 2510 struct bpf_map **used_maps, u32 len); 2511 2512 bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool); 2513 2514 int bpf_prog_offload_compile(struct bpf_prog *prog); 2515 void bpf_prog_dev_bound_destroy(struct bpf_prog *prog); 2516 int bpf_prog_offload_info_fill(struct bpf_prog_info *info, 2517 struct bpf_prog *prog); 2518 2519 int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map); 2520 2521 int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value); 
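/* The bpf_map_offload_*() helpers above and below back the syscall-side
 * map operations for maps whose ops is bpf_map_offload_ops (cf.
 * bpf_map_is_offloaded() further down), forwarding each request to the
 * device the map is bound to.
 */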
2522 int bpf_map_offload_update_elem(struct bpf_map *map,
2523 				void *key, void *value, u64 flags);
2524 int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
2525 int bpf_map_offload_get_next_key(struct bpf_map *map,
2526 				 void *key, void *next_key);
2527 
2528 bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
2529 
2530 struct bpf_offload_dev *
2531 bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
2532 void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
2533 void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
2534 int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
2535 				    struct net_device *netdev);
2536 void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
2537 				       struct net_device *netdev);
2538 bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
2539 
2540 void unpriv_ebpf_notify(int new_state);
2541 
2542 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
2543 int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
2544 			      struct bpf_prog_aux *prog_aux);
2545 void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id);
2546 int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr);
2547 int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog);
2548 void bpf_dev_bound_netdev_unregister(struct net_device *dev);
2549 
2550 static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
2551 {
2552 	return aux->dev_bound;
2553 }
2554 
2555 static inline bool bpf_prog_is_offloaded(const struct bpf_prog_aux *aux)
2556 {
2557 	return aux->offload_requested;
2558 }
2559 
2560 bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs);
2561 
2562 static inline bool bpf_map_is_offloaded(struct bpf_map *map)
2563 {
2564 	return unlikely(map->ops == &bpf_map_offload_ops);
2565 }
2566 
2567 struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
2568 void bpf_map_offload_map_free(struct bpf_map *map);
2569 int bpf_prog_test_run_syscall(struct bpf_prog *prog,
2570 			      const union bpf_attr *kattr,
2571 			      union bpf_attr __user *uattr);
2572 
2573 int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
2574 int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
2575 int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
2576 int sock_map_bpf_prog_query(const union bpf_attr *attr,
2577 			    union bpf_attr __user *uattr);
2578 
2579 void sock_map_unhash(struct sock *sk);
2580 void sock_map_destroy(struct sock *sk);
2581 void sock_map_close(struct sock *sk, long timeout);
2582 #else
2583 static inline int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
2584 					    struct bpf_prog_aux *prog_aux)
2585 {
2586 	return -EOPNOTSUPP;
2587 }
2588 
2589 static inline void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog,
2590 						u32 func_id)
2591 {
2592 	return NULL;
2593 }
2594 
2595 static inline int bpf_prog_dev_bound_init(struct bpf_prog *prog,
2596 					  union bpf_attr *attr)
2597 {
2598 	return -EOPNOTSUPP;
2599 }
2600 
2601 static inline int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog,
2602 					     struct bpf_prog *old_prog)
2603 {
2604 	return -EOPNOTSUPP;
2605 }
2606 
2607 static inline void bpf_dev_bound_netdev_unregister(struct net_device *dev)
2608 {
2609 }
2610 
2611 static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
2612 {
2613 	return false;
2614 }
2615 
2616 static inline bool bpf_prog_is_offloaded(const struct
bpf_prog_aux *aux) 2617 { 2618 return false; 2619 } 2620 2621 static inline bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs) 2622 { 2623 return false; 2624 } 2625 2626 static inline bool bpf_map_is_offloaded(struct bpf_map *map) 2627 { 2628 return false; 2629 } 2630 2631 static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) 2632 { 2633 return ERR_PTR(-EOPNOTSUPP); 2634 } 2635 2636 static inline void bpf_map_offload_map_free(struct bpf_map *map) 2637 { 2638 } 2639 2640 static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog, 2641 const union bpf_attr *kattr, 2642 union bpf_attr __user *uattr) 2643 { 2644 return -ENOTSUPP; 2645 } 2646 2647 #ifdef CONFIG_BPF_SYSCALL 2648 static inline int sock_map_get_from_fd(const union bpf_attr *attr, 2649 struct bpf_prog *prog) 2650 { 2651 return -EINVAL; 2652 } 2653 2654 static inline int sock_map_prog_detach(const union bpf_attr *attr, 2655 enum bpf_prog_type ptype) 2656 { 2657 return -EOPNOTSUPP; 2658 } 2659 2660 static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, 2661 u64 flags) 2662 { 2663 return -EOPNOTSUPP; 2664 } 2665 2666 static inline int sock_map_bpf_prog_query(const union bpf_attr *attr, 2667 union bpf_attr __user *uattr) 2668 { 2669 return -EINVAL; 2670 } 2671 #endif /* CONFIG_BPF_SYSCALL */ 2672 #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ 2673 2674 #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) 2675 void bpf_sk_reuseport_detach(struct sock *sk); 2676 int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, 2677 void *value); 2678 int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, 2679 void *value, u64 map_flags); 2680 #else 2681 static inline void bpf_sk_reuseport_detach(struct sock *sk) 2682 { 2683 } 2684 2685 #ifdef CONFIG_BPF_SYSCALL 2686 static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, 2687 void *key, void *value) 2688 { 2689 return -EOPNOTSUPP; 2690 } 2691 2692 static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, 2693 void *key, void *value, 2694 u64 map_flags) 2695 { 2696 return -EOPNOTSUPP; 2697 } 2698 #endif /* CONFIG_BPF_SYSCALL */ 2699 #endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */ 2700 2701 /* verifier prototypes for helper functions called from eBPF programs */ 2702 extern const struct bpf_func_proto bpf_map_lookup_elem_proto; 2703 extern const struct bpf_func_proto bpf_map_update_elem_proto; 2704 extern const struct bpf_func_proto bpf_map_delete_elem_proto; 2705 extern const struct bpf_func_proto bpf_map_push_elem_proto; 2706 extern const struct bpf_func_proto bpf_map_pop_elem_proto; 2707 extern const struct bpf_func_proto bpf_map_peek_elem_proto; 2708 extern const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto; 2709 2710 extern const struct bpf_func_proto bpf_get_prandom_u32_proto; 2711 extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; 2712 extern const struct bpf_func_proto bpf_get_numa_node_id_proto; 2713 extern const struct bpf_func_proto bpf_tail_call_proto; 2714 extern const struct bpf_func_proto bpf_ktime_get_ns_proto; 2715 extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto; 2716 extern const struct bpf_func_proto bpf_ktime_get_tai_ns_proto; 2717 extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto; 2718 extern const struct bpf_func_proto bpf_get_current_uid_gid_proto; 2719 extern const struct bpf_func_proto bpf_get_current_comm_proto; 2720 extern const struct 
bpf_func_proto bpf_get_stackid_proto; 2721 extern const struct bpf_func_proto bpf_get_stack_proto; 2722 extern const struct bpf_func_proto bpf_get_task_stack_proto; 2723 extern const struct bpf_func_proto bpf_get_stackid_proto_pe; 2724 extern const struct bpf_func_proto bpf_get_stack_proto_pe; 2725 extern const struct bpf_func_proto bpf_sock_map_update_proto; 2726 extern const struct bpf_func_proto bpf_sock_hash_update_proto; 2727 extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; 2728 extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto; 2729 extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto; 2730 extern const struct bpf_func_proto bpf_msg_redirect_hash_proto; 2731 extern const struct bpf_func_proto bpf_msg_redirect_map_proto; 2732 extern const struct bpf_func_proto bpf_sk_redirect_hash_proto; 2733 extern const struct bpf_func_proto bpf_sk_redirect_map_proto; 2734 extern const struct bpf_func_proto bpf_spin_lock_proto; 2735 extern const struct bpf_func_proto bpf_spin_unlock_proto; 2736 extern const struct bpf_func_proto bpf_get_local_storage_proto; 2737 extern const struct bpf_func_proto bpf_strtol_proto; 2738 extern const struct bpf_func_proto bpf_strtoul_proto; 2739 extern const struct bpf_func_proto bpf_tcp_sock_proto; 2740 extern const struct bpf_func_proto bpf_jiffies64_proto; 2741 extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto; 2742 extern const struct bpf_func_proto bpf_event_output_data_proto; 2743 extern const struct bpf_func_proto bpf_ringbuf_output_proto; 2744 extern const struct bpf_func_proto bpf_ringbuf_reserve_proto; 2745 extern const struct bpf_func_proto bpf_ringbuf_submit_proto; 2746 extern const struct bpf_func_proto bpf_ringbuf_discard_proto; 2747 extern const struct bpf_func_proto bpf_ringbuf_query_proto; 2748 extern const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto; 2749 extern const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto; 2750 extern const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto; 2751 extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto; 2752 extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto; 2753 extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto; 2754 extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto; 2755 extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto; 2756 extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto; 2757 extern const struct bpf_func_proto bpf_skc_to_mptcp_sock_proto; 2758 extern const struct bpf_func_proto bpf_copy_from_user_proto; 2759 extern const struct bpf_func_proto bpf_snprintf_btf_proto; 2760 extern const struct bpf_func_proto bpf_snprintf_proto; 2761 extern const struct bpf_func_proto bpf_per_cpu_ptr_proto; 2762 extern const struct bpf_func_proto bpf_this_cpu_ptr_proto; 2763 extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto; 2764 extern const struct bpf_func_proto bpf_sock_from_file_proto; 2765 extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto; 2766 extern const struct bpf_func_proto bpf_task_storage_get_recur_proto; 2767 extern const struct bpf_func_proto bpf_task_storage_get_proto; 2768 extern const struct bpf_func_proto bpf_task_storage_delete_recur_proto; 2769 extern const struct bpf_func_proto bpf_task_storage_delete_proto; 2770 extern const struct bpf_func_proto bpf_for_each_map_elem_proto; 2771 extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto; 2772 extern const struct bpf_func_proto 
bpf_sk_setsockopt_proto; 2773 extern const struct bpf_func_proto bpf_sk_getsockopt_proto; 2774 extern const struct bpf_func_proto bpf_unlocked_sk_setsockopt_proto; 2775 extern const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto; 2776 extern const struct bpf_func_proto bpf_find_vma_proto; 2777 extern const struct bpf_func_proto bpf_loop_proto; 2778 extern const struct bpf_func_proto bpf_copy_from_user_task_proto; 2779 extern const struct bpf_func_proto bpf_set_retval_proto; 2780 extern const struct bpf_func_proto bpf_get_retval_proto; 2781 extern const struct bpf_func_proto bpf_user_ringbuf_drain_proto; 2782 extern const struct bpf_func_proto bpf_cgrp_storage_get_proto; 2783 extern const struct bpf_func_proto bpf_cgrp_storage_delete_proto; 2784 2785 const struct bpf_func_proto *tracing_prog_func_proto( 2786 enum bpf_func_id func_id, const struct bpf_prog *prog); 2787 2788 /* Shared helpers among cBPF and eBPF. */ 2789 void bpf_user_rnd_init_once(void); 2790 u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 2791 u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 2792 2793 #if defined(CONFIG_NET) 2794 bool bpf_sock_common_is_valid_access(int off, int size, 2795 enum bpf_access_type type, 2796 struct bpf_insn_access_aux *info); 2797 bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, 2798 struct bpf_insn_access_aux *info); 2799 u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, 2800 const struct bpf_insn *si, 2801 struct bpf_insn *insn_buf, 2802 struct bpf_prog *prog, 2803 u32 *target_size); 2804 #else 2805 static inline bool bpf_sock_common_is_valid_access(int off, int size, 2806 enum bpf_access_type type, 2807 struct bpf_insn_access_aux *info) 2808 { 2809 return false; 2810 } 2811 static inline bool bpf_sock_is_valid_access(int off, int size, 2812 enum bpf_access_type type, 2813 struct bpf_insn_access_aux *info) 2814 { 2815 return false; 2816 } 2817 static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, 2818 const struct bpf_insn *si, 2819 struct bpf_insn *insn_buf, 2820 struct bpf_prog *prog, 2821 u32 *target_size) 2822 { 2823 return 0; 2824 } 2825 #endif 2826 2827 #ifdef CONFIG_INET 2828 struct sk_reuseport_kern { 2829 struct sk_buff *skb; 2830 struct sock *sk; 2831 struct sock *selected_sk; 2832 struct sock *migrating_sk; 2833 void *data_end; 2834 u32 hash; 2835 u32 reuseport_id; 2836 bool bind_inany; 2837 }; 2838 bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, 2839 struct bpf_insn_access_aux *info); 2840 2841 u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, 2842 const struct bpf_insn *si, 2843 struct bpf_insn *insn_buf, 2844 struct bpf_prog *prog, 2845 u32 *target_size); 2846 2847 bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type, 2848 struct bpf_insn_access_aux *info); 2849 2850 u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, 2851 const struct bpf_insn *si, 2852 struct bpf_insn *insn_buf, 2853 struct bpf_prog *prog, 2854 u32 *target_size); 2855 #else 2856 static inline bool bpf_tcp_sock_is_valid_access(int off, int size, 2857 enum bpf_access_type type, 2858 struct bpf_insn_access_aux *info) 2859 { 2860 return false; 2861 } 2862 2863 static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, 2864 const struct bpf_insn *si, 2865 struct bpf_insn *insn_buf, 2866 struct bpf_prog *prog, 2867 u32 *target_size) 2868 { 2869 return 0; 2870 } 2871 static inline bool bpf_xdp_sock_is_valid_access(int off, int 
size,
2872 						enum bpf_access_type type,
2873 						struct bpf_insn_access_aux *info)
2874 {
2875 	return false;
2876 }
2877 
2878 static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
2879 						  const struct bpf_insn *si,
2880 						  struct bpf_insn *insn_buf,
2881 						  struct bpf_prog *prog,
2882 						  u32 *target_size)
2883 {
2884 	return 0;
2885 }
2886 #endif /* CONFIG_INET */
2887 
2888 enum bpf_text_poke_type {
2889 	BPF_MOD_CALL,
2890 	BPF_MOD_JUMP,
2891 };
2892 
2893 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2894 		       void *addr1, void *addr2);
2895 
2896 void *bpf_arch_text_copy(void *dst, void *src, size_t len);
2897 int bpf_arch_text_invalidate(void *dst, size_t len);
2898 
2899 struct btf_id_set;
2900 bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
2901 
2902 #define MAX_BPRINTF_VARARGS 12
2903 #define MAX_BPRINTF_BUF 1024
2904 
2905 struct bpf_bprintf_data {
2906 	u32 *bin_args;
2907 	char *buf;
2908 	bool get_bin_args;
2909 	bool get_buf;
2910 };
2911 
2912 int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
2913 			u32 num_args, struct bpf_bprintf_data *data);
2914 void bpf_bprintf_cleanup(struct bpf_bprintf_data *data);
2915 
2916 /* the implementation of the opaque uapi struct bpf_dynptr */
2917 struct bpf_dynptr_kern {
2918 	void *data;
2919 	/* Size represents the number of usable bytes of dynptr data.
2920 	 * If, for example, the offset is 4 for a local dynptr whose data is
2921 	 * of type u64, the number of usable bytes is 4.
2922 	 *
2923 	 * The upper 8 bits are reserved for metadata, giving the layout:
2924 	 * Bits 0 - 23 = size
2925 	 * Bits 24 - 30 = dynptr type
2926 	 * Bit 31 = whether the dynptr is read-only
2927 	 */
2928 	u32 size;
2929 	u32 offset;
2930 } __aligned(8);
2931 
2932 enum bpf_dynptr_type {
2933 	BPF_DYNPTR_TYPE_INVALID,
2934 	/* Points to memory that is local to the bpf program */
2935 	BPF_DYNPTR_TYPE_LOCAL,
2936 	/* Underlying data is a kernel-produced ringbuf record */
2937 	BPF_DYNPTR_TYPE_RINGBUF,
2938 };
2939 
2940 void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
2941 		     enum bpf_dynptr_type type, u32 offset, u32 size);
2942 void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
2943 int bpf_dynptr_check_size(u32 size);
2944 u32 bpf_dynptr_get_size(const struct bpf_dynptr_kern *ptr);
2945 
2946 #ifdef CONFIG_BPF_LSM
2947 void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype);
2948 void bpf_cgroup_atype_put(int cgroup_atype);
2949 #else
2950 static inline void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype) {}
2951 static inline void bpf_cgroup_atype_put(int cgroup_atype) {}
2952 #endif /* CONFIG_BPF_LSM */
2953 
2954 struct key;
2955 
2956 #ifdef CONFIG_KEYS
2957 struct bpf_key {
2958 	struct key *key;
2959 	bool has_ref;
2960 };
2961 #endif /* CONFIG_KEYS */
2962 
2963 static inline bool type_is_alloc(u32 type)
2964 {
2965 	return type & MEM_ALLOC;
2966 }
2967 
2968 static inline gfp_t bpf_memcg_flags(gfp_t flags)
2969 {
2970 	if (memcg_bpf_enabled())
2971 		return flags | __GFP_ACCOUNT;
2972 	return flags;
2973 }
2974 
2975 #endif /* _LINUX_BPF_H */
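/*
 * Illustrative appendix (not part of the header proper): a minimal sketch
 * of how the bpf_dynptr_kern 'size' word can be unpacked, assuming only
 * the bit layout documented in the struct above (bits 0-23 = size,
 * bits 24-30 = dynptr type, bit 31 = read-only). All EX_* names are
 * hypothetical and do not exist in the kernel.
 */
#define EX_DYNPTR_SIZE_MASK	GENMASK(23, 0)
#define EX_DYNPTR_TYPE_MASK	GENMASK(30, 24)
#define EX_DYNPTR_RDONLY_BIT	BIT(31)

/* Number of usable bytes, with the metadata bits masked off. */
static inline u32 ex_dynptr_size(const struct bpf_dynptr_kern *ptr)
{
	return ptr->size & EX_DYNPTR_SIZE_MASK;
}

/* Extract the enum bpf_dynptr_type stored in bits 24-30. */
static inline enum bpf_dynptr_type ex_dynptr_type(const struct bpf_dynptr_kern *ptr)
{
	return (ptr->size & EX_DYNPTR_TYPE_MASK) >> 24;
}

/* Bit 31 marks the dynptr as read-only. */
static inline bool ex_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
{
	return ptr->size & EX_DYNPTR_RDONLY_BIT;
}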