// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf-cgroup.h>
#include <linux/cgroup.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/poison.h>
#include <linux/proc_ns.h>
#include <linux/sched/task.h>
#include <linux/security.h>
#include <linux/btf_ids.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/kasan.h>

#include "../../lib/kstrtox.h"

/* If a kernel subsystem allows eBPF programs to call this function, it should
 * return bpf_map_lookup_elem_proto from its own verifier_ops->get_func_proto()
 * callback, so that the verifier can properly check the arguments.
 *
 * Different map implementations rely on RCU in the map methods
 * lookup/update/delete, therefore eBPF programs must run under an RCU lock
 * if a program is allowed to access maps, so check rcu_read_lock_held() or
 * rcu_read_lock_trace_held() in all three functions.
 */
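/* A minimal sketch (illustrative, not from this file) of a subsystem
 * get_func_proto() callback exposing the map helpers below:
 *
 *	static const struct bpf_func_proto *
 *	my_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			return &bpf_map_lookup_elem_proto;
 *		default:
 *			return bpf_base_func_proto(func_id, prog);
 *		}
 *	}
 */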
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func = bpf_map_lookup_elem,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func = bpf_map_update_elem,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
	.arg3_type = ARG_PTR_TO_MAP_VALUE,
	.arg4_type = ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func = bpf_map_delete_elem,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func = bpf_map_push_elem,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_VALUE,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func = bpf_map_pop_elem,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func = bpf_map_peek_elem,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT,
};

BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
}

const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = {
	.func = bpf_map_lookup_percpu_elem,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
	.arg3_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func = bpf_user_rnd_u32,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func = bpf_get_smp_processor_id,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func = bpf_get_numa_node_id,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func = bpf_ktime_get_ns,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_boot_ns)
{
	/* NMI safe access to clock boottime */
	return ktime_get_boot_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
	.func = bpf_ktime_get_boot_ns,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_coarse_ns)
{
	return ktime_get_coarse_ns();
}

const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
	.func = bpf_ktime_get_coarse_ns,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_tai_ns)
{
	/* NMI safe access to clock tai */
	return ktime_get_tai_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_tai_ns_proto = {
	.func = bpf_ktime_get_tai_ns,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func = bpf_get_current_pid_tgid,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func = bpf_get_current_uid_gid,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	/* Verifier guarantees that size > 0 */
	strscpy_pad(buf, task->comm, size);
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func = bpf_get_current_comm,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE,
};

#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	preempt_disable();
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
	preempt_enable();
}

#else

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	__bpf_spin_lock(lock);
	__this_cpu_write(irqsave_flags, flags);
}

NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_lock_irqsave(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
	.func = bpf_spin_lock,
	.gpl_only = false,
	.ret_type = RET_VOID,
	.arg1_type = ARG_PTR_TO_SPIN_LOCK,
	.arg1_btf_id = BPF_PTR_POISON,
};

static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	flags = __this_cpu_read(irqsave_flags);
	__bpf_spin_unlock(lock);
	local_irq_restore(flags);
}

NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_unlock_irqrestore(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
	.func = bpf_spin_unlock,
	.gpl_only = false,
	.ret_type = RET_VOID,
	.arg1_type = ARG_PTR_TO_SPIN_LOCK,
	.arg1_btf_id = BPF_PTR_POISON,
};

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->record->spin_lock_off;
	else
		lock = dst + map->record->spin_lock_off;
	preempt_disable();
	__bpf_spin_lock_irqsave(lock);
	copy_map_value(map, dst, src);
	__bpf_spin_unlock_irqrestore(lock);
	preempt_enable();
}

BPF_CALL_0(bpf_jiffies64)
{
	return get_jiffies_64();
}

const struct bpf_func_proto bpf_jiffies64_proto = {
	.func = bpf_jiffies64,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	cgrp_id = cgroup_id(cgrp);
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func = bpf_get_current_cgroup_id,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{
	struct cgroup *cgrp;
	struct cgroup *ancestor;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ancestor = cgroup_ancestor(cgrp, ancestor_level);
	cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
	.func = bpf_get_current_ancestor_cgroup_id,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};
#endif /* CONFIG_CGROUPS */

#define BPF_STRTOX_BASE_MASK 0x1F
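/* Summary of the checks in __bpf_strtoull() below: the low 5 bits of the
 * "flags" argument to bpf_strtol()/bpf_strtoul() carry the numeric base and
 * must be 0, 8, 10 or 16; base 0 lets _parse_integer_fixup_radix()
 * auto-detect the radix ("0x" prefix -> 16, leading "0" -> 8, else 10).
 * All other flag bits must be zero.
 */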

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{
	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
	const char *cur_buf = buf;
	size_t cur_len = buf_len;
	unsigned int consumed;
	size_t val_len;
	char str[64];

	if (!buf || !buf_len || !res || !is_negative)
		return -EINVAL;

	if (base != 0 && base != 8 && base != 10 && base != 16)
		return -EINVAL;

	if (flags & ~BPF_STRTOX_BASE_MASK)
		return -EINVAL;

	while (cur_buf < buf + buf_len && isspace(*cur_buf))
		++cur_buf;

	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
	if (*is_negative)
		++cur_buf;

	consumed = cur_buf - buf;
	cur_len -= consumed;
	if (!cur_len)
		return -EINVAL;

	cur_len = min(cur_len, sizeof(str) - 1);
	memcpy(str, cur_buf, cur_len);
	str[cur_len] = '\0';
	cur_buf = str;

	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);

	if (val_len & KSTRTOX_OVERFLOW)
		return -ERANGE;

	if (val_len == 0)
		return -EINVAL;

	cur_buf += val_len;
	consumed += cur_buf - str;

	return consumed;
}

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative) {
		if ((long long)-_res > 0)
			return -ERANGE;
		*res = -_res;
	} else {
		if ((long long)_res < 0)
			return -ERANGE;
		*res = _res;
	}
	return err;
}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   long *, res)
{
	long long _res;
	int err;

	err = __bpf_strtoll(buf, buf_len, flags, &_res);
	if (err < 0)
		return err;
	if (_res != (long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
	.func = bpf_strtol,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type = ARG_CONST_SIZE,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_LONG,
};

BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   unsigned long *, res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative)
		return -EINVAL;
	if (_res != (unsigned long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
	.func = bpf_strtoul,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type = ARG_CONST_SIZE,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_LONG,
};

BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
{
	return strncmp(s1, s2, s1_sz);
}

static const struct bpf_func_proto bpf_strncmp_proto = {
	.func = bpf_strncmp,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type = ARG_CONST_SIZE,
	.arg3_type = ARG_PTR_TO_CONST_STR,
};

BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
	   struct bpf_pidns_info *, nsdata, u32, size)
{
	struct task_struct *task = current;
	struct pid_namespace *pidns;
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_pidns_info)))
		goto clear;

	if (unlikely((u64)(dev_t)dev != dev))
		goto clear;

	if (unlikely(!task))
		goto clear;

	pidns = task_active_pid_ns(task);
	if (unlikely(!pidns)) {
		err = -ENOENT;
		goto clear;
	}

	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
		goto clear;

	nsdata->pid = task_pid_nr_ns(task, pidns);
	nsdata->tgid = task_tgid_nr_ns(task, pidns);
	return 0;
clear:
	memset((void *)nsdata, 0, (size_t) size);
	return err;
}

const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
	.func = bpf_get_ns_current_pid_tgid,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_PTR_TO_UNINIT_MEM,
	.arg4_type = ARG_CONST_SIZE,
};

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func = bpf_get_raw_cpu_id,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
}

const struct bpf_func_proto bpf_event_output_data_proto = {
	.func = bpf_event_output_data,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
	   const void __user *, user_ptr)
{
	int ret = copy_from_user(dst, user_ptr, size);

	if (unlikely(ret)) {
		memset(dst, 0, size);
		ret = -EFAULT;
	}

	return ret;
}

const struct bpf_func_proto bpf_copy_from_user_proto = {
	.func = bpf_copy_from_user,
	.gpl_only = false,
	.might_sleep = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size,
	   const void __user *, user_ptr, struct task_struct *, tsk, u64, flags)
{
	int ret;

	/* flags is not used yet */
	if (unlikely(flags))
		return -EINVAL;

	if (unlikely(!size))
		return 0;

	ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0);
	if (ret == size)
		return 0;

	memset(dst, 0, size);
	/* Return -EFAULT for partial read */
	return ret < 0 ? ret : -EFAULT;
}

const struct bpf_func_proto bpf_copy_from_user_task_proto = {
	.func = bpf_copy_from_user_task,
	.gpl_only = true,
	.might_sleep = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_BTF_ID,
	.arg4_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg5_type = ARG_ANYTHING
};

BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
{
	if (cpu >= nr_cpu_ids)
		return (unsigned long)NULL;

	return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
}

const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
	.func = bpf_per_cpu_ptr,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg1_type = ARG_PTR_TO_PERCPU_BTF_ID,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{
	return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
}

const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
	.func = bpf_this_cpu_ptr,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY,
	.arg1_type = ARG_PTR_TO_PERCPU_BTF_ID,
};

static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
				 size_t bufsz)
{
	void __user *user_ptr = (__force void __user *)unsafe_ptr;

	buf[0] = 0;

	switch (fmt_ptype) {
	case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		if ((unsigned long)unsafe_ptr < TASK_SIZE)
			return strncpy_from_user_nofault(buf, user_ptr, bufsz);
		fallthrough;
#endif
	case 'k':
		return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
	case 'u':
		return strncpy_from_user_nofault(buf, user_ptr, bufsz);
	}

	return -EINVAL;
}

/* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
 * arguments representation.
 */
#define MAX_BPRINTF_BIN_ARGS 512

/* Support executing three nested bprintf helper calls on a given CPU */
#define MAX_BPRINTF_NEST_LEVEL 3
struct bpf_bprintf_buffers {
	char bin_args[MAX_BPRINTF_BIN_ARGS];
	char buf[MAX_BPRINTF_BUF];
};

static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs);
static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);

static int try_get_buffers(struct bpf_bprintf_buffers **bufs)
{
	int nest_level;

	preempt_disable();
	nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
	if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
		this_cpu_dec(bpf_bprintf_nest_level);
		preempt_enable();
		return -EBUSY;
	}
	*bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]);

	return 0;
}

void bpf_bprintf_cleanup(struct bpf_bprintf_data *data)
{
	if (!data->bin_args && !data->buf)
		return;
	if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0))
		return;
	this_cpu_dec(bpf_bprintf_nest_level);
	preempt_enable();
}

/*
 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
 *
 * Returns a negative value if fmt is an invalid format string or 0 otherwise.
 *
 * This can be used in two ways:
 * - Format string verification only: when data->get_bin_args is false
 * - Arguments preparation: in addition to the above verification, it writes in
 *   data->bin_args a binary representation of arguments usable by bstr_printf
 *   where pointers from BPF have been sanitized.
 *
 * In argument preparation mode, if 0 is returned, safe temporary buffers are
 * allocated and bpf_bprintf_cleanup should be called to free them after use.
 */
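/* A minimal usage sketch of the two-step API above (illustrative caller,
 * not from this file); bpf_snprintf() below follows the same pattern:
 *
 *	struct bpf_bprintf_data data = { .get_bin_args = true };
 *	int err = bpf_bprintf_prepare(fmt, fmt_size, raw_args, num_args, &data);
 *	if (err < 0)
 *		return err;
 *	err = bstr_printf(buf, buf_size, fmt, data.bin_args);
 *	bpf_bprintf_cleanup(&data);
 */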
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 num_args, struct bpf_bprintf_data *data)
{
	bool get_buffers = (data->get_bin_args && num_args) || data->get_buf;
	char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
	struct bpf_bprintf_buffers *buffers = NULL;
	size_t sizeof_cur_arg, sizeof_cur_ip;
	int err, i, num_spec = 0;
	u64 cur_arg;
	char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";

	fmt_end = strnchr(fmt, fmt_size, 0);
	if (!fmt_end)
		return -EINVAL;
	fmt_size = fmt_end - fmt;

	if (get_buffers && try_get_buffers(&buffers))
		return -EBUSY;

	if (data->get_bin_args) {
		if (num_args)
			tmp_buf = buffers->bin_args;
		tmp_buf_end = tmp_buf + MAX_BPRINTF_BIN_ARGS;
		data->bin_args = (u32 *)tmp_buf;
	}

	if (data->get_buf)
		data->buf = buffers->buf;

	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
			err = -EINVAL;
			goto out;
		}

		if (fmt[i] != '%')
			continue;

		if (fmt[i + 1] == '%') {
			i++;
			continue;
		}

		if (num_spec >= num_args) {
			err = -EINVAL;
			goto out;
		}

		/* The string is zero-terminated so if fmt[i] != 0, we can
		 * always access fmt[i + 1], in the worst case it will be a 0
		 */
		i++;

		/* skip optional "[0 +-][num]" width formatting field */
		while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
		       fmt[i] == ' ')
			i++;
		if (fmt[i] >= '1' && fmt[i] <= '9') {
			i++;
			while (fmt[i] >= '0' && fmt[i] <= '9')
				i++;
		}

		if (fmt[i] == 'p') {
			sizeof_cur_arg = sizeof(long);

			if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
			    fmt[i + 2] == 's') {
				fmt_ptype = fmt[i + 1];
				i += 2;
				goto fmt_str;
			}

			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
			    ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' ||
			    fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
			    fmt[i + 1] == 'S') {
				/* just kernel pointers */
				if (tmp_buf)
					cur_arg = raw_args[num_spec];
				i++;
				goto nocopy_fmt;
			}

			if (fmt[i + 1] == 'B') {
				if (tmp_buf) {
					err = snprintf(tmp_buf,
						       (tmp_buf_end - tmp_buf),
						       "%pB",
						       (void *)(long)raw_args[num_spec]);
					tmp_buf += (err + 1);
				}

				i++;
				num_spec++;
				continue;
			}

			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
			if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
			    (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
				err = -EINVAL;
				goto out;
			}

			i += 2;
			if (!tmp_buf)
				goto nocopy_fmt;

			sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
			if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
						       sizeof_cur_ip);
			if (err < 0)
				memset(cur_ip, 0, sizeof_cur_ip);

			/* hack: bstr_printf expects IP addresses to be
			 * pre-formatted as strings, ironically, the easiest way
			 * to do that is to call snprintf.
			 */
			ip_spec[2] = fmt[i - 1];
			ip_spec[3] = fmt[i];
			err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
				       ip_spec, &cur_ip);

			tmp_buf += err + 1;
			num_spec++;

			continue;
		} else if (fmt[i] == 's') {
			fmt_ptype = fmt[i];
fmt_str:
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1])) {
				err = -EINVAL;
				goto out;
			}

			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
						    fmt_ptype,
						    tmp_buf_end - tmp_buf);
			if (err < 0) {
				tmp_buf[0] = '\0';
				err = 1;
			}

			tmp_buf += err;
			num_spec++;

			continue;
		} else if (fmt[i] == 'c') {
			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			*tmp_buf = raw_args[num_spec];
			tmp_buf++;
			num_spec++;

			continue;
		}

		sizeof_cur_arg = sizeof(int);

		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long);
			i++;
		}
		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long long);
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
		    fmt[i] != 'x' && fmt[i] != 'X') {
			err = -EINVAL;
			goto out;
		}

		if (tmp_buf)
			cur_arg = raw_args[num_spec];
nocopy_fmt:
		if (tmp_buf) {
			tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
			if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
				err = -ENOSPC;
				goto out;
			}

			if (sizeof_cur_arg == 8) {
				*(u32 *)tmp_buf = *(u32 *)&cur_arg;
				*(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
			} else {
				*(u32 *)tmp_buf = (u32)(long)cur_arg;
			}
			tmp_buf += sizeof_cur_arg;
		}
		num_spec++;
	}

	err = 0;
out:
	if (err)
		bpf_bprintf_cleanup(data);
	return err;
}

BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
	   const void *, args, u32, data_len)
{
	struct bpf_bprintf_data data = {
		.get_bin_args = true,
	};
	int err, num_args;

	if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !args))
		return -EINVAL;
	num_args = data_len / 8;

	/* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
	 * can safely give an unbounded size.
	 */
	err = bpf_bprintf_prepare(fmt, UINT_MAX, args, num_args, &data);
	if (err < 0)
		return err;

	err = bstr_printf(str, str_size, fmt, data.bin_args);

	bpf_bprintf_cleanup(&data);

	return err + 1;
}

const struct bpf_func_proto bpf_snprintf_proto = {
	.func = bpf_snprintf,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM_OR_NULL,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_PTR_TO_CONST_STR,
	.arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

struct bpf_async_cb {
	struct bpf_map *map;
	struct bpf_prog *prog;
	void __rcu *callback_fn;
	void *value;
	union {
		struct rcu_head rcu;
		struct work_struct delete_work;
	};
	u64 flags;
};

/* BPF map elements can contain 'struct bpf_timer'.
 * Such a map owns all of its BPF timers.
 * 'struct bpf_timer' is allocated as part of map element allocation
 * and it's zero initialized.
 * That space is used to keep 'struct bpf_async_kern'.
 * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and
 * remembers the 'struct bpf_map *' pointer it's part of.
 * bpf_timer_set_callback() increments prog refcnt and assigns bpf callback_fn.
 * bpf_timer_start() arms the timer.
 * If the user space reference to a map goes to zero at this point,
 * the ops->map_release_uref callback is responsible for cancelling the timers,
 * freeing their memory, and decrementing prog's refcnts.
 * bpf_timer_cancel() cancels the timer and decrements prog's refcnt.
 * Inner maps can contain bpf timers as well. ops->map_release_uref is
 * freeing the timers when inner map is replaced or deleted by user space.
 */
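/* A minimal sketch of the BPF-program-side sequence (illustrative only,
 * assuming a map value "val" with a struct bpf_timer member "t" and a
 * callback "timer_cb" defined in the program):
 *
 *	bpf_timer_init(&val->t, &map, CLOCK_MONOTONIC);
 *	bpf_timer_set_callback(&val->t, timer_cb);
 *	bpf_timer_start(&val->t, 1000, 0);
 *
 * where 1000 is the timeout in nsecs and 0 is the flags argument.
 */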
struct bpf_hrtimer {
	struct bpf_async_cb cb;
	struct hrtimer timer;
	atomic_t cancelling;
};

struct bpf_work {
	struct bpf_async_cb cb;
	struct work_struct work;
	struct work_struct delete_work;
};

/* the actual struct hidden inside uapi struct bpf_timer and bpf_wq */
struct bpf_async_kern {
	union {
		struct bpf_async_cb *cb;
		struct bpf_hrtimer *timer;
		struct bpf_work *work;
	};
	/* bpf_spin_lock is used here instead of spinlock_t to make
	 * sure that it always fits into space reserved by struct bpf_timer
	 * regardless of LOCKDEP and spinlock debug flags.
	 */
	struct bpf_spin_lock lock;
} __attribute__((aligned(8)));

enum bpf_async_type {
	BPF_ASYNC_TYPE_TIMER = 0,
	BPF_ASYNC_TYPE_WQ,
};

static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);

static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
{
	struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
	struct bpf_map *map = t->cb.map;
	void *value = t->cb.value;
	bpf_callback_t callback_fn;
	void *key;
	u32 idx;

	BTF_TYPE_EMIT(struct bpf_timer);
	callback_fn = rcu_dereference_check(t->cb.callback_fn, rcu_read_lock_bh_held());
	if (!callback_fn)
		goto out;

	/* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and
	 * cannot be preempted by another bpf_timer_cb() on the same cpu.
	 * Remember the timer this callback is servicing to prevent
	 * deadlock if callback_fn() calls bpf_timer_cancel() or
	 * bpf_map_delete_elem() on the same timer.
	 */
	this_cpu_write(hrtimer_running, t);
	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
		struct bpf_array *array = container_of(map, struct bpf_array, map);

		/* compute the key */
		idx = ((char *)value - array->value) / array->elem_size;
		key = &idx;
	} else { /* hash or lru */
		key = value - round_up(map->key_size, 8);
	}

	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
	/* The verifier checked that return value is zero. */

	this_cpu_write(hrtimer_running, NULL);
out:
	return HRTIMER_NORESTART;
}

static void bpf_wq_work(struct work_struct *work)
{
	struct bpf_work *w = container_of(work, struct bpf_work, work);
	struct bpf_async_cb *cb = &w->cb;
	struct bpf_map *map = cb->map;
	bpf_callback_t callback_fn;
	void *value = cb->value;
	void *key;
	u32 idx;

	BTF_TYPE_EMIT(struct bpf_wq);

	callback_fn = READ_ONCE(cb->callback_fn);
	if (!callback_fn)
		return;

	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
		struct bpf_array *array = container_of(map, struct bpf_array, map);

		/* compute the key */
		idx = ((char *)value - array->value) / array->elem_size;
		key = &idx;
	} else { /* hash or lru */
		key = value - round_up(map->key_size, 8);
	}

	rcu_read_lock_trace();
	migrate_disable();

	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);

	migrate_enable();
	rcu_read_unlock_trace();
}

static void bpf_wq_delete_work(struct work_struct *work)
{
	struct bpf_work *w = container_of(work, struct bpf_work, delete_work);

	cancel_work_sync(&w->work);

	kfree_rcu(w, cb.rcu);
}

static void bpf_timer_delete_work(struct work_struct *work)
{
	struct bpf_hrtimer *t = container_of(work, struct bpf_hrtimer, cb.delete_work);

	/* Cancel the timer and wait for callback to complete if it was running.
	 * If hrtimer_cancel() can be safely called it's safe to call
	 * kfree_rcu(t) right after for both preallocated and non-preallocated
	 * maps. The async->cb = NULL was already done and no code path can see
	 * address 't' anymore. A timer armed for an existing bpf_hrtimer
	 * before bpf_timer_cancel_and_free() will have been cancelled.
	 */
	hrtimer_cancel(&t->timer);
	kfree_rcu(t, cb.rcu);
}

static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags,
			    enum bpf_async_type type)
{
	struct bpf_async_cb *cb;
	struct bpf_hrtimer *t;
	struct bpf_work *w;
	clockid_t clockid;
	size_t size;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;

	switch (type) {
	case BPF_ASYNC_TYPE_TIMER:
		size = sizeof(struct bpf_hrtimer);
		break;
	case BPF_ASYNC_TYPE_WQ:
		size = sizeof(struct bpf_work);
		break;
	default:
		return -EINVAL;
	}

	__bpf_spin_lock_irqsave(&async->lock);
	t = async->timer;
	if (t) {
		ret = -EBUSY;
		goto out;
	}

	/* allocate hrtimer via map_kmalloc to use memcg accounting */
	cb = bpf_map_kmalloc_node(map, size, GFP_ATOMIC, map->numa_node);
	if (!cb) {
		ret = -ENOMEM;
		goto out;
	}

	switch (type) {
	case BPF_ASYNC_TYPE_TIMER:
		clockid = flags & (MAX_CLOCKS - 1);
		t = (struct bpf_hrtimer *)cb;

		atomic_set(&t->cancelling, 0);
		INIT_WORK(&t->cb.delete_work, bpf_timer_delete_work);
		hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
		t->timer.function = bpf_timer_cb;
		cb->value = (void *)async - map->record->timer_off;
		break;
	case BPF_ASYNC_TYPE_WQ:
		w = (struct bpf_work *)cb;

		INIT_WORK(&w->work, bpf_wq_work);
		INIT_WORK(&w->delete_work, bpf_wq_delete_work);
		cb->value = (void *)async - map->record->wq_off;
		break;
	}
	cb->map = map;
	cb->prog = NULL;
	cb->flags = flags;
	rcu_assign_pointer(cb->callback_fn, NULL);

	WRITE_ONCE(async->cb, cb);
	/* Guarantee the order between async->cb and map->usercnt. So
	 * when there are concurrent uref release and bpf timer init, either
	 * bpf_timer_cancel_and_free() called by uref release reads a non-NULL
	 * timer or atomic64_read() below returns a zero usercnt.
	 */
	smp_mb();
	if (!atomic64_read(&map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs.
		 */
		WRITE_ONCE(async->cb, NULL);
		kfree(cb);
		ret = -EPERM;
	}
out:
	__bpf_spin_unlock_irqrestore(&async->lock);
	return ret;
}


BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map,
	   u64, flags)
{
	clock_t clockid = flags & (MAX_CLOCKS - 1);

	BUILD_BUG_ON(MAX_CLOCKS != 16);
	BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_timer));
	BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_timer));

	if (flags >= MAX_CLOCKS ||
	    /* similar to timerfd except _ALARM variants are not supported */
	    (clockid != CLOCK_MONOTONIC &&
	     clockid != CLOCK_REALTIME &&
	     clockid != CLOCK_BOOTTIME))
		return -EINVAL;

	return __bpf_async_init(timer, map, flags, BPF_ASYNC_TYPE_TIMER);
}

static const struct bpf_func_proto bpf_timer_init_proto = {
	.func = bpf_timer_init,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_TIMER,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};

static int __bpf_async_set_callback(struct bpf_async_kern *async, void *callback_fn,
				    struct bpf_prog_aux *aux, unsigned int flags,
				    enum bpf_async_type type)
{
	struct bpf_prog *prev, *prog = aux->prog;
	struct bpf_async_cb *cb;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	__bpf_spin_lock_irqsave(&async->lock);
	cb = async->cb;
	if (!cb) {
		ret = -EINVAL;
		goto out;
	}
	if (!atomic64_read(&cb->map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs. Otherwise timer might still be
		 * running even when bpf prog is detached and user space
		 * is gone, since map_release_uref won't ever be called.
		 */
		ret = -EPERM;
		goto out;
	}
	prev = cb->prog;
	if (prev != prog) {
		/* Bump prog refcnt once. Every bpf_timer_set_callback()
		 * can pick different callback_fn-s within the same prog.
		 */
		prog = bpf_prog_inc_not_zero(prog);
		if (IS_ERR(prog)) {
			ret = PTR_ERR(prog);
			goto out;
		}
		if (prev)
			/* Drop prev prog refcnt when swapping with new prog */
			bpf_prog_put(prev);
		cb->prog = prog;
	}
	rcu_assign_pointer(cb->callback_fn, callback_fn);
out:
	__bpf_spin_unlock_irqrestore(&async->lock);
	return ret;
}

BPF_CALL_3(bpf_timer_set_callback, struct bpf_async_kern *, timer, void *, callback_fn,
	   struct bpf_prog_aux *, aux)
{
	return __bpf_async_set_callback(timer, callback_fn, aux, 0, BPF_ASYNC_TYPE_TIMER);
}

static const struct bpf_func_proto bpf_timer_set_callback_proto = {
	.func = bpf_timer_set_callback,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_TIMER,
	.arg2_type = ARG_PTR_TO_FUNC,
};

BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, timer, u64, nsecs, u64, flags)
{
	struct bpf_hrtimer *t;
	int ret = 0;
	enum hrtimer_mode mode;

	if (in_nmi())
		return -EOPNOTSUPP;
	if (flags & ~(BPF_F_TIMER_ABS | BPF_F_TIMER_CPU_PIN))
		return -EINVAL;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t || !t->cb.prog) {
		ret = -EINVAL;
		goto out;
	}

	if (flags & BPF_F_TIMER_ABS)
		mode = HRTIMER_MODE_ABS_SOFT;
	else
		mode = HRTIMER_MODE_REL_SOFT;

	if (flags & BPF_F_TIMER_CPU_PIN)
		mode |= HRTIMER_MODE_PINNED;

	hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_start_proto = {
	.func = bpf_timer_start,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_TIMER,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_ANYTHING,
};

static void drop_prog_refcnt(struct bpf_async_cb *async)
{
	struct bpf_prog *prog = async->prog;

	if (prog) {
		bpf_prog_put(prog);
		async->prog = NULL;
		rcu_assign_pointer(async->callback_fn, NULL);
	}
}

BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer)
{
	struct bpf_hrtimer *t, *cur_t;
	bool inc = false;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	rcu_read_lock();
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t) {
		ret = -EINVAL;
		goto out;
	}

	cur_t = this_cpu_read(hrtimer_running);
	if (cur_t == t) {
		/* If bpf callback_fn is trying to bpf_timer_cancel()
		 * its own timer the hrtimer_cancel() will deadlock
		 * since it waits for callback_fn to finish.
		 */
		ret = -EDEADLK;
		goto out;
	}

	/* Only account in-flight cancellations when invoked from a timer
	 * callback, since we want to avoid waiting only if other _callbacks_
	 * are waiting on us, to avoid introducing lockups. Non-callback paths
	 * are ok, since nobody would synchronously wait for their completion.
	 */
	if (!cur_t)
		goto drop;
	atomic_inc(&t->cancelling);
	/* Need full barrier after relaxed atomic_inc */
	smp_mb__after_atomic();
	inc = true;
	if (atomic_read(&cur_t->cancelling)) {
		/* We're cancelling timer t, while some other timer callback is
		 * attempting to cancel us. In such a case, it might be possible
		 * that timer t belongs to the other callback, or some other
		 * callback waiting upon it (creating transitive dependencies
		 * upon us), and we will enter a deadlock if we continue
		 * cancelling and waiting for it synchronously, since it might
		 * do the same. Bail!
		 */
		ret = -EDEADLK;
		goto out;
	}
drop:
	drop_prog_refcnt(&t->cb);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	/* Cancel the timer and wait for associated callback to finish
	 * if it was running.
	 */
	ret = ret ?: hrtimer_cancel(&t->timer);
	if (inc)
		atomic_dec(&t->cancelling);
	rcu_read_unlock();
	return ret;
}

static const struct bpf_func_proto bpf_timer_cancel_proto = {
	.func = bpf_timer_cancel,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_TIMER,
};

static struct bpf_async_cb *__bpf_async_cancel_and_free(struct bpf_async_kern *async)
{
	struct bpf_async_cb *cb;

	/* Performance optimization: read async->cb without lock first. */
	if (!READ_ONCE(async->cb))
		return NULL;

	__bpf_spin_lock_irqsave(&async->lock);
	/* re-read it under lock */
	cb = async->cb;
	if (!cb)
		goto out;
	drop_prog_refcnt(cb);
	/* The subsequent bpf_timer_start/cancel() helpers won't be able to use
	 * this timer, since it won't be initialized.
	 */
	WRITE_ONCE(async->cb, NULL);
out:
	__bpf_spin_unlock_irqrestore(&async->lock);
	return cb;
}

/* This function is called by map_delete/update_elem for individual element and
 * by ops->map_release_uref when the user space reference to a map reaches zero.
 */
void bpf_timer_cancel_and_free(void *val)
{
	struct bpf_hrtimer *t;

	t = (struct bpf_hrtimer *)__bpf_async_cancel_and_free(val);

	if (!t)
		return;
	/* We check that bpf_map_delete/update_elem() was called from timer
	 * callback_fn. In such case we don't call hrtimer_cancel() (since it
	 * will deadlock) and don't call hrtimer_try_to_cancel() (since it will
	 * just return -1). Though callback_fn is still running on this cpu it's
	 * safe to do kfree(t) because bpf_timer_cb() read everything it needed
	 * from 't'. The bpf subprog callback_fn won't be able to access 't',
	 * since async->cb = NULL was already done. The timer will be
	 * effectively cancelled because bpf_timer_cb() will return
	 * HRTIMER_NORESTART.
	 *
	 * However, it is possible the timer callback_fn calling us armed the
	 * timer _before_ calling us, such that failing to cancel it here will
	 * cause it to possibly use struct hrtimer after freeing bpf_hrtimer.
	 * Therefore, we _need_ to cancel any outstanding timers before we do
	 * kfree_rcu, even though no more timers can be armed.
	 *
	 * Moreover, we need to schedule work even if timer does not belong to
	 * the calling callback_fn, as on two different CPUs, we can end up in a
	 * situation where both sides run in parallel, try to cancel one
	 * another, and we end up waiting on both sides in hrtimer_cancel
	 * without making forward progress, since timer1 depends on timer2's
	 * callback to finish, and vice versa.
	 *
	 *   CPU 1 (timer1_cb)			CPU 2 (timer2_cb)
	 *   bpf_timer_cancel_and_free(timer2)	bpf_timer_cancel_and_free(timer1)
	 *
	 * To avoid these issues, punt to workqueue context when we are in a
	 * timer callback.
	 */
	if (this_cpu_read(hrtimer_running))
		queue_work(system_unbound_wq, &t->cb.delete_work);
	else
		bpf_timer_delete_work(&t->cb.delete_work);
}

/* This function is called by map_delete/update_elem for individual element and
 * by ops->map_release_uref when the user space reference to a map reaches zero.
 */
void bpf_wq_cancel_and_free(void *val)
{
	struct bpf_work *work;

	BTF_TYPE_EMIT(struct bpf_wq);

	work = (struct bpf_work *)__bpf_async_cancel_and_free(val);
	if (!work)
		return;
	/* Trigger cancel of the sleepable work, but *do not* wait for
	 * it to finish if it was running as we might not be in a
	 * sleepable context.
	 * kfree will be called once the work has finished.
	 */
	schedule_work(&work->delete_work);
}

BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
{
	unsigned long *kptr = map_value;

	/* This helper may be inlined by verifier. */
	return xchg(kptr, (unsigned long)ptr);
}

/* Unlike other PTR_TO_BTF_ID helpers the btf_id in bpf_kptr_xchg()
 * helper is determined dynamically by the verifier. Use BPF_PTR_POISON to
 * denote type that verifier will determine.
 */
static const struct bpf_func_proto bpf_kptr_xchg_proto = {
	.func = bpf_kptr_xchg,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
	.ret_btf_id = BPF_PTR_POISON,
	.arg1_type = ARG_PTR_TO_KPTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE,
	.arg2_btf_id = BPF_PTR_POISON,
};

/* Since the upper 8 bits of dynptr->size are reserved, the
 * maximum supported size is 2^24 - 1.
 */
#define DYNPTR_MAX_SIZE ((1UL << 24) - 1)
#define DYNPTR_TYPE_SHIFT 28
#define DYNPTR_SIZE_MASK 0xFFFFFF
#define DYNPTR_RDONLY_BIT BIT(31)
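/* Resulting layout of dynptr->size, summarizing the masks above:
 *
 *	bit  31    : DYNPTR_RDONLY_BIT
 *	bits 28-30 : dynptr type (enum bpf_dynptr_type)
 *	bits 24-27 : currently unused
 *	bits 0-23  : actual size, at most DYNPTR_MAX_SIZE
 */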
1650
__bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern * ptr)1651 bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
1652 {
1653 return ptr->size & DYNPTR_RDONLY_BIT;
1654 }
1655
bpf_dynptr_set_rdonly(struct bpf_dynptr_kern * ptr)1656 void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
1657 {
1658 ptr->size |= DYNPTR_RDONLY_BIT;
1659 }
1660
bpf_dynptr_set_type(struct bpf_dynptr_kern * ptr,enum bpf_dynptr_type type)1661 static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type)
1662 {
1663 ptr->size |= type << DYNPTR_TYPE_SHIFT;
1664 }
1665
bpf_dynptr_get_type(const struct bpf_dynptr_kern * ptr)1666 static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
1667 {
1668 return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT;
1669 }
1670
__bpf_dynptr_size(const struct bpf_dynptr_kern * ptr)1671 u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr)
1672 {
1673 return ptr->size & DYNPTR_SIZE_MASK;
1674 }
1675
bpf_dynptr_set_size(struct bpf_dynptr_kern * ptr,u32 new_size)1676 static void bpf_dynptr_set_size(struct bpf_dynptr_kern *ptr, u32 new_size)
1677 {
1678 u32 metadata = ptr->size & ~DYNPTR_SIZE_MASK;
1679
1680 ptr->size = new_size | metadata;
1681 }
1682
bpf_dynptr_check_size(u32 size)1683 int bpf_dynptr_check_size(u32 size)
1684 {
1685 return size > DYNPTR_MAX_SIZE ? -E2BIG : 0;
1686 }
1687
bpf_dynptr_init(struct bpf_dynptr_kern * ptr,void * data,enum bpf_dynptr_type type,u32 offset,u32 size)1688 void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
1689 enum bpf_dynptr_type type, u32 offset, u32 size)
1690 {
1691 ptr->data = data;
1692 ptr->offset = offset;
1693 ptr->size = size;
1694 bpf_dynptr_set_type(ptr, type);
1695 }
1696
bpf_dynptr_set_null(struct bpf_dynptr_kern * ptr)1697 void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
1698 {
1699 memset(ptr, 0, sizeof(*ptr));
1700 }
1701
bpf_dynptr_check_off_len(const struct bpf_dynptr_kern * ptr,u32 offset,u32 len)1702 static int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u32 offset, u32 len)
1703 {
1704 u32 size = __bpf_dynptr_size(ptr);
1705
1706 if (len > size || offset > size - len)
1707 return -E2BIG;
1708
1709 return 0;
1710 }
1711
BPF_CALL_4(bpf_dynptr_from_mem,void *,data,u32,size,u64,flags,struct bpf_dynptr_kern *,ptr)1712 BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr)
1713 {
1714 int err;
1715
1716 BTF_TYPE_EMIT(struct bpf_dynptr);
1717
1718 err = bpf_dynptr_check_size(size);
1719 if (err)
1720 goto error;
1721
1722 /* flags is currently unsupported */
1723 if (flags) {
1724 err = -EINVAL;
1725 goto error;
1726 }
1727
1728 bpf_dynptr_init(ptr, data, BPF_DYNPTR_TYPE_LOCAL, 0, size);
1729
1730 return 0;
1731
1732 error:
1733 bpf_dynptr_set_null(ptr);
1734 return err;
1735 }
1736
1737 static const struct bpf_func_proto bpf_dynptr_from_mem_proto = {
1738 .func = bpf_dynptr_from_mem,
1739 .gpl_only = false,
1740 .ret_type = RET_INTEGER,
1741 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
1742 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
1743 .arg3_type = ARG_ANYTHING,
1744 .arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT,
1745 };
1746
BPF_CALL_5(bpf_dynptr_read,void *,dst,u32,len,const struct bpf_dynptr_kern *,src,u32,offset,u64,flags)1747 BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern *, src,
1748 u32, offset, u64, flags)
1749 {
1750 enum bpf_dynptr_type type;
1751 int err;
1752
1753 if (!src->data || flags)
1754 return -EINVAL;
1755
1756 err = bpf_dynptr_check_off_len(src, offset, len);
1757 if (err)
1758 return err;
1759
1760 type = bpf_dynptr_get_type(src);
1761
1762 switch (type) {
1763 case BPF_DYNPTR_TYPE_LOCAL:
1764 case BPF_DYNPTR_TYPE_RINGBUF:
1765 /* Source and destination may possibly overlap, hence use memmove to
1766 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr
1767 * pointing to overlapping PTR_TO_MAP_VALUE regions.
1768 */
1769 memmove(dst, src->data + src->offset + offset, len);
1770 return 0;
1771 case BPF_DYNPTR_TYPE_SKB:
1772 return __bpf_skb_load_bytes(src->data, src->offset + offset, dst, len);
1773 case BPF_DYNPTR_TYPE_XDP:
1774 return __bpf_xdp_load_bytes(src->data, src->offset + offset, dst, len);
1775 default:
1776 WARN_ONCE(true, "bpf_dynptr_read: unknown dynptr type %d\n", type);
1777 return -EFAULT;
1778 }
1779 }
1780
1781 static const struct bpf_func_proto bpf_dynptr_read_proto = {
1782 .func = bpf_dynptr_read,
1783 .gpl_only = false,
1784 .ret_type = RET_INTEGER,
1785 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
1786 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
1787 .arg3_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1788 .arg4_type = ARG_ANYTHING,
1789 .arg5_type = ARG_ANYTHING,
1790 };
1791
BPF_CALL_5(bpf_dynptr_write,const struct bpf_dynptr_kern *,dst,u32,offset,void *,src,u32,len,u64,flags)1792 BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u32, offset, void *, src,
1793 u32, len, u64, flags)
1794 {
1795 enum bpf_dynptr_type type;
1796 int err;
1797
1798 if (!dst->data || __bpf_dynptr_is_rdonly(dst))
1799 return -EINVAL;
1800
1801 err = bpf_dynptr_check_off_len(dst, offset, len);
1802 if (err)
1803 return err;
1804
1805 type = bpf_dynptr_get_type(dst);
1806
1807 switch (type) {
1808 case BPF_DYNPTR_TYPE_LOCAL:
1809 case BPF_DYNPTR_TYPE_RINGBUF:
1810 if (flags)
1811 return -EINVAL;
1812 /* Source and destination may possibly overlap, hence use memmove to
1813 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr
1814 * pointing to overlapping PTR_TO_MAP_VALUE regions.
1815 */
1816 memmove(dst->data + dst->offset + offset, src, len);
1817 return 0;
1818 case BPF_DYNPTR_TYPE_SKB:
1819 return __bpf_skb_store_bytes(dst->data, dst->offset + offset, src, len,
1820 flags);
1821 case BPF_DYNPTR_TYPE_XDP:
1822 if (flags)
1823 return -EINVAL;
1824 return __bpf_xdp_store_bytes(dst->data, dst->offset + offset, src, len);
1825 default:
1826 WARN_ONCE(true, "bpf_dynptr_write: unknown dynptr type %d\n", type);
1827 return -EFAULT;
1828 }
1829 }
1830
1831 static const struct bpf_func_proto bpf_dynptr_write_proto = {
1832 .func = bpf_dynptr_write,
1833 .gpl_only = false,
1834 .ret_type = RET_INTEGER,
1835 .arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1836 .arg2_type = ARG_ANYTHING,
1837 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
1838 .arg4_type = ARG_CONST_SIZE_OR_ZERO,
1839 .arg5_type = ARG_ANYTHING,
1840 };
1841
BPF_CALL_3(bpf_dynptr_data,const struct bpf_dynptr_kern *,ptr,u32,offset,u32,len)1842 BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u32, offset, u32, len)
1843 {
1844 enum bpf_dynptr_type type;
1845 int err;
1846
1847 if (!ptr->data)
1848 return 0;
1849
1850 err = bpf_dynptr_check_off_len(ptr, offset, len);
1851 if (err)
1852 return 0;
1853
1854 if (__bpf_dynptr_is_rdonly(ptr))
1855 return 0;
1856
1857 type = bpf_dynptr_get_type(ptr);
1858
1859 switch (type) {
1860 case BPF_DYNPTR_TYPE_LOCAL:
1861 case BPF_DYNPTR_TYPE_RINGBUF:
1862 return (unsigned long)(ptr->data + ptr->offset + offset);
1863 case BPF_DYNPTR_TYPE_SKB:
1864 case BPF_DYNPTR_TYPE_XDP:
1865 /* skb and xdp dynptrs should use bpf_dynptr_slice / bpf_dynptr_slice_rdwr */
1866 return 0;
1867 default:
1868 WARN_ONCE(true, "bpf_dynptr_data: unknown dynptr type %d\n", type);
1869 return 0;
1870 }
1871 }
1872
1873 static const struct bpf_func_proto bpf_dynptr_data_proto = {
1874 .func = bpf_dynptr_data,
1875 .gpl_only = false,
1876 .ret_type = RET_PTR_TO_DYNPTR_MEM_OR_NULL,
1877 .arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1878 .arg2_type = ARG_ANYTHING,
1879 .arg3_type = ARG_CONST_ALLOC_SIZE_OR_ZERO,
1880 };
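
/* BPF-side usage sketch (illustrative): bpf_dynptr_data() hands back a
 * direct pointer into the backing memory for local and ringbuf dynptrs
 * only; for skb/xdp dynptrs it returns NULL per the above, and
 * bpf_dynptr_slice()/bpf_dynptr_slice_rdwr() must be used instead:
 *
 *	__u32 *val = bpf_dynptr_data(&ptr, 0, sizeof(*val));
 *
 *	if (!val)
 *		return 0;	// rdonly, out-of-bounds, or skb/xdp dynptr
 *	*val = 42;	// direct write, no bpf_dynptr_write() needed
 */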
1881
1882 const struct bpf_func_proto bpf_get_current_task_proto __weak;
1883 const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
1884 const struct bpf_func_proto bpf_probe_read_user_proto __weak;
1885 const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
1886 const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
1887 const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
1888 const struct bpf_func_proto bpf_task_pt_regs_proto __weak;
1889
1890 const struct bpf_func_proto *
1891 bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1892 {
1893 switch (func_id) {
1894 case BPF_FUNC_map_lookup_elem:
1895 return &bpf_map_lookup_elem_proto;
1896 case BPF_FUNC_map_update_elem:
1897 return &bpf_map_update_elem_proto;
1898 case BPF_FUNC_map_delete_elem:
1899 return &bpf_map_delete_elem_proto;
1900 case BPF_FUNC_map_push_elem:
1901 return &bpf_map_push_elem_proto;
1902 case BPF_FUNC_map_pop_elem:
1903 return &bpf_map_pop_elem_proto;
1904 case BPF_FUNC_map_peek_elem:
1905 return &bpf_map_peek_elem_proto;
1906 case BPF_FUNC_map_lookup_percpu_elem:
1907 return &bpf_map_lookup_percpu_elem_proto;
1908 case BPF_FUNC_get_prandom_u32:
1909 return &bpf_get_prandom_u32_proto;
1910 case BPF_FUNC_get_smp_processor_id:
1911 return &bpf_get_raw_smp_processor_id_proto;
1912 case BPF_FUNC_get_numa_node_id:
1913 return &bpf_get_numa_node_id_proto;
1914 case BPF_FUNC_tail_call:
1915 return &bpf_tail_call_proto;
1916 case BPF_FUNC_ktime_get_ns:
1917 return &bpf_ktime_get_ns_proto;
1918 case BPF_FUNC_ktime_get_boot_ns:
1919 return &bpf_ktime_get_boot_ns_proto;
1920 case BPF_FUNC_ktime_get_tai_ns:
1921 return &bpf_ktime_get_tai_ns_proto;
1922 case BPF_FUNC_ringbuf_output:
1923 return &bpf_ringbuf_output_proto;
1924 case BPF_FUNC_ringbuf_reserve:
1925 return &bpf_ringbuf_reserve_proto;
1926 case BPF_FUNC_ringbuf_submit:
1927 return &bpf_ringbuf_submit_proto;
1928 case BPF_FUNC_ringbuf_discard:
1929 return &bpf_ringbuf_discard_proto;
1930 case BPF_FUNC_ringbuf_query:
1931 return &bpf_ringbuf_query_proto;
1932 case BPF_FUNC_strncmp:
1933 return &bpf_strncmp_proto;
1934 case BPF_FUNC_strtol:
1935 return &bpf_strtol_proto;
1936 case BPF_FUNC_strtoul:
1937 return &bpf_strtoul_proto;
1938 case BPF_FUNC_get_current_pid_tgid:
1939 return &bpf_get_current_pid_tgid_proto;
1940 case BPF_FUNC_get_ns_current_pid_tgid:
1941 return &bpf_get_ns_current_pid_tgid_proto;
1942 default:
1943 break;
1944 }
1945
1946 if (!bpf_token_capable(prog->aux->token, CAP_BPF))
1947 return NULL;
1948
1949 switch (func_id) {
1950 case BPF_FUNC_spin_lock:
1951 return &bpf_spin_lock_proto;
1952 case BPF_FUNC_spin_unlock:
1953 return &bpf_spin_unlock_proto;
1954 case BPF_FUNC_jiffies64:
1955 return &bpf_jiffies64_proto;
1956 case BPF_FUNC_per_cpu_ptr:
1957 return &bpf_per_cpu_ptr_proto;
1958 case BPF_FUNC_this_cpu_ptr:
1959 return &bpf_this_cpu_ptr_proto;
1960 case BPF_FUNC_timer_init:
1961 return &bpf_timer_init_proto;
1962 case BPF_FUNC_timer_set_callback:
1963 return &bpf_timer_set_callback_proto;
1964 case BPF_FUNC_timer_start:
1965 return &bpf_timer_start_proto;
1966 case BPF_FUNC_timer_cancel:
1967 return &bpf_timer_cancel_proto;
1968 case BPF_FUNC_kptr_xchg:
1969 return &bpf_kptr_xchg_proto;
1970 case BPF_FUNC_for_each_map_elem:
1971 return &bpf_for_each_map_elem_proto;
1972 case BPF_FUNC_loop:
1973 return &bpf_loop_proto;
1974 case BPF_FUNC_user_ringbuf_drain:
1975 return &bpf_user_ringbuf_drain_proto;
1976 case BPF_FUNC_ringbuf_reserve_dynptr:
1977 return &bpf_ringbuf_reserve_dynptr_proto;
1978 case BPF_FUNC_ringbuf_submit_dynptr:
1979 return &bpf_ringbuf_submit_dynptr_proto;
1980 case BPF_FUNC_ringbuf_discard_dynptr:
1981 return &bpf_ringbuf_discard_dynptr_proto;
1982 case BPF_FUNC_dynptr_from_mem:
1983 return &bpf_dynptr_from_mem_proto;
1984 case BPF_FUNC_dynptr_read:
1985 return &bpf_dynptr_read_proto;
1986 case BPF_FUNC_dynptr_write:
1987 return &bpf_dynptr_write_proto;
1988 case BPF_FUNC_dynptr_data:
1989 return &bpf_dynptr_data_proto;
1990 #ifdef CONFIG_CGROUPS
1991 case BPF_FUNC_cgrp_storage_get:
1992 return &bpf_cgrp_storage_get_proto;
1993 case BPF_FUNC_cgrp_storage_delete:
1994 return &bpf_cgrp_storage_delete_proto;
1995 case BPF_FUNC_get_current_cgroup_id:
1996 return &bpf_get_current_cgroup_id_proto;
1997 case BPF_FUNC_get_current_ancestor_cgroup_id:
1998 return &bpf_get_current_ancestor_cgroup_id_proto;
1999 #endif
2000 default:
2001 break;
2002 }
2003
2004 if (!bpf_token_capable(prog->aux->token, CAP_PERFMON))
2005 return NULL;
2006
2007 switch (func_id) {
2008 case BPF_FUNC_trace_printk:
2009 return bpf_get_trace_printk_proto();
2010 case BPF_FUNC_get_current_task:
2011 return &bpf_get_current_task_proto;
2012 case BPF_FUNC_get_current_task_btf:
2013 return &bpf_get_current_task_btf_proto;
2014 case BPF_FUNC_probe_read_user:
2015 return &bpf_probe_read_user_proto;
2016 case BPF_FUNC_probe_read_kernel:
2017 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
2018 NULL : &bpf_probe_read_kernel_proto;
2019 case BPF_FUNC_probe_read_user_str:
2020 return &bpf_probe_read_user_str_proto;
2021 case BPF_FUNC_probe_read_kernel_str:
2022 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
2023 NULL : &bpf_probe_read_kernel_str_proto;
2024 case BPF_FUNC_snprintf_btf:
2025 return &bpf_snprintf_btf_proto;
2026 case BPF_FUNC_snprintf:
2027 return &bpf_snprintf_proto;
2028 case BPF_FUNC_task_pt_regs:
2029 return &bpf_task_pt_regs_proto;
2030 case BPF_FUNC_trace_vprintk:
2031 return bpf_get_trace_vprintk_proto();
2032 default:
2033 return NULL;
2034 }
2035 }
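
/* Sketch of a subsystem verifier_ops->get_func_proto() callback (names
 * are hypothetical) showing the intended use of bpf_base_func_proto():
 * resolve subsystem-specific helpers first and fall back to the base
 * set, which applies the token/capability gating above.
 *
 *	static const struct bpf_func_proto *
 *	my_subsys_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_my_subsys_helper:
 *			return &my_subsys_helper_proto;	// hypothetical
 *		default:
 *			return bpf_base_func_proto(func_id, prog);
 *		}
 *	}
 */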
2036
2037 void bpf_list_head_free(const struct btf_field *field, void *list_head,
2038 struct bpf_spin_lock *spin_lock)
2039 {
2040 struct list_head *head = list_head, *orig_head = list_head;
2041
2042 BUILD_BUG_ON(sizeof(struct list_head) > sizeof(struct bpf_list_head));
2043 BUILD_BUG_ON(__alignof__(struct list_head) > __alignof__(struct bpf_list_head));
2044
2045 /* Do the actual list draining outside the lock to not hold the lock for
2046 * too long, and also prevent deadlocks if tracing programs end up
2047 * executing on entry/exit of functions called inside the critical
2048 * section, and end up doing map ops that call bpf_list_head_free for
2049 * the same map value again.
2050 */
2051 __bpf_spin_lock_irqsave(spin_lock);
2052 if (!head->next || list_empty(head))
2053 goto unlock;
2054 head = head->next;
2055 unlock:
2056 INIT_LIST_HEAD(orig_head);
2057 __bpf_spin_unlock_irqrestore(spin_lock);
2058
2059 while (head != orig_head) {
2060 void *obj = head;
2061
2062 obj -= field->graph_root.node_offset;
2063 head = head->next;
2064 /* The contained type can also have resources, including a
2065 * bpf_list_head which needs to be freed.
2066 */
2067 migrate_disable();
2068 __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);
2069 migrate_enable();
2070 }
2071 }
2072
2073 /* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are
2074 * 'rb_node *', so field name of rb_node within containing struct is not
2075 * needed.
2076 *
2077 * Since bpf_rb_tree's node type has a corresponding struct btf_field with
2078 * graph_root.node_offset, it's not necessary to know field name
2079 * or type of node struct
2080 */
2081 #define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) \
2082 for (pos = rb_first_postorder(root); \
2083 pos && ({ n = rb_next_postorder(pos); 1; }); \
2084 pos = n)
2085
2086 void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
2087 struct bpf_spin_lock *spin_lock)
2088 {
2089 struct rb_root_cached orig_root, *root = rb_root;
2090 struct rb_node *pos, *n;
2091 void *obj;
2092
2093 BUILD_BUG_ON(sizeof(struct rb_root_cached) > sizeof(struct bpf_rb_root));
2094 BUILD_BUG_ON(__alignof__(struct rb_root_cached) > __alignof__(struct bpf_rb_root));
2095
2096 __bpf_spin_lock_irqsave(spin_lock);
2097 orig_root = *root;
2098 *root = RB_ROOT_CACHED;
2099 __bpf_spin_unlock_irqrestore(spin_lock);
2100
2101 bpf_rbtree_postorder_for_each_entry_safe(pos, n, &orig_root.rb_root) {
2102 obj = pos;
2103 obj -= field->graph_root.node_offset;
2104
2106 migrate_disable();
2107 __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);
2108 migrate_enable();
2109 }
2110 }
2111
2112 __bpf_kfunc_start_defs();
2113
2114 __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
2115 {
2116 struct btf_struct_meta *meta = meta__ign;
2117 u64 size = local_type_id__k;
2118 void *p;
2119
2120 p = bpf_mem_alloc(&bpf_global_ma, size);
2121 if (!p)
2122 return NULL;
2123 if (meta)
2124 bpf_obj_init(meta->record, p);
2125 return p;
2126 }
2127
2128 __bpf_kfunc void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign)
2129 {
2130 u64 size = local_type_id__k;
2131
2132 /* The verifier has ensured that meta__ign must be NULL */
2133 return bpf_mem_alloc(&bpf_global_percpu_ma, size);
2134 }
2135
2136 /* Must be called under migrate_disable(), as required by bpf_mem_free */
2137 void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu)
2138 {
2139 struct bpf_mem_alloc *ma;
2140
2141 if (rec && rec->refcount_off >= 0 &&
2142 !refcount_dec_and_test((refcount_t *)(p + rec->refcount_off))) {
2143 /* Object is refcounted and refcount_dec didn't result in 0
2144 * refcount. Return without freeing the object
2145 */
2146 return;
2147 }
2148
2149 if (rec)
2150 bpf_obj_free_fields(rec, p);
2151
2152 if (percpu)
2153 ma = &bpf_global_percpu_ma;
2154 else
2155 ma = &bpf_global_ma;
2156 bpf_mem_free_rcu(ma, p);
2157 }
2158
2159 __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
2160 {
2161 struct btf_struct_meta *meta = meta__ign;
2162 void *p = p__alloc;
2163
2164 __bpf_obj_drop_impl(p, meta ? meta->record : NULL, false);
2165 }
2166
2167 __bpf_kfunc void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign)
2168 {
2169 /* The verifier has ensured that meta__ign must be NULL */
2170 bpf_mem_free_rcu(&bpf_global_percpu_ma, p__alloc);
2171 }
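
/* BPF-side usage sketch (illustrative): allocating and freeing a local
 * kptr, assuming the bpf_obj_new()/bpf_obj_drop() convenience macros
 * from the selftests' bpf_experimental.h, which supply the
 * local_type_id__k and meta__ign arguments of the _impl kfuncs above:
 *
 *	struct node_data { long key; };	// hypothetical type
 *
 *	struct node_data *n = bpf_obj_new(typeof(*n));
 *
 *	if (!n)
 *		return 0;	// allocation can fail
 *	n->key = 5;
 *	bpf_obj_drop(n);	// releases the owned reference
 */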
2172
2173 __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign)
2174 {
2175 struct btf_struct_meta *meta = meta__ign;
2176 struct bpf_refcount *ref;
2177
2178 /* Could just cast directly to refcount_t *, but need some code using
2179 * bpf_refcount type so that it is emitted in vmlinux BTF
2180 */
2181 ref = (struct bpf_refcount *)(p__refcounted_kptr + meta->record->refcount_off);
2182 if (!refcount_inc_not_zero((refcount_t *)ref))
2183 return NULL;
2184
2185 /* Verifier strips KF_RET_NULL if input is owned ref, see is_kfunc_ret_null
2186 * in verifier.c
2187 */
2188 return (void *)p__refcounted_kptr;
2189 }
2190
2191 static int __bpf_list_add(struct bpf_list_node_kern *node,
2192 struct bpf_list_head *head,
2193 bool tail, struct btf_record *rec, u64 off)
2194 {
2195 struct list_head *n = &node->list_head, *h = (void *)head;
2196
2197 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
2198 * called on its fields, so init here
2199 */
2200 if (unlikely(!h->next))
2201 INIT_LIST_HEAD(h);
2202
2203 /* node->owner != NULL implies !list_empty(n), no need to separately
2204 * check the latter
2205 */
2206 if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
2207 /* Only called from BPF prog, no need to migrate_disable */
2208 __bpf_obj_drop_impl((void *)n - off, rec, false);
2209 return -EINVAL;
2210 }
2211
2212 tail ? list_add_tail(n, h) : list_add(n, h);
2213 WRITE_ONCE(node->owner, head);
2214
2215 return 0;
2216 }
2217
2218 __bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head,
2219 struct bpf_list_node *node,
2220 void *meta__ign, u64 off)
2221 {
2222 struct bpf_list_node_kern *n = (void *)node;
2223 struct btf_struct_meta *meta = meta__ign;
2224
2225 return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off);
2226 }
2227
2228 __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head,
2229 struct bpf_list_node *node,
2230 void *meta__ign, u64 off)
2231 {
2232 struct bpf_list_node_kern *n = (void *)node;
2233 struct btf_struct_meta *meta = meta__ign;
2234
2235 return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off);
2236 }
2237
2238 static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail)
2239 {
2240 struct list_head *n, *h = (void *)head;
2241 struct bpf_list_node_kern *node;
2242
2243 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
2244 * called on its fields, so init here
2245 */
2246 if (unlikely(!h->next))
2247 INIT_LIST_HEAD(h);
2248 if (list_empty(h))
2249 return NULL;
2250
2251 n = tail ? h->prev : h->next;
2252 node = container_of(n, struct bpf_list_node_kern, list_head);
2253 if (WARN_ON_ONCE(READ_ONCE(node->owner) != head))
2254 return NULL;
2255
2256 list_del_init(n);
2257 WRITE_ONCE(node->owner, NULL);
2258 return (struct bpf_list_node *)n;
2259 }
2260
2261 __bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
2262 {
2263 return __bpf_list_del(head, false);
2264 }
2265
2266 __bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
2267 {
2268 return __bpf_list_del(head, true);
2269 }
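
/* BPF-side usage sketch (illustrative): a bpf_list_head and its
 * bpf_spin_lock living in the same map value, manipulated through the
 * push/pop wrappers from the selftests' bpf_experimental.h (the
 * __contains() tag comes from the same header; field and variable
 * names are hypothetical):
 *
 *	struct elem { struct bpf_list_node node; long val; };
 *	// in the map value: struct bpf_spin_lock lock;
 *	//                   struct bpf_list_head head __contains(elem, node);
 *
 *	struct bpf_list_node *n;
 *	struct elem *e;
 *
 *	e = bpf_obj_new(typeof(*e));
 *	if (!e)
 *		return 0;
 *	bpf_spin_lock(&lock);
 *	bpf_list_push_back(&head, &e->node);	// list takes ownership
 *	n = bpf_list_pop_front(&head);		// owned ref or NULL
 *	bpf_spin_unlock(&lock);
 *	if (n)
 *		bpf_obj_drop(container_of(n, struct elem, node));
 */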
2270
2271 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
2272 struct bpf_rb_node *node)
2273 {
2274 struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
2275 struct rb_root_cached *r = (struct rb_root_cached *)root;
2276 struct rb_node *n = &node_internal->rb_node;
2277
2278 /* node_internal->owner != root implies either RB_EMPTY_NODE(n) or
2279 * n is owned by some other tree. No need to check RB_EMPTY_NODE(n)
2280 */
2281 if (READ_ONCE(node_internal->owner) != root)
2282 return NULL;
2283
2284 rb_erase_cached(n, r);
2285 RB_CLEAR_NODE(n);
2286 WRITE_ONCE(node_internal->owner, NULL);
2287 return (struct bpf_rb_node *)n;
2288 }
2289
2290 /* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF
2291 * program
2292 */
2293 static int __bpf_rbtree_add(struct bpf_rb_root *root,
2294 struct bpf_rb_node_kern *node,
2295 void *less, struct btf_record *rec, u64 off)
2296 {
2297 struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node;
2298 struct rb_node *parent = NULL, *n = &node->rb_node;
2299 bpf_callback_t cb = (bpf_callback_t)less;
2300 bool leftmost = true;
2301
2302 /* node->owner != NULL implies !RB_EMPTY_NODE(n), no need to separately
2303 * check the latter
2304 */
2305 if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
2306 /* Only called from BPF prog, no need to migrate_disable */
2307 __bpf_obj_drop_impl((void *)n - off, rec, false);
2308 return -EINVAL;
2309 }
2310
2311 while (*link) {
2312 parent = *link;
2313 if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) {
2314 link = &parent->rb_left;
2315 } else {
2316 link = &parent->rb_right;
2317 leftmost = false;
2318 }
2319 }
2320
2321 rb_link_node(n, parent, link);
2322 rb_insert_color_cached(n, (struct rb_root_cached *)root, leftmost);
2323 WRITE_ONCE(node->owner, root);
2324 return 0;
2325 }
2326
2327 __bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
2328 bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
2329 void *meta__ign, u64 off)
2330 {
2331 struct btf_struct_meta *meta = meta__ign;
2332 struct bpf_rb_node_kern *n = (void *)node;
2333
2334 return __bpf_rbtree_add(root, n, (void *)less, meta ? meta->record : NULL, off);
2335 }
2336
2337 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
2338 {
2339 struct rb_root_cached *r = (struct rb_root_cached *)root;
2340
2341 return (struct bpf_rb_node *)rb_first_cached(r);
2342 }
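
/* BPF-side usage sketch (illustrative): a bpf_rb_root guarded by a
 * bpf_spin_lock, ordering decided by a BPF 'less' callback, using the
 * bpf_rbtree_add() wrapper from the selftests' bpf_experimental.h.
 * Names are hypothetical:
 *
 *	struct node_data { struct bpf_rb_node node; long key; };
 *	// in the map value or a global:
 *	//	struct bpf_spin_lock glock;
 *	//	struct bpf_rb_root groot __contains(node_data, node);
 *
 *	static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 *	{
 *		struct node_data *na = container_of(a, struct node_data, node);
 *		struct node_data *nb = container_of(b, struct node_data, node);
 *
 *		return na->key < nb->key;
 *	}
 *
 *	bpf_spin_lock(&glock);
 *	bpf_rbtree_add(&groot, &n->node, less);	// tree takes ownership of n
 *	res = bpf_rbtree_first(&groot);		// non-owning ref, lock held
 *	bpf_spin_unlock(&glock);
 */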
2343
2344 /**
2345 * bpf_task_acquire - Acquire a reference to a task. A task acquired by this
2346 * kfunc which is not stored in a map as a kptr must be released by calling
2347 * bpf_task_release().
2348 * @p: The task on which a reference is being acquired.
2349 */
2350 __bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p)
2351 {
2352 if (refcount_inc_not_zero(&p->rcu_users))
2353 return p;
2354 return NULL;
2355 }
2356
2357 /**
2358 * bpf_task_release - Release the reference acquired on a task.
2359 * @p: The task on which a reference is being released.
2360 */
2361 __bpf_kfunc void bpf_task_release(struct task_struct *p)
2362 {
2363 put_task_struct_rcu_user(p);
2364 }
2365
2366 __bpf_kfunc void bpf_task_release_dtor(void *p)
2367 {
2368 put_task_struct_rcu_user(p);
2369 }
2370 CFI_NOSEAL(bpf_task_release_dtor);
2371
2372 #ifdef CONFIG_CGROUPS
2373 /**
2374 * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by
2375 * this kfunc which is not stored in a map as a kptr must be released by
2376 * calling bpf_cgroup_release().
2377 * @cgrp: The cgroup on which a reference is being acquired.
2378 */
2379 __bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
2380 {
2381 return cgroup_tryget(cgrp) ? cgrp : NULL;
2382 }
2383
2384 /**
2385 * bpf_cgroup_release - Release the reference acquired on a cgroup.
2386 * If this kfunc is invoked in an RCU read region, the cgroup is guaranteed to
2387 * not be freed until the current grace period has ended, even if its refcount
2388 * drops to 0.
2389 * @cgrp: The cgroup on which a reference is being released.
2390 */
2391 __bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp)
2392 {
2393 cgroup_put(cgrp);
2394 }
2395
2396 __bpf_kfunc void bpf_cgroup_release_dtor(void *cgrp)
2397 {
2398 cgroup_put(cgrp);
2399 }
2400 CFI_NOSEAL(bpf_cgroup_release_dtor);
2401
2402 /**
2403 * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor
2404 * array. A cgroup returned by this kfunc which is not subsequently stored in a
2405 * map must be released by calling bpf_cgroup_release().
2406 * @cgrp: The cgroup for which we're performing a lookup.
2407 * @level: The level of ancestor to look up.
2408 */
2409 __bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
2410 {
2411 struct cgroup *ancestor;
2412
2413 if (level > cgrp->level || level < 0)
2414 return NULL;
2415
2416 /* cgrp's refcnt could be 0 here, but ancestors can still be accessed */
2417 ancestor = cgrp->ancestors[level];
2418 if (!cgroup_tryget(ancestor))
2419 return NULL;
2420 return ancestor;
2421 }
2422
2423 /**
2424 * bpf_cgroup_from_id - Find a cgroup from its ID. A cgroup returned by this
2425 * kfunc which is not subsequently stored in a map must be released by calling
2426 * bpf_cgroup_release().
2427 * @cgid: cgroup id.
2428 */
2429 __bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid)
2430 {
2431 struct cgroup *cgrp;
2432
2433 cgrp = cgroup_get_from_id(cgid);
2434 if (IS_ERR(cgrp))
2435 return NULL;
2436 return cgrp;
2437 }
2438
2439 /**
2440 * bpf_task_under_cgroup - wrap task_under_cgroup_hierarchy() as a kfunc to
2441 * test a task's membership of a cgroup's ancestry.
2442 * @task: the task to be tested
2443 * @ancestor: possible ancestor of @task's cgroup
2444 *
2445 * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
2446 * It follows all the same rules as cgroup_is_descendant, and only applies
2447 * to the default hierarchy.
2448 */
2449 __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
2450 struct cgroup *ancestor)
2451 {
2452 long ret;
2453
2454 rcu_read_lock();
2455 ret = task_under_cgroup_hierarchy(task, ancestor);
2456 rcu_read_unlock();
2457 return ret;
2458 }
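
/* BPF-side usage sketch (illustrative): look up a cgroup by ID, test
 * whether the current task sits under it on the default hierarchy, then
 * drop the acquired reference. 'cgid' and 'matched' are hypothetical:
 *
 *	struct task_struct *task = bpf_get_current_task_btf();
 *	struct cgroup *cgrp;
 *
 *	cgrp = bpf_cgroup_from_id(cgid);
 *	if (!cgrp)
 *		return 0;	// stale or invalid ID
 *	if (bpf_task_under_cgroup(task, cgrp))
 *		matched++;	// hypothetical counter
 *	bpf_cgroup_release(cgrp);
 */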
2459
2460 /**
2461 * bpf_task_get_cgroup1 - Acquires the associated cgroup of a task within a
2462 * specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its
2463 * hierarchy ID.
2464 * @task: The target task
2465 * @hierarchy_id: The ID of a cgroup1 hierarchy
2466 *
2467 * On success, the cgroup is returned. On failure, NULL is returned.
2468 */
2469 __bpf_kfunc struct cgroup *
2470 bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id)
2471 {
2472 struct cgroup *cgrp = task_get_cgroup1(task, hierarchy_id);
2473
2474 if (IS_ERR(cgrp))
2475 return NULL;
2476 return cgrp;
2477 }
2478 #endif /* CONFIG_CGROUPS */
2479
2480 /**
2481 * bpf_task_from_pid - Find a struct task_struct from its pid by looking it up
2482 * in the root pid namespace idr. If a task is returned, it must either be
2483 * stored in a map, or released with bpf_task_release().
2484 * @pid: The pid of the task being looked up.
2485 */
2486 __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
2487 {
2488 struct task_struct *p;
2489
2490 rcu_read_lock();
2491 p = find_task_by_pid_ns(pid, &init_pid_ns);
2492 if (p)
2493 p = bpf_task_acquire(p);
2494 rcu_read_unlock();
2495
2496 return p;
2497 }
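
/* BPF-side usage sketch (illustrative): acquire a task by pid and
 * release it when done; the reference must either be released or
 * stored in a map as a kptr before the program exits:
 *
 *	struct task_struct *p = bpf_task_from_pid(1);
 *
 *	if (!p)
 *		return 0;	// no task with that pid in init_pid_ns
 *	bpf_printk("pid 1 comm: %s", p->comm);
 *	bpf_task_release(p);
 */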
2498
2499 /**
2500 * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data.
2501 * @p: The dynptr whose data slice to retrieve
2502 * @offset: Offset into the dynptr
2503 * @buffer__opt: User-provided buffer to copy contents into. May be NULL
2504 * @buffer__szk: Size (in bytes) of the buffer if present. This is the
2505 * length of the requested slice. This must be a constant.
2506 *
2507 * For non-skb and non-xdp type dynptrs, there is no difference between
2508 * bpf_dynptr_slice and bpf_dynptr_data.
2509 *
2510 * If buffer__opt is NULL, the call will fail if buffer__opt was needed.
2511 *
2512 * If the intention is to write to the data slice, please use
2513 * bpf_dynptr_slice_rdwr.
2514 *
2515 * The user must check that the returned pointer is not null before using it.
2516 *
2517 * Please note that in the case of skb and xdp dynptrs, bpf_dynptr_slice
2518 * does not change the underlying packet data pointers, so a call to
2519 * bpf_dynptr_slice will not invalidate any ctx->data/data_end pointers in
2520 * the bpf program.
2521 *
2522 * Return: NULL if the call failed (e.g. invalid dynptr), pointer to a read-only
2523 * data slice (can be either direct pointer to the data or a pointer to the user
2524 * provided buffer, with its contents containing the data, if unable to obtain
2525 * direct pointer)
2526 */
2527 __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr *p, u32 offset,
2528 void *buffer__opt, u32 buffer__szk)
2529 {
2530 const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2531 enum bpf_dynptr_type type;
2532 u32 len = buffer__szk;
2533 int err;
2534
2535 if (!ptr->data)
2536 return NULL;
2537
2538 err = bpf_dynptr_check_off_len(ptr, offset, len);
2539 if (err)
2540 return NULL;
2541
2542 type = bpf_dynptr_get_type(ptr);
2543
2544 switch (type) {
2545 case BPF_DYNPTR_TYPE_LOCAL:
2546 case BPF_DYNPTR_TYPE_RINGBUF:
2547 return ptr->data + ptr->offset + offset;
2548 case BPF_DYNPTR_TYPE_SKB:
2549 if (buffer__opt)
2550 return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer__opt);
2551 else
2552 return skb_pointer_if_linear(ptr->data, ptr->offset + offset, len);
2553 case BPF_DYNPTR_TYPE_XDP:
2554 {
2555 void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len);
2556 if (!IS_ERR_OR_NULL(xdp_ptr))
2557 return xdp_ptr;
2558
2559 if (!buffer__opt)
2560 return NULL;
2561 bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer__opt, len, false);
2562 return buffer__opt;
2563 }
2564 default:
2565 WARN_ONCE(true, "unknown dynptr type %d\n", type);
2566 return NULL;
2567 }
2568 }
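
/* BPF-side usage sketch (illustrative): read-only header parse through
 * an skb dynptr. The returned pointer may be direct skb data or may be
 * 'buffer', so it must only be read, never written:
 *
 *	struct ethhdr buffer, *eth;
 *
 *	eth = bpf_dynptr_slice(&ptr, 0, &buffer, sizeof(buffer));
 *	if (!eth)
 *		return TC_ACT_SHOT;	// invalid dynptr or out of bounds
 *	if (eth->h_proto != bpf_htons(ETH_P_IP))
 *		return TC_ACT_OK;	// not IPv4; nothing to do
 */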
2569
2570 /**
2571 * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data.
2572 * @p: The dynptr whose data slice to retrieve
2573 * @offset: Offset into the dynptr
2574 * @buffer__opt: User-provided buffer to copy contents into. May be NULL
2575 * @buffer__szk: Size (in bytes) of the buffer if present. This is the
2576 * length of the requested slice. This must be a constant.
2577 *
2578 * For non-skb and non-xdp type dynptrs, there is no difference between
2579 * bpf_dynptr_slice and bpf_dynptr_data.
2580 *
2581 * If buffer__opt is NULL, the call will fail if buffer__opt was needed.
2582 *
2583 * The returned pointer is writable and may point either directly to the dynptr
2584 * data at the requested offset or to the buffer if a direct data pointer
2585 * cannot be obtained (example: the requested slice is in the paged area of an
2586 * skb packet). In the case where the returned pointer is to the buffer, the user
2587 * is responsible for persisting writes through calling bpf_dynptr_write(). This
2588 * usually looks something like this pattern:
2589 *
2590 * struct eth_hdr *eth = bpf_dynptr_slice_rdwr(&dynptr, 0, buffer, sizeof(buffer));
2591 * if (!eth)
2592 * return TC_ACT_SHOT;
2593 *
2594 * // mutate eth header //
2595 *
2596 * if (eth == buffer)
2597 * bpf_dynptr_write(&ptr, 0, buffer, sizeof(buffer), 0);
2598 *
2599 * Please note that, as in the example above, the user must check that the
2600 * returned pointer is not null before using it.
2601 *
2602 * Please also note that in the case of skb and xdp dynptrs, bpf_dynptr_slice_rdwr
2603 * does not change the underlying packet data pointers, so a call to
2604 * bpf_dynptr_slice_rdwr will not invalidate any ctx->data/data_end pointers in
2605 * the bpf program.
2606 *
2607 * Return: NULL if the call failed (e.g. invalid dynptr), pointer to a
2608 * data slice (can be either direct pointer to the data or a pointer to the user
2609 * provided buffer, with its contents containing the data, if unable to obtain
2610 * direct pointer)
2611 */
2612 __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u32 offset,
2613 void *buffer__opt, u32 buffer__szk)
2614 {
2615 const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2616
2617 if (!ptr->data || __bpf_dynptr_is_rdonly(ptr))
2618 return NULL;
2619
2620 /* bpf_dynptr_slice_rdwr is the same logic as bpf_dynptr_slice.
2621 *
2622 * For skb-type dynptrs, it is safe to write into the returned pointer
2623 * if the bpf program allows skb data writes. There are two possibilities
2624 * that may occur when calling bpf_dynptr_slice_rdwr:
2625 *
2626 * 1) The requested slice is in the head of the skb. In this case, the
2627 * returned pointer is directly to skb data, and if the skb is cloned, the
2628 * verifier will have uncloned it (see bpf_unclone_prologue()) already.
2629 * The pointer can be directly written into.
2630 *
2631 * 2) Some portion of the requested slice is in the paged buffer area.
2632 * In this case, the requested data will be copied out into the buffer
2633 * and the returned pointer will be a pointer to the buffer. The skb
2634 * will not be pulled. To persist the write, the user will need to call
2635 * bpf_dynptr_write(), which will pull the skb and commit the write.
2636 *
2637 * Similarly for xdp programs, if the requested slice is not across xdp
2638 * fragments, then a direct pointer will be returned, otherwise the data
2639 * will be copied out into the buffer and the user will need to call
2640 * bpf_dynptr_write() to commit changes.
2641 */
2642 return bpf_dynptr_slice(p, offset, buffer__opt, buffer__szk);
2643 }
2644
2645 __bpf_kfunc int bpf_dynptr_adjust(const struct bpf_dynptr *p, u32 start, u32 end)
2646 {
2647 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2648 u32 size;
2649
2650 if (!ptr->data || start > end)
2651 return -EINVAL;
2652
2653 size = __bpf_dynptr_size(ptr);
2654
2655 if (start > size || end > size)
2656 return -ERANGE;
2657
2658 ptr->offset += start;
2659 bpf_dynptr_set_size(ptr, end - start);
2660
2661 return 0;
2662 }
2663
2664 __bpf_kfunc bool bpf_dynptr_is_null(const struct bpf_dynptr *p)
2665 {
2666 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2667
2668 return !ptr->data;
2669 }
2670
2671 __bpf_kfunc bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *p)
2672 {
2673 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2674
2675 if (!ptr->data)
2676 return false;
2677
2678 return __bpf_dynptr_is_rdonly(ptr);
2679 }
2680
2681 __bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr *p)
2682 {
2683 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2684
2685 if (!ptr->data)
2686 return -EINVAL;
2687
2688 return __bpf_dynptr_size(ptr);
2689 }
2690
2691 __bpf_kfunc int bpf_dynptr_clone(const struct bpf_dynptr *p,
2692 struct bpf_dynptr *clone__uninit)
2693 {
2694 struct bpf_dynptr_kern *clone = (struct bpf_dynptr_kern *)clone__uninit;
2695 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2696
2697 if (!ptr->data) {
2698 bpf_dynptr_set_null(clone);
2699 return -EINVAL;
2700 }
2701
2702 *clone = *ptr;
2703
2704 return 0;
2705 }
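
/* BPF-side usage sketch (illustrative): clone a dynptr before narrowing
 * it, so the original view stays intact. bpf_dynptr_adjust() advances
 * the offset by 'start' and shrinks the size to 'end - start':
 *
 *	struct bpf_dynptr view;
 *
 *	if (bpf_dynptr_clone(&ptr, &view))
 *		return 0;	// 'ptr' was invalid/null
 *	// restrict 'view' to bytes [4, 12) of the original data
 *	if (bpf_dynptr_adjust(&view, 4, 12))
 *		return 0;	// -EINVAL or -ERANGE on bad bounds
 */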
2706
2707 __bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
2708 {
2709 return obj;
2710 }
2711
2712 __bpf_kfunc void *bpf_rdonly_cast(const void *obj__ign, u32 btf_id__k)
2713 {
2714 return (void *)obj__ign;
2715 }
2716
2717 __bpf_kfunc void bpf_rcu_read_lock(void)
2718 {
2719 rcu_read_lock();
2720 }
2721
2722 __bpf_kfunc void bpf_rcu_read_unlock(void)
2723 {
2724 rcu_read_unlock();
2725 }
2726
2727 struct bpf_throw_ctx {
2728 struct bpf_prog_aux *aux;
2729 u64 sp;
2730 u64 bp;
2731 int cnt;
2732 };
2733
2734 static bool bpf_stack_walker(void *cookie, u64 ip, u64 sp, u64 bp)
2735 {
2736 struct bpf_throw_ctx *ctx = cookie;
2737 struct bpf_prog *prog;
2738
2739 if (!is_bpf_text_address(ip))
2740 return !ctx->cnt;
2741 prog = bpf_prog_ksym_find(ip);
2742 ctx->cnt++;
2743 if (bpf_is_subprog(prog))
2744 return true;
2745 ctx->aux = prog->aux;
2746 ctx->sp = sp;
2747 ctx->bp = bp;
2748 return false;
2749 }
2750
2751 __bpf_kfunc void bpf_throw(u64 cookie)
2752 {
2753 struct bpf_throw_ctx ctx = {};
2754
2755 arch_bpf_stack_walk(bpf_stack_walker, &ctx);
2756 WARN_ON_ONCE(!ctx.aux);
2757 if (ctx.aux)
2758 WARN_ON_ONCE(!ctx.aux->exception_boundary);
2759 WARN_ON_ONCE(!ctx.bp);
2760 WARN_ON_ONCE(!ctx.cnt);
2761 /* Prevent KASAN false positives for CONFIG_KASAN_STACK by unpoisoning
2762 * deeper stack depths than ctx.sp as we do not return from bpf_throw,
2763 * which skips compiler generated instrumentation to do the same.
2764 */
2765 kasan_unpoison_task_stack_below((void *)(long)ctx.sp);
2766 ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp, 0, 0);
2767 WARN(1, "A call to BPF exception callback should never return\n");
2768 }
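
/* BPF-side usage sketch (illustrative): bpf_throw() never returns; it
 * unwinds all program frames and invokes the program's exception
 * callback (the default callback simply returns the cookie). The
 * bpf_assert() family in the selftests' bpf_experimental.h builds on
 * this, roughly:
 *
 *	if (!some_invariant)	// hypothetical condition
 *		bpf_throw(0);	// abort the program, exception cb runs
 */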
2769
2770 __bpf_kfunc int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags)
2771 {
2772 struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
2773 struct bpf_map *map = p__map;
2774
2775 BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_wq));
2776 BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_wq));
2777
2778 if (flags)
2779 return -EINVAL;
2780
2781 return __bpf_async_init(async, map, flags, BPF_ASYNC_TYPE_WQ);
2782 }
2783
2784 __bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags)
2785 {
2786 struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
2787 struct bpf_work *w;
2788
2789 if (in_nmi())
2790 return -EOPNOTSUPP;
2791 if (flags)
2792 return -EINVAL;
2793 w = READ_ONCE(async->work);
2794 if (!w || !READ_ONCE(w->cb.prog))
2795 return -EINVAL;
2796
2797 schedule_work(&w->work);
2798 return 0;
2799 }
2800
2801 __bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq,
2802 int (callback_fn)(void *map, int *key, void *value),
2803 unsigned int flags,
2804 void *aux__ign)
2805 {
2806 struct bpf_prog_aux *aux = (struct bpf_prog_aux *)aux__ign;
2807 struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
2808
2809 if (flags)
2810 return -EINVAL;
2811
2812 return __bpf_async_set_callback(async, callback_fn, aux, flags, BPF_ASYNC_TYPE_WQ);
2813 }
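
/* BPF-side usage sketch (illustrative): a struct bpf_wq embedded in a
 * map value, assuming the bpf_wq_set_callback() wrapper from the
 * selftests' bpf_experimental.h (which supplies aux__ign). Names are
 * hypothetical:
 *
 *	struct elem { struct bpf_wq wq; };	// map value type
 *
 *	static int wq_cb(void *map, int *key, void *value)
 *	{
 *		return 0;	// runs later in workqueue context
 *	}
 *
 *	if (bpf_wq_init(&val->wq, &my_map, 0))
 *		return 0;
 *	if (bpf_wq_set_callback(&val->wq, wq_cb, 0))
 *		return 0;
 *	bpf_wq_start(&val->wq, 0);	// fails until a callback is set
 */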
2814
2815 __bpf_kfunc void bpf_preempt_disable(void)
2816 {
2817 preempt_disable();
2818 }
2819
2820 __bpf_kfunc void bpf_preempt_enable(void)
2821 {
2822 preempt_enable();
2823 }
2824
2825 struct bpf_iter_bits {
2826 __u64 __opaque[2];
2827 } __aligned(8);
2828
2829 struct bpf_iter_bits_kern {
2830 union {
2831 unsigned long *bits;
2832 unsigned long bits_copy;
2833 };
2834 u32 nr_bits;
2835 int bit;
2836 } __aligned(8);
2837
2838 /**
2839 * bpf_iter_bits_new() - Initialize a new bits iterator for a given memory area
2840 * @it: The new bpf_iter_bits to be created
2841 * @unsafe_ptr__ign: A pointer pointing to a memory area to be iterated over
2842 * @nr_words: The size of the specified memory area, measured in 8-byte units.
2843 * Due to bpf_mem_alloc() limitations, it can't be greater than 512.
2844 *
2845 * This function initializes a new bpf_iter_bits structure for iterating over
2846 * a memory area which is specified by the @unsafe_ptr__ign and @nr_words. It
2847 * copies the data of the memory area to the newly created bpf_iter_bits @it for
2848 * subsequent iteration operations.
2849 *
2850 * On success, 0 is returned. On failure, a negative errno is returned.
2851 */
2852 __bpf_kfunc int
2853 bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words)
2854 {
2855 struct bpf_iter_bits_kern *kit = (void *)it;
2856 u32 nr_bytes = nr_words * sizeof(u64);
2857 u32 nr_bits = BYTES_TO_BITS(nr_bytes);
2858 int err;
2859
2860 BUILD_BUG_ON(sizeof(struct bpf_iter_bits_kern) != sizeof(struct bpf_iter_bits));
2861 BUILD_BUG_ON(__alignof__(struct bpf_iter_bits_kern) !=
2862 __alignof__(struct bpf_iter_bits));
2863
2864 kit->nr_bits = 0;
2865 kit->bits_copy = 0;
2866 kit->bit = -1;
2867
2868 if (!unsafe_ptr__ign || !nr_words)
2869 return -EINVAL;
2870
2871 /* Optimization for u64 mask */
2872 if (nr_bits == 64) {
2873 err = bpf_probe_read_kernel_common(&kit->bits_copy, nr_bytes, unsafe_ptr__ign);
2874 if (err)
2875 return -EFAULT;
2876
2877 kit->nr_bits = nr_bits;
2878 return 0;
2879 }
2880
2881 /* Fallback to memalloc */
2882 kit->bits = bpf_mem_alloc(&bpf_global_ma, nr_bytes);
2883 if (!kit->bits)
2884 return -ENOMEM;
2885
2886 err = bpf_probe_read_kernel_common(kit->bits, nr_bytes, unsafe_ptr__ign);
2887 if (err) {
2888 bpf_mem_free(&bpf_global_ma, kit->bits);
2889 return err;
2890 }
2891
2892 kit->nr_bits = nr_bits;
2893 return 0;
2894 }
2895
2896 /**
2897 * bpf_iter_bits_next() - Get the next bit in a bpf_iter_bits
2898 * @it: The bpf_iter_bits to be checked
2899 *
2900 * This function returns a pointer to a number representing the index of the
2901 * next set bit in the bit area.
2902 *
2903 * If there are no further set bits, it returns NULL.
2904 */
2905 __bpf_kfunc int *bpf_iter_bits_next(struct bpf_iter_bits *it)
2906 {
2907 struct bpf_iter_bits_kern *kit = (void *)it;
2908 u32 nr_bits = kit->nr_bits;
2909 const unsigned long *bits;
2910 int bit;
2911
2912 if (nr_bits == 0)
2913 return NULL;
2914
2915 bits = nr_bits == 64 ? &kit->bits_copy : kit->bits;
2916 bit = find_next_bit(bits, nr_bits, kit->bit + 1);
2917 if (bit >= nr_bits) {
2918 kit->nr_bits = 0;
2919 return NULL;
2920 }
2921
2922 kit->bit = bit;
2923 return &kit->bit;
2924 }
2925
2926 /**
2927 * bpf_iter_bits_destroy() - Destroy a bpf_iter_bits
2928 * @it: The bpf_iter_bits to be destroyed
2929 *
2930 * Destroy the resource associated with the bpf_iter_bits.
2931 */
2932 __bpf_kfunc void bpf_iter_bits_destroy(struct bpf_iter_bits *it)
2933 {
2934 struct bpf_iter_bits_kern *kit = (void *)it;
2935
2936 if (kit->nr_bits <= 64)
2937 return;
2938 bpf_mem_free(&bpf_global_ma, kit->bits);
2939 }
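
/* BPF-side usage sketch (illustrative): walking the set bits of a
 * kernel bitmap by calling the new/next/destroy kfuncs directly;
 * 'mask' and 'nr_words' are hypothetical inputs. A failed _new leaves
 * nr_bits at 0, so _next returns NULL and _destroy is a no-op:
 *
 *	struct bpf_iter_bits it;
 *	int *bit;
 *
 *	bpf_iter_bits_new(&it, mask, nr_words);
 *	while ((bit = bpf_iter_bits_next(&it)))
 *		bpf_printk("set bit %d", *bit);
 *	bpf_iter_bits_destroy(&it);
 */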
2940
2941 __bpf_kfunc_end_defs();
2942
2943 BTF_KFUNCS_START(generic_btf_ids)
2944 #ifdef CONFIG_CRASH_DUMP
2945 BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE)
2946 #endif
2947 BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
2948 BTF_ID_FLAGS(func, bpf_percpu_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
2949 BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE)
2950 BTF_ID_FLAGS(func, bpf_percpu_obj_drop_impl, KF_RELEASE)
2951 BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL | KF_RCU)
2952 BTF_ID_FLAGS(func, bpf_list_push_front_impl)
2953 BTF_ID_FLAGS(func, bpf_list_push_back_impl)
2954 BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL)
2955 BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL)
2956 BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
2957 BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
2958 BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL)
2959 BTF_ID_FLAGS(func, bpf_rbtree_add_impl)
2960 BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL)
2961
2962 #ifdef CONFIG_CGROUPS
2963 BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
2964 BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE)
2965 BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
2966 BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL)
2967 BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU)
2968 BTF_ID_FLAGS(func, bpf_task_get_cgroup1, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
2969 #endif
2970 BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL)
2971 BTF_ID_FLAGS(func, bpf_throw)
2972 BTF_KFUNCS_END(generic_btf_ids)
2973
2974 static const struct btf_kfunc_id_set generic_kfunc_set = {
2975 .owner = THIS_MODULE,
2976 .set = &generic_btf_ids,
2977 };
2978
2980 BTF_ID_LIST(generic_dtor_ids)
2981 BTF_ID(struct, task_struct)
2982 BTF_ID(func, bpf_task_release_dtor)
2983 #ifdef CONFIG_CGROUPS
2984 BTF_ID(struct, cgroup)
2985 BTF_ID(func, bpf_cgroup_release_dtor)
2986 #endif
2987
2988 BTF_KFUNCS_START(common_btf_ids)
2989 BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx)
2990 BTF_ID_FLAGS(func, bpf_rdonly_cast)
2991 BTF_ID_FLAGS(func, bpf_rcu_read_lock)
2992 BTF_ID_FLAGS(func, bpf_rcu_read_unlock)
2993 BTF_ID_FLAGS(func, bpf_dynptr_slice, KF_RET_NULL)
2994 BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL)
2995 BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW)
2996 BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL)
2997 BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY)
2998 BTF_ID_FLAGS(func, bpf_iter_task_vma_new, KF_ITER_NEW | KF_RCU)
2999 BTF_ID_FLAGS(func, bpf_iter_task_vma_next, KF_ITER_NEXT | KF_RET_NULL)
3000 BTF_ID_FLAGS(func, bpf_iter_task_vma_destroy, KF_ITER_DESTROY)
3001 #ifdef CONFIG_CGROUPS
3002 BTF_ID_FLAGS(func, bpf_iter_css_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS)
3003 BTF_ID_FLAGS(func, bpf_iter_css_task_next, KF_ITER_NEXT | KF_RET_NULL)
3004 BTF_ID_FLAGS(func, bpf_iter_css_task_destroy, KF_ITER_DESTROY)
3005 BTF_ID_FLAGS(func, bpf_iter_css_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED)
3006 BTF_ID_FLAGS(func, bpf_iter_css_next, KF_ITER_NEXT | KF_RET_NULL)
3007 BTF_ID_FLAGS(func, bpf_iter_css_destroy, KF_ITER_DESTROY)
3008 #endif
3009 BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED)
3010 BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL)
3011 BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY)
3012 BTF_ID_FLAGS(func, bpf_dynptr_adjust)
3013 BTF_ID_FLAGS(func, bpf_dynptr_is_null)
3014 BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly)
3015 BTF_ID_FLAGS(func, bpf_dynptr_size)
3016 BTF_ID_FLAGS(func, bpf_dynptr_clone)
3017 BTF_ID_FLAGS(func, bpf_modify_return_test_tp)
3018 BTF_ID_FLAGS(func, bpf_wq_init)
3019 BTF_ID_FLAGS(func, bpf_wq_set_callback_impl)
3020 BTF_ID_FLAGS(func, bpf_wq_start)
3021 BTF_ID_FLAGS(func, bpf_preempt_disable)
3022 BTF_ID_FLAGS(func, bpf_preempt_enable)
3023 BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW)
3024 BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL)
3025 BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY)
3026 BTF_KFUNCS_END(common_btf_ids)
3027
3028 static const struct btf_kfunc_id_set common_kfunc_set = {
3029 .owner = THIS_MODULE,
3030 .set = &common_btf_ids,
3031 };
3032
3033 static int __init kfunc_init(void)
3034 {
3035 int ret;
3036 const struct btf_id_dtor_kfunc generic_dtors[] = {
3037 {
3038 .btf_id = generic_dtor_ids[0],
3039 .kfunc_btf_id = generic_dtor_ids[1]
3040 },
3041 #ifdef CONFIG_CGROUPS
3042 {
3043 .btf_id = generic_dtor_ids[2],
3044 .kfunc_btf_id = generic_dtor_ids[3]
3045 },
3046 #endif
3047 };
3048
3049 ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set);
3050 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set);
3051 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &generic_kfunc_set);
3052 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set);
3053 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &generic_kfunc_set);
3054 ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors,
3055 ARRAY_SIZE(generic_dtors),
3056 THIS_MODULE);
3057 return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &common_kfunc_set);
3058 }
3059
3060 late_initcall(kfunc_init);
3061
3062 /* Get a pointer to dynptr data up to len bytes for read-only access. If
3063 * the dynptr doesn't have contiguous data up to len bytes, return NULL.
3064 */
3065 const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len)
3066 {
3067 const struct bpf_dynptr *p = (struct bpf_dynptr *)ptr;
3068
3069 return bpf_dynptr_slice(p, 0, NULL, len);
3070 }
3071
3072 /* Get a pointer to dynptr data up to len bytes for read/write access. If
3073 * the dynptr doesn't have contiguous data up to len bytes, or the dynptr
3074 * is read-only, return NULL.
3075 */
3076 void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len)
3077 {
3078 if (__bpf_dynptr_is_rdonly(ptr))
3079 return NULL;
3080 return (void *)__bpf_dynptr_data(ptr, len);
3081 }
3082