1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Linux Socket Filter - Kernel level socket filtering
4 *
5 * Based on the design of the Berkeley Packet Filter. The new
6 * internal format has been designed by PLUMgrid:
7 *
8 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
9 *
10 * Authors:
11 *
12 * Jay Schulist <jschlst@samba.org>
13 * Alexei Starovoitov <ast@plumgrid.com>
14 * Daniel Borkmann <dborkman@redhat.com>
15 *
16 * Andi Kleen - Fix a few bad bugs and races.
17 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
18 */
19
20 #include <uapi/linux/btf.h>
21 #include <linux/filter.h>
22 #include <linux/skbuff.h>
23 #include <linux/vmalloc.h>
24 #include <linux/random.h>
25 #include <linux/bpf.h>
26 #include <linux/btf.h>
27 #include <linux/objtool.h>
28 #include <linux/overflow.h>
29 #include <linux/rbtree_latch.h>
30 #include <linux/kallsyms.h>
31 #include <linux/rcupdate.h>
32 #include <linux/perf_event.h>
33 #include <linux/extable.h>
34 #include <linux/log2.h>
35 #include <linux/bpf_verifier.h>
36 #include <linux/nodemask.h>
37 #include <linux/nospec.h>
38 #include <linux/bpf_mem_alloc.h>
39 #include <linux/memcontrol.h>
40 #include <linux/execmem.h>
41
42 #include <asm/barrier.h>
43 #include <linux/unaligned.h>
44
45 /* Registers */
46 #define BPF_R0 regs[BPF_REG_0]
47 #define BPF_R1 regs[BPF_REG_1]
48 #define BPF_R2 regs[BPF_REG_2]
49 #define BPF_R3 regs[BPF_REG_3]
50 #define BPF_R4 regs[BPF_REG_4]
51 #define BPF_R5 regs[BPF_REG_5]
52 #define BPF_R6 regs[BPF_REG_6]
53 #define BPF_R7 regs[BPF_REG_7]
54 #define BPF_R8 regs[BPF_REG_8]
55 #define BPF_R9 regs[BPF_REG_9]
56 #define BPF_R10 regs[BPF_REG_10]
57
58 /* Named registers */
59 #define DST regs[insn->dst_reg]
60 #define SRC regs[insn->src_reg]
61 #define FP regs[BPF_REG_FP]
62 #define AX regs[BPF_REG_AX]
63 #define ARG1 regs[BPF_REG_ARG1]
64 #define CTX regs[BPF_REG_CTX]
65 #define OFF insn->off
66 #define IMM insn->imm
67
68 struct bpf_mem_alloc bpf_global_ma;
69 bool bpf_global_ma_set;
70
71 /* No hurry in this branch
72 *
73 * Exported for the bpf jit load helper.
74 */
75 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
76 {
77 u8 *ptr = NULL;
78
79 if (k >= SKF_NET_OFF) {
80 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
81 } else if (k >= SKF_LL_OFF) {
82 if (unlikely(!skb_mac_header_was_set(skb)))
83 return NULL;
84 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
85 }
86 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
87 return ptr;
88
89 return NULL;
90 }
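/* Usage sketch (illustrative, not part of the original source): classic BPF
 * filters reference these magic offsets directly, e.g. a statement like
 *	BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 9)
 * reads the IPv4 protocol byte relative to the network header, and JITs fall
 * back to this helper to resolve such negative/virtual offsets at run time.
 */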
91
92 /* Tell BPF programs that include vmlinux.h what the kernel's PAGE_SIZE is. */
93 enum page_size_enum {
94 __PAGE_SIZE = PAGE_SIZE
95 };
96
97 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
98 {
99 gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
100 struct bpf_prog_aux *aux;
101 struct bpf_prog *fp;
102
103 size = round_up(size, __PAGE_SIZE);
104 fp = __vmalloc(size, gfp_flags);
105 if (fp == NULL)
106 return NULL;
107
108 aux = kzalloc(sizeof(*aux), bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
109 if (aux == NULL) {
110 vfree(fp);
111 return NULL;
112 }
113 fp->active = alloc_percpu_gfp(int, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
114 if (!fp->active) {
115 vfree(fp);
116 kfree(aux);
117 return NULL;
118 }
119
120 fp->pages = size / PAGE_SIZE;
121 fp->aux = aux;
122 fp->aux->prog = fp;
123 fp->jit_requested = ebpf_jit_enabled();
124 fp->blinding_requested = bpf_jit_blinding_enabled(fp);
125 #ifdef CONFIG_CGROUP_BPF
126 aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID;
127 #endif
128
129 INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
130 #ifdef CONFIG_FINEIBT
131 INIT_LIST_HEAD_RCU(&fp->aux->ksym_prefix.lnode);
132 #endif
133 mutex_init(&fp->aux->used_maps_mutex);
134 mutex_init(&fp->aux->dst_mutex);
135
136 return fp;
137 }
138
139 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
140 {
141 gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
142 struct bpf_prog *prog;
143 int cpu;
144
145 prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
146 if (!prog)
147 return NULL;
148
149 prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
150 if (!prog->stats) {
151 free_percpu(prog->active);
152 kfree(prog->aux);
153 vfree(prog);
154 return NULL;
155 }
156
157 for_each_possible_cpu(cpu) {
158 struct bpf_prog_stats *pstats;
159
160 pstats = per_cpu_ptr(prog->stats, cpu);
161 u64_stats_init(&pstats->syncp);
162 }
163 return prog;
164 }
165 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
166
167 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
168 {
169 if (!prog->aux->nr_linfo || !prog->jit_requested)
170 return 0;
171
172 prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
173 sizeof(*prog->aux->jited_linfo),
174 bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN));
175 if (!prog->aux->jited_linfo)
176 return -ENOMEM;
177
178 return 0;
179 }
180
181 void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
182 {
183 if (prog->aux->jited_linfo &&
184 (!prog->jited || !prog->aux->jited_linfo[0])) {
185 kvfree(prog->aux->jited_linfo);
186 prog->aux->jited_linfo = NULL;
187 }
188
189 kfree(prog->aux->kfunc_tab);
190 prog->aux->kfunc_tab = NULL;
191 }
192
193 /* The JIT engine is responsible for providing an array
194 * for the insn_off to jited_off mapping (insn_to_jit_off).
195 *
196 * The idx to this array is the insn_off. Hence, the insn_off
197 * here is relative to the prog itself instead of the main prog.
198 * This array has one entry for each xlated bpf insn.
199 *
200 * jited_off is the byte off to the end of the jited insn.
201 *
202 * Hence, with
203 * insn_start:
204 * The first bpf insn off of the prog. The insn off
205 * here is relative to the main prog.
206 * e.g. if prog is a subprog, insn_start > 0
207 * linfo_idx:
208 * The prog's idx to prog->aux->linfo and jited_linfo
209 *
210 * jited_linfo[linfo_idx] = prog->bpf_func
211 *
212 * For i > linfo_idx,
213 *
214 * jited_linfo[i] = prog->bpf_func +
215 * insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
216 */
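/* Worked example (illustrative, not part of the original source): for a
 * subprog with linfo[0].insn_off = 20 (insn_start = 20),
 * jited_linfo[0] = prog->bpf_func. If the next line info entry sits at
 * xlated insn_off = 24, then
 *	jited_linfo[1] = prog->bpf_func + insn_to_jit_off[24 - 20 - 1]
 * i.e. the jited address right past the subprog's 4th translated insn.
 */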
217 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
218 const u32 *insn_to_jit_off)
219 {
220 u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
221 const struct bpf_line_info *linfo;
222 void **jited_linfo;
223
224 if (!prog->aux->jited_linfo || prog->aux->func_idx > prog->aux->func_cnt)
225 /* Userspace did not provide linfo */
226 return;
227
228 linfo_idx = prog->aux->linfo_idx;
229 linfo = &prog->aux->linfo[linfo_idx];
230 insn_start = linfo[0].insn_off;
231 insn_end = insn_start + prog->len;
232
233 jited_linfo = &prog->aux->jited_linfo[linfo_idx];
234 jited_linfo[0] = prog->bpf_func;
235
236 nr_linfo = prog->aux->nr_linfo - linfo_idx;
237
238 for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
239 /* The verifier ensures that linfo[i].insn_off is
240 * strictly increasing
241 */
242 jited_linfo[i] = prog->bpf_func +
243 insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
244 }
245
246 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
247 gfp_t gfp_extra_flags)
248 {
249 gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
250 struct bpf_prog *fp;
251 u32 pages;
252
253 size = round_up(size, PAGE_SIZE);
254 pages = size / PAGE_SIZE;
255 if (pages <= fp_old->pages)
256 return fp_old;
257
258 fp = __vmalloc(size, gfp_flags);
259 if (fp) {
260 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
261 fp->pages = pages;
262 fp->aux->prog = fp;
263
264 /* We keep fp->aux from fp_old around in the new
265 * reallocated structure.
266 */
267 fp_old->aux = NULL;
268 fp_old->stats = NULL;
269 fp_old->active = NULL;
270 __bpf_prog_free(fp_old);
271 }
272
273 return fp;
274 }
275
276 void __bpf_prog_free(struct bpf_prog *fp)
277 {
278 if (fp->aux) {
279 mutex_destroy(&fp->aux->used_maps_mutex);
280 mutex_destroy(&fp->aux->dst_mutex);
281 kfree(fp->aux->poke_tab);
282 kfree(fp->aux);
283 }
284 free_percpu(fp->stats);
285 free_percpu(fp->active);
286 vfree(fp);
287 }
288
289 int bpf_prog_calc_tag(struct bpf_prog *fp)
290 {
291 const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
292 u32 raw_size = bpf_prog_tag_scratch_size(fp);
293 u32 digest[SHA1_DIGEST_WORDS];
294 u32 ws[SHA1_WORKSPACE_WORDS];
295 u32 i, bsize, psize, blocks;
296 struct bpf_insn *dst;
297 bool was_ld_map;
298 u8 *raw, *todo;
299 __be32 *result;
300 __be64 *bits;
301
302 raw = vmalloc(raw_size);
303 if (!raw)
304 return -ENOMEM;
305
306 sha1_init(digest);
307 memset(ws, 0, sizeof(ws));
308
309 /* We need to take out the map fds for the digest calculation
310 * since they are unstable from the user space side.
311 */
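/* Illustrative note (not part of the original source): a ld_imm64 emitted as
 * BPF_LD_MAP_FD(BPF_REG_1, fd) carries a process-local file descriptor
 * number in insn->imm, so two loads of the very same map could otherwise
 * hash to different tags; zeroing imm keeps the tag stable across fd values.
 */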
312 dst = (void *)raw;
313 for (i = 0, was_ld_map = false; i < fp->len; i++) {
314 dst[i] = fp->insnsi[i];
315 if (!was_ld_map &&
316 dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
317 (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
318 dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
319 was_ld_map = true;
320 dst[i].imm = 0;
321 } else if (was_ld_map &&
322 dst[i].code == 0 &&
323 dst[i].dst_reg == 0 &&
324 dst[i].src_reg == 0 &&
325 dst[i].off == 0) {
326 was_ld_map = false;
327 dst[i].imm = 0;
328 } else {
329 was_ld_map = false;
330 }
331 }
332
333 psize = bpf_prog_insn_size(fp);
334 memset(&raw[psize], 0, raw_size - psize);
335 raw[psize++] = 0x80;
336
337 bsize = round_up(psize, SHA1_BLOCK_SIZE);
338 blocks = bsize / SHA1_BLOCK_SIZE;
339 todo = raw;
340 if (bsize - psize >= sizeof(__be64)) {
341 bits = (__be64 *)(todo + bsize - sizeof(__be64));
342 } else {
343 bits = (__be64 *)(todo + bsize + bits_offset);
344 blocks++;
345 }
346 *bits = cpu_to_be64((psize - 1) << 3);
347
348 while (blocks--) {
349 sha1_transform(digest, todo, ws);
350 todo += SHA1_BLOCK_SIZE;
351 }
352
353 result = (__force __be32 *)digest;
354 for (i = 0; i < SHA1_DIGEST_WORDS; i++)
355 result[i] = cpu_to_be32(digest[i]);
356 memcpy(fp->tag, result, sizeof(fp->tag));
357
358 vfree(raw);
359 return 0;
360 }
361
362 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
363 s32 end_new, s32 curr, const bool probe_pass)
364 {
365 const s64 imm_min = S32_MIN, imm_max = S32_MAX;
366 s32 delta = end_new - end_old;
367 s64 imm = insn->imm;
368
369 if (curr < pos && curr + imm + 1 >= end_old)
370 imm += delta;
371 else if (curr >= end_new && curr + imm + 1 < end_new)
372 imm -= delta;
373 if (imm < imm_min || imm > imm_max)
374 return -ERANGE;
375 if (!probe_pass)
376 insn->imm = imm;
377 return 0;
378 }
379
380 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
381 s32 end_new, s32 curr, const bool probe_pass)
382 {
383 s64 off_min, off_max, off;
384 s32 delta = end_new - end_old;
385
386 if (insn->code == (BPF_JMP32 | BPF_JA)) {
387 off = insn->imm;
388 off_min = S32_MIN;
389 off_max = S32_MAX;
390 } else {
391 off = insn->off;
392 off_min = S16_MIN;
393 off_max = S16_MAX;
394 }
395
396 if (curr < pos && curr + off + 1 >= end_old)
397 off += delta;
398 else if (curr >= end_new && curr + off + 1 < end_new)
399 off -= delta;
400 if (off < off_min || off > off_max)
401 return -ERANGE;
402 if (!probe_pass) {
403 if (insn->code == (BPF_JMP32 | BPF_JA))
404 insn->imm = off;
405 else
406 insn->off = off;
407 }
408 return 0;
409 }
410
411 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
412 s32 end_new, const bool probe_pass)
413 {
414 u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
415 struct bpf_insn *insn = prog->insnsi;
416 int ret = 0;
417
418 for (i = 0; i < insn_cnt; i++, insn++) {
419 u8 code;
420
421 /* In the probing pass we still operate on the original,
422 * unpatched image in order to check overflows before we
423 * do any other adjustments. Therefore skip the patchlet.
424 */
425 if (probe_pass && i == pos) {
426 i = end_new;
427 insn = prog->insnsi + end_old;
428 }
429 if (bpf_pseudo_func(insn)) {
430 ret = bpf_adj_delta_to_imm(insn, pos, end_old,
431 end_new, i, probe_pass);
432 if (ret)
433 return ret;
434 continue;
435 }
436 code = insn->code;
437 if ((BPF_CLASS(code) != BPF_JMP &&
438 BPF_CLASS(code) != BPF_JMP32) ||
439 BPF_OP(code) == BPF_EXIT)
440 continue;
441 /* Adjust offset of jmps if we cross patch boundaries. */
442 if (BPF_OP(code) == BPF_CALL) {
443 if (insn->src_reg != BPF_PSEUDO_CALL)
444 continue;
445 ret = bpf_adj_delta_to_imm(insn, pos, end_old,
446 end_new, i, probe_pass);
447 } else {
448 ret = bpf_adj_delta_to_off(insn, pos, end_old,
449 end_new, i, probe_pass);
450 }
451 if (ret)
452 break;
453 }
454
455 return ret;
456 }
457
458 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
459 {
460 struct bpf_line_info *linfo;
461 u32 i, nr_linfo;
462
463 nr_linfo = prog->aux->nr_linfo;
464 if (!nr_linfo || !delta)
465 return;
466
467 linfo = prog->aux->linfo;
468
469 for (i = 0; i < nr_linfo; i++)
470 if (off < linfo[i].insn_off)
471 break;
472
473 /* Push all off < linfo[i].insn_off by delta */
474 for (; i < nr_linfo; i++)
475 linfo[i].insn_off += delta;
476 }
477
478 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
479 const struct bpf_insn *patch, u32 len)
480 {
481 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
482 const u32 cnt_max = S16_MAX;
483 struct bpf_prog *prog_adj;
484 int err;
485
486 /* Since our patchlet doesn't expand the image, we're done. */
487 if (insn_delta == 0) {
488 memcpy(prog->insnsi + off, patch, sizeof(*patch));
489 return prog;
490 }
491
492 insn_adj_cnt = prog->len + insn_delta;
493
494 /* Reject anything that would potentially let the insn->off
495 * target overflow when we have excessive program expansions.
496 * We need to probe here before we do any reallocation where
497 * we afterwards may not fail anymore.
498 */
499 if (insn_adj_cnt > cnt_max &&
500 (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
501 return ERR_PTR(err);
502
503 /* Several new instructions need to be inserted. Make room
504 * for them. Likely, there's no need for a new allocation as
505 * the last page could have large enough tailroom.
506 */
507 prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
508 GFP_USER);
509 if (!prog_adj)
510 return ERR_PTR(-ENOMEM);
511
512 prog_adj->len = insn_adj_cnt;
513
514 /* Patching happens in 3 steps:
515 *
516 * 1) Move over tail of insnsi from next instruction onwards,
517 * so we can patch the single target insn with one or more
518 * new ones (patching is always from 1 to n insns, n > 0).
519 * 2) Inject new instructions at the target location.
520 * 3) Adjust branch offsets if necessary.
521 */
522 insn_rest = insn_adj_cnt - off - len;
523
524 memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
525 sizeof(*patch) * insn_rest);
526 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
527
528 /* We are guaranteed not to fail at this point; otherwise
529 * the ship has sailed and we cannot revert to the original state. An
530 * overflow cannot happen at this point.
531 */
532 BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
533
534 bpf_adj_linfo(prog_adj, off, insn_delta);
535
536 return prog_adj;
537 }
538
539 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
540 {
541 /* Branch offsets can't overflow when program is shrinking, no need
542 * to call bpf_adj_branches(..., true) here
543 */
544 memmove(prog->insnsi + off, prog->insnsi + off + cnt,
545 sizeof(struct bpf_insn) * (prog->len - off - cnt));
546 prog->len -= cnt;
547
548 return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
549 }
550
551 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
552 {
553 int i;
554
555 for (i = 0; i < fp->aux->real_func_cnt; i++)
556 bpf_prog_kallsyms_del(fp->aux->func[i]);
557 }
558
559 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
560 {
561 bpf_prog_kallsyms_del_subprogs(fp);
562 bpf_prog_kallsyms_del(fp);
563 }
564
565 #ifdef CONFIG_BPF_JIT
566 /* All BPF JIT sysctl knobs here. */
567 int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
568 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
569 int bpf_jit_harden __read_mostly;
570 long bpf_jit_limit __read_mostly;
571 long bpf_jit_limit_max __read_mostly;
572
573 static void
574 bpf_prog_ksym_set_addr(struct bpf_prog *prog)
575 {
576 WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
577
578 prog->aux->ksym.start = (unsigned long) prog->bpf_func;
579 prog->aux->ksym.end = prog->aux->ksym.start + prog->jited_len;
580 }
581
582 static void
583 bpf_prog_ksym_set_name(struct bpf_prog *prog)
584 {
585 char *sym = prog->aux->ksym.name;
586 const char *end = sym + KSYM_NAME_LEN;
587 const struct btf_type *type;
588 const char *func_name;
589
590 BUILD_BUG_ON(sizeof("bpf_prog_") +
591 sizeof(prog->tag) * 2 +
592 /* name has been null terminated.
593 * We should need +1 for the '_' preceding
594 * the name. However, the null character
595 * is double counted between the name and the
596 * sizeof("bpf_prog_") above, so we omit
597 * the +1 here.
598 */
599 sizeof(prog->aux->name) > KSYM_NAME_LEN);
600
601 sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
602 sym = bin2hex(sym, prog->tag, sizeof(prog->tag));
603
604 /* prog->aux->name will be ignored if full btf name is available */
605 if (prog->aux->func_info_cnt && prog->aux->func_idx < prog->aux->func_info_cnt) {
606 type = btf_type_by_id(prog->aux->btf,
607 prog->aux->func_info[prog->aux->func_idx].type_id);
608 func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
609 snprintf(sym, (size_t)(end - sym), "_%s", func_name);
610 return;
611 }
612
613 if (prog->aux->name[0])
614 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
615 else
616 *sym = 0;
617 }
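/* Example (illustrative, not part of the original source): a program named
 * "xdp_pass" whose tag happens to be f1e2d3c4b5a69788 would show up in
 * /proc/kallsyms as
 *	bpf_prog_f1e2d3c4b5a69788_xdp_pass
 * which is the format assembled by bpf_prog_ksym_set_name() above.
 */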
618
619 static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
620 {
621 return container_of(n, struct bpf_ksym, tnode)->start;
622 }
623
624 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
625 struct latch_tree_node *b)
626 {
627 return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
628 }
629
630 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
631 {
632 unsigned long val = (unsigned long)key;
633 const struct bpf_ksym *ksym;
634
635 ksym = container_of(n, struct bpf_ksym, tnode);
636
637 if (val < ksym->start)
638 return -1;
639 /* Ensure that we detect return addresses as part of the program, when
640 * the final instruction is a call for a program part of the stack
641 * trace. Therefore, do val > ksym->end instead of val >= ksym->end.
642 */
643 if (val > ksym->end)
644 return 1;
645
646 return 0;
647 }
648
649 static const struct latch_tree_ops bpf_tree_ops = {
650 .less = bpf_tree_less,
651 .comp = bpf_tree_comp,
652 };
653
654 static DEFINE_SPINLOCK(bpf_lock);
655 static LIST_HEAD(bpf_kallsyms);
656 static struct latch_tree_root bpf_tree __cacheline_aligned;
657
658 void bpf_ksym_add(struct bpf_ksym *ksym)
659 {
660 spin_lock_bh(&bpf_lock);
661 WARN_ON_ONCE(!list_empty(&ksym->lnode));
662 list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
663 latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
664 spin_unlock_bh(&bpf_lock);
665 }
666
667 static void __bpf_ksym_del(struct bpf_ksym *ksym)
668 {
669 if (list_empty(&ksym->lnode))
670 return;
671
672 latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
673 list_del_rcu(&ksym->lnode);
674 }
675
676 void bpf_ksym_del(struct bpf_ksym *ksym)
677 {
678 spin_lock_bh(&bpf_lock);
679 __bpf_ksym_del(ksym);
680 spin_unlock_bh(&bpf_lock);
681 }
682
683 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
684 {
685 return fp->jited && !bpf_prog_was_classic(fp);
686 }
687
688 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
689 {
690 if (!bpf_prog_kallsyms_candidate(fp) ||
691 !bpf_token_capable(fp->aux->token, CAP_BPF))
692 return;
693
694 bpf_prog_ksym_set_addr(fp);
695 bpf_prog_ksym_set_name(fp);
696 fp->aux->ksym.prog = true;
697
698 bpf_ksym_add(&fp->aux->ksym);
699
700 #ifdef CONFIG_FINEIBT
701 /*
702 * When FineIBT is enabled, code in the __cfi_foo() symbols can get executed
703 * and hence the unwinder needs help.
704 */
705 if (cfi_mode != CFI_FINEIBT)
706 return;
707
708 snprintf(fp->aux->ksym_prefix.name, KSYM_NAME_LEN,
709 "__cfi_%s", fp->aux->ksym.name);
710
711 fp->aux->ksym_prefix.start = (unsigned long) fp->bpf_func - 16;
712 fp->aux->ksym_prefix.end = (unsigned long) fp->bpf_func;
713
714 bpf_ksym_add(&fp->aux->ksym_prefix);
715 #endif
716 }
717
718 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
719 {
720 if (!bpf_prog_kallsyms_candidate(fp))
721 return;
722
723 bpf_ksym_del(&fp->aux->ksym);
724 #ifdef CONFIG_FINEIBT
725 if (cfi_mode != CFI_FINEIBT)
726 return;
727 bpf_ksym_del(&fp->aux->ksym_prefix);
728 #endif
729 }
730
731 static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
732 {
733 struct latch_tree_node *n;
734
735 n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
736 return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
737 }
738
739 int __bpf_address_lookup(unsigned long addr, unsigned long *size,
740 unsigned long *off, char *sym)
741 {
742 struct bpf_ksym *ksym;
743 int ret = 0;
744
745 rcu_read_lock();
746 ksym = bpf_ksym_find(addr);
747 if (ksym) {
748 unsigned long symbol_start = ksym->start;
749 unsigned long symbol_end = ksym->end;
750
751 ret = strscpy(sym, ksym->name, KSYM_NAME_LEN);
752
753 if (size)
754 *size = symbol_end - symbol_start;
755 if (off)
756 *off = addr - symbol_start;
757 }
758 rcu_read_unlock();
759
760 return ret;
761 }
762
763 bool is_bpf_text_address(unsigned long addr)
764 {
765 bool ret;
766
767 rcu_read_lock();
768 ret = bpf_ksym_find(addr) != NULL;
769 rcu_read_unlock();
770
771 return ret;
772 }
773
774 struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
775 {
776 struct bpf_ksym *ksym = bpf_ksym_find(addr);
777
778 return ksym && ksym->prog ?
779 container_of(ksym, struct bpf_prog_aux, ksym)->prog :
780 NULL;
781 }
782
783 const struct exception_table_entry *search_bpf_extables(unsigned long addr)
784 {
785 const struct exception_table_entry *e = NULL;
786 struct bpf_prog *prog;
787
788 rcu_read_lock();
789 prog = bpf_prog_ksym_find(addr);
790 if (!prog)
791 goto out;
792 if (!prog->aux->num_exentries)
793 goto out;
794
795 e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
796 out:
797 rcu_read_unlock();
798 return e;
799 }
800
801 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
802 char *sym)
803 {
804 struct bpf_ksym *ksym;
805 unsigned int it = 0;
806 int ret = -ERANGE;
807
808 if (!bpf_jit_kallsyms_enabled())
809 return ret;
810
811 rcu_read_lock();
812 list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
813 if (it++ != symnum)
814 continue;
815
816 strscpy(sym, ksym->name, KSYM_NAME_LEN);
817
818 *value = ksym->start;
819 *type = BPF_SYM_ELF_TYPE;
820
821 ret = 0;
822 break;
823 }
824 rcu_read_unlock();
825
826 return ret;
827 }
828
829 int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
830 struct bpf_jit_poke_descriptor *poke)
831 {
832 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
833 static const u32 poke_tab_max = 1024;
834 u32 slot = prog->aux->size_poke_tab;
835 u32 size = slot + 1;
836
837 if (size > poke_tab_max)
838 return -ENOSPC;
839 if (poke->tailcall_target || poke->tailcall_target_stable ||
840 poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
841 return -EINVAL;
842
843 switch (poke->reason) {
844 case BPF_POKE_REASON_TAIL_CALL:
845 if (!poke->tail_call.map)
846 return -EINVAL;
847 break;
848 default:
849 return -EINVAL;
850 }
851
852 tab = krealloc_array(tab, size, sizeof(*poke), GFP_KERNEL);
853 if (!tab)
854 return -ENOMEM;
855
856 memcpy(&tab[slot], poke, sizeof(*poke));
857 prog->aux->size_poke_tab = size;
858 prog->aux->poke_tab = tab;
859
860 return slot;
861 }
862
863 /*
864 * BPF program pack allocator.
865 *
866 * Most BPF programs are pretty small. Allocating a whole page for each
867 * program is sometimes a waste. Many small BPF programs also add pressure
868 * to the instruction TLB. To solve this issue, we introduce a BPF program pack
869 * allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86)
870 * to host BPF programs.
871 */
872 #define BPF_PROG_CHUNK_SHIFT 6
873 #define BPF_PROG_CHUNK_SIZE (1 << BPF_PROG_CHUNK_SHIFT)
874 #define BPF_PROG_CHUNK_MASK (~(BPF_PROG_CHUNK_SIZE - 1))
875
876 struct bpf_prog_pack {
877 struct list_head list;
878 void *ptr;
879 unsigned long bitmap[];
880 };
881
882 void bpf_jit_fill_hole_with_zero(void *area, unsigned int size)
883 {
884 memset(area, 0, size);
885 }
886
887 #define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
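/* Example (illustrative, not part of the original source): with the 64-byte
 * chunks above, a 200-byte JIT image needs
 *	BPF_PROG_SIZE_TO_NBITS(200) = round_up(200, 64) / 64 = 4
 * bitmap bits, so bpf_prog_pack_alloc() below hands out a 256-byte,
 * chunk-aligned slot inside a pack.
 */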
888
889 static DEFINE_MUTEX(pack_mutex);
890 static LIST_HEAD(pack_list);
891
892 /* PMD_SIZE is not available in some special config, e.g. ARCH=arm with
893 * CONFIG_MMU=n. Use PAGE_SIZE in these cases.
894 */
895 #ifdef PMD_SIZE
896 /* PMD_SIZE is really big for some archs. It doesn't make sense to
897 * reserve too much memory in one allocation. Hardcode BPF_PROG_PACK_SIZE to
898 * 2MiB * num_possible_nodes(). On most architectures PMD_SIZE will be
899 * greater than or equal to 2MB.
900 */
901 #define BPF_PROG_PACK_SIZE (SZ_2M * num_possible_nodes())
902 #else
903 #define BPF_PROG_PACK_SIZE PAGE_SIZE
904 #endif
905
906 #define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
907
908 static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
909 {
910 struct bpf_prog_pack *pack;
911 int err;
912
913 pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)),
914 GFP_KERNEL);
915 if (!pack)
916 return NULL;
917 pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE);
918 if (!pack->ptr)
919 goto out;
920 bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);
921 bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
922
923 set_vm_flush_reset_perms(pack->ptr);
924 err = set_memory_rox((unsigned long)pack->ptr,
925 BPF_PROG_PACK_SIZE / PAGE_SIZE);
926 if (err)
927 goto out;
928 list_add_tail(&pack->list, &pack_list);
929 return pack;
930
931 out:
932 bpf_jit_free_exec(pack->ptr);
933 kfree(pack);
934 return NULL;
935 }
936
937 void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
938 {
939 unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
940 struct bpf_prog_pack *pack;
941 unsigned long pos;
942 void *ptr = NULL;
943
944 mutex_lock(&pack_mutex);
945 if (size > BPF_PROG_PACK_SIZE) {
946 size = round_up(size, PAGE_SIZE);
947 ptr = bpf_jit_alloc_exec(size);
948 if (ptr) {
949 int err;
950
951 bpf_fill_ill_insns(ptr, size);
952 set_vm_flush_reset_perms(ptr);
953 err = set_memory_rox((unsigned long)ptr,
954 size / PAGE_SIZE);
955 if (err) {
956 bpf_jit_free_exec(ptr);
957 ptr = NULL;
958 }
959 }
960 goto out;
961 }
962 list_for_each_entry(pack, &pack_list, list) {
963 pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
964 nbits, 0);
965 if (pos < BPF_PROG_CHUNK_COUNT)
966 goto found_free_area;
967 }
968
969 pack = alloc_new_pack(bpf_fill_ill_insns);
970 if (!pack)
971 goto out;
972
973 pos = 0;
974
975 found_free_area:
976 bitmap_set(pack->bitmap, pos, nbits);
977 ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT);
978
979 out:
980 mutex_unlock(&pack_mutex);
981 return ptr;
982 }
983
984 void bpf_prog_pack_free(void *ptr, u32 size)
985 {
986 struct bpf_prog_pack *pack = NULL, *tmp;
987 unsigned int nbits;
988 unsigned long pos;
989
990 mutex_lock(&pack_mutex);
991 if (size > BPF_PROG_PACK_SIZE) {
992 bpf_jit_free_exec(ptr);
993 goto out;
994 }
995
996 list_for_each_entry(tmp, &pack_list, list) {
997 if (ptr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > ptr) {
998 pack = tmp;
999 break;
1000 }
1001 }
1002
1003 if (WARN_ONCE(!pack, "bpf_prog_pack bug\n"))
1004 goto out;
1005
1006 nbits = BPF_PROG_SIZE_TO_NBITS(size);
1007 pos = ((unsigned long)ptr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT;
1008
1009 WARN_ONCE(bpf_arch_text_invalidate(ptr, size),
1010 "bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n");
1011
1012 bitmap_clear(pack->bitmap, pos, nbits);
1013 if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
1014 BPF_PROG_CHUNK_COUNT, 0) == 0) {
1015 list_del(&pack->list);
1016 bpf_jit_free_exec(pack->ptr);
1017 kfree(pack);
1018 }
1019 out:
1020 mutex_unlock(&pack_mutex);
1021 }
1022
1023 static atomic_long_t bpf_jit_current;
1024
1025 /* Can be overridden by an arch's JIT compiler if it has a custom,
1026 * dedicated BPF backend memory area, or if neither of the two
1027 * below apply.
1028 */
1029 u64 __weak bpf_jit_alloc_exec_limit(void)
1030 {
1031 #if defined(MODULES_VADDR)
1032 return MODULES_END - MODULES_VADDR;
1033 #else
1034 return VMALLOC_END - VMALLOC_START;
1035 #endif
1036 }
1037
1038 static int __init bpf_jit_charge_init(void)
1039 {
1040 /* Only used as heuristic here to derive limit. */
1041 bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
1042 bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
1043 PAGE_SIZE), LONG_MAX);
1044 return 0;
1045 }
1046 pure_initcall(bpf_jit_charge_init);
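/* Example (illustrative, not part of the original source): if the arch
 * reports a 1 GiB executable range through bpf_jit_alloc_exec_limit(), the
 * default bpf_jit_limit derived above comes out to roughly 512 MiB, i.e.
 * half of bpf_jit_limit_max rounded up to a page and clamped to LONG_MAX.
 */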
1047
1048 int bpf_jit_charge_modmem(u32 size)
1049 {
1050 if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) {
1051 if (!bpf_capable()) {
1052 atomic_long_sub(size, &bpf_jit_current);
1053 return -EPERM;
1054 }
1055 }
1056
1057 return 0;
1058 }
1059
1060 void bpf_jit_uncharge_modmem(u32 size)
1061 {
1062 atomic_long_sub(size, &bpf_jit_current);
1063 }
1064
1065 void *__weak bpf_jit_alloc_exec(unsigned long size)
1066 {
1067 return execmem_alloc(EXECMEM_BPF, size);
1068 }
1069
1070 void __weak bpf_jit_free_exec(void *addr)
1071 {
1072 execmem_free(addr);
1073 }
1074
1075 struct bpf_binary_header *
1076 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
1077 unsigned int alignment,
1078 bpf_jit_fill_hole_t bpf_fill_ill_insns)
1079 {
1080 struct bpf_binary_header *hdr;
1081 u32 size, hole, start;
1082
1083 WARN_ON_ONCE(!is_power_of_2(alignment) ||
1084 alignment > BPF_IMAGE_ALIGNMENT);
1085
1086 /* Most BPF filters are really small, but if some of them
1087 * fill a page, allow at least 128 extra bytes to insert a
1088 * random section of illegal instructions.
1089 */
1090 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
1091
1092 if (bpf_jit_charge_modmem(size))
1093 return NULL;
1094 hdr = bpf_jit_alloc_exec(size);
1095 if (!hdr) {
1096 bpf_jit_uncharge_modmem(size);
1097 return NULL;
1098 }
1099
1100 /* Fill space with illegal/arch-dep instructions. */
1101 bpf_fill_ill_insns(hdr, size);
1102
1103 hdr->size = size;
1104 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
1105 PAGE_SIZE - sizeof(*hdr));
1106 start = get_random_u32_below(hole) & ~(alignment - 1);
1107
1108 /* Leave a random number of instructions before BPF code. */
1109 *image_ptr = &hdr->image[start];
1110
1111 return hdr;
1112 }
1113
1114 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
1115 {
1116 u32 size = hdr->size;
1117
1118 bpf_jit_free_exec(hdr);
1119 bpf_jit_uncharge_modmem(size);
1120 }
1121
1122 /* Allocate jit binary from bpf_prog_pack allocator.
1123 * Since the allocated memory is RO+X, the JIT engine cannot write directly
1124 * to the memory. To solve this problem, a RW buffer is also allocated
1125 * at the same time. The JIT engine should calculate offsets based on the
1126 * RO memory address, but write the JITed program to the RW buffer. Once the
1127 * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies
1128 * the JITed program to the RO memory.
1129 */
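/* Typical JIT usage sketch (illustrative, not part of the original source):
 *
 *	ro_header = bpf_jit_binary_pack_alloc(proglen, &image, align,
 *					      &rw_header, &rw_image, fill_insns);
 *	... emit instructions into rw_image, computing offsets against image ...
 *	err = bpf_jit_binary_pack_finalize(ro_header, rw_header);
 *
 * On success the JITed text then lives at image inside the RO+X pack.
 */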
1130 struct bpf_binary_header *
1131 bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
1132 unsigned int alignment,
1133 struct bpf_binary_header **rw_header,
1134 u8 **rw_image,
1135 bpf_jit_fill_hole_t bpf_fill_ill_insns)
1136 {
1137 struct bpf_binary_header *ro_header;
1138 u32 size, hole, start;
1139
1140 WARN_ON_ONCE(!is_power_of_2(alignment) ||
1141 alignment > BPF_IMAGE_ALIGNMENT);
1142
1143 /* add 16 bytes for a random section of illegal instructions */
1144 size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE);
1145
1146 if (bpf_jit_charge_modmem(size))
1147 return NULL;
1148 ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns);
1149 if (!ro_header) {
1150 bpf_jit_uncharge_modmem(size);
1151 return NULL;
1152 }
1153
1154 *rw_header = kvmalloc(size, GFP_KERNEL);
1155 if (!*rw_header) {
1156 bpf_prog_pack_free(ro_header, size);
1157 bpf_jit_uncharge_modmem(size);
1158 return NULL;
1159 }
1160
1161 /* Fill space with illegal/arch-dep instructions. */
1162 bpf_fill_ill_insns(*rw_header, size);
1163 (*rw_header)->size = size;
1164
1165 hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)),
1166 BPF_PROG_CHUNK_SIZE - sizeof(*ro_header));
1167 start = get_random_u32_below(hole) & ~(alignment - 1);
1168
1169 *image_ptr = &ro_header->image[start];
1170 *rw_image = &(*rw_header)->image[start];
1171
1172 return ro_header;
1173 }
1174
1175 /* Copy JITed text from rw_header to its final location, the ro_header. */
1176 int bpf_jit_binary_pack_finalize(struct bpf_binary_header *ro_header,
1177 struct bpf_binary_header *rw_header)
1178 {
1179 void *ptr;
1180
1181 ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size);
1182
1183 kvfree(rw_header);
1184
1185 if (IS_ERR(ptr)) {
1186 bpf_prog_pack_free(ro_header, ro_header->size);
1187 return PTR_ERR(ptr);
1188 }
1189 return 0;
1190 }
1191
1192 /* bpf_jit_binary_pack_free is called in two different scenarios:
1193 * 1) when the program is freed after a successful JIT;
1194 * 2) when the JIT engine fails (before bpf_jit_binary_pack_finalize).
1195 * For case 2), we need to free both the RO memory and the RW buffer.
1196 *
1197 * bpf_jit_binary_pack_free requires proper ro_header->size. However,
1198 * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size
1199 * must be set with either bpf_jit_binary_pack_finalize (normal path) or
1200 * bpf_arch_text_copy (when jit fails).
1201 */
1202 void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
1203 struct bpf_binary_header *rw_header)
1204 {
1205 u32 size = ro_header->size;
1206
1207 bpf_prog_pack_free(ro_header, size);
1208 kvfree(rw_header);
1209 bpf_jit_uncharge_modmem(size);
1210 }
1211
1212 struct bpf_binary_header *
1213 bpf_jit_binary_pack_hdr(const struct bpf_prog *fp)
1214 {
1215 unsigned long real_start = (unsigned long)fp->bpf_func;
1216 unsigned long addr;
1217
1218 addr = real_start & BPF_PROG_CHUNK_MASK;
1219 return (void *)addr;
1220 }
1221
1222 static inline struct bpf_binary_header *
1223 bpf_jit_binary_hdr(const struct bpf_prog *fp)
1224 {
1225 unsigned long real_start = (unsigned long)fp->bpf_func;
1226 unsigned long addr;
1227
1228 addr = real_start & PAGE_MASK;
1229 return (void *)addr;
1230 }
1231
1232 /* This symbol is only overridden by archs that have different
1233 * requirements than the usual eBPF JITs, f.e. when they only
1234 * implement cBPF JIT, do not set images read-only, etc.
1235 */
1236 void __weak bpf_jit_free(struct bpf_prog *fp)
1237 {
1238 if (fp->jited) {
1239 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
1240
1241 bpf_jit_binary_free(hdr);
1242 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
1243 }
1244
1245 bpf_prog_unlock_free(fp);
1246 }
1247
1248 int bpf_jit_get_func_addr(const struct bpf_prog *prog,
1249 const struct bpf_insn *insn, bool extra_pass,
1250 u64 *func_addr, bool *func_addr_fixed)
1251 {
1252 s16 off = insn->off;
1253 s32 imm = insn->imm;
1254 u8 *addr;
1255 int err;
1256
1257 *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
1258 if (!*func_addr_fixed) {
1259 /* Place-holder address till the last pass has collected
1260 * all addresses for JITed subprograms in which case we
1261 * can pick them up from prog->aux.
1262 */
1263 if (!extra_pass)
1264 addr = NULL;
1265 else if (prog->aux->func &&
1266 off >= 0 && off < prog->aux->real_func_cnt)
1267 addr = (u8 *)prog->aux->func[off]->bpf_func;
1268 else
1269 return -EINVAL;
1270 } else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
1271 bpf_jit_supports_far_kfunc_call()) {
1272 err = bpf_get_kfunc_addr(prog, insn->imm, insn->off, &addr);
1273 if (err)
1274 return err;
1275 } else {
1276 /* Address of a BPF helper call. Since part of the core
1277 * kernel, it's always at a fixed location. __bpf_call_base
1278 * and the helper with imm relative to it are both in core
1279 * kernel.
1280 */
1281 addr = (u8 *)__bpf_call_base + imm;
1282 }
1283
1284 *func_addr = (unsigned long)addr;
1285 return 0;
1286 }
1287
1288 static int bpf_jit_blind_insn(const struct bpf_insn *from,
1289 const struct bpf_insn *aux,
1290 struct bpf_insn *to_buff,
1291 bool emit_zext)
1292 {
1293 struct bpf_insn *to = to_buff;
1294 u32 imm_rnd = get_random_u32();
1295 s16 off;
1296
1297 BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
1298 BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
1299
1300 /* Constraints on AX register:
1301 *
1302 * AX register is inaccessible from user space. It is mapped in
1303 * all JITs, and used here for constant blinding rewrites. It is
1304 * typically "stateless" meaning its contents are only valid within
1305 * the executed instruction, but not across several instructions.
1306 * There are a few exceptions however which are further detailed
1307 * below.
1308 *
1309 * Constant blinding is only used by JITs, not in the interpreter.
1310 * The interpreter uses AX in some occasions as a local temporary
1311 * register e.g. in DIV or MOD instructions.
1312 *
1313 * In restricted circumstances, the verifier can also use the AX
1314 * register for rewrites as long as they do not interfere with
1315 * the above cases!
1316 */
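/* Blinding example (illustrative, not part of the original source): an insn
 * such as BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 0x1234) is effectively rewritten
 * by the switch below into
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ 0x1234)
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd)
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_AX)
 * so the user-controlled constant never appears verbatim in the image.
 */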
1317 if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
1318 goto out;
1319
1320 if (from->imm == 0 &&
1321 (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
1322 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
1323 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
1324 goto out;
1325 }
1326
1327 switch (from->code) {
1328 case BPF_ALU | BPF_ADD | BPF_K:
1329 case BPF_ALU | BPF_SUB | BPF_K:
1330 case BPF_ALU | BPF_AND | BPF_K:
1331 case BPF_ALU | BPF_OR | BPF_K:
1332 case BPF_ALU | BPF_XOR | BPF_K:
1333 case BPF_ALU | BPF_MUL | BPF_K:
1334 case BPF_ALU | BPF_MOV | BPF_K:
1335 case BPF_ALU | BPF_DIV | BPF_K:
1336 case BPF_ALU | BPF_MOD | BPF_K:
1337 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1338 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1339 *to++ = BPF_ALU32_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
1340 break;
1341
1342 case BPF_ALU64 | BPF_ADD | BPF_K:
1343 case BPF_ALU64 | BPF_SUB | BPF_K:
1344 case BPF_ALU64 | BPF_AND | BPF_K:
1345 case BPF_ALU64 | BPF_OR | BPF_K:
1346 case BPF_ALU64 | BPF_XOR | BPF_K:
1347 case BPF_ALU64 | BPF_MUL | BPF_K:
1348 case BPF_ALU64 | BPF_MOV | BPF_K:
1349 case BPF_ALU64 | BPF_DIV | BPF_K:
1350 case BPF_ALU64 | BPF_MOD | BPF_K:
1351 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1352 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1353 *to++ = BPF_ALU64_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
1354 break;
1355
1356 case BPF_JMP | BPF_JEQ | BPF_K:
1357 case BPF_JMP | BPF_JNE | BPF_K:
1358 case BPF_JMP | BPF_JGT | BPF_K:
1359 case BPF_JMP | BPF_JLT | BPF_K:
1360 case BPF_JMP | BPF_JGE | BPF_K:
1361 case BPF_JMP | BPF_JLE | BPF_K:
1362 case BPF_JMP | BPF_JSGT | BPF_K:
1363 case BPF_JMP | BPF_JSLT | BPF_K:
1364 case BPF_JMP | BPF_JSGE | BPF_K:
1365 case BPF_JMP | BPF_JSLE | BPF_K:
1366 case BPF_JMP | BPF_JSET | BPF_K:
1367 /* Accommodate for extra offset in case of a backjump. */
1368 off = from->off;
1369 if (off < 0)
1370 off -= 2;
1371 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1372 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1373 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
1374 break;
1375
1376 case BPF_JMP32 | BPF_JEQ | BPF_K:
1377 case BPF_JMP32 | BPF_JNE | BPF_K:
1378 case BPF_JMP32 | BPF_JGT | BPF_K:
1379 case BPF_JMP32 | BPF_JLT | BPF_K:
1380 case BPF_JMP32 | BPF_JGE | BPF_K:
1381 case BPF_JMP32 | BPF_JLE | BPF_K:
1382 case BPF_JMP32 | BPF_JSGT | BPF_K:
1383 case BPF_JMP32 | BPF_JSLT | BPF_K:
1384 case BPF_JMP32 | BPF_JSGE | BPF_K:
1385 case BPF_JMP32 | BPF_JSLE | BPF_K:
1386 case BPF_JMP32 | BPF_JSET | BPF_K:
1387 /* Accommodate for extra offset in case of a backjump. */
1388 off = from->off;
1389 if (off < 0)
1390 off -= 2;
1391 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1392 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1393 *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1394 off);
1395 break;
1396
1397 case BPF_LD | BPF_IMM | BPF_DW:
1398 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1399 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1400 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1401 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1402 break;
1403 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1404 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1405 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1406 if (emit_zext)
1407 *to++ = BPF_ZEXT_REG(BPF_REG_AX);
1408 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
1409 break;
1410
1411 case BPF_ST | BPF_MEM | BPF_DW:
1412 case BPF_ST | BPF_MEM | BPF_W:
1413 case BPF_ST | BPF_MEM | BPF_H:
1414 case BPF_ST | BPF_MEM | BPF_B:
1415 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1416 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1417 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1418 break;
1419 }
1420 out:
1421 return to - to_buff;
1422 }
1423
1424 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1425 gfp_t gfp_extra_flags)
1426 {
1427 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1428 struct bpf_prog *fp;
1429
1430 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
1431 if (fp != NULL) {
1432 /* aux->prog still points to the fp_other one, so
1433 * when promoting the clone to the real program,
1434 * this still needs to be adapted.
1435 */
1436 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1437 }
1438
1439 return fp;
1440 }
1441
1442 static void bpf_prog_clone_free(struct bpf_prog *fp)
1443 {
1444 /* aux was stolen by the other clone, so we cannot free
1445 * it from this path! It will be freed eventually by the
1446 * other program on release.
1447 *
1448 * At this point, we don't need a deferred release since
1449 * clone is guaranteed to not be locked.
1450 */
1451 fp->aux = NULL;
1452 fp->stats = NULL;
1453 fp->active = NULL;
1454 __bpf_prog_free(fp);
1455 }
1456
1457 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1458 {
1459 /* We have to repoint aux->prog to self, as we don't
1460 * know whether fp here is the clone or the original.
1461 */
1462 fp->aux->prog = fp;
1463 bpf_prog_clone_free(fp_other);
1464 }
1465
1466 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1467 {
1468 struct bpf_insn insn_buff[16], aux[2];
1469 struct bpf_prog *clone, *tmp;
1470 int insn_delta, insn_cnt;
1471 struct bpf_insn *insn;
1472 int i, rewritten;
1473
1474 if (!prog->blinding_requested || prog->blinded)
1475 return prog;
1476
1477 clone = bpf_prog_clone_create(prog, GFP_USER);
1478 if (!clone)
1479 return ERR_PTR(-ENOMEM);
1480
1481 insn_cnt = clone->len;
1482 insn = clone->insnsi;
1483
1484 for (i = 0; i < insn_cnt; i++, insn++) {
1485 if (bpf_pseudo_func(insn)) {
1486 /* ld_imm64 with an address of bpf subprog is not
1487 * a user controlled constant. Don't randomize it,
1488 * since it will conflict with jit_subprogs() logic.
1489 */
1490 insn++;
1491 i++;
1492 continue;
1493 }
1494
1495 /* We temporarily need to hold the original ld64 insn
1496 * so that we can still access the first part in the
1497 * second blinding run.
1498 */
1499 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1500 insn[1].code == 0)
1501 memcpy(aux, insn, sizeof(aux));
1502
1503 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1504 clone->aux->verifier_zext);
1505 if (!rewritten)
1506 continue;
1507
1508 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1509 if (IS_ERR(tmp)) {
1510 /* Patching may have repointed aux->prog during
1511 * realloc from the original one, so we need to
1512 * fix it up here on error.
1513 */
1514 bpf_jit_prog_release_other(prog, clone);
1515 return tmp;
1516 }
1517
1518 clone = tmp;
1519 insn_delta = rewritten - 1;
1520
1521 /* Walk new program and skip insns we just inserted. */
1522 insn = clone->insnsi + i + insn_delta;
1523 insn_cnt += insn_delta;
1524 i += insn_delta;
1525 }
1526
1527 clone->blinded = 1;
1528 return clone;
1529 }
1530 #endif /* CONFIG_BPF_JIT */
1531
1532 /* Base function for offset calculation. Needs to go into .text section,
1533 * therefore keeping it non-static as well; will also be used by JITs
1534 * anyway later on, so do not let the compiler omit it. This also needs
1535 * to go into kallsyms for correlation from e.g. bpftool, so naming
1536 * must not change.
1537 */
1538 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1539 {
1540 return 0;
1541 }
1542 EXPORT_SYMBOL_GPL(__bpf_call_base);
1543
1544 /* All UAPI available opcodes. */
1545 #define BPF_INSN_MAP(INSN_2, INSN_3) \
1546 /* 32 bit ALU operations. */ \
1547 /* Register based. */ \
1548 INSN_3(ALU, ADD, X), \
1549 INSN_3(ALU, SUB, X), \
1550 INSN_3(ALU, AND, X), \
1551 INSN_3(ALU, OR, X), \
1552 INSN_3(ALU, LSH, X), \
1553 INSN_3(ALU, RSH, X), \
1554 INSN_3(ALU, XOR, X), \
1555 INSN_3(ALU, MUL, X), \
1556 INSN_3(ALU, MOV, X), \
1557 INSN_3(ALU, ARSH, X), \
1558 INSN_3(ALU, DIV, X), \
1559 INSN_3(ALU, MOD, X), \
1560 INSN_2(ALU, NEG), \
1561 INSN_3(ALU, END, TO_BE), \
1562 INSN_3(ALU, END, TO_LE), \
1563 /* Immediate based. */ \
1564 INSN_3(ALU, ADD, K), \
1565 INSN_3(ALU, SUB, K), \
1566 INSN_3(ALU, AND, K), \
1567 INSN_3(ALU, OR, K), \
1568 INSN_3(ALU, LSH, K), \
1569 INSN_3(ALU, RSH, K), \
1570 INSN_3(ALU, XOR, K), \
1571 INSN_3(ALU, MUL, K), \
1572 INSN_3(ALU, MOV, K), \
1573 INSN_3(ALU, ARSH, K), \
1574 INSN_3(ALU, DIV, K), \
1575 INSN_3(ALU, MOD, K), \
1576 /* 64 bit ALU operations. */ \
1577 /* Register based. */ \
1578 INSN_3(ALU64, ADD, X), \
1579 INSN_3(ALU64, SUB, X), \
1580 INSN_3(ALU64, AND, X), \
1581 INSN_3(ALU64, OR, X), \
1582 INSN_3(ALU64, LSH, X), \
1583 INSN_3(ALU64, RSH, X), \
1584 INSN_3(ALU64, XOR, X), \
1585 INSN_3(ALU64, MUL, X), \
1586 INSN_3(ALU64, MOV, X), \
1587 INSN_3(ALU64, ARSH, X), \
1588 INSN_3(ALU64, DIV, X), \
1589 INSN_3(ALU64, MOD, X), \
1590 INSN_2(ALU64, NEG), \
1591 INSN_3(ALU64, END, TO_LE), \
1592 /* Immediate based. */ \
1593 INSN_3(ALU64, ADD, K), \
1594 INSN_3(ALU64, SUB, K), \
1595 INSN_3(ALU64, AND, K), \
1596 INSN_3(ALU64, OR, K), \
1597 INSN_3(ALU64, LSH, K), \
1598 INSN_3(ALU64, RSH, K), \
1599 INSN_3(ALU64, XOR, K), \
1600 INSN_3(ALU64, MUL, K), \
1601 INSN_3(ALU64, MOV, K), \
1602 INSN_3(ALU64, ARSH, K), \
1603 INSN_3(ALU64, DIV, K), \
1604 INSN_3(ALU64, MOD, K), \
1605 /* Call instruction. */ \
1606 INSN_2(JMP, CALL), \
1607 /* Exit instruction. */ \
1608 INSN_2(JMP, EXIT), \
1609 /* 32-bit Jump instructions. */ \
1610 /* Register based. */ \
1611 INSN_3(JMP32, JEQ, X), \
1612 INSN_3(JMP32, JNE, X), \
1613 INSN_3(JMP32, JGT, X), \
1614 INSN_3(JMP32, JLT, X), \
1615 INSN_3(JMP32, JGE, X), \
1616 INSN_3(JMP32, JLE, X), \
1617 INSN_3(JMP32, JSGT, X), \
1618 INSN_3(JMP32, JSLT, X), \
1619 INSN_3(JMP32, JSGE, X), \
1620 INSN_3(JMP32, JSLE, X), \
1621 INSN_3(JMP32, JSET, X), \
1622 /* Immediate based. */ \
1623 INSN_3(JMP32, JEQ, K), \
1624 INSN_3(JMP32, JNE, K), \
1625 INSN_3(JMP32, JGT, K), \
1626 INSN_3(JMP32, JLT, K), \
1627 INSN_3(JMP32, JGE, K), \
1628 INSN_3(JMP32, JLE, K), \
1629 INSN_3(JMP32, JSGT, K), \
1630 INSN_3(JMP32, JSLT, K), \
1631 INSN_3(JMP32, JSGE, K), \
1632 INSN_3(JMP32, JSLE, K), \
1633 INSN_3(JMP32, JSET, K), \
1634 /* Jump instructions. */ \
1635 /* Register based. */ \
1636 INSN_3(JMP, JEQ, X), \
1637 INSN_3(JMP, JNE, X), \
1638 INSN_3(JMP, JGT, X), \
1639 INSN_3(JMP, JLT, X), \
1640 INSN_3(JMP, JGE, X), \
1641 INSN_3(JMP, JLE, X), \
1642 INSN_3(JMP, JSGT, X), \
1643 INSN_3(JMP, JSLT, X), \
1644 INSN_3(JMP, JSGE, X), \
1645 INSN_3(JMP, JSLE, X), \
1646 INSN_3(JMP, JSET, X), \
1647 /* Immediate based. */ \
1648 INSN_3(JMP, JEQ, K), \
1649 INSN_3(JMP, JNE, K), \
1650 INSN_3(JMP, JGT, K), \
1651 INSN_3(JMP, JLT, K), \
1652 INSN_3(JMP, JGE, K), \
1653 INSN_3(JMP, JLE, K), \
1654 INSN_3(JMP, JSGT, K), \
1655 INSN_3(JMP, JSLT, K), \
1656 INSN_3(JMP, JSGE, K), \
1657 INSN_3(JMP, JSLE, K), \
1658 INSN_3(JMP, JSET, K), \
1659 INSN_2(JMP, JA), \
1660 INSN_2(JMP32, JA), \
1661 /* Store instructions. */ \
1662 /* Register based. */ \
1663 INSN_3(STX, MEM, B), \
1664 INSN_3(STX, MEM, H), \
1665 INSN_3(STX, MEM, W), \
1666 INSN_3(STX, MEM, DW), \
1667 INSN_3(STX, ATOMIC, W), \
1668 INSN_3(STX, ATOMIC, DW), \
1669 /* Immediate based. */ \
1670 INSN_3(ST, MEM, B), \
1671 INSN_3(ST, MEM, H), \
1672 INSN_3(ST, MEM, W), \
1673 INSN_3(ST, MEM, DW), \
1674 /* Load instructions. */ \
1675 /* Register based. */ \
1676 INSN_3(LDX, MEM, B), \
1677 INSN_3(LDX, MEM, H), \
1678 INSN_3(LDX, MEM, W), \
1679 INSN_3(LDX, MEM, DW), \
1680 INSN_3(LDX, MEMSX, B), \
1681 INSN_3(LDX, MEMSX, H), \
1682 INSN_3(LDX, MEMSX, W), \
1683 /* Immediate based. */ \
1684 INSN_3(LD, IMM, DW)
1685
1686 bool bpf_opcode_in_insntable(u8 code)
1687 {
1688 #define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true
1689 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1690 static const bool public_insntable[256] = {
1691 [0 ... 255] = false,
1692 /* Now overwrite non-defaults ... */
1693 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1694 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1695 [BPF_LD | BPF_ABS | BPF_B] = true,
1696 [BPF_LD | BPF_ABS | BPF_H] = true,
1697 [BPF_LD | BPF_ABS | BPF_W] = true,
1698 [BPF_LD | BPF_IND | BPF_B] = true,
1699 [BPF_LD | BPF_IND | BPF_H] = true,
1700 [BPF_LD | BPF_IND | BPF_W] = true,
1701 [BPF_JMP | BPF_JCOND] = true,
1702 };
1703 #undef BPF_INSN_3_TBL
1704 #undef BPF_INSN_2_TBL
1705 return public_insntable[code];
1706 }
1707
1708 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1709 /**
1710 * ___bpf_prog_run - run eBPF program on a given context
1711 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1712 * @insn: is the array of eBPF instructions
1713 *
1714 * Decode and execute eBPF instructions.
1715 *
1716 * Return: whatever value is in %BPF_R0 at program exit
1717 */
1718 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
1719 {
1720 #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
1721 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1722 static const void * const jumptable[256] __annotate_jump_table = {
1723 [0 ... 255] = &&default_label,
1724 /* Now overwrite non-defaults ... */
1725 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1726 /* Non-UAPI available opcodes. */
1727 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1728 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1729 [BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC,
1730 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1731 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1732 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1733 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1734 [BPF_LDX | BPF_PROBE_MEMSX | BPF_B] = &&LDX_PROBE_MEMSX_B,
1735 [BPF_LDX | BPF_PROBE_MEMSX | BPF_H] = &&LDX_PROBE_MEMSX_H,
1736 [BPF_LDX | BPF_PROBE_MEMSX | BPF_W] = &&LDX_PROBE_MEMSX_W,
1737 };
1738 #undef BPF_INSN_3_LBL
1739 #undef BPF_INSN_2_LBL
1740 u32 tail_call_cnt = 0;
1741
1742 #define CONT ({ insn++; goto select_insn; })
1743 #define CONT_JMP ({ insn++; goto select_insn; })
1744
1745 select_insn:
1746 goto *jumptable[insn->code];
1747
1748 /* Explicitly mask the register-based shift amounts with 63 or 31
1749 * to avoid undefined behavior. Normally this won't affect the
1750 * generated code, for example, in case of native 64 bit archs such
1751 * as x86-64 or arm64, the compiler is optimizing the AND away for
1752 * the interpreter. In case of JITs, each of the JIT backends compiles
1753 * the BPF shift operations to machine instructions which produce
1754 * implementation-defined results in such a case; the resulting
1755 * contents of the register may be arbitrary, but program behaviour
1756 * as a whole remains defined. In other words, in case of JIT backends,
1757 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
1758 */
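/* Example (illustrative, not part of the original source): with DST = 1 and
 * SRC = 66, ALU64_LSH_X below computes 1 << (66 & 63) = 1 << 2 = 4 instead
 * of hitting C undefined behaviour for an oversized shift amount.
 */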
1759 /* ALU (shifts) */
1760 #define SHT(OPCODE, OP) \
1761 ALU64_##OPCODE##_X: \
1762 DST = DST OP (SRC & 63); \
1763 CONT; \
1764 ALU_##OPCODE##_X: \
1765 DST = (u32) DST OP ((u32) SRC & 31); \
1766 CONT; \
1767 ALU64_##OPCODE##_K: \
1768 DST = DST OP IMM; \
1769 CONT; \
1770 ALU_##OPCODE##_K: \
1771 DST = (u32) DST OP (u32) IMM; \
1772 CONT;
1773 /* ALU (rest) */
1774 #define ALU(OPCODE, OP) \
1775 ALU64_##OPCODE##_X: \
1776 DST = DST OP SRC; \
1777 CONT; \
1778 ALU_##OPCODE##_X: \
1779 DST = (u32) DST OP (u32) SRC; \
1780 CONT; \
1781 ALU64_##OPCODE##_K: \
1782 DST = DST OP IMM; \
1783 CONT; \
1784 ALU_##OPCODE##_K: \
1785 DST = (u32) DST OP (u32) IMM; \
1786 CONT;
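/* For illustration, the ALU(ADD, +) invocation below expands to roughly:
 *
 *	ALU64_ADD_X: DST = DST + SRC; CONT;
 *	ALU_ADD_X:   DST = (u32) DST + (u32) SRC; CONT;
 *	ALU64_ADD_K: DST = DST + IMM; CONT;
 *	ALU_ADD_K:   DST = (u32) DST + (u32) IMM; CONT;
 *
 * i.e. one handler label per BPF_ALU/BPF_ALU64 x BPF_X/BPF_K combination.
 */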
1787 ALU(ADD, +)
1788 ALU(SUB, -)
1789 ALU(AND, &)
1790 ALU(OR, |)
1791 ALU(XOR, ^)
1792 ALU(MUL, *)
1793 SHT(LSH, <<)
1794 SHT(RSH, >>)
1795 #undef SHT
1796 #undef ALU
1797 ALU_NEG:
1798 DST = (u32) -DST;
1799 CONT;
1800 ALU64_NEG:
1801 DST = -DST;
1802 CONT;
1803 ALU_MOV_X:
1804 switch (OFF) {
1805 case 0:
1806 DST = (u32) SRC;
1807 break;
1808 case 8:
1809 DST = (u32)(s8) SRC;
1810 break;
1811 case 16:
1812 DST = (u32)(s16) SRC;
1813 break;
1814 }
1815 CONT;
1816 ALU_MOV_K:
1817 DST = (u32) IMM;
1818 CONT;
1819 ALU64_MOV_X:
1820 switch (OFF) {
1821 case 0:
1822 DST = SRC;
1823 break;
1824 case 8:
1825 DST = (s8) SRC;
1826 break;
1827 case 16:
1828 DST = (s16) SRC;
1829 break;
1830 case 32:
1831 DST = (s32) SRC;
1832 break;
1833 }
1834 CONT;
1835 ALU64_MOV_K:
1836 DST = IMM;
1837 CONT;
1838 LD_IMM_DW:
1839 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1840 insn++;
1841 CONT;
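/* LD_IMM_DW consumes two struct bpf_insn slots: insn[0].imm carries the
 * low 32 bits and insn[1].imm the high 32 bits of the 64-bit immediate,
 * e.g. "r1 = 0x1122334455667788 ll" has insn[0].imm = 0x55667788 and
 * insn[1].imm = 0x11223344, hence the extra insn++ above.
 */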
1842 ALU_ARSH_X:
1843 DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
1844 CONT;
1845 ALU_ARSH_K:
1846 DST = (u64) (u32) (((s32) DST) >> IMM);
1847 CONT;
1848 ALU64_ARSH_X:
1849 (*(s64 *) &DST) >>= (SRC & 63);
1850 CONT;
1851 ALU64_ARSH_K:
1852 (*(s64 *) &DST) >>= IMM;
1853 CONT;
1854 ALU64_MOD_X:
1855 switch (OFF) {
1856 case 0:
1857 div64_u64_rem(DST, SRC, &AX);
1858 DST = AX;
1859 break;
1860 case 1:
1861 AX = div64_s64(DST, SRC);
1862 DST = DST - AX * SRC;
1863 break;
1864 }
1865 CONT;
1866 ALU_MOD_X:
1867 switch (OFF) {
1868 case 0:
1869 AX = (u32) DST;
1870 DST = do_div(AX, (u32) SRC);
1871 break;
1872 case 1:
1873 AX = abs((s32)DST);
1874 AX = do_div(AX, abs((s32)SRC));
1875 if ((s32)DST < 0)
1876 DST = (u32)-AX;
1877 else
1878 DST = (u32)AX;
1879 break;
1880 }
1881 CONT;
1882 ALU64_MOD_K:
1883 switch (OFF) {
1884 case 0:
1885 div64_u64_rem(DST, IMM, &AX);
1886 DST = AX;
1887 break;
1888 case 1:
1889 AX = div64_s64(DST, IMM);
1890 DST = DST - AX * IMM;
1891 break;
1892 }
1893 CONT;
1894 ALU_MOD_K:
1895 switch (OFF) {
1896 case 0:
1897 AX = (u32) DST;
1898 DST = do_div(AX, (u32) IMM);
1899 break;
1900 case 1:
1901 AX = abs((s32)DST);
1902 AX = do_div(AX, abs((s32)IMM));
1903 if ((s32)DST < 0)
1904 DST = (u32)-AX;
1905 else
1906 DST = (u32)AX;
1907 break;
1908 }
1909 CONT;
1910 ALU64_DIV_X:
1911 switch (OFF) {
1912 case 0:
1913 DST = div64_u64(DST, SRC);
1914 break;
1915 case 1:
1916 DST = div64_s64(DST, SRC);
1917 break;
1918 }
1919 CONT;
1920 ALU_DIV_X:
1921 switch (OFF) {
1922 case 0:
1923 AX = (u32) DST;
1924 do_div(AX, (u32) SRC);
1925 DST = (u32) AX;
1926 break;
1927 case 1:
1928 AX = abs((s32)DST);
1929 do_div(AX, abs((s32)SRC));
1930 if (((s32)DST < 0) == ((s32)SRC < 0))
1931 DST = (u32)AX;
1932 else
1933 DST = (u32)-AX;
1934 break;
1935 }
1936 CONT;
1937 ALU64_DIV_K:
1938 switch (OFF) {
1939 case 0:
1940 DST = div64_u64(DST, IMM);
1941 break;
1942 case 1:
1943 DST = div64_s64(DST, IMM);
1944 break;
1945 }
1946 CONT;
1947 ALU_DIV_K:
1948 switch (OFF) {
1949 case 0:
1950 AX = (u32) DST;
1951 do_div(AX, (u32) IMM);
1952 DST = (u32) AX;
1953 break;
1954 case 1:
1955 AX = abs((s32)DST);
1956 do_div(AX, abs((s32)IMM));
1957 if (((s32)DST < 0) == ((s32)IMM < 0))
1958 DST = (u32)AX;
1959 else
1960 DST = (u32)-AX;
1961 break;
1962 }
1963 CONT;
1964 ALU_END_TO_BE:
1965 switch (IMM) {
1966 case 16:
1967 DST = (__force u16) cpu_to_be16(DST);
1968 break;
1969 case 32:
1970 DST = (__force u32) cpu_to_be32(DST);
1971 break;
1972 case 64:
1973 DST = (__force u64) cpu_to_be64(DST);
1974 break;
1975 }
1976 CONT;
1977 ALU_END_TO_LE:
1978 switch (IMM) {
1979 case 16:
1980 DST = (__force u16) cpu_to_le16(DST);
1981 break;
1982 case 32:
1983 DST = (__force u32) cpu_to_le32(DST);
1984 break;
1985 case 64:
1986 DST = (__force u64) cpu_to_le64(DST);
1987 break;
1988 }
1989 CONT;
1990 ALU64_END_TO_LE:
1991 switch (IMM) {
1992 case 16:
1993 DST = (__force u16) __swab16(DST);
1994 break;
1995 case 32:
1996 DST = (__force u32) __swab32(DST);
1997 break;
1998 case 64:
1999 DST = (__force u64) __swab64(DST);
2000 break;
2001 }
2002 CONT;
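/* Example: with IMM == 16, ALU_END_TO_BE behaves like htons() applied to
 * the low 16 bits of DST, with the upper 48 bits cleared. The
 * BPF_ALU64 | BPF_END handler directly above (ALU64_END_TO_LE) instead
 * byte-swaps unconditionally via __swab*() regardless of host endianness.
 */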
2003
2004 /* CALL */
2005 JMP_CALL:
2006 /* Function call scratches BPF_R1-BPF_R5 registers,
2007 * preserves BPF_R6-BPF_R9, and stores return value
2008 * into BPF_R0.
2009 */
2010 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
2011 BPF_R4, BPF_R5);
2012 CONT;
2013
2014 JMP_CALL_ARGS:
2015 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
2016 BPF_R3, BPF_R4,
2017 BPF_R5,
2018 insn + insn->off + 1);
2019 CONT;
2020
2021 JMP_TAIL_CALL: {
2022 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
2023 struct bpf_array *array = container_of(map, struct bpf_array, map);
2024 struct bpf_prog *prog;
2025 u32 index = BPF_R3;
2026
2027 if (unlikely(index >= array->map.max_entries))
2028 goto out;
2029
2030 if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT))
2031 goto out;
2032
2033 tail_call_cnt++;
2034
2035 prog = READ_ONCE(array->ptrs[index]);
2036 if (!prog)
2037 goto out;
2038
2039 /* ARG1 at this point is guaranteed to point to CTX from
2040 * the verifier side due to the fact that the tail call is
2041 * handled like a helper, that is, bpf_tail_call_proto,
2042 * where arg1_type is ARG_PTR_TO_CTX.
2043 */
2044 insn = prog->insnsi;
2045 goto select_insn;
2046 out:
2047 CONT;
2048 }
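/* From the program side this path is reached via the tail call helper,
 * roughly bpf_tail_call(ctx, &prog_array_map, index): R1 keeps pointing
 * at ctx, R2 holds the prog array map and R3 the index, which is exactly
 * the BPF_R2/BPF_R3 usage above.
 */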
2049 JMP_JA:
2050 insn += insn->off;
2051 CONT;
2052 JMP32_JA:
2053 insn += insn->imm;
2054 CONT;
2055 JMP_EXIT:
2056 return BPF_R0;
2057 /* JMP */
2058 #define COND_JMP(SIGN, OPCODE, CMP_OP) \
2059 JMP_##OPCODE##_X: \
2060 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \
2061 insn += insn->off; \
2062 CONT_JMP; \
2063 } \
2064 CONT; \
2065 JMP32_##OPCODE##_X: \
2066 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \
2067 insn += insn->off; \
2068 CONT_JMP; \
2069 } \
2070 CONT; \
2071 JMP_##OPCODE##_K: \
2072 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \
2073 insn += insn->off; \
2074 CONT_JMP; \
2075 } \
2076 CONT; \
2077 JMP32_##OPCODE##_K: \
2078 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \
2079 insn += insn->off; \
2080 CONT_JMP; \
2081 } \
2082 CONT;
2083 COND_JMP(u, JEQ, ==)
2084 COND_JMP(u, JNE, !=)
2085 COND_JMP(u, JGT, >)
2086 COND_JMP(u, JLT, <)
2087 COND_JMP(u, JGE, >=)
2088 COND_JMP(u, JLE, <=)
2089 COND_JMP(u, JSET, &)
2090 COND_JMP(s, JSGT, >)
2091 COND_JMP(s, JSLT, <)
2092 COND_JMP(s, JSGE, >=)
2093 COND_JMP(s, JSLE, <=)
2094 #undef COND_JMP
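/* As an example of the macro above, COND_JMP(s, JSGT, >) emits JMP_JSGT_X
 * implementing "if ((s64) DST > (s64) SRC) goto pc + off", plus the 32-bit
 * (JMP32) and immediate (_K) variants of the same comparison.
 */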
2095 /* ST, STX and LDX */
2096 ST_NOSPEC:
2097 /* Speculation barrier for mitigating Speculative Store Bypass.
2098 * In case of arm64, we rely on the firmware mitigation as
2099 * controlled via the ssbd kernel parameter. Whenever the
2100 * mitigation is enabled, it works for all of the kernel code
2101 * with no need to provide any additional instructions here.
2102 * In case of x86, we use 'lfence' insn for mitigation. We
2103 * reuse preexisting logic from Spectre v1 mitigation that
2104 * happens to produce the required code on x86 for v4 as well.
2105 */
2106 barrier_nospec();
2107 CONT;
2108 #define LDST(SIZEOP, SIZE) \
2109 STX_MEM_##SIZEOP: \
2110 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
2111 CONT; \
2112 ST_MEM_##SIZEOP: \
2113 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
2114 CONT; \
2115 LDX_MEM_##SIZEOP: \
2116 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
2117 CONT; \
2118 LDX_PROBE_MEM_##SIZEOP: \
2119 bpf_probe_read_kernel_common(&DST, sizeof(SIZE), \
2120 (const void *)(long) (SRC + insn->off)); \
2121 DST = *((SIZE *)&DST); \
2122 CONT;
2123
2124 LDST(B, u8)
2125 LDST(H, u16)
2126 LDST(W, u32)
2127 LDST(DW, u64)
2128 #undef LDST
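/* E.g. LDST(W, u32) above provides LDX_MEM_W, i.e. the
 * "r_dst = *(u32 *)(r_src + off)" form implemented as
 * DST = *(u32 *)(unsigned long) (SRC + insn->off); the PROBE_MEM variant
 * goes through bpf_probe_read_kernel_common() instead, so that a faulting
 * kernel address does not crash the interpreter.
 */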
2129
2130 #define LDSX(SIZEOP, SIZE) \
2131 LDX_MEMSX_##SIZEOP: \
2132 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
2133 CONT; \
2134 LDX_PROBE_MEMSX_##SIZEOP: \
2135 bpf_probe_read_kernel_common(&DST, sizeof(SIZE), \
2136 (const void *)(long) (SRC + insn->off)); \
2137 DST = *((SIZE *)&DST); \
2138 CONT;
2139
2140 LDSX(B, s8)
2141 LDSX(H, s16)
2142 LDSX(W, s32)
2143 #undef LDSX
2144
2145 #define ATOMIC_ALU_OP(BOP, KOP) \
2146 case BOP: \
2147 if (BPF_SIZE(insn->code) == BPF_W) \
2148 atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
2149 (DST + insn->off)); \
2150 else \
2151 atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
2152 (DST + insn->off)); \
2153 break; \
2154 case BOP | BPF_FETCH: \
2155 if (BPF_SIZE(insn->code) == BPF_W) \
2156 SRC = (u32) atomic_fetch_##KOP( \
2157 (u32) SRC, \
2158 (atomic_t *)(unsigned long) (DST + insn->off)); \
2159 else \
2160 SRC = (u64) atomic64_fetch_##KOP( \
2161 (u64) SRC, \
2162 (atomic64_t *)(unsigned long) (DST + insn->off)); \
2163 break;
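/* Sketch of what the macro generates: ATOMIC_ALU_OP(BPF_ADD, add) handles
 * "lock *(u32/u64 *)(dst + off) += src" via atomic_add()/atomic64_add(),
 * and with BPF_FETCH set it additionally returns the old memory value in
 * the source register via atomic_fetch_add()/atomic64_fetch_add().
 */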
2164
2165 STX_ATOMIC_DW:
2166 STX_ATOMIC_W:
2167 switch (IMM) {
2168 ATOMIC_ALU_OP(BPF_ADD, add)
2169 ATOMIC_ALU_OP(BPF_AND, and)
2170 ATOMIC_ALU_OP(BPF_OR, or)
2171 ATOMIC_ALU_OP(BPF_XOR, xor)
2172 #undef ATOMIC_ALU_OP
2173
2174 case BPF_XCHG:
2175 if (BPF_SIZE(insn->code) == BPF_W)
2176 SRC = (u32) atomic_xchg(
2177 (atomic_t *)(unsigned long) (DST + insn->off),
2178 (u32) SRC);
2179 else
2180 SRC = (u64) atomic64_xchg(
2181 (atomic64_t *)(unsigned long) (DST + insn->off),
2182 (u64) SRC);
2183 break;
2184 case BPF_CMPXCHG:
2185 if (BPF_SIZE(insn->code) == BPF_W)
2186 BPF_R0 = (u32) atomic_cmpxchg(
2187 (atomic_t *)(unsigned long) (DST + insn->off),
2188 (u32) BPF_R0, (u32) SRC);
2189 else
2190 BPF_R0 = (u64) atomic64_cmpxchg(
2191 (atomic64_t *)(unsigned long) (DST + insn->off),
2192 (u64) BPF_R0, (u64) SRC);
2193 break;
2194
2195 default:
2196 goto default_label;
2197 }
2198 CONT;
2199
2200 default_label:
2201 /* If we ever reach this, we have a bug somewhere. Die hard here
2202 * instead of just returning 0; we could be somewhere in a subprog,
2203 * so execution could otherwise continue, which we do /not/ want.
2204 *
2205 * Note that the verifier only accepts opcodes listed in bpf_opcode_in_insntable().
2206 */
2207 pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
2208 insn->code, insn->imm);
2209 BUG_ON(1);
2210 return 0;
2211 }
2212
2213 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
2214 #define DEFINE_BPF_PROG_RUN(stack_size) \
2215 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
2216 { \
2217 u64 stack[stack_size / sizeof(u64)]; \
2218 u64 regs[MAX_BPF_EXT_REG] = {}; \
2219 \
2220 kmsan_unpoison_memory(stack, sizeof(stack)); \
2221 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2222 ARG1 = (u64) (unsigned long) ctx; \
2223 return ___bpf_prog_run(regs, insn); \
2224 }
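/* For illustration, DEFINE_BPF_PROG_RUN(512) below defines
 * __bpf_prog_run512(), which reserves a 512 byte on-stack BPF stack,
 * points FP (R10) just past its end and enters ___bpf_prog_run() with
 * ctx preloaded into R1.
 */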
2225
2226 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
2227 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
2228 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
2229 const struct bpf_insn *insn) \
2230 { \
2231 u64 stack[stack_size / sizeof(u64)]; \
2232 u64 regs[MAX_BPF_EXT_REG]; \
2233 \
2234 kmsan_unpoison_memory(stack, sizeof(stack)); \
2235 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2236 BPF_R1 = r1; \
2237 BPF_R2 = r2; \
2238 BPF_R3 = r3; \
2239 BPF_R4 = r4; \
2240 BPF_R5 = r5; \
2241 return ___bpf_prog_run(regs, insn); \
2242 }
2243
2244 #define EVAL1(FN, X) FN(X)
2245 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
2246 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
2247 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
2248 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
2249 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
2250
2251 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
2252 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
2253 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
2254
2255 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
2256 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
2257 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
2258
2259 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
2260
2261 static unsigned int (*interpreters[])(const void *ctx,
2262 const struct bpf_insn *insn) = {
2263 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2264 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2265 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2266 };
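/* Slot i of interpreters[] serves programs whose stack_depth rounds up to
 * (i + 1) * 32 bytes, e.g. stack_depth 40 rounds up to 64 and selects
 * __bpf_prog_run64() (see bpf_prog_select_func() below).
 */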
2267 #undef PROG_NAME_LIST
2268 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
2269 static __maybe_unused
2270 u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
2271 const struct bpf_insn *insn) = {
2272 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2273 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2274 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2275 };
2276 #undef PROG_NAME_LIST
2277
2278 #ifdef CONFIG_BPF_SYSCALL
2279 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
2280 {
2281 stack_depth = max_t(u32, stack_depth, 1);
2282 insn->off = (s16) insn->imm;
2283 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
2284 __bpf_call_base_args;
2285 insn->code = BPF_JMP | BPF_CALL_ARGS;
2286 }
2287 #endif
2288 #else
2289 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
2290 const struct bpf_insn *insn)
2291 {
2292 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
2293 * is not working properly, so warn about it!
2294 */
2295 WARN_ON_ONCE(1);
2296 return 0;
2297 }
2298 #endif
2299
2300 bool bpf_prog_map_compatible(struct bpf_map *map,
2301 const struct bpf_prog *fp)
2302 {
2303 enum bpf_prog_type prog_type = resolve_prog_type(fp);
2304 bool ret;
2305 struct bpf_prog_aux *aux = fp->aux;
2306
2307 if (fp->kprobe_override)
2308 return false;
2309
2310 /* XDP programs inserted into maps are not guaranteed to run on
2311 * a particular netdev (and can run outside driver context entirely
2312 * in the case of devmap and cpumap). Until device checks
2313 * are implemented, prohibit adding dev-bound programs to program maps.
2314 */
2315 if (bpf_prog_is_dev_bound(aux))
2316 return false;
2317
2318 spin_lock(&map->owner.lock);
2319 if (!map->owner.type) {
2320 /* There's no owner yet where we could check for
2321 * compatibility.
2322 */
2323 map->owner.type = prog_type;
2324 map->owner.jited = fp->jited;
2325 map->owner.xdp_has_frags = aux->xdp_has_frags;
2326 map->owner.attach_func_proto = aux->attach_func_proto;
2327 ret = true;
2328 } else {
2329 ret = map->owner.type == prog_type &&
2330 map->owner.jited == fp->jited &&
2331 map->owner.xdp_has_frags == aux->xdp_has_frags;
2332 if (ret &&
2333 map->owner.attach_func_proto != aux->attach_func_proto) {
2334 switch (prog_type) {
2335 case BPF_PROG_TYPE_TRACING:
2336 case BPF_PROG_TYPE_LSM:
2337 case BPF_PROG_TYPE_EXT:
2338 case BPF_PROG_TYPE_STRUCT_OPS:
2339 ret = false;
2340 break;
2341 default:
2342 break;
2343 }
2344 }
2345 }
2346 spin_unlock(&map->owner.lock);
2347
2348 return ret;
2349 }
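/* Example: the first program inserted into a prog_array map fixes the
 * owner type and JIT-ness, so adding e.g. an XDP program to a map already
 * owned by a sched_cls program, or mixing JITed and interpreted programs
 * in one map, fails the check above.
 */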
2350
2351 static int bpf_check_tail_call(const struct bpf_prog *fp)
2352 {
2353 struct bpf_prog_aux *aux = fp->aux;
2354 int i, ret = 0;
2355
2356 mutex_lock(&aux->used_maps_mutex);
2357 for (i = 0; i < aux->used_map_cnt; i++) {
2358 struct bpf_map *map = aux->used_maps[i];
2359
2360 if (!map_type_contains_progs(map))
2361 continue;
2362
2363 if (!bpf_prog_map_compatible(map, fp)) {
2364 ret = -EINVAL;
2365 goto out;
2366 }
2367 }
2368
2369 out:
2370 mutex_unlock(&aux->used_maps_mutex);
2371 return ret;
2372 }
2373
2374 static void bpf_prog_select_func(struct bpf_prog *fp)
2375 {
2376 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
2377 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
2378
2379 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
2380 #else
2381 fp->bpf_func = __bpf_prog_ret0_warn;
2382 #endif
2383 }
2384
2385 /**
2386 * bpf_prog_select_runtime - select exec runtime for BPF program
2387 * @fp: bpf_prog populated with BPF program
2388 * @err: pointer to error variable
2389 *
2390 * Try to JIT eBPF program, if JIT is not available, use interpreter.
2391 * The BPF program will be executed via bpf_prog_run() function.
2392 *
2393 * Return: the &fp argument along with &err set to 0 for success or
2394 * a negative errno code on failure
2395 */
2396 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
2397 {
2398 /* In case of BPF-to-BPF calls, the verifier already did all the
2399 * prep work with regard to JITing, etc.
2400 */
2401 bool jit_needed = false;
2402
2403 if (fp->bpf_func)
2404 goto finalize;
2405
2406 if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
2407 bpf_prog_has_kfunc_call(fp))
2408 jit_needed = true;
2409
2410 bpf_prog_select_func(fp);
2411
2412 /* eBPF JITs can rewrite the program in case constant
2413 * blinding is active. However, in case of error during
2414 * blinding, bpf_int_jit_compile() must always return a
2415 * valid program, which in this case would simply not
2416 * be JITed, but fall back to the interpreter.
2417 */
2418 if (!bpf_prog_is_offloaded(fp->aux)) {
2419 *err = bpf_prog_alloc_jited_linfo(fp);
2420 if (*err)
2421 return fp;
2422
2423 fp = bpf_int_jit_compile(fp);
2424 bpf_prog_jit_attempt_done(fp);
2425 if (!fp->jited && jit_needed) {
2426 *err = -ENOTSUPP;
2427 return fp;
2428 }
2429 } else {
2430 *err = bpf_prog_offload_compile(fp);
2431 if (*err)
2432 return fp;
2433 }
2434
2435 finalize:
2436 *err = bpf_prog_lock_ro(fp);
2437 if (*err)
2438 return fp;
2439
2440 /* The tail call compatibility check can only be done at
2441 * this late stage as we need to determine whether we deal
2442 * with JITed or non-JITed program concatenations, and not
2443 * all eBPF JITs might immediately support all features.
2444 */
2445 *err = bpf_check_tail_call(fp);
2446
2447 return fp;
2448 }
2449 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
2450
2451 static unsigned int __bpf_prog_ret1(const void *ctx,
2452 const struct bpf_insn *insn)
2453 {
2454 return 1;
2455 }
2456
2457 static struct bpf_prog_dummy {
2458 struct bpf_prog prog;
2459 } dummy_bpf_prog = {
2460 .prog = {
2461 .bpf_func = __bpf_prog_ret1,
2462 },
2463 };
2464
2465 struct bpf_empty_prog_array bpf_empty_prog_array = {
2466 .null_prog = NULL,
2467 };
2468 EXPORT_SYMBOL(bpf_empty_prog_array);
2469
2470 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
2471 {
2472 struct bpf_prog_array *p;
2473
2474 if (prog_cnt)
2475 p = kzalloc(struct_size(p, items, prog_cnt + 1), flags);
2476 else
2477 p = &bpf_empty_prog_array.hdr;
2478
2479 return p;
2480 }
2481
2482 void bpf_prog_array_free(struct bpf_prog_array *progs)
2483 {
2484 if (!progs || progs == &bpf_empty_prog_array.hdr)
2485 return;
2486 kfree_rcu(progs, rcu);
2487 }
2488
2489 static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu)
2490 {
2491 struct bpf_prog_array *progs;
2492
2493 /* If RCU Tasks Trace grace period implies RCU grace period, there is
2494 * no need to call kfree_rcu(), just call kfree() directly.
2495 */
2496 progs = container_of(rcu, struct bpf_prog_array, rcu);
2497 if (rcu_trace_implies_rcu_gp())
2498 kfree(progs);
2499 else
2500 kfree_rcu(progs, rcu);
2501 }
2502
2503 void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs)
2504 {
2505 if (!progs || progs == &bpf_empty_prog_array.hdr)
2506 return;
2507 call_rcu_tasks_trace(&progs->rcu, __bpf_prog_array_free_sleepable_cb);
2508 }
2509
2510 int bpf_prog_array_length(struct bpf_prog_array *array)
2511 {
2512 struct bpf_prog_array_item *item;
2513 u32 cnt = 0;
2514
2515 for (item = array->items; item->prog; item++)
2516 if (item->prog != &dummy_bpf_prog.prog)
2517 cnt++;
2518 return cnt;
2519 }
2520
2521 bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
2522 {
2523 struct bpf_prog_array_item *item;
2524
2525 for (item = array->items; item->prog; item++)
2526 if (item->prog != &dummy_bpf_prog.prog)
2527 return false;
2528 return true;
2529 }
2530
2531 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
2532 u32 *prog_ids,
2533 u32 request_cnt)
2534 {
2535 struct bpf_prog_array_item *item;
2536 int i = 0;
2537
2538 for (item = array->items; item->prog; item++) {
2539 if (item->prog == &dummy_bpf_prog.prog)
2540 continue;
2541 prog_ids[i] = item->prog->aux->id;
2542 if (++i == request_cnt) {
2543 item++;
2544 break;
2545 }
2546 }
2547
2548 return !!(item->prog);
2549 }
2550
2551 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
2552 __u32 __user *prog_ids, u32 cnt)
2553 {
2554 unsigned long err = 0;
2555 bool nospc;
2556 u32 *ids;
2557
2558 /* users of this function are doing:
2559 * cnt = bpf_prog_array_length();
2560 * if (cnt > 0)
2561 * bpf_prog_array_copy_to_user(..., cnt);
2562 * so below kcalloc doesn't need extra cnt > 0 check.
2563 */
2564 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
2565 if (!ids)
2566 return -ENOMEM;
2567 nospc = bpf_prog_array_copy_core(array, ids, cnt);
2568 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
2569 kfree(ids);
2570 if (err)
2571 return -EFAULT;
2572 if (nospc)
2573 return -ENOSPC;
2574 return 0;
2575 }
2576
2577 void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
2578 struct bpf_prog *old_prog)
2579 {
2580 struct bpf_prog_array_item *item;
2581
2582 for (item = array->items; item->prog; item++)
2583 if (item->prog == old_prog) {
2584 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
2585 break;
2586 }
2587 }
2588
2589 /**
2590 * bpf_prog_array_delete_safe_at() - Replaces the program at the given
2591 * index into the program array with
2592 * a dummy no-op program.
2593 * @array: a bpf_prog_array
2594 * @index: the index of the program to replace
2595 *
2596 * Skips over dummy programs (without counting them) when calculating
2597 * the position of the program to replace.
2598 *
2599 * Return:
2600 * * 0 - Success
2601 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2602 * * -ENOENT - Index out of range
2603 */
2604 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
2605 {
2606 return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
2607 }
2608
2609 /**
2610 * bpf_prog_array_update_at() - Updates the program at the given index
2611 * into the program array.
2612 * @array: a bpf_prog_array
2613 * @index: the index of the program to update
2614 * @prog: the program to insert into the array
2615 *
2616 * Skips over dummy programs (without counting them) when calculating
2617 * the position of the program to update.
2618 *
2619 * Return:
2620 * * 0 - Success
2621 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2622 * * -ENOENT - Index out of range
2623 */
2624 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2625 struct bpf_prog *prog)
2626 {
2627 struct bpf_prog_array_item *item;
2628
2629 if (unlikely(index < 0))
2630 return -EINVAL;
2631
2632 for (item = array->items; item->prog; item++) {
2633 if (item->prog == &dummy_bpf_prog.prog)
2634 continue;
2635 if (!index) {
2636 WRITE_ONCE(item->prog, prog);
2637 return 0;
2638 }
2639 index--;
2640 }
2641 return -ENOENT;
2642 }
2643
2644 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2645 struct bpf_prog *exclude_prog,
2646 struct bpf_prog *include_prog,
2647 u64 bpf_cookie,
2648 struct bpf_prog_array **new_array)
2649 {
2650 int new_prog_cnt, carry_prog_cnt = 0;
2651 struct bpf_prog_array_item *existing, *new;
2652 struct bpf_prog_array *array;
2653 bool found_exclude = false;
2654
2655 /* Figure out how many existing progs we need to carry over to
2656 * the new array.
2657 */
2658 if (old_array) {
2659 existing = old_array->items;
2660 for (; existing->prog; existing++) {
2661 if (existing->prog == exclude_prog) {
2662 found_exclude = true;
2663 continue;
2664 }
2665 if (existing->prog != &dummy_bpf_prog.prog)
2666 carry_prog_cnt++;
2667 if (existing->prog == include_prog)
2668 return -EEXIST;
2669 }
2670 }
2671
2672 if (exclude_prog && !found_exclude)
2673 return -ENOENT;
2674
2675 /* How many progs (not NULL) will be in the new array? */
2676 new_prog_cnt = carry_prog_cnt;
2677 if (include_prog)
2678 new_prog_cnt += 1;
2679
2680 /* Do we have any prog (not NULL) in the new array? */
2681 if (!new_prog_cnt) {
2682 *new_array = NULL;
2683 return 0;
2684 }
2685
2686 /* +1 as the end of prog_array is marked with NULL */
2687 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2688 if (!array)
2689 return -ENOMEM;
2690 new = array->items;
2691
2692 /* Fill in the new prog array */
2693 if (carry_prog_cnt) {
2694 existing = old_array->items;
2695 for (; existing->prog; existing++) {
2696 if (existing->prog == exclude_prog ||
2697 existing->prog == &dummy_bpf_prog.prog)
2698 continue;
2699
2700 new->prog = existing->prog;
2701 new->bpf_cookie = existing->bpf_cookie;
2702 new++;
2703 }
2704 }
2705 if (include_prog) {
2706 new->prog = include_prog;
2707 new->bpf_cookie = bpf_cookie;
2708 new++;
2709 }
2710 new->prog = NULL;
2711 *new_array = array;
2712 return 0;
2713 }
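/* Usage sketch: with old_array = {A, B}, exclude_prog = A and
 * include_prog = C, the resulting array is {B, C}: carried-over entries
 * keep their order and the included program is appended last together
 * with its bpf_cookie.
 */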
2714
2715 int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2716 u32 *prog_ids, u32 request_cnt,
2717 u32 *prog_cnt)
2718 {
2719 u32 cnt = 0;
2720
2721 if (array)
2722 cnt = bpf_prog_array_length(array);
2723
2724 *prog_cnt = cnt;
2725
2726 /* return early if user requested only program count or nothing to copy */
2727 if (!request_cnt || !cnt)
2728 return 0;
2729
2730 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2731 return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2732 : 0;
2733 }
2734
2735 void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2736 struct bpf_map **used_maps, u32 len)
2737 {
2738 struct bpf_map *map;
2739 bool sleepable;
2740 u32 i;
2741
2742 sleepable = aux->prog->sleepable;
2743 for (i = 0; i < len; i++) {
2744 map = used_maps[i];
2745 if (map->ops->map_poke_untrack)
2746 map->ops->map_poke_untrack(map, aux);
2747 if (sleepable)
2748 atomic64_dec(&map->sleepable_refcnt);
2749 bpf_map_put(map);
2750 }
2751 }
2752
2753 static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2754 {
2755 __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2756 kfree(aux->used_maps);
2757 }
2758
2759 void __bpf_free_used_btfs(struct btf_mod_pair *used_btfs, u32 len)
2760 {
2761 #ifdef CONFIG_BPF_SYSCALL
2762 struct btf_mod_pair *btf_mod;
2763 u32 i;
2764
2765 for (i = 0; i < len; i++) {
2766 btf_mod = &used_btfs[i];
2767 if (btf_mod->module)
2768 module_put(btf_mod->module);
2769 btf_put(btf_mod->btf);
2770 }
2771 #endif
2772 }
2773
2774 static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
2775 {
2776 __bpf_free_used_btfs(aux->used_btfs, aux->used_btf_cnt);
2777 kfree(aux->used_btfs);
2778 }
2779
2780 static void bpf_prog_free_deferred(struct work_struct *work)
2781 {
2782 struct bpf_prog_aux *aux;
2783 int i;
2784
2785 aux = container_of(work, struct bpf_prog_aux, work);
2786 #ifdef CONFIG_BPF_SYSCALL
2787 bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
2788 #endif
2789 #ifdef CONFIG_CGROUP_BPF
2790 if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID)
2791 bpf_cgroup_atype_put(aux->cgroup_atype);
2792 #endif
2793 bpf_free_used_maps(aux);
2794 bpf_free_used_btfs(aux);
2795 if (bpf_prog_is_dev_bound(aux))
2796 bpf_prog_dev_bound_destroy(aux->prog);
2797 #ifdef CONFIG_PERF_EVENTS
2798 if (aux->prog->has_callchain_buf)
2799 put_callchain_buffers();
2800 #endif
2801 if (aux->dst_trampoline)
2802 bpf_trampoline_put(aux->dst_trampoline);
2803 for (i = 0; i < aux->real_func_cnt; i++) {
2804 /* We can just unlink the subprog poke descriptor table as
2805 * it was originally linked to the main program and is also
2806 * released along with it.
2807 */
2808 aux->func[i]->aux->poke_tab = NULL;
2809 bpf_jit_free(aux->func[i]);
2810 }
2811 if (aux->real_func_cnt) {
2812 kfree(aux->func);
2813 bpf_prog_unlock_free(aux->prog);
2814 } else {
2815 bpf_jit_free(aux->prog);
2816 }
2817 }
2818
2819 void bpf_prog_free(struct bpf_prog *fp)
2820 {
2821 struct bpf_prog_aux *aux = fp->aux;
2822
2823 if (aux->dst_prog)
2824 bpf_prog_put(aux->dst_prog);
2825 bpf_token_put(aux->token);
2826 INIT_WORK(&aux->work, bpf_prog_free_deferred);
2827 schedule_work(&aux->work);
2828 }
2829 EXPORT_SYMBOL_GPL(bpf_prog_free);
2830
2831 /* RNG for unprivileged user space with separated state from prandom_u32(). */
2832 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2833
2834 void bpf_user_rnd_init_once(void)
2835 {
2836 prandom_init_once(&bpf_user_rnd_state);
2837 }
2838
2839 BPF_CALL_0(bpf_user_rnd_u32)
2840 {
2841 /* Should someone ever have the rather unwise idea to use some
2842 * of the registers passed into this function, then note that
2843 * this function is called from native eBPF and classic-to-eBPF
2844 * transformations. Register assignments from both sides are
2845 * different, f.e. classic always sets fn(ctx, A, X) here.
2846 */
2847 struct rnd_state *state;
2848 u32 res;
2849
2850 state = &get_cpu_var(bpf_user_rnd_state);
2851 res = prandom_u32_state(state);
2852 put_cpu_var(bpf_user_rnd_state);
2853
2854 return res;
2855 }
2856
2857 BPF_CALL_0(bpf_get_raw_cpu_id)
2858 {
2859 return raw_smp_processor_id();
2860 }
2861
2862 /* Weak definitions of helper functions in case we don't have bpf syscall. */
2863 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2864 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2865 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2866 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2867 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2868 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2869 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
2870 const struct bpf_func_proto bpf_spin_lock_proto __weak;
2871 const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2872 const struct bpf_func_proto bpf_jiffies64_proto __weak;
2873
2874 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2875 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2876 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2877 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2878 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
2879 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
2880 const struct bpf_func_proto bpf_ktime_get_tai_ns_proto __weak;
2881
2882 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2883 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2884 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2885 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2886 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
2887 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2888 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
2889 const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
2890 const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
2891 const struct bpf_func_proto bpf_set_retval_proto __weak;
2892 const struct bpf_func_proto bpf_get_retval_proto __weak;
2893
2894 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2895 {
2896 return NULL;
2897 }
2898
2899 const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
2900 {
2901 return NULL;
2902 }
2903
2904 u64 __weak
2905 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2906 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2907 {
2908 return -ENOTSUPP;
2909 }
2910 EXPORT_SYMBOL_GPL(bpf_event_output);
2911
2912 /* Always built-in helper functions. */
2913 const struct bpf_func_proto bpf_tail_call_proto = {
2914 .func = NULL,
2915 .gpl_only = false,
2916 .ret_type = RET_VOID,
2917 .arg1_type = ARG_PTR_TO_CTX,
2918 .arg2_type = ARG_CONST_MAP_PTR,
2919 .arg3_type = ARG_ANYTHING,
2920 };
2921
2922 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2923 * It is encouraged to implement bpf_int_jit_compile() instead, so that
2924 * eBPF and implicitly also cBPF can get JITed!
2925 */
2926 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2927 {
2928 return prog;
2929 }
2930
2931 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
2932 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2933 */
2934 void __weak bpf_jit_compile(struct bpf_prog *prog)
2935 {
2936 }
2937
2938 bool __weak bpf_helper_changes_pkt_data(void *func)
2939 {
2940 return false;
2941 }
2942
2943 /* Return TRUE if the JIT backend wants verifier to enable sub-register usage
2944 * analysis code and wants explicit zero extension inserted by verifier.
2945 * Otherwise, return FALSE.
2946 *
2947 * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
2948 * you don't override this. JITs that don't want these extra insns can detect
2949 * them using insn_is_zext.
2950 */
2951 bool __weak bpf_jit_needs_zext(void)
2952 {
2953 return false;
2954 }
2955
2956 /* Return true if the JIT inlines the call to the helper corresponding to
2957 * the imm.
2958 *
2959 * The verifier will not patch the insn->imm for the call to the helper if
2960 * this returns true.
2961 */
2962 bool __weak bpf_jit_inlines_helper_call(s32 imm)
2963 {
2964 return false;
2965 }
2966
2967 /* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */
2968 bool __weak bpf_jit_supports_subprog_tailcalls(void)
2969 {
2970 return false;
2971 }
2972
2973 bool __weak bpf_jit_supports_percpu_insn(void)
2974 {
2975 return false;
2976 }
2977
2978 bool __weak bpf_jit_supports_kfunc_call(void)
2979 {
2980 return false;
2981 }
2982
2983 bool __weak bpf_jit_supports_far_kfunc_call(void)
2984 {
2985 return false;
2986 }
2987
2988 bool __weak bpf_jit_supports_arena(void)
2989 {
2990 return false;
2991 }
2992
2993 bool __weak bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
2994 {
2995 return false;
2996 }
2997
2998 u64 __weak bpf_arch_uaddress_limit(void)
2999 {
3000 #if defined(CONFIG_64BIT) && defined(CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE)
3001 return TASK_SIZE;
3002 #else
3003 return 0;
3004 #endif
3005 }
3006
3007 /* Return TRUE if the JIT backend satisfies the following two conditions:
3008 * 1) JIT backend supports atomic_xchg() on pointer-sized words.
3009 * 2) Under the specific arch, the implementation of xchg() is the same
3010 * as atomic_xchg() on pointer-sized words.
3011 */
3012 bool __weak bpf_jit_supports_ptr_xchg(void)
3013 {
3014 return false;
3015 }
3016
3017 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
3018 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
3019 */
3020 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
3021 int len)
3022 {
3023 return -EFAULT;
3024 }
3025
3026 int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
3027 void *addr1, void *addr2)
3028 {
3029 return -ENOTSUPP;
3030 }
3031
3032 void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len)
3033 {
3034 return ERR_PTR(-ENOTSUPP);
3035 }
3036
3037 int __weak bpf_arch_text_invalidate(void *dst, size_t len)
3038 {
3039 return -ENOTSUPP;
3040 }
3041
3042 bool __weak bpf_jit_supports_exceptions(void)
3043 {
3044 return false;
3045 }
3046
3047 void __weak arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
3048 {
3049 }
3050
3051 /* for configs without MMU or 32-bit */
3052 __weak const struct bpf_map_ops arena_map_ops;
3053 __weak u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena)
3054 {
3055 return 0;
3056 }
3057 __weak u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena)
3058 {
3059 return 0;
3060 }
3061
3062 #ifdef CONFIG_BPF_SYSCALL
3063 static int __init bpf_global_ma_init(void)
3064 {
3065 int ret;
3066
3067 ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false);
3068 bpf_global_ma_set = !ret;
3069 return ret;
3070 }
3071 late_initcall(bpf_global_ma_init);
3072 #endif
3073
3074 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
3075 EXPORT_SYMBOL(bpf_stats_enabled_key);
3076
3077 /* All definitions of tracepoints related to BPF. */
3078 #define CREATE_TRACE_POINTS
3079 #include <linux/bpf_trace.h>
3080
3081 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
3082 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);
3083