1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Linux Socket Filter - Kernel level socket filtering
4 *
5 * Based on the design of the Berkeley Packet Filter. The new
6 * internal format has been designed by PLUMgrid:
7 *
8 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
9 *
10 * Authors:
11 *
12 * Jay Schulist <jschlst@samba.org>
13 * Alexei Starovoitov <ast@plumgrid.com>
14 * Daniel Borkmann <dborkman@redhat.com>
15 *
16 * Andi Kleen - Fix a few bad bugs and races.
17 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
18 */
19
20 #include <uapi/linux/btf.h>
21 #include <linux/filter.h>
22 #include <linux/skbuff.h>
23 #include <linux/vmalloc.h>
24 #include <linux/random.h>
25 #include <linux/moduleloader.h>
26 #include <linux/bpf.h>
27 #include <linux/btf.h>
28 #include <linux/objtool.h>
29 #include <linux/rbtree_latch.h>
30 #include <linux/kallsyms.h>
31 #include <linux/rcupdate.h>
32 #include <linux/perf_event.h>
33 #include <linux/extable.h>
34 #include <linux/log2.h>
35 #include <asm/unaligned.h>
36
37 /* Registers */
38 #define BPF_R0 regs[BPF_REG_0]
39 #define BPF_R1 regs[BPF_REG_1]
40 #define BPF_R2 regs[BPF_REG_2]
41 #define BPF_R3 regs[BPF_REG_3]
42 #define BPF_R4 regs[BPF_REG_4]
43 #define BPF_R5 regs[BPF_REG_5]
44 #define BPF_R6 regs[BPF_REG_6]
45 #define BPF_R7 regs[BPF_REG_7]
46 #define BPF_R8 regs[BPF_REG_8]
47 #define BPF_R9 regs[BPF_REG_9]
48 #define BPF_R10 regs[BPF_REG_10]
49
50 /* Named registers */
51 #define DST regs[insn->dst_reg]
52 #define SRC regs[insn->src_reg]
53 #define FP regs[BPF_REG_FP]
54 #define AX regs[BPF_REG_AX]
55 #define ARG1 regs[BPF_REG_ARG1]
56 #define CTX regs[BPF_REG_CTX]
57 #define IMM insn->imm
58
59 /* No hurry in this branch
60 *
61 * Exported for the bpf jit load helper.
62 */
63 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
64 {
65 u8 *ptr = NULL;
66
67 if (k >= SKF_NET_OFF)
68 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
69 else if (k >= SKF_LL_OFF)
70 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
71
72 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
73 return ptr;
74
75 return NULL;
76 }
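/* Illustrative example (not part of this file's call graph): a cBPF
 * translation that needs the IP TOS byte could use the special
 * negative-offset space like
 *
 *	ptr = bpf_internal_load_pointer_neg_helper(skb, SKF_NET_OFF + 1, 1);
 *
 * which resolves to skb_network_header(skb) + 1 and yields NULL when
 * that byte would fall outside the linear skb data.
 */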
77
78 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
79 {
80 gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
81 struct bpf_prog_aux *aux;
82 struct bpf_prog *fp;
83
84 size = round_up(size, PAGE_SIZE);
85 fp = __vmalloc(size, gfp_flags);
86 if (fp == NULL)
87 return NULL;
88
89 aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT | gfp_extra_flags);
90 if (aux == NULL) {
91 vfree(fp);
92 return NULL;
93 }
94 fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags);
95 if (!fp->active) {
96 vfree(fp);
97 kfree(aux);
98 return NULL;
99 }
100
101 fp->pages = size / PAGE_SIZE;
102 fp->aux = aux;
103 fp->aux->prog = fp;
104 fp->jit_requested = ebpf_jit_enabled();
105
106 INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
107 mutex_init(&fp->aux->used_maps_mutex);
108 mutex_init(&fp->aux->dst_mutex);
109
110 return fp;
111 }
112
113 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
114 {
115 gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
116 struct bpf_prog *prog;
117 int cpu;
118
119 prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
120 if (!prog)
121 return NULL;
122
123 prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
124 if (!prog->stats) {
125 free_percpu(prog->active);
126 kfree(prog->aux);
127 vfree(prog);
128 return NULL;
129 }
130
131 for_each_possible_cpu(cpu) {
132 struct bpf_prog_stats *pstats;
133
134 pstats = per_cpu_ptr(prog->stats, cpu);
135 u64_stats_init(&pstats->syncp);
136 }
137 return prog;
138 }
139 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
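/* Typical call (sketch, mirroring the program load path): the size
 * passed in already covers the instruction array, e.g.
 *
 *	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
 *
 * where bpf_prog_size() is roughly sizeof(struct bpf_prog) plus
 * insn_cnt instructions, and bpf_prog_alloc_no_stats() above rounds
 * that up to whole pages.
 */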
140
141 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
142 {
143 if (!prog->aux->nr_linfo || !prog->jit_requested)
144 return 0;
145
146 prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
147 sizeof(*prog->aux->jited_linfo),
148 GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
149 if (!prog->aux->jited_linfo)
150 return -ENOMEM;
151
152 return 0;
153 }
154
155 void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
156 {
157 if (prog->aux->jited_linfo &&
158 (!prog->jited || !prog->aux->jited_linfo[0])) {
159 kvfree(prog->aux->jited_linfo);
160 prog->aux->jited_linfo = NULL;
161 }
162
163 kfree(prog->aux->kfunc_tab);
164 prog->aux->kfunc_tab = NULL;
165 }
166
167 /* The JIT engine is responsible for providing an array
168 * for insn_off to the jited_off mapping (insn_to_jit_off).
169 *
170 * The idx to this array is the insn_off. Hence, the insn_off
171 * here is relative to the prog itself instead of the main prog.
172 * This array has one entry for each xlated bpf insn.
173 *
174 * jited_off is the byte off to the last byte of the jited insn.
175 *
176 * Hence, with
177 * insn_start:
178 * The first bpf insn off of the prog. The insn off
179 * here is relative to the main prog.
180 * e.g. if prog is a subprog, insn_start > 0
181 * linfo_idx:
182 * The prog's idx to prog->aux->linfo and jited_linfo
183 *
184 * jited_linfo[linfo_idx] = prog->bpf_func
185 *
186 * For i > linfo_idx,
187 *
188 * jited_linfo[i] = prog->bpf_func +
189 * insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
190 */
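/* Worked example (hypothetical numbers): for a subprog with
 * linfo_idx = 3 whose first line info sits at main-prog insn 100
 * (so insn_start = 100) and whose next line info is at insn 105:
 *
 *	prog->aux->jited_linfo[3] = prog->bpf_func
 *	prog->aux->jited_linfo[4] = prog->bpf_func +
 *				    insn_to_jit_off[105 - 100 - 1]
 *
 * i.e. the byte right after the jited image of prog-relative insn 4,
 * which is where the code for the insn at off 105 begins.
 */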
191 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
192 const u32 *insn_to_jit_off)
193 {
194 u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
195 const struct bpf_line_info *linfo;
196 void **jited_linfo;
197
198 if (!prog->aux->jited_linfo)
199 /* Userspace did not provide linfo */
200 return;
201
202 linfo_idx = prog->aux->linfo_idx;
203 linfo = &prog->aux->linfo[linfo_idx];
204 insn_start = linfo[0].insn_off;
205 insn_end = insn_start + prog->len;
206
207 jited_linfo = &prog->aux->jited_linfo[linfo_idx];
208 jited_linfo[0] = prog->bpf_func;
209
210 nr_linfo = prog->aux->nr_linfo - linfo_idx;
211
212 for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
213 /* The verifier ensures that linfo[i].insn_off is
214 * strictly increasing
215 */
216 jited_linfo[i] = prog->bpf_func +
217 insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
218 }
219
220 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
221 gfp_t gfp_extra_flags)
222 {
223 gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
224 struct bpf_prog *fp;
225 u32 pages;
226
227 size = round_up(size, PAGE_SIZE);
228 pages = size / PAGE_SIZE;
229 if (pages <= fp_old->pages)
230 return fp_old;
231
232 fp = __vmalloc(size, gfp_flags);
233 if (fp) {
234 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
235 fp->pages = pages;
236 fp->aux->prog = fp;
237
238 /* We keep fp->aux from fp_old around in the new
239 * reallocated structure.
240 */
241 fp_old->aux = NULL;
242 fp_old->stats = NULL;
243 fp_old->active = NULL;
244 __bpf_prog_free(fp_old);
245 }
246
247 return fp;
248 }
249
250 void __bpf_prog_free(struct bpf_prog *fp)
251 {
252 if (fp->aux) {
253 mutex_destroy(&fp->aux->used_maps_mutex);
254 mutex_destroy(&fp->aux->dst_mutex);
255 kfree(fp->aux->poke_tab);
256 kfree(fp->aux);
257 }
258 free_percpu(fp->stats);
259 free_percpu(fp->active);
260 vfree(fp);
261 }
262
263 int bpf_prog_calc_tag(struct bpf_prog *fp)
264 {
265 const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
266 u32 raw_size = bpf_prog_tag_scratch_size(fp);
267 u32 digest[SHA1_DIGEST_WORDS];
268 u32 ws[SHA1_WORKSPACE_WORDS];
269 u32 i, bsize, psize, blocks;
270 struct bpf_insn *dst;
271 bool was_ld_map;
272 u8 *raw, *todo;
273 __be32 *result;
274 __be64 *bits;
275
276 raw = vmalloc(raw_size);
277 if (!raw)
278 return -ENOMEM;
279
280 sha1_init(digest);
281 memset(ws, 0, sizeof(ws));
282
283 /* We need to take out the map fds for the digest calculation
284 * since they are unstable from the user space side.
285 */
286 dst = (void *)raw;
287 for (i = 0, was_ld_map = false; i < fp->len; i++) {
288 dst[i] = fp->insnsi[i];
289 if (!was_ld_map &&
290 dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
291 (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
292 dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
293 was_ld_map = true;
294 dst[i].imm = 0;
295 } else if (was_ld_map &&
296 dst[i].code == 0 &&
297 dst[i].dst_reg == 0 &&
298 dst[i].src_reg == 0 &&
299 dst[i].off == 0) {
300 was_ld_map = false;
301 dst[i].imm = 0;
302 } else {
303 was_ld_map = false;
304 }
305 }
306
307 psize = bpf_prog_insn_size(fp);
308 memset(&raw[psize], 0, raw_size - psize);
309 raw[psize++] = 0x80;
310
311 bsize = round_up(psize, SHA1_BLOCK_SIZE);
312 blocks = bsize / SHA1_BLOCK_SIZE;
313 todo = raw;
314 if (bsize - psize >= sizeof(__be64)) {
315 bits = (__be64 *)(todo + bsize - sizeof(__be64));
316 } else {
317 bits = (__be64 *)(todo + bsize + bits_offset);
318 blocks++;
319 }
320 *bits = cpu_to_be64((psize - 1) << 3);
321
322 while (blocks--) {
323 sha1_transform(digest, todo, ws);
324 todo += SHA1_BLOCK_SIZE;
325 }
326
327 result = (__force __be32 *)digest;
328 for (i = 0; i < SHA1_DIGEST_WORDS; i++)
329 result[i] = cpu_to_be32(digest[i]);
330 memcpy(fp->tag, result, sizeof(fp->tag));
331
332 vfree(raw);
333 return 0;
334 }
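/* Example of how the tag is consumed (illustrative): the first
 * BPF_TAG_SIZE bytes of the SHA-1 digest stored in fp->tag are what
 * user space sees as "prog_tag" in /proc/<pid>/fdinfo/<fd> and as
 * "tag" in bpftool, e.g. a value like 2f142349caf6b83d. Zeroing the
 * map fd immediates above keeps this tag stable across loads of the
 * same program even though fd numbers differ per process.
 */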
335
336 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
337 s32 end_new, s32 curr, const bool probe_pass)
338 {
339 const s64 imm_min = S32_MIN, imm_max = S32_MAX;
340 s32 delta = end_new - end_old;
341 s64 imm = insn->imm;
342
343 if (curr < pos && curr + imm + 1 >= end_old)
344 imm += delta;
345 else if (curr >= end_new && curr + imm + 1 < end_new)
346 imm -= delta;
347 if (imm < imm_min || imm > imm_max)
348 return -ERANGE;
349 if (!probe_pass)
350 insn->imm = imm;
351 return 0;
352 }
353
354 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
355 s32 end_new, s32 curr, const bool probe_pass)
356 {
357 const s32 off_min = S16_MIN, off_max = S16_MAX;
358 s32 delta = end_new - end_old;
359 s32 off = insn->off;
360
361 if (curr < pos && curr + off + 1 >= end_old)
362 off += delta;
363 else if (curr >= end_new && curr + off + 1 < end_new)
364 off -= delta;
365 if (off < off_min || off > off_max)
366 return -ERANGE;
367 if (!probe_pass)
368 insn->off = off;
369 return 0;
370 }
371
372 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
373 s32 end_new, const bool probe_pass)
374 {
375 u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
376 struct bpf_insn *insn = prog->insnsi;
377 int ret = 0;
378
379 for (i = 0; i < insn_cnt; i++, insn++) {
380 u8 code;
381
382 /* In the probing pass we still operate on the original,
383 * unpatched image in order to check overflows before we
384 * do any other adjustments. Therefore skip the patchlet.
385 */
386 if (probe_pass && i == pos) {
387 i = end_new;
388 insn = prog->insnsi + end_old;
389 }
390 code = insn->code;
391 if ((BPF_CLASS(code) != BPF_JMP &&
392 BPF_CLASS(code) != BPF_JMP32) ||
393 BPF_OP(code) == BPF_EXIT)
394 continue;
395 /* Adjust offset of jmps if we cross patch boundaries. */
396 if (BPF_OP(code) == BPF_CALL) {
397 if (insn->src_reg != BPF_PSEUDO_CALL)
398 continue;
399 ret = bpf_adj_delta_to_imm(insn, pos, end_old,
400 end_new, i, probe_pass);
401 } else {
402 ret = bpf_adj_delta_to_off(insn, pos, end_old,
403 end_new, i, probe_pass);
404 }
405 if (ret)
406 break;
407 }
408
409 return ret;
410 }
411
412 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
413 {
414 struct bpf_line_info *linfo;
415 u32 i, nr_linfo;
416
417 nr_linfo = prog->aux->nr_linfo;
418 if (!nr_linfo || !delta)
419 return;
420
421 linfo = prog->aux->linfo;
422
423 for (i = 0; i < nr_linfo; i++)
424 if (off < linfo[i].insn_off)
425 break;
426
427 /* Push all off < linfo[i].insn_off by delta */
428 for (; i < nr_linfo; i++)
429 linfo[i].insn_off += delta;
430 }
431
432 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
433 const struct bpf_insn *patch, u32 len)
434 {
435 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
436 const u32 cnt_max = S16_MAX;
437 struct bpf_prog *prog_adj;
438 int err;
439
440 /* Since our patchlet doesn't expand the image, we're done. */
441 if (insn_delta == 0) {
442 memcpy(prog->insnsi + off, patch, sizeof(*patch));
443 return prog;
444 }
445
446 insn_adj_cnt = prog->len + insn_delta;
447
448 /* Reject anything that would potentially let the insn->off
449 * target overflow when we have excessive program expansions.
450 * We need to probe here before we do any reallocation where
451 * we afterwards may not fail anymore.
452 */
453 if (insn_adj_cnt > cnt_max &&
454 (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
455 return ERR_PTR(err);
456
457 /* Several new instructions need to be inserted. Make room
458 * for them. Likely, there's no need for a new allocation as
459 * the last page could have large enough tailroom.
460 */
461 prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
462 GFP_USER);
463 if (!prog_adj)
464 return ERR_PTR(-ENOMEM);
465
466 prog_adj->len = insn_adj_cnt;
467
468 /* Patching happens in 3 steps:
469 *
470 * 1) Move over tail of insnsi from next instruction onwards,
471 * so we can patch the single target insn with one or more
472 * new ones (patching is always from 1 to n insns, n > 0).
473 * 2) Inject new instructions at the target location.
474 * 3) Adjust branch offsets if necessary.
475 */
476 insn_rest = insn_adj_cnt - off - len;
477
478 memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
479 sizeof(*patch) * insn_rest);
480 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
481
482 /* We are guaranteed to not fail at this point; otherwise
483 * the ship has sailed and there is no way back to the original
484 * state. An overflow cannot happen at this point.
485 */
486 BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
487
488 bpf_adj_linfo(prog_adj, off, insn_delta);
489
490 return prog_adj;
491 }
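/* Usage sketch (assumed caller, verifier-style rewrite replacing the
 * single insn at 'off' with two new ones):
 *
 *	struct bpf_insn patch[] = {
 *		BPF_MOV64_REG(BPF_REG_AX, BPF_REG_1),
 *		BPF_ALU64_IMM(BPF_ADD, BPF_REG_AX, 8),
 *	};
 *	prog = bpf_patch_insn_single(prog, off, patch, ARRAY_SIZE(patch));
 *	if (IS_ERR(prog))
 *		return PTR_ERR(prog);
 *
 * The returned prog may be a reallocated copy, so the caller must
 * continue with the returned pointer; branches crossing 'off' have
 * already been adjusted by bpf_adj_branches().
 */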
492
493 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
494 {
495 /* Branch offsets can't overflow when program is shrinking, no need
496 * to call bpf_adj_branches(..., true) here
497 */
498 memmove(prog->insnsi + off, prog->insnsi + off + cnt,
499 sizeof(struct bpf_insn) * (prog->len - off - cnt));
500 prog->len -= cnt;
501
502 return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
503 }
504
505 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
506 {
507 int i;
508
509 for (i = 0; i < fp->aux->func_cnt; i++)
510 bpf_prog_kallsyms_del(fp->aux->func[i]);
511 }
512
513 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
514 {
515 bpf_prog_kallsyms_del_subprogs(fp);
516 bpf_prog_kallsyms_del(fp);
517 }
518
519 #ifdef CONFIG_BPF_JIT
520 /* All BPF JIT sysctl knobs here. */
521 int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
522 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
523 int bpf_jit_harden __read_mostly;
524 long bpf_jit_limit __read_mostly;
525
526 static void
527 bpf_prog_ksym_set_addr(struct bpf_prog *prog)
528 {
529 const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
530 unsigned long addr = (unsigned long)hdr;
531
532 WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
533
534 prog->aux->ksym.start = (unsigned long) prog->bpf_func;
535 prog->aux->ksym.end = addr + hdr->pages * PAGE_SIZE;
536 }
537
538 static void
539 bpf_prog_ksym_set_name(struct bpf_prog *prog)
540 {
541 char *sym = prog->aux->ksym.name;
542 const char *end = sym + KSYM_NAME_LEN;
543 const struct btf_type *type;
544 const char *func_name;
545
546 BUILD_BUG_ON(sizeof("bpf_prog_") +
547 sizeof(prog->tag) * 2 +
548 /* name has been null terminated.
549 * We would need +1 for the '_' preceding
550 * the name. However, the null character
551 * is double counted between the name and the
552 * sizeof("bpf_prog_") above, so we omit
553 * the +1 here.
554 */
555 sizeof(prog->aux->name) > KSYM_NAME_LEN);
556
557 sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
558 sym = bin2hex(sym, prog->tag, sizeof(prog->tag));
559
560 /* prog->aux->name will be ignored if full btf name is available */
561 if (prog->aux->func_info_cnt) {
562 type = btf_type_by_id(prog->aux->btf,
563 prog->aux->func_info[prog->aux->func_idx].type_id);
564 func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
565 snprintf(sym, (size_t)(end - sym), "_%s", func_name);
566 return;
567 }
568
569 if (prog->aux->name[0])
570 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
571 else
572 *sym = 0;
573 }
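/* Resulting symbol format (example with a hypothetical tag/name):
 *
 *	bpf_prog_8937c9e7fc1df1cc_my_prog
 *
 * i.e. "bpf_prog_" + 16 hex chars of prog->tag + "_" + the BTF
 * function name when func_info is present, else aux->name; if
 * neither exists the symbol is just "bpf_prog_<tag>".
 */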
574
575 static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
576 {
577 return container_of(n, struct bpf_ksym, tnode)->start;
578 }
579
580 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
581 struct latch_tree_node *b)
582 {
583 return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
584 }
585
586 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
587 {
588 unsigned long val = (unsigned long)key;
589 const struct bpf_ksym *ksym;
590
591 ksym = container_of(n, struct bpf_ksym, tnode);
592
593 if (val < ksym->start)
594 return -1;
595 if (val >= ksym->end)
596 return 1;
597
598 return 0;
599 }
600
601 static const struct latch_tree_ops bpf_tree_ops = {
602 .less = bpf_tree_less,
603 .comp = bpf_tree_comp,
604 };
605
606 static DEFINE_SPINLOCK(bpf_lock);
607 static LIST_HEAD(bpf_kallsyms);
608 static struct latch_tree_root bpf_tree __cacheline_aligned;
609
610 void bpf_ksym_add(struct bpf_ksym *ksym)
611 {
612 spin_lock_bh(&bpf_lock);
613 WARN_ON_ONCE(!list_empty(&ksym->lnode));
614 list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
615 latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
616 spin_unlock_bh(&bpf_lock);
617 }
618
619 static void __bpf_ksym_del(struct bpf_ksym *ksym)
620 {
621 if (list_empty(&ksym->lnode))
622 return;
623
624 latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
625 list_del_rcu(&ksym->lnode);
626 }
627
628 void bpf_ksym_del(struct bpf_ksym *ksym)
629 {
630 spin_lock_bh(&bpf_lock);
631 __bpf_ksym_del(ksym);
632 spin_unlock_bh(&bpf_lock);
633 }
634
635 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
636 {
637 return fp->jited && !bpf_prog_was_classic(fp);
638 }
639
640 static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
641 {
642 return list_empty(&fp->aux->ksym.lnode) ||
643 fp->aux->ksym.lnode.prev == LIST_POISON2;
644 }
645
646 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
647 {
648 if (!bpf_prog_kallsyms_candidate(fp) ||
649 !bpf_capable())
650 return;
651
652 bpf_prog_ksym_set_addr(fp);
653 bpf_prog_ksym_set_name(fp);
654 fp->aux->ksym.prog = true;
655
656 bpf_ksym_add(&fp->aux->ksym);
657 }
658
659 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
660 {
661 if (!bpf_prog_kallsyms_candidate(fp))
662 return;
663
664 bpf_ksym_del(&fp->aux->ksym);
665 }
666
667 static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
668 {
669 struct latch_tree_node *n;
670
671 n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
672 return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
673 }
674
675 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
676 unsigned long *off, char *sym)
677 {
678 struct bpf_ksym *ksym;
679 char *ret = NULL;
680
681 rcu_read_lock();
682 ksym = bpf_ksym_find(addr);
683 if (ksym) {
684 unsigned long symbol_start = ksym->start;
685 unsigned long symbol_end = ksym->end;
686
687 strncpy(sym, ksym->name, KSYM_NAME_LEN);
688
689 ret = sym;
690 if (size)
691 *size = symbol_end - symbol_start;
692 if (off)
693 *off = addr - symbol_start;
694 }
695 rcu_read_unlock();
696
697 return ret;
698 }
699
700 bool is_bpf_text_address(unsigned long addr)
701 {
702 bool ret;
703
704 rcu_read_lock();
705 ret = bpf_ksym_find(addr) != NULL;
706 rcu_read_unlock();
707
708 return ret;
709 }
710
711 static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
712 {
713 struct bpf_ksym *ksym = bpf_ksym_find(addr);
714
715 return ksym && ksym->prog ?
716 container_of(ksym, struct bpf_prog_aux, ksym)->prog :
717 NULL;
718 }
719
720 const struct exception_table_entry *search_bpf_extables(unsigned long addr)
721 {
722 const struct exception_table_entry *e = NULL;
723 struct bpf_prog *prog;
724
725 rcu_read_lock();
726 prog = bpf_prog_ksym_find(addr);
727 if (!prog)
728 goto out;
729 if (!prog->aux->num_exentries)
730 goto out;
731
732 e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
733 out:
734 rcu_read_unlock();
735 return e;
736 }
737
738 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
739 char *sym)
740 {
741 struct bpf_ksym *ksym;
742 unsigned int it = 0;
743 int ret = -ERANGE;
744
745 if (!bpf_jit_kallsyms_enabled())
746 return ret;
747
748 rcu_read_lock();
749 list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
750 if (it++ != symnum)
751 continue;
752
753 strncpy(sym, ksym->name, KSYM_NAME_LEN);
754
755 *value = ksym->start;
756 *type = BPF_SYM_ELF_TYPE;
757
758 ret = 0;
759 break;
760 }
761 rcu_read_unlock();
762
763 return ret;
764 }
765
766 int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
767 struct bpf_jit_poke_descriptor *poke)
768 {
769 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
770 static const u32 poke_tab_max = 1024;
771 u32 slot = prog->aux->size_poke_tab;
772 u32 size = slot + 1;
773
774 if (size > poke_tab_max)
775 return -ENOSPC;
776 if (poke->tailcall_target || poke->tailcall_target_stable ||
777 poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
778 return -EINVAL;
779
780 switch (poke->reason) {
781 case BPF_POKE_REASON_TAIL_CALL:
782 if (!poke->tail_call.map)
783 return -EINVAL;
784 break;
785 default:
786 return -EINVAL;
787 }
788
789 tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
790 if (!tab)
791 return -ENOMEM;
792
793 memcpy(&tab[slot], poke, sizeof(*poke));
794 prog->aux->size_poke_tab = size;
795 prog->aux->poke_tab = tab;
796
797 return slot;
798 }
799
800 static atomic_long_t bpf_jit_current;
801
802 /* Can be overridden by an arch's JIT compiler if it has a custom,
803 * dedicated BPF backend memory area, or if neither of the two
804 * below apply.
805 */
806 u64 __weak bpf_jit_alloc_exec_limit(void)
807 {
808 #if defined(MODULES_VADDR)
809 return MODULES_END - MODULES_VADDR;
810 #else
811 return VMALLOC_END - VMALLOC_START;
812 #endif
813 }
814
815 static int __init bpf_jit_charge_init(void)
816 {
817 /* Only used as heuristic here to derive limit. */
818 bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
819 PAGE_SIZE), LONG_MAX);
820 return 0;
821 }
822 pure_initcall(bpf_jit_charge_init);
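/* Worked example of the heuristic (illustrative numbers): if
 * bpf_jit_alloc_exec_limit() reports a 1 GiB region, a quarter of it
 * gives bpf_jit_limit = 256 MiB (already page aligned), and
 * bpf_jit_charge_modmem() below starts failing unprivileged requests
 * once bpf_jit_current exceeds 256 MiB >> PAGE_SHIFT pages.
 */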
823
824 int bpf_jit_charge_modmem(u32 pages)
825 {
826 if (atomic_long_add_return(pages, &bpf_jit_current) >
827 (bpf_jit_limit >> PAGE_SHIFT)) {
828 if (!capable(CAP_SYS_ADMIN)) {
829 atomic_long_sub(pages, &bpf_jit_current);
830 return -EPERM;
831 }
832 }
833
834 return 0;
835 }
836
837 void bpf_jit_uncharge_modmem(u32 pages)
838 {
839 atomic_long_sub(pages, &bpf_jit_current);
840 }
841
842 void *__weak bpf_jit_alloc_exec(unsigned long size)
843 {
844 return module_alloc(size);
845 }
846
847 void __weak bpf_jit_free_exec(void *addr)
848 {
849 module_memfree(addr);
850 }
851
852 struct bpf_binary_header *
853 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
854 unsigned int alignment,
855 bpf_jit_fill_hole_t bpf_fill_ill_insns)
856 {
857 struct bpf_binary_header *hdr;
858 u32 size, hole, start, pages;
859
860 WARN_ON_ONCE(!is_power_of_2(alignment) ||
861 alignment > BPF_IMAGE_ALIGNMENT);
862
863 /* Most BPF filters are really small, but if one of them
864 * fills a page, allow at least 128 extra bytes to insert a
865 * random section of illegal instructions.
866 */
867 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
868 pages = size / PAGE_SIZE;
869
870 if (bpf_jit_charge_modmem(pages))
871 return NULL;
872 hdr = bpf_jit_alloc_exec(size);
873 if (!hdr) {
874 bpf_jit_uncharge_modmem(pages);
875 return NULL;
876 }
877
878 /* Fill space with illegal/arch-dep instructions. */
879 bpf_fill_ill_insns(hdr, size);
880
881 hdr->pages = pages;
882 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
883 PAGE_SIZE - sizeof(*hdr));
884 start = (get_random_int() % hole) & ~(alignment - 1);
885
886 /* Leave a random number of instructions before BPF code. */
887 *image_ptr = &hdr->image[start];
888
889 return hdr;
890 }
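/* Resulting layout of one JIT image allocation (sketch):
 *
 *	[ struct bpf_binary_header | image[0..start) random padding |
 *	  image[start..start+proglen) JITed code | tail padding ]
 *
 * Everything except the program itself stays filled with the
 * arch-provided illegal instructions, and 'start' is randomized, so
 * the executable region is harder to reuse as a gadget source.
 */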
891
892 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
893 {
894 u32 pages = hdr->pages;
895
896 bpf_jit_free_exec(hdr);
897 bpf_jit_uncharge_modmem(pages);
898 }
899
900 /* This symbol is only overridden by archs that have different
901 * requirements than the usual eBPF JITs, f.e. when they only
902 * implement cBPF JIT, do not set images read-only, etc.
903 */
904 void __weak bpf_jit_free(struct bpf_prog *fp)
905 {
906 if (fp->jited) {
907 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
908
909 bpf_jit_binary_free(hdr);
910
911 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
912 }
913
914 bpf_prog_unlock_free(fp);
915 }
916
917 int bpf_jit_get_func_addr(const struct bpf_prog *prog,
918 const struct bpf_insn *insn, bool extra_pass,
919 u64 *func_addr, bool *func_addr_fixed)
920 {
921 s16 off = insn->off;
922 s32 imm = insn->imm;
923 u8 *addr;
924
925 *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
926 if (!*func_addr_fixed) {
927 /* Place-holder address till the last pass has collected
928 * all addresses for JITed subprograms in which case we
929 * can pick them up from prog->aux.
930 */
931 if (!extra_pass)
932 addr = NULL;
933 else if (prog->aux->func &&
934 off >= 0 && off < prog->aux->func_cnt)
935 addr = (u8 *)prog->aux->func[off]->bpf_func;
936 else
937 return -EINVAL;
938 } else {
939 /* Address of a BPF helper call. Since part of the core
940 * kernel, it's always at a fixed location. __bpf_call_base
941 * and the helper with imm relative to it are both in core
942 * kernel.
943 */
944 addr = (u8 *)__bpf_call_base + imm;
945 }
946
947 *func_addr = (unsigned long)addr;
948 return 0;
949 }
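/* Example for the fixed-address case (illustrative): for a call to
 * the bpf_map_lookup_elem() helper the verifier stores
 *
 *	insn->imm = bpf_map_lookup_elem - __bpf_call_base
 *
 * so the absolute address recovered above is simply
 * __bpf_call_base + insn->imm, since both symbols live in core
 * kernel text and the imm is computed on the running kernel.
 */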
950
951 static int bpf_jit_blind_insn(const struct bpf_insn *from,
952 const struct bpf_insn *aux,
953 struct bpf_insn *to_buff,
954 bool emit_zext)
955 {
956 struct bpf_insn *to = to_buff;
957 u32 imm_rnd = get_random_int();
958 s16 off;
959
960 BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
961 BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
962
963 /* Constraints on AX register:
964 *
965 * AX register is inaccessible from user space. It is mapped in
966 * all JITs, and used here for constant blinding rewrites. It is
967 * typically "stateless" meaning its contents are only valid within
968 * the executed instruction, but not across several instructions.
969 * There are a few exceptions however which are further detailed
970 * below.
971 *
972 * Constant blinding is only used by JITs, not in the interpreter.
973 * The interpreter uses AX in some occasions as a local temporary
974 * register e.g. in DIV or MOD instructions.
975 *
976 * In restricted circumstances, the verifier can also use the AX
977 * register for rewrites as long as they do not interfere with
978 * the above cases!
979 */
980 if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
981 goto out;
982
983 if (from->imm == 0 &&
984 (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
985 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
986 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
987 goto out;
988 }
989
990 switch (from->code) {
991 case BPF_ALU | BPF_ADD | BPF_K:
992 case BPF_ALU | BPF_SUB | BPF_K:
993 case BPF_ALU | BPF_AND | BPF_K:
994 case BPF_ALU | BPF_OR | BPF_K:
995 case BPF_ALU | BPF_XOR | BPF_K:
996 case BPF_ALU | BPF_MUL | BPF_K:
997 case BPF_ALU | BPF_MOV | BPF_K:
998 case BPF_ALU | BPF_DIV | BPF_K:
999 case BPF_ALU | BPF_MOD | BPF_K:
1000 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1001 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1002 *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
1003 break;
1004
1005 case BPF_ALU64 | BPF_ADD | BPF_K:
1006 case BPF_ALU64 | BPF_SUB | BPF_K:
1007 case BPF_ALU64 | BPF_AND | BPF_K:
1008 case BPF_ALU64 | BPF_OR | BPF_K:
1009 case BPF_ALU64 | BPF_XOR | BPF_K:
1010 case BPF_ALU64 | BPF_MUL | BPF_K:
1011 case BPF_ALU64 | BPF_MOV | BPF_K:
1012 case BPF_ALU64 | BPF_DIV | BPF_K:
1013 case BPF_ALU64 | BPF_MOD | BPF_K:
1014 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1015 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1016 *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
1017 break;
1018
1019 case BPF_JMP | BPF_JEQ | BPF_K:
1020 case BPF_JMP | BPF_JNE | BPF_K:
1021 case BPF_JMP | BPF_JGT | BPF_K:
1022 case BPF_JMP | BPF_JLT | BPF_K:
1023 case BPF_JMP | BPF_JGE | BPF_K:
1024 case BPF_JMP | BPF_JLE | BPF_K:
1025 case BPF_JMP | BPF_JSGT | BPF_K:
1026 case BPF_JMP | BPF_JSLT | BPF_K:
1027 case BPF_JMP | BPF_JSGE | BPF_K:
1028 case BPF_JMP | BPF_JSLE | BPF_K:
1029 case BPF_JMP | BPF_JSET | BPF_K:
1030 /* Accommodate for extra offset in case of a backjump. */
1031 off = from->off;
1032 if (off < 0)
1033 off -= 2;
1034 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1035 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1036 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
1037 break;
1038
1039 case BPF_JMP32 | BPF_JEQ | BPF_K:
1040 case BPF_JMP32 | BPF_JNE | BPF_K:
1041 case BPF_JMP32 | BPF_JGT | BPF_K:
1042 case BPF_JMP32 | BPF_JLT | BPF_K:
1043 case BPF_JMP32 | BPF_JGE | BPF_K:
1044 case BPF_JMP32 | BPF_JLE | BPF_K:
1045 case BPF_JMP32 | BPF_JSGT | BPF_K:
1046 case BPF_JMP32 | BPF_JSLT | BPF_K:
1047 case BPF_JMP32 | BPF_JSGE | BPF_K:
1048 case BPF_JMP32 | BPF_JSLE | BPF_K:
1049 case BPF_JMP32 | BPF_JSET | BPF_K:
1050 /* Accommodate for extra offset in case of a backjump. */
1051 off = from->off;
1052 if (off < 0)
1053 off -= 2;
1054 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1055 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1056 *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1057 off);
1058 break;
1059
1060 case BPF_LD | BPF_IMM | BPF_DW:
1061 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1062 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1063 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1064 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1065 break;
1066 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1067 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1068 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1069 if (emit_zext)
1070 *to++ = BPF_ZEXT_REG(BPF_REG_AX);
1071 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
1072 break;
1073
1074 case BPF_ST | BPF_MEM | BPF_DW:
1075 case BPF_ST | BPF_MEM | BPF_W:
1076 case BPF_ST | BPF_MEM | BPF_H:
1077 case BPF_ST | BPF_MEM | BPF_B:
1078 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1079 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1080 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1081 break;
1082 }
1083 out:
1084 return to - to_buff;
1085 }
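/* Blinding example (hypothetical values): an ALU64 "r1 += 0x1234"
 * with a per-insn random value imm_rnd = 0xdeadbeef becomes
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0xdeadbeef ^ 0x1234)
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, 0xdeadbeef)
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX)
 *
 * so the attacker-chosen constant never appears verbatim in the
 * emitted image, while (imm_rnd ^ 0x1234) ^ imm_rnd still yields
 * 0x1234 in AX at runtime.
 */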
1086
1087 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1088 gfp_t gfp_extra_flags)
1089 {
1090 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1091 struct bpf_prog *fp;
1092
1093 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
1094 if (fp != NULL) {
1095 /* aux->prog still points to the fp_other one, so
1096 * when promoting the clone to the real program,
1097 * this still needs to be adapted.
1098 */
1099 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1100 }
1101
1102 return fp;
1103 }
1104
1105 static void bpf_prog_clone_free(struct bpf_prog *fp)
1106 {
1107 /* aux was stolen by the other clone, so we cannot free
1108 * it from this path! It will be freed eventually by the
1109 * other program on release.
1110 *
1111 * At this point, we don't need a deferred release since
1112 * clone is guaranteed to not be locked.
1113 */
1114 fp->aux = NULL;
1115 fp->stats = NULL;
1116 fp->active = NULL;
1117 __bpf_prog_free(fp);
1118 }
1119
1120 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1121 {
1122 /* We have to repoint aux->prog to self, as we don't
1123 * know whether fp here is the clone or the original.
1124 */
1125 fp->aux->prog = fp;
1126 bpf_prog_clone_free(fp_other);
1127 }
1128
1129 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1130 {
1131 struct bpf_insn insn_buff[16], aux[2];
1132 struct bpf_prog *clone, *tmp;
1133 int insn_delta, insn_cnt;
1134 struct bpf_insn *insn;
1135 int i, rewritten;
1136
1137 if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
1138 return prog;
1139
1140 clone = bpf_prog_clone_create(prog, GFP_USER);
1141 if (!clone)
1142 return ERR_PTR(-ENOMEM);
1143
1144 insn_cnt = clone->len;
1145 insn = clone->insnsi;
1146
1147 for (i = 0; i < insn_cnt; i++, insn++) {
1148 /* We temporarily need to hold the original ld64 insn
1149 * so that we can still access the first part in the
1150 * second blinding run.
1151 */
1152 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1153 insn[1].code == 0)
1154 memcpy(aux, insn, sizeof(aux));
1155
1156 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1157 clone->aux->verifier_zext);
1158 if (!rewritten)
1159 continue;
1160
1161 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1162 if (IS_ERR(tmp)) {
1163 /* Patching may have repointed aux->prog during
1164 * realloc from the original one, so we need to
1165 * fix it up here on error.
1166 */
1167 bpf_jit_prog_release_other(prog, clone);
1168 return tmp;
1169 }
1170
1171 clone = tmp;
1172 insn_delta = rewritten - 1;
1173
1174 /* Walk new program and skip insns we just inserted. */
1175 insn = clone->insnsi + i + insn_delta;
1176 insn_cnt += insn_delta;
1177 i += insn_delta;
1178 }
1179
1180 clone->blinded = 1;
1181 return clone;
1182 }
1183 #endif /* CONFIG_BPF_JIT */
1184
1185 /* Base function for offset calculation. Needs to go into .text section,
1186 * therefore keeping it non-static as well; will also be used by JITs
1187 * anyway later on, so do not let the compiler omit it. This also needs
1188 * to go into kallsyms for correlation from e.g. bpftool, so naming
1189 * must not change.
1190 */
1191 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1192 {
1193 return 0;
1194 }
1195 EXPORT_SYMBOL_GPL(__bpf_call_base);
1196
1197 /* All UAPI available opcodes. */
1198 #define BPF_INSN_MAP(INSN_2, INSN_3) \
1199 /* 32 bit ALU operations. */ \
1200 /* Register based. */ \
1201 INSN_3(ALU, ADD, X), \
1202 INSN_3(ALU, SUB, X), \
1203 INSN_3(ALU, AND, X), \
1204 INSN_3(ALU, OR, X), \
1205 INSN_3(ALU, LSH, X), \
1206 INSN_3(ALU, RSH, X), \
1207 INSN_3(ALU, XOR, X), \
1208 INSN_3(ALU, MUL, X), \
1209 INSN_3(ALU, MOV, X), \
1210 INSN_3(ALU, ARSH, X), \
1211 INSN_3(ALU, DIV, X), \
1212 INSN_3(ALU, MOD, X), \
1213 INSN_2(ALU, NEG), \
1214 INSN_3(ALU, END, TO_BE), \
1215 INSN_3(ALU, END, TO_LE), \
1216 /* Immediate based. */ \
1217 INSN_3(ALU, ADD, K), \
1218 INSN_3(ALU, SUB, K), \
1219 INSN_3(ALU, AND, K), \
1220 INSN_3(ALU, OR, K), \
1221 INSN_3(ALU, LSH, K), \
1222 INSN_3(ALU, RSH, K), \
1223 INSN_3(ALU, XOR, K), \
1224 INSN_3(ALU, MUL, K), \
1225 INSN_3(ALU, MOV, K), \
1226 INSN_3(ALU, ARSH, K), \
1227 INSN_3(ALU, DIV, K), \
1228 INSN_3(ALU, MOD, K), \
1229 /* 64 bit ALU operations. */ \
1230 /* Register based. */ \
1231 INSN_3(ALU64, ADD, X), \
1232 INSN_3(ALU64, SUB, X), \
1233 INSN_3(ALU64, AND, X), \
1234 INSN_3(ALU64, OR, X), \
1235 INSN_3(ALU64, LSH, X), \
1236 INSN_3(ALU64, RSH, X), \
1237 INSN_3(ALU64, XOR, X), \
1238 INSN_3(ALU64, MUL, X), \
1239 INSN_3(ALU64, MOV, X), \
1240 INSN_3(ALU64, ARSH, X), \
1241 INSN_3(ALU64, DIV, X), \
1242 INSN_3(ALU64, MOD, X), \
1243 INSN_2(ALU64, NEG), \
1244 /* Immediate based. */ \
1245 INSN_3(ALU64, ADD, K), \
1246 INSN_3(ALU64, SUB, K), \
1247 INSN_3(ALU64, AND, K), \
1248 INSN_3(ALU64, OR, K), \
1249 INSN_3(ALU64, LSH, K), \
1250 INSN_3(ALU64, RSH, K), \
1251 INSN_3(ALU64, XOR, K), \
1252 INSN_3(ALU64, MUL, K), \
1253 INSN_3(ALU64, MOV, K), \
1254 INSN_3(ALU64, ARSH, K), \
1255 INSN_3(ALU64, DIV, K), \
1256 INSN_3(ALU64, MOD, K), \
1257 /* Call instruction. */ \
1258 INSN_2(JMP, CALL), \
1259 /* Exit instruction. */ \
1260 INSN_2(JMP, EXIT), \
1261 /* 32-bit Jump instructions. */ \
1262 /* Register based. */ \
1263 INSN_3(JMP32, JEQ, X), \
1264 INSN_3(JMP32, JNE, X), \
1265 INSN_3(JMP32, JGT, X), \
1266 INSN_3(JMP32, JLT, X), \
1267 INSN_3(JMP32, JGE, X), \
1268 INSN_3(JMP32, JLE, X), \
1269 INSN_3(JMP32, JSGT, X), \
1270 INSN_3(JMP32, JSLT, X), \
1271 INSN_3(JMP32, JSGE, X), \
1272 INSN_3(JMP32, JSLE, X), \
1273 INSN_3(JMP32, JSET, X), \
1274 /* Immediate based. */ \
1275 INSN_3(JMP32, JEQ, K), \
1276 INSN_3(JMP32, JNE, K), \
1277 INSN_3(JMP32, JGT, K), \
1278 INSN_3(JMP32, JLT, K), \
1279 INSN_3(JMP32, JGE, K), \
1280 INSN_3(JMP32, JLE, K), \
1281 INSN_3(JMP32, JSGT, K), \
1282 INSN_3(JMP32, JSLT, K), \
1283 INSN_3(JMP32, JSGE, K), \
1284 INSN_3(JMP32, JSLE, K), \
1285 INSN_3(JMP32, JSET, K), \
1286 /* Jump instructions. */ \
1287 /* Register based. */ \
1288 INSN_3(JMP, JEQ, X), \
1289 INSN_3(JMP, JNE, X), \
1290 INSN_3(JMP, JGT, X), \
1291 INSN_3(JMP, JLT, X), \
1292 INSN_3(JMP, JGE, X), \
1293 INSN_3(JMP, JLE, X), \
1294 INSN_3(JMP, JSGT, X), \
1295 INSN_3(JMP, JSLT, X), \
1296 INSN_3(JMP, JSGE, X), \
1297 INSN_3(JMP, JSLE, X), \
1298 INSN_3(JMP, JSET, X), \
1299 /* Immediate based. */ \
1300 INSN_3(JMP, JEQ, K), \
1301 INSN_3(JMP, JNE, K), \
1302 INSN_3(JMP, JGT, K), \
1303 INSN_3(JMP, JLT, K), \
1304 INSN_3(JMP, JGE, K), \
1305 INSN_3(JMP, JLE, K), \
1306 INSN_3(JMP, JSGT, K), \
1307 INSN_3(JMP, JSLT, K), \
1308 INSN_3(JMP, JSGE, K), \
1309 INSN_3(JMP, JSLE, K), \
1310 INSN_3(JMP, JSET, K), \
1311 INSN_2(JMP, JA), \
1312 /* Store instructions. */ \
1313 /* Register based. */ \
1314 INSN_3(STX, MEM, B), \
1315 INSN_3(STX, MEM, H), \
1316 INSN_3(STX, MEM, W), \
1317 INSN_3(STX, MEM, DW), \
1318 INSN_3(STX, ATOMIC, W), \
1319 INSN_3(STX, ATOMIC, DW), \
1320 /* Immediate based. */ \
1321 INSN_3(ST, MEM, B), \
1322 INSN_3(ST, MEM, H), \
1323 INSN_3(ST, MEM, W), \
1324 INSN_3(ST, MEM, DW), \
1325 /* Load instructions. */ \
1326 /* Register based. */ \
1327 INSN_3(LDX, MEM, B), \
1328 INSN_3(LDX, MEM, H), \
1329 INSN_3(LDX, MEM, W), \
1330 INSN_3(LDX, MEM, DW), \
1331 /* Immediate based. */ \
1332 INSN_3(LD, IMM, DW)
1333
1334 bool bpf_opcode_in_insntable(u8 code)
1335 {
1336 #define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true
1337 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1338 static const bool public_insntable[256] = {
1339 [0 ... 255] = false,
1340 /* Now overwrite non-defaults ... */
1341 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1342 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1343 [BPF_LD | BPF_ABS | BPF_B] = true,
1344 [BPF_LD | BPF_ABS | BPF_H] = true,
1345 [BPF_LD | BPF_ABS | BPF_W] = true,
1346 [BPF_LD | BPF_IND | BPF_B] = true,
1347 [BPF_LD | BPF_IND | BPF_H] = true,
1348 [BPF_LD | BPF_IND | BPF_W] = true,
1349 };
1350 #undef BPF_INSN_3_TBL
1351 #undef BPF_INSN_2_TBL
1352 return public_insntable[code];
1353 }
1354
1355 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1356 u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
1357 {
1358 memset(dst, 0, size);
1359 return -EFAULT;
1360 }
1361
1362 /**
1363 * ___bpf_prog_run - run eBPF program on a given context
1364 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1365 * @insn: is the array of eBPF instructions
1366 *
1367 * Decode and execute eBPF instructions.
1368 */
1369 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
1370 {
1371 #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
1372 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1373 static const void * const jumptable[256] __annotate_jump_table = {
1374 [0 ... 255] = &&default_label,
1375 /* Now overwrite non-defaults ... */
1376 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1377 /* Non-UAPI available opcodes. */
1378 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1379 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1380 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1381 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1382 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1383 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1384 };
1385 #undef BPF_INSN_3_LBL
1386 #undef BPF_INSN_2_LBL
1387 u32 tail_call_cnt = 0;
1388
1389 #define CONT ({ insn++; goto select_insn; })
1390 #define CONT_JMP ({ insn++; goto select_insn; })
1391
1392 select_insn:
1393 goto *jumptable[insn->code];
1394
1395 /* ALU */
1396 #define ALU(OPCODE, OP) \
1397 ALU64_##OPCODE##_X: \
1398 DST = DST OP SRC; \
1399 CONT; \
1400 ALU_##OPCODE##_X: \
1401 DST = (u32) DST OP (u32) SRC; \
1402 CONT; \
1403 ALU64_##OPCODE##_K: \
1404 DST = DST OP IMM; \
1405 CONT; \
1406 ALU_##OPCODE##_K: \
1407 DST = (u32) DST OP (u32) IMM; \
1408 CONT;
1409
1410 ALU(ADD, +)
1411 ALU(SUB, -)
1412 ALU(AND, &)
1413 ALU(OR, |)
1414 ALU(LSH, <<)
1415 ALU(RSH, >>)
1416 ALU(XOR, ^)
1417 ALU(MUL, *)
1418 #undef ALU
1419 ALU_NEG:
1420 DST = (u32) -DST;
1421 CONT;
1422 ALU64_NEG:
1423 DST = -DST;
1424 CONT;
1425 ALU_MOV_X:
1426 DST = (u32) SRC;
1427 CONT;
1428 ALU_MOV_K:
1429 DST = (u32) IMM;
1430 CONT;
1431 ALU64_MOV_X:
1432 DST = SRC;
1433 CONT;
1434 ALU64_MOV_K:
1435 DST = IMM;
1436 CONT;
1437 LD_IMM_DW:
1438 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1439 insn++;
1440 CONT;
1441 ALU_ARSH_X:
1442 DST = (u64) (u32) (((s32) DST) >> SRC);
1443 CONT;
1444 ALU_ARSH_K:
1445 DST = (u64) (u32) (((s32) DST) >> IMM);
1446 CONT;
1447 ALU64_ARSH_X:
1448 (*(s64 *) &DST) >>= SRC;
1449 CONT;
1450 ALU64_ARSH_K:
1451 (*(s64 *) &DST) >>= IMM;
1452 CONT;
1453 ALU64_MOD_X:
1454 div64_u64_rem(DST, SRC, &AX);
1455 DST = AX;
1456 CONT;
1457 ALU_MOD_X:
1458 AX = (u32) DST;
1459 DST = do_div(AX, (u32) SRC);
1460 CONT;
1461 ALU64_MOD_K:
1462 div64_u64_rem(DST, IMM, &AX);
1463 DST = AX;
1464 CONT;
1465 ALU_MOD_K:
1466 AX = (u32) DST;
1467 DST = do_div(AX, (u32) IMM);
1468 CONT;
1469 ALU64_DIV_X:
1470 DST = div64_u64(DST, SRC);
1471 CONT;
1472 ALU_DIV_X:
1473 AX = (u32) DST;
1474 do_div(AX, (u32) SRC);
1475 DST = (u32) AX;
1476 CONT;
1477 ALU64_DIV_K:
1478 DST = div64_u64(DST, IMM);
1479 CONT;
1480 ALU_DIV_K:
1481 AX = (u32) DST;
1482 do_div(AX, (u32) IMM);
1483 DST = (u32) AX;
1484 CONT;
1485 ALU_END_TO_BE:
1486 switch (IMM) {
1487 case 16:
1488 DST = (__force u16) cpu_to_be16(DST);
1489 break;
1490 case 32:
1491 DST = (__force u32) cpu_to_be32(DST);
1492 break;
1493 case 64:
1494 DST = (__force u64) cpu_to_be64(DST);
1495 break;
1496 }
1497 CONT;
1498 ALU_END_TO_LE:
1499 switch (IMM) {
1500 case 16:
1501 DST = (__force u16) cpu_to_le16(DST);
1502 break;
1503 case 32:
1504 DST = (__force u32) cpu_to_le32(DST);
1505 break;
1506 case 64:
1507 DST = (__force u64) cpu_to_le64(DST);
1508 break;
1509 }
1510 CONT;
1511
1512 /* CALL */
1513 JMP_CALL:
1514 /* Function call scratches BPF_R1-BPF_R5 registers,
1515 * preserves BPF_R6-BPF_R9, and stores return value
1516 * into BPF_R0.
1517 */
1518 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1519 BPF_R4, BPF_R5);
1520 CONT;
1521
1522 JMP_CALL_ARGS:
1523 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1524 BPF_R3, BPF_R4,
1525 BPF_R5,
1526 insn + insn->off + 1);
1527 CONT;
1528
1529 JMP_TAIL_CALL: {
1530 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1531 struct bpf_array *array = container_of(map, struct bpf_array, map);
1532 struct bpf_prog *prog;
1533 u32 index = BPF_R3;
1534
1535 if (unlikely(index >= array->map.max_entries))
1536 goto out;
1537 if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1538 goto out;
1539
1540 tail_call_cnt++;
1541
1542 prog = READ_ONCE(array->ptrs[index]);
1543 if (!prog)
1544 goto out;
1545
1546 /* ARG1 at this point is guaranteed to point to CTX from
1547 * the verifier side due to the fact that the tail call is
1548 * handled like a helper, that is, bpf_tail_call_proto,
1549 * where arg1_type is ARG_PTR_TO_CTX.
1550 */
1551 insn = prog->insnsi;
1552 goto select_insn;
1553 out:
1554 CONT;
1555 }
1556 JMP_JA:
1557 insn += insn->off;
1558 CONT;
1559 JMP_EXIT:
1560 return BPF_R0;
1561 /* JMP */
1562 #define COND_JMP(SIGN, OPCODE, CMP_OP) \
1563 JMP_##OPCODE##_X: \
1564 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \
1565 insn += insn->off; \
1566 CONT_JMP; \
1567 } \
1568 CONT; \
1569 JMP32_##OPCODE##_X: \
1570 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \
1571 insn += insn->off; \
1572 CONT_JMP; \
1573 } \
1574 CONT; \
1575 JMP_##OPCODE##_K: \
1576 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \
1577 insn += insn->off; \
1578 CONT_JMP; \
1579 } \
1580 CONT; \
1581 JMP32_##OPCODE##_K: \
1582 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \
1583 insn += insn->off; \
1584 CONT_JMP; \
1585 } \
1586 CONT;
1587 COND_JMP(u, JEQ, ==)
1588 COND_JMP(u, JNE, !=)
1589 COND_JMP(u, JGT, >)
1590 COND_JMP(u, JLT, <)
1591 COND_JMP(u, JGE, >=)
1592 COND_JMP(u, JLE, <=)
1593 COND_JMP(u, JSET, &)
1594 COND_JMP(s, JSGT, >)
1595 COND_JMP(s, JSLT, <)
1596 COND_JMP(s, JSGE, >=)
1597 COND_JMP(s, JSLE, <=)
1598 #undef COND_JMP
1599 /* STX and ST and LDX */
1600 #define LDST(SIZEOP, SIZE) \
1601 STX_MEM_##SIZEOP: \
1602 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
1603 CONT; \
1604 ST_MEM_##SIZEOP: \
1605 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
1606 CONT; \
1607 LDX_MEM_##SIZEOP: \
1608 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
1609 CONT;
1610
1611 LDST(B, u8)
1612 LDST(H, u16)
1613 LDST(W, u32)
1614 LDST(DW, u64)
1615 #undef LDST
1616 #define LDX_PROBE(SIZEOP, SIZE) \
1617 LDX_PROBE_MEM_##SIZEOP: \
1618 bpf_probe_read_kernel(&DST, SIZE, (const void *)(long) (SRC + insn->off)); \
1619 CONT;
1620 LDX_PROBE(B, 1)
1621 LDX_PROBE(H, 2)
1622 LDX_PROBE(W, 4)
1623 LDX_PROBE(DW, 8)
1624 #undef LDX_PROBE
1625
1626 #define ATOMIC_ALU_OP(BOP, KOP) \
1627 case BOP: \
1628 if (BPF_SIZE(insn->code) == BPF_W) \
1629 atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
1630 (DST + insn->off)); \
1631 else \
1632 atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
1633 (DST + insn->off)); \
1634 break; \
1635 case BOP | BPF_FETCH: \
1636 if (BPF_SIZE(insn->code) == BPF_W) \
1637 SRC = (u32) atomic_fetch_##KOP( \
1638 (u32) SRC, \
1639 (atomic_t *)(unsigned long) (DST + insn->off)); \
1640 else \
1641 SRC = (u64) atomic64_fetch_##KOP( \
1642 (u64) SRC, \
1643 (atomic64_t *)(unsigned long) (DST + insn->off)); \
1644 break;
1645
1646 STX_ATOMIC_DW:
1647 STX_ATOMIC_W:
1648 switch (IMM) {
1649 ATOMIC_ALU_OP(BPF_ADD, add)
1650 ATOMIC_ALU_OP(BPF_AND, and)
1651 ATOMIC_ALU_OP(BPF_OR, or)
1652 ATOMIC_ALU_OP(BPF_XOR, xor)
1653 #undef ATOMIC_ALU_OP
1654
1655 case BPF_XCHG:
1656 if (BPF_SIZE(insn->code) == BPF_W)
1657 SRC = (u32) atomic_xchg(
1658 (atomic_t *)(unsigned long) (DST + insn->off),
1659 (u32) SRC);
1660 else
1661 SRC = (u64) atomic64_xchg(
1662 (atomic64_t *)(unsigned long) (DST + insn->off),
1663 (u64) SRC);
1664 break;
1665 case BPF_CMPXCHG:
1666 if (BPF_SIZE(insn->code) == BPF_W)
1667 BPF_R0 = (u32) atomic_cmpxchg(
1668 (atomic_t *)(unsigned long) (DST + insn->off),
1669 (u32) BPF_R0, (u32) SRC);
1670 else
1671 BPF_R0 = (u64) atomic64_cmpxchg(
1672 (atomic64_t *)(unsigned long) (DST + insn->off),
1673 (u64) BPF_R0, (u64) SRC);
1674 break;
1675
1676 default:
1677 goto default_label;
1678 }
1679 CONT;
1680
1681 default_label:
1682 /* If we ever reach this, we have a bug somewhere. Die hard here
1683 * instead of just returning 0; we could be somewhere in a subprog,
1684 * so execution could continue otherwise which we do /not/ want.
1685 *
1686 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
1687 */
1688 pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
1689 insn->code, insn->imm);
1690 BUG_ON(1);
1691 return 0;
1692 }
1693
1694 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1695 #define DEFINE_BPF_PROG_RUN(stack_size) \
1696 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1697 { \
1698 u64 stack[stack_size / sizeof(u64)]; \
1699 u64 regs[MAX_BPF_EXT_REG]; \
1700 \
1701 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1702 ARG1 = (u64) (unsigned long) ctx; \
1703 return ___bpf_prog_run(regs, insn); \
1704 }
1705
1706 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
1707 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
1708 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
1709 const struct bpf_insn *insn) \
1710 { \
1711 u64 stack[stack_size / sizeof(u64)]; \
1712 u64 regs[MAX_BPF_EXT_REG]; \
1713 \
1714 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1715 BPF_R1 = r1; \
1716 BPF_R2 = r2; \
1717 BPF_R3 = r3; \
1718 BPF_R4 = r4; \
1719 BPF_R5 = r5; \
1720 return ___bpf_prog_run(regs, insn); \
1721 }
1722
1723 #define EVAL1(FN, X) FN(X)
1724 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1725 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1726 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1727 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1728 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
1729
1730 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1731 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1732 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1733
1734 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
1735 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
1736 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
1737
1738 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1739
1740 static unsigned int (*interpreters[])(const void *ctx,
1741 const struct bpf_insn *insn) = {
1742 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1743 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1744 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1745 };
1746 #undef PROG_NAME_LIST
1747 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
1748 static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
1749 const struct bpf_insn *insn) = {
1750 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1751 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1752 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1753 };
1754 #undef PROG_NAME_LIST
1755
1756 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1757 {
1758 stack_depth = max_t(u32, stack_depth, 1);
1759 insn->off = (s16) insn->imm;
1760 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1761 __bpf_call_base_args;
1762 insn->code = BPF_JMP | BPF_CALL_ARGS;
1763 }
1764
1765 #else
1766 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1767 const struct bpf_insn *insn)
1768 {
1769 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1770 * is not working properly, so warn about it!
1771 */
1772 WARN_ON_ONCE(1);
1773 return 0;
1774 }
1775 #endif
1776
1777 bool bpf_prog_array_compatible(struct bpf_array *array,
1778 const struct bpf_prog *fp)
1779 {
1780 if (fp->kprobe_override)
1781 return false;
1782
1783 if (!array->aux->type) {
1784 /* There's no owner yet where we could check for
1785 * compatibility.
1786 */
1787 array->aux->type = fp->type;
1788 array->aux->jited = fp->jited;
1789 return true;
1790 }
1791
1792 return array->aux->type == fp->type &&
1793 array->aux->jited == fp->jited;
1794 }
1795
1796 static int bpf_check_tail_call(const struct bpf_prog *fp)
1797 {
1798 struct bpf_prog_aux *aux = fp->aux;
1799 int i, ret = 0;
1800
1801 mutex_lock(&aux->used_maps_mutex);
1802 for (i = 0; i < aux->used_map_cnt; i++) {
1803 struct bpf_map *map = aux->used_maps[i];
1804 struct bpf_array *array;
1805
1806 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1807 continue;
1808
1809 array = container_of(map, struct bpf_array, map);
1810 if (!bpf_prog_array_compatible(array, fp)) {
1811 ret = -EINVAL;
1812 goto out;
1813 }
1814 }
1815
1816 out:
1817 mutex_unlock(&aux->used_maps_mutex);
1818 return ret;
1819 }
1820
1821 static void bpf_prog_select_func(struct bpf_prog *fp)
1822 {
1823 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1824 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1825
1826 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1827 #else
1828 fp->bpf_func = __bpf_prog_ret0_warn;
1829 #endif
1830 }
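/* Example of the index math (illustrative): a program with
 * aux->stack_depth == 40 rounds up to 64 and picks
 * interpreters[64 / 32 - 1] == interpreters[1], i.e. the
 * __bpf_prog_run64() flavour with a 64-byte on-stack scratch area.
 */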
1831
1832 /**
1833 * bpf_prog_select_runtime - select exec runtime for BPF program
1834 * @fp: bpf_prog populated with internal BPF program
1835 * @err: pointer to error variable
1836 *
1837 * Try to JIT eBPF program, if JIT is not available, use interpreter.
1838 * The BPF program will be executed via BPF_PROG_RUN() macro.
1839 */
1840 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1841 {
1842 /* In case of BPF to BPF calls, verifier did all the prep
1843 * work with regards to JITing, etc.
1844 */
1845 bool jit_needed = false;
1846
1847 if (fp->bpf_func)
1848 goto finalize;
1849
1850 if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
1851 bpf_prog_has_kfunc_call(fp))
1852 jit_needed = true;
1853
1854 bpf_prog_select_func(fp);
1855
1856 /* eBPF JITs can rewrite the program in case constant
1857 * blinding is active. However, in case of an error during
1858 * blinding, bpf_int_jit_compile() must always return a
1859 * valid program, which in that case simply is not JITed
1860 * and falls back to the interpreter.
1861 */
1862 if (!bpf_prog_is_dev_bound(fp->aux)) {
1863 *err = bpf_prog_alloc_jited_linfo(fp);
1864 if (*err)
1865 return fp;
1866
1867 fp = bpf_int_jit_compile(fp);
1868 bpf_prog_jit_attempt_done(fp);
1869 if (!fp->jited && jit_needed) {
1870 *err = -ENOTSUPP;
1871 return fp;
1872 }
1873 } else {
1874 *err = bpf_prog_offload_compile(fp);
1875 if (*err)
1876 return fp;
1877 }
1878
1879 finalize:
1880 bpf_prog_lock_ro(fp);
1881
1882 /* The tail call compatibility check can only be done at
1883 * this late stage, as we need to determine whether we deal
1884 * with JITed or non-JITed program concatenations, and not
1885 * all eBPF JITs might immediately support all features.
1886 */
1887 *err = bpf_check_tail_call(fp);
1888
1889 return fp;
1890 }
1891 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
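/* Minimal calling sketch (the error label is hypothetical); note that fp
 * remains valid even when *err is set, so the caller still owns it and
 * must free it on failure:
 *
 *	int err;
 *
 *	fp = bpf_prog_select_runtime(fp, &err);
 *	if (err < 0)
 *		goto free_prog;
 */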
1892
1893 static unsigned int __bpf_prog_ret1(const void *ctx,
1894 const struct bpf_insn *insn)
1895 {
1896 return 1;
1897 }
1898
1899 static struct bpf_prog_dummy {
1900 struct bpf_prog prog;
1901 } dummy_bpf_prog = {
1902 .prog = {
1903 .bpf_func = __bpf_prog_ret1,
1904 },
1905 };
1906
1907 /* To avoid allocating an empty bpf_prog_array for cgroups that
1908 * don't have any bpf program attached, use the single global
1909 * 'empty_prog_array'. It will not be modified by the caller of
1910 * bpf_prog_array_alloc() (since the caller requested prog_cnt == 0),
1911 * and that pointer should still be 'freed' via bpf_prog_array_free().
1912 */
1913 static struct {
1914 struct bpf_prog_array hdr;
1915 struct bpf_prog *null_prog;
1916 } empty_prog_array = {
1917 .null_prog = NULL,
1918 };
1919
1920 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1921 {
1922 if (prog_cnt)
1923 return kzalloc(sizeof(struct bpf_prog_array) +
1924 sizeof(struct bpf_prog_array_item) *
1925 (prog_cnt + 1),
1926 flags);
1927
1928 return &empty_prog_array.hdr;
1929 }
1930
1931 void bpf_prog_array_free(struct bpf_prog_array *progs)
1932 {
1933 if (!progs || progs == &empty_prog_array.hdr)
1934 return;
1935 kfree_rcu(progs, rcu);
1936 }
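/* Illustrative alloc/free pairing, including the prog_cnt == 0 case where
 * the shared &empty_prog_array.hdr is handed out and bpf_prog_array_free()
 * recognizes it and does nothing:
 *
 *	struct bpf_prog_array *arr;
 *
 *	arr = bpf_prog_array_alloc(0, GFP_KERNEL);
 *	...
 *	bpf_prog_array_free(arr);
 */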
1937
1938 int bpf_prog_array_length(struct bpf_prog_array *array)
1939 {
1940 struct bpf_prog_array_item *item;
1941 u32 cnt = 0;
1942
1943 for (item = array->items; item->prog; item++)
1944 if (item->prog != &dummy_bpf_prog.prog)
1945 cnt++;
1946 return cnt;
1947 }
1948
1949 bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
1950 {
1951 struct bpf_prog_array_item *item;
1952
1953 for (item = array->items; item->prog; item++)
1954 if (item->prog != &dummy_bpf_prog.prog)
1955 return false;
1956 return true;
1957 }
1958
1959 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
1960 u32 *prog_ids,
1961 u32 request_cnt)
1962 {
1963 struct bpf_prog_array_item *item;
1964 int i = 0;
1965
1966 for (item = array->items; item->prog; item++) {
1967 if (item->prog == &dummy_bpf_prog.prog)
1968 continue;
1969 prog_ids[i] = item->prog->aux->id;
1970 if (++i == request_cnt) {
1971 item++;
1972 break;
1973 }
1974 }
1975
1976 return !!(item->prog);
1977 }
1978
1979 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
1980 __u32 __user *prog_ids, u32 cnt)
1981 {
1982 unsigned long err = 0;
1983 bool nospc;
1984 u32 *ids;
1985
1986 /* users of this function are doing:
1987 * cnt = bpf_prog_array_length();
1988 * if (cnt > 0)
1989 * bpf_prog_array_copy_to_user(..., cnt);
1990 * so the kcalloc below doesn't need an extra cnt > 0 check.
1991 */
1992 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
1993 if (!ids)
1994 return -ENOMEM;
1995 nospc = bpf_prog_array_copy_core(array, ids, cnt);
1996 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
1997 kfree(ids);
1998 if (err)
1999 return -EFAULT;
2000 if (nospc)
2001 return -ENOSPC;
2002 return 0;
2003 }
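/* Sketch of the caller pattern referred to in the comment above (uids is a
 * hypothetical user-space pointer):
 *
 *	u32 cnt = bpf_prog_array_length(array);
 *
 *	if (cnt > 0)
 *		err = bpf_prog_array_copy_to_user(array, uids, cnt);
 *	// -ENOSPC would indicate more than 'cnt' programs were present
 */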
2004
2005 void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
2006 struct bpf_prog *old_prog)
2007 {
2008 struct bpf_prog_array_item *item;
2009
2010 for (item = array->items; item->prog; item++)
2011 if (item->prog == old_prog) {
2012 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
2013 break;
2014 }
2015 }
2016
2017 /**
2018 * bpf_prog_array_delete_safe_at() - Replaces the program at the given
2019 * index into the program array with
2020 * a dummy no-op program.
2021 * @array: a bpf_prog_array
2022 * @index: the index of the program to replace
2023 *
2024 * Skips over dummy programs (by not counting them) when calculating
2025 * the position of the program to replace.
2026 *
2027 * Return:
2028 * * 0 - Success
2029 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2030 * * -ENOENT - Index out of range
2031 */
2032 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
2033 {
2034 return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
2035 }
2036
2037 /**
2038 * bpf_prog_array_update_at() - Updates the program at the given index
2039 * into the program array.
2040 * @array: a bpf_prog_array
2041 * @index: the index of the program to update
2042 * @prog: the program to insert into the array
2043 *
2044 * Skips over dummy programs (by not counting them) when calculating
2045 * the position of the program to update.
2046 *
2047 * Return:
2048 * * 0 - Success
2049 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2050 * * -ENOENT - Index out of range
2051 */
2052 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2053 struct bpf_prog *prog)
2054 {
2055 struct bpf_prog_array_item *item;
2056
2057 if (unlikely(index < 0))
2058 return -EINVAL;
2059
2060 for (item = array->items; item->prog; item++) {
2061 if (item->prog == &dummy_bpf_prog.prog)
2062 continue;
2063 if (!index) {
2064 WRITE_ONCE(item->prog, prog);
2065 return 0;
2066 }
2067 index--;
2068 }
2069 return -ENOENT;
2070 }
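/* Illustrative index semantics for the two helpers above, assuming a
 * hypothetical array layout of [progA, dummy, progB, NULL]: dummy entries
 * are not counted, so index 0 addresses progA and index 1 addresses progB.
 *
 *	bpf_prog_array_update_at(array, 1, new_prog);	// replaces progB
 *	bpf_prog_array_delete_safe_at(array, 0);	// progA becomes a dummy
 */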
2071
2072 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2073 struct bpf_prog *exclude_prog,
2074 struct bpf_prog *include_prog,
2075 struct bpf_prog_array **new_array)
2076 {
2077 int new_prog_cnt, carry_prog_cnt = 0;
2078 struct bpf_prog_array_item *existing;
2079 struct bpf_prog_array *array;
2080 bool found_exclude = false;
2081 int new_prog_idx = 0;
2082
2083 /* Figure out how many existing progs we need to carry over to
2084 * the new array.
2085 */
2086 if (old_array) {
2087 existing = old_array->items;
2088 for (; existing->prog; existing++) {
2089 if (existing->prog == exclude_prog) {
2090 found_exclude = true;
2091 continue;
2092 }
2093 if (existing->prog != &dummy_bpf_prog.prog)
2094 carry_prog_cnt++;
2095 if (existing->prog == include_prog)
2096 return -EEXIST;
2097 }
2098 }
2099
2100 if (exclude_prog && !found_exclude)
2101 return -ENOENT;
2102
2103 /* How many progs (not NULL) will be in the new array? */
2104 new_prog_cnt = carry_prog_cnt;
2105 if (include_prog)
2106 new_prog_cnt += 1;
2107
2108 /* Do we have any prog (not NULL) in the new array? */
2109 if (!new_prog_cnt) {
2110 *new_array = NULL;
2111 return 0;
2112 }
2113
2114 /* +1 as the end of prog_array is marked with NULL */
2115 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2116 if (!array)
2117 return -ENOMEM;
2118
2119 /* Fill in the new prog array */
2120 if (carry_prog_cnt) {
2121 existing = old_array->items;
2122 for (; existing->prog; existing++)
2123 if (existing->prog != exclude_prog &&
2124 existing->prog != &dummy_bpf_prog.prog) {
2125 array->items[new_prog_idx++].prog =
2126 existing->prog;
2127 }
2128 }
2129 if (include_prog)
2130 array->items[new_prog_idx++].prog = include_prog;
2131 array->items[new_prog_idx].prog = NULL;
2132 *new_array = array;
2133 return 0;
2134 }
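/* Minimal usage sketch; locking and publication of the new array (e.g. via
 * rcu_assign_pointer()) remain the caller's responsibility and are only
 * hinted at here:
 *
 *	struct bpf_prog_array *new_array;
 *	int err;
 *
 *	err = bpf_prog_array_copy(old_array, old_prog, new_prog, &new_array);
 *	if (!err) {
 *		// publish new_array, then bpf_prog_array_free(old_array)
 *	}
 */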
2135
2136 int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2137 u32 *prog_ids, u32 request_cnt,
2138 u32 *prog_cnt)
2139 {
2140 u32 cnt = 0;
2141
2142 if (array)
2143 cnt = bpf_prog_array_length(array);
2144
2145 *prog_cnt = cnt;
2146
2147 /* return early if user requested only program count or nothing to copy */
2148 if (!request_cnt || !cnt)
2149 return 0;
2150
2151 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2152 return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2153 : 0;
2154 }
2155
2156 void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2157 struct bpf_map **used_maps, u32 len)
2158 {
2159 struct bpf_map *map;
2160 u32 i;
2161
2162 for (i = 0; i < len; i++) {
2163 map = used_maps[i];
2164 if (map->ops->map_poke_untrack)
2165 map->ops->map_poke_untrack(map, aux);
2166 bpf_map_put(map);
2167 }
2168 }
2169
2170 static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2171 {
2172 __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2173 kfree(aux->used_maps);
2174 }
2175
2176 void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
2177 struct btf_mod_pair *used_btfs, u32 len)
2178 {
2179 #ifdef CONFIG_BPF_SYSCALL
2180 struct btf_mod_pair *btf_mod;
2181 u32 i;
2182
2183 for (i = 0; i < len; i++) {
2184 btf_mod = &used_btfs[i];
2185 if (btf_mod->module)
2186 module_put(btf_mod->module);
2187 btf_put(btf_mod->btf);
2188 }
2189 #endif
2190 }
2191
2192 static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
2193 {
2194 __bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt);
2195 kfree(aux->used_btfs);
2196 }
2197
2198 static void bpf_prog_free_deferred(struct work_struct *work)
2199 {
2200 struct bpf_prog_aux *aux;
2201 int i;
2202
2203 aux = container_of(work, struct bpf_prog_aux, work);
2204 bpf_free_used_maps(aux);
2205 bpf_free_used_btfs(aux);
2206 if (bpf_prog_is_dev_bound(aux))
2207 bpf_prog_offload_destroy(aux->prog);
2208 #ifdef CONFIG_PERF_EVENTS
2209 if (aux->prog->has_callchain_buf)
2210 put_callchain_buffers();
2211 #endif
2212 if (aux->dst_trampoline)
2213 bpf_trampoline_put(aux->dst_trampoline);
2214 for (i = 0; i < aux->func_cnt; i++)
2215 bpf_jit_free(aux->func[i]);
2216 if (aux->func_cnt) {
2217 kfree(aux->func);
2218 bpf_prog_unlock_free(aux->prog);
2219 } else {
2220 bpf_jit_free(aux->prog);
2221 }
2222 }
2223
2224 /* Free internal BPF program */
2225 void bpf_prog_free(struct bpf_prog *fp)
2226 {
2227 struct bpf_prog_aux *aux = fp->aux;
2228
2229 if (aux->dst_prog)
2230 bpf_prog_put(aux->dst_prog);
2231 INIT_WORK(&aux->work, bpf_prog_free_deferred);
2232 schedule_work(&aux->work);
2233 }
2234 EXPORT_SYMBOL_GPL(bpf_prog_free);
2235
2236 /* RNG for unprivileged user space, with state separate from prandom_u32(). */
2237 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2238
2239 void bpf_user_rnd_init_once(void)
2240 {
2241 prandom_init_once(&bpf_user_rnd_state);
2242 }
2243
2244 BPF_CALL_0(bpf_user_rnd_u32)
2245 {
2246 /* Should someone ever have the rather unwise idea to use some
2247 * of the registers passed into this function, then note that
2248 * this function is called from native eBPF and classic-to-eBPF
2249 * transformations. Register assignments from both sides are
2250 * different, e.g. classic always sets fn(ctx, A, X) here.
2251 */
2252 struct rnd_state *state;
2253 u32 res;
2254
2255 state = &get_cpu_var(bpf_user_rnd_state);
2256 res = prandom_u32_state(state);
2257 put_cpu_var(bpf_user_rnd_state);
2258
2259 return res;
2260 }
2261
2262 BPF_CALL_0(bpf_get_raw_cpu_id)
2263 {
2264 return raw_smp_processor_id();
2265 }
2266
2267 /* Weak definitions of helper functions in case we don't have bpf syscall. */
2268 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2269 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2270 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2271 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2272 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2273 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2274 const struct bpf_func_proto bpf_spin_lock_proto __weak;
2275 const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2276 const struct bpf_func_proto bpf_jiffies64_proto __weak;
2277
2278 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2279 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2280 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2281 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2282 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
2283 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
2284
2285 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2286 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2287 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2288 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2289 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
2290 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2291 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
2292 const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
2293 const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
2294
2295 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2296 {
2297 return NULL;
2298 }
2299
2300 u64 __weak
2301 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2302 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2303 {
2304 return -ENOTSUPP;
2305 }
2306 EXPORT_SYMBOL_GPL(bpf_event_output);
2307
2308 /* Always built-in helper functions. */
2309 const struct bpf_func_proto bpf_tail_call_proto = {
2310 .func = NULL,
2311 .gpl_only = false,
2312 .ret_type = RET_VOID,
2313 .arg1_type = ARG_PTR_TO_CTX,
2314 .arg2_type = ARG_CONST_MAP_PTR,
2315 .arg3_type = ARG_ANYTHING,
2316 };
2317
2318 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2319 * It is encouraged to implement bpf_int_jit_compile() instead, so that
2320 * eBPF and implicitly also cBPF can get JITed!
2321 */
2322 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2323 {
2324 return prog;
2325 }
2326
2327 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
2328 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2329 */
2330 void __weak bpf_jit_compile(struct bpf_prog *prog)
2331 {
2332 }
2333
2334 bool __weak bpf_helper_changes_pkt_data(void *func)
2335 {
2336 return false;
2337 }
2338
2339 /* Return TRUE if the JIT backend wants verifier to enable sub-register usage
2340 * analysis code and wants explicit zero extension inserted by verifier.
2341 * Otherwise, return FALSE.
2342 *
2343 * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
2344 * you don't override this. JITs that don't want these extra insns can detect
2345 * them using insn_is_zext.
2346 */
2347 bool __weak bpf_jit_needs_zext(void)
2348 {
2349 return false;
2350 }
2351
2352 bool __weak bpf_jit_supports_kfunc_call(void)
2353 {
2354 return false;
2355 }
2356
2357 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
2358 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2359 */
2360 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2361 int len)
2362 {
2363 return -EFAULT;
2364 }
2365
2366 int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2367 void *addr1, void *addr2)
2368 {
2369 return -ENOTSUPP;
2370 }
2371
2372 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
2373 EXPORT_SYMBOL(bpf_stats_enabled_key);
2374
2375 /* All definitions of tracepoints related to BPF. */
2376 #define CREATE_TRACE_POINTS
2377 #include <linux/bpf_trace.h>
2378
2379 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2380 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);
2381