// SPDX-License-Identifier: GPL-2.0
/*
 * BPF Jit compiler for s390.
 *
 * Minimum build requirements:
 *
 *  - HAVE_MARCH_Z196_FEATURES: laal, laalg
 *  - HAVE_MARCH_Z10_FEATURES: msfi, cgrj, clgrj
 *  - HAVE_MARCH_Z9_109_FEATURES: alfi, llilf, clfi, oilf, nilf
 *  - 64BIT
 *
 * Copyright IBM Corp. 2012,2015
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *            Michael Holzheu <holzheu@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "bpf_jit"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/init.h>
#include <linux/bpf.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <asm/cacheflush.h>
#include <asm/extable.h>
#include <asm/dis.h>
#include <asm/facility.h>
#include <asm/nospec-branch.h>
#include <asm/set_memory.h>
#include <asm/text-patching.h>
#include "bpf_jit.h"

struct bpf_jit {
        u32 seen;               /* Flags to remember seen eBPF instructions */
        u32 seen_reg[16];       /* Array to remember which registers are used */
        u32 *addrs;             /* Array with relative instruction addresses */
        u8 *prg_buf;            /* Start of program */
        int size;               /* Size of program and literal pool */
        int size_prg;           /* Size of program */
        int prg;                /* Current position in program */
        int lit32_start;        /* Start of 32-bit literal pool */
        int lit32;              /* Current position in 32-bit literal pool */
        int lit64_start;        /* Start of 64-bit literal pool */
        int lit64;              /* Current position in 64-bit literal pool */
        int base_ip;            /* Base address for literal pool */
        int exit_ip;            /* Address of exit */
        int r1_thunk_ip;        /* Address of expoline thunk for 'br %r1' */
        int r14_thunk_ip;       /* Address of expoline thunk for 'br %r14' */
        int tail_call_start;    /* Tail call start offset */
        int excnt;              /* Number of exception table entries */
        int prologue_plt_ret;   /* Return address for prologue hotpatch PLT */
        int prologue_plt;       /* Start of prologue hotpatch PLT */
};

#define SEEN_MEM        BIT(0)          /* use mem[] for temporary storage */
#define SEEN_LITERAL    BIT(1)          /* code uses literals */
#define SEEN_FUNC       BIT(2)          /* calls C functions */
#define SEEN_STACK      (SEEN_FUNC | SEEN_MEM)

/*
 * s390 registers
 */
#define REG_W0  (MAX_BPF_JIT_REG + 0)   /* Work register 1 (even) */
#define REG_W1  (MAX_BPF_JIT_REG + 1)   /* Work register 2 (odd) */
#define REG_L   (MAX_BPF_JIT_REG + 2)   /* Literal pool register */
#define REG_15  (MAX_BPF_JIT_REG + 3)   /* Register 15 */
#define REG_0   REG_W0                  /* Register 0 */
#define REG_1   REG_W1                  /* Register 1 */
#define REG_2   BPF_REG_1               /* Register 2 */
#define REG_3   BPF_REG_2               /* Register 3 */
#define REG_4   BPF_REG_3               /* Register 4 */
#define REG_7   BPF_REG_6               /* Register 7 */
#define REG_8   BPF_REG_7               /* Register 8 */
#define REG_14  BPF_REG_0               /* Register 14 */

/*
 * Mapping of BPF registers to s390 registers
 */
static const int reg2hex[] = {
        /* Return code */
        [BPF_REG_0]     = 14,
        /* Function parameters */
        [BPF_REG_1]     = 2,
        [BPF_REG_2]     = 3,
        [BPF_REG_3]     = 4,
        [BPF_REG_4]     = 5,
        [BPF_REG_5]     = 6,
        /* Call saved registers */
        [BPF_REG_6]     = 7,
        [BPF_REG_7]     = 8,
        [BPF_REG_8]     = 9,
        [BPF_REG_9]     = 10,
        /* BPF stack pointer */
        [BPF_REG_FP]    = 13,
        /* Register for blinding */
        [BPF_REG_AX]    = 12,
        /* Work registers for s390x backend */
        [REG_W0]        = 0,
        [REG_W1]        = 1,
        [REG_L]         = 11,
        [REG_15]        = 15,
};

static inline u32 reg(u32 dst_reg, u32 src_reg)
{
        return reg2hex[dst_reg] << 4 | reg2hex[src_reg];
}
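
/*
 * Worked example (illustrative): reg2hex[BPF_REG_1] is 2 and
 * reg2hex[BPF_REG_2] is 3, so reg(BPF_REG_1, BPF_REG_2) yields 0x23.
 * EMIT4(0xb9080000, BPF_REG_1, BPF_REG_2) therefore emits the word
 * 0xb9080023, i.e. "agr %r2,%r3".
 */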

static inline u32 reg_high(u32 reg)
{
        return reg2hex[reg] << 4;
}

static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
{
        u32 r1 = reg2hex[b1];

        if (r1 >= 6 && r1 <= 15 && !jit->seen_reg[r1])
                jit->seen_reg[r1] = 1;
}

#define REG_SET_SEEN(b1) \
({ \
        reg_set_seen(jit, b1); \
})

#define REG_SEEN(b1) jit->seen_reg[reg2hex[(b1)]]

/*
 * EMIT macros for code generation
 */

#define _EMIT2(op) \
({ \
        if (jit->prg_buf) \
                *(u16 *) (jit->prg_buf + jit->prg) = (op); \
        jit->prg += 2; \
})

#define EMIT2(op, b1, b2) \
({ \
        _EMIT2((op) | reg(b1, b2)); \
        REG_SET_SEEN(b1); \
        REG_SET_SEEN(b2); \
})

#define _EMIT4(op) \
({ \
        if (jit->prg_buf) \
                *(u32 *) (jit->prg_buf + jit->prg) = (op); \
        jit->prg += 4; \
})

#define EMIT4(op, b1, b2) \
({ \
        _EMIT4((op) | reg(b1, b2)); \
        REG_SET_SEEN(b1); \
        REG_SET_SEEN(b2); \
})

#define EMIT4_RRF(op, b1, b2, b3) \
({ \
        _EMIT4((op) | reg_high(b3) << 8 | reg(b1, b2)); \
        REG_SET_SEEN(b1); \
        REG_SET_SEEN(b2); \
        REG_SET_SEEN(b3); \
})

#define _EMIT4_DISP(op, disp) \
({ \
        unsigned int __disp = (disp) & 0xfff; \
        _EMIT4((op) | __disp); \
})

#define EMIT4_DISP(op, b1, b2, disp) \
({ \
        _EMIT4_DISP((op) | reg_high(b1) << 16 | \
                    reg_high(b2) << 8, (disp)); \
        REG_SET_SEEN(b1); \
        REG_SET_SEEN(b2); \
})

#define EMIT4_IMM(op, b1, imm) \
({ \
        unsigned int __imm = (imm) & 0xffff; \
        _EMIT4((op) | reg_high(b1) << 16 | __imm); \
        REG_SET_SEEN(b1); \
})

#define EMIT4_PCREL(op, pcrel) \
({ \
        long __pcrel = ((pcrel) >> 1) & 0xffff; \
        _EMIT4((op) | __pcrel); \
})

#define EMIT4_PCREL_RIC(op, mask, target) \
({ \
        int __rel = ((target) - jit->prg) / 2; \
        _EMIT4((op) | (mask) << 20 | (__rel & 0xffff)); \
})

#define _EMIT6(op1, op2) \
({ \
        if (jit->prg_buf) { \
                *(u32 *) (jit->prg_buf + jit->prg) = (op1); \
                *(u16 *) (jit->prg_buf + jit->prg + 4) = (op2); \
        } \
        jit->prg += 6; \
})

#define _EMIT6_DISP(op1, op2, disp) \
({ \
        unsigned int __disp = (disp) & 0xfff; \
        _EMIT6((op1) | __disp, op2); \
})

#define _EMIT6_DISP_LH(op1, op2, disp) \
({ \
        u32 _disp = (u32) (disp); \
        unsigned int __disp_h = _disp & 0xff000; \
        unsigned int __disp_l = _disp & 0x00fff; \
        _EMIT6((op1) | __disp_l, (op2) | __disp_h >> 4); \
})
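
/*
 * Worked example (illustrative): for disp = 0x12345, __disp_l is 0x345
 * and __disp_h is 0x12000. The low 12 bits land in the DL field of op1,
 * while __disp_h >> 4 = 0x1200 places the high 8 bits in the DH byte of
 * op2, as required by the RXY/RSY long-displacement instruction formats.
 */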

#define EMIT6_DISP_LH(op1, op2, b1, b2, b3, disp) \
({ \
        _EMIT6_DISP_LH((op1) | reg(b1, b2) << 16 | \
                       reg_high(b3) << 8, op2, disp); \
        REG_SET_SEEN(b1); \
        REG_SET_SEEN(b2); \
        REG_SET_SEEN(b3); \
})

#define EMIT6_PCREL_RIEB(op1, op2, b1, b2, mask, target) \
({ \
        unsigned int rel = (int)((target) - jit->prg) / 2; \
        _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), \
               (op2) | (mask) << 12); \
        REG_SET_SEEN(b1); \
        REG_SET_SEEN(b2); \
})

#define EMIT6_PCREL_RIEC(op1, op2, b1, imm, mask, target) \
({ \
        unsigned int rel = (int)((target) - jit->prg) / 2; \
        _EMIT6((op1) | (reg_high(b1) | (mask)) << 16 | \
               (rel & 0xffff), (op2) | ((imm) & 0xff) << 8); \
        REG_SET_SEEN(b1); \
        BUILD_BUG_ON(((unsigned long) (imm)) > 0xff); \
})

#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask) \
({ \
        int rel = (addrs[(i) + (off) + 1] - jit->prg) / 2; \
        _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask)); \
        REG_SET_SEEN(b1); \
        REG_SET_SEEN(b2); \
})

#define EMIT6_PCREL_RILB(op, b, target) \
({ \
        unsigned int rel = (int)((target) - jit->prg) / 2; \
        _EMIT6((op) | reg_high(b) << 16 | rel >> 16, rel & 0xffff); \
        REG_SET_SEEN(b); \
})

#define EMIT6_PCREL_RIL(op, target) \
({ \
        unsigned int rel = (int)((target) - jit->prg) / 2; \
        _EMIT6((op) | rel >> 16, rel & 0xffff); \
})

#define EMIT6_PCREL_RILC(op, mask, target) \
({ \
        EMIT6_PCREL_RIL((op) | (mask) << 20, (target)); \
})

#define _EMIT6_IMM(op, imm) \
({ \
        unsigned int __imm = (imm); \
        _EMIT6((op) | (__imm >> 16), __imm & 0xffff); \
})

#define EMIT6_IMM(op, b1, imm) \
({ \
        _EMIT6_IMM((op) | reg_high(b1) << 16, imm); \
        REG_SET_SEEN(b1); \
})

#define _EMIT_CONST_U32(val) \
({ \
        unsigned int ret; \
        ret = jit->lit32; \
        if (jit->prg_buf) \
                *(u32 *)(jit->prg_buf + jit->lit32) = (u32)(val); \
        jit->lit32 += 4; \
        ret; \
})

#define EMIT_CONST_U32(val) \
({ \
        jit->seen |= SEEN_LITERAL; \
        _EMIT_CONST_U32(val) - jit->base_ip; \
})

#define _EMIT_CONST_U64(val) \
({ \
        unsigned int ret; \
        ret = jit->lit64; \
        if (jit->prg_buf) \
                *(u64 *)(jit->prg_buf + jit->lit64) = (u64)(val); \
        jit->lit64 += 8; \
        ret; \
})

#define EMIT_CONST_U64(val) \
({ \
        jit->seen |= SEEN_LITERAL; \
        _EMIT_CONST_U64(val) - jit->base_ip; \
})
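
/*
 * Usage note (illustrative): _EMIT_CONST_U64() appends the value to the
 * 64-bit literal pool and returns its offset within the JITed image,
 * suitable as a target for PC-relative loads such as "lgrl". The
 * EMIT_CONST_U64() wrapper instead returns the offset relative to
 * base_ip, i.e. a displacement off %l (REG_L) for long-displacement
 * forms such as "ng %dst,<d(imm)>(%l)".
 */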

#define EMIT_ZERO(b1) \
({ \
        if (!fp->aux->verifier_zext) { \
                /* llgfr %dst,%dst (zero extend to 64 bit) */ \
                EMIT4(0xb9160000, b1, b1); \
                REG_SET_SEEN(b1); \
        } \
})

/*
 * Return whether this is the first pass. The first pass is special, since we
 * don't know any sizes yet, and thus must be conservative.
 */
static bool is_first_pass(struct bpf_jit *jit)
{
        return jit->size == 0;
}

/*
 * Return whether this is the code generation pass. The code generation pass is
 * special, since we should change as little as possible.
 */
static bool is_codegen_pass(struct bpf_jit *jit)
{
        return jit->prg_buf;
}

/*
 * Return whether "rel" can be encoded as a short PC-relative offset
 */
static bool is_valid_rel(int rel)
{
        return rel >= -65536 && rel <= 65534;
}
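
/*
 * Worked arithmetic (illustrative): short relative branches encode the
 * offset as a signed 16-bit count of halfwords, so the reachable byte
 * range is [-32768 * 2, 32767 * 2] = [-65536, 65534].
 */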

/*
 * Return whether "off" can be reached using a short PC-relative offset
 */
static bool can_use_rel(struct bpf_jit *jit, int off)
{
        return is_valid_rel(off - jit->prg);
}

/*
 * Return whether given displacement can be encoded using
 * Long-Displacement Facility
 */
static bool is_valid_ldisp(int disp)
{
        return disp >= -524288 && disp <= 524287;
}
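
/*
 * Worked arithmetic (illustrative): long-displacement instructions carry
 * a signed 20-bit displacement (12-bit DL plus 8-bit DH), giving the
 * byte range [-2^19, 2^19 - 1] = [-524288, 524287].
 */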

/*
 * Return whether the next 32-bit literal pool entry can be referenced using
 * Long-Displacement Facility
 */
static bool can_use_ldisp_for_lit32(struct bpf_jit *jit)
{
        return is_valid_ldisp(jit->lit32 - jit->base_ip);
}

/*
 * Return whether the next 64-bit literal pool entry can be referenced using
 * Long-Displacement Facility
 */
static bool can_use_ldisp_for_lit64(struct bpf_jit *jit)
{
        return is_valid_ldisp(jit->lit64 - jit->base_ip);
}

/*
 * Fill whole space with illegal instructions
 */
static void jit_fill_hole(void *area, unsigned int size)
{
        memset(area, 0, size);
}

/*
 * Save registers from "rs" (register start) to "re" (register end) on stack
 */
static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
{
        u32 off = STK_OFF_R6 + (rs - 6) * 8;

        if (rs == re)
                /* stg %rs,off(%r15) */
                _EMIT6(0xe300f000 | rs << 20 | off, 0x0024);
        else
                /* stmg %rs,%re,off(%r15) */
                _EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0024, off);
}

/*
 * Restore registers from "rs" (register start) to "re" (register end) on stack
 */
static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re, u32 stack_depth)
{
        u32 off = STK_OFF_R6 + (rs - 6) * 8;

        if (jit->seen & SEEN_STACK)
                off += STK_OFF + stack_depth;

        if (rs == re)
                /* lg %rs,off(%r15) */
                _EMIT6(0xe300f000 | rs << 20 | off, 0x0004);
        else
                /* lmg %rs,%re,off(%r15) */
                _EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0004, off);
}

/*
 * Return first seen register (from start)
 */
static int get_start(struct bpf_jit *jit, int start)
{
        int i;

        for (i = start; i <= 15; i++) {
                if (jit->seen_reg[i])
                        return i;
        }
        return 0;
}

/*
 * Return last seen register (from start) (gap >= 2)
 */
static int get_end(struct bpf_jit *jit, int start)
{
        int i;

        for (i = start; i < 15; i++) {
                if (!jit->seen_reg[i] && !jit->seen_reg[i + 1])
                        return i - 1;
        }
        return jit->seen_reg[15] ? 15 : 14;
}

#define REGS_SAVE       1
#define REGS_RESTORE    0
/*
 * Save and restore clobbered registers (6-15) on stack.
 * We save/restore registers in chunks with gap >= 2 registers.
 */
static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth)
{
        const int last = 15, save_restore_size = 6;
        int re = 6, rs;

        if (is_first_pass(jit)) {
                /*
                 * We don't know yet which registers are used. Reserve space
                 * conservatively.
                 */
                jit->prg += (last - re + 1) * save_restore_size;
                return;
        }

        do {
                rs = get_start(jit, re);
                if (!rs)
                        break;
                re = get_end(jit, rs + 1);
                if (op == REGS_SAVE)
                        save_regs(jit, rs, re);
                else
                        restore_regs(jit, rs, re, stack_depth);
                re++;
        } while (re <= last);
}
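
/*
 * Worked example (illustrative): if only %r6 and %r8 are seen, a single
 * chunk 6-8 is emitted ("stmg %r6,%r8,..."), spanning the unused %r7,
 * because only a gap of two or more unused registers ends a chunk. With
 * %r6, %r7 and %r15 seen, two chunks result: "stmg %r6,%r7,..." and a
 * single "stg %r15,...".
 */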

static void bpf_skip(struct bpf_jit *jit, int size)
{
        if (size >= 6 && !is_valid_rel(size)) {
                /* brcl 0xf,size */
                EMIT6_PCREL_RIL(0xc0f4000000, size);
                size -= 6;
        } else if (size >= 4 && is_valid_rel(size)) {
                /* brc 0xf,size */
                EMIT4_PCREL(0xa7f40000, size);
                size -= 4;
        }
        while (size >= 2) {
                /* bcr 0,%0 */
                _EMIT2(0x0700);
                size -= 2;
        }
}
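
/*
 * Worked example (illustrative): bpf_skip(jit, 12) emits one 4-byte
 * "brc 0xf" that jumps 12 bytes forward plus four 2-byte "bcr 0,%r0"
 * padding instructions; a skip larger than the short-branch range would
 * start with the 6-byte "brcl" form instead.
 */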

/*
 * PLT for hotpatchable calls. The calling convention is the same as for the
 * ftrace hotpatch trampolines: %r0 is return address, %r1 is clobbered.
 */
struct bpf_plt {
        char code[16];
        void *ret;
        void *target;
} __packed;
extern const struct bpf_plt bpf_plt;
asm(
        ".pushsection .rodata\n"
        "       .balign 8\n"
        "bpf_plt:\n"
        "       lgrl %r0,bpf_plt_ret\n"
        "       lgrl %r1,bpf_plt_target\n"
        "       br %r1\n"
        "       .balign 8\n"
        "bpf_plt_ret: .quad 0\n"
        "bpf_plt_target: .quad 0\n"
        "       .popsection\n"
);
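
/*
 * Size check (illustrative): the template body is two 6-byte "lgrl"
 * instructions plus a 2-byte "br", i.e. 14 bytes, and the ".balign 8"
 * pads it to the 16 bytes reserved by code[16], so bpf_plt_ret and
 * bpf_plt_target line up with the ret and target members of struct
 * bpf_plt.
 */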

static void bpf_jit_plt(struct bpf_plt *plt, void *ret, void *target)
{
        memcpy(plt, &bpf_plt, sizeof(*plt));
        plt->ret = ret;
        plt->target = target;
}

/*
 * Emit function prologue
 *
 * Save registers and create stack frame if necessary.
 * See stack frame layout description in "bpf_jit.h"!
 */
static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
                             u32 stack_depth)
{
        /* No-op for hotpatching */
        /* brcl 0,prologue_plt */
        EMIT6_PCREL_RILC(0xc0040000, 0, jit->prologue_plt);
        jit->prologue_plt_ret = jit->prg;

        if (!bpf_is_subprog(fp)) {
                /* Initialize the tail call counter in the main program. */
                /* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
                _EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
        } else {
                /*
                 * Skip the tail call counter initialization in subprograms.
                 * Insert nops in order to have tail_call_start at a
                 * predictable offset.
                 */
                bpf_skip(jit, 6);
        }
        /* Tail calls have to skip above initialization */
        jit->tail_call_start = jit->prg;
        /* Save registers */
        save_restore_regs(jit, REGS_SAVE, stack_depth);
        /* Setup literal pool */
        if (is_first_pass(jit) || (jit->seen & SEEN_LITERAL)) {
                if (!is_first_pass(jit) &&
                    is_valid_ldisp(jit->size - (jit->prg + 2))) {
                        /* basr %l,0 */
                        EMIT2(0x0d00, REG_L, REG_0);
                        jit->base_ip = jit->prg;
                } else {
                        /* larl %l,lit32_start */
                        EMIT6_PCREL_RILB(0xc0000000, REG_L, jit->lit32_start);
                        jit->base_ip = jit->lit32_start;
                }
        }
        /* Setup stack and backchain */
        if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) {
                if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
                        /* lgr %w1,%r15 (backchain) */
                        EMIT4(0xb9040000, REG_W1, REG_15);
                /* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
                EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
                /* aghi %r15,-STK_OFF */
                EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth));
                if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
                        /* stg %w1,152(%r15) (backchain) */
                        EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
                                      REG_15, 152);
        }
}

/*
 * Emit an expoline for a jump that follows
 */
static void emit_expoline(struct bpf_jit *jit)
{
        /* exrl %r0,.+10 */
        EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
        /* j . */
        EMIT4_PCREL(0xa7f40000, 0);
}
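
/*
 * Layout note (illustrative): "exrl" is 6 bytes and "j ." is 4 bytes, so
 * the execute target ".+10" is whatever branch the caller emits
 * immediately after this thunk body (e.g. "br %r1" or "br %r14"), while
 * speculation is trapped in the "j ." self-loop.
 */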

/*
 * Emit __s390_indirect_jump_r1 thunk if necessary
 */
static void emit_r1_thunk(struct bpf_jit *jit)
{
        if (nospec_uses_trampoline()) {
                jit->r1_thunk_ip = jit->prg;
                emit_expoline(jit);
                /* br %r1 */
                _EMIT2(0x07f1);
        }
}

/*
 * Call r1 either directly or via __s390_indirect_jump_r1 thunk
 */
static void call_r1(struct bpf_jit *jit)
{
        if (nospec_uses_trampoline())
                /* brasl %r14,__s390_indirect_jump_r1 */
                EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
        else
                /* basr %r14,%r1 */
                EMIT2(0x0d00, REG_14, REG_1);
}

/*
 * Function epilogue
 */
static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
{
        jit->exit_ip = jit->prg;
        /* Load exit code: lgr %r2,%b0 */
        EMIT4(0xb9040000, REG_2, BPF_REG_0);
        /* Restore registers */
        save_restore_regs(jit, REGS_RESTORE, stack_depth);
        if (nospec_uses_trampoline()) {
                jit->r14_thunk_ip = jit->prg;
                /* Generate __s390_indirect_jump_r14 thunk */
                emit_expoline(jit);
        }
        /* br %r14 */
        _EMIT2(0x07fe);

        if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
                emit_r1_thunk(jit);

        jit->prg = ALIGN(jit->prg, 8);
        jit->prologue_plt = jit->prg;
        if (jit->prg_buf)
                bpf_jit_plt((struct bpf_plt *)(jit->prg_buf + jit->prg),
                            jit->prg_buf + jit->prologue_plt_ret, NULL);
        jit->prg += sizeof(struct bpf_plt);
}

static int get_probe_mem_regno(const u8 *insn)
{
        /*
         * insn must point to llgc, llgh, llgf, lg, lgb, lgh or lgf, which have
         * destination register at the same position.
         */
        if (insn[0] != 0xe3) /* common prefix */
                return -1;
        if (insn[5] != 0x90 && /* llgc */
            insn[5] != 0x91 && /* llgh */
            insn[5] != 0x16 && /* llgf */
            insn[5] != 0x04 && /* lg */
            insn[5] != 0x77 && /* lgb */
            insn[5] != 0x15 && /* lgh */
            insn[5] != 0x14)   /* lgf */
                return -1;
        return insn[1] >> 4;
}
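
/*
 * Worked example (illustrative): the RXY instruction "llgc %r2,0(%r5)"
 * encodes as e3 20 50 00 00 90; insn[0] is the 0xe3 prefix, insn[5] is
 * the 0x90 sub-opcode, and insn[1] >> 4 recovers the destination
 * register number 2.
 */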

bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
{
        regs->psw.addr = extable_fixup(x);
        regs->gprs[x->data] = 0;
        return true;
}

static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp,
                             int probe_prg, int nop_prg)
{
        struct exception_table_entry *ex;
        int reg, prg;
        s64 delta;
        u8 *insn;
        int i;

        if (!fp->aux->extable)
                /* Do nothing during early JIT passes. */
                return 0;
        insn = jit->prg_buf + probe_prg;
        reg = get_probe_mem_regno(insn);
        if (WARN_ON_ONCE(reg < 0))
                /* JIT bug - unexpected probe instruction. */
                return -1;
        if (WARN_ON_ONCE(probe_prg + insn_length(*insn) != nop_prg))
                /* JIT bug - gap between probe and nop instructions. */
                return -1;
        for (i = 0; i < 2; i++) {
                if (WARN_ON_ONCE(jit->excnt >= fp->aux->num_exentries))
                        /* Verifier bug - not enough entries. */
                        return -1;
                ex = &fp->aux->extable[jit->excnt];
                /* Add extable entries for probe and nop instructions. */
                prg = i == 0 ? probe_prg : nop_prg;
                delta = jit->prg_buf + prg - (u8 *)&ex->insn;
                if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
                        /* JIT bug - code and extable must be close. */
                        return -1;
                ex->insn = delta;
                /*
                 * Always land on the nop. Note that extable infrastructure
                 * ignores fixup field, it is handled by ex_handler_bpf().
                 */
                delta = jit->prg_buf + nop_prg - (u8 *)&ex->fixup;
                if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
                        /* JIT bug - landing pad and extable must be close. */
                        return -1;
                ex->fixup = delta;
                ex->type = EX_TYPE_BPF;
                ex->data = reg;
                jit->excnt++;
        }
        return 0;
}

/*
 * Sign-extend the register if necessary
 */
static int sign_extend(struct bpf_jit *jit, int r, u8 size, u8 flags)
{
        if (!(flags & BTF_FMODEL_SIGNED_ARG))
                return 0;

        switch (size) {
        case 1:
                /* lgbr %r,%r */
                EMIT4(0xb9060000, r, r);
                return 0;
        case 2:
                /* lghr %r,%r */
                EMIT4(0xb9070000, r, r);
                return 0;
        case 4:
                /* lgfr %r,%r */
                EMIT4(0xb9140000, r, r);
                return 0;
        case 8:
                return 0;
        default:
                return -1;
        }
}

/*
 * Compile one eBPF instruction into s390x code
 *
 * NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of
 * stack space for the large switch statement.
 */
static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                                 int i, bool extra_pass, u32 stack_depth)
{
        struct bpf_insn *insn = &fp->insnsi[i];
        s32 branch_oc_off = insn->off;
        u32 dst_reg = insn->dst_reg;
        u32 src_reg = insn->src_reg;
        int last, insn_count = 1;
        u32 *addrs = jit->addrs;
        s32 imm = insn->imm;
        s16 off = insn->off;
        int probe_prg = -1;
        unsigned int mask;
        int nop_prg;
        int err;

        if (BPF_CLASS(insn->code) == BPF_LDX &&
            (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
             BPF_MODE(insn->code) == BPF_PROBE_MEMSX))
                probe_prg = jit->prg;

        switch (insn->code) {
        /*
         * BPF_MOV
         */
        case BPF_ALU | BPF_MOV | BPF_X:
                switch (insn->off) {
                case 0: /* DST = (u32) SRC */
                        /* llgfr %dst,%src */
                        EMIT4(0xb9160000, dst_reg, src_reg);
                        if (insn_is_zext(&insn[1]))
                                insn_count = 2;
                        break;
                case 8: /* DST = (u32)(s8) SRC */
                        /* lbr %dst,%src */
                        EMIT4(0xb9260000, dst_reg, src_reg);
                        /* llgfr %dst,%dst */
                        EMIT4(0xb9160000, dst_reg, dst_reg);
                        break;
                case 16: /* DST = (u32)(s16) SRC */
                        /* lhr %dst,%src */
                        EMIT4(0xb9270000, dst_reg, src_reg);
                        /* llgfr %dst,%dst */
                        EMIT4(0xb9160000, dst_reg, dst_reg);
                        break;
                }
                break;
        case BPF_ALU64 | BPF_MOV | BPF_X:
                switch (insn->off) {
                case 0: /* DST = SRC */
                        /* lgr %dst,%src */
                        EMIT4(0xb9040000, dst_reg, src_reg);
                        break;
                case 8: /* DST = (s8) SRC */
                        /* lgbr %dst,%src */
                        EMIT4(0xb9060000, dst_reg, src_reg);
                        break;
                case 16: /* DST = (s16) SRC */
                        /* lghr %dst,%src */
                        EMIT4(0xb9070000, dst_reg, src_reg);
                        break;
                case 32: /* DST = (s32) SRC */
                        /* lgfr %dst,%src */
                        EMIT4(0xb9140000, dst_reg, src_reg);
                        break;
                }
                break;
        case BPF_ALU | BPF_MOV | BPF_K: /* dst = (u32) imm */
                /* llilf %dst,imm */
                EMIT6_IMM(0xc00f0000, dst_reg, imm);
                if (insn_is_zext(&insn[1]))
                        insn_count = 2;
                break;
        case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = imm */
                /* lgfi %dst,imm */
                EMIT6_IMM(0xc0010000, dst_reg, imm);
                break;
        /*
         * BPF_LD 64
         */
        case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
        {
                /* 16 byte instruction that uses two 'struct bpf_insn' */
                u64 imm64;

                imm64 = (u64)(u32) insn[0].imm | ((u64)(u32) insn[1].imm) << 32;
                /* lgrl %dst,imm */
                EMIT6_PCREL_RILB(0xc4080000, dst_reg, _EMIT_CONST_U64(imm64));
                insn_count = 2;
                break;
        }
        /*
         * BPF_ADD
         */
        case BPF_ALU | BPF_ADD | BPF_X: /* dst = (u32) dst + (u32) src */
                /* ar %dst,%src */
                EMIT2(0x1a00, dst_reg, src_reg);
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_ADD | BPF_X: /* dst = dst + src */
                /* agr %dst,%src */
                EMIT4(0xb9080000, dst_reg, src_reg);
                break;
        case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
                if (imm != 0) {
                        /* alfi %dst,imm */
                        EMIT6_IMM(0xc20b0000, dst_reg, imm);
                }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
                if (!imm)
                        break;
                /* agfi %dst,imm */
                EMIT6_IMM(0xc2080000, dst_reg, imm);
                break;
        /*
         * BPF_SUB
         */
        case BPF_ALU | BPF_SUB | BPF_X: /* dst = (u32) dst - (u32) src */
                /* sr %dst,%src */
                EMIT2(0x1b00, dst_reg, src_reg);
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_SUB | BPF_X: /* dst = dst - src */
                /* sgr %dst,%src */
                EMIT4(0xb9090000, dst_reg, src_reg);
                break;
        case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
                if (imm != 0) {
                        /* alfi %dst,-imm */
                        EMIT6_IMM(0xc20b0000, dst_reg, -imm);
                }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
                if (!imm)
                        break;
                if (imm == -0x80000000) {
                        /* algfi %dst,0x80000000 */
                        EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000);
                } else {
                        /* agfi %dst,-imm */
                        EMIT6_IMM(0xc2080000, dst_reg, -imm);
                }
                break;
        /*
         * BPF_MUL
         */
        case BPF_ALU | BPF_MUL | BPF_X: /* dst = (u32) dst * (u32) src */
                /* msr %dst,%src */
                EMIT4(0xb2520000, dst_reg, src_reg);
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_MUL | BPF_X: /* dst = dst * src */
                /* msgr %dst,%src */
                EMIT4(0xb90c0000, dst_reg, src_reg);
                break;
        case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
                if (imm != 1) {
                        /* msfi %dst,imm */
                        EMIT6_IMM(0xc2010000, dst_reg, imm);
                }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
                if (imm == 1)
                        break;
                /* msgfi %dst,imm */
                EMIT6_IMM(0xc2000000, dst_reg, imm);
                break;
        /*
         * BPF_DIV / BPF_MOD
         */
        case BPF_ALU | BPF_DIV | BPF_X:
        case BPF_ALU | BPF_MOD | BPF_X:
        {
                int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;

                switch (off) {
                case 0: /* dst = (u32) dst {/,%} (u32) src */
                        /* xr %w0,%w0 */
                        EMIT2(0x1700, REG_W0, REG_W0);
                        /* lr %w1,%dst */
                        EMIT2(0x1800, REG_W1, dst_reg);
                        /* dlr %w0,%src */
                        EMIT4(0xb9970000, REG_W0, src_reg);
                        break;
                case 1: /* dst = (u32) ((s32) dst {/,%} (s32) src) */
                        /* lgfr %r1,%dst */
                        EMIT4(0xb9140000, REG_W1, dst_reg);
                        /* dsgfr %r0,%src */
                        EMIT4(0xb91d0000, REG_W0, src_reg);
                        break;
                }
                /* llgfr %dst,%rc */
                EMIT4(0xb9160000, dst_reg, rc_reg);
                if (insn_is_zext(&insn[1]))
                        insn_count = 2;
                break;
        }
        case BPF_ALU64 | BPF_DIV | BPF_X:
        case BPF_ALU64 | BPF_MOD | BPF_X:
        {
                int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;

                switch (off) {
                case 0: /* dst = dst {/,%} src */
                        /* lghi %w0,0 */
                        EMIT4_IMM(0xa7090000, REG_W0, 0);
                        /* lgr %w1,%dst */
                        EMIT4(0xb9040000, REG_W1, dst_reg);
                        /* dlgr %w0,%src */
                        EMIT4(0xb9870000, REG_W0, src_reg);
                        break;
                case 1: /* dst = (s64) dst {/,%} (s64) src */
                        /* lgr %w1,%dst */
                        EMIT4(0xb9040000, REG_W1, dst_reg);
                        /* dsgr %w0,%src */
                        EMIT4(0xb90d0000, REG_W0, src_reg);
                        break;
                }
                /* lgr %dst,%rc */
                EMIT4(0xb9040000, dst_reg, rc_reg);
                break;
        }
        case BPF_ALU | BPF_DIV | BPF_K:
        case BPF_ALU | BPF_MOD | BPF_K:
        {
                int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;

                if (imm == 1) {
                        if (BPF_OP(insn->code) == BPF_MOD)
                                /* lghi %dst,0 */
                                EMIT4_IMM(0xa7090000, dst_reg, 0);
                        else
                                EMIT_ZERO(dst_reg);
                        break;
                }
                if (!is_first_pass(jit) && can_use_ldisp_for_lit32(jit)) {
                        switch (off) {
                        case 0: /* dst = (u32) dst {/,%} (u32) imm */
                                /* xr %w0,%w0 */
                                EMIT2(0x1700, REG_W0, REG_W0);
                                /* lr %w1,%dst */
                                EMIT2(0x1800, REG_W1, dst_reg);
                                /* dl %w0,<d(imm)>(%l) */
                                EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0,
                                              REG_L, EMIT_CONST_U32(imm));
                                break;
                        case 1: /* dst = (s32) dst {/,%} (s32) imm */
                                /* lgfr %r1,%dst */
                                EMIT4(0xb9140000, REG_W1, dst_reg);
                                /* dsgf %r0,<d(imm)>(%l) */
                                EMIT6_DISP_LH(0xe3000000, 0x001d, REG_W0, REG_0,
                                              REG_L, EMIT_CONST_U32(imm));
                                break;
                        }
                } else {
                        switch (off) {
                        case 0: /* dst = (u32) dst {/,%} (u32) imm */
                                /* xr %w0,%w0 */
                                EMIT2(0x1700, REG_W0, REG_W0);
                                /* lr %w1,%dst */
                                EMIT2(0x1800, REG_W1, dst_reg);
                                /* lrl %dst,imm */
                                EMIT6_PCREL_RILB(0xc40d0000, dst_reg,
                                                 _EMIT_CONST_U32(imm));
                                jit->seen |= SEEN_LITERAL;
                                /* dlr %w0,%dst */
                                EMIT4(0xb9970000, REG_W0, dst_reg);
                                break;
                        case 1: /* dst = (s32) dst {/,%} (s32) imm */
                                /* lgfr %w1,%dst */
                                EMIT4(0xb9140000, REG_W1, dst_reg);
                                /* lgfrl %dst,imm */
                                EMIT6_PCREL_RILB(0xc40c0000, dst_reg,
                                                 _EMIT_CONST_U32(imm));
                                jit->seen |= SEEN_LITERAL;
                                /* dsgr %w0,%dst */
                                EMIT4(0xb90d0000, REG_W0, dst_reg);
                                break;
                        }
                }
                /* llgfr %dst,%rc */
                EMIT4(0xb9160000, dst_reg, rc_reg);
                if (insn_is_zext(&insn[1]))
                        insn_count = 2;
                break;
        }
        case BPF_ALU64 | BPF_DIV | BPF_K:
        case BPF_ALU64 | BPF_MOD | BPF_K:
        {
                int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;

                if (imm == 1) {
                        if (BPF_OP(insn->code) == BPF_MOD)
                                /* lghi %dst,0 */
                                EMIT4_IMM(0xa7090000, dst_reg, 0);
                        break;
                }
                if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
                        switch (off) {
                        case 0: /* dst = dst {/,%} imm */
                                /* lghi %w0,0 */
                                EMIT4_IMM(0xa7090000, REG_W0, 0);
                                /* lgr %w1,%dst */
                                EMIT4(0xb9040000, REG_W1, dst_reg);
                                /* dlg %w0,<d(imm)>(%l) */
                                EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0,
                                              REG_L, EMIT_CONST_U64(imm));
                                break;
                        case 1: /* dst = (s64) dst {/,%} (s64) imm */
                                /* lgr %w1,%dst */
                                EMIT4(0xb9040000, REG_W1, dst_reg);
                                /* dsg %w0,<d(imm)>(%l) */
                                EMIT6_DISP_LH(0xe3000000, 0x000d, REG_W0, REG_0,
                                              REG_L, EMIT_CONST_U64(imm));
                                break;
                        }
                } else {
                        switch (off) {
                        case 0: /* dst = dst {/,%} imm */
                                /* lghi %w0,0 */
                                EMIT4_IMM(0xa7090000, REG_W0, 0);
                                /* lgr %w1,%dst */
                                EMIT4(0xb9040000, REG_W1, dst_reg);
                                /* lgrl %dst,imm */
                                EMIT6_PCREL_RILB(0xc4080000, dst_reg,
                                                 _EMIT_CONST_U64(imm));
                                jit->seen |= SEEN_LITERAL;
                                /* dlgr %w0,%dst */
                                EMIT4(0xb9870000, REG_W0, dst_reg);
                                break;
                        case 1: /* dst = (s64) dst {/,%} (s64) imm */
                                /* lgr %w1,%dst */
                                EMIT4(0xb9040000, REG_W1, dst_reg);
                                /* lgrl %dst,imm */
                                EMIT6_PCREL_RILB(0xc4080000, dst_reg,
                                                 _EMIT_CONST_U64(imm));
                                jit->seen |= SEEN_LITERAL;
                                /* dsgr %w0,%dst */
                                EMIT4(0xb90d0000, REG_W0, dst_reg);
                                break;
                        }
                }
                /* lgr %dst,%rc */
                EMIT4(0xb9040000, dst_reg, rc_reg);
                break;
        }
        /*
         * BPF_AND
         */
        case BPF_ALU | BPF_AND | BPF_X: /* dst = (u32) dst & (u32) src */
                /* nr %dst,%src */
                EMIT2(0x1400, dst_reg, src_reg);
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
                /* ngr %dst,%src */
                EMIT4(0xb9800000, dst_reg, src_reg);
                break;
        case BPF_ALU | BPF_AND | BPF_K: /* dst = (u32) dst & (u32) imm */
                /* nilf %dst,imm */
                EMIT6_IMM(0xc00b0000, dst_reg, imm);
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
                if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
                        /* ng %dst,<d(imm)>(%l) */
                        EMIT6_DISP_LH(0xe3000000, 0x0080,
                                      dst_reg, REG_0, REG_L,
                                      EMIT_CONST_U64(imm));
                } else {
                        /* lgrl %w0,imm */
                        EMIT6_PCREL_RILB(0xc4080000, REG_W0,
                                         _EMIT_CONST_U64(imm));
                        jit->seen |= SEEN_LITERAL;
                        /* ngr %dst,%w0 */
                        EMIT4(0xb9800000, dst_reg, REG_W0);
                }
                break;
        /*
         * BPF_OR
         */
        case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
                /* or %dst,%src */
                EMIT2(0x1600, dst_reg, src_reg);
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
                /* ogr %dst,%src */
                EMIT4(0xb9810000, dst_reg, src_reg);
                break;
        case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
                /* oilf %dst,imm */
                EMIT6_IMM(0xc00d0000, dst_reg, imm);
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
                if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
                        /* og %dst,<d(imm)>(%l) */
                        EMIT6_DISP_LH(0xe3000000, 0x0081,
                                      dst_reg, REG_0, REG_L,
                                      EMIT_CONST_U64(imm));
                } else {
                        /* lgrl %w0,imm */
                        EMIT6_PCREL_RILB(0xc4080000, REG_W0,
                                         _EMIT_CONST_U64(imm));
                        jit->seen |= SEEN_LITERAL;
                        /* ogr %dst,%w0 */
                        EMIT4(0xb9810000, dst_reg, REG_W0);
                }
                break;
        /*
         * BPF_XOR
         */
        case BPF_ALU | BPF_XOR | BPF_X: /* dst = (u32) dst ^ (u32) src */
                /* xr %dst,%src */
                EMIT2(0x1700, dst_reg, src_reg);
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_XOR | BPF_X: /* dst = dst ^ src */
                /* xgr %dst,%src */
                EMIT4(0xb9820000, dst_reg, src_reg);
                break;
        case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
                if (imm != 0) {
                        /* xilf %dst,imm */
                        EMIT6_IMM(0xc0070000, dst_reg, imm);
                }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
                if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
                        /* xg %dst,<d(imm)>(%l) */
                        EMIT6_DISP_LH(0xe3000000, 0x0082,
                                      dst_reg, REG_0, REG_L,
                                      EMIT_CONST_U64(imm));
                } else {
                        /* lgrl %w0,imm */
                        EMIT6_PCREL_RILB(0xc4080000, REG_W0,
                                         _EMIT_CONST_U64(imm));
                        jit->seen |= SEEN_LITERAL;
                        /* xgr %dst,%w0 */
                        EMIT4(0xb9820000, dst_reg, REG_W0);
                }
                break;
        /*
         * BPF_LSH
         */
        case BPF_ALU | BPF_LSH | BPF_X: /* dst = (u32) dst << (u32) src */
                /* sll %dst,0(%src) */
                EMIT4_DISP(0x89000000, dst_reg, src_reg, 0);
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_LSH | BPF_X: /* dst = dst << src */
                /* sllg %dst,%dst,0(%src) */
                EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
                break;
        case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
                if (imm != 0) {
                        /* sll %dst,imm(%r0) */
                        EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
                }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
                if (imm == 0)
                        break;
                /* sllg %dst,%dst,imm(%r0) */
                EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, REG_0, imm);
                break;
        /*
         * BPF_RSH
         */
        case BPF_ALU | BPF_RSH | BPF_X: /* dst = (u32) dst >> (u32) src */
                /* srl %dst,0(%src) */
                EMIT4_DISP(0x88000000, dst_reg, src_reg, 0);
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_RSH | BPF_X: /* dst = dst >> src */
                /* srlg %dst,%dst,0(%src) */
                EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
                break;
        case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
                if (imm != 0) {
                        /* srl %dst,imm(%r0) */
                        EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
                }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
                if (imm == 0)
                        break;
                /* srlg %dst,%dst,imm(%r0) */
                EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, REG_0, imm);
                break;
        /*
         * BPF_ARSH
         */
        case BPF_ALU | BPF_ARSH | BPF_X: /* ((s32) dst) >>= src */
                /* sra %dst,0(%src) */
                EMIT4_DISP(0x8a000000, dst_reg, src_reg, 0);
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_ARSH | BPF_X: /* ((s64) dst) >>= src */
                /* srag %dst,%dst,0(%src) */
                EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
                break;
        case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst) >>= imm */
                if (imm != 0) {
                        /* sra %dst,imm(%r0) */
                        EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
                }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
                if (imm == 0)
                        break;
                /* srag %dst,%dst,imm(%r0) */
                EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, REG_0, imm);
                break;
        /*
         * BPF_NEG
         */
        case BPF_ALU | BPF_NEG: /* dst = (u32) -dst */
                /* lcr %dst,%dst */
                EMIT2(0x1300, dst_reg, dst_reg);
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_NEG: /* dst = -dst */
                /* lcgr %dst,%dst */
                EMIT4(0xb9030000, dst_reg, dst_reg);
                break;
        /*
         * BPF_FROM_BE/LE
         */
        case BPF_ALU | BPF_END | BPF_FROM_BE:
                /* s390 is big endian, therefore only clear high order bytes */
                switch (imm) {
                case 16: /* dst = (u16) cpu_to_be16(dst) */
                        /* llghr %dst,%dst */
                        EMIT4(0xb9850000, dst_reg, dst_reg);
                        if (insn_is_zext(&insn[1]))
                                insn_count = 2;
                        break;
                case 32: /* dst = (u32) cpu_to_be32(dst) */
                        if (!fp->aux->verifier_zext)
                                /* llgfr %dst,%dst */
                                EMIT4(0xb9160000, dst_reg, dst_reg);
                        break;
                case 64: /* dst = (u64) cpu_to_be64(dst) */
                        break;
                }
                break;
        case BPF_ALU | BPF_END | BPF_FROM_LE:
        case BPF_ALU64 | BPF_END | BPF_FROM_LE:
                switch (imm) {
                case 16: /* dst = (u16) cpu_to_le16(dst) */
                        /* lrvr %dst,%dst */
                        EMIT4(0xb91f0000, dst_reg, dst_reg);
                        /* srl %dst,16(%r0) */
                        EMIT4_DISP(0x88000000, dst_reg, REG_0, 16);
                        /* llghr %dst,%dst */
                        EMIT4(0xb9850000, dst_reg, dst_reg);
                        if (insn_is_zext(&insn[1]))
                                insn_count = 2;
                        break;
                case 32: /* dst = (u32) cpu_to_le32(dst) */
                        /* lrvr %dst,%dst */
                        EMIT4(0xb91f0000, dst_reg, dst_reg);
                        if (!fp->aux->verifier_zext)
                                /* llgfr %dst,%dst */
                                EMIT4(0xb9160000, dst_reg, dst_reg);
                        break;
                case 64: /* dst = (u64) cpu_to_le64(dst) */
                        /* lrvgr %dst,%dst */
                        EMIT4(0xb90f0000, dst_reg, dst_reg);
                        break;
                }
                break;
        /*
         * BPF_NOSPEC (speculation barrier)
         */
        case BPF_ST | BPF_NOSPEC:
                break;
        /*
         * BPF_ST(X)
         */
        case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */
                /* stcy %src,off(%dst) */
                EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg, REG_0, off);
                jit->seen |= SEEN_MEM;
                break;
        case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
                /* sthy %src,off(%dst) */
                EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg, REG_0, off);
                jit->seen |= SEEN_MEM;
                break;
        case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
                /* sty %src,off(%dst) */
                EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg, REG_0, off);
                jit->seen |= SEEN_MEM;
                break;
        case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
                /* stg %src,off(%dst) */
                EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg, REG_0, off);
                jit->seen |= SEEN_MEM;
                break;
        case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
                /* lhi %w0,imm */
                EMIT4_IMM(0xa7080000, REG_W0, (u8) imm);
                /* stcy %w0,off(dst) */
                EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg, REG_0, off);
                jit->seen |= SEEN_MEM;
                break;
        case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
                /* lhi %w0,imm */
                EMIT4_IMM(0xa7080000, REG_W0, (u16) imm);
                /* sthy %w0,off(dst) */
                EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg, REG_0, off);
                jit->seen |= SEEN_MEM;
                break;
        case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
                /* llilf %w0,imm */
                EMIT6_IMM(0xc00f0000, REG_W0, (u32) imm);
                /* sty %w0,off(%dst) */
                EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg, REG_0, off);
                jit->seen |= SEEN_MEM;
                break;
        case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
                /* lgfi %w0,imm */
                EMIT6_IMM(0xc0010000, REG_W0, imm);
                /* stg %w0,off(%dst) */
                EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg, REG_0, off);
                jit->seen |= SEEN_MEM;
                break;
        /*
         * BPF_ATOMIC
         */
        case BPF_STX | BPF_ATOMIC | BPF_DW:
        case BPF_STX | BPF_ATOMIC | BPF_W:
        {
                bool is32 = BPF_SIZE(insn->code) == BPF_W;

                switch (insn->imm) {
/* {op32|op64} {%w0|%src},%src,off(%dst) */
#define EMIT_ATOMIC(op32, op64) do { \
                EMIT6_DISP_LH(0xeb000000, is32 ? (op32) : (op64), \
                              (insn->imm & BPF_FETCH) ? src_reg : REG_W0, \
                              src_reg, dst_reg, off); \
                if (insn->imm & BPF_FETCH) { \
                        /* bcr 14,0 - see atomic_fetch_{add,and,or,xor}() */ \
                        _EMIT2(0x07e0); \
                        if (is32) \
                                EMIT_ZERO(src_reg); \
                } \
} while (0)
                case BPF_ADD:
                case BPF_ADD | BPF_FETCH:
                        /* {laal|laalg} */
                        EMIT_ATOMIC(0x00fa, 0x00ea);
                        break;
                case BPF_AND:
                case BPF_AND | BPF_FETCH:
                        /* {lan|lang} */
                        EMIT_ATOMIC(0x00f4, 0x00e4);
                        break;
                case BPF_OR:
                case BPF_OR | BPF_FETCH:
                        /* {lao|laog} */
                        EMIT_ATOMIC(0x00f6, 0x00e6);
                        break;
                case BPF_XOR:
                case BPF_XOR | BPF_FETCH:
                        /* {lax|laxg} */
                        EMIT_ATOMIC(0x00f7, 0x00e7);
                        break;
#undef EMIT_ATOMIC
                case BPF_XCHG:
                        /* {ly|lg} %w0,off(%dst) */
                        EMIT6_DISP_LH(0xe3000000,
                                      is32 ? 0x0058 : 0x0004, REG_W0, REG_0,
                                      dst_reg, off);
                        /* 0: {csy|csg} %w0,%src,off(%dst) */
                        EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030,
                                      REG_W0, src_reg, dst_reg, off);
                        /* brc 4,0b */
                        EMIT4_PCREL_RIC(0xa7040000, 4, jit->prg - 6);
                        /* {llgfr|lgr} %src,%w0 */
                        EMIT4(is32 ? 0xb9160000 : 0xb9040000, src_reg, REG_W0);
                        if (is32 && insn_is_zext(&insn[1]))
                                insn_count = 2;
                        break;
                case BPF_CMPXCHG:
                        /* 0: {csy|csg} %b0,%src,off(%dst) */
                        EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030,
                                      BPF_REG_0, src_reg, dst_reg, off);
                        break;
                default:
                        pr_err("Unknown atomic operation %02x\n", insn->imm);
                        return -1;
                }

                jit->seen |= SEEN_MEM;
                break;
        }
        /*
         * BPF_LDX
         */
        case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
        case BPF_LDX | BPF_PROBE_MEM | BPF_B:
                /* llgc %dst,0(off,%src) */
                EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, REG_0, off);
                jit->seen |= SEEN_MEM;
                if (insn_is_zext(&insn[1]))
                        insn_count = 2;
                break;
        case BPF_LDX | BPF_MEMSX | BPF_B: /* dst = *(s8 *)(ul) (src + off) */
        case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
                /* lgb %dst,0(off,%src) */
                EMIT6_DISP_LH(0xe3000000, 0x0077, dst_reg, src_reg, REG_0, off);
                jit->seen |= SEEN_MEM;
                break;
        case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
        case BPF_LDX | BPF_PROBE_MEM | BPF_H:
                /* llgh %dst,0(off,%src) */
                EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, REG_0, off);
                jit->seen |= SEEN_MEM;
                if (insn_is_zext(&insn[1]))
                        insn_count = 2;
                break;
        case BPF_LDX | BPF_MEMSX | BPF_H: /* dst = *(s16 *)(ul) (src + off) */
        case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
                /* lgh %dst,0(off,%src) */
                EMIT6_DISP_LH(0xe3000000, 0x0015, dst_reg, src_reg, REG_0, off);
                jit->seen |= SEEN_MEM;
                break;
        case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
        case BPF_LDX | BPF_PROBE_MEM | BPF_W:
                /* llgf %dst,off(%src) */
                jit->seen |= SEEN_MEM;
                EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, REG_0, off);
                if (insn_is_zext(&insn[1]))
                        insn_count = 2;
                break;
        case BPF_LDX | BPF_MEMSX | BPF_W: /* dst = *(s32 *)(ul) (src + off) */
        case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
                /* lgf %dst,off(%src) */
                jit->seen |= SEEN_MEM;
                EMIT6_DISP_LH(0xe3000000, 0x0014, dst_reg, src_reg, REG_0, off);
                break;
        case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
        case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
                /* lg %dst,0(off,%src) */
                jit->seen |= SEEN_MEM;
                EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, REG_0, off);
                break;
        /*
         * BPF_JMP / CALL
         */
        case BPF_JMP | BPF_CALL:
        {
                const struct btf_func_model *m;
                bool func_addr_fixed;
                int j, ret;
                u64 func;

                ret = bpf_jit_get_func_addr(fp, insn, extra_pass,
                                            &func, &func_addr_fixed);
                if (ret < 0)
                        return -1;

                REG_SET_SEEN(BPF_REG_5);
                jit->seen |= SEEN_FUNC;
                /*
                 * Copy the tail call counter to where the callee expects it.
                 *
                 * Note 1: The callee can increment the tail call counter, but
                 * we do not load it back, since the x86 JIT does not do this
                 * either.
                 *
                 * Note 2: We assume that the verifier does not let us call the
                 * main program, which clears the tail call counter on entry.
                 */
                /* mvc STK_OFF_TCCNT(4,%r15),N(%r15) */
                _EMIT6(0xd203f000 | STK_OFF_TCCNT,
                       0xf000 | (STK_OFF_TCCNT + STK_OFF + stack_depth));

                /* Sign-extend the kfunc arguments. */
                if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
                        m = bpf_jit_find_kfunc_model(fp, insn);
                        if (!m)
                                return -1;

                        for (j = 0; j < m->nr_args; j++) {
                                if (sign_extend(jit, BPF_REG_1 + j,
                                                m->arg_size[j],
                                                m->arg_flags[j]))
                                        return -1;
                        }
                }

                /* lgrl %w1,func */
                EMIT6_PCREL_RILB(0xc4080000, REG_W1, _EMIT_CONST_U64(func));
                /* %r1() */
                call_r1(jit);
                /* lgr %b0,%r2: load return value into %b0 */
                EMIT4(0xb9040000, BPF_REG_0, REG_2);
                break;
        }
        case BPF_JMP | BPF_TAIL_CALL: {
                int patch_1_clrj, patch_2_clij, patch_3_brc;

                /*
                 * Implicit input:
                 *  B1: pointer to ctx
                 *  B2: pointer to bpf_array
                 *  B3: index in bpf_array
                 *
                 * if (index >= array->map.max_entries)
                 *         goto out;
                 */

                /* llgf %w1,map.max_entries(%b2) */
                EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
                              offsetof(struct bpf_array, map.max_entries));
                /* if ((u32)%b3 >= (u32)%w1) goto out; */
                /* clrj %b3,%w1,0xa,out */
                patch_1_clrj = jit->prg;
                EMIT6_PCREL_RIEB(0xec000000, 0x0077, BPF_REG_3, REG_W1, 0xa,
                                 jit->prg);

                /*
                 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
                 *         goto out;
                 */

                if (jit->seen & SEEN_STACK)
                        off = STK_OFF_TCCNT + STK_OFF + stack_depth;
                else
                        off = STK_OFF_TCCNT;
                /* lhi %w0,1 */
                EMIT4_IMM(0xa7080000, REG_W0, 1);
                /* laal %w1,%w0,off(%r15) */
                EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off);
                /* clij %w1,MAX_TAIL_CALL_CNT-1,0x2,out */
                patch_2_clij = jit->prg;
                EMIT6_PCREL_RIEC(0xec000000, 0x007f, REG_W1, MAX_TAIL_CALL_CNT - 1,
                                 2, jit->prg);

                /*
                 * prog = array->ptrs[index];
                 * if (prog == NULL)
                 *         goto out;
                 */

                /* llgfr %r1,%b3: %r1 = (u32) index */
                EMIT4(0xb9160000, REG_1, BPF_REG_3);
                /* sllg %r1,%r1,3: %r1 *= 8 */
                EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
                /* ltg %r1,prog(%b2,%r1) */
                EMIT6_DISP_LH(0xe3000000, 0x0002, REG_1, BPF_REG_2,
                              REG_1, offsetof(struct bpf_array, ptrs));
                /* brc 0x8,out */
                patch_3_brc = jit->prg;
                EMIT4_PCREL_RIC(0xa7040000, 8, jit->prg);

                /*
                 * Restore registers before calling function
                 */
                save_restore_regs(jit, REGS_RESTORE, stack_depth);

                /*
                 * goto *(prog->bpf_func + tail_call_start);
                 */

                /* lg %r1,bpf_func(%r1) */
                EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_1, REG_0,
                              offsetof(struct bpf_prog, bpf_func));
                if (nospec_uses_trampoline()) {
                        jit->seen |= SEEN_FUNC;
                        /* aghi %r1,tail_call_start */
                        EMIT4_IMM(0xa70b0000, REG_1, jit->tail_call_start);
                        /* brcl 0xf,__s390_indirect_jump_r1 */
                        EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->r1_thunk_ip);
                } else {
                        /* bc 0xf,tail_call_start(%r1) */
                        _EMIT4(0x47f01000 + jit->tail_call_start);
                }
                /* out: */
                if (jit->prg_buf) {
                        *(u16 *)(jit->prg_buf + patch_1_clrj + 2) =
                                (jit->prg - patch_1_clrj) >> 1;
                        *(u16 *)(jit->prg_buf + patch_2_clij + 2) =
                                (jit->prg - patch_2_clij) >> 1;
                        *(u16 *)(jit->prg_buf + patch_3_brc + 2) =
                                (jit->prg - patch_3_brc) >> 1;
                }
                break;
        }
        case BPF_JMP | BPF_EXIT: /* return b0 */
                last = (i == fp->len - 1) ? 1 : 0;
                if (last)
                        break;
                if (!is_first_pass(jit) && can_use_rel(jit, jit->exit_ip))
                        /* brc 0xf, <exit> */
                        EMIT4_PCREL_RIC(0xa7040000, 0xf, jit->exit_ip);
                else
                        /* brcl 0xf, <exit> */
                        EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->exit_ip);
                break;
        /*
         * Branch relative (number of skipped instructions) to offset on
         * condition.
         *
         * Condition code to mask mapping:
         *
         *  CC | Description        | Mask
         *  ------------------------------
         *  0  | Operands equal     |    8
         *  1  | First operand low  |    4
         *  2  | First operand high |    2
         *  3  | Unused             |    1
         *
         * For s390x relative branches: ip = ip + off_bytes
         * For BPF relative branches: insn = insn + off_insns + 1
         *
         * For example for s390x with offset 0 we jump to the branch
         * instruction itself (loop) and for BPF with offset 0 we
         * branch to the instruction behind the branch.
         */
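        /*
         * Worked example (illustrative): BPF_JSGT compiles to a signed
         * compare followed by a branch with mask 0x2000, i.e. mask nibble
         * 0x2, which selects CC 2 ("first operand high"); BPF_JSGE uses
         * 0xa000 (nibble 0x8 | 0x2), branching on CC 0 or CC 2.
         */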
        case BPF_JMP32 | BPF_JA: /* if (true) */
                branch_oc_off = imm;
                fallthrough;
        case BPF_JMP | BPF_JA: /* if (true) */
                mask = 0xf000; /* j */
                goto branch_oc;
        case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */
        case BPF_JMP32 | BPF_JSGT | BPF_K: /* ((s32) dst > (s32) imm) */
                mask = 0x2000; /* jh */
                goto branch_ks;
        case BPF_JMP | BPF_JSLT | BPF_K: /* ((s64) dst < (s64) imm) */
        case BPF_JMP32 | BPF_JSLT | BPF_K: /* ((s32) dst < (s32) imm) */
                mask = 0x4000; /* jl */
                goto branch_ks;
        case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */
        case BPF_JMP32 | BPF_JSGE | BPF_K: /* ((s32) dst >= (s32) imm) */
                mask = 0xa000; /* jhe */
                goto branch_ks;
        case BPF_JMP | BPF_JSLE | BPF_K: /* ((s64) dst <= (s64) imm) */
        case BPF_JMP32 | BPF_JSLE | BPF_K: /* ((s32) dst <= (s32) imm) */
                mask = 0xc000; /* jle */
                goto branch_ks;
        case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */
        case BPF_JMP32 | BPF_JGT | BPF_K: /* ((u32) dst_reg > (u32) imm) */
                mask = 0x2000; /* jh */
                goto branch_ku;
        case BPF_JMP | BPF_JLT | BPF_K: /* (dst_reg < imm) */
        case BPF_JMP32 | BPF_JLT | BPF_K: /* ((u32) dst_reg < (u32) imm) */
                mask = 0x4000; /* jl */
                goto branch_ku;
        case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */
        case BPF_JMP32 | BPF_JGE | BPF_K: /* ((u32) dst_reg >= (u32) imm) */
                mask = 0xa000; /* jhe */
                goto branch_ku;
        case BPF_JMP | BPF_JLE | BPF_K: /* (dst_reg <= imm) */
        case BPF_JMP32 | BPF_JLE | BPF_K: /* ((u32) dst_reg <= (u32) imm) */
                mask = 0xc000; /* jle */
                goto branch_ku;
        case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */
        case BPF_JMP32 | BPF_JNE | BPF_K: /* ((u32) dst_reg != (u32) imm) */
                mask = 0x7000; /* jne */
                goto branch_ku;
        case BPF_JMP | BPF_JEQ | BPF_K: /* (dst_reg == imm) */
        case BPF_JMP32 | BPF_JEQ | BPF_K: /* ((u32) dst_reg == (u32) imm) */
                mask = 0x8000; /* je */
                goto branch_ku;
        case BPF_JMP | BPF_JSET | BPF_K: /* (dst_reg & imm) */
        case BPF_JMP32 | BPF_JSET | BPF_K: /* ((u32) dst_reg & (u32) imm) */
                mask = 0x7000; /* jnz */
                if (BPF_CLASS(insn->code) == BPF_JMP32) {
                        /* llilf %w1,imm (load zero extend imm) */
                        EMIT6_IMM(0xc00f0000, REG_W1, imm);
                        /* nr %w1,%dst */
                        EMIT2(0x1400, REG_W1, dst_reg);
                } else {
                        /* lgfi %w1,imm (load sign extend imm) */
                        EMIT6_IMM(0xc0010000, REG_W1, imm);
                        /* ngr %w1,%dst */
                        EMIT4(0xb9800000, REG_W1, dst_reg);
                }
                goto branch_oc;

        case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */
        case BPF_JMP32 | BPF_JSGT | BPF_X: /* ((s32) dst > (s32) src) */
                mask = 0x2000; /* jh */
                goto branch_xs;
        case BPF_JMP | BPF_JSLT | BPF_X: /* ((s64) dst < (s64) src) */
        case BPF_JMP32 | BPF_JSLT | BPF_X: /* ((s32) dst < (s32) src) */
                mask = 0x4000; /* jl */
                goto branch_xs;
        case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */
        case BPF_JMP32 | BPF_JSGE | BPF_X: /* ((s32) dst >= (s32) src) */
                mask = 0xa000; /* jhe */
                goto branch_xs;
        case BPF_JMP | BPF_JSLE | BPF_X: /* ((s64) dst <= (s64) src) */
        case BPF_JMP32 | BPF_JSLE | BPF_X: /* ((s32) dst <= (s32) src) */
                mask = 0xc000; /* jle */
                goto branch_xs;
        case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */
        case BPF_JMP32 | BPF_JGT | BPF_X: /* ((u32) dst > (u32) src) */
                mask = 0x2000; /* jh */
                goto branch_xu;
        case BPF_JMP | BPF_JLT | BPF_X: /* (dst < src) */
        case BPF_JMP32 | BPF_JLT | BPF_X: /* ((u32) dst < (u32) src) */
                mask = 0x4000; /* jl */
                goto branch_xu;
        case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */
        case BPF_JMP32 | BPF_JGE | BPF_X: /* ((u32) dst >= (u32) src) */
                mask = 0xa000; /* jhe */
                goto branch_xu;
        case BPF_JMP | BPF_JLE | BPF_X: /* (dst <= src) */
        case BPF_JMP32 | BPF_JLE | BPF_X: /* ((u32) dst <= (u32) src) */
                mask = 0xc000; /* jle */
                goto branch_xu;
        case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */
        case BPF_JMP32 | BPF_JNE | BPF_X: /* ((u32) dst != (u32) src) */
                mask = 0x7000; /* jne */
                goto branch_xu;
        case BPF_JMP | BPF_JEQ | BPF_X: /* (dst == src) */
        case BPF_JMP32 | BPF_JEQ | BPF_X: /* ((u32) dst == (u32) src) */
                mask = 0x8000; /* je */
                goto branch_xu;
        case BPF_JMP | BPF_JSET | BPF_X: /* (dst & src) */
        case BPF_JMP32 | BPF_JSET | BPF_X: /* ((u32) dst & (u32) src) */
        {
                bool is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;

                mask = 0x7000; /* jnz */
                /* nrk or ngrk %w1,%dst,%src */
                EMIT4_RRF((is_jmp32 ? 0xb9f40000 : 0xb9e40000),
                          REG_W1, dst_reg, src_reg);
                goto branch_oc;
branch_ks:
                is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
                /* cfi or cgfi %dst,imm */
                EMIT6_IMM(is_jmp32 ? 0xc20d0000 : 0xc20c0000,
                          dst_reg, imm);
                if (!is_first_pass(jit) &&
                    can_use_rel(jit, addrs[i + off + 1])) {
                        /* brc mask,off */
                        EMIT4_PCREL_RIC(0xa7040000,
                                        mask >> 12, addrs[i + off + 1]);
                } else {
                        /* brcl mask,off */
                        EMIT6_PCREL_RILC(0xc0040000,
                                         mask >> 12, addrs[i + off + 1]);
                }
                break;
branch_ku:
                /* lgfi %w1,imm (load sign extend imm) */
                src_reg = REG_1;
                EMIT6_IMM(0xc0010000, src_reg, imm);
                goto branch_xu;
branch_xs:
                is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
                if (!is_first_pass(jit) &&
                    can_use_rel(jit, addrs[i + off + 1])) {
                        /* crj or cgrj %dst,%src,mask,off */
                        EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064),
                                    dst_reg, src_reg, i, off, mask);
                } else {
                        /* cr or cgr %dst,%src */
                        if (is_jmp32)
                                EMIT2(0x1900, dst_reg, src_reg);
                        else
                                EMIT4(0xb9200000, dst_reg, src_reg);
                        /* brcl mask,off */
                        EMIT6_PCREL_RILC(0xc0040000,
                                         mask >> 12, addrs[i + off + 1]);
                }
                break;
1862 branch_xu:
1863 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1864 if (!is_first_pass(jit) &&
1865 can_use_rel(jit, addrs[i + off + 1])) {
1866 /* clrj or clgrj %dst,%src,mask,off */
1867 EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065),
1868 dst_reg, src_reg, i, off, mask);
1869 } else {
1870 /* clr or clgr %dst,%src */
1871 if (is_jmp32)
1872 EMIT2(0x1500, dst_reg, src_reg);
1873 else
1874 EMIT4(0xb9210000, dst_reg, src_reg);
1875 /* brcl mask,off */
1876 EMIT6_PCREL_RILC(0xc0040000,
1877 mask >> 12, addrs[i + off + 1]);
1878 }
1879 break;
1880 branch_oc:
1881 if (!is_first_pass(jit) &&
1882 can_use_rel(jit, addrs[i + branch_oc_off + 1])) {
1883 /* brc mask,off */
1884 EMIT4_PCREL_RIC(0xa7040000,
1885 mask >> 12,
1886 addrs[i + branch_oc_off + 1]);
1887 } else {
1888 /* brcl mask,off */
1889 EMIT6_PCREL_RILC(0xc0040000,
1890 mask >> 12,
1891 addrs[i + branch_oc_off + 1]);
1892 }
1893 break;
1894 }
1895 default: /* too complex, give up */
1896 pr_err("Unknown opcode %02x\n", insn->code);
1897 return -1;
1898 }
1899
1900 if (probe_prg != -1) {
1901 /*
1902 * Handlers of certain exceptions leave psw.addr pointing to
1903 * the instruction directly after the failing one. Therefore,
1904 * create two exception table entries and also add a nop in
1905 * case two probing instructions come directly after each
1906 * other.
1907 */
1908 nop_prg = jit->prg;
1909 /* bcr 0,%0 */
1910 _EMIT2(0x0700);
1911 err = bpf_jit_probe_mem(jit, fp, probe_prg, nop_prg);
1912 if (err < 0)
1913 return err;
1914 }
1915
1916 return insn_count;
1917 }

/*
 * Return whether the new address of the i-th instruction satisfies all
 * invariants.
 */
static bool bpf_is_new_addr_sane(struct bpf_jit *jit, int i)
{
	/* On the first pass anything goes */
	if (is_first_pass(jit))
		return true;

	/* The codegen pass must not change anything */
	if (is_codegen_pass(jit))
		return jit->addrs[i] == jit->prg;

	/* Passes in between must not increase code size */
	return jit->addrs[i] >= jit->prg;
}

/*
 * Update the address of the i-th instruction
 */
static int bpf_set_addr(struct bpf_jit *jit, int i)
{
	int delta;

	if (is_codegen_pass(jit)) {
		delta = jit->prg - jit->addrs[i];
		if (delta < 0)
			bpf_skip(jit, -delta);
	}
	if (WARN_ON_ONCE(!bpf_is_new_addr_sane(jit, i)))
		return -1;
	jit->addrs[i] = jit->prg;
	return 0;
}
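
/*
 * A minimal sketch of the pass convergence: if a pass before the codegen
 * pass reserved 6 bytes for a branch at insn i and the codegen pass emits
 * only a 4-byte form, delta is -2 and bpf_skip() pads the gap, so that
 * every address already recorded in jit->addrs stays valid.
 */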

/*
 * Compile eBPF program into s390x code
 */
static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
			bool extra_pass, u32 stack_depth)
{
	int i, insn_count, lit32_size, lit64_size;

	jit->lit32 = jit->lit32_start;
	jit->lit64 = jit->lit64_start;
	jit->prg = 0;
	jit->excnt = 0;

	bpf_jit_prologue(jit, fp, stack_depth);
	if (bpf_set_addr(jit, 0) < 0)
		return -1;
	for (i = 0; i < fp->len; i += insn_count) {
		insn_count = bpf_jit_insn(jit, fp, i, extra_pass, stack_depth);
		if (insn_count < 0)
			return -1;
		/* Next instruction address */
		if (bpf_set_addr(jit, i + insn_count) < 0)
			return -1;
	}
	bpf_jit_epilogue(jit, stack_depth);

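	/*
	 * Final image layout: the program text comes first, followed by the
	 * 32-bit literal pool (4-byte aligned) and then the 64-bit literal
	 * pool (8-byte aligned); jit->size covers all three.
	 */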
	lit32_size = jit->lit32 - jit->lit32_start;
	lit64_size = jit->lit64 - jit->lit64_start;
	jit->lit32_start = jit->prg;
	if (lit32_size)
		jit->lit32_start = ALIGN(jit->lit32_start, 4);
	jit->lit64_start = jit->lit32_start + lit32_size;
	if (lit64_size)
		jit->lit64_start = ALIGN(jit->lit64_start, 8);
	jit->size = jit->lit64_start + lit64_size;
	jit->size_prg = jit->prg;

	if (WARN_ON_ONCE(fp->aux->extable &&
			 jit->excnt != fp->aux->num_exentries))
		/* Verifier bug - mismatch in the number of entries. */
		return -1;

	return 0;
}

bool bpf_jit_needs_zext(void)
{
	return true;
}

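/*
 * JIT state kept in fp->aux->jit_data between the initial compilation and
 * the extra pass that the BPF core runs for programs with subprograms
 * (fp->is_func).
 */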
struct s390_jit_data {
	struct bpf_binary_header *header;
	struct bpf_jit ctx;
	int pass;
};

static struct bpf_binary_header *bpf_jit_alloc(struct bpf_jit *jit,
					       struct bpf_prog *fp)
{
	struct bpf_binary_header *header;
	u32 extable_size;
	u32 code_size;

	/* We need two entries per probing insn. */
	fp->aux->num_exentries *= 2;

	code_size = roundup(jit->size,
			    __alignof__(struct exception_table_entry));
	extable_size = fp->aux->num_exentries *
		sizeof(struct exception_table_entry);
	header = bpf_jit_binary_alloc(code_size + extable_size, &jit->prg_buf,
				      8, jit_fill_hole);
	if (!header)
		return NULL;
	fp->aux->extable = (struct exception_table_entry *)
		(jit->prg_buf + code_size);
	return header;
}

/*
 * Compile eBPF program "fp"
 */
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
	u32 stack_depth = round_up(fp->aux->stack_depth, 8);
	struct bpf_prog *tmp, *orig_fp = fp;
	struct bpf_binary_header *header;
	struct s390_jit_data *jit_data;
	bool tmp_blinded = false;
	bool extra_pass = false;
	struct bpf_jit jit;
	int pass;

	if (!fp->jit_requested)
		return orig_fp;

	tmp = bpf_jit_blind_constants(fp);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_fp;
	if (tmp != fp) {
		tmp_blinded = true;
		fp = tmp;
	}

	jit_data = fp->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			fp = orig_fp;
			goto out;
		}
		fp->aux->jit_data = jit_data;
	}
	if (jit_data->ctx.addrs) {
		jit = jit_data->ctx;
		header = jit_data->header;
		extra_pass = true;
		pass = jit_data->pass + 1;
		goto skip_init_ctx;
	}

	memset(&jit, 0, sizeof(jit));
	jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
	if (jit.addrs == NULL) {
		fp = orig_fp;
		goto free_addrs;
	}
	/*
	 * Three initial passes:
	 *   - 1/2: Determine clobbered registers
	 *   - 3:   Calculate program size and addrs array
	 */
	for (pass = 1; pass <= 3; pass++) {
		if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
			fp = orig_fp;
			goto free_addrs;
		}
	}
	/*
	 * Final pass: Allocate and generate program
	 */
	header = bpf_jit_alloc(&jit, fp);
	if (!header) {
		fp = orig_fp;
		goto free_addrs;
	}
skip_init_ctx:
	if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
		bpf_jit_binary_free(header);
		fp = orig_fp;
		goto free_addrs;
	}
	if (bpf_jit_enable > 1) {
		bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf);
		print_fn_code(jit.prg_buf, jit.size_prg);
	}
	if (!fp->is_func || extra_pass) {
		if (bpf_jit_binary_lock_ro(header)) {
			bpf_jit_binary_free(header);
			fp = orig_fp;
			goto free_addrs;
		}
	} else {
		jit_data->header = header;
		jit_data->ctx = jit;
		jit_data->pass = pass;
	}
	fp->bpf_func = (void *) jit.prg_buf;
	fp->jited = 1;
	fp->jited_len = jit.size;

	if (!fp->is_func || extra_pass) {
		bpf_prog_fill_jited_linfo(fp, jit.addrs + 1);
free_addrs:
		kvfree(jit.addrs);
		kfree(jit_data);
		fp->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(fp, fp == orig_fp ?
					   tmp : orig_fp);
	return fp;
}

bool bpf_jit_supports_kfunc_call(void)
{
	return true;
}

bool bpf_jit_supports_far_kfunc_call(void)
{
	return true;
}

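/*
 * Patch the "brcl" at "ip": "brcl 0,target" acts as a nop, while
 * "brcl 0xf,target" branches unconditionally. Unless the branch already
 * points directly at its destination, it goes through a small PLT emitted
 * next to the program (see bpf_jit_plt()), whose target this function
 * rewrites.
 */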
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *old_addr, void *new_addr)
{
	struct bpf_plt expected_plt, current_plt, new_plt, *plt;
	struct {
		u16 opc;
		s32 disp;
	} __packed insn;
	char *ret;
	int err;

	/* Verify the branch to be patched. */
	err = copy_from_kernel_nofault(&insn, ip, sizeof(insn));
	if (err < 0)
		return err;
	if (insn.opc != (0xc004 | (old_addr ? 0xf0 : 0)))
		return -EINVAL;

	if (t == BPF_MOD_JUMP &&
	    insn.disp == ((char *)new_addr - (char *)ip) >> 1) {
		/*
		 * The branch already points to the destination,
		 * there is no PLT.
		 */
	} else {
		/* Verify the PLT. */
		plt = ip + (insn.disp << 1);
		err = copy_from_kernel_nofault(&current_plt, plt,
					       sizeof(current_plt));
		if (err < 0)
			return err;
		ret = (char *)ip + 6;
		bpf_jit_plt(&expected_plt, ret, old_addr);
		if (memcmp(&current_plt, &expected_plt, sizeof(current_plt)))
			return -EINVAL;
		/* Adjust the call address. */
		bpf_jit_plt(&new_plt, ret, new_addr);
		s390_kernel_write(&plt->target, &new_plt.target,
				  sizeof(void *));
	}

	/* Adjust the mask of the branch. */
	insn.opc = 0xc004 | (new_addr ? 0xf0 : 0);
	s390_kernel_write((char *)ip + 1, (char *)&insn.opc + 1, 1);

	/* Make the new code visible to the other CPUs. */
	text_poke_sync_lock();

	return 0;
}

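/*
 * Trampoline generation state; embeds struct bpf_jit so that the EMIT*
 * helpers can be reused, plus the offsets of the individual items in the
 * trampoline's stack frame.
 */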
struct bpf_tramp_jit {
	struct bpf_jit common;
	int orig_stack_args_off;/* Offset of arguments placed on stack by the
				 * func_addr's original caller
				 */
	int stack_size;		/* Trampoline stack size */
	int backchain_off;	/* Offset of backchain */
	int stack_args_off;	/* Offset of stack arguments for calling
				 * func_addr, has to be at the top
				 */
	int reg_args_off;	/* Offset of register arguments for calling
				 * func_addr
				 */
	int ip_off;		/* For bpf_get_func_ip(), has to be at
				 * (ctx - 16)
				 */
	int arg_cnt_off;	/* For bpf_get_func_arg_cnt(), has to be at
				 * (ctx - 8)
				 */
	int bpf_args_off;	/* Offset of BPF_PROG context, which consists
				 * of BPF arguments followed by return value
				 */
	int retval_off;		/* Offset of return value (see above) */
	int r7_r8_off;		/* Offset of saved %r7 and %r8, which are used
				 * for __bpf_prog_enter() return value and
				 * func_addr respectively
				 */
	int run_ctx_off;	/* Offset of struct bpf_tramp_run_ctx */
	int tccnt_off;		/* Offset of saved tailcall counter */
	int r14_off;		/* Offset of saved %r14, has to be at the
				 * bottom */
	int do_fexit;		/* do_fexit: label */
};

static void load_imm64(struct bpf_jit *jit, int dst_reg, u64 val)
{
	/* llihf %dst_reg,val_hi */
	EMIT6_IMM(0xc00e0000, dst_reg, (val >> 32));
	/* oilf %dst_reg,val_lo */
	EMIT6_IMM(0xc00d0000, dst_reg, val);
}

static int invoke_bpf_prog(struct bpf_tramp_jit *tjit,
			   const struct btf_func_model *m,
			   struct bpf_tramp_link *tlink, bool save_ret)
{
	struct bpf_jit *jit = &tjit->common;
	int cookie_off = tjit->run_ctx_off +
		offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
	struct bpf_prog *p = tlink->link.prog;
	int patch;

	/*
	 * run_ctx.cookie = tlink->cookie;
	 */

	/* %r0 = tlink->cookie */
	load_imm64(jit, REG_W0, tlink->cookie);
	/* stg %r0,cookie_off(%r15) */
	EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, REG_0, REG_15, cookie_off);

	/*
	 * if ((start = __bpf_prog_enter(p, &run_ctx)) == 0)
	 *	goto skip;
	 */

	/* %r1 = __bpf_prog_enter */
	load_imm64(jit, REG_1, (u64)bpf_trampoline_enter(p));
	/* %r2 = p */
	load_imm64(jit, REG_2, (u64)p);
	/* la %r3,run_ctx_off(%r15) */
	EMIT4_DISP(0x41000000, REG_3, REG_15, tjit->run_ctx_off);
	/* %r1() */
	call_r1(jit);
	/* ltgr %r7,%r2 */
	EMIT4(0xb9020000, REG_7, REG_2);
	/* brcl 8,skip */
	patch = jit->prg;
	EMIT6_PCREL_RILC(0xc0040000, 8, 0);

	/*
	 * retval = bpf_func(args, p->insnsi);
	 */

	/* %r1 = p->bpf_func */
	load_imm64(jit, REG_1, (u64)p->bpf_func);
	/* la %r2,bpf_args_off(%r15) */
	EMIT4_DISP(0x41000000, REG_2, REG_15, tjit->bpf_args_off);
	/* %r3 = p->insnsi */
	if (!p->jited)
		load_imm64(jit, REG_3, (u64)p->insnsi);
	/* %r1() */
	call_r1(jit);
	/* stg %r2,retval_off(%r15) */
	if (save_ret) {
		if (sign_extend(jit, REG_2, m->ret_size, m->ret_flags))
			return -1;
		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_2, REG_0, REG_15,
			      tjit->retval_off);
	}

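	/*
	 * Back-patch the "brcl 8,skip" emitted above: its 32-bit halfword
	 * offset lives 2 bytes into the 6-byte instruction and is only known
	 * now that the skip: target has been reached.
	 */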
	/* skip: */
	if (jit->prg_buf)
		*(u32 *)&jit->prg_buf[patch + 2] = (jit->prg - patch) >> 1;

	/*
	 * __bpf_prog_exit(p, start, &run_ctx);
	 */

	/* %r1 = __bpf_prog_exit */
	load_imm64(jit, REG_1, (u64)bpf_trampoline_exit(p));
	/* %r2 = p */
	load_imm64(jit, REG_2, (u64)p);
	/* lgr %r3,%r7 */
	EMIT4(0xb9040000, REG_3, REG_7);
	/* la %r4,run_ctx_off(%r15) */
	EMIT4_DISP(0x41000000, REG_4, REG_15, tjit->run_ctx_off);
	/* %r1() */
	call_r1(jit);

	return 0;
}

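/*
 * Bump-allocate "size" bytes in the trampoline's stack frame and return
 * their offset; the accumulated total is later consumed by the
 * "aghi %r15,-stack_size" that creates the frame.
 */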
static int alloc_stack(struct bpf_tramp_jit *tjit, size_t size)
{
	int stack_offset = tjit->stack_size;

	tjit->stack_size += size;
	return stack_offset;
}

/* ABI uses %r2 - %r6 for parameter passing. */
#define MAX_NR_REG_ARGS 5

/* The "L" field of the "mvc" instruction is 8 bits. */
#define MAX_MVC_SIZE 256
#define MAX_NR_STACK_ARGS (MAX_MVC_SIZE / sizeof(u64))

/* -mfentry generates a 6-byte nop on s390x. */
#define S390X_PATCH_SIZE 6

static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
					 struct bpf_tramp_jit *tjit,
					 const struct btf_func_model *m,
					 u32 flags,
					 struct bpf_tramp_links *tlinks,
					 void *func_addr)
{
	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
	int nr_bpf_args, nr_reg_args, nr_stack_args;
	struct bpf_jit *jit = &tjit->common;
	int arg, bpf_arg_off;
	int i, j;

	/* Support as many stack arguments as "mvc" instruction can handle. */
	nr_reg_args = min_t(int, m->nr_args, MAX_NR_REG_ARGS);
	nr_stack_args = m->nr_args - nr_reg_args;
	if (nr_stack_args > MAX_NR_STACK_ARGS)
		return -ENOTSUPP;

	/* Return to %r14, since func_addr and %r0 are not available. */
	if ((!func_addr && !(flags & BPF_TRAMP_F_ORIG_STACK)) ||
	    (flags & BPF_TRAMP_F_INDIRECT))
		flags |= BPF_TRAMP_F_SKIP_FRAME;

	/*
	 * Compute how many arguments we need to pass to BPF programs.
	 * The BPF ABI mirrors that of x86_64: arguments that are 16 bytes or
	 * smaller are packed into 1 or 2 registers; larger arguments are
	 * passed via pointers.
	 * In the s390x ABI, arguments that are 8 bytes or smaller are packed
	 * into a register; larger arguments are passed via pointers.
	 * We need to deal with this difference.
	 */
	nr_bpf_args = 0;
	for (i = 0; i < m->nr_args; i++) {
		if (m->arg_size[i] <= 8)
			nr_bpf_args += 1;
		else if (m->arg_size[i] <= 16)
			nr_bpf_args += 2;
		else
			return -ENOTSUPP;
	}

	/*
	 * Calculate the stack layout.
	 */

	/*
	 * Allocate STACK_FRAME_OVERHEAD bytes for the callees. As the s390x
	 * ABI requires, put our backchain at the end of the allocated memory.
	 */
	tjit->stack_size = STACK_FRAME_OVERHEAD;
	tjit->backchain_off = tjit->stack_size - sizeof(u64);
	tjit->stack_args_off = alloc_stack(tjit, nr_stack_args * sizeof(u64));
	tjit->reg_args_off = alloc_stack(tjit, nr_reg_args * sizeof(u64));
	tjit->ip_off = alloc_stack(tjit, sizeof(u64));
	tjit->arg_cnt_off = alloc_stack(tjit, sizeof(u64));
	tjit->bpf_args_off = alloc_stack(tjit, nr_bpf_args * sizeof(u64));
	tjit->retval_off = alloc_stack(tjit, sizeof(u64));
	tjit->r7_r8_off = alloc_stack(tjit, 2 * sizeof(u64));
	tjit->run_ctx_off = alloc_stack(tjit,
					sizeof(struct bpf_tramp_run_ctx));
	tjit->tccnt_off = alloc_stack(tjit, sizeof(u64));
	tjit->r14_off = alloc_stack(tjit, sizeof(u64) * 2);
	/*
	 * In accordance with the s390x ABI, the caller has allocated
	 * STACK_FRAME_OVERHEAD bytes for us. 8 of them contain the caller's
	 * backchain, and the rest we can use.
	 */
	tjit->stack_size -= STACK_FRAME_OVERHEAD - sizeof(u64);
	tjit->orig_stack_args_off = tjit->stack_size + STACK_FRAME_OVERHEAD;

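	/*
	 * Resulting frame, a sketch derived from the alloc_stack() calls
	 * above, listed from the highest offset (frame bottom) to the lowest:
	 *
	 *   orig_stack_args | stack args of func_addr's original caller
	 *   ----------------+-- bottom of the caller's frame -------------
	 *   r14             | saved %r14
	 *   tccnt           | saved tail-call counter
	 *   run_ctx         | struct bpf_tramp_run_ctx
	 *   r7_r8           | saved %r7 and %r8
	 *   retval          | return value
	 *   bpf_args        | BPF_PROG context (= ctx)
	 *   arg_cnt         | for bpf_get_func_arg_cnt() (ctx - 8)
	 *   ip              | for bpf_get_func_ip() (ctx - 16)
	 *   reg_args        | register args for calling func_addr
	 *   stack_args      | stack args for calling func_addr
	 *   backchain       | at the end of the STACK_FRAME_OVERHEAD area
	 */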
	/* lgr %r1,%r15 */
	EMIT4(0xb9040000, REG_1, REG_15);
	/* aghi %r15,-stack_size */
	EMIT4_IMM(0xa70b0000, REG_15, -tjit->stack_size);
	/* stg %r1,backchain_off(%r15) */
	EMIT6_DISP_LH(0xe3000000, 0x0024, REG_1, REG_0, REG_15,
		      tjit->backchain_off);
	/* mvc tccnt_off(4,%r15),stack_size+STK_OFF_TCCNT(%r15) */
	_EMIT6(0xd203f000 | tjit->tccnt_off,
	       0xf000 | (tjit->stack_size + STK_OFF_TCCNT));
	/* stmg %r2,%rN,fwd_reg_args_off(%r15) */
	if (nr_reg_args)
		EMIT6_DISP_LH(0xeb000000, 0x0024, REG_2,
			      REG_2 + (nr_reg_args - 1), REG_15,
			      tjit->reg_args_off);
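	/*
	 * Flatten the arguments into the BPF ctx area: arguments of up to
	 * 8 bytes are copied as-is, while 9..16-byte arguments (passed by
	 * reference in the s390x ABI) are dereferenced and copied as two
	 * 8-byte BPF arguments.
	 */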
	for (i = 0, j = 0; i < m->nr_args; i++) {
		if (i < MAX_NR_REG_ARGS)
			arg = REG_2 + i;
		else
			arg = tjit->orig_stack_args_off +
			      (i - MAX_NR_REG_ARGS) * sizeof(u64);
		bpf_arg_off = tjit->bpf_args_off + j * sizeof(u64);
		if (m->arg_size[i] <= 8) {
			if (i < MAX_NR_REG_ARGS)
				/* stg %arg,bpf_arg_off(%r15) */
				EMIT6_DISP_LH(0xe3000000, 0x0024, arg,
					      REG_0, REG_15, bpf_arg_off);
			else
				/* mvc bpf_arg_off(8,%r15),arg(%r15) */
				_EMIT6(0xd207f000 | bpf_arg_off,
				       0xf000 | arg);
			j += 1;
		} else {
			if (i < MAX_NR_REG_ARGS) {
				/* mvc bpf_arg_off(16,%r15),0(%arg) */
				_EMIT6(0xd20ff000 | bpf_arg_off,
				       reg2hex[arg] << 12);
			} else {
				/* lg %r1,arg(%r15) */
				EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_0,
					      REG_15, arg);
				/* mvc bpf_arg_off(16,%r15),0(%r1) */
				_EMIT6(0xd20ff000 | bpf_arg_off, 0x1000);
			}
			j += 2;
		}
	}
	/* stmg %r7,%r8,r7_r8_off(%r15) */
	EMIT6_DISP_LH(0xeb000000, 0x0024, REG_7, REG_8, REG_15,
		      tjit->r7_r8_off);
	/* stg %r14,r14_off(%r15) */
	EMIT6_DISP_LH(0xe3000000, 0x0024, REG_14, REG_0, REG_15, tjit->r14_off);

	if (flags & BPF_TRAMP_F_ORIG_STACK) {
		/*
		 * The ftrace trampoline puts the return address (which is the
		 * address of the original function + S390X_PATCH_SIZE) into
		 * %r0; see ftrace_shared_hotpatch_trampoline_br and
		 * ftrace_init_nop() for details.
		 */

		/* lgr %r8,%r0 */
		EMIT4(0xb9040000, REG_8, REG_0);
	} else {
		/* %r8 = func_addr + S390X_PATCH_SIZE */
		load_imm64(jit, REG_8, (u64)func_addr + S390X_PATCH_SIZE);
	}

	/*
	 * ip = func_addr;
	 * arg_cnt = m->nr_args;
	 */

	if (flags & BPF_TRAMP_F_IP_ARG) {
		/* %r0 = func_addr */
		load_imm64(jit, REG_0, (u64)func_addr);
		/* stg %r0,ip_off(%r15) */
		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_0, REG_0, REG_15,
			      tjit->ip_off);
	}
	/* lghi %r0,nr_bpf_args */
	EMIT4_IMM(0xa7090000, REG_0, nr_bpf_args);
	/* stg %r0,arg_cnt_off(%r15) */
	EMIT6_DISP_LH(0xe3000000, 0x0024, REG_0, REG_0, REG_15,
		      tjit->arg_cnt_off);

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		/*
		 * __bpf_tramp_enter(im);
		 */

		/* %r1 = __bpf_tramp_enter */
		load_imm64(jit, REG_1, (u64)__bpf_tramp_enter);
		/* %r2 = im */
		load_imm64(jit, REG_2, (u64)im);
		/* %r1() */
		call_r1(jit);
	}

	for (i = 0; i < fentry->nr_links; i++)
		if (invoke_bpf_prog(tjit, m, fentry->links[i],
				    flags & BPF_TRAMP_F_RET_FENTRY_RET))
			return -EINVAL;

	if (fmod_ret->nr_links) {
		/*
		 * retval = 0;
		 */

		/* xc retval_off(8,%r15),retval_off(%r15) */
		_EMIT6(0xd707f000 | tjit->retval_off,
		       0xf000 | tjit->retval_off);

		for (i = 0; i < fmod_ret->nr_links; i++) {
			if (invoke_bpf_prog(tjit, m, fmod_ret->links[i], true))
				return -EINVAL;

			/*
			 * if (retval)
			 *	goto do_fexit;
			 */

			/* ltg %r0,retval_off(%r15) */
			EMIT6_DISP_LH(0xe3000000, 0x0002, REG_0, REG_0, REG_15,
				      tjit->retval_off);
			/* brcl 7,do_fexit */
			EMIT6_PCREL_RILC(0xc0040000, 7, tjit->do_fexit);
		}
	}

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		/*
		 * retval = func_addr(args);
		 */

		/* lmg %r2,%rN,reg_args_off(%r15) */
		if (nr_reg_args)
			EMIT6_DISP_LH(0xeb000000, 0x0004, REG_2,
				      REG_2 + (nr_reg_args - 1), REG_15,
				      tjit->reg_args_off);
		/* mvc stack_args_off(N,%r15),orig_stack_args_off(%r15) */
		if (nr_stack_args)
			_EMIT6(0xd200f000 |
				       (nr_stack_args * sizeof(u64) - 1) << 16 |
				       tjit->stack_args_off,
			       0xf000 | tjit->orig_stack_args_off);
		/* mvc STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */
		_EMIT6(0xd203f000 | STK_OFF_TCCNT, 0xf000 | tjit->tccnt_off);
		/* lgr %r1,%r8 */
		EMIT4(0xb9040000, REG_1, REG_8);
		/* %r1() */
		call_r1(jit);
		/* stg %r2,retval_off(%r15) */
		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_2, REG_0, REG_15,
			      tjit->retval_off);

		im->ip_after_call = jit->prg_buf + jit->prg;

		/*
		 * The following nop will be patched by bpf_tramp_image_put().
		 */

		/* brcl 0,im->ip_epilogue */
		EMIT6_PCREL_RILC(0xc0040000, 0, (u64)im->ip_epilogue);
	}

	/* do_fexit: */
	tjit->do_fexit = jit->prg;
	for (i = 0; i < fexit->nr_links; i++)
		if (invoke_bpf_prog(tjit, m, fexit->links[i], false))
			return -EINVAL;

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		im->ip_epilogue = jit->prg_buf + jit->prg;

		/*
		 * __bpf_tramp_exit(im);
		 */

		/* %r1 = __bpf_tramp_exit */
		load_imm64(jit, REG_1, (u64)__bpf_tramp_exit);
		/* %r2 = im */
		load_imm64(jit, REG_2, (u64)im);
		/* %r1() */
		call_r1(jit);
	}

	/* lmg %r2,%rN,reg_args_off(%r15) */
	if ((flags & BPF_TRAMP_F_RESTORE_REGS) && nr_reg_args)
		EMIT6_DISP_LH(0xeb000000, 0x0004, REG_2,
			      REG_2 + (nr_reg_args - 1), REG_15,
			      tjit->reg_args_off);
	/* lgr %r1,%r8 */
	if (!(flags & BPF_TRAMP_F_SKIP_FRAME))
		EMIT4(0xb9040000, REG_1, REG_8);
	/* lmg %r7,%r8,r7_r8_off(%r15) */
	EMIT6_DISP_LH(0xeb000000, 0x0004, REG_7, REG_8, REG_15,
		      tjit->r7_r8_off);
	/* lg %r14,r14_off(%r15) */
	EMIT6_DISP_LH(0xe3000000, 0x0004, REG_14, REG_0, REG_15, tjit->r14_off);
	/* lg %r2,retval_off(%r15) */
	if (flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET))
		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_2, REG_0, REG_15,
			      tjit->retval_off);
	/* mvc stack_size+STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */
	_EMIT6(0xd203f000 | (tjit->stack_size + STK_OFF_TCCNT),
	       0xf000 | tjit->tccnt_off);
	/* aghi %r15,stack_size */
	EMIT4_IMM(0xa70b0000, REG_15, tjit->stack_size);
	/* Emit an expoline for the following indirect jump. */
	if (nospec_uses_trampoline())
		emit_expoline(jit);
	if (flags & BPF_TRAMP_F_SKIP_FRAME)
		/* br %r14 */
		_EMIT2(0x07fe);
	else
		/* br %r1 */
		_EMIT2(0x07f1);

	emit_r1_thunk(jit);

	return 0;
}

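/*
 * Size the trampoline with a dry pass: tjit is zeroed, so common.prg_buf
 * stays NULL and the emit helpers only advance common.prg without writing
 * any code (see the "if (jit->prg_buf)" pattern above).
 */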
int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
			     struct bpf_tramp_links *tlinks, void *orig_call)
{
	struct bpf_tramp_image im;
	struct bpf_tramp_jit tjit;
	int ret;

	memset(&tjit, 0, sizeof(tjit));

	ret = __arch_prepare_bpf_trampoline(&im, &tjit, m, flags,
					    tlinks, orig_call);

	return ret < 0 ? ret : tjit.common.prg;
}

int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
				void *image_end, const struct btf_func_model *m,
				u32 flags, struct bpf_tramp_links *tlinks,
				void *func_addr)
{
	struct bpf_tramp_jit tjit;
	int ret;

	/* Compute offsets, check whether the code fits. */
	memset(&tjit, 0, sizeof(tjit));
	ret = __arch_prepare_bpf_trampoline(im, &tjit, m, flags,
					    tlinks, func_addr);

	if (ret < 0)
		return ret;
	if (tjit.common.prg > (char *)image_end - (char *)image)
		/*
		 * Use the same error code as for exceeding
		 * BPF_MAX_TRAMP_LINKS.
		 */
		return -E2BIG;

	tjit.common.prg = 0;
	tjit.common.prg_buf = image;
	ret = __arch_prepare_bpf_trampoline(im, &tjit, m, flags,
					    tlinks, func_addr);

	return ret < 0 ? ret : tjit.common.prg;
}

bool bpf_jit_supports_subprog_tailcalls(void)
{
	return true;
}