// SPDX-License-Identifier: GPL-2.0-only
/*
 * bpf_jit_comp64.c: eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>
#include <asm/security_features.h>

#include "bpf_jit.h"

/*
 * Stack layout:
 * Ensure the top half (up to local_tmp_var) stays consistent
 * with our redzone usage.
 *
 *		[	prev sp		] <-------------
 *		[   nv gpr save area	] 5*8		|
 *		[    tail_call_cnt	] 8		|
 *		[    local_tmp_var	] 16		|
 * fp (r31) -->	[   ebpf stack space	] up to 512	|
 *		[     frame header	] 32/112	|
 * sp (r1) --->	[    stack pointer	] --------------
 */

/* for non-volatile GPRs used for BPF_REG_6 to BPF_REG_10 */
#define BPF_PPC_STACK_SAVE	(5*8)
/* for bpf JIT code internal usage */
#define BPF_PPC_STACK_LOCALS	24
/* stack frame excluding BPF stack, ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
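
/*
 * Worked example (a sketch, assuming the ELFv2 ABI where
 * STACK_FRAME_MIN_SIZE is 32 bytes): BPF_PPC_STACKFRAME = 32 + 24 + 40 =
 * 96 bytes, which is quadword (16-byte) aligned as required. With the
 * ELFv1 112-byte frame header it comes to 176 bytes, also aligned.
 */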

/* BPF register usage */
#define TMP_REG_1	(MAX_BPF_JIT_REG + 0)
#define TMP_REG_2	(MAX_BPF_JIT_REG + 1)

/* BPF to ppc register mappings */
void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
{
	/* function return value */
	ctx->b2p[BPF_REG_0] = _R8;
	/* function arguments */
	ctx->b2p[BPF_REG_1] = _R3;
	ctx->b2p[BPF_REG_2] = _R4;
	ctx->b2p[BPF_REG_3] = _R5;
	ctx->b2p[BPF_REG_4] = _R6;
	ctx->b2p[BPF_REG_5] = _R7;
	/* non volatile registers */
	ctx->b2p[BPF_REG_6] = _R27;
	ctx->b2p[BPF_REG_7] = _R28;
	ctx->b2p[BPF_REG_8] = _R29;
	ctx->b2p[BPF_REG_9] = _R30;
	/* frame pointer aka BPF_REG_10 */
	ctx->b2p[BPF_REG_FP] = _R31;
	/* eBPF jit internal registers */
	ctx->b2p[BPF_REG_AX] = _R12;
	ctx->b2p[TMP_REG_1] = _R9;
	ctx->b2p[TMP_REG_2] = _R10;
}
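
/*
 * A sketch-level rationale for the mapping above: under the 64-bit ELF
 * ABIs, r3-r12 are volatile across calls while r27-r31 are callee-saved,
 * so placing BPF_REG_6 to BPF_REG_9 and the frame pointer in r27-r31 lets
 * their values survive helper calls without extra spills.
 */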

/* PPC NVR range -- update this if we ever use NVRs below r27 */
#define BPF_PPC_NVR_MIN		_R27

static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP
	 */
	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
}

/*
 * When not setting up our own stackframe, the redzone usage is:
 *
 *		[	prev sp		] <-------------
 *		[	  ...		] 		|
 * sp (r1) --->	[    stack pointer	] --------------
 *		[   nv gpr save area	] 5*8
 *		[    tail_call_cnt	] 8
 *		[    local_tmp_var	] 16
 *		[   unused red zone	] 208 bytes protected
 */
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
	if (bpf_has_stack_frame(ctx))
		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
	else
		return -(BPF_PPC_STACK_SAVE + 24);
}

static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
	return bpf_jit_stack_local(ctx) + 16;
}
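
/*
 * Worked example (illustrative): without a stack frame, locals sit in the
 * redzone at r1 - (40 + 24) = r1 - 64, and tail_call_cnt at
 * r1 - 64 + 16 = r1 - 48 -- matching the prologue's store of tail_call_cnt
 * at -(BPF_PPC_STACK_SAVE + 8), with the NVR save area occupying the top
 * 40 bytes of the redzone.
 */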

static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
		return (bpf_has_stack_frame(ctx) ?
			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
				- (8 * (32 - reg));

	pr_err("BPF JIT is asking about unknown registers");
	BUG();
}
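
/*
 * Example (illustrative numbers, ELFv2): with a stack frame and
 * ctx->stack_size = 64, r27 is saved at 96 + 64 - 8 * (32 - 27) = 120 and
 * r31 at 152 -- the five NVR slots fill the top 40 bytes of the 160-byte
 * frame, right below the previous stack pointer, as in the layout above.
 */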

void bpf_jit_realloc_regs(struct codegen_context *ctx)
{
}

void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
		EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));

	/*
	 * Initialize tail_call_cnt if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
	 */
	if (ctx->seen & SEEN_TAILCALL) {
		EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
		/* this goes in the redzone */
		EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_STACK_SAVE + 8)));
	} else {
		EMIT(PPC_RAW_NOP());
		EMIT(PPC_RAW_NOP());
	}

	if (bpf_has_stack_frame(ctx)) {
		/*
		 * We need a stack frame, but we don't necessarily need to
		 * save/restore LR unless we call other functions
		 */
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_RAW_MFLR(_R0));
			EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF));
		}

		EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
	}

	/*
	 * Back up non-volatile regs -- BPF registers 6-10
	 * If we haven't created our own stack frame, we save these
	 * in the protected zone below the previous stack frame
	 */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
			EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
		EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
				STACK_FRAME_MIN_SIZE + ctx->stack_size));
}
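
/*
 * A sketch of the prologue emitted above for a program that calls helpers,
 * does tail calls and uses its stack (illustrative: ELFv2,
 * ctx->stack_size = 64):
 *
 *	ld	r2,kernel_toc(r13)	; TOC load (ELFv2 only)
 *	li	r9,0			; tail_call_cnt = 0 ...
 *	std	r9,-48(r1)		;   ... stored in the redzone
 *	mflr	r0
 *	std	r0,16(r1)		; save LR in the caller's frame
 *	stdu	r1,-160(r1)		; 96-byte frame + 64-byte BPF stack
 *	std	r27,120(r1)		; for each NVR actually used
 *	...
 *	addi	r31,r1,96		; BPF frame pointer
 */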

static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
			EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx)) {
		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
			EMIT(PPC_RAW_MTLR(_R0));
		}
	}
}

void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	bpf_jit_emit_common_epilogue(image, ctx);

	/* Move result to r3 */
	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));

	EMIT(PPC_RAW_BLR());
}

static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u64 func)
{
	unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
	long reladdr;

	if (WARN_ON_ONCE(!core_kernel_text(func_addr)))
		return -EINVAL;

	reladdr = func_addr - kernel_toc_addr();
	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
		pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func);
		return -ERANGE;
	}

	EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
	EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
	EMIT(PPC_RAW_MTCTR(_R12));
	EMIT(PPC_RAW_BCTRL());

	return 0;
}
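
/*
 * Worked example of the addis/addi pair above (illustrative value):
 * for reladdr = 0x12348765, PPC_LO is the low 16 bits 0x8765, which addi
 * sign-extends to -0x789b, so PPC_HA compensates by rounding the high half
 * up to 0x1235. Then (0x1235 << 16) - 0x789b = 0x12348765 as required.
 */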

int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
{
	unsigned int i, ctx_idx = ctx->idx;

	if (WARN_ON_ONCE(func && is_module_text_address(func)))
		return -EINVAL;

	/* skip past descriptor if elf v1 */
	func += FUNCTION_DESCR_SIZE;

	/* Load function address into r12 */
	PPC_LI64(_R12, func);

	/* For bpf-to-bpf function calls, the callee's address is unknown
	 * until the last extra pass. As seen above, we use PPC_LI64() to
	 * load the callee's address, but this may optimize the number of
	 * instructions required based on the nature of the address.
	 *
	 * Since we don't want the number of instructions emitted to change,
	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
	 * we always have a five-instruction sequence, which is the maximum
	 * that PPC_LI64() can emit.
	 */
	for (i = ctx->idx - ctx_idx; i < 5; i++)
		EMIT(PPC_RAW_NOP());

	EMIT(PPC_RAW_MTCTR(_R12));
	EMIT(PPC_RAW_BCTRL());

	return 0;
}
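
/*
 * For reference, the worst-case five-instruction PPC_LI64() sequence for an
 * arbitrary 64-bit address such as 0x1122334455667788 looks like (a sketch):
 *
 *	lis	r12,0x1122
 *	ori	r12,r12,0x3344
 *	sldi	r12,r12,32
 *	oris	r12,r12,0x5566
 *	ori	r12,r12,0x7788
 */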

static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already setup parameters in r3, r4 and r5
	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r4/BPF_REG_2 - pointer to bpf_array
	 * r5/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
	int b2p_index = bpf_to_ppc(BPF_REG_3);
	int bpf_tailcall_prologue_size = 8;
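	/*
	 * The base of 8 bytes skips the two tail_call_cnt init/NOP
	 * instructions (2 * 4 bytes) at the top of the callee's prologue.
	 */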

	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
		bpf_tailcall_prologue_size += 4; /* skip past the toc load */

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LWZ(bpf_to_ppc(TMP_REG_1), b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
	EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
	EMIT(PPC_RAW_CMPLW(b2p_index, bpf_to_ppc(TMP_REG_1)));
	PPC_BCC_SHORT(COND_GE, out);

	/*
	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
	EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
	PPC_BCC_SHORT(COND_GE, out);

	/*
	 * tail_call_cnt++;
	 */
	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
	EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));

	/* prog = array->ptrs[index]; */
	EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
	EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
	PPC_BCC_SHORT(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
			FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
	EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	EMIT(PPC_RAW_BCTR());

	/* out: */
	return 0;
}

/*
 * We spill into the redzone always, even if the bpf program has its own stackframe.
 * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
 */
void bpf_stf_barrier(void);

asm (
"		.global bpf_stf_barrier		;"
"	bpf_stf_barrier:			;"
"		std	21,-64(1)		;"
"		std	22,-56(1)		;"
"		sync				;"
"		ld	21,-64(1)		;"
"		ld	22,-56(1)		;"
"		ori	31,31,0			;"
"		.rept 14			;"
"		b	1f			;"
"	1:					;"
"		.endr				;"
"		blr				;"
);

/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
		       u32 *addrs, int pass)
{
	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i, ret;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
		u32 src_reg = bpf_to_ppc(insn[i].src_reg);
		u32 size = BPF_SIZE(code);
		u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
		u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
		u32 save_reg, ret_reg;
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		bool func_addr_fixed;
		u64 func_addr;
		u64 imm64;
		u32 true_cond;
		u32 tmp_idx;
		int j;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which non-volatile registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
			bpf_set_seen_register(ctx, dst_reg);
		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
			bpf_set_seen_register(ctx, src_reg);

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
			} else {
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_ADD(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm > -32768 && imm <= 32768) {
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
			} else {
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			if (BPF_CLASS(code) == BPF_ALU)
				EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
			else
				EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (imm >= -32768 && imm < 32768)
				EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
			else {
				PPC_LI32(tmp1_reg, imm);
				if (BPF_CLASS(code) == BPF_ALU)
					EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp1_reg));
				else
					EMIT(PPC_RAW_MULD(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			if (BPF_OP(code) == BPF_MOD) {
				EMIT(PPC_RAW_DIVWU(tmp1_reg, dst_reg, src_reg));
				EMIT(PPC_RAW_MULW(tmp1_reg, src_reg, tmp1_reg));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
			} else
				EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			if (BPF_OP(code) == BPF_MOD) {
				EMIT(PPC_RAW_DIVDU(tmp1_reg, dst_reg, src_reg));
				EMIT(PPC_RAW_MULD(tmp1_reg, src_reg, tmp1_reg));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
			} else
				EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (imm == 0)
				return -EINVAL;
			if (imm == 1) {
				if (BPF_OP(code) == BPF_DIV) {
					goto bpf_alu32_trunc;
				} else {
					EMIT(PPC_RAW_LI(dst_reg, 0));
					break;
				}
			}

			PPC_LI32(tmp1_reg, imm);
			switch (BPF_CLASS(code)) {
			case BPF_ALU:
				if (BPF_OP(code) == BPF_MOD) {
					EMIT(PPC_RAW_DIVWU(tmp2_reg, dst_reg, tmp1_reg));
					EMIT(PPC_RAW_MULW(tmp1_reg, tmp1_reg, tmp2_reg));
					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
				} else
					EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, tmp1_reg));
				break;
			case BPF_ALU64:
				if (BPF_OP(code) == BPF_MOD) {
					EMIT(PPC_RAW_DIVDU(tmp2_reg, dst_reg, tmp1_reg));
					EMIT(PPC_RAW_MULD(tmp1_reg, tmp1_reg, tmp2_reg));
					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
				} else
					EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, tmp1_reg));
				break;
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
			goto bpf_alu32_trunc;

		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (!IMM_H(imm))
				EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
			else {
				/* Sign-extended */
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_AND(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
		case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp1_reg));
			} else {
				if (IMM_L(imm))
					EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
				if (IMM_H(imm))
					EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, tmp1_reg));
			} else {
				if (IMM_L(imm))
					EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
				if (IMM_H(imm))
					EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			/* slw clears top 32 bits */
			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
			/* skip zero extension move, but set address map. */
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
			EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			/* with imm 0, we still need to clear top 32 bits */
			EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
			EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
			break;

		/*
		 * MOV
		 */
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			if (imm == 1) {
				/* special mov32 for zext */
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
				break;
			}
			EMIT(PPC_RAW_MR(dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			if (imm < 0)
				goto bpf_alu32_trunc;
			else if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;

bpf_alu32_trunc:
		/* Truncate to 32 bits: rlwinm with mask 0..31 keeps the low
		 * word and clears the upper 32 bits of the 64-bit register.
		 */
		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
			EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
		break;

		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
		case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
			if (BPF_SRC(code) == BPF_FROM_BE)
				goto emit_clear;
#else /* !__BIG_ENDIAN__ */
			if (BPF_SRC(code) == BPF_FROM_LE)
				goto emit_clear;
#endif
			switch (imm) {
			case 16:
				/* Rotate 8 bits left & mask with 0x0000ff00 */
				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 16, 23));
				/* Rotate 8 bits right & insert the low byte */
				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 24, 31));
				/* Move result back to dst_reg */
				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
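				/*
				 * Worked example (illustrative): dst = 0x1234.
				 * rlwinm rotates left by 8 (0x00123400) and
				 * masks bits 16-23, giving 0x3400; rlwimi
				 * rotates left by 24 (i.e. right by 8) and
				 * inserts bits 24-31, the 0x12, so the result
				 * is the byte-swapped 0x3412.
				 */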
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 0, 31));
				/* Rotate 24 bits and insert byte 1 */
				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 0, 7));
				/* Rotate 24 bits and insert byte 3 */
				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 16, 23));
				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
				break;
			case 64:
				/* Store the value to stack and then use byte-reverse loads */
				EMIT(PPC_RAW_STD(dst_reg, _R1, bpf_jit_stack_local(ctx)));
				EMIT(PPC_RAW_ADDI(tmp1_reg, _R1, bpf_jit_stack_local(ctx)));
				if (cpu_has_feature(CPU_FTR_ARCH_206)) {
					EMIT(PPC_RAW_LDBRX(dst_reg, 0, tmp1_reg));
				} else {
					EMIT(PPC_RAW_LWBRX(dst_reg, 0, tmp1_reg));
					if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
						EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
					EMIT(PPC_RAW_LI(tmp2_reg, 4));
					EMIT(PPC_RAW_LWBRX(tmp2_reg, tmp2_reg, tmp1_reg));
					if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
						EMIT(PPC_RAW_SLDI(tmp2_reg, tmp2_reg, 32));
					EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp2_reg));
				}
				break;
			}
			break;

emit_clear:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 64 bits */
				EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
				if (insn_is_zext(&insn[i + 1]))
					addrs[++i] = ctx->idx * 4;
				break;
			case 32:
				if (!fp->aux->verifier_zext)
					/* zero-extend 32 bits into 64 bits */
					EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

		/*
		 * BPF_ST NOSPEC (speculation barrier)
		 */
		case BPF_ST | BPF_NOSPEC:
			if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
					!security_ftr_enabled(SEC_FTR_STF_BARRIER))
				break;

			switch (stf_barrier) {
			case STF_BARRIER_EIEIO:
				EMIT(PPC_RAW_EIEIO() | 0x02000000);
				break;
			case STF_BARRIER_SYNC_ORI:
				EMIT(PPC_RAW_SYNC());
				EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
				EMIT(PPC_RAW_ORI(_R31, _R31, 0));
				break;
			case STF_BARRIER_FALLBACK:
				ctx->seen |= SEEN_FUNC;
				PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
				EMIT(PPC_RAW_MTCTR(_R12));
				EMIT(PPC_RAW_BCTRL());
				break;
			case STF_BARRIER_NONE:
				break;
			}
			break;

		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				EMIT(PPC_RAW_LI(tmp1_reg, imm));
				src_reg = tmp1_reg;
			}
			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				EMIT(PPC_RAW_LI(tmp1_reg, imm));
				src_reg = tmp1_reg;
			}
			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(tmp1_reg, imm);
				src_reg = tmp1_reg;
			}
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(tmp1_reg, imm);
				src_reg = tmp1_reg;
			}
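			/*
			 * std is a DS-form instruction: its displacement must
			 * be a multiple of 4, so fall back to indexed stdx for
			 * unaligned offsets.
			 */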
			if (off % 4) {
				EMIT(PPC_RAW_LI(tmp2_reg, off));
				EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
			} else {
				EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
			}
			break;

		/*
		 * BPF_STX ATOMIC (atomic ops)
		 */
		case BPF_STX | BPF_ATOMIC | BPF_W:
		case BPF_STX | BPF_ATOMIC | BPF_DW:
			save_reg = tmp2_reg;
			ret_reg = src_reg;

			/* Get offset into TMP_REG_1 */
			EMIT(PPC_RAW_LI(tmp1_reg, off));
			tmp_idx = ctx->idx * 4;
			/* load value from memory into TMP_REG_2 */
			if (size == BPF_DW)
				EMIT(PPC_RAW_LDARX(tmp2_reg, tmp1_reg, dst_reg, 0));
			else
				EMIT(PPC_RAW_LWARX(tmp2_reg, tmp1_reg, dst_reg, 0));

			/* Save old value in _R0 */
			if (imm & BPF_FETCH)
				EMIT(PPC_RAW_MR(_R0, tmp2_reg));

			switch (imm) {
			case BPF_ADD:
			case BPF_ADD | BPF_FETCH:
				EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_AND:
			case BPF_AND | BPF_FETCH:
				EMIT(PPC_RAW_AND(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_OR:
			case BPF_OR | BPF_FETCH:
				EMIT(PPC_RAW_OR(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_XOR:
			case BPF_XOR | BPF_FETCH:
				EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_CMPXCHG:
				/*
				 * Return old value in BPF_REG_0 for BPF_CMPXCHG &
				 * in src_reg for other cases.
				 */
				ret_reg = bpf_to_ppc(BPF_REG_0);

				/* Compare with old value in BPF_REG_0 */
				if (size == BPF_DW)
					EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg));
				else
					EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg));
				/* Don't set if different from old value */
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
				fallthrough;
			case BPF_XCHG:
				save_reg = src_reg;
				break;
			default:
				pr_err_ratelimited(
					"eBPF filter atomic op code %02x (@%d) unsupported\n",
					code, i);
				return -EOPNOTSUPP;
			}

			/* store new value */
			if (size == BPF_DW)
				EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg));
			else
				EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg));
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_NE, tmp_idx);

			if (imm & BPF_FETCH) {
				EMIT(PPC_RAW_MR(ret_reg, _R0));
				/*
				 * Skip unnecessary zero-extension for 32-bit cmpxchg.
				 * For context, see commit 39491867ace5.
				 */
				if (size != BPF_DW && imm == BPF_CMPXCHG &&
				    insn_is_zext(&insn[i + 1]))
					addrs[++i] = ctx->idx * 4;
			}
			break;
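
		/*
		 * A sketch of the ll/sc loop emitted above for a 32-bit
		 * BPF_ADD | BPF_FETCH (illustrative register numbers; r9/r10
		 * are the JIT temporaries, r0 holds the old value):
		 *
		 *	li	r9,off
		 * retry:
		 *	lwarx	r10,r9,dst
		 *	mr	r0,r10
		 *	add	r10,r10,src
		 *	stwcx.	r10,r9,dst
		 *	bne-	retry
		 *	mr	src,r0
		 */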

		/*
		 * BPF_LDX
		 */
		/* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		/* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_H:
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		/* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_W:
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		/* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_DW:
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			/*
			 * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
			 * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
			 * load only if addr is kernel address (see is_kernel_addr()), otherwise
			 * set dst_reg=0 and move on.
			 */
			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				EMIT(PPC_RAW_ADDI(tmp1_reg, src_reg, off));
				if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
					PPC_LI64(tmp2_reg, 0x8000000000000000ul);
				else /* BOOK3S_64 */
					PPC_LI64(tmp2_reg, PAGE_OFFSET);
				EMIT(PPC_RAW_CMPLD(tmp1_reg, tmp2_reg));
				PPC_BCC_SHORT(COND_GT, (ctx->idx + 3) * 4);
				EMIT(PPC_RAW_LI(dst_reg, 0));
				/*
				 * Check if 'off' is word aligned for BPF_DW, because
				 * we might generate two instructions.
				 */
				if (BPF_SIZE(code) == BPF_DW && (off & 3))
					PPC_JMP((ctx->idx + 3) * 4);
				else
					PPC_JMP((ctx->idx + 2) * 4);
			}
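
			/*
			 * Sketch of the guard emitted above (r9/r10 are the
			 * temp registers; labels are illustrative):
			 *
			 *	addi	r9,src,off
			 *	li	r10,PAGE_OFFSET	; via PPC_LI64()
			 *	cmpld	r9,r10
			 *	bgt	1f		; kernel address: load
			 *	li	dst,0
			 *	b	2f		; skip over the load(s)
			 * 1:	lbz/lhz/lwz/ld ...
			 * 2:
			 */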

			switch (size) {
			case BPF_B:
				EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
				break;
			case BPF_H:
				EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
				break;
			case BPF_W:
				EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
				break;
			case BPF_DW:
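				/*
				 * ld is a DS-form instruction: its
				 * displacement must be a multiple of 4, so
				 * use indexed ldx for unaligned offsets.
				 */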
				if (off % 4) {
					EMIT(PPC_RAW_LI(tmp1_reg, off));
					EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
				} else {
					EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
				}
				break;
			}

			if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;

			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				ret = bpf_add_extable_entry(fp, image, pass, ctx, ctx->idx - 1,
							    4, dst_reg);
				if (ret)
					return ret;
			}
			break;

		/*
		 * Doubleword load
		 * 16-byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			imm64 = ((u64)(u32) insn[i].imm) |
				    (((u64)(u32) insn[i+1].imm) << 32);
			tmp_idx = ctx->idx;
			PPC_LI64(dst_reg, imm64);
			/* padding to allow full 5 instructions for later patching */
			for (j = ctx->idx - tmp_idx; j < 5; j++)
				EMIT(PPC_RAW_NOP());
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			break;

		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1) {
				ret = bpf_jit_emit_exit_insn(image, ctx, tmp1_reg, exit_addr);
				if (ret)
					return ret;
			}
			/* else fall through to the epilogue */
			break;

		/*
		 * Call kernel helper or bpf function
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;

			ret = bpf_jit_get_func_addr(fp, &insn[i], false,
						    &func_addr, &func_addr_fixed);
			if (ret < 0)
				return ret;

			if (func_addr_fixed)
				ret = bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
			else
				ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);

			if (ret)
				return ret;

			/* move return value from r3 to BPF_REG_0 */
			EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R3));
			break;

		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */

cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
			case BPF_JMP32 | BPF_JGT | BPF_X:
			case BPF_JMP32 | BPF_JLT | BPF_X:
			case BPF_JMP32 | BPF_JGE | BPF_X:
			case BPF_JMP32 | BPF_JLE | BPF_X:
			case BPF_JMP32 | BPF_JEQ | BPF_X:
			case BPF_JMP32 | BPF_JNE | BPF_X:
				/* unsigned comparison */
				if (BPF_CLASS(code) == BPF_JMP32)
					EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				else
					EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
			case BPF_JMP32 | BPF_JSGT | BPF_X:
			case BPF_JMP32 | BPF_JSLT | BPF_X:
			case BPF_JMP32 | BPF_JSGE | BPF_X:
			case BPF_JMP32 | BPF_JSLE | BPF_X:
				/* signed comparison */
				if (BPF_CLASS(code) == BPF_JMP32)
					EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
				else
					EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
			case BPF_JMP32 | BPF_JSET | BPF_X:
				if (BPF_CLASS(code) == BPF_JMP) {
					EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg, src_reg));
				} else {
					EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, src_reg));
					EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg, 0, 0, 31));
				}
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
			case BPF_JMP32 | BPF_JNE | BPF_K:
			case BPF_JMP32 | BPF_JEQ | BPF_K:
			case BPF_JMP32 | BPF_JGT | BPF_K:
			case BPF_JMP32 | BPF_JLT | BPF_K:
			case BPF_JMP32 | BPF_JGE | BPF_K:
			case BPF_JMP32 | BPF_JLE | BPF_K:
			{
				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmpldi
				 */
				if (imm >= 0 && imm < 32768) {
					if (is_jmp32)
						EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
					else
						EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
				} else {
					/* sign-extending load */
					PPC_LI32(tmp1_reg, imm);
					/* ... but unsigned comparison */
					if (is_jmp32)
						EMIT(PPC_RAW_CMPLW(dst_reg, tmp1_reg));
					else
						EMIT(PPC_RAW_CMPLD(dst_reg, tmp1_reg));
				}
				break;
			}
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
			case BPF_JMP32 | BPF_JSGT | BPF_K:
			case BPF_JMP32 | BPF_JSLT | BPF_K:
			case BPF_JMP32 | BPF_JSGE | BPF_K:
			case BPF_JMP32 | BPF_JSLE | BPF_K:
			{
				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpdi
				 */
				if (imm >= -32768 && imm < 32768) {
					if (is_jmp32)
						EMIT(PPC_RAW_CMPWI(dst_reg, imm));
					else
						EMIT(PPC_RAW_CMPDI(dst_reg, imm));
				} else {
					PPC_LI32(tmp1_reg, imm);
					if (is_jmp32)
						EMIT(PPC_RAW_CMPW(dst_reg, tmp1_reg));
					else
						EMIT(PPC_RAW_CMPD(dst_reg, tmp1_reg));
				}
				break;
			}
			case BPF_JMP | BPF_JSET | BPF_K:
			case BPF_JMP32 | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768)
					/* PPC_ANDI is _only/always_ dot-form */
					EMIT(PPC_RAW_ANDI(tmp1_reg, dst_reg, imm));
				else {
					PPC_LI32(tmp1_reg, imm);
					if (BPF_CLASS(code) == BPF_JMP) {
						EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg,
								     tmp1_reg));
					} else {
						EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, tmp1_reg));
						EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg,
									0, 0, 31));
					}
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;

		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			if (ret < 0)
				return ret;
			break;

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
					code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}