xref: /linux/arch/arm64/kernel/traps.c (revision d6fd48ef)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/traps.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bug.h>
#include <linux/context_tracking.h>
#include <linux/signal.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/sizes.h>
#include <linux/syscalls.h>
#include <linux/mm_types.h>
#include <linux/kasan.h>
#include <linux/ubsan.h>
#include <linux/cfi.h>

#include <asm/atomic.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/efi.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/extable.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#include <asm/patching.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
#include <asm/system_misc.h>
#include <asm/sysreg.h>

static bool __kprobes __check_eq(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) != 0;
}

static bool __kprobes __check_ne(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) == 0;
}

static bool __kprobes __check_cs(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_cc(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_mi(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_pl(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_vs(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) != 0;
}

static bool __kprobes __check_vc(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) == 0;
}

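/*
 * The compound checks below avoid branches by shifting one flag into
 * another flag's bit position (PSR_{N,Z,C,V}_BIT are bits 31..28):
 * "pstate >> 1" moves Z into the C position, so clearing C whenever Z
 * is set turns the HI condition ("C set and Z clear") into a single
 * bit test.
 */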
static bool __kprobes __check_hi(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_ls(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) == 0;
}

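/*
 * Similarly, "pstate << 3" moves V into the N position, so the signed
 * comparisons reduce to a test of N ^ V (GE/LT), optionally OR-ed with
 * Z shifted into the same bit position (GT/LE).
 */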
static bool __kprobes __check_ge(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_lt(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_gt(unsigned long pstate)
{
	/* PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/* PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) == 0;
}

static bool __kprobes __check_le(unsigned long pstate)
{
	/* PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/* PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) != 0;
}

static bool __kprobes __check_al(unsigned long pstate)
{
	return true;
}

/*
 * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
 * it behaves identically to 0b1110 ("al").
 */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
	__check_eq, __check_ne, __check_cs, __check_cc,
	__check_mi, __check_pl, __check_vs, __check_vc,
	__check_hi, __check_ls, __check_ge, __check_lt,
	__check_gt, __check_le, __check_al, __check_al
};
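
/*
 * Emulation code indexes this table with the condition nibble of a
 * trapped AArch32 instruction (or its IT state); cp15_cond_valid()
 * below does exactly this:
 *
 *	aarch32_opcode_cond_checks[cond](regs->pstate)
 */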

int show_unhandled_signals = 0;

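/*
 * Print the instruction stream around a kernel fault, for example
 * (illustrative encodings):
 *
 *	Code: aa1e03e9 d503201f f9400260 b4000040 (b940f000)
 *
 * The bracketed word is the instruction at the faulting PC; words that
 * cannot be read are shown as "????????".
 */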
static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
	int i;

	if (user_mode(regs))
		return;

	for (i = -4; i < 1; i++) {
		unsigned int val, bad;

		bad = aarch64_insn_read(&((u32 *)addr)[i], &val);

		if (!bad)
			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
		else
			p += sprintf(p, i == 0 ? "(????????) " : "???????? ");
	}

	printk("%sCode: %s\n", lvl, str);
}

#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#elif defined(CONFIG_PREEMPT_RT)
#define S_PREEMPT " PREEMPT_RT"
#else
#define S_PREEMPT ""
#endif

#define S_SMP " SMP"

static int __die(const char *str, long err, struct pt_regs *regs)
{
	static int die_counter;
	int ret;

	pr_emerg("Internal error: %s: %016lx [#%d]" S_PREEMPT S_SMP "\n",
		 str, err, ++die_counter);

	/* trap and error numbers are mostly meaningless on ARM */
	ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
	if (ret == NOTIFY_STOP)
		return ret;

	print_modules();
	show_regs(regs);

	dump_kernel_instr(KERN_EMERG, regs);

	return ret;
}

static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, long err)
{
	int ret;
	unsigned long flags;

	raw_spin_lock_irqsave(&die_lock, flags);

	oops_enter();

	console_verbose();
	bust_spinlocks(1);
	ret = __die(str, err, regs);

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	oops_exit();

	if (in_interrupt())
		panic("%s: Fatal exception in interrupt", str);
	if (panic_on_oops)
		panic("%s: Fatal exception", str);

	raw_spin_unlock_irqrestore(&die_lock, flags);

	if (ret != NOTIFY_STOP)
		make_task_dead(SIGSEGV);
}

static void arm64_show_signal(int signo, const char *str)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct task_struct *tsk = current;
	unsigned long esr = tsk->thread.fault_code;
	struct pt_regs *regs = task_pt_regs(tsk);

	/* Leave if the signal won't be shown */
	if (!show_unhandled_signals ||
	    !unhandled_signal(tsk, signo) ||
	    !__ratelimit(&rs))
		return;

	pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
	if (esr)
		pr_cont("%s, ESR 0x%016lx, ", esr_get_class_string(esr), esr);

	pr_cont("%s", str);
	print_vma_addr(KERN_CONT " in ", regs->pc);
	pr_cont("\n");
	__show_regs(regs);
}

void arm64_force_sig_fault(int signo, int code, unsigned long far,
			   const char *str)
{
	arm64_show_signal(signo, str);
	if (signo == SIGKILL)
		force_sig(SIGKILL);
	else
		force_sig_fault(signo, code, (void __user *)far);
}

void arm64_force_sig_mceerr(int code, unsigned long far, short lsb,
			    const char *str)
{
	arm64_show_signal(SIGBUS, str);
	force_sig_mceerr(code, (void __user *)far, lsb);
}

void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far,
				       const char *str)
{
	arm64_show_signal(SIGTRAP, str);
	force_sig_ptrace_errno_trap(errno, (void __user *)far);
}

void arm64_notify_die(const char *str, struct pt_regs *regs,
		      int signo, int sicode, unsigned long far,
		      unsigned long err)
{
	if (user_mode(regs)) {
		WARN_ON(regs != current_pt_regs());
		current->thread.fault_address = 0;
		current->thread.fault_code = err;

		arm64_force_sig_fault(signo, sicode, far, str);
	} else {
		die(str, regs, err);
	}
}

#ifdef CONFIG_COMPAT
#define PSTATE_IT_1_0_SHIFT	25
#define PSTATE_IT_1_0_MASK	(0x3 << PSTATE_IT_1_0_SHIFT)
#define PSTATE_IT_7_2_SHIFT	10
#define PSTATE_IT_7_2_MASK	(0x3f << PSTATE_IT_7_2_SHIFT)

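/*
 * The AArch32 IT state is split across two PSTATE fields: IT[1:0] live
 * in bits [26:25] and IT[7:2] in bits [15:10]. The helpers below
 * reassemble the 8-bit ITSTATE value and split it back again.
 */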
static u32 compat_get_it_state(struct pt_regs *regs)
{
	u32 it, pstate = regs->pstate;

	it  = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
	it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;

	return it;
}

static void compat_set_it_state(struct pt_regs *regs, u32 it)
{
	u32 pstate_it;

	pstate_it  = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK;
	pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK;

	regs->pstate &= ~PSR_AA32_IT_MASK;
	regs->pstate |= pstate_it;
}

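/*
 * ITSTATE[7:5] hold the block's base condition and ITSTATE[4:0] its
 * mask; retiring an instruction shifts the mask left by one, and a
 * zero ITSTATE[2:0] marks the last instruction of the block. This
 * mirrors the architectural ITAdvance() pseudocode.
 */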
static void advance_itstate(struct pt_regs *regs)
{
	u32 it;

	/* ARM mode */
	if (!(regs->pstate & PSR_AA32_T_BIT) ||
	    !(regs->pstate & PSR_AA32_IT_MASK))
		return;

	it  = compat_get_it_state(regs);

	/*
	 * If this is the last instruction of the block, wipe the IT
	 * state. Otherwise advance it.
	 */
	if (!(it & 7))
		it = 0;
	else
		it = (it & 0xe0) | ((it << 1) & 0x1f);

	compat_set_it_state(regs, it);
}
#else
static void advance_itstate(struct pt_regs *regs)
{
}
#endif

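/*
 * Advance the PC over an emulated or faulting instruction, mirroring
 * the side effects of architecturally retiring it: pending single-step
 * state is fast-forwarded, the T32 IT state is advanced, and
 * PSTATE.BTYPE is cleared.
 */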
void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
{
	regs->pc += size;

	/*
	 * If we were single stepping, we want to get the step exception after
	 * we return from the trap.
	 */
	if (user_mode(regs))
		user_fastforward_single_step(current);

	if (compat_user_mode(regs))
		advance_itstate(regs);
	else
		regs->pstate &= ~PSR_BTYPE_MASK;
}

static int user_insn_read(struct pt_regs *regs, u32 *insnp)
{
	u32 instr;
	unsigned long pc = instruction_pointer(regs);

	if (compat_thumb_mode(regs)) {
		/* 16-bit Thumb instruction */
		__le16 instr_le;
		if (get_user(instr_le, (__le16 __user *)pc))
			return -EFAULT;
		instr = le16_to_cpu(instr_le);
		if (aarch32_insn_is_wide(instr)) {
			u32 instr2;

			if (get_user(instr_le, (__le16 __user *)(pc + 2)))
				return -EFAULT;
			instr2 = le16_to_cpu(instr_le);
			instr = (instr << 16) | instr2;
		}
	} else {
		/* 32-bit ARM instruction */
		__le32 instr_le;
		if (get_user(instr_le, (__le32 __user *)pc))
			return -EFAULT;
		instr = le32_to_cpu(instr_le);
	}

	*insnp = instr;
	return 0;
}

void force_signal_inject(int signal, int code, unsigned long address, unsigned long err)
{
	const char *desc;
	struct pt_regs *regs = current_pt_regs();

	if (WARN_ON(!user_mode(regs)))
		return;

	switch (signal) {
	case SIGILL:
		desc = "undefined instruction";
		break;
	case SIGSEGV:
		desc = "illegal memory access";
		break;
	default:
		desc = "unknown or unrecoverable error";
		break;
	}

	/* Force signals we don't understand to SIGKILL */
	if (WARN_ON(signal != SIGKILL &&
		    siginfo_layout(signal, code) != SIL_FAULT)) {
		signal = SIGKILL;
	}

	arm64_notify_die(desc, regs, signal, code, address, err);
}

/*
 * Set up process info to signal segmentation fault - called on access error.
 */
void arm64_notify_segfault(unsigned long addr)
{
	int code;

	mmap_read_lock(current->mm);
	if (find_vma(current->mm, untagged_addr(addr)) == NULL)
		code = SEGV_MAPERR;
	else
		code = SEGV_ACCERR;
	mmap_read_unlock(current->mm);

	force_signal_inject(SIGSEGV, code, addr, 0);
}

void do_el0_undef(struct pt_regs *regs, unsigned long esr)
{
	u32 insn;

	/* check for AArch32 breakpoint instructions */
	if (!aarch32_break_handler(regs))
		return;

	if (user_insn_read(regs, &insn))
		goto out_err;

	if (try_emulate_mrs(regs, insn))
		return;

	if (try_emulate_armv8_deprecated(regs, insn))
		return;

out_err:
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}

void do_el1_undef(struct pt_regs *regs, unsigned long esr)
{
	u32 insn;

	if (aarch64_insn_read((void *)regs->pc, &insn))
		goto out_err;

	if (try_emulate_el1_ssbs(regs, insn))
		return;

out_err:
	die("Oops - Undefined instruction", regs, esr);
}

void do_el0_bti(struct pt_regs *regs)
{
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}

void do_el1_bti(struct pt_regs *regs, unsigned long esr)
{
	if (efi_runtime_fixup_exception(regs, "BTI violation")) {
		regs->pstate &= ~PSR_BTYPE_MASK;
		return;
	}
	die("Oops - BTI", regs, esr);
}

void do_el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr);
}

void do_el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	/*
	 * Unexpected FPAC exception in the kernel: kill the task before it
	 * does any more harm.
	 */
	die("Oops - FPAC", regs, esr);
}

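/*
 * Run a cache maintenance instruction on a user address. The exception
 * table entry routes any fault at label 1 to label 2 with the error
 * code written to "res"; label 2 is also the fall-through success
 * path, reached with "res" already zeroed by the mov.
 */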
#define __user_cache_maint(insn, address, res)			\
	if (address >= TASK_SIZE_MAX) {				\
		res = -EFAULT;					\
	} else {						\
		uaccess_ttbr0_enable();				\
		asm volatile (					\
			"1:	" insn ", %1\n"			\
			"	mov	%w0, #0\n"		\
			"2:\n"					\
			_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0)	\
			: "=r" (res)				\
			: "r" (address));			\
		uaccess_ttbr0_disable();			\
	}

static void user_cache_maint_handler(unsigned long esr, struct pt_regs *regs)
{
	unsigned long tagged_address, address;
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
	int ret = 0;

	tagged_address = pt_regs_read_reg(regs, rt);
	address = untagged_addr(tagged_address);

	switch (crm) {
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAU:	/* DC CVAU, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAC:	/* DC CVAC, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVADP:	/* DC CVADP */
		__user_cache_maint("sys 3, c7, c13, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAP:	/* DC CVAP */
		__user_cache_maint("sys 3, c7, c12, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC:	/* DC CIVAC */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_IC_IVAU:	/* IC IVAU */
		__user_cache_maint("ic ivau", address, ret);
		break;
	default:
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
		return;
	}

	if (ret)
		arm64_notify_segfault(tagged_address);
	else
		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void ctr_read_handler(unsigned long esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);

	if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
		/* Hide DIC so that we can trap the unnecessary maintenance... */
		val &= ~BIT(CTR_EL0_DIC_SHIFT);

		/* ... and fake IminLine to reduce the number of traps. */
		val &= ~CTR_EL0_IminLine_MASK;
		val |= (PAGE_SHIFT - 2) & CTR_EL0_IminLine_MASK;
	}

	pt_regs_write_reg(regs, rt, val);

	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntvct_read_handler(unsigned long esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);

	pt_regs_write_reg(regs, rt, arch_timer_read_counter());
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntfrq_read_handler(unsigned long esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);

	pt_regs_write_reg(regs, rt, arch_timer_get_rate());
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void mrs_handler(unsigned long esr, struct pt_regs *regs)
{
	u32 sysreg, rt;

	rt = ESR_ELx_SYS64_ISS_RT(esr);
	sysreg = esr_sys64_to_sysreg(esr);

	if (do_emulate_mrs(regs, sysreg, rt) != 0)
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}

static void wfi_handler(unsigned long esr, struct pt_regs *regs)
{
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

struct sys64_hook {
	unsigned long esr_mask;
	unsigned long esr_val;
	void (*handler)(unsigned long esr, struct pt_regs *regs);
};

static const struct sys64_hook sys64_hooks[] = {
	{
		.esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
		.handler = user_cache_maint_handler,
	},
	{
		/* Trap read access to CTR_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
		.handler = ctr_read_handler,
	},
	{
		/* Trap read access to CNTVCT_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
		.handler = cntvct_read_handler,
	},
	{
		/* Trap read access to CNTVCTSS_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCTSS,
		.handler = cntvct_read_handler,
	},
	{
		/* Trap read access to CNTFRQ_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
		.handler = cntfrq_read_handler,
	},
	{
		/* Trap read access to CPUID registers */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL,
		.handler = mrs_handler,
	},
	{
		/* Trap WFI instructions executed in userspace */
		.esr_mask = ESR_ELx_WFx_MASK,
		.esr_val = ESR_ELx_WFx_WFI_VAL,
		.handler = wfi_handler,
	},
	{},
};

#ifdef CONFIG_COMPAT
static bool cp15_cond_valid(unsigned long esr, struct pt_regs *regs)
{
	int cond;

	/* Only a T32 instruction can trap without CV being set */
	if (!(esr & ESR_ELx_CV)) {
		u32 it;

		it = compat_get_it_state(regs);
		if (!it)
			return true;

		cond = it >> 4;
	} else {
		cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
	}

	return aarch32_opcode_cond_checks[cond](regs->pstate);
}

static void compat_cntfrq_read_handler(unsigned long esr, struct pt_regs *regs)
{
	int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;

	pt_regs_write_reg(regs, reg, arch_timer_get_rate());
	arm64_skip_faulting_instruction(regs, 4);
}

static const struct sys64_hook cp15_32_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
		.handler = compat_cntfrq_read_handler,
	},
	{},
};

static void compat_cntvct_read_handler(unsigned long esr, struct pt_regs *regs)
{
	int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
	int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
	u64 val = arch_timer_read_counter();

	pt_regs_write_reg(regs, rt, lower_32_bits(val));
	pt_regs_write_reg(regs, rt2, upper_32_bits(val));
	arm64_skip_faulting_instruction(regs, 4);
}

static const struct sys64_hook cp15_64_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
		.handler = compat_cntvct_read_handler,
	},
	{
		.esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCTSS,
		.handler = compat_cntvct_read_handler,
	},
	{},
};

void do_el0_cp15(unsigned long esr, struct pt_regs *regs)
{
	const struct sys64_hook *hook, *hook_base;

	if (!cp15_cond_valid(esr, regs)) {
		/*
		 * There is no T16 variant of a CP access, so we
		 * always advance PC by 4 bytes.
		 */
		arm64_skip_faulting_instruction(regs, 4);
		return;
	}

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_CP15_32:
		hook_base = cp15_32_hooks;
		break;
	case ESR_ELx_EC_CP15_64:
		hook_base = cp15_64_hooks;
		break;
	default:
		do_el0_undef(regs, esr);
		return;
	}

	for (hook = hook_base; hook->handler; hook++)
		if ((hook->esr_mask & esr) == hook->esr_val) {
			hook->handler(esr, regs);
			return;
		}

	/*
	 * New cp15 instructions may previously have been undefined at
	 * EL0. Fall back to our usual undefined instruction handler
	 * so that we handle these consistently.
	 */
	do_el0_undef(regs, esr);
}
#endif

void do_el0_sys(unsigned long esr, struct pt_regs *regs)
{
	const struct sys64_hook *hook;

	for (hook = sys64_hooks; hook->handler; hook++)
		if ((hook->esr_mask & esr) == hook->esr_val) {
			hook->handler(esr, regs);
			return;
		}

	/*
	 * New SYS instructions may previously have been undefined at EL0. Fall
	 * back to our usual undefined instruction handler so that we handle
	 * these consistently.
	 */
	do_el0_undef(regs, esr);
}

static const char *esr_class_str[] = {
	[0 ... ESR_ELx_EC_MAX]		= "UNRECOGNIZED EC",
	[ESR_ELx_EC_UNKNOWN]		= "Unknown/Uncategorized",
	[ESR_ELx_EC_WFx]		= "WFI/WFE",
	[ESR_ELx_EC_CP15_32]		= "CP15 MCR/MRC",
	[ESR_ELx_EC_CP15_64]		= "CP15 MCRR/MRRC",
	[ESR_ELx_EC_CP14_MR]		= "CP14 MCR/MRC",
	[ESR_ELx_EC_CP14_LS]		= "CP14 LDC/STC",
	[ESR_ELx_EC_FP_ASIMD]		= "ASIMD",
	[ESR_ELx_EC_CP10_ID]		= "CP10 MRC/VMRS",
	[ESR_ELx_EC_PAC]		= "PAC",
	[ESR_ELx_EC_CP14_64]		= "CP14 MCRR/MRRC",
	[ESR_ELx_EC_BTI]		= "BTI",
	[ESR_ELx_EC_ILL]		= "PSTATE.IL",
	[ESR_ELx_EC_SVC32]		= "SVC (AArch32)",
	[ESR_ELx_EC_HVC32]		= "HVC (AArch32)",
	[ESR_ELx_EC_SMC32]		= "SMC (AArch32)",
	[ESR_ELx_EC_SVC64]		= "SVC (AArch64)",
	[ESR_ELx_EC_HVC64]		= "HVC (AArch64)",
	[ESR_ELx_EC_SMC64]		= "SMC (AArch64)",
	[ESR_ELx_EC_SYS64]		= "MSR/MRS (AArch64)",
	[ESR_ELx_EC_SVE]		= "SVE",
	[ESR_ELx_EC_ERET]		= "ERET/ERETAA/ERETAB",
	[ESR_ELx_EC_FPAC]		= "FPAC",
	[ESR_ELx_EC_SME]		= "SME",
	[ESR_ELx_EC_IMP_DEF]		= "EL3 IMP DEF",
	[ESR_ELx_EC_IABT_LOW]		= "IABT (lower EL)",
	[ESR_ELx_EC_IABT_CUR]		= "IABT (current EL)",
	[ESR_ELx_EC_PC_ALIGN]		= "PC Alignment",
	[ESR_ELx_EC_DABT_LOW]		= "DABT (lower EL)",
	[ESR_ELx_EC_DABT_CUR]		= "DABT (current EL)",
	[ESR_ELx_EC_SP_ALIGN]		= "SP Alignment",
	[ESR_ELx_EC_FP_EXC32]		= "FP (AArch32)",
	[ESR_ELx_EC_FP_EXC64]		= "FP (AArch64)",
	[ESR_ELx_EC_SERROR]		= "SError",
	[ESR_ELx_EC_BREAKPT_LOW]	= "Breakpoint (lower EL)",
	[ESR_ELx_EC_BREAKPT_CUR]	= "Breakpoint (current EL)",
	[ESR_ELx_EC_SOFTSTP_LOW]	= "Software Step (lower EL)",
	[ESR_ELx_EC_SOFTSTP_CUR]	= "Software Step (current EL)",
	[ESR_ELx_EC_WATCHPT_LOW]	= "Watchpoint (lower EL)",
	[ESR_ELx_EC_WATCHPT_CUR]	= "Watchpoint (current EL)",
	[ESR_ELx_EC_BKPT32]		= "BKPT (AArch32)",
	[ESR_ELx_EC_VECTOR32]		= "Vector catch (AArch32)",
	[ESR_ELx_EC_BRK64]		= "BRK (AArch64)",
};

const char *esr_get_class_string(unsigned long esr)
{
	return esr_class_str[ESR_ELx_EC(esr)];
}

/*
 * bad_el0_sync handles unexpected, but potentially recoverable synchronous
 * exceptions taken from EL0.
 */
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr)
{
	unsigned long pc = instruction_pointer(regs);

	current->thread.fault_address = 0;
	current->thread.fault_code = esr;

	arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
			      "Bad EL0 synchronous exception");
}

#ifdef CONFIG_VMAP_STACK

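/*
 * A small per-CPU stack that the EL1 entry code switches to when it
 * detects the stack pointer has left the task stack, giving
 * panic_bad_stack() below room to run.
 */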
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
	__aligned(16);

void panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far)
{
	unsigned long tsk_stk = (unsigned long)current->stack;
	unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);

	console_verbose();
	pr_emerg("Insufficient stack space to handle exception!\n");

	pr_emerg("ESR: 0x%016lx -- %s\n", esr, esr_get_class_string(esr));
	pr_emerg("FAR: 0x%016lx\n", far);

	pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
		 tsk_stk, tsk_stk + THREAD_SIZE);
	pr_emerg("IRQ stack:      [0x%016lx..0x%016lx]\n",
		 irq_stk, irq_stk + IRQ_STACK_SIZE);
	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
		 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

	__show_regs(regs);

	/*
	 * We use nmi_panic to limit the potential for recursive overflows, and
	 * to get a better stack trace.
	 */
	nmi_panic(NULL, "kernel stack overflow");
	cpu_park_loop();
}
#endif

void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr)
{
	console_verbose();

	pr_crit("SError Interrupt on CPU%d, code 0x%016lx -- %s\n",
		smp_processor_id(), esr, esr_get_class_string(esr));
	if (regs)
		__show_regs(regs);

	nmi_panic(regs, "Asynchronous SError Interrupt");

	cpu_park_loop();
	unreachable();
}

bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned long esr)
{
	unsigned long aet = arm64_ras_serror_get_severity(esr);

	switch (aet) {
	case ESR_ELx_AET_CE:	/* corrected error */
	case ESR_ELx_AET_UEO:	/* restartable, not yet consumed */
		/*
		 * The CPU can make progress. We may take UEO again as
		 * a more severe error.
		 */
		return false;

	case ESR_ELx_AET_UEU:	/* Uncorrected Unrecoverable */
	case ESR_ELx_AET_UER:	/* Uncorrected Recoverable */
		/*
		 * The CPU can't make progress. The exception may have
		 * been imprecise.
		 *
		 * Neoverse-N1 #1349291 means a non-KVM SError reported as
		 * Unrecoverable should be treated as Uncontainable. We
		 * call arm64_serror_panic() in both cases.
		 */
		return true;

	case ESR_ELx_AET_UC:	/* Uncontainable or Uncategorized error */
	default:
		/* Error has been silently propagated */
		arm64_serror_panic(regs, esr);
	}
}

void do_serror(struct pt_regs *regs, unsigned long esr)
{
	/* non-RAS errors are not containable */
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
		arm64_serror_panic(regs, esr);
}

/* GENERIC_BUG traps */

int is_valid_bugaddr(unsigned long addr)
{
	/*
	 * bug_handler() is only called for BRK #BUG_BRK_IMM, so the answer
	 * is trivial -- any spurious instances with no bug table entry will
	 * be rejected by report_bug() and passed back to the debug-monitors
	 * code, which handles them as fatal unexpected debug exceptions.
	 */
	return 1;
}

static int bug_handler(struct pt_regs *regs, unsigned long esr)
{
	switch (report_bug(regs->pc, regs)) {
	case BUG_TRAP_TYPE_BUG:
		die("Oops - BUG", regs, esr);
		break;

	case BUG_TRAP_TYPE_WARN:
		break;

	default:
		/* unknown/unrecognised bug trap type */
		return DBG_HOOK_ERROR;
	}

	/* If thread survives, skip over the BUG instruction and continue: */
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook bug_break_hook = {
	.fn = bug_handler,
	.imm = BUG_BRK_IMM,
};

#ifdef CONFIG_CFI_CLANG
static int cfi_handler(struct pt_regs *regs, unsigned long esr)
{
	unsigned long target;
	u32 type;

	target = pt_regs_read_reg(regs, FIELD_GET(CFI_BRK_IMM_TARGET, esr));
	type = (u32)pt_regs_read_reg(regs, FIELD_GET(CFI_BRK_IMM_TYPE, esr));

	switch (report_cfi_failure(regs, regs->pc, &target, type)) {
	case BUG_TRAP_TYPE_BUG:
		die("Oops - CFI", regs, esr);
		break;

	case BUG_TRAP_TYPE_WARN:
		break;

	default:
		return DBG_HOOK_ERROR;
	}

	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook cfi_break_hook = {
	.fn = cfi_handler,
	.imm = CFI_BRK_IMM_BASE,
	.mask = CFI_BRK_IMM_MASK,
};
#endif /* CONFIG_CFI_CLANG */

static int reserved_fault_handler(struct pt_regs *regs, unsigned long esr)
{
	pr_err("%s generated an invalid instruction at %pS!\n",
		"Kernel text patching",
		(void *)instruction_pointer(regs));

	/* We cannot handle this */
	return DBG_HOOK_ERROR;
}

static struct break_hook fault_break_hook = {
	.fn = reserved_fault_handler,
	.imm = FAULT_BRK_IMM,
};

#ifdef CONFIG_KASAN_SW_TAGS

#define KASAN_ESR_RECOVER	0x20
#define KASAN_ESR_WRITE	0x10
#define KASAN_ESR_SIZE_MASK	0x0f
#define KASAN_ESR_SIZE(esr)	(1 << ((esr) & KASAN_ESR_SIZE_MASK))
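
/*
 * The compiler encodes the access details in the BRK immediate: bit 5
 * selects recoverable mode, bit 4 flags a write, and bits 3:0 hold the
 * log2 of the access size, decoded by the macros above.
 */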
1042 
static int kasan_handler(struct pt_regs *regs, unsigned long esr)
{
	bool recover = esr & KASAN_ESR_RECOVER;
	bool write = esr & KASAN_ESR_WRITE;
	size_t size = KASAN_ESR_SIZE(esr);
	u64 addr = regs->regs[0];
	u64 pc = regs->pc;

	kasan_report(addr, size, write, pc);

	/*
	 * The instrumentation allows controlling whether we can proceed
	 * after a crash is detected, via the compiler's -recover flag.
	 * Disabling recovery lets the compiler generate more compact code.
	 *
	 * Unfortunately disabling recovery doesn't work for the kernel right
	 * now. KASAN reporting is disabled in some contexts (for example when
	 * the allocator accesses slab object metadata; this is controlled by
	 * current->kasan_depth). All these accesses are detected by the tool,
	 * even though the reports for them are not printed.
	 *
	 * This is something that might be fixed at some point in the future.
	 */
	if (!recover)
		die("Oops - KASAN", regs, esr);

	/* If thread survives, skip over the brk instruction and continue: */
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook kasan_break_hook = {
	.fn	= kasan_handler,
	.imm	= KASAN_BRK_IMM,
	.mask	= KASAN_BRK_MASK,
};
#endif

#ifdef CONFIG_UBSAN_TRAP
static int ubsan_handler(struct pt_regs *regs, unsigned long esr)
{
	die(report_ubsan_failure(regs, esr & UBSAN_BRK_MASK), regs, esr);
	return DBG_HOOK_HANDLED;
}

static struct break_hook ubsan_break_hook = {
	.fn	= ubsan_handler,
	.imm	= UBSAN_BRK_IMM,
	.mask	= UBSAN_BRK_MASK,
};
#endif

#define esr_comment(esr) ((esr) & ESR_ELx_BRK64_ISS_COMMENT_MASK)

/*
 * Initial handler for AArch64 BRK exceptions.
 * This handler is only used until debug_traps_init() runs.
 */
int __init early_brk64(unsigned long addr, unsigned long esr,
		struct pt_regs *regs)
{
#ifdef CONFIG_CFI_CLANG
	if ((esr_comment(esr) & ~CFI_BRK_IMM_MASK) == CFI_BRK_IMM_BASE)
		return cfi_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
#ifdef CONFIG_KASAN_SW_TAGS
	if ((esr_comment(esr) & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
		return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
#ifdef CONFIG_UBSAN_TRAP
	if ((esr_comment(esr) & ~UBSAN_BRK_MASK) == UBSAN_BRK_IMM)
		return ubsan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
	return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}

void __init trap_init(void)
{
	register_kernel_break_hook(&bug_break_hook);
#ifdef CONFIG_CFI_CLANG
	register_kernel_break_hook(&cfi_break_hook);
#endif
	register_kernel_break_hook(&fault_break_hook);
#ifdef CONFIG_KASAN_SW_TAGS
	register_kernel_break_hook(&kasan_break_hook);
#endif
#ifdef CONFIG_UBSAN_TRAP
	register_kernel_break_hook(&ubsan_break_hook);
#endif
	debug_traps_init();
}
1135