xref: /freebsd/sys/arm64/arm64/trap.c (revision c802b486)
1 /*-
2  * Copyright (c) 2014 Andrew Turner
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  */
27 
28 #include "opt_ddb.h"
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/asan.h>
33 #include <sys/kernel.h>
34 #include <sys/ktr.h>
35 #include <sys/lock.h>
36 #include <sys/msan.h>
37 #include <sys/mutex.h>
38 #include <sys/proc.h>
39 #include <sys/ptrace.h>
40 #include <sys/syscall.h>
41 #include <sys/sysent.h>
42 #ifdef KDB
43 #include <sys/kdb.h>
44 #endif
45 
46 #include <vm/vm.h>
47 #include <vm/pmap.h>
48 #include <vm/vm_kern.h>
49 #include <vm/vm_map.h>
50 #include <vm/vm_param.h>
51 #include <vm/vm_extern.h>
52 
53 #include <machine/frame.h>
54 #include <machine/md_var.h>
55 #include <machine/pcb.h>
56 #include <machine/pcpu.h>
57 #include <machine/undefined.h>
58 
59 #ifdef KDTRACE_HOOKS
60 #include <sys/dtrace_bsd.h>
61 #endif
62 
63 #ifdef VFP
64 #include <machine/vfp.h>
65 #endif
66 
67 #ifdef KDB
68 #include <machine/db_machdep.h>
69 #endif
70 
71 #ifdef DDB
72 #include <ddb/ddb.h>
73 #include <ddb/db_sym.h>
74 #endif
75 
76 /* Called from exception.S */
77 void do_el1h_sync(struct thread *, struct trapframe *);
78 void do_el0_sync(struct thread *, struct trapframe *);
79 void do_el0_error(struct trapframe *);
80 void do_serror(struct trapframe *);
81 void unhandled_exception(struct trapframe *);
82 
83 static void print_gp_register(const char *name, uint64_t value);
84 static void print_registers(struct trapframe *frame);
85 
86 int (*dtrace_invop_jump_addr)(struct trapframe *);
87 
88 typedef void (abort_handler)(struct thread *, struct trapframe *, uint64_t,
89     uint64_t, int);
90 
91 static abort_handler align_abort;
92 static abort_handler data_abort;
93 static abort_handler external_abort;
94 
95 static abort_handler *abort_handlers[] = {
96 	[ISS_DATA_DFSC_TF_L0] = data_abort,
97 	[ISS_DATA_DFSC_TF_L1] = data_abort,
98 	[ISS_DATA_DFSC_TF_L2] = data_abort,
99 	[ISS_DATA_DFSC_TF_L3] = data_abort,
100 	[ISS_DATA_DFSC_AFF_L1] = data_abort,
101 	[ISS_DATA_DFSC_AFF_L2] = data_abort,
102 	[ISS_DATA_DFSC_AFF_L3] = data_abort,
103 	[ISS_DATA_DFSC_PF_L1] = data_abort,
104 	[ISS_DATA_DFSC_PF_L2] = data_abort,
105 	[ISS_DATA_DFSC_PF_L3] = data_abort,
106 	[ISS_DATA_DFSC_ALIGN] = align_abort,
107 	[ISS_DATA_DFSC_EXT] =  external_abort,
108 	[ISS_DATA_DFSC_EXT_L0] =  external_abort,
109 	[ISS_DATA_DFSC_EXT_L1] =  external_abort,
110 	[ISS_DATA_DFSC_EXT_L2] =  external_abort,
111 	[ISS_DATA_DFSC_EXT_L3] =  external_abort,
112 	[ISS_DATA_DFSC_ECC] =  external_abort,
113 	[ISS_DATA_DFSC_ECC_L0] =  external_abort,
114 	[ISS_DATA_DFSC_ECC_L1] =  external_abort,
115 	[ISS_DATA_DFSC_ECC_L2] =  external_abort,
116 	[ISS_DATA_DFSC_ECC_L3] =  external_abort,
117 };
118 
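/*
 * Deliver a synchronous signal to the thread, recording the faulting
 * address and trap number in the siginfo seen by userland.
 */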
119 static __inline void
120 call_trapsignal(struct thread *td, int sig, int code, void *addr, int trapno)
121 {
122 	ksiginfo_t ksi;
123 
124 	ksiginfo_init_trap(&ksi);
125 	ksi.ksi_signo = sig;
126 	ksi.ksi_code = code;
127 	ksi.ksi_addr = addr;
128 	ksi.ksi_trapno = trapno;
129 	trapsignal(td, &ksi);
130 }
131 
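/*
 * Collect the system call number and arguments for syscallenter().  The
 * number arrives in x8 and the arguments in x0-x7; for the indirect
 * SYS_syscall/SYS___syscall entry points the real number is taken from x0
 * and the remaining arguments shift down by one register.
 *
 * Illustrative userspace sequence (not part of this file):
 *
 *	mov	x8, #SYS_getpid		// system call number in x8
 *	svc	#0			// immediate 0 selects the syscall path
 */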
132 int
133 cpu_fetch_syscall_args(struct thread *td)
134 {
135 	struct proc *p;
136 	syscallarg_t *ap, *dst_ap;
137 	struct syscall_args *sa;
138 
139 	p = td->td_proc;
140 	sa = &td->td_sa;
141 	ap = td->td_frame->tf_x;
142 	dst_ap = &sa->args[0];
143 
144 	sa->code = td->td_frame->tf_x[8];
145 	sa->original_code = sa->code;
146 
147 	if (__predict_false(sa->code == SYS_syscall || sa->code == SYS___syscall)) {
148 		sa->code = *ap++;
149 	} else {
150 		*dst_ap++ = *ap++;
151 	}
152 
153 	if (__predict_false(sa->code >= p->p_sysent->sv_size))
154 		sa->callp = &nosys_sysent;
155 	else
156 		sa->callp = &p->p_sysent->sv_table[sa->code];
157 
158 	KASSERT(sa->callp->sy_narg <= nitems(sa->args),
159 	    ("Syscall %d takes too many arguments", sa->code));
160 
161 	memcpy(dst_ap, ap, (nitems(sa->args) - 1) * sizeof(*dst_ap));
162 
163 	td->td_retval[0] = 0;
164 	td->td_retval[1] = 0;
165 
166 	return (0);
167 }
168 
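/* Pull in the machine-independent syscallenter()/syscallret() implementation. */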
169 #include "../../kern/subr_syscall.c"
170 
171 /*
172  * Test whether the fault was generated by the access instruction in a
173  * bus_peek_<foo> or bus_poke_<foo> bus function.
174  */
175 extern uint32_t generic_bs_peek_1f, generic_bs_peek_2f;
176 extern uint32_t generic_bs_peek_4f, generic_bs_peek_8f;
177 extern uint32_t generic_bs_poke_1f, generic_bs_poke_2f;
178 extern uint32_t generic_bs_poke_4f, generic_bs_poke_8f;
179 
180 static bool
181 test_bs_fault(void *addr)
182 {
183 	return (addr == &generic_bs_peek_1f ||
184 	    addr == &generic_bs_peek_2f ||
185 	    addr == &generic_bs_peek_4f ||
186 	    addr == &generic_bs_peek_8f ||
187 	    addr == &generic_bs_poke_1f ||
188 	    addr == &generic_bs_poke_2f ||
189 	    addr == &generic_bs_poke_4f ||
190 	    addr == &generic_bs_poke_8f);
191 }
192 
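/*
 * System call (SVC) exception from EL0.  An SVC immediate of zero (held in
 * the ISS field of ESR_ELx) enters the normal system call path; any other
 * immediate is reflected back to the process as SIGILL.
 */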
193 static void
194 svc_handler(struct thread *td, struct trapframe *frame)
195 {
196 
197 	if ((frame->tf_esr & ESR_ELx_ISS_MASK) == 0) {
198 		syscallenter(td);
199 		syscallret(td);
200 	} else {
201 		call_trapsignal(td, SIGILL, ILL_ILLOPN, (void *)frame->tf_elr,
202 		    ESR_ELx_EXCEPTION(frame->tf_esr));
203 		userret(td, frame);
204 	}
205 }
206 
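/*
 * Alignment fault.  Misaligned accesses from the kernel are fatal; faults
 * from userspace are delivered as SIGBUS with code BUS_ADRALN.
 */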
207 static void
208 align_abort(struct thread *td, struct trapframe *frame, uint64_t esr,
209     uint64_t far, int lower)
210 {
211 	if (!lower) {
212 		print_registers(frame);
213 		print_gp_register("far", far);
214 		printf(" esr: 0x%.16lx\n", esr);
215 		panic("Misaligned access from kernel space!");
216 	}
217 
218 	call_trapsignal(td, SIGBUS, BUS_ADRALN, (void *)frame->tf_elr,
219 	    ESR_ELx_EXCEPTION(frame->tf_esr));
220 	userret(td, frame);
221 }
222 
223 
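/*
 * Synchronous external (bus) abort.  Faults from userspace are delivered as
 * SIGBUS; kernel faults are only recoverable when they were raised by the
 * bus_space peek/poke helpers tested for above, otherwise we panic.
 */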
224 static void
225 external_abort(struct thread *td, struct trapframe *frame, uint64_t esr,
226     uint64_t far, int lower)
227 {
228 	if (lower) {
229 		call_trapsignal(td, SIGBUS, BUS_OBJERR, (void *)far,
230 		    ESR_ELx_EXCEPTION(frame->tf_esr));
231 		userret(td, frame);
232 		return;
233 	}
234 
235 	/*
236 	 * Try to handle synchronous external aborts caused by
237 	 * bus_space_peek() and/or bus_space_poke() functions.
238 	 */
239 	if (test_bs_fault((void *)frame->tf_elr)) {
240 		frame->tf_elr = (uint64_t)generic_bs_fault;
241 		return;
242 	}
243 
244 	print_registers(frame);
245 	print_gp_register("far", far);
246 	panic("Unhandled external data abort");
247 }
248 
249 /*
250  * It is unsafe to access the stack canary value stored in "td" until
251  * kernel map translation faults are handled; see the pmap_klookup() call below.
252  * Thus, stack-smashing detection with per-thread canaries must be disabled in
253  * this function.
254  */
255 static void NO_PERTHREAD_SSP
256 data_abort(struct thread *td, struct trapframe *frame, uint64_t esr,
257     uint64_t far, int lower)
258 {
259 	struct vm_map *map;
260 	struct pcb *pcb;
261 	vm_prot_t ftype;
262 	int error, sig, ucode;
263 #ifdef KDB
264 	bool handled;
265 #endif
266 
267 	/*
268 	 * According to the ARMv8-A rev. A.g, B2.10.5 "Load-Exclusive
269 	 * and Store-Exclusive instruction usage restrictions", the state
270 	 * of the exclusive monitors after a data abort exception is unknown.
271 	 */
272 	clrex();
273 
274 #ifdef KDB
275 	if (kdb_active) {
276 		kdb_reenter();
277 		return;
278 	}
279 #endif
280 
281 	if (lower) {
282 		map = &td->td_proc->p_vmspace->vm_map;
283 	} else if (!ADDR_IS_CANONICAL(far)) {
284 		/* We received a TBI/PAC/etc. fault from the kernel */
285 		error = KERN_INVALID_ADDRESS;
286 		pcb = td->td_pcb;
287 		goto bad_far;
288 	} else if (ADDR_IS_KERNEL(far)) {
289 		/*
290 		 * Handle a special case: the data abort was caused by accessing
291 		 * a thread structure while its mapping was being promoted or
292 		 * demoted, as a consequence of the break-before-make rule.  It
293 		 * is not safe to enable interrupts or dereference "td" before
294 		 * this case is handled.
295 		 *
296 		 * In principle, if pmap_klookup() fails, there is no need to
297 		 * call pmap_fault() below, but avoiding that call is not worth
298 		 * the effort.
299 		 */
300 		if (ESR_ELx_EXCEPTION(esr) == EXCP_DATA_ABORT) {
301 			switch (esr & ISS_DATA_DFSC_MASK) {
302 			case ISS_DATA_DFSC_TF_L0:
303 			case ISS_DATA_DFSC_TF_L1:
304 			case ISS_DATA_DFSC_TF_L2:
305 			case ISS_DATA_DFSC_TF_L3:
306 				if (pmap_klookup(far, NULL))
307 					return;
308 				break;
309 			}
310 		}
311 		intr_enable();
312 		map = kernel_map;
313 	} else {
314 		intr_enable();
315 		map = &td->td_proc->p_vmspace->vm_map;
316 		if (map == NULL)
317 			map = kernel_map;
318 	}
319 	pcb = td->td_pcb;
320 
321 	/*
322 	 * Try to handle translation, access flag, and permission faults.
323 	 * Translation faults may occur as a result of the required
324 	 * break-before-make sequence used when promoting or demoting
325 	 * superpages.  Such faults must not occur while holding the pmap lock,
326 	 * or pmap_fault() will recurse on that lock.
327 	 */
328 	if ((lower || map == kernel_map || pcb->pcb_onfault != 0) &&
329 	    pmap_fault(map->pmap, esr, far) == KERN_SUCCESS)
330 		return;
331 
332 #ifdef INVARIANTS
333 	if (td->td_md.md_spinlock_count != 0) {
334 		print_registers(frame);
335 		print_gp_register("far", far);
336 		printf(" esr: 0x%.16lx\n", esr);
337 		panic("data abort with spinlock held (spinlock count %d != 0)",
338 		    td->td_md.md_spinlock_count);
339 	}
340 #endif
341 	if (td->td_critnest != 0 || WITNESS_CHECK(WARN_SLEEPOK |
342 	    WARN_GIANTOK, NULL, "Kernel page fault") != 0) {
343 		print_registers(frame);
344 		print_gp_register("far", far);
345 		printf(" esr: 0x%.16lx\n", esr);
346 		panic("data abort in critical section or under mutex");
347 	}
348 
349 	switch (ESR_ELx_EXCEPTION(esr)) {
350 	case EXCP_INSN_ABORT:
351 	case EXCP_INSN_ABORT_L:
352 		ftype = VM_PROT_EXECUTE;
353 		break;
354 	default:
355 		/*
356 		 * If the exception was because of a read or cache operation,
357 		 * pass a read fault type into the vm code. Cache operations
358 		 * need read permission but will set the WnR flag when the
359 		 * memory is unmapped.
360 		 */
361 		if ((esr & ISS_DATA_WnR) == 0 || (esr & ISS_DATA_CM) != 0)
362 			ftype = VM_PROT_READ;
363 		else
364 			ftype = VM_PROT_WRITE;
365 		break;
366 	}
367 
368 	/* Fault in the page. */
369 	error = vm_fault_trap(map, far, ftype, VM_FAULT_NORMAL, &sig, &ucode);
370 	if (error != KERN_SUCCESS) {
371 		if (lower) {
372 			call_trapsignal(td, sig, ucode, (void *)far,
373 			    ESR_ELx_EXCEPTION(esr));
374 		} else {
375 bad_far:
376 			if (td->td_intr_nesting_level == 0 &&
377 			    pcb->pcb_onfault != 0) {
378 				frame->tf_x[0] = error;
379 				frame->tf_elr = pcb->pcb_onfault;
380 				return;
381 			}
382 
383 			printf("Fatal data abort:\n");
384 			print_registers(frame);
385 			print_gp_register("far", far);
386 			printf(" esr: 0x%.16lx\n", esr);
387 
388 #ifdef KDB
389 			if (debugger_on_trap) {
390 				kdb_why = KDB_WHY_TRAP;
391 				handled = kdb_trap(ESR_ELx_EXCEPTION(esr), 0,
392 				    frame);
393 				kdb_why = KDB_WHY_UNSET;
394 				if (handled)
395 					return;
396 			}
397 #endif
398 			panic("vm_fault failed: 0x%lx error %d",
399 			    frame->tf_elr, error);
400 		}
401 	}
402 
403 	if (lower)
404 		userret(td, frame);
405 }
406 
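/*
 * Print one general-purpose register; values that look like kernel addresses
 * are resolved to symbol+offset when DDB is compiled in.
 */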
407 static void
408 print_gp_register(const char *name, uint64_t value)
409 {
410 #if defined(DDB)
411 	c_db_sym_t sym;
412 	const char *sym_name;
413 	db_expr_t sym_value;
414 	db_expr_t offset;
415 #endif
416 
417 	printf(" %s: 0x%.16lx", name, value);
418 #if defined(DDB)
419 	/* If this looks like a kernel address try to find the symbol */
420 	if (value >= VM_MIN_KERNEL_ADDRESS) {
421 		sym = db_search_symbol(value, DB_STGY_ANY, &offset);
422 		if (sym != C_DB_SYM_NULL) {
423 			db_symbol_values(sym, &sym_name, &sym_value);
424 			printf(" (%s + 0x%lx)", sym_name, offset);
425 		}
426 	}
427 #endif
428 	printf("\n");
429 }
430 
431 static void
432 print_registers(struct trapframe *frame)
433 {
434 	char name[4];
435 	u_int reg;
436 
437 	for (reg = 0; reg < nitems(frame->tf_x); reg++) {
438 		snprintf(name, sizeof(name), "%sx%d", (reg < 10) ? " " : "",
439 		    reg);
440 		print_gp_register(name, frame->tf_x[reg]);
441 	}
442 	printf("  sp: 0x%.16lx\n", frame->tf_sp);
443 	print_gp_register(" lr", frame->tf_lr);
444 	print_gp_register("elr", frame->tf_elr);
445 	printf("spsr: 0x%.16lx\n", frame->tf_spsr);
446 }
447 
448 #ifdef VFP
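/*
 * Translate a trapped floating-point exception into an si_code for SIGFPE.
 * The individual IOF/DZF/OFF/UFF/IXF bits are only meaningful when the TFV
 * bit of the ISS is set.
 */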
449 static void
450 fpe_trap(struct thread *td, void *addr, uint32_t exception)
451 {
452 	int code;
453 
454 	code = FPE_FLTIDO;
455 	if ((exception & ISS_FP_TFV) != 0) {
456 		if ((exception & ISS_FP_IOF) != 0)
457 			code = FPE_FLTINV;
458 		else if ((exception & ISS_FP_DZF) != 0)
459 			code = FPE_FLTDIV;
460 		else if ((exception & ISS_FP_OFF) != 0)
461 			code = FPE_FLTOVF;
462 		else if ((exception & ISS_FP_UFF) != 0)
463 			code = FPE_FLTUND;
464 		else if ((exception & ISS_FP_IXF) != 0)
465 			code = FPE_FLTRES;
466 	}
467 	call_trapsignal(td, SIGFPE, code, addr, exception);
468 }
469 #endif
470 
471 /*
472  * See the comment above data_abort().
473  */
474 void NO_PERTHREAD_SSP
475 do_el1h_sync(struct thread *td, struct trapframe *frame)
476 {
477 	uint32_t exception;
478 	uint64_t esr, far;
479 	int dfsc;
480 
481 	kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
482 	kmsan_mark(frame, sizeof(*frame), KMSAN_STATE_INITED);
483 
484 	far = frame->tf_far;
485 	/* Read the esr register to get the exception details */
486 	esr = frame->tf_esr;
487 	exception = ESR_ELx_EXCEPTION(esr);
488 
489 #ifdef KDTRACE_HOOKS
490 	if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, exception))
491 		return;
492 #endif
493 
494 	CTR4(KTR_TRAP, "%s: exception=%lu, elr=0x%lx, esr=0x%lx",
495 	    __func__, exception, frame->tf_elr, esr);
496 
497 	/*
498 	 * Enable debug exceptions if we aren't already handling one. They will
499 	 * be masked again in the exception handler's epilogue.
500 	 */
501 	switch (exception) {
502 	case EXCP_BRK:
503 	case EXCP_BRKPT_EL1:
504 	case EXCP_WATCHPT_EL1:
505 	case EXCP_SOFTSTP_EL1:
506 		break;
507 	default:
508 		dbg_enable();
509 		break;
510 	}
511 
512 	switch (exception) {
513 	case EXCP_FP_SIMD:
514 	case EXCP_TRAP_FP:
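		/*
		 * FP/SIMD traps at EL1 are only expected while the kernel is
		 * using the FP unit itself (PCB_FP_KERN); anything else is
		 * fatal.
		 */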
515 #ifdef VFP
516 		if ((td->td_pcb->pcb_fpflags & PCB_FP_KERN) != 0) {
517 			vfp_restore_state();
518 		} else
519 #endif
520 		{
521 			print_registers(frame);
522 			printf(" esr: 0x%.16lx\n", esr);
523 			panic("VFP exception in the kernel");
524 		}
525 		break;
526 	case EXCP_INSN_ABORT:
527 	case EXCP_DATA_ABORT:
528 		dfsc = esr & ISS_DATA_DFSC_MASK;
529 		if (dfsc < nitems(abort_handlers) &&
530 		    abort_handlers[dfsc] != NULL) {
531 			abort_handlers[dfsc](td, frame, esr, far, 0);
532 		} else {
533 			print_registers(frame);
534 			print_gp_register("far", far);
535 			printf(" esr: 0x%.16lx\n", esr);
536 			panic("Unhandled EL1 %s abort: 0x%x",
537 			    exception == EXCP_INSN_ABORT ? "instruction" :
538 			    "data", dfsc);
539 		}
540 		break;
541 	case EXCP_BRK:
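		/*
		 * A brk instruction: give DTrace a chance to claim its
		 * reserved immediate, otherwise hand the trap to the kernel
		 * debugger.
		 */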
542 #ifdef KDTRACE_HOOKS
543 		if ((esr & ESR_ELx_ISS_MASK) == 0x40d && \
544 		    dtrace_invop_jump_addr != 0) {
545 			dtrace_invop_jump_addr(frame);
546 			break;
547 		}
548 #endif
549 #ifdef KDB
550 		kdb_trap(exception, 0, frame);
551 #else
552 		panic("No debugger in kernel.");
553 #endif
554 		break;
555 	case EXCP_BRKPT_EL1:
556 	case EXCP_WATCHPT_EL1:
557 	case EXCP_SOFTSTP_EL1:
558 #ifdef KDB
559 		kdb_trap(exception, 0, frame);
560 #else
561 		panic("No debugger in kernel.");
562 #endif
563 		break;
564 	case EXCP_FPAC:
565 		/* We can see this if the authentication on PAC fails */
566 		print_registers(frame);
567 		print_gp_register("far", far);
568 		panic("FPAC kernel exception");
569 		break;
570 	case EXCP_UNKNOWN:
571 		if (undef_insn(1, frame))
572 			break;
573 		print_registers(frame);
574 		print_gp_register("far", far);
575 		panic("Undefined instruction: %08x",
576 		    *(uint32_t *)frame->tf_elr);
577 		break;
578 	case EXCP_BTI:
579 		print_registers(frame);
580 		print_gp_register("far", far);
581 		panic("Branch Target exception");
582 		break;
583 	default:
584 		print_registers(frame);
585 		print_gp_register("far", far);
586 		panic("Unknown kernel exception 0x%x esr_el1 0x%lx", exception,
587 		    esr);
588 	}
589 }
590 
591 void
592 do_el0_sync(struct thread *td, struct trapframe *frame)
593 {
594 	pcpu_bp_harden bp_harden;
595 	uint32_t exception;
596 	uint64_t esr, far;
597 	int dfsc;
598 
599 	/* Check we have a sane environment when entering from userland */
600 	KASSERT((uintptr_t)get_pcpu() >= VM_MIN_KERNEL_ADDRESS,
601 	    ("Invalid pcpu address from userland: %p (tpidr 0x%lx)",
602 	     get_pcpu(), READ_SPECIALREG(tpidr_el1)));
603 
604 	kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
605 	kmsan_mark(frame, sizeof(*frame), KMSAN_STATE_INITED);
606 
607 	far = frame->tf_far;
608 	esr = frame->tf_esr;
609 	exception = ESR_ELx_EXCEPTION(esr);
610 	if (exception == EXCP_INSN_ABORT_L && far > VM_MAXUSER_ADDRESS) {
611 		/*
612 		 * Userspace may be trying to train the branch predictor to
613 		 * attack the kernel. If we are on a CPU affected by this,
614 		 * call the handler to clear the branch predictor state.
615 		 */
616 		bp_harden = PCPU_GET(bp_harden);
617 		if (bp_harden != NULL)
618 			bp_harden();
619 	}
620 	intr_enable();
621 
622 	CTR4(KTR_TRAP, "%s: exception=%lu, elr=0x%lx, esr=0x%lx",
623 	    __func__, exception, frame->tf_elr, esr);
624 
625 	switch (exception) {
626 	case EXCP_FP_SIMD:
627 #ifdef VFP
628 		vfp_restore_state();
629 #else
630 		panic("VFP exception in userland");
631 #endif
632 		break;
633 	case EXCP_TRAP_FP:
634 #ifdef VFP
635 		fpe_trap(td, (void *)frame->tf_elr, esr);
636 		userret(td, frame);
637 #else
638 		panic("VFP exception in userland");
639 #endif
640 		break;
641 	case EXCP_SVE:
642 		call_trapsignal(td, SIGILL, ILL_ILLTRP, (void *)frame->tf_elr,
643 		    exception);
644 		userret(td, frame);
645 		break;
646 	case EXCP_SVC32:
647 	case EXCP_SVC64:
648 		svc_handler(td, frame);
649 		break;
650 	case EXCP_INSN_ABORT_L:
651 	case EXCP_DATA_ABORT_L:
652 	case EXCP_DATA_ABORT:
653 		dfsc = esr & ISS_DATA_DFSC_MASK;
654 		if (dfsc < nitems(abort_handlers) &&
655 		    abort_handlers[dfsc] != NULL)
656 			abort_handlers[dfsc](td, frame, esr, far, 1);
657 		else {
658 			print_registers(frame);
659 			print_gp_register("far", far);
660 			printf(" esr: 0x%.16lx\n", esr);
661 			panic("Unhandled EL0 %s abort: 0x%x",
662 			    exception == EXCP_INSN_ABORT_L ? "instruction" :
663 			    "data", dfsc);
664 		}
665 		break;
666 	case EXCP_UNKNOWN:
667 		if (!undef_insn(0, frame))
668 			call_trapsignal(td, SIGILL, ILL_ILLTRP, (void *)far,
669 			    exception);
670 		userret(td, frame);
671 		break;
672 	case EXCP_FPAC:
673 		call_trapsignal(td, SIGILL, ILL_ILLOPN, (void *)frame->tf_elr,
674 		    exception);
675 		userret(td, frame);
676 		break;
677 	case EXCP_SP_ALIGN:
678 		call_trapsignal(td, SIGBUS, BUS_ADRALN, (void *)frame->tf_sp,
679 		    exception);
680 		userret(td, frame);
681 		break;
682 	case EXCP_PC_ALIGN:
683 		call_trapsignal(td, SIGBUS, BUS_ADRALN, (void *)frame->tf_elr,
684 		    exception);
685 		userret(td, frame);
686 		break;
687 	case EXCP_BRKPT_EL0:
688 	case EXCP_BRK:
689 #ifdef COMPAT_FREEBSD32
690 	case EXCP_BRKPT_32:
691 #endif /* COMPAT_FREEBSD32 */
692 		call_trapsignal(td, SIGTRAP, TRAP_BRKPT, (void *)frame->tf_elr,
693 		    exception);
694 		userret(td, frame);
695 		break;
696 	case EXCP_WATCHPT_EL0:
697 		call_trapsignal(td, SIGTRAP, TRAP_TRACE, (void *)far,
698 		    exception);
699 		userret(td, frame);
700 		break;
701 	case EXCP_MSR:
702 		/*
703 		 * The CPU can raise EXCP_MSR when userspace executes an mrs
704 		 * instruction to access a special register userspace doesn't
705 		 * have access to.
706 		 */
707 		if (!undef_insn(0, frame))
708 			call_trapsignal(td, SIGILL, ILL_PRVOPC,
709 			    (void *)frame->tf_elr, exception);
710 		userret(td, frame);
711 		break;
712 	case EXCP_SOFTSTP_EL0:
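		/*
		 * Single-step debug exception from userspace: if the debugger
		 * armed single-stepping (TDB_STEP), disarm the hardware
		 * single-step state before delivering SIGTRAP.
		 */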
713 		PROC_LOCK(td->td_proc);
714 		if ((td->td_dbgflags & TDB_STEP) != 0) {
715 			td->td_frame->tf_spsr &= ~PSR_SS;
716 			td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
717 			WRITE_SPECIALREG(mdscr_el1,
718 			    READ_SPECIALREG(mdscr_el1) & ~MDSCR_SS);
719 		}
720 		PROC_UNLOCK(td->td_proc);
721 		call_trapsignal(td, SIGTRAP, TRAP_TRACE,
722 		    (void *)frame->tf_elr, exception);
723 		userret(td, frame);
724 		break;
725 	case EXCP_BTI:
726 		call_trapsignal(td, SIGILL, ILL_ILLOPC, (void *)frame->tf_elr,
727 		    exception);
728 		userret(td, frame);
729 		break;
730 	default:
731 		call_trapsignal(td, SIGBUS, BUS_OBJERR, (void *)frame->tf_elr,
732 		    exception);
733 		userret(td, frame);
734 		break;
735 	}
736 
737 	KASSERT((td->td_pcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
738 	    ("Kernel VFP flags set while entering userspace"));
739 	KASSERT(
740 	    td->td_pcb->pcb_fpusaved == &td->td_pcb->pcb_fpustate,
741 	    ("Kernel VFP state in use when entering userspace"));
742 }
743 
744 /*
745  * TODO: We will need to handle these later when we support ARMv8.2 RAS.
746  */
747 void
748 do_serror(struct trapframe *frame)
749 {
750 	uint64_t esr, far;
751 
752 	kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
753 	kmsan_mark(frame, sizeof(*frame), KMSAN_STATE_INITED);
754 
755 	far = frame->tf_far;
756 	esr = frame->tf_esr;
757 
758 	print_registers(frame);
759 	print_gp_register("far", far);
760 	printf(" esr: 0x%.16lx\n", esr);
761 	panic("Unhandled System Error");
762 }
763 
764 void
765 unhandled_exception(struct trapframe *frame)
766 {
767 	uint64_t esr, far;
768 
769 	kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
770 	kmsan_mark(frame, sizeof(*frame), KMSAN_STATE_INITED);
771 
772 	far = frame->tf_far;
773 	esr = frame->tf_esr;
774 
775 	print_registers(frame);
776 	print_gp_register("far", far);
777 	printf(" esr: 0x%.16lx\n", esr);
778 	panic("Unhandled exception");
779 }
780