xref: /freebsd/sys/arm64/arm64/trap.c (revision 0731b0a9)
1 /*-
2  * Copyright (c) 2014 Andrew Turner
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  */
27 
28 #include "opt_ddb.h"
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/asan.h>
36 #include <sys/kernel.h>
37 #include <sys/ktr.h>
38 #include <sys/lock.h>
39 #include <sys/mutex.h>
40 #include <sys/proc.h>
41 #include <sys/ptrace.h>
42 #include <sys/syscall.h>
43 #include <sys/sysent.h>
44 #ifdef KDB
45 #include <sys/kdb.h>
46 #endif
47 
48 #include <vm/vm.h>
49 #include <vm/pmap.h>
50 #include <vm/vm_kern.h>
51 #include <vm/vm_map.h>
52 #include <vm/vm_param.h>
53 #include <vm/vm_extern.h>
54 
55 #include <machine/frame.h>
56 #include <machine/md_var.h>
57 #include <machine/pcb.h>
58 #include <machine/pcpu.h>
59 #include <machine/undefined.h>
60 
61 #ifdef KDTRACE_HOOKS
62 #include <sys/dtrace_bsd.h>
63 #endif
64 
65 #ifdef VFP
66 #include <machine/vfp.h>
67 #endif
68 
69 #ifdef KDB
70 #include <machine/db_machdep.h>
71 #endif
72 
73 #ifdef DDB
74 #include <ddb/ddb.h>
75 #include <ddb/db_sym.h>
76 #endif
77 
78 /* Called from exception.S */
79 void do_el1h_sync(struct thread *, struct trapframe *);
80 void do_el0_sync(struct thread *, struct trapframe *);
81 void do_el0_error(struct trapframe *);
82 void do_serror(struct trapframe *);
83 void unhandled_exception(struct trapframe *);
84 
85 static void print_gp_register(const char *name, uint64_t value);
86 static void print_registers(struct trapframe *frame);
87 
88 int (*dtrace_invop_jump_addr)(struct trapframe *);
89 
90 typedef void (abort_handler)(struct thread *, struct trapframe *, uint64_t,
91     uint64_t, int);
92 
93 static abort_handler align_abort;
94 static abort_handler data_abort;
95 static abort_handler external_abort;
96 
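/*
 * Data and instruction abort handlers, indexed by the DFSC/IFSC fault
 * status code taken from the ESR.  Fault codes without an entry here are
 * left NULL and are reported as unhandled aborts by do_el1h_sync() and
 * do_el0_sync().
 */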
97 static abort_handler *abort_handlers[] = {
98 	[ISS_DATA_DFSC_TF_L0] = data_abort,
99 	[ISS_DATA_DFSC_TF_L1] = data_abort,
100 	[ISS_DATA_DFSC_TF_L2] = data_abort,
101 	[ISS_DATA_DFSC_TF_L3] = data_abort,
102 	[ISS_DATA_DFSC_AFF_L1] = data_abort,
103 	[ISS_DATA_DFSC_AFF_L2] = data_abort,
104 	[ISS_DATA_DFSC_AFF_L3] = data_abort,
105 	[ISS_DATA_DFSC_PF_L1] = data_abort,
106 	[ISS_DATA_DFSC_PF_L2] = data_abort,
107 	[ISS_DATA_DFSC_PF_L3] = data_abort,
108 	[ISS_DATA_DFSC_ALIGN] = align_abort,
109 	[ISS_DATA_DFSC_EXT] = external_abort,
110 	[ISS_DATA_DFSC_EXT_L0] = external_abort,
111 	[ISS_DATA_DFSC_EXT_L1] = external_abort,
112 	[ISS_DATA_DFSC_EXT_L2] = external_abort,
113 	[ISS_DATA_DFSC_EXT_L3] = external_abort,
114 	[ISS_DATA_DFSC_ECC] = external_abort,
115 	[ISS_DATA_DFSC_ECC_L0] = external_abort,
116 	[ISS_DATA_DFSC_ECC_L1] = external_abort,
117 	[ISS_DATA_DFSC_ECC_L2] = external_abort,
118 	[ISS_DATA_DFSC_ECC_L3] = external_abort,
119 };
120 
121 static __inline void
122 call_trapsignal(struct thread *td, int sig, int code, void *addr, int trapno)
123 {
124 	ksiginfo_t ksi;
125 
126 	ksiginfo_init_trap(&ksi);
127 	ksi.ksi_signo = sig;
128 	ksi.ksi_code = code;
129 	ksi.ksi_addr = addr;
130 	ksi.ksi_trapno = trapno;
131 	trapsignal(td, &ksi);
132 }
133 
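/*
 * Fetch the system call number and arguments for the current thread.  On
 * arm64 the syscall number is passed in x8 and up to eight arguments in
 * x0-x7; the indirect SYS_syscall/SYS___syscall variants carry the real
 * number in x0 with the remaining arguments shifted up by one register.
 *
 * For example, a direct write(2) from userland is set up roughly as
 * (illustrative only):
 *
 *	mov	x8, #SYS_write		system call number
 *	mov	x0, #fd			arguments in x0-x7
 *	...
 *	svc	#0
 */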
134 int
135 cpu_fetch_syscall_args(struct thread *td)
136 {
137 	struct proc *p;
138 	syscallarg_t *ap, *dst_ap;
139 	struct syscall_args *sa;
140 
141 	p = td->td_proc;
142 	sa = &td->td_sa;
143 	ap = td->td_frame->tf_x;
144 	dst_ap = &sa->args[0];
145 
146 	sa->code = td->td_frame->tf_x[8];
147 	sa->original_code = sa->code;
148 
149 	if (__predict_false(sa->code == SYS_syscall || sa->code == SYS___syscall)) {
150 		sa->code = *ap++;
151 	} else {
152 		*dst_ap++ = *ap++;
153 	}
154 
155 	if (__predict_false(sa->code >= p->p_sysent->sv_size))
156 		sa->callp = &p->p_sysent->sv_table[0];
157 	else
158 		sa->callp = &p->p_sysent->sv_table[sa->code];
159 
160 	KASSERT(sa->callp->sy_narg <= nitems(sa->args),
161 	    ("Syscall %d takes too many arguments", sa->code));
162 
163 	memcpy(dst_ap, ap, (nitems(sa->args) - 1) * sizeof(*dst_ap));
164 
165 	td->td_retval[0] = 0;
166 	td->td_retval[1] = 0;
167 
168 	return (0);
169 }
170 
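/*
 * Pull in the machine-independent syscallenter()/syscallret() implementation
 * so it can be inlined into svc_handler() below.
 */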
171 #include "../../kern/subr_syscall.c"
172 
173 /*
174  * Test whether a fault was generated by an access instruction in a
175  * bus_space_peek_<foo>() or bus_space_poke_<foo>() bus function.
176  */
177 extern uint32_t generic_bs_peek_1f, generic_bs_peek_2f;
178 extern uint32_t generic_bs_peek_4f, generic_bs_peek_8f;
179 extern uint32_t generic_bs_poke_1f, generic_bs_poke_2f;
180 extern uint32_t generic_bs_poke_4f, generic_bs_poke_8f;
181 
182 static bool
183 test_bs_fault(void *addr)
184 {
185 	return (addr == &generic_bs_peek_1f ||
186 	    addr == &generic_bs_peek_2f ||
187 	    addr == &generic_bs_peek_4f ||
188 	    addr == &generic_bs_peek_8f ||
189 	    addr == &generic_bs_poke_1f ||
190 	    addr == &generic_bs_poke_2f ||
191 	    addr == &generic_bs_poke_4f ||
192 	    addr == &generic_bs_poke_8f);
193 }
194 
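/*
 * Handle an SVC exception taken from EL0.  An SVC with a zero immediate
 * (encoded in the ISS field of the ESR) is the system call path; any other
 * immediate is not a valid FreeBSD system call and raises SIGILL.
 */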
195 static void
196 svc_handler(struct thread *td, struct trapframe *frame)
197 {
198 
199 	if ((frame->tf_esr & ESR_ELx_ISS_MASK) == 0) {
200 		syscallenter(td);
201 		syscallret(td);
202 	} else {
203 		call_trapsignal(td, SIGILL, ILL_ILLOPN, (void *)frame->tf_elr,
204 		    ESR_ELx_EXCEPTION(frame->tf_esr));
205 		userret(td, frame);
206 	}
207 }
208 
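/*
 * Alignment faults taken from the kernel are fatal; faults from userland
 * are reported to the thread as SIGBUS with BUS_ADRALN.
 */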
209 static void
210 align_abort(struct thread *td, struct trapframe *frame, uint64_t esr,
211     uint64_t far, int lower)
212 {
213 	if (!lower) {
214 		print_registers(frame);
215 		print_gp_register("far", far);
216 		printf(" esr: %16lx\n", esr);
217 		panic("Misaligned access from kernel space!");
218 	}
219 
220 	call_trapsignal(td, SIGBUS, BUS_ADRALN, (void *)frame->tf_elr,
221 	    ESR_ELx_EXCEPTION(frame->tf_esr));
222 	userret(td, frame);
223 }
224 
225 
226 static void
227 external_abort(struct thread *td, struct trapframe *frame, uint64_t esr,
228     uint64_t far, int lower)
229 {
230 
231 	/*
232 	 * Try to handle synchronous external aborts caused by
233 	 * bus_space_peek() and/or bus_space_poke() functions.
234 	 */
235 	if (!lower && test_bs_fault((void *)frame->tf_elr)) {
236 		frame->tf_elr = (uint64_t)generic_bs_fault;
237 		return;
238 	}
239 
240 	print_registers(frame);
241 	print_gp_register("far", far);
242 	panic("Unhandled EL%d external data abort", lower ? 0 : 1);
243 }
244 
245 /*
246  * It is unsafe to access the stack canary value stored in "td" until
247  * kernel map translation faults are handled, see the pmap_klookup() call below.
248  * Thus, stack-smashing detection with per-thread canaries must be disabled in
249  * this function.
250  */
251 static void NO_PERTHREAD_SSP
252 data_abort(struct thread *td, struct trapframe *frame, uint64_t esr,
253     uint64_t far, int lower)
254 {
255 	struct vm_map *map;
256 	struct pcb *pcb;
257 	vm_prot_t ftype;
258 	int error, sig, ucode;
259 #ifdef KDB
260 	bool handled;
261 #endif
262 
263 	/*
264 	 * According to the ARMv8-A rev. A.g, B2.10.5 "Load-Exclusive
265 	 * and Store-Exclusive instruction usage restrictions", the state
266 	 * of the exclusive monitors after a data abort exception is unknown.
267 	 */
268 	clrex();
269 
270 #ifdef KDB
271 	if (kdb_active) {
272 		kdb_reenter();
273 		return;
274 	}
275 #endif
276 
277 	if (lower) {
278 		map = &td->td_proc->p_vmspace->vm_map;
279 	} else if (!ADDR_IS_CANONICAL(far)) {
280 		/* We received a TBI/PAC/etc. fault from the kernel */
281 		error = KERN_INVALID_ADDRESS;
282 		goto bad_far;
283 	} else if (ADDR_IS_KERNEL(far)) {
284 		/*
285 		 * Handle a special case: the data abort was caused by accessing
286 		 * a thread structure while its mapping was being promoted or
287 		 * demoted, as a consequence of the break-before-make rule.  It
288 		 * is not safe to enable interrupts or dereference "td" before
289 		 * this case is handled.
290 		 *
291 		 * In principle, if pmap_klookup() fails, there is no need to
292 		 * call pmap_fault() below, but avoiding that call is not worth
293 		 * the effort.
294 		 */
295 		if (ESR_ELx_EXCEPTION(esr) == EXCP_DATA_ABORT) {
296 			switch (esr & ISS_DATA_DFSC_MASK) {
297 			case ISS_DATA_DFSC_TF_L0:
298 			case ISS_DATA_DFSC_TF_L1:
299 			case ISS_DATA_DFSC_TF_L2:
300 			case ISS_DATA_DFSC_TF_L3:
301 				if (pmap_klookup(far, NULL))
302 					return;
303 				break;
304 			}
305 		}
306 		intr_enable();
307 		map = kernel_map;
308 	} else {
309 		intr_enable();
310 		map = &td->td_proc->p_vmspace->vm_map;
311 		if (map == NULL)
312 			map = kernel_map;
313 	}
314 	pcb = td->td_pcb;
315 
316 	/*
317 	 * Try to handle translation, access flag, and permission faults.
318 	 * Translation faults may occur as a result of the required
319 	 * break-before-make sequence used when promoting or demoting
320 	 * superpages.  Such faults must not occur while holding the pmap lock,
321 	 * or pmap_fault() will recurse on that lock.
322 	 */
323 	if ((lower || map == kernel_map || pcb->pcb_onfault != 0) &&
324 	    pmap_fault(map->pmap, esr, far) == KERN_SUCCESS)
325 		return;
326 
327 #ifdef INVARIANTS
328 	if (td->td_md.md_spinlock_count != 0) {
329 		print_registers(frame);
330 		print_gp_register("far", far);
331 		printf(" esr: %.16lx\n", esr);
332 		panic("data abort with spinlock held");
333 	}
334 #endif
335 	if (td->td_critnest != 0 || WITNESS_CHECK(WARN_SLEEPOK |
336 	    WARN_GIANTOK, NULL, "Kernel page fault") != 0) {
337 		print_registers(frame);
338 		print_gp_register("far", far);
339 		printf(" esr: %16lx\n", esr);
340 		panic("data abort in critical section or under mutex");
341 	}
342 
343 	switch (ESR_ELx_EXCEPTION(esr)) {
344 	case EXCP_INSN_ABORT:
345 	case EXCP_INSN_ABORT_L:
346 		ftype = VM_PROT_EXECUTE;
347 		break;
348 	default:
349 		/*
350 		 * If the exception was because of a read or cache operation,
351 		 * pass a read fault type into the vm code. Cache operations
352 		 * need read permission but will set the WnR flag when the
353 		 * memory is unmapped.
354 		 */
355 		if ((esr & ISS_DATA_WnR) == 0 || (esr & ISS_DATA_CM) != 0)
356 			ftype = VM_PROT_READ;
357 		else
358 			ftype = VM_PROT_WRITE;
359 		break;
360 	}
361 
362 	/* Fault in the page. */
363 	error = vm_fault_trap(map, far, ftype, VM_FAULT_NORMAL, &sig, &ucode);
364 	if (error != KERN_SUCCESS) {
365 		if (lower) {
366 			call_trapsignal(td, sig, ucode, (void *)far,
367 			    ESR_ELx_EXCEPTION(esr));
368 		} else {
369 bad_far:
370 			if (td->td_intr_nesting_level == 0 &&
371 			    pcb->pcb_onfault != 0) {
372 				frame->tf_x[0] = error;
373 				frame->tf_elr = pcb->pcb_onfault;
374 				return;
375 			}
376 
377 			printf("Fatal data abort:\n");
378 			print_registers(frame);
379 			print_gp_register("far", far);
380 			printf(" esr: %16lx\n", esr);
381 
382 #ifdef KDB
383 			if (debugger_on_trap) {
384 				kdb_why = KDB_WHY_TRAP;
385 				handled = kdb_trap(ESR_ELx_EXCEPTION(esr), 0,
386 				    frame);
387 				kdb_why = KDB_WHY_UNSET;
388 				if (handled)
389 					return;
390 			}
391 #endif
392 			panic("vm_fault failed: %lx error %d",
393 			    frame->tf_elr, error);
394 		}
395 	}
396 
397 	if (lower)
398 		userret(td, frame);
399 }
400 
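/*
 * Print a single general-purpose register.  With DDB compiled in, values
 * that look like kernel addresses are also resolved to the nearest symbol
 * plus offset.
 */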
401 static void
402 print_gp_register(const char *name, uint64_t value)
403 {
404 #if defined(DDB)
405 	c_db_sym_t sym;
406 	const char *sym_name;
407 	db_expr_t sym_value;
408 	db_expr_t offset;
409 #endif
410 
411 	printf(" %s: %16lx", name, value);
412 #if defined(DDB)
413 	/* If this looks like a kernel address, try to find the symbol */
414 	if (value >= VM_MIN_KERNEL_ADDRESS) {
415 		sym = db_search_symbol(value, DB_STGY_ANY, &offset);
416 		if (sym != C_DB_SYM_NULL) {
417 			db_symbol_values(sym, &sym_name, &sym_value);
418 			printf(" (%s + %lx)", sym_name, offset);
419 		}
420 	}
421 #endif
422 	printf("\n");
423 }
424 
425 static void
426 print_registers(struct trapframe *frame)
427 {
428 	char name[4];
429 	u_int reg;
430 
431 	for (reg = 0; reg < nitems(frame->tf_x); reg++) {
432 		snprintf(name, sizeof(name), "%sx%d", (reg < 10) ? " " : "",
433 		    reg);
434 		print_gp_register(name, frame->tf_x[reg]);
435 	}
436 	printf("  sp: %16lx\n", frame->tf_sp);
437 	print_gp_register(" lr", frame->tf_lr);
438 	print_gp_register("elr", frame->tf_elr);
439 	printf("spsr: %16lx\n", frame->tf_spsr);
440 }
441 
442 #ifdef VFP
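/*
 * Map a trapped floating-point exception onto a SIGFPE si_code using the
 * exception syndrome bits.  The individual IOF/DZF/OFF/UFF/IXF bits are
 * only meaningful when ISS_FP_TFV is set; otherwise fall back to FPE_FLTIDO.
 */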
443 static void
444 fpe_trap(struct thread *td, void *addr, uint32_t exception)
445 {
446 	int code;
447 
448 	code = FPE_FLTIDO;
449 	if ((exception & ISS_FP_TFV) != 0) {
450 		if ((exception & ISS_FP_IOF) != 0)
451 			code = FPE_FLTINV;
452 		else if ((exception & ISS_FP_DZF) != 0)
453 			code = FPE_FLTDIV;
454 		else if ((exception & ISS_FP_OFF) != 0)
455 			code = FPE_FLTOVF;
456 		else if ((exception & ISS_FP_UFF) != 0)
457 			code = FPE_FLTUND;
458 		else if ((exception & ISS_FP_IXF) != 0)
459 			code = FPE_FLTRES;
460 	}
461 	call_trapsignal(td, SIGFPE, code, addr, exception);
462 }
463 #endif
464 
465 /*
466  * See the comment above data_abort().
467  */
468 void NO_PERTHREAD_SSP
469 do_el1h_sync(struct thread *td, struct trapframe *frame)
470 {
471 	uint32_t exception;
472 	uint64_t esr, far;
473 	int dfsc;
474 
475 	kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
476 	far = frame->tf_far;
477 	/* Read the esr register to get the exception details */
478 	esr = frame->tf_esr;
479 	exception = ESR_ELx_EXCEPTION(esr);
480 
481 #ifdef KDTRACE_HOOKS
482 	if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, exception))
483 		return;
484 #endif
485 
486 	CTR4(KTR_TRAP,
487 	    "do_el1h_sync: curthread: %p, esr %lx, elr: %lx, frame: %p", td,
488 	    esr, frame->tf_elr, frame);
489 
490 	/*
491 	 * Enable debug exceptions if we aren't already handling one. They will
492 	 * be masked again in the exception handler's epilogue.
493 	 */
494 	if (exception != EXCP_BRK && exception != EXCP_WATCHPT_EL1 &&
495 	    exception != EXCP_SOFTSTP_EL1)
496 		dbg_enable();
497 
498 	switch (exception) {
499 	case EXCP_FP_SIMD:
500 	case EXCP_TRAP_FP:
501 #ifdef VFP
502 		if ((td->td_pcb->pcb_fpflags & PCB_FP_KERN) != 0) {
503 			vfp_restore_state();
504 		} else
505 #endif
506 		{
507 			print_registers(frame);
508 			printf(" esr: %16lx\n", esr);
509 			panic("VFP exception in the kernel");
510 		}
511 		break;
512 	case EXCP_INSN_ABORT:
513 	case EXCP_DATA_ABORT:
514 		dfsc = esr & ISS_DATA_DFSC_MASK;
515 		if (dfsc < nitems(abort_handlers) &&
516 		    abort_handlers[dfsc] != NULL) {
517 			abort_handlers[dfsc](td, frame, esr, far, 0);
518 		} else {
519 			print_registers(frame);
520 			print_gp_register("far", far);
521 			printf(" esr: %16lx\n", esr);
522 			panic("Unhandled EL1 %s abort: %x",
523 			    exception == EXCP_INSN_ABORT ? "instruction" :
524 			    "data", dfsc);
525 		}
526 		break;
527 	case EXCP_BRK:
528 #ifdef KDTRACE_HOOKS
529 		if ((esr & ESR_ELx_ISS_MASK) == 0x40d &&
530 		    dtrace_invop_jump_addr != 0) {
531 			dtrace_invop_jump_addr(frame);
532 			break;
533 		}
534 #endif
535 #ifdef KDB
536 		kdb_trap(exception, 0, frame);
537 #else
538 		panic("No debugger in kernel.");
539 #endif
540 		break;
541 	case EXCP_WATCHPT_EL1:
542 	case EXCP_SOFTSTP_EL1:
543 #ifdef KDB
544 		kdb_trap(exception, 0, frame);
545 #else
546 		panic("No debugger in kernel.");
547 #endif
548 		break;
549 	case EXCP_FPAC:
550 		/* We can see this if the authentication on PAC fails */
551 		print_registers(frame);
552 		print_gp_register("far", far);
553 		panic("FPAC kernel exception");
554 		break;
555 	case EXCP_UNKNOWN:
556 		if (undef_insn(1, frame))
557 			break;
558 		print_registers(frame);
559 		print_gp_register("far", far);
560 		panic("Undefined instruction: %08x",
561 		    *(uint32_t *)frame->tf_elr);
562 		break;
563 	default:
564 		print_registers(frame);
565 		print_gp_register("far", far);
566 		panic("Unknown kernel exception %x esr_el1 %lx", exception,
567 		    esr);
568 	}
569 }
570 
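/*
 * Handle a synchronous exception taken from EL0 (userland).  Dispatch on
 * the exception class from ESR_EL1 and either service the event (system
 * calls, VFP state restore, page faults) or deliver an appropriate signal
 * before returning to userland via userret().
 */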
571 void
572 do_el0_sync(struct thread *td, struct trapframe *frame)
573 {
574 	pcpu_bp_harden bp_harden;
575 	uint32_t exception;
576 	uint64_t esr, far;
577 	int dfsc;
578 
579 	/* Check we have a sane environment when entering from userland */
580 	KASSERT((uintptr_t)get_pcpu() >= VM_MIN_KERNEL_ADDRESS,
581 	    ("Invalid pcpu address from userland: %p (tpidr %lx)",
582 	     get_pcpu(), READ_SPECIALREG(tpidr_el1)));
583 
584 	kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
585 	far = frame->tf_far;
586 	esr = frame->tf_esr;
587 	exception = ESR_ELx_EXCEPTION(esr);
588 	if (exception == EXCP_INSN_ABORT_L && far > VM_MAXUSER_ADDRESS) {
589 		/*
590 		 * Userspace may be trying to train the branch predictor to
591 		 * attack the kernel. If we are on a CPU affected by this,
592 		 * call the handler to clear the branch predictor state.
593 		 */
594 		bp_harden = PCPU_GET(bp_harden);
595 		if (bp_harden != NULL)
596 			bp_harden();
597 	}
598 	intr_enable();
599 
600 	CTR4(KTR_TRAP,
601 	    "do_el0_sync: curthread: %p, esr %lx, elr: %lx, frame: %p", td, esr,
602 	    frame->tf_elr, frame);
603 
604 	switch (exception) {
605 	case EXCP_FP_SIMD:
606 #ifdef VFP
607 		vfp_restore_state();
608 #else
609 		panic("VFP exception in userland");
610 #endif
611 		break;
612 	case EXCP_TRAP_FP:
613 #ifdef VFP
614 		fpe_trap(td, (void *)frame->tf_elr, esr);
615 		userret(td, frame);
616 #else
617 		panic("VFP exception in userland");
618 #endif
619 		break;
620 	case EXCP_SVE:
621 		call_trapsignal(td, SIGILL, ILL_ILLTRP, (void *)frame->tf_elr,
622 		    exception);
623 		userret(td, frame);
624 		break;
625 	case EXCP_SVC32:
626 	case EXCP_SVC64:
627 		svc_handler(td, frame);
628 		break;
629 	case EXCP_INSN_ABORT_L:
630 	case EXCP_DATA_ABORT_L:
631 	case EXCP_DATA_ABORT:
632 		dfsc = esr & ISS_DATA_DFSC_MASK;
633 		if (dfsc < nitems(abort_handlers) &&
634 		    abort_handlers[dfsc] != NULL)
635 			abort_handlers[dfsc](td, frame, esr, far, 1);
636 		else {
637 			print_registers(frame);
638 			print_gp_register("far", far);
639 			printf(" esr: %16lx\n", esr);
640 			panic("Unhandled EL0 %s abort: %x",
641 			    exception == EXCP_INSN_ABORT_L ? "instruction" :
642 			    "data", dfsc);
643 		}
644 		break;
645 	case EXCP_UNKNOWN:
646 		if (!undef_insn(0, frame))
647 			call_trapsignal(td, SIGILL, ILL_ILLTRP, (void *)far,
648 			    exception);
649 		userret(td, frame);
650 		break;
651 	case EXCP_FPAC:
652 		call_trapsignal(td, SIGILL, ILL_ILLOPN, (void *)frame->tf_elr,
653 		    exception);
654 		userret(td, frame);
655 		break;
656 	case EXCP_SP_ALIGN:
657 		call_trapsignal(td, SIGBUS, BUS_ADRALN, (void *)frame->tf_sp,
658 		    exception);
659 		userret(td, frame);
660 		break;
661 	case EXCP_PC_ALIGN:
662 		call_trapsignal(td, SIGBUS, BUS_ADRALN, (void *)frame->tf_elr,
663 		    exception);
664 		userret(td, frame);
665 		break;
666 	case EXCP_BRKPT_EL0:
667 	case EXCP_BRK:
668 #ifdef COMPAT_FREEBSD32
669 	case EXCP_BRKPT_32:
670 #endif /* COMPAT_FREEBSD32 */
671 		call_trapsignal(td, SIGTRAP, TRAP_BRKPT, (void *)frame->tf_elr,
672 		    exception);
673 		userret(td, frame);
674 		break;
675 	case EXCP_WATCHPT_EL0:
676 		call_trapsignal(td, SIGTRAP, TRAP_TRACE, (void *)far,
677 		    exception);
678 		userret(td, frame);
679 		break;
680 	case EXCP_MSR:
681 		/*
682 		 * The CPU can raise EXCP_MSR when userspace executes an mrs
683 		 * instruction to access a special register that it does not
684 		 * have access to.
685 		 */
686 		if (!undef_insn(0, frame))
687 			call_trapsignal(td, SIGILL, ILL_PRVOPC,
688 			    (void *)frame->tf_elr, exception);
689 		userret(td, frame);
690 		break;
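	/*
	 * A single-step exception from EL0: if the thread was being stepped
	 * by a debugger (TDB_STEP), disarm the step state in MDSCR_EL1 and
	 * the saved SPSR before reporting the trace trap.
	 */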
691 	case EXCP_SOFTSTP_EL0:
692 		PROC_LOCK(td->td_proc);
693 		if ((td->td_dbgflags & TDB_STEP) != 0) {
694 			td->td_frame->tf_spsr &= ~PSR_SS;
695 			td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
696 			WRITE_SPECIALREG(mdscr_el1,
697 			    READ_SPECIALREG(mdscr_el1) & ~MDSCR_SS);
698 		}
699 		PROC_UNLOCK(td->td_proc);
700 		call_trapsignal(td, SIGTRAP, TRAP_TRACE,
701 		    (void *)frame->tf_elr, exception);
702 		userret(td, frame);
703 		break;
704 	default:
705 		call_trapsignal(td, SIGBUS, BUS_OBJERR, (void *)frame->tf_elr,
706 		    exception);
707 		userret(td, frame);
708 		break;
709 	}
710 
711 	KASSERT((td->td_pcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
712 	    ("Kernel VFP flags set while entering userspace"));
713 	KASSERT(
714 	    td->td_pcb->pcb_fpusaved == &td->td_pcb->pcb_fpustate,
715 	    ("Kernel VFP state in use when entering userspace"));
716 }
717 
718 /*
719  * TODO: We will need to handle these later when we support ARMv8.2 RAS.
720  */
721 void
722 do_serror(struct trapframe *frame)
723 {
724 	uint64_t esr, far;
725 
726 	kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
727 	far = frame->tf_far;
728 	esr = frame->tf_esr;
729 
730 	print_registers(frame);
731 	print_gp_register("far", far);
732 	printf(" esr: %16lx\n", esr);
733 	panic("Unhandled System Error");
734 }
735 
736 void
737 unhandled_exception(struct trapframe *frame)
738 {
739 	uint64_t esr, far;
740 
741 	kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
742 	far = frame->tf_far;
743 	esr = frame->tf_esr;
744 
745 	print_registers(frame);
746 	print_gp_register("far", far);
747 	printf(" esr: %16lx\n", esr);
748 	panic("Unhandled exception");
749 }
750