/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * arm64 trap handling: system call argument fetch, data/alignment/external
 * abort handlers, and the synchronous exception dispatchers for exceptions
 * taken from EL1 (kernel) and EL0 (userspace).
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/asan.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#ifdef KDB
#include <sys/kdb.h>
#endif

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>

#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/pcpu.h>
#include <machine/undefined.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
#endif

#ifdef VFP
#include <machine/vfp.h>
#endif

#ifdef KDB
#include <machine/db_machdep.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/* Called from exception.S */
void do_el1h_sync(struct thread *, struct trapframe *);
void do_el0_sync(struct thread *, struct trapframe *);
void do_el0_error(struct trapframe *);
void do_serror(struct trapframe *);
void unhandled_exception(struct trapframe *);

static void print_gp_register(const char *name, uint64_t value);
static void print_registers(struct trapframe *frame);

/* Hook installed by DTrace to handle its breakpoint traps (see EXCP_BRK). */
int (*dtrace_invop_jump_addr)(struct trapframe *);

/*
 * Common signature for the abort handlers below: the faulting thread, its
 * trapframe, the raw ESR and FAR register values, and "lower", which is
 * non-zero when the abort was taken from a lower EL (i.e. userspace).
 */
typedef void (abort_handler)(struct thread *, struct trapframe *, uint64_t,
    uint64_t, int);

static abort_handler align_abort;
static abort_handler data_abort;
static abort_handler external_abort;

/*
 * Dispatch table indexed by the DFSC (fault status code) field of ESR_ELx.
 * Entries left NULL fall through to the "Unhandled ... abort" panic /
 * default signal paths in do_el1h_sync()/do_el0_sync().
 */
static abort_handler *abort_handlers[] = {
	[ISS_DATA_DFSC_TF_L0] = data_abort,
	[ISS_DATA_DFSC_TF_L1] = data_abort,
	[ISS_DATA_DFSC_TF_L2] = data_abort,
	[ISS_DATA_DFSC_TF_L3] = data_abort,
	[ISS_DATA_DFSC_AFF_L1] = data_abort,
	[ISS_DATA_DFSC_AFF_L2] = data_abort,
	[ISS_DATA_DFSC_AFF_L3] = data_abort,
	[ISS_DATA_DFSC_PF_L1] = data_abort,
	[ISS_DATA_DFSC_PF_L2] = data_abort,
	[ISS_DATA_DFSC_PF_L3] = data_abort,
	[ISS_DATA_DFSC_ALIGN] = align_abort,
	[ISS_DATA_DFSC_EXT] = external_abort,
	[ISS_DATA_DFSC_EXT_L0] = external_abort,
	[ISS_DATA_DFSC_EXT_L1] = external_abort,
	[ISS_DATA_DFSC_EXT_L2] = external_abort,
	[ISS_DATA_DFSC_EXT_L3] = external_abort,
	[ISS_DATA_DFSC_ECC] = external_abort,
	[ISS_DATA_DFSC_ECC_L0] = external_abort,
	[ISS_DATA_DFSC_ECC_L1] = external_abort,
	[ISS_DATA_DFSC_ECC_L2] = external_abort,
	[ISS_DATA_DFSC_ECC_L3] = external_abort,
};

/*
 * Deliver a signal to "td" with trap-specific siginfo (signal number,
 * si_code, faulting address, and trap number).
 */
static __inline void
call_trapsignal(struct thread *td, int sig, int code, void *addr, int trapno)
{
	ksiginfo_t ksi;

	ksiginfo_init_trap(&ksi);
	ksi.ksi_signo = sig;
	ksi.ksi_code = code;
	ksi.ksi_addr = addr;
	ksi.ksi_trapno = trapno;
	trapsignal(td, &ksi);
}

/*
 * Fetch the system call number and arguments from the trapframe into
 * td->td_sa.  The number is taken from x8; arguments start at x0.  For
 * the indirect syscalls (SYS_syscall/SYS___syscall) the real number is
 * instead the first argument, and the remaining arguments shift down by
 * one register.  Always returns 0.
 */
int
cpu_fetch_syscall_args(struct thread *td)
{
	struct proc *p;
	syscallarg_t *ap, *dst_ap;
	struct syscall_args *sa;

	p = td->td_proc;
	sa = &td->td_sa;
	ap = td->td_frame->tf_x;
	dst_ap = &sa->args[0];

	sa->code = td->td_frame->tf_x[8];
	sa->original_code = sa->code;

	if (__predict_false(sa->code == SYS_syscall || sa->code == SYS___syscall)) {
		/* Indirect syscall: x0 holds the real syscall number. */
		sa->code = *ap++;
	} else {
		*dst_ap++ = *ap++;
	}

	/* Out-of-range numbers are routed to nosys. */
	if (__predict_false(sa->code >= p->p_sysent->sv_size))
		sa->callp = &nosys_sysent;
	else
		sa->callp = &p->p_sysent->sv_table[sa->code];

	KASSERT(sa->callp->sy_narg <= nitems(sa->args),
	    ("Syscall %d takes too many arguments", sa->code));

	/*
	 * Copy the remaining argument registers; one slot was already
	 * consumed above (either the first argument or the indirect
	 * syscall number).
	 */
	memcpy(dst_ap, ap, (nitems(sa->args) - 1) * sizeof(*dst_ap));

	td->td_retval[0] = 0;
	td->td_retval[1] = 0;

	return (0);
}

#include "../../kern/subr_syscall.c"

/*
 * Test for fault generated by given access instruction in
 * bus_peek_<foo> or bus_poke_<foo> bus function.
 */
extern uint32_t generic_bs_peek_1f, generic_bs_peek_2f;
extern uint32_t generic_bs_peek_4f, generic_bs_peek_8f;
extern uint32_t generic_bs_poke_1f, generic_bs_poke_2f;
extern uint32_t generic_bs_poke_4f, generic_bs_poke_8f;

static bool
test_bs_fault(void *addr)
{
	return (addr == &generic_bs_peek_1f ||
	    addr == &generic_bs_peek_2f ||
	    addr == &generic_bs_peek_4f ||
	    addr == &generic_bs_peek_8f ||
	    addr == &generic_bs_poke_1f ||
	    addr == &generic_bs_poke_2f ||
	    addr == &generic_bs_poke_4f ||
	    addr == &generic_bs_poke_8f);
}

/*
 * Handle an SVC exception from userspace.  An ISS (immediate) of zero is
 * a system call; any other immediate is not part of the ABI and raises
 * SIGILL instead.
 */
static void
svc_handler(struct thread *td, struct trapframe *frame)
{

	if ((frame->tf_esr & ESR_ELx_ISS_MASK) == 0) {
		syscallenter(td);
		syscallret(td);
	} else {
		call_trapsignal(td, SIGILL, ILL_ILLOPN, (void *)frame->tf_elr,
		    ESR_ELx_EXCEPTION(frame->tf_esr));
		userret(td, frame);
	}
}

/*
 * Alignment fault: fatal when taken from the kernel, SIGBUS with
 * BUS_ADRALN for userspace.
 */
static void
align_abort(struct thread *td, struct trapframe *frame, uint64_t esr,
    uint64_t far, int lower)
{
	if (!lower) {
		print_registers(frame);
		print_gp_register("far", far);
		printf(" esr: 0x%.16lx\n", esr);
		panic("Misaligned access from kernel space!");
	}

	call_trapsignal(td, SIGBUS, BUS_ADRALN, (void *)frame->tf_elr,
	    ESR_ELx_EXCEPTION(frame->tf_esr));
	userret(td, frame);
}


/*
 * Synchronous external abort (e.g. a bus error or ECC fault).  Userspace
 * gets SIGBUS; in the kernel only the bus_space peek/poke probe functions
 * are allowed to fault, anything else panics.
 */
static void
external_abort(struct thread *td, struct trapframe *frame, uint64_t esr,
    uint64_t far, int lower)
{
	if (lower) {
		call_trapsignal(td, SIGBUS, BUS_OBJERR, (void *)far,
		    ESR_ELx_EXCEPTION(frame->tf_esr));
		userret(td, frame);
		return;
	}

	/*
	 * Try to handle synchronous external aborts caused by
	 * bus_space_peek() and/or bus_space_poke() functions.
	 */
	if (test_bs_fault((void *)frame->tf_elr)) {
		/* Resume at the common recovery stub instead of faulting. */
		frame->tf_elr = (uint64_t)generic_bs_fault;
		return;
	}

	print_registers(frame);
	print_gp_register("far", far);
	panic("Unhandled external data abort");
}

/*
 * It is unsafe to access the stack canary value stored in "td" until
 * kernel map translation faults are handled, see the pmap_klookup() call below.
 * Thus, stack-smashing detection with per-thread canaries must be disabled in
 * this function.
 */
static void NO_PERTHREAD_SSP
data_abort(struct thread *td, struct trapframe *frame, uint64_t esr,
    uint64_t far, int lower)
{
	struct vm_map *map;
	struct pcb *pcb;
	vm_prot_t ftype;
	int error, sig, ucode;
#ifdef KDB
	bool handled;
#endif

	/*
	 * According to the ARMv8-A rev. A.g, B2.10.5 "Load-Exclusive
	 * and Store-Exclusive instruction usage restrictions", state
	 * of the exclusive monitors after data abort exception is unknown.
	 */
	clrex();

#ifdef KDB
	if (kdb_active) {
		kdb_reenter();
		return;
	}
#endif

	/* Select the map to fault against based on origin and address. */
	if (lower) {
		map = &td->td_proc->p_vmspace->vm_map;
	} else if (!ADDR_IS_CANONICAL(far)) {
		/* We received a TBI/PAC/etc. fault from the kernel */
		error = KERN_INVALID_ADDRESS;
		pcb = td->td_pcb;
		goto bad_far;
	} else if (ADDR_IS_KERNEL(far)) {
		/*
		 * Handle a special case: the data abort was caused by accessing
		 * a thread structure while its mapping was being promoted or
		 * demoted, as a consequence of the break-before-make rule. It
		 * is not safe to enable interrupts or dereference "td" before
		 * this case is handled.
		 *
		 * In principle, if pmap_klookup() fails, there is no need to
		 * call pmap_fault() below, but avoiding that call is not worth
		 * the effort.
		 */
		if (ESR_ELx_EXCEPTION(esr) == EXCP_DATA_ABORT) {
			switch (esr & ISS_DATA_DFSC_MASK) {
			case ISS_DATA_DFSC_TF_L0:
			case ISS_DATA_DFSC_TF_L1:
			case ISS_DATA_DFSC_TF_L2:
			case ISS_DATA_DFSC_TF_L3:
				if (pmap_klookup(far, NULL))
					return;
				break;
			}
		}
		intr_enable();
		map = kernel_map;
	} else {
		intr_enable();
		map = &td->td_proc->p_vmspace->vm_map;
		if (map == NULL)
			map = kernel_map;
	}
	pcb = td->td_pcb;

	/*
	 * Try to handle translation, access flag, and permission faults.
	 * Translation faults may occur as a result of the required
	 * break-before-make sequence used when promoting or demoting
	 * superpages. Such faults must not occur while holding the pmap lock,
	 * or pmap_fault() will recurse on that lock.
	 */
	if ((lower || map == kernel_map || pcb->pcb_onfault != 0) &&
	    pmap_fault(map->pmap, esr, far) == KERN_SUCCESS)
		return;

#ifdef INVARIANTS
	if (td->td_md.md_spinlock_count != 0) {
		print_registers(frame);
		print_gp_register("far", far);
		printf(" esr: 0x%.16lx\n", esr);
		panic("data abort with spinlock held (spinlock count %d != 0)",
		    td->td_md.md_spinlock_count);
	}
#endif
	if (td->td_critnest != 0 || WITNESS_CHECK(WARN_SLEEPOK |
	    WARN_GIANTOK, NULL, "Kernel page fault") != 0) {
		print_registers(frame);
		print_gp_register("far", far);
		printf(" esr: 0x%.16lx\n", esr);
		panic("data abort in critical section or under mutex");
	}

	/* Derive the VM fault type from the exception class and ISS bits. */
	switch (ESR_ELx_EXCEPTION(esr)) {
	case EXCP_INSN_ABORT:
	case EXCP_INSN_ABORT_L:
		ftype = VM_PROT_EXECUTE;
		break;
	default:
		/*
		 * If the exception was because of a read or cache operation
		 * pass a read fault type into the vm code. Cache operations
		 * need read permission but will set the WnR flag when the
		 * memory is unmapped.
		 */
		if ((esr & ISS_DATA_WnR) == 0 || (esr & ISS_DATA_CM) != 0)
			ftype = VM_PROT_READ;
		else
			ftype = VM_PROT_WRITE;
		break;
	}

	/* Fault in the page. */
	error = vm_fault_trap(map, far, ftype, VM_FAULT_NORMAL, &sig, &ucode);
	if (error != KERN_SUCCESS) {
		if (lower) {
			call_trapsignal(td, sig, ucode, (void *)far,
			    ESR_ELx_EXCEPTION(esr));
		} else {
bad_far:
			/*
			 * Kernel fault: give copyin()/copyout()-style code a
			 * chance to recover via pcb_onfault before panicking.
			 */
			if (td->td_intr_nesting_level == 0 &&
			    pcb->pcb_onfault != 0) {
				frame->tf_x[0] = error;
				frame->tf_elr = pcb->pcb_onfault;
				return;
			}

			printf("Fatal data abort:\n");
			print_registers(frame);
			print_gp_register("far", far);
			printf(" esr: 0x%.16lx\n", esr);

#ifdef KDB
			if (debugger_on_trap) {
				kdb_why = KDB_WHY_TRAP;
				handled = kdb_trap(ESR_ELx_EXCEPTION(esr), 0,
				    frame);
				kdb_why = KDB_WHY_UNSET;
				if (handled)
					return;
			}
#endif
			panic("vm_fault failed: 0x%lx error %d",
			    frame->tf_elr, error);
		}
	}

	if (lower)
		userret(td, frame);
}

/*
 * Print one general-purpose register value; with DDB compiled in, also
 * print the nearest kernel symbol and offset for kernel addresses.
 */
static void
print_gp_register(const char *name, uint64_t value)
{
#if defined(DDB)
	c_db_sym_t sym;
	const char *sym_name;
	db_expr_t sym_value;
	db_expr_t offset;
#endif

	printf(" %s: 0x%.16lx", name, value);
#if defined(DDB)
	/* If this looks like a kernel address try to find the symbol */
	if (value >= VM_MIN_KERNEL_ADDRESS) {
		sym = db_search_symbol(value, DB_STGY_ANY, &offset);
		if (sym != C_DB_SYM_NULL) {
			db_symbol_values(sym, &sym_name, &sym_value);
			printf(" (%s + 0x%lx)", sym_name, offset);
		}
	}
#endif
	printf("\n");
}

/*
 * Dump the full trapframe: x0-x29/x30, sp, lr, elr, and spsr.
 */
static void
print_registers(struct trapframe *frame)
{
	char name[4];
	u_int reg;

	for (reg = 0; reg < nitems(frame->tf_x); reg++) {
		/* Pad single-digit register names so columns line up. */
		snprintf(name, sizeof(name), "%sx%d", (reg < 10) ? " " : "",
		    reg);
		print_gp_register(name, frame->tf_x[reg]);
	}
	printf("  sp: 0x%.16lx\n", frame->tf_sp);
	print_gp_register(" lr", frame->tf_lr);
	print_gp_register("elr", frame->tf_elr);
	printf("spsr: 0x%.16lx\n", frame->tf_spsr);
}

#ifdef VFP
/*
 * Translate a trapped floating-point exception into SIGFPE with the
 * matching FPE_* si_code.  The individual fault bits are only meaningful
 * when ISS_FP_TFV is set; otherwise fall back to FPE_FLTIDO.
 */
static void
fpe_trap(struct thread *td, void *addr, uint32_t exception)
{
	int code;

	code = FPE_FLTIDO;
	if ((exception & ISS_FP_TFV) != 0) {
		if ((exception & ISS_FP_IOF) != 0)
			code = FPE_FLTINV;
		else if ((exception & ISS_FP_DZF) != 0)
			code = FPE_FLTDIV;
		else if ((exception & ISS_FP_OFF) != 0)
			code = FPE_FLTOVF;
		else if ((exception & ISS_FP_UFF) != 0)
			code = FPE_FLTUND;
		else if ((exception & ISS_FP_IXF) != 0)
			code = FPE_FLTRES;
	}
	call_trapsignal(td, SIGFPE, code, addr, exception);
}
#endif

/*
 * Handle a synchronous exception taken from EL1 (the kernel itself).
 * Dispatches on the exception class held in ESR_EL1; anything unhandled
 * is fatal.
 *
 * See the comment above data_abort().
 */
void NO_PERTHREAD_SSP
do_el1h_sync(struct thread *td, struct trapframe *frame)
{
	uint32_t exception;
	uint64_t esr, far;
	int dfsc;

	kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
	far = frame->tf_far;
	/* Read the esr register to get the exception details */
	esr = frame->tf_esr;
	exception = ESR_ELx_EXCEPTION(esr);

#ifdef KDTRACE_HOOKS
	if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, exception))
		return;
#endif

	CTR4(KTR_TRAP, "%s: exception=%lu, elr=0x%lx, esr=0x%lx",
	    __func__, exception, frame->tf_elr, esr);

	/*
	 * Enable debug exceptions if we aren't already handling one. They will
	 * be masked again in the exception handler's epilogue.
	 */
	if (exception != EXCP_BRK && exception != EXCP_WATCHPT_EL1 &&
	    exception != EXCP_SOFTSTP_EL1)
		dbg_enable();

	switch (exception) {
	case EXCP_FP_SIMD:
	case EXCP_TRAP_FP:
#ifdef VFP
		/* Only in-kernel FP use (PCB_FP_KERN) is recoverable here. */
		if ((td->td_pcb->pcb_fpflags & PCB_FP_KERN) != 0) {
			vfp_restore_state();
		} else
#endif
		{
			print_registers(frame);
			printf(" esr: 0x%.16lx\n", esr);
			panic("VFP exception in the kernel");
		}
		break;
	case EXCP_INSN_ABORT:
	case EXCP_DATA_ABORT:
		dfsc = esr & ISS_DATA_DFSC_MASK;
		if (dfsc < nitems(abort_handlers) &&
		    abort_handlers[dfsc] != NULL) {
			abort_handlers[dfsc](td, frame, esr, far, 0);
		} else {
			print_registers(frame);
			print_gp_register("far", far);
			printf(" esr: 0x%.16lx\n", esr);
			panic("Unhandled EL1 %s abort: 0x%x",
			    exception == EXCP_INSN_ABORT ? "instruction" :
			    "data", dfsc);
		}
		break;
	case EXCP_BRK:
#ifdef KDTRACE_HOOKS
		/*
		 * 0x40d is a magic breakpoint immediate; presumably it is the
		 * one reserved for the DTrace invop hook — confirm against
		 * the DTrace sources.
		 */
		if ((esr & ESR_ELx_ISS_MASK) == 0x40d && \
		    dtrace_invop_jump_addr != 0) {
			dtrace_invop_jump_addr(frame);
			break;
		}
#endif
#ifdef KDB
		kdb_trap(exception, 0, frame);
#else
		panic("No debugger in kernel.");
#endif
		break;
	case EXCP_WATCHPT_EL1:
	case EXCP_SOFTSTP_EL1:
#ifdef KDB
		kdb_trap(exception, 0, frame);
#else
		panic("No debugger in kernel.");
#endif
		break;
	case EXCP_FPAC:
		/* We can see this if the authentication on PAC fails */
		print_registers(frame);
		print_gp_register("far", far);
		panic("FPAC kernel exception");
		break;
	case EXCP_UNKNOWN:
		/* Give the undefined-instruction emulation hooks a chance. */
		if (undef_insn(1, frame))
			break;
		print_registers(frame);
		print_gp_register("far", far);
		panic("Undefined instruction: %08x",
		    *(uint32_t *)frame->tf_elr);
		break;
	case EXCP_BTI:
		print_registers(frame);
		print_gp_register("far", far);
		panic("Branch Target exception");
		break;
	default:
		print_registers(frame);
		print_gp_register("far", far);
		panic("Unknown kernel exception 0x%x esr_el1 0x%lx", exception,
		    esr);
	}
}

/*
 * Handle a synchronous exception taken from EL0 (userspace): system
 * calls, page faults, FP traps, breakpoints, and so on.  Unhandled
 * exception classes result in a signal to the thread, never a panic.
 */
void
do_el0_sync(struct thread *td, struct trapframe *frame)
{
	pcpu_bp_harden bp_harden;
	uint32_t exception;
	uint64_t esr, far;
	int dfsc;

	/* Check we have a sane environment when entering from userland */
	KASSERT((uintptr_t)get_pcpu() >= VM_MIN_KERNEL_ADDRESS,
	    ("Invalid pcpu address from userland: %p (tpidr 0x%lx)",
	    get_pcpu(), READ_SPECIALREG(tpidr_el1)));

	kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
	far = frame->tf_far;
	esr = frame->tf_esr;
	exception = ESR_ELx_EXCEPTION(esr);
	if (exception == EXCP_INSN_ABORT_L && far > VM_MAXUSER_ADDRESS) {
		/*
		 * Userspace may be trying to train the branch predictor to
		 * attack the kernel. If we are on a CPU affected by this
		 * call the handler to clear the branch predictor state.
		 */
		bp_harden = PCPU_GET(bp_harden);
		if (bp_harden != NULL)
			bp_harden();
	}
	intr_enable();

	CTR4(KTR_TRAP, "%s: exception=%lu, elr=0x%lx, esr=0x%lx",
	    __func__, exception, frame->tf_elr, esr);

	switch (exception) {
	case EXCP_FP_SIMD:
#ifdef VFP
		vfp_restore_state();
#else
		panic("VFP exception in userland");
#endif
		break;
	case EXCP_TRAP_FP:
#ifdef VFP
		fpe_trap(td, (void *)frame->tf_elr, esr);
		userret(td, frame);
#else
		panic("VFP exception in userland");
#endif
		break;
	case EXCP_SVE:
		call_trapsignal(td, SIGILL, ILL_ILLTRP, (void *)frame->tf_elr,
		    exception);
		userret(td, frame);
		break;
	case EXCP_SVC32:
	case EXCP_SVC64:
		svc_handler(td, frame);
		break;
	case EXCP_INSN_ABORT_L:
	case EXCP_DATA_ABORT_L:
	case EXCP_DATA_ABORT:
		dfsc = esr & ISS_DATA_DFSC_MASK;
		if (dfsc < nitems(abort_handlers) &&
		    abort_handlers[dfsc] != NULL)
			abort_handlers[dfsc](td, frame, esr, far, 1);
		else {
			print_registers(frame);
			print_gp_register("far", far);
			printf(" esr: 0x%.16lx\n", esr);
			panic("Unhandled EL0 %s abort: 0x%x",
			    exception == EXCP_INSN_ABORT_L ? "instruction" :
			    "data", dfsc);
		}
		break;
	case EXCP_UNKNOWN:
		/* Try instruction emulation before raising SIGILL. */
		if (!undef_insn(0, frame))
			call_trapsignal(td, SIGILL, ILL_ILLTRP, (void *)far,
			    exception);
		userret(td, frame);
		break;
	case EXCP_FPAC:
		call_trapsignal(td, SIGILL, ILL_ILLOPN, (void *)frame->tf_elr,
		    exception);
		userret(td, frame);
		break;
	case EXCP_SP_ALIGN:
		call_trapsignal(td, SIGBUS, BUS_ADRALN, (void *)frame->tf_sp,
		    exception);
		userret(td, frame);
		break;
	case EXCP_PC_ALIGN:
		call_trapsignal(td, SIGBUS, BUS_ADRALN, (void *)frame->tf_elr,
		    exception);
		userret(td, frame);
		break;
	case EXCP_BRKPT_EL0:
	case EXCP_BRK:
#ifdef COMPAT_FREEBSD32
	case EXCP_BRKPT_32:
#endif /* COMPAT_FREEBSD32 */
		call_trapsignal(td, SIGTRAP, TRAP_BRKPT, (void *)frame->tf_elr,
		    exception);
		userret(td, frame);
		break;
	case EXCP_WATCHPT_EL0:
		call_trapsignal(td, SIGTRAP, TRAP_TRACE, (void *)far,
		    exception);
		userret(td, frame);
		break;
	case EXCP_MSR:
		/*
		 * The CPU can raise EXCP_MSR when userspace executes an mrs
		 * instruction to access a special register userspace doesn't
		 * have access to.
		 */
		if (!undef_insn(0, frame))
			call_trapsignal(td, SIGILL, ILL_PRVOPC,
			    (void *)frame->tf_elr, exception);
		userret(td, frame);
		break;
	case EXCP_SOFTSTP_EL0:
		/* Single-step: clear the step state if ptrace requested it. */
		PROC_LOCK(td->td_proc);
		if ((td->td_dbgflags & TDB_STEP) != 0) {
			td->td_frame->tf_spsr &= ~PSR_SS;
			td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
			WRITE_SPECIALREG(mdscr_el1,
			    READ_SPECIALREG(mdscr_el1) & ~MDSCR_SS);
		}
		PROC_UNLOCK(td->td_proc);
		call_trapsignal(td, SIGTRAP, TRAP_TRACE,
		    (void *)frame->tf_elr, exception);
		userret(td, frame);
		break;
	case EXCP_BTI:
		call_trapsignal(td, SIGILL, ILL_ILLOPC, (void *)frame->tf_elr,
		    exception);
		userret(td, frame);
		break;
	default:
		call_trapsignal(td, SIGBUS, BUS_OBJERR, (void *)frame->tf_elr,
		    exception);
		userret(td, frame);
		break;
	}

	/* No kernel FP state may leak back into userspace. */
	KASSERT((td->td_pcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
	    ("Kernel VFP flags set while entering userspace"));
	KASSERT(
	    td->td_pcb->pcb_fpusaved == &td->td_pcb->pcb_fpustate,
	    ("Kernel VFP state in use when entering userspace"));
}

/*
 * TODO: We will need to handle these later when we support ARMv8.2 RAS.
 */
void
do_serror(struct trapframe *frame)
{
	uint64_t esr, far;

	kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
	far = frame->tf_far;
	esr = frame->tf_esr;

	print_registers(frame);
	print_gp_register("far", far);
	printf(" esr: 0x%.16lx\n", esr);
	panic("Unhandled System Error");
}

/*
 * Catch-all for exception vectors with no dedicated handler; always fatal.
 */
void
unhandled_exception(struct trapframe *frame)
{
	uint64_t esr, far;

	kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
	far = frame->tf_far;
	esr = frame->tf_esr;

	print_registers(frame);
	print_gp_register("far", far);
	printf(" esr: 0x%.16lx\n", esr);
	panic("Unhandled exception");
}