1 /*- 2 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 3 * Copyright (C) 1995, 1996 TooLs GmbH. 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. All advertising materials mentioning features or use of this software 15 * must display the following acknowledgement: 16 * This product includes software developed by TooLs GmbH. 17 * 4. The name of TooLs GmbH may not be used to endorse or promote products 18 * derived from this software without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 23 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 29 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
30 * 31 * $NetBSD: trap.c,v 1.58 2002/03/04 04:07:35 dbj Exp $ 32 */ 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include <sys/param.h> 38 #include <sys/kdb.h> 39 #include <sys/proc.h> 40 #include <sys/ktr.h> 41 #include <sys/lock.h> 42 #include <sys/mutex.h> 43 #include <sys/pioctl.h> 44 #include <sys/ptrace.h> 45 #include <sys/reboot.h> 46 #include <sys/syscall.h> 47 #include <sys/sysent.h> 48 #include <sys/systm.h> 49 #include <sys/kernel.h> 50 #include <sys/uio.h> 51 #include <sys/signalvar.h> 52 #include <sys/vmmeter.h> 53 54 #include <security/audit/audit.h> 55 56 #include <vm/vm.h> 57 #include <vm/pmap.h> 58 #include <vm/vm_extern.h> 59 #include <vm/vm_param.h> 60 #include <vm/vm_kern.h> 61 #include <vm/vm_map.h> 62 #include <vm/vm_page.h> 63 64 #include <machine/_inttypes.h> 65 #include <machine/altivec.h> 66 #include <machine/cpu.h> 67 #include <machine/db_machdep.h> 68 #include <machine/fpu.h> 69 #include <machine/frame.h> 70 #include <machine/pcb.h> 71 #include <machine/psl.h> 72 #include <machine/trap.h> 73 #include <machine/spr.h> 74 #include <machine/sr.h> 75 76 /* Below matches setjmp.S */ 77 #define FAULTBUF_LR 21 78 #define FAULTBUF_R1 1 79 #define FAULTBUF_R2 2 80 #define FAULTBUF_CR 22 81 #define FAULTBUF_R14 3 82 83 #define MOREARGS(sp) ((caddr_t)((uintptr_t)(sp) + \ 84 sizeof(struct callframe) - 3*sizeof(register_t))) /* more args go here */ 85 86 static void trap_fatal(struct trapframe *frame); 87 static void printtrap(u_int vector, struct trapframe *frame, int isfatal, 88 int user); 89 static int trap_pfault(struct trapframe *frame, int user); 90 static int fix_unaligned(struct thread *td, struct trapframe *frame); 91 static int handle_onfault(struct trapframe *frame); 92 static void syscall(struct trapframe *frame); 93 94 #if defined(__powerpc64__) && defined(AIM) 95 void handle_kernel_slb_spill(int, register_t, register_t); 96 static int handle_user_slb_spill(pmap_t pm, vm_offset_t addr); 97 extern int n_slbs; 98 static 
void normalize_inputs(void);
#endif

extern vm_offset_t __startkernel;

#ifdef KDB
int db_trap_glue(struct trapframe *);	/* Called from trap_subr.S */
#endif

/* Exception-vector-to-name mapping entry; see powerpc_exceptions[] below. */
struct powerpc_exception {
	u_int	vector;
	char	*name;
};

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

int (*dtrace_invop_jump_addr)(struct trapframe *);
#endif

/* Table of human-readable names for each exception vector, for messages. */
static struct powerpc_exception powerpc_exceptions[] = {
	{ EXC_CRIT,	"critical input" },
	{ EXC_RST,	"system reset" },
	{ EXC_MCHK,	"machine check" },
	{ EXC_DSI,	"data storage interrupt" },
	{ EXC_DSE,	"data segment exception" },
	{ EXC_ISI,	"instruction storage interrupt" },
	{ EXC_ISE,	"instruction segment exception" },
	{ EXC_EXI,	"external interrupt" },
	{ EXC_ALI,	"alignment" },
	{ EXC_PGM,	"program" },
	{ EXC_HEA,	"hypervisor emulation assistance" },
	{ EXC_FPU,	"floating-point unavailable" },
	{ EXC_APU,	"auxiliary proc unavailable" },
	{ EXC_DECR,	"decrementer" },
	{ EXC_FIT,	"fixed-interval timer" },
	{ EXC_WDOG,	"watchdog timer" },
	{ EXC_SC,	"system call" },
	{ EXC_TRC,	"trace" },
	{ EXC_FPA,	"floating-point assist" },
	{ EXC_DEBUG,	"debug" },
	{ EXC_PERF,	"performance monitoring" },
	{ EXC_VEC,	"altivec unavailable" },
	{ EXC_VSX,	"vsx unavailable" },
	{ EXC_FAC,	"facility unavailable" },
	{ EXC_ITMISS,	"instruction tlb miss" },
	{ EXC_DLMISS,	"data load tlb miss" },
	{ EXC_DSMISS,	"data store tlb miss" },
	{ EXC_BPT,	"instruction breakpoint" },
	{ EXC_SMI,	"system management" },
	{ EXC_VECAST_G4,	"altivec assist" },
	{ EXC_THRM,	"thermal management" },
	{ EXC_RUNMODETRC,	"run mode/trace" },
	{ EXC_SOFT_PATCH,	"soft patch exception" },
	{ EXC_LAST,	NULL }	/* sentinel: terminates trapname() scan */
};

/*
 * printf(9) "%b" format strings used to decode status registers bit by
 * bit in cpu_printtrap(): the Book-E ESR and MCSR, and the MPC745x
 * MSSSR0 memory-subsystem status register.
 */
#define ESR_BITMASK							\
    "\20"								\
    "\040b0\037b1\036b2\035b3\034PIL\033PRR\032PTR\031FP"		\
    "\030ST\027b9\026DLK\025ILK\024b12\023b13\022BO\021PIE"		\
    "\020b16\017b17\016b18\015b19\014b20\013b21\012b22\011b23"		\
    "\010SPE\007EPID\006b26\005b27\004b28\003b29\002b30\001b31"
#define	MCSR_BITMASK							\
    "\20"								\
    "\040MCP\037ICERR\036DCERR\035TLBPERR\034L2MMU_MHIT\033b5\032b6\031b7"	\
    "\030b8\027b9\026b10\025NMI\024MAV\023MEA\022b14\021IF"		\
    "\020LD\017ST\016LDG\015b19\014b20\013b21\012b22\011b23"		\
    "\010b24\007b25\006b26\005b27\004b28\003b29\002TLBSYNC\001BSL2_ERR"
#define	MSSSR_BITMASK							\
    "\20"								\
    "\040b0\037b1\036b2\035b3\034b4\033b5\032b6\031b7"			\
    "\030b8\027b9\026b10\025b11\024b12\023L2TAG\022L2DAT\021L3TAG"	\
    "\020L3DAT\017APE\016DPE\015TEA\014b20\013b21\012b22\011b23"	\
    "\010b24\007b25\006b26\005b27\004b28\003b29\002b30\001b31"


/*
 * Map an exception vector number to its human-readable name, or
 * "unknown" if the vector is not in powerpc_exceptions[].
 */
static const char *
trapname(u_int vector)
{
	struct powerpc_exception *pe;

	for (pe = powerpc_exceptions; pe->vector != EXC_LAST; pe++) {
		if (pe->vector == vector)
			return (pe->name);
	}

	return ("unknown");
}

/*
 * Return true if this frame was raised by a trap instruction (e.g. a
 * breakpoint).  On AIM this is a program exception with the trap bit
 * set in SRR1; on Book-E the ESR_PTR bit in the ESR indicates it.
 */
static inline bool
frame_is_trap_inst(struct trapframe *frame)
{
#ifdef AIM
	return (frame->exc == EXC_PGM && frame->srr1 & EXC_PGM_TRAP);
#else
	return ((frame->cpu.booke.esr & ESR_PTR) != 0);
#endif
}

/*
 * Main C-level trap dispatcher, called from the low-level trap stubs.
 *
 * Distinguishes user traps (PSL_PR set in the saved MSR) from kernel
 * traps.  User traps either get handled in place (lazy FPU/AltiVec/VSX
 * enable, syscalls, alignment fixups, page faults) or are converted to
 * a signal delivered via trapsignal().  Kernel traps are either
 * recovered (page fault, onfault state, SLB refill, debugger entry) or
 * are fatal.
 */
void
trap(struct trapframe *frame)
{
	struct thread	*td;
	struct proc	*p;
#ifdef KDTRACE_HOOKS
	uint32_t inst;
#endif
	int		sig, type, user;
	u_int		ucode;
	ksiginfo_t	ksi;
	register_t 	fscr;

	VM_CNT_INC(v_trap);

#ifdef KDB
	/* A trap taken while already inside the debugger re-enters KDB. */
	if (kdb_active) {
		kdb_reenter();
		return;
	}
#endif

	td = curthread;
	p = td->td_proc;

	type = ucode = frame->exc;
	sig = 0;
	user = frame->srr1 & PSL_PR;	/* PSL_PR set => trap came from user mode */

	CTR3(KTR_TRAP, "trap: %s type=%s (%s)", td->td_name,
	    trapname(type), user ? "user" : "kernel");

#ifdef KDTRACE_HOOKS
	/*
	 * A trap can occur while DTrace executes a probe. Before
	 * executing the probe, DTrace blocks re-scheduling and sets
	 * a flag in its per-cpu flags to indicate that it doesn't
	 * want to fault. On returning from the probe, the no-fault
	 * flag is cleared and finally re-scheduling is enabled.
	 *
	 * If the DTrace kernel module has registered a trap handler,
	 * call it and if it returns non-zero, assume that it has
	 * handled the trap and modified the trap frame so that this
	 * function can return normally.
	 */
	if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, type) != 0)
		return;
#endif

	if (user) {
		td->td_pticks = 0;
		td->td_frame = frame;
		/* Refresh copy-on-write credentials if they changed. */
		if (td->td_cowgen != p->p_cowgen)
			thread_cow_update(td);

		/* User Mode Traps */
		switch (type) {
		case EXC_RUNMODETRC:
		case EXC_TRC:
			/* Single-step trace: clear SE and raise SIGTRAP. */
			frame->srr1 &= ~PSL_SE;
			sig = SIGTRAP;
			ucode = TRAP_TRACE;
			break;

#if defined(__powerpc64__) && defined(AIM)
		case EXC_ISE:
		case EXC_DSE:
			/*
			 * Segment fault: try to spill the missing SLB
			 * entry for the faulting address; SIGSEGV if
			 * no mapping exists.
			 */
			if (handle_user_slb_spill(&p->p_vmspace->vm_pmap,
			    (type == EXC_ISE) ? frame->srr0 : frame->dar) != 0){
				sig = SIGSEGV;
				ucode = SEGV_MAPERR;
			}
			break;
#endif
		case EXC_DSI:
		case EXC_ISI:
			sig = trap_pfault(frame, 1);
			if (sig == SIGSEGV)
				ucode = SEGV_MAPERR;
			break;

		case EXC_SC:
			syscall(frame);
			break;

		case EXC_FPU:
			/* Lazy FPU context: enable on first use. */
			KASSERT((td->td_pcb->pcb_flags & PCB_FPU) != PCB_FPU,
			    ("FPU already enabled for thread"));
			enable_fpu(td);
			break;

		case EXC_VEC:
			/* Lazy AltiVec context: enable on first use. */
			KASSERT((td->td_pcb->pcb_flags & PCB_VEC) != PCB_VEC,
			    ("Altivec already enabled for thread"));
			enable_vec(td);
			break;

		case EXC_VSX:
			/*
			 * VSX overlays the FP and vector register
			 * files, so both must be live before marking
			 * the thread as a VSX user.
			 */
			KASSERT((td->td_pcb->pcb_flags & PCB_VSX) != PCB_VSX,
			    ("VSX already enabled for thread"));
			if (!(td->td_pcb->pcb_flags & PCB_VEC))
				enable_vec(td);
			if (!(td->td_pcb->pcb_flags & PCB_FPU))
				save_fpu(td);
			td->td_pcb->pcb_flags |= PCB_VSX;
			enable_fpu(td);
			break;

		case EXC_FAC:
			/*
			 * Facility unavailable: FSCR identifies which
			 * facility faulted; all are reported as
			 * illegal instructions here.
			 */
			fscr = mfspr(SPR_FSCR);
			if ((fscr & FSCR_IC_MASK) == FSCR_IC_HTM) {
				CTR0(KTR_TRAP, "Hardware Transactional Memory subsystem disabled");
			}
			sig = SIGILL;
			ucode =	ILL_ILLOPC;
			break;
		case EXC_HEA:
			sig = SIGILL;
			ucode =	ILL_ILLOPC;
			break;

		case EXC_VECAST_E:
		case EXC_VECAST_G4:
		case EXC_VECAST_G5:
			/*
			 * We get a VPU assist exception for IEEE mode
			 * vector operations on denormalized floats.
			 * Emulating this is a giant pain, so for now,
			 * just switch off IEEE mode and treat them as
			 * zero (set the VSCR non-Java mode bit).
			 */

			save_vec(td);
			td->td_pcb->pcb_vec.vscr |= ALTIVEC_VSCR_NJ;
			enable_vec(td);
			break;

		case EXC_ALI:
			if (fix_unaligned(td, frame) != 0) {
				sig = SIGBUS;
				ucode = BUS_ADRALN;
			}
			else
				/* Emulated: skip the faulting instruction. */
				frame->srr0 += 4;
			break;

		case EXC_DEBUG:	/* Single stepping */
			mtspr(SPR_DBSR, mfspr(SPR_DBSR));	/* ack DBSR (write-1-to-clear) */
			frame->srr1 &= ~PSL_DE;
			frame->cpu.booke.dbcr0 &= ~(DBCR0_IDM | DBCR0_IC);
			sig = SIGTRAP;
			ucode = TRAP_TRACE;
			break;

		case EXC_PGM:
			/* Identify the trap reason */
			if (frame_is_trap_inst(frame)) {
#ifdef KDTRACE_HOOKS
				/* 0x0FFFDDDD: DTrace pid-probe trap encoding. */
				inst = fuword32((const void *)frame->srr0);
				if (inst == 0x0FFFDDDD &&
				    dtrace_pid_probe_ptr != NULL) {
					(*dtrace_pid_probe_ptr)(frame);
					break;
				}
#endif
 				sig = SIGTRAP;
				ucode = TRAP_BRKPT;
			} else {
				/* Try to emulate the faulting instruction. */
				sig = ppc_instr_emulate(frame, td);
				if (sig == SIGILL) {
					if (frame->srr1 & EXC_PGM_PRIV)
						ucode = ILL_PRVOPC;
					else if (frame->srr1 & EXC_PGM_ILLEGAL)
						ucode = ILL_ILLOPC;
				} else if (sig == SIGFPE)
					ucode = FPE_FLTINV;	/* Punt for now, invalid operation. */
			}
			break;

		case EXC_MCHK:
			/*
			 * Note that this may not be recoverable for the user
			 * process, depending on the type of machine check,
			 * but it at least prevents the kernel from dying.
			 */
			sig = SIGBUS;
			ucode = BUS_OBJERR;
			break;

#if defined(__powerpc64__) && defined(AIM)
		case EXC_SOFT_PATCH:
			/*
			 * Point to the instruction that generated the
			 * exception to execute it again, and normalize
			 * the register values.
			 */
			frame->srr0 -= 4;
			normalize_inputs();
			break;
#endif

		default:
			trap_fatal(frame);
		}
	} else {
		/* Kernel Mode Traps */

		KASSERT(cold || td->td_ucred != NULL,
		    ("kernel trap doesn't have ucred"));
		switch (type) {
		case EXC_PGM:
#ifdef KDTRACE_HOOKS
			/* DTrace FBT/invop breakpoint in kernel text. */
			if (frame_is_trap_inst(frame)) {
				if (*(uint32_t *)frame->srr0 == EXC_DTRACE) {
					if (dtrace_invop_jump_addr != NULL) {
						dtrace_invop_jump_addr(frame);
						return;
					}
				}
			}
#endif
#ifdef KDB
			if (db_trap_glue(frame))
				return;
#endif
			break;
#if defined(__powerpc64__) && defined(AIM)
		case EXC_DSE:
			/*
			 * Kernel touched the user segment: reinstall
			 * the cached user SLB entry directly via
			 * slbmte and retry.
			 */
			if (td->td_pcb->pcb_cpu.aim.usr_vsid != 0 &&
			    (frame->dar & SEGMENT_MASK) == USER_ADDR) {
				__asm __volatile ("slbmte %0, %1" ::
					"r"(td->td_pcb->pcb_cpu.aim.usr_vsid),
					"r"(USER_SLB_SLBE));
				return;
			}
			break;
#endif
		case EXC_DSI:
			if (trap_pfault(frame, 0) == 0)
 				return;
			break;
		case EXC_MCHK:
			if (handle_onfault(frame))
 				return;
			break;
		default:
			break;
		}
		/* Unrecovered kernel trap: panic (or enter the debugger). */
		trap_fatal(frame);
	}

	/* Deliver any signal decided on above to the user thread. */
	if (sig != 0) {
		if (p->p_sysent->sv_transtrap != NULL)
			sig = (p->p_sysent->sv_transtrap)(sig, type);
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = sig;
		ksi.ksi_code = (int) ucode; /* XXX, not POSIX */
		ksi.ksi_addr = (void *)frame->srr0;
		ksi.ksi_trapno = type;
		trapsignal(td, &ksi);
	}

	userret(td, frame);
}

/*
 * Report an unrecoverable trap and panic, giving KDB (if configured
 * and debugger_on_trap is set) a chance to take over first.
 */
static void
trap_fatal(struct trapframe *frame)
{
#ifdef KDB
	bool handled;
#endif

	printtrap(frame->exc, frame, 1, (frame->srr1 & PSL_PR));
#ifdef KDB
	if (debugger_on_trap) {
		kdb_why = KDB_WHY_TRAP;
		handled = kdb_trap(frame->exc, 0, frame);
		kdb_why = KDB_WHY_UNSET;
		if (handled)
			return;
	}
#endif
	panic("%s trap", trapname(frame->exc));
}

/*
 * CPU-family-specific part of the trap report: decode the AIM DSISR /
 * MSSSR0 or the Book-E ESR / MCSR / MCAR registers for printtrap().
 */
static void
cpu_printtrap(u_int vector, struct trapframe *frame, int isfatal, int user)
{
#ifdef AIM
	uint16_t ver;

	switch (vector) {
	case EXC_DSE:
	case EXC_DSI:
	case EXC_DTMISS:
		printf("   dsisr           = 0x%lx\n",
		    (u_long)frame->cpu.aim.dsisr);
		break;
	case EXC_MCHK:
		/* MSSSR0 exists only on MPC745x-class parts. */
		ver = mfpvr() >> 16;
		if (MPC745X_P(ver))
			printf("    msssr0         = 0x%b\n",
			    (int)mfspr(SPR_MSSSR0), MSSSR_BITMASK);
		break;
	}
#elif defined(BOOKE)
	vm_paddr_t pa;

	switch (vector) {
	case EXC_MCHK:
		/* MCAR is split across two 32-bit SPRs. */
		pa = mfspr(SPR_MCARU);
		pa = (pa << 32) | (u_register_t)mfspr(SPR_MCAR);
		printf("   mcsr            = 0x%b\n",
		    (int)mfspr(SPR_MCSR), MCSR_BITMASK);
		printf("   mcar            = 0x%jx\n", (uintmax_t)pa);
	}
	printf("   esr             = 0x%b\n",
	    (int)frame->cpu.booke.esr, ESR_BITMASK);
#endif
}

/*
 * Print a human-readable description of a trap frame to the console.
 * srr0/lr are also shown offset by the kernel's load displacement
 * (__startkernel - KERNBASE) so they can be looked up in the static
 * symbol table of a relocated kernel.
 */
static void
printtrap(u_int vector, struct trapframe *frame, int isfatal, int user)
{

	printf("\n");
	printf("%s %s trap:\n", isfatal ? "fatal" : "handled",
	    user ? "user" : "kernel");
	printf("\n");
	printf("   exception       = 0x%x (%s)\n", vector, trapname(vector));
	switch (vector) {
	case EXC_DSE:
	case EXC_DSI:
	case EXC_DTMISS:
	case EXC_ALI:
		/* Data-side faults: the faulting address is in DAR. */
		printf("   virtual address = 0x%" PRIxPTR "\n", frame->dar);
		break;
	case EXC_ISE:
	case EXC_ISI:
	case EXC_ITMISS:
		/* Instruction-side faults: the faulting address is srr0. */
		printf("   virtual address = 0x%" PRIxPTR "\n", frame->srr0);
		break;
	case EXC_MCHK:
		break;
	}
	cpu_printtrap(vector, frame, isfatal, user);
	printf("   srr0            = 0x%" PRIxPTR " (0x%" PRIxPTR ")\n",
	    frame->srr0, frame->srr0 - (register_t)(__startkernel - KERNBASE));
	printf("   srr1            = 0x%lx\n", (u_long)frame->srr1);
	printf("   current msr     = 0x%" PRIxPTR "\n", mfmsr());
	printf("   lr              = 0x%" PRIxPTR " (0x%" PRIxPTR ")\n",
	    frame->lr, frame->lr - (register_t)(__startkernel - KERNBASE));
	printf("   frame           = %p\n", frame);
	printf("   curthread       = %p\n", curthread);
	if (curthread != NULL)
		printf("          pid = %d, comm = %s\n",
		    curthread->td_proc->p_pid, curthread->td_name);
	printf("\n");
}
/*
 * Handles a fatal fault when we have onfault state to recover. Returns
 * non-zero if there was onfault recovery state available.
 *
 * Rewrites the trap frame so that the interrupted thread resumes at the
 * saved setjmp return point recorded in pcb_onfault (FAULTBUF_* indices
 * match the layout written by setjmp.S): srr0 gets the saved LR, the
 * stack/TOC pointers (r1/r2), CR, and the non-volatile registers
 * r14-r31 are restored, and r3 is forced to 1 so the setjmp call
 * appears to return non-zero.
 */
static int
handle_onfault(struct trapframe *frame)
{
	struct thread	*td;
	jmp_buf		*fb;

	td = curthread;
	fb = td->td_pcb->pcb_onfault;
	if (fb != NULL) {
		frame->srr0 = (*fb)->_jb[FAULTBUF_LR];
		frame->fixreg[1] = (*fb)->_jb[FAULTBUF_R1];
		frame->fixreg[2] = (*fb)->_jb[FAULTBUF_R2];
		frame->fixreg[3] = 1;	/* fake setjmp returning non-zero */
		frame->cr = (*fb)->_jb[FAULTBUF_CR];
		/* Restore non-volatile registers r14-r31. */
		bcopy(&(*fb)->_jb[FAULTBUF_R14], &frame->fixreg[14],
		    18 * sizeof(register_t));
		td->td_pcb->pcb_onfault = NULL; /* Returns twice, not thrice */
		return (1);
	}
	return (0);
}

/*
 * Decode the syscall number and arguments out of the trap frame into
 * td->td_sa for syscallenter().
 *
 * The syscall code arrives in r0; the first NARGREG arguments are in
 * the registers starting at FIRSTARG.  SYS_syscall and SYS___syscall
 * indirection shifts the real code into the first argument slot(s).
 * Arguments beyond the registers are copied in from the user stack at
 * MOREARGS(sp).  Returns 0 on success or the copyin() error.
 */
int
cpu_fetch_syscall_args(struct thread *td)
{
	struct proc *p;
	struct trapframe *frame;
	struct syscall_args *sa;
	caddr_t	params;
	size_t argsz;
	int error, n, i;

	p = td->td_proc;
	frame = td->td_frame;
	sa = &td->td_sa;

	sa->code = frame->fixreg[0];
	params = (caddr_t)(frame->fixreg + FIRSTARG);
	n = NARGREG;

	if (sa->code == SYS_syscall) {
		/*
		 * code is first argument,
		 * followed by actual args.
		 */
		sa->code = *(register_t *) params;
		params += sizeof(register_t);
		n -= 1;
	} else if (sa->code == SYS___syscall) {
		/*
		 * Like syscall, but code is a quad,
		 * so as to maintain quad alignment
		 * for the rest of the args.
		 */
		if (SV_PROC_FLAG(p, SV_ILP32)) {
			/* 32-bit process: skip the pad word of the quad. */
			params += sizeof(register_t);
			sa->code = *(register_t *) params;
			params += sizeof(register_t);
			n -= 2;
		} else {
			sa->code = *(register_t *) params;
			params += sizeof(register_t);
			n -= 1;
		}
	}

	/* Out-of-range codes fall back to entry 0 (conventionally nosys). */
	if (sa->code >= p->p_sysent->sv_size)
		sa->callp = &p->p_sysent->sv_table[0];
	else
		sa->callp = &p->p_sysent->sv_table[sa->code];

	sa->narg = sa->callp->sy_narg;

	if (SV_PROC_FLAG(p, SV_ILP32)) {
		/* 32-bit args occupy full registers; mask the high half. */
		argsz = sizeof(uint32_t);

		for (i = 0; i < n; i++)
			sa->args[i] = ((u_register_t *)(params))[i] &
			    0xffffffff;
	} else {
		argsz = sizeof(uint64_t);

		for (i = 0; i < n; i++)
			sa->args[i] = ((u_register_t *)(params))[i];
	}

	/* Remaining arguments live on the user stack past the frame head. */
	if (sa->narg > n)
		error = copyin(MOREARGS(frame->fixreg[1]), sa->args + n,
			       (sa->narg - n) * argsz);
	else
		error = 0;

#ifdef __powerpc64__
	if (SV_PROC_FLAG(p, SV_ILP32) && sa->narg > n) {
		/*
		 * Expand the size of arguments copied from the stack:
		 * widen each packed 32-bit stack word into its own
		 * 64-bit args[] slot, working from the last slot down
		 * so the expansion can be done in place.
		 * NOTE(review): the loop starts at i = sa->narg and so
		 * also writes sa->args[sa->narg], one past the last
		 * argument index — confirm sa->args has slack for this.
		 */

		for (i = sa->narg; i >= n; i--)
			sa->args[i] = ((uint32_t *)(&sa->args[n]))[i-n];
	}
#endif

	if (error == 0) {
		td->td_retval[0] = 0;
		td->td_retval[1] = frame->fixreg[FIRSTARG + 1];
	}
	return (error);
}

#include "../../kern/subr_syscall.c"

/*
 * System call trap entry: record the frame and run the MI
 * syscallenter()/syscallret() path from subr_syscall.c.
 */
void
syscall(struct trapframe *frame)
{
	struct thread *td;
	int error;

	td = curthread;
	td->td_frame = frame;

#if defined(__powerpc64__) && defined(AIM)
	/*
	 * Speculatively restore last user SLB segment, which we know is
	 * invalid already, since we are likely to do copyin()/copyout().
	 */
	if (td->td_pcb->pcb_cpu.aim.usr_vsid != 0)
		__asm __volatile ("slbmte %0, %1; isync" ::
		    "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
#endif

	error = syscallenter(td);
	syscallret(td, error);
}

#if defined(__powerpc64__) && defined(AIM)
/* Handle kernel SLB faults -- runs in real mode, all seat belts off */
void
handle_kernel_slb_spill(int type, register_t dar, register_t srr0)
{
	struct slb *slbcache;
	uint64_t slbe, slbv;
	uint64_t esid, addr;
	int i;

	/* ISE faults on the PC; DSE faults on the data address. */
	addr = (type == EXC_ISE) ? srr0 : dar;
	slbcache = PCPU_GET(aim.slb);
	esid = (uintptr_t)addr >> ADDR_SR_SHFT;
	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	/* See if the hardware flushed this somehow (can happen in LPARs) */
	for (i = 0; i < n_slbs; i++)
		if (slbcache[i].slbe == (slbe | (uint64_t)i))
			return;

	/* Not in the map, needs to actually be added */
	slbv = kernel_va_to_slbv(addr);
	if (slbcache[USER_SLB_SLOT].slbe == 0) {
		/* Prefer a free slot other than the reserved user slot. */
		for (i = 0; i < n_slbs; i++) {
			if (i == USER_SLB_SLOT)
				continue;
			if (!(slbcache[i].slbe & SLBE_VALID))
				goto fillkernslb;
		}

		/* No free slot: mark the user slot in use (non-zero). */
		if (i == n_slbs)
			slbcache[USER_SLB_SLOT].slbe = 1;
	}

	/* Sacrifice a random SLB entry that is not the user entry */
	i = mftb() % n_slbs;
	if (i == USER_SLB_SLOT)
		i = (i+1) % n_slbs;

fillkernslb:
	/* Write new entry */
	slbcache[i].slbv = slbv;
	slbcache[i].slbe = slbe | (uint64_t)i;

	/* Trap handler will restore from cache on exit */
}

/*
 * Handle a user-mode segment fault by allocating (or re-inserting) the
 * SLB entry for the faulting address in the pmap's software SLB cache.
 * Returns 0 on success, -1 if the pmap has no SLB (nothing we can do).
 */
static int
handle_user_slb_spill(pmap_t pm, vm_offset_t addr)
{
	struct slb *user_entry;
	uint64_t esid;
	int i;

	if (pm->pm_slb == NULL)
		return (-1);

	esid = (uintptr_t)addr >> ADDR_SR_SHFT;

	PMAP_LOCK(pm);
	user_entry = user_va_to_slb_entry(pm, addr);

	if (user_entry == NULL) {
		/* allocate_vsid auto-spills it */
		(void)allocate_user_vsid(pm, esid, 0);
	} else {
		/*
		 * Check that another CPU has not already mapped this.
		 * XXX: Per-thread SLB caches would be better.
		 */
		for (i = 0; i < pm->pm_slb_len; i++)
			if (pm->pm_slb[i] == user_entry)
				break;

		if (i == pm->pm_slb_len)
			slb_insert_user(pm, user_entry);
	}
	PMAP_UNLOCK(pm);

	return (0);
}
#endif

/*
 * Resolve a page fault (ISI/DSI) through the VM system.  Determines
 * the faulting address and access type from the frame, picks the user
 * or kernel map, and calls vm_fault().  Returns 0 if the fault was
 * satisfied (or recovered via onfault state), else SIGSEGV.
 */
static int
trap_pfault(struct trapframe *frame, int user)
{
	vm_offset_t	eva, va;
	struct		thread *td;
	struct		proc *p;
	vm_map_t	map;
	vm_prot_t	ftype;
	int		rv, is_user;

	td = curthread;
	p = td->td_proc;
	if (frame->exc == EXC_ISI) {
		/* Instruction fetch fault: address is the PC. */
		eva = frame->srr0;
		ftype = VM_PROT_EXECUTE;
		if (frame->srr1 & SRR1_ISI_PFAULT)
			ftype |= VM_PROT_READ;
	} else {
		eva = frame->dar;
#ifdef BOOKE
		if (frame->cpu.booke.esr & ESR_ST)
#else
		if (frame->cpu.aim.dsisr & DSISR_STORE)
#endif
			ftype = VM_PROT_WRITE;
		else
			ftype = VM_PROT_READ;
	}

	if (user) {
		KASSERT(p->p_vmspace != NULL, ("trap_pfault: vmspace  NULL"));
		map = &p->p_vmspace->vm_map;
	} else {
		/* Kernel faults may still target user addresses (copyin). */
		rv = pmap_decode_kernel_ptr(eva, &is_user, &eva);
		if (rv != 0)
			return (SIGSEGV);

		if (is_user)
			map = &p->p_vmspace->vm_map;
		else
			map = kernel_map;
	}
	va = trunc_page(eva);

	/* Fault in the page. */
	rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	/*
	 * XXXDTRACE: add dtrace_doubletrap_func here?
	 */

	if (rv == KERN_SUCCESS)
		return (0);

	/* Kernel faults with onfault state recover instead of signalling. */
	if (!user && handle_onfault(frame))
		return (0);

	return (SIGSEGV);
}

/*
 * For now, this only deals with the particular unaligned access case
 * that gcc tends to generate. Eventually it should handle all of the
 * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
 */

/*
 * Attempt to emulate the unaligned access recorded in the frame.
 * Returns 0 if the access was emulated (caller then skips the faulting
 * instruction), -1 if it could not be handled (caller raises SIGBUS).
 * SPE builds emulate SPE load/store via the vector unit; otherwise
 * only lfd/stfd (as flagged in the DSISR) are emulated via the FPU.
 */
static int
fix_unaligned(struct thread *td, struct trapframe *frame)
{
	struct thread	*fputhread;
#ifdef	__SPE__
	uint32_t	inst;
#endif
	int		indicator, reg;
	double		*fpr;

#ifdef __SPE__
	indicator = (frame->cpu.booke.esr & (ESR_ST|ESR_SPE));
	if (indicator & ESR_SPE) {
		/* Fetch the faulting instruction to find the SPE register. */
		if (copyin((void *)frame->srr0, &inst, sizeof(inst)) != 0)
			return (-1);
		reg = EXC_ALI_SPE_REG(inst);
		fpr = (double *)td->td_pcb->pcb_vec.vr[reg];
		fputhread = PCPU_GET(vecthread);

		/* Juggle the SPE to ensure that we've initialized
		 * the registers, and that their current state is in
		 * the PCB.
		 */
		if (fputhread != td) {
			if (fputhread)
				save_vec(fputhread);
			enable_vec(td);
		}
		save_vec(td);

		if (!(indicator & ESR_ST)) {
			/* Load: read the datum, update GPR + vector state. */
			if (copyin((void *)frame->dar, fpr,
			    sizeof(double)) != 0)
				return (-1);
			frame->fixreg[reg] = td->td_pcb->pcb_vec.vr[reg][1];
			enable_vec(td);
		} else {
			/* Store: merge the GPR and write the datum out. */
			td->td_pcb->pcb_vec.vr[reg][1] = frame->fixreg[reg];
			if (copyout(fpr, (void *)frame->dar,
			    sizeof(double)) != 0)
				return (-1);
		}
		return (0);
	}
#else
	indicator = EXC_ALI_OPCODE_INDICATOR(frame->cpu.aim.dsisr);

	switch (indicator) {
	case EXC_ALI_LFD:
	case EXC_ALI_STFD:
		reg = EXC_ALI_RST(frame->cpu.aim.dsisr);
		fpr = &td->td_pcb->pcb_fpu.fpr[reg].fpr;
		fputhread = PCPU_GET(fputhread);

		/* Juggle the FPU to ensure that we've initialized
		 * the FPRs, and that their current state is in
		 * the PCB.
		 */
		if (fputhread != td) {
			if (fputhread)
				save_fpu(fputhread);
			enable_fpu(td);
		}
		save_fpu(td);

		if (indicator == EXC_ALI_LFD) {
			/* lfd: load the doubleword into the saved FPR. */
			if (copyin((void *)frame->dar, fpr,
			    sizeof(double)) != 0)
				return (-1);
			enable_fpu(td);
		} else {
			/* stfd: store the saved FPR to the target address. */
			if (copyout(fpr, (void *)frame->dar,
			    sizeof(double)) != 0)
				return (-1);
		}
		return (0);
		break;
	}
#endif

	return (-1);
}

#if defined(__powerpc64__) && defined(AIM)
#define MSKNSHL(x, m, n) "(((" #x ") & " #m ") << " #n ")"
#define MSKNSHR(x, m, n) "(((" #x ") & " #m ") >> " #n ")"

/* xvcpsgndp instruction, built in opcode format.
 * This can be changed to use mnemonic after a toolchain update.
 */
#define XVCPSGNDP(xt, xa, xb) \
	__asm __volatile(".long (" \
		MSKNSHL(60, 0x3f, 26) " | " \
		MSKNSHL(xt, 0x1f, 21) " | " \
		MSKNSHL(xa, 0x1f, 16) " | " \
		MSKNSHL(xb, 0x1f, 11) " | " \
		MSKNSHL(240, 0xff, 3) " | " \
		MSKNSHR(xa,  0x20, 3) " | " \
		MSKNSHR(xa,  0x20, 4) " | " \
		MSKNSHR(xa,  0x20, 5) ")")

/* Macros to normalize 1 or 10 VSX registers */
#define NORM(x)	XVCPSGNDP(x, x, x)
#define NORM10(x) \
	NORM(x ## 0); NORM(x ## 1); NORM(x ## 2); NORM(x ## 3); NORM(x ## 4); \
	NORM(x ## 5); NORM(x ## 6); NORM(x ## 7); NORM(x ## 8); NORM(x ## 9)

/*
 * Run every VSX register (vs0-vs63) through xvcpsgndp with itself as
 * both source and target, normalizing the register values.  Called by
 * the EXC_SOFT_PATCH handler in trap() before re-executing the
 * faulting instruction.  VSX is enabled in the MSR for the duration
 * and the previous MSR is restored afterwards.
 */
static void
normalize_inputs(void)
{
	unsigned long msr;

	/* enable VSX */
	msr = mfmsr();
	mtmsr(msr | PSL_VSX);

	/* Covers vs0-vs9 singly, vs10-vs59 in tens, then vs60-vs63. */
	NORM(0);  NORM(1);  NORM(2);  NORM(3);  NORM(4);
	NORM(5);  NORM(6);  NORM(7);  NORM(8);  NORM(9);
	NORM10(1); NORM10(2); NORM10(3); NORM10(4); NORM10(5);
	NORM(60); NORM(61); NORM(62); NORM(63);

	/* restore MSR */
	mtmsr(msr);
}
#endif

#ifdef KDB
/*
 * Decide whether a kernel-mode trap should enter the kernel debugger.
 * Called from trap() (and from trap_subr.S per the prototype comment).
 * Returns the result of kdb_trap() if KDB takes the trap, 0 otherwise
 * (including for DTrace breakpoints, which are never handed to KDB).
 */
int
db_trap_glue(struct trapframe *frame)
{

	if (!(frame->srr1 & PSL_PR)
	    && (frame->exc == EXC_TRC || frame->exc == EXC_RUNMODETRC
	        || frame_is_trap_inst(frame)
		|| frame->exc == EXC_BPT
		|| frame->exc == EXC_DEBUG
		|| frame->exc == EXC_DSI)) {
		int type = frame->exc;

		/* Ignore DTrace traps. */
		if (*(uint32_t *)frame->srr0 == EXC_DTRACE)
			return (0);
		if (frame_is_trap_inst(frame)) {
			type = T_BREAKPOINT;
		}
		return (kdb_trap(type, 0, frame));
	}

	return (0);
}
#endif