1 /*-
2 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
3 * Copyright (C) 1995, 1996 TooLs GmbH.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by TooLs GmbH.
17 * 4. The name of TooLs GmbH may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
29 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 * $NetBSD: trap.c,v 1.58 2002/03/04 04:07:35 dbj Exp $
32 */
33
34 #include <sys/param.h>
35 #include <sys/kdb.h>
36 #include <sys/proc.h>
37 #include <sys/ktr.h>
38 #include <sys/lock.h>
39 #include <sys/mutex.h>
40 #include <sys/ptrace.h>
41 #include <sys/reboot.h>
42 #include <sys/syscall.h>
43 #include <sys/sysent.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/uio.h>
47 #include <sys/signalvar.h>
48 #include <sys/vmmeter.h>
49
50 #include <security/audit/audit.h>
51
52 #include <vm/vm.h>
53 #include <vm/pmap.h>
54 #include <vm/vm_extern.h>
55 #include <vm/vm_param.h>
56 #include <vm/vm_kern.h>
57 #include <vm/vm_map.h>
58 #include <vm/vm_page.h>
59
60 #include <machine/_inttypes.h>
61 #include <machine/altivec.h>
62 #include <machine/cpu.h>
63 #include <machine/db_machdep.h>
64 #include <machine/fpu.h>
65 #include <machine/frame.h>
66 #include <machine/pcb.h>
67 #include <machine/psl.h>
68 #include <machine/slb.h>
69 #include <machine/spr.h>
70 #include <machine/sr.h>
71 #include <machine/trap.h>
72
73 /* Below matches setjmp.S */
74 #define FAULTBUF_LR 21
75 #define FAULTBUF_R1 1
76 #define FAULTBUF_R2 2
77 #define FAULTBUF_CR 22
78 #define FAULTBUF_R14 3
79
80 #define MOREARGS(sp) ((caddr_t)((uintptr_t)(sp) + \
81 sizeof(struct callframe) - 3*sizeof(register_t))) /* more args go here */
82
83 static void trap_fatal(struct trapframe *frame);
84 static void printtrap(u_int vector, struct trapframe *frame, int isfatal,
85 int user);
86 static bool trap_pfault(struct trapframe *frame, bool user, int *signo,
87 int *ucode);
88 static int fix_unaligned(struct thread *td, struct trapframe *frame);
89 static int handle_onfault(struct trapframe *frame);
90 static void syscall(struct trapframe *frame);
91
92 #if defined(__powerpc64__) && defined(AIM)
93 static void normalize_inputs(void);
94 #endif
95
96 extern vm_offset_t __startkernel;
97
98 extern int copy_fault(void);
99 extern int fusufault(void);
100
101 #ifdef KDB
102 int db_trap_glue(struct trapframe *); /* Called from trap_subr.S */
103 #endif
104
105 struct powerpc_exception {
106 u_int vector;
107 char *name;
108 };
109
110 #ifdef KDTRACE_HOOKS
111 #include <sys/dtrace_bsd.h>
112
113 int (*dtrace_invop_jump_addr)(struct trapframe *);
114 #endif
115
116 static struct powerpc_exception powerpc_exceptions[] = {
117 { EXC_CRIT, "critical input" },
118 { EXC_RST, "system reset" },
119 { EXC_MCHK, "machine check" },
120 { EXC_DSI, "data storage interrupt" },
121 { EXC_DSE, "data segment exception" },
122 { EXC_ISI, "instruction storage interrupt" },
123 { EXC_ISE, "instruction segment exception" },
124 { EXC_EXI, "external interrupt" },
125 { EXC_ALI, "alignment" },
126 { EXC_PGM, "program" },
127 { EXC_HEA, "hypervisor emulation assistance" },
128 { EXC_FPU, "floating-point unavailable" },
129 { EXC_APU, "auxiliary proc unavailable" },
130 { EXC_DECR, "decrementer" },
131 { EXC_FIT, "fixed-interval timer" },
132 { EXC_WDOG, "watchdog timer" },
133 { EXC_SC, "system call" },
134 { EXC_TRC, "trace" },
135 { EXC_FPA, "floating-point assist" },
136 { EXC_DEBUG, "debug" },
137 { EXC_PERF, "performance monitoring" },
138 { EXC_VEC, "altivec unavailable" },
139 { EXC_VSX, "vsx unavailable" },
140 { EXC_FAC, "facility unavailable" },
141 { EXC_ITMISS, "instruction tlb miss" },
142 { EXC_DLMISS, "data load tlb miss" },
143 { EXC_DSMISS, "data store tlb miss" },
144 { EXC_BPT, "instruction breakpoint" },
145 { EXC_SMI, "system management" },
146 { EXC_VECAST_G4, "altivec assist" },
147 { EXC_THRM, "thermal management" },
148 { EXC_RUNMODETRC, "run mode/trace" },
149 { EXC_SOFT_PATCH, "soft patch exception" },
150 { EXC_LAST, NULL }
151 };
152
153 static int uprintf_signal;
154 SYSCTL_INT(_machdep, OID_AUTO, uprintf_signal, CTLFLAG_RWTUN,
155 &uprintf_signal, 0,
156 "Print debugging information on trap signal to ctty");
157
158 #define ESR_BITMASK \
159 "\20" \
160 "\040b0\037b1\036b2\035b3\034PIL\033PRR\032PTR\031FP" \
161 "\030ST\027b9\026DLK\025ILK\024b12\023b13\022BO\021PIE" \
162 "\020b16\017b17\016b18\015b19\014b20\013b21\012b22\011b23" \
163 "\010SPE\007EPID\006b26\005b27\004b28\003b29\002b30\001b31"
164 #define MCSR_BITMASK \
165 "\20" \
166 "\040MCP\037ICERR\036DCERR\035TLBPERR\034L2MMU_MHIT\033b5\032b6\031b7" \
167 "\030b8\027b9\026b10\025NMI\024MAV\023MEA\022b14\021IF" \
168 "\020LD\017ST\016LDG\015b19\014b20\013b21\012b22\011b23" \
169 "\010b24\007b25\006b26\005b27\004b28\003b29\002TLBSYNC\001BSL2_ERR"
170 #define MSSSR_BITMASK \
171 "\20" \
172 "\040b0\037b1\036b2\035b3\034b4\033b5\032b6\031b7" \
173 "\030b8\027b9\026b10\025b11\024b12\023L2TAG\022L2DAT\021L3TAG" \
174 "\020L3DAT\017APE\016DPE\015TEA\014b20\013b21\012b22\011b23" \
175 "\010b24\007b25\006b26\005b27\004b28\003b29\002b30\001b31"
176
177 static const char *
trapname(u_int vector)178 trapname(u_int vector)
179 {
180 struct powerpc_exception *pe;
181
182 for (pe = powerpc_exceptions; pe->vector != EXC_LAST; pe++) {
183 if (pe->vector == vector)
184 return (pe->name);
185 }
186
187 return ("unknown");
188 }
189
/*
 * Return true if this exception was raised by a trap instruction
 * (tw/twi).  On AIM that is a program exception with EXC_PGM_TRAP set
 * in SRR1; on Book-E the ESR_PTR bit of the exception syndrome
 * register is set instead.
 */
static inline bool
frame_is_trap_inst(struct trapframe *frame)
{
#ifdef AIM
	return (frame->exc == EXC_PGM && frame->srr1 & EXC_PGM_TRAP);
#else
	return ((frame->cpu.booke.esr & ESR_PTR) != 0);
#endif
}
199
/*
 * Common trap entry point, called from the low-level vectors in
 * trap_subr.S for every synchronous exception.  Dispatches on the
 * exception vector and on whether the fault came from user mode
 * (PSL_PR set in SRR1).  User traps are either handled in place
 * (lazy FPU/AltiVec/VSX enable, page faults, syscalls, ...) or
 * converted into a signal; kernel traps that cannot be recovered
 * fall through to trap_fatal().
 */
void
trap(struct trapframe *frame)
{
	struct thread *td;
	struct proc *p;
#ifdef KDTRACE_HOOKS
	uint32_t inst;
#endif
	int sig, type, user;
	u_int ucode;
	ksiginfo_t ksi;
	register_t addr, fscr;

	VM_CNT_INC(v_trap);

#ifdef KDB
	/* Traps taken while the debugger is active re-enter it directly. */
	if (kdb_active) {
		kdb_reenter();
		return;
	}
#endif

	td = curthread;
	p = td->td_proc;

	/* Default ucode is the raw vector; refined per-case below. */
	type = ucode = frame->exc;
	sig = 0;
	user = frame->srr1 & PSL_PR;
	addr = 0;

	CTR3(KTR_TRAP, "trap: %s type=%s (%s)", td->td_name,
	    trapname(type), user ? "user" : "kernel");

#ifdef KDTRACE_HOOKS
	/*
	 * A trap can occur while DTrace executes a probe. Before
	 * executing the probe, DTrace blocks re-scheduling and sets
	 * a flag in its per-cpu flags to indicate that it doesn't
	 * want to fault. On returning from the probe, the no-fault
	 * flag is cleared and finally re-scheduling is enabled.
	 *
	 * If the DTrace kernel module has registered a trap handler,
	 * call it and if it returns non-zero, assume that it has
	 * handled the trap and modified the trap frame so that this
	 * function can return normally.
	 */
	if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, type) != 0)
		return;
#endif

	if (user) {
		td->td_pticks = 0;
		td->td_frame = frame;
		addr = frame->srr0;
		if (td->td_cowgen != atomic_load_int(&p->p_cowgen))
			thread_cow_update(td);

		/* User Mode Traps */
		switch (type) {
		case EXC_RUNMODETRC:
		case EXC_TRC:
			/* Single-step/trace: clear PSL_SE and signal SIGTRAP. */
			frame->srr1 &= ~PSL_SE;
			sig = SIGTRAP;
			ucode = TRAP_TRACE;
			break;

#if defined(__powerpc64__) && defined(AIM)
		case EXC_DSE:
			addr = frame->dar;
			/* FALLTHROUGH */
		case EXC_ISE:
			/* DSE/ISE are automatically fatal with radix pmap. */
			if (radix_mmu ||
			    handle_user_slb_spill(&p->p_vmspace->vm_pmap,
			    addr) != 0){
				sig = SIGSEGV;
				ucode = SEGV_MAPERR;
			}
			break;
#endif
		case EXC_DSI:
			addr = frame->dar;
			/* FALLTHROUGH */
		case EXC_ISI:
			/* trap_pfault() fills in sig/ucode on failure. */
			if (trap_pfault(frame, true, &sig, &ucode))
				sig = 0;
			break;

		case EXC_SC:
			syscall(frame);
			break;

		case EXC_FPU:
			/* Lazy FPU context switch: enable on first use. */
			KASSERT((td->td_pcb->pcb_flags & PCB_FPU) != PCB_FPU,
			    ("FPU already enabled for thread"));
			enable_fpu(td);
			break;

		case EXC_VEC:
			/* Lazy AltiVec enable, as for the FPU above. */
			KASSERT((td->td_pcb->pcb_flags & PCB_VEC) != PCB_VEC,
			    ("Altivec already enabled for thread"));
			enable_vec(td);
			break;

		case EXC_VSX:
			/*
			 * VSX overlays the FP and vector register files, so
			 * both must be live (and FP state saved) before the
			 * thread is marked VSX-capable.
			 */
			KASSERT((td->td_pcb->pcb_flags & PCB_VSX) != PCB_VSX,
			    ("VSX already enabled for thread"));
			if (!(td->td_pcb->pcb_flags & PCB_VEC))
				enable_vec(td);
			if (td->td_pcb->pcb_flags & PCB_FPU)
				save_fpu(td);
			td->td_pcb->pcb_flags |= PCB_VSX;
			enable_fpu(td);
			break;

		case EXC_FAC:
			/*
			 * Facility unavailable: FSCR_IC identifies which
			 * facility; enable it on demand (clearing its
			 * control SPRs) or signal SIGILL if unsupported.
			 */
			fscr = mfspr(SPR_FSCR);
			switch (fscr & FSCR_IC_MASK) {
			case FSCR_IC_HTM:
				CTR0(KTR_TRAP,
				    "Hardware Transactional Memory subsystem disabled");
				sig = SIGILL;
				ucode =	ILL_ILLOPC;
				break;
			case FSCR_IC_DSCR:
				td->td_pcb->pcb_flags |= PCB_CFSCR | PCB_CDSCR;
				fscr |= FSCR_DSCR;
				mtspr(SPR_DSCR, 0);
				break;
			case FSCR_IC_EBB:
				td->td_pcb->pcb_flags |= PCB_CFSCR;
				fscr |= FSCR_EBB;
				mtspr(SPR_EBBHR, 0);
				mtspr(SPR_EBBRR, 0);
				mtspr(SPR_BESCR, 0);
				break;
			case FSCR_IC_TAR:
				td->td_pcb->pcb_flags |= PCB_CFSCR;
				fscr |= FSCR_TAR;
				mtspr(SPR_TAR, 0);
				break;
			case FSCR_IC_LM:
				td->td_pcb->pcb_flags |= PCB_CFSCR;
				fscr |= FSCR_LM;
				mtspr(SPR_LMRR, 0);
				mtspr(SPR_LMSER, 0);
				break;
			default:
				sig = SIGILL;
				ucode =	ILL_ILLOPC;
			}
			/* Clear the interrupt cause field before returning. */
			mtspr(SPR_FSCR, fscr & ~FSCR_IC_MASK);
			break;
		case EXC_HEA:
			sig = SIGILL;
			ucode =	ILL_ILLOPC;
			break;

		case EXC_VECAST_E:
		case EXC_VECAST_G4:
		case EXC_VECAST_G5:
			/*
			 * We get a VPU assist exception for IEEE mode
			 * vector operations on denormalized floats.
			 * Emulating this is a giant pain, so for now,
			 * just switch off IEEE mode and treat them as
			 * zero.
			 */

			save_vec(td);
			td->td_pcb->pcb_vec.vscr |= ALTIVEC_VSCR_NJ;
			enable_vec(td);
			break;

		case EXC_ALI:
			/* Emulated successfully: skip the faulting insn. */
			if (fix_unaligned(td, frame) != 0) {
				sig = SIGBUS;
				ucode = BUS_ADRALN;
				addr = frame->dar;
			}
			else
				frame->srr0 += 4;
			break;

		case EXC_DEBUG:	/* Single stepping */
			mtspr(SPR_DBSR, mfspr(SPR_DBSR));
			frame->srr1 &= ~PSL_DE;
			frame->cpu.booke.dbcr0 &= ~(DBCR0_IDM | DBCR0_IC);
			sig = SIGTRAP;
			ucode = TRAP_TRACE;
			break;

		case EXC_PGM:
			/* Identify the trap reason */
			if (frame_is_trap_inst(frame)) {
#ifdef KDTRACE_HOOKS
				/* 0x0FFFDDDD is the DTrace pid-probe trap. */
				inst = fuword32((const void *)frame->srr0);
				if (inst == 0x0FFFDDDD &&
				    dtrace_pid_probe_ptr != NULL) {
					(*dtrace_pid_probe_ptr)(frame);
					break;
				}
#endif
				sig = SIGTRAP;
				ucode = TRAP_BRKPT;
				break;
			}

			/*
			 * FP-enabled exceptions become SIGFPE; everything
			 * else is given to the instruction emulator, which
			 * returns the signal to deliver (0 if emulated).
			 */
			if ((frame->srr1 & EXC_PGM_FPENABLED) &&
			    (td->td_pcb->pcb_flags & PCB_FPU))
				sig = SIGFPE;
			else
				sig = ppc_instr_emulate(frame, td);

			if (sig == SIGILL) {
				if (frame->srr1 & EXC_PGM_PRIV)
					ucode = ILL_PRVOPC;
				else if (frame->srr1 & EXC_PGM_ILLEGAL)
					ucode = ILL_ILLOPC;
			} else if (sig == SIGFPE) {
				ucode = get_fpu_exception(td);
			}

			break;

		case EXC_MCHK:
			/* User-mode machine check: report and signal. */
			sig = cpu_machine_check(td, frame, &ucode);
			printtrap(frame->exc, frame, 0, (frame->srr1 & PSL_PR));
			break;

#if defined(__powerpc64__) && defined(AIM)
		case EXC_SOFT_PATCH:
			/*
			 * Point to the instruction that generated the exception to execute it again,
			 * and normalize the register values.
			 */
			frame->srr0 -= 4;
			normalize_inputs();
			break;
#endif

		default:
			trap_fatal(frame);
		}
	} else {
		/* Kernel Mode Traps */

		KASSERT(cold || td->td_ucred != NULL,
		    ("kernel trap doesn't have ucred"));
		switch (type) {
		case EXC_PGM:
#ifdef KDTRACE_HOOKS
			/* Hand DTrace FBT breakpoints to its invop handler. */
			if (frame_is_trap_inst(frame)) {
				if (*(uint32_t *)frame->srr0 == EXC_DTRACE) {
					if (dtrace_invop_jump_addr != NULL) {
						dtrace_invop_jump_addr(frame);
						return;
					}
				}
			}
#endif
#ifdef KDB
			if (db_trap_glue(frame))
				return;
#endif
			break;
#if defined(__powerpc64__) && defined(AIM)
		case EXC_DSE:
			/* DSE on radix mmu is automatically fatal. */
			if (radix_mmu)
				break;
			/*
			 * Kernel fault on the user segment: re-insert the
			 * user SLB entry directly and retry the access.
			 */
			if (td->td_pcb->pcb_cpu.aim.usr_vsid != 0 &&
			    (frame->dar & SEGMENT_MASK) == USER_ADDR) {
				__asm __volatile ("slbmte %0, %1" ::
					"r"(td->td_pcb->pcb_cpu.aim.usr_vsid),
					"r"(USER_SLB_SLBE));
				return;
			}
			break;
#endif
		case EXC_DSI:
			if (trap_pfault(frame, false, NULL, NULL))
				return;
			break;
		case EXC_MCHK:
			/* Recoverable only if onfault state exists. */
			if (handle_onfault(frame))
				return;
			break;
		default:
			break;
		}
		trap_fatal(frame);
	}

	/* Deliver any signal selected above, with optional ctty debug dump. */
	if (sig != 0) {
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = sig;
		ksi.ksi_code = (int) ucode; /* XXX, not POSIX */
		ksi.ksi_addr = (void *)addr;
		ksi.ksi_trapno = type;
		if (uprintf_signal) {
			uprintf("pid %d comm %s: signal %d code %d type 0x%x "
			    "addr 0x%lx r1 0x%lx srr0 0x%lx srr1 0x%lx\n",
			    p->p_pid, p->p_comm, sig, ucode, type,
			    (u_long)addr, (u_long)frame->fixreg[1],
			    (u_long)frame->srr0, (u_long)frame->srr1);
		}

		trapsignal(td, &ksi);
	}

	userret(td, frame);
}
513
/*
 * Handle an unrecoverable trap: print full diagnostics and, when the
 * debugger is configured to catch traps, give KDB a chance to claim
 * the event before panicking.
 */
static void
trap_fatal(struct trapframe *frame)
{
#ifdef KDB
	bool handled;
#endif

	printtrap(frame->exc, frame, 1, (frame->srr1 & PSL_PR));
#ifdef KDB
	if (debugger_on_trap) {
		kdb_why = KDB_WHY_TRAP;
		handled = kdb_trap(frame->exc, 0, frame);
		kdb_why = KDB_WHY_UNSET;
		if (handled)
			return;
	}
#endif
	panic("%s trap", trapname(frame->exc));
}
533
/*
 * CPU-family specific part of printtrap(): dump the additional fault
 * registers that exist only on AIM (DSISR, and MSSSR0 on MPC745x) or
 * on Book-E (MCSR/MCAR for machine checks, ESR always).
 */
static void
cpu_printtrap(u_int vector, struct trapframe *frame, int isfatal, int user)
{
#ifdef AIM
	uint16_t ver;

	switch (vector) {
	case EXC_MCHK:
		ver = mfpvr() >> 16;
		if (MPC745X_P(ver))
			printf(" msssr0 = 0x%b\n",
			    (int)mfspr(SPR_MSSSR0), MSSSR_BITMASK);
		/* FALLTHROUGH: machine check also reports DSISR below */
	case EXC_DSE:
	case EXC_DSI:
	case EXC_DTMISS:
		printf(" dsisr = 0x%lx\n",
		    (u_long)frame->cpu.aim.dsisr);
		break;
	}
#elif defined(BOOKE)
	vm_paddr_t pa;

	switch (vector) {
	case EXC_MCHK:
		/* MCAR is split across two 32-bit SPRs; reassemble it. */
		pa = mfspr(SPR_MCARU);
		pa = (pa << 32) | (u_register_t)mfspr(SPR_MCAR);
		printf(" mcsr = 0x%b\n",
		    (int)mfspr(SPR_MCSR), MCSR_BITMASK);
		printf(" mcar = 0x%jx\n", (uintmax_t)pa);
	}
	printf(" esr = 0x%b\n",
	    (int)frame->cpu.booke.esr, ESR_BITMASK);
#endif
}
568
/*
 * Print a human-readable description of a trap to the console: the
 * vector name, the faulting virtual address where applicable, the
 * saved SRR0/SRR1 and LR (with kernel-relative offsets), and the
 * current thread/process.  CPU-family specifics are delegated to
 * cpu_printtrap().
 */
static void
printtrap(u_int vector, struct trapframe *frame, int isfatal, int user)
{

	printf("\n");
	printf("%s %s trap:\n", isfatal ? "fatal" : "handled",
	    user ? "user" : "kernel");
	printf("\n");
	printf(" exception = 0x%x (%s)\n", vector, trapname(vector));
	switch (vector) {
	case EXC_DSE:
	case EXC_DSI:
	case EXC_DTMISS:
	case EXC_ALI:
	case EXC_MCHK:
		/* Data-side faults record the address in DAR. */
		printf(" virtual address = 0x%" PRIxPTR "\n", frame->dar);
		break;
	case EXC_ISE:
	case EXC_ISI:
	case EXC_ITMISS:
		/* Instruction-side faults: the address is the PC itself. */
		printf(" virtual address = 0x%" PRIxPTR "\n", frame->srr0);
		break;
	}
	cpu_printtrap(vector, frame, isfatal, user);
	/* Second value is the address rebased against KERNBASE, for kgdb. */
	printf(" srr0 = 0x%" PRIxPTR " (0x%" PRIxPTR ")\n",
	    frame->srr0, frame->srr0 - (register_t)(__startkernel - KERNBASE));
	printf(" srr1 = 0x%lx\n", (u_long)frame->srr1);
	printf(" current msr = 0x%" PRIxPTR "\n", mfmsr());
	printf(" lr = 0x%" PRIxPTR " (0x%" PRIxPTR ")\n",
	    frame->lr, frame->lr - (register_t)(__startkernel - KERNBASE));
	printf(" frame = %p\n", frame);
	printf(" curthread = %p\n", curthread);
	if (curthread != NULL)
		printf(" pid = %d, comm = %s\n",
		    curthread->td_proc->p_pid, curthread->td_name);
	printf("\n");
}
606
607 /*
608 * Handles a fatal fault when we have onfault state to recover. Returns
609 * non-zero if there was onfault recovery state available.
610 */
611 static int
handle_onfault(struct trapframe * frame)612 handle_onfault(struct trapframe *frame)
613 {
614 struct thread *td;
615 jmp_buf *fb;
616
617 td = curthread;
618 #if defined(__powerpc64__) || defined(BOOKE)
619 uintptr_t dispatch = (uintptr_t)td->td_pcb->pcb_onfault;
620
621 if (dispatch == 0)
622 return (0);
623 /* Short-circuit radix and Book-E paths. */
624 switch (dispatch) {
625 case COPYFAULT:
626 frame->srr0 = (uintptr_t)copy_fault;
627 return (1);
628 case FUSUFAULT:
629 frame->srr0 = (uintptr_t)fusufault;
630 return (1);
631 default:
632 break;
633 }
634 #endif
635 fb = td->td_pcb->pcb_onfault;
636 if (fb != NULL) {
637 frame->srr0 = (*fb)->_jb[FAULTBUF_LR];
638 frame->fixreg[1] = (*fb)->_jb[FAULTBUF_R1];
639 frame->fixreg[2] = (*fb)->_jb[FAULTBUF_R2];
640 frame->fixreg[3] = 1;
641 frame->cr = (*fb)->_jb[FAULTBUF_CR];
642 bcopy(&(*fb)->_jb[FAULTBUF_R14], &frame->fixreg[14],
643 18 * sizeof(register_t));
644 td->td_pcb->pcb_onfault = NULL; /* Returns twice, not thrice */
645 return (1);
646 }
647 return (0);
648 }
649
650 int
cpu_fetch_syscall_args(struct thread * td)651 cpu_fetch_syscall_args(struct thread *td)
652 {
653 struct proc *p;
654 struct trapframe *frame;
655 struct syscall_args *sa;
656 caddr_t params;
657 size_t argsz;
658 int error, n, narg, i;
659
660 p = td->td_proc;
661 frame = td->td_frame;
662 sa = &td->td_sa;
663
664 sa->code = frame->fixreg[0];
665 sa->original_code = sa->code;
666 params = (caddr_t)(frame->fixreg + FIRSTARG);
667 n = NARGREG;
668
669 if (sa->code == SYS_syscall) {
670 /*
671 * code is first argument,
672 * followed by actual args.
673 */
674 sa->code = *(register_t *) params;
675 params += sizeof(register_t);
676 n -= 1;
677 } else if (sa->code == SYS___syscall) {
678 /*
679 * Like syscall, but code is a quad,
680 * so as to maintain quad alignment
681 * for the rest of the args.
682 */
683 if (SV_PROC_FLAG(p, SV_ILP32)) {
684 params += sizeof(register_t);
685 sa->code = *(register_t *) params;
686 params += sizeof(register_t);
687 n -= 2;
688 } else {
689 sa->code = *(register_t *) params;
690 params += sizeof(register_t);
691 n -= 1;
692 }
693 }
694
695 if (sa->code >= p->p_sysent->sv_size)
696 sa->callp = &nosys_sysent;
697 else
698 sa->callp = &p->p_sysent->sv_table[sa->code];
699
700 narg = sa->callp->sy_narg;
701
702 if (SV_PROC_FLAG(p, SV_ILP32)) {
703 argsz = sizeof(uint32_t);
704
705 for (i = 0; i < n; i++)
706 sa->args[i] = ((u_register_t *)(params))[i] &
707 0xffffffff;
708 } else {
709 argsz = sizeof(uint64_t);
710
711 for (i = 0; i < n; i++)
712 sa->args[i] = ((u_register_t *)(params))[i];
713 }
714
715 if (narg > n)
716 error = copyin(MOREARGS(frame->fixreg[1]), sa->args + n,
717 (narg - n) * argsz);
718 else
719 error = 0;
720
721 #ifdef __powerpc64__
722 if (SV_PROC_FLAG(p, SV_ILP32) && narg > n) {
723 /* Expand the size of arguments copied from the stack */
724
725 for (i = narg; i >= n; i--)
726 sa->args[i] = ((uint32_t *)(&sa->args[n]))[i-n];
727 }
728 #endif
729
730 if (error == 0) {
731 td->td_retval[0] = 0;
732 td->td_retval[1] = frame->fixreg[FIRSTARG + 1];
733 }
734 return (error);
735 }
736
737 #include "../../kern/subr_syscall.c"
738
/*
 * System call entry, invoked from trap() on EXC_SC.  Records the trap
 * frame on the thread and runs the generic syscallenter/syscallret
 * machinery (pulled in above from kern/subr_syscall.c).
 */
void
syscall(struct trapframe *frame)
{
	struct thread *td;

	td = curthread;
	td->td_frame = frame;

#if defined(__powerpc64__) && defined(AIM)
	/*
	 * Speculatively restore last user SLB segment, which we know is
	 * invalid already, since we are likely to do copyin()/copyout().
	 */
	if (td->td_pcb->pcb_cpu.aim.usr_vsid != 0)
		__asm __volatile ("slbmte %0, %1; isync" ::
		    "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
#endif

	syscallenter(td);
	syscallret(td);
}
760
/*
 * Resolve a page fault (EXC_ISI or EXC_DSI).  Decodes the faulting
 * address and access type from the trapframe, then asks the VM system
 * to fault the page in.  Returns true if the fault was resolved (the
 * trap can simply return); on failure for user faults *signo/*ucode
 * are filled in by vm_fault_trap(), and kernel faults fall back to
 * onfault recovery.
 */
static bool
trap_pfault(struct trapframe *frame, bool user, int *signo, int *ucode)
{
	vm_offset_t eva;
	struct thread *td;
	struct proc *p;
	vm_map_t map;
	vm_prot_t ftype;
	int rv, is_user;

	td = curthread;
	p = td->td_proc;
	if (frame->exc == EXC_ISI) {
		/* Instruction fetch fault: address is the PC. */
		eva = frame->srr0;
		ftype = VM_PROT_EXECUTE;
		if (frame->srr1 & SRR1_ISI_PFAULT)
			ftype |= VM_PROT_READ;
	} else {
		/* Data fault: address in DAR, direction from ESR/DSISR. */
		eva = frame->dar;
#ifdef BOOKE
		if (frame->cpu.booke.esr & ESR_ST)
#else
		if (frame->cpu.aim.dsisr & DSISR_STORE)
#endif
			ftype = VM_PROT_WRITE;
		else
			ftype = VM_PROT_READ;
	}
#if defined(__powerpc64__) && defined(AIM)
	/* With radix MMU, try the pmap fast path before a full VM fault. */
	if (radix_mmu && pmap_nofault(&p->p_vmspace->vm_pmap, eva, ftype) == 0)
		return (true);
#endif

	if (__predict_false((td->td_pflags & TDP_NOFAULTING) == 0)) {
		/*
		 * If we get a page fault while in a critical section, then
		 * it is most likely a fatal kernel page fault. The kernel
		 * is already going to panic trying to get a sleep lock to
		 * do the VM lookup, so just consider it a fatal trap so the
		 * kernel can print out a useful trap message and even get
		 * to the debugger.
		 *
		 * If we get a page fault while holding a non-sleepable
		 * lock, then it is most likely a fatal kernel page fault.
		 * If WITNESS is enabled, then it's going to whine about
		 * bogus LORs with various VM locks, so just skip to the
		 * fatal trap handling directly.
		 */
		if (td->td_critnest != 0 ||
		    WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, NULL,
		    "Kernel page fault") != 0) {
			trap_fatal(frame);
			return (false);
		}
	}
	if (user) {
		KASSERT(p->p_vmspace != NULL, ("trap_pfault: vmspace  NULL"));
		map = &p->p_vmspace->vm_map;
	} else {
		/*
		 * Kernel-mode faults may still target user addresses
		 * (e.g. during copyin/copyout); let the pmap decide.
		 */
		rv = pmap_decode_kernel_ptr(eva, &is_user, &eva);
		if (rv != 0)
			return (false);

		if (is_user)
			map = &p->p_vmspace->vm_map;
		else
			map = kernel_map;
	}

	/* Fault in the page. */
	rv = vm_fault_trap(map, eva, ftype, VM_FAULT_NORMAL, signo, ucode);
	/*
	 * XXXDTRACE: add dtrace_doubletrap_func here?
	 */

	if (rv == KERN_SUCCESS)
		return (true);

	if (!user && handle_onfault(frame))
		return (true);

	return (false);
}
844
845 /*
846 * For now, this only deals with the particular unaligned access case
847 * that gcc tends to generate. Eventually it should handle all of the
848 * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
849 */
850
851 static int
fix_unaligned(struct thread * td,struct trapframe * frame)852 fix_unaligned(struct thread *td, struct trapframe *frame)
853 {
854 struct thread *fputhread;
855 #ifdef BOOKE
856 uint32_t inst;
857 #endif
858 int indicator, reg;
859 double *fpr;
860
861 #ifdef __SPE__
862 indicator = (frame->cpu.booke.esr & (ESR_ST|ESR_SPE));
863 if (indicator & ESR_SPE) {
864 if (copyin((void *)frame->srr0, &inst, sizeof(inst)) != 0)
865 return (-1);
866 reg = EXC_ALI_INST_RST(inst);
867 fpr = (double *)td->td_pcb->pcb_vec.vr[reg];
868 fputhread = PCPU_GET(vecthread);
869
870 /* Juggle the SPE to ensure that we've initialized
871 * the registers, and that their current state is in
872 * the PCB.
873 */
874 if (fputhread != td) {
875 if (fputhread)
876 save_vec(fputhread);
877 enable_vec(td);
878 }
879 save_vec(td);
880
881 if (!(indicator & ESR_ST)) {
882 if (copyin((void *)frame->dar, fpr,
883 sizeof(double)) != 0)
884 return (-1);
885 frame->fixreg[reg] = td->td_pcb->pcb_vec.vr[reg][1];
886 enable_vec(td);
887 } else {
888 td->td_pcb->pcb_vec.vr[reg][1] = frame->fixreg[reg];
889 if (copyout(fpr, (void *)frame->dar,
890 sizeof(double)) != 0)
891 return (-1);
892 }
893 return (0);
894 }
895 #else
896 #ifdef BOOKE
897 indicator = (frame->cpu.booke.esr & ESR_ST) ? EXC_ALI_STFD : EXC_ALI_LFD;
898 #else
899 indicator = EXC_ALI_OPCODE_INDICATOR(frame->cpu.aim.dsisr);
900 #endif
901
902 switch (indicator) {
903 case EXC_ALI_LFD:
904 case EXC_ALI_STFD:
905 #ifdef BOOKE
906 if (copyin((void *)frame->srr0, &inst, sizeof(inst)) != 0)
907 return (-1);
908 reg = EXC_ALI_INST_RST(inst);
909 #else
910 reg = EXC_ALI_RST(frame->cpu.aim.dsisr);
911 #endif
912 fpr = &td->td_pcb->pcb_fpu.fpr[reg].fpr;
913 fputhread = PCPU_GET(fputhread);
914
915 /* Juggle the FPU to ensure that we've initialized
916 * the FPRs, and that their current state is in
917 * the PCB.
918 */
919 if (fputhread != td) {
920 if (fputhread)
921 save_fpu(fputhread);
922 enable_fpu(td);
923 }
924 save_fpu(td);
925
926 if (indicator == EXC_ALI_LFD) {
927 if (copyin((void *)frame->dar, fpr,
928 sizeof(double)) != 0)
929 return (-1);
930 enable_fpu(td);
931 } else {
932 if (copyout(fpr, (void *)frame->dar,
933 sizeof(double)) != 0)
934 return (-1);
935 }
936 return (0);
937 break;
938 }
939 #endif
940
941 return (-1);
942 }
943
#if defined(__powerpc64__) && defined(AIM)
/* Helpers that stringify "(x & mask) << n" / ">> n" for .long encoding. */
#define MSKNSHL(x, m, n) "(((" #x ") & " #m ") << " #n ")"
#define MSKNSHR(x, m, n) "(((" #x ") & " #m ") >> " #n ")"

/* xvcpsgndp instruction, built in opcode format.
 * This can be changed to use mnemonic after a toolchain update.
 */
#define XVCPSGNDP(xt, xa, xb) \
	__asm __volatile(".long (" \
		MSKNSHL(60, 0x3f, 26) " | " \
		MSKNSHL(xt, 0x1f, 21) " | " \
		MSKNSHL(xa, 0x1f, 16) " | " \
		MSKNSHL(xb, 0x1f, 11) " | " \
		MSKNSHL(240, 0xff, 3) " | " \
		MSKNSHR(xa,  0x20, 3) " | " \
		MSKNSHR(xa,  0x20, 4) " | " \
		MSKNSHR(xa,  0x20, 5) ")")

/* Macros to normalize 1 or 10 VSX registers */
#define NORM(x)	XVCPSGNDP(x, x, x)
#define NORM10(x) \
	NORM(x ## 0); NORM(x ## 1); NORM(x ## 2); NORM(x ## 3); NORM(x ## 4); \
	NORM(x ## 5); NORM(x ## 6); NORM(x ## 7); NORM(x ## 8); NORM(x ## 9)

/*
 * Renormalize all 64 VSX registers by issuing xvcpsgndp vsN,vsN,vsN on
 * each, used by the EXC_SOFT_PATCH handler in trap() before re-executing
 * the patched instruction.  VSX is enabled in the MSR for the duration
 * and the previous MSR value restored afterwards.
 */
static void
normalize_inputs(void)
{
	register_t msr;

	/* enable VSX */
	msr = mfmsr();
	mtmsr(msr | PSL_VSX);

	NORM(0);   NORM(1);   NORM(2);   NORM(3);   NORM(4);
	NORM(5);   NORM(6);   NORM(7);   NORM(8);   NORM(9);
	NORM10(1); NORM10(2); NORM10(3); NORM10(4); NORM10(5);
	NORM(60);  NORM(61);  NORM(62);  NORM(63);

	/* restore MSR */
	mtmsr(msr);
}
#endif
986
#ifdef KDB
/*
 * KDB entry filter, called from trap() (and trap_subr.S) for kernel-mode
 * exceptions.  Forwards trace/breakpoint/debug/DSI traps to kdb_trap(),
 * mapping trap instructions to T_BREAKPOINT.  Returns non-zero if KDB
 * consumed the trap, 0 to let normal trap handling continue.
 */
int
db_trap_glue(struct trapframe *frame)
{

	if (!(frame->srr1 & PSL_PR)
	    && (frame->exc == EXC_TRC || frame->exc == EXC_RUNMODETRC
		|| frame_is_trap_inst(frame)
		|| frame->exc == EXC_BPT
		|| frame->exc == EXC_DEBUG
		|| frame->exc == EXC_DSI)) {
		int type = frame->exc;

		/* Ignore DTrace traps. */
		if (*(uint32_t *)frame->srr0 == EXC_DTRACE)
			return (0);
		if (frame_is_trap_inst(frame)) {
			type = T_BREAKPOINT;
		}
		return (kdb_trap(type, 0, frame));
	}

	return (0);
}
#endif
1012