xref: /openbsd/sys/arch/powerpc/powerpc/trap.c (revision 7b36286a)
1 /*	$OpenBSD: trap.c,v 1.83 2008/06/14 10:55:20 mk Exp $	*/
2 /*	$NetBSD: trap.c,v 1.3 1996/10/13 03:31:37 christos Exp $	*/
3 
4 /*
5  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
6  * Copyright (C) 1995, 1996 TooLs GmbH.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed by TooLs GmbH.
20  * 4. The name of TooLs GmbH may not be used to endorse or promote products
21  *    derived from this software without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
29  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
31  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
32  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 #include <sys/param.h>
35 #include <sys/proc.h>
36 #include <sys/signalvar.h>
37 #include <sys/reboot.h>
38 #include <sys/syscall.h>
39 #include <sys/systm.h>
40 #include <sys/user.h>
41 #include <sys/ktrace.h>
42 #include <sys/pool.h>
43 
44 #include <dev/cons.h>
45 
46 #include <machine/cpu.h>
47 #include <machine/fpu.h>
48 #include <machine/frame.h>
49 #include <machine/pcb.h>
50 #include <machine/pmap.h>
51 #include <machine/psl.h>
52 #include <machine/trap.h>
53 #include <machine/db_machdep.h>
54 
55 #include "systrace.h"
56 #include <dev/systrace.h>
57 
58 #include <uvm/uvm_extern.h>
59 
60 #include <ddb/db_extern.h>
61 #include <ddb/db_sym.h>
62 #include <ddb/db_output.h>
63 
/* Local prototypes. */
static int fix_unaligned(struct proc *p, struct trapframe *frame);
int badaddr(char *addr, u_int32_t len);
static __inline void userret(struct proc *);
void trap(struct trapframe *frame);

/* These definitions should probably be somewhere else				XXX */
#define	FIRSTARG	3		/* first argument is in reg 3 */
#define	NARGREG		8		/* 8 args are in registers */
#define	MOREARGS(sp)	((caddr_t)((int)(sp) + 8)) /* more args go here */
73 
74 #ifdef DDB
75 void ppc_dumpbt(struct trapframe *frame);
76 
77 void
78 ppc_dumpbt(struct trapframe *frame)
79 {
80 	u_int32_t addr;
81 	/* dumpframe is defined in db_trace.c */
82 	addr=frame->fixreg[1];
83 	while (addr != 0)
84 		addr = db_dumpframe(addr, db_printf);
85 	return;
86 }
87 #endif
88 
89 #ifdef ALTIVEC
90 /*
 * Save state of the vector processor.  This is done lazily in the hope
92  * that few processes in the system will be using the vector unit
93  * and that the exception time taken to switch them will be less than
94  * the necessary time to save the vector on every context switch.
95  *
96  * Also note that in this version, the VRSAVE register is saved with
97  * the state of the current process holding the vector processor,
98  * and the contents of that register are not used to optimize the save.
99  *
100  * This can lead to VRSAVE corruption, data passing between processes,
 * because this register is accessible without the MSR[VEC] bit set.
102  * To store/restore this cleanly a processor identifier bit would need
103  * to be saved and this register saved on every context switch.
104  * Since we do not use the information, we may be able to get by
105  * with not saving it rigorously.
106  */
107 void
108 save_vec(struct proc *p)
109 {
110 	struct pcb *pcb = &p->p_addr->u_pcb;
111 	struct vreg *pcb_vr = pcb->pcb_vr;
112 	u_int32_t oldmsr, msr;
113 
114 	if (p == NULL)
115 		return;
116 
117 	/* first we enable vector so that we dont throw an exception
118 	 * in kernel mode
119 	 */
120 	oldmsr = ppc_mfmsr();
121 	msr = (oldmsr & ~PSL_EE) | PSL_VEC;
122 	ppc_mtmsr(msr);
123 	__asm__ volatile ("sync;isync");
124 
125 	pcb->pcb_vr->vrsave = ppc_mfvrsave();
126 
127 #define STR(x) #x
128 #define SAVE_VEC_REG(reg, addr)   \
129 	__asm__ volatile ("stvxl %0, 0, %1" :: "n"(reg),"r" (addr));
130 
131 	SAVE_VEC_REG(0,&pcb_vr->vreg[0]);
132 	SAVE_VEC_REG(1,&pcb_vr->vreg[1]);
133 	SAVE_VEC_REG(2,&pcb_vr->vreg[2]);
134 	SAVE_VEC_REG(3,&pcb_vr->vreg[3]);
135 	SAVE_VEC_REG(4,&pcb_vr->vreg[4]);
136 	SAVE_VEC_REG(5,&pcb_vr->vreg[5]);
137 	SAVE_VEC_REG(6,&pcb_vr->vreg[6]);
138 	SAVE_VEC_REG(7,&pcb_vr->vreg[7]);
139 	SAVE_VEC_REG(8,&pcb_vr->vreg[8]);
140 	SAVE_VEC_REG(9,&pcb_vr->vreg[9]);
141 	SAVE_VEC_REG(10,&pcb_vr->vreg[10]);
142 	SAVE_VEC_REG(11,&pcb_vr->vreg[11]);
143 	SAVE_VEC_REG(12,&pcb_vr->vreg[12]);
144 	SAVE_VEC_REG(13,&pcb_vr->vreg[13]);
145 	SAVE_VEC_REG(14,&pcb_vr->vreg[14]);
146 	SAVE_VEC_REG(15,&pcb_vr->vreg[15]);
147 	SAVE_VEC_REG(16,&pcb_vr->vreg[16]);
148 	SAVE_VEC_REG(17,&pcb_vr->vreg[17]);
149 	SAVE_VEC_REG(18,&pcb_vr->vreg[18]);
150 	SAVE_VEC_REG(19,&pcb_vr->vreg[19]);
151 	SAVE_VEC_REG(20,&pcb_vr->vreg[20]);
152 	SAVE_VEC_REG(21,&pcb_vr->vreg[21]);
153 	SAVE_VEC_REG(22,&pcb_vr->vreg[22]);
154 	SAVE_VEC_REG(23,&pcb_vr->vreg[23]);
155 	SAVE_VEC_REG(24,&pcb_vr->vreg[24]);
156 	SAVE_VEC_REG(25,&pcb_vr->vreg[25]);
157 	SAVE_VEC_REG(26,&pcb_vr->vreg[26]);
158 	SAVE_VEC_REG(27,&pcb_vr->vreg[27]);
159 	SAVE_VEC_REG(28,&pcb_vr->vreg[28]);
160 	SAVE_VEC_REG(29,&pcb_vr->vreg[29]);
161 	SAVE_VEC_REG(30,&pcb_vr->vreg[30]);
162 	SAVE_VEC_REG(31,&pcb_vr->vreg[31]);
163 	__asm__ volatile ("mfvscr 0");
164 	SAVE_VEC_REG(0,&pcb_vr->vscr);
165 
166 	curcpu()->ci_vecproc = NULL;
167 	pcb->pcb_veccpu = NULL;
168 
169 	/* fix kernel msr back */
170 	ppc_mtmsr(oldmsr);
171 }
172 
173 /*
174  * Copy the context of a given process into the vector registers.
175  */
176 void
177 enable_vec(struct proc *p)
178 {
179 	struct pcb *pcb = &p->p_addr->u_pcb;
180 	struct vreg *pcb_vr = pcb->pcb_vr;
181 	struct cpu_info *ci = curcpu();
182 	u_int32_t oldmsr, msr;
183 
184 	/* If this is the very first altivec instruction executed
185 	 * by this process, create a context.
186 	 */
187 	if (pcb->pcb_vr == NULL)
188 		pcb->pcb_vr = pool_get(&ppc_vecpl, PR_WAITOK | PR_ZERO);
189 
190 	if (curcpu()->ci_vecproc != NULL || pcb->pcb_veccpu != NULL)
191 		printf("attempting to restore vector in use vecproc %x"
192 		    " veccpu %x\n", curcpu()->ci_vecproc, pcb->pcb_veccpu);
193 
194 	/* first we enable vector so that we dont throw an exception
195 	 * in kernel mode
196 	 */
197 	oldmsr = ppc_mfmsr();
198 	msr = (oldmsr & ~PSL_EE) | PSL_VEC;
199 	ppc_mtmsr(msr);
200 	__asm__ volatile ("sync;isync");
201 	ci->ci_vecproc = p;
202 	pcb->pcb_veccpu = ci;
203 
204 #define LOAD_VEC_REG(reg, addr)   \
205 	__asm__ volatile ("lvxl %0, 0, %1" :: "n"(reg), "r" (addr));
206 
207 	LOAD_VEC_REG(0, &pcb_vr->vscr);
208 	__asm__ volatile ("mtvscr 0");
209 	ppc_mtvrsave(pcb_vr->vrsave);
210 
211 	LOAD_VEC_REG(0, &pcb_vr->vreg[0]);
212 	LOAD_VEC_REG(1, &pcb_vr->vreg[1]);
213 	LOAD_VEC_REG(2, &pcb_vr->vreg[2]);
214 	LOAD_VEC_REG(3, &pcb_vr->vreg[3]);
215 	LOAD_VEC_REG(4, &pcb_vr->vreg[4]);
216 	LOAD_VEC_REG(5, &pcb_vr->vreg[5]);
217 	LOAD_VEC_REG(6, &pcb_vr->vreg[6]);
218 	LOAD_VEC_REG(7, &pcb_vr->vreg[7]);
219 	LOAD_VEC_REG(8, &pcb_vr->vreg[8]);
220 	LOAD_VEC_REG(9, &pcb_vr->vreg[9]);
221 	LOAD_VEC_REG(10, &pcb_vr->vreg[10]);
222 	LOAD_VEC_REG(11, &pcb_vr->vreg[11]);
223 	LOAD_VEC_REG(12, &pcb_vr->vreg[12]);
224 	LOAD_VEC_REG(13, &pcb_vr->vreg[13]);
225 	LOAD_VEC_REG(14, &pcb_vr->vreg[14]);
226 	LOAD_VEC_REG(15, &pcb_vr->vreg[15]);
227 	LOAD_VEC_REG(16, &pcb_vr->vreg[16]);
228 	LOAD_VEC_REG(17, &pcb_vr->vreg[17]);
229 	LOAD_VEC_REG(18, &pcb_vr->vreg[18]);
230 	LOAD_VEC_REG(19, &pcb_vr->vreg[19]);
231 	LOAD_VEC_REG(20, &pcb_vr->vreg[20]);
232 	LOAD_VEC_REG(21, &pcb_vr->vreg[21]);
233 	LOAD_VEC_REG(22, &pcb_vr->vreg[22]);
234 	LOAD_VEC_REG(23, &pcb_vr->vreg[23]);
235 	LOAD_VEC_REG(24, &pcb_vr->vreg[24]);
236 	LOAD_VEC_REG(25, &pcb_vr->vreg[25]);
237 	LOAD_VEC_REG(26, &pcb_vr->vreg[26]);
238 	LOAD_VEC_REG(27, &pcb_vr->vreg[27]);
239 	LOAD_VEC_REG(28, &pcb_vr->vreg[28]);
240 	LOAD_VEC_REG(29, &pcb_vr->vreg[29]);
241 	LOAD_VEC_REG(30, &pcb_vr->vreg[30]);
242 	LOAD_VEC_REG(31, &pcb_vr->vreg[31]);
243 
244 	/* fix kernel msr back */
245 	ppc_mtmsr(oldmsr);
246 }
247 #endif /* ALTIVEC */
248 
249 static __inline void
250 userret(struct proc *p)
251 {
252 	int sig;
253 
254 	/* take pending signals */
255 	while ((sig = CURSIG(p)) != 0)
256 		postsig(sig);
257 	curcpu()->ci_schedstate.spc_curpriority = p->p_priority;
258 }
259 
260 void
261 trap(struct trapframe *frame)
262 {
263 	struct cpu_info *ci = curcpu();
264 	struct proc *p = curproc;
265 	int type = frame->exc;
266 	union sigval sv;
267 	char *name;
268 	db_expr_t offset;
269 
270 	if (frame->srr1 & PSL_PR) {
271 		type |= EXC_USER;
272 	}
273 
274 	switch (type) {
275 	case EXC_TRC|EXC_USER:
276 		{
277 			sv.sival_int = frame->srr0;
278 			KERNEL_PROC_LOCK(p);
279 			trapsignal(p, SIGTRAP, type, TRAP_TRACE, sv);
280 			KERNEL_PROC_UNLOCK(p);
281 		}
282 		break;
283 
284 	case EXC_MCHK:
285 		{
286 			faultbuf *fb;
287 
288 			if ((fb = p->p_addr->u_pcb.pcb_onfault)) {
289 				p->p_addr->u_pcb.pcb_onfault = 0;
290 				frame->srr0 = fb->pc;		/* PC */
291 				frame->srr1 = fb->sr;		/* SR */
292 				frame->fixreg[1] = fb->sp;	/* SP */
293 				frame->fixreg[3] = 1;		/* != 0 */
294 				frame->cr = fb->cr;
295 				bcopy(&fb->regs[0], &frame->fixreg[13], 19*4);
296 				return;
297 			}
298 		}
299 		goto brain_damage;
300 
301 	case EXC_DSI:
302 		{
303 			struct vm_map *map;
304 			vaddr_t va;
305 			int ftype;
306 			faultbuf *fb;
307 
308 			map = kernel_map;
309 			va = frame->dar;
310 			if ((va >> ADDR_SR_SHIFT) == PPC_USER_SR) {
311 				sr_t user_sr;
312 
313 				asm ("mfsr %0, %1"
314 				     : "=r"(user_sr) : "K"(PPC_USER_SR));
315 				va &= ADDR_PIDX | ADDR_POFF;
316 				va |= user_sr << ADDR_SR_SHIFT;
317 				map = &p->p_vmspace->vm_map;
318 				if (pte_spill_v(map->pmap, va, frame->dsisr, 0))
319 					return;
320 			}
321 			if (frame->dsisr & DSISR_STORE)
322 				ftype = VM_PROT_READ | VM_PROT_WRITE;
323 			else
324 				ftype = VM_PROT_READ;
325 			KERNEL_LOCK();
326 			if (uvm_fault(map, trunc_page(va), 0, ftype) == 0) {
327 				KERNEL_UNLOCK();
328 				return;
329 			}
330 			KERNEL_UNLOCK();
331 
332 			if ((fb = p->p_addr->u_pcb.pcb_onfault)) {
333 				p->p_addr->u_pcb.pcb_onfault = 0;
334 				frame->srr0 = fb->pc;		/* PC */
335 				frame->fixreg[1] = fb->sp;	/* SP */
336 				frame->fixreg[3] = 1;		/* != 0 */
337 				frame->cr = fb->cr;
338 				bcopy(&fb->regs[0], &frame->fixreg[13], 19*4);
339 				return;
340 			}
341 			map = kernel_map;
342 		}
343 printf("kern dsi on addr %x iar %x\n", frame->dar, frame->srr0);
344 		goto brain_damage;
345 	case EXC_DSI|EXC_USER:
346 		{
347 			int ftype, vftype;
348 
349 			/* Try spill handler first */
350 			if (pte_spill_v(p->p_vmspace->vm_map.pmap,
351 			    frame->dar, frame->dsisr, 0))
352 				break;
353 
354 			KERNEL_PROC_LOCK(p);
355 			if (frame->dsisr & DSISR_STORE) {
356 				ftype = VM_PROT_READ | VM_PROT_WRITE;
357 				vftype = VM_PROT_WRITE;
358 			} else
359 				vftype = ftype = VM_PROT_READ;
360 			if (uvm_fault(&p->p_vmspace->vm_map,
361 				     trunc_page(frame->dar), 0, ftype) == 0) {
362 				uvm_grow(p, trunc_page(frame->dar));
363 				KERNEL_PROC_UNLOCK(p);
364 				break;
365 			}
366 
367 #if 0
368 printf("dsi on addr %x iar %x lr %x\n", frame->dar, frame->srr0,frame->lr);
369 #endif
370 /*
371  * keep this for later in case we want it later.
372 */
373 			sv.sival_int = frame->dar;
374 			trapsignal(p, SIGSEGV, vftype, SEGV_MAPERR, sv);
375 			KERNEL_PROC_UNLOCK(p);
376 		}
377 		break;
378 	case EXC_ISI|EXC_USER:
379 		{
380 			int ftype;
381 
382 			/* Try spill handler */
383 			if (pte_spill_v(p->p_vmspace->vm_map.pmap,
384 			    frame->srr0, 0, 1))
385 				break;
386 
387 			KERNEL_PROC_LOCK(p);
388 			ftype = VM_PROT_READ | VM_PROT_EXECUTE;
389 			if (uvm_fault(&p->p_vmspace->vm_map,
390 			    trunc_page(frame->srr0), 0, ftype) == 0) {
391 				uvm_grow(p, trunc_page(frame->srr0));
392 				KERNEL_PROC_UNLOCK(p);
393 				break;
394 			}
395 			KERNEL_PROC_UNLOCK(p);
396 		}
397 #if 0
398 printf("isi iar %x lr %x\n", frame->srr0, frame->lr);
399 #endif
400 		/* FALLTHROUGH */
401 	case EXC_MCHK|EXC_USER:
402 /* XXX Likely that returning from this trap is bogus... */
403 /* XXX Have to make sure that sigreturn does the right thing. */
404 		sv.sival_int = frame->srr0;
405 		KERNEL_PROC_LOCK(p);
406 		trapsignal(p, SIGSEGV, VM_PROT_EXECUTE, SEGV_MAPERR, sv);
407 		KERNEL_PROC_UNLOCK(p);
408 		break;
409 	case EXC_SC|EXC_USER:
410 		{
411 			struct sysent *callp;
412 			size_t argsize;
413 			register_t code, error;
414 			register_t *params, rval[2];
415 			int nsys, n, nolock;
416 			register_t args[10];
417 
418 			uvmexp.syscalls++;
419 
420 			nsys = p->p_emul->e_nsysent;
421 			callp = p->p_emul->e_sysent;
422 
423 			code = frame->fixreg[0];
424 			params = frame->fixreg + FIRSTARG;
425 
426 			switch (code) {
427 			case SYS_syscall:
428 				/*
429 				 * code is first argument,
430 				 * followed by actual args.
431 				 */
432 				code = *params++;
433 				break;
434 			case SYS___syscall:
435 				/*
436 				 * Like syscall, but code is a quad,
437 				 * so as to maintain quad alignment
438 				 * for the rest of the args.
439 				 */
440 				if (callp != sysent)
441 					break;
442 				params++;
443 				code = *params++;
444 				break;
445 			default:
446 				break;
447 			}
448 			if (code < 0 || code >= nsys)
449 				callp += p->p_emul->e_nosys;
450 			else
451 				callp += code;
452 			argsize = callp->sy_argsize;
453 			n = NARGREG - (params - (frame->fixreg + FIRSTARG));
454 			if (argsize > n * sizeof(register_t)) {
455 				bcopy(params, args, n * sizeof(register_t));
456 
457 				error = copyin(MOREARGS(frame->fixreg[1]),
458 				   args + n, argsize - n * sizeof(register_t));
459 				if (error) {
460 #ifdef	KTRACE
461 					/* Can't get all the arguments! */
462 					if (KTRPOINT(p, KTR_SYSCALL))
463 						ktrsyscall(p, code,
464 							   argsize, args);
465 #endif
466 					goto syscall_bad;
467 				}
468 				params = args;
469 			}
470 
471 #ifdef	KTRACE
472 			if (KTRPOINT(p, KTR_SYSCALL)) {
473 				KERNEL_PROC_LOCK(p);
474 				ktrsyscall(p, code, argsize, params);
475 				KERNEL_PROC_UNLOCK(p);
476 			}
477 #endif
478 			rval[0] = 0;
479 			rval[1] = frame->fixreg[FIRSTARG + 1];
480 
481 #ifdef SYSCALL_DEBUG
482 			KERNEL_PROC_LOCK(p);
483 			scdebug_call(p, code, params);
484 			KERNEL_PROC_UNLOCK(p);
485 #endif
486 
487 
488 #if NSYSTRACE > 0
489 			if (ISSET(p->p_flag, P_SYSTRACE)) {
490 				KERNEL_PROC_LOCK(p);
491 				error = systrace_redirect(code, p, params,
492 				    rval);
493 				KERNEL_PROC_UNLOCK(p);
494 			} else
495 #endif
496 			{
497 				nolock = (callp->sy_flags & SY_NOLOCK);
498 				if (!nolock)
499 					KERNEL_PROC_LOCK(p);
500 				error = (*callp->sy_call)(p, params, rval);
501 				if (!nolock)
502 					KERNEL_PROC_UNLOCK(p);
503 			}
504 			switch (error) {
505 			case 0:
506 				frame->fixreg[0] = error;
507 				frame->fixreg[FIRSTARG] = rval[0];
508 				frame->fixreg[FIRSTARG + 1] = rval[1];
509 				frame->cr &= ~0x10000000;
510 				break;
511 			case ERESTART:
512 				/*
513 				 * Set user's pc back to redo the system call.
514 				 */
515 				frame->srr0 -= 4;
516 				break;
517 			case EJUSTRETURN:
518 				/* nothing to do */
519 				break;
520 			default:
521 syscall_bad:
522 				if (p->p_emul->e_errno)
523 					error = p->p_emul->e_errno[error];
524 				frame->fixreg[0] = error;
525 				frame->fixreg[FIRSTARG] = error;
526 				frame->fixreg[FIRSTARG + 1] = rval[1];
527 				frame->cr |= 0x10000000;
528 				break;
529 			}
530 #ifdef SYSCALL_DEBUG
531 			KERNEL_PROC_LOCK(p);
532 			scdebug_ret(p, code, error, rval);
533 			KERNEL_PROC_UNLOCK(p);
534 #endif
535 #ifdef	KTRACE
536 			if (KTRPOINT(p, KTR_SYSRET)) {
537 				KERNEL_PROC_LOCK(p);
538 				ktrsysret(p, code, error, rval[0]);
539 				KERNEL_PROC_UNLOCK(p);
540 			}
541 #endif
542 		}
543 		break;
544 
545 	case EXC_FPU|EXC_USER:
546 		if (ci->ci_fpuproc)
547 			save_fpu();
548 		uvmexp.fpswtch++;
549 		enable_fpu(p);
550 		break;
551 
552 	case EXC_ALI|EXC_USER:
553 		/* alignment exception
554 		 * we check to see if this can be fixed up
555 		 * by the code that fixes the typical gcc misaligned code
556 		 * then kill the process if not.
557 		 */
558 		if (fix_unaligned(p, frame) == 0)
559 			frame->srr0 += 4;
560 		else {
561 			sv.sival_int = frame->srr0;
562 			KERNEL_PROC_LOCK(p);
563 			trapsignal(p, SIGSEGV, VM_PROT_EXECUTE, SEGV_MAPERR,
564 				sv);
565 			KERNEL_PROC_UNLOCK(p);
566 		}
567 		break;
568 
569 	default:
570 
571 brain_damage:
572 /*
573 mpc_print_pci_stat();
574 */
575 
576 #ifdef DDB
577 		/* set up registers */
578 		db_save_regs(frame);
579 		db_find_sym_and_offset(frame->srr0, &name, &offset);
580 #else
581 		name = NULL;
582 #endif
583 		if (!name) {
584 			name = "0";
585 			offset = frame->srr0;
586 		}
587 		panic ("trap type %x at %x (%s+0x%lx) lr %x",
588 			type, frame->srr0, name, offset, frame->lr);
589 
590 
591 	case EXC_PGM|EXC_USER:
592 	{
593 #if 0
594 		char *errstr[8];
595 		int errnum = 0;
596 
597 		if (frame->srr1 & (1<<(31-11))) {
598 			/* floating point enabled program exception */
599 			errstr[errnum] = "floating point";
600 			errnum++;
601 		}
602 		if (frame->srr1 & (1<<(31-12))) {
603 			/* illegal instruction program exception */
604 			errstr[errnum] = "illegal instruction";
605 			errnum++;
606 		}
607 		if (frame->srr1 & (1<<(31-13))) {
608 			/* privileged instruction exception */
609 			errstr[errnum] = "privileged instr";
610 			errnum++;
611 		}
612 #endif
613 		if (frame->srr1 & (1<<(31-14))) {
614 #if 0
615 			errstr[errnum] = "trap instr";
616 			errnum++;
617 #endif
618 			sv.sival_int = frame->srr0;
619 			KERNEL_PROC_LOCK(p);
620 			trapsignal(p, SIGTRAP, type, TRAP_BRKPT, sv);
621 			KERNEL_PROC_UNLOCK(p);
622 			break;
623 		}
624 #if 0
625 		if (frame->srr1 & (1<<(31-15))) {
626 			errstr[errnum] = "previous address";
627 			errnum++;
628 		}
629 #endif
630 #if 0
631 printf("pgm iar %x srr1 %x\n", frame->srr0, frame->srr1);
632 {
633 int i;
634 for (i = 0; i < errnum; i++) {
635 	printf("\t[%s]\n", errstr[i]);
636 }
637 }
638 #endif
639 		sv.sival_int = frame->srr0;
640 		KERNEL_PROC_LOCK(p);
641 		trapsignal(p, SIGILL, 0, ILL_ILLOPC, sv);
642 		KERNEL_PROC_UNLOCK(p);
643 		break;
644 	}
645 	case EXC_PGM:
646 		/* should check for correct byte here or panic */
647 #ifdef DDB
648 		db_save_regs(frame);
649 		cnpollc(TRUE);
650 		db_trap(T_BREAKPOINT, 0);
651 		cnpollc(FALSE);
652 #else
653 		panic("trap EXC_PGM");
654 #endif
655 		break;
656 
657 	/* This is not really a perf exception, but is an ALTIVEC unavail
658 	 * if we do not handle it, kill the process with illegal instruction.
659 	 */
660 	case EXC_PERF|EXC_USER:
661 #ifdef ALTIVEC
662 	case EXC_VEC|EXC_USER:
663 		if (ci->ci_vecproc)
664 			save_vec(ci->ci_vecproc);
665 
666 		enable_vec(p);
667 		break;
668 #else  /* ALTIVEC */
669 		sv.sival_int = frame->srr0;
670 		KERNEL_PROC_LOCK(p);
671 		trapsignal(p, SIGILL, 0, ILL_ILLOPC, sv);
672 		KERNEL_PROC_UNLOCK(p);
673 		break;
674 #endif
675 
676 	case EXC_VECAST|EXC_USER:
677 		KERNEL_PROC_LOCK(p);
678 		trapsignal(p, SIGFPE, 0, FPE_FLTRES, sv);
679 		KERNEL_PROC_UNLOCK(p);
680 		break;
681 
682 	case EXC_AST|EXC_USER:
683 		uvmexp.softs++;
684 		p->p_md.md_astpending = 0;	/* we are about to do it */
685 		if (p->p_flag & P_OWEUPC) {
686 			KERNEL_PROC_LOCK(p);
687 			ADDUPROF(p);
688 			KERNEL_PROC_UNLOCK(p);
689 		}
690 		if (ci->ci_want_resched)
691 			preempt(NULL);
692 		break;
693 	}
694 
695 	userret(p);
696 
697 	/*
698 	 * If someone stole the fpu while we were away, disable it
699 	 */
700 	if (p != ci->ci_fpuproc)
701 		frame->srr1 &= ~PSL_FP;
702 	else if (p->p_addr->u_pcb.pcb_flags & PCB_FPU)
703 		frame->srr1 |= PSL_FP;
704 
705 #ifdef ALTIVEC
706 	/*
707 	 * If someone stole the vector unit while we were away, disable it
708 	 */
709 	if (p == ci->ci_vecproc)
710 		frame->srr1 |= PSL_VEC;
711 	else
712 		frame->srr1 &= ~PSL_VEC;
713 #endif /* ALTIVEC */
714 }
715 
/*
 * First code run by a newly forked child process, on return from
 * cpu_fork().  Makes the fork/vfork syscall appear to return
 * (0, 1) with no error in the child, then performs the normal
 * return-to-userland processing.
 *
 * Called with the kernel lock held (released below before userret).
 */
void
child_return(void *arg)
{
	struct proc *p = (struct proc *)arg;
	struct trapframe *tf = trapframe(p);

	tf->fixreg[0] = 0;
	tf->fixreg[FIRSTARG] = 0;		/* child returns 0 */
	tf->fixreg[FIRSTARG + 1] = 1;
	/* clear the error-indication CR bit (same convention as the
	 * syscall-return path in trap()) */
	tf->cr &= ~0x10000000;
	/* Disable FPU, VECT, as we can't be fpuproc */
	tf->srr1 &= ~(PSL_FP|PSL_VEC);

	KERNEL_PROC_UNLOCK(p);

	userret(p);

#ifdef	KTRACE
	/* Record the syscall return in the child's ktrace stream. */
	if (KTRPOINT(p, KTR_SYSRET)) {
		KERNEL_PROC_LOCK(p);
		ktrsysret(p,
		    (p->p_flag & P_PPWAIT) ? SYS_vfork : SYS_fork, 0, 0);
		KERNEL_PROC_UNLOCK(p);
	}
#endif
}
742 
743 int
744 badaddr(char *addr, u_int32_t len)
745 {
746 	faultbuf env;
747 	u_int32_t v;
748 	void *oldh = curpcb->pcb_onfault;
749 
750 	if (setfault(&env)) {
751 		curpcb->pcb_onfault = oldh;
752 		return EFAULT;
753 	}
754 	switch(len) {
755 	case 4:
756 		v = *((volatile u_int32_t *)addr);
757 		break;
758 	case 2:
759 		v = *((volatile u_int16_t *)addr);
760 		break;
761 	default:
762 		v = *((volatile u_int8_t *)addr);
763 		break;
764 	}
765 	/* Make sure all loads retire before turning off fault handling!! */
766 	__asm__ volatile ("sync");
767 	curpcb->pcb_onfault = oldh;
768 	return(0);
769 }
770 
771 
772 /*
773  * For now, this only deals with the particular unaligned access case
774  * that gcc tends to generate.  Eventually it should handle all of the
775  * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
776  */
777 
/*
 * Attempt to emulate the unaligned lfd/stfd access that caused an
 * alignment exception; the opcode class and target FP register are
 * decoded from DSISR.  Returns 0 on success (caller then skips the
 * instruction), -1 if the access could not be fixed up.
 */
static int
fix_unaligned(struct proc *p, struct trapframe *frame)
{
	int indicator = EXC_ALI_OPCODE_INDICATOR(frame->dsisr);
	struct cpu_info *ci = curcpu();

	switch (indicator) {
	case EXC_ALI_LFD:
	case EXC_ALI_STFD:
		{
			int reg = EXC_ALI_RST(frame->dsisr);
			double *fpr = &p->p_addr->u_pcb.pcb_fpu.fpr[reg];

			/* Juggle the FPU to ensure that we've initialized
			 * the FPRs, and that their current state is in
			 * the PCB.
			 */
			if (ci->ci_fpuproc != p) {
				if (ci->ci_fpuproc)
					save_fpu();
				enable_fpu(p);
			}
			save_fpu();

			/* Emulate the access through the PCB copy of the
			 * register; copyin/copyout have no alignment
			 * constraints. */
			if (indicator == EXC_ALI_LFD) {
				if (copyin((void *)frame->dar, fpr,
				    sizeof(double)) != 0)
					return -1;
			} else {
				if (copyout(fpr, (void *)frame->dar,
				    sizeof(double)) != 0)
					return -1;
			}
			/* Reload the (possibly modified) FP state. */
			enable_fpu(p);
			return 0;
		}
		break;
	}
	return -1;
}
818