xref: /openbsd/sys/arch/powerpc/powerpc/trap.c (revision 78b63d65)
1 /*	$OpenBSD: trap.c,v 1.41 2001/11/29 04:47:41 drahn Exp $	*/
2 /*	$NetBSD: trap.c,v 1.3 1996/10/13 03:31:37 christos Exp $	*/
3 
4 /*
5  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
6  * Copyright (C) 1995, 1996 TooLs GmbH.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed by TooLs GmbH.
20  * 4. The name of TooLs GmbH may not be used to endorse or promote products
21  *    derived from this software without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
29  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
31  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
32  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 #include <sys/param.h>
35 #include <sys/proc.h>
36 #include <sys/signalvar.h>
37 #include <sys/reboot.h>
38 #include <sys/syscall.h>
39 #include <sys/systm.h>
40 #include <sys/user.h>
41 #include <sys/ktrace.h>
42 #include <sys/pool.h>
43 
44 #include <machine/cpu.h>
45 #include <machine/fpu.h>
46 #include <machine/frame.h>
47 #include <machine/pcb.h>
48 #include <machine/pmap.h>
49 #include <machine/psl.h>
50 #include <machine/trap.h>
51 #include <machine/db_machdep.h>
52 
53 #include <uvm/uvm_extern.h>
54 
55 #include <ddb/db_extern.h>
56 #include <ddb/db_sym.h>
57 
58 static int fix_unaligned __P((struct proc *p, struct trapframe *frame));
59 int badaddr __P((char *addr, u_int32_t len));
60 void trap __P((struct trapframe *frame));
61 
62 /* These definitions should probably be somewhere else				XXX */
63 #define	FIRSTARG	3		/* first argument is in reg 3 */
64 #define	NARGREG		8		/* 8 args are in registers */
65 #define	MOREARGS(sp)	((caddr_t)((int)(sp) + 8)) /* more args go here */
66 
67 volatile int want_resched;
68 struct proc *ppc_vecproc;
69 
70 #ifdef DDB
71 void ppc_dumpbt __P((struct trapframe *frame));
72 
73 void
74 ppc_dumpbt(struct trapframe *frame)
75 {
76 	u_int32_t addr;
77 	/* dumpframe is defined in db_trace.c */
78 	addr=frame->fixreg[1];
79 	while (addr != 0) {
80 		addr = db_dumpframe(addr);
81 	}
82 	return;
83 }
84 #endif
85 
86 #ifdef PPC_VECTOR_SUPPORTED
87 /*
 * Save state of the vector processor. This is done lazily in the hope
89  * that few processes in the system will be using the vector unit
90  * and that the exception time taken to switch them will be less than
91  * the necessary time to save the vector on every context switch.
92  *
93  * Also note that in this version, the VRSAVE register is saved with
94  * the state of the current process holding the vector processor,
95  * and the contents of that register are not used to optimize the save.
96  *
 * This can lead to VRSAVE corruption (data leaking between processes)
 * because this register is accessible without the MSR[VEC] bit set.
99  * To store/restore this cleanly a processor identifier bit would need
100  * to be saved and this register saved on every context switch.
101  * Since we do not use the information, we may be able to get by
102  * with not saving it rigorously.
103  */
104 void
105 save_vec(struct proc *p)
106 {
107 	struct pcb *pcb = &p->p_addr->u_pcb;
108 	u_int32_t oldmsr, msr;
109 	u_int32_t tmp;
110 	/* first we enable vector so that we dont throw an exception
111 	 * in kernel mode
112 	 */
113 	__asm__ volatile ("mfmsr %0" : "=r" (oldmsr));
114 	msr = oldmsr | PSL_VEC;
115 	__asm__ volatile ("mtmsr %0" :: "r" (msr));
116 	__asm__ volatile ("sync;isync");
117 
118 	__asm__ volatile ("mfvscr %0" : "=r" (tmp));
119 	pcb->pcb_vr->vrsave = tmp;
120 	__asm__ volatile ("mfspr %0, 256" : "=r" (tmp));
121 	pcb->pcb_vr->vscr = tmp;
122 
123 	__asm__ volatile ("stvxl  0, 0, %0" ::"r" (&pcb->pcb_vr->vreg[0]));
124 	__asm__ volatile ("stvxl  1, 0, %0" ::"r" (&pcb->pcb_vr->vreg[1]));
125 	__asm__ volatile ("stvxl  2, 0, %0" ::"r" (&pcb->pcb_vr->vreg[2]));
126 	__asm__ volatile ("stvxl  3, 0, %0" ::"r" (&pcb->pcb_vr->vreg[3]));
127 	__asm__ volatile ("stvxl  4, 0, %0" ::"r" (&pcb->pcb_vr->vreg[4]));
128 	__asm__ volatile ("stvxl  5, 0, %0" ::"r" (&pcb->pcb_vr->vreg[5]));
129 	__asm__ volatile ("stvxl  6, 0, %0" ::"r" (&pcb->pcb_vr->vreg[6]));
130 	__asm__ volatile ("stvxl  7, 0, %0" ::"r" (&pcb->pcb_vr->vreg[7]));
131 	__asm__ volatile ("stvxl  8, 0, %0" ::"r" (&pcb->pcb_vr->vreg[8]));
132 	__asm__ volatile ("stvxl  9, 0, %0" ::"r" (&pcb->pcb_vr->vreg[9]));
133 	__asm__ volatile ("stvxl  10, 0, %0" ::"r" (&pcb->pcb_vr->vreg[10]));
134 	__asm__ volatile ("stvxl  11, 0, %0" ::"r" (&pcb->pcb_vr->vreg[11]));
135 	__asm__ volatile ("stvxl  12, 0, %0" ::"r" (&pcb->pcb_vr->vreg[12]));
136 	__asm__ volatile ("stvxl  13, 0, %0" ::"r" (&pcb->pcb_vr->vreg[13]));
137 	__asm__ volatile ("stvxl  14, 0, %0" ::"r" (&pcb->pcb_vr->vreg[14]));
138 	__asm__ volatile ("stvxl  15, 0, %0" ::"r" (&pcb->pcb_vr->vreg[15]));
139 	__asm__ volatile ("stvxl  16, 0, %0" ::"r" (&pcb->pcb_vr->vreg[16]));
140 	__asm__ volatile ("stvxl  17, 0, %0" ::"r" (&pcb->pcb_vr->vreg[17]));
141 	__asm__ volatile ("stvxl  18, 0, %0" ::"r" (&pcb->pcb_vr->vreg[18]));
142 	__asm__ volatile ("stvxl  19, 0, %0" ::"r" (&pcb->pcb_vr->vreg[19]));
143 	__asm__ volatile ("stvxl  20, 0, %0" ::"r" (&pcb->pcb_vr->vreg[20]));
144 	__asm__ volatile ("stvxl  21, 0, %0" ::"r" (&pcb->pcb_vr->vreg[21]));
145 	__asm__ volatile ("stvxl  22, 0, %0" ::"r" (&pcb->pcb_vr->vreg[22]));
146 	__asm__ volatile ("stvxl  23, 0, %0" ::"r" (&pcb->pcb_vr->vreg[23]));
147 	__asm__ volatile ("stvxl  24, 0, %0" ::"r" (&pcb->pcb_vr->vreg[24]));
148 	__asm__ volatile ("stvxl  25, 0, %0" ::"r" (&pcb->pcb_vr->vreg[25]));
149 	__asm__ volatile ("stvxl  26, 0, %0" ::"r" (&pcb->pcb_vr->vreg[26]));
150 	__asm__ volatile ("stvxl  27, 0, %0" ::"r" (&pcb->pcb_vr->vreg[27]));
151 	__asm__ volatile ("stvxl  28, 0, %0" ::"r" (&pcb->pcb_vr->vreg[28]));
152 	__asm__ volatile ("stvxl  29, 0, %0" ::"r" (&pcb->pcb_vr->vreg[29]));
153 	__asm__ volatile ("stvxl  30, 0, %0" ::"r" (&pcb->pcb_vr->vreg[30]));
154 	__asm__ volatile ("stvxl  31, 0, %0" ::"r" (&pcb->pcb_vr->vreg[31]));
155 
156 	/* fix kernel msr back */
157 	__asm__ volatile ("mfmsr %0" :: "r" (oldmsr));
158 }
159 
160 /*
161  * Copy the context of a given process into the vector registers.
162  */
163 void
164 enable_vec(struct proc *p)
165 {
166 	struct pcb *pcb = &p->p_addr->u_pcb;
167 	u_int32_t oldmsr, msr;
168 	u_int32_t tmp;
169 
170 	/* If this is the very first altivec instruction executed
171 	 * by this process, create a context.
172 	 */
173 	if (pcb->pcb_vr == NULL) {
174 		pcb->pcb_vr = pool_get(&ppc_vecpl, PR_WAITOK);
175 		bzero(pcb->pcb_vr, sizeof *(pcb->pcb_vr));
176 	}
177 
178 	/* first we enable vector so that we dont throw an exception
179 	 * in kernel mode
180 	 */
181 	__asm__ volatile ("mfmsr %0" : "=r" (oldmsr));
182 	msr = oldmsr | PSL_VEC;
183 	__asm__ volatile ("mtmsr %0" :: "r" (msr));
184 	__asm__ volatile ("sync;isync");
185 
186 	tmp = pcb->pcb_vr->vrsave;
187 	__asm__ volatile ("mtvscr %0" :: "r" (tmp));
188 	tmp = pcb->pcb_vr->vscr;
189 	__asm__ volatile ("mtspr 256, %0" :: "r" (tmp));
190 
191 	__asm__ volatile ("lvxl  0, 0, %0" ::"r" (&pcb->pcb_vr->vreg[0]));
192 	__asm__ volatile ("lvxl  1, 0, %0" ::"r" (&pcb->pcb_vr->vreg[1]));
193 	__asm__ volatile ("lvxl  2, 0, %0" ::"r" (&pcb->pcb_vr->vreg[2]));
194 	__asm__ volatile ("lvxl  3, 0, %0" ::"r" (&pcb->pcb_vr->vreg[3]));
195 	__asm__ volatile ("lvxl  4, 0, %0" ::"r" (&pcb->pcb_vr->vreg[4]));
196 	__asm__ volatile ("lvxl  5, 0, %0" ::"r" (&pcb->pcb_vr->vreg[5]));
197 	__asm__ volatile ("lvxl  6, 0, %0" ::"r" (&pcb->pcb_vr->vreg[6]));
198 	__asm__ volatile ("lvxl  7, 0, %0" ::"r" (&pcb->pcb_vr->vreg[7]));
199 	__asm__ volatile ("lvxl  8, 0, %0" ::"r" (&pcb->pcb_vr->vreg[8]));
200 	__asm__ volatile ("lvxl  9, 0, %0" ::"r" (&pcb->pcb_vr->vreg[9]));
201 	__asm__ volatile ("lvxl  10, 0, %0" ::"r" (&pcb->pcb_vr->vreg[10]));
202 	__asm__ volatile ("lvxl  11, 0, %0" ::"r" (&pcb->pcb_vr->vreg[11]));
203 	__asm__ volatile ("lvxl  12, 0, %0" ::"r" (&pcb->pcb_vr->vreg[12]));
204 	__asm__ volatile ("lvxl  13, 0, %0" ::"r" (&pcb->pcb_vr->vreg[13]));
205 	__asm__ volatile ("lvxl  14, 0, %0" ::"r" (&pcb->pcb_vr->vreg[14]));
206 	__asm__ volatile ("lvxl  15, 0, %0" ::"r" (&pcb->pcb_vr->vreg[15]));
207 	__asm__ volatile ("lvxl  16, 0, %0" ::"r" (&pcb->pcb_vr->vreg[16]));
208 	__asm__ volatile ("lvxl  17, 0, %0" ::"r" (&pcb->pcb_vr->vreg[17]));
209 	__asm__ volatile ("lvxl  18, 0, %0" ::"r" (&pcb->pcb_vr->vreg[18]));
210 	__asm__ volatile ("lvxl  19, 0, %0" ::"r" (&pcb->pcb_vr->vreg[19]));
211 	__asm__ volatile ("lvxl  20, 0, %0" ::"r" (&pcb->pcb_vr->vreg[20]));
212 	__asm__ volatile ("lvxl  21, 0, %0" ::"r" (&pcb->pcb_vr->vreg[21]));
213 	__asm__ volatile ("lvxl  22, 0, %0" ::"r" (&pcb->pcb_vr->vreg[22]));
214 	__asm__ volatile ("lvxl  23, 0, %0" ::"r" (&pcb->pcb_vr->vreg[23]));
215 	__asm__ volatile ("lvxl  24, 0, %0" ::"r" (&pcb->pcb_vr->vreg[24]));
216 	__asm__ volatile ("lvxl  25, 0, %0" ::"r" (&pcb->pcb_vr->vreg[25]));
217 	__asm__ volatile ("lvxl  26, 0, %0" ::"r" (&pcb->pcb_vr->vreg[26]));
218 	__asm__ volatile ("lvxl  27, 0, %0" ::"r" (&pcb->pcb_vr->vreg[27]));
219 	__asm__ volatile ("lvxl  28, 0, %0" ::"r" (&pcb->pcb_vr->vreg[28]));
220 	__asm__ volatile ("lvxl  29, 0, %0" ::"r" (&pcb->pcb_vr->vreg[29]));
221 	__asm__ volatile ("lvxl  30, 0, %0" ::"r" (&pcb->pcb_vr->vreg[30]));
222 	__asm__ volatile ("lvxl  31, 0, %0" ::"r" (&pcb->pcb_vr->vreg[31]));
223 
224 	/* fix kernel msr back */
225 	__asm__ volatile ("mfmsr %0" :: "r" (oldmsr));
226 }
227 #endif /* PPC_VECTOR_SUPPORTED */
228 
229 
/*
 * Common trap handler, entered from the locore exception stubs with a
 * trapframe describing the interrupted context.  Dispatches on the
 * exception type (EXC_* from machine/trap.h), with EXC_USER or'ed in
 * when the trap came from user mode (PSL_PR set in the saved MSR).
 *
 * Kernel-mode faults either recover through the pcb_onfault/faultbuf
 * mechanism or panic ("brain_damage"); user-mode faults are resolved
 * via uvm_fault() or delivered as signals.  Before returning to user
 * mode, pending signals, preemption and profiling are handled, and
 * the FPU / vector-unit enable bits in srr1 are refreshed to match
 * current ownership.
 */
void
trap(frame)
	struct trapframe *frame;
{
	struct proc *p = curproc;
	int type = frame->exc;
	u_quad_t sticks;
	union sigval sv;
	char *name;
	db_expr_t offset;

	/*
	 * NOTE(review): sticks is initialized only for user-mode traps;
	 * the profiling code at the bottom reads it, so that path
	 * presumably assumes user-mode entry — verify for the kernel
	 * EXC_PGM/DDB case that breaks out of the switch below.
	 */
	if (frame->srr1 & PSL_PR) {
		type |= EXC_USER;
		sticks = p->p_sticks;
	}

	switch (type) {
	/* user single-step / branch trace: deliver SIGTRAP */
	case EXC_TRC|EXC_USER:
		{
			sv.sival_int = frame->srr0;
			trapsignal(p, SIGTRAP, type, TRAP_TRACE, sv);
		}
		break;

	/*
	 * Kernel machine check: recoverable only if the faulting code
	 * registered a faultbuf (e.g. badaddr()); restore the saved
	 * context and make setfault() appear to return nonzero.
	 */
	case EXC_MCHK:
		{
			faultbuf *fb;

			if ((fb = p->p_addr->u_pcb.pcb_onfault)) {
				p->p_addr->u_pcb.pcb_onfault = 0;
				frame->srr0 = fb->pc;		/* PC */
				frame->srr1 = fb->sr;		/* SR */
				frame->fixreg[1] = fb->sp;	/* SP */
				frame->fixreg[3] = 1;		/* != 0 */
				frame->cr = fb->cr;
				/* restore callee-saved r13-r31 */
				bcopy(&fb->regs[0], &frame->fixreg[13], 19*4);
				return;
			}
		}
		goto brain_damage;

	/* kernel-mode data storage interrupt (page fault) */
	case EXC_DSI:
		{
			struct vm_map *map;
			vm_offset_t va;
			int ftype;
			faultbuf *fb;

			map = kernel_map;
			va = frame->dar;
			/*
			 * A fault in the USER_SR segment is really a
			 * fault on a user address being accessed by
			 * copyin/copyout through the per-pmap window;
			 * rebuild the original user VA from the live
			 * segment register and fault the user map.
			 */
			if ((va >> ADDR_SR_SHFT) == USER_SR) {
				sr_t user_sr;

				asm ("mfsr %0, %1"
				     : "=r"(user_sr) : "K"(USER_SR));
				va &= ADDR_PIDX | ADDR_POFF;
				va |= user_sr << ADDR_SR_SHFT;
				map = &p->p_vmspace->vm_map;
			}
			if (frame->dsisr & DSISR_STORE)
				ftype = VM_PROT_READ | VM_PROT_WRITE;
			else
				ftype = VM_PROT_READ;
			if (uvm_fault(map, trunc_page(va), 0, ftype) == 0) {
				return;
			}
			/*
			 * Unresolvable fault: recover through the
			 * faultbuf if one is registered (copyin/copyout
			 * return EFAULT).  NOTE(review): unlike the
			 * EXC_MCHK path above, srr1 is not restored
			 * from fb->sr here.
			 */
			if ((fb = p->p_addr->u_pcb.pcb_onfault)) {
				p->p_addr->u_pcb.pcb_onfault = 0;
				frame->srr0 = fb->pc;		/* PC */
				frame->fixreg[1] = fb->sp;	/* SP */
				frame->fixreg[3] = 1;		/* != 0 */
				frame->cr = fb->cr;
				bcopy(&fb->regs[0], &frame->fixreg[13], 19*4);
				return;
			}
			map = kernel_map;
		}
printf("kern dsi on addr %x iar %x\n", frame->dar, frame->srr0);
		goto brain_damage;
	/* user-mode data page fault */
	case EXC_DSI|EXC_USER:
		{
			int ftype, vftype;

			/* vftype is the narrower code reported in the signal */
			if (frame->dsisr & DSISR_STORE) {
				ftype = VM_PROT_READ | VM_PROT_WRITE;
				vftype = VM_PROT_WRITE;
			} else
				vftype = ftype = VM_PROT_READ;
			if (uvm_fault(&p->p_vmspace->vm_map,
				     trunc_page(frame->dar), 0, ftype) == 0) {
				break;
			}
#if 0
printf("dsi on addr %x iar %x lr %x\n", frame->dar, frame->srr0,frame->lr);
#endif
			/* fault could not be resolved: SIGSEGV at the data address */
			sv.sival_int = frame->dar;
			trapsignal(p, SIGSEGV, vftype, SEGV_MAPERR, sv);
		}
		break;
	/* user-mode instruction page fault */
	case EXC_ISI|EXC_USER:
		{
			int ftype;

			ftype = VM_PROT_READ | VM_PROT_EXECUTE;
			if (uvm_fault(&p->p_vmspace->vm_map,
				     trunc_page(frame->srr0), 0, ftype) == 0) {
				break;
			}
		}
#if 0
printf("isi iar %x\n", frame->srr0);
#endif
		/* FALLTHROUGH: unresolved ISI is signalled like a machine check */
	case EXC_MCHK|EXC_USER:
/* XXX Likely that returning from this trap is bogus... */
/* XXX Have to make sure that sigreturn does the right thing. */
		sv.sival_int = frame->srr0;
		trapsignal(p, SIGSEGV, VM_PROT_EXECUTE, SEGV_MAPERR, sv);
		break;
	/* system call */
	case EXC_SC|EXC_USER:
		{
			struct sysent *callp;
			size_t argsize;
			register_t code, error;
			register_t *params, rval[2];
			int nsys, n;
			register_t args[10];

			uvmexp.syscalls++;

			nsys = p->p_emul->e_nsysent;
			callp = p->p_emul->e_sysent;

			/* syscall number in r0, arguments from r3 on */
			code = frame->fixreg[0];
			params = frame->fixreg + FIRSTARG;

			switch (code) {
			case SYS_syscall:
				/*
				 * code is first argument,
				 * followed by actual args.
				 */
				code = *params++;
				break;
			case SYS___syscall:
				/*
				 * Like syscall, but code is a quad,
				 * so as to maintain quad alignment
				 * for the rest of the args.
				 */
				if (callp != sysent)
					break;
				params++;
				code = *params++;
				break;
			default:
				break;
			}
			/* out-of-range codes dispatch to the emulation's nosys */
			if (code < 0 || code >= nsys)
				callp += p->p_emul->e_nosys;
			else
				callp += code;
			argsize = callp->sy_argsize;
			/*
			 * n = argument registers still available; any
			 * remaining arguments live on the user stack
			 * and must be copied in.
			 */
			n = NARGREG - (params - (frame->fixreg + FIRSTARG));
			if (argsize > n * sizeof(register_t)) {
				bcopy(params, args, n * sizeof(register_t));
				if ((error = copyin(MOREARGS(frame->fixreg[1]),
				   args + n, argsize - n * sizeof(register_t)))) {
#ifdef	KTRACE
					/* Can't get all the arguments! */
					if (KTRPOINT(p, KTR_SYSCALL))
						ktrsyscall(p, code,
							   argsize, args);
#endif
					goto syscall_bad;
				}
				params = args;
			}
#ifdef	KTRACE
			if (KTRPOINT(p, KTR_SYSCALL))
				ktrsyscall(p, code, argsize, params);
#endif
			rval[0] = 0;
			rval[1] = frame->fixreg[FIRSTARG + 1];

#ifdef SYSCALL_DEBUG
	scdebug_call(p, code, params);
#endif


			switch (error = (*callp->sy_call)(p, params, rval)) {
			case 0:
				/* success: results in r3/r4, clear CR0[SO] (error flag) */
				frame->fixreg[0] = error;
				frame->fixreg[FIRSTARG] = rval[0];
				frame->fixreg[FIRSTARG + 1] = rval[1];
				frame->cr &= ~0x10000000;
				break;
			case ERESTART:
				/*
				 * Set user's pc back to redo the system call.
				 */
				frame->srr0 -= 4;
				break;
			case EJUSTRETURN:
				/* nothing to do */
				break;
			default:
syscall_bad:
				/* translate errno for emulations, set CR0[SO] */
				if (p->p_emul->e_errno)
					error = p->p_emul->e_errno[error];
				frame->fixreg[0] = error;
				frame->fixreg[FIRSTARG] = error;
				frame->fixreg[FIRSTARG + 1] = rval[1];
				frame->cr |= 0x10000000;
				break;
			}
#ifdef SYSCALL_DEBUG
        scdebug_ret(p, code, error, rval);
#endif
#ifdef	KTRACE
			if (KTRPOINT(p, KTR_SYSRET))
				ktrsysret(p, code, error, rval[0]);
#endif
		}
		break;

	/* FPU unavailable: lazily switch FPU ownership to this process */
	case EXC_FPU|EXC_USER:
		if (fpuproc)
			save_fpu(fpuproc);
		fpuproc = p;
		enable_fpu(p);
		break;

	case EXC_ALI|EXC_USER:
		/* alignment exception
		 * we check to see if this can be fixed up
		 * by the code that fixes the typical gcc misaligned code
		 * then kill the process if not.
		 */
		if (fix_unaligned(p, frame) == 0) {
			/* emulated: skip the faulting instruction */
			frame->srr0 += 4;
		} else {
			sv.sival_int = frame->srr0;
			trapsignal(p, SIGSEGV, VM_PROT_EXECUTE, SEGV_MAPERR,
				sv);
		}
		break;

	default:

	/* unrecoverable kernel trap: report and panic */
brain_damage:
/*
mpc_print_pci_stat();
*/

#ifdef DDB
		/* set up registers */
		db_save_regs(frame);
#endif
		db_find_sym_and_offset(frame->srr0, &name, &offset);
		if (!name) {
			name = "0";
			offset = frame->srr0;
		}
		panic ("trap type %x at %x (%s+0x%x) lr %x\n",
			type, frame->srr0, name, offset, frame->lr);


	/* user program exception: decode srr1 cause bits, deliver SIGILL */
	case EXC_PGM|EXC_USER:
	{
		char *errstr[8];
		int errnum = 0;

		if (frame->srr1 & (1<<(31-11))) {
			/* floating point enabled program exception */
			errstr[errnum] = "floating point";
			errnum++;
		}
		if (frame->srr1 & (1<<(31-12))) {
			/* illegal instruction program exception */
			errstr[errnum] = "illegal instruction";
			errnum++;
		}
		if (frame->srr1 & (1<<(31-13))) {
			/* privileged instruction exception */
			errstr[errnum] = "priviledged instr";
			errnum++;
		}
		if (frame->srr1 & (1<<(31-14))) {
			errstr[errnum] = "trap instr";
			errnum++;
			/* trap instruction exception */
			/*
				instr = copyin (srr0)
				if (instr == BKPT_INST && uid == 0) {
					db_trap(T_BREAKPOINT?)
					break;
				}
			*/
		}
		if (frame->srr1 & (1<<(31-15))) {
			errstr[errnum] = "previous address";
			errnum++;
		}
#if 0
printf("pgm iar %x srr1 %x\n", frame->srr0, frame->srr1);
for (i = 0; i < errnum; i++) {
	printf("\t[%s]\n", errstr[i]);
}
#endif
		sv.sival_int = frame->srr0;
		trapsignal(p, SIGILL, 0, ILL_ILLOPC, sv);
		break;
	}
	/* kernel program exception: assumed to be a DDB breakpoint */
	case EXC_PGM:
		/* should check for correct byte here or panic */
#ifdef DDB
		db_save_regs(frame);
		db_trap(T_BREAKPOINT, 0);
#else
		panic("trap EXC_PGM");
#endif
		break;

	/* This is not really a perf exception, but is an ALTIVEC unavail
	 * if we do not handle it, kill the process with illegal instruction.
	 */
	case EXC_PERF|EXC_USER:
#ifdef PPC_VECTOR_SUPPORTED
	/* vector unavailable: lazily switch vector-unit ownership */
	case EXC_VEC|EXC_USER:
		if (ppc_vecproc) {
			save_vec(ppc_vecproc);
		}
		ppc_vecproc = p;
		enable_vec(p);
		break;
#else /* PPC_VECTOR_SUPPORTED */
		sv.sival_int = frame->srr0;
		trapsignal(p, SIGILL, 0, ILL_ILLOPC, sv);
		break;
#endif /* PPC_VECTOR_SUPPORTED */

	case EXC_AST|EXC_USER:
		/* This is just here that we trap */
		break;
	}

	/*
	 * Return-to-user-mode processing: ASTs, profiling hits,
	 * pending signals and preemption.
	 */
	astpending = 0;		/* we are about to do it */

	uvmexp.softs++;

	if (p->p_flag & P_OWEUPC) {
		p->p_flag &= ~P_OWEUPC;
		ADDUPROF(p);
	}

	/* take pending signals */
	{
		int sig;

		while ((sig = CURSIG(p)))
			postsig(sig);
	}

	p->p_priority = p->p_usrpri;
	if (want_resched) {
		int sig;

		/*
		 * We're being preempted.
		 */
		preempt(NULL);
		while ((sig = CURSIG(p)))
			postsig(sig);
	}

	/*
	 * If profiling, charge recent system time to the trapped pc.
	 */
	if (p->p_flag & P_PROFIL) {
		extern int psratio;

		addupc_task(p, frame->srr0,
			    (int)(p->p_sticks - sticks) * psratio);
	}
	/*
	 * If someone stole the fpu while we were away, disable it
	 */
	if (p != fpuproc) {
		frame->srr1 &= ~PSL_FP;
	} else {
		frame->srr1 |= PSL_FP;
	}

#ifdef PPC_VECTOR_SUPPORTED
	/*
	 * If someone stole the vector unit while we were away, disable it
	 */
	if (p != ppc_vecproc) {
		frame->srr1 &= ~PSL_VEC;
	} else {
		frame->srr1 |= PSL_VEC;
	}
#endif /* PPC_VECTOR_SUPPORTED */

	curpriority = p->p_priority;
}
639 
640 void
641 child_return(arg)
642 	void *arg;
643 {
644 	struct proc *p = (struct proc *)arg;
645 	struct trapframe *tf = trapframe(p);
646 
647 	tf->fixreg[0] = 0;
648 	tf->fixreg[FIRSTARG] = 0;
649 	tf->fixreg[FIRSTARG + 1] = 1;
650 	tf->cr &= ~0x10000000;
651 	/* Disable FPU, VECT, as we can't be fpuproc */
652 	tf->srr1 &= ~(PSL_FP|PSL_VEC);
653 #ifdef	KTRACE
654 	if (KTRPOINT(p, KTR_SYSRET))
655 		ktrsysret(p, SYS_fork, 0, 0);
656 #endif
657 	/* Profiling?							XXX */
658 	curpriority = p->p_priority;
659 }
660 
/*
 * Point segment register USER_SR at the given segment descriptor, so
 * that user addresses in that segment become reachable through the
 * USER_ADDR kernel window (used by copyin/copyout below).  The isync
 * before and after serializes context around the mtsr.
 */
static inline void
setusr(int content)
{
	asm volatile ("isync; mtsr %0,%1; isync"
		      :: "n"(USER_SR), "r"(content));
}
667 
668 int
669 badaddr(addr, len)
670 	char *addr;
671 	u_int32_t len;
672 {
673 	faultbuf env;
674 	u_int32_t v;
675 	register void *oldh = curpcb->pcb_onfault;
676 
677 	if (setfault(env)) {
678 		curpcb->pcb_onfault = oldh;
679 		return EFAULT;
680 	}
681 	switch(len) {
682 	case 4:
683 		v = *((volatile u_int32_t *)addr);
684 		break;
685 	case 2:
686 		v = *((volatile u_int16_t *)addr);
687 		break;
688 	default:
689 		v = *((volatile u_int8_t *)addr);
690 		break;
691 	}
692 	curpcb->pcb_onfault = oldh;
693 	return(0);
694 }
695 
/*
 * Copy len bytes from user address udaddr into kernel address kaddr.
 *
 * User memory is reached through the USER_ADDR window: for each
 * 256MB user segment touched, setusr() maps that segment's descriptor
 * (from the process pmap) into USER_SR and the data is bcopy'd via
 * the window.  Faults during the copy longjmp back through the
 * faultbuf set up by setfault(), returning EFAULT.
 *
 * Returns 0 on success, EFAULT on fault; restores the previous
 * pcb_onfault hook on both paths.
 */
int
copyin(udaddr, kaddr, len)
	const void *udaddr;
	void *kaddr;
	size_t len;
{
	void *p;
	size_t l;
	faultbuf env;
	register void *oldh = curpcb->pcb_onfault;

	if (setfault(env)) {
		curpcb->pcb_onfault = oldh;
		return EFAULT;
	}
	while (len > 0) {
		/* window address of udaddr's offset within its segment
		 * (NOTE(review): assumes ~SEGMENT_MASK keeps the intra-
		 * segment bits — confirm against machine headers) */
		p = USER_ADDR + ((u_int)udaddr & ~SEGMENT_MASK);
		/* l = bytes left in this segment, clamped to len */
		l = (USER_ADDR + SEGMENT_LENGTH) - p;
		if (l > len)
			l = len;
		/* map the segment containing udaddr before copying */
		setusr(curpcb->pcb_pm->pm_sr[(u_int)udaddr >> ADDR_SR_SHFT]);
		bcopy(p, kaddr, l);
		udaddr += l;
		kaddr += l;
		len -= l;
	}
	curpcb->pcb_onfault = oldh;
	return 0;
}
725 
/*
 * Copy len bytes from kernel address kaddr out to user address udaddr.
 * Mirror image of copyin() above: user memory is reached one 256MB
 * segment at a time through the USER_ADDR window mapped by setusr(),
 * and faults recover through the setfault() faultbuf.
 *
 * Returns 0 on success, EFAULT on fault; restores the previous
 * pcb_onfault hook on both paths.
 */
int
copyout(kaddr, udaddr, len)
	const void *kaddr;
	void *udaddr;
	size_t len;
{
	void *p;
	size_t l;
	faultbuf env;
	register void *oldh = curpcb->pcb_onfault;

	if (setfault(env)) {
		curpcb->pcb_onfault = oldh;
		return EFAULT;
	}
	while (len > 0) {
		/* window address of udaddr's offset within its segment */
		p = USER_ADDR + ((u_int)udaddr & ~SEGMENT_MASK);
		/* l = bytes left in this segment, clamped to len */
		l = (USER_ADDR + SEGMENT_LENGTH) - p;
		if (l > len)
			l = len;
		/* map the destination segment before copying */
		setusr(curpcb->pcb_pm->pm_sr[(u_int)udaddr >> ADDR_SR_SHFT]);
		bcopy(kaddr, p, l);
		udaddr += l;
		kaddr += l;
		len -= l;
	}
	curpcb->pcb_onfault = oldh;
	return 0;
}
755 
756 /*
757  * For now, this only deals with the particular unaligned access case
758  * that gcc tends to generate.  Eventually it should handle all of the
759  * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
760  */
761 
/*
 * Attempt to fix up the alignment exception described by frame.
 * Only the lfd/stfd cases (the misaligned doubles gcc tends to
 * generate) are emulated, by doing the access through copyin/copyout
 * on the process's saved FPR image.
 *
 * Returns 0 if the access was emulated (caller skips the faulting
 * instruction), -1 if it cannot be fixed (caller signals the process).
 */
static int
fix_unaligned(p, frame)
	struct proc *p;
	struct trapframe *frame;
{
	int indicator = EXC_ALI_OPCODE_INDICATOR(frame->dsisr);

	switch (indicator) {
	case EXC_ALI_LFD:
	case EXC_ALI_STFD:
		{
			int reg = EXC_ALI_RST(frame->dsisr);
			double *fpr = &p->p_addr->u_pcb.pcb_fpu.fpr[reg];

			/* Juggle the FPU to ensure that we've initialized
			 * the FPRs, and that their current state is in
			 * the PCB.
			 */
			if (fpuproc != p) {
				if (fpuproc)
					save_fpu(fpuproc);
				enable_fpu(p);
			}
			save_fpu(p);

			if (indicator == EXC_ALI_LFD) {
				/* emulate the load into the saved FPR image,
				 * then reload the FPRs from the PCB */
				if (copyin((void *)frame->dar, fpr,
				    sizeof(double)) != 0)
					return -1;
				enable_fpu(p);
			} else {
				/* store: write the saved FPR image out to
				 * the (misaligned) user address */
				if (copyout(fpr, (void *)frame->dar,
				    sizeof(double)) != 0)
					return -1;
			}
			return 0;
		}
		break;
	}

	/* anything else is unhandled */
	return -1;
}
804