xref: /openbsd/sys/arch/powerpc/powerpc/trap.c (revision 949c1c4e)
1 /*	$OpenBSD: trap.c,v 1.135 2024/11/07 16:02:29 miod Exp $	*/
2 /*	$NetBSD: trap.c,v 1.3 1996/10/13 03:31:37 christos Exp $	*/
3 
4 /*
5  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
6  * Copyright (C) 1995, 1996 TooLs GmbH.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed by TooLs GmbH.
20  * 4. The name of TooLs GmbH may not be used to endorse or promote products
21  *    derived from this software without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
29  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
31  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
32  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 #include <sys/param.h>
35 #include <sys/proc.h>
36 #include <sys/signalvar.h>
37 #include <sys/reboot.h>
38 #include <sys/systm.h>
39 #include <sys/user.h>
40 #include <sys/pool.h>
41 #include <sys/syscall.h>
42 #include <sys/syscall_mi.h>
43 
44 #include <dev/cons.h>
45 
46 #include <uvm/uvm_extern.h>
47 
48 #include <machine/cpu.h>
49 #include <machine/fpu.h>
50 #include <machine/frame.h>
51 #include <machine/pcb.h>
52 #include <machine/psl.h>
53 #include <machine/trap.h>
54 #include <machine/db_machdep.h>
55 
56 #include <ddb/db_extern.h>
57 #include <ddb/db_sym.h>
58 #include <ddb/db_output.h>
59 
60 static int fix_unaligned(struct proc *p, struct trapframe *frame);
61 int badaddr(char *addr, u_int32_t len);
62 void trap(struct trapframe *frame);
63 
64 /* XXX This definition should probably be somewhere else */
65 #define	FIRSTARG	3		/* first syscall argument is in reg 3 */
66 
67 #ifdef ALTIVEC
68 static int altivec_assist(struct proc *p, vaddr_t);
69 
70 /*
71  * Save state of the vector processor, This is done lazily in the hope
72  * that few processes in the system will be using the vector unit
73  * and that the exception time taken to switch them will be less than
74  * the necessary time to save the vector on every context switch.
75  *
76  * Also note that in this version, the VRSAVE register is saved with
77  * the state of the current process holding the vector processor,
78  * and the contents of that register are not used to optimize the save.
79  *
80  * This can lead to VRSAVE corruption, data passing between processes,
81  * because this register is accessible without the MSR[VEC] bit set.
82  * To store/restore this cleanly a processor identifier bit would need
83  * to be saved and this register saved on every context switch.
84  * Since we do not use the information, we may be able to get by
85  * with not saving it rigorously.
86  */
void
save_vec(struct proc *p)
{
	struct pcb *pcb = &p->p_addr->u_pcb;
	struct vreg *pcb_vr = pcb->pcb_vr;
	u_int32_t oldmsr, msr;

	/* first we enable vector so that we dont throw an exception
	 * in kernel mode
	 */
	oldmsr = ppc_mfmsr();
	msr = (oldmsr & ~PSL_EE) | PSL_VEC;
	ppc_mtmsr(msr);
	__asm__ volatile ("sync;isync");

	/* VRSAVE is saved with the rest of the state; see block comment above
	 * about why this is not fully race-free. */
	pcb->pcb_vr->vrsave = ppc_mfvrsave();

#define STR(x) #x
#define SAVE_VEC_REG(reg, addr)   \
	__asm__ volatile ("stvxl %0, 0, %1" :: "n"(reg),"r" (addr));

	/* Store all 32 vector registers into the PCB save area. */
	SAVE_VEC_REG(0,&pcb_vr->vreg[0]);
	SAVE_VEC_REG(1,&pcb_vr->vreg[1]);
	SAVE_VEC_REG(2,&pcb_vr->vreg[2]);
	SAVE_VEC_REG(3,&pcb_vr->vreg[3]);
	SAVE_VEC_REG(4,&pcb_vr->vreg[4]);
	SAVE_VEC_REG(5,&pcb_vr->vreg[5]);
	SAVE_VEC_REG(6,&pcb_vr->vreg[6]);
	SAVE_VEC_REG(7,&pcb_vr->vreg[7]);
	SAVE_VEC_REG(8,&pcb_vr->vreg[8]);
	SAVE_VEC_REG(9,&pcb_vr->vreg[9]);
	SAVE_VEC_REG(10,&pcb_vr->vreg[10]);
	SAVE_VEC_REG(11,&pcb_vr->vreg[11]);
	SAVE_VEC_REG(12,&pcb_vr->vreg[12]);
	SAVE_VEC_REG(13,&pcb_vr->vreg[13]);
	SAVE_VEC_REG(14,&pcb_vr->vreg[14]);
	SAVE_VEC_REG(15,&pcb_vr->vreg[15]);
	SAVE_VEC_REG(16,&pcb_vr->vreg[16]);
	SAVE_VEC_REG(17,&pcb_vr->vreg[17]);
	SAVE_VEC_REG(18,&pcb_vr->vreg[18]);
	SAVE_VEC_REG(19,&pcb_vr->vreg[19]);
	SAVE_VEC_REG(20,&pcb_vr->vreg[20]);
	SAVE_VEC_REG(21,&pcb_vr->vreg[21]);
	SAVE_VEC_REG(22,&pcb_vr->vreg[22]);
	SAVE_VEC_REG(23,&pcb_vr->vreg[23]);
	SAVE_VEC_REG(24,&pcb_vr->vreg[24]);
	SAVE_VEC_REG(25,&pcb_vr->vreg[25]);
	SAVE_VEC_REG(26,&pcb_vr->vreg[26]);
	SAVE_VEC_REG(27,&pcb_vr->vreg[27]);
	SAVE_VEC_REG(28,&pcb_vr->vreg[28]);
	SAVE_VEC_REG(29,&pcb_vr->vreg[29]);
	SAVE_VEC_REG(30,&pcb_vr->vreg[30]);
	SAVE_VEC_REG(31,&pcb_vr->vreg[31]);
	/* v31 is saved above, so v0 can be clobbered to fetch VSCR. */
	__asm__ volatile ("mfvscr 0");
	SAVE_VEC_REG(0,&pcb_vr->vscr);

	/* The vector unit no longer holds live state for any process. */
	curcpu()->ci_vecproc = NULL;
	pcb->pcb_veccpu = NULL;

	/* fix kernel msr back */
	ppc_mtmsr(oldmsr);
}
149 
150 /*
151  * Copy the context of a given process into the vector registers.
152  */
153 void
enable_vec(struct proc * p)154 enable_vec(struct proc *p)
155 {
156 	struct pcb *pcb = &p->p_addr->u_pcb;
157 	struct vreg *pcb_vr;
158 	struct cpu_info *ci = curcpu();
159 	u_int32_t oldmsr, msr;
160 
161 	/* If this is the very first altivec instruction executed
162 	 * by this process, create a context.
163 	 */
164 	if (pcb->pcb_vr == NULL)
165 		pcb->pcb_vr = pool_get(&ppc_vecpl, PR_WAITOK | PR_ZERO);
166 	pcb_vr = pcb->pcb_vr;
167 
168 	if (curcpu()->ci_vecproc != NULL || pcb->pcb_veccpu != NULL)
169 		printf("attempting to restore vector in use vecproc %p"
170 		    " veccpu %p\n", curcpu()->ci_vecproc, pcb->pcb_veccpu);
171 
172 	/* first we enable vector so that we dont throw an exception
173 	 * in kernel mode
174 	 */
175 	oldmsr = ppc_mfmsr();
176 	msr = (oldmsr & ~PSL_EE) | PSL_VEC;
177 	ppc_mtmsr(msr);
178 	__asm__ volatile ("sync;isync");
179 	ci->ci_vecproc = p;
180 	pcb->pcb_veccpu = ci;
181 
182 #define LOAD_VEC_REG(reg, addr)   \
183 	__asm__ volatile ("lvxl %0, 0, %1" :: "n"(reg), "r" (addr));
184 
185 	LOAD_VEC_REG(0, &pcb_vr->vscr);
186 	__asm__ volatile ("mtvscr 0");
187 	ppc_mtvrsave(pcb_vr->vrsave);
188 
189 	LOAD_VEC_REG(0, &pcb_vr->vreg[0]);
190 	LOAD_VEC_REG(1, &pcb_vr->vreg[1]);
191 	LOAD_VEC_REG(2, &pcb_vr->vreg[2]);
192 	LOAD_VEC_REG(3, &pcb_vr->vreg[3]);
193 	LOAD_VEC_REG(4, &pcb_vr->vreg[4]);
194 	LOAD_VEC_REG(5, &pcb_vr->vreg[5]);
195 	LOAD_VEC_REG(6, &pcb_vr->vreg[6]);
196 	LOAD_VEC_REG(7, &pcb_vr->vreg[7]);
197 	LOAD_VEC_REG(8, &pcb_vr->vreg[8]);
198 	LOAD_VEC_REG(9, &pcb_vr->vreg[9]);
199 	LOAD_VEC_REG(10, &pcb_vr->vreg[10]);
200 	LOAD_VEC_REG(11, &pcb_vr->vreg[11]);
201 	LOAD_VEC_REG(12, &pcb_vr->vreg[12]);
202 	LOAD_VEC_REG(13, &pcb_vr->vreg[13]);
203 	LOAD_VEC_REG(14, &pcb_vr->vreg[14]);
204 	LOAD_VEC_REG(15, &pcb_vr->vreg[15]);
205 	LOAD_VEC_REG(16, &pcb_vr->vreg[16]);
206 	LOAD_VEC_REG(17, &pcb_vr->vreg[17]);
207 	LOAD_VEC_REG(18, &pcb_vr->vreg[18]);
208 	LOAD_VEC_REG(19, &pcb_vr->vreg[19]);
209 	LOAD_VEC_REG(20, &pcb_vr->vreg[20]);
210 	LOAD_VEC_REG(21, &pcb_vr->vreg[21]);
211 	LOAD_VEC_REG(22, &pcb_vr->vreg[22]);
212 	LOAD_VEC_REG(23, &pcb_vr->vreg[23]);
213 	LOAD_VEC_REG(24, &pcb_vr->vreg[24]);
214 	LOAD_VEC_REG(25, &pcb_vr->vreg[25]);
215 	LOAD_VEC_REG(26, &pcb_vr->vreg[26]);
216 	LOAD_VEC_REG(27, &pcb_vr->vreg[27]);
217 	LOAD_VEC_REG(28, &pcb_vr->vreg[28]);
218 	LOAD_VEC_REG(29, &pcb_vr->vreg[29]);
219 	LOAD_VEC_REG(30, &pcb_vr->vreg[30]);
220 	LOAD_VEC_REG(31, &pcb_vr->vreg[31]);
221 
222 	/* fix kernel msr back */
223 	ppc_mtmsr(oldmsr);
224 }
225 #endif /* ALTIVEC */
226 
/*
 * Common trap handler: dispatch on the exception type saved in the
 * trapframe.  If the trap came from user mode (PSL_PR was set in the
 * saved MSR), EXC_USER is OR-ed into the type so user and kernel
 * variants of each exception are handled by distinct cases.
 * On return to user mode, the FPU/AltiVec enable bits in srr1 are
 * reconciled with the current ownership of those units.
 */
void
trap(struct trapframe *frame)
{
	struct cpu_info *ci = curcpu();
	struct proc *p = curproc;
	int type = frame->exc;
	union sigval sv;
	const char *name;
	db_expr_t offset;
	faultbuf *fb;
	struct vm_map *map;
	vaddr_t va;
	int access_type;
	const struct sysent *callp = sysent;
	register_t code, error;
	register_t *params, rval[2];

	/* PSL_PR set in the saved MSR means the trap came from user mode. */
	if (frame->srr1 & PSL_PR) {
		type |= EXC_USER;
		refreshcreds(p);
	}

	switch (type) {
	case EXC_TRC|EXC_USER:
		/* User single-step/trace exception: deliver SIGTRAP. */
		sv.sival_int = frame->srr0;
		trapsignal(p, SIGTRAP, type, TRAP_TRACE, sv);
		break;
	case EXC_MCHK:
		/*
		 * Kernel machine check.  If a fault handler was armed
		 * (e.g. by badaddr()/setfault()), resume there with a
		 * non-zero return value instead of panicking.
		 */
		if ((fb = p->p_addr->u_pcb.pcb_onfault)) {
			p->p_addr->u_pcb.pcb_onfault = 0;
			frame->srr0 = fb->pc;		/* PC */
			frame->fixreg[1] = fb->sp;	/* SP */
			frame->fixreg[3] = 1;		/* != 0 */
			frame->cr = fb->cr;
			bcopy(&fb->regs[0], &frame->fixreg[13], 19*4);
			return;
		}
		goto brain_damage;

	case EXC_DSI:
		/* Kernel-mode data storage (page fault) on address dar. */
		map = kernel_map;
		va = frame->dar;
		if ((va >> ADDR_SR_SHIFT) == PPC_USER_SR) {
			/*
			 * Fault through the user segment register used by
			 * copyin/copyout: translate the address back into
			 * the user address space and fault against it.
			 */
			sr_t user_sr;

			asm ("mfsr %0, %1" : "=r"(user_sr) : "K"(PPC_USER_SR));
			va &= ADDR_PIDX | ADDR_POFF;
			va |= user_sr << ADDR_SR_SHIFT;
			map = &p->p_vmspace->vm_map;
			/* Let pmap try to re-install a cached mapping first. */
			if (pte_spill_v(map->pmap, va, frame->dsisr, 0))
				return;
		}
		if (frame->dsisr & DSISR_STORE)
			access_type = PROT_WRITE;
		else
			access_type = PROT_READ;

		error = uvm_fault(map, trunc_page(va), 0, access_type);
		if (error == 0)
			return;

		/* Unresolvable: bail to the armed fault handler, if any. */
		if ((fb = p->p_addr->u_pcb.pcb_onfault)) {
			p->p_addr->u_pcb.pcb_onfault = 0;
			frame->srr0 = fb->pc;		/* PC */
			frame->fixreg[1] = fb->sp;	/* SP */
			frame->fixreg[3] = 1;		/* != 0 */
			frame->cr = fb->cr;
			bcopy(&fb->regs[0], &frame->fixreg[13], 19*4);
			return;
		}
		map = kernel_map;
		goto brain_damage;

	case EXC_DSI|EXC_USER:
		/* Try spill handler first */
		if (pte_spill_v(p->p_vmspace->vm_map.pmap,
		    frame->dar, frame->dsisr, 0))
			break;

		/* Enforce MAP_STACK: the stack pointer must lie in a
		 * stack mapping, else the process is killed. */
		if (!uvm_map_inentry(p, &p->p_spinentry, PROC_STACK(p),
		    "[%s]%d/%d sp=%lx inside %lx-%lx: not MAP_STACK\n",
		    uvm_map_inentry_sp, p->p_vmspace->vm_map.sserial))
			goto out;

		if (frame->dsisr & DSISR_STORE)
			access_type = PROT_WRITE;
		else
			access_type = PROT_READ;

		error = uvm_fault(&p->p_vmspace->vm_map,
		    trunc_page(frame->dar), 0, access_type);
		if (error == 0) {
			uvm_grow(p, frame->dar);
			break;
		}

		/*
		 * keep this for later in case we want it later.
		 */
		sv.sival_int = frame->dar;
		trapsignal(p, SIGSEGV, access_type, SEGV_MAPERR, sv);
		break;

	case EXC_ISI|EXC_USER:
		/* User instruction fetch fault. */
		/* Try spill handler */
		if (pte_spill_v(p->p_vmspace->vm_map.pmap,
		    frame->srr0, 0, 1))
			break;

		access_type = PROT_EXEC;

		error = uvm_fault(&p->p_vmspace->vm_map,
		    trunc_page(frame->srr0), 0, access_type);

		if (error == 0) {
			uvm_grow(p, frame->srr0);
			break;
		}
		sv.sival_int = frame->srr0;
		trapsignal(p, SIGSEGV, PROT_EXEC, SEGV_MAPERR, sv);
		break;

	case EXC_MCHK|EXC_USER:
/* XXX Likely that returning from this trap is bogus... */
/* XXX Have to make sure that sigreturn does the right thing. */
		sv.sival_int = frame->srr0;
		trapsignal(p, SIGSEGV, PROT_EXEC, SEGV_MAPERR, sv);
		break;

	case EXC_SC|EXC_USER:
		/*
		 * System call: number in r0, arguments from r3 (FIRSTARG)
		 * on; results returned in r3/r4 with cr0[SO] as the
		 * error indicator (bit 0x10000000).
		 */
		uvmexp.syscalls++;

		code = frame->fixreg[0];
	        if (code <= 0 || code >= SYS_MAXSYSCALL) {
			error = ENOSYS;
			goto bad;
		}

	        callp += code;

		params = frame->fixreg + FIRSTARG;

		rval[0] = 0;
		rval[1] = frame->fixreg[FIRSTARG + 1];

		error = mi_syscall(p, code, callp, params, rval);

		switch (error) {
		case 0:
			/* Success: clear cr0[SO], return values in r3/r4. */
			frame->fixreg[0] = error;
			frame->fixreg[FIRSTARG] = rval[0];
			frame->fixreg[FIRSTARG + 1] = rval[1];
			frame->cr &= ~0x10000000;
			break;
		case ERESTART:
			/*
			 * Set user's pc back to redo the system call.
			 */
			frame->srr0 -= 4;
			break;
		case EJUSTRETURN:
			/* nothing to do */
			break;
		default:
			frame->fixreg[FIRSTARG + 1] = rval[1];
		bad:
			/* Failure: errno in r3, cr0[SO] set. */
			frame->fixreg[0] = error;
			frame->fixreg[FIRSTARG] = error;
			frame->cr |= 0x10000000;
			break;
		}

		mi_syscall_return(p, code, error, rval);
		goto finish;

	case EXC_FPU|EXC_USER:
		/* FPU-unavailable: lazily switch FPU ownership to p. */
		if (ci->ci_fpuproc)
			save_fpu();
		uvmexp.fpswtch++;
		enable_fpu(p);
		break;

	case EXC_ALI|EXC_USER:
		/* alignment exception
		 * we check to see if this can be fixed up
		 * by the code that fixes the typical gcc misaligned code
		 * then kill the process if not.
		 */
		if (fix_unaligned(p, frame) == 0)
			frame->srr0 += 4;
		else {
			sv.sival_int = frame->srr0;
			trapsignal(p, SIGBUS, PROT_EXEC, BUS_ADRALN, sv);
		}
		break;

	default:
		/* NOTE: default: sits before the remaining case labels so
		 * brain_damage is reachable via goto from the cases above;
		 * the panic below never falls through. */

brain_damage:
#ifdef DDB
		/* set up registers */
		db_save_regs(frame);
		db_find_sym_and_offset(frame->srr0, &name, &offset);
		if (!name) {
			name = "0";
			offset = frame->srr0;
		}
#else
		name = "0";
		offset = frame->srr0;
#endif
		panic("trap type %x srr1 %x at %x (%s+0x%lx) lr %lx",
		    type, frame->srr1, frame->srr0, name, offset, frame->lr);

	case EXC_PGM|EXC_USER:
		/* Bit 14 of srr1 flags a trap instruction (breakpoint);
		 * anything else is an illegal instruction. */
		if (frame->srr1 & (1<<(31-14))) {
			sv.sival_int = frame->srr0;
			trapsignal(p, SIGTRAP, type, TRAP_BRKPT, sv);
			break;
		}
		sv.sival_int = frame->srr0;
		trapsignal(p, SIGILL, 0, ILL_ILLOPC, sv);
		break;

	case EXC_PGM:
		/* Kernel breakpoint: enter the kernel debugger. */
		/* should check for correct byte here or panic */
#ifdef DDB
		db_save_regs(frame);
		db_active++;
		cnpollc(TRUE);
		db_trap(T_BREAKPOINT, 0);
		cnpollc(FALSE);
		db_active--;
#else
		panic("trap EXC_PGM");
#endif
		break;

	/* This is not really a perf exception, but is an ALTIVEC unavail
	 * if we do not handle it, kill the process with illegal instruction.
	 */
	case EXC_PERF|EXC_USER:
#ifdef ALTIVEC
	case EXC_VEC|EXC_USER:
		/* Vector-unavailable: lazily switch AltiVec ownership. */
		if (ci->ci_vecproc)
			save_vec(ci->ci_vecproc);

		enable_vec(p);
		break;
#else  /* ALTIVEC */
		sv.sival_int = frame->srr0;
		trapsignal(p, SIGILL, 0, ILL_ILLOPC, sv);
		break;
#endif

	case EXC_VECAST_G4|EXC_USER:
	case EXC_VECAST_G5|EXC_USER:
		/* AltiVec assist (denormal handling): try to emulate the
		 * instruction in software, else raise SIGFPE. */
#ifdef ALTIVEC
		if (altivec_assist(p, (vaddr_t)frame->srr0) == 0) {
			frame->srr0 += 4;
			break;
		}
#endif
		sv.sival_int = frame->srr0;
		trapsignal(p, SIGFPE, 0, FPE_FLTRES, sv);
		break;

	case EXC_AST|EXC_USER:
		p->p_md.md_astpending = 0;	/* we are about to do it */
		uvmexp.softs++;
		mi_ast(p, curcpu()->ci_want_resched);
		break;
	}

out:
	userret(p);

finish:
	/*
	 * If someone stole the fpu while we were away, disable it
	 */
	if (p != ci->ci_fpuproc)
		frame->srr1 &= ~PSL_FP;
	else if (p->p_addr->u_pcb.pcb_flags & PCB_FPU)
		frame->srr1 |= PSL_FP;

#ifdef ALTIVEC
	/*
	 * If someone stole the vector unit while we were away, disable it
	 */
	if (p == ci->ci_vecproc)
		frame->srr1 |= PSL_VEC;
	else
		frame->srr1 &= ~PSL_VEC;
#endif /* ALTIVEC */
}
523 
/*
 * First return to user mode of a newly forked process: make the child
 * see a successful fork(2) returning 0, and clear the FPU/vector
 * enable bits since the child cannot own either unit yet.
 */
void
child_return(void *arg)
{
	struct proc *p = (struct proc *)arg;
	struct trapframe *tf = trapframe(p);

	/* Return value 0 in r3, error cleared in r0 and cr0[SO]. */
	tf->fixreg[0] = 0;
	tf->fixreg[FIRSTARG] = 0;
	tf->cr &= ~0x10000000;
	/* Disable FPU, VECT, as we can't be fpuproc */
	tf->srr1 &= ~(PSL_FP|PSL_VEC);

	KERNEL_UNLOCK();

	mi_child_return(p);
}
540 
541 int
badaddr(char * addr,u_int32_t len)542 badaddr(char *addr, u_int32_t len)
543 {
544 	faultbuf env;
545 	u_int32_t v;
546 	void *oldh = curpcb->pcb_onfault;
547 
548 	if (setfault(&env)) {
549 		curpcb->pcb_onfault = oldh;
550 		return EFAULT;
551 	}
552 	switch(len) {
553 	case 4:
554 		v = *((volatile u_int32_t *)addr);
555 		break;
556 	case 2:
557 		v = *((volatile u_int16_t *)addr);
558 		break;
559 	default:
560 		v = *((volatile u_int8_t *)addr);
561 		break;
562 	}
563 	/* Make sure all loads retire before turning off fault handling!! */
564 	__asm__ volatile ("sync");
565 	curpcb->pcb_onfault = oldh;
566 	return(0);
567 }
568 
569 
570 /*
571  * For now, this only deals with the particular unaligned access case
572  * that gcc tends to generate.  Eventually it should handle all of the
573  * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
574  */
575 
/*
 * Emulate the misaligned lfd/stfd accesses that gcc tends to
 * generate, using copyin/copyout on the PCB's copy of the FP
 * register.  Returns 0 if the access was emulated, -1 if not
 * (any other opcode, or a fault during the copy).
 */
static int
fix_unaligned(struct proc *p, struct trapframe *frame)
{
	int indicator = EXC_ALI_OPCODE_INDICATOR(frame->dsisr);
	struct cpu_info *ci = curcpu();
	int reg;
	double *fpr;

	switch (indicator) {
	case EXC_ALI_LFD:
	case EXC_ALI_STFD:
		/* DSISR identifies the FP register the access targets. */
		reg = EXC_ALI_RST(frame->dsisr);
		fpr = &p->p_addr->u_pcb.pcb_fpu.fpr[reg];

		/* Juggle the FPU to ensure that we've initialized
		 * the FPRs, and that their current state is in
		 * the PCB.
		 */
		if (ci->ci_fpuproc != p) {
			if (ci->ci_fpuproc)
				save_fpu();
			enable_fpu(p);
		}
		save_fpu();

		if (indicator == EXC_ALI_LFD) {
			/* Load: fetch the doubleword into the saved FPR. */
			if (copyin((void *)frame->dar, fpr,
			    sizeof(double)) != 0)
				return -1;
		} else {
			/* Store: write the saved FPR out to memory. */
			if (copyout(fpr, (void *)frame->dar,
			    sizeof(double)) != 0)
				return -1;
		}
		/* Reload the (possibly updated) FP state into the unit. */
		enable_fpu(p);
		return 0;
	}
	return -1;
}
615 
616 #ifdef ALTIVEC
617 static inline int
copyinsn(struct proc * p,vaddr_t uva,int * insn)618 copyinsn(struct proc *p, vaddr_t uva, int *insn)
619 {
620 	struct vm_map *map = &p->p_vmspace->vm_map;
621 	int error = 0;
622 
623 	if (__predict_false((uva & 3) != 0))
624 		return EFAULT;
625 
626 	do {
627 		if (pmap_copyinsn(map->pmap, uva, (uint32_t *)insn) == 0)
628 			break;
629 		error = uvm_fault(map, trunc_page(uva), 0, PROT_EXEC);
630 	} while (error == 0);
631 
632 	return error;
633 }
634 
635 static int
altivec_assist(struct proc * p,vaddr_t user_pc)636 altivec_assist(struct proc *p, vaddr_t user_pc)
637 {
638 	/* These labels are in vecast.S */
639 	void vecast_asm(uint32_t, void *);
640 	void vecast_vaddfp(void);
641 	void vecast_vsubfp(void);
642 	void vecast_vmaddfp(void);
643 	void vecast_vnmsubfp(void);
644 	void vecast_vrefp(void);
645 	void vecast_vrsqrtefp(void);
646 	void vecast_vlogefp(void);
647 	void vecast_vexptefp(void);
648 	void vecast_vctuxs(void);
649 	void vecast_vctsxs(void);
650 
651 	uint32_t insn, op, va, vc, lo;
652 	int error;
653 	void (*lab)(void);
654 
655 	error = copyinsn(p, user_pc, &insn);
656 	if (error)
657 		return error;
658 	op = (insn & 0xfc000000) >> 26;	/* primary opcode */
659 	va = (insn & 0x001f0000) >> 16;	/* vector A */
660 	vc = (insn & 0x000007c0) >>  6;	/* vector C or extended opcode */
661 	lo =  insn & 0x0000003f;	/* extended opcode */
662 
663 	/* Stop if this isn't an altivec instruction. */
664 	if (op != 4)
665 		return EINVAL;
666 
667 	/* Decide which instruction to emulate. */
668 	lab = NULL;
669 	switch (lo) {
670 	case 10:
671 		switch (vc) {
672 		case 0:
673 			lab = vecast_vaddfp;
674 			break;
675 		case 1:
676 			lab = vecast_vsubfp;
677 			break;
678 		case 4:
679 			if (va == 0)
680 				lab = vecast_vrefp;
681 			break;
682 		case 5:
683 			if (va == 0)
684 				lab = vecast_vrsqrtefp;
685 			break;
686 		case 6:
687 			if (va == 0)
688 				lab = vecast_vexptefp;
689 			break;
690 		case 7:
691 			if (va == 0)
692 				lab = vecast_vlogefp;
693 			break;
694 		case 14:
695 			lab = vecast_vctuxs;
696 			break;
697 		case 15:
698 			lab = vecast_vctsxs;
699 			break;
700 		}
701 		break;
702 	case 46:
703 		lab = vecast_vmaddfp;
704 		break;
705 	case 47:
706 		lab = vecast_vnmsubfp;
707 		break;
708 	}
709 
710 	if (lab) {
711 		vecast_asm(insn, lab);	/* Emulate it. */
712 		return 0;
713 	} else
714 		return EINVAL;
715 }
716 #endif
717