xref: /netbsd/sys/arch/alpha/alpha/trap.c (revision 6550d01e)
1 /* $NetBSD: trap.c,v 1.124 2010/12/20 00:25:24 matt Exp $ */
2 
3 /*-
4  * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center, by Charles M. Hannum, and by Ross Harvey.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1999 Christopher G. Demetriou.  All rights reserved.
35  *
36  * Redistribution and use in source and binary forms, with or without
37  * modification, are permitted provided that the following conditions
38  * are met:
39  * 1. Redistributions of source code must retain the above copyright
40  *    notice, this list of conditions and the following disclaimer.
41  * 2. Redistributions in binary form must reproduce the above copyright
42  *    notice, this list of conditions and the following disclaimer in the
43  *    documentation and/or other materials provided with the distribution.
44  * 3. All advertising materials mentioning features or use of this software
45  *    must display the following acknowledgement:
46  *      This product includes software developed by Christopher G. Demetriou
47  *	for the NetBSD Project.
48  * 4. The name of the author may not be used to endorse or promote products
49  *    derived from this software without specific prior written permission
50  *
51  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
52  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
53  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
54  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
55  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
56  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
60  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61  */
62 
63 /*
64  * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
65  * All rights reserved.
66  *
67  * Author: Chris G. Demetriou
68  *
69  * Permission to use, copy, modify and distribute this software and
70  * its documentation is hereby granted, provided that both the copyright
71  * notice and this permission notice appear in all copies of the
72  * software, derivative works or modified versions, and any portions
73  * thereof, and that both notices appear in supporting documentation.
74  *
75  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
76  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
77  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
78  *
79  * Carnegie Mellon requests users of this software to return to
80  *
81  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
82  *  School of Computer Science
83  *  Carnegie Mellon University
84  *  Pittsburgh PA 15213-3890
85  *
86  * any improvements or extensions that they make and grant Carnegie the
87  * rights to redistribute these changes.
88  */
89 
90 #include "opt_fix_unaligned_vax_fp.h"
91 #include "opt_ddb.h"
92 #include "opt_multiprocessor.h"
93 
94 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
95 
96 __KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.124 2010/12/20 00:25:24 matt Exp $");
97 
98 #include <sys/param.h>
99 #include <sys/systm.h>
100 #include <sys/proc.h>
101 #include <sys/sa.h>
102 #include <sys/savar.h>
103 #include <sys/syscall.h>
104 #include <sys/buf.h>
105 #include <sys/kauth.h>
106 #include <sys/kmem.h>
107 #include <sys/cpu.h>
108 #include <sys/atomic.h>
109 
110 #include <uvm/uvm_extern.h>
111 
112 #include <machine/reg.h>
113 #include <machine/alpha.h>
114 #include <machine/fpu.h>
115 #include <machine/rpb.h>
116 #ifdef DDB
117 #include <machine/db_machdep.h>
118 #endif
119 #include <alpha/alpha/db_instruction.h>
120 #include <machine/userret.h>
121 
122 static int unaligned_fixup(u_long, u_long, u_long, struct lwp *);
123 static int handle_opdec(struct lwp *l, u_long *ucodep);
124 static int alpha_ucode_to_ksiginfo(u_long ucode);
125 
126 struct evcnt fpevent_use;
127 struct evcnt fpevent_reuse;
128 
129 /*
130  * Initialize the trap vectors for the current processor.
131  */
132 void
133 trap_init(void)
134 {
135 
136 	/*
137 	 * Point interrupt/exception vectors to our own.
138 	 */
139 	alpha_pal_wrent(XentInt, ALPHA_KENTRY_INT);
140 	alpha_pal_wrent(XentArith, ALPHA_KENTRY_ARITH);
141 	alpha_pal_wrent(XentMM, ALPHA_KENTRY_MM);
142 	alpha_pal_wrent(XentIF, ALPHA_KENTRY_IF);
143 	alpha_pal_wrent(XentUna, ALPHA_KENTRY_UNA);
144 	alpha_pal_wrent(XentSys, ALPHA_KENTRY_SYS);
145 
146 	/*
147 	 * Clear pending machine checks and error reports, and enable
148 	 * system- and processor-correctable error reporting.
149 	 */
150 	alpha_pal_wrmces(alpha_pal_rdmces() &
151 	    ~(ALPHA_MCES_DSC|ALPHA_MCES_DPC));
152 
153 	/*
154 	 * If this is the primary processor, initialize some trap
155 	 * event counters.
156 	 */
157 	if (cpu_number() == hwrpb->rpb_primary_cpu_id) {
158 		evcnt_attach_dynamic(&fpevent_use, EVCNT_TYPE_MISC, NULL,
159 		    "FP", "proc use");
160 		evcnt_attach_dynamic(&fpevent_reuse, EVCNT_TYPE_MISC, NULL,
161 		    "FP", "proc re-use");
162 	}
163 }
164 
165 static void
166 printtrap(const u_long a0, const u_long a1, const u_long a2,
167     const u_long entry, struct trapframe *framep, int isfatal, int user)
168 {
169 	char ubuf[64];
170 	const char *entryname;
171 	u_long cpu_id = cpu_number();
172 
173 	switch (entry) {
174 	case ALPHA_KENTRY_INT:
175 		entryname = "interrupt";
176 		break;
177 	case ALPHA_KENTRY_ARITH:
178 		entryname = "arithmetic trap";
179 		break;
180 	case ALPHA_KENTRY_MM:
181 		entryname = "memory management fault";
182 		break;
183 	case ALPHA_KENTRY_IF:
184 		entryname = "instruction fault";
185 		break;
186 	case ALPHA_KENTRY_UNA:
187 		entryname = "unaligned access fault";
188 		break;
189 	case ALPHA_KENTRY_SYS:
190 		entryname = "system call";
191 		break;
192 	default:
193 		snprintf(ubuf, sizeof(ubuf), "type %lx", entry);
194 		entryname = (const char *) ubuf;
195 		break;
196 	}
197 
198 	printf("\n");
199 	printf("CPU %lu: %s %s trap:\n", cpu_id, isfatal ? "fatal" : "handled",
200 	    user ? "user" : "kernel");
201 	printf("\n");
202 	printf("CPU %lu    trap entry = 0x%lx (%s)\n", cpu_id, entry,
203 	    entryname);
204 	printf("CPU %lu    a0         = 0x%lx\n", cpu_id, a0);
205 	printf("CPU %lu    a1         = 0x%lx\n", cpu_id, a1);
206 	printf("CPU %lu    a2         = 0x%lx\n", cpu_id, a2);
207 	printf("CPU %lu    pc         = 0x%lx\n", cpu_id,
208 	    framep->tf_regs[FRAME_PC]);
209 	printf("CPU %lu    ra         = 0x%lx\n", cpu_id,
210 	    framep->tf_regs[FRAME_RA]);
211 	printf("CPU %lu    pv         = 0x%lx\n", cpu_id,
212 	    framep->tf_regs[FRAME_T12]);
213 	printf("CPU %lu    curlwp     = %p\n", cpu_id, curlwp);
214 	printf("CPU %lu        pid = %d, comm = %s\n", cpu_id,
215 	    curproc->p_pid, curproc->p_comm);
216 	printf("\n");
217 }
218 
219 /*
220  * Trap is called from locore to handle most types of processor traps.
221  * System calls are broken out for efficiency and ASTs are broken out
222  * to make the code a bit cleaner and more representative of the
223  * Alpha architecture.
224  */
225 /*ARGSUSED*/
226 void
227 trap(const u_long a0, const u_long a1, const u_long a2, const u_long entry,
228     struct trapframe *framep)
229 {
230 	struct lwp *l;
231 	struct proc *p;
232 	struct pcb *pcb;
233 	vaddr_t onfault;
234 	ksiginfo_t ksi;
235 	vm_prot_t ftype;
236 	u_int64_t ucode;
237 	int i, user;
238 #if defined(DDB)
239 	int call_debugger = 1;
240 #endif
241 
242 	curcpu()->ci_data.cpu_ntrap++;
243 
244 	l = curlwp;
245 
246 	user = (framep->tf_regs[FRAME_PS] & ALPHA_PSL_USERMODE) != 0;
247 	if (user) {
248 		l->l_md.md_tf = framep;
249 		p = l->l_proc;
250 		(void)memset(&ksi, 0, sizeof(ksi));
251 		LWP_CACHE_CREDS(l, p);
252 	} else {
253 		p = NULL;
254 	}
255 
256 	switch (entry) {
257 	case ALPHA_KENTRY_UNA:
258 		/*
259 		 * If user-land, do whatever fixups, printing, and
260 		 * signalling is appropriate (based on system-wide
261 		 * and per-process unaligned-access-handling flags).
262 		 */
263 		if (user) {
264 			i = unaligned_fixup(a0, a1, a2, l);
265 			if (i == 0)
266 				goto out;
267 
268 			KSI_INIT_TRAP(&ksi);
269 			ksi.ksi_signo = i;
270 			ksi.ksi_code = BUS_ADRALN;
271 			ksi.ksi_addr = (void *)a0;		/* VA */
272 			ksi.ksi_trap = BUS_ADRALN;      /* XXX appropriate? */
273 			break;
274 		}
275 
276 		/*
277 		 * Unaligned access from kernel mode is always an error,
278 		 * EVEN IF A COPY FAULT HANDLER IS SET!
279 		 *
280 		 * It's an error if a copy fault handler is set because
281 		 * the various routines which do user-initiated copies
282 		 * do so in a memcpy-like manner.  In other words, the
283 		 * kernel never assumes that pointers provided by the
284 		 * user are properly aligned, and so if the kernel
285 		 * does cause an unaligned access it's a kernel bug.
286 		 */
287 		goto dopanic;
288 
289 	case ALPHA_KENTRY_ARITH:
290 		/*
291 		 * Resolve trap shadows, interpret FP ops requiring infinities,
292 		 * NaNs, or denorms, and maintain FPCR corrections.
293 		 */
294 		if (user) {
295 			i = alpha_fp_complete(a0, a1, l, &ucode);
296 			if (i == 0)
297 				goto out;
298 			KSI_INIT_TRAP(&ksi);
299 			ksi.ksi_signo = i;
300 			if (i == SIGSEGV)
301 				ksi.ksi_code = SEGV_MAPERR; /* just pick one */
302 			else {
303 				ksi.ksi_code = alpha_ucode_to_ksiginfo(ucode);
304 				ksi.ksi_addr =
305 					(void *)l->l_md.md_tf->tf_regs[FRAME_PC];
306 				ksi.ksi_trap = (int)ucode;
307 			}
308 			break;
309 		}
310 
311 		/* Always fatal in kernel.  Should never happen. */
312 		goto dopanic;
313 
314 	case ALPHA_KENTRY_IF:
315 		/*
316 		 * These are always fatal in kernel, and should never
317 		 * happen.  (Debugger entry is handled in XentIF.)
318 		 */
319 		if (user == 0) {
320 #if defined(DDB)
321 			/*
322 			 * ...unless a debugger is configured.  It will
323 			 * inform us if the trap was handled.
324 			 */
325 			if (alpha_debug(a0, a1, a2, entry, framep))
326 				goto out;
327 
328 			/*
329 			 * Debugger did NOT handle the trap, don't
330 			 * call the debugger again!
331 			 */
332 			call_debugger = 0;
333 #endif
334 			goto dopanic;
335 		}
336 		i = 0;
337 		switch (a0) {
338 		case ALPHA_IF_CODE_GENTRAP:
339 			if (framep->tf_regs[FRAME_A0] == -2) { /* weird! */
340 				KSI_INIT_TRAP(&ksi);
341 				ksi.ksi_signo = SIGFPE;
342 				ksi.ksi_code =  alpha_ucode_to_ksiginfo(ucode);
343 				ksi.ksi_addr =
344 					(void *)l->l_md.md_tf->tf_regs[FRAME_PC];
345 				ksi.ksi_trap =  a0;	/* exception summary */
346 				break;
347 			}
348 			/* FALLTHROUGH */
349 		case ALPHA_IF_CODE_BPT:
350 		case ALPHA_IF_CODE_BUGCHK:
351 			KSI_INIT_TRAP(&ksi);
352 			ksi.ksi_signo = SIGTRAP;
353 			ksi.ksi_code = TRAP_BRKPT;
354 			ksi.ksi_addr = (void *)l->l_md.md_tf->tf_regs[FRAME_PC];
355 			ksi.ksi_trap = a0;		/* trap type */
356 			break;
357 
358 		case ALPHA_IF_CODE_OPDEC:
359 			i = handle_opdec(l, &ucode);
360 			KSI_INIT_TRAP(&ksi);
361 			if (i == 0)
362 				goto out;
363 			else if (i == SIGSEGV)
364 				ksi.ksi_code = SEGV_MAPERR;
365 			else if (i == SIGILL)
366 				ksi.ksi_code = ILL_ILLOPC;
367 			ksi.ksi_signo = i;
368 			ksi.ksi_addr =
369 				(void *)l->l_md.md_tf->tf_regs[FRAME_PC];
370 			ksi.ksi_trap = (int)ucode;
371 			break;
372 
373 		case ALPHA_IF_CODE_FEN:
374 			alpha_enable_fp(l, 0);
375 			alpha_pal_wrfen(0);
376 			goto out;
377 
378 		default:
379 			printf("trap: unknown IF type 0x%lx\n", a0);
380 			goto dopanic;
381 		}
382 		break;
383 
384 	case ALPHA_KENTRY_MM:
385 		pcb = lwp_getpcb(l);
386 		onfault = pcb->pcb_onfault;
387 
388 		switch (a1) {
389 		case ALPHA_MMCSR_FOR:
390 		case ALPHA_MMCSR_FOE:
391 		case ALPHA_MMCSR_FOW:
392 			if (pmap_emulate_reference(l, a0, user, a1)) {
393 				ftype = VM_PROT_EXECUTE;
394 				goto do_fault;
395 			}
396 			goto out;
397 
398 		case ALPHA_MMCSR_INVALTRANS:
399 		case ALPHA_MMCSR_ACCESS:
400 	    	{
401 			vaddr_t va;
402 			struct vmspace *vm = NULL;
403 			struct vm_map *map;
404 			int rv;
405 
406 			switch (a2) {
407 			case -1:		/* instruction fetch fault */
408 				ftype = VM_PROT_EXECUTE;
409 				break;
410 			case 0:			/* load instruction */
411 				ftype = VM_PROT_READ;
412 				break;
413 			case 1:			/* store instruction */
414 				ftype = VM_PROT_WRITE;
415 				break;
416 			default:
417 #ifdef DIAGNOSTIC
418 				panic("trap: bad fault type");
419 #else
420 				ftype = VM_PROT_NONE;
421 				break;
422 #endif
423 			}
424 
425 			if (user) {
426 				if (l->l_flag & LW_SA) {
427 					l->l_savp->savp_faultaddr = (vaddr_t)a0;
428 					l->l_pflag |= LP_SA_PAGEFAULT;
429 				}
430 			} else {
431 				struct cpu_info *ci = curcpu();
432 
433 				if (l == NULL) {
434 					/*
435 					 * If there is no current process,
436 					 * it can be nothing but a fatal
437 					 * error (i.e. memory in this case
438 					 * must be wired).
439 					 */
440 					goto dopanic;
441 				}
442 
443 				/*
444 				 * If it was caused by fuswintr or suswintr,
445 				 * just punt.  Note that we check the faulting
446 				 * address against the address accessed by
447 				 * [fs]uswintr, in case another fault happens
448 				 * when they are running.
449 				 */
450 
451 				if (onfault == (vaddr_t)fswintrberr &&
452 				    pcb->pcb_accessaddr == a0) {
453 					framep->tf_regs[FRAME_PC] = onfault;
454 					pcb->pcb_onfault = 0;
455 					goto out;
456 				}
457 
458 				/*
459 				 * If we're in interrupt context at this
460 				 * point, this is an error.
461 				 */
462 				if (ci->ci_intrdepth != 0)
463 					goto dopanic;
464 			}
465 
466 			/*
467 			 * It is only a kernel address space fault iff:
468 			 *	1. !user and
469 			 *	2. pcb_onfault not set or
470 			 *	3. pcb_onfault set but kernel space data fault
471 			 * The last can occur during an exec() copyin where the
472 			 * argument space is lazy-allocated.
473 			 */
474 do_fault:
475 			pcb = lwp_getpcb(l);
476 			if (user == 0 && (a0 >= VM_MIN_KERNEL_ADDRESS ||
477 					  onfault == 0))
478 				map = kernel_map;
479 			else {
480 				vm = l->l_proc->p_vmspace;
481 				map = &vm->vm_map;
482 			}
483 
484 			va = trunc_page((vaddr_t)a0);
485 			pcb->pcb_onfault = 0;
486 			rv = uvm_fault(map, va, ftype);
487 			pcb->pcb_onfault = onfault;
488 
489 			/*
490 			 * If this was a stack access we keep track of the
491 			 * maximum accessed stack size.  Also, if vm_fault
492 			 * gets a protection failure it is due to accessing
493 			 * the stack region outside the current limit and
494 			 * we need to reflect that as an access error.
495 			 */
496 			if (map != kernel_map &&
497 			    (void *)va >= vm->vm_maxsaddr &&
498 			    va < USRSTACK) {
499 				if (rv == 0)
500 					uvm_grow(l->l_proc, va);
501 				else if (rv == EACCES &&
502 					   ftype != VM_PROT_EXECUTE)
503 					rv = EFAULT;
504 			}
505 			if (rv == 0) {
506 				if (user)
507 					l->l_pflag &= ~LP_SA_PAGEFAULT;
508 				goto out;
509 			}
510 
511 			if (user == 0) {
512 				/* Check for copyin/copyout fault */
513 				if (onfault != 0) {
514 					framep->tf_regs[FRAME_PC] = onfault;
515 					framep->tf_regs[FRAME_V0] = rv;
516 					goto out;
517 				}
518 				goto dopanic;
519 			}
520 			KSI_INIT_TRAP(&ksi);
521 			ksi.ksi_addr = (void *)a0;
522 			ksi.ksi_trap = a1; /* MMCSR VALUE */
523 			if (rv == ENOMEM) {
524 				printf("UVM: pid %d (%s), uid %d killed: "
525 				    "out of swap\n", l->l_proc->p_pid,
526 				    l->l_proc->p_comm,
527 				    l->l_cred ?
528 				    kauth_cred_geteuid(l->l_cred) : -1);
529 				ksi.ksi_signo = SIGKILL;
530 			} else
531 				ksi.ksi_signo = SIGSEGV;
532 			if (rv == EACCES)
533 				ksi.ksi_code = SEGV_ACCERR;
534 			else
535 				ksi.ksi_code = SEGV_MAPERR;
536 			l->l_pflag &= ~LP_SA_PAGEFAULT;
537 			break;
538 		    }
539 
540 		default:
541 			printf("trap: unknown MMCSR value 0x%lx\n", a1);
542 			goto dopanic;
543 		}
544 		break;
545 
546 	default:
547 		goto dopanic;
548 	}
549 
550 #ifdef DEBUG
551 	printtrap(a0, a1, a2, entry, framep, 1, user);
552 #endif
553 	(*p->p_emul->e_trapsignal)(l, &ksi);
554 out:
555 	if (user)
556 		userret(l);
557 	return;
558 
559 dopanic:
560 	printtrap(a0, a1, a2, entry, framep, 1, user);
561 
562 	/* XXX dump registers */
563 
564 #if defined(DDB)
565 	if (call_debugger && alpha_debug(a0, a1, a2, entry, framep)) {
566 		/*
567 		 * The debugger has handled the trap; just return.
568 		 */
569 		goto out;
570 	}
571 #endif
572 
573 	panic("trap");
574 }
575 
576 /*
577  * Set the floating-point enable for the current process, and return
578  * the FPU context to the named process. If check == 0, it is an
579  * error for the named process to already be fpcurlwp.
580  */
581 void
582 alpha_enable_fp(struct lwp *l, int check)
583 {
584 #if defined(MULTIPROCESSOR)
585 	int s;
586 #endif
587 	struct cpu_info *ci = curcpu();
588 	struct pcb *pcb;
589 
590 	if (check && ci->ci_fpcurlwp == l) {
591 		alpha_pal_wrfen(1);
592 		return;
593 	}
594 	if (ci->ci_fpcurlwp == l)
595 		panic("trap: fp disabled for fpcurlwp == %p", l);
596 
597 	if (ci->ci_fpcurlwp != NULL)
598 		fpusave_cpu(ci, 1);
599 
600 	KDASSERT(ci->ci_fpcurlwp == NULL);
601 
602 	pcb = lwp_getpcb(l);
603 #if defined(MULTIPROCESSOR)
604 	if (pcb->pcb_fpcpu != NULL)
605 		fpusave_proc(l, 1);
606 #else
607 	KDASSERT(pcb->pcb_fpcpu == NULL);
608 #endif
609 
610 #if defined(MULTIPROCESSOR)
611 	s = splhigh();		/* block IPIs */
612 #endif
613 	FPCPU_LOCK(pcb);
614 
615 	pcb->pcb_fpcpu = ci;
616 	ci->ci_fpcurlwp = l;
617 
618 	FPCPU_UNLOCK(pcb);
619 #if defined(MULTIPROCESSOR)
620 	splx(s);
621 #endif
622 
623 	/*
624 	 * Instrument FP usage -- if a process had not previously
625 	 * used FP, mark it as having used FP for the first time,
626 	 * and count this event.
627 	 *
628 	 * If a process has used FP, count a "used FP, and took
629 	 * a trap to use it again" event.
630 	 */
631 	if ((l->l_md.md_flags & MDP_FPUSED) == 0) {
632 		atomic_inc_ulong(&fpevent_use.ev_count);
633 		l->l_md.md_flags |= MDP_FPUSED;
634 	} else
635 		atomic_inc_ulong(&fpevent_reuse.ev_count);
636 
637 	alpha_pal_wrfen(1);
638 	restorefpstate(&pcb->pcb_fp);
639 }
640 
641 /*
642  * Process an asynchronous software trap.
643  * This is relatively easy.
644  */
645 void
646 ast(struct trapframe *framep)
647 {
648 	struct lwp *l;
649 
650 	/*
651 	 * We may not have a current process to do AST processing
652 	 * on.  This happens on multiprocessor systems in which
653 	 * at least one CPU simply has no current process to run,
654 	 * but roundrobin() (called via hardclock()) kicks us to
655 	 * attempt to preempt the process running on our CPU.
656 	 */
657 	l = curlwp;
658 	if (l == NULL)
659 		return;
660 
661 	//curcpu()->ci_data.cpu_nast++;
662 	l->l_md.md_tf = framep;
663 
664 	if (l->l_pflag & LP_OWEUPC) {
665 		l->l_pflag &= ~LP_OWEUPC;
666 		ADDUPROF(l);
667 	}
668 
669 	if (curcpu()->ci_want_resched) {
670 		/*
671 		 * We are being preempted.
672 		 */
673 		preempt();
674 	}
675 
676 	userret(l);
677 }
678 
679 /*
680  * Unaligned access handler.  It's not clear that this can get much slower...
681  *
682  */
683 static const int reg_to_framereg[32] = {
684 	FRAME_V0,	FRAME_T0,	FRAME_T1,	FRAME_T2,
685 	FRAME_T3,	FRAME_T4,	FRAME_T5,	FRAME_T6,
686 	FRAME_T7,	FRAME_S0,	FRAME_S1,	FRAME_S2,
687 	FRAME_S3,	FRAME_S4,	FRAME_S5,	FRAME_S6,
688 	FRAME_A0,	FRAME_A1,	FRAME_A2,	FRAME_A3,
689 	FRAME_A4,	FRAME_A5,	FRAME_T8,	FRAME_T9,
690 	FRAME_T10,	FRAME_T11,	FRAME_RA,	FRAME_T12,
691 	FRAME_AT,	FRAME_GP,	FRAME_SP,	-1,
692 };
693 
694 #define	irp(l, reg)							\
695 	((reg_to_framereg[(reg)] == -1) ? NULL :			\
696 	    &(l)->l_md.md_tf->tf_regs[reg_to_framereg[(reg)]])
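
/*
 * For example, irp(l, 26) yields &l->l_md.md_tf->tf_regs[FRAME_RA],
 * while irp(l, 31) -- the always-zero register -- yields NULL, so
 * emulated writes to that register are simply discarded.
 */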
697 
698 #define	frp(l, reg)							\
699 	(&pcb->pcb_fp.fpr_regs[(reg)])
700 
701 #define	dump_fp_regs(pcb)						\
702 	if (pcb->pcb_fpcpu != NULL)					\
703 		fpusave_proc(l, 1)
704 
705 #define	unaligned_load(storage, ptrf, mod)				\
706 	if (copyin((void *)va, &(storage), sizeof (storage)) != 0)	\
707 		break;							\
708 	signo = 0;							\
709 	if ((regptr = ptrf(l, reg)) != NULL)				\
710 		*regptr = mod (storage);
711 
712 #define	unaligned_store(storage, ptrf, mod)				\
713 	if ((regptr = ptrf(l, reg)) != NULL)				\
714 		(storage) = mod (*regptr);				\
715 	else								\
716 		(storage) = 0;						\
717 	if (copyout(&(storage), (void *)va, sizeof (storage)) != 0)	\
718 		break;							\
719 	signo = 0;
720 
721 #define	unaligned_load_integer(storage)					\
722 	unaligned_load(storage, irp, )
723 
724 #define	unaligned_store_integer(storage)				\
725 	unaligned_store(storage, irp, )
726 
727 #define	unaligned_load_floating(storage, mod) do {			\
728 	struct pcb *pcb = lwp_getpcb(l);				\
729 	dump_fp_regs(pcb);						\
730 	unaligned_load(storage, frp, mod)				\
731 } while (/*CONSTCOND*/0)
732 
733 #define	unaligned_store_floating(storage, mod) do {			\
734 	struct pcb *pcb = lwp_getpcb(l);				\
735 	dump_fp_regs(pcb);						\
736 	unaligned_store(storage, frp, mod)				\
737 } while (/*CONSTCOND*/0)
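
/*
 * A rough sketch of how these expand: for an "ldl" fixup,
 * unaligned_load_integer(intdata) becomes
 *
 *	if (copyin((void *)va, &intdata, sizeof (intdata)) != 0)
 *		break;
 *	signo = 0;
 *	if ((regptr = irp(l, reg)) != NULL)
 *		*regptr = (intdata);
 *
 * i.e. the datum is fetched byte-wise with copyin(), the pending
 * SIGSEGV is cleared only on success, and the (sign-extended) result
 * is written into the saved register image unless the destination is
 * the zero register.
 */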
738 
739 static unsigned long
740 Sfloat_to_reg(u_int s)
741 {
742 	unsigned long sign, expn, frac;
743 	unsigned long result;
744 
745 	sign = (s & 0x80000000) >> 31;
746 	expn = (s & 0x7f800000) >> 23;
747 	frac = (s & 0x007fffff) >>  0;
748 
749 	/* map exponent part, as appropriate. */
750 	if (expn == 0xff)
751 		expn = 0x7ff;
752 	else if ((expn & 0x80) != 0)
753 		expn = (0x400 | (expn & ~0x80));
754 	else if ((expn & 0x80) == 0 && expn != 0)
755 		expn = (0x380 | (expn & ~0x80));
756 
757 	result = (sign << 63) | (expn << 52) | (frac << 29);
758 	return (result);
759 }
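
/*
 * Worked example: the IEEE single 1.0f (0x3f800000) has sign 0,
 * exponent 0x7f and a zero fraction; the exponent maps to
 * 0x380 | 0x7f = 0x3ff, so the register image produced above is
 * 0x3ff0000000000000, i.e. the T-floating (double) 1.0.
 */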
760 
761 static unsigned int
762 reg_to_Sfloat(u_long r)
763 {
764 	unsigned long sign, expn, frac;
765 	unsigned int result;
766 
767 	sign = (r & 0x8000000000000000) >> 63;
768 	expn = (r & 0x7ff0000000000000) >> 52;
769 	frac = (r & 0x000fffffe0000000) >> 29;
770 
771 	/* map exponent part, as appropriate. */
772 	expn = (expn & 0x7f) | ((expn & 0x400) != 0 ? 0x80 : 0x00);
773 
774 	result = (sign << 31) | (expn << 23) | (frac << 0);
775 	return (result);
776 }
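
/*
 * And the inverse: 0x3ff0000000000000 has exponent 0x3ff, whose low
 * seven bits are kept and whose 0x400 bit is clear, so the S-floating
 * exponent becomes 0x7f and the result is 0x3f800000 (1.0f) again.
 */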
777 
778 /*
779  * Conversion of T floating datums to and from register format
780  * requires no bit reordering whatsoever.
781  */
782 static unsigned long
783 Tfloat_reg_cvt(u_long input)
784 {
785 
786 	return (input);
787 }
788 
789 #ifdef FIX_UNALIGNED_VAX_FP
790 static unsigned long
791 Ffloat_to_reg(u_int f)
792 {
793 	unsigned long sign, expn, frlo, frhi;
794 	unsigned long result;
795 
796 	sign = (f & 0x00008000) >> 15;
797 	expn = (f & 0x00007f80) >>  7;
798 	frhi = (f & 0x0000007f) >>  0;
799 	frlo = (f & 0xffff0000) >> 16;
800 
801 	/* map exponent part, as appropriate. */
802 	if ((expn & 0x80) != 0)
803 		expn = (0x400 | (expn & ~0x80));
804 	else if ((expn & 0x80) == 0 && expn != 0)
805 		expn = (0x380 | (expn & ~0x80));
806 
807 	result = (sign << 63) | (expn << 52) | (frhi << 45) | (frlo << 29);
808 	return (result);
809 }
810 
811 static unsigned int
812 reg_to_Ffloat(u_long r)
813 {
814 	unsigned long sign, expn, frhi, frlo;
815 	unsigned int result;
816 
817 	sign = (r & 0x8000000000000000) >> 63;
818 	expn = (r & 0x7ff0000000000000) >> 52;
819 	frhi = (r & 0x000fe00000000000) >> 45;
820 	frlo = (r & 0x00001fffe0000000) >> 29;
821 
822 	/* map exponent part, as appropriate. */
823 	expn = (expn & 0x7f) | ((expn & 0x400) != 0 ? 0x80 : 0x00);
824 
825 	result = (sign << 15) | (expn << 7) | (frhi << 0) | (frlo << 16);
826 	return (result);
827 }
828 
829 /*
830  * Conversion of G floating datums to and from register format is
831  * symmetrical.  Just swap shorts in the quad...
832  */
833 static unsigned long
834 Gfloat_reg_cvt(u_long input)
835 {
836 	unsigned long a, b, c, d;
837 	unsigned long result;
838 
839 	a = (input & 0x000000000000ffff) >> 0;
840 	b = (input & 0x00000000ffff0000) >> 16;
841 	c = (input & 0x0000ffff00000000) >> 32;
842 	d = (input & 0xffff000000000000) >> 48;
843 
844 	result = (a << 48) | (b << 32) | (c << 16) | (d << 0);
845 	return (result);
846 }
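
/*
 * For example, the memory image 0x0011223344556677 becomes the
 * register image 0x6677445522330011: the four 16-bit words trade
 * places end-for-end, so applying the conversion twice returns the
 * original value.
 */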
847 #endif /* FIX_UNALIGNED_VAX_FP */
848 
849 struct unaligned_fixup_data {
850 	const char *type;	/* opcode name */
851 	int fixable;		/* fixable, 0 if fixup not supported */
852 	int size;		/* size, 0 if unknown */
853 };
854 
855 #define	UNKNOWN()	{ "0x%lx", 0, 0 }
856 #define	FIX_LD(n,s)	{ n, 1, s }
857 #define	FIX_ST(n,s)	{ n, 1, s }
858 #define	NOFIX_LD(n,s)	{ n, 0, s }
859 #define	NOFIX_ST(n,s)	{ n, 0, s }
860 
861 int
862 unaligned_fixup(u_long va, u_long opcode, u_long reg, struct lwp *l)
863 {
864 	static const struct unaligned_fixup_data tab_unknown[1] = {
865 		UNKNOWN(),
866 	};
867 	static const struct unaligned_fixup_data tab_0c[0x02] = {
868 		FIX_LD("ldwu", 2),	FIX_ST("stw", 2),
869 	};
870 	static const struct unaligned_fixup_data tab_20[0x10] = {
871 #ifdef FIX_UNALIGNED_VAX_FP
872 		FIX_LD("ldf", 4),	FIX_LD("ldg", 8),
873 #else
874 		NOFIX_LD("ldf", 4),	NOFIX_LD("ldg", 8),
875 #endif
876 		FIX_LD("lds", 4),	FIX_LD("ldt", 8),
877 #ifdef FIX_UNALIGNED_VAX_FP
878 		FIX_ST("stf", 4),	FIX_ST("stg", 8),
879 #else
880 		NOFIX_ST("stf", 4),	NOFIX_ST("stg", 8),
881 #endif
882 		FIX_ST("sts", 4),	FIX_ST("stt", 8),
883 		FIX_LD("ldl", 4),	FIX_LD("ldq", 8),
884 		NOFIX_LD("ldl_c", 4),	NOFIX_LD("ldq_c", 8),
885 		FIX_ST("stl", 4),	FIX_ST("stq", 8),
886 		NOFIX_ST("stl_c", 4),	NOFIX_ST("stq_c", 8),
887 	};
888 	const struct unaligned_fixup_data *selected_tab;
889 	int doprint, dofix, dosigbus, signo;
890 	unsigned long *regptr, longdata;
891 	int intdata;		/* signed to get extension when storing */
892 	u_int16_t worddata;	/* unsigned to _avoid_ extension */
893 
894 	/*
895 	 * Read USP into frame in case it's the register to be modified.
896 	 * This keeps us from having to check for it in lots of places
897 	 * later.
898 	 */
899 	l->l_md.md_tf->tf_regs[FRAME_SP] = alpha_pal_rdusp();
900 
901 	/*
902 	 * Figure out what actions to take.
903 	 *
904 	 * XXX In the future, this should have a per-process component
905 	 * as well.
906 	 */
907 	doprint = alpha_unaligned_print;
908 	dofix = alpha_unaligned_fix;
909 	dosigbus = alpha_unaligned_sigbus;
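
	/*
	 * (These policy flags are globals owned elsewhere in the port;
	 * they are normally adjusted from userland through the machdep
	 * sysctl tree -- e.g. machdep.unaligned_print, unaligned_fix and
	 * unaligned_sigbus; exact knob names per the port's cpu.h.)
	 */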
910 
911 	/*
912 	 * Find out which opcode it is.  Arrange to have the opcode
913 	 * printed if it's an unknown opcode.
914 	 */
915 	if (opcode >= 0x0c && opcode <= 0x0d)
916 		selected_tab = &tab_0c[opcode - 0x0c];
917 	else if (opcode >= 0x20 && opcode <= 0x2f)
918 		selected_tab = &tab_20[opcode - 0x20];
919 	else
920 		selected_tab = tab_unknown;
921 
922 	/*
923 	 * If we're supposed to be noisy, squawk now.
924 	 */
925 	if (doprint) {
926 		uprintf(
927 		"pid %d (%s): unaligned access: "
928 		"va=0x%lx pc=0x%lx ra=0x%lx sp=0x%lx op=",
929 		    l->l_proc->p_pid, l->l_proc->p_comm, va,
930 		    l->l_md.md_tf->tf_regs[FRAME_PC] - 4,
931 		    l->l_md.md_tf->tf_regs[FRAME_RA],
932 		    l->l_md.md_tf->tf_regs[FRAME_SP]);
933 		uprintf(selected_tab->type,opcode);
934 		uprintf("\n");
935 	}
936 
937 	/*
938 	 * If we should try to fix it and know how, give it a shot.
939 	 *
940 	 * We never allow bad data to be unknowingly used by the user process.
941 	 * That is, if we can't access the address needed to fix up the trap,
942 	 * we cause a SIGSEGV rather than letting the user process go on
943 	 * without warning.
944 	 *
945 	 * If we're trying to do a fixup, we assume that things
946 	 * will be botched.  If everything works out OK,
947 	 * unaligned_{load,store}_* clears the signal flag.
948 	 */
949 	signo = SIGSEGV;
950 	if (dofix && selected_tab->fixable) {
951 		switch (opcode) {
952 		case 0x0c:			/* ldwu */
953 			/* XXX ONLY WORKS ON LITTLE-ENDIAN ALPHA */
954 			unaligned_load_integer(worddata);
955 			break;
956 
957 		case 0x0d:			/* stw */
958 			/* XXX ONLY WORKS ON LITTLE-ENDIAN ALPHA */
959 			unaligned_store_integer(worddata);
960 			break;
961 
962 #ifdef FIX_UNALIGNED_VAX_FP
963 		case 0x20:			/* ldf */
964 			unaligned_load_floating(intdata, Ffloat_to_reg);
965 			break;
966 
967 		case 0x21:			/* ldg */
968 			unaligned_load_floating(longdata, Gfloat_reg_cvt);
969 			break;
970 #endif
971 
972 		case 0x22:			/* lds */
973 			unaligned_load_floating(intdata, Sfloat_to_reg);
974 			break;
975 
976 		case 0x23:			/* ldt */
977 			unaligned_load_floating(longdata, Tfloat_reg_cvt);
978 			break;
979 
980 #ifdef FIX_UNALIGNED_VAX_FP
981 		case 0x24:			/* stf */
982 			unaligned_store_floating(intdata, reg_to_Ffloat);
983 			break;
984 
985 		case 0x25:			/* stg */
986 			unaligned_store_floating(longdata, Gfloat_reg_cvt);
987 			break;
988 #endif
989 
990 		case 0x26:			/* sts */
991 			unaligned_store_floating(intdata, reg_to_Sfloat);
992 			break;
993 
994 		case 0x27:			/* stt */
995 			unaligned_store_floating(longdata, Tfloat_reg_cvt);
996 			break;
997 
998 		case 0x28:			/* ldl */
999 			unaligned_load_integer(intdata);
1000 			break;
1001 
1002 		case 0x29:			/* ldq */
1003 			unaligned_load_integer(longdata);
1004 			break;
1005 
1006 		case 0x2c:			/* stl */
1007 			unaligned_store_integer(intdata);
1008 			break;
1009 
1010 		case 0x2d:			/* stq */
1011 			unaligned_store_integer(longdata);
1012 			break;
1013 
1014 #ifdef DIAGNOSTIC
1015 		default:
1016 			panic("unaligned_fixup: can't get here");
1017 #endif
1018 		}
1019 	}
1020 
1021 	/*
1022 	 * Force SIGBUS if requested.
1023 	 */
1024 	if (dosigbus)
1025 		signo = SIGBUS;
1026 
1027 	/*
1028 	 * Write back USP.
1029 	 */
1030 	alpha_pal_wrusp(l->l_md.md_tf->tf_regs[FRAME_SP]);
1031 
1032 	return (signo);
1033 }
1034 
1035 /*
1036  * Reserved/unimplemented instruction (opDec fault) handler
1037  *
1038  * Argument is the LWP that caused it.  No useful information
1039  * is passed to the trap handler other than the fault type.  The
1040  * address of the instruction that caused the fault is 4 less than
1041  * the PC stored in the trap frame.
1042  *
1043  * If the instruction is emulated successfully, this function returns 0.
1044  * Otherwise, this function returns the signal to deliver to the process,
1045  * and fills in *ucodep with the code to be delivered.
1046  */
1047 int
1048 handle_opdec(struct lwp *l, u_long *ucodep)
1049 {
1050 	alpha_instruction inst;
1051 	register_t *regptr, memaddr;
1052 	u_int64_t inst_pc;
1053 	int sig;
1054 
1055 	/*
1056 	 * Read USP into frame in case it's going to be used or modified.
1057 	 * This keeps us from having to check for it in lots of places
1058 	 * later.
1059 	 */
1060 	l->l_md.md_tf->tf_regs[FRAME_SP] = alpha_pal_rdusp();
1061 
1062 	inst_pc = memaddr = l->l_md.md_tf->tf_regs[FRAME_PC] - 4;
1063 	if (copyin((void *)inst_pc, &inst, sizeof (inst)) != 0) {
1064 		/*
1065 		 * really, this should never happen, but in case it
1066 		 * does we handle it.
1067 		 */
1068 		printf("WARNING: handle_opdec() couldn't fetch instruction\n");
1069 		goto sigsegv;
1070 	}
1071 
1072 	switch (inst.generic_format.opcode) {
1073 	case op_ldbu:
1074 	case op_ldwu:
1075 	case op_stw:
1076 	case op_stb:
1077 		regptr = irp(l, inst.mem_format.rb);
1078 		if (regptr != NULL)
1079 			memaddr = *regptr;
1080 		else
1081 			memaddr = 0;
1082 		memaddr += inst.mem_format.displacement;
1083 
1084 		regptr = irp(l, inst.mem_format.ra);
1085 
1086 		if (inst.mem_format.opcode == op_ldwu ||
1087 		    inst.mem_format.opcode == op_stw) {
1088 			if (memaddr & 0x01) {
1089 				sig = unaligned_fixup(memaddr,
1090 				    inst.mem_format.opcode,
1091 				    inst.mem_format.ra, l);
1092 				if (sig)
1093 					goto unaligned_fixup_sig;
1094 				break;
1095 			}
1096 		}
1097 
1098 		if (inst.mem_format.opcode == op_ldbu) {
1099 			u_int8_t b;
1100 
1101 			/* XXX ONLY WORKS ON LITTLE-ENDIAN ALPHA */
1102 			if (copyin((void *)memaddr, &b, sizeof (b)) != 0)
1103 				goto sigsegv;
1104 			if (regptr != NULL)
1105 				*regptr = b;
1106 		} else if (inst.mem_format.opcode == op_ldwu) {
1107 			u_int16_t w;
1108 
1109 			/* XXX ONLY WORKS ON LITTLE-ENDIAN ALPHA */
1110 			if (copyin((void *)memaddr, &w, sizeof (w)) != 0)
1111 				goto sigsegv;
1112 			if (regptr != NULL)
1113 				*regptr = w;
1114 		} else if (inst.mem_format.opcode == op_stw) {
1115 			u_int16_t w;
1116 
1117 			/* XXX ONLY WORKS ON LITTLE-ENDIAN ALPHA */
1118 			w = (regptr != NULL) ? *regptr : 0;
1119 			if (copyout(&w, (void *)memaddr, sizeof (w)) != 0)
1120 				goto sigsegv;
1121 		} else if (inst.mem_format.opcode == op_stb) {
1122 			u_int8_t b;
1123 
1124 			/* XXX ONLY WORKS ON LITTLE-ENDIAN ALPHA */
1125 			b = (regptr != NULL) ? *regptr : 0;
1126 			if (copyout(&b, (void *)memaddr, sizeof (b)) != 0)
1127 				goto sigsegv;
1128 		}
1129 		break;
1130 
1131 	case op_intmisc:
1132 		if (inst.operate_generic_format.function == op_sextb &&
1133 		    inst.operate_generic_format.ra == 31) {
1134 			int8_t b;
1135 
1136 			if (inst.operate_generic_format.is_lit) {
1137 				b = inst.operate_lit_format.literal;
1138 			} else {
1139 				if (inst.operate_reg_format.sbz != 0)
1140 					goto sigill;
1141 				regptr = irp(l, inst.operate_reg_format.rb);
1142 				b = (regptr != NULL) ? *regptr : 0;
1143 			}
1144 
1145 			regptr = irp(l, inst.operate_generic_format.rc);
1146 			if (regptr != NULL)
1147 				*regptr = b;
1148 			break;
1149 		}
1150 		if (inst.operate_generic_format.function == op_sextw &&
1151 		    inst.operate_generic_format.ra == 31) {
1152 			int16_t w;
1153 
1154 			if (inst.operate_generic_format.is_lit) {
1155 				w = inst.operate_lit_format.literal;
1156 			} else {
1157 				if (inst.operate_reg_format.sbz != 0)
1158 					goto sigill;
1159 				regptr = irp(l, inst.operate_reg_format.rb);
1160 				w = (regptr != NULL) ? *regptr : 0;
1161 			}
1162 
1163 			regptr = irp(l, inst.operate_generic_format.rc);
1164 			if (regptr != NULL)
1165 				*regptr = w;
1166 			break;
1167 		}
1168 		goto sigill;
1169 
1170 	default:
1171 		goto sigill;
1172 	}
1173 
1174 	/*
1175 	 * Write back USP.  Note that in the error cases below,
1176 	 * nothing will have been successfully modified so we don't
1177 	 * have to write it out.
1178 	 */
1179 	alpha_pal_wrusp(l->l_md.md_tf->tf_regs[FRAME_SP]);
1180 
1181 	return (0);
1182 
1183 sigill:
1184 	*ucodep = ALPHA_IF_CODE_OPDEC;			/* trap type */
1185 	return (SIGILL);
1186 
1187 sigsegv:
1188 	sig = SIGSEGV;
1189 	l->l_md.md_tf->tf_regs[FRAME_PC] = inst_pc;	/* re-run instr. */
1190 unaligned_fixup_sig:
1191 	*ucodep = memaddr;				/* faulting address */
1192 	return (sig);
1193 }
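
/*
 * Example of the sign-extension emulation above: a "sextb" that traps
 * on a pre-BWX CPU with the literal 0x80 as its operand stores -128,
 * i.e. 0xffffffffffffff80, into the saved image of its destination
 * register, and the program then resumes at the instruction following
 * the one that trapped (the PC in the frame already points there).
 */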
1194 
1195 /* map alpha fp flags to ksiginfo fp codes */
1196 static int
1197 alpha_ucode_to_ksiginfo(u_long ucode)
1198 {
1199 	long i;
1200 
1201 	static const int alpha_ksiginfo_table[] = { FPE_FLTINV,
1202 					     FPE_FLTDIV,
1203 					     FPE_FLTOVF,
1204 					     FPE_FLTUND,
1205 					     FPE_FLTRES,
1206 					     FPE_INTOVF };
1207 
1208 	for (i = 0; i < sizeof(alpha_ksiginfo_table) / sizeof(int); i++) {
1209 		if (ucode & (1 << i))
1210 			return (alpha_ksiginfo_table[i]);
1211 	}
1212 	/* punt if the flags weren't set */
1213 	return (0);
1214 }
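
/*
 * When several flag bits are set at once the lowest-numbered one wins:
 * a ucode of 0x6 (bits 1 and 2) reports FPE_FLTDIV rather than
 * FPE_FLTOVF, and a ucode of 0 falls through and yields 0.
 */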
1215 
1216 /*
1217  * Start a new LWP
1218  */
1219 void
1220 startlwp(void *arg)
1221 {
1222 	ucontext_t *uc = arg;
1223 	lwp_t *l = curlwp;
1224 	int error;
1225 
1226 	error = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
1227 	KASSERT(error == 0);
1228 
1229 	kmem_free(uc, sizeof(ucontext_t));
1230 	userret(l);
1231 }
1232 
1233 /*
1234  * XXX This is a terrible name.
1235  */
1236 void
1237 upcallret(struct lwp *l)
1238 {
1239 	KERNEL_UNLOCK_LAST(l);
1240 
1241 	userret(l);
1242 }
1243