1 /* $NetBSD: trap.c,v 1.77 2001/07/18 22:22:02 thorpej Exp $ */ 2 3 /*- 4 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 9 * NASA Ames Research Center, by Charles M. Hannum, and by Ross Harvey. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. All advertising materials mentioning features or use of this software 20 * must display the following acknowledgement: 21 * This product includes software developed by the NetBSD 22 * Foundation, Inc. and its contributors. 23 * 4. Neither the name of The NetBSD Foundation nor the names of its 24 * contributors may be used to endorse or promote products derived 25 * from this software without specific prior written permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 30 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 37 * POSSIBILITY OF SUCH DAMAGE. 38 */ 39 40 /* 41 * Copyright (c) 1999 Christopher G. Demetriou. All rights reserved. 42 * 43 * Redistribution and use in source and binary forms, with or without 44 * modification, are permitted provided that the following conditions 45 * are met: 46 * 1. Redistributions of source code must retain the above copyright 47 * notice, this list of conditions and the following disclaimer. 48 * 2. Redistributions in binary form must reproduce the above copyright 49 * notice, this list of conditions and the following disclaimer in the 50 * documentation and/or other materials provided with the distribution. 51 * 3. All advertising materials mentioning features or use of this software 52 * must display the following acknowledgement: 53 * This product includes software developed by Christopher G. Demetriou 54 * for the NetBSD Project. 55 * 4. The name of the author may not be used to endorse or promote products 56 * derived from this software without specific prior written permission 57 * 58 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 59 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 60 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
61 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 62 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 63 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 64 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 65 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 66 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 67 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 68 */ 69 70 /* 71 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University. 72 * All rights reserved. 73 * 74 * Author: Chris G. Demetriou 75 * 76 * Permission to use, copy, modify and distribute this software and 77 * its documentation is hereby granted, provided that both the copyright 78 * notice and this permission notice appear in all copies of the 79 * software, derivative works or modified versions, and any portions 80 * thereof, and that both notices appear in supporting documentation. 81 * 82 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 83 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 84 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 85 * 86 * Carnegie Mellon requests users of this software to return to 87 * 88 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 89 * School of Computer Science 90 * Carnegie Mellon University 91 * Pittsburgh PA 15213-3890 92 * 93 * any improvements or extensions that they make and grant Carnegie the 94 * rights to redistribute these changes. 
95 */ 96 97 #include "opt_fix_unaligned_vax_fp.h" 98 #include "opt_ddb.h" 99 100 #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */ 101 102 __KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.77 2001/07/18 22:22:02 thorpej Exp $"); 103 104 #include <sys/param.h> 105 #include <sys/systm.h> 106 #include <uvm/uvm_extern.h> 107 #include <sys/proc.h> 108 #include <sys/user.h> 109 #include <sys/syscall.h> 110 #include <sys/buf.h> 111 112 #include <uvm/uvm_extern.h> 113 114 #include <machine/cpu.h> 115 #include <machine/reg.h> 116 #include <machine/alpha.h> 117 #include <machine/rpb.h> 118 #ifdef DDB 119 #include <machine/db_machdep.h> 120 #endif 121 #include <alpha/alpha/db_instruction.h> 122 #include <machine/userret.h> 123 124 static int unaligned_fixup(u_long, u_long, u_long, struct proc *); 125 static int handle_opdec(struct proc *p, u_int64_t *ucodep); 126 127 struct evcnt fpevent_use; 128 struct evcnt fpevent_reuse; 129 130 /* 131 * Initialize the trap vectors for the current processor. 132 */ 133 void 134 trap_init(void) 135 { 136 137 /* 138 * Point interrupt/exception vectors to our own. 139 */ 140 alpha_pal_wrent(XentInt, ALPHA_KENTRY_INT); 141 alpha_pal_wrent(XentArith, ALPHA_KENTRY_ARITH); 142 alpha_pal_wrent(XentMM, ALPHA_KENTRY_MM); 143 alpha_pal_wrent(XentIF, ALPHA_KENTRY_IF); 144 alpha_pal_wrent(XentUna, ALPHA_KENTRY_UNA); 145 alpha_pal_wrent(XentSys, ALPHA_KENTRY_SYS); 146 147 /* 148 * Clear pending machine checks and error reports, and enable 149 * system- and processor-correctable error reporting. 150 */ 151 alpha_pal_wrmces(alpha_pal_rdmces() & 152 ~(ALPHA_MCES_DSC|ALPHA_MCES_DPC)); 153 154 /* 155 * If this is the primary processor, initialize some trap 156 * event counters. 
157 */ 158 if (cpu_number() == hwrpb->rpb_primary_cpu_id) { 159 evcnt_attach_dynamic(&fpevent_use, EVCNT_TYPE_MISC, NULL, 160 "FP", "proc use"); 161 evcnt_attach_dynamic(&fpevent_reuse, EVCNT_TYPE_MISC, NULL, 162 "FP", "proc re-use"); 163 } 164 } 165 166 static void 167 printtrap(const u_long a0, const u_long a1, const u_long a2, 168 const u_long entry, struct trapframe *framep, int isfatal, int user) 169 { 170 char ubuf[64]; 171 const char *entryname; 172 u_long cpu_id = cpu_number(); 173 174 switch (entry) { 175 case ALPHA_KENTRY_INT: 176 entryname = "interrupt"; 177 break; 178 case ALPHA_KENTRY_ARITH: 179 entryname = "arithmetic trap"; 180 break; 181 case ALPHA_KENTRY_MM: 182 entryname = "memory management fault"; 183 break; 184 case ALPHA_KENTRY_IF: 185 entryname = "instruction fault"; 186 break; 187 case ALPHA_KENTRY_UNA: 188 entryname = "unaligned access fault"; 189 break; 190 case ALPHA_KENTRY_SYS: 191 entryname = "system call"; 192 break; 193 default: 194 sprintf(ubuf, "type %lx", entry); 195 entryname = (const char *) ubuf; 196 break; 197 } 198 199 printf("\n"); 200 printf("CPU %lu: %s %s trap:\n", cpu_id, isfatal ? "fatal" : "handled", 201 user ? "user" : "kernel"); 202 printf("\n"); 203 printf("CPU %lu trap entry = 0x%lx (%s)\n", cpu_id, entry, 204 entryname); 205 printf("CPU %lu a0 = 0x%lx\n", cpu_id, a0); 206 printf("CPU %lu a1 = 0x%lx\n", cpu_id, a1); 207 printf("CPU %lu a2 = 0x%lx\n", cpu_id, a2); 208 printf("CPU %lu pc = 0x%lx\n", cpu_id, 209 framep->tf_regs[FRAME_PC]); 210 printf("CPU %lu ra = 0x%lx\n", cpu_id, 211 framep->tf_regs[FRAME_RA]); 212 printf("CPU %lu pv = 0x%lx\n", cpu_id, 213 framep->tf_regs[FRAME_T12]); 214 printf("CPU %lu curproc = %p\n", cpu_id, curproc); 215 if (curproc != NULL) 216 printf("CPU %lu pid = %d, comm = %s\n", cpu_id, 217 curproc->p_pid, curproc->p_comm); 218 printf("\n"); 219 } 220 221 /* 222 * Trap is called from locore to handle most types of processor traps. 
 * System calls are broken out for efficiency and ASTs are broken out
 * to make the code a bit cleaner and more representative of the
 * Alpha architecture.
 */
/*ARGSUSED*/
void
trap(const u_long a0, const u_long a1, const u_long a2, const u_long entry,
    struct trapframe *framep)
{
    register struct proc *p;
    register int i;		/* signal to deliver, 0 if none */
    u_int64_t ucode;		/* signal code (fault VA, trap type, ...) */
    int user;		/* nonzero if trap came from user mode */
#if defined(DDB)
    int call_debugger = 1;
#endif

    p = curproc;

    uvmexp.traps++;	/* XXXSMP: NOT ATOMIC */
    ucode = 0;
    user = (framep->tf_regs[FRAME_PS] & ALPHA_PSL_USERMODE) != 0;
    if (user)
        p->p_md.md_tf = framep;

    switch (entry) {
    case ALPHA_KENTRY_UNA:
        /*
         * If user-land, do whatever fixups, printing, and
         * signalling is appropriate (based on system-wide
         * and per-process unaligned-access-handling flags).
         */
        if (user) {
            KERNEL_PROC_LOCK(p);
            i = unaligned_fixup(a0, a1, a2, p);
            KERNEL_PROC_UNLOCK(p);
            if (i == 0)
                goto out;

            ucode = a0;	/* VA */
            break;
        }

        /*
         * Unaligned access from kernel mode is always an error,
         * EVEN IF A COPY FAULT HANDLER IS SET!
         *
         * It's an error if a copy fault handler is set because
         * the various routines which do user-initiated copies
         * do so in a memcpy-like manner.  In other words, the
         * kernel never assumes that pointers provided by the
         * user are properly aligned, and so if the kernel
         * does cause an unaligned access it's a kernel bug.
         */
        goto dopanic;

    case ALPHA_KENTRY_ARITH:
        /*
         * Resolve trap shadows, interpret FP ops requiring infinities,
         * NaNs, or denorms, and maintain FPCR corrections.
         */
        if (user) {
            i = alpha_fp_complete(a0, a1, p, &ucode);
            if (i == 0)
                goto out;
            break;
        }

        /* Always fatal in kernel.  Should never happen. */
        goto dopanic;

    case ALPHA_KENTRY_IF:
        /*
         * These are always fatal in kernel, and should never
         * happen.  (Debugger entry is handled in XentIF.)
         */
        if (user == 0) {
#if defined(DDB)
            /*
             * ...unless a debugger is configured.  It will
             * inform us if the trap was handled.
             */
            if (alpha_debug(a0, a1, a2, entry, framep))
                goto out;

            /*
             * Debugger did NOT handle the trap, don't
             * call the debugger again!
             */
            call_debugger = 0;
#endif
            goto dopanic;
        }
        i = 0;
        switch (a0) {
        case ALPHA_IF_CODE_GENTRAP:
            if (framep->tf_regs[FRAME_A0] == -2) { /* weird! */
                i = SIGFPE;
                ucode = a0;	/* exception summary */
                break;
            }
            /* FALLTHROUGH */
        case ALPHA_IF_CODE_BPT:
        case ALPHA_IF_CODE_BUGCHK:
            ucode = a0;	/* trap type */
            i = SIGTRAP;
            break;

        case ALPHA_IF_CODE_OPDEC:
            KERNEL_PROC_LOCK(p);
            i = handle_opdec(p, &ucode);
            KERNEL_PROC_UNLOCK(p);
            if (i == 0)
                goto out;
            break;

        case ALPHA_IF_CODE_FEN:
            alpha_enable_fp(p, 0);
            alpha_pal_wrfen(0);
            goto out;

        default:
            printf("trap: unknown IF type 0x%lx\n", a0);
            goto dopanic;
        }
        break;

    case ALPHA_KENTRY_MM:
        switch (a1) {
        case ALPHA_MMCSR_FOR:
        case ALPHA_MMCSR_FOE:
        case ALPHA_MMCSR_FOW:
            /* Fault-on-read/execute/write: emulate referenced bits. */
            if (user)
                KERNEL_PROC_LOCK(p);
            else
                KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);

            pmap_emulate_reference(p, a0, user,
                a1 == ALPHA_MMCSR_FOW ? 1 : 0);

            if (user)
                KERNEL_PROC_UNLOCK(p);
            else
                KERNEL_UNLOCK();
            goto out;

        case ALPHA_MMCSR_INVALTRANS:
        case ALPHA_MMCSR_ACCESS:
            {
            register vaddr_t va;
            register struct vmspace *vm = NULL;
            register struct vm_map *map;
            vm_prot_t ftype;
            int rv;

            if (user)
                KERNEL_PROC_LOCK(p);
            else {
                struct cpu_info *ci = curcpu();

                if (p == NULL) {
                    /*
                     * If there is no current process,
                     * it can be nothing but a fatal
                     * error (i.e. memory in this case
                     * must be wired).
                     */
                    goto dopanic;
                }

                /*
                 * If it was caused by fuswintr or suswintr,
                 * just punt.  Note that we check the faulting
                 * address against the address accessed by
                 * [fs]uswintr, in case another fault happens
                 * when they are running.
                 */
                if (p->p_addr->u_pcb.pcb_onfault ==
                    (unsigned long)fswintrberr &&
                    p->p_addr->u_pcb.pcb_accessaddr == a0) {
                    framep->tf_regs[FRAME_PC] =
                        p->p_addr->u_pcb.pcb_onfault;
                    p->p_addr->u_pcb.pcb_onfault = 0;
                    goto out;
                }

                /*
                 * If we're in interrupt context at this
                 * point, this is an error.
                 */
                if (ci->ci_intrdepth != 0)
                    goto dopanic;

                KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
            }

            /*
             * It is only a kernel address space fault iff:
             *	1. !user and
             *	2. pcb_onfault not set or
             *	3. pcb_onfault set but kernel space data fault
             * The last can occur during an exec() copyin where the
             * argument space is lazy-allocated.
             */
            if (user == 0 && (a0 >= VM_MIN_KERNEL_ADDRESS ||
                p->p_addr->u_pcb.pcb_onfault == 0))
                map = kernel_map;
            else {
                vm = p->p_vmspace;
                map = &vm->vm_map;
            }

            switch (a2) {
            case -1:	/* instruction fetch fault */
            case 0:		/* load instruction */
                ftype = VM_PROT_READ;
                break;
            case 1:		/* store instruction */
                ftype = VM_PROT_WRITE;
                break;
#ifdef DIAGNOSTIC
            default:	/* XXX gcc -Wuninitialized */
                if (user)
                    KERNEL_PROC_UNLOCK(p);
                else
                    KERNEL_UNLOCK();
                goto dopanic;
#endif
            }

            va = trunc_page((vaddr_t)a0);
            rv = uvm_fault(map, va,
                (a1 == ALPHA_MMCSR_INVALTRANS) ?
                VM_FAULT_INVALID : VM_FAULT_PROTECT, ftype);
            /*
             * If this was a stack access we keep track of the
             * maximum accessed stack size.  Also, if vm_fault
             * gets a protection failure it is due to accessing
             * the stack region outside the current limit and
             * we need to reflect that as an access error.
             */
            if (map != kernel_map &&
                (caddr_t)va >= vm->vm_maxsaddr &&
                va < USRSTACK) {
                if (rv == 0) {
                    unsigned nss;

                    nss = btoc(USRSTACK -
                        (unsigned long)va);
                    if (nss > vm->vm_ssize)
                        vm->vm_ssize = nss;
                } else if (rv == EACCES)
                    rv = EFAULT;
            }
            if (rv == 0) {
                if (user)
                    KERNEL_PROC_UNLOCK(p);
                else
                    KERNEL_UNLOCK();
                goto out;
            }

            if (user == 0) {
                KERNEL_UNLOCK();

                /* Check for copyin/copyout fault */
                if (p != NULL &&
                    p->p_addr->u_pcb.pcb_onfault != 0) {
                    framep->tf_regs[FRAME_PC] =
                        p->p_addr->u_pcb.pcb_onfault;
                    p->p_addr->u_pcb.pcb_onfault = 0;
                    goto out;
                }
                goto dopanic;
            }
            ucode = a0;
            if (rv == ENOMEM) {
                printf("UVM: pid %d (%s), uid %d killed: "
                    "out of swap\n", p->p_pid, p->p_comm,
                    p->p_cred && p->p_ucred ?
                    p->p_ucred->cr_uid : -1);
                i = SIGKILL;
            } else
                i = SIGSEGV;
            KERNEL_PROC_UNLOCK(p);
            break;
            }

        default:
            printf("trap: unknown MMCSR value 0x%lx\n", a1);
            goto dopanic;
        }
        break;

    default:
        goto dopanic;
    }

#ifdef DEBUG
    /*
     * NOTE(review): isfatal=1 is passed here even though this trap is
     * being handled by delivering a signal -- confirm this is intended.
     */
    printtrap(a0, a1, a2, entry, framep, 1, user);
#endif
    KERNEL_PROC_LOCK(p);
    trapsignal(p, i, ucode);
    KERNEL_PROC_UNLOCK(p);
out:
    if (user)
        userret(p);
    return;

dopanic:
    printtrap(a0, a1, a2, entry, framep, 1, user);

    /* XXX dump registers */

#if defined(DDB)
    if (call_debugger && alpha_debug(a0, a1, a2, entry, framep)) {
        /*
         * The debugger has handled the trap; just return.
         */
        goto out;
    }
#endif

    panic("trap");
}

/*
 * Set the floating-point enable for the current process, and return
 * the FPU context to the named process.  If check == 0, it is an
 * error for the named process to already be fpcurproc.
 */
void
alpha_enable_fp(struct proc *p, int check)
{
#if defined(MULTIPROCESSOR)
    int s;
#endif
    struct cpu_info *ci = curcpu();

    if (check && ci->ci_fpcurproc == p) {
        alpha_pal_wrfen(1);
        return;
    }
    if (ci->ci_fpcurproc == p)
        panic("trap: fp disabled for fpcurproc == %p", p);

    /* Save any other process's FP state off this CPU first. */
    if (ci->ci_fpcurproc != NULL)
        fpusave_cpu(ci, 1);

    KDASSERT(ci->ci_fpcurproc == NULL);

#if defined(MULTIPROCESSOR)
    /* p's FP state may live on another CPU; pull it back. */
    if (p->p_addr->u_pcb.pcb_fpcpu != NULL)
        fpusave_proc(p, 1);
#else
    KDASSERT(p->p_addr->u_pcb.pcb_fpcpu == NULL);
#endif

    FPCPU_LOCK(&p->p_addr->u_pcb, s);

    p->p_addr->u_pcb.pcb_fpcpu = ci;
    ci->ci_fpcurproc = p;

    FPCPU_UNLOCK(&p->p_addr->u_pcb, s);

    /*
     * Instrument FP usage -- if a process had not previously
     * used FP, mark it as having used FP for the first time,
     * and count this event.
     *
     * If a process has used FP, count a "used FP, and took
     * a trap to use it again" event.
     */
    if ((p->p_md.md_flags & MDP_FPUSED) == 0) {
        atomic_add_ulong(&fpevent_use.ev_count, 1);
        p->p_md.md_flags |= MDP_FPUSED;
    } else
        atomic_add_ulong(&fpevent_reuse.ev_count, 1);

    alpha_pal_wrfen(1);
    restorefpstate(&p->p_addr->u_pcb.pcb_fp);
}

/*
 * Process an asynchronous software trap.
 * This is relatively easy.
 */
void
ast(struct trapframe *framep)
{
    register struct proc *p;

    /*
     * We may not have a current process to do AST processing
     * on.  This happens on multiprocessor systems in which
     * at least one CPU simply has no current process to run,
     * but roundrobin() (called via hardclock()) kicks us to
     * attempt to preempt the process running on our CPU.
     */
    p = curproc;
    if (p == NULL)
        return;

    KERNEL_PROC_LOCK(p);

    uvmexp.softs++;
    p->p_md.md_tf = framep;

    /* Post deferred profiling ticks, if any. */
    if (p->p_flag & P_OWEUPC) {
        p->p_flag &= ~P_OWEUPC;
        ADDUPROF(p);
    }

    if (curcpu()->ci_want_resched) {
        /*
         * We are being preempted.
         */
        preempt(NULL);
    }

    KERNEL_PROC_UNLOCK(p);
    userret(p);
}

/*
 * Unaligned access handler.  It's not clear that this can get much slower...
 */

/* Map an architectural register number (0-31) to its trapframe slot;
 * -1 marks the zero register, which has no slot. */
static const int reg_to_framereg[32] = {
    FRAME_V0,	FRAME_T0,	FRAME_T1,	FRAME_T2,
    FRAME_T3,	FRAME_T4,	FRAME_T5,	FRAME_T6,
    FRAME_T7,	FRAME_S0,	FRAME_S1,	FRAME_S2,
    FRAME_S3,	FRAME_S4,	FRAME_S5,	FRAME_S6,
    FRAME_A0,	FRAME_A1,	FRAME_A2,	FRAME_A3,
    FRAME_A4,	FRAME_A5,	FRAME_T8,	FRAME_T9,
    FRAME_T10,	FRAME_T11,	FRAME_RA,	FRAME_T12,
    FRAME_AT,	FRAME_GP,	FRAME_SP,	-1,
};

/* Pointer to integer register `reg' in p's trapframe; NULL for $31. */
#define	irp(p, reg)							\
    ((reg_to_framereg[(reg)] == -1) ? NULL :				\
        &(p)->p_md.md_tf->tf_regs[reg_to_framereg[(reg)]])

/* Pointer to FP register `reg' in p's PCB-saved FP state. */
#define	frp(p, reg)							\
    (&(p)->p_addr->u_pcb.pcb_fp.fpr_regs[(reg)])

/* Flush p's live FP context into the PCB before touching fpr_regs. */
#define	dump_fp_regs()							\
    if (p->p_addr->u_pcb.pcb_fpcpu != NULL)				\
        fpusave_proc(p, 1)

/*
 * NOTE: the unaligned_load/store macros below use `break' to bail out
 * of the switch statement in unaligned_fixup() on a copyin/copyout
 * failure, leaving `signal' set; on success they clear `signal'.
 * They must only be used directly inside that switch.
 */
#define	unaligned_load(storage, ptrf, mod)				\
    if (copyin((caddr_t)va, &(storage), sizeof (storage)) != 0)		\
        break;								\
    signal = 0;								\
    if ((regptr = ptrf(p, reg)) != NULL)				\
        *regptr = mod (storage);

#define	unaligned_store(storage, ptrf, mod)				\
    if ((regptr = ptrf(p, reg)) != NULL)				\
        (storage) = mod (*regptr);					\
    else								\
        (storage) = 0;							\
    if (copyout(&(storage), (caddr_t)va, sizeof (storage)) != 0)	\
        break;								\
    signal = 0;

#define	unaligned_load_integer(storage)					\
    unaligned_load(storage, irp, )

#define	unaligned_store_integer(storage)				\
    unaligned_store(storage, irp, )

#define	unaligned_load_floating(storage, mod)				\
    dump_fp_regs();							\
    unaligned_load(storage, frp, mod)

#define	unaligned_store_floating(storage, mod)				\
    dump_fp_regs();							\
    unaligned_store(storage, frp, mod)

/*
 * Convert an IEEE S-floating (32-bit) memory datum to the 64-bit
 * register format: sign and fraction move up, and the 8-bit exponent
 * is remapped into the 11-bit register exponent field.
 */
static unsigned long
Sfloat_to_reg(u_int s)
{
    unsigned long sign, expn, frac;
    unsigned long result;

    sign = (s & 0x80000000) >> 31;
    expn = (s & 0x7f800000) >> 23;
    frac = (s & 0x007fffff) >> 0;

    /* map exponent part, as appropriate. */
    if (expn == 0xff)
        expn = 0x7ff;			/* Inf/NaN exponent */
    else if ((expn & 0x80) != 0)
        expn = (0x400 | (expn & ~0x80));
    else if ((expn & 0x80) == 0 && expn != 0)
        expn = (0x380 | (expn & ~0x80));

    result = (sign << 63) | (expn << 52) | (frac << 29);
    return (result);
}

/*
 * Convert a 64-bit register datum back to IEEE S-floating (32-bit)
 * memory format; inverse of Sfloat_to_reg().
 */
static unsigned int
reg_to_Sfloat(u_long r)
{
    unsigned long sign, expn, frac;
    unsigned int result;

    sign = (r & 0x8000000000000000) >> 63;
    expn = (r & 0x7ff0000000000000) >> 52;
    frac = (r & 0x000fffffe0000000) >> 29;

    /* map exponent part, as appropriate. */
    expn = (expn & 0x7f) | ((expn & 0x400) != 0 ? 0x80 : 0x00);

    result = (sign << 31) | (expn << 23) | (frac << 0);
    return (result);
}

/*
 * Conversion of T floating datums to and from register format
 * requires no bit reordering whatsoever.
 */
static unsigned long
Tfloat_reg_cvt(u_long input)
{

    return (input);
}

#ifdef FIX_UNALIGNED_VAX_FP
/*
 * Convert a VAX F-floating (32-bit) memory datum to register format.
 * Note the VAX PDP-endian word swap (frlo comes from the high half).
 */
static unsigned long
Ffloat_to_reg(u_int f)
{
    unsigned long sign, expn, frlo, frhi;
    unsigned long result;

    sign = (f & 0x00008000) >> 15;
    expn = (f & 0x00007f80) >> 7;
    frhi = (f & 0x0000007f) >> 0;
    frlo = (f & 0xffff0000) >> 16;

    /* map exponent part, as appropriate. */
    if ((expn & 0x80) != 0)
        expn = (0x400 | (expn & ~0x80));
    else if ((expn & 0x80) == 0 && expn != 0)
        expn = (0x380 | (expn & ~0x80));

    result = (sign << 63) | (expn << 52) | (frhi << 45) | (frlo << 29);
    return (result);
}

/*
 * Convert a register datum back to VAX F-floating memory format;
 * inverse of Ffloat_to_reg().
 */
static unsigned int
reg_to_Ffloat(u_long r)
{
    unsigned long sign, expn, frhi, frlo;
    unsigned int result;

    sign = (r & 0x8000000000000000) >> 63;
    expn = (r & 0x7ff0000000000000) >> 52;
    frhi = (r & 0x000fe00000000000) >> 45;
    frlo = (r & 0x00001fffe0000000) >> 29;

    /* map exponent part, as appropriate. */
    expn = (expn & 0x7f) | ((expn & 0x400) != 0 ? 0x80 : 0x00);

    result = (sign << 15) | (expn << 7) | (frhi << 0) | (frlo << 16);
    return (result);
}

/*
 * Conversion of G floating datums to and from register format is
 * symmetrical.  Just swap shorts in the quad...
 */
static unsigned long
Gfloat_reg_cvt(u_long input)
{
    unsigned long a, b, c, d;
    unsigned long result;

    a = (input & 0x000000000000ffff) >> 0;
    b = (input & 0x00000000ffff0000) >> 16;
    c = (input & 0x0000ffff00000000) >> 32;
    d = (input & 0xffff000000000000) >> 48;

    result = (a << 48) | (b << 32) | (c << 16) | (d << 0);
    return (result);
}
#endif /* FIX_UNALIGNED_VAX_FP */

/* Per-opcode description used by unaligned_fixup(). */
struct unaligned_fixup_data {
    const char *type;	/* opcode name (or printf format for unknown) */
    int fixable;		/* fixable, 0 if fixup not supported */
    int size;		/* size, 0 if unknown */
    int acc;		/* useracc type; B_READ or B_WRITE */
};

#define	UNKNOWN()	{ "0x%lx", 0, 0, 0 }
#define	FIX_LD(n,s)	{ n, 1, s, B_READ }
#define	FIX_ST(n,s)	{ n, 1, s, B_WRITE }
#define	NOFIX_LD(n,s)	{ n, 0, s, B_READ }
#define	NOFIX_ST(n,s)	{ n, 0, s, B_WRITE }

/*
 * Attempt to fix up a user-mode unaligned load or store.
 *
 * va:     the unaligned virtual address accessed
 * opcode: the major opcode of the faulting instruction
 * reg:    the target/source register number
 * p:      the faulting process
 *
 * Returns 0 if the access was fixed up (or is being retried), or the
 * signal (SIGSEGV/SIGBUS) to deliver to the process.
 */
int
unaligned_fixup(u_long va, u_long opcode, u_long reg, struct proc *p)
{
    const struct unaligned_fixup_data tab_unknown[1] = {
        UNKNOWN(),
    };
    const struct unaligned_fixup_data tab_0c[0x02] = {
        FIX_LD("ldwu", 2),	FIX_ST("stw", 2),
    };
    const struct unaligned_fixup_data tab_20[0x10] = {
#ifdef FIX_UNALIGNED_VAX_FP
        FIX_LD("ldf", 4),	FIX_LD("ldg", 8),
#else
        NOFIX_LD("ldf", 4),	NOFIX_LD("ldg", 8),
#endif
        FIX_LD("lds", 4),	FIX_LD("ldt", 8),
#ifdef FIX_UNALIGNED_VAX_FP
        FIX_ST("stf", 4),	FIX_ST("stg", 8),
#else
        NOFIX_ST("stf", 4),	NOFIX_ST("stg", 8),
#endif
        FIX_ST("sts", 4),	FIX_ST("stt", 8),
        FIX_LD("ldl", 4),	FIX_LD("ldq", 8),
        NOFIX_LD("ldl_c", 4),	NOFIX_LD("ldq_c", 8),
        FIX_ST("stl", 4),	FIX_ST("stq", 8),
        NOFIX_ST("stl_c", 4),	NOFIX_ST("stq_c", 8),
    };
    const struct unaligned_fixup_data *selected_tab;
    int doprint, dofix, dosigbus, signal;
    unsigned long *regptr, longdata;
    int intdata;		/* signed to get extension when storing */
    u_int16_t worddata;	/* unsigned to _avoid_ extension */

    /*
     * Read USP into frame in case it's the register to be modified.
     * This keeps us from having to check for it in lots of places
     * later.
     */
    p->p_md.md_tf->tf_regs[FRAME_SP] = alpha_pal_rdusp();

    /*
     * Figure out what actions to take.
     *
     * XXX In the future, this should have a per-process component
     * as well.
     */
    doprint = alpha_unaligned_print;
    dofix = alpha_unaligned_fix;
    dosigbus = alpha_unaligned_sigbus;

    /*
     * Find out which opcode it is.  Arrange to have the opcode
     * printed if it's an unknown opcode.
     */
    if (opcode >= 0x0c && opcode <= 0x0d)
        selected_tab = &tab_0c[opcode - 0x0c];
    else if (opcode >= 0x20 && opcode <= 0x2f)
        selected_tab = &tab_20[opcode - 0x20];
    else
        selected_tab = tab_unknown;

    /*
     * See if the user can access the memory in question.
     * If it's an unknown opcode, we don't know whether to
     * read or write, so we don't check.
     *
     * We adjust the PC backwards so that the instruction will
     * be re-run.
     */
    if (selected_tab->size != 0 &&
        !uvm_useracc((caddr_t)va, selected_tab->size, selected_tab->acc)) {
        p->p_md.md_tf->tf_regs[FRAME_PC] -= 4;
        signal = SIGSEGV;
        goto out;
    }

    /*
     * If we're supposed to be noisy, squawk now.
     */
    if (doprint) {
        uprintf(
            "pid %d (%s): unaligned access: "
            "va=0x%lx pc=0x%lx ra=0x%lx sp=0x%lx op=",
            p->p_pid, p->p_comm, va,
            p->p_md.md_tf->tf_regs[FRAME_PC] - 4,
            p->p_md.md_tf->tf_regs[FRAME_RA],
            p->p_md.md_tf->tf_regs[FRAME_SP]);
        /* Format string comes from the fixed tables above, never
         * from user input ("0x%lx" only for the unknown case). */
        uprintf(selected_tab->type,opcode);
        uprintf("\n");
    }

    /*
     * If we should try to fix it and know how, give it a shot.
     *
     * We never allow bad data to be unknowingly used by the
     * user process.  That is, if we decide not to fix up an
     * access we cause a SIGBUS rather than letting the user
     * process go on without warning.
     *
     * If we're trying to do a fixup, we assume that things
     * will be botched.  If everything works out OK,
     * unaligned_{load,store}_* clears the signal flag.
     */
    signal = SIGBUS;
    if (dofix && selected_tab->fixable) {
        switch (opcode) {
        case 0x0c:		/* ldwu */
            /* XXX ONLY WORKS ON LITTLE-ENDIAN ALPHA */
            unaligned_load_integer(worddata);
            break;

        case 0x0d:		/* stw */
            /* XXX ONLY WORKS ON LITTLE-ENDIAN ALPHA */
            unaligned_store_integer(worddata);
            break;

#ifdef FIX_UNALIGNED_VAX_FP
        case 0x20:		/* ldf */
            unaligned_load_floating(intdata, Ffloat_to_reg);
            break;

        case 0x21:		/* ldg */
            unaligned_load_floating(longdata, Gfloat_reg_cvt);
            break;
#endif

        case 0x22:		/* lds */
            unaligned_load_floating(intdata, Sfloat_to_reg);
            break;

        case 0x23:		/* ldt */
            unaligned_load_floating(longdata, Tfloat_reg_cvt);
            break;

#ifdef FIX_UNALIGNED_VAX_FP
        case 0x24:		/* stf */
            unaligned_store_floating(intdata, reg_to_Ffloat);
            break;

        case 0x25:		/* stg */
            unaligned_store_floating(longdata, Gfloat_reg_cvt);
            break;
#endif

        case 0x26:		/* sts */
            unaligned_store_floating(intdata, reg_to_Sfloat);
            break;

        case 0x27:		/* stt */
            unaligned_store_floating(longdata, Tfloat_reg_cvt);
            break;

        case 0x28:		/* ldl */
            unaligned_load_integer(intdata);
            break;

        case 0x29:		/* ldq */
            unaligned_load_integer(longdata);
            break;

        case 0x2c:		/* stl */
            unaligned_store_integer(intdata);
            break;

        case 0x2d:		/* stq */
            unaligned_store_integer(longdata);
            break;

#ifdef DIAGNOSTIC
        default:
            panic("unaligned_fixup: can't get here");
#endif
        }
    }

    /*
     * Force SIGBUS if requested.
     */
    if (dosigbus)
        signal = SIGBUS;

out:
    /*
     * Write back USP.
     */
    alpha_pal_wrusp(p->p_md.md_tf->tf_regs[FRAME_SP]);

    return (signal);
}

/*
 * Reserved/unimplemented instruction (opDec fault) handler
 *
 * Argument is the process that caused it.  No useful information
 * is passed to the trap handler other than the fault type.  The
 * address of the instruction that caused the fault is 4 less than
 * the PC stored in the trap frame.
 *
 * If the instruction is emulated successfully, this function returns 0.
 * Otherwise, this function returns the signal to deliver to the process,
 * and fills in *ucodep with the code to be delivered.
 */
int
handle_opdec(struct proc *p, u_int64_t *ucodep)
{
    alpha_instruction inst;
    register_t *regptr, memaddr;
    u_int64_t inst_pc;
    int sig;

    /*
     * Read USP into frame in case it's going to be used or modified.
     * This keeps us from having to check for it in lots of places
     * later.
     */
    p->p_md.md_tf->tf_regs[FRAME_SP] = alpha_pal_rdusp();

    inst_pc = memaddr = p->p_md.md_tf->tf_regs[FRAME_PC] - 4;
    if (copyin((caddr_t)inst_pc, &inst, sizeof (inst)) != 0) {
        /*
         * really, this should never happen, but in case it
         * does we handle it.
         */
        printf("WARNING: handle_opdec() couldn't fetch instruction\n");
        goto sigsegv;
    }

    switch (inst.generic_format.opcode) {
    case op_ldbu:
    case op_ldwu:
    case op_stw:
    case op_stb:
        /* Compute the effective address: Rb + displacement. */
        regptr = irp(p, inst.mem_format.rb);
        if (regptr != NULL)
            memaddr = *regptr;
        else
            memaddr = 0;
        memaddr += inst.mem_format.displacement;

        regptr = irp(p, inst.mem_format.ra);

        /* Word accesses may also be unaligned; hand those off. */
        if (inst.mem_format.opcode == op_ldwu ||
            inst.mem_format.opcode == op_stw) {
            if (memaddr & 0x01) {
                sig = unaligned_fixup(memaddr,
                    inst.mem_format.opcode,
                    inst.mem_format.ra, p);
                if (sig)
                    goto unaligned_fixup_sig;
                break;
            }
        }

        if (inst.mem_format.opcode == op_ldbu) {
            u_int8_t b;

            /* XXX ONLY WORKS ON LITTLE-ENDIAN ALPHA */
            if (copyin((caddr_t)memaddr, &b, sizeof (b)) != 0)
                goto sigsegv;
            if (regptr != NULL)
                *regptr = b;
        } else if (inst.mem_format.opcode == op_ldwu) {
            u_int16_t w;

            /* XXX ONLY WORKS ON LITTLE-ENDIAN ALPHA */
            if (copyin((caddr_t)memaddr, &w, sizeof (w)) != 0)
                goto sigsegv;
            if (regptr != NULL)
                *regptr = w;
        } else if (inst.mem_format.opcode == op_stw) {
            u_int16_t w;

            /* XXX ONLY WORKS ON LITTLE-ENDIAN ALPHA */
            w = (regptr != NULL) ? *regptr : 0;
            if (copyout(&w, (caddr_t)memaddr, sizeof (w)) != 0)
                goto sigsegv;
        } else if (inst.mem_format.opcode == op_stb) {
            u_int8_t b;

            /* XXX ONLY WORKS ON LITTLE-ENDIAN ALPHA */
            b = (regptr != NULL) ? *regptr : 0;
            if (copyout(&b, (caddr_t)memaddr, sizeof (b)) != 0)
                goto sigsegv;
        }
        break;

    case op_intmisc:
        /* sextb: sign-extend byte (Ra must be the zero register). */
        if (inst.operate_generic_format.function == op_sextb &&
            inst.operate_generic_format.ra == 31) {
            int8_t b;

            if (inst.operate_generic_format.is_lit) {
                b = inst.operate_lit_format.literal;
            } else {
                if (inst.operate_reg_format.sbz != 0)
                    goto sigill;
                regptr = irp(p, inst.operate_reg_format.rb);
                b = (regptr != NULL) ? *regptr : 0;
            }

            regptr = irp(p, inst.operate_generic_format.rc);
            if (regptr != NULL)
                *regptr = b;
            break;
        }
        /* sextw: sign-extend word (Ra must be the zero register). */
        if (inst.operate_generic_format.function == op_sextw &&
            inst.operate_generic_format.ra == 31) {
            int16_t w;

            if (inst.operate_generic_format.is_lit) {
                w = inst.operate_lit_format.literal;
            } else {
                if (inst.operate_reg_format.sbz != 0)
                    goto sigill;
                regptr = irp(p, inst.operate_reg_format.rb);
                w = (regptr != NULL) ? *regptr : 0;
            }

            regptr = irp(p, inst.operate_generic_format.rc);
            if (regptr != NULL)
                *regptr = w;
            break;
        }
        goto sigill;

    default:
        goto sigill;
    }

    /*
     * Write back USP.  Note that in the error cases below,
     * nothing will have been successfully modified so we don't
     * have to write it out.
     */
    alpha_pal_wrusp(p->p_md.md_tf->tf_regs[FRAME_SP]);

    return (0);

sigill:
    *ucodep = ALPHA_IF_CODE_OPDEC;		/* trap type */
    return (SIGILL);

sigsegv:
    sig = SIGSEGV;
    p->p_md.md_tf->tf_regs[FRAME_PC] = inst_pc;	/* re-run instr. */
unaligned_fixup_sig:
    *ucodep = memaddr;			/* faulting address */
    return (sig);
}