xref: /qemu/target/alpha/helper.c (revision 7271a819)
/*
 *  Alpha emulation cpu helpers for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "fpu/softfloat.h"
#include "exec/helper-proto.h"


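/* Move the single bit selected by mask SRC to the position of mask DST.
   Both masks must be powers of two; the bit is relocated by multiplying
   or dividing by their ratio.  */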
#define CONVERT_BIT(X, SRC, DST) \
    (SRC > DST ? (X) / (SRC / DST) & (DST) : ((X) & SRC) * (DST / SRC))

uint64_t cpu_alpha_load_fpcr (CPUAlphaState *env)
{
    return (uint64_t)env->fpcr << 32;
}

void cpu_alpha_store_fpcr (CPUAlphaState *env, uint64_t val)
{
    uint32_t fpcr = val >> 32;
    uint32_t t = 0;

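    /* Fold each trap-disable bit (FPCR_*D) down onto its matching status
       bit position; the complement of the result is the set of exceptions
       whose traps remain enabled.  */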
    t |= CONVERT_BIT(fpcr, FPCR_INED, FPCR_INE);
    t |= CONVERT_BIT(fpcr, FPCR_UNFD, FPCR_UNF);
    t |= CONVERT_BIT(fpcr, FPCR_OVFD, FPCR_OVF);
    t |= CONVERT_BIT(fpcr, FPCR_DZED, FPCR_DZE);
    t |= CONVERT_BIT(fpcr, FPCR_INVD, FPCR_INV);

    env->fpcr = fpcr;
    env->fpcr_exc_enable = ~t & FPCR_STATUS_MASK;

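    /* Map the dynamic rounding mode field onto softfloat's rounding
       constants.  */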
    switch (fpcr & FPCR_DYN_MASK) {
    case FPCR_DYN_NORMAL:
    default:
        t = float_round_nearest_even;
        break;
    case FPCR_DYN_CHOPPED:
        t = float_round_to_zero;
        break;
    case FPCR_DYN_MINUS:
        t = float_round_down;
        break;
    case FPCR_DYN_PLUS:
        t = float_round_up;
        break;
    }
    env->fpcr_dyn_round = t;

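    /* Results are flushed to zero only when underflow traps are disabled
       (UNFD) and UNDZ requests zero instead of a denormal; DNZ independently
       flushes denormal inputs.  */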
    env->fpcr_flush_to_zero = (fpcr & FPCR_UNFD) && (fpcr & FPCR_UNDZ);
    env->fp_status.flush_inputs_to_zero = (fpcr & FPCR_DNZ) != 0;
}

uint64_t helper_load_fpcr(CPUAlphaState *env)
{
    return cpu_alpha_load_fpcr(env);
}

void helper_store_fpcr(CPUAlphaState *env, uint64_t val)
{
    cpu_alpha_store_fpcr(env, val);
}

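/* In PALmode, integer registers 8-14 and 25 are replaced by the PAL shadow
   registers; otherwise the architectural register file is used directly.  */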
static uint64_t *cpu_alpha_addr_gr(CPUAlphaState *env, unsigned reg)
{
#ifndef CONFIG_USER_ONLY
    if (env->flags & ENV_FLAG_PAL_MODE) {
        if (reg >= 8 && reg <= 14) {
            return &env->shadow[reg - 8];
        } else if (reg == 25) {
            return &env->shadow[7];
        }
    }
#endif
    return &env->ir[reg];
}

uint64_t cpu_alpha_load_gr(CPUAlphaState *env, unsigned reg)
{
    return *cpu_alpha_addr_gr(env, reg);
}

void cpu_alpha_store_gr(CPUAlphaState *env, unsigned reg, uint64_t val)
{
    *cpu_alpha_addr_gr(env, reg) = val;
}

#if defined(CONFIG_USER_ONLY)
int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                               int rw, int mmu_idx)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);

    cs->exception_index = EXCP_MMFAULT;
    cpu->env.trap_arg0 = address;
    return 1;
}
#else
/* Returns the OSF/1 entMM failure indication, or -1 on success.  */
static int get_physical_address(CPUAlphaState *env, target_ulong addr,
                                int prot_need, int mmu_idx,
                                target_ulong *pphys, int *pprot)
{
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    target_long saddr = addr;
    target_ulong phys = 0;
    target_ulong L1pte, L2pte, L3pte;
    target_ulong pt, index;
    int prot = 0;
    int ret = MM_K_ACV;

    /* Handle physical accesses.  */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        ret = -1;
        goto exit;
    }

    /* Ensure that the virtual address is properly sign-extended from
       the last implemented virtual address bit.  */
    if (saddr >> TARGET_VIRT_ADDR_SPACE_BITS != saddr >> 63) {
        goto exit;
    }

    /* Translate the superpage.  */
    /* ??? When we do more than emulate Unix PALcode, we'll need to
       determine which KSEG is actually active.  */
    if (saddr < 0 && ((saddr >> 41) & 3) == 2) {
        /* User-space cannot access KSEG addresses.  */
        if (mmu_idx != MMU_KERNEL_IDX) {
            goto exit;
        }

        /* For the benefit of the Typhoon chipset, move bit 40 to bit 43.
           We would not do this if the 48-bit KSEG is enabled.  */
        phys = saddr & ((1ull << 40) - 1);
        phys |= (saddr & (1ull << 40)) << 3;

        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        ret = -1;
        goto exit;
    }

    /* Interpret the page table exactly like PALcode does.  */

    pt = env->ptbr;

    /* TODO: rather than using ldq_phys() to read the page table we should
     * use address_space_ldq() so that we can handle the case when
     * the page table read gives a bus fault, rather than ignoring it.
     * For the existing code the zero data that ldq_phys will return for
     * an access to invalid memory will result in our treating the page
     * table as invalid, which may even be the right behaviour.
     */

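    /* Each PTE keeps the PFN in its high 32 bits; bit 0 is the valid bit,
       and the per-mode read/write-enable bits are tested below via
       PTE_KRE/PTE_KWE shifted by the mmu index.  */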
    /* L1 page table read.  */
    index = (addr >> (TARGET_PAGE_BITS + 20)) & 0x3ff;
    L1pte = ldq_phys(cs->as, pt + index*8);

    if (unlikely((L1pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L1pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L1pte >> 32 << TARGET_PAGE_BITS;

    /* L2 page table read.  */
    index = (addr >> (TARGET_PAGE_BITS + 10)) & 0x3ff;
    L2pte = ldq_phys(cs->as, pt + index*8);

    if (unlikely((L2pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L2pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L2pte >> 32 << TARGET_PAGE_BITS;

    /* L3 page table read.  */
    index = (addr >> TARGET_PAGE_BITS) & 0x3ff;
    L3pte = ldq_phys(cs->as, pt + index*8);

    phys = L3pte >> 32 << TARGET_PAGE_BITS;
    if (unlikely((L3pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }

#if PAGE_READ != 1 || PAGE_WRITE != 2 || PAGE_EXEC != 4
# error page bits out of date
#endif

    /* Check access violations.  */
    if (L3pte & (PTE_KRE << mmu_idx)) {
        prot |= PAGE_READ | PAGE_EXEC;
    }
    if (L3pte & (PTE_KWE << mmu_idx)) {
        prot |= PAGE_WRITE;
    }
    if (unlikely((prot & prot_need) == 0 && prot_need)) {
        goto exit;
    }

    /* Check fault-on-operation violations.  */
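    /* The fault-on-read/write/execute bits sit in PTE bits 1..3, so shifting
       right by one lines them up with PAGE_READ/WRITE/EXEC and lets them
       knock out the matching permission.  */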
    prot &= ~(L3pte >> 1);
    ret = -1;
    if (unlikely((prot & prot_need) == 0)) {
        ret = (prot_need & PAGE_EXEC ? MM_K_FOE :
               prot_need & PAGE_WRITE ? MM_K_FOW :
               prot_need & PAGE_READ ? MM_K_FOR : -1);
    }

 exit:
    *pphys = phys;
    *pprot = prot;
    return ret;
}

hwaddr alpha_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    target_ulong phys;
    int prot, fail;

    fail = get_physical_address(&cpu->env, addr, 0, 0, &phys, &prot);
    return (fail >= 0 ? -1 : phys);
}

int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int rw,
                               int mmu_idx)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    target_ulong phys;
    int prot, fail;

    fail = get_physical_address(env, addr, 1 << rw, mmu_idx, &phys, &prot);
    if (unlikely(fail >= 0)) {
        cs->exception_index = EXCP_MMFAULT;
        env->trap_arg0 = addr;
        env->trap_arg1 = fail;
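        /* entMM's a2 argument: -1 for an instruction fetch, otherwise
           0 for a load or 1 for a store.  */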
        env->trap_arg2 = (rw == 2 ? -1 : rw);
        return 1;
    }

    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return 0;
}
#endif /* USER_ONLY */

void alpha_cpu_do_interrupt(CPUState *cs)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int i = cs->exception_index;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name = "<unknown>";

        switch (i) {
        case EXCP_RESET:
            name = "reset";
            break;
        case EXCP_MCHK:
            name = "mchk";
            break;
        case EXCP_SMP_INTERRUPT:
            name = "smp_interrupt";
            break;
        case EXCP_CLK_INTERRUPT:
            name = "clk_interrupt";
            break;
        case EXCP_DEV_INTERRUPT:
            name = "dev_interrupt";
            break;
        case EXCP_MMFAULT:
            name = "mmfault";
            break;
        case EXCP_UNALIGN:
            name = "unalign";
            break;
        case EXCP_OPCDEC:
            name = "opcdec";
            break;
        case EXCP_ARITH:
            name = "arith";
            break;
        case EXCP_FEN:
            name = "fen";
            break;
        case EXCP_CALL_PAL:
            name = "call_pal";
            break;
        }
        qemu_log("INT %6d: %s(%#x) cpu=%d pc=%016"
                 PRIx64 " sp=%016" PRIx64 "\n",
                 ++count, name, env->error_code, cs->cpu_index,
                 env->pc, env->ir[IR_SP]);
    }

    cs->exception_index = -1;

#if !defined(CONFIG_USER_ONLY)
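    /* Convert the architectural exception into an offset from the PALcode
       base (PALBR) and continue execution in PALmode at that entry point.  */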
    switch (i) {
    case EXCP_RESET:
        i = 0x0000;
        break;
    case EXCP_MCHK:
        i = 0x0080;
        break;
    case EXCP_SMP_INTERRUPT:
        i = 0x0100;
        break;
    case EXCP_CLK_INTERRUPT:
        i = 0x0180;
        break;
    case EXCP_DEV_INTERRUPT:
        i = 0x0200;
        break;
    case EXCP_MMFAULT:
        i = 0x0280;
        break;
    case EXCP_UNALIGN:
        i = 0x0300;
        break;
    case EXCP_OPCDEC:
        i = 0x0380;
        break;
    case EXCP_ARITH:
        i = 0x0400;
        break;
    case EXCP_FEN:
        i = 0x0480;
        break;
    case EXCP_CALL_PAL:
        i = env->error_code;
        /* There are 64 entry points for both privileged and unprivileged,
           with bit 0x80 indicating unprivileged.  Each entry point gets
           64 bytes to do its job.  */
        if (i & 0x80) {
            i = 0x2000 + (i - 0x80) * 64;
        } else {
            i = 0x1000 + i * 64;
        }
        break;
    default:
        cpu_abort(cs, "Unhandled CPU exception");
    }

    /* Remember where the exception happened.  Emulate real hardware in
       that the low bit of the PC indicates PALmode.  */
    env->exc_addr = env->pc | (env->flags & ENV_FLAG_PAL_MODE);

    /* Continue execution at the PALcode entry point.  */
    env->pc = env->palbr + i;

    /* Switch to PALmode.  */
    env->flags |= ENV_FLAG_PAL_MODE;
#endif /* !USER_ONLY */
}

bool alpha_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int idx = -1;

    /* We never take interrupts while in PALmode.  */
    if (env->flags & ENV_FLAG_PAL_MODE) {
        return false;
    }

    /* Fall through the switch, collecting the highest priority
       interrupt that isn't masked by the processor status IPL.  */
    /* ??? This hard-codes the OSF/1 interrupt levels.  */
    switch ((env->flags >> ENV_FLAG_PS_SHIFT) & PS_INT_MASK) {
    case 0 ... 3:
        if (interrupt_request & CPU_INTERRUPT_HARD) {
            idx = EXCP_DEV_INTERRUPT;
        }
        /* FALLTHRU */
    case 4:
        if (interrupt_request & CPU_INTERRUPT_TIMER) {
            idx = EXCP_CLK_INTERRUPT;
        }
        /* FALLTHRU */
    case 5:
        if (interrupt_request & CPU_INTERRUPT_SMP) {
            idx = EXCP_SMP_INTERRUPT;
        }
        /* FALLTHRU */
    case 6:
        if (interrupt_request & CPU_INTERRUPT_MCHK) {
            idx = EXCP_MCHK;
        }
    }
    if (idx >= 0) {
        cs->exception_index = idx;
        env->error_code = 0;
        alpha_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}

void alpha_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                          int flags)
{
    static const char *linux_reg_names[] = {
        "v0 ", "t0 ", "t1 ", "t2 ", "t3 ", "t4 ", "t5 ", "t6 ",
        "t7 ", "s0 ", "s1 ", "s2 ", "s3 ", "s4 ", "s5 ", "fp ",
        "a0 ", "a1 ", "a2 ", "a3 ", "a4 ", "a5 ", "t8 ", "t9 ",
        "t10", "t11", "ra ", "t12", "at ", "gp ", "sp ", "zero",
    };
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int i;

    cpu_fprintf(f, "     PC  " TARGET_FMT_lx "      PS  %02x\n",
                env->pc, extract32(env->flags, ENV_FLAG_PS_SHIFT, 8));
    for (i = 0; i < 31; i++) {
        cpu_fprintf(f, "IR%02d %s " TARGET_FMT_lx " ", i,
                    linux_reg_names[i], cpu_alpha_load_gr(env, i));
        if ((i % 3) == 2) {
            cpu_fprintf(f, "\n");
        }
    }

    cpu_fprintf(f, "lock_a   " TARGET_FMT_lx " lock_v   " TARGET_FMT_lx "\n",
                env->lock_addr, env->lock_value);

    for (i = 0; i < 31; i++) {
        cpu_fprintf(f, "FIR%02d    " TARGET_FMT_lx " ", i,
                    *((uint64_t *)(&env->fir[i])));
        if ((i % 3) == 2) {
            cpu_fprintf(f, "\n");
        }
    }
    cpu_fprintf(f, "\n");
}

/* This should only be called from translate, via gen_excp.
   We expect that ENV->PC has already been updated.  */
void QEMU_NORETURN helper_excp(CPUAlphaState *env, int excp, int error)
{
    AlphaCPU *cpu = alpha_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    cs->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit(cs);
}

/* This may be called from any of the helpers to set up EXCEPTION_INDEX.  */
void QEMU_NORETURN dynamic_excp(CPUAlphaState *env, uintptr_t retaddr,
                                int excp, int error)
{
    AlphaCPU *cpu = alpha_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    cs->exception_index = excp;
    env->error_code = error;
    if (retaddr) {
        cpu_restore_state(cs, retaddr);
        /* Floating-point exceptions (our only users) point to the next PC.  */
        env->pc += 4;
    }
    cpu_loop_exit(cs);
}

void QEMU_NORETURN arith_excp(CPUAlphaState *env, uintptr_t retaddr,
                              int exc, uint64_t mask)
{
    env->trap_arg0 = exc;
    env->trap_arg1 = mask;
    dynamic_excp(env, retaddr, EXCP_ARITH, 0);
}