/*
 *  PowerPC CPU routines for qemu.
 *
 * Copyright (c) 2017 Nikunj A Dadhania, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "cpu-models.h"
#include "cpu-qom.h"
#include "exec/log.h"
#include "fpu/softfloat-helpers.h"
#include "mmu-hash64.h"
#include "helper_regs.h"
#include "sysemu/tcg.h"

target_ulong cpu_read_xer(const CPUPPCState *env)
{
    if (is_isa300(env)) {
        return env->xer | (env->so << XER_SO) |
            (env->ov << XER_OV) | (env->ca << XER_CA) |
            (env->ov32 << XER_OV32) | (env->ca32 << XER_CA32);
    }

    return env->xer | (env->so << XER_SO) | (env->ov << XER_OV) |
        (env->ca << XER_CA);
}
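
/*
 * Illustrative sketch (not upstream code; assumes the XER_* bit
 * positions from cpu.h): QEMU keeps SO, OV, CA and the ISA v3.00
 * OV32/CA32 mirrors in separate env fields so TCG can update them
 * cheaply, and cpu_read_xer() folds them back into the architectural
 * layout.  A caller wanting the summary-overflow bit from the
 * composed value could therefore write:
 *
 *     bool so = (cpu_read_xer(env) >> XER_SO) & 1;
 *
 * which yields the same result as reading env->so directly.
 */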

void cpu_write_xer(CPUPPCState *env, target_ulong xer)
{
    env->so = (xer >> XER_SO) & 1;
    env->ov = (xer >> XER_OV) & 1;
    env->ca = (xer >> XER_CA) & 1;
    /* Write all the flags; the isa300 check is applied on read-back */
    env->ov32 = (xer >> XER_OV32) & 1;
    env->ca32 = (xer >> XER_CA32) & 1;
    env->xer = xer & ~((1ul << XER_SO) |
                       (1ul << XER_OV) | (1ul << XER_CA) |
                       (1ul << XER_OV32) | (1ul << XER_CA32));
}
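
/*
 * Illustrative sketch (not upstream code): the two XER helpers are
 * inverses for the flag bits, so a round trip leaves the state
 * unchanged:
 *
 *     cpu_write_xer(env, cpu_read_xer(env));
 *
 * Note that OV32/CA32 are always stored here, while cpu_read_xer()
 * only exposes them again when is_isa300(env) is true.
 */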

void ppc_store_vscr(CPUPPCState *env, uint32_t vscr)
{
    env->vscr = vscr & ~(1u << VSCR_SAT);
    /* Which bit we set is completely arbitrary, but clear the rest.  */
    env->vscr_sat.u64[0] = vscr & (1u << VSCR_SAT);
    env->vscr_sat.u64[1] = 0;
    set_flush_to_zero((vscr >> VSCR_NJ) & 1, &env->vec_status);
}
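
/*
 * Illustrative sketch (not upstream code): VSCR.SAT is kept in a
 * 128-bit accumulator (vscr_sat) so that saturation can be recorded
 * with a wide OR instead of a read-modify-write of a single bit; any
 * non-zero value in either half means "saturated".  A hypothetical
 * helper could record saturation with something like:
 *
 *     env->vscr_sat.u64[0] |= saturated;
 */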

uint32_t ppc_get_vscr(CPUPPCState *env)
{
    uint32_t sat = (env->vscr_sat.u64[0] | env->vscr_sat.u64[1]) != 0;
    return env->vscr | (sat << VSCR_SAT);
}
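
/*
 * Illustrative sketch (not upstream code): OR-ing the two 64-bit
 * halves collapses the accumulator back into the single architectural
 * sticky bit, so
 *
 *     bool sat = (ppc_get_vscr(env) >> VSCR_SAT) & 1;
 *
 * is true iff saturation has been recorded since SAT was last cleared
 * via ppc_store_vscr().
 */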

/* The GDB stub can read and write the MSR... */
void ppc_store_msr(CPUPPCState *env, target_ulong value)
{
    hreg_store_msr(env, value, 0);
}

void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    CPUPPCState *env = &cpu->env;

    env->spr[SPR_LPCR] = val & pcc->lpcr_mask;
    /* The gtse bit affects hflags */
    hreg_compute_hflags(env);
}
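
/*
 * Illustrative sketch (not upstream code; LPCR_GTSE is assumed from
 * cpu.h): lpcr_mask is defined per CPU class, so LPCR bits a given
 * model does not implement are silently dropped on store.  For
 * example:
 *
 *     ppc_store_lpcr(cpu, env->spr[SPR_LPCR] | LPCR_GTSE);
 *
 * sets GTSE only if the class mask allows it, and hflags are then
 * recomputed because GTSE changes how guest translation shootdowns
 * are handled.
 */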

static inline void fpscr_set_rounding_mode(CPUPPCState *env)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}
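
/*
 * Illustrative note (not upstream code): fpscr_rn extracts the 2-bit
 * FPSCR[RN] field, and the switch above maps the Power ISA encoding
 * onto softfloat rounding modes:
 *
 *     RN = 0 -> float_round_nearest_even
 *     RN = 1 -> float_round_to_zero
 *     RN = 2 -> float_round_up      (toward +infinity)
 *     RN = 3 -> float_round_down    (toward -infinity)
 *
 * env->fp_status caches the result, so code that changes RN must call
 * this helper again, as ppc_store_fpscr() below does under TCG.
 */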

void ppc_store_fpscr(CPUPPCState *env, target_ulong val)
{
    val &= FPSCR_MTFS_MASK;
    if (val & FPSCR_IX) {
        /* Any invalid-operation sub-flag raises the VX summary bit */
        val |= FP_VX;
    }
    if ((val >> FPSCR_XX) & (val >> FPSCR_XE) & 0x1f) {
        /* An exception flag coincides with its enable bit: set FEX */
        val |= FP_FEX;
    }
    env->fpscr = val;
    if (tcg_enabled()) {
        fpscr_set_rounding_mode(env);
    }
}
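
/*
 * Illustrative note (not upstream code; assumes the FPSCR_* bit layout
 * from cpu.h): the FEX test works because the five exception flags
 * (FPSCR_VX down to FPSCR_XX) and the five enables (FPSCR_VE down to
 * FPSCR_XE) form two equally spaced 5-bit groups, so shifting both
 * groups down to bit 0 pairs each flag with its enable:
 *
 *     (val >> FPSCR_XX) & (val >> FPSCR_XE) & 0x1f
 *
 * is non-zero exactly when some exception flag is set while its
 * corresponding enable bit is also set.
 */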
127