xref: /qemu/target/ppc/mem_helper.c (revision 33848cee)
/*
 *  PowerPC memory access emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"

#include "helper_regs.h"
#include "exec/cpu_ldst.h"

//#define DEBUG_OP

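/* The cpu_ld*/st* accessors below use the target's default byte order
 * (TARGET_WORDS_BIGENDIAN).  When the guest's MSR[LE] selects the
 * opposite order, the helpers swap the data by hand. */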
static inline bool needs_byteswap(const CPUPPCState *env)
{
#if defined(TARGET_WORDS_BIGENDIAN)
  return msr_le;
#else
  return !msr_le;
#endif
}

/*****************************************************************************/
/* Memory loads and stores */

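/* Compute addr + arg, truncating the result to 32 bits when the CPU is
 * not in 64-bit mode, as PowerPC effective-address arithmetic requires. */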
static inline target_ulong addr_add(CPUPPCState *env, target_ulong addr,
                                    target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_is_64bit(env, env->msr)) {
        return (uint32_t)(addr + arg);
    } else
#endif
    {
        return addr + arg;
    }
}

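/* Load/Store Multiple Word: lmw loads GPRs reg through r31 from
 * consecutive words starting at addr, and stmw stores them back.
 * For example, "lmw r29, 0(r1)" loads r29, r30 and r31 from three
 * consecutive words at the address in r1. */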
void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (needs_byteswap(env)) {
            env->gpr[reg] = bswap32(cpu_ldl_data_ra(env, addr, GETPC()));
        } else {
            env->gpr[reg] = cpu_ldl_data_ra(env, addr, GETPC());
        }
        addr = addr_add(env, addr, 4);
    }
}

void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (needs_byteswap(env)) {
            cpu_stl_data_ra(env, addr, bswap32((uint32_t)env->gpr[reg]),
                            GETPC());
        } else {
            cpu_stl_data_ra(env, addr, (uint32_t)env->gpr[reg], GETPC());
        }
        addr = addr_add(env, addr, 4);
    }
}

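/* Load String Word (lswi/lswx): copy nb bytes from memory into GPRs
 * starting at reg, four bytes per register and wrapping from r31 to r0.
 * A trailing partial register is filled from its most significant byte
 * down and the remaining bytes are zeroed. */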
static void do_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                   uint32_t reg, uintptr_t raddr)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        env->gpr[reg] = cpu_ldl_data_ra(env, addr, raddr);
        reg = (reg + 1) % 32;
        addr = addr_add(env, addr, 4);
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            env->gpr[reg] |= cpu_ldub_data_ra(env, addr, raddr) << sh;
            addr = addr_add(env, addr, 1);
        }
    }
}

void helper_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb, uint32_t reg)
{
    do_lsw(env, addr, nb, reg, GETPC());
}

/* The PPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
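/* For example, lswx with XER[bc] = 9 uses (9 + 3) / 4 = 3 registers,
 * rD, rD+1 and rD+2 (modulo 32); if rA (when non-zero) or rB falls in
 * that range, an invalid-form program interrupt is raised instead. */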
void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                 uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        int num_used_regs = (xer_bc + 3) / 4;
        if (unlikely((ra != 0 && lsw_reg_in_range(reg, num_used_regs, ra)) ||
                     lsw_reg_in_range(reg, num_used_regs, rb))) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_LSWX, GETPC());
        } else {
            do_lsw(env, addr, xer_bc, reg, GETPC());
        }
    }
}

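/* Store String Word (stswi/stswx): the store counterpart of do_lsw,
 * writing nb bytes taken from successive GPRs, most significant byte
 * first for a trailing partial register. */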
void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                 uint32_t reg)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        cpu_stl_data_ra(env, addr, env->gpr[reg], GETPC());
        reg = (reg + 1) % 32;
        addr = addr_add(env, addr, 4);
    }
    if (unlikely(nb > 0)) {
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            cpu_stb_data_ra(env, addr, (env->gpr[reg] >> sh) & 0xFF, GETPC());
            addr = addr_add(env, addr, 1);
        }
    }
}

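/* Data Cache Block Zero: clear one cache line worth of memory.  Any
 * reservation on that line is cancelled, and the line is zeroed either
 * through a direct host pointer (fast path) or with 8-byte stores. */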
void helper_dcbz(CPUPPCState *env, target_ulong addr, uint32_t opcode)
{
    target_ulong mask, dcbz_size = env->dcache_line_size;
    uint32_t i;
    void *haddr;

#if defined(TARGET_PPC64)
    /* Check for dcbz vs dcbzl on 970 */
    if (env->excp_model == POWERPC_EXCP_970 &&
        !(opcode & 0x00200000) && ((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
        dcbz_size = 32;
    }
#endif

    /* Align address */
    mask = ~(dcbz_size - 1);
    addr &= mask;

    /* Check reservation */
    if ((env->reserve_addr & mask) == (addr & mask)) {
        env->reserve_addr = (target_ulong)-1ULL;
    }

    /* Try fast path translate */
    haddr = tlb_vaddr_to_host(env, addr, MMU_DATA_STORE, env->dmmu_idx);
    if (haddr) {
        memset(haddr, 0, dcbz_size);
    } else {
        /* Slow path */
        for (i = 0; i < dcbz_size; i += 8) {
            cpu_stq_data_ra(env, addr + i, 0, GETPC());
        }
    }
}

void helper_icbi(CPUPPCState *env, target_ulong addr)
{
    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line:
     * the PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To make sure that is the case,
     * do the load "by hand".
     */
    cpu_ldl_data_ra(env, addr, GETPC());
}

/* XXX: to be tested */
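/* Load String and Compare Byte Indexed (lscbx, POWER legacy): load up to
 * XER[bc] bytes into successive GPRs (wrapping at r31), packing each
 * register from its most significant byte down.  rA (when non-zero) and
 * rB are skipped rather than overwritten, and the transfer stops as soon
 * as a byte equal to XER[cmp] has been loaded. */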
target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                          uint32_t ra, uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = cpu_ldub_data_ra(env, addr, GETPC());
        addr = addr_add(env, addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp)) {
            break;
        }
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}

/*****************************************************************************/
/* Altivec extension helpers */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/* We use msr_le to determine index ordering in a vector.  However,
   byteswapping is not simply controlled by msr_le: we also need to take
   the endianness of the target into account, e.g. for the little-endian
   PPC64 user-mode target. */

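/* For example, lvewx on an address with (addr & 0xf) == 4 selects word
 * element 1 of the vector in big-endian mode, and element 4 - 1 - 1 = 2
 * when MSR[LE] is set; the loaded word itself is then byteswapped only
 * if needs_byteswap() says the guest and target byte orders differ. */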
#define LVE(name, access, swap, element)                        \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,          \
                       target_ulong addr)                       \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX * (n_elems - 1);                    \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if (msr_le) {                                           \
            index = n_elems - index - 1;                        \
        }                                                       \
                                                                \
        if (needs_byteswap(env)) {                              \
            r->element[LO_IDX ? index : (adjust - index)] =     \
                swap(access(env, addr, GETPC()));               \
        } else {                                                \
            r->element[LO_IDX ? index : (adjust - index)] =     \
                access(env, addr, GETPC());                     \
        }                                                       \
    }
#define I(x) (x)
LVE(lvebx, cpu_ldub_data_ra, I, u8)
LVE(lvehx, cpu_lduw_data_ra, bswap16, u16)
LVE(lvewx, cpu_ldl_data_ra, bswap32, u32)
#undef I
#undef LVE

#define STVE(name, access, swap, element)                               \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,                  \
                       target_ulong addr)                               \
    {                                                                   \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        int adjust = HI_IDX * (n_elems - 1);                            \
        int sh = sizeof(r->element[0]) >> 1;                            \
        int index = (addr & 0xf) >> sh;                                 \
        if (msr_le) {                                                   \
            index = n_elems - index - 1;                                \
        }                                                               \
                                                                        \
        if (needs_byteswap(env)) {                                      \
            access(env, addr, swap(r->element[LO_IDX ? index :          \
                                              (adjust - index)]),       \
                        GETPC());                                       \
        } else {                                                        \
            access(env, addr, r->element[LO_IDX ? index :               \
                                         (adjust - index)], GETPC());   \
        }                                                               \
    }
#define I(x) (x)
STVE(stvebx, cpu_stb_data_ra, I, u8)
STVE(stvehx, cpu_stw_data_ra, bswap16, u16)
STVE(stvewx, cpu_stl_data_ra, bswap32, u32)
#undef I
#undef STVE

#undef HI_IDX
#undef LO_IDX

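/* Transactional Memory (HTM) helper.  QEMU does not implement real
 * transactions; tbegin reports an immediate, persistent failure so that
 * well-behaved guest software branches to its fallback path instead of
 * retrying. */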
void helper_tbegin(CPUPPCState *env)
{
    /* As a degenerate implementation, always fail tbegin.  The reason
     * given is "Nesting overflow".  The "persistent" bit is set,
     * providing a hint to the error handler to not retry.  The TFIAR
     * captures the address of the failure, which is this tbegin
     * instruction.  Instruction execution will continue with the
     * next instruction in memory, which is precisely what we want.
     */

    env->spr[SPR_TEXASR] =
        (1ULL << TEXASR_FAILURE_PERSISTENT) |
        (1ULL << TEXASR_NESTING_OVERFLOW) |
        (msr_hv << TEXASR_PRIVILEGE_HV) |
        (msr_pr << TEXASR_PRIVILEGE_PR) |
        (1ULL << TEXASR_FAILURE_SUMMARY) |
        (1ULL << TEXASR_TFIAR_EXACT);
    env->spr[SPR_TFIAR] = env->nip | (msr_hv << 1) | msr_pr;
    env->spr[SPR_TFHAR] = env->nip + 4;
    env->crf[0] = 0xA; /* 0b1010 = transaction failure */
}