/*
 * PowerPC memory access emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "exec/helper-proto.h"
#include "helper_regs.h"
#include "exec/cpu_ldst.h"
#include "tcg.h"
#include "internal.h"
#include "qemu/atomic128.h"

/* #define DEBUG_OP */

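/*
 * An access needs to be byte swapped when the endianness selected by
 * the guest's MSR[LE] differs from the endianness this target binary
 * was built for (TARGET_WORDS_BIGENDIAN).
 */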
static inline bool needs_byteswap(const CPUPPCState *env)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return msr_le;
#else
    return !msr_le;
#endif
}

/*****************************************************************************/
/* Memory loads and stores */

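/*
 * Advance an effective address, wrapping the result to 32 bits when
 * the CPU is not currently in 64-bit mode.
 */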
static inline target_ulong addr_add(CPUPPCState *env, target_ulong addr,
                                    target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_is_64bit(env, env->msr)) {
        return (uint32_t)(addr + arg);
    } else
#endif
    {
        return addr + arg;
    }
}

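/*
 * lmw/stmw: load/store multiple word. Transfer GPRs reg..31 to or
 * from consecutive words starting at addr, byte swapping each word
 * when guest and target endianness disagree.
 */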
void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (needs_byteswap(env)) {
            env->gpr[reg] = bswap32(cpu_ldl_data_ra(env, addr, GETPC()));
        } else {
            env->gpr[reg] = cpu_ldl_data_ra(env, addr, GETPC());
        }
        addr = addr_add(env, addr, 4);
    }
}

void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (needs_byteswap(env)) {
            cpu_stl_data_ra(env, addr, bswap32((uint32_t)env->gpr[reg]),
                            GETPC());
        } else {
            cpu_stl_data_ra(env, addr, (uint32_t)env->gpr[reg], GETPC());
        }
        addr = addr_add(env, addr, 4);
    }
}

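/*
 * Load string word: copy nb bytes into successive GPRs starting at
 * reg (wrapping from r31 to r0), a word at a time. A final partial
 * word is left-justified in its register with the remaining low-order
 * bytes zeroed; e.g. nb = 6 fills gpr[reg] from the first four bytes
 * and puts the last two bytes into the top 16 bits of the next GPR.
 */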
static void do_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                   uint32_t reg, uintptr_t raddr)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        env->gpr[reg] = cpu_ldl_data_ra(env, addr, raddr);
        reg = (reg + 1) % 32;
        addr = addr_add(env, addr, 4);
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            env->gpr[reg] |= cpu_ldub_data_ra(env, addr, raddr) << sh;
            addr = addr_add(env, addr, 1);
        }
    }
}

void helper_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb, uint32_t reg)
{
    do_lsw(env, addr, nb, reg, GETPC());
}

/*
 * The PPC32 specification says we must generate an exception if rA is
 * in the range of registers to be loaded. On the other hand, IBM says
 * this is valid, but rA won't be loaded. For now, I'll follow the
 * spec...
 */
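/*
 * For example, lswx with reg = 29 and XER[bc] = 12 uses r29, r30 and
 * r31 as destinations, so an rA or rB of 29, 30 or 31 raises the
 * program interrupt below.
 */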
void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                 uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        int num_used_regs = DIV_ROUND_UP(xer_bc, 4);
        if (unlikely((ra != 0 && lsw_reg_in_range(reg, num_used_regs, ra)) ||
                     lsw_reg_in_range(reg, num_used_regs, rb))) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_LSWX, GETPC());
        } else {
            do_lsw(env, addr, xer_bc, reg, GETPC());
        }
    }
}

void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                 uint32_t reg)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        cpu_stl_data_ra(env, addr, env->gpr[reg], GETPC());
        reg = (reg + 1) % 32;
        addr = addr_add(env, addr, 4);
    }
    if (unlikely(nb > 0)) {
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            cpu_stb_data_ra(env, addr, (env->gpr[reg] >> sh) & 0xFF, GETPC());
            addr = addr_add(env, addr, 1);
        }
    }
}

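/*
 * Zero one data cache line. Any reservation covering the line is
 * cleared, then we try the fast path of zeroing host memory directly;
 * when the address cannot be resolved to a host pointer we fall back
 * to ordinary 64-bit guest stores.
 */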
static void dcbz_common(CPUPPCState *env, target_ulong addr,
                        uint32_t opcode, bool epid, uintptr_t retaddr)
{
    target_ulong mask, dcbz_size = env->dcache_line_size;
    uint32_t i;
    void *haddr;
    int mmu_idx = epid ? PPC_TLB_EPID_STORE : env->dmmu_idx;

#if defined(TARGET_PPC64)
    /* Check for dcbz vs dcbzl on 970 */
    if (env->excp_model == POWERPC_EXCP_970 &&
        !(opcode & 0x00200000) && ((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
        dcbz_size = 32;
    }
#endif

    /* Align address */
    mask = ~(dcbz_size - 1);
    addr &= mask;

    /* Check reservation */
    if ((env->reserve_addr & mask) == (addr & mask)) {
        env->reserve_addr = (target_ulong)-1ULL;
    }

    /* Try fast path translate */
    haddr = tlb_vaddr_to_host(env, addr, MMU_DATA_STORE, mmu_idx);
    if (haddr) {
        memset(haddr, 0, dcbz_size);
    } else {
        /* Slow path */
        for (i = 0; i < dcbz_size; i += 8) {
            if (epid) {
#if !defined(CONFIG_USER_ONLY)
                /* Does not make sense on USER_ONLY config */
                cpu_stq_eps_ra(env, addr + i, 0, retaddr);
#endif
            } else {
                cpu_stq_data_ra(env, addr + i, 0, retaddr);
            }
        }
    }
}

void helper_dcbz(CPUPPCState *env, target_ulong addr, uint32_t opcode)
{
    dcbz_common(env, addr, opcode, false, GETPC());
}

void helper_dcbzep(CPUPPCState *env, target_ulong addr, uint32_t opcode)
{
    dcbz_common(env, addr, opcode, true, GETPC());
}

void helper_icbi(CPUPPCState *env, target_ulong addr)
{
    addr &= ~(env->dcache_line_size - 1);
    /*
     * Invalidate one cache line:
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    cpu_ldl_data_ra(env, addr, GETPC());
}

void helper_icbiep(CPUPPCState *env, target_ulong addr)
{
#if !defined(CONFIG_USER_ONLY)
    /* See comments above */
    addr &= ~(env->dcache_line_size - 1);
    cpu_ldl_epl_ra(env, addr, GETPC());
#endif
}

/* XXX: to be tested */
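/*
 * lscbx (load string and compare byte indexed): load up to XER[bc]
 * bytes into successive GPRs, four bytes per register starting from
 * the most significant byte, skipping rA (if non-zero) and rB as
 * destinations, and stopping early when a byte matches XER[cmp]. The
 * loop count reached is returned so the caller can update the XER
 * byte count.
 */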
target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                          uint32_t ra, uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = cpu_ldub_data_ra(env, addr, GETPC());
        addr = addr_add(env, addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp)) {
            break;
        }
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}

#ifdef TARGET_PPC64
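/*
 * 16-byte (lq/stq) accesses for the parallel (MTTCG) case. The
 * translator only calls these helpers when a genuinely atomic 128-bit
 * access is available on the host; otherwise it raises EXCP_ATOMIC
 * and the access is retried with all other vCPUs stopped.
 */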
uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr,
                               uint32_t opidx)
{
    Int128 ret;

    /* We will have raised EXCP_ATOMIC from the translator. */
    assert(HAVE_ATOMIC128);
    ret = helper_atomic_ldo_le_mmu(env, addr, opidx, GETPC());
    env->retxh = int128_gethi(ret);
    return int128_getlo(ret);
}

uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr,
                               uint32_t opidx)
{
    Int128 ret;

    /* We will have raised EXCP_ATOMIC from the translator. */
    assert(HAVE_ATOMIC128);
    ret = helper_atomic_ldo_be_mmu(env, addr, opidx, GETPC());
    env->retxh = int128_gethi(ret);
    return int128_getlo(ret);
}

void helper_stq_le_parallel(CPUPPCState *env, target_ulong addr,
                            uint64_t lo, uint64_t hi, uint32_t opidx)
{
    Int128 val;

    /* We will have raised EXCP_ATOMIC from the translator. */
    assert(HAVE_ATOMIC128);
    val = int128_make128(lo, hi);
    helper_atomic_sto_le_mmu(env, addr, val, opidx, GETPC());
}

void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr,
                            uint64_t lo, uint64_t hi, uint32_t opidx)
{
    Int128 val;

    /* We will have raised EXCP_ATOMIC from the translator. */
    assert(HAVE_ATOMIC128);
    val = int128_make128(lo, hi);
    helper_atomic_sto_be_mmu(env, addr, val, opidx, GETPC());
}

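/*
 * stqcx.: the store succeeds only if the reservation established by
 * lqarx still matches addr and the 128-bit compare-and-swap against
 * the remembered reserve_val/reserve_val2 finds the memory unchanged.
 * The reservation is always cleared, and the CR0 value to set (EQ on
 * success, plus the current SO bit) is returned.
 */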
uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr,
                                  uint64_t new_lo, uint64_t new_hi,
                                  uint32_t opidx)
{
    bool success = false;

    /* We will have raised EXCP_ATOMIC from the translator. */
    assert(HAVE_CMPXCHG128);

    if (likely(addr == env->reserve_addr)) {
        Int128 oldv, cmpv, newv;

        cmpv = int128_make128(env->reserve_val2, env->reserve_val);
        newv = int128_make128(new_lo, new_hi);
        oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv,
                                             opidx, GETPC());
        success = int128_eq(oldv, cmpv);
    }
    env->reserve_addr = -1;
    return env->so + success * CRF_EQ;
}

uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
                                  uint64_t new_lo, uint64_t new_hi,
                                  uint32_t opidx)
{
    bool success = false;

    /* We will have raised EXCP_ATOMIC from the translator. */
    assert(HAVE_CMPXCHG128);

    if (likely(addr == env->reserve_addr)) {
        Int128 oldv, cmpv, newv;

        cmpv = int128_make128(env->reserve_val2, env->reserve_val);
        newv = int128_make128(new_lo, new_hi);
        oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv,
                                             opidx, GETPC());
        success = int128_eq(oldv, cmpv);
    }
    env->reserve_addr = -1;
    return env->so + success * CRF_EQ;
}
#endif

/*****************************************************************************/
/* Altivec extension helpers */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/*
 * We use msr_le to determine index ordering in a vector. However,
 * byteswapping is not simply controlled by msr_le. We also need to
 * take into account endianness of the target. This is done for the
 * little-endian PPC64 user-mode target.
 */

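/*
 * Example: for lvewx (u32 elements, n_elems = 4) an effective address
 * with (addr & 0xf) == 4 selects element index 1; with MSR[LE] set the
 * index becomes 4 - 1 - 1 = 2, and the loaded value itself is byte
 * swapped only when guest and target endianness differ.
 */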
#define LVE(name, access, swap, element)                            \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,              \
                       target_ulong addr)                           \
    {                                                                \
        size_t n_elems = ARRAY_SIZE(r->element);                     \
        int adjust = HI_IDX * (n_elems - 1);                         \
        int sh = sizeof(r->element[0]) >> 1;                         \
        int index = (addr & 0xf) >> sh;                              \
        if (msr_le) {                                                \
            index = n_elems - index - 1;                             \
        }                                                            \
                                                                     \
        if (needs_byteswap(env)) {                                   \
            r->element[LO_IDX ? index : (adjust - index)] =          \
                swap(access(env, addr, GETPC()));                    \
        } else {                                                     \
            r->element[LO_IDX ? index : (adjust - index)] =          \
                access(env, addr, GETPC());                          \
        }                                                            \
    }
#define I(x) (x)
LVE(lvebx, cpu_ldub_data_ra, I, u8)
LVE(lvehx, cpu_lduw_data_ra, bswap16, u16)
LVE(lvewx, cpu_ldl_data_ra, bswap32, u32)
#undef I
#undef LVE

#define STVE(name, access, swap, element)                           \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,              \
                       target_ulong addr)                           \
    {                                                                \
        size_t n_elems = ARRAY_SIZE(r->element);                     \
        int adjust = HI_IDX * (n_elems - 1);                         \
        int sh = sizeof(r->element[0]) >> 1;                         \
        int index = (addr & 0xf) >> sh;                              \
        if (msr_le) {                                                \
            index = n_elems - index - 1;                             \
        }                                                            \
                                                                     \
        if (needs_byteswap(env)) {                                   \
            access(env, addr, swap(r->element[LO_IDX ? index :       \
                                              (adjust - index)]),    \
                   GETPC());                                         \
        } else {                                                     \
            access(env, addr, r->element[LO_IDX ? index :            \
                                         (adjust - index)],          \
                   GETPC());                                         \
        }                                                            \
    }
#define I(x) (x)
STVE(stvebx, cpu_stb_data_ra, I, u8)
STVE(stvehx, cpu_stw_data_ra, bswap16, u16)
STVE(stvewx, cpu_stl_data_ra, bswap32, u32)
#undef I
#undef STVE

#ifdef TARGET_PPC64
#define GET_NB(rb) ((rb >> 56) & 0xFF)

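/*
 * lxvl/stxvl: transfer the number of bytes given in bits 0:7 of rb
 * (capped at 16) between memory and the left-hand end of the vector,
 * zeroing the rest of the register on load. The "ll" forms (lxvll,
 * stxvll) use the same left-to-right byte order regardless of MSR[LE],
 * whereas lxvl/stxvl follow the current endianness.
 */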
#define VSX_LXVL(name, lj)                                            \
void helper_##name(CPUPPCState *env, target_ulong addr,              \
                   ppc_vsr_t *xt, target_ulong rb)                   \
{                                                                     \
    ppc_vsr_t t;                                                      \
    uint64_t nb = GET_NB(rb);                                         \
    int i;                                                            \
                                                                      \
    t.s128 = int128_zero();                                           \
    if (nb) {                                                         \
        nb = (nb >= 16) ? 16 : nb;                                    \
        if (msr_le && !lj) {                                          \
            for (i = 16; i > 16 - nb; i--) {                          \
                t.VsrB(i - 1) = cpu_ldub_data_ra(env, addr, GETPC()); \
                addr = addr_add(env, addr, 1);                        \
            }                                                         \
        } else {                                                      \
            for (i = 0; i < nb; i++) {                                \
                t.VsrB(i) = cpu_ldub_data_ra(env, addr, GETPC());     \
                addr = addr_add(env, addr, 1);                        \
            }                                                         \
        }                                                             \
    }                                                                 \
    *xt = t;                                                          \
}

VSX_LXVL(lxvl, 0)
VSX_LXVL(lxvll, 1)
#undef VSX_LXVL

#define VSX_STXVL(name, lj)                                           \
void helper_##name(CPUPPCState *env, target_ulong addr,              \
                   ppc_vsr_t *xt, target_ulong rb)                   \
{                                                                     \
    target_ulong nb = GET_NB(rb);                                     \
    int i;                                                            \
                                                                      \
    if (!nb) {                                                        \
        return;                                                       \
    }                                                                 \
                                                                      \
    nb = (nb >= 16) ? 16 : nb;                                        \
    if (msr_le && !lj) {                                              \
        for (i = 16; i > 16 - nb; i--) {                              \
            cpu_stb_data_ra(env, addr, xt->VsrB(i - 1), GETPC());     \
            addr = addr_add(env, addr, 1);                            \
        }                                                             \
    } else {                                                          \
        for (i = 0; i < nb; i++) {                                    \
            cpu_stb_data_ra(env, addr, xt->VsrB(i), GETPC());         \
            addr = addr_add(env, addr, 1);                            \
        }                                                             \
    }                                                                 \
}

VSX_STXVL(stxvl, 0)
VSX_STXVL(stxvll, 1)
#undef VSX_STXVL
#undef GET_NB
#endif /* TARGET_PPC64 */

#undef HI_IDX
#undef LO_IDX

void helper_tbegin(CPUPPCState *env)
{
    /*
     * As a degenerate implementation, always fail tbegin. The reason
     * given is "Nesting overflow". The "persistent" bit is set,
     * providing a hint to the error handler to not retry. The TFIAR
     * captures the address of the failure, which is this tbegin
     * instruction. Instruction execution will continue with the next
     * instruction in memory, which is precisely what we want.
     */

    env->spr[SPR_TEXASR] =
        (1ULL << TEXASR_FAILURE_PERSISTENT) |
        (1ULL << TEXASR_NESTING_OVERFLOW) |
        (msr_hv << TEXASR_PRIVILEGE_HV) |
        (msr_pr << TEXASR_PRIVILEGE_PR) |
        (1ULL << TEXASR_FAILURE_SUMMARY) |
        (1ULL << TEXASR_TFIAR_EXACT);
    env->spr[SPR_TFIAR] = env->nip | (msr_hv << 1) | msr_pr;
    env->spr[SPR_TFHAR] = env->nip + 4;
    env->crf[0] = 0xA; /* 0b1010 = transaction failure */
}