1 /*
2 * MIPS emulation helpers for qemu.
3 *
4 * Copyright (c) 2004-2005 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/main-loop.h"
21 #include "cpu.h"
22 #include "internal.h"
23 #include "qemu/host-utils.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/memop.h"
28 #include "sysemu/kvm.h"
29 #include "fpu/softfloat.h"
30
31 /*****************************************************************************/
32 /* Exceptions processing helpers */
33
void helper_raise_exception_err(CPUMIPSState *env, uint32_t exception,
35 int error_code)
36 {
37 do_raise_exception_err(env, exception, error_code, 0);
38 }
39
void helper_raise_exception(CPUMIPSState *env, uint32_t exception)
41 {
42 do_raise_exception(env, exception, GETPC());
43 }
44
void helper_raise_exception_debug(CPUMIPSState *env)
46 {
47 do_raise_exception(env, EXCP_DEBUG, 0);
48 }
49
static void raise_exception(CPUMIPSState *env, uint32_t exception)
51 {
52 do_raise_exception(env, exception, 0);
53 }
54
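/*
 * Memory access wrappers used by the unaligned and multi-word helpers
 * below. In system mode the accessor is selected by mem_idx
 * (0 = kernel, 1 = supervisor, 2 = user, 3 = error level); in user-only
 * mode there is a single flat address space.
 */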
55 #if defined(CONFIG_USER_ONLY)
56 #define HELPER_LD(name, insn, type) \
57 static inline type do_##name(CPUMIPSState *env, target_ulong addr, \
58 int mem_idx, uintptr_t retaddr) \
59 { \
60 return (type) cpu_##insn##_data_ra(env, addr, retaddr); \
61 }
62 #else
63 #define HELPER_LD(name, insn, type) \
64 static inline type do_##name(CPUMIPSState *env, target_ulong addr, \
65 int mem_idx, uintptr_t retaddr) \
66 { \
67 switch (mem_idx) { \
68 case 0: return (type) cpu_##insn##_kernel_ra(env, addr, retaddr); \
69 case 1: return (type) cpu_##insn##_super_ra(env, addr, retaddr); \
70 default: \
71 case 2: return (type) cpu_##insn##_user_ra(env, addr, retaddr); \
72 case 3: return (type) cpu_##insn##_error_ra(env, addr, retaddr); \
73 } \
74 }
75 #endif
HELPER_LD(lw, ldl, int32_t)
77 #if defined(TARGET_MIPS64)
78 HELPER_LD(ld, ldq, int64_t)
79 #endif
80 #undef HELPER_LD
81
82 #if defined(CONFIG_USER_ONLY)
83 #define HELPER_ST(name, insn, type) \
84 static inline void do_##name(CPUMIPSState *env, target_ulong addr, \
85 type val, int mem_idx, uintptr_t retaddr) \
86 { \
87 cpu_##insn##_data_ra(env, addr, val, retaddr); \
88 }
89 #else
90 #define HELPER_ST(name, insn, type) \
91 static inline void do_##name(CPUMIPSState *env, target_ulong addr, \
92 type val, int mem_idx, uintptr_t retaddr) \
93 { \
94 switch (mem_idx) { \
95 case 0: \
96 cpu_##insn##_kernel_ra(env, addr, val, retaddr); \
97 break; \
98 case 1: \
99 cpu_##insn##_super_ra(env, addr, val, retaddr); \
100 break; \
101 default: \
102 case 2: \
103 cpu_##insn##_user_ra(env, addr, val, retaddr); \
104 break; \
105 case 3: \
106 cpu_##insn##_error_ra(env, addr, val, retaddr); \
107 break; \
108 } \
109 }
110 #endif
111 HELPER_ST(sb, stb, uint8_t)
112 HELPER_ST(sw, stl, uint32_t)
113 #if defined(TARGET_MIPS64)
114 HELPER_ST(sd, stq, uint64_t)
115 #endif
116 #undef HELPER_ST
117
/* 64-bit arithmetic for 32-bit hosts */
119 static inline uint64_t get_HILO(CPUMIPSState *env)
120 {
121 return ((uint64_t)(env->active_tc.HI[0]) << 32) |
122 (uint32_t)env->active_tc.LO[0];
123 }
124
static inline target_ulong set_HIT0_LO(CPUMIPSState *env, uint64_t HILO)
126 {
127 env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
128 return env->active_tc.HI[0] = (int32_t)(HILO >> 32);
129 }
130
static inline target_ulong set_HI_LOT0(CPUMIPSState *env, uint64_t HILO)
132 {
133 target_ulong tmp = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
134 env->active_tc.HI[0] = (int32_t)(HILO >> 32);
135 return tmp;
136 }
137
138 /* Multiplication variants of the vr54xx. */
target_ulong helper_muls(CPUMIPSState *env, target_ulong arg1,
140 target_ulong arg2)
141 {
142 return set_HI_LOT0(env, 0 - ((int64_t)(int32_t)arg1 *
143 (int64_t)(int32_t)arg2));
144 }
145
target_ulong helper_mulsu(CPUMIPSState *env, target_ulong arg1,
147 target_ulong arg2)
148 {
149 return set_HI_LOT0(env, 0 - (uint64_t)(uint32_t)arg1 *
150 (uint64_t)(uint32_t)arg2);
151 }
152
target_ulong helper_macc(CPUMIPSState *env, target_ulong arg1,
154 target_ulong arg2)
155 {
156 return set_HI_LOT0(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
157 (int64_t)(int32_t)arg2);
158 }
159
target_ulong helper_macchi(CPUMIPSState *env, target_ulong arg1,
161 target_ulong arg2)
162 {
163 return set_HIT0_LO(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
164 (int64_t)(int32_t)arg2);
165 }
166
target_ulong helper_maccu(CPUMIPSState *env, target_ulong arg1,
168 target_ulong arg2)
169 {
170 return set_HI_LOT0(env, (uint64_t)get_HILO(env) +
171 (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
172 }
173
target_ulong helper_macchiu(CPUMIPSState *env, target_ulong arg1,
175 target_ulong arg2)
176 {
177 return set_HIT0_LO(env, (uint64_t)get_HILO(env) +
178 (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
179 }
180
target_ulong helper_msac(CPUMIPSState *env, target_ulong arg1,
182 target_ulong arg2)
183 {
184 return set_HI_LOT0(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
185 (int64_t)(int32_t)arg2);
186 }
187
target_ulong helper_msachi(CPUMIPSState *env, target_ulong arg1,
189 target_ulong arg2)
190 {
191 return set_HIT0_LO(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
192 (int64_t)(int32_t)arg2);
193 }
194
target_ulong helper_msacu(CPUMIPSState *env, target_ulong arg1,
196 target_ulong arg2)
197 {
198 return set_HI_LOT0(env, (uint64_t)get_HILO(env) -
199 (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
200 }
201
target_ulong helper_msachiu(CPUMIPSState *env, target_ulong arg1,
203 target_ulong arg2)
204 {
205 return set_HIT0_LO(env, (uint64_t)get_HILO(env) -
206 (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
207 }
208
target_ulong helper_mulhi(CPUMIPSState *env, target_ulong arg1,
210 target_ulong arg2)
211 {
212 return set_HIT0_LO(env, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
213 }
214
target_ulong helper_mulhiu(CPUMIPSState *env, target_ulong arg1,
216 target_ulong arg2)
217 {
218 return set_HIT0_LO(env, (uint64_t)(uint32_t)arg1 *
219 (uint64_t)(uint32_t)arg2);
220 }
221
target_ulong helper_mulshi(CPUMIPSState *env, target_ulong arg1,
223 target_ulong arg2)
224 {
225 return set_HIT0_LO(env, 0 - (int64_t)(int32_t)arg1 *
226 (int64_t)(int32_t)arg2);
227 }
228
target_ulong helper_mulshiu(CPUMIPSState *env, target_ulong arg1,
230 target_ulong arg2)
231 {
232 return set_HIT0_LO(env, 0 - (uint64_t)(uint32_t)arg1 *
233 (uint64_t)(uint32_t)arg2);
234 }
235
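/*
 * Reverse the bit order within each byte of v by swapping adjacent bits,
 * then bit pairs, then nibbles (used by the BITSWAP/DBITSWAP helpers).
 */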
static inline target_ulong bitswap(target_ulong v)
237 {
238 v = ((v >> 1) & (target_ulong)0x5555555555555555ULL) |
239 ((v & (target_ulong)0x5555555555555555ULL) << 1);
240 v = ((v >> 2) & (target_ulong)0x3333333333333333ULL) |
241 ((v & (target_ulong)0x3333333333333333ULL) << 2);
242 v = ((v >> 4) & (target_ulong)0x0F0F0F0F0F0F0F0FULL) |
243 ((v & (target_ulong)0x0F0F0F0F0F0F0F0FULL) << 4);
244 return v;
245 }
246
247 #ifdef TARGET_MIPS64
target_ulong helper_dbitswap(target_ulong rt)
249 {
250 return bitswap(rt);
251 }
252 #endif
253
target_ulong helper_bitswap(target_ulong rt)
255 {
256 return (int32_t)bitswap(rt);
257 }
258
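/*
 * Bit-shuffle helper for ROTX: the 32-bit source is replicated into a
 * 64-bit value and run through a staged shifter network that
 * conditionally shifts by 16, 8, 4, 2 and 1 bit positions, choosing
 * between 'shift' and 'shiftx' per bit position (with 'stripe'
 * optionally inverting the selection), before the low 32 bits are
 * sign-extended into the result.
 */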
target_ulong helper_rotx(target_ulong rs, uint32_t shift, uint32_t shiftx,
260 uint32_t stripe)
261 {
262 int i;
263 uint64_t tmp0 = ((uint64_t)rs) << 32 | ((uint64_t)rs & 0xffffffff);
264 uint64_t tmp1 = tmp0;
265 for (i = 0; i <= 46; i++) {
266 int s;
267 if (i & 0x8) {
268 s = shift;
269 } else {
270 s = shiftx;
271 }
272
273 if (stripe != 0 && !(i & 0x4)) {
274 s = ~s;
275 }
276 if (s & 0x10) {
277 if (tmp0 & (1LL << (i + 16))) {
278 tmp1 |= 1LL << i;
279 } else {
280 tmp1 &= ~(1LL << i);
281 }
282 }
283 }
284
285 uint64_t tmp2 = tmp1;
286 for (i = 0; i <= 38; i++) {
287 int s;
288 if (i & 0x4) {
289 s = shift;
290 } else {
291 s = shiftx;
292 }
293
294 if (s & 0x8) {
295 if (tmp1 & (1LL << (i + 8))) {
296 tmp2 |= 1LL << i;
297 } else {
298 tmp2 &= ~(1LL << i);
299 }
300 }
301 }
302
303 uint64_t tmp3 = tmp2;
304 for (i = 0; i <= 34; i++) {
305 int s;
306 if (i & 0x2) {
307 s = shift;
308 } else {
309 s = shiftx;
310 }
311 if (s & 0x4) {
312 if (tmp2 & (1LL << (i + 4))) {
313 tmp3 |= 1LL << i;
314 } else {
315 tmp3 &= ~(1LL << i);
316 }
317 }
318 }
319
320 uint64_t tmp4 = tmp3;
321 for (i = 0; i <= 32; i++) {
322 int s;
323 if (i & 0x1) {
324 s = shift;
325 } else {
326 s = shiftx;
327 }
328 if (s & 0x2) {
329 if (tmp3 & (1LL << (i + 2))) {
330 tmp4 |= 1LL << i;
331 } else {
332 tmp4 &= ~(1LL << i);
333 }
334 }
335 }
336
337 uint64_t tmp5 = tmp4;
338 for (i = 0; i <= 31; i++) {
339 int s;
340 s = shift;
341 if (s & 0x1) {
342 if (tmp4 & (1LL << (i + 1))) {
343 tmp5 |= 1LL << i;
344 } else {
345 tmp5 &= ~(1LL << i);
346 }
347 }
348 }
349
350 return (int64_t)(int32_t)(uint32_t)tmp5;
351 }
352
353 #ifndef CONFIG_USER_ONLY
354
static inline hwaddr do_translate_address(CPUMIPSState *env,
356 target_ulong address,
357 int rw, uintptr_t retaddr)
358 {
359 hwaddr paddr;
360 CPUState *cs = env_cpu(env);
361
362 paddr = cpu_mips_translate_address(env, address, rw);
363
364 if (paddr == -1LL) {
365 cpu_loop_exit_restore(cs, retaddr);
366 } else {
367 return paddr;
368 }
369 }
370
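/*
 * LL/LLD: check alignment (raising AdEL on failure), then record the
 * translated physical address in CP0_LLAddr and the virtual address and
 * loaded value in lladdr/llval so a subsequent SC/SCD can be validated.
 */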
371 #define HELPER_LD_ATOMIC(name, insn, almask) \
372 target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx) \
373 { \
374 if (arg & almask) { \
375 if (!(env->hflags & MIPS_HFLAG_DM)) { \
376 env->CP0_BadVAddr = arg; \
377 } \
378 do_raise_exception(env, EXCP_AdEL, GETPC()); \
379 } \
380 env->CP0_LLAddr = do_translate_address(env, arg, 0, GETPC()); \
381 env->lladdr = arg; \
382 env->llval = do_##insn(env, arg, mem_idx, GETPC()); \
383 return env->llval; \
384 }
385 HELPER_LD_ATOMIC(ll, lw, 0x3)
386 #ifdef TARGET_MIPS64
387 HELPER_LD_ATOMIC(lld, ld, 0x7)
388 #endif
389 #undef HELPER_LD_ATOMIC
390 #endif
391
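/*
 * GET_LMASK/GET_OFFSET select the byte lanes touched by the unaligned
 * SWL/SWR (and SDL/SDR) store helpers; the lane order depends on the
 * target endianness.
 */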
392 #ifdef TARGET_WORDS_BIGENDIAN
393 #define GET_LMASK(v) ((v) & 3)
394 #define GET_OFFSET(addr, offset) (addr + (offset))
395 #else
396 #define GET_LMASK(v) (((v) & 3) ^ 3)
397 #define GET_OFFSET(addr, offset) (addr - (offset))
398 #endif
399
void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
401 int mem_idx)
402 {
403 do_sb(env, arg2, (uint8_t)(arg1 >> 24), mem_idx, GETPC());
404
405 if (GET_LMASK(arg2) <= 2) {
406 do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx,
407 GETPC());
408 }
409
410 if (GET_LMASK(arg2) <= 1) {
411 do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx,
412 GETPC());
413 }
414
415 if (GET_LMASK(arg2) == 0) {
416 do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx,
417 GETPC());
418 }
419 }
420
void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
422 int mem_idx)
423 {
424 do_sb(env, arg2, (uint8_t)arg1, mem_idx, GETPC());
425
426 if (GET_LMASK(arg2) >= 1) {
427 do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx,
428 GETPC());
429 }
430
431 if (GET_LMASK(arg2) >= 2) {
432 do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx,
433 GETPC());
434 }
435
436 if (GET_LMASK(arg2) == 3) {
437 do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx,
438 GETPC());
439 }
440 }
441
442 #if defined(TARGET_MIPS64)
443 /*
444 * "half" load and stores. We must do the memory access inline,
445 * or fault handling won't work.
446 */
447 #ifdef TARGET_WORDS_BIGENDIAN
448 #define GET_LMASK64(v) ((v) & 7)
449 #else
450 #define GET_LMASK64(v) (((v) & 7) ^ 7)
451 #endif
452
void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
454 int mem_idx)
455 {
456 do_sb(env, arg2, (uint8_t)(arg1 >> 56), mem_idx, GETPC());
457
458 if (GET_LMASK64(arg2) <= 6) {
459 do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx,
460 GETPC());
461 }
462
463 if (GET_LMASK64(arg2) <= 5) {
464 do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx,
465 GETPC());
466 }
467
468 if (GET_LMASK64(arg2) <= 4) {
469 do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx,
470 GETPC());
471 }
472
473 if (GET_LMASK64(arg2) <= 3) {
474 do_sb(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx,
475 GETPC());
476 }
477
478 if (GET_LMASK64(arg2) <= 2) {
479 do_sb(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx,
480 GETPC());
481 }
482
483 if (GET_LMASK64(arg2) <= 1) {
484 do_sb(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx,
485 GETPC());
486 }
487
488 if (GET_LMASK64(arg2) <= 0) {
489 do_sb(env, GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx,
490 GETPC());
491 }
492 }
493
void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
495 int mem_idx)
496 {
497 do_sb(env, arg2, (uint8_t)arg1, mem_idx, GETPC());
498
499 if (GET_LMASK64(arg2) >= 1) {
500 do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx,
501 GETPC());
502 }
503
504 if (GET_LMASK64(arg2) >= 2) {
505 do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx,
506 GETPC());
507 }
508
509 if (GET_LMASK64(arg2) >= 3) {
510 do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx,
511 GETPC());
512 }
513
514 if (GET_LMASK64(arg2) >= 4) {
515 do_sb(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx,
516 GETPC());
517 }
518
519 if (GET_LMASK64(arg2) >= 5) {
520 do_sb(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx,
521 GETPC());
522 }
523
524 if (GET_LMASK64(arg2) >= 6) {
525 do_sb(env, GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx,
526 GETPC());
527 }
528
529 if (GET_LMASK64(arg2) == 7) {
530 do_sb(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx,
531 GETPC());
532 }
533 }
534 #endif /* TARGET_MIPS64 */
535
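/*
 * Callee-saved registers (s0..s7 and s8/fp) covered by the load/store
 * multiple helpers below; bit 4 of the encoded reglist additionally
 * selects ra ($31).
 */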
536 static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };
537
void helper_lwm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
539 uint32_t mem_idx)
540 {
541 target_ulong base_reglist = reglist & 0xf;
542 target_ulong do_r31 = reglist & 0x10;
543
544 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) {
545 target_ulong i;
546
547 for (i = 0; i < base_reglist; i++) {
548 env->active_tc.gpr[multiple_regs[i]] =
549 (target_long)do_lw(env, addr, mem_idx, GETPC());
550 addr += 4;
551 }
552 }
553
554 if (do_r31) {
555 env->active_tc.gpr[31] = (target_long)do_lw(env, addr, mem_idx,
556 GETPC());
557 }
558 }
559
void helper_swm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
561 uint32_t mem_idx)
562 {
563 target_ulong base_reglist = reglist & 0xf;
564 target_ulong do_r31 = reglist & 0x10;
565
566 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) {
567 target_ulong i;
568
569 for (i = 0; i < base_reglist; i++) {
570 do_sw(env, addr, env->active_tc.gpr[multiple_regs[i]], mem_idx,
571 GETPC());
572 addr += 4;
573 }
574 }
575
576 if (do_r31) {
577 do_sw(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
578 }
579 }
580
581 #if defined(TARGET_MIPS64)
void helper_ldm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
583 uint32_t mem_idx)
584 {
585 target_ulong base_reglist = reglist & 0xf;
586 target_ulong do_r31 = reglist & 0x10;
587
588 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) {
589 target_ulong i;
590
591 for (i = 0; i < base_reglist; i++) {
592 env->active_tc.gpr[multiple_regs[i]] = do_ld(env, addr, mem_idx,
593 GETPC());
594 addr += 8;
595 }
596 }
597
598 if (do_r31) {
599 env->active_tc.gpr[31] = do_ld(env, addr, mem_idx, GETPC());
600 }
601 }
602
void helper_sdm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
604 uint32_t mem_idx)
605 {
606 target_ulong base_reglist = reglist & 0xf;
607 target_ulong do_r31 = reglist & 0x10;
608
609 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) {
610 target_ulong i;
611
612 for (i = 0; i < base_reglist; i++) {
613 do_sd(env, addr, env->active_tc.gpr[multiple_regs[i]], mem_idx,
614 GETPC());
615 addr += 8;
616 }
617 }
618
619 if (do_r31) {
620 do_sd(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
621 }
622 }
623 #endif
624
625 #ifndef CONFIG_USER_ONLY
626 /* SMP helpers. */
static bool mips_vpe_is_wfi(MIPSCPU *c)
628 {
629 CPUState *cpu = CPU(c);
630 CPUMIPSState *env = &c->env;
631
632 /*
633 * If the VPE is halted but otherwise active, it means it's waiting for
* an interrupt.
635 */
636 return cpu->halted && mips_vpe_active(env);
637 }
638
static bool mips_vp_is_wfi(MIPSCPU *c)
640 {
641 CPUState *cpu = CPU(c);
642 CPUMIPSState *env = &c->env;
643
644 return cpu->halted && mips_vp_active(env);
645 }
646
static inline void mips_vpe_wake(MIPSCPU *c)
648 {
649 /*
650 * Don't set ->halted = 0 directly, let it be done via cpu_has_work
651 * because there might be other conditions that state that c should
652 * be sleeping.
653 */
654 qemu_mutex_lock_iothread();
655 cpu_interrupt(CPU(c), CPU_INTERRUPT_WAKE);
656 qemu_mutex_unlock_iothread();
657 }
658
static inline void mips_vpe_sleep(MIPSCPU *cpu)
660 {
661 CPUState *cs = CPU(cpu);
662
663 /*
664 * The VPE was shut off, really go to bed.
665 * Reset any old _WAKE requests.
666 */
667 cs->halted = 1;
668 cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
669 }
670
static inline void mips_tc_wake(MIPSCPU *cpu, int tc)
672 {
673 CPUMIPSState *c = &cpu->env;
674
675 /* FIXME: TC reschedule. */
676 if (mips_vpe_active(c) && !mips_vpe_is_wfi(cpu)) {
677 mips_vpe_wake(cpu);
678 }
679 }
680
static inline void mips_tc_sleep(MIPSCPU *cpu, int tc)
682 {
683 CPUMIPSState *c = &cpu->env;
684
685 /* FIXME: TC reschedule. */
686 if (!mips_vpe_active(c)) {
687 mips_vpe_sleep(cpu);
688 }
689 }
690
691 /**
692 * mips_cpu_map_tc:
693 * @env: CPU from which mapping is performed.
694 * @tc: Should point to an int with the value of the global TC index.
695 *
696 * This function will transform @tc into a local index within the
697 * returned #CPUMIPSState.
698 */
699
700 /*
701 * FIXME: This code assumes that all VPEs have the same number of TCs,
702 * which depends on runtime setup. Can probably be fixed by
703 * walking the list of CPUMIPSStates.
704 */
static CPUMIPSState *mips_cpu_map_tc(CPUMIPSState *env, int *tc)
706 {
707 MIPSCPU *cpu;
708 CPUState *cs;
709 CPUState *other_cs;
710 int vpe_idx;
711 int tc_idx = *tc;
712
713 if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
714 /* Not allowed to address other CPUs. */
715 *tc = env->current_tc;
716 return env;
717 }
718
719 cs = env_cpu(env);
720 vpe_idx = tc_idx / cs->nr_threads;
721 *tc = tc_idx % cs->nr_threads;
722 other_cs = qemu_get_cpu(vpe_idx);
723 if (other_cs == NULL) {
724 return env;
725 }
726 cpu = MIPS_CPU(other_cs);
727 return &cpu->env;
728 }
729
730 /*
731 * The per VPE CP0_Status register shares some fields with the per TC
732 * CP0_TCStatus registers. These fields are wired to the same registers,
733 * so changes to either of them should be reflected on both registers.
734 *
* Also, EntryHi shares the bottom 8 ASID bits with TCStatus.
736 *
* These helper calls synchronize the registers for a given CPU.
738 */
739
740 /*
741 * Called for updates to CP0_Status. Defined in "cpu.h" for gdbstub.c.
742 * static inline void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu,
743 * int tc);
744 */
745
746 /* Called for updates to CP0_TCStatus. */
static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc,
748 target_ulong v)
749 {
750 uint32_t status;
751 uint32_t tcu, tmx, tasid, tksu;
752 uint32_t mask = ((1U << CP0St_CU3)
753 | (1 << CP0St_CU2)
754 | (1 << CP0St_CU1)
755 | (1 << CP0St_CU0)
756 | (1 << CP0St_MX)
757 | (3 << CP0St_KSU));
758
759 tcu = (v >> CP0TCSt_TCU0) & 0xf;
760 tmx = (v >> CP0TCSt_TMX) & 0x1;
761 tasid = v & cpu->CP0_EntryHi_ASID_mask;
762 tksu = (v >> CP0TCSt_TKSU) & 0x3;
763
764 status = tcu << CP0St_CU0;
765 status |= tmx << CP0St_MX;
766 status |= tksu << CP0St_KSU;
767
768 cpu->CP0_Status &= ~mask;
769 cpu->CP0_Status |= status;
770
771 /* Sync the TASID with EntryHi. */
772 cpu->CP0_EntryHi &= ~cpu->CP0_EntryHi_ASID_mask;
773 cpu->CP0_EntryHi |= tasid;
774
775 compute_hflags(cpu);
776 }
777
778 /* Called for updates to CP0_EntryHi. */
static void sync_c0_entryhi(CPUMIPSState *cpu, int tc)
780 {
781 int32_t *tcst;
782 uint32_t asid, v = cpu->CP0_EntryHi;
783
784 asid = v & cpu->CP0_EntryHi_ASID_mask;
785
786 if (tc == cpu->current_tc) {
787 tcst = &cpu->active_tc.CP0_TCStatus;
788 } else {
789 tcst = &cpu->tcs[tc].CP0_TCStatus;
790 }
791
792 *tcst &= ~cpu->CP0_EntryHi_ASID_mask;
793 *tcst |= asid;
794 }
795
796 /* CP0 helpers */
target_ulong helper_mfc0_mvpcontrol(CPUMIPSState *env)
798 {
799 return env->mvp->CP0_MVPControl;
800 }
801
target_ulong helper_mfc0_mvpconf0(CPUMIPSState *env)
803 {
804 return env->mvp->CP0_MVPConf0;
805 }
806
target_ulong helper_mfc0_mvpconf1(CPUMIPSState *env)
808 {
809 return env->mvp->CP0_MVPConf1;
810 }
811
target_ulong helper_mfc0_random(CPUMIPSState *env)
813 {
814 return (int32_t)cpu_mips_get_random(env);
815 }
816
target_ulong helper_mfc0_tcstatus(CPUMIPSState *env)
818 {
819 return env->active_tc.CP0_TCStatus;
820 }
821
target_ulong helper_mftc0_tcstatus(CPUMIPSState *env)
823 {
824 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
825 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
826
827 if (other_tc == other->current_tc) {
828 return other->active_tc.CP0_TCStatus;
829 } else {
830 return other->tcs[other_tc].CP0_TCStatus;
831 }
832 }
833
target_ulong helper_mfc0_tcbind(CPUMIPSState *env)
835 {
836 return env->active_tc.CP0_TCBind;
837 }
838
target_ulong helper_mftc0_tcbind(CPUMIPSState *env)
840 {
841 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
842 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
843
844 if (other_tc == other->current_tc) {
845 return other->active_tc.CP0_TCBind;
846 } else {
847 return other->tcs[other_tc].CP0_TCBind;
848 }
849 }
850
target_ulong helper_mfc0_tcrestart(CPUMIPSState *env)
852 {
853 return env->active_tc.PC;
854 }
855
target_ulong helper_mftc0_tcrestart(CPUMIPSState *env)
857 {
858 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
859 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
860
861 if (other_tc == other->current_tc) {
862 return other->active_tc.PC;
863 } else {
864 return other->tcs[other_tc].PC;
865 }
866 }
867
target_ulong helper_mfc0_tchalt(CPUMIPSState *env)
869 {
870 return env->active_tc.CP0_TCHalt;
871 }
872
target_ulong helper_mftc0_tchalt(CPUMIPSState *env)
874 {
875 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
876 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
877
878 if (other_tc == other->current_tc) {
879 return other->active_tc.CP0_TCHalt;
880 } else {
881 return other->tcs[other_tc].CP0_TCHalt;
882 }
883 }
884
target_ulong helper_mfc0_tccontext(CPUMIPSState *env)
886 {
887 return env->active_tc.CP0_TCContext;
888 }
889
target_ulong helper_mftc0_tccontext(CPUMIPSState *env)
891 {
892 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
893 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
894
895 if (other_tc == other->current_tc) {
896 return other->active_tc.CP0_TCContext;
897 } else {
898 return other->tcs[other_tc].CP0_TCContext;
899 }
900 }
901
target_ulong helper_mfc0_tcschedule(CPUMIPSState *env)
903 {
904 return env->active_tc.CP0_TCSchedule;
905 }
906
target_ulong helper_mftc0_tcschedule(CPUMIPSState *env)
908 {
909 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
910 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
911
912 if (other_tc == other->current_tc) {
913 return other->active_tc.CP0_TCSchedule;
914 } else {
915 return other->tcs[other_tc].CP0_TCSchedule;
916 }
917 }
918
target_ulong helper_mfc0_tcschefback(CPUMIPSState *env)
920 {
921 return env->active_tc.CP0_TCScheFBack;
922 }
923
target_ulong helper_mftc0_tcschefback(CPUMIPSState *env)
925 {
926 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
927 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
928
929 if (other_tc == other->current_tc) {
930 return other->active_tc.CP0_TCScheFBack;
931 } else {
932 return other->tcs[other_tc].CP0_TCScheFBack;
933 }
934 }
935
target_ulong helper_mfc0_count(CPUMIPSState *env)
937 {
938 return (int32_t)cpu_mips_get_count(env);
939 }
940
target_ulong helper_mfc0_saar(CPUMIPSState *env)
942 {
943 if ((env->CP0_SAARI & 0x3f) < 2) {
944 return (int32_t) env->CP0_SAAR[env->CP0_SAARI & 0x3f];
945 }
946 return 0;
947 }
948
target_ulong helper_mfhc0_saar(CPUMIPSState *env)
950 {
951 if ((env->CP0_SAARI & 0x3f) < 2) {
952 return env->CP0_SAAR[env->CP0_SAARI & 0x3f] >> 32;
953 }
954 return 0;
955 }
956
target_ulong helper_mftc0_entryhi(CPUMIPSState *env)
958 {
959 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
960 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
961
962 return other->CP0_EntryHi;
963 }
964
target_ulong helper_mftc0_cause(CPUMIPSState *env)
966 {
967 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
968 int32_t tccause;
969 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
970
971 if (other_tc == other->current_tc) {
972 tccause = other->CP0_Cause;
973 } else {
974 tccause = other->CP0_Cause;
975 }
976
977 return tccause;
978 }
979
target_ulong helper_mftc0_status(CPUMIPSState *env)
981 {
982 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
983 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
984
985 return other->CP0_Status;
986 }
987
target_ulong helper_mfc0_lladdr(CPUMIPSState *env)
989 {
990 return (int32_t)(env->CP0_LLAddr >> env->CP0_LLAddr_shift);
991 }
992
target_ulong helper_mfc0_maar(CPUMIPSState *env)
994 {
995 return (int32_t) env->CP0_MAAR[env->CP0_MAARI];
996 }
997
target_ulong helper_mfhc0_maar(CPUMIPSState *env)
999 {
1000 return env->CP0_MAAR[env->CP0_MAARI] >> 32;
1001 }
1002
target_ulong helper_mfc0_watchlo(CPUMIPSState *env, uint32_t sel)
1004 {
1005 return (int32_t)env->CP0_WatchLo[sel];
1006 }
1007
target_ulong helper_mfc0_watchhi(CPUMIPSState *env, uint32_t sel)
1009 {
1010 return env->CP0_WatchHi[sel];
1011 }
1012
target_ulong helper_mfc0_debug(CPUMIPSState *env)
1014 {
1015 target_ulong t0 = env->CP0_Debug;
1016 if (env->hflags & MIPS_HFLAG_DM) {
1017 t0 |= 1 << CP0DB_DM;
1018 }
1019
1020 return t0;
1021 }
1022
target_ulong helper_mftc0_debug(CPUMIPSState *env)
1024 {
1025 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1026 int32_t tcstatus;
1027 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1028
1029 if (other_tc == other->current_tc) {
1030 tcstatus = other->active_tc.CP0_Debug_tcstatus;
1031 } else {
1032 tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;
1033 }
1034
1035 /* XXX: Might be wrong, check with EJTAG spec. */
1036 return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1037 (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
1038 }
1039
1040 #if defined(TARGET_MIPS64)
target_ulong helper_dmfc0_tcrestart(CPUMIPSState *env)
1042 {
1043 return env->active_tc.PC;
1044 }
1045
target_ulong helper_dmfc0_tchalt(CPUMIPSState *env)
1047 {
1048 return env->active_tc.CP0_TCHalt;
1049 }
1050
target_ulong helper_dmfc0_tccontext(CPUMIPSState *env)
1052 {
1053 return env->active_tc.CP0_TCContext;
1054 }
1055
target_ulong helper_dmfc0_tcschedule(CPUMIPSState *env)
1057 {
1058 return env->active_tc.CP0_TCSchedule;
1059 }
1060
target_ulong helper_dmfc0_tcschefback(CPUMIPSState *env)
1062 {
1063 return env->active_tc.CP0_TCScheFBack;
1064 }
1065
target_ulong helper_dmfc0_lladdr(CPUMIPSState *env)
1067 {
1068 return env->CP0_LLAddr >> env->CP0_LLAddr_shift;
1069 }
1070
target_ulong helper_dmfc0_maar(CPUMIPSState *env)
1072 {
1073 return env->CP0_MAAR[env->CP0_MAARI];
1074 }
1075
target_ulong helper_dmfc0_watchlo(CPUMIPSState *env, uint32_t sel)
1077 {
1078 return env->CP0_WatchLo[sel];
1079 }
1080
target_ulong helper_dmfc0_saar(CPUMIPSState *env)
1082 {
1083 if ((env->CP0_SAARI & 0x3f) < 2) {
1084 return env->CP0_SAAR[env->CP0_SAARI & 0x3f];
1085 }
1086 return 0;
1087 }
1088 #endif /* TARGET_MIPS64 */
1089
void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1)
1091 {
1092 uint32_t index_p = env->CP0_Index & 0x80000000;
1093 uint32_t tlb_index = arg1 & 0x7fffffff;
1094 if (tlb_index < env->tlb->nb_tlb) {
1095 if (env->insn_flags & ISA_MIPS32R6) {
1096 index_p |= arg1 & 0x80000000;
1097 }
1098 env->CP0_Index = index_p | tlb_index;
1099 }
1100 }
1101
void helper_mtc0_mvpcontrol(CPUMIPSState *env, target_ulong arg1)
1103 {
1104 uint32_t mask = 0;
1105 uint32_t newval;
1106
1107 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
1108 mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
1109 (1 << CP0MVPCo_EVP);
1110 }
1111 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
1112 mask |= (1 << CP0MVPCo_STLB);
1113 }
1114 newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);
1115
1116 /* TODO: Enable/disable shared TLB, enable/disable VPEs. */
1117
1118 env->mvp->CP0_MVPControl = newval;
1119 }
1120
void helper_mtc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
1122 {
1123 uint32_t mask;
1124 uint32_t newval;
1125
1126 mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
1127 (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
1128 newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);
1129
1130 /*
1131 * Yield scheduler intercept not implemented.
1132 * Gating storage scheduler intercept not implemented.
1133 */
1134
1135 /* TODO: Enable/disable TCs. */
1136
1137 env->CP0_VPEControl = newval;
1138 }
1139
void helper_mttc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
1141 {
1142 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1143 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1144 uint32_t mask;
1145 uint32_t newval;
1146
1147 mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
1148 (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
1149 newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask);
1150
1151 /* TODO: Enable/disable TCs. */
1152
1153 other->CP0_VPEControl = newval;
1154 }
1155
target_ulong helper_mftc0_vpecontrol(CPUMIPSState *env)
1157 {
1158 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1159 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
/* FIXME: Mask away the return-zero-on-read bits. */
1161 return other->CP0_VPEControl;
1162 }
1163
target_ulong helper_mftc0_vpeconf0(CPUMIPSState *env)
1165 {
1166 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1167 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1168
1169 return other->CP0_VPEConf0;
1170 }
1171
void helper_mtc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
1173 {
1174 uint32_t mask = 0;
1175 uint32_t newval;
1176
1177 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
1178 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA)) {
1179 mask |= (0xff << CP0VPEC0_XTC);
1180 }
1181 mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
1182 }
1183 newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);
1184
1185 /* TODO: TC exclusive handling due to ERL/EXL. */
1186
1187 env->CP0_VPEConf0 = newval;
1188 }
1189
void helper_mttc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
1191 {
1192 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1193 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1194 uint32_t mask = 0;
1195 uint32_t newval;
1196
1197 mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
1198 newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask);
1199
1200 /* TODO: TC exclusive handling due to ERL/EXL. */
1201 other->CP0_VPEConf0 = newval;
1202 }
1203
void helper_mtc0_vpeconf1(CPUMIPSState *env, target_ulong arg1)
1205 {
1206 uint32_t mask = 0;
1207 uint32_t newval;
1208
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
                (0xff << CP0VPEC1_NCP1);
    }
1212 newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);
1213
1214 /* UDI not implemented. */
1215 /* CP2 not implemented. */
1216
1217 /* TODO: Handle FPU (CP1) binding. */
1218
1219 env->CP0_VPEConf1 = newval;
1220 }
1221
void helper_mtc0_yqmask(CPUMIPSState *env, target_ulong arg1)
1223 {
1224 /* Yield qualifier inputs not implemented. */
1225 env->CP0_YQMask = 0x00000000;
1226 }
1227
void helper_mtc0_vpeopt(CPUMIPSState *env, target_ulong arg1)
1229 {
1230 env->CP0_VPEOpt = arg1 & 0x0000ffff;
1231 }
1232
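/*
 * Writable PFN and attribute bits of EntryLo, derived from the supported
 * physical address mask; the 32-bit MTC0 form keeps only the low 30 bits.
 */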
1233 #define MTC0_ENTRYLO_MASK(env) ((env->PAMask >> 6) & 0x3FFFFFFF)
1234
void helper_mtc0_entrylo0(CPUMIPSState *env, target_ulong arg1)
1236 {
1237 /* 1k pages not implemented */
1238 target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
1239 env->CP0_EntryLo0 = (arg1 & MTC0_ENTRYLO_MASK(env))
1240 | (rxi << (CP0EnLo_XI - 30));
1241 }
1242
1243 #if defined(TARGET_MIPS64)
1244 #define DMTC0_ENTRYLO_MASK(env) (env->PAMask >> 6)
1245
void helper_dmtc0_entrylo0(CPUMIPSState *env, uint64_t arg1)
1247 {
1248 uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
1249 env->CP0_EntryLo0 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
1250 }
1251 #endif
1252
void helper_mtc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
1254 {
1255 uint32_t mask = env->CP0_TCStatus_rw_bitmask;
1256 uint32_t newval;
1257
1258 newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);
1259
1260 env->active_tc.CP0_TCStatus = newval;
1261 sync_c0_tcstatus(env, env->current_tc, newval);
1262 }
1263
void helper_mttc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
1265 {
1266 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1267 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1268
1269 if (other_tc == other->current_tc) {
1270 other->active_tc.CP0_TCStatus = arg1;
1271 } else {
1272 other->tcs[other_tc].CP0_TCStatus = arg1;
1273 }
1274 sync_c0_tcstatus(other, other_tc, arg1);
1275 }
1276
void helper_mtc0_tcbind(CPUMIPSState *env, target_ulong arg1)
1278 {
1279 uint32_t mask = (1 << CP0TCBd_TBE);
1280 uint32_t newval;
1281
1282 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
1283 mask |= (1 << CP0TCBd_CurVPE);
1284 }
1285 newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
1286 env->active_tc.CP0_TCBind = newval;
1287 }
1288
void helper_mttc0_tcbind(CPUMIPSState *env, target_ulong arg1)
1290 {
1291 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1292 uint32_t mask = (1 << CP0TCBd_TBE);
1293 uint32_t newval;
1294 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1295
1296 if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
1297 mask |= (1 << CP0TCBd_CurVPE);
1298 }
1299 if (other_tc == other->current_tc) {
1300 newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
1301 other->active_tc.CP0_TCBind = newval;
1302 } else {
1303 newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
1304 other->tcs[other_tc].CP0_TCBind = newval;
1305 }
1306 }
1307
void helper_mtc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
1309 {
1310 env->active_tc.PC = arg1;
1311 env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1312 env->CP0_LLAddr = 0;
1313 env->lladdr = 0;
1314 /* MIPS16 not implemented. */
1315 }
1316
void helper_mttc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
1318 {
1319 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1320 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1321
1322 if (other_tc == other->current_tc) {
1323 other->active_tc.PC = arg1;
1324 other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1325 other->CP0_LLAddr = 0;
1326 other->lladdr = 0;
1327 /* MIPS16 not implemented. */
1328 } else {
1329 other->tcs[other_tc].PC = arg1;
1330 other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1331 other->CP0_LLAddr = 0;
1332 other->lladdr = 0;
1333 /* MIPS16 not implemented. */
1334 }
1335 }
1336
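/* Writing bit 0 of TCHalt puts the current TC to sleep or wakes it up. */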
void helper_mtc0_tchalt(CPUMIPSState *env, target_ulong arg1)
1338 {
1339 MIPSCPU *cpu = env_archcpu(env);
1340
1341 env->active_tc.CP0_TCHalt = arg1 & 0x1;
1342
1343 /* TODO: Halt TC / Restart (if allocated+active) TC. */
1344 if (env->active_tc.CP0_TCHalt & 1) {
1345 mips_tc_sleep(cpu, env->current_tc);
1346 } else {
1347 mips_tc_wake(cpu, env->current_tc);
1348 }
1349 }
1350
void helper_mttc0_tchalt(CPUMIPSState *env, target_ulong arg1)
1352 {
1353 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1354 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1355 MIPSCPU *other_cpu = env_archcpu(other);
1356
1357 /* TODO: Halt TC / Restart (if allocated+active) TC. */
1358
1359 if (other_tc == other->current_tc) {
1360 other->active_tc.CP0_TCHalt = arg1;
1361 } else {
1362 other->tcs[other_tc].CP0_TCHalt = arg1;
1363 }
1364
1365 if (arg1 & 1) {
1366 mips_tc_sleep(other_cpu, other_tc);
1367 } else {
1368 mips_tc_wake(other_cpu, other_tc);
1369 }
1370 }
1371
void helper_mtc0_tccontext(CPUMIPSState *env, target_ulong arg1)
1373 {
1374 env->active_tc.CP0_TCContext = arg1;
1375 }
1376
void helper_mttc0_tccontext(CPUMIPSState *env, target_ulong arg1)
1378 {
1379 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1380 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1381
1382 if (other_tc == other->current_tc) {
1383 other->active_tc.CP0_TCContext = arg1;
1384 } else {
1385 other->tcs[other_tc].CP0_TCContext = arg1;
1386 }
1387 }
1388
void helper_mtc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
1390 {
1391 env->active_tc.CP0_TCSchedule = arg1;
1392 }
1393
void helper_mttc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
1395 {
1396 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1397 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1398
1399 if (other_tc == other->current_tc) {
1400 other->active_tc.CP0_TCSchedule = arg1;
1401 } else {
1402 other->tcs[other_tc].CP0_TCSchedule = arg1;
1403 }
1404 }
1405
void helper_mtc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
1407 {
1408 env->active_tc.CP0_TCScheFBack = arg1;
1409 }
1410
void helper_mttc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
1412 {
1413 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1414 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1415
1416 if (other_tc == other->current_tc) {
1417 other->active_tc.CP0_TCScheFBack = arg1;
1418 } else {
1419 other->tcs[other_tc].CP0_TCScheFBack = arg1;
1420 }
1421 }
1422
void helper_mtc0_entrylo1(CPUMIPSState *env, target_ulong arg1)
1424 {
1425 /* 1k pages not implemented */
1426 target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
1427 env->CP0_EntryLo1 = (arg1 & MTC0_ENTRYLO_MASK(env))
1428 | (rxi << (CP0EnLo_XI - 30));
1429 }
1430
1431 #if defined(TARGET_MIPS64)
void helper_dmtc0_entrylo1(CPUMIPSState *env, uint64_t arg1)
1433 {
1434 uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
1435 env->CP0_EntryLo1 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
1436 }
1437 #endif
1438
void helper_mtc0_context(CPUMIPSState *env, target_ulong arg1)
1440 {
1441 env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
1442 }
1443
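/*
 * Only architecturally valid PageMask values are accepted: pre-R6 cores
 * take any value, while R6 requires all-ones or one of the defined
 * masks and silently ignores other writes.
 */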
void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask)
1445 {
1446 uint64_t mask = arg1 >> (TARGET_PAGE_BITS + 1);
1447 if (!(env->insn_flags & ISA_MIPS32R6) || (arg1 == ~0) ||
1448 (mask == 0x0000 || mask == 0x0003 || mask == 0x000F ||
1449 mask == 0x003F || mask == 0x00FF || mask == 0x03FF ||
1450 mask == 0x0FFF || mask == 0x3FFF || mask == 0xFFFF)) {
1451 env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
1452 }
1453 }
1454
void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1)
1456 {
1457 update_pagemask(env, arg1, &env->CP0_PageMask);
1458 }
1459
void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1)
1461 {
1462 /* SmartMIPS not implemented */
1463 /* 1k pages not implemented */
1464 env->CP0_PageGrain = (arg1 & env->CP0_PageGrain_rw_bitmask) |
1465 (env->CP0_PageGrain & ~env->CP0_PageGrain_rw_bitmask);
1466 compute_hflags(env);
1467 restore_pamask(env);
1468 }
1469
void helper_mtc0_segctl0(CPUMIPSState *env, target_ulong arg1)
1471 {
1472 CPUState *cs = env_cpu(env);
1473
1474 env->CP0_SegCtl0 = arg1 & CP0SC0_MASK;
1475 tlb_flush(cs);
1476 }
1477
void helper_mtc0_segctl1(CPUMIPSState *env, target_ulong arg1)
1479 {
1480 CPUState *cs = env_cpu(env);
1481
1482 env->CP0_SegCtl1 = arg1 & CP0SC1_MASK;
1483 tlb_flush(cs);
1484 }
1485
void helper_mtc0_segctl2(CPUMIPSState *env, target_ulong arg1)
1487 {
1488 CPUState *cs = env_cpu(env);
1489
1490 env->CP0_SegCtl2 = arg1 & CP0SC2_MASK;
1491 tlb_flush(cs);
1492 }
1493
void helper_mtc0_pwfield(CPUMIPSState *env, target_ulong arg1)
1495 {
1496 #if defined(TARGET_MIPS64)
1497 uint64_t mask = 0x3F3FFFFFFFULL;
1498 uint32_t old_ptei = (env->CP0_PWField >> CP0PF_PTEI) & 0x3FULL;
1499 uint32_t new_ptei = (arg1 >> CP0PF_PTEI) & 0x3FULL;
1500
1501 if ((env->insn_flags & ISA_MIPS32R6)) {
1502 if (((arg1 >> CP0PF_BDI) & 0x3FULL) < 12) {
1503 mask &= ~(0x3FULL << CP0PF_BDI);
1504 }
1505 if (((arg1 >> CP0PF_GDI) & 0x3FULL) < 12) {
1506 mask &= ~(0x3FULL << CP0PF_GDI);
1507 }
1508 if (((arg1 >> CP0PF_UDI) & 0x3FULL) < 12) {
1509 mask &= ~(0x3FULL << CP0PF_UDI);
1510 }
1511 if (((arg1 >> CP0PF_MDI) & 0x3FULL) < 12) {
1512 mask &= ~(0x3FULL << CP0PF_MDI);
1513 }
1514 if (((arg1 >> CP0PF_PTI) & 0x3FULL) < 12) {
1515 mask &= ~(0x3FULL << CP0PF_PTI);
1516 }
1517 }
1518 env->CP0_PWField = arg1 & mask;
1519
1520 if ((new_ptei >= 32) ||
1521 ((env->insn_flags & ISA_MIPS32R6) &&
1522 (new_ptei == 0 || new_ptei == 1))) {
1523 env->CP0_PWField = (env->CP0_PWField & ~0x3FULL) |
1524 (old_ptei << CP0PF_PTEI);
1525 }
1526 #else
1527 uint32_t mask = 0x3FFFFFFF;
1528 uint32_t old_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
1529 uint32_t new_ptew = (arg1 >> CP0PF_PTEW) & 0x3F;
1530
1531 if ((env->insn_flags & ISA_MIPS32R6)) {
1532 if (((arg1 >> CP0PF_GDW) & 0x3F) < 12) {
1533 mask &= ~(0x3F << CP0PF_GDW);
1534 }
1535 if (((arg1 >> CP0PF_UDW) & 0x3F) < 12) {
1536 mask &= ~(0x3F << CP0PF_UDW);
1537 }
1538 if (((arg1 >> CP0PF_MDW) & 0x3F) < 12) {
1539 mask &= ~(0x3F << CP0PF_MDW);
1540 }
1541 if (((arg1 >> CP0PF_PTW) & 0x3F) < 12) {
1542 mask &= ~(0x3F << CP0PF_PTW);
1543 }
1544 }
1545 env->CP0_PWField = arg1 & mask;
1546
1547 if ((new_ptew >= 32) ||
1548 ((env->insn_flags & ISA_MIPS32R6) &&
1549 (new_ptew == 0 || new_ptew == 1))) {
1550 env->CP0_PWField = (env->CP0_PWField & ~0x3F) |
1551 (old_ptew << CP0PF_PTEW);
1552 }
1553 #endif
1554 }
1555
void helper_mtc0_pwsize(CPUMIPSState *env, target_ulong arg1)
1557 {
1558 #if defined(TARGET_MIPS64)
1559 env->CP0_PWSize = arg1 & 0x3F7FFFFFFFULL;
1560 #else
1561 env->CP0_PWSize = arg1 & 0x3FFFFFFF;
1562 #endif
1563 }
1564
void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1)
1566 {
1567 if (env->insn_flags & ISA_MIPS32R6) {
1568 if (arg1 < env->tlb->nb_tlb) {
1569 env->CP0_Wired = arg1;
1570 }
1571 } else {
1572 env->CP0_Wired = arg1 % env->tlb->nb_tlb;
1573 }
1574 }
1575
void helper_mtc0_pwctl(CPUMIPSState *env, target_ulong arg1)
1577 {
1578 #if defined(TARGET_MIPS64)
1579 /* PWEn = 0. Hardware page table walking is not implemented. */
1580 env->CP0_PWCtl = (env->CP0_PWCtl & 0x000000C0) | (arg1 & 0x5C00003F);
1581 #else
1582 env->CP0_PWCtl = (arg1 & 0x800000FF);
1583 #endif
1584 }
1585
void helper_mtc0_srsconf0(CPUMIPSState *env, target_ulong arg1)
1587 {
1588 env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
1589 }
1590
void helper_mtc0_srsconf1(CPUMIPSState *env, target_ulong arg1)
1592 {
1593 env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
1594 }
1595
void helper_mtc0_srsconf2(CPUMIPSState *env, target_ulong arg1)
1597 {
1598 env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
1599 }
1600
void helper_mtc0_srsconf3(CPUMIPSState *env, target_ulong arg1)
1602 {
1603 env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
1604 }
1605
void helper_mtc0_srsconf4(CPUMIPSState *env, target_ulong arg1)
1607 {
1608 env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
1609 }
1610
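/*
 * HWREna selects which hardware registers user-mode RDHWR may access:
 * bits 0-3 are always writable, bits 4 and 5 become writable on R6
 * (bit 4 only when a performance counter is present), and bit 29
 * enables the UserLocal register when Config3.ULRI is set.
 */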
void helper_mtc0_hwrena(CPUMIPSState *env, target_ulong arg1)
1612 {
1613 uint32_t mask = 0x0000000F;
1614
1615 if ((env->CP0_Config1 & (1 << CP0C1_PC)) &&
1616 (env->insn_flags & ISA_MIPS32R6)) {
1617 mask |= (1 << 4);
1618 }
1619 if (env->insn_flags & ISA_MIPS32R6) {
1620 mask |= (1 << 5);
1621 }
1622 if (env->CP0_Config3 & (1 << CP0C3_ULRI)) {
1623 mask |= (1 << 29);
1624
1625 if (arg1 & (1 << 29)) {
1626 env->hflags |= MIPS_HFLAG_HWRENA_ULR;
1627 } else {
1628 env->hflags &= ~MIPS_HFLAG_HWRENA_ULR;
1629 }
1630 }
1631
1632 env->CP0_HWREna = arg1 & mask;
1633 }
1634
void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1)
1636 {
1637 cpu_mips_store_count(env, arg1);
1638 }
1639
void helper_mtc0_saari(CPUMIPSState *env, target_ulong arg1)
1641 {
1642 uint32_t target = arg1 & 0x3f;
1643 if (target <= 1) {
1644 env->CP0_SAARI = target;
1645 }
1646 }
1647
void helper_mtc0_saar(CPUMIPSState *env, target_ulong arg1)
1649 {
1650 uint32_t target = env->CP0_SAARI & 0x3f;
1651 if (target < 2) {
1652 env->CP0_SAAR[target] = arg1 & 0x00000ffffffff03fULL;
1653 switch (target) {
1654 case 0:
1655 if (env->itu) {
1656 itc_reconfigure(env->itu);
1657 }
1658 break;
1659 }
1660 }
1661 }
1662
void helper_mthc0_saar(CPUMIPSState *env, target_ulong arg1)
1664 {
1665 uint32_t target = env->CP0_SAARI & 0x3f;
1666 if (target < 2) {
1667 env->CP0_SAAR[target] =
1668 (((uint64_t) arg1 << 32) & 0x00000fff00000000ULL) |
1669 (env->CP0_SAAR[target] & 0x00000000ffffffffULL);
1670 switch (target) {
1671 case 0:
1672 if (env->itu) {
1673 itc_reconfigure(env->itu);
1674 }
1675 break;
1676 }
1677 }
1678 }
1679
void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1)
1681 {
1682 target_ulong old, val, mask;
1683 mask = (TARGET_PAGE_MASK << 1) | env->CP0_EntryHi_ASID_mask;
1684 if (((env->CP0_Config4 >> CP0C4_IE) & 0x3) >= 2) {
1685 mask |= 1 << CP0EnHi_EHINV;
1686 }
1687
1688 /* 1k pages not implemented */
1689 #if defined(TARGET_MIPS64)
1690 if (env->insn_flags & ISA_MIPS32R6) {
1691 int entryhi_r = extract64(arg1, 62, 2);
1692 int config0_at = extract32(env->CP0_Config0, 13, 2);
1693 bool no_supervisor = (env->CP0_Status_rw_bitmask & 0x8) == 0;
1694 if ((entryhi_r == 2) ||
1695 (entryhi_r == 1 && (no_supervisor || config0_at == 1))) {
1696 /* skip EntryHi.R field if new value is reserved */
1697 mask &= ~(0x3ull << 62);
1698 }
1699 }
1700 mask &= env->SEGMask;
1701 #endif
1702 old = env->CP0_EntryHi;
1703 val = (arg1 & mask) | (old & ~mask);
1704 env->CP0_EntryHi = val;
1705 if (env->CP0_Config3 & (1 << CP0C3_MT)) {
1706 sync_c0_entryhi(env, env->current_tc);
1707 }
1708 /* If the ASID changes, flush qemu's TLB. */
1709 if ((old & env->CP0_EntryHi_ASID_mask) !=
1710 (val & env->CP0_EntryHi_ASID_mask)) {
1711 tlb_flush(env_cpu(env));
1712 }
1713 }
1714
void helper_mttc0_entryhi(CPUMIPSState *env, target_ulong arg1)
1716 {
1717 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1718 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1719
1720 other->CP0_EntryHi = arg1;
1721 sync_c0_entryhi(other, other_tc);
1722 }
1723
void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1)
1725 {
1726 cpu_mips_store_compare(env, arg1);
1727 }
1728
void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1)
1730 {
1731 uint32_t val, old;
1732
1733 old = env->CP0_Status;
1734 cpu_mips_store_status(env, arg1);
1735 val = env->CP0_Status;
1736
1737 if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1738 qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
1739 old, old & env->CP0_Cause & CP0Ca_IP_mask,
1740 val, val & env->CP0_Cause & CP0Ca_IP_mask,
1741 env->CP0_Cause);
1742 switch (cpu_mmu_index(env, false)) {
1743 case 3:
1744 qemu_log(", ERL\n");
1745 break;
1746 case MIPS_HFLAG_UM:
1747 qemu_log(", UM\n");
1748 break;
1749 case MIPS_HFLAG_SM:
1750 qemu_log(", SM\n");
1751 break;
1752 case MIPS_HFLAG_KM:
1753 qemu_log("\n");
1754 break;
1755 default:
1756 cpu_abort(env_cpu(env), "Invalid MMU mode!\n");
1757 break;
1758 }
1759 }
1760 }
1761
void helper_mttc0_status(CPUMIPSState *env, target_ulong arg1)
1763 {
1764 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1765 uint32_t mask = env->CP0_Status_rw_bitmask & ~0xf1000018;
1766 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1767
1768 other->CP0_Status = (other->CP0_Status & ~mask) | (arg1 & mask);
1769 sync_c0_status(env, other, other_tc);
1770 }
1771
void helper_mtc0_intctl(CPUMIPSState *env, target_ulong arg1)
1773 {
1774 env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0);
1775 }
1776
void helper_mtc0_srsctl(CPUMIPSState *env, target_ulong arg1)
1778 {
1779 uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
1780 env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
1781 }
1782
1783 void helper_mtc0_cause(CPUMIPSState *env, target_ulong arg1)
1784 {
1785 cpu_mips_store_cause(env, arg1);
1786 }
1787
1788 void helper_mttc0_cause(CPUMIPSState *env, target_ulong arg1)
1789 {
1790 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1791 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1792
1793 cpu_mips_store_cause(other, arg1);
1794 }
1795
1796 target_ulong helper_mftc0_epc(CPUMIPSState *env)
1797 {
1798 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1799 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1800
1801 return other->CP0_EPC;
1802 }
1803
1804 target_ulong helper_mftc0_ebase(CPUMIPSState *env)
1805 {
1806 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1807 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1808
1809 return other->CP0_EBase;
1810 }
1811
1812 void helper_mtc0_ebase(CPUMIPSState *env, target_ulong arg1)
1813 {
1814 target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
1815 if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
1816 mask |= ~0x3FFFFFFF;
1817 }
1818 env->CP0_EBase = (env->CP0_EBase & ~mask) | (arg1 & mask);
1819 }
1820
1821 void helper_mttc0_ebase(CPUMIPSState *env, target_ulong arg1)
1822 {
1823 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1824 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1825 target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
1826 if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
1827 mask |= ~0x3FFFFFFF;
1828 }
1829 other->CP0_EBase = (other->CP0_EBase & ~mask) | (arg1 & mask);
1830 }
1831
1832 target_ulong helper_mftc0_configx(CPUMIPSState *env, target_ulong idx)
1833 {
1834 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1835 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1836
1837 switch (idx) {
1838 case 0: return other->CP0_Config0;
1839 case 1: return other->CP0_Config1;
1840 case 2: return other->CP0_Config2;
1841 case 3: return other->CP0_Config3;
1842 /* 4 and 5 are reserved. */
1843 case 6: return other->CP0_Config6;
1844 case 7: return other->CP0_Config7;
1845 default:
1846 break;
1847 }
1848 return 0;
1849 }
1850
1851 void helper_mtc0_config0(CPUMIPSState *env, target_ulong arg1)
1852 {
1853 env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
1854 }
1855
1856 void helper_mtc0_config2(CPUMIPSState *env, target_ulong arg1)
1857 {
1858 /* tertiary/secondary caches not implemented */
1859 env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
1860 }
1861
1862 void helper_mtc0_config3(CPUMIPSState *env, target_ulong arg1)
1863 {
1864 if (env->insn_flags & ASE_MICROMIPS) {
1865 env->CP0_Config3 = (env->CP0_Config3 & ~(1 << CP0C3_ISA_ON_EXC)) |
1866 (arg1 & (1 << CP0C3_ISA_ON_EXC));
1867 }
1868 }
1869
1870 void helper_mtc0_config4(CPUMIPSState *env, target_ulong arg1)
1871 {
1872 env->CP0_Config4 = (env->CP0_Config4 & (~env->CP0_Config4_rw_bitmask)) |
1873 (arg1 & env->CP0_Config4_rw_bitmask);
1874 }
1875
1876 void helper_mtc0_config5(CPUMIPSState *env, target_ulong arg1)
1877 {
1878 env->CP0_Config5 = (env->CP0_Config5 & (~env->CP0_Config5_rw_bitmask)) |
1879 (arg1 & env->CP0_Config5_rw_bitmask);
1880 compute_hflags(env);
1881 }
1882
1883 void helper_mtc0_lladdr(CPUMIPSState *env, target_ulong arg1)
1884 {
1885 target_long mask = env->CP0_LLAddr_rw_bitmask;
1886 arg1 = arg1 << env->CP0_LLAddr_shift;
1887 env->CP0_LLAddr = (env->CP0_LLAddr & ~mask) | (arg1 & mask);
1888 }
1889
1890 #define MTC0_MAAR_MASK(env) \
1891 ((0x1ULL << 63) | ((env->PAMask >> 4) & ~0xFFFull) | 0x3)
1892
1893 void helper_mtc0_maar(CPUMIPSState *env, target_ulong arg1)
1894 {
1895 env->CP0_MAAR[env->CP0_MAARI] = arg1 & MTC0_MAAR_MASK(env);
1896 }
1897
1898 void helper_mthc0_maar(CPUMIPSState *env, target_ulong arg1)
1899 {
1900 env->CP0_MAAR[env->CP0_MAARI] =
1901 (((uint64_t) arg1 << 32) & MTC0_MAAR_MASK(env)) |
1902 (env->CP0_MAAR[env->CP0_MAARI] & 0x00000000ffffffffULL);
1903 }
1904
1905 void helper_mtc0_maari(CPUMIPSState *env, target_ulong arg1)
1906 {
1907 int index = arg1 & 0x3f;
1908 if (index == 0x3f) {
1909 /*
1910 * Software may write all ones to INDEX to determine the
1911 * maximum value supported.
1912 */
1913 env->CP0_MAARI = MIPS_MAAR_MAX - 1;
1914 } else if (index < MIPS_MAAR_MAX) {
1915 env->CP0_MAARI = index;
1916 }
1917 /*
1918 * Apart from the all-ones case above, if the value written is not
1919 * supported, then INDEX is unchanged from its previous value.
1920 */
1921 }
1922
1923 void helper_mtc0_watchlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
1924 {
1925 /*
1926 * Watch exceptions for instructions, data loads, data stores
1927 * not implemented.
1928 */
1929 env->CP0_WatchLo[sel] = (arg1 & ~0x7);
1930 }
1931
1932 void helper_mtc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
1933 {
1934 int mask = 0x40000FF8 | (env->CP0_EntryHi_ASID_mask << CP0WH_ASID);
1935 env->CP0_WatchHi[sel] = arg1 & mask;
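/* The low three bits (I, R, W) are write-one-to-clear status bits. */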
1936 env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
1937 }
1938
1939 void helper_mtc0_xcontext(CPUMIPSState *env, target_ulong arg1)
1940 {
1941 target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
1942 env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
1943 }
1944
1945 void helper_mtc0_framemask(CPUMIPSState *env, target_ulong arg1)
1946 {
1947 env->CP0_Framemask = arg1; /* XXX */
1948 }
1949
1950 void helper_mtc0_debug(CPUMIPSState *env, target_ulong arg1)
1951 {
1952 env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
1953 if (arg1 & (1 << CP0DB_DM)) {
1954 env->hflags |= MIPS_HFLAG_DM;
1955 } else {
1956 env->hflags &= ~MIPS_HFLAG_DM;
1957 }
1958 }
1959
1960 void helper_mttc0_debug(CPUMIPSState *env, target_ulong arg1)
1961 {
1962 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1963 uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
1964 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1965
1966 /* XXX: Might be wrong, check with EJTAG spec. */
1967 if (other_tc == other->current_tc) {
1968 other->active_tc.CP0_Debug_tcstatus = val;
1969 } else {
1970 other->tcs[other_tc].CP0_Debug_tcstatus = val;
1971 }
1972 other->CP0_Debug = (other->CP0_Debug &
1973 ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1974 (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
1975 }
1976
1977 void helper_mtc0_performance0(CPUMIPSState *env, target_ulong arg1)
1978 {
1979 env->CP0_Performance0 = arg1 & 0x000007ff;
1980 }
1981
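/*
 * Only the WST, SPR and (when an ITC tag is present) ITC bits of ErrCtl are
 * kept. When only ITC is set, CACHE tag operations are redirected to the
 * ITC configuration tags (MIPS_HFLAG_ITC_CACHE).
 */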
1982 void helper_mtc0_errctl(CPUMIPSState *env, target_ulong arg1)
1983 {
1984 int32_t wst = arg1 & (1 << CP0EC_WST);
1985 int32_t spr = arg1 & (1 << CP0EC_SPR);
1986 int32_t itc = env->itc_tag ? (arg1 & (1 << CP0EC_ITC)) : 0;
1987
1988 env->CP0_ErrCtl = wst | spr | itc;
1989
1990 if (itc && !wst && !spr) {
1991 env->hflags |= MIPS_HFLAG_ITC_CACHE;
1992 } else {
1993 env->hflags &= ~MIPS_HFLAG_ITC_CACHE;
1994 }
1995 }
1996
1997 void helper_mtc0_taglo(CPUMIPSState *env, target_ulong arg1)
1998 {
1999 if (env->hflags & MIPS_HFLAG_ITC_CACHE) {
2000 /*
2001 * If CACHE instruction is configured for ITC tags then make all
2002 * CP0.TagLo bits writable. The actual write to ITC Configuration
2003 * Tag will take care of the read-only bits.
2004 */
2005 env->CP0_TagLo = arg1;
2006 } else {
2007 env->CP0_TagLo = arg1 & 0xFFFFFCF6;
2008 }
2009 }
2010
2011 void helper_mtc0_datalo(CPUMIPSState *env, target_ulong arg1)
2012 {
2013 env->CP0_DataLo = arg1; /* XXX */
2014 }
2015
2016 void helper_mtc0_taghi(CPUMIPSState *env, target_ulong arg1)
2017 {
2018 env->CP0_TagHi = arg1; /* XXX */
2019 }
2020
2021 void helper_mtc0_datahi(CPUMIPSState *env, target_ulong arg1)
2022 {
2023 env->CP0_DataHi = arg1; /* XXX */
2024 }
2025
2026 /* MIPS MT functions */
2027 target_ulong helper_mftgpr(CPUMIPSState *env, uint32_t sel)
2028 {
2029 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
2030 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
2031
2032 if (other_tc == other->current_tc) {
2033 return other->active_tc.gpr[sel];
2034 } else {
2035 return other->tcs[other_tc].gpr[sel];
2036 }
2037 }
2038
2039 target_ulong helper_mftlo(CPUMIPSState *env, uint32_t sel)
2040 {
2041 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
2042 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
2043
2044 if (other_tc == other->current_tc) {
2045 return other->active_tc.LO[sel];
2046 } else {
2047 return other->tcs[other_tc].LO[sel];
2048 }
2049 }
2050
2051 target_ulong helper_mfthi(CPUMIPSState *env, uint32_t sel)
2052 {
2053 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
2054 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
2055
2056 if (other_tc == other->current_tc) {
2057 return other->active_tc.HI[sel];
2058 } else {
2059 return other->tcs[other_tc].HI[sel];
2060 }
2061 }
2062
2063 target_ulong helper_mftacx(CPUMIPSState *env, uint32_t sel)
2064 {
2065 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
2066 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
2067
2068 if (other_tc == other->current_tc) {
2069 return other->active_tc.ACX[sel];
2070 } else {
2071 return other->tcs[other_tc].ACX[sel];
2072 }
2073 }
2074
2075 target_ulong helper_mftdsp(CPUMIPSState *env)
2076 {
2077 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
2078 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
2079
2080 if (other_tc == other->current_tc) {
2081 return other->active_tc.DSPControl;
2082 } else {
2083 return other->tcs[other_tc].DSPControl;
2084 }
2085 }
2086
2087 void helper_mttgpr(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
2088 {
2089 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
2090 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
2091
2092 if (other_tc == other->current_tc) {
2093 other->active_tc.gpr[sel] = arg1;
2094 } else {
2095 other->tcs[other_tc].gpr[sel] = arg1;
2096 }
2097 }
2098
2099 void helper_mttlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
2100 {
2101 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
2102 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
2103
2104 if (other_tc == other->current_tc) {
2105 other->active_tc.LO[sel] = arg1;
2106 } else {
2107 other->tcs[other_tc].LO[sel] = arg1;
2108 }
2109 }
2110
2111 void helper_mtthi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
2112 {
2113 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
2114 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
2115
2116 if (other_tc == other->current_tc) {
2117 other->active_tc.HI[sel] = arg1;
2118 } else {
2119 other->tcs[other_tc].HI[sel] = arg1;
2120 }
2121 }
2122
2123 void helper_mttacx(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
2124 {
2125 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
2126 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
2127
2128 if (other_tc == other->current_tc) {
2129 other->active_tc.ACX[sel] = arg1;
2130 } else {
2131 other->tcs[other_tc].ACX[sel] = arg1;
2132 }
2133 }
2134
2135 void helper_mttdsp(CPUMIPSState *env, target_ulong arg1)
2136 {
2137 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
2138 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
2139
2140 if (other_tc == other->current_tc) {
2141 other->active_tc.DSPControl = arg1;
2142 } else {
2143 other->tcs[other_tc].DSPControl = arg1;
2144 }
2145 }
2146
2147 /* MIPS MT functions */
2148 target_ulong helper_dmt(void)
2149 {
2150 /* TODO */
2151 return 0;
2152 }
2153
2154 target_ulong helper_emt(void)
2155 {
2156 /* TODO */
2157 return 0;
2158 }
2159
2160 target_ulong helper_dvpe(CPUMIPSState *env)
2161 {
2162 CPUState *other_cs = first_cpu;
2163 target_ulong prev = env->mvp->CP0_MVPControl;
2164
2165 CPU_FOREACH(other_cs) {
2166 MIPSCPU *other_cpu = MIPS_CPU(other_cs);
2167 /* Turn off all VPEs except the one executing the dvpe. */
2168 if (&other_cpu->env != env) {
2169 other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
2170 mips_vpe_sleep(other_cpu);
2171 }
2172 }
2173 return prev;
2174 }
2175
2176 target_ulong helper_evpe(CPUMIPSState *env)
2177 {
2178 CPUState *other_cs = first_cpu;
2179 target_ulong prev = env->mvp->CP0_MVPControl;
2180
2181 CPU_FOREACH(other_cs) {
2182 MIPSCPU *other_cpu = MIPS_CPU(other_cs);
2183
2184 if (&other_cpu->env != env
2185 /* If the VPE is WFI, don't disturb its sleep. */
2186 && !mips_vpe_is_wfi(other_cpu)) {
2187 /* Enable the VPE. */
2188 other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
2189 mips_vpe_wake(other_cpu); /* And wake it up. */
2190 }
2191 }
2192 return prev;
2193 }
2194 #endif /* !CONFIG_USER_ONLY */
2195
2196 void helper_fork(target_ulong arg1, target_ulong arg2)
2197 {
2198 /*
2199 * arg1 = rt, arg2 = rs
2200 * TODO: store to TC register
2201 */
2202 }
2203
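/*
 * YIELD: no scheduling policy or yield qualifiers are implemented, so the
 * qualifying cases only record a sub-code in VPEControl.EXCPT and raise a
 * Thread exception.
 */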
2204 target_ulong helper_yield(CPUMIPSState *env, target_ulong arg)
2205 {
2206 target_long arg1 = arg;
2207
2208 if (arg1 < 0) {
2209 /* No scheduling policy implemented. */
2210 if (arg1 != -2) {
2211 if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
2212 env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
2213 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
2214 env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
2215 do_raise_exception(env, EXCP_THREAD, GETPC());
2216 }
2217 }
2218 } else if (arg1 == 0) {
2219 if (0) {
2220 /* TODO: TC underflow */
2221 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
2222 do_raise_exception(env, EXCP_THREAD, GETPC());
2223 } else {
2224 /* TODO: Deallocate TC */
2225 }
2226 } else if (arg1 > 0) {
2227 /* Yield qualifier inputs not implemented. */
2228 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
2229 env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
2230 do_raise_exception(env, EXCP_THREAD, GETPC());
2231 }
2232 return env->CP0_YQMask;
2233 }
2234
2235 /* R6 Multi-threading */
2236 #ifndef CONFIG_USER_ONLY
2237 target_ulong helper_dvp(CPUMIPSState *env)
2238 {
2239 CPUState *other_cs = first_cpu;
2240 target_ulong prev = env->CP0_VPControl;
2241
2242 if (!((env->CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
2243 CPU_FOREACH(other_cs) {
2244 MIPSCPU *other_cpu = MIPS_CPU(other_cs);
2245 /* Turn off all VPs except the one executing the dvp. */
2246 if (&other_cpu->env != env) {
2247 mips_vpe_sleep(other_cpu);
2248 }
2249 }
2250 env->CP0_VPControl |= (1 << CP0VPCtl_DIS);
2251 }
2252 return prev;
2253 }
2254
2255 target_ulong helper_evp(CPUMIPSState *env)
2256 {
2257 CPUState *other_cs = first_cpu;
2258 target_ulong prev = env->CP0_VPControl;
2259
2260 if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
2261 CPU_FOREACH(other_cs) {
2262 MIPSCPU *other_cpu = MIPS_CPU(other_cs);
2263 if ((&other_cpu->env != env) && !mips_vp_is_wfi(other_cpu)) {
2264 /*
2265 * If the VP is WFI, don't disturb its sleep.
2266 * Otherwise, wake it up.
2267 */
2268 mips_vpe_wake(other_cpu);
2269 }
2270 }
2271 env->CP0_VPControl &= ~(1 << CP0VPCtl_DIS);
2272 }
2273 return prev;
2274 }
2275 #endif /* !CONFIG_USER_ONLY */
2276
2277 #ifndef CONFIG_USER_ONLY
2278 /* TLB management */
2279 static void r4k_mips_tlb_flush_extra(CPUMIPSState *env, int first)
2280 {
2281 /* Discard entries from env->tlb[first] onwards. */
2282 while (env->tlb->tlb_in_use > first) {
2283 r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
2284 }
2285 }
2286
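/*
 * Extract the physical frame number from an EntryLo value. On 64-bit
 * targets the PFN is one contiguous field starting at bit 6; on 32-bit
 * targets the upper 32 bits hold the XPA extension (PFNX).
 */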
2287 static inline uint64_t get_tlb_pfn_from_entrylo(uint64_t entrylo)
2288 {
2289 #if defined(TARGET_MIPS64)
2290 return extract64(entrylo, 6, 54);
2291 #else
2292 return extract64(entrylo, 6, 24) | /* PFN */
2293 (extract64(entrylo, 32, 32) << 24); /* PFNX */
2294 #endif
2295 }
2296
2297 static void r4k_fill_tlb(CPUMIPSState *env, int idx)
2298 {
2299 r4k_tlb_t *tlb;
2300 uint64_t mask = env->CP0_PageMask >> (TARGET_PAGE_BITS + 1);
2301
2302 /* XXX: detect conflicting TLBs and raise an MCHECK exception when needed */
2303 tlb = &env->tlb->mmu.r4k.tlb[idx];
2304 if (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) {
2305 tlb->EHINV = 1;
2306 return;
2307 }
2308 tlb->EHINV = 0;
2309 tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
2310 #if defined(TARGET_MIPS64)
2311 tlb->VPN &= env->SEGMask;
2312 #endif
2313 tlb->ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
2314 tlb->PageMask = env->CP0_PageMask;
2315 tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
2316 tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
2317 tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
2318 tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
2319 tlb->XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
2320 tlb->RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
2321 tlb->PFN[0] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo0) & ~mask) << 12;
2322 tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
2323 tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
2324 tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
2325 tlb->XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
2326 tlb->RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;
2327 tlb->PFN[1] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo1) & ~mask) << 12;
2328 }
2329
2330 void r4k_helper_tlbinv(CPUMIPSState *env)
2331 {
2332 int idx;
2333 r4k_tlb_t *tlb;
2334 uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
2335
2336 for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
2337 tlb = &env->tlb->mmu.r4k.tlb[idx];
2338 if (!tlb->G && tlb->ASID == ASID) {
2339 tlb->EHINV = 1;
2340 }
2341 }
2342 cpu_mips_tlb_flush(env);
2343 }
2344
2345 void r4k_helper_tlbinvf(CPUMIPSState *env)
2346 {
2347 int idx;
2348
2349 for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
2350 env->tlb->mmu.r4k.tlb[idx].EHINV = 1;
2351 }
2352 cpu_mips_tlb_flush(env);
2353 }
2354
2355 void r4k_helper_tlbwi(CPUMIPSState *env)
2356 {
2357 r4k_tlb_t *tlb;
2358 int idx;
2359 target_ulong VPN;
2360 uint16_t ASID;
2361 bool EHINV, G, V0, D0, V1, D1, XI0, XI1, RI0, RI1;
2362
2363 idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
2364 tlb = &env->tlb->mmu.r4k.tlb[idx];
2365 VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
2366 #if defined(TARGET_MIPS64)
2367 VPN &= env->SEGMask;
2368 #endif
2369 ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
2370 EHINV = (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) != 0;
2371 G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
2372 V0 = (env->CP0_EntryLo0 & 2) != 0;
2373 D0 = (env->CP0_EntryLo0 & 4) != 0;
2374 XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
2375 RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
2376 V1 = (env->CP0_EntryLo1 & 2) != 0;
2377 D1 = (env->CP0_EntryLo1 & 4) != 0;
2378 XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
2379 RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;
2380
2381 /*
2382 * Discard cached TLB entries, unless tlbwi is just upgrading access
2383 * permissions on the current entry.
2384 */
2385 if (tlb->VPN != VPN || tlb->ASID != ASID || tlb->G != G ||
2386 (!tlb->EHINV && EHINV) ||
2387 (tlb->V0 && !V0) || (tlb->D0 && !D0) ||
2388 (!tlb->XI0 && XI0) || (!tlb->RI0 && RI0) ||
2389 (tlb->V1 && !V1) || (tlb->D1 && !D1) ||
2390 (!tlb->XI1 && XI1) || (!tlb->RI1 && RI1)) {
2391 r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
2392 }
2393
2394 r4k_invalidate_tlb(env, idx, 0);
2395 r4k_fill_tlb(env, idx);
2396 }
2397
2398 void r4k_helper_tlbwr(CPUMIPSState *env)
2399 {
2400 int r = cpu_mips_get_random(env);
2401
2402 r4k_invalidate_tlb(env, r, 1);
2403 r4k_fill_tlb(env, r);
2404 }
2405
2406 void r4k_helper_tlbp(CPUMIPSState *env)
2407 {
2408 r4k_tlb_t *tlb;
2409 target_ulong mask;
2410 target_ulong tag;
2411 target_ulong VPN;
2412 uint16_t ASID;
2413 int i;
2414
2415 ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
2416 for (i = 0; i < env->tlb->nb_tlb; i++) {
2417 tlb = &env->tlb->mmu.r4k.tlb[i];
2418 /* 1k pages are not supported. */
2419 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
2420 tag = env->CP0_EntryHi & ~mask;
2421 VPN = tlb->VPN & ~mask;
2422 #if defined(TARGET_MIPS64)
2423 tag &= env->SEGMask;
2424 #endif
2425 /* Check ASID, virtual page number & size */
2426 if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag && !tlb->EHINV) {
2427 /* TLB match */
2428 env->CP0_Index = i;
2429 break;
2430 }
2431 }
2432 if (i == env->tlb->nb_tlb) {
2433 /* No match among the active entries: discard shadow entries if any match. */
2434 for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
2435 tlb = &env->tlb->mmu.r4k.tlb[i];
2436 /* 1k pages are not supported. */
2437 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
2438 tag = env->CP0_EntryHi & ~mask;
2439 VPN = tlb->VPN & ~mask;
2440 #if defined(TARGET_MIPS64)
2441 tag &= env->SEGMask;
2442 #endif
2443 /* Check ASID, virtual page number & size */
2444 if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
2445 r4k_mips_tlb_flush_extra(env, i);
2446 break;
2447 }
2448 }
2449
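/* Probe failed: report it by setting the P bit (bit 31) of CP0_Index. */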
2450 env->CP0_Index |= 0x80000000;
2451 }
2452 }
2453
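/*
 * Inverse of get_tlb_pfn_from_entrylo(): rebuild the EntryLo PFN/PFNX
 * layout from the PFN stored in a TLB entry.
 */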
2454 static inline uint64_t get_entrylo_pfn_from_tlb(uint64_t tlb_pfn)
2455 {
2456 #if defined(TARGET_MIPS64)
2457 return tlb_pfn << 6;
2458 #else
2459 return (extract64(tlb_pfn, 0, 24) << 6) | /* PFN */
2460 (extract64(tlb_pfn, 24, 32) << 32); /* PFNX */
2461 #endif
2462 }
2463
2464 void r4k_helper_tlbr(CPUMIPSState *env)
2465 {
2466 r4k_tlb_t *tlb;
2467 uint16_t ASID;
2468 int idx;
2469
2470 ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
2471 idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
2472 tlb = &env->tlb->mmu.r4k.tlb[idx];
2473
2474 /* If this will change the current ASID, flush qemu's TLB. */
2475 if (ASID != tlb->ASID) {
2476 cpu_mips_tlb_flush(env);
2477 }
2478
2479 r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
2480
2481 if (tlb->EHINV) {
2482 env->CP0_EntryHi = 1 << CP0EnHi_EHINV;
2483 env->CP0_PageMask = 0;
2484 env->CP0_EntryLo0 = 0;
2485 env->CP0_EntryLo1 = 0;
2486 } else {
2487 env->CP0_EntryHi = tlb->VPN | tlb->ASID;
2488 env->CP0_PageMask = tlb->PageMask;
2489 env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
2490 ((uint64_t)tlb->RI0 << CP0EnLo_RI) |
2491 ((uint64_t)tlb->XI0 << CP0EnLo_XI) | (tlb->C0 << 3) |
2492 get_entrylo_pfn_from_tlb(tlb->PFN[0] >> 12);
2493 env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
2494 ((uint64_t)tlb->RI1 << CP0EnLo_RI) |
2495 ((uint64_t)tlb->XI1 << CP0EnLo_XI) | (tlb->C1 << 3) |
2496 get_entrylo_pfn_from_tlb(tlb->PFN[1] >> 12);
2497 }
2498 }
2499
2500 void helper_tlbwi(CPUMIPSState *env)
2501 {
2502 env->tlb->helper_tlbwi(env);
2503 }
2504
2505 void helper_tlbwr(CPUMIPSState *env)
2506 {
2507 env->tlb->helper_tlbwr(env);
2508 }
2509
2510 void helper_tlbp(CPUMIPSState *env)
2511 {
2512 env->tlb->helper_tlbp(env);
2513 }
2514
2515 void helper_tlbr(CPUMIPSState *env)
2516 {
2517 env->tlb->helper_tlbr(env);
2518 }
2519
2520 void helper_tlbinv(CPUMIPSState *env)
2521 {
2522 env->tlb->helper_tlbinv(env);
2523 }
2524
2525 void helper_tlbinvf(CPUMIPSState *env)
2526 {
2527 env->tlb->helper_tlbinvf(env);
2528 }
2529
2530 /* Specials */
2531 target_ulong helper_di(CPUMIPSState *env)
2532 {
2533 target_ulong t0 = env->CP0_Status;
2534
2535 env->CP0_Status = t0 & ~(1 << CP0St_IE);
2536 return t0;
2537 }
2538
2539 target_ulong helper_ei(CPUMIPSState *env)
2540 {
2541 target_ulong t0 = env->CP0_Status;
2542
2543 env->CP0_Status = t0 | (1 << CP0St_IE);
2544 return t0;
2545 }
2546
2547 static void debug_pre_eret(CPUMIPSState *env)
2548 {
2549 if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
2550 qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
2551 env->active_tc.PC, env->CP0_EPC);
2552 if (env->CP0_Status & (1 << CP0St_ERL)) {
2553 qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
2554 }
2555 if (env->hflags & MIPS_HFLAG_DM) {
2556 qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
2557 }
2558 qemu_log("\n");
2559 }
2560 }
2561
2562 static void debug_post_eret(CPUMIPSState *env)
2563 {
2564 if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
2565 qemu_log(" => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
2566 env->active_tc.PC, env->CP0_EPC);
2567 if (env->CP0_Status & (1 << CP0St_ERL)) {
2568 qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
2569 }
2570 if (env->hflags & MIPS_HFLAG_DM) {
2571 qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
2572 }
2573 switch (cpu_mmu_index(env, false)) {
2574 case 3:
2575 qemu_log(", ERL\n");
2576 break;
2577 case MIPS_HFLAG_UM:
2578 qemu_log(", UM\n");
2579 break;
2580 case MIPS_HFLAG_SM:
2581 qemu_log(", SM\n");
2582 break;
2583 case MIPS_HFLAG_KM:
2584 qemu_log("\n");
2585 break;
2586 default:
2587 cpu_abort(env_cpu(env), "Invalid MMU mode!\n");
2588 break;
2589 }
2590 }
2591 }
2592
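/*
 * Load the PC from an exception return address; bit 0 selects the
 * compressed (MIPS16e/microMIPS) ISA mode.
 */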
2593 static void set_pc(CPUMIPSState *env, target_ulong error_pc)
2594 {
2595 env->active_tc.PC = error_pc & ~(target_ulong)1;
2596 if (error_pc & 1) {
2597 env->hflags |= MIPS_HFLAG_M16;
2598 } else {
2599 env->hflags &= ~(MIPS_HFLAG_M16);
2600 }
2601 }
2602
2603 static inline void exception_return(CPUMIPSState *env)
2604 {
2605 debug_pre_eret(env);
2606 if (env->CP0_Status & (1 << CP0St_ERL)) {
2607 set_pc(env, env->CP0_ErrorEPC);
2608 env->CP0_Status &= ~(1 << CP0St_ERL);
2609 } else {
2610 set_pc(env, env->CP0_EPC);
2611 env->CP0_Status &= ~(1 << CP0St_EXL);
2612 }
2613 compute_hflags(env);
2614 debug_post_eret(env);
2615 }
2616
2617 void helper_eret(CPUMIPSState *env)
2618 {
2619 exception_return(env);
2620 env->CP0_LLAddr = 1;
2621 env->lladdr = 1;
2622 }
2623
2624 void helper_eretnc(CPUMIPSState *env)
2625 {
2626 exception_return(env);
2627 }
2628
2629 void helper_deret(CPUMIPSState *env)
2630 {
2631 debug_pre_eret(env);
2632
2633 env->hflags &= ~MIPS_HFLAG_DM;
2634 compute_hflags(env);
2635
2636 set_pc(env, env->CP0_DEPC);
2637
2638 debug_post_eret(env);
2639 }
2640 #endif /* !CONFIG_USER_ONLY */
2641
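/*
 * RDHWR is permitted in CP0-privileged mode or when the corresponding
 * HWREna bit is set; otherwise raise a Reserved Instruction exception.
 */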
2642 static inline void check_hwrena(CPUMIPSState *env, int reg, uintptr_t pc)
2643 {
2644 if ((env->hflags & MIPS_HFLAG_CP0) || (env->CP0_HWREna & (1 << reg))) {
2645 return;
2646 }
2647 do_raise_exception(env, EXCP_RI, pc);
2648 }
2649
2650 target_ulong helper_rdhwr_cpunum(CPUMIPSState *env)
2651 {
2652 check_hwrena(env, 0, GETPC());
2653 return env->CP0_EBase & 0x3ff;
2654 }
2655
2656 target_ulong helper_rdhwr_synci_step(CPUMIPSState *env)
2657 {
2658 check_hwrena(env, 1, GETPC());
2659 return env->SYNCI_Step;
2660 }
2661
2662 target_ulong helper_rdhwr_cc(CPUMIPSState *env)
2663 {
2664 check_hwrena(env, 2, GETPC());
2665 #ifdef CONFIG_USER_ONLY
2666 return env->CP0_Count;
2667 #else
2668 return (int32_t)cpu_mips_get_count(env);
2669 #endif
2670 }
2671
2672 target_ulong helper_rdhwr_ccres(CPUMIPSState *env)
2673 {
2674 check_hwrena(env, 3, GETPC());
2675 return env->CCRes;
2676 }
2677
2678 target_ulong helper_rdhwr_performance(CPUMIPSState *env)
2679 {
2680 check_hwrena(env, 4, GETPC());
2681 return env->CP0_Performance0;
2682 }
2683
2684 target_ulong helper_rdhwr_xnp(CPUMIPSState *env)
2685 {
2686 check_hwrena(env, 5, GETPC());
2687 return (env->CP0_Config5 >> CP0C5_XNP) & 1;
2688 }
2689
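/*
 * Partial emulation of a few PMON-style monitor calls (character and
 * string console I/O); unhandled calls are ignored.
 */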
2690 void helper_pmon(CPUMIPSState *env, int function)
2691 {
2692 function /= 2;
2693 switch (function) {
2694 case 2: /* TODO: char inbyte(int waitflag); */
2695 if (env->active_tc.gpr[4] == 0) {
2696 env->active_tc.gpr[2] = -1;
2697 }
2698 /* Fall through */
2699 case 11: /* TODO: char inbyte (void); */
2700 env->active_tc.gpr[2] = -1;
2701 break;
2702 case 3:
2703 case 12:
2704 printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
2705 break;
2706 case 17:
2707 break;
2708 case 158:
2709 {
2710 unsigned char *fmt = (void *)(uintptr_t)env->active_tc.gpr[4];
2711 printf("%s", fmt);
2712 }
2713 break;
2714 }
2715 }
2716
2717 void helper_wait(CPUMIPSState *env)
2718 {
2719 CPUState *cs = env_cpu(env);
2720
2721 cs->halted = 1;
2722 cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
2723 /*
2724 * WAIT is the last instruction in the translation block and PC was
2725 * already updated, so there is no need to recover PC and icount.
2726 */
2727 raise_exception(env, EXCP_HLT);
2728 }
2729
2730 #if !defined(CONFIG_USER_ONLY)
2731
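/*
 * Unaligned accesses raise an address error: AdES for stores, AdEL for
 * loads and instruction fetches (the latter also flag EXCP_INST_NOTAVAIL
 * in the error code).
 */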
2732 void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
2733 MMUAccessType access_type,
2734 int mmu_idx, uintptr_t retaddr)
2735 {
2736 MIPSCPU *cpu = MIPS_CPU(cs);
2737 CPUMIPSState *env = &cpu->env;
2738 int error_code = 0;
2739 int excp;
2740
2741 if (!(env->hflags & MIPS_HFLAG_DM)) {
2742 env->CP0_BadVAddr = addr;
2743 }
2744
2745 if (access_type == MMU_DATA_STORE) {
2746 excp = EXCP_AdES;
2747 } else {
2748 excp = EXCP_AdEL;
2749 if (access_type == MMU_INST_FETCH) {
2750 error_code |= EXCP_INST_NOTAVAIL;
2751 }
2752 }
2753
2754 do_raise_exception_err(env, excp, error_code, retaddr);
2755 }
2756
2757 void mips_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
2758 vaddr addr, unsigned size,
2759 MMUAccessType access_type,
2760 int mmu_idx, MemTxAttrs attrs,
2761 MemTxResult response, uintptr_t retaddr)
2762 {
2763 MIPSCPU *cpu = MIPS_CPU(cs);
2764 CPUMIPSState *env = &cpu->env;
2765
2766 if (access_type == MMU_INST_FETCH) {
2767 do_raise_exception(env, EXCP_IBE, retaddr);
2768 } else {
2769 do_raise_exception(env, EXCP_DBE, retaddr);
2770 }
2771 }
2772 #endif /* !CONFIG_USER_ONLY */
2773
2774 /* Complex FPU operations which may need stack space. */
2775
2776 #define FLOAT_TWO32 make_float32(1 << 30)
2777 #define FLOAT_TWO64 make_float64(1ULL << 62)
2778
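/* Result of the legacy (non-nan2008) conversions on overflow or invalid input. */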
2779 #define FP_TO_INT32_OVERFLOW 0x7fffffff
2780 #define FP_TO_INT64_OVERFLOW 0x7fffffffffffffffULL
2781
2782 /* Convert the MIPS rounding mode in FCR31 to the IEEE softfloat rounding mode. */
2783 unsigned int ieee_rm[] = {
2784 float_round_nearest_even,
2785 float_round_to_zero,
2786 float_round_up,
2787 float_round_down
2788 };
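/*
 * Illustrative use of the table above (cf. restore_rounding_mode()):
 *     set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3],
 *                             &env->active_fpu.fp_status);
 */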
2789
2790 target_ulong helper_cfc1(CPUMIPSState *env, uint32_t reg)
2791 {
2792 target_ulong arg1 = 0;
2793
2794 switch (reg) {
2795 case 0:
2796 arg1 = (int32_t)env->active_fpu.fcr0;
2797 break;
2798 case 1:
2799 /* UFR Support - Read Status FR */
2800 if (env->active_fpu.fcr0 & (1 << FCR0_UFRP)) {
2801 if (env->CP0_Config5 & (1 << CP0C5_UFR)) {
2802 arg1 = (int32_t)
2803 ((env->CP0_Status & (1 << CP0St_FR)) >> CP0St_FR);
2804 } else {
2805 do_raise_exception(env, EXCP_RI, GETPC());
2806 }
2807 }
2808 break;
2809 case 5:
2810 /* FRE Support - read Config5.FRE bit */
2811 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
2812 if (env->CP0_Config5 & (1 << CP0C5_UFE)) {
2813 arg1 = (env->CP0_Config5 >> CP0C5_FRE) & 1;
2814 } else {
2815 helper_raise_exception(env, EXCP_RI);
2816 }
2817 }
2818 break;
2819 case 25:
2820 arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) |
2821 ((env->active_fpu.fcr31 >> 23) & 0x1);
2822 break;
2823 case 26:
2824 arg1 = env->active_fpu.fcr31 & 0x0003f07c;
2825 break;
2826 case 28:
2827 arg1 = (env->active_fpu.fcr31 & 0x00000f83) |
2828 ((env->active_fpu.fcr31 >> 22) & 0x4);
2829 break;
2830 default:
2831 arg1 = (int32_t)env->active_fpu.fcr31;
2832 break;
2833 }
2834
2835 return arg1;
2836 }
2837
2838 void helper_ctc1(CPUMIPSState *env, target_ulong arg1, uint32_t fs, uint32_t rt)
2839 {
2840 switch (fs) {
2841 case 1:
2842 /* UFR Alias - Reset Status FR */
2843 if (!((env->active_fpu.fcr0 & (1 << FCR0_UFRP)) && (rt == 0))) {
2844 return;
2845 }
2846 if (env->CP0_Config5 & (1 << CP0C5_UFR)) {
2847 env->CP0_Status &= ~(1 << CP0St_FR);
2848 compute_hflags(env);
2849 } else {
2850 do_raise_exception(env, EXCP_RI, GETPC());
2851 }
2852 break;
2853 case 4:
2854 /* UNFR Alias - Set Status FR */
2855 if (!((env->active_fpu.fcr0 & (1 << FCR0_UFRP)) && (rt == 0))) {
2856 return;
2857 }
2858 if (env->CP0_Config5 & (1 << CP0C5_UFR)) {
2859 env->CP0_Status |= (1 << CP0St_FR);
2860 compute_hflags(env);
2861 } else {
2862 do_raise_exception(env, EXCP_RI, GETPC());
2863 }
2864 break;
2865 case 5:
2866 /* FRE Support - clear Config5.FRE bit */
2867 if (!((env->active_fpu.fcr0 & (1 << FCR0_FREP)) && (rt == 0))) {
2868 return;
2869 }
2870 if (env->CP0_Config5 & (1 << CP0C5_UFE)) {
2871 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
2872 compute_hflags(env);
2873 } else {
2874 helper_raise_exception(env, EXCP_RI);
2875 }
2876 break;
2877 case 6:
2878 /* FRE Support - set Config5.FRE bit */
2879 if (!((env->active_fpu.fcr0 & (1 << FCR0_FREP)) && (rt == 0))) {
2880 return;
2881 }
2882 if (env->CP0_Config5 & (1 << CP0C5_UFE)) {
2883 env->CP0_Config5 |= (1 << CP0C5_FRE);
2884 compute_hflags(env);
2885 } else {
2886 helper_raise_exception(env, EXCP_RI);
2887 }
2888 break;
2889 case 25:
2890 if ((env->insn_flags & ISA_MIPS32R6) || (arg1 & 0xffffff00)) {
2891 return;
2892 }
2893 env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) |
2894 ((arg1 & 0xfe) << 24) |
2895 ((arg1 & 0x1) << 23);
2896 break;
2897 case 26:
2898 if (arg1 & 0x007c0000) {
2899 return;
2900 }
2901 env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) |
2902 (arg1 & 0x0003f07c);
2903 break;
2904 case 28:
2905 if (arg1 & 0x007c0000) {
2906 return;
2907 }
2908 env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) |
2909 (arg1 & 0x00000f83) |
2910 ((arg1 & 0x4) << 22);
2911 break;
2912 case 31:
2913 env->active_fpu.fcr31 = (arg1 & env->active_fpu.fcr31_rw_bitmask) |
2914 (env->active_fpu.fcr31 & ~(env->active_fpu.fcr31_rw_bitmask));
2915 break;
2916 default:
2917 if (env->insn_flags & ISA_MIPS32R6) {
2918 do_raise_exception(env, EXCP_RI, GETPC());
2919 }
2920 return;
2921 }
2922 restore_fp_status(env);
2923 set_float_exception_flags(0, &env->active_fpu.fp_status);
2924 if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) &
2925 GET_FP_CAUSE(env->active_fpu.fcr31)) {
2926 do_raise_exception(env, EXCP_FPE, GETPC());
2927 }
2928 }
2929
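/* Translate softfloat exception flags into the MIPS FCSR cause/flag bit layout. */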
2930 int ieee_ex_to_mips(int xcpt)
2931 {
2932 int ret = 0;
2933 if (xcpt) {
2934 if (xcpt & float_flag_invalid) {
2935 ret |= FP_INVALID;
2936 }
2937 if (xcpt & float_flag_overflow) {
2938 ret |= FP_OVERFLOW;
2939 }
2940 if (xcpt & float_flag_underflow) {
2941 ret |= FP_UNDERFLOW;
2942 }
2943 if (xcpt & float_flag_divbyzero) {
2944 ret |= FP_DIV0;
2945 }
2946 if (xcpt & float_flag_inexact) {
2947 ret |= FP_INEXACT;
2948 }
2949 }
2950 return ret;
2951 }
2952
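/*
 * Fold the accumulated softfloat exceptions into FCR31: update the Cause
 * field, raise EXCP_FPE if a corresponding Enable bit is set, otherwise
 * accumulate the bits into the Flags field.
 */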
2953 static inline void update_fcr31(CPUMIPSState *env, uintptr_t pc)
2954 {
2955 int tmp = ieee_ex_to_mips(get_float_exception_flags(
2956 &env->active_fpu.fp_status));
2957
2958 SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
2959
2960 if (tmp) {
2961 set_float_exception_flags(0, &env->active_fpu.fp_status);
2962
2963 if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp) {
2964 do_raise_exception(env, EXCP_FPE, pc);
2965 } else {
2966 UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
2967 }
2968 }
2969 }
2970
2971 /*
2972 * Float support.
2973 * Single precision routines have an "s" suffix, double precision a
2974 * "d" suffix, 32-bit integer "w", 64-bit integer "l", paired single "ps",
2975 * paired single lower "pl", paired single upper "pu".
2976 */
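/* For example, helper_float_cvt_w_s() converts single precision to a 32-bit word. */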
2977
2978 /* unary operations, modifying fp status */
2979 uint64_t helper_float_sqrt_d(CPUMIPSState *env, uint64_t fdt0)
2980 {
2981 fdt0 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2982 update_fcr31(env, GETPC());
2983 return fdt0;
2984 }
2985
2986 uint32_t helper_float_sqrt_s(CPUMIPSState *env, uint32_t fst0)
2987 {
2988 fst0 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2989 update_fcr31(env, GETPC());
2990 return fst0;
2991 }
2992
2993 uint64_t helper_float_cvtd_s(CPUMIPSState *env, uint32_t fst0)
2994 {
2995 uint64_t fdt2;
2996
2997 fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
2998 update_fcr31(env, GETPC());
2999 return fdt2;
3000 }
3001
3002 uint64_t helper_float_cvtd_w(CPUMIPSState *env, uint32_t wt0)
3003 {
3004 uint64_t fdt2;
3005
3006 fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
3007 update_fcr31(env, GETPC());
3008 return fdt2;
3009 }
3010
3011 uint64_t helper_float_cvtd_l(CPUMIPSState *env, uint64_t dt0)
3012 {
3013 uint64_t fdt2;
3014
3015 fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
3016 update_fcr31(env, GETPC());
3017 return fdt2;
3018 }
3019
3020 uint64_t helper_float_cvt_l_d(CPUMIPSState *env, uint64_t fdt0)
3021 {
3022 uint64_t dt2;
3023
3024 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
3025 if (get_float_exception_flags(&env->active_fpu.fp_status)
3026 & (float_flag_invalid | float_flag_overflow)) {
3027 dt2 = FP_TO_INT64_OVERFLOW;
3028 }
3029 update_fcr31(env, GETPC());
3030 return dt2;
3031 }
3032
3033 uint64_t helper_float_cvt_l_s(CPUMIPSState *env, uint32_t fst0)
3034 {
3035 uint64_t dt2;
3036
3037 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
3038 if (get_float_exception_flags(&env->active_fpu.fp_status)
3039 & (float_flag_invalid | float_flag_overflow)) {
3040 dt2 = FP_TO_INT64_OVERFLOW;
3041 }
3042 update_fcr31(env, GETPC());
3043 return dt2;
3044 }
3045
3046 uint64_t helper_float_cvtps_pw(CPUMIPSState *env, uint64_t dt0)
3047 {
3048 uint32_t fst2;
3049 uint32_t fsth2;
3050
3051 fst2 = int32_to_float32(dt0 & 0xFFFFFFFF, &env->active_fpu.fp_status);
3052 fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
3053 update_fcr31(env, GETPC());
3054 return ((uint64_t)fsth2 << 32) | fst2;
3055 }
3056
3057 uint64_t helper_float_cvtpw_ps(CPUMIPSState *env, uint64_t fdt0)
3058 {
3059 uint32_t wt2;
3060 uint32_t wth2;
3061 int excp, excph;
3062
3063 wt2 = float32_to_int32(fdt0 & 0xFFFFFFFF, &env->active_fpu.fp_status);
3064 excp = get_float_exception_flags(&env->active_fpu.fp_status);
3065 if (excp & (float_flag_overflow | float_flag_invalid)) {
3066 wt2 = FP_TO_INT32_OVERFLOW;
3067 }
3068
3069 set_float_exception_flags(0, &env->active_fpu.fp_status);
3070 wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
3071 excph = get_float_exception_flags(&env->active_fpu.fp_status);
3072 if (excph & (float_flag_overflow | float_flag_invalid)) {
3073 wth2 = FP_TO_INT32_OVERFLOW;
3074 }
3075
3076 set_float_exception_flags(excp | excph, &env->active_fpu.fp_status);
3077 update_fcr31(env, GETPC());
3078
3079 return ((uint64_t)wth2 << 32) | wt2;
3080 }
3081
3082 uint32_t helper_float_cvts_d(CPUMIPSState *env, uint64_t fdt0)
3083 {
3084 uint32_t fst2;
3085
3086 fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
3087 update_fcr31(env, GETPC());
3088 return fst2;
3089 }
3090
3091 uint32_t helper_float_cvts_w(CPUMIPSState *env, uint32_t wt0)
3092 {
3093 uint32_t fst2;
3094
3095 fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
3096 update_fcr31(env, GETPC());
3097 return fst2;
3098 }
3099
3100 uint32_t helper_float_cvts_l(CPUMIPSState *env, uint64_t dt0)
3101 {
3102 uint32_t fst2;
3103
3104 fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
3105 update_fcr31(env, GETPC());
3106 return fst2;
3107 }
3108
3109 uint32_t helper_float_cvts_pl(CPUMIPSState *env, uint32_t wt0)
3110 {
3111 uint32_t wt2;
3112
3113 wt2 = wt0;
3114 update_fcr31(env, GETPC());
3115 return wt2;
3116 }
3117
3118 uint32_t helper_float_cvts_pu(CPUMIPSState *env, uint32_t wth0)
3119 {
3120 uint32_t wt2;
3121
3122 wt2 = wth0;
3123 update_fcr31(env, GETPC());
3124 return wt2;
3125 }
3126
3127 uint32_t helper_float_cvt_w_s(CPUMIPSState *env, uint32_t fst0)
3128 {
3129 uint32_t wt2;
3130
3131 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
3132 if (get_float_exception_flags(&env->active_fpu.fp_status)
3133 & (float_flag_invalid | float_flag_overflow)) {
3134 wt2 = FP_TO_INT32_OVERFLOW;
3135 }
3136 update_fcr31(env, GETPC());
3137 return wt2;
3138 }
3139
3140 uint32_t helper_float_cvt_w_d(CPUMIPSState *env, uint64_t fdt0)
3141 {
3142 uint32_t wt2;
3143
3144 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
3145 if (get_float_exception_flags(&env->active_fpu.fp_status)
3146 & (float_flag_invalid | float_flag_overflow)) {
3147 wt2 = FP_TO_INT32_OVERFLOW;
3148 }
3149 update_fcr31(env, GETPC());
3150 return wt2;
3151 }
3152
3153 uint64_t helper_float_round_l_d(CPUMIPSState *env, uint64_t fdt0)
3154 {
3155 uint64_t dt2;
3156
3157 set_float_rounding_mode(float_round_nearest_even,
3158 &env->active_fpu.fp_status);
3159 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
3160 restore_rounding_mode(env);
3161 if (get_float_exception_flags(&env->active_fpu.fp_status)
3162 & (float_flag_invalid | float_flag_overflow)) {
3163 dt2 = FP_TO_INT64_OVERFLOW;
3164 }
3165 update_fcr31(env, GETPC());
3166 return dt2;
3167 }
3168
3169 uint64_t helper_float_round_l_s(CPUMIPSState *env, uint32_t fst0)
3170 {
3171 uint64_t dt2;
3172
3173 set_float_rounding_mode(float_round_nearest_even,
3174 &env->active_fpu.fp_status);
3175 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
3176 restore_rounding_mode(env);
3177 if (get_float_exception_flags(&env->active_fpu.fp_status)
3178 & (float_flag_invalid | float_flag_overflow)) {
3179 dt2 = FP_TO_INT64_OVERFLOW;
3180 }
3181 update_fcr31(env, GETPC());
3182 return dt2;
3183 }
3184
3185 uint32_t helper_float_round_w_d(CPUMIPSState *env, uint64_t fdt0)
3186 {
3187 uint32_t wt2;
3188
3189 set_float_rounding_mode(float_round_nearest_even,
3190 &env->active_fpu.fp_status);
3191 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
3192 restore_rounding_mode(env);
3193 if (get_float_exception_flags(&env->active_fpu.fp_status)
3194 & (float_flag_invalid | float_flag_overflow)) {
3195 wt2 = FP_TO_INT32_OVERFLOW;
3196 }
3197 update_fcr31(env, GETPC());
3198 return wt2;
3199 }
3200
3201 uint32_t helper_float_round_w_s(CPUMIPSState *env, uint32_t fst0)
3202 {
3203 uint32_t wt2;
3204
3205 set_float_rounding_mode(float_round_nearest_even,
3206 &env->active_fpu.fp_status);
3207 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
3208 restore_rounding_mode(env);
3209 if (get_float_exception_flags(&env->active_fpu.fp_status)
3210 & (float_flag_invalid | float_flag_overflow)) {
3211 wt2 = FP_TO_INT32_OVERFLOW;
3212 }
3213 update_fcr31(env, GETPC());
3214 return wt2;
3215 }
3216
3217 uint64_t helper_float_trunc_l_d(CPUMIPSState *env, uint64_t fdt0)
3218 {
3219 uint64_t dt2;
3220
3221 dt2 = float64_to_int64_round_to_zero(fdt0,
3222 &env->active_fpu.fp_status);
3223 if (get_float_exception_flags(&env->active_fpu.fp_status)
3224 & (float_flag_invalid | float_flag_overflow)) {
3225 dt2 = FP_TO_INT64_OVERFLOW;
3226 }
3227 update_fcr31(env, GETPC());
3228 return dt2;
3229 }
3230
3231 uint64_t helper_float_trunc_l_s(CPUMIPSState *env, uint32_t fst0)
3232 {
3233 uint64_t dt2;
3234
3235 dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
3236 if (get_float_exception_flags(&env->active_fpu.fp_status)
3237 & (float_flag_invalid | float_flag_overflow)) {
3238 dt2 = FP_TO_INT64_OVERFLOW;
3239 }
3240 update_fcr31(env, GETPC());
3241 return dt2;
3242 }
3243
3244 uint32_t helper_float_trunc_w_d(CPUMIPSState *env, uint64_t fdt0)
3245 {
3246 uint32_t wt2;
3247
3248 wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
3249 if (get_float_exception_flags(&env->active_fpu.fp_status)
3250 & (float_flag_invalid | float_flag_overflow)) {
3251 wt2 = FP_TO_INT32_OVERFLOW;
3252 }
3253 update_fcr31(env, GETPC());
3254 return wt2;
3255 }
3256
3257 uint32_t helper_float_trunc_w_s(CPUMIPSState *env, uint32_t fst0)
3258 {
3259 uint32_t wt2;
3260
3261 wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
3262 if (get_float_exception_flags(&env->active_fpu.fp_status)
3263 & (float_flag_invalid | float_flag_overflow)) {
3264 wt2 = FP_TO_INT32_OVERFLOW;
3265 }
3266 update_fcr31(env, GETPC());
3267 return wt2;
3268 }
3269
3270 uint64_t helper_float_ceil_l_d(CPUMIPSState *env, uint64_t fdt0)
3271 {
3272 uint64_t dt2;
3273
3274 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
3275 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
3276 restore_rounding_mode(env);
3277 if (get_float_exception_flags(&env->active_fpu.fp_status)
3278 & (float_flag_invalid | float_flag_overflow)) {
3279 dt2 = FP_TO_INT64_OVERFLOW;
3280 }
3281 update_fcr31(env, GETPC());
3282 return dt2;
3283 }
3284
3285 uint64_t helper_float_ceil_l_s(CPUMIPSState *env, uint32_t fst0)
3286 {
3287 uint64_t dt2;
3288
3289 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
3290 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
3291 restore_rounding_mode(env);
3292 if (get_float_exception_flags(&env->active_fpu.fp_status)
3293 & (float_flag_invalid | float_flag_overflow)) {
3294 dt2 = FP_TO_INT64_OVERFLOW;
3295 }
3296 update_fcr31(env, GETPC());
3297 return dt2;
3298 }
3299
3300 uint32_t helper_float_ceil_w_d(CPUMIPSState *env, uint64_t fdt0)
3301 {
3302 uint32_t wt2;
3303
3304 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
3305 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
3306 restore_rounding_mode(env);
3307 if (get_float_exception_flags(&env->active_fpu.fp_status)
3308 & (float_flag_invalid | float_flag_overflow)) {
3309 wt2 = FP_TO_INT32_OVERFLOW;
3310 }
3311 update_fcr31(env, GETPC());
3312 return wt2;
3313 }
3314
3315 uint32_t helper_float_ceil_w_s(CPUMIPSState *env, uint32_t fst0)
3316 {
3317 uint32_t wt2;
3318
3319 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
3320 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
3321 restore_rounding_mode(env);
3322 if (get_float_exception_flags(&env->active_fpu.fp_status)
3323 & (float_flag_invalid | float_flag_overflow)) {
3324 wt2 = FP_TO_INT32_OVERFLOW;
3325 }
3326 update_fcr31(env, GETPC());
3327 return wt2;
3328 }
3329
3330 uint64_t helper_float_floor_l_d(CPUMIPSState *env, uint64_t fdt0)
3331 {
3332 uint64_t dt2;
3333
3334 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
3335 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
3336 restore_rounding_mode(env);
3337 if (get_float_exception_flags(&env->active_fpu.fp_status)
3338 & (float_flag_invalid | float_flag_overflow)) {
3339 dt2 = FP_TO_INT64_OVERFLOW;
3340 }
3341 update_fcr31(env, GETPC());
3342 return dt2;
3343 }
3344
3345 uint64_t helper_float_floor_l_s(CPUMIPSState *env, uint32_t fst0)
3346 {
3347 uint64_t dt2;
3348
3349 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
3350 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
3351 restore_rounding_mode(env);
3352 if (get_float_exception_flags(&env->active_fpu.fp_status)
3353 & (float_flag_invalid | float_flag_overflow)) {
3354 dt2 = FP_TO_INT64_OVERFLOW;
3355 }
3356 update_fcr31(env, GETPC());
3357 return dt2;
3358 }
3359
3360 uint32_t helper_float_floor_w_d(CPUMIPSState *env, uint64_t fdt0)
3361 {
3362 uint32_t wt2;
3363
3364 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
3365 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
3366 restore_rounding_mode(env);
3367 if (get_float_exception_flags(&env->active_fpu.fp_status)
3368 & (float_flag_invalid | float_flag_overflow)) {
3369 wt2 = FP_TO_INT32_OVERFLOW;
3370 }
3371 update_fcr31(env, GETPC());
3372 return wt2;
3373 }
3374
3375 uint32_t helper_float_floor_w_s(CPUMIPSState *env, uint32_t fst0)
3376 {
3377 uint32_t wt2;
3378
3379 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
3380 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
3381 restore_rounding_mode(env);
3382 if (get_float_exception_flags(&env->active_fpu.fp_status)
3383 & (float_flag_invalid | float_flag_overflow)) {
3384 wt2 = FP_TO_INT32_OVERFLOW;
3385 }
3386 update_fcr31(env, GETPC());
3387 return wt2;
3388 }
3389
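/*
 * IEEE 754-2008 ("nan2008") conversion variants: an invalid conversion of
 * a NaN yields 0 instead of the saturated FP_TO_INT*_OVERFLOW value used
 * by the legacy helpers above.
 */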
3390 uint64_t helper_float_cvt_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
3391 {
3392 uint64_t dt2;
3393
3394 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
3395 if (get_float_exception_flags(&env->active_fpu.fp_status)
3396 & float_flag_invalid) {
3397 if (float64_is_any_nan(fdt0)) {
3398 dt2 = 0;
3399 }
3400 }
3401 update_fcr31(env, GETPC());
3402 return dt2;
3403 }
3404
3405 uint64_t helper_float_cvt_2008_l_s(CPUMIPSState *env, uint32_t fst0)
3406 {
3407 uint64_t dt2;
3408
3409 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
3410 if (get_float_exception_flags(&env->active_fpu.fp_status)
3411 & float_flag_invalid) {
3412 if (float32_is_any_nan(fst0)) {
3413 dt2 = 0;
3414 }
3415 }
3416 update_fcr31(env, GETPC());
3417 return dt2;
3418 }
3419
3420 uint32_t helper_float_cvt_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
3421 {
3422 uint32_t wt2;
3423
3424 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
3425 if (get_float_exception_flags(&env->active_fpu.fp_status)
3426 & float_flag_invalid) {
3427 if (float64_is_any_nan(fdt0)) {
3428 wt2 = 0;
3429 }
3430 }
3431 update_fcr31(env, GETPC());
3432 return wt2;
3433 }
3434
3435 uint32_t helper_float_cvt_2008_w_s(CPUMIPSState *env, uint32_t fst0)
3436 {
3437 uint32_t wt2;
3438
3439 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
3440 if (get_float_exception_flags(&env->active_fpu.fp_status)
3441 & float_flag_invalid) {
3442 if (float32_is_any_nan(fst0)) {
3443 wt2 = 0;
3444 }
3445 }
3446 update_fcr31(env, GETPC());
3447 return wt2;
3448 }
3449
3450 uint64_t helper_float_round_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
3451 {
3452 uint64_t dt2;
3453
3454 set_float_rounding_mode(float_round_nearest_even,
3455 &env->active_fpu.fp_status);
3456 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
3457 restore_rounding_mode(env);
3458 if (get_float_exception_flags(&env->active_fpu.fp_status)
3459 & float_flag_invalid) {
3460 if (float64_is_any_nan(fdt0)) {
3461 dt2 = 0;
3462 }
3463 }
3464 update_fcr31(env, GETPC());
3465 return dt2;
3466 }
3467
3468 uint64_t helper_float_round_2008_l_s(CPUMIPSState *env, uint32_t fst0)
3469 {
3470 uint64_t dt2;
3471
3472 set_float_rounding_mode(float_round_nearest_even,
3473 &env->active_fpu.fp_status);
3474 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
3475 restore_rounding_mode(env);
3476 if (get_float_exception_flags(&env->active_fpu.fp_status)
3477 & float_flag_invalid) {
3478 if (float32_is_any_nan(fst0)) {
3479 dt2 = 0;
3480 }
3481 }
3482 update_fcr31(env, GETPC());
3483 return dt2;
3484 }
3485
3486 uint32_t helper_float_round_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
3487 {
3488 uint32_t wt2;
3489
3490 set_float_rounding_mode(float_round_nearest_even,
3491 &env->active_fpu.fp_status);
3492 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
3493 restore_rounding_mode(env);
3494 if (get_float_exception_flags(&env->active_fpu.fp_status)
3495 & float_flag_invalid) {
3496 if (float64_is_any_nan(fdt0)) {
3497 wt2 = 0;
3498 }
3499 }
3500 update_fcr31(env, GETPC());
3501 return wt2;
3502 }
3503
3504 uint32_t helper_float_round_2008_w_s(CPUMIPSState *env, uint32_t fst0)
3505 {
3506 uint32_t wt2;
3507
3508 set_float_rounding_mode(float_round_nearest_even,
3509 &env->active_fpu.fp_status);
3510 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
3511 restore_rounding_mode(env);
3512 if (get_float_exception_flags(&env->active_fpu.fp_status)
3513 & float_flag_invalid) {
3514 if (float32_is_any_nan(fst0)) {
3515 wt2 = 0;
3516 }
3517 }
3518 update_fcr31(env, GETPC());
3519 return wt2;
3520 }
3521
3522 uint64_t helper_float_trunc_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
3523 {
3524 uint64_t dt2;
3525
3526 dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
3527 if (get_float_exception_flags(&env->active_fpu.fp_status)
3528 & float_flag_invalid) {
3529 if (float64_is_any_nan(fdt0)) {
3530 dt2 = 0;
3531 }
3532 }
3533 update_fcr31(env, GETPC());
3534 return dt2;
3535 }
3536
3537 uint64_t helper_float_trunc_2008_l_s(CPUMIPSState *env, uint32_t fst0)
3538 {
3539 uint64_t dt2;
3540
3541 dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
3542 if (get_float_exception_flags(&env->active_fpu.fp_status)
3543 & float_flag_invalid) {
3544 if (float32_is_any_nan(fst0)) {
3545 dt2 = 0;
3546 }
3547 }
3548 update_fcr31(env, GETPC());
3549 return dt2;
3550 }
3551
3552 uint32_t helper_float_trunc_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
3553 {
3554 uint32_t wt2;
3555
3556 wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
3557 if (get_float_exception_flags(&env->active_fpu.fp_status)
3558 & float_flag_invalid) {
3559 if (float64_is_any_nan(fdt0)) {
3560 wt2 = 0;
3561 }
3562 }
3563 update_fcr31(env, GETPC());
3564 return wt2;
3565 }
3566
3567 uint32_t helper_float_trunc_2008_w_s(CPUMIPSState *env, uint32_t fst0)
3568 {
3569 uint32_t wt2;
3570
3571 wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
3572 if (get_float_exception_flags(&env->active_fpu.fp_status)
3573 & float_flag_invalid) {
3574 if (float32_is_any_nan(fst0)) {
3575 wt2 = 0;
3576 }
3577 }
3578 update_fcr31(env, GETPC());
3579 return wt2;
3580 }
3581
3582 uint64_t helper_float_ceil_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
3583 {
3584 uint64_t dt2;
3585
3586 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
3587 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
3588 restore_rounding_mode(env);
3589 if (get_float_exception_flags(&env->active_fpu.fp_status)
3590 & float_flag_invalid) {
3591 if (float64_is_any_nan(fdt0)) {
3592 dt2 = 0;
3593 }
3594 }
3595 update_fcr31(env, GETPC());
3596 return dt2;
3597 }
3598
3599 uint64_t helper_float_ceil_2008_l_s(CPUMIPSState *env, uint32_t fst0)
3600 {
3601 uint64_t dt2;
3602
3603 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
3604 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
3605 restore_rounding_mode(env);
3606 if (get_float_exception_flags(&env->active_fpu.fp_status)
3607 & float_flag_invalid) {
3608 if (float32_is_any_nan(fst0)) {
3609 dt2 = 0;
3610 }
3611 }
3612 update_fcr31(env, GETPC());
3613 return dt2;
3614 }
3615
3616 uint32_t helper_float_ceil_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
3617 {
3618 uint32_t wt2;
3619
3620 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
3621 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
3622 restore_rounding_mode(env);
3623 if (get_float_exception_flags(&env->active_fpu.fp_status)
3624 & float_flag_invalid) {
3625 if (float64_is_any_nan(fdt0)) {
3626 wt2 = 0;
3627 }
3628 }
3629 update_fcr31(env, GETPC());
3630 return wt2;
3631 }
3632
3633 uint32_t helper_float_ceil_2008_w_s(CPUMIPSState *env, uint32_t fst0)
3634 {
3635 uint32_t wt2;
3636
3637 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
3638 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
3639 restore_rounding_mode(env);
3640 if (get_float_exception_flags(&env->active_fpu.fp_status)
3641 & float_flag_invalid) {
3642 if (float32_is_any_nan(fst0)) {
3643 wt2 = 0;
3644 }
3645 }
3646 update_fcr31(env, GETPC());
3647 return wt2;
3648 }
3649
3650 uint64_t helper_float_floor_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
3651 {
3652 uint64_t dt2;
3653
3654 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
3655 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
3656 restore_rounding_mode(env);
3657 if (get_float_exception_flags(&env->active_fpu.fp_status)
3658 & float_flag_invalid) {
3659 if (float64_is_any_nan(fdt0)) {
3660 dt2 = 0;
3661 }
3662 }
3663 update_fcr31(env, GETPC());
3664 return dt2;
3665 }
3666
3667 uint64_t helper_float_floor_2008_l_s(CPUMIPSState *env, uint32_t fst0)
3668 {
3669 uint64_t dt2;
3670
3671 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
3672 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
3673 restore_rounding_mode(env);
3674 if (get_float_exception_flags(&env->active_fpu.fp_status)
3675 & float_flag_invalid) {
3676 if (float32_is_any_nan(fst0)) {
3677 dt2 = 0;
3678 }
3679 }
3680 update_fcr31(env, GETPC());
3681 return dt2;
3682 }
3683
3684 uint32_t helper_float_floor_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
3685 {
3686 uint32_t wt2;
3687
3688 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
3689 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
3690 restore_rounding_mode(env);
3691 if (get_float_exception_flags(&env->active_fpu.fp_status)
3692 & float_flag_invalid) {
3693 if (float64_is_any_nan(fdt0)) {
3694 wt2 = 0;
3695 }
3696 }
3697 update_fcr31(env, GETPC());
3698 return wt2;
3699 }
3700
3701 uint32_t helper_float_floor_2008_w_s(CPUMIPSState *env, uint32_t fst0)
3702 {
3703 uint32_t wt2;
3704
3705 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
3706 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
3707 restore_rounding_mode(env);
3708 if (get_float_exception_flags(&env->active_fpu.fp_status)
3709 & float_flag_invalid) {
3710 if (float32_is_any_nan(fst0)) {
3711 wt2 = 0;
3712 }
3713 }
3714 update_fcr31(env, GETPC());
3715 return wt2;
3716 }
3717
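/*
 * Summary of the *_2008_* helpers above (as modelled here): they
 * implement the IEEE 754-2008 flavour of the FP-to-integer
 * conversions, where a conversion that raises the invalid exception
 * on a NaN input returns 0 instead of the saturated default produced
 * by the legacy conversions.
 */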
3718 /* unary operations, not modifying fp status */
3719 #define FLOAT_UNOP(name) \
3720 uint64_t helper_float_ ## name ## _d(uint64_t fdt0) \
3721 { \
3722 return float64_ ## name(fdt0); \
3723 } \
3724 uint32_t helper_float_ ## name ## _s(uint32_t fst0) \
3725 { \
3726 return float32_ ## name(fst0); \
3727 } \
3728 uint64_t helper_float_ ## name ## _ps(uint64_t fdt0) \
3729 { \
3730 uint32_t wt0; \
3731 uint32_t wth0; \
3732 \
3733 wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF); \
3734 wth0 = float32_ ## name(fdt0 >> 32); \
3735 return ((uint64_t)wth0 << 32) | wt0; \
3736 }
3737 FLOAT_UNOP(abs)
3738 FLOAT_UNOP(chs)
3739 #undef FLOAT_UNOP
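/*
 * A note on the layout assumed here: the _ps helpers operate on a
 * paired-single value, i.e. two float32 elements packed into one
 * 64-bit register, the lower single in bits 31..0 and the upper single
 * in bits 63..32.  abs and chs only clear or flip the sign bit, which
 * is why they take no float_status and can never raise an exception.
 */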
3740
3741 /* MIPS specific unary operations */
3742 uint64_t helper_float_recip_d(CPUMIPSState *env, uint64_t fdt0)
3743 {
3744 uint64_t fdt2;
3745
3746 fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status);
3747 update_fcr31(env, GETPC());
3748 return fdt2;
3749 }
3750
3751 uint32_t helper_float_recip_s(CPUMIPSState *env, uint32_t fst0)
3752 {
3753 uint32_t fst2;
3754
3755 fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status);
3756 update_fcr31(env, GETPC());
3757 return fst2;
3758 }
3759
3760 uint64_t helper_float_rsqrt_d(CPUMIPSState *env, uint64_t fdt0)
3761 {
3762 uint64_t fdt2;
3763
3764 fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
3765 fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status);
3766 update_fcr31(env, GETPC());
3767 return fdt2;
3768 }
3769
3770 uint32_t helper_float_rsqrt_s(CPUMIPSState *env, uint32_t fst0)
3771 {
3772 uint32_t fst2;
3773
3774 fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
3775 fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status);
3776 update_fcr31(env, GETPC());
3777 return fst2;
3778 }
3779
3780 uint64_t helper_float_recip1_d(CPUMIPSState *env, uint64_t fdt0)
3781 {
3782 uint64_t fdt2;
3783
3784 fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status);
3785 update_fcr31(env, GETPC());
3786 return fdt2;
3787 }
3788
3789 uint32_t helper_float_recip1_s(CPUMIPSState *env, uint32_t fst0)
3790 {
3791 uint32_t fst2;
3792
3793 fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status);
3794 update_fcr31(env, GETPC());
3795 return fst2;
3796 }
3797
3798 uint64_t helper_float_recip1_ps(CPUMIPSState *env, uint64_t fdt0)
3799 {
3800 uint32_t fst2;
3801 uint32_t fsth2;
3802
3803 fst2 = float32_div(float32_one, fdt0 & 0XFFFFFFFF,
3804 &env->active_fpu.fp_status);
3805 fsth2 = float32_div(float32_one, fdt0 >> 32, &env->active_fpu.fp_status);
3806 update_fcr31(env, GETPC());
3807 return ((uint64_t)fsth2 << 32) | fst2;
3808 }
3809
3810 uint64_t helper_float_rsqrt1_d(CPUMIPSState *env, uint64_t fdt0)
3811 {
3812 uint64_t fdt2;
3813
3814 fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
3815 fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status);
3816 update_fcr31(env, GETPC());
3817 return fdt2;
3818 }
3819
3820 uint32_t helper_float_rsqrt1_s(CPUMIPSState *env, uint32_t fst0)
3821 {
3822 uint32_t fst2;
3823
3824 fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
3825 fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status);
3826 update_fcr31(env, GETPC());
3827 return fst2;
3828 }
3829
3830 uint64_t helper_float_rsqrt1_ps(CPUMIPSState *env, uint64_t fdt0)
3831 {
3832 uint32_t fst2;
3833 uint32_t fsth2;
3834
3835 fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
3836 fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
3837 fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status);
3838 fsth2 = float32_div(float32_one, fsth2, &env->active_fpu.fp_status);
3839 update_fcr31(env, GETPC());
3840 return ((uint64_t)fsth2 << 32) | fst2;
3841 }
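/*
 * The recip1/rsqrt1 helpers above and recip2/rsqrt2 further down
 * correspond to the MIPS-3D style RECIP1/RSQRT1/RECIP2/RSQRT2
 * Newton-Raphson step instructions.  Rather than producing reduced
 * precision estimates, this model simply computes them with full
 * precision softfloat divisions and square roots.
 */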
3842
3843 #define FLOAT_RINT(name, bits) \
3844 uint ## bits ## _t helper_float_ ## name(CPUMIPSState *env, \
3845 uint ## bits ## _t fs) \
3846 { \
3847 uint ## bits ## _t fdret; \
3848 \
3849 fdret = float ## bits ## _round_to_int(fs, &env->active_fpu.fp_status); \
3850 update_fcr31(env, GETPC()); \
3851 return fdret; \
3852 }
3853
3854 FLOAT_RINT(rint_s, 32)
3855 FLOAT_RINT(rint_d, 64)
3856 #undef FLOAT_RINT
3857
3858 #define FLOAT_CLASS_SIGNALING_NAN 0x001
3859 #define FLOAT_CLASS_QUIET_NAN 0x002
3860 #define FLOAT_CLASS_NEGATIVE_INFINITY 0x004
3861 #define FLOAT_CLASS_NEGATIVE_NORMAL 0x008
3862 #define FLOAT_CLASS_NEGATIVE_SUBNORMAL 0x010
3863 #define FLOAT_CLASS_NEGATIVE_ZERO 0x020
3864 #define FLOAT_CLASS_POSITIVE_INFINITY 0x040
3865 #define FLOAT_CLASS_POSITIVE_NORMAL 0x080
3866 #define FLOAT_CLASS_POSITIVE_SUBNORMAL 0x100
3867 #define FLOAT_CLASS_POSITIVE_ZERO 0x200
3868
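/*
 * FLOAT_CLASS implements a CLASS.fmt style classification: exactly one
 * of the ten bits above is returned, identifying the operand as a
 * signaling or quiet NaN, or as an infinity, normal, subnormal or zero
 * of either sign.  For example, a float32 -0.0 (0x80000000) classifies
 * as FLOAT_CLASS_NEGATIVE_ZERO (0x020).
 */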
3869 #define FLOAT_CLASS(name, bits) \
3870 uint ## bits ## _t float_ ## name(uint ## bits ## _t arg, \
3871 float_status *status) \
3872 { \
3873 if (float ## bits ## _is_signaling_nan(arg, status)) { \
3874 return FLOAT_CLASS_SIGNALING_NAN; \
3875 } else if (float ## bits ## _is_quiet_nan(arg, status)) { \
3876 return FLOAT_CLASS_QUIET_NAN; \
3877 } else if (float ## bits ## _is_neg(arg)) { \
3878 if (float ## bits ## _is_infinity(arg)) { \
3879 return FLOAT_CLASS_NEGATIVE_INFINITY; \
3880 } else if (float ## bits ## _is_zero(arg)) { \
3881 return FLOAT_CLASS_NEGATIVE_ZERO; \
3882 } else if (float ## bits ## _is_zero_or_denormal(arg)) { \
3883 return FLOAT_CLASS_NEGATIVE_SUBNORMAL; \
3884 } else { \
3885 return FLOAT_CLASS_NEGATIVE_NORMAL; \
3886 } \
3887 } else { \
3888 if (float ## bits ## _is_infinity(arg)) { \
3889 return FLOAT_CLASS_POSITIVE_INFINITY; \
3890 } else if (float ## bits ## _is_zero(arg)) { \
3891 return FLOAT_CLASS_POSITIVE_ZERO; \
3892 } else if (float ## bits ## _is_zero_or_denormal(arg)) { \
3893 return FLOAT_CLASS_POSITIVE_SUBNORMAL; \
3894 } else { \
3895 return FLOAT_CLASS_POSITIVE_NORMAL; \
3896 } \
3897 } \
3898 } \
3899 \
3900 uint ## bits ## _t helper_float_ ## name(CPUMIPSState *env, \
3901 uint ## bits ## _t arg) \
3902 { \
3903 return float_ ## name(arg, &env->active_fpu.fp_status); \
3904 }
3905
3906 FLOAT_CLASS(class_s, 32)
3907 FLOAT_CLASS(class_d, 64)
3908 #undef FLOAT_CLASS
3909
3910 /* binary operations */
3911 #define FLOAT_BINOP(name) \
3912 uint64_t helper_float_ ## name ## _d(CPUMIPSState *env, \
3913 uint64_t fdt0, uint64_t fdt1) \
3914 { \
3915 uint64_t dt2; \
3916 \
3917 dt2 = float64_ ## name(fdt0, fdt1, &env->active_fpu.fp_status);\
3918 update_fcr31(env, GETPC()); \
3919 return dt2; \
3920 } \
3921 \
3922 uint32_t helper_float_ ## name ## _s(CPUMIPSState *env, \
3923 uint32_t fst0, uint32_t fst1) \
3924 { \
3925 uint32_t wt2; \
3926 \
3927 wt2 = float32_ ## name(fst0, fst1, &env->active_fpu.fp_status);\
3928 update_fcr31(env, GETPC()); \
3929 return wt2; \
3930 } \
3931 \
3932 uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env, \
3933 uint64_t fdt0, \
3934 uint64_t fdt1) \
3935 { \
3936 uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
3937 uint32_t fsth0 = fdt0 >> 32; \
3938 uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
3939 uint32_t fsth1 = fdt1 >> 32; \
3940 uint32_t wt2; \
3941 uint32_t wth2; \
3942 \
3943 wt2 = float32_ ## name(fst0, fst1, &env->active_fpu.fp_status); \
3944 wth2 = float32_ ## name(fsth0, fsth1, &env->active_fpu.fp_status); \
3945 update_fcr31(env, GETPC()); \
3946 return ((uint64_t)wth2 << 32) | wt2; \
3947 }
3948
3949 FLOAT_BINOP(add)
3950 FLOAT_BINOP(sub)
3951 FLOAT_BINOP(mul)
3952 FLOAT_BINOP(div)
3953 #undef FLOAT_BINOP
3954
3955 /* MIPS specific binary operations */
3956 uint64_t helper_float_recip2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
3957 {
3958 fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
3959 fdt2 = float64_chs(float64_sub(fdt2, float64_one,
3960 &env->active_fpu.fp_status));
3961 update_fcr31(env, GETPC());
3962 return fdt2;
3963 }
3964
3965 uint32_t helper_float_recip2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2)
3966 {
3967 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3968 fst2 = float32_chs(float32_sub(fst2, float32_one,
3969 &env->active_fpu.fp_status));
3970 update_fcr31(env, GETPC());
3971 return fst2;
3972 }
3973
3974 uint64_t helper_float_recip2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
3975 {
3976 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3977 uint32_t fsth0 = fdt0 >> 32;
3978 uint32_t fst2 = fdt2 & 0XFFFFFFFF;
3979 uint32_t fsth2 = fdt2 >> 32;
3980
3981 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3982 fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
3983 fst2 = float32_chs(float32_sub(fst2, float32_one,
3984 &env->active_fpu.fp_status));
3985 fsth2 = float32_chs(float32_sub(fsth2, float32_one,
3986 &env->active_fpu.fp_status));
3987 update_fcr31(env, GETPC());
3988 return ((uint64_t)fsth2 << 32) | fst2;
3989 }
3990
3991 uint64_t helper_float_rsqrt2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
3992 {
3993 fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
3994 fdt2 = float64_sub(fdt2, float64_one, &env->active_fpu.fp_status);
3995 fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64,
3996 &env->active_fpu.fp_status));
3997 update_fcr31(env, GETPC());
3998 return fdt2;
3999 }
4000
4001 uint32_t helper_float_rsqrt2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2)
4002 {
4003 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
4004 fst2 = float32_sub(fst2, float32_one, &env->active_fpu.fp_status);
4005 fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32,
4006 &env->active_fpu.fp_status));
4007 update_fcr31(env, GETPC());
4008 return fst2;
4009 }
4010
4011 uint64_t helper_float_rsqrt2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
4012 {
4013 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
4014 uint32_t fsth0 = fdt0 >> 32;
4015 uint32_t fst2 = fdt2 & 0XFFFFFFFF;
4016 uint32_t fsth2 = fdt2 >> 32;
4017
4018 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
4019 fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
4020 fst2 = float32_sub(fst2, float32_one, &env->active_fpu.fp_status);
4021 fsth2 = float32_sub(fsth2, float32_one, &env->active_fpu.fp_status);
4022 fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32,
4023 &env->active_fpu.fp_status));
4024 fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32,
4025 &env->active_fpu.fp_status));
4026 update_fcr31(env, GETPC());
4027 return ((uint64_t)fsth2 << 32) | fst2;
4028 }
4029
4030 uint64_t helper_float_addr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1)
4031 {
4032 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
4033 uint32_t fsth0 = fdt0 >> 32;
4034 uint32_t fst1 = fdt1 & 0XFFFFFFFF;
4035 uint32_t fsth1 = fdt1 >> 32;
4036 uint32_t fst2;
4037 uint32_t fsth2;
4038
4039 fst2 = float32_add(fst0, fsth0, &env->active_fpu.fp_status);
4040 fsth2 = float32_add(fst1, fsth1, &env->active_fpu.fp_status);
4041 update_fcr31(env, GETPC());
4042 return ((uint64_t)fsth2 << 32) | fst2;
4043 }
4044
4045 uint64_t helper_float_mulr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1)
4046 {
4047 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
4048 uint32_t fsth0 = fdt0 >> 32;
4049 uint32_t fst1 = fdt1 & 0XFFFFFFFF;
4050 uint32_t fsth1 = fdt1 >> 32;
4051 uint32_t fst2;
4052 uint32_t fsth2;
4053
4054 fst2 = float32_mul(fst0, fsth0, &env->active_fpu.fp_status);
4055 fsth2 = float32_mul(fst1, fsth1, &env->active_fpu.fp_status);
4056 update_fcr31(env, GETPC());
4057 return ((uint64_t)fsth2 << 32) | fst2;
4058 }
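/*
 * addr_ps and mulr_ps are "reduction" operations: each result single
 * is produced from the two halves of one source operand (the low
 * result from fdt0's pair, the high result from fdt1's pair) rather
 * than element-wise between the two operands.
 */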
4059
4060 #define FLOAT_MINMAX(name, bits, minmaxfunc) \
4061 uint ## bits ## _t helper_float_ ## name(CPUMIPSState *env, \
4062 uint ## bits ## _t fs, \
4063 uint ## bits ## _t ft) \
4064 { \
4065 uint ## bits ## _t fdret; \
4066 \
4067 fdret = float ## bits ## _ ## minmaxfunc(fs, ft, \
4068 &env->active_fpu.fp_status); \
4069 update_fcr31(env, GETPC()); \
4070 return fdret; \
4071 }
4072
4073 FLOAT_MINMAX(max_s, 32, maxnum)
4074 FLOAT_MINMAX(max_d, 64, maxnum)
4075 FLOAT_MINMAX(maxa_s, 32, maxnummag)
4076 FLOAT_MINMAX(maxa_d, 64, maxnummag)
4077
4078 FLOAT_MINMAX(min_s, 32, minnum)
4079 FLOAT_MINMAX(min_d, 64, minnum)
4080 FLOAT_MINMAX(mina_s, 32, minnummag)
4081 FLOAT_MINMAX(mina_d, 64, minnummag)
4082 #undef FLOAT_MINMAX
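/*
 * The R6 MIN/MAX family maps onto softfloat's minnum/maxnum, which
 * follow the IEEE 754-2008 minNum/maxNum rules (a quiet NaN operand is
 * ignored in favour of a numeric one); the MINA/MAXA variants use
 * minnummag/maxnummag, which compare magnitudes but return the
 * original signed operand.
 */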
4083
4084 /* ternary operations */
4085 #define UNFUSED_FMA(prefix, a, b, c, flags) \
4086 { \
4087 a = prefix##_mul(a, b, &env->active_fpu.fp_status); \
4088 if ((flags) & float_muladd_negate_c) { \
4089 a = prefix##_sub(a, c, &env->active_fpu.fp_status); \
4090 } else { \
4091 a = prefix##_add(a, c, &env->active_fpu.fp_status); \
4092 } \
4093 if ((flags) & float_muladd_negate_result) { \
4094 a = prefix##_chs(a); \
4095 } \
4096 }
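/*
 * UNFUSED_FMA deliberately performs a separate multiply and add/sub,
 * each rounding through fp_status, so the MADD/MSUB/NMADD/NMSUB
 * helpers built from it below behave as unfused multiply-add
 * sequences.  The MADDF/MSUBF helpers further down use float*_muladd
 * instead, which rounds only once.
 */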
4097
4098 /* FMA based operations */
4099 #define FLOAT_FMA(name, type) \
4100 uint64_t helper_float_ ## name ## _d(CPUMIPSState *env, \
4101 uint64_t fdt0, uint64_t fdt1, \
4102 uint64_t fdt2) \
4103 { \
4104 UNFUSED_FMA(float64, fdt0, fdt1, fdt2, type); \
4105 update_fcr31(env, GETPC()); \
4106 return fdt0; \
4107 } \
4108 \
4109 uint32_t helper_float_ ## name ## _s(CPUMIPSState *env, \
4110 uint32_t fst0, uint32_t fst1, \
4111 uint32_t fst2) \
4112 { \
4113 UNFUSED_FMA(float32, fst0, fst1, fst2, type); \
4114 update_fcr31(env, GETPC()); \
4115 return fst0; \
4116 } \
4117 \
4118 uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env, \
4119 uint64_t fdt0, uint64_t fdt1, \
4120 uint64_t fdt2) \
4121 { \
4122 uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
4123 uint32_t fsth0 = fdt0 >> 32; \
4124 uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
4125 uint32_t fsth1 = fdt1 >> 32; \
4126 uint32_t fst2 = fdt2 & 0XFFFFFFFF; \
4127 uint32_t fsth2 = fdt2 >> 32; \
4128 \
4129 UNFUSED_FMA(float32, fst0, fst1, fst2, type); \
4130 UNFUSED_FMA(float32, fsth0, fsth1, fsth2, type); \
4131 update_fcr31(env, GETPC()); \
4132 return ((uint64_t)fsth0 << 32) | fst0; \
4133 }
4134 FLOAT_FMA(madd, 0)
4135 FLOAT_FMA(msub, float_muladd_negate_c)
4136 FLOAT_FMA(nmadd, float_muladd_negate_result)
4137 FLOAT_FMA(nmsub, float_muladd_negate_result | float_muladd_negate_c)
4138 #undef FLOAT_FMA
4139
4140 #define FLOAT_FMADDSUB(name, bits, muladd_arg) \
4141 uint ## bits ## _t helper_float_ ## name(CPUMIPSState *env, \
4142 uint ## bits ## _t fs, \
4143 uint ## bits ## _t ft, \
4144 uint ## bits ## _t fd) \
4145 { \
4146 uint ## bits ## _t fdret; \
4147 \
4148 fdret = float ## bits ## _muladd(fs, ft, fd, muladd_arg, \
4149 &env->active_fpu.fp_status); \
4150 update_fcr31(env, GETPC()); \
4151 return fdret; \
4152 }
4153
4154 FLOAT_FMADDSUB(maddf_s, 32, 0)
4155 FLOAT_FMADDSUB(maddf_d, 64, 0)
4156 FLOAT_FMADDSUB(msubf_s, 32, float_muladd_negate_product)
4157 FLOAT_FMADDSUB(msubf_d, 64, float_muladd_negate_product)
4158 #undef FLOAT_FMADDSUB
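/*
 * With float_muladd_negate_product, the msubf helpers compute
 * fd - fs * ft in a single fused step (the negated product is added to
 * fd with one final rounding); maddf computes fs * ft + fd likewise.
 */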
4159
4160 /* compare operations */
4161 #define FOP_COND_D(op, cond) \
4162 void helper_cmp_d_ ## op(CPUMIPSState *env, uint64_t fdt0, \
4163 uint64_t fdt1, int cc) \
4164 { \
4165 int c; \
4166 c = cond; \
4167 update_fcr31(env, GETPC()); \
4168 if (c) \
4169 SET_FP_COND(cc, env->active_fpu); \
4170 else \
4171 CLEAR_FP_COND(cc, env->active_fpu); \
4172 } \
4173 void helper_cmpabs_d_ ## op(CPUMIPSState *env, uint64_t fdt0, \
4174 uint64_t fdt1, int cc) \
4175 { \
4176 int c; \
4177 fdt0 = float64_abs(fdt0); \
4178 fdt1 = float64_abs(fdt1); \
4179 c = cond; \
4180 update_fcr31(env, GETPC()); \
4181 if (c) \
4182 SET_FP_COND(cc, env->active_fpu); \
4183 else \
4184 CLEAR_FP_COND(cc, env->active_fpu); \
4185 }
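/*
 * The helpers generated here implement the pre-R6 C.cond.fmt style
 * compares: the result is not written to a register but recorded in
 * FCSR condition-code bit 'cc' via SET_FP_COND/CLEAR_FP_COND.  The
 * cmpabs_ variants first take the absolute value of both operands.
 */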
4186
4187 /*
4188 * NOTE: the comma operator makes "cond" evaluate to false,
4189 * but float64_unordered_quiet() is still called.
4190 */
4191 FOP_COND_D(f, (float64_unordered_quiet(fdt1, fdt0,
4192 &env->active_fpu.fp_status), 0))
4193 FOP_COND_D(un, float64_unordered_quiet(fdt1, fdt0,
4194 &env->active_fpu.fp_status))
4195 FOP_COND_D(eq, float64_eq_quiet(fdt0, fdt1,
4196 &env->active_fpu.fp_status))
4197 FOP_COND_D(ueq, float64_unordered_quiet(fdt1, fdt0,
4198 &env->active_fpu.fp_status)
4199 || float64_eq_quiet(fdt0, fdt1,
4200 &env->active_fpu.fp_status))
4201 FOP_COND_D(olt, float64_lt_quiet(fdt0, fdt1,
4202 &env->active_fpu.fp_status))
4203 FOP_COND_D(ult, float64_unordered_quiet(fdt1, fdt0,
4204 &env->active_fpu.fp_status)
4205 || float64_lt_quiet(fdt0, fdt1,
4206 &env->active_fpu.fp_status))
4207 FOP_COND_D(ole, float64_le_quiet(fdt0, fdt1,
4208 &env->active_fpu.fp_status))
4209 FOP_COND_D(ule, float64_unordered_quiet(fdt1, fdt0,
4210 &env->active_fpu.fp_status)
4211 || float64_le_quiet(fdt0, fdt1,
4212 &env->active_fpu.fp_status))
4213 /*
4214 * NOTE: the comma operator makes "cond" evaluate to false,
4215 * but float64_unordered() is still called.
4216 */
4217 FOP_COND_D(sf, (float64_unordered(fdt1, fdt0,
4218 &env->active_fpu.fp_status), 0))
4219 FOP_COND_D(ngle, float64_unordered(fdt1, fdt0,
4220 &env->active_fpu.fp_status))
4221 FOP_COND_D(seq, float64_eq(fdt0, fdt1,
4222 &env->active_fpu.fp_status))
4223 FOP_COND_D(ngl, float64_unordered(fdt1, fdt0,
4224 &env->active_fpu.fp_status)
4225 || float64_eq(fdt0, fdt1,
4226 &env->active_fpu.fp_status))
4227 FOP_COND_D(lt, float64_lt(fdt0, fdt1,
4228 &env->active_fpu.fp_status))
4229 FOP_COND_D(nge, float64_unordered(fdt1, fdt0,
4230 &env->active_fpu.fp_status)
4231 || float64_lt(fdt0, fdt1,
4232 &env->active_fpu.fp_status))
4233 FOP_COND_D(le, float64_le(fdt0, fdt1,
4234 &env->active_fpu.fp_status))
4235 FOP_COND_D(ngt, float64_unordered(fdt1, fdt0,
4236 &env->active_fpu.fp_status)
4237 || float64_le(fdt0, fdt1,
4238 &env->active_fpu.fp_status))
4239
4240 #define FOP_COND_S(op, cond) \
4241 void helper_cmp_s_ ## op(CPUMIPSState *env, uint32_t fst0, \
4242 uint32_t fst1, int cc) \
4243 { \
4244 int c; \
4245 c = cond; \
4246 update_fcr31(env, GETPC()); \
4247 if (c) \
4248 SET_FP_COND(cc, env->active_fpu); \
4249 else \
4250 CLEAR_FP_COND(cc, env->active_fpu); \
4251 } \
4252 void helper_cmpabs_s_ ## op(CPUMIPSState *env, uint32_t fst0, \
4253 uint32_t fst1, int cc) \
4254 { \
4255 int c; \
4256 fst0 = float32_abs(fst0); \
4257 fst1 = float32_abs(fst1); \
4258 c = cond; \
4259 update_fcr31(env, GETPC()); \
4260 if (c) \
4261 SET_FP_COND(cc, env->active_fpu); \
4262 else \
4263 CLEAR_FP_COND(cc, env->active_fpu); \
4264 }
4265
4266 /*
4267 * NOTE: the comma operator makes "cond" evaluate to false,
4268 * but float32_unordered_quiet() is still called.
4269 */
4270 FOP_COND_S(f, (float32_unordered_quiet(fst1, fst0,
4271 &env->active_fpu.fp_status), 0))
4272 FOP_COND_S(un, float32_unordered_quiet(fst1, fst0,
4273 &env->active_fpu.fp_status))
4274 FOP_COND_S(eq, float32_eq_quiet(fst0, fst1,
4275 &env->active_fpu.fp_status))
4276 FOP_COND_S(ueq, float32_unordered_quiet(fst1, fst0,
4277 &env->active_fpu.fp_status)
4278 || float32_eq_quiet(fst0, fst1,
4279 &env->active_fpu.fp_status))
4280 FOP_COND_S(olt, float32_lt_quiet(fst0, fst1,
4281 &env->active_fpu.fp_status))
4282 FOP_COND_S(ult, float32_unordered_quiet(fst1, fst0,
4283 &env->active_fpu.fp_status)
4284 || float32_lt_quiet(fst0, fst1,
4285 &env->active_fpu.fp_status))
4286 FOP_COND_S(ole, float32_le_quiet(fst0, fst1,
4287 &env->active_fpu.fp_status))
4288 FOP_COND_S(ule, float32_unordered_quiet(fst1, fst0,
4289 &env->active_fpu.fp_status)
4290 || float32_le_quiet(fst0, fst1,
4291 &env->active_fpu.fp_status))
4292 /*
4293 * NOTE: the comma operator makes "cond" evaluate to false,
4294 * but float32_unordered() is still called.
4295 */
4296 FOP_COND_S(sf, (float32_unordered(fst1, fst0,
4297 &env->active_fpu.fp_status), 0))
4298 FOP_COND_S(ngle, float32_unordered(fst1, fst0,
4299 &env->active_fpu.fp_status))
4300 FOP_COND_S(seq, float32_eq(fst0, fst1,
4301 &env->active_fpu.fp_status))
4302 FOP_COND_S(ngl, float32_unordered(fst1, fst0,
4303 &env->active_fpu.fp_status)
4304 || float32_eq(fst0, fst1,
4305 &env->active_fpu.fp_status))
4306 FOP_COND_S(lt, float32_lt(fst0, fst1,
4307 &env->active_fpu.fp_status))
4308 FOP_COND_S(nge, float32_unordered(fst1, fst0,
4309 &env->active_fpu.fp_status)
4310 || float32_lt(fst0, fst1,
4311 &env->active_fpu.fp_status))
4312 FOP_COND_S(le, float32_le(fst0, fst1,
4313 &env->active_fpu.fp_status))
4314 FOP_COND_S(ngt, float32_unordered(fst1, fst0,
4315 &env->active_fpu.fp_status)
4316 || float32_le(fst0, fst1,
4317 &env->active_fpu.fp_status))
4318
4319 #define FOP_COND_PS(op, condl, condh) \
4320 void helper_cmp_ps_ ## op(CPUMIPSState *env, uint64_t fdt0, \
4321 uint64_t fdt1, int cc) \
4322 { \
4323 uint32_t fst0, fsth0, fst1, fsth1; \
4324 int ch, cl; \
4325 fst0 = fdt0 & 0XFFFFFFFF; \
4326 fsth0 = fdt0 >> 32; \
4327 fst1 = fdt1 & 0XFFFFFFFF; \
4328 fsth1 = fdt1 >> 32; \
4329 cl = condl; \
4330 ch = condh; \
4331 update_fcr31(env, GETPC()); \
4332 if (cl) \
4333 SET_FP_COND(cc, env->active_fpu); \
4334 else \
4335 CLEAR_FP_COND(cc, env->active_fpu); \
4336 if (ch) \
4337 SET_FP_COND(cc + 1, env->active_fpu); \
4338 else \
4339 CLEAR_FP_COND(cc + 1, env->active_fpu); \
4340 } \
4341 void helper_cmpabs_ps_ ## op(CPUMIPSState *env, uint64_t fdt0, \
4342 uint64_t fdt1, int cc) \
4343 { \
4344 uint32_t fst0, fsth0, fst1, fsth1; \
4345 int ch, cl; \
4346 fst0 = float32_abs(fdt0 & 0XFFFFFFFF); \
4347 fsth0 = float32_abs(fdt0 >> 32); \
4348 fst1 = float32_abs(fdt1 & 0XFFFFFFFF); \
4349 fsth1 = float32_abs(fdt1 >> 32); \
4350 cl = condl; \
4351 ch = condh; \
4352 update_fcr31(env, GETPC()); \
4353 if (cl) \
4354 SET_FP_COND(cc, env->active_fpu); \
4355 else \
4356 CLEAR_FP_COND(cc, env->active_fpu); \
4357 if (ch) \
4358 SET_FP_COND(cc + 1, env->active_fpu); \
4359 else \
4360 CLEAR_FP_COND(cc + 1, env->active_fpu); \
4361 }
4362
4363 /*
4364 * NOTE: the comma operator makes "cond" evaluate to false,
4365 * but float32_unordered_quiet() is still called.
4366 */
4367 FOP_COND_PS(f, (float32_unordered_quiet(fst1, fst0,
4368 &env->active_fpu.fp_status), 0),
4369 (float32_unordered_quiet(fsth1, fsth0,
4370 &env->active_fpu.fp_status), 0))
4371 FOP_COND_PS(un, float32_unordered_quiet(fst1, fst0,
4372 &env->active_fpu.fp_status),
4373 float32_unordered_quiet(fsth1, fsth0,
4374 &env->active_fpu.fp_status))
4375 FOP_COND_PS(eq, float32_eq_quiet(fst0, fst1,
4376 &env->active_fpu.fp_status),
4377 float32_eq_quiet(fsth0, fsth1,
4378 &env->active_fpu.fp_status))
4379 FOP_COND_PS(ueq, float32_unordered_quiet(fst1, fst0,
4380 &env->active_fpu.fp_status)
4381 || float32_eq_quiet(fst0, fst1,
4382 &env->active_fpu.fp_status),
4383 float32_unordered_quiet(fsth1, fsth0,
4384 &env->active_fpu.fp_status)
4385 || float32_eq_quiet(fsth0, fsth1,
4386 &env->active_fpu.fp_status))
4387 FOP_COND_PS(olt, float32_lt_quiet(fst0, fst1,
4388 &env->active_fpu.fp_status),
4389 float32_lt_quiet(fsth0, fsth1,
4390 &env->active_fpu.fp_status))
4391 FOP_COND_PS(ult, float32_unordered_quiet(fst1, fst0,
4392 &env->active_fpu.fp_status)
4393 || float32_lt_quiet(fst0, fst1,
4394 &env->active_fpu.fp_status),
4395 float32_unordered_quiet(fsth1, fsth0,
4396 &env->active_fpu.fp_status)
4397 || float32_lt_quiet(fsth0, fsth1,
4398 &env->active_fpu.fp_status))
4399 FOP_COND_PS(ole, float32_le_quiet(fst0, fst1,
4400 &env->active_fpu.fp_status),
4401 float32_le_quiet(fsth0, fsth1,
4402 &env->active_fpu.fp_status))
4403 FOP_COND_PS(ule, float32_unordered_quiet(fst1, fst0,
4404 &env->active_fpu.fp_status)
4405 || float32_le_quiet(fst0, fst1,
4406 &env->active_fpu.fp_status),
4407 float32_unordered_quiet(fsth1, fsth0,
4408 &env->active_fpu.fp_status)
4409 || float32_le_quiet(fsth0, fsth1,
4410 &env->active_fpu.fp_status))
4411 /*
4412 * NOTE: the comma operator makes "cond" evaluate to false,
4413 * but float32_unordered() is still called.
4414 */
4415 FOP_COND_PS(sf, (float32_unordered(fst1, fst0,
4416 &env->active_fpu.fp_status), 0),
4417 (float32_unordered(fsth1, fsth0,
4418 &env->active_fpu.fp_status), 0))
4419 FOP_COND_PS(ngle, float32_unordered(fst1, fst0,
4420 &env->active_fpu.fp_status),
4421 float32_unordered(fsth1, fsth0,
4422 &env->active_fpu.fp_status))
4423 FOP_COND_PS(seq, float32_eq(fst0, fst1,
4424 &env->active_fpu.fp_status),
4425 float32_eq(fsth0, fsth1,
4426 &env->active_fpu.fp_status))
4427 FOP_COND_PS(ngl, float32_unordered(fst1, fst0,
4428 &env->active_fpu.fp_status)
4429 || float32_eq(fst0, fst1,
4430 &env->active_fpu.fp_status),
4431 float32_unordered(fsth1, fsth0,
4432 &env->active_fpu.fp_status)
4433 || float32_eq(fsth0, fsth1,
4434 &env->active_fpu.fp_status))
4435 FOP_COND_PS(lt, float32_lt(fst0, fst1,
4436 &env->active_fpu.fp_status),
4437 float32_lt(fsth0, fsth1,
4438 &env->active_fpu.fp_status))
4439 FOP_COND_PS(nge, float32_unordered(fst1, fst0,
4440 &env->active_fpu.fp_status)
4441 || float32_lt(fst0, fst1,
4442 &env->active_fpu.fp_status),
4443 float32_unordered(fsth1, fsth0,
4444 &env->active_fpu.fp_status)
4445 || float32_lt(fsth0, fsth1,
4446 &env->active_fpu.fp_status))
4447 FOP_COND_PS(le, float32_le(fst0, fst1,
4448 &env->active_fpu.fp_status),
4449 float32_le(fsth0, fsth1,
4450 &env->active_fpu.fp_status))
4451 FOP_COND_PS(ngt, float32_unordered(fst1, fst0,
4452 &env->active_fpu.fp_status)
4453 || float32_le(fst0, fst1,
4454 &env->active_fpu.fp_status),
4455 float32_unordered(fsth1, fsth0,
4456 &env->active_fpu.fp_status)
4457 || float32_le(fsth0, fsth1,
4458 &env->active_fpu.fp_status))
4459
4460 /* R6 compare operations */
4461 #define FOP_CONDN_D(op, cond) \
4462 uint64_t helper_r6_cmp_d_ ## op(CPUMIPSState *env, uint64_t fdt0, \
4463 uint64_t fdt1) \
4464 { \
4465 uint64_t c; \
4466 c = cond; \
4467 update_fcr31(env, GETPC()); \
4468 if (c) { \
4469 return -1; \
4470 } else { \
4471 return 0; \
4472 } \
4473 }
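/*
 * The R6 compare helpers below return a value rather than setting a
 * condition code: -1 (all ones at the helper's width) when the
 * condition holds, 0 otherwise, so the result can be written straight
 * into the destination FPR as a mask.
 */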
4474
4475 /*
4476 * NOTE: the comma operator makes "cond" evaluate to false,
4477 * but float64_unordered_quiet() is still called.
4478 */
4479 FOP_CONDN_D(af, (float64_unordered_quiet(fdt1, fdt0,
4480 &env->active_fpu.fp_status), 0))
4481 FOP_CONDN_D(un, (float64_unordered_quiet(fdt1, fdt0,
4482 &env->active_fpu.fp_status)))
4483 FOP_CONDN_D(eq, (float64_eq_quiet(fdt0, fdt1,
4484 &env->active_fpu.fp_status)))
4485 FOP_CONDN_D(ueq, (float64_unordered_quiet(fdt1, fdt0,
4486 &env->active_fpu.fp_status)
4487 || float64_eq_quiet(fdt0, fdt1,
4488 &env->active_fpu.fp_status)))
4489 FOP_CONDN_D(lt, (float64_lt_quiet(fdt0, fdt1,
4490 &env->active_fpu.fp_status)))
4491 FOP_CONDN_D(ult, (float64_unordered_quiet(fdt1, fdt0,
4492 &env->active_fpu.fp_status)
4493 || float64_lt_quiet(fdt0, fdt1,
4494 &env->active_fpu.fp_status)))
4495 FOP_CONDN_D(le, (float64_le_quiet(fdt0, fdt1,
4496 &env->active_fpu.fp_status)))
4497 FOP_CONDN_D(ule, (float64_unordered_quiet(fdt1, fdt0,
4498 &env->active_fpu.fp_status)
4499 || float64_le_quiet(fdt0, fdt1,
4500 &env->active_fpu.fp_status)))
4501 /*
4502 * NOTE: the comma operator makes "cond" evaluate to false,
4503 * but float64_unordered() is still called.
4504 */
4505 FOP_CONDN_D(saf, (float64_unordered(fdt1, fdt0,
4506 &env->active_fpu.fp_status), 0))
4507 FOP_CONDN_D(sun, (float64_unordered(fdt1, fdt0,
4508 &env->active_fpu.fp_status)))
4509 FOP_CONDN_D(seq, (float64_eq(fdt0, fdt1,
4510 &env->active_fpu.fp_status)))
4511 FOP_CONDN_D(sueq, (float64_unordered(fdt1, fdt0,
4512 &env->active_fpu.fp_status)
4513 || float64_eq(fdt0, fdt1,
4514 &env->active_fpu.fp_status)))
4515 FOP_CONDN_D(slt, (float64_lt(fdt0, fdt1,
4516 &env->active_fpu.fp_status)))
4517 FOP_CONDN_D(sult, (float64_unordered(fdt1, fdt0,
4518 &env->active_fpu.fp_status)
4519 || float64_lt(fdt0, fdt1,
4520 &env->active_fpu.fp_status)))
4521 FOP_CONDN_D(sle, (float64_le(fdt0, fdt1,
4522 &env->active_fpu.fp_status)))
4523 FOP_CONDN_D(sule, (float64_unordered(fdt1, fdt0,
4524 &env->active_fpu.fp_status)
4525 || float64_le(fdt0, fdt1,
4526 &env->active_fpu.fp_status)))
4527 FOP_CONDN_D(or, (float64_le_quiet(fdt1, fdt0,
4528 &env->active_fpu.fp_status)
4529 || float64_le_quiet(fdt0, fdt1,
4530 &env->active_fpu.fp_status)))
4531 FOP_CONDN_D(une, (float64_unordered_quiet(fdt1, fdt0,
4532 &env->active_fpu.fp_status)
4533 || float64_lt_quiet(fdt1, fdt0,
4534 &env->active_fpu.fp_status)
4535 || float64_lt_quiet(fdt0, fdt1,
4536 &env->active_fpu.fp_status)))
4537 FOP_CONDN_D(ne, (float64_lt_quiet(fdt1, fdt0,
4538 &env->active_fpu.fp_status)
4539 || float64_lt_quiet(fdt0, fdt1,
4540 &env->active_fpu.fp_status)))
4541 FOP_CONDN_D(sor, (float64_le(fdt1, fdt0,
4542 &env->active_fpu.fp_status)
4543 || float64_le(fdt0, fdt1,
4544 &env->active_fpu.fp_status)))
4545 FOP_CONDN_D(sune, (float64_unordered(fdt1, fdt0,
4546 &env->active_fpu.fp_status)
4547 || float64_lt(fdt1, fdt0,
4548 &env->active_fpu.fp_status)
4549 || float64_lt(fdt0, fdt1,
4550 &env->active_fpu.fp_status)))
4551 FOP_CONDN_D(sne, (float64_lt(fdt1, fdt0,
4552 &env->active_fpu.fp_status)
4553 || float64_lt(fdt0, fdt1,
4554 &env->active_fpu.fp_status)))
4555
4556 #define FOP_CONDN_S(op, cond) \
4557 uint32_t helper_r6_cmp_s_ ## op(CPUMIPSState *env, uint32_t fst0, \
4558 uint32_t fst1) \
4559 { \
4560 uint64_t c; \
4561 c = cond; \
4562 update_fcr31(env, GETPC()); \
4563 if (c) { \
4564 return -1; \
4565 } else { \
4566 return 0; \
4567 } \
4568 }
4569
4570 /*
4571 * NOTE: the comma operator makes "cond" evaluate to false,
4572 * but float32_unordered_quiet() is still called.
4573 */
4574 FOP_CONDN_S(af, (float32_unordered_quiet(fst1, fst0,
4575 &env->active_fpu.fp_status), 0))
4576 FOP_CONDN_S(un, (float32_unordered_quiet(fst1, fst0,
4577 &env->active_fpu.fp_status)))
4578 FOP_CONDN_S(eq, (float32_eq_quiet(fst0, fst1,
4579 &env->active_fpu.fp_status)))
4580 FOP_CONDN_S(ueq, (float32_unordered_quiet(fst1, fst0,
4581 &env->active_fpu.fp_status)
4582 || float32_eq_quiet(fst0, fst1,
4583 &env->active_fpu.fp_status)))
4584 FOP_CONDN_S(lt, (float32_lt_quiet(fst0, fst1,
4585 &env->active_fpu.fp_status)))
4586 FOP_CONDN_S(ult, (float32_unordered_quiet(fst1, fst0,
4587 &env->active_fpu.fp_status)
4588 || float32_lt_quiet(fst0, fst1,
4589 &env->active_fpu.fp_status)))
4590 FOP_CONDN_S(le, (float32_le_quiet(fst0, fst1,
4591 &env->active_fpu.fp_status)))
4592 FOP_CONDN_S(ule, (float32_unordered_quiet(fst1, fst0,
4593 &env->active_fpu.fp_status)
4594 || float32_le_quiet(fst0, fst1,
4595 &env->active_fpu.fp_status)))
4596 /*
4597 * NOTE: the comma operator makes "cond" evaluate to false,
4598 * but float32_unordered() is still called.
4599 */
4600 FOP_CONDN_S(saf, (float32_unordered(fst1, fst0,
4601 &env->active_fpu.fp_status), 0))
4602 FOP_CONDN_S(sun, (float32_unordered(fst1, fst0,
4603 &env->active_fpu.fp_status)))
4604 FOP_CONDN_S(seq, (float32_eq(fst0, fst1,
4605 &env->active_fpu.fp_status)))
4606 FOP_CONDN_S(sueq, (float32_unordered(fst1, fst0,
4607 &env->active_fpu.fp_status)
4608 || float32_eq(fst0, fst1,
4609 &env->active_fpu.fp_status)))
4610 FOP_CONDN_S(slt, (float32_lt(fst0, fst1,
4611 &env->active_fpu.fp_status)))
4612 FOP_CONDN_S(sult, (float32_unordered(fst1, fst0,
4613 &env->active_fpu.fp_status)
4614 || float32_lt(fst0, fst1,
4615 &env->active_fpu.fp_status)))
4616 FOP_CONDN_S(sle, (float32_le(fst0, fst1,
4617 &env->active_fpu.fp_status)))
4618 FOP_CONDN_S(sule, (float32_unordered(fst1, fst0,
4619 &env->active_fpu.fp_status)
4620 || float32_le(fst0, fst1,
4621 &env->active_fpu.fp_status)))
4622 FOP_CONDN_S(or, (float32_le_quiet(fst1, fst0,
4623 &env->active_fpu.fp_status)
4624 || float32_le_quiet(fst0, fst1,
4625 &env->active_fpu.fp_status)))
4626 FOP_CONDN_S(une, (float32_unordered_quiet(fst1, fst0,
4627 &env->active_fpu.fp_status)
4628 || float32_lt_quiet(fst1, fst0,
4629 &env->active_fpu.fp_status)
4630 || float32_lt_quiet(fst0, fst1,
4631 &env->active_fpu.fp_status)))
4632 FOP_CONDN_S(ne, (float32_lt_quiet(fst1, fst0,
4633 &env->active_fpu.fp_status)
4634 || float32_lt_quiet(fst0, fst1,
4635 &env->active_fpu.fp_status)))
4636 FOP_CONDN_S(sor, (float32_le(fst1, fst0,
4637 &env->active_fpu.fp_status)
4638 || float32_le(fst0, fst1,
4639 &env->active_fpu.fp_status)))
4640 FOP_CONDN_S(sune, (float32_unordered(fst1, fst0,
4641 &env->active_fpu.fp_status)
4642 || float32_lt(fst1, fst0,
4643 &env->active_fpu.fp_status)
4644 || float32_lt(fst0, fst1,
4645 &env->active_fpu.fp_status)))
4646 FOP_CONDN_S(sne, (float32_lt(fst1, fst0,
4647 &env->active_fpu.fp_status)
4648 || float32_lt(fst0, fst1,
4649 &env->active_fpu.fp_status)))
4650
4651 /* MSA */
4652 /* Data format min and max values */
4653 #define DF_BITS(df) (1 << ((df) + 3))
4654
4655 /* Element-by-element access macros */
4656 #define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))
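/*
 * DF_BYTE/DF_HALF/DF_WORD/DF_DOUBLE encode log2 of the element size in
 * bytes (0, 1, 2, 3), which is why the loads and stores below step the
 * address by (index << DF).  With 128-bit MSA registers (MSA_WRLEN ==
 * 128), DF_BITS(DF_WORD) is 32 and DF_ELEMENTS(DF_WORD) is 4.
 */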
4657
4658 #if !defined(CONFIG_USER_ONLY)
4659 #define MEMOP_IDX(DF) \
4660 TCGMemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN, \
4661 cpu_mmu_index(env, false));
4662 #else
4663 #define MEMOP_IDX(DF)
4664 #endif
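/*
 * In system-mode builds MEMOP_IDX materialises a TCGMemOpIdx ('oi')
 * that combines a target-endian, unaligned-capable memop with the
 * current MMU index, so the helper_ret_*_mmu slow-path accessors below
 * are performed at the current privilege level and fault with the
 * correct return address.  User-mode builds use the plain cpu_*_data
 * accessors and need no such descriptor.
 */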
4665
4666 void helper_msa_ld_b(CPUMIPSState *env, uint32_t wd,
4667 target_ulong addr)
4668 {
4669 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
4670 MEMOP_IDX(DF_BYTE)
4671 #if !defined(CONFIG_USER_ONLY)
4672 #if !defined(HOST_WORDS_BIGENDIAN)
4673 pwd->b[0] = helper_ret_ldub_mmu(env, addr + (0 << DF_BYTE), oi, GETPC());
4674 pwd->b[1] = helper_ret_ldub_mmu(env, addr + (1 << DF_BYTE), oi, GETPC());
4675 pwd->b[2] = helper_ret_ldub_mmu(env, addr + (2 << DF_BYTE), oi, GETPC());
4676 pwd->b[3] = helper_ret_ldub_mmu(env, addr + (3 << DF_BYTE), oi, GETPC());
4677 pwd->b[4] = helper_ret_ldub_mmu(env, addr + (4 << DF_BYTE), oi, GETPC());
4678 pwd->b[5] = helper_ret_ldub_mmu(env, addr + (5 << DF_BYTE), oi, GETPC());
4679 pwd->b[6] = helper_ret_ldub_mmu(env, addr + (6 << DF_BYTE), oi, GETPC());
4680 pwd->b[7] = helper_ret_ldub_mmu(env, addr + (7 << DF_BYTE), oi, GETPC());
4681 pwd->b[8] = helper_ret_ldub_mmu(env, addr + (8 << DF_BYTE), oi, GETPC());
4682 pwd->b[9] = helper_ret_ldub_mmu(env, addr + (9 << DF_BYTE), oi, GETPC());
4683 pwd->b[10] = helper_ret_ldub_mmu(env, addr + (10 << DF_BYTE), oi, GETPC());
4684 pwd->b[11] = helper_ret_ldub_mmu(env, addr + (11 << DF_BYTE), oi, GETPC());
4685 pwd->b[12] = helper_ret_ldub_mmu(env, addr + (12 << DF_BYTE), oi, GETPC());
4686 pwd->b[13] = helper_ret_ldub_mmu(env, addr + (13 << DF_BYTE), oi, GETPC());
4687 pwd->b[14] = helper_ret_ldub_mmu(env, addr + (14 << DF_BYTE), oi, GETPC());
4688 pwd->b[15] = helper_ret_ldub_mmu(env, addr + (15 << DF_BYTE), oi, GETPC());
4689 #else
4690 pwd->b[0] = helper_ret_ldub_mmu(env, addr + (7 << DF_BYTE), oi, GETPC());
4691 pwd->b[1] = helper_ret_ldub_mmu(env, addr + (6 << DF_BYTE), oi, GETPC());
4692 pwd->b[2] = helper_ret_ldub_mmu(env, addr + (5 << DF_BYTE), oi, GETPC());
4693 pwd->b[3] = helper_ret_ldub_mmu(env, addr + (4 << DF_BYTE), oi, GETPC());
4694 pwd->b[4] = helper_ret_ldub_mmu(env, addr + (3 << DF_BYTE), oi, GETPC());
4695 pwd->b[5] = helper_ret_ldub_mmu(env, addr + (2 << DF_BYTE), oi, GETPC());
4696 pwd->b[6] = helper_ret_ldub_mmu(env, addr + (1 << DF_BYTE), oi, GETPC());
4697 pwd->b[7] = helper_ret_ldub_mmu(env, addr + (0 << DF_BYTE), oi, GETPC());
4698 pwd->b[8] = helper_ret_ldub_mmu(env, addr + (15 << DF_BYTE), oi, GETPC());
4699 pwd->b[9] = helper_ret_ldub_mmu(env, addr + (14 << DF_BYTE), oi, GETPC());
4700 pwd->b[10] = helper_ret_ldub_mmu(env, addr + (13 << DF_BYTE), oi, GETPC());
4701 pwd->b[11] = helper_ret_ldub_mmu(env, addr + (12 << DF_BYTE), oi, GETPC());
4702 pwd->b[12] = helper_ret_ldub_mmu(env, addr + (11 << DF_BYTE), oi, GETPC());
4703 pwd->b[13] = helper_ret_ldub_mmu(env, addr + (10 << DF_BYTE), oi, GETPC());
4704 pwd->b[14] = helper_ret_ldub_mmu(env, addr + (9 << DF_BYTE), oi, GETPC());
4705 pwd->b[15] = helper_ret_ldub_mmu(env, addr + (8 << DF_BYTE), oi, GETPC());
4706 #endif
4707 #else
4708 #if !defined(HOST_WORDS_BIGENDIAN)
4709 pwd->b[0] = cpu_ldub_data(env, addr + (0 << DF_BYTE));
4710 pwd->b[1] = cpu_ldub_data(env, addr + (1 << DF_BYTE));
4711 pwd->b[2] = cpu_ldub_data(env, addr + (2 << DF_BYTE));
4712 pwd->b[3] = cpu_ldub_data(env, addr + (3 << DF_BYTE));
4713 pwd->b[4] = cpu_ldub_data(env, addr + (4 << DF_BYTE));
4714 pwd->b[5] = cpu_ldub_data(env, addr + (5 << DF_BYTE));
4715 pwd->b[6] = cpu_ldub_data(env, addr + (6 << DF_BYTE));
4716 pwd->b[7] = cpu_ldub_data(env, addr + (7 << DF_BYTE));
4717 pwd->b[8] = cpu_ldub_data(env, addr + (8 << DF_BYTE));
4718 pwd->b[9] = cpu_ldub_data(env, addr + (9 << DF_BYTE));
4719 pwd->b[10] = cpu_ldub_data(env, addr + (10 << DF_BYTE));
4720 pwd->b[11] = cpu_ldub_data(env, addr + (11 << DF_BYTE));
4721 pwd->b[12] = cpu_ldub_data(env, addr + (12 << DF_BYTE));
4722 pwd->b[13] = cpu_ldub_data(env, addr + (13 << DF_BYTE));
4723 pwd->b[14] = cpu_ldub_data(env, addr + (14 << DF_BYTE));
4724 pwd->b[15] = cpu_ldub_data(env, addr + (15 << DF_BYTE));
4725 #else
4726 pwd->b[0] = cpu_ldub_data(env, addr + (7 << DF_BYTE));
4727 pwd->b[1] = cpu_ldub_data(env, addr + (6 << DF_BYTE));
4728 pwd->b[2] = cpu_ldub_data(env, addr + (5 << DF_BYTE));
4729 pwd->b[3] = cpu_ldub_data(env, addr + (4 << DF_BYTE));
4730 pwd->b[4] = cpu_ldub_data(env, addr + (3 << DF_BYTE));
4731 pwd->b[5] = cpu_ldub_data(env, addr + (2 << DF_BYTE));
4732 pwd->b[6] = cpu_ldub_data(env, addr + (1 << DF_BYTE));
4733 pwd->b[7] = cpu_ldub_data(env, addr + (0 << DF_BYTE));
4734 pwd->b[8] = cpu_ldub_data(env, addr + (15 << DF_BYTE));
4735 pwd->b[9] = cpu_ldub_data(env, addr + (14 << DF_BYTE));
4736 pwd->b[10] = cpu_ldub_data(env, addr + (13 << DF_BYTE));
4737 pwd->b[11] = cpu_ldub_data(env, addr + (12 << DF_BYTE));
4738 pwd->b[12] = cpu_ldub_data(env, addr + (11 << DF_BYTE));
4739 pwd->b[13] = cpu_ldub_data(env, addr + (10 << DF_BYTE));
4740 pwd->b[14] = cpu_ldub_data(env, addr + (9 << DF_BYTE));
4741 pwd->b[15] = cpu_ldub_data(env, addr + (8 << DF_BYTE));
4742 #endif
4743 #endif
4744 }
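/*
 * The HOST_WORDS_BIGENDIAN index shuffling above (and in the other MSA
 * load/store helpers) compensates for the fact that wr_t overlays its
 * byte/halfword/word arrays on the same storage as the 64-bit d[]
 * lanes: on a big-endian host the narrower elements sit reversed
 * within each doubleword, so the indices are swizzled to keep the
 * register contents consistent across host endiannesses.
 */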
4745
4746 void helper_msa_ld_h(CPUMIPSState *env, uint32_t wd,
4747 target_ulong addr)
4748 {
4749 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
4750 MEMOP_IDX(DF_HALF)
4751 #if !defined(CONFIG_USER_ONLY)
4752 #if !defined(HOST_WORDS_BIGENDIAN)
4753 pwd->h[0] = helper_ret_lduw_mmu(env, addr + (0 << DF_HALF), oi, GETPC());
4754 pwd->h[1] = helper_ret_lduw_mmu(env, addr + (1 << DF_HALF), oi, GETPC());
4755 pwd->h[2] = helper_ret_lduw_mmu(env, addr + (2 << DF_HALF), oi, GETPC());
4756 pwd->h[3] = helper_ret_lduw_mmu(env, addr + (3 << DF_HALF), oi, GETPC());
4757 pwd->h[4] = helper_ret_lduw_mmu(env, addr + (4 << DF_HALF), oi, GETPC());
4758 pwd->h[5] = helper_ret_lduw_mmu(env, addr + (5 << DF_HALF), oi, GETPC());
4759 pwd->h[6] = helper_ret_lduw_mmu(env, addr + (6 << DF_HALF), oi, GETPC());
4760 pwd->h[7] = helper_ret_lduw_mmu(env, addr + (7 << DF_HALF), oi, GETPC());
4761 #else
4762 pwd->h[0] = helper_ret_lduw_mmu(env, addr + (3 << DF_HALF), oi, GETPC());
4763 pwd->h[1] = helper_ret_lduw_mmu(env, addr + (2 << DF_HALF), oi, GETPC());
4764 pwd->h[2] = helper_ret_lduw_mmu(env, addr + (1 << DF_HALF), oi, GETPC());
4765 pwd->h[3] = helper_ret_lduw_mmu(env, addr + (0 << DF_HALF), oi, GETPC());
4766 pwd->h[4] = helper_ret_lduw_mmu(env, addr + (7 << DF_HALF), oi, GETPC());
4767 pwd->h[5] = helper_ret_lduw_mmu(env, addr + (6 << DF_HALF), oi, GETPC());
4768 pwd->h[6] = helper_ret_lduw_mmu(env, addr + (5 << DF_HALF), oi, GETPC());
4769 pwd->h[7] = helper_ret_lduw_mmu(env, addr + (4 << DF_HALF), oi, GETPC());
4770 #endif
4771 #else
4772 #if !defined(HOST_WORDS_BIGENDIAN)
4773 pwd->h[0] = cpu_lduw_data(env, addr + (0 << DF_HALF));
4774 pwd->h[1] = cpu_lduw_data(env, addr + (1 << DF_HALF));
4775 pwd->h[2] = cpu_lduw_data(env, addr + (2 << DF_HALF));
4776 pwd->h[3] = cpu_lduw_data(env, addr + (3 << DF_HALF));
4777 pwd->h[4] = cpu_lduw_data(env, addr + (4 << DF_HALF));
4778 pwd->h[5] = cpu_lduw_data(env, addr + (5 << DF_HALF));
4779 pwd->h[6] = cpu_lduw_data(env, addr + (6 << DF_HALF));
4780 pwd->h[7] = cpu_lduw_data(env, addr + (7 << DF_HALF));
4781 #else
4782 pwd->h[0] = cpu_lduw_data(env, addr + (3 << DF_HALF));
4783 pwd->h[1] = cpu_lduw_data(env, addr + (2 << DF_HALF));
4784 pwd->h[2] = cpu_lduw_data(env, addr + (1 << DF_HALF));
4785 pwd->h[3] = cpu_lduw_data(env, addr + (0 << DF_HALF));
4786 pwd->h[4] = cpu_lduw_data(env, addr + (7 << DF_HALF));
4787 pwd->h[5] = cpu_lduw_data(env, addr + (6 << DF_HALF));
4788 pwd->h[6] = cpu_lduw_data(env, addr + (5 << DF_HALF));
4789 pwd->h[7] = cpu_lduw_data(env, addr + (4 << DF_HALF));
4790 #endif
4791 #endif
4792 }
4793
4794 void helper_msa_ld_w(CPUMIPSState *env, uint32_t wd,
4795 target_ulong addr)
4796 {
4797 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
4798 MEMOP_IDX(DF_WORD)
4799 #if !defined(CONFIG_USER_ONLY)
4800 #if !defined(HOST_WORDS_BIGENDIAN)
4801 pwd->w[0] = helper_ret_ldul_mmu(env, addr + (0 << DF_WORD), oi, GETPC());
4802 pwd->w[1] = helper_ret_ldul_mmu(env, addr + (1 << DF_WORD), oi, GETPC());
4803 pwd->w[2] = helper_ret_ldul_mmu(env, addr + (2 << DF_WORD), oi, GETPC());
4804 pwd->w[3] = helper_ret_ldul_mmu(env, addr + (3 << DF_WORD), oi, GETPC());
4805 #else
4806 pwd->w[0] = helper_ret_ldul_mmu(env, addr + (1 << DF_WORD), oi, GETPC());
4807 pwd->w[1] = helper_ret_ldul_mmu(env, addr + (0 << DF_WORD), oi, GETPC());
4808 pwd->w[2] = helper_ret_ldul_mmu(env, addr + (3 << DF_WORD), oi, GETPC());
4809 pwd->w[3] = helper_ret_ldul_mmu(env, addr + (2 << DF_WORD), oi, GETPC());
4810 #endif
4811 #else
4812 #if !defined(HOST_WORDS_BIGENDIAN)
4813 pwd->w[0] = cpu_ldl_data(env, addr + (0 << DF_WORD));
4814 pwd->w[1] = cpu_ldl_data(env, addr + (1 << DF_WORD));
4815 pwd->w[2] = cpu_ldl_data(env, addr + (2 << DF_WORD));
4816 pwd->w[3] = cpu_ldl_data(env, addr + (3 << DF_WORD));
4817 #else
4818 pwd->w[0] = cpu_ldl_data(env, addr + (1 << DF_WORD));
4819 pwd->w[1] = cpu_ldl_data(env, addr + (0 << DF_WORD));
4820 pwd->w[2] = cpu_ldl_data(env, addr + (3 << DF_WORD));
4821 pwd->w[3] = cpu_ldl_data(env, addr + (2 << DF_WORD));
4822 #endif
4823 #endif
4824 }
4825
4826 void helper_msa_ld_d(CPUMIPSState *env, uint32_t wd,
4827 target_ulong addr)
4828 {
4829 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
4830 MEMOP_IDX(DF_DOUBLE)
4831 #if !defined(CONFIG_USER_ONLY)
4832 pwd->d[0] = helper_ret_ldq_mmu(env, addr + (0 << DF_DOUBLE), oi, GETPC());
4833 pwd->d[1] = helper_ret_ldq_mmu(env, addr + (1 << DF_DOUBLE), oi, GETPC());
4834 #else
4835 pwd->d[0] = cpu_ldq_data(env, addr + (0 << DF_DOUBLE));
4836 pwd->d[1] = cpu_ldq_data(env, addr + (1 << DF_DOUBLE));
4837 #endif
4838 }
4839
4840 #define MSA_PAGESPAN(x) \
4841 ((((x) & ~TARGET_PAGE_MASK) + MSA_WRLEN / 8 - 1) >= TARGET_PAGE_SIZE)
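/*
 * MSA_PAGESPAN(x) is true when a full vector access (MSA_WRLEN / 8
 * bytes, i.e. 16 bytes for 128-bit registers) starting at x crosses a
 * target page boundary; e.g. with 4 KiB pages, an access at
 * page_base + 0xff8 spans two pages.
 */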
4842
4843 static inline void ensure_writable_pages(CPUMIPSState *env,
4844 target_ulong addr,
4845 int mmu_idx,
4846 uintptr_t retaddr)
4847 {
4848 /* FIXME: Probe the actual accesses (pass and use a size) */
4849 if (unlikely(MSA_PAGESPAN(addr))) {
4850 /* first page */
4851 probe_write(env, addr, 0, mmu_idx, retaddr);
4852 /* second page */
4853 addr = (addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
4854 probe_write(env, addr, 0, mmu_idx, retaddr);
4855 }
4856 }
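/*
 * Probing both pages before storing anything means a vector store that
 * spans a page boundary either completes entirely or faults before the
 * first byte is written, so the guest never observes a partially
 * updated memory image from a single MSA store.
 */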
4857
4858 void helper_msa_st_b(CPUMIPSState *env, uint32_t wd,
4859 target_ulong addr)
4860 {
4861 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
4862 int mmu_idx = cpu_mmu_index(env, false);
4863
4864 MEMOP_IDX(DF_BYTE)
4865 ensure_writable_pages(env, addr, mmu_idx, GETPC());
4866 #if !defined(CONFIG_USER_ONLY)
4867 #if !defined(HOST_WORDS_BIGENDIAN)
4868 helper_ret_stb_mmu(env, addr + (0 << DF_BYTE), pwd->b[0], oi, GETPC());
4869 helper_ret_stb_mmu(env, addr + (1 << DF_BYTE), pwd->b[1], oi, GETPC());
4870 helper_ret_stb_mmu(env, addr + (2 << DF_BYTE), pwd->b[2], oi, GETPC());
4871 helper_ret_stb_mmu(env, addr + (3 << DF_BYTE), pwd->b[3], oi, GETPC());
4872 helper_ret_stb_mmu(env, addr + (4 << DF_BYTE), pwd->b[4], oi, GETPC());
4873 helper_ret_stb_mmu(env, addr + (5 << DF_BYTE), pwd->b[5], oi, GETPC());
4874 helper_ret_stb_mmu(env, addr + (6 << DF_BYTE), pwd->b[6], oi, GETPC());
4875 helper_ret_stb_mmu(env, addr + (7 << DF_BYTE), pwd->b[7], oi, GETPC());
4876 helper_ret_stb_mmu(env, addr + (8 << DF_BYTE), pwd->b[8], oi, GETPC());
4877 helper_ret_stb_mmu(env, addr + (9 << DF_BYTE), pwd->b[9], oi, GETPC());
4878 helper_ret_stb_mmu(env, addr + (10 << DF_BYTE), pwd->b[10], oi, GETPC());
4879 helper_ret_stb_mmu(env, addr + (11 << DF_BYTE), pwd->b[11], oi, GETPC());
4880 helper_ret_stb_mmu(env, addr + (12 << DF_BYTE), pwd->b[12], oi, GETPC());
4881 helper_ret_stb_mmu(env, addr + (13 << DF_BYTE), pwd->b[13], oi, GETPC());
4882 helper_ret_stb_mmu(env, addr + (14 << DF_BYTE), pwd->b[14], oi, GETPC());
4883 helper_ret_stb_mmu(env, addr + (15 << DF_BYTE), pwd->b[15], oi, GETPC());
4884 #else
4885 helper_ret_stb_mmu(env, addr + (7 << DF_BYTE), pwd->b[0], oi, GETPC());
4886 helper_ret_stb_mmu(env, addr + (6 << DF_BYTE), pwd->b[1], oi, GETPC());
4887 helper_ret_stb_mmu(env, addr + (5 << DF_BYTE), pwd->b[2], oi, GETPC());
4888 helper_ret_stb_mmu(env, addr + (4 << DF_BYTE), pwd->b[3], oi, GETPC());
4889 helper_ret_stb_mmu(env, addr + (3 << DF_BYTE), pwd->b[4], oi, GETPC());
4890 helper_ret_stb_mmu(env, addr + (2 << DF_BYTE), pwd->b[5], oi, GETPC());
4891 helper_ret_stb_mmu(env, addr + (1 << DF_BYTE), pwd->b[6], oi, GETPC());
4892 helper_ret_stb_mmu(env, addr + (0 << DF_BYTE), pwd->b[7], oi, GETPC());
4893 helper_ret_stb_mmu(env, addr + (15 << DF_BYTE), pwd->b[8], oi, GETPC());
4894 helper_ret_stb_mmu(env, addr + (14 << DF_BYTE), pwd->b[9], oi, GETPC());
4895 helper_ret_stb_mmu(env, addr + (13 << DF_BYTE), pwd->b[10], oi, GETPC());
4896 helper_ret_stb_mmu(env, addr + (12 << DF_BYTE), pwd->b[11], oi, GETPC());
4897 helper_ret_stb_mmu(env, addr + (11 << DF_BYTE), pwd->b[12], oi, GETPC());
4898 helper_ret_stb_mmu(env, addr + (10 << DF_BYTE), pwd->b[13], oi, GETPC());
4899 helper_ret_stb_mmu(env, addr + (9 << DF_BYTE), pwd->b[14], oi, GETPC());
4900 helper_ret_stb_mmu(env, addr + (8 << DF_BYTE), pwd->b[15], oi, GETPC());
4901 #endif
4902 #else
4903 #if !defined(HOST_WORDS_BIGENDIAN)
4904 cpu_stb_data(env, addr + (0 << DF_BYTE), pwd->b[0]);
4905 cpu_stb_data(env, addr + (1 << DF_BYTE), pwd->b[1]);
4906 cpu_stb_data(env, addr + (2 << DF_BYTE), pwd->b[2]);
4907 cpu_stb_data(env, addr + (3 << DF_BYTE), pwd->b[3]);
4908 cpu_stb_data(env, addr + (4 << DF_BYTE), pwd->b[4]);
4909 cpu_stb_data(env, addr + (5 << DF_BYTE), pwd->b[5]);
4910 cpu_stb_data(env, addr + (6 << DF_BYTE), pwd->b[6]);
4911 cpu_stb_data(env, addr + (7 << DF_BYTE), pwd->b[7]);
4912 cpu_stb_data(env, addr + (8 << DF_BYTE), pwd->b[8]);
4913 cpu_stb_data(env, addr + (9 << DF_BYTE), pwd->b[9]);
4914 cpu_stb_data(env, addr + (10 << DF_BYTE), pwd->b[10]);
4915 cpu_stb_data(env, addr + (11 << DF_BYTE), pwd->b[11]);
4916 cpu_stb_data(env, addr + (12 << DF_BYTE), pwd->b[12]);
4917 cpu_stb_data(env, addr + (13 << DF_BYTE), pwd->b[13]);
4918 cpu_stb_data(env, addr + (14 << DF_BYTE), pwd->b[14]);
4919 cpu_stb_data(env, addr + (15 << DF_BYTE), pwd->b[15]);
4920 #else
4921 cpu_stb_data(env, addr + (7 << DF_BYTE), pwd->b[0]);
4922 cpu_stb_data(env, addr + (6 << DF_BYTE), pwd->b[1]);
4923 cpu_stb_data(env, addr + (5 << DF_BYTE), pwd->b[2]);
4924 cpu_stb_data(env, addr + (4 << DF_BYTE), pwd->b[3]);
4925 cpu_stb_data(env, addr + (3 << DF_BYTE), pwd->b[4]);
4926 cpu_stb_data(env, addr + (2 << DF_BYTE), pwd->b[5]);
4927 cpu_stb_data(env, addr + (1 << DF_BYTE), pwd->b[6]);
4928 cpu_stb_data(env, addr + (0 << DF_BYTE), pwd->b[7]);
4929 cpu_stb_data(env, addr + (15 << DF_BYTE), pwd->b[8]);
4930 cpu_stb_data(env, addr + (14 << DF_BYTE), pwd->b[9]);
4931 cpu_stb_data(env, addr + (13 << DF_BYTE), pwd->b[10]);
4932 cpu_stb_data(env, addr + (12 << DF_BYTE), pwd->b[11]);
4933 cpu_stb_data(env, addr + (11 << DF_BYTE), pwd->b[12]);
4934 cpu_stb_data(env, addr + (10 << DF_BYTE), pwd->b[13]);
4935 cpu_stb_data(env, addr + (9 << DF_BYTE), pwd->b[14]);
4936 cpu_stb_data(env, addr + (8 << DF_BYTE), pwd->b[15]);
4937 #endif
4938 #endif
4939 }
4940
4941 void helper_msa_st_h(CPUMIPSState *env, uint32_t wd,
4942 target_ulong addr)
4943 {
4944 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
4945 int mmu_idx = cpu_mmu_index(env, false);
4946
4947 MEMOP_IDX(DF_HALF)
4948 ensure_writable_pages(env, addr, mmu_idx, GETPC());
4949 #if !defined(CONFIG_USER_ONLY)
4950 #if !defined(HOST_WORDS_BIGENDIAN)
4951 helper_ret_stw_mmu(env, addr + (0 << DF_HALF), pwd->h[0], oi, GETPC());
4952 helper_ret_stw_mmu(env, addr + (1 << DF_HALF), pwd->h[1], oi, GETPC());
4953 helper_ret_stw_mmu(env, addr + (2 << DF_HALF), pwd->h[2], oi, GETPC());
4954 helper_ret_stw_mmu(env, addr + (3 << DF_HALF), pwd->h[3], oi, GETPC());
4955 helper_ret_stw_mmu(env, addr + (4 << DF_HALF), pwd->h[4], oi, GETPC());
4956 helper_ret_stw_mmu(env, addr + (5 << DF_HALF), pwd->h[5], oi, GETPC());
4957 helper_ret_stw_mmu(env, addr + (6 << DF_HALF), pwd->h[6], oi, GETPC());
4958 helper_ret_stw_mmu(env, addr + (7 << DF_HALF), pwd->h[7], oi, GETPC());
4959 #else
4960 helper_ret_stw_mmu(env, addr + (3 << DF_HALF), pwd->h[0], oi, GETPC());
4961 helper_ret_stw_mmu(env, addr + (2 << DF_HALF), pwd->h[1], oi, GETPC());
4962 helper_ret_stw_mmu(env, addr + (1 << DF_HALF), pwd->h[2], oi, GETPC());
4963 helper_ret_stw_mmu(env, addr + (0 << DF_HALF), pwd->h[3], oi, GETPC());
4964 helper_ret_stw_mmu(env, addr + (7 << DF_HALF), pwd->h[4], oi, GETPC());
4965 helper_ret_stw_mmu(env, addr + (6 << DF_HALF), pwd->h[5], oi, GETPC());
4966 helper_ret_stw_mmu(env, addr + (5 << DF_HALF), pwd->h[6], oi, GETPC());
4967 helper_ret_stw_mmu(env, addr + (4 << DF_HALF), pwd->h[7], oi, GETPC());
4968 #endif
4969 #else
4970 #if !defined(HOST_WORDS_BIGENDIAN)
4971 cpu_stw_data(env, addr + (0 << DF_HALF), pwd->h[0]);
4972 cpu_stw_data(env, addr + (1 << DF_HALF), pwd->h[1]);
4973 cpu_stw_data(env, addr + (2 << DF_HALF), pwd->h[2]);
4974 cpu_stw_data(env, addr + (3 << DF_HALF), pwd->h[3]);
4975 cpu_stw_data(env, addr + (4 << DF_HALF), pwd->h[4]);
4976 cpu_stw_data(env, addr + (5 << DF_HALF), pwd->h[5]);
4977 cpu_stw_data(env, addr + (6 << DF_HALF), pwd->h[6]);
4978 cpu_stw_data(env, addr + (7 << DF_HALF), pwd->h[7]);
4979 #else
4980 cpu_stw_data(env, addr + (3 << DF_HALF), pwd->h[0]);
4981 cpu_stw_data(env, addr + (2 << DF_HALF), pwd->h[1]);
4982 cpu_stw_data(env, addr + (1 << DF_HALF), pwd->h[2]);
4983 cpu_stw_data(env, addr + (0 << DF_HALF), pwd->h[3]);
4984 cpu_stw_data(env, addr + (7 << DF_HALF), pwd->h[4]);
4985 cpu_stw_data(env, addr + (6 << DF_HALF), pwd->h[5]);
4986 cpu_stw_data(env, addr + (5 << DF_HALF), pwd->h[6]);
4987 cpu_stw_data(env, addr + (4 << DF_HALF), pwd->h[7]);
4988 #endif
4989 #endif
4990 }
4991
4992 void helper_msa_st_w(CPUMIPSState *env, uint32_t wd,
4993 target_ulong addr)
4994 {
4995 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
4996 int mmu_idx = cpu_mmu_index(env, false);
4997
4998 MEMOP_IDX(DF_WORD)
4999 ensure_writable_pages(env, addr, mmu_idx, GETPC());
5000 #if !defined(CONFIG_USER_ONLY)
5001 #if !defined(HOST_WORDS_BIGENDIAN)
5002 helper_ret_stl_mmu(env, addr + (0 << DF_WORD), pwd->w[0], oi, GETPC());
5003 helper_ret_stl_mmu(env, addr + (1 << DF_WORD), pwd->w[1], oi, GETPC());
5004 helper_ret_stl_mmu(env, addr + (2 << DF_WORD), pwd->w[2], oi, GETPC());
5005 helper_ret_stl_mmu(env, addr + (3 << DF_WORD), pwd->w[3], oi, GETPC());
5006 #else
5007 helper_ret_stl_mmu(env, addr + (1 << DF_WORD), pwd->w[0], oi, GETPC());
5008 helper_ret_stl_mmu(env, addr + (0 << DF_WORD), pwd->w[1], oi, GETPC());
5009 helper_ret_stl_mmu(env, addr + (3 << DF_WORD), pwd->w[2], oi, GETPC());
5010 helper_ret_stl_mmu(env, addr + (2 << DF_WORD), pwd->w[3], oi, GETPC());
5011 #endif
5012 #else
5013 #if !defined(HOST_WORDS_BIGENDIAN)
5014 cpu_stl_data(env, addr + (0 << DF_WORD), pwd->w[0]);
5015 cpu_stl_data(env, addr + (1 << DF_WORD), pwd->w[1]);
5016 cpu_stl_data(env, addr + (2 << DF_WORD), pwd->w[2]);
5017 cpu_stl_data(env, addr + (3 << DF_WORD), pwd->w[3]);
5018 #else
5019 cpu_stl_data(env, addr + (1 << DF_WORD), pwd->w[0]);
5020 cpu_stl_data(env, addr + (0 << DF_WORD), pwd->w[1]);
5021 cpu_stl_data(env, addr + (3 << DF_WORD), pwd->w[2]);
5022 cpu_stl_data(env, addr + (2 << DF_WORD), pwd->w[3]);
5023 #endif
5024 #endif
5025 }
5026
5027 void helper_msa_st_d(CPUMIPSState *env, uint32_t wd,
5028 target_ulong addr)
5029 {
5030 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
5031 int mmu_idx = cpu_mmu_index(env, false);
5032
5033 MEMOP_IDX(DF_DOUBLE)
5034 ensure_writable_pages(env, addr, mmu_idx, GETPC());
5035 #if !defined(CONFIG_USER_ONLY)
5036 helper_ret_stq_mmu(env, addr + (0 << DF_DOUBLE), pwd->d[0], oi, GETPC());
5037 helper_ret_stq_mmu(env, addr + (1 << DF_DOUBLE), pwd->d[1], oi, GETPC());
5038 #else
5039 cpu_stq_data(env, addr + (0 << DF_DOUBLE), pwd->d[0]);
5040 cpu_stq_data(env, addr + (1 << DF_DOUBLE), pwd->d[1]);
5041 #endif
5042 }
5043
5044 void helper_cache(CPUMIPSState *env, target_ulong addr, uint32_t op)
5045 {
5046 #ifndef CONFIG_USER_ONLY
5047 target_ulong index = addr & 0x1fffffff;
5048 if (op == 9) {
5049 /* Index Store Tag */
5050 memory_region_dispatch_write(env->itc_tag, index, env->CP0_TagLo,
5051 MO_64, MEMTXATTRS_UNSPECIFIED);
5052 } else if (op == 5) {
5053 /* Index Load Tag */
5054 memory_region_dispatch_read(env->itc_tag, index, &env->CP0_TagLo,
5055 MO_64, MEMTXATTRS_UNSPECIFIED);
5056 }
5057 #endif
5058 }
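/*
 * Only the Index Store Tag (op 9) and Index Load Tag (op 5) cache
 * operations are modelled here, and only as a transfer of CP0_TagLo
 * to/from the itc_tag memory region; every other CACHE op is a no-op,
 * and the whole body is compiled out for user-mode emulation.
 */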
5059