xref: /qemu/target/sparc/translate.c (revision 4d7dd4ed)
1 /*
2    SPARC translation
3 
4    Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5    Copyright (C) 2003-2005 Fabrice Bellard
6 
7    This library is free software; you can redistribute it and/or
8    modify it under the terms of the GNU Lesser General Public
9    License as published by the Free Software Foundation; either
10    version 2.1 of the License, or (at your option) any later version.
11 
12    This library is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15    Lesser General Public License for more details.
16 
17    You should have received a copy of the GNU Lesser General Public
18    License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 
23 #include "cpu.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28 #include "tcg/tcg-op-gvec.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
31 #include "exec/log.h"
32 #include "asi.h"
33 
34 #define HELPER_H "helper.h"
35 #include "exec/helper-info.c.inc"
36 #undef  HELPER_H
37 
38 #ifdef TARGET_SPARC64
39 # define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
40 # define gen_helper_rett(E)                     qemu_build_not_reached()
41 # define gen_helper_power_down(E)               qemu_build_not_reached()
42 # define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
43 #else
44 # define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
45 # define gen_helper_done(E)                     qemu_build_not_reached()
46 # define gen_helper_fabsd(D, S)                 qemu_build_not_reached()
47 # define gen_helper_flushw(E)                   qemu_build_not_reached()
48 # define gen_helper_fnegd(D, S)                 qemu_build_not_reached()
49 # define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
50 # define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
51 # define gen_helper_restored(E)                 qemu_build_not_reached()
52 # define gen_helper_retry(E)                    qemu_build_not_reached()
53 # define gen_helper_saved(E)                    qemu_build_not_reached()
54 # define gen_helper_sdivx(D, E, A, B)           qemu_build_not_reached()
55 # define gen_helper_set_softint(E, S)           qemu_build_not_reached()
56 # define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
57 # define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
58 # define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
59 # define gen_helper_udivx(D, E, A, B)           qemu_build_not_reached()
60 # define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
61 # define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
62 # define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
63 # define gen_helper_write_softint(E, S)         qemu_build_not_reached()
64 # define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
65 # define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
66 # define gen_helper_fabsq                ({ qemu_build_not_reached(); NULL; })
67 # define gen_helper_fcmpeq16             ({ qemu_build_not_reached(); NULL; })
68 # define gen_helper_fcmpeq32             ({ qemu_build_not_reached(); NULL; })
69 # define gen_helper_fcmpgt16             ({ qemu_build_not_reached(); NULL; })
70 # define gen_helper_fcmpgt32             ({ qemu_build_not_reached(); NULL; })
71 # define gen_helper_fcmple16             ({ qemu_build_not_reached(); NULL; })
72 # define gen_helper_fcmple32             ({ qemu_build_not_reached(); NULL; })
73 # define gen_helper_fcmpne16             ({ qemu_build_not_reached(); NULL; })
74 # define gen_helper_fcmpne32             ({ qemu_build_not_reached(); NULL; })
75 # define gen_helper_fdtox                ({ qemu_build_not_reached(); NULL; })
76 # define gen_helper_fexpand              ({ qemu_build_not_reached(); NULL; })
77 # define gen_helper_fmul8sux16           ({ qemu_build_not_reached(); NULL; })
78 # define gen_helper_fmul8ulx16           ({ qemu_build_not_reached(); NULL; })
79 # define gen_helper_fmul8x16al           ({ qemu_build_not_reached(); NULL; })
80 # define gen_helper_fmul8x16au           ({ qemu_build_not_reached(); NULL; })
81 # define gen_helper_fmul8x16             ({ qemu_build_not_reached(); NULL; })
82 # define gen_helper_fmuld8sux16          ({ qemu_build_not_reached(); NULL; })
83 # define gen_helper_fmuld8ulx16          ({ qemu_build_not_reached(); NULL; })
84 # define gen_helper_fnegq                ({ qemu_build_not_reached(); NULL; })
85 # define gen_helper_fpmerge              ({ qemu_build_not_reached(); NULL; })
86 # define gen_helper_fqtox                ({ qemu_build_not_reached(); NULL; })
87 # define gen_helper_fstox                ({ qemu_build_not_reached(); NULL; })
88 # define gen_helper_fxtod                ({ qemu_build_not_reached(); NULL; })
89 # define gen_helper_fxtoq                ({ qemu_build_not_reached(); NULL; })
90 # define gen_helper_fxtos                ({ qemu_build_not_reached(); NULL; })
91 # define gen_helper_pdist                ({ qemu_build_not_reached(); NULL; })
92 # define FSR_LDXFSR_MASK                        0
93 # define FSR_LDXFSR_OLDMASK                     0
94 # define MAXTL_MASK                             0
95 #endif
96 
97 /* Dynamic PC, must exit to main loop. */
98 #define DYNAMIC_PC         1
99 /* Dynamic PC, one of two values according to jump_pc[T2]. */
100 #define JUMP_PC            2
101 /* Dynamic PC, may lookup next TB. */
102 #define DYNAMIC_PC_LOOKUP  3
103 
104 #define DISAS_EXIT  DISAS_TARGET_0
105 
106 /* global register indexes */
107 static TCGv_ptr cpu_regwptr;
108 static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
109 static TCGv_i32 cpu_cc_op;
110 static TCGv_i32 cpu_psr;
111 static TCGv cpu_fsr, cpu_pc, cpu_npc;
112 static TCGv cpu_regs[32];
113 static TCGv cpu_y;
114 static TCGv cpu_tbr;
115 static TCGv cpu_cond;
116 #ifdef TARGET_SPARC64
117 static TCGv_i32 cpu_xcc, cpu_fprs;
118 static TCGv cpu_gsr;
119 #else
120 # define cpu_fprs               ({ qemu_build_not_reached(); (TCGv)NULL; })
121 # define cpu_gsr                ({ qemu_build_not_reached(); (TCGv)NULL; })
122 #endif
123 /* Floating point registers */
124 static TCGv_i64 cpu_fpr[TARGET_DPREGS];
125 
126 #define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
127 #ifdef TARGET_SPARC64
128 # define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
129 # define env64_field_offsetof(X)  env_field_offsetof(X)
130 #else
131 # define env32_field_offsetof(X)  env_field_offsetof(X)
132 # define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
133 #endif
134 
135 typedef struct DisasDelayException {
136     struct DisasDelayException *next;
137     TCGLabel *lab;
138     TCGv_i32 excp;
139     /* Saved state at parent insn. */
140     target_ulong pc;
141     target_ulong npc;
142 } DisasDelayException;
143 
144 typedef struct DisasContext {
145     DisasContextBase base;
146     target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
147     target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
148     target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
149     int mem_idx;
150     bool fpu_enabled;
151     bool address_mask_32bit;
152 #ifndef CONFIG_USER_ONLY
153     bool supervisor;
154 #ifdef TARGET_SPARC64
155     bool hypervisor;
156 #endif
157 #endif
158 
159     uint32_t cc_op;  /* current CC operation */
160     sparc_def_t *def;
161 #ifdef TARGET_SPARC64
162     int fprs_dirty;
163     int asi;
164 #endif
165     DisasDelayException *delay_excp_list;
166 } DisasContext;
167 
168 typedef struct {
169     TCGCond cond;
170     bool is_bool;
171     TCGv c1, c2;
172 } DisasCompare;
173 
174 // This function uses non-native bit order
175 #define GET_FIELD(X, FROM, TO)                                  \
176     ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
177 
178 // This function uses the order in the manuals, i.e. bit 0 is 2^0
179 #define GET_FIELD_SP(X, FROM, TO)               \
180     GET_FIELD(X, 31 - (TO), 31 - (FROM))
181 
182 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
183 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
184 
185 #ifdef TARGET_SPARC64
186 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
187 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
188 #else
189 #define DFPREG(r) (r & 0x1e)
190 #define QFPREG(r) (r & 0x1c)
191 #endif
192 
193 #define UA2005_HTRAP_MASK 0xff
194 #define V8_TRAP_MASK 0x7f
195 
196 #define IS_IMM (insn & (1<<13))
197 
/*
 * Mark the half of the FP register file containing %f[rd] as dirty
 * in the FPRS register (SPARC64 only; no-op on 32-bit targets).
 */
static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    /* Lower half (%f0-%f31) -> FPRS.DL (bit 0); upper -> FPRS.DU (bit 1). */
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
210 
211 /* floating point registers moves */
212 static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
213 {
214     TCGv_i32 ret = tcg_temp_new_i32();
215     if (src & 1) {
216         tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
217     } else {
218         tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
219     }
220     return ret;
221 }
222 
223 static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
224 {
225     TCGv_i64 t = tcg_temp_new_i64();
226 
227     tcg_gen_extu_i32_i64(t, v);
228     tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
229                         (dst & 1 ? 0 : 32), 32);
230     gen_update_fprs_dirty(dc, dst);
231 }
232 
/* Allocate a scratch i32 destination for a single-precision result. */
static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return tcg_temp_new_i32();
}
237 
238 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
239 {
240     src = DFPREG(src);
241     return cpu_fpr[src / 2];
242 }
243 
244 static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
245 {
246     dst = DFPREG(dst);
247     tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
248     gen_update_fprs_dirty(dc, dst);
249 }
250 
/* Destination for a double-precision result; aliases the backing slot,
   so the caller is responsible for calling gen_update_fprs_dirty.  */
static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}
255 
256 static void gen_op_load_fpr_QT0(unsigned int src)
257 {
258     tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
259                    offsetof(CPU_QuadU, ll.upper));
260     tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
261                    offsetof(CPU_QuadU, ll.lower));
262 }
263 
264 static void gen_op_load_fpr_QT1(unsigned int src)
265 {
266     tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt1) +
267                    offsetof(CPU_QuadU, ll.upper));
268     tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt1) +
269                    offsetof(CPU_QuadU, ll.lower));
270 }
271 
272 static void gen_op_store_QT0_fpr(unsigned int dst)
273 {
274     tcg_gen_ld_i64(cpu_fpr[dst / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
275                    offsetof(CPU_QuadU, ll.upper));
276     tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
277                    offsetof(CPU_QuadU, ll.lower));
278 }
279 
280 /* moves */
281 #ifdef CONFIG_USER_ONLY
282 #define supervisor(dc) 0
283 #define hypervisor(dc) 0
284 #else
285 #ifdef TARGET_SPARC64
286 #define hypervisor(dc) (dc->hypervisor)
287 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
288 #else
289 #define supervisor(dc) (dc->supervisor)
290 #define hypervisor(dc) 0
291 #endif
292 #endif
293 
294 #if !defined(TARGET_SPARC64)
295 # define AM_CHECK(dc)  false
296 #elif defined(TARGET_ABI32)
297 # define AM_CHECK(dc)  true
298 #elif defined(CONFIG_USER_ONLY)
299 # define AM_CHECK(dc)  false
300 #else
301 # define AM_CHECK(dc)  ((dc)->address_mask_32bit)
302 #endif
303 
304 static void gen_address_mask(DisasContext *dc, TCGv addr)
305 {
306     if (AM_CHECK(dc)) {
307         tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
308     }
309 }
310 
311 static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
312 {
313     return AM_CHECK(dc) ? (uint32_t)addr : addr;
314 }
315 
316 static TCGv gen_load_gpr(DisasContext *dc, int reg)
317 {
318     if (reg > 0) {
319         assert(reg < 32);
320         return cpu_regs[reg];
321     } else {
322         TCGv t = tcg_temp_new();
323         tcg_gen_movi_tl(t, 0);
324         return t;
325     }
326 }
327 
328 static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
329 {
330     if (reg > 0) {
331         assert(reg < 32);
332         tcg_gen_mov_tl(cpu_regs[reg], v);
333     }
334 }
335 
336 static TCGv gen_dest_gpr(DisasContext *dc, int reg)
337 {
338     if (reg > 0) {
339         assert(reg < 32);
340         return cpu_regs[reg];
341     } else {
342         return tcg_temp_new();
343     }
344 }
345 
346 static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
347 {
348     return translator_use_goto_tb(&s->base, pc) &&
349            translator_use_goto_tb(&s->base, npc);
350 }
351 
352 static void gen_goto_tb(DisasContext *s, int tb_num,
353                         target_ulong pc, target_ulong npc)
354 {
355     if (use_goto_tb(s, pc, npc))  {
356         /* jump to same page: we can use a direct jump */
357         tcg_gen_goto_tb(tb_num);
358         tcg_gen_movi_tl(cpu_pc, pc);
359         tcg_gen_movi_tl(cpu_npc, npc);
360         tcg_gen_exit_tb(s->base.tb, tb_num);
361     } else {
362         /* jump to another page: we can use an indirect jump */
363         tcg_gen_movi_tl(cpu_pc, pc);
364         tcg_gen_movi_tl(cpu_npc, npc);
365         tcg_gen_lookup_and_goto_ptr();
366     }
367 }
368 
369 // XXX suboptimal
// XXX suboptimal
/* Extract the PSR N (negative) bit of src into reg as 0/1. */
static void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_NEG_SHIFT, 1);
}
375 
/* Extract the PSR Z (zero) bit of src into reg as 0/1. */
static void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_ZERO_SHIFT, 1);
}
381 
/* Extract the PSR V (overflow) bit of src into reg as 0/1. */
static void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_OVF_SHIFT, 1);
}
387 
/* Extract the PSR C (carry) bit of src into reg as 0/1. */
static void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_CARRY_SHIFT, 1);
}
393 
/* ADDcc: add and latch operands/result for lazy condition-code evaluation. */
static void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    /* Operands must be captured first; the cc state is read later. */
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
401 
/*
 * Recover the 32-bit carry produced by the most recent add-with-cc:
 * carry = (uint32_t)cc_dst < (uint32_t)cc_src.  Returns a fresh i32
 * temp holding 0 or 1.
 */
static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    /* Narrow the 64-bit cc registers to their low 32 bits first. */
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    /* On 32-bit targets the cc registers are already i32. */
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

    return carry_32;
}
422 
/*
 * Recover the 32-bit borrow produced by the most recent sub-with-cc:
 * borrow = (uint32_t)cc_src < (uint32_t)cc_src2.  Returns a fresh i32
 * temp holding 0 or 1.
 */
static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    /* Narrow the 64-bit cc registers to their low 32 bits first. */
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    /* On 32-bit targets the cc registers are already i32. */
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

    return carry_32;
}
443 
/*
 * dst = src1 + src2 + carry_32, optionally latching the operands for
 * lazy cc evaluation (in which case dst must already be cpu_cc_dst).
 */
static void gen_op_addc_int(TCGv dst, TCGv src1, TCGv src2,
                            TCGv_i32 carry_32, bool update_cc)
{
    tcg_gen_add_tl(dst, src1, src2);

#ifdef TARGET_SPARC64
    /* Widen the i32 carry before adding it to the 64-bit sum. */
    TCGv carry = tcg_temp_new();
    tcg_gen_extu_i32_tl(carry, carry_32);
    tcg_gen_add_tl(dst, dst, carry);
#else
    /* 32-bit target: TCGv and TCGv_i32 are the same width. */
    tcg_gen_add_i32(dst, dst, carry_32);
#endif

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}
463 
464 static void gen_op_addc_int_add(TCGv dst, TCGv src1, TCGv src2, bool update_cc)
465 {
466     TCGv discard;
467 
468     if (TARGET_LONG_BITS == 64) {
469         gen_op_addc_int(dst, src1, src2, gen_add32_carry32(), update_cc);
470         return;
471     }
472 
473     /*
474      * We can re-use the host's hardware carry generation by using
475      * an ADD2 opcode.  We discard the low part of the output.
476      * Ideally we'd combine this operation with the add that
477      * generated the carry in the first place.
478      */
479     discard = tcg_temp_new();
480     tcg_gen_add2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
481 
482     if (update_cc) {
483         tcg_debug_assert(dst == cpu_cc_dst);
484         tcg_gen_mov_tl(cpu_cc_src, src1);
485         tcg_gen_mov_tl(cpu_cc_src2, src2);
486     }
487 }
488 
/* ADDC (no cc update); previous cc state came from an add. */
static void gen_op_addc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_add(dst, src1, src2, false);
}
493 
/* ADDCcc (with cc update); previous cc state came from an add. */
static void gen_op_addccc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_add(dst, src1, src2, true);
}
498 
/* ADDC (no cc update); previous cc state came from a subtract. */
static void gen_op_addc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), false);
}
503 
/* ADDCcc (with cc update); previous cc state came from a subtract. */
static void gen_op_addccc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), true);
}
508 
509 static void gen_op_addc_int_generic(TCGv dst, TCGv src1, TCGv src2,
510                                     bool update_cc)
511 {
512     TCGv_i32 carry_32 = tcg_temp_new_i32();
513     gen_helper_compute_C_icc(carry_32, tcg_env);
514     gen_op_addc_int(dst, src1, src2, carry_32, update_cc);
515 }
516 
/* ADDC (no cc update); carry computed generically via helper. */
static void gen_op_addc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_generic(dst, src1, src2, false);
}
521 
/* ADDCcc (with cc update); carry computed generically via helper. */
static void gen_op_addccc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_generic(dst, src1, src2, true);
}
526 
/* SUBcc: subtract and latch operands/result for lazy cc evaluation. */
static void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    /* Operands must be captured first; the cc state is read later. */
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
534 
/*
 * dst = src1 - src2 - carry_32, optionally latching the operands for
 * lazy cc evaluation (in which case dst must already be cpu_cc_dst).
 */
static void gen_op_subc_int(TCGv dst, TCGv src1, TCGv src2,
                            TCGv_i32 carry_32, bool update_cc)
{
    TCGv carry;

#if TARGET_LONG_BITS == 64
    /* Widen the i32 borrow before subtracting it from the 64-bit result. */
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    /* 32-bit target: TCGv and TCGv_i32 are the same width. */
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}
556 
/* SUBC (no cc update); previous cc state came from an add. */
static void gen_op_subc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int(dst, src1, src2, gen_add32_carry32(), false);
}
561 
/* SUBCcc (with cc update); previous cc state came from an add. */
static void gen_op_subccc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int(dst, src1, src2, gen_add32_carry32(), true);
}
566 
567 static void gen_op_subc_int_sub(TCGv dst, TCGv src1, TCGv src2, bool update_cc)
568 {
569     TCGv discard;
570 
571     if (TARGET_LONG_BITS == 64) {
572         gen_op_subc_int(dst, src1, src2, gen_sub32_carry32(), update_cc);
573         return;
574     }
575 
576     /*
577      * We can re-use the host's hardware carry generation by using
578      * a SUB2 opcode.  We discard the low part of the output.
579      */
580     discard = tcg_temp_new();
581     tcg_gen_sub2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
582 
583     if (update_cc) {
584         tcg_debug_assert(dst == cpu_cc_dst);
585         tcg_gen_mov_tl(cpu_cc_src, src1);
586         tcg_gen_mov_tl(cpu_cc_src2, src2);
587     }
588 }
589 
/* SUBC (no cc update); previous cc state came from a subtract. */
static void gen_op_subc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_sub(dst, src1, src2, false);
}
594 
/* SUBCcc (with cc update); previous cc state came from a subtract. */
static void gen_op_subccc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_sub(dst, src1, src2, true);
}
599 
600 static void gen_op_subc_int_generic(TCGv dst, TCGv src1, TCGv src2,
601                                     bool update_cc)
602 {
603     TCGv_i32 carry_32 = tcg_temp_new_i32();
604 
605     gen_helper_compute_C_icc(carry_32, tcg_env);
606     gen_op_subc_int(dst, src1, src2, carry_32, update_cc);
607 }
608 
/* SUBC (no cc update); borrow computed generically via helper. */
static void gen_op_subc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_generic(dst, src1, src2, false);
}
613 
/* SUBCcc (with cc update); borrow computed generically via helper. */
static void gen_op_subccc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_generic(dst, src1, src2, true);
}
618 
/*
 * MULScc: one step of the SPARC V8 iterative multiply.
 * Shifts the Y register and the partial product by one bit, conditionally
 * adds the multiplicand, and leaves operands latched for cc evaluation.
 * NOTE(review): the statement order below is load-bearing (cpu_y and the
 * cc registers are both read and written) — do not reorder.
 */
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    /* Zero the addend when the low bit of %y is clear. */
    zero = tcg_constant_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, cpu_cc_src, 31, 1);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
657 
/*
 * 32x32 -> 64 multiply for UMUL/SMUL: dst receives the (target-width)
 * product, and %y receives the high 32 bits of the full 64-bit result.
 * sign_ext selects signed (1) vs unsigned (0) extension of the operands.
 */
static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    /* The double-width primitives split the product across dst and %y. */
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    /* 64-bit target: extend both operands, multiply, then split. */
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
682 
/* UMUL: unsigned 32x32 multiply; high bits land in %y. */
static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}
688 
/* SMUL: signed 32x32 multiply; high bits land in %y. */
static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
694 
/* UDIVX: 64-bit unsigned divide, delegated to the helper. */
static void gen_op_udivx(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_udivx(dst, tcg_env, src1, src2);
}
699 
/* SDIVX: 64-bit signed divide, delegated to the helper. */
static void gen_op_sdivx(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_sdivx(dst, tcg_env, src1, src2);
}
704 
/* UDIV: 32-bit unsigned divide (uses %y:rs1), delegated to the helper. */
static void gen_op_udiv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_udiv(dst, tcg_env, src1, src2);
}
709 
/* SDIV: 32-bit signed divide (uses %y:rs1), delegated to the helper. */
static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_sdiv(dst, tcg_env, src1, src2);
}
714 
/* UDIVcc: unsigned divide that also sets condition codes (in helper). */
static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_udiv_cc(dst, tcg_env, src1, src2);
}
719 
/* SDIVcc: signed divide that also sets condition codes (in helper). */
static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_sdiv_cc(dst, tcg_env, src1, src2);
}
724 
/* TADDccTV: tagged add with cc update and trap on overflow (in helper). */
static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}
729 
/* TSUBccTV: tagged subtract with cc update and trap on overflow (in helper). */
static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}
734 
/* POPC: population count of src2; src1 is unused by the instruction. */
static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}
739 
#ifndef TARGET_SPARC64
/* array8 is a SPARC64 (VIS) instruction; 32-bit builds must never emit it. */
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif
746 
/* ARRAY16: array8 address scaled by the 2-byte element size. */
static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}
752 
/* ARRAY32: array8 address scaled by the 4-byte element size. */
static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}
758 
/* FPACK16 (VIS): pack via helper using %gsr scale; SPARC64 only. */
static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}
767 
/* FPACKFIX (VIS): pack via helper using %gsr scale; SPARC64 only. */
static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}
776 
/* FPACK32 (VIS): pack via helper using %gsr scale; SPARC64 only. */
static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
785 
/*
 * FALIGNDATA (VIS): concatenate s1:s2 and extract 8 bytes starting at
 * the byte offset held in GSR.align (low 3 bits of %gsr).  SPARC64 only.
 */
static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    /* shift = GSR.align * 8 (bit offset). */
    tcg_gen_andi_tl(shift, cpu_gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}
812 
/* BSHUFFLE (VIS): byte shuffle via helper using GSR.mask; SPARC64 only. */
static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
821 
// 1
/* Branch-always: condition is the constant 1. */
static void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}
827 
// Z
/* BE: equal — the Z flag. */
static void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}
833 
834 // Z | (N ^ V)
835 static void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
836 {
837     TCGv t0 = tcg_temp_new();
838     gen_mov_reg_N(t0, src);
839     gen_mov_reg_V(dst, src);
840     tcg_gen_xor_tl(dst, dst, t0);
841     gen_mov_reg_Z(t0, src);
842     tcg_gen_or_tl(dst, dst, t0);
843 }
844 
845 // N ^ V
846 static void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
847 {
848     TCGv t0 = tcg_temp_new();
849     gen_mov_reg_V(t0, src);
850     gen_mov_reg_N(dst, src);
851     tcg_gen_xor_tl(dst, dst, t0);
852 }
853 
854 // C | Z
855 static void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
856 {
857     TCGv t0 = tcg_temp_new();
858     gen_mov_reg_Z(t0, src);
859     gen_mov_reg_C(dst, src);
860     tcg_gen_or_tl(dst, dst, t0);
861 }
862 
// C
/* BCS: carry set (unsigned less-than). */
static void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}
868 
// V
/* BVS: overflow set. */
static void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}
874 
// 0
/* Branch-never: condition is the constant 0. */
static void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}
880 
// N
/* BNEG: negative set. */
static void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}
886 
// !Z
/* BNE: not equal — inverted Z flag. */
static void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
893 
// !(Z | (N ^ V))
/* BG: signed greater-than — negation of BLE. */
static void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
900 
// !(N ^ V)
/* BGE: signed greater-or-equal — negation of BL. */
static void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
907 
// !(C | Z)
/* BGU: unsigned greater-than — negation of BLEU. */
static void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
914 
// !C
/* BCC: carry clear (unsigned greater-or-equal). */
static void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
921 
// !N
/* BPOS: negative clear. */
static void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
928 
// !V
/* BVC: overflow clear. */
static void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
935 
936 /*
937   FPSR bit field FCC1 | FCC0:
938    0 =
939    1 <
940    2 >
941    3 unordered
942 */
943 static void gen_mov_reg_FCC0(TCGv reg, TCGv src,
944                                     unsigned int fcc_offset)
945 {
946     tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
947     tcg_gen_andi_tl(reg, reg, 0x1);
948 }
949 
950 static void gen_mov_reg_FCC1(TCGv reg, TCGv src, unsigned int fcc_offset)
951 {
952     tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
953     tcg_gen_andi_tl(reg, reg, 0x1);
954 }
955 
956 // !0: FCC0 | FCC1
957 static void gen_op_eval_fbne(TCGv dst, TCGv src, unsigned int fcc_offset)
958 {
959     TCGv t0 = tcg_temp_new();
960     gen_mov_reg_FCC0(dst, src, fcc_offset);
961     gen_mov_reg_FCC1(t0, src, fcc_offset);
962     tcg_gen_or_tl(dst, dst, t0);
963 }
964 
965 // 1 or 2: FCC0 ^ FCC1
966 static void gen_op_eval_fblg(TCGv dst, TCGv src, unsigned int fcc_offset)
967 {
968     TCGv t0 = tcg_temp_new();
969     gen_mov_reg_FCC0(dst, src, fcc_offset);
970     gen_mov_reg_FCC1(t0, src, fcc_offset);
971     tcg_gen_xor_tl(dst, dst, t0);
972 }
973 
// 1 or 3: FCC0
/* FBUL: less-than or unordered. */
static void gen_op_eval_fbul(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}
979 
980 // 1: FCC0 & !FCC1
981 static void gen_op_eval_fbl(TCGv dst, TCGv src, unsigned int fcc_offset)
982 {
983     TCGv t0 = tcg_temp_new();
984     gen_mov_reg_FCC0(dst, src, fcc_offset);
985     gen_mov_reg_FCC1(t0, src, fcc_offset);
986     tcg_gen_andc_tl(dst, dst, t0);
987 }
988 
// 2 or 3: FCC1
/* FBUG: greater-than or unordered. */
static void gen_op_eval_fbug(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}
994 
995 // 2: !FCC0 & FCC1
996 static void gen_op_eval_fbg(TCGv dst, TCGv src, unsigned int fcc_offset)
997 {
998     TCGv t0 = tcg_temp_new();
999     gen_mov_reg_FCC0(dst, src, fcc_offset);
1000     gen_mov_reg_FCC1(t0, src, fcc_offset);
1001     tcg_gen_andc_tl(dst, t0, dst);
1002 }
1003 
1004 // 3: FCC0 & FCC1
1005 static void gen_op_eval_fbu(TCGv dst, TCGv src, unsigned int fcc_offset)
1006 {
1007     TCGv t0 = tcg_temp_new();
1008     gen_mov_reg_FCC0(dst, src, fcc_offset);
1009     gen_mov_reg_FCC1(t0, src, fcc_offset);
1010     tcg_gen_and_tl(dst, dst, t0);
1011 }
1012 
/*
 * Negated FP branch conditions: same scheme as above, with a final
 * xori 1 to complement the boolean result.
 */

// 0: !(FCC0 | FCC1)
static void gen_op_eval_fbe(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 3: !(FCC0 ^ FCC1)
static void gen_op_eval_fbue(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 2: !FCC0
static void gen_op_eval_fbge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static void gen_op_eval_fbuge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 1: !FCC1
static void gen_op_eval_fble(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static void gen_op_eval_fbule(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    /* andc computes t0 & ~dst, i.e. !FCC0 & FCC1.  */
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !3: !(FCC0 & FCC1)
static void gen_op_eval_fbo(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
1076 
/*
 * Emit a two-way exit from the TB: when R_COND is non-zero fall through
 * to the goto_tb targeting PC1, otherwise branch to the goto_tb
 * targeting PC2.  Each arm continues at pc/npc = target/target + 4.
 */
static void gen_branch2(DisasContext *dc, target_ulong pc1,
                        target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}
1089 
/*
 * Materialize a pending conditional branch: set cpu_npc to
 * jump_pc[0] (taken) or jump_pc[1] (not taken) based on cpu_cond.
 */
static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
}
1098 
/* Call this function before using the condition register, as it may
   still be holding a deferred branch decision (npc == JUMP_PC).  The
   pending branch is resolved into cpu_npc and npc downgraded to
   DYNAMIC_PC_LOOKUP.  */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}
1108 
/*
 * Ensure cpu_npc holds the correct next-pc value.  Real npc values are
 * 4-aligned, so (npc & 3) != 0 identifies the symbolic markers:
 * JUMP_PC still needs the pending branch resolved, while the
 * DYNAMIC_PC variants mean cpu_npc is already up to date.
 */
static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        /* Known compile-time value; store it directly.  */
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}
1127 
1128 static void update_psr(DisasContext *dc)
1129 {
1130     if (dc->cc_op != CC_OP_FLAGS) {
1131         dc->cc_op = CC_OP_FLAGS;
1132         gen_helper_compute_psr(tcg_env);
1133     }
1134 }
1135 
/* Synchronize both cpu_pc and cpu_npc with the translator's view,
   e.g. before calling a helper that may raise an exception.  */
static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
1141 
1142 static void gen_exception(DisasContext *dc, int which)
1143 {
1144     save_state(dc);
1145     gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
1146     dc->base.is_jmp = DISAS_NORETURN;
1147 }
1148 
1149 static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
1150 {
1151     DisasDelayException *e = g_new0(DisasDelayException, 1);
1152 
1153     e->next = dc->delay_excp_list;
1154     dc->delay_excp_list = e;
1155 
1156     e->lab = gen_new_label();
1157     e->excp = excp;
1158     e->pc = dc->pc;
1159     /* Caller must have used flush_cond before branch. */
1160     assert(e->npc != JUMP_PC);
1161     e->npc = dc->npc;
1162 
1163     return e->lab;
1164 }
1165 
/* Convenience wrapper: queue a delayed exception with a constant
   trap number.  */
static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}
1170 
/*
 * Emit a runtime alignment check: raise TT_UNALIGNED (via the delayed
 * exception mechanism) if any of the MASK bits of ADDR are set.
 */
static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    /* delay_exception requires npc != JUMP_PC.  */
    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}
1182 
/*
 * Advance pc to npc (delay-slot semantics), both in the translator's
 * static view and, when npc is dynamic, in the cpu_pc register.
 */
static void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            /* Resolve the deferred branch first, then copy it to pc.  */
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        /* Both values known at translation time; no code needed.  */
        dc->pc = dc->npc;
    }
}
1204 
/* Emit pc = npc; npc += 4 — the sequential instruction advance.  */
static void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}
1210 
/*
 * Translate a 4-bit integer condition code COND (Bicc/Tcc encoding)
 * into a DisasCompare against %icc (xcc false) or %xcc (xcc true).
 * When the lazily-tracked cc state (dc->cc_op) permits, the condition
 * is expressed directly on cpu_cc_dst / cpu_cc_src / cpu_cc_src2
 * without materializing the PSR; otherwise the flags are computed and
 * a boolean result is built from the gen_op_eval_* helpers.
 */
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    /* TCG condition per SPARC cond for a preceding subcc; -1 marks
       entries that need the full flags (handled separately below).  */
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    /* TCG condition on cc_dst after a logic op, where C = V = 0.  */
    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        /* Compare cc_dst against zero with the chosen condition.  */
        cmp->is_bool = false;
        cmp->c2 = tcg_constant_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            /* %icc looks at only the low 32 bits.  */
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7: /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            /* Compare the original subcc operands directly.  */
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        /* Fall back to full flag computation.  */
        gen_helper_compute_psr(tcg_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_constant_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}
1378 
/*
 * Translate a 4-bit FP condition code COND against fcc field CC into a
 * DisasCompare.  A 0/1 boolean is built from the FSR fcc bits by the
 * gen_op_eval_fb* helpers and compared NE 0.
 */
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_constant_tl(0);

    /* Offset added to FSR_FCC0_SHIFT so that the helpers pick up the
       selected fcc field (fcc1..fcc3 live at bits 32/34/36).  */
    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}
1457 
/*
 * Condition table for the register-contents conditions, indexed by the
 * 3-bit rcond field.  Stored in INVERTED sense; gen_compare_reg applies
 * tcg_invert_cond to recover the architectural test against zero.
 */
static const TCGCond gen_tcg_cond_reg[8] = {
    TCG_COND_NEVER,  /* reserved */
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    TCG_COND_NEVER,  /* reserved */
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};
1469 
1470 static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1471 {
1472     cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
1473     cmp->is_bool = false;
1474     cmp->c1 = r_src;
1475     cmp->c2 = tcg_constant_tl(0);
1476 }
1477 
/* Clear the FSR ftt and current-exception (cexc) fields, per the
   FSR_FTT_CEXC_NMASK mask, before executing an FPop.  */
static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
1482 
/*
 * Non-arithmetic FPops (move/negate/absolute, single and double).
 * Each clears FSR.ftt/cexc first, as these ops never raise IEEE
 * exceptions but still reset the trap-type field.
 */
static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}

static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fnegs(dst, src);
}

static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fabss(dst, src);
}

static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}

static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fnegd(dst, src);
}

static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fabsd(dst, src);
}
1518 
#ifdef TARGET_SPARC64
/*
 * FP compare dispatchers.  On sparc64 there are four fcc fields, each
 * with its own helper; FCCNO selects which one the result is written
 * to.  On sparc32 (the #else branch) only fcc0 exists and FCCNO is
 * ignored.  The quad (128-bit) variants take their operands from the
 * QT0/QT1 temporaries in env rather than as arguments.
 */
static void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}

/* The fcmpe* variants are the "compare and exception if unordered"
   forms (FCMPEs/d/q).  */
static void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}

#else

/* sparc32: only fcc0 exists, so fccno is unused.  */
static void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_fsr, tcg_env);
}

static void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_fsr, tcg_env);
}
#endif
1660 
/* Raise an FP trap with the given FSR flag bits: clear FSR.ftt, OR in
   FSR_FLAGS, then take TT_FP_EXCP.  */
static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}
1667 
/*
 * If the FPU is disabled, raise TT_NFPU_INSN and return 1 so the
 * caller can abort translation of the insn; otherwise return 0.
 * In user-only builds the FPU is always considered enabled.
 */
static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}
1678 
/* asi moves */

/* Strategy for implementing an ASI-qualified memory access.  */
typedef enum {
    GET_ASI_HELPER,   /* fall back to the out-of-line ld/st_asi helpers */
    GET_ASI_EXCP,     /* an exception was generated; emit nothing */
    GET_ASI_DIRECT,   /* inline qemu_ld/st with the chosen mem_idx */
    GET_ASI_DTWINX,   /* twin-word (ldda/stda style) access */
    GET_ASI_BLOCK,    /* block-transfer asi */
    GET_ASI_SHORT,    /* 8/16-bit short floating-point load/store asi */
    GET_ASI_BCOPY,    /* sparc32 block copy (sta) */
    GET_ASI_BFILL,    /* sparc32 block fill (stda) */
} ASIType;

typedef struct {
    ASIType type;
    int asi;       /* resolved asi number */
    int mem_idx;   /* mmu index to use for the access */
    MemOp memop;   /* size/endianness, possibly overridden by the asi */
} DisasASI;
1697 
1698 /*
1699  * Build DisasASI.
1700  * For asi == -1, treat as non-asi.
1701  * For ask == -2, treat as immediate offset (v8 error, v9 %asi).
1702  */
1703 static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
1704 {
1705     ASIType type = GET_ASI_HELPER;
1706     int mem_idx = dc->mem_idx;
1707 
1708     if (asi == -1) {
1709         /* Artificial "non-asi" case. */
1710         type = GET_ASI_DIRECT;
1711         goto done;
1712     }
1713 
1714 #ifndef TARGET_SPARC64
1715     /* Before v9, all asis are immediate and privileged.  */
1716     if (asi < 0) {
1717         gen_exception(dc, TT_ILL_INSN);
1718         type = GET_ASI_EXCP;
1719     } else if (supervisor(dc)
1720                /* Note that LEON accepts ASI_USERDATA in user mode, for
1721                   use with CASA.  Also note that previous versions of
1722                   QEMU allowed (and old versions of gcc emitted) ASI_P
1723                   for LEON, which is incorrect.  */
1724                || (asi == ASI_USERDATA
1725                    && (dc->def->features & CPU_FEATURE_CASA))) {
1726         switch (asi) {
1727         case ASI_USERDATA:   /* User data access */
1728             mem_idx = MMU_USER_IDX;
1729             type = GET_ASI_DIRECT;
1730             break;
1731         case ASI_KERNELDATA: /* Supervisor data access */
1732             mem_idx = MMU_KERNEL_IDX;
1733             type = GET_ASI_DIRECT;
1734             break;
1735         case ASI_M_BYPASS:    /* MMU passthrough */
1736         case ASI_LEON_BYPASS: /* LEON MMU passthrough */
1737             mem_idx = MMU_PHYS_IDX;
1738             type = GET_ASI_DIRECT;
1739             break;
1740         case ASI_M_BCOPY: /* Block copy, sta access */
1741             mem_idx = MMU_KERNEL_IDX;
1742             type = GET_ASI_BCOPY;
1743             break;
1744         case ASI_M_BFILL: /* Block fill, stda access */
1745             mem_idx = MMU_KERNEL_IDX;
1746             type = GET_ASI_BFILL;
1747             break;
1748         }
1749 
1750         /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
1751          * permissions check in get_physical_address(..).
1752          */
1753         mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
1754     } else {
1755         gen_exception(dc, TT_PRIV_INSN);
1756         type = GET_ASI_EXCP;
1757     }
1758 #else
1759     if (asi < 0) {
1760         asi = dc->asi;
1761     }
1762     /* With v9, all asis below 0x80 are privileged.  */
1763     /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
1764        down that bit into DisasContext.  For the moment that's ok,
1765        since the direct implementations below doesn't have any ASIs
1766        in the restricted [0x30, 0x7f] range, and the check will be
1767        done properly in the helper.  */
1768     if (!supervisor(dc) && asi < 0x80) {
1769         gen_exception(dc, TT_PRIV_ACT);
1770         type = GET_ASI_EXCP;
1771     } else {
1772         switch (asi) {
1773         case ASI_REAL:      /* Bypass */
1774         case ASI_REAL_IO:   /* Bypass, non-cacheable */
1775         case ASI_REAL_L:    /* Bypass LE */
1776         case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1777         case ASI_TWINX_REAL:   /* Real address, twinx */
1778         case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1779         case ASI_QUAD_LDD_PHYS:
1780         case ASI_QUAD_LDD_PHYS_L:
1781             mem_idx = MMU_PHYS_IDX;
1782             break;
1783         case ASI_N:  /* Nucleus */
1784         case ASI_NL: /* Nucleus LE */
1785         case ASI_TWINX_N:
1786         case ASI_TWINX_NL:
1787         case ASI_NUCLEUS_QUAD_LDD:
1788         case ASI_NUCLEUS_QUAD_LDD_L:
1789             if (hypervisor(dc)) {
1790                 mem_idx = MMU_PHYS_IDX;
1791             } else {
1792                 mem_idx = MMU_NUCLEUS_IDX;
1793             }
1794             break;
1795         case ASI_AIUP:  /* As if user primary */
1796         case ASI_AIUPL: /* As if user primary LE */
1797         case ASI_TWINX_AIUP:
1798         case ASI_TWINX_AIUP_L:
1799         case ASI_BLK_AIUP_4V:
1800         case ASI_BLK_AIUP_L_4V:
1801         case ASI_BLK_AIUP:
1802         case ASI_BLK_AIUPL:
1803             mem_idx = MMU_USER_IDX;
1804             break;
1805         case ASI_AIUS:  /* As if user secondary */
1806         case ASI_AIUSL: /* As if user secondary LE */
1807         case ASI_TWINX_AIUS:
1808         case ASI_TWINX_AIUS_L:
1809         case ASI_BLK_AIUS_4V:
1810         case ASI_BLK_AIUS_L_4V:
1811         case ASI_BLK_AIUS:
1812         case ASI_BLK_AIUSL:
1813             mem_idx = MMU_USER_SECONDARY_IDX;
1814             break;
1815         case ASI_S:  /* Secondary */
1816         case ASI_SL: /* Secondary LE */
1817         case ASI_TWINX_S:
1818         case ASI_TWINX_SL:
1819         case ASI_BLK_COMMIT_S:
1820         case ASI_BLK_S:
1821         case ASI_BLK_SL:
1822         case ASI_FL8_S:
1823         case ASI_FL8_SL:
1824         case ASI_FL16_S:
1825         case ASI_FL16_SL:
1826             if (mem_idx == MMU_USER_IDX) {
1827                 mem_idx = MMU_USER_SECONDARY_IDX;
1828             } else if (mem_idx == MMU_KERNEL_IDX) {
1829                 mem_idx = MMU_KERNEL_SECONDARY_IDX;
1830             }
1831             break;
1832         case ASI_P:  /* Primary */
1833         case ASI_PL: /* Primary LE */
1834         case ASI_TWINX_P:
1835         case ASI_TWINX_PL:
1836         case ASI_BLK_COMMIT_P:
1837         case ASI_BLK_P:
1838         case ASI_BLK_PL:
1839         case ASI_FL8_P:
1840         case ASI_FL8_PL:
1841         case ASI_FL16_P:
1842         case ASI_FL16_PL:
1843             break;
1844         }
1845         switch (asi) {
1846         case ASI_REAL:
1847         case ASI_REAL_IO:
1848         case ASI_REAL_L:
1849         case ASI_REAL_IO_L:
1850         case ASI_N:
1851         case ASI_NL:
1852         case ASI_AIUP:
1853         case ASI_AIUPL:
1854         case ASI_AIUS:
1855         case ASI_AIUSL:
1856         case ASI_S:
1857         case ASI_SL:
1858         case ASI_P:
1859         case ASI_PL:
1860             type = GET_ASI_DIRECT;
1861             break;
1862         case ASI_TWINX_REAL:
1863         case ASI_TWINX_REAL_L:
1864         case ASI_TWINX_N:
1865         case ASI_TWINX_NL:
1866         case ASI_TWINX_AIUP:
1867         case ASI_TWINX_AIUP_L:
1868         case ASI_TWINX_AIUS:
1869         case ASI_TWINX_AIUS_L:
1870         case ASI_TWINX_P:
1871         case ASI_TWINX_PL:
1872         case ASI_TWINX_S:
1873         case ASI_TWINX_SL:
1874         case ASI_QUAD_LDD_PHYS:
1875         case ASI_QUAD_LDD_PHYS_L:
1876         case ASI_NUCLEUS_QUAD_LDD:
1877         case ASI_NUCLEUS_QUAD_LDD_L:
1878             type = GET_ASI_DTWINX;
1879             break;
1880         case ASI_BLK_COMMIT_P:
1881         case ASI_BLK_COMMIT_S:
1882         case ASI_BLK_AIUP_4V:
1883         case ASI_BLK_AIUP_L_4V:
1884         case ASI_BLK_AIUP:
1885         case ASI_BLK_AIUPL:
1886         case ASI_BLK_AIUS_4V:
1887         case ASI_BLK_AIUS_L_4V:
1888         case ASI_BLK_AIUS:
1889         case ASI_BLK_AIUSL:
1890         case ASI_BLK_S:
1891         case ASI_BLK_SL:
1892         case ASI_BLK_P:
1893         case ASI_BLK_PL:
1894             type = GET_ASI_BLOCK;
1895             break;
1896         case ASI_FL8_S:
1897         case ASI_FL8_SL:
1898         case ASI_FL8_P:
1899         case ASI_FL8_PL:
1900             memop = MO_UB;
1901             type = GET_ASI_SHORT;
1902             break;
1903         case ASI_FL16_S:
1904         case ASI_FL16_SL:
1905         case ASI_FL16_P:
1906         case ASI_FL16_PL:
1907             memop = MO_TEUW;
1908             type = GET_ASI_SHORT;
1909             break;
1910         }
1911         /* The little-endian asis all have bit 3 set.  */
1912         if (asi & 8) {
1913             memop ^= MO_BSWAP;
1914         }
1915     }
1916 #endif
1917 
1918  done:
1919     return (DisasASI){ type, asi, mem_idx, memop };
1920 }
1921 
#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
/* Stubs so the generic code below links on sparc32 user-only builds,
   where the real asi helpers are not compiled in; these paths are
   expected to be unreachable (resolve_asi raises an exception first).  */
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}

static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
#endif
1935 
/* Emit an asi-qualified load of DA->memop size from ADDR into DST.  */
static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* The helper may raise an exception; sync pc/npc first.  */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
            {
                /* The helper always returns 64 bits; truncate on sparc32.  */
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}
1966 
/* Emit an asi-qualified store of SRC (DA->memop size) to ADDR.  */
static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX: /* Reserved for stda.  */
        if (TARGET_LONG_BITS == 32) {
            gen_exception(dc, TT_ILL_INSN);
            break;
        } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            break;
        }
        /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
        /* fall through */

    case GET_ASI_DIRECT:
        tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_BCOPY:
        assert(TARGET_LONG_BITS == 32);
        /* Copy 32 bytes from the address in SRC to ADDR.  */
        /* ??? The original qemu code suggests 4-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
        {
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv four = tcg_constant_tl(4);
            TCGv_i32 tmp = tcg_temp_new_i32();
            int i;

            tcg_gen_andi_tl(saddr, src, -4);
            tcg_gen_andi_tl(daddr, addr, -4);
            for (i = 0; i < 32; i += 4) {
                /* Since the loads and stores are paired, allow the
                   copy to happen in the host endianness.  */
                tcg_gen_qemu_ld_i32(tmp, saddr, da->mem_idx, MO_UL);
                tcg_gen_qemu_st_i32(tmp, daddr, da->mem_idx, MO_UL);
                tcg_gen_add_tl(saddr, saddr, four);
                tcg_gen_add_tl(daddr, daddr, four);
            }
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* The helper may raise an exception; sync pc/npc first.  */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
            {
                /* The helper takes a 64-bit value; widen on sparc32.  */
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
2038 
2039 static void gen_swap_asi(DisasContext *dc, DisasASI *da,
2040                          TCGv dst, TCGv src, TCGv addr)
2041 {
2042     switch (da->type) {
2043     case GET_ASI_EXCP:
2044         break;
2045     case GET_ASI_DIRECT:
2046         tcg_gen_atomic_xchg_tl(dst, addr, src,
2047                                da->mem_idx, da->memop | MO_ALIGN);
2048         break;
2049     default:
2050         /* ??? Should be DAE_invalid_asi.  */
2051         gen_exception(dc, TT_DATA_ACCESS);
2052         break;
2053     }
2054 }
2055 
2056 static void gen_cas_asi(DisasContext *dc, DisasASI *da,
2057                         TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
2058 {
2059     switch (da->type) {
2060     case GET_ASI_EXCP:
2061         return;
2062     case GET_ASI_DIRECT:
2063         tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
2064                                   da->mem_idx, da->memop | MO_ALIGN);
2065         break;
2066     default:
2067         /* ??? Should be DAE_invalid_asi.  */
2068         gen_exception(dc, TT_DATA_ACCESS);
2069         break;
2070     }
2071 }
2072 
/* Emit LDSTUB[A]: atomically load the byte at ADDR into DST and
   store 0xff to it, for the decoded ASI in *da.  */
static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        /* The exception was already raised while decoding the ASI.  */
        break;
    case GET_ASI_DIRECT:
        /* Normal memory: implement as an atomic exchange with 0xff.  */
        tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
                               da->mem_idx, MO_UB);
        break;
    default:
        /* ??? In theory, this should be raise DAE_invalid_asi.
           But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
            /* The ld+st helper sequence below is not atomic; with
               parallel cpus, retry the insn in exclusive context.  */
            gen_helper_exit_atomic(tcg_env);
        } else {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
            TCGv_i64 s64, t64;

            save_state(dc);
            /* Non-atomic load of the old byte ...  */
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);

            /* ... followed by the 0xff store.  */
            s64 = tcg_constant_i64(0xff);
            gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);

            tcg_gen_trunc_i64_tl(dst, t64);

            /* End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
2107 
/*
 * Emit LDFA/LDDFA/LDQFA: load a float register (single/double/quad,
 * per ORIG_SIZE) from ADDR using the decoded ASI in *da.
 */
static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        /* Quad loads are performed as two 64-bit halves.  */
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        /* The exception was already raised while decoding the ASI.  */
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = gen_dest_fpr_F(dc);
            tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
            gen_store_fpr_F(dc, rd, d32);
            break;

        case MO_64:
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop);
            break;

        case MO_128:
            /* Load the first half into a temp so the register pair is
               not clobbered if the second load faults.  */
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
            tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only.  */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* Block load: 8 consecutive doubles (64 bytes).
               The first operation checks required alignment.  */
            addr_tmp = tcg_temp_new();
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only.  */
        if (orig_size == MO_64) {
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them.  */
            switch (size) {
            case MO_32:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                d32 = gen_dest_fpr_F(dc);
                tcg_gen_extrl_i64_i32(d32, d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case MO_64:
                gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr,
                                  r_asi, r_mop);
                break;
            case MO_128:
                /* As in the direct case: keep the first half in a temp
                   until the second helper call has completed.  */
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                addr_tmp = tcg_temp_new();
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr_tmp,
                                  r_asi, r_mop);
                tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
                break;
            default:
                g_assert_not_reached();
            }
        }
        break;
    }
}
2219 
/*
 * Emit STFA/STDFA/STQFA: store a float register (single/double/quad,
 * per ORIG_SIZE) to ADDR using the decoded ASI in *da.
 */
static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        /* Quad stores are performed as two 64-bit halves.  */
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        /* The exception was already raised while decoding the ASI.  */
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = gen_load_fpr_F(dc, rd);
            tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
            break;
        case MO_64:
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN_4);
            break;
        case MO_128:
            /* Only 4-byte alignment required.  However, it is legal for the
               cpu to signal the alignment fault, and the OS trap handler is
               required to fix it up.  Requiring 16-byte alignment here avoids
               having to probe the second page before performing the first
               write.  */
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN_16);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for stdfa on aligned registers only.  */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* Block store: 8 consecutive doubles (64 bytes).
               The first operation checks required alignment.  */
            addr_tmp = tcg_temp_new();
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for stdfa only.  */
        if (orig_size == MO_64) {
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        /* According to the table in the UA2011 manual, the only
           other asis that are valid for ldfa/lddfa/ldqfa are
           the PST* asis, which aren't currently handled.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    }
}
2302 
/*
 * Emit LDDA: load a doubleword from ADDR into the even/odd gpr pair
 * rd/rd+1, using the decoded ASI in *da.
 */
static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv lo = gen_dest_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        /* The exception was already raised while decoding the ASI.  */
        return;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            /* Twin doubleword load: one atomic 16-byte access.  */
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE load, so must swap
             * the order of the writebacks.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i128_i64(lo, hi, t);
            } else {
                tcg_gen_extr_i128_i64(hi, lo, t);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);

            /* Note that LE ldda acts as if each 32-bit register
               result is byte swapped.  Having just performed one
               64-bit bswap, we need now to swap the writebacks.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for ldda, and this should raise DAE_invalid_asi.  However,
           real hardware allows others.  This can be seen with e.g.
           FreeBSD 10.3 wrt ASI_IC_TAG.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 tmp = tcg_temp_new_i64();

            save_state(dc);
            gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
    }

    /* Write back both halves of the register pair.  */
    gen_store_gpr(dc, rd, hi);
    gen_store_gpr(dc, rd + 1, lo);
}
2378 
/*
 * Emit STDA: store the even/odd gpr pair rd/rd+1 as a doubleword to
 * ADDR, using the decoded ASI in *da.
 */
static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_load_gpr(dc, rd);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        /* The exception was already raised while decoding the ASI.  */
        break;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            /* Twin doubleword store: one atomic 16-byte access.  */
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE store, so must swap
             * the order of the construction.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_i64_i128(t, lo, hi);
            } else {
                tcg_gen_concat_i64_i128(t, hi, lo);
            }
            tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }
            tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
        }
        break;

    case GET_ASI_BFILL:
        assert(TARGET_LONG_BITS == 32);
        /* Store 32 bytes of T64 to ADDR.  */
        /* ??? The original qemu code suggests 8-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
        {
            TCGv_i64 t64 = tcg_temp_new_i64();
            TCGv d_addr = tcg_temp_new();
            TCGv eight = tcg_constant_tl(8);
            int i;

            /* Replicate the register pair over the 32-byte region.  */
            tcg_gen_concat_tl_i64(t64, lo, hi);
            tcg_gen_andi_tl(d_addr, addr, -8);
            for (i = 0; i < 32; i += 8) {
                tcg_gen_qemu_st_i64(t64, d_addr, da->mem_idx, da->memop);
                tcg_gen_add_tl(d_addr, d_addr, eight);
            }
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }

            save_state(dc);
            gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
2470 
/* FMOVScc/FMOVSr: conditionally move single-precision fp register
   %f[rs] into %f[rd] when CMP holds.  sparc64 only.  */
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i32 c32, zero, dst, s1, s2;

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the later.  */
    c32 = tcg_temp_new_i32();
    if (cmp->is_bool) {
        /* The comparison is already a 0/1 value; just narrow it.  */
        tcg_gen_extrl_i64_i32(c32, cmp->c1);
    } else {
        TCGv_i64 c64 = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
        tcg_gen_extrl_i64_i32(c32, c64);
    }

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = gen_dest_fpr_F(dc);
    zero = tcg_constant_i32(0);

    /* dst = (c32 != 0) ? %f[rs] : %f[rd] */
    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    gen_store_fpr_F(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}
2500 
2501 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2502 {
2503 #ifdef TARGET_SPARC64
2504     TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2505     tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2506                         gen_load_fpr_D(dc, rs),
2507                         gen_load_fpr_D(dc, rd));
2508     gen_store_fpr_D(dc, rd, dst);
2509 #else
2510     qemu_build_not_reached();
2511 #endif
2512 }
2513 
/* FMOVQcc/FMOVQr: conditionally move quad-precision fp register pair,
   implemented as two 64-bit conditional moves.  sparc64 only.  */
static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);

    /* Each movcond either copies the source half or rewrites the
       destination half with itself, so the pair stays consistent.  */
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    gen_update_fprs_dirty(dc, qd);
#else
    qemu_build_not_reached();
#endif
}
2530 
2531 #ifdef TARGET_SPARC64
2532 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
2533 {
2534     TCGv_i32 r_tl = tcg_temp_new_i32();
2535 
2536     /* load env->tl into r_tl */
2537     tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));
2538 
2539     /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2540     tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2541 
2542     /* calculate offset to current trap state from env->ts, reuse r_tl */
2543     tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2544     tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));
2545 
2546     /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2547     {
2548         TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2549         tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2550         tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2551     }
2552 }
2553 #endif
2554 
/* Decoder field translator: canonicalize a double-fp register number.  */
static int extract_dfpreg(DisasContext *dc, int x)
{
    return DFPREG(x);
}
2559 
/* Decoder field translator: canonicalize a quad-fp register number.  */
static int extract_qfpreg(DisasContext *dc, int x)
{
    return QFPREG(x);
}
2564 
2565 /* Include the auto-generated decoder.  */
2566 #include "decode-insns.c.inc"
2567 
2568 #define TRANS(NAME, AVAIL, FUNC, ...) \
2569     static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2570     { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
2571 
2572 #define avail_ALL(C)      true
2573 #ifdef TARGET_SPARC64
2574 # define avail_32(C)      false
2575 # define avail_ASR17(C)   false
2576 # define avail_CASA(C)    true
2577 # define avail_DIV(C)     true
2578 # define avail_MUL(C)     true
2579 # define avail_POWERDOWN(C) false
2580 # define avail_64(C)      true
2581 # define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
2582 # define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
2583 # define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
2584 # define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
2585 #else
2586 # define avail_32(C)      true
2587 # define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
2588 # define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
2589 # define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
2590 # define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
2591 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2592 # define avail_64(C)      false
2593 # define avail_GL(C)      false
2594 # define avail_HYPV(C)    false
2595 # define avail_VIS1(C)    false
2596 # define avail_VIS2(C)    false
2597 #endif
2598 
/* Default case for non jump instructions. */
static bool advance_pc(DisasContext *dc)
{
    if (dc->npc & 3) {
        /* npc holds one of the out-of-band sentinel values, all of
           which have low bits set.  */
        switch (dc->npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* npc is only known at run time; advance via cpu_npc.  */
            dc->pc = dc->npc;
            gen_op_next_insn();
            break;
        case JUMP_PC:
            /* we can do a static jump */
            gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
            dc->base.is_jmp = DISAS_NORETURN;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        /* Both pc and npc are known statically.  */
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
    return true;
}
2623 
2624 /*
2625  * Major opcodes 00 and 01 -- branches, call, and sethi
2626  */
2627 
2628 static bool advance_jump_uncond_never(DisasContext *dc, bool annul)
2629 {
2630     if (annul) {
2631         dc->pc = dc->npc + 4;
2632         dc->npc = dc->pc + 4;
2633     } else {
2634         dc->pc = dc->npc;
2635         dc->npc = dc->pc + 4;
2636     }
2637     return true;
2638 }
2639 
2640 static bool advance_jump_uncond_always(DisasContext *dc, bool annul,
2641                                        target_ulong dest)
2642 {
2643     if (annul) {
2644         dc->pc = dest;
2645         dc->npc = dest + 4;
2646     } else {
2647         dc->pc = dc->npc;
2648         dc->npc = dest;
2649         tcg_gen_mov_tl(cpu_pc, cpu_npc);
2650     }
2651     return true;
2652 }
2653 
/* Conditional branch to DEST with annul flag ANNUL; CMP holds the
   already-evaluated branch condition.  */
static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
                              bool annul, target_ulong dest)
{
    target_ulong npc = dc->npc;

    if (annul) {
        /* Annulled conditional: the delay slot only executes when the
           branch is taken, so decide now and end the TB both ways.  */
        TCGLabel *l1 = gen_new_label();

        tcg_gen_brcond_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
        gen_goto_tb(dc, 0, npc, dest);
        gen_set_label(l1);
        gen_goto_tb(dc, 1, npc + 4, npc + 8);

        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        if (npc & 3) {
            /* npc is a run-time sentinel value (see advance_pc).  */
            switch (npc) {
            case DYNAMIC_PC:
            case DYNAMIC_PC_LOOKUP:
                /* Advance pc/npc, then conditionally replace npc
                   with the branch target.  */
                tcg_gen_mov_tl(cpu_pc, cpu_npc);
                tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
                tcg_gen_movcond_tl(cmp->cond, cpu_npc,
                                   cmp->c1, cmp->c2,
                                   tcg_constant_tl(dest), cpu_npc);
                dc->pc = npc;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /* Defer the decision: record both candidate next pcs and
               leave the condition value in cpu_cond (JUMP_PC state).  */
            dc->pc = npc;
            dc->jump_pc[0] = dest;
            dc->jump_pc[1] = npc + 4;
            dc->npc = JUMP_PC;
            if (cmp->is_bool) {
                tcg_gen_mov_tl(cpu_cond, cmp->c1);
            } else {
                tcg_gen_setcond_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
            }
        }
    }
    return true;
}
2697 
/* Raise a privileged-instruction trap; always "handles" the insn.  */
static bool raise_priv(DisasContext *dc)
{
    gen_exception(dc, TT_PRIV_INSN);
    return true;
}
2703 
/* Raise fp_exception with ftt = unimplemented FPop.  */
static bool raise_unimpfpop(DisasContext *dc)
{
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    return true;
}
2709 
2710 static bool gen_trap_float128(DisasContext *dc)
2711 {
2712     if (dc->def->features & CPU_FEATURE_FLOAT128) {
2713         return false;
2714     }
2715     return raise_unimpfpop(dc);
2716 }
2717 
/* Shared implementation of Bicc (v8) and BPcc (v9): conditional
   branch on the integer condition codes.  */
static bool do_bpcc(DisasContext *dc, arg_bcc *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);
    DisasCompare cmp;

    switch (a->cond) {
    case 0x0:
        /* bn: branch never */
        return advance_jump_uncond_never(dc, a->a);
    case 0x8:
        /* ba: branch always */
        return advance_jump_uncond_always(dc, a->a, target);
    default:
        flush_cond(dc);

        gen_compare(&cmp, a->cc, a->cond, dc);
        return advance_jump_cond(dc, &cmp, a->a, target);
    }
}

TRANS(Bicc, ALL, do_bpcc, a)
TRANS(BPcc,  64, do_bpcc, a)
2738 
/* Shared implementation of FBfcc (v8) and FBPfcc (v9): conditional
   branch on the floating-point condition codes.  */
static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);
    DisasCompare cmp;

    /* FP branches trap when the FPU is disabled/absent.  */
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    switch (a->cond) {
    case 0x0:
        /* fbn: branch never */
        return advance_jump_uncond_never(dc, a->a);
    case 0x8:
        /* fba: branch always */
        return advance_jump_uncond_always(dc, a->a, target);
    default:
        flush_cond(dc);

        gen_fcompare(&cmp, a->cc, a->cond);
        return advance_jump_cond(dc, &cmp, a->a, target);
    }
}

TRANS(FBPfcc,  64, do_fbpfcc, a)
TRANS(FBfcc,  ALL, do_fbpfcc, a)
2762 
2763 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2764 {
2765     target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2766     DisasCompare cmp;
2767 
2768     if (!avail_64(dc)) {
2769         return false;
2770     }
2771     if (gen_tcg_cond_reg[a->cond] == TCG_COND_NEVER) {
2772         return false;
2773     }
2774 
2775     flush_cond(dc);
2776     gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
2777     return advance_jump_cond(dc, &cmp, a->a, target);
2778 }
2779 
2780 static bool trans_CALL(DisasContext *dc, arg_CALL *a)
2781 {
2782     target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2783 
2784     gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
2785     gen_mov_pc_npc(dc);
2786     dc->npc = target;
2787     return true;
2788 }
2789 
static bool trans_NCP(DisasContext *dc, arg_NCP *a)
{
    /*
     * For sparc32, always generate the no-coprocessor exception.
     * For sparc64, always generate illegal instruction.
     */
#ifdef TARGET_SPARC64
    /* Returning false lets the decoder fall back to illegal-insn.  */
    return false;
#else
    gen_exception(dc, TT_NCP_INSN);
    return true;
#endif
}
2803 
2804 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2805 {
2806     /* Special-case %g0 because that's the canonical nop.  */
2807     if (a->rd) {
2808         gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2809     }
2810     return advance_pc(dc);
2811 }
2812 
2813 /*
2814  * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2815  */
2816 
/*
 * Shared implementation of Ticc (trap on condition codes).
 * The trap number is (%rs1 + rs2_or_imm) & mask, plus the TT_TRAP base;
 * COND selects the condition, CC selects icc vs xcc.
 */
static bool do_tcc(DisasContext *dc, int cond, int cc,
                   int rs1, bool imm, int rs2_or_imm)
{
    /* With hypervisor support, supervisor traps use the wider mask.  */
    int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
    DisasCompare cmp;
    TCGLabel *lab;
    TCGv_i32 trap;

    /* Trap never.  */
    if (cond == 0) {
        return advance_pc(dc);
    }

    /*
     * Immediate traps are the most common case.  Since this value is
     * live across the branch, it really pays to evaluate the constant.
     */
    if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
        trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
    } else {
        /* General case: compute %rs1 + (imm or %rs2) at run time.  */
        trap = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
        if (imm) {
            tcg_gen_addi_i32(trap, trap, rs2_or_imm);
        } else {
            TCGv_i32 t2 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
            tcg_gen_add_i32(trap, trap, t2);
        }
        tcg_gen_andi_i32(trap, trap, mask);
        tcg_gen_addi_i32(trap, trap, TT_TRAP);
    }

    /* Trap always.  */
    if (cond == 8) {
        save_state(dc);
        gen_helper_raise_exception(tcg_env, trap);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    /* Conditional trap.  */
    flush_cond(dc);
    /* Branch to an exception-raising stub emitted at the end of the TB.  */
    lab = delay_exceptionv(dc, trap);
    gen_compare(&cmp, cc, cond, dc);
    tcg_gen_brcond_tl(cmp.cond, cmp.c1, cmp.c2, lab);

    return advance_pc(dc);
}
2867 
2868 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
2869 {
2870     if (avail_32(dc) && a->cc) {
2871         return false;
2872     }
2873     return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
2874 }
2875 
2876 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
2877 {
2878     if (avail_64(dc)) {
2879         return false;
2880     }
2881     return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
2882 }
2883 
2884 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
2885 {
2886     if (avail_32(dc)) {
2887         return false;
2888     }
2889     return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
2890 }
2891 
/* STBAR: store-store memory barrier.  */
static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
{
    tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
    return advance_pc(dc);
}
2897 
/* MEMBAR (v9 only): memory barrier with separate mmask/cmask fields.  */
static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
{
    if (avail_32(dc)) {
        return false;
    }
    if (a->mmask) {
        /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
        tcg_gen_mb(a->mmask | TCG_BAR_SC);
    }
    if (a->cmask) {
        /* For #Sync, etc, end the TB to recognize interrupts. */
        dc->base.is_jmp = DISAS_EXIT;
    }
    return advance_pc(dc);
}
2913 
2914 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
2915                           TCGv (*func)(DisasContext *, TCGv))
2916 {
2917     if (!priv) {
2918         return raise_priv(dc);
2919     }
2920     gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
2921     return advance_pc(dc);
2922 }
2923 
/* RDY: the %y register is always live in cpu_y; no copy needed.  */
static TCGv do_rdy(DisasContext *dc, TCGv dst)
{
    return cpu_y;
}
2928 
static bool trans_RDY(DisasContext *dc, arg_RDY *a)
{
    /*
     * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
     * 32-bit cpus like sparcv7, which ignores the rs1 field.
     * This matches after all other ASR, so Leon3 Asr17 is handled first.
     */
    if (avail_64(dc) && a->rs1 != 0) {
        /* On v9, rs1 != 0 selects a different (or invalid) ASR.  */
        return false;
    }
    return do_rd_special(dc, true, a->rd, do_rdy);
}
2941 
2942 static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
2943 {
2944     uint32_t val;
2945 
2946     /*
2947      * TODO: There are many more fields to be filled,
2948      * some of which are writable.
2949      */
2950     val = dc->def->nwindows - 1;   /* [4:0] NWIN */
2951     val |= 1 << 8;                 /* [8]   V8   */
2952 
2953     return tcg_constant_tl(val);
2954 }
2955 
2956 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2957 
/* RDCCR: materialize the lazily-evaluated condition codes, then
   read them via the helper.  */
static TCGv do_rdccr(DisasContext *dc, TCGv dst)
{
    update_psr(dc);
    gen_helper_rdccr(dst, tcg_env);
    return dst;
}

TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
2966 
2967 static TCGv do_rdasi(DisasContext *dc, TCGv dst)
2968 {
2969 #ifdef TARGET_SPARC64
2970     return tcg_constant_tl(dc->asi);
2971 #else
2972     qemu_build_not_reached();
2973 #endif
2974 }
2975 
2976 TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2977 
/* Read %tick from the timer object pointed to by env->tick. */
static TCGv do_rdtick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    /* Timer reads count as I/O; end the TB if icount requires it. */
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)

/* RDPC: the (address-masked) pc of this insn is known at translate time. */
static TCGv do_rdpc(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(address_mask_i(dc, dc->pc));
}

TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
3000 
/* Read %fprs: copy the 32-bit global into dst, widened to target width. */
static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
{
    tcg_gen_ext_i32_tl(dst, cpu_fprs);
    return dst;
}

TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)

/* Read %gsr; traps if the FPU is disabled. */
static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
{
    gen_trap_ifnofpu(dc);
    return cpu_gsr;
}

TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
3016 
/* Read %softint (stored as 32 bits in env; sign-extended load). */
static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
    return dst;
}

TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)

/* Read %tick_cmpr directly from env. */
static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
3033 
/* Read %stick from the timer object pointed to by env->stick. */
static TCGv do_rdstick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    /* Timer reads count as I/O; end the TB if icount requires it. */
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)

/* Read %stick_cmpr directly from env. */
static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
    return dst;
}

/* TODO: supervisor access only allowed when enabled by hypervisor. */
TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
3058 
/*
 * UltraSPARC-T1 Strand status.
 * HYPV check maybe not enough, UA2005 & UA2007 describe
 * this ASR as impl. dep
 */
static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
{
    /* Always reads as constant 1 in this implementation. */
    return tcg_constant_tl(1);
}

TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
3070 
/* Read %psr via helper; synchronize any lazily-evaluated flags first. */
static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
{
    update_psr(dc);
    gen_helper_rdpsr(dst, tcg_env);
    return dst;
}

TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)

/* Read %hpstate directly from env. */
static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
    return dst;
}

TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
3087 
/*
 * Read %htstate for the current trap level: index env->htstate by
 * (env->tl & MAXTL_MASK), each entry being 8 bytes (hence the shift by 3
 * -- assumes 8-byte entries; see the htstate declaration).
 */
static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
    return dst;
}

TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
3104 
/* Read %hintp directly from env. */
static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
    return dst;
}

TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)

/* Read %htba (hypervisor trap base address) directly from env. */
static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
    return dst;
}

TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)

/* Read %hver directly from env. */
static TCGv do_rdhver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
    return dst;
}

TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)

/* Read %hstick_cmpr directly from env. */
static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
    return dst;
}

TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
      do_rdhstick_cmpr)
3137 
/* Read %wim (window invalid mask, sparc32) directly from env. */
static TCGv do_rdwim(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
    return dst;
}

TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
3145 
/* Read %tpc from the trap state for the current trap level. */
static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)

/* Read %tnpc from the trap state for the current trap level. */
static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)

/* Read %tstate from the trap state for the current trap level. */
static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)

/* Read %tt (trap type, 32-bit field) from the current trap state. */
static TCGv do_rdtt(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
3206 
/* %tbr (sparc32) and %tba (sparc64) share the cpu_tbr global. */
static TCGv do_rdtba(DisasContext *dc, TCGv dst)
{
    return cpu_tbr;
}

TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)

/* Read %pstate (stored as a 32-bit field in env). */
static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
    return dst;
}

TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)

/* Read the current trap level %tl. */
static TCGv do_rdtl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
    return dst;
}

TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)

/* Read %pil (processor interrupt level, env->psrpil). */
static TCGv do_rdpil(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
    return dst;
}

TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)

/* Read %cwp via helper. */
static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
{
    gen_helper_rdcwp(dst, tcg_env);
    return dst;
}

TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
3246 
/* Read %cansave (register-window bookkeeping, 32-bit field in env). */
static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
    return dst;
}

TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)

/* Read %canrestore. */
static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
    return dst;
}

TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
      do_rdcanrestore)

/* Read %cleanwin. */
static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
    return dst;
}

TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)

/* Read %otherwin. */
static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
    return dst;
}

TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)

/* Read %wstate. */
static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
    return dst;
}

TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)

/* Read %gl (global register level, UA2005). */
static TCGv do_rdgl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
    return dst;
}

TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3295 
/* UA2005 strand status */
static TCGv do_rdssr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
    return dst;
}

TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)

/* Read %ver directly from env. */
static TCGv do_rdver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
    return dst;
}

TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3312 
3313 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3314 {
3315     if (avail_64(dc)) {
3316         gen_helper_flushw(tcg_env);
3317         return advance_pc(dc);
3318     }
3319     return false;
3320 }
3321 
3322 static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
3323                           void (*func)(DisasContext *, TCGv))
3324 {
3325     TCGv src;
3326 
3327     /* For simplicity, we under-decoded the rs2 form. */
3328     if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
3329         return false;
3330     }
3331     if (!priv) {
3332         return raise_priv(dc);
3333     }
3334 
3335     if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
3336         src = tcg_constant_tl(a->rs2_or_imm);
3337     } else {
3338         TCGv src1 = gen_load_gpr(dc, a->rs1);
3339         if (a->rs2_or_imm == 0) {
3340             src = src1;
3341         } else {
3342             src = tcg_temp_new();
3343             if (a->imm) {
3344                 tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
3345             } else {
3346                 tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
3347             }
3348         }
3349     }
3350     func(dc, src);
3351     return advance_pc(dc);
3352 }
3353 
/* Write %y; only the low 32 bits are kept. */
static void do_wry(DisasContext *dc, TCGv src)
{
    tcg_gen_ext32u_tl(cpu_y, src);
}

TRANS(WRY, ALL, do_wr_special, a, true, do_wry)

/* Write %ccr via helper. */
static void do_wrccr(DisasContext *dc, TCGv src)
{
    gen_helper_wrccr(tcg_env, src);
}

TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)

/* Write %asi; only 8 bits are significant. */
static void do_wrasi(DisasContext *dc, TCGv src)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_ext8u_tl(tmp, src);
    tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
    /* End TB to notice changed ASI. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
3379 
/*
 * Write %fprs.  The cached fprs_dirty state is invalidated and the TB
 * ended, since FPU-enable bits affect how later insns translate.
 */
static void do_wrfprs(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    tcg_gen_trunc_tl_i32(cpu_fprs, src);
    dc->fprs_dirty = 0;
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)

/* Write %gsr; traps if the FPU is disabled. */
static void do_wrgsr(DisasContext *dc, TCGv src)
{
    gen_trap_ifnofpu(dc);
    tcg_gen_mov_tl(cpu_gsr, src);
}

TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3400 
/* Set bits in %softint via helper. */
static void do_wrsoftint_set(DisasContext *dc, TCGv src)
{
    gen_helper_set_softint(tcg_env, src);
}

TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)

/* Clear bits in %softint via helper. */
static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
{
    gen_helper_clear_softint(tcg_env, src);
}

TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)

/* Replace %softint entirely via helper. */
static void do_wrsoftint(DisasContext *dc, TCGv src)
{
    gen_helper_write_softint(tcg_env, src);
}

TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3421 
/*
 * Write %tick_cmpr: store the value in env and push the new limit
 * into the tick timer.  Ends the TB because the new limit may raise
 * a timer interrupt.
 */
static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3435 
3436 static void do_wrstick(DisasContext *dc, TCGv src)
3437 {
3438 #ifdef TARGET_SPARC64
3439     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3440 
3441     tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3442     translator_io_start(&dc->base);
3443     gen_helper_tick_set_count(r_tickptr, src);
3444     /* End TB to handle timer interrupt */
3445     dc->base.is_jmp = DISAS_EXIT;
3446 #else
3447     qemu_build_not_reached();
3448 #endif
3449 }
3450 
3451 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3452 
/*
 * Write %stick_cmpr: store the value in env and push the new limit
 * into the stick timer.  Ends the TB for the possible timer interrupt.
 */
static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3466 
/* Power-down ASR write: sync translator state, then halt via helper. */
static void do_wrpowerdown(DisasContext *dc, TCGv src)
{
    save_state(dc);
    gen_helper_power_down(tcg_env);
}

TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)

/*
 * Write %psr via helper.  Afterward the condition codes are in
 * CC_OP_FLAGS form, and the TB is ended since PSR bits (e.g. traps,
 * interrupt level) affect subsequent translation.
 */
static void do_wrpsr(DisasContext *dc, TCGv src)
{
    gen_helper_wrpsr(tcg_env, src);
    tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
    dc->cc_op = CC_OP_FLAGS;
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3484 
3485 static void do_wrwim(DisasContext *dc, TCGv src)
3486 {
3487     target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
3488     TCGv tmp = tcg_temp_new();
3489 
3490     tcg_gen_andi_tl(tmp, src, mask);
3491     tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
3492 }
3493 
3494 TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3495 
/* Write %tpc in the trap state for the current trap level. */
static void do_wrtpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)

/* Write %tnpc in the trap state for the current trap level. */
static void do_wrtnpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)

/* Write %tstate in the trap state for the current trap level. */
static void do_wrtstate(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)

/* Write %tt (32-bit field) in the trap state for the current trap level. */
static void do_wrtt(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3551 
/*
 * Write %tick: set the count on the timer pointed to by env->tick.
 * Ends the TB because moving the counter may raise a timer interrupt.
 */
static void do_wrtick(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)

/* Write %tba (shared cpu_tbr global; also used by sparc32 WRTBR). */
static void do_wrtba(DisasContext *dc, TCGv src)
{
    tcg_gen_mov_tl(cpu_tbr, src);
}

TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3571 
/*
 * Write %pstate via helper.  State is saved first and npc forced
 * dynamic, since pstate changes can affect subsequent translation.
 */
static void do_wrpstate(DisasContext *dc, TCGv src)
{
    save_state(dc);
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpstate(tcg_env, src);
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)

/* Write %tl (trap level); npc forced dynamic as with pstate. */
static void do_wrtl(DisasContext *dc, TCGv src)
{
    save_state(dc);
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)

/* Write %pil via helper; raising/lowering PIL can unmask interrupts. */
static void do_wrpil(DisasContext *dc, TCGv src)
{
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpil(tcg_env, src);
}

TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3602 
/* Write %cwp via helper. */
static void do_wrcwp(DisasContext *dc, TCGv src)
{
    gen_helper_wrcwp(tcg_env, src);
}

TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)

/* Write %cansave (32-bit field in env). */
static void do_wrcansave(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
}

TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)

/* Write %canrestore. */
static void do_wrcanrestore(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
}

TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)

/* Write %cleanwin. */
static void do_wrcleanwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
}

TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)

/* Write %otherwin. */
static void do_wrotherwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
}

TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)

/* Write %wstate. */
static void do_wrwstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
}

TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)

/* Write %gl (global register level) via helper. */
static void do_wrgl(DisasContext *dc, TCGv src)
{
    gen_helper_wrgl(tcg_env, src);
}

TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)

/* UA2005 strand status */
static void do_wrssr(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
}

TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)

TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3661 
/* Write %hpstate; end the TB since hpstate bits affect translation. */
static void do_wrhpstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)

/*
 * Write %htstate for the current trap level: index env->htstate by
 * (env->tl & MAXTL_MASK), each entry being 8 bytes (shift by 3) --
 * mirrors the addressing in do_rdhtstate.
 */
static void do_wrhtstate(DisasContext *dc, TCGv src)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
}

TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)

/* Write %hintp. */
static void do_wrhintp(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
}

TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)

/* Write %htba (hypervisor trap base address). */
static void do_wrhtba(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
}

TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)

/*
 * Write %hstick_cmpr: store the value in env and push the new limit
 * into the hstick timer.  Ends the TB for the possible timer interrupt.
 */
static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
      do_wrhstick_cmpr)
3714 
/*
 * SAVED/RESTORED window-management instructions (v9), distinguished by
 * the SAVED flag; both are supervisor-only.
 */
static bool do_saved_restored(DisasContext *dc, bool saved)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (saved) {
        gen_helper_saved(tcg_env);
    } else {
        gen_helper_restored(tcg_env);
    }
    return advance_pc(dc);
}

TRANS(SAVED, 64, do_saved_restored, true)
TRANS(RESTORED, 64, do_saved_restored, false)
3730 
/* NOP: nothing to emit; just step past the insn. */
static bool trans_NOP(DisasContext *dc, arg_NOP *a)
{
    return advance_pc(dc);
}

/*
 * TODO: Need a feature bit for sparcv8.
 * In the meantime, treat all 32-bit cpus like sparcv7.
 */
TRANS(NOP_v7, 32, trans_NOP, a)
TRANS(NOP_v9, 64, trans_NOP, a)
3742 
/*
 * Common path for 3-operand arithmetic: rd = func(rs1, rs2_or_imm).
 * FUNCI, when non-NULL, handles the immediate form directly; otherwise
 * the immediate is materialized as a constant.  When a->cc is set, the
 * result goes through cpu_cc_dst and CC_OP is switched to cc_op so the
 * flags can be evaluated lazily later.
 */
static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op,
                         void (*func)(TCGv, TCGv, TCGv),
                         void (*funci)(TCGv, TCGv, target_long))
{
    TCGv dst, src1;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (a->cc) {
        /* Flag-setting form: compute into cpu_cc_dst for lazy flags. */
        dst = cpu_cc_dst;
    } else {
        dst = gen_dest_gpr(dc, a->rd);
    }
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm || a->rs2_or_imm == 0) {
        if (funci) {
            funci(dst, src1, a->rs2_or_imm);
        } else {
            func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
        }
    } else {
        func(dst, src1, cpu_regs[a->rs2_or_imm]);
    }
    gen_store_gpr(dc, a->rd, dst);

    if (a->cc) {
        tcg_gen_movi_i32(cpu_cc_op, cc_op);
        dc->cc_op = cc_op;
    }
    return advance_pc(dc);
}
3778 
3779 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op,
3780                      void (*func)(TCGv, TCGv, TCGv),
3781                      void (*funci)(TCGv, TCGv, target_long),
3782                      void (*func_cc)(TCGv, TCGv, TCGv))
3783 {
3784     if (a->cc) {
3785         assert(cc_op >= 0);
3786         return do_arith_int(dc, a, cc_op, func_cc, NULL);
3787     }
3788     return do_arith_int(dc, a, cc_op, func, funci);
3789 }
3790 
3791 static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
3792                      void (*func)(TCGv, TCGv, TCGv),
3793                      void (*funci)(TCGv, TCGv, target_long))
3794 {
3795     return do_arith_int(dc, a, CC_OP_LOGIC, func, funci);
3796 }
3797 
/* Arithmetic and logical instruction table, built on do_arith/do_logic. */
TRANS(ADD, ALL, do_arith, a, CC_OP_ADD,
      tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc)
TRANS(SUB, ALL, do_arith, a, CC_OP_SUB,
      tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc)

/* Tagged add/subtract: cc-only forms (plain func/funci are NULL). */
TRANS(TADDcc, ALL, do_arith, a, CC_OP_TADD, NULL, NULL, gen_op_add_cc)
TRANS(TSUBcc, ALL, do_arith, a, CC_OP_TSUB, NULL, NULL, gen_op_sub_cc)
TRANS(TADDccTV, ALL, do_arith, a, CC_OP_TADDTV, NULL, NULL, gen_op_taddcctv)
TRANS(TSUBccTV, ALL, do_arith, a, CC_OP_TSUBTV, NULL, NULL, gen_op_tsubcctv)

TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)

/* cc_op of -1: these never set flags (or use CC_OP_LOGIC via do_logic). */
TRANS(MULX, 64, do_arith, a, -1, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)

TRANS(UDIVX, 64, do_arith, a, -1, gen_op_udivx, NULL, NULL)
TRANS(SDIVX, 64, do_arith, a, -1, gen_op_sdivx, NULL, NULL)
TRANS(UDIV, DIV, do_arith, a, CC_OP_DIV, gen_op_udiv, NULL, gen_op_udivcc)
TRANS(SDIV, DIV, do_arith, a, CC_OP_DIV, gen_op_sdiv, NULL, gen_op_sdivcc)

/* TODO: Should have feature bit -- comes in with UltraSparc T2. */
TRANS(POPC, 64, do_arith, a, -1, gen_op_popc, NULL, NULL)
3825 
3826 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
3827 {
3828     /* OR with %g0 is the canonical alias for MOV. */
3829     if (!a->cc && a->rs1 == 0) {
3830         if (a->imm || a->rs2_or_imm == 0) {
3831             gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
3832         } else if (a->rs2_or_imm & ~0x1f) {
3833             /* For simplicity, we under-decoded the rs2 form. */
3834             return false;
3835         } else {
3836             gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
3837         }
3838         return advance_pc(dc);
3839     }
3840     return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
3841 }
3842 
/*
 * ADDC/ADDXcc: add with carry.  How the carry is recovered depends on
 * which operation last set the (lazily evaluated) condition codes, so
 * dispatch on dc->cc_op.
 */
static bool trans_ADDC(DisasContext *dc, arg_r_r_ri_cc *a)
{
    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        return do_arith(dc, a, CC_OP_ADD,
                        tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc);
    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        /* Carry comes from a previous add-family operation. */
        return do_arith(dc, a, CC_OP_ADDX,
                        gen_op_addc_add, NULL, gen_op_addccc_add);
    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        /* Carry comes from a previous sub-family operation. */
        return do_arith(dc, a, CC_OP_ADDX,
                        gen_op_addc_sub, NULL, gen_op_addccc_sub);
    default:
        /* Unknown producer: recover carry generically. */
        return do_arith(dc, a, CC_OP_ADDX,
                        gen_op_addc_generic, NULL, gen_op_addccc_generic);
    }
}

/*
 * SUBC/SUBXcc: subtract with borrow; same dispatch scheme as trans_ADDC.
 */
static bool trans_SUBC(DisasContext *dc, arg_r_r_ri_cc *a)
{
    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        return do_arith(dc, a, CC_OP_SUB,
                        tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc);
    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        return do_arith(dc, a, CC_OP_SUBX,
                        gen_op_subc_add, NULL, gen_op_subccc_add);
    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        return do_arith(dc, a, CC_OP_SUBX,
                        gen_op_subc_sub, NULL, gen_op_subccc_sub);
    default:
        return do_arith(dc, a, CC_OP_SUBX,
                        gen_op_subc_generic, NULL, gen_op_subccc_generic);
    }
}
3890 
/* MULScc reads the current condition codes, so synchronize them first. */
static bool trans_MULScc(DisasContext *dc, arg_r_r_ri_cc *a)
{
    update_psr(dc);
    return do_arith(dc, a, CC_OP_ADD, NULL, NULL, gen_op_mulscc);
}
3896 
3897 static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
3898                      int width, bool cc, bool left)
3899 {
3900     TCGv dst, s1, s2, lo1, lo2;
3901     uint64_t amask, tabl, tabr;
3902     int shift, imask, omask;
3903 
3904     dst = gen_dest_gpr(dc, a->rd);
3905     s1 = gen_load_gpr(dc, a->rs1);
3906     s2 = gen_load_gpr(dc, a->rs2);
3907 
3908     if (cc) {
3909         tcg_gen_mov_tl(cpu_cc_src, s1);
3910         tcg_gen_mov_tl(cpu_cc_src2, s2);
3911         tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
3912         tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3913         dc->cc_op = CC_OP_SUB;
3914     }
3915 
3916     /*
3917      * Theory of operation: there are two tables, left and right (not to
3918      * be confused with the left and right versions of the opcode).  These
3919      * are indexed by the low 3 bits of the inputs.  To make things "easy",
3920      * these tables are loaded into two constants, TABL and TABR below.
3921      * The operation index = (input & imask) << shift calculates the index
3922      * into the constant, while val = (table >> index) & omask calculates
3923      * the value we're looking for.
3924      */
3925     switch (width) {
3926     case 8:
3927         imask = 0x7;
3928         shift = 3;
3929         omask = 0xff;
3930         if (left) {
3931             tabl = 0x80c0e0f0f8fcfeffULL;
3932             tabr = 0xff7f3f1f0f070301ULL;
3933         } else {
3934             tabl = 0x0103070f1f3f7fffULL;
3935             tabr = 0xfffefcf8f0e0c080ULL;
3936         }
3937         break;
3938     case 16:
3939         imask = 0x6;
3940         shift = 1;
3941         omask = 0xf;
3942         if (left) {
3943             tabl = 0x8cef;
3944             tabr = 0xf731;
3945         } else {
3946             tabl = 0x137f;
3947             tabr = 0xfec8;
3948         }
3949         break;
3950     case 32:
3951         imask = 0x4;
3952         shift = 0;
3953         omask = 0x3;
3954         if (left) {
3955             tabl = (2 << 2) | 3;
3956             tabr = (3 << 2) | 1;
3957         } else {
3958             tabl = (1 << 2) | 3;
3959             tabr = (3 << 2) | 2;
3960         }
3961         break;
3962     default:
3963         abort();
3964     }
3965 
3966     lo1 = tcg_temp_new();
3967     lo2 = tcg_temp_new();
3968     tcg_gen_andi_tl(lo1, s1, imask);
3969     tcg_gen_andi_tl(lo2, s2, imask);
3970     tcg_gen_shli_tl(lo1, lo1, shift);
3971     tcg_gen_shli_tl(lo2, lo2, shift);
3972 
3973     tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
3974     tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
3975     tcg_gen_andi_tl(lo1, lo1, omask);
3976     tcg_gen_andi_tl(lo2, lo2, omask);
3977 
3978     amask = address_mask_i(dc, -8);
3979     tcg_gen_andi_tl(s1, s1, amask);
3980     tcg_gen_andi_tl(s2, s2, amask);
3981 
3982     /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
3983     tcg_gen_and_tl(lo2, lo2, lo1);
3984     tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);
3985 
3986     gen_store_gpr(dc, a->rd, dst);
3987     return advance_pc(dc);
3988 }
3989 
3990 TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
3991 TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
3992 TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
3993 TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
3994 TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
3995 TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)
3996 
3997 TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
3998 TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
3999 TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
4000 TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
4001 TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
4002 TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
4003 
4004 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
4005                    void (*func)(TCGv, TCGv, TCGv))
4006 {
4007     TCGv dst = gen_dest_gpr(dc, a->rd);
4008     TCGv src1 = gen_load_gpr(dc, a->rs1);
4009     TCGv src2 = gen_load_gpr(dc, a->rs2);
4010 
4011     func(dst, src1, src2);
4012     gen_store_gpr(dc, a->rd, dst);
4013     return advance_pc(dc);
4014 }
4015 
4016 TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
4017 TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
4018 TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
4019 
/*
 * ALIGNADDRESS: dst = (s1 + s2) & ~7, with the low 3 bits of the sum
 * deposited into GSR.align.  sparc64 only; compiled out on sparc32.
 */
static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    /* GSR.align = sum & 7 */
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

/*
 * ALIGNADDRESS_LITTLE: as above, but GSR.align receives the low
 * 3 bits of the negated sum.
 */
static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_neg_tl(tmp, tmp);
    /* GSR.align = -sum & 7 */
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)

/*
 * BMASK: dst = s1 + s2, with the sum also copied into GSR bits 63:32
 * (the mask field consumed by BSHUFFLE, per the VIS2 spec).
 */
static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    tcg_gen_add_tl(dst, s1, s2);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
#else
    g_assert_not_reached();
#endif
}

TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
4061 
4062 static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
4063 {
4064     TCGv dst, src1, src2;
4065 
4066     /* Reject 64-bit shifts for sparc32. */
4067     if (avail_32(dc) && a->x) {
4068         return false;
4069     }
4070 
4071     src2 = tcg_temp_new();
4072     tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
4073     src1 = gen_load_gpr(dc, a->rs1);
4074     dst = gen_dest_gpr(dc, a->rd);
4075 
4076     if (l) {
4077         tcg_gen_shl_tl(dst, src1, src2);
4078         if (!a->x) {
4079             tcg_gen_ext32u_tl(dst, dst);
4080         }
4081     } else if (u) {
4082         if (!a->x) {
4083             tcg_gen_ext32u_tl(dst, src1);
4084             src1 = dst;
4085         }
4086         tcg_gen_shr_tl(dst, src1, src2);
4087     } else {
4088         if (!a->x) {
4089             tcg_gen_ext32s_tl(dst, src1);
4090             src1 = dst;
4091         }
4092         tcg_gen_sar_tl(dst, src1, src2);
4093     }
4094     gen_store_gpr(dc, a->rd, dst);
4095     return advance_pc(dc);
4096 }
4097 
4098 TRANS(SLL_r, ALL, do_shift_r, a, true, true)
4099 TRANS(SRL_r, ALL, do_shift_r, a, false, true)
4100 TRANS(SRA_r, ALL, do_shift_r, a, false, false)
4101 
4102 static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
4103 {
4104     TCGv dst, src1;
4105 
4106     /* Reject 64-bit shifts for sparc32. */
4107     if (avail_32(dc) && (a->x || a->i >= 32)) {
4108         return false;
4109     }
4110 
4111     src1 = gen_load_gpr(dc, a->rs1);
4112     dst = gen_dest_gpr(dc, a->rd);
4113 
4114     if (avail_32(dc) || a->x) {
4115         if (l) {
4116             tcg_gen_shli_tl(dst, src1, a->i);
4117         } else if (u) {
4118             tcg_gen_shri_tl(dst, src1, a->i);
4119         } else {
4120             tcg_gen_sari_tl(dst, src1, a->i);
4121         }
4122     } else {
4123         if (l) {
4124             tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
4125         } else if (u) {
4126             tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
4127         } else {
4128             tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
4129         }
4130     }
4131     gen_store_gpr(dc, a->rd, dst);
4132     return advance_pc(dc);
4133 }
4134 
4135 TRANS(SLL_i, ALL, do_shift_i, a, true, true)
4136 TRANS(SRL_i, ALL, do_shift_i, a, false, true)
4137 TRANS(SRA_i, ALL, do_shift_i, a, false, false)
4138 
4139 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
4140 {
4141     /* For simplicity, we under-decoded the rs2 form. */
4142     if (!imm && rs2_or_imm & ~0x1f) {
4143         return NULL;
4144     }
4145     if (imm || rs2_or_imm == 0) {
4146         return tcg_constant_tl(rs2_or_imm);
4147     } else {
4148         return cpu_regs[rs2_or_imm];
4149     }
4150 }
4151 
4152 static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
4153 {
4154     TCGv dst = gen_load_gpr(dc, rd);
4155 
4156     tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, cmp->c2, src2, dst);
4157     gen_store_gpr(dc, rd, dst);
4158     return advance_pc(dc);
4159 }
4160 
4161 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
4162 {
4163     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4164     DisasCompare cmp;
4165 
4166     if (src2 == NULL) {
4167         return false;
4168     }
4169     gen_compare(&cmp, a->cc, a->cond, dc);
4170     return do_mov_cond(dc, &cmp, a->rd, src2);
4171 }
4172 
4173 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
4174 {
4175     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4176     DisasCompare cmp;
4177 
4178     if (src2 == NULL) {
4179         return false;
4180     }
4181     gen_fcompare(&cmp, a->cc, a->cond);
4182     return do_mov_cond(dc, &cmp, a->rd, src2);
4183 }
4184 
4185 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
4186 {
4187     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4188     DisasCompare cmp;
4189 
4190     if (src2 == NULL) {
4191         return false;
4192     }
4193     gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
4194     return do_mov_cond(dc, &cmp, a->rd, src2);
4195 }
4196 
/*
 * Common decode for insns that compute rs1 + (simm13 or rs2) and pass
 * the sum on to FUNC together with rd: JMPL, RETT, RETURN, SAVE,
 * RESTORE.  FUNC is responsible for the rd writeback and pc advance.
 */
static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
                           bool (*func)(DisasContext *dc, int rd, TCGv src))
{
    TCGv src1, sum;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /*
     * Always load the sum into a new temporary.
     * This is required to capture the value across a window change,
     * e.g. SAVE and RESTORE, and may be optimized away otherwise.
     */
    sum = tcg_temp_new();
    src1 = gen_load_gpr(dc, a->rs1);
    if (a->imm || a->rs2_or_imm == 0) {
        /* Covers both the immediate form and rs2 == %g0. */
        tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
    } else {
        tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
    }
    return func(dc, a->rd, sum);
}
4221 
/*
 * JMPL: jump to SRC, writing the address of the JMPL itself into rd.
 */
static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
{
    /*
     * Preserve pc across advance, so that we can delay
     * the writeback to rd until after src is consumed.
     */
    target_ulong cur_pc = dc->pc;

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);
    gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(JMPL, ALL, do_add_special, a, do_jmpl)

/*
 * RETT (sparc32): privileged return from trap; the helper performs
 * the processor state restore.
 */
static bool do_rett(DisasContext *dc, int rd, TCGv src)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_helper_rett(tcg_env);

    dc->npc = DYNAMIC_PC;
    return true;
}

TRANS(RETT, 32, do_add_special, a, do_rett)

/*
 * RETURN (sparc64): jump to SRC and restore the previous register
 * window.
 */
static bool do_return(DisasContext *dc, int rd, TCGv src)
{
    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);

    gen_helper_restore(tcg_env);
    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(RETURN, 64, do_add_special, a, do_return)

/*
 * SAVE: shift to a new register window, then write the sum -- computed
 * in the old window by do_add_special -- into rd of the new window
 * (the store is emitted after the window-change helper).
 */
static bool do_save(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_save(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(SAVE, ALL, do_add_special, a, do_save)

/*
 * RESTORE: as SAVE, but returning to the previous window.
 */
static bool do_restore(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_restore(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(RESTORE, ALL, do_add_special, a, do_restore)

/*
 * DONE/RETRY (sparc64): privileged return from a trap handler.
 * pc and npc become unknown at translation time.  translator_io_start
 * is emitted first, presumably because the helpers can access
 * time-sensitive state -- confirm against the helper implementations.
 */
static bool do_done_retry(DisasContext *dc, bool done)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    dc->npc = DYNAMIC_PC;
    dc->pc = DYNAMIC_PC;
    translator_io_start(&dc->base);
    if (done) {
        gen_helper_done(tcg_env);
    } else {
        gen_helper_retry(tcg_env);
    }
    return true;
}

TRANS(DONE, 64, do_done_retry, true)
TRANS(RETRY, 64, do_done_retry, false)
4312 
4313 /*
4314  * Major opcode 11 -- load and store instructions
4315  */
4316 
/*
 * Compute the effective address rs1 + (simm13 or rs2) for a load/store.
 * Returns NULL if the insn was under-decoded (reserved bits set in the
 * register form).  Under AM_CHECK the result is zero-extended to
 * 32 bits.
 */
static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
{
    TCGv addr, tmp = NULL;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }

    addr = gen_load_gpr(dc, rs1);
    if (rs2_or_imm) {
        tmp = tcg_temp_new();
        if (imm) {
            tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
        } else {
            tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
        }
        addr = tmp;
    }
    if (AM_CHECK(dc)) {
        /* Apply the 32-bit address mask, reusing the temp if we made one. */
        if (!tmp) {
            tmp = tcg_temp_new();
        }
        tcg_gen_ext32u_tl(tmp, addr);
        addr = tmp;
    }
    return addr;
}
4345 
4346 static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4347 {
4348     TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4349     DisasASI da;
4350 
4351     if (addr == NULL) {
4352         return false;
4353     }
4354     da = resolve_asi(dc, a->asi, mop);
4355 
4356     reg = gen_dest_gpr(dc, a->rd);
4357     gen_ld_asi(dc, &da, reg, addr);
4358     gen_store_gpr(dc, a->rd, reg);
4359     return advance_pc(dc);
4360 }
4361 
4362 TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
4363 TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
4364 TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
4365 TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
4366 TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
4367 TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
4368 TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4369 
4370 static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4371 {
4372     TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4373     DisasASI da;
4374 
4375     if (addr == NULL) {
4376         return false;
4377     }
4378     da = resolve_asi(dc, a->asi, mop);
4379 
4380     reg = gen_load_gpr(dc, a->rd);
4381     gen_st_asi(dc, &da, reg, addr);
4382     return advance_pc(dc);
4383 }
4384 
4385 TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
4386 TRANS(STB, ALL, do_st_gpr, a, MO_UB)
4387 TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
4388 TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4389 
/*
 * LDD: load a doubleword into the even/odd register pair rd/rd+1.
 * An odd rd is rejected (left undecoded).
 */
static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_ldda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}

/*
 * STD: store the even/odd register pair rd/rd+1 as one doubleword.
 * An odd rd is rejected (left undecoded).
 */
static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_stda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
4423 
4424 static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
4425 {
4426     TCGv addr, reg;
4427     DisasASI da;
4428 
4429     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4430     if (addr == NULL) {
4431         return false;
4432     }
4433     da = resolve_asi(dc, a->asi, MO_UB);
4434 
4435     reg = gen_dest_gpr(dc, a->rd);
4436     gen_ldstub_asi(dc, &da, reg, addr);
4437     gen_store_gpr(dc, a->rd, reg);
4438     return advance_pc(dc);
4439 }
4440 
4441 static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
4442 {
4443     TCGv addr, dst, src;
4444     DisasASI da;
4445 
4446     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4447     if (addr == NULL) {
4448         return false;
4449     }
4450     da = resolve_asi(dc, a->asi, MO_TEUL);
4451 
4452     dst = gen_dest_gpr(dc, a->rd);
4453     src = gen_load_gpr(dc, a->rd);
4454     gen_swap_asi(dc, &da, dst, src, addr);
4455     gen_store_gpr(dc, a->rd, dst);
4456     return advance_pc(dc);
4457 }
4458 
4459 static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4460 {
4461     TCGv addr, o, n, c;
4462     DisasASI da;
4463 
4464     addr = gen_ldst_addr(dc, a->rs1, true, 0);
4465     if (addr == NULL) {
4466         return false;
4467     }
4468     da = resolve_asi(dc, a->asi, mop);
4469 
4470     o = gen_dest_gpr(dc, a->rd);
4471     n = gen_load_gpr(dc, a->rd);
4472     c = gen_load_gpr(dc, a->rs2_or_imm);
4473     gen_cas_asi(dc, &da, o, n, c, addr);
4474     gen_store_gpr(dc, a->rd, o);
4475     return advance_pc(dc);
4476 }
4477 
4478 TRANS(CASA, CASA, do_casa, a, MO_TEUL)
4479 TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4480 
/*
 * Load to a floating-point register (32/64/128-bit) via an ASI.
 * 128-bit accesses additionally require float128 support.
 */
static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_ldf_asi(dc, &da, sz, addr, a->rd);
    gen_update_fprs_dirty(dc, a->rd);
    return advance_pc(dc);
}

TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)

TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)

/*
 * Store from a floating-point register (32/64/128-bit) via an ASI.
 */
static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_stf_asi(dc, &da, sz, addr, a->rd);
    return advance_pc(dc);
}

TRANS(STF, ALL, do_st_fpr, a, MO_32)
TRANS(STDF, ALL, do_st_fpr, a, MO_64)
TRANS(STQF, ALL, do_st_fpr, a, MO_128)

TRANS(STFA, 64, do_st_fpr, a, MO_32)
TRANS(STDFA, 64, do_st_fpr, a, MO_64)
TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4535 
/*
 * STDFQ (sparc32 only): privileged store of the FP deferred-trap queue.
 * This implementation always raises an FP exception with
 * FSR.ftt = sequence_error.
 */
static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
{
    if (!avail_32(dc)) {
        return false;
    }
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    return true;
}

/*
 * LDFSR/LDXFSR: load the FSR from memory.  Keep the OLD_MASK bits of
 * the current fsr, merge in the loaded bits selected by NEW_MASK, then
 * call the set_fsr helper with the merged value.
 */
static bool do_ldfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop,
                     target_ulong new_mask, target_ulong old_mask)
{
    TCGv tmp, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    tmp = tcg_temp_new();
    tcg_gen_qemu_ld_tl(tmp, addr, dc->mem_idx, mop | MO_ALIGN);
    tcg_gen_andi_tl(tmp, tmp, new_mask);
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, old_mask);
    tcg_gen_or_tl(cpu_fsr, cpu_fsr, tmp);
    gen_helper_set_fsr(tcg_env, cpu_fsr);
    return advance_pc(dc);
}

TRANS(LDFSR, ALL, do_ldfsr, a, MO_TEUL, FSR_LDFSR_MASK, FSR_LDFSR_OLDMASK)
TRANS(LDXFSR, 64, do_ldfsr, a, MO_TEUQ, FSR_LDXFSR_MASK, FSR_LDXFSR_OLDMASK)

/*
 * STFSR/STXFSR: store the FSR to memory (32 or 64 bits).
 */
static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    tcg_gen_qemu_st_tl(cpu_fsr, addr, dc->mem_idx, mop | MO_ALIGN);
    return advance_pc(dc);
}

TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4588 
4589 static bool do_fc(DisasContext *dc, int rd, bool c)
4590 {
4591     uint64_t mask;
4592 
4593     if (gen_trap_ifnofpu(dc)) {
4594         return true;
4595     }
4596 
4597     if (rd & 1) {
4598         mask = MAKE_64BIT_MASK(0, 32);
4599     } else {
4600         mask = MAKE_64BIT_MASK(32, 32);
4601     }
4602     if (c) {
4603         tcg_gen_ori_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], mask);
4604     } else {
4605         tcg_gen_andi_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], ~mask);
4606     }
4607     gen_update_fprs_dirty(dc, rd);
4608     return advance_pc(dc);
4609 }
4610 
4611 TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
4612 TRANS(FONEs, VIS1, do_fc, a->rd, 1)
4613 
/*
 * FZEROd/FONEd: set the full 64-bit FP register rd to constant C
 * (0 or -1).
 */
static bool do_dc(DisasContext *dc, int rd, int64_t c)
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tcg_gen_movi_i64(cpu_fpr[rd / 2], c);
    gen_update_fprs_dirty(dc, rd);
    return advance_pc(dc);
}

TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4627 
4628 static bool do_ff(DisasContext *dc, arg_r_r *a,
4629                   void (*func)(TCGv_i32, TCGv_i32))
4630 {
4631     TCGv_i32 tmp;
4632 
4633     if (gen_trap_ifnofpu(dc)) {
4634         return true;
4635     }
4636 
4637     tmp = gen_load_fpr_F(dc, a->rs);
4638     func(tmp, tmp);
4639     gen_store_fpr_F(dc, a->rd, tmp);
4640     return advance_pc(dc);
4641 }
4642 
4643 TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
4644 TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
4645 TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
4646 TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
4647 TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4648 
4649 static bool do_fd(DisasContext *dc, arg_r_r *a,
4650                   void (*func)(TCGv_i32, TCGv_i64))
4651 {
4652     TCGv_i32 dst;
4653     TCGv_i64 src;
4654 
4655     if (gen_trap_ifnofpu(dc)) {
4656         return true;
4657     }
4658 
4659     dst = gen_dest_fpr_F(dc);
4660     src = gen_load_fpr_D(dc, a->rs);
4661     func(dst, src);
4662     gen_store_fpr_F(dc, a->rd, dst);
4663     return advance_pc(dc);
4664 }
4665 
4666 TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
4667 TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4668 
4669 static bool do_env_ff(DisasContext *dc, arg_r_r *a,
4670                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
4671 {
4672     TCGv_i32 tmp;
4673 
4674     if (gen_trap_ifnofpu(dc)) {
4675         return true;
4676     }
4677 
4678     gen_op_clear_ieee_excp_and_FTT();
4679     tmp = gen_load_fpr_F(dc, a->rs);
4680     func(tmp, tcg_env, tmp);
4681     gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4682     gen_store_fpr_F(dc, a->rd, tmp);
4683     return advance_pc(dc);
4684 }
4685 
4686 TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
4687 TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
4688 TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4689 
4690 static bool do_env_fd(DisasContext *dc, arg_r_r *a,
4691                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
4692 {
4693     TCGv_i32 dst;
4694     TCGv_i64 src;
4695 
4696     if (gen_trap_ifnofpu(dc)) {
4697         return true;
4698     }
4699 
4700     gen_op_clear_ieee_excp_and_FTT();
4701     dst = gen_dest_fpr_F(dc);
4702     src = gen_load_fpr_D(dc, a->rs);
4703     func(dst, tcg_env, src);
4704     gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4705     gen_store_fpr_F(dc, a->rd, dst);
4706     return advance_pc(dc);
4707 }
4708 
4709 TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
4710 TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
4711 TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4712 
4713 static bool do_dd(DisasContext *dc, arg_r_r *a,
4714                   void (*func)(TCGv_i64, TCGv_i64))
4715 {
4716     TCGv_i64 dst, src;
4717 
4718     if (gen_trap_ifnofpu(dc)) {
4719         return true;
4720     }
4721 
4722     dst = gen_dest_fpr_D(dc, a->rd);
4723     src = gen_load_fpr_D(dc, a->rs);
4724     func(dst, src);
4725     gen_store_fpr_D(dc, a->rd, dst);
4726     return advance_pc(dc);
4727 }
4728 
4729 TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
4730 TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
4731 TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
4732 TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
4733 TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4734 
/*
 * 64-bit FP unary op using env, with IEEE exception checking.
 */
static bool do_env_dd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)

/*
 * Widening op using env: 32-bit source, 64-bit destination,
 * with IEEE exception checking.
 */
static bool do_env_df(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4779 
4780 static bool trans_FMOVq(DisasContext *dc, arg_FMOVq *a)
4781 {
4782     int rd, rs;
4783 
4784     if (!avail_64(dc)) {
4785         return false;
4786     }
4787     if (gen_trap_ifnofpu(dc)) {
4788         return true;
4789     }
4790     if (gen_trap_float128(dc)) {
4791         return true;
4792     }
4793 
4794     gen_op_clear_ieee_excp_and_FTT();
4795     rd = QFPREG(a->rd);
4796     rs = QFPREG(a->rs);
4797     tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
4798     tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
4799     gen_update_fprs_dirty(dc, rd);
4800     return advance_pc(dc);
4801 }
4802 
/*
 * float128 unary op without IEEE exception checking.  Quad operands
 * pass through the global QT0/QT1 staging slots: the source is loaded
 * into QT1, the helper leaves its result in QT0.
 */
static bool do_qq(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_env))
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT1(QFPREG(a->rs));
    func(tcg_env);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FNEGq, 64, do_qq, a, gen_helper_fnegq)
TRANS(FABSq, 64, do_qq, a, gen_helper_fabsq)

/*
 * float128 unary op with IEEE exception checking (QT1 -> QT0).
 */
static bool do_env_qq(DisasContext *dc, arg_r_r *a,
                       void (*func)(TCGv_env))
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT1(QFPREG(a->rs));
    func(tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)

/*
 * float128 (in QT1) to 32-bit conversion, with IEEE exception
 * checking.
 */
static bool do_env_fq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env))
{
    TCGv_i32 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT1(QFPREG(a->rs));
    dst = gen_dest_fpr_F(dc);
    func(dst, tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)

/*
 * float128 (in QT1) to 64-bit conversion, with IEEE exception
 * checking.
 */
static bool do_env_dq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env))
{
    TCGv_i64 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT1(QFPREG(a->rs));
    dst = gen_dest_fpr_D(dc, a->rd);
    func(dst, tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)

/*
 * 32-bit to float128 conversion; the helper leaves the quad result
 * in QT0.  No check_ieee_exceptions call here -- widening to quad,
 * presumably cannot raise; confirm against the helpers.
 */
static bool do_env_qf(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_env, TCGv_i32))
{
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    src = gen_load_fpr_F(dc, a->rs);
    func(tcg_env, src);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)

/*
 * 64-bit to float128 conversion; result comes back in QT0.
 */
static bool do_env_qd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_env, TCGv_i64))
{
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    src = gen_load_fpr_D(dc, a->rs);
    func(tcg_env, src);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
4938 
4939 static bool do_fff(DisasContext *dc, arg_r_r_r *a,
4940                    void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
4941 {
4942     TCGv_i32 src1, src2;
4943 
4944     if (gen_trap_ifnofpu(dc)) {
4945         return true;
4946     }
4947 
4948     src1 = gen_load_fpr_F(dc, a->rs1);
4949     src2 = gen_load_fpr_F(dc, a->rs2);
4950     func(src1, src1, src2);
4951     gen_store_fpr_F(dc, a->rd, src1);
4952     return advance_pc(dc);
4953 }
4954 
4955 TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
4956 TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
4957 TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
4958 TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
4959 TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
4960 TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
4961 TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
4962 TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
4963 TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
4964 TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
4965 TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
4966 TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
4967 
/*
 * 32-bit FP binop using env, with IEEE exception checking:
 * FADDs/FSUBs/FMULs/FDIVs.
 */
static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    /* Compute in place into src1's value. */
    func(src1, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
4990 
4991 static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
4992                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
4993 {
4994     TCGv_i64 dst, src1, src2;
4995 
4996     if (gen_trap_ifnofpu(dc)) {
4997         return true;
4998     }
4999 
5000     dst = gen_dest_fpr_D(dc, a->rd);
5001     src1 = gen_load_fpr_D(dc, a->rs1);
5002     src2 = gen_load_fpr_D(dc, a->rs2);
5003     func(dst, src1, src2);
5004     gen_store_fpr_D(dc, a->rd, dst);
5005     return advance_pc(dc);
5006 }
5007 
5008 TRANS(FMUL8x16, VIS1, do_ddd, a, gen_helper_fmul8x16)
5009 TRANS(FMUL8x16AU, VIS1, do_ddd, a, gen_helper_fmul8x16au)
5010 TRANS(FMUL8x16AL, VIS1, do_ddd, a, gen_helper_fmul8x16al)
5011 TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
5012 TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
5013 TRANS(FMULD8SUx16, VIS1, do_ddd, a, gen_helper_fmuld8sux16)
5014 TRANS(FMULD8ULx16, VIS1, do_ddd, a, gen_helper_fmuld8ulx16)
5015 TRANS(FPMERGE, VIS1, do_ddd, a, gen_helper_fpmerge)
5016 TRANS(FEXPAND, VIS1, do_ddd, a, gen_helper_fexpand)
5017 
5018 TRANS(FPADD16, VIS1, do_ddd, a, tcg_gen_vec_add16_i64)
5019 TRANS(FPADD32, VIS1, do_ddd, a, tcg_gen_vec_add32_i64)
5020 TRANS(FPSUB16, VIS1, do_ddd, a, tcg_gen_vec_sub16_i64)
5021 TRANS(FPSUB32, VIS1, do_ddd, a, tcg_gen_vec_sub32_i64)
5022 TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
5023 TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
5024 TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
5025 TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
5026 TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
5027 TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
5028 TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
5029 TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)
5030 
5031 TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
5032 TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
5033 TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
5034 
5035 static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
5036                    void (*func)(TCGv, TCGv_i64, TCGv_i64))
5037 {
5038     TCGv_i64 src1, src2;
5039     TCGv dst;
5040 
5041     if (gen_trap_ifnofpu(dc)) {
5042         return true;
5043     }
5044 
5045     dst = gen_dest_gpr(dc, a->rd);
5046     src1 = gen_load_fpr_D(dc, a->rs1);
5047     src2 = gen_load_fpr_D(dc, a->rs2);
5048     func(dst, src1, src2);
5049     gen_store_gpr(dc, a->rd, dst);
5050     return advance_pc(dc);
5051 }
5052 
5053 TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
5054 TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
5055 TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
5056 TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
5057 
5058 TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
5059 TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
5060 TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
5061 TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
5062 
5063 static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
5064                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
5065 {
5066     TCGv_i64 dst, src1, src2;
5067 
5068     if (gen_trap_ifnofpu(dc)) {
5069         return true;
5070     }
5071 
5072     gen_op_clear_ieee_excp_and_FTT();
5073     dst = gen_dest_fpr_D(dc, a->rd);
5074     src1 = gen_load_fpr_D(dc, a->rs1);
5075     src2 = gen_load_fpr_D(dc, a->rs2);
5076     func(dst, tcg_env, src1, src2);
5077     gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
5078     gen_store_fpr_D(dc, a->rd, dst);
5079     return advance_pc(dc);
5080 }
5081 
5082 TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
5083 TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
5084 TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
5085 TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
5086 
5087 static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
5088 {
5089     TCGv_i64 dst;
5090     TCGv_i32 src1, src2;
5091 
5092     if (gen_trap_ifnofpu(dc)) {
5093         return true;
5094     }
5095     if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
5096         return raise_unimpfpop(dc);
5097     }
5098 
5099     gen_op_clear_ieee_excp_and_FTT();
5100     dst = gen_dest_fpr_D(dc, a->rd);
5101     src1 = gen_load_fpr_F(dc, a->rs1);
5102     src2 = gen_load_fpr_F(dc, a->rs2);
5103     gen_helper_fsmuld(dst, tcg_env, src1, src2);
5104     gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
5105     gen_store_fpr_D(dc, a->rd, dst);
5106     return advance_pc(dc);
5107 }
5108 
5109 static bool do_dddd(DisasContext *dc, arg_r_r_r *a,
5110                     void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
5111 {
5112     TCGv_i64 dst, src0, src1, src2;
5113 
5114     if (gen_trap_ifnofpu(dc)) {
5115         return true;
5116     }
5117 
5118     dst  = gen_dest_fpr_D(dc, a->rd);
5119     src0 = gen_load_fpr_D(dc, a->rd);
5120     src1 = gen_load_fpr_D(dc, a->rs1);
5121     src2 = gen_load_fpr_D(dc, a->rs2);
5122     func(dst, src0, src1, src2);
5123     gen_store_fpr_D(dc, a->rd, dst);
5124     return advance_pc(dc);
5125 }
5126 
5127 TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
5128 
5129 static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
5130                        void (*func)(TCGv_env))
5131 {
5132     if (gen_trap_ifnofpu(dc)) {
5133         return true;
5134     }
5135     if (gen_trap_float128(dc)) {
5136         return true;
5137     }
5138 
5139     gen_op_clear_ieee_excp_and_FTT();
5140     gen_op_load_fpr_QT0(QFPREG(a->rs1));
5141     gen_op_load_fpr_QT1(QFPREG(a->rs2));
5142     func(tcg_env);
5143     gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
5144     gen_op_store_QT0_fpr(QFPREG(a->rd));
5145     gen_update_fprs_dirty(dc, QFPREG(a->rd));
5146     return advance_pc(dc);
5147 }
5148 
5149 TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
5150 TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
5151 TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
5152 TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
5153 
5154 static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
5155 {
5156     TCGv_i64 src1, src2;
5157 
5158     if (gen_trap_ifnofpu(dc)) {
5159         return true;
5160     }
5161     if (gen_trap_float128(dc)) {
5162         return true;
5163     }
5164 
5165     gen_op_clear_ieee_excp_and_FTT();
5166     src1 = gen_load_fpr_D(dc, a->rs1);
5167     src2 = gen_load_fpr_D(dc, a->rs2);
5168     gen_helper_fdmulq(tcg_env, src1, src2);
5169     gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
5170     gen_op_store_QT0_fpr(QFPREG(a->rd));
5171     gen_update_fprs_dirty(dc, QFPREG(a->rd));
5172     return advance_pc(dc);
5173 }
5174 
5175 static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
5176                      void (*func)(DisasContext *, DisasCompare *, int, int))
5177 {
5178     DisasCompare cmp;
5179 
5180     if (gen_trap_ifnofpu(dc)) {
5181         return true;
5182     }
5183     if (is_128 && gen_trap_float128(dc)) {
5184         return true;
5185     }
5186 
5187     gen_op_clear_ieee_excp_and_FTT();
5188     gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
5189     func(dc, &cmp, a->rd, a->rs2);
5190     return advance_pc(dc);
5191 }
5192 
5193 TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
5194 TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
5195 TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
5196 
5197 static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
5198                       void (*func)(DisasContext *, DisasCompare *, int, int))
5199 {
5200     DisasCompare cmp;
5201 
5202     if (gen_trap_ifnofpu(dc)) {
5203         return true;
5204     }
5205     if (is_128 && gen_trap_float128(dc)) {
5206         return true;
5207     }
5208 
5209     gen_op_clear_ieee_excp_and_FTT();
5210     gen_compare(&cmp, a->cc, a->cond, dc);
5211     func(dc, &cmp, a->rd, a->rs2);
5212     return advance_pc(dc);
5213 }
5214 
5215 TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
5216 TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
5217 TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
5218 
5219 static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
5220                        void (*func)(DisasContext *, DisasCompare *, int, int))
5221 {
5222     DisasCompare cmp;
5223 
5224     if (gen_trap_ifnofpu(dc)) {
5225         return true;
5226     }
5227     if (is_128 && gen_trap_float128(dc)) {
5228         return true;
5229     }
5230 
5231     gen_op_clear_ieee_excp_and_FTT();
5232     gen_fcompare(&cmp, a->cc, a->cond);
5233     func(dc, &cmp, a->rd, a->rs2);
5234     return advance_pc(dc);
5235 }
5236 
5237 TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
5238 TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
5239 TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
5240 
5241 static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
5242 {
5243     TCGv_i32 src1, src2;
5244 
5245     if (avail_32(dc) && a->cc != 0) {
5246         return false;
5247     }
5248     if (gen_trap_ifnofpu(dc)) {
5249         return true;
5250     }
5251 
5252     gen_op_clear_ieee_excp_and_FTT();
5253     src1 = gen_load_fpr_F(dc, a->rs1);
5254     src2 = gen_load_fpr_F(dc, a->rs2);
5255     if (e) {
5256         gen_op_fcmpes(a->cc, src1, src2);
5257     } else {
5258         gen_op_fcmps(a->cc, src1, src2);
5259     }
5260     return advance_pc(dc);
5261 }
5262 
5263 TRANS(FCMPs, ALL, do_fcmps, a, false)
5264 TRANS(FCMPEs, ALL, do_fcmps, a, true)
5265 
5266 static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
5267 {
5268     TCGv_i64 src1, src2;
5269 
5270     if (avail_32(dc) && a->cc != 0) {
5271         return false;
5272     }
5273     if (gen_trap_ifnofpu(dc)) {
5274         return true;
5275     }
5276 
5277     gen_op_clear_ieee_excp_and_FTT();
5278     src1 = gen_load_fpr_D(dc, a->rs1);
5279     src2 = gen_load_fpr_D(dc, a->rs2);
5280     if (e) {
5281         gen_op_fcmped(a->cc, src1, src2);
5282     } else {
5283         gen_op_fcmpd(a->cc, src1, src2);
5284     }
5285     return advance_pc(dc);
5286 }
5287 
5288 TRANS(FCMPd, ALL, do_fcmpd, a, false)
5289 TRANS(FCMPEd, ALL, do_fcmpd, a, true)
5290 
5291 static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
5292 {
5293     if (avail_32(dc) && a->cc != 0) {
5294         return false;
5295     }
5296     if (gen_trap_ifnofpu(dc)) {
5297         return true;
5298     }
5299     if (gen_trap_float128(dc)) {
5300         return true;
5301     }
5302 
5303     gen_op_clear_ieee_excp_and_FTT();
5304     gen_op_load_fpr_QT0(QFPREG(a->rs1));
5305     gen_op_load_fpr_QT1(QFPREG(a->rs2));
5306     if (e) {
5307         gen_op_fcmpeq(a->cc);
5308     } else {
5309         gen_op_fcmpq(a->cc);
5310     }
5311     return advance_pc(dc);
5312 }
5313 
5314 TRANS(FCMPq, ALL, do_fcmpq, a, false)
5315 TRANS(FCMPEq, ALL, do_fcmpq, a, true)
5316 
/*
 * TranslatorOps hook: initialize per-TB translation state from the
 * TB flags and CPU definition before any insn is translated.
 */
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cpu_env(cs);
    int bound;

    dc->pc = dc->base.pc_first;
    /* The npc of the first insn is passed in via the TB's cs_base. */
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &env->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    /* Number of 4-byte insns remaining on the current page. */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
5347 
static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
    /* No per-TB prologue is needed for SPARC. */
}
5351 
/*
 * TranslatorOps hook: record (pc, npc) for this insn in the op stream,
 * flattening symbolic npc encodings for sparc_restore_state_to_opc.
 */
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    /* Nonzero low bits mark a symbolic, not-yet-resolved npc. */
    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            /* The second jump target must be the fallthrough insn. */
            assert(dc->jump_pc[1] == dc->pc + 4);
            /* Encode the taken target with the JUMP_PC tag in bits 0-1. */
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}
5373 
5374 static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
5375 {
5376     DisasContext *dc = container_of(dcbase, DisasContext, base);
5377     CPUSPARCState *env = cpu_env(cs);
5378     unsigned int insn;
5379 
5380     insn = translator_ldl(env, &dc->base, dc->pc);
5381     dc->base.pc_next += 4;
5382 
5383     if (!decode(dc, insn)) {
5384         gen_exception(dc, TT_ILL_INSN);
5385     }
5386 
5387     if (dc->base.is_jmp == DISAS_NORETURN) {
5388         return;
5389     }
5390     if (dc->pc != dc->base.pc_next) {
5391         dc->base.is_jmp = DISAS_TOO_MANY;
5392     }
5393 }
5394 
/*
 * TranslatorOps hook: emit the TB epilogue -- resolve the final
 * pc/npc state, pick an exit mechanism, and emit any queued
 * delay-slot exception stubs.
 */
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        /*
         * At least one of pc/npc is symbolic.  Materialize whatever is
         * concrete and decide whether a TB-lookup exit is still legal.
         */
        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                /* Resolve the pending conditional-branch targets. */
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
       break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    /* Emit the out-of-line stubs for delay-slot exceptions. */
    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        /* Only store npc when it is a concrete (4-aligned) address. */
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}
5474 
5475 static void sparc_tr_disas_log(const DisasContextBase *dcbase,
5476                                CPUState *cpu, FILE *logfile)
5477 {
5478     fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
5479     target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
5480 }
5481 
/* Hooks wired into the generic translator_loop driver. */
static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
    .disas_log          = sparc_tr_disas_log,
};
5490 
5491 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
5492                            target_ulong pc, void *host_pc)
5493 {
5494     DisasContext dc = {};
5495 
5496     translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
5497 }
5498 
/*
 * One-time TCG setup: create the global TCG variables that mirror
 * the CPUSPARCState fields used by the translator.
 */
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };
    /* One name per 64-bit fpr[] slot, i.e. per even-numbered %f pair. */
    static const char fregnames[32][4] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };

    /* 32-bit globals, keyed by their CPUSPARCState offset. */
    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
#endif
        { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
        { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
    };

    /* target_ulong-sized globals. */
    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
#endif
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
        { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
        { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
        { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    /* %g0 reads as zero and discards writes; it has no backing storage. */
    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    /* Windowed registers are addressed indirectly through regwptr. */
    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }

    for (i = 0; i < TARGET_DPREGS; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
                                            offsetof(CPUSPARCState, fpr[i]),
                                            fregnames[i]);
    }
}
5571 
5572 void sparc_restore_state_to_opc(CPUState *cs,
5573                                 const TranslationBlock *tb,
5574                                 const uint64_t *data)
5575 {
5576     SPARCCPU *cpu = SPARC_CPU(cs);
5577     CPUSPARCState *env = &cpu->env;
5578     target_ulong pc = data[0];
5579     target_ulong npc = data[1];
5580 
5581     env->pc = pc;
5582     if (npc == DYNAMIC_PC) {
5583         /* dynamic NPC: already stored */
5584     } else if (npc & JUMP_PC) {
5585         /* jump PC: use 'cond' and the jump targets of the translation */
5586         if (env->cond) {
5587             env->npc = npc & ~3;
5588         } else {
5589             env->npc = pc + 4;
5590         }
5591     } else {
5592         env->npc = npc;
5593     }
5594 }
5595