xref: /qemu/target/sparc/translate.c (revision 7d5ebd8f)
1 /*
2    SPARC translation
3 
4    Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5    Copyright (C) 2003-2005 Fabrice Bellard
6 
7    This library is free software; you can redistribute it and/or
8    modify it under the terms of the GNU Lesser General Public
9    License as published by the Free Software Foundation; either
10    version 2.1 of the License, or (at your option) any later version.
11 
12    This library is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15    Lesser General Public License for more details.
16 
17    You should have received a copy of the GNU Lesser General Public
18    License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 
23 #include "cpu.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "tcg/tcg-op.h"
27 #include "tcg/tcg-op-gvec.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "exec/log.h"
31 #include "fpu/softfloat.h"
32 #include "asi.h"
33 
34 #define HELPER_H "helper.h"
35 #include "exec/helper-info.c.inc"
36 #undef  HELPER_H
37 
38 #ifdef TARGET_SPARC64
39 # define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
40 # define gen_helper_rdasr17(D, E)               qemu_build_not_reached()
41 # define gen_helper_rett(E)                     qemu_build_not_reached()
42 # define gen_helper_power_down(E)               qemu_build_not_reached()
43 # define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
44 #else
45 # define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
46 # define gen_helper_done(E)                     qemu_build_not_reached()
47 # define gen_helper_flushw(E)                   qemu_build_not_reached()
48 # define gen_helper_fmul8x16a(D, S1, S2)        qemu_build_not_reached()
49 # define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
50 # define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
51 # define gen_helper_restored(E)                 qemu_build_not_reached()
52 # define gen_helper_retry(E)                    qemu_build_not_reached()
53 # define gen_helper_saved(E)                    qemu_build_not_reached()
54 # define gen_helper_set_softint(E, S)           qemu_build_not_reached()
55 # define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
56 # define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
57 # define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
58 # define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
59 # define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
60 # define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
61 # define gen_helper_write_softint(E, S)         qemu_build_not_reached()
62 # define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
63 # define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
64 # define gen_helper_cmask8               ({ qemu_build_not_reached(); NULL; })
65 # define gen_helper_cmask16              ({ qemu_build_not_reached(); NULL; })
66 # define gen_helper_cmask32              ({ qemu_build_not_reached(); NULL; })
67 # define gen_helper_fcmpeq8              ({ qemu_build_not_reached(); NULL; })
68 # define gen_helper_fcmpeq16             ({ qemu_build_not_reached(); NULL; })
69 # define gen_helper_fcmpeq32             ({ qemu_build_not_reached(); NULL; })
70 # define gen_helper_fcmpgt16             ({ qemu_build_not_reached(); NULL; })
71 # define gen_helper_fcmpgt32             ({ qemu_build_not_reached(); NULL; })
72 # define gen_helper_fcmple16             ({ qemu_build_not_reached(); NULL; })
73 # define gen_helper_fcmple32             ({ qemu_build_not_reached(); NULL; })
74 # define gen_helper_fcmpne8              ({ qemu_build_not_reached(); NULL; })
75 # define gen_helper_fcmpne16             ({ qemu_build_not_reached(); NULL; })
76 # define gen_helper_fcmpne32             ({ qemu_build_not_reached(); NULL; })
77 # define gen_helper_fcmpule8             ({ qemu_build_not_reached(); NULL; })
78 # define gen_helper_fcmpugt8             ({ qemu_build_not_reached(); NULL; })
79 # define gen_helper_fdtox                ({ qemu_build_not_reached(); NULL; })
80 # define gen_helper_fexpand              ({ qemu_build_not_reached(); NULL; })
81 # define gen_helper_fmul8sux16           ({ qemu_build_not_reached(); NULL; })
82 # define gen_helper_fmul8ulx16           ({ qemu_build_not_reached(); NULL; })
83 # define gen_helper_fmul8x16             ({ qemu_build_not_reached(); NULL; })
84 # define gen_helper_fpmerge              ({ qemu_build_not_reached(); NULL; })
85 # define gen_helper_fqtox                ({ qemu_build_not_reached(); NULL; })
86 # define gen_helper_fslas16              ({ qemu_build_not_reached(); NULL; })
87 # define gen_helper_fslas32              ({ qemu_build_not_reached(); NULL; })
88 # define gen_helper_fstox                ({ qemu_build_not_reached(); NULL; })
89 # define gen_helper_fxtod                ({ qemu_build_not_reached(); NULL; })
90 # define gen_helper_fxtoq                ({ qemu_build_not_reached(); NULL; })
91 # define gen_helper_fxtos                ({ qemu_build_not_reached(); NULL; })
92 # define gen_helper_pdist                ({ qemu_build_not_reached(); NULL; })
93 # define MAXTL_MASK                             0
94 #endif
95 
96 /* Dynamic PC, must exit to main loop. */
97 #define DYNAMIC_PC         1
98 /* Dynamic PC, one of two values according to jump_pc[T2]. */
99 #define JUMP_PC            2
100 /* Dynamic PC, may lookup next TB. */
101 #define DYNAMIC_PC_LOOKUP  3
102 
103 #define DISAS_EXIT  DISAS_TARGET_0
104 
105 /* global register indexes */
106 static TCGv_ptr cpu_regwptr;
107 static TCGv cpu_pc, cpu_npc;
108 static TCGv cpu_regs[32];
109 static TCGv cpu_y;
110 static TCGv cpu_tbr;
111 static TCGv cpu_cond;
112 static TCGv cpu_cc_N;
113 static TCGv cpu_cc_V;
114 static TCGv cpu_icc_Z;
115 static TCGv cpu_icc_C;
116 #ifdef TARGET_SPARC64
117 static TCGv cpu_xcc_Z;
118 static TCGv cpu_xcc_C;
119 static TCGv_i32 cpu_fprs;
120 static TCGv cpu_gsr;
121 #else
122 # define cpu_fprs               ({ qemu_build_not_reached(); (TCGv)NULL; })
123 # define cpu_gsr                ({ qemu_build_not_reached(); (TCGv)NULL; })
124 #endif
125 
126 #ifdef TARGET_SPARC64
127 #define cpu_cc_Z  cpu_xcc_Z
128 #define cpu_cc_C  cpu_xcc_C
129 #else
130 #define cpu_cc_Z  cpu_icc_Z
131 #define cpu_cc_C  cpu_icc_C
132 #define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
133 #define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
134 #endif
135 
136 /* Floating point comparison registers */
137 static TCGv_i32 cpu_fcc[TARGET_FCCREGS];
138 
139 #define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
140 #ifdef TARGET_SPARC64
141 # define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
142 # define env64_field_offsetof(X)  env_field_offsetof(X)
143 #else
144 # define env32_field_offsetof(X)  env_field_offsetof(X)
145 # define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
146 #endif
147 
148 typedef struct DisasCompare {
149     TCGCond cond;
150     TCGv c1;
151     int c2;
152 } DisasCompare;
153 
154 typedef struct DisasDelayException {
155     struct DisasDelayException *next;
156     TCGLabel *lab;
157     TCGv_i32 excp;
158     /* Saved state at parent insn. */
159     target_ulong pc;
160     target_ulong npc;
161 } DisasDelayException;
162 
163 typedef struct DisasContext {
164     DisasContextBase base;
165     target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
166     target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
167 
168     /* Used when JUMP_PC value is used. */
169     DisasCompare jump;
170     target_ulong jump_pc[2];
171 
172     int mem_idx;
173     bool cpu_cond_live;
174     bool fpu_enabled;
175     bool address_mask_32bit;
176 #ifndef CONFIG_USER_ONLY
177     bool supervisor;
178 #ifdef TARGET_SPARC64
179     bool hypervisor;
180 #endif
181 #endif
182 
183     sparc_def_t *def;
184 #ifdef TARGET_SPARC64
185     int fprs_dirty;
186     int asi;
187 #endif
188     DisasDelayException *delay_excp_list;
189 } DisasContext;
190 
191 // This function uses non-native bit order
192 #define GET_FIELD(X, FROM, TO)                                  \
193     ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
194 
195 // This function uses the order in the manuals, i.e. bit 0 is 2^0
196 #define GET_FIELD_SP(X, FROM, TO)               \
197     GET_FIELD(X, 31 - (TO), 31 - (FROM))
198 
199 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
200 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
201 
202 #define UA2005_HTRAP_MASK 0xff
203 #define V8_TRAP_MASK 0x7f
204 
205 #define IS_IMM (insn & (1<<13))
206 
207 static void gen_update_fprs_dirty(DisasContext *dc, int rd)
208 {
209 #if defined(TARGET_SPARC64)
210     int bit = (rd < 32) ? 1 : 2;
211     /* If we know we've already set this bit within the TB,
212        we can avoid setting it again.  */
213     if (!(dc->fprs_dirty & bit)) {
214         dc->fprs_dirty |= bit;
215         tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
216     }
217 #endif
218 }
219 
220 /* floating point registers moves */
221 
222 static int gen_offset_fpr_F(unsigned int reg)
223 {
224     int ret;
225 
226     tcg_debug_assert(reg < 32);
227     ret= offsetof(CPUSPARCState, fpr[reg / 2]);
228     if (reg & 1) {
229         ret += offsetof(CPU_DoubleU, l.lower);
230     } else {
231         ret += offsetof(CPU_DoubleU, l.upper);
232     }
233     return ret;
234 }
235 
236 static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
237 {
238     TCGv_i32 ret = tcg_temp_new_i32();
239     tcg_gen_ld_i32(ret, tcg_env, gen_offset_fpr_F(src));
240     return ret;
241 }
242 
/* Store V into single-precision register %f[DST] and mark FPRS dirty. */
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    tcg_gen_st_i32(v, tcg_env, gen_offset_fpr_F(dst));
    gen_update_fprs_dirty(dc, dst);
}
248 
/* Byte offset within CPUSPARCState of double-precision register REG. */
static int gen_offset_fpr_D(unsigned int reg)
{
    tcg_debug_assert(reg < 64);
    tcg_debug_assert(reg % 2 == 0);  /* D registers are even-numbered */
    return offsetof(CPUSPARCState, fpr[reg / 2]);
}
255 
256 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
257 {
258     TCGv_i64 ret = tcg_temp_new_i64();
259     tcg_gen_ld_i64(ret, tcg_env, gen_offset_fpr_D(src));
260     return ret;
261 }
262 
/* Store V into double-precision register %d[DST] and mark FPRS dirty. */
static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    tcg_gen_st_i64(v, tcg_env, gen_offset_fpr_D(dst));
    gen_update_fprs_dirty(dc, dst);
}
268 
269 static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
270 {
271     TCGv_i128 ret = tcg_temp_new_i128();
272     TCGv_i64 h = gen_load_fpr_D(dc, src);
273     TCGv_i64 l = gen_load_fpr_D(dc, src + 2);
274 
275     tcg_gen_concat_i64_i128(ret, l, h);
276     return ret;
277 }
278 
279 static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
280 {
281     TCGv_i64 h = tcg_temp_new_i64();
282     TCGv_i64 l = tcg_temp_new_i64();
283 
284     tcg_gen_extr_i128_i64(l, h, v);
285     gen_store_fpr_D(dc, dst, h);
286     gen_store_fpr_D(dc, dst + 2, l);
287 }
288 
289 /* moves */
290 #ifdef CONFIG_USER_ONLY
291 #define supervisor(dc) 0
292 #define hypervisor(dc) 0
293 #else
294 #ifdef TARGET_SPARC64
295 #define hypervisor(dc) (dc->hypervisor)
296 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
297 #else
298 #define supervisor(dc) (dc->supervisor)
299 #define hypervisor(dc) 0
300 #endif
301 #endif
302 
303 #if !defined(TARGET_SPARC64)
304 # define AM_CHECK(dc)  false
305 #elif defined(TARGET_ABI32)
306 # define AM_CHECK(dc)  true
307 #elif defined(CONFIG_USER_ONLY)
308 # define AM_CHECK(dc)  false
309 #else
310 # define AM_CHECK(dc)  ((dc)->address_mask_32bit)
311 #endif
312 
/* Truncate ADDR in place to 32 bits when address masking applies. */
static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}
319 
320 static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
321 {
322     return AM_CHECK(dc) ? (uint32_t)addr : addr;
323 }
324 
325 static TCGv gen_load_gpr(DisasContext *dc, int reg)
326 {
327     if (reg > 0) {
328         assert(reg < 32);
329         return cpu_regs[reg];
330     } else {
331         TCGv t = tcg_temp_new();
332         tcg_gen_movi_tl(t, 0);
333         return t;
334     }
335 }
336 
/* Store V into general register REG; writes to %g0 are discarded. */
static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}
344 
345 static TCGv gen_dest_gpr(DisasContext *dc, int reg)
346 {
347     if (reg > 0) {
348         assert(reg < 32);
349         return cpu_regs[reg];
350     } else {
351         return tcg_temp_new();
352     }
353 }
354 
/* True if both PC and NPC can be reached via a direct TB link. */
static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}
360 
/* End the TB with a jump to PC/NPC, chaining TBs directly when possible. */
static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc))  {
        /* Direct jump: goto_tb, then the pc/npc stores, then exit. */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* Indirect jump: set pc/npc and let the TB lookup find a match. */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}
377 
378 static TCGv gen_carry32(void)
379 {
380     if (TARGET_LONG_BITS == 64) {
381         TCGv t = tcg_temp_new();
382         tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
383         return t;
384     }
385     return cpu_icc_C;
386 }
387 
/*
 * Emit DST = SRC1 + SRC2 (+ CIN), computing all condition codes.
 * CIN may be NULL for a plain add without carry-in.
 */
static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    /* V = (res ^ src2) & ~(src1 ^ src2); Z serves as scratch here. */
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    /* N and Z both hold the raw result; Z is tested against zero later. */
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
412 
/* ADDcc: add, setting all condition codes. */
static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}
417 
/*
 * TADDcc: tagged add.  Like ADDcc, but icc.V is additionally set
 * when either operand has a nonzero tag (low two bits).
 */
static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}
433 
/* ADDC: add with the 32-bit carry-in; condition codes untouched. */
static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}
439 
/* ADDCcc: add with the 32-bit carry-in, setting condition codes. */
static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}
444 
/* ADDXC: add with the carry in cpu_cc_C; condition codes untouched. */
static void gen_op_addxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, cpu_cc_C);
}
450 
/* ADDXCcc: add with the carry in cpu_cc_C, setting condition codes. */
static void gen_op_addxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, cpu_cc_C);
}
455 
456 static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
457 {
458     TCGv z = tcg_constant_tl(0);
459 
460     if (cin) {
461         tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
462         tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
463     } else {
464         tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
465     }
466     tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
467     tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
468     tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
469     tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
470 #ifdef TARGET_SPARC64
471     tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
472     tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
473 #endif
474     tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
475     tcg_gen_mov_tl(dst, cpu_cc_N);
476 }
477 
/* SUBcc: subtract, setting all condition codes. */
static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}
482 
/*
 * TSUBcc: tagged subtract.  Like SUBcc, but icc.V is additionally
 * set when either operand has a nonzero tag (low two bits).
 */
static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}
498 
/* SUBC: subtract with the 32-bit borrow; condition codes untouched. */
static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}
504 
/* SUBCcc: subtract with the 32-bit borrow, setting condition codes. */
static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}
509 
/*
 * MULScc: one step of the 32-bit multiply-step algorithm.
 * Consumes and updates %y, shifts in the previous N ^ V bit,
 * and sets the condition codes via gen_op_addcc.
 */
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv one = tcg_constant_tl(1);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    /* b1 = N ^ V; */
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}
546 
/*
 * UMUL/SMUL: full 32x32 multiply of the truncated operands.
 * On a 32-bit target, DST gets the low half and %y the high half;
 * on a 64-bit target, DST gets the whole 64-bit product and %y the
 * high 32 bits.
 */
static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    /* Truncate both operands to 32 bits, signed or unsigned. */
    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
571 
/* UMUL: unsigned multiply. */
static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}
577 
/* SMUL: signed multiply. */
static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
583 
/*
 * SDIV: signed 32-bit divide, performed in the helper (tcg_env is
 * passed so the helper can raise traps).  The 32-bit result is
 * sign-extended into DST.
 */
static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}
595 
/*
 * UDIVcc: unsigned divide, setting the condition codes.  The helper
 * returns a 64-bit pack: quotient in the low half, with the high
 * half feeding cc.V (the overflow flag -- see helper_udiv).
 */
static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    /* On sparc64, TCGv is 64-bit, so cc.V can receive the pack directly. */
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N)<
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
620 
/*
 * SDIVcc: signed divide, setting the condition codes.  Same result
 * packing as gen_op_udivcc, but the quotient is sign-extended.
 */
static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    /* On sparc64, TCGv is 64-bit, so cc.V can receive the pack directly. */
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
645 
/* TADDccTV: tagged add, trapping variant -- handled entirely in the helper. */
static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}
650 
/* TSUBccTV: tagged subtract, trapping variant -- handled entirely in the helper. */
static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}
655 
/* POPC: population count of SRC2; SRC1 is unused by this insn. */
static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}
660 
/* LZCNT: count leading zeros; an all-zero input yields TARGET_LONG_BITS. */
static void gen_op_lzcnt(TCGv dst, TCGv src)
{
    tcg_gen_clzi_tl(dst, src, TARGET_LONG_BITS);
}
665 
#ifndef TARGET_SPARC64
/* Link-time stub for 32-bit builds; the ARRAY* paths assert if reached. */
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif
672 
/* ARRAY16: ARRAY8 address scaled by 2. */
static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}
678 
/* ARRAY32: ARRAY8 address scaled by 4. */
static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}
684 
/* VIS FPACK16: all work is in the helper, which needs %gsr; sparc64-only. */
static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}
693 
/* VIS FPACKFIX: all work is in the helper, which needs %gsr; sparc64-only. */
static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}
702 
/* VIS FPACK32: all work is in the helper, which needs %gsr; sparc64-only. */
static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
711 
712 static void gen_op_fpadds16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
713 {
714     TCGv_i32 t[2];
715 
716     for (int i = 0; i < 2; i++) {
717         TCGv_i32 u = tcg_temp_new_i32();
718         TCGv_i32 v = tcg_temp_new_i32();
719 
720         tcg_gen_sextract_i32(u, src1, i * 16, 16);
721         tcg_gen_sextract_i32(v, src2, i * 16, 16);
722         tcg_gen_add_i32(u, u, v);
723         tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
724         tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
725         t[i] = u;
726     }
727     tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
728 }
729 
730 static void gen_op_fpsubs16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
731 {
732     TCGv_i32 t[2];
733 
734     for (int i = 0; i < 2; i++) {
735         TCGv_i32 u = tcg_temp_new_i32();
736         TCGv_i32 v = tcg_temp_new_i32();
737 
738         tcg_gen_sextract_i32(u, src1, i * 16, 16);
739         tcg_gen_sextract_i32(v, src2, i * 16, 16);
740         tcg_gen_sub_i32(u, u, v);
741         tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
742         tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
743         t[i] = u;
744     }
745     tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
746 }
747 
/* Signed 32-bit saturating add. */
static void gen_op_fpadds32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    /* r = wrapped sum; v < 0 iff signed overflow occurred. */
    tcg_gen_add_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src2);
    tcg_gen_andc_i32(v, v, t);

    /* t = INT32_MAX if the wrapped sum is negative, else INT32_MIN. */
    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    /* On overflow, select the saturation value instead of the sum. */
    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}
765 
/* Signed 32-bit saturating subtract. */
static void gen_op_fpsubs32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    /* r = wrapped difference; v < 0 iff signed overflow occurred. */
    tcg_gen_sub_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src1);
    tcg_gen_and_i32(v, v, t);

    /* t = INT32_MAX if the wrapped difference is negative, else INT32_MIN. */
    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    /* On overflow, select the saturation value instead of the difference. */
    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}
783 
/*
 * VIS FALIGNDATA: extract 8 bytes from the concatenation S1:S2,
 * starting at the byte offset held in the low 3 bits of %gsr.
 */
static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    /* shift = byte offset * 8 */
    tcg_gen_andi_tl(shift, cpu_gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}
810 
/* VIS BSHUFFLE: all work is in the helper, which needs %gsr; sparc64-only. */
static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
819 
/* PDISTN: pixel distance with a zero accumulator (non-accumulating form). */
static void gen_op_pdistn(TCGv dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_pdist(dst, tcg_constant_i64(0), src1, src2);
#else
    g_assert_not_reached();
#endif
}
828 
/* FMUL8x16AL: multiply by the low signed 16 bits of src2.
   NOTE: src2 is clobbered (sign-extended in place) as scratch. */
static void gen_op_fmul8x16al(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_ext16s_i32(src2, src2);
    gen_helper_fmul8x16a(dst, src1, src2);
}
834 
/* FMUL8x16AU: multiply by the high signed 16 bits of src2.
   NOTE: src2 is clobbered (shifted in place) as scratch. */
static void gen_op_fmul8x16au(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_sari_i32(src2, src2, 16);
    gen_helper_fmul8x16a(dst, src1, src2);
}
840 
/*
 * FMULD8ULX16: two lanes of (unsigned 8-bit) * (signed 16-bit),
 * each producing a 32-bit result, concatenated into a 64-bit value.
 */
static void gen_op_fmuld8ulx16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /* Low lane: byte 0 of src1 times the low 16 bits of src2. */
    tcg_gen_ext8u_i32(t0, src1);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    /* High lane: byte 2 of src1 times the high 16 bits of src2. */
    tcg_gen_extract_i32(t1, src1, 16, 8);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}
857 
/*
 * FMULD8SUX16: two lanes of (signed upper 8 bits) * (signed 16-bit),
 * each producing a 32-bit result, concatenated into a 64-bit value.
 */
static void gen_op_fmuld8sux16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /*
     * The insn description talks about extracting the upper 8 bits
     * of the signed 16-bit input rs1, performing the multiply, then
     * shifting left by 8 bits.  Instead, zap the lower 8 bits of
     * the rs1 input, which avoids the need for two shifts.
     */
    tcg_gen_ext16s_i32(t0, src1);
    tcg_gen_andi_i32(t0, t0, ~0xff);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    /* Same trick for the high lane. */
    tcg_gen_sextract_i32(t1, src1, 16, 16);
    tcg_gen_andi_i32(t1, t1, ~0xff);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}
882 
883 #ifdef TARGET_SPARC64
/* Lane-wise 16-bit add with end-around carry (FCHKSM16 vector step). */
static void gen_vec_fchksm16(unsigned vece, TCGv_vec dst,
                             TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec a = tcg_temp_new_vec_matching(dst);
    TCGv_vec c = tcg_temp_new_vec_matching(dst);

    tcg_gen_add_vec(vece, a, src1, src2);
    /* c = -1 in each lane whose add wrapped (unsigned overflow). */
    tcg_gen_cmp_vec(TCG_COND_LTU, vece, c, a, src1);
    /* Vector cmp produces -1 for true, so subtract to add carry. */
    tcg_gen_sub_vec(vece, dst, a, c);
}
895 
/* gvec expander for FCHKSM16: vector path plus 64-bit helper fallback. */
static void gen_op_fchksm16(unsigned vece, uint32_t dofs, uint32_t aofs,
                            uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, INDEX_op_add_vec, INDEX_op_sub_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fchksm16,
        .fniv = gen_vec_fchksm16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}
910 
/*
 * Lane-wise rounded average: d = (s1 >> 1) + (s2 >> 1) + ((s1 | s2) & 1).
 * NOTE(review): src1/src2 are shifted in place, i.e. clobbered.
 */
static void gen_vec_fmean16(unsigned vece, TCGv_vec dst,
                            TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec t = tcg_temp_new_vec_matching(dst);

    /* t = rounding bit, set if either operand is odd. */
    tcg_gen_or_vec(vece, t, src1, src2);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(dst, vece, 1));
    tcg_gen_sari_vec(vece, src1, src1, 1);
    tcg_gen_sari_vec(vece, src2, src2, 1);
    tcg_gen_add_vec(vece, dst, src1, src2);
    tcg_gen_add_vec(vece, dst, dst, t);
}
923 
/* gvec expander for FMEAN16: vector path plus 64-bit helper fallback. */
static void gen_op_fmean16(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_add_vec, INDEX_op_sari_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fmean16,
        .fniv = gen_vec_fmean16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}
938 #else
939 #define gen_op_fchksm16   ({ qemu_build_not_reached(); NULL; })
940 #define gen_op_fmean16    ({ qemu_build_not_reached(); NULL; })
941 #endif
942 
/* Called once the current insn can no longer raise an unwinding exception. */
static void finishing_insn(DisasContext *dc)
{
    /*
     * From here, there is no future path through an unwinding exception.
     * If the current insn cannot raise an exception, the computation of
     * cpu_cond may be able to be elided.
     */
    if (dc->cpu_cond_live) {
        tcg_gen_discard_tl(cpu_cond);
        dc->cpu_cond_live = false;
    }
}
955 
/*
 * Resolve a pending two-way branch (npc == JUMP_PC): select
 * jump_pc[0] or jump_pc[1] into cpu_npc according to dc->jump.
 */
static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv c2 = tcg_constant_tl(dc->jump.c2);

    tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
}
964 
/*
 * Call this function before using the condition register, as it may
 * still be needed to resolve a pending JUMP_PC into cpu_npc.
 */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}
974 
/* Flush the lazily-tracked next-pc into the cpu_npc global. */
static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        /* Low bits set: dc->npc is one of the symbolic markers. */
        switch (dc->npc) {
        case JUMP_PC:
            /* Resolve the pending conditional branch into cpu_npc. */
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* cpu_npc already holds the runtime value. */
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        /* Compile-time constant npc. */
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}
993 
/* Flush both pc and npc to the CPU state globals before a helper call. */
static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
999 
/* Raise trap WHICH at the current insn and terminate the TB. */
static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}
1007 
1008 static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
1009 {
1010     DisasDelayException *e = g_new0(DisasDelayException, 1);
1011 
1012     e->next = dc->delay_excp_list;
1013     dc->delay_excp_list = e;
1014 
1015     e->lab = gen_new_label();
1016     e->excp = excp;
1017     e->pc = dc->pc;
1018     /* Caller must have used flush_cond before branch. */
1019     assert(e->npc != JUMP_PC);
1020     e->npc = dc->npc;
1021 
1022     return e->lab;
1023 }
1024 
/* As delay_exceptionv, for a constant exception number. */
static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}
1029 
1030 static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
1031 {
1032     TCGv t = tcg_temp_new();
1033     TCGLabel *lab;
1034 
1035     tcg_gen_andi_tl(t, addr, mask);
1036 
1037     flush_cond(dc);
1038     lab = delay_exception(dc, TT_UNALIGNED);
1039     tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
1040 }
1041 
/* Advance pc to npc, handling the symbolic npc markers. */
static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            /* Resolve pending conditional branch, then copy to pc. */
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        /* Both values known at translation time. */
        dc->pc = dc->npc;
    }
}
1065 
/*
 * Build a DisasCompare for integer condition COND, evaluated against
 * the xcc flags when XCC, else icc.  The result compares cmp->c1
 * against constant cmp->c2 == 0 with cmp->cond.
 */
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            /* For icc, the carry is kept in bit 32 of cpu_icc_C. */
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    /* Conditions 8-15 are the negations of conditions 0-7. */
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}
1164 
/*
 * Build a DisasCompare for fp condition COND against %fcc[CC].
 * The result compares cmp->c1 against constant cmp->c2.
 */
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    TCGv_i32 fcc = cpu_fcc[cc];
    TCGv_i32 c1 = fcc;
    int c2 = 0;
    TCGCond tcond;

    /*
     * FCC values:
     * 0 =
     * 1 <
     * 2 >
     * 3 unordered
     */
    switch (cond & 7) {
    case 0x0: /* fbn */
        tcond = TCG_COND_NEVER;
        break;
    case 0x1: /* fbne : !0 */
        tcond = TCG_COND_NE;
        break;
    case 0x2: /* fblg : 1 or 2 */
        /* fcc in {1,2} - 1 -> fcc in {0,1} */
        c1 = tcg_temp_new_i32();
        tcg_gen_addi_i32(c1, fcc, -1);
        c2 = 1;
        tcond = TCG_COND_LEU;
        break;
    case 0x3: /* fbul : 1 or 3 */
        /* Both values with bit 0 set. */
        c1 = tcg_temp_new_i32();
        tcg_gen_andi_i32(c1, fcc, 1);
        tcond = TCG_COND_NE;
        break;
    case 0x4: /* fbl  : 1 */
        c2 = 1;
        tcond = TCG_COND_EQ;
        break;
    case 0x5: /* fbug : 2 or 3 */
        c2 = 2;
        tcond = TCG_COND_GEU;
        break;
    case 0x6: /* fbg  : 2 */
        c2 = 2;
        tcond = TCG_COND_EQ;
        break;
    case 0x7: /* fbu  : 3 */
        c2 = 3;
        tcond = TCG_COND_EQ;
        break;
    }
    /* Conditions 8-15 are the negations of conditions 0-7. */
    if (cond & 8) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c2 = c2;
    cmp->c1 = tcg_temp_new();
    tcg_gen_extu_i32_tl(cmp->c1, c1);
}
1224 
1225 static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1226 {
1227     static const TCGCond cond_reg[4] = {
1228         TCG_COND_NEVER,  /* reserved */
1229         TCG_COND_EQ,
1230         TCG_COND_LE,
1231         TCG_COND_LT,
1232     };
1233     TCGCond tcond;
1234 
1235     if ((cond & 3) == 0) {
1236         return false;
1237     }
1238     tcond = cond_reg[cond & 3];
1239     if (cond & 4) {
1240         tcond = tcg_invert_cond(tcond);
1241     }
1242 
1243     cmp->cond = tcond;
1244     cmp->c1 = tcg_temp_new();
1245     cmp->c2 = 0;
1246     tcg_gen_mov_tl(cmp->c1, r_src);
1247     return true;
1248 }
1249 
/* Reset the accrued cexc and ftt fields of %fsr before an FPop. */
static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
}
1255 
/* FMOVS: raw 32-bit copy; counts as an FPop, so clear cexc/ftt. */
static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}
1261 
/* FNEGS: flip the sign bit (bit 31); no rounding involved. */
static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i32(dst, src, 1u << 31);
}
1267 
/* FABSS: clear the sign bit (bit 31). */
static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i32(dst, src, ~(1u << 31));
}
1273 
/* FMOVD: raw 64-bit copy; counts as an FPop, so clear cexc/ftt. */
static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}
1279 
/* FNEGD: flip the sign bit (bit 63). */
static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i64(dst, src, 1ull << 63);
}
1285 
/* FABSD: clear the sign bit (bit 63). */
static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i64(dst, src, ~(1ull << 63));
}
1291 
1292 static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
1293 {
1294     TCGv_i64 l = tcg_temp_new_i64();
1295     TCGv_i64 h = tcg_temp_new_i64();
1296 
1297     tcg_gen_extr_i128_i64(l, h, src);
1298     tcg_gen_xori_i64(h, h, 1ull << 63);
1299     tcg_gen_concat_i64_i128(dst, l, h);
1300 }
1301 
1302 static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
1303 {
1304     TCGv_i64 l = tcg_temp_new_i64();
1305     TCGv_i64 h = tcg_temp_new_i64();
1306 
1307     tcg_gen_extr_i128_i64(l, h, src);
1308     tcg_gen_andi_i64(h, h, ~(1ull << 63));
1309     tcg_gen_concat_i64_i128(dst, l, h);
1310 }
1311 
/* Single-precision fused multiply-add, no flag modifiers. */
static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}
1316 
/* Double-precision fused multiply-add, no flag modifiers. */
static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}
1321 
1322 static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
1323 {
1324     int op = float_muladd_negate_c;
1325     gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
1326 }
1327 
1328 static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
1329 {
1330     int op = float_muladd_negate_c;
1331     gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
1332 }
1333 
1334 static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
1335 {
1336     int op = float_muladd_negate_c | float_muladd_negate_result;
1337     gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
1338 }
1339 
1340 static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
1341 {
1342     int op = float_muladd_negate_c | float_muladd_negate_result;
1343     gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
1344 }
1345 
1346 static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
1347 {
1348     int op = float_muladd_negate_result;
1349     gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
1350 }
1351 
1352 static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
1353 {
1354     int op = float_muladd_negate_result;
1355     gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
1356 }
1357 
1358 /* Use muladd to compute (1 * src1) + src2 / 2 with one rounding. */
1359 static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
1360 {
1361     TCGv_i32 one = tcg_constant_i32(float32_one);
1362     int op = float_muladd_halve_result;
1363     gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
1364 }
1365 
1366 static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
1367 {
1368     TCGv_i64 one = tcg_constant_i64(float64_one);
1369     int op = float_muladd_halve_result;
1370     gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
1371 }
1372 
1373 /* Use muladd to compute (1 * src1) - src2 / 2 with one rounding. */
1374 static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
1375 {
1376     TCGv_i32 one = tcg_constant_i32(float32_one);
1377     int op = float_muladd_negate_c | float_muladd_halve_result;
1378     gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
1379 }
1380 
1381 static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
1382 {
1383     TCGv_i64 one = tcg_constant_i64(float64_one);
1384     int op = float_muladd_negate_c | float_muladd_halve_result;
1385     gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
1386 }
1387 
1388 /* Use muladd to compute -((1 * src1) + src2 / 2) with one rounding. */
1389 static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
1390 {
1391     TCGv_i32 one = tcg_constant_i32(float32_one);
1392     int op = float_muladd_negate_result | float_muladd_halve_result;
1393     gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
1394 }
1395 
1396 static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
1397 {
1398     TCGv_i64 one = tcg_constant_i64(float64_one);
1399     int op = float_muladd_negate_result | float_muladd_halve_result;
1400     gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
1401 }
1402 
/* Raise a TT_FP_EXCP trap with FTT recorded in fsr_cexc_ftt. */
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
{
    /*
     * CEXC is only set when successfully completing an FPop,
     * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
     * Thus we can simply store FTT into this field.
     */
    tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
    gen_exception(dc, TT_FP_EXCP);
}
1414 
/*
 * Raise TT_NFPU_INSN if the FPU is disabled (system emulation only;
 * user mode always has the FPU enabled).  Returns nonzero when the
 * trap was emitted and translation of this insn must stop.
 */
static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}
1425 
/* asi moves */
typedef enum {
    GET_ASI_HELPER,  /* fall back to the out-of-line asi helpers */
    GET_ASI_EXCP,    /* an exception was already generated; emit nothing */
    GET_ASI_DIRECT,  /* plain ld/st using the resolved mmu index */
    GET_ASI_DTWINX,  /* twin/quad 128-bit access (TWINX/QUAD_LDD asis) */
    GET_ASI_CODE,    /* instruction-space access (pre-v9 USERTXT/KERNELTXT) */
    GET_ASI_BLOCK,   /* block fp access (ASI_BLK_*) */
    GET_ASI_SHORT,   /* 8/16-bit fp access (ASI_FL8_*/ASI_FL16_*) */
    GET_ASI_BCOPY,   /* hyperSPARC block copy (ASI_M_BCOPY) */
    GET_ASI_BFILL,   /* hyperSPARC block fill (ASI_M_BFILL) */
} ASIType;
1438 
typedef struct {
    ASIType type;    /* dispatch category, see ASIType */
    int asi;         /* resolved asi number (dc->asi when immediate %asi) */
    int mem_idx;     /* mmu index for direct accesses */
    MemOp memop;     /* access size/sign/endianness */
} DisasASI;
1445 
1446 /*
1447  * Build DisasASI.
1448  * For asi == -1, treat as non-asi.
1449  * For ask == -2, treat as immediate offset (v8 error, v9 %asi).
1450  */
1451 static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
1452 {
1453     ASIType type = GET_ASI_HELPER;
1454     int mem_idx = dc->mem_idx;
1455 
1456     if (asi == -1) {
1457         /* Artificial "non-asi" case. */
1458         type = GET_ASI_DIRECT;
1459         goto done;
1460     }
1461 
1462 #ifndef TARGET_SPARC64
1463     /* Before v9, all asis are immediate and privileged.  */
1464     if (asi < 0) {
1465         gen_exception(dc, TT_ILL_INSN);
1466         type = GET_ASI_EXCP;
1467     } else if (supervisor(dc)
1468                /* Note that LEON accepts ASI_USERDATA in user mode, for
1469                   use with CASA.  Also note that previous versions of
1470                   QEMU allowed (and old versions of gcc emitted) ASI_P
1471                   for LEON, which is incorrect.  */
1472                || (asi == ASI_USERDATA
1473                    && (dc->def->features & CPU_FEATURE_CASA))) {
1474         switch (asi) {
1475         case ASI_USERDATA:    /* User data access */
1476             mem_idx = MMU_USER_IDX;
1477             type = GET_ASI_DIRECT;
1478             break;
1479         case ASI_KERNELDATA:  /* Supervisor data access */
1480             mem_idx = MMU_KERNEL_IDX;
1481             type = GET_ASI_DIRECT;
1482             break;
1483         case ASI_USERTXT:     /* User text access */
1484             mem_idx = MMU_USER_IDX;
1485             type = GET_ASI_CODE;
1486             break;
1487         case ASI_KERNELTXT:   /* Supervisor text access */
1488             mem_idx = MMU_KERNEL_IDX;
1489             type = GET_ASI_CODE;
1490             break;
1491         case ASI_M_BYPASS:    /* MMU passthrough */
1492         case ASI_LEON_BYPASS: /* LEON MMU passthrough */
1493             mem_idx = MMU_PHYS_IDX;
1494             type = GET_ASI_DIRECT;
1495             break;
1496         case ASI_M_BCOPY: /* Block copy, sta access */
1497             mem_idx = MMU_KERNEL_IDX;
1498             type = GET_ASI_BCOPY;
1499             break;
1500         case ASI_M_BFILL: /* Block fill, stda access */
1501             mem_idx = MMU_KERNEL_IDX;
1502             type = GET_ASI_BFILL;
1503             break;
1504         }
1505 
1506         /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
1507          * permissions check in get_physical_address(..).
1508          */
1509         mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
1510     } else {
1511         gen_exception(dc, TT_PRIV_INSN);
1512         type = GET_ASI_EXCP;
1513     }
1514 #else
1515     if (asi < 0) {
1516         asi = dc->asi;
1517     }
1518     /* With v9, all asis below 0x80 are privileged.  */
1519     /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
1520        down that bit into DisasContext.  For the moment that's ok,
1521        since the direct implementations below doesn't have any ASIs
1522        in the restricted [0x30, 0x7f] range, and the check will be
1523        done properly in the helper.  */
1524     if (!supervisor(dc) && asi < 0x80) {
1525         gen_exception(dc, TT_PRIV_ACT);
1526         type = GET_ASI_EXCP;
1527     } else {
1528         switch (asi) {
1529         case ASI_REAL:      /* Bypass */
1530         case ASI_REAL_IO:   /* Bypass, non-cacheable */
1531         case ASI_REAL_L:    /* Bypass LE */
1532         case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1533         case ASI_TWINX_REAL:   /* Real address, twinx */
1534         case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1535         case ASI_QUAD_LDD_PHYS:
1536         case ASI_QUAD_LDD_PHYS_L:
1537             mem_idx = MMU_PHYS_IDX;
1538             break;
1539         case ASI_N:  /* Nucleus */
1540         case ASI_NL: /* Nucleus LE */
1541         case ASI_TWINX_N:
1542         case ASI_TWINX_NL:
1543         case ASI_NUCLEUS_QUAD_LDD:
1544         case ASI_NUCLEUS_QUAD_LDD_L:
1545             if (hypervisor(dc)) {
1546                 mem_idx = MMU_PHYS_IDX;
1547             } else {
1548                 mem_idx = MMU_NUCLEUS_IDX;
1549             }
1550             break;
1551         case ASI_AIUP:  /* As if user primary */
1552         case ASI_AIUPL: /* As if user primary LE */
1553         case ASI_TWINX_AIUP:
1554         case ASI_TWINX_AIUP_L:
1555         case ASI_BLK_AIUP_4V:
1556         case ASI_BLK_AIUP_L_4V:
1557         case ASI_BLK_AIUP:
1558         case ASI_BLK_AIUPL:
1559             mem_idx = MMU_USER_IDX;
1560             break;
1561         case ASI_AIUS:  /* As if user secondary */
1562         case ASI_AIUSL: /* As if user secondary LE */
1563         case ASI_TWINX_AIUS:
1564         case ASI_TWINX_AIUS_L:
1565         case ASI_BLK_AIUS_4V:
1566         case ASI_BLK_AIUS_L_4V:
1567         case ASI_BLK_AIUS:
1568         case ASI_BLK_AIUSL:
1569             mem_idx = MMU_USER_SECONDARY_IDX;
1570             break;
1571         case ASI_S:  /* Secondary */
1572         case ASI_SL: /* Secondary LE */
1573         case ASI_TWINX_S:
1574         case ASI_TWINX_SL:
1575         case ASI_BLK_COMMIT_S:
1576         case ASI_BLK_S:
1577         case ASI_BLK_SL:
1578         case ASI_FL8_S:
1579         case ASI_FL8_SL:
1580         case ASI_FL16_S:
1581         case ASI_FL16_SL:
1582             if (mem_idx == MMU_USER_IDX) {
1583                 mem_idx = MMU_USER_SECONDARY_IDX;
1584             } else if (mem_idx == MMU_KERNEL_IDX) {
1585                 mem_idx = MMU_KERNEL_SECONDARY_IDX;
1586             }
1587             break;
1588         case ASI_P:  /* Primary */
1589         case ASI_PL: /* Primary LE */
1590         case ASI_TWINX_P:
1591         case ASI_TWINX_PL:
1592         case ASI_BLK_COMMIT_P:
1593         case ASI_BLK_P:
1594         case ASI_BLK_PL:
1595         case ASI_FL8_P:
1596         case ASI_FL8_PL:
1597         case ASI_FL16_P:
1598         case ASI_FL16_PL:
1599             break;
1600         }
1601         switch (asi) {
1602         case ASI_REAL:
1603         case ASI_REAL_IO:
1604         case ASI_REAL_L:
1605         case ASI_REAL_IO_L:
1606         case ASI_N:
1607         case ASI_NL:
1608         case ASI_AIUP:
1609         case ASI_AIUPL:
1610         case ASI_AIUS:
1611         case ASI_AIUSL:
1612         case ASI_S:
1613         case ASI_SL:
1614         case ASI_P:
1615         case ASI_PL:
1616             type = GET_ASI_DIRECT;
1617             break;
1618         case ASI_TWINX_REAL:
1619         case ASI_TWINX_REAL_L:
1620         case ASI_TWINX_N:
1621         case ASI_TWINX_NL:
1622         case ASI_TWINX_AIUP:
1623         case ASI_TWINX_AIUP_L:
1624         case ASI_TWINX_AIUS:
1625         case ASI_TWINX_AIUS_L:
1626         case ASI_TWINX_P:
1627         case ASI_TWINX_PL:
1628         case ASI_TWINX_S:
1629         case ASI_TWINX_SL:
1630         case ASI_QUAD_LDD_PHYS:
1631         case ASI_QUAD_LDD_PHYS_L:
1632         case ASI_NUCLEUS_QUAD_LDD:
1633         case ASI_NUCLEUS_QUAD_LDD_L:
1634             type = GET_ASI_DTWINX;
1635             break;
1636         case ASI_BLK_COMMIT_P:
1637         case ASI_BLK_COMMIT_S:
1638         case ASI_BLK_AIUP_4V:
1639         case ASI_BLK_AIUP_L_4V:
1640         case ASI_BLK_AIUP:
1641         case ASI_BLK_AIUPL:
1642         case ASI_BLK_AIUS_4V:
1643         case ASI_BLK_AIUS_L_4V:
1644         case ASI_BLK_AIUS:
1645         case ASI_BLK_AIUSL:
1646         case ASI_BLK_S:
1647         case ASI_BLK_SL:
1648         case ASI_BLK_P:
1649         case ASI_BLK_PL:
1650             type = GET_ASI_BLOCK;
1651             break;
1652         case ASI_FL8_S:
1653         case ASI_FL8_SL:
1654         case ASI_FL8_P:
1655         case ASI_FL8_PL:
1656             memop = MO_UB;
1657             type = GET_ASI_SHORT;
1658             break;
1659         case ASI_FL16_S:
1660         case ASI_FL16_SL:
1661         case ASI_FL16_P:
1662         case ASI_FL16_PL:
1663             memop = MO_TEUW;
1664             type = GET_ASI_SHORT;
1665             break;
1666         }
1667         /* The little-endian asis all have bit 3 set.  */
1668         if (asi & 8) {
1669             memop ^= MO_BSWAP;
1670         }
1671     }
1672 #endif
1673 
1674  done:
1675     return (DisasASI){ type, asi, mem_idx, memop };
1676 }
1677 
1678 #if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
/* Link-time stub: user-only sparc32 never reaches the asi helper. */
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
1684 
/* Link-time stub: user-only sparc32 never reaches the asi helper. */
static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
1690 #endif
1691 
/* Emit an integer load through asi DA from ADDR into DST. */
static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already emitted by resolve_asi. */
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_CODE:
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 t64 = tcg_temp_new_i64();

            gen_helper_ld_code(t64, tcg_env, addr, tcg_constant_i32(oi));
            tcg_gen_trunc_i64_tl(dst, t64);
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* The helper may raise an exception; sync pc/npc first. */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}
1737 
/* Emit an integer store of SRC through asi DA to ADDR. */
static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already emitted by resolve_asi. */
        break;

    case GET_ASI_DTWINX: /* Reserved for stda.  */
        if (TARGET_LONG_BITS == 32) {
            gen_exception(dc, TT_ILL_INSN);
            break;
        } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            break;
        }
        /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
        /* fall through */

    case GET_ASI_DIRECT:
        tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_BCOPY:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Copy 32 bytes from the address in SRC to ADDR.
         *
         * From Ross RT625 hyperSPARC manual, section 4.6:
         * "Block Copy and Block Fill will work only on cache line boundaries."
         *
         * It does not specify if an unaligned address is truncated or trapped.
         * Previous qemu behaviour was to truncate to 4 byte alignment, which
         * is obviously wrong.  The only place I can see this used is in the
         * Linux kernel which begins with page alignment, advancing by 32,
         * so is always aligned.  Assume truncation as the simpler option.
         *
         * Since the loads and stores are paired, allow the copy to happen
         * in the host endianness.  The copy need not be atomic.
         */
        {
            MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv_i128 tmp = tcg_temp_new_i128();

            /* Truncate both addresses to a 32-byte boundary. */
            tcg_gen_andi_tl(saddr, src, -32);
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(saddr, saddr, 16);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* The helper may raise an exception; sync pc/npc first. */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1816 
1817 static void gen_swap_asi(DisasContext *dc, DisasASI *da,
1818                          TCGv dst, TCGv src, TCGv addr)
1819 {
1820     switch (da->type) {
1821     case GET_ASI_EXCP:
1822         break;
1823     case GET_ASI_DIRECT:
1824         tcg_gen_atomic_xchg_tl(dst, addr, src,
1825                                da->mem_idx, da->memop | MO_ALIGN);
1826         break;
1827     default:
1828         /* ??? Should be DAE_invalid_asi.  */
1829         gen_exception(dc, TT_DATA_ACCESS);
1830         break;
1831     }
1832 }
1833 
1834 static void gen_cas_asi(DisasContext *dc, DisasASI *da,
1835                         TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
1836 {
1837     switch (da->type) {
1838     case GET_ASI_EXCP:
1839         return;
1840     case GET_ASI_DIRECT:
1841         tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
1842                                   da->mem_idx, da->memop | MO_ALIGN);
1843         break;
1844     default:
1845         /* ??? Should be DAE_invalid_asi.  */
1846         gen_exception(dc, TT_DATA_ACCESS);
1847         break;
1848     }
1849 }
1850 
/* LDSTUB: atomically load the byte at ADDR into DST and store 0xff. */
static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
                               da->mem_idx, MO_UB);
        break;
    default:
        /* ??? In theory, this should be raise DAE_invalid_asi.
           But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
            /* Cannot do the ld+st non-atomically under MTTCG; retry
               the TB with exclusive execution. */
            gen_helper_exit_atomic(tcg_env);
        } else {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
            TCGv_i64 s64, t64;

            /* The helpers may raise exceptions; sync pc/npc first. */
            save_state(dc);
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);

            s64 = tcg_constant_i64(0xff);
            gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);

            tcg_gen_trunc_i64_tl(dst, t64);

            /* End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1885 
/*
 * Emit a floating-point load through an alternate ASI
 * (LDFA/LDDFA/LDQFA).  ORIG_SIZE is the access size encoded in the
 * instruction, DA the previously resolved ASI description, RD the
 * destination FP register number.
 */
static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64, l64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        /* The exception was raised while resolving the ASI; emit nothing. */
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = tcg_temp_new_i32();
            tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
            gen_store_fpr_F(dc, rd, d32);
            break;

        case MO_64:
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            gen_store_fpr_D(dc, rd, d64);
            break;

        case MO_128:
            /* Two 8-byte loads; see the TODO above re 128-bit operations. */
            d64 = tcg_temp_new_i64();
            l64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_ld_i64(l64, addr_tmp, da->mem_idx, memop);
            gen_store_fpr_D(dc, rd, d64);
            gen_store_fpr_D(dc, rd + 2, l64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only.  */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment.  */
            addr_tmp = tcg_temp_new();
            d64 = tcg_temp_new_i64();
            for (int i = 0; ; ++i) {
                /* Eight 8-byte loads; 64-byte alignment checked once. */
                tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                gen_store_fpr_D(dc, rd + 2 * i, d64);
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only.  */
        if (orig_size == MO_64) {
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
            gen_store_fpr_D(dc, rd, d64);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them.  */
            switch (size) {
            case MO_32:
                /* Helper returns 64 bits; truncate for the F register. */
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                d32 = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(d32, d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case MO_64:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                gen_store_fpr_D(dc, rd, d64);
                break;
            case MO_128:
                d64 = tcg_temp_new_i64();
                l64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                addr_tmp = tcg_temp_new();
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                gen_helper_ld_asi(l64, tcg_env, addr_tmp, r_asi, r_mop);
                gen_store_fpr_D(dc, rd, d64);
                gen_store_fpr_D(dc, rd + 2, l64);
                break;
            default:
                g_assert_not_reached();
            }
        }
        break;
    }
}
2006 
/*
 * Emit a floating-point store through an alternate ASI
 * (STFA/STDFA/STQFA).  ORIG_SIZE is the access size encoded in the
 * instruction, DA the previously resolved ASI description, RD the
 * source FP register number.
 */
static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        /* The exception was raised while resolving the ASI; emit nothing. */
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = gen_load_fpr_F(dc, rd);
            tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
            break;
        case MO_64:
            d64 = gen_load_fpr_D(dc, rd);
            tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_4);
            break;
        case MO_128:
            /* Only 4-byte alignment required.  However, it is legal for the
               cpu to signal the alignment fault, and the OS trap handler is
               required to fix it up.  Requiring 16-byte alignment here avoids
               having to probe the second page before performing the first
               write.  */
            d64 = gen_load_fpr_D(dc, rd);
            tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_16);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            d64 = gen_load_fpr_D(dc, rd + 2);
            tcg_gen_qemu_st_i64(d64, addr_tmp, da->mem_idx, memop);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for stdfa on aligned registers only.  */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment.  */
            addr_tmp = tcg_temp_new();
            for (int i = 0; ; ++i) {
                /* Eight 8-byte stores; 64-byte alignment checked once. */
                d64 = gen_load_fpr_D(dc, rd + 2 * i);
                tcg_gen_qemu_st_i64(d64, addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for stdfa only.  */
        if (orig_size == MO_64) {
            d64 = gen_load_fpr_D(dc, rd);
            tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        /* According to the table in the UA2011 manual, the only
           other asis that are valid for ldfa/lddfa/ldqfa are
           the PST* asis, which aren't currently handled.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    }
}
2092 
/*
 * Emit an LDDA (load doubleword / load twin-word) through an alternate
 * ASI.  The 64-bit (or 128-bit twinx) result is split into the even and
 * odd integer registers RD and RD + 1.
 */
static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv lo = gen_dest_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already raised: skip the register writeback below. */
        return;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE load, so must swap
             * the order of the writebacks.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i128_i64(lo, hi, t);
            } else {
                tcg_gen_extr_i128_i64(hi, lo, t);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);

            /* Note that LE ldda acts as if each 32-bit register
               result is byte swapped.  Having just performed one
               64-bit bswap, we need now to swap the writebacks.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;

    case GET_ASI_CODE:
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 tmp = tcg_temp_new_i64();

            gen_helper_ld_code(tmp, tcg_env, addr, tcg_constant_i32(oi));

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for ldda, and this should raise DAE_invalid_asi.  However,
           real hardware allows others.  This can be seen with e.g.
           FreeBSD 10.3 wrt ASI_IC_TAG.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 tmp = tcg_temp_new_i64();

            save_state(dc);
            gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
    }

    gen_store_gpr(dc, rd, hi);
    gen_store_gpr(dc, rd + 1, lo);
}
2188 
/*
 * Emit an STDA (store doubleword / store twin-word) through an
 * alternate ASI.  The even and odd integer registers RD and RD + 1 are
 * combined into a single 64-bit (or 128-bit twinx) store.
 */
static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_load_gpr(dc, rd);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already raised while resolving the ASI. */
        break;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE store, so must swap
             * the order of the construction.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_i64_i128(t, lo, hi);
            } else {
                tcg_gen_concat_i64_i128(t, hi, lo);
            }
            tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }
            tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
        }
        break;

    case GET_ASI_BFILL:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Store 32 bytes of [rd:rd+1] to ADDR.
         * See comments for GET_ASI_COPY above.
         */
        {
            MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv_i64 t8 = tcg_temp_new_i64();
            TCGv_i128 t16 = tcg_temp_new_i128();
            TCGv daddr = tcg_temp_new();

            /* Replicate the 8-byte pair to 16 bytes, store twice to the
               32-byte-aligned block containing ADDR.  */
            tcg_gen_concat_tl_i64(t8, lo, hi);
            tcg_gen_concat_i64_i128(t16, t8, t8);
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }

            save_state(dc);
            gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
2279 
/* FMOVScc/FMOVSr: conditionally move a single-precision FP register
   RS to RD, according to the comparison in CMP.  sparc64 only. */
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i32 c32, zero, dst, s1, s2;
    TCGv_i64 c64 = tcg_temp_new_i64();

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the later.  */
    c32 = tcg_temp_new_i32();
    tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
    tcg_gen_extrl_i64_i32(c32, c64);

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = tcg_temp_new_i32();
    zero = tcg_constant_i32(0);

    /* dst = (cond != 0) ? rs : rd -- old rd value kept when false. */
    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    gen_store_fpr_F(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}

/* FMOVDcc/FMOVDr: conditionally move a double-precision FP register.
   sparc64 only. */
static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i64 dst = tcg_temp_new_i64();
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}

/* FMOVQcc/FMOVQr: conditionally move a quad-precision FP register,
   handled as two 64-bit halves.  sparc64 only. */
static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv c2 = tcg_constant_tl(cmp->c2);
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 l = tcg_temp_new_i64();

    tcg_gen_movcond_i64(cmp->cond, h, cmp->c1, c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    tcg_gen_movcond_i64(cmp->cond, l, cmp->c1, c2,
                        gen_load_fpr_D(dc, rs + 2),
                        gen_load_fpr_D(dc, rd + 2));
    gen_store_fpr_D(dc, rd, h);
    gen_store_fpr_D(dc, rd + 2, l);
#else
    qemu_build_not_reached();
#endif
}
2338 
#ifdef TARGET_SPARC64
/* Compute r_tsptr = &env->ts[env->tl & MAXTL_MASK], the trap state
   for the current trap level. */
static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
    }
}
#endif
2362 
2363 static int extract_dfpreg(DisasContext *dc, int x)
2364 {
2365     int r = x & 0x1e;
2366 #ifdef TARGET_SPARC64
2367     r |= (x & 1) << 5;
2368 #endif
2369     return r;
2370 }
2371 
2372 static int extract_qfpreg(DisasContext *dc, int x)
2373 {
2374     int r = x & 0x1c;
2375 #ifdef TARGET_SPARC64
2376     r |= (x & 1) << 5;
2377 #endif
2378     return r;
2379 }
2380 
2381 /* Include the auto-generated decoder.  */
2382 #include "decode-insns.c.inc"
2383 
/*
 * Define a decoder trans_* entry point: check the AVAIL feature
 * predicate, then forward to FUNC with the given arguments.
 */
#define TRANS(NAME, AVAIL, FUNC, ...) \
    static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
    { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }

/* Feature availability predicates used by TRANS, compile-time
   constant where the target (32 vs 64 bit) decides. */
#define avail_ALL(C)      true
#ifdef TARGET_SPARC64
# define avail_32(C)      false
# define avail_ASR17(C)   false
# define avail_CASA(C)    true
# define avail_DIV(C)     true
# define avail_MUL(C)     true
# define avail_POWERDOWN(C) false
# define avail_64(C)      true
# define avail_FMAF(C)    ((C)->def->features & CPU_FEATURE_FMAF)
# define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
# define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
# define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
# define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
# define avail_VIS3(C)    ((C)->def->features & CPU_FEATURE_VIS3)
# define avail_VIS3B(C)   avail_VIS3(C)
#else
# define avail_32(C)      true
# define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
# define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
# define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
# define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
# define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
# define avail_64(C)      false
# define avail_FMAF(C)    false
# define avail_GL(C)      false
# define avail_HYPV(C)    false
# define avail_VIS1(C)    false
# define avail_VIS2(C)    false
# define avail_VIS3(C)    false
# define avail_VIS3B(C)   false
#endif
2420 
2421 /* Default case for non jump instructions. */
/* Default case for non jump instructions. */
static bool advance_pc(DisasContext *dc)
{
    TCGLabel *l1;

    finishing_insn(dc);

    /* npc & 3 set means a symbolic value, not a real address. */
    if (dc->npc & 3) {
        switch (dc->npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* npc only known at runtime: advance in generated code. */
            dc->pc = dc->npc;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
            break;

        case JUMP_PC:
            /* we can do a static jump */
            l1 = gen_new_label();
            tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);

            /* jump not taken */
            gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);

            /* jump taken */
            gen_set_label(l1);
            gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);

            dc->base.is_jmp = DISAS_NORETURN;
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        /* Both pc and npc known at translation time. */
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
    return true;
}
2461 
2462 /*
2463  * Major opcodes 00 and 01 -- branches, call, and sethi
2464  */
2465 
/*
 * Advance pc/npc for a conditional branch with displacement DISP
 * (in instruction words) and annul bit ANNUL.  The always/never
 * conditions are resolved at translation time; otherwise either the
 * branch is resolved with goto_tb (annulling case) or the decision is
 * deferred to the delay slot via the JUMP_PC mechanism.
 */
static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
                              bool annul, int disp)
{
    target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
    target_ulong npc;

    finishing_insn(dc);

    if (cmp->cond == TCG_COND_ALWAYS) {
        if (annul) {
            /* Taken + annul: skip the delay slot entirely. */
            dc->pc = dest;
            dc->npc = dest + 4;
        } else {
            gen_mov_pc_npc(dc);
            dc->npc = dest;
        }
        return true;
    }

    if (cmp->cond == TCG_COND_NEVER) {
        npc = dc->npc;
        if (npc & 3) {
            /* npc is symbolic: advance at runtime, optionally skipping
               the (annulled) delay slot. */
            gen_mov_pc_npc(dc);
            if (annul) {
                tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
            }
            tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
        } else {
            dc->pc = npc + (annul ? 4 : 0);
            dc->npc = dc->pc + 4;
        }
        return true;
    }

    flush_cond(dc);
    npc = dc->npc;

    if (annul) {
        /* Delay slot executes only when taken: branch over goto_tb. */
        TCGLabel *l1 = gen_new_label();

        tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
        gen_goto_tb(dc, 0, npc, dest);
        gen_set_label(l1);
        gen_goto_tb(dc, 1, npc + 4, npc + 8);

        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        if (npc & 3) {
            switch (npc) {
            case DYNAMIC_PC:
            case DYNAMIC_PC_LOOKUP:
                /* Select the new npc at runtime with movcond. */
                tcg_gen_mov_tl(cpu_pc, cpu_npc);
                tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
                tcg_gen_movcond_tl(cmp->cond, cpu_npc,
                                   cmp->c1, tcg_constant_tl(cmp->c2),
                                   tcg_constant_tl(dest), cpu_npc);
                dc->pc = npc;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /* Defer the decision: delay slot runs next, then JUMP_PC
               resolution picks jump_pc[0] (taken) or [1] (not taken). */
            dc->pc = npc;
            dc->npc = JUMP_PC;
            dc->jump = *cmp;
            dc->jump_pc[0] = dest;
            dc->jump_pc[1] = npc + 4;

            /* The condition for cpu_cond is always NE -- normalize. */
            if (cmp->cond == TCG_COND_NE) {
                tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
            } else {
                tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
            }
            dc->cpu_cond_live = true;
        }
    }
    return true;
}
2545 
/* Raise a privileged-instruction trap; returns true for TRANS use. */
static bool raise_priv(DisasContext *dc)
{
    gen_exception(dc, TT_PRIV_INSN);
    return true;
}

/* Raise fp_exception with FTT = unimplemented FPop. */
static bool raise_unimpfpop(DisasContext *dc)
{
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    return true;
}
2557 
2558 static bool gen_trap_float128(DisasContext *dc)
2559 {
2560     if (dc->def->features & CPU_FEATURE_FLOAT128) {
2561         return false;
2562     }
2563     return raise_unimpfpop(dc);
2564 }
2565 
/* Bicc/BPcc: branch on integer condition codes. */
static bool do_bpcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    gen_compare(&cmp, a->cc, a->cond, dc);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(Bicc, ALL, do_bpcc, a)
TRANS(BPcc,  64, do_bpcc, a)

/* FBfcc/FBPfcc: branch on floating-point condition codes. */
static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    /* FP branches trap when the FPU is disabled. */
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(FBPfcc,  64, do_fbpfcc, a)
TRANS(FBfcc,  ALL, do_fbpfcc, a)

/* BPr: branch on contents of integer register (sparc64 only). */
static bool trans_BPr(DisasContext *dc, arg_BPr *a)
{
    DisasCompare cmp;

    if (!avail_64(dc)) {
        return false;
    }
    /* Reject the reserved rcond encodings. */
    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}
2603 
/* CALL: save pc in %o7 (r15) and jump to pc + disp30 * 4. */
static bool trans_CALL(DisasContext *dc, arg_CALL *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);

    gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
    gen_mov_pc_npc(dc);
    /* The branch takes effect after the delay slot. */
    dc->npc = target;
    return true;
}

static bool trans_NCP(DisasContext *dc, arg_NCP *a)
{
    /*
     * For sparc32, always generate the no-coprocessor exception.
     * For sparc64, always generate illegal instruction.
     */
#ifdef TARGET_SPARC64
    return false;
#else
    gen_exception(dc, TT_NCP_INSN);
    return true;
#endif
}
2627 
2628 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2629 {
2630     /* Special-case %g0 because that's the canonical nop.  */
2631     if (a->rd) {
2632         gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2633     }
2634     return advance_pc(dc);
2635 }
2636 
2637 /*
2638  * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2639  */
2640 
/*
 * Tcc: trap on condition.  COND is the 4-bit condition field, CC
 * selects icc/xcc.  The trap number is rs1 + (rs2 or immediate),
 * masked, plus the TT_TRAP base.  The hypervisor mask applies when
 * the HYPV feature is present and we are in supervisor mode.
 */
static bool do_tcc(DisasContext *dc, int cond, int cc,
                   int rs1, bool imm, int rs2_or_imm)
{
    int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
    DisasCompare cmp;
    TCGLabel *lab;
    TCGv_i32 trap;

    /* Trap never.  */
    if (cond == 0) {
        return advance_pc(dc);
    }

    /*
     * Immediate traps are the most common case.  Since this value is
     * live across the branch, it really pays to evaluate the constant.
     */
    if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
        trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
    } else {
        /* General case: trap = ((rs1 + rs2/imm) & mask) + TT_TRAP. */
        trap = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
        if (imm) {
            tcg_gen_addi_i32(trap, trap, rs2_or_imm);
        } else {
            TCGv_i32 t2 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
            tcg_gen_add_i32(trap, trap, t2);
        }
        tcg_gen_andi_i32(trap, trap, mask);
        tcg_gen_addi_i32(trap, trap, TT_TRAP);
    }

    finishing_insn(dc);

    /* Trap always.  */
    if (cond == 8) {
        save_state(dc);
        gen_helper_raise_exception(tcg_env, trap);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    /* Conditional trap.  */
    flush_cond(dc);
    /* The exception is raised out-of-line at the end of the TB. */
    lab = delay_exceptionv(dc, trap);
    gen_compare(&cmp, cc, cond, dc);
    tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);

    return advance_pc(dc);
}
2693 
/* Tcc with a register operand; cc != 0 is a v9-only encoding. */
static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
{
    if (avail_32(dc) && a->cc) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
}

/* Ticc with the v7/v8 immediate encoding (sparc32 only). */
static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
{
    if (avail_64(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
}

/* Tcc with the v9 immediate encoding (sparc64 only). */
static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
{
    if (avail_32(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
}
2717 
/* STBAR: store barrier -- order store-store only. */
static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
{
    tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
    return advance_pc(dc);
}

/* MEMBAR: memory barrier with mmask/cmask fields (sparc64 only). */
static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
{
    if (avail_32(dc)) {
        return false;
    }
    if (a->mmask) {
        /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
        tcg_gen_mb(a->mmask | TCG_BAR_SC);
    }
    if (a->cmask) {
        /* For #Sync, etc, end the TB to recognize interrupts. */
        dc->base.is_jmp = DISAS_EXIT;
    }
    return advance_pc(dc);
}
2739 
2740 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
2741                           TCGv (*func)(DisasContext *, TCGv))
2742 {
2743     if (!priv) {
2744         return raise_priv(dc);
2745     }
2746     gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
2747     return advance_pc(dc);
2748 }
2749 
/* RDY: the %y register lives in a dedicated global; no copy needed. */
static TCGv do_rdy(DisasContext *dc, TCGv dst)
{
    return cpu_y;
}

static bool trans_RDY(DisasContext *dc, arg_RDY *a)
{
    /*
     * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
     * 32-bit cpus like sparcv7, which ignores the rs1 field.
     * This matches after all other ASR, so Leon3 Asr17 is handled first.
     */
    if (avail_64(dc) && a->rs1 != 0) {
        return false;
    }
    return do_rd_special(dc, true, a->rd, do_rdy);
}

/* Leon3 %asr17: processor configuration register, read via helper. */
static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
{
    gen_helper_rdasr17(dst, tcg_env);
    return dst;
}

TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2775 
/* RDCCR: assemble the condition-code register via helper. */
static TCGv do_rdccr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdccr(dst, tcg_env);
    return dst;
}

TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)

/* RDASI: the current ASI is known at translation time. */
static TCGv do_rdasi(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    return tcg_constant_tl(dc->asi);
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2794 
/* RDTICK: read the %tick counter via its timer helper. */
static TCGv do_rdtick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    /* Reading the timer is an I/O operation; end the TB if icount
       requires it. */
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)

/* RDPC: the pc of this insn is known at translation time. */
static TCGv do_rdpc(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(address_mask_i(dc, dc->pc));
}

TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2817 
/* RDFPRS: sign-extend the 32-bit fprs global into dst. */
static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
{
    tcg_gen_ext_i32_tl(dst, cpu_fprs);
    return dst;
}

TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)

/* RDGSR: traps if the FPU is disabled, else reads the gsr global. */
static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
{
    gen_trap_ifnofpu(dc);
    return cpu_gsr;
}

TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)

/* RDSOFTINT: read the pending soft interrupt bits. */
static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
    return dst;
}

TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
2841 
/* RDTICK_CMPR: read the %tick compare register from env. */
static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)

/* RDSTICK: read the %stick (system tick) counter via its helper. */
static TCGv do_rdstick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    /* Reading the timer is an I/O operation; end the TB if icount
       requires it. */
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)

/* RDSTICK_CMPR: read the %stick compare register from env. */
static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
    return dst;
}

/* TODO: supervisor access only allowed when enabled by hypervisor. */
TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
2875 
/*
 * UltraSPARC-T1 Strand status.
 * HYPV check maybe not enough, UA2005 & UA2007 describe
 * this ASR as impl. dep
 */
static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
{
    /* Always report a constant status of 1. */
    return tcg_constant_tl(1);
}

TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
2887 
/* Read the 32-bit PSR; assembled by the helper (sparc32 only). */
static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdpsr(dst, tcg_env);
    return dst;
}

TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
2895 
/* Read the HPSTATE hyperprivileged register from env. */
static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
    return dst;
}

TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
2903 
/* Read HTSTATE for the current trap level. */
static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    /* Index the htstate array by trap level: tp = env + (TL & MAXTL_MASK) * 8. */
    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
    return dst;
}

TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
2920 
/* Read the HINTP hyperprivileged register from env. */
static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
    return dst;
}

TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)

/* Read the HTBA hyperprivileged register from env. */
static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
    return dst;
}

TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)

/* Read the HVER hyperprivileged register from env. */
static TCGv do_rdhver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
    return dst;
}

TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)

/* Read the HSTICK compare register from env. */
static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
    return dst;
}

TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
      do_rdhstick_cmpr)
2953 
/* Read the WIM register from env (sparc32 only). */
static TCGv do_rdwim(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
    return dst;
}

TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
2961 
/* Read TPC from the trap_state entry for the current trap level. */
static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
2976 
/* Read TNPC from the trap_state entry for the current trap level. */
static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
2991 
/* Read TSTATE from the trap_state entry for the current trap level. */
static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
3006 
/* Read TT (32-bit, sign-extended) from the current trap level's trap_state. */
static TCGv do_rdtt(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
3022 
/* Read TBA/TBR: return the global cpu_tbr directly. */
static TCGv do_rdtba(DisasContext *dc, TCGv dst)
{
    return cpu_tbr;
}

TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3030 
/* Read PSTATE: sign-extending 32-bit load from the env field. */
static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
    return dst;
}

TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)

/* Read the current trap level (TL). */
static TCGv do_rdtl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
    return dst;
}

TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)

/* Read PIL; note psrpil uses the common env field, not the 64-bit-only one. */
static TCGv do_rdpil(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
    return dst;
}

TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
3054 
/* Read CWP via the helper. */
static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
{
    gen_helper_rdcwp(dst, tcg_env);
    return dst;
}

TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
3062 
/* Read the CANSAVE window register from env. */
static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
    return dst;
}

TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)

/* Read the CANRESTORE window register from env. */
static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
    return dst;
}

TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
      do_rdcanrestore)

/* Read the CLEANWIN window register from env. */
static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
    return dst;
}

TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
3087 
/* Read the OTHERWIN window register from env. */
static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
    return dst;
}

TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)

/* Read the WSTATE window register from env. */
static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
    return dst;
}

TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)

/* Read the GL (global level) register from env. */
static TCGv do_rdgl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
    return dst;
}

TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3111 
/* UA2005 strand status */
static TCGv do_rdssr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
    return dst;
}

TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)

/* Read the VER register from env. */
static TCGv do_rdver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
    return dst;
}

TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3128 
3129 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3130 {
3131     if (avail_64(dc)) {
3132         gen_helper_flushw(tcg_env);
3133         return advance_pc(dc);
3134     }
3135     return false;
3136 }
3137 
3138 static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
3139                           void (*func)(DisasContext *, TCGv))
3140 {
3141     TCGv src;
3142 
3143     /* For simplicity, we under-decoded the rs2 form. */
3144     if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
3145         return false;
3146     }
3147     if (!priv) {
3148         return raise_priv(dc);
3149     }
3150 
3151     if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
3152         src = tcg_constant_tl(a->rs2_or_imm);
3153     } else {
3154         TCGv src1 = gen_load_gpr(dc, a->rs1);
3155         if (a->rs2_or_imm == 0) {
3156             src = src1;
3157         } else {
3158             src = tcg_temp_new();
3159             if (a->imm) {
3160                 tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
3161             } else {
3162                 tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
3163             }
3164         }
3165     }
3166     func(dc, src);
3167     return advance_pc(dc);
3168 }
3169 
/* Write Y: the upper 32 bits are always zero. */
static void do_wry(DisasContext *dc, TCGv src)
{
    tcg_gen_ext32u_tl(cpu_y, src);
}

TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
3176 
/* Write CCR via the helper, which unpacks the flag fields. */
static void do_wrccr(DisasContext *dc, TCGv src)
{
    gen_helper_wrccr(tcg_env, src);
}

TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
3183 
/* Write ASI: only the low 8 bits are kept. */
static void do_wrasi(DisasContext *dc, TCGv src)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_ext8u_tl(tmp, src);
    tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
    /* End TB to notice changed ASI. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
3195 
/* Write FPRS; invalidates the cached dirty state and ends the TB. */
static void do_wrfprs(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    tcg_gen_trunc_tl_i32(cpu_fprs, src);
    dc->fprs_dirty = 0;
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
3208 
/* Write GSR; traps first if the FPU is unavailable. */
static void do_wrgsr(DisasContext *dc, TCGv src)
{
    gen_trap_ifnofpu(dc);
    tcg_gen_mov_tl(cpu_gsr, src);
}

TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3216 
/* Set bits in SOFTINT via the helper. */
static void do_wrsoftint_set(DisasContext *dc, TCGv src)
{
    gen_helper_set_softint(tcg_env, src);
}

TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)

/* Clear bits in SOFTINT via the helper. */
static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
{
    gen_helper_clear_softint(tcg_env, src);
}

TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)

/* Replace SOFTINT via the helper. */
static void do_wrsoftint(DisasContext *dc, TCGv src)
{
    gen_helper_write_softint(tcg_env, src);
}

TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3237 
/* Write TICK_CMPR: store the value and reprogram the tick timer limit. */
static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3251 
3252 static void do_wrstick(DisasContext *dc, TCGv src)
3253 {
3254 #ifdef TARGET_SPARC64
3255     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3256 
3257     tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3258     translator_io_start(&dc->base);
3259     gen_helper_tick_set_count(r_tickptr, src);
3260     /* End TB to handle timer interrupt */
3261     dc->base.is_jmp = DISAS_EXIT;
3262 #else
3263     qemu_build_not_reached();
3264 #endif
3265 }
3266 
3267 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3268 
/* Write STICK_CMPR: store the value and reprogram the stick timer limit. */
static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3282 
/* Power down the cpu; state is saved first since the helper does not return
   to this TB normally. */
static void do_wrpowerdown(DisasContext *dc, TCGv src)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_power_down(tcg_env);
}

TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3291 
/* Write the 32-bit PSR via the helper; end the TB since PSR affects
   subsequent translation. */
static void do_wrpsr(DisasContext *dc, TCGv src)
{
    gen_helper_wrpsr(tcg_env, src);
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3299 
3300 static void do_wrwim(DisasContext *dc, TCGv src)
3301 {
3302     target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
3303     TCGv tmp = tcg_temp_new();
3304 
3305     tcg_gen_andi_tl(tmp, src, mask);
3306     tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
3307 }
3308 
3309 TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3310 
/* Write TPC into the trap_state entry for the current trap level. */
static void do_wrtpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)
3324 
/* Write TNPC into the trap_state entry for the current trap level. */
static void do_wrtnpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)
3338 
/* Write TSTATE into the trap_state entry for the current trap level. */
static void do_wrtstate(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)
3352 
/* Write TT (32-bit store) into the current trap level's trap_state. */
static void do_wrtt(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3366 
/* Write the TICK counter via the timer helper. */
static void do_wrtick(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3379 
/* Write TBA/TBR: plain move into the global cpu_tbr. */
static void do_wrtba(DisasContext *dc, TCGv src)
{
    tcg_gen_mov_tl(cpu_tbr, src);
}

TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3386 
/* Write PSTATE via the helper; npc becomes dynamic afterwards. */
static void do_wrpstate(DisasContext *dc, TCGv src)
{
    /* Save pc/npc so the helper sees consistent state. */
    save_state(dc);
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpstate(tcg_env, src);
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
3398 
/* Write TL (trap level); npc becomes dynamic afterwards. */
static void do_wrtl(DisasContext *dc, TCGv src)
{
    /* Save pc/npc before changing the trap level. */
    save_state(dc);
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
3407 
/* Write PIL via the helper; exit the TB under icount. */
static void do_wrpil(DisasContext *dc, TCGv src)
{
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpil(tcg_env, src);
}

TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3417 
/* Write CWP via the helper. */
static void do_wrcwp(DisasContext *dc, TCGv src)
{
    gen_helper_wrcwp(tcg_env, src);
}

TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3424 
/* Write the CANSAVE window register (32-bit store into env). */
static void do_wrcansave(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
}

TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)

/* Write the CANRESTORE window register. */
static void do_wrcanrestore(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
}

TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)

/* Write the CLEANWIN window register. */
static void do_wrcleanwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
}

TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)

/* Write the OTHERWIN window register. */
static void do_wrotherwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
}

TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)

/* Write the WSTATE window register. */
static void do_wrwstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
}

TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
3459 
/* Write the GL (global level) register via the helper. */
static void do_wrgl(DisasContext *dc, TCGv src)
{
    gen_helper_wrgl(tcg_env, src);
}

TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)

/* UA2005 strand status */
static void do_wrssr(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
}

TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)

TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3476 
/* Write HPSTATE; end the TB since it affects subsequent translation. */
static void do_wrhpstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3484 
/* Write HTSTATE for the current trap level. */
static void do_wrhtstate(DisasContext *dc, TCGv src)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    /* Index the htstate array by trap level: tp = env + (TL & MAXTL_MASK) * 8. */
    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
}

TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
3500 
/* Write the HINTP hyperprivileged register. */
static void do_wrhintp(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
}

TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)

/* Write the HTBA hyperprivileged register. */
static void do_wrhtba(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
}

TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
3514 
/* Write HSTICK_CMPR: store the value and reprogram the hstick timer limit. */
static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
      do_wrhstick_cmpr)
3529 
3530 static bool do_saved_restored(DisasContext *dc, bool saved)
3531 {
3532     if (!supervisor(dc)) {
3533         return raise_priv(dc);
3534     }
3535     if (saved) {
3536         gen_helper_saved(tcg_env);
3537     } else {
3538         gen_helper_restored(tcg_env);
3539     }
3540     return advance_pc(dc);
3541 }
3542 
3543 TRANS(SAVED, 64, do_saved_restored, true)
3544 TRANS(RESTORED, 64, do_saved_restored, false)
3545 
/* NOP: nothing to generate, just advance the pc. */
static bool trans_NOP(DisasContext *dc, arg_NOP *a)
{
    return advance_pc(dc);
}

/*
 * TODO: Need a feature bit for sparcv8.
 * In the meantime, treat all 32-bit cpus like sparcv7.
 */
TRANS(NOP_v7, 32, trans_NOP, a)
TRANS(NOP_v9, 64, trans_NOP, a)
3557 
/*
 * Common code for two-operand arithmetic with an optional immediate form.
 * FUNC is the reg-reg generator; FUNCI the reg-imm generator, which may be
 * NULL, in which case the immediate is materialized as a constant for FUNC.
 * With LOGIC_CC, the result is computed into cpu_cc_N and the remaining
 * flags are derived from it.
 */
static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
                         void (*func)(TCGv, TCGv, TCGv),
                         void (*funci)(TCGv, TCGv, target_long),
                         bool logic_cc)
{
    TCGv dst, src1;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (logic_cc) {
        /* For logical ops, N is the result itself; compute there directly. */
        dst = cpu_cc_N;
    } else {
        dst = gen_dest_gpr(dc, a->rd);
    }
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm || a->rs2_or_imm == 0) {
        if (funci) {
            funci(dst, src1, a->rs2_or_imm);
        } else {
            func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
        }
    } else {
        func(dst, src1, cpu_regs[a->rs2_or_imm]);
    }

    if (logic_cc) {
        /* Z mirrors the result; C and V are always clear for logical ops. */
        if (TARGET_LONG_BITS == 64) {
            tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
            tcg_gen_movi_tl(cpu_icc_C, 0);
        }
        tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
        tcg_gen_movi_tl(cpu_cc_C, 0);
        tcg_gen_movi_tl(cpu_cc_V, 0);
    }

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3600 
3601 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
3602                      void (*func)(TCGv, TCGv, TCGv),
3603                      void (*funci)(TCGv, TCGv, target_long),
3604                      void (*func_cc)(TCGv, TCGv, TCGv))
3605 {
3606     if (a->cc) {
3607         return do_arith_int(dc, a, func_cc, NULL, false);
3608     }
3609     return do_arith_int(dc, a, func, funci, false);
3610 }
3611 
/* Dispatch a logical op; with the cc bit set, flags derive from the result. */
static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long))
{
    return do_arith_int(dc, a, func, funci, a->cc);
}

TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)

TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)

TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)

TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)

TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)

/* TODO: Should have feature bit -- comes in with UltraSparc T2. */
TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3645 
static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
{
    /* OR with %g0 is the canonical alias for MOV. */
    if (!a->cc && a->rs1 == 0) {
        if (a->imm || a->rs2_or_imm == 0) {
            /* mov constant (the %g0 | %g0 case is also constant 0) */
            gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
        } else if (a->rs2_or_imm & ~0x1f) {
            /* For simplicity, we under-decoded the rs2 form. */
            return false;
        } else {
            /* mov register */
            gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
        }
        return advance_pc(dc);
    }
    return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
}
3662 
/*
 * UDIV: unsigned divide of the 64-bit value Y:rs1 by the low 32 bits
 * of rs2, with the quotient saturated to UINT32_MAX.
 */
static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv_i64 t1, t2;
    TCGv dst;

    if (!avail_DIV(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        /* Zero divisor known at translate time: raise immediately. */
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv_i32 n2;

        finishing_insn(dc);
        flush_cond(dc);

        /* Runtime zero check on the low 32 bits of the divisor. */
        n2 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);

        lab = delay_exception(dc, TT_DIV_ZERO);
        tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);

        t2 = tcg_temp_new_i64();
#ifdef TARGET_SPARC64
        tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
#else
        tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
#endif
    }

    /* t1 = Y in the high half, rs1 in the low half. */
    t1 = tcg_temp_new_i64();
    tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);

    tcg_gen_divu_i64(t1, t1, t2);
    /* Saturate an overflowing quotient to UINT32_MAX. */
    tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));

    dst = gen_dest_gpr(dc, a->rd);
    tcg_gen_trunc_i64_tl(dst, t1);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3715 
/* UDIVX: full-width unsigned divide (v9 only). */
static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        /* Zero divisor known at translate time: raise immediately. */
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;

        finishing_insn(dc);
        flush_cond(dc);

        /* Runtime zero check via a delayed exception. */
        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    tcg_gen_divu_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3753 
/* SDIVX: full-width signed divide (v9 only). */
static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        /* Zero divisor known at translate time: raise immediately. */
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm) {
        if (unlikely(a->rs2_or_imm == -1)) {
            /* Divide by -1 is negation; also avoids INT64_MIN / -1 below. */
            tcg_gen_neg_tl(dst, src1);
            gen_store_gpr(dc, a->rd, dst);
            return advance_pc(dc);
        }
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv t1, t2;

        finishing_insn(dc);
        flush_cond(dc);

        /* Runtime zero check via a delayed exception. */
        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);

        /*
         * Need to avoid INT64_MIN / -1, which will trap on x86 host.
         * Set SRC2 to 1 as a new divisor, to produce the correct result.
         */
        t1 = tcg_temp_new();
        t2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
        tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
        tcg_gen_and_tl(t1, t1, t2);
        tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
                           tcg_constant_tl(1), src2);
        src2 = t1;
    }

    tcg_gen_div_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3810 
/*
 * Common code for the VIS EDGE instructions: build left and right edge
 * masks from the low bits of rs1 and rs2, then select L when the two
 * addresses fall in the same aligned 8-byte word, else L & R.  With CC,
 * also set the flags from rs1 - rs2.
 */
static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
                     int width, bool cc, bool little_endian)
{
    TCGv dst, s1, s2, l, r, t, m;
    uint64_t amask = address_mask_i(dc, -8);

    dst = gen_dest_gpr(dc, a->rd);
    s1 = gen_load_gpr(dc, a->rs1);
    s2 = gen_load_gpr(dc, a->rs2);

    if (cc) {
        gen_op_subcc(cpu_cc_N, s1, s2);
    }

    l = tcg_temp_new();
    r = tcg_temp_new();
    t = tcg_temp_new();

    /* Extract the per-width element index from each address;
       R is bit-flipped so it counts from the other end. */
    switch (width) {
    case 8:
        tcg_gen_andi_tl(l, s1, 7);
        tcg_gen_andi_tl(r, s2, 7);
        tcg_gen_xori_tl(r, r, 7);
        m = tcg_constant_tl(0xff);
        break;
    case 16:
        tcg_gen_extract_tl(l, s1, 1, 2);
        tcg_gen_extract_tl(r, s2, 1, 2);
        tcg_gen_xori_tl(r, r, 3);
        m = tcg_constant_tl(0xf);
        break;
    case 32:
        tcg_gen_extract_tl(l, s1, 2, 1);
        tcg_gen_extract_tl(r, s2, 2, 1);
        tcg_gen_xori_tl(r, r, 1);
        m = tcg_constant_tl(0x3);
        break;
    default:
        abort();
    }

    /* Compute Left Edge */
    if (little_endian) {
        tcg_gen_shl_tl(l, m, l);
        tcg_gen_and_tl(l, l, m);
    } else {
        tcg_gen_shr_tl(l, m, l);
    }
    /* Compute Right Edge */
    if (little_endian) {
        tcg_gen_shr_tl(r, m, r);
    } else {
        tcg_gen_shl_tl(r, m, r);
        tcg_gen_and_tl(r, r, m);
    }

    /* Compute dst = (s1 == s2 under amask ? l : l & r) */
    tcg_gen_xor_tl(t, s1, s2);
    tcg_gen_and_tl(r, r, l);
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, dst, t, tcg_constant_tl(amask), r, l);

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)

TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
3889 
3890 static bool do_rr(DisasContext *dc, arg_r_r *a,
3891                   void (*func)(TCGv, TCGv))
3892 {
3893     TCGv dst = gen_dest_gpr(dc, a->rd);
3894     TCGv src = gen_load_gpr(dc, a->rs);
3895 
3896     func(dst, src);
3897     gen_store_gpr(dc, a->rd, dst);
3898     return advance_pc(dc);
3899 }
3900 
3901 TRANS(LZCNT, VIS3, do_rr, a, gen_op_lzcnt)
3902 
3903 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
3904                    void (*func)(TCGv, TCGv, TCGv))
3905 {
3906     TCGv dst = gen_dest_gpr(dc, a->rd);
3907     TCGv src1 = gen_load_gpr(dc, a->rs1);
3908     TCGv src2 = gen_load_gpr(dc, a->rs2);
3909 
3910     func(dst, src1, src2);
3911     gen_store_gpr(dc, a->rd, dst);
3912     return advance_pc(dc);
3913 }
3914 
/* ARRAY8/16/32: 3D array addressing helpers (VIS 1). */
TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)

/* ADDXC[cc]: add with carry from xcc.C (VIS 3). */
TRANS(ADDXC, VIS3, do_rrr, a, gen_op_addxc)
TRANS(ADDXCcc, VIS3, do_rrr, a, gen_op_addxccc)
3921 
/*
 * ALIGNADDRESS: dst = (s1 + s2) & ~7, with the discarded low three
 * bits of the raw sum deposited into GSR bits [2:0] for use by FALIGNDATA.
 * sparc64-only: the decode conditions never reach here on sparc32.
 */
static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    /* Record the unmasked sum's low 3 bits in GSR.align. */
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}
3934 
/*
 * ALIGNADDRESS_LITTLE: like gen_op_alignaddr, but GSR.align receives
 * the low 3 bits of the *negated* sum (little-endian variant).
 */
static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    /* Negate before depositing: only bits [2:0] of -sum are kept. */
    tcg_gen_neg_tl(tmp, tmp);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}
3948 
/* ALIGNADDRESS / ALIGNADDRESS_LITTLE (VIS 1). */
TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
3951 
/* BMASK: dst = s1 + s2, with the sum also copied into GSR bits [63:32]. */
static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    tcg_gen_add_tl(dst, s1, s2);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
#else
    g_assert_not_reached();
#endif
}
3961 
3962 TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
3963 
3964 static bool do_cmask(DisasContext *dc, int rs2, void (*func)(TCGv, TCGv, TCGv))
3965 {
3966     func(cpu_gsr, cpu_gsr, gen_load_gpr(dc, rs2));
3967     return true;
3968 }
3969 
/* CMASK8/16/32: generate a byte mask in GSR from comparison results (VIS 3). */
TRANS(CMASK8, VIS3, do_cmask, a->rs2, gen_helper_cmask8)
TRANS(CMASK16, VIS3, do_cmask, a->rs2, gen_helper_cmask16)
TRANS(CMASK32, VIS3, do_cmask, a->rs2, gen_helper_cmask32)
3973 
/*
 * Shift by register: SLL/SRL/SRA with the count taken from %rs2.
 * 'l' selects a left shift; otherwise 'u' selects logical (unsigned)
 * versus arithmetic right shift.  a->x requests a 64-bit shift.
 */
static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
{
    TCGv dst, src1, src2;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && a->x) {
        return false;
    }

    src2 = tcg_temp_new();
    /* Only the low 5 (32-bit) or 6 (64-bit) bits of the count apply. */
    tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (l) {
        tcg_gen_shl_tl(dst, src1, src2);
        if (!a->x) {
            /* 32-bit op: discard bits shifted above bit 31. */
            tcg_gen_ext32u_tl(dst, dst);
        }
    } else if (u) {
        if (!a->x) {
            /* Zero-extend first so vacated high bits shift in as zero. */
            tcg_gen_ext32u_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_shr_tl(dst, src1, src2);
    } else {
        if (!a->x) {
            /* Sign-extend first so bit 31 replicates on the right shift. */
            tcg_gen_ext32s_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_sar_tl(dst, src1, src2);
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
4009 
/* Register-count shifts: (l, u) select shl / shr / sar. */
TRANS(SLL_r, ALL, do_shift_r, a, true, true)
TRANS(SRL_r, ALL, do_shift_r, a, false, true)
TRANS(SRA_r, ALL, do_shift_r, a, false, false)
4013 
/*
 * Shift by immediate: SLL/SRL/SRA with constant count a->i.
 * 'l' selects a left shift; otherwise 'u' selects logical versus
 * arithmetic right shift.  a->x requests a 64-bit shift.
 */
static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
{
    TCGv dst, src1;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && (a->x || a->i >= 32)) {
        return false;
    }

    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (avail_32(dc) || a->x) {
        if (l) {
            tcg_gen_shli_tl(dst, src1, a->i);
        } else if (u) {
            tcg_gen_shri_tl(dst, src1, a->i);
        } else {
            tcg_gen_sari_tl(dst, src1, a->i);
        }
    } else {
        /*
         * 32-bit shift on a 64-bit cpu: a deposit/extract of the
         * 32 - i significant bits performs the shift and the required
         * zero- or sign-extension of the 32-bit result in one op.
         */
        if (l) {
            tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
        } else if (u) {
            tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
        } else {
            tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
        }
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
4046 
/* Immediate-count shifts: (l, u) select shl / shr / sar. */
TRANS(SLL_i, ALL, do_shift_i, a, true, true)
TRANS(SRL_i, ALL, do_shift_i, a, false, true)
TRANS(SRA_i, ALL, do_shift_i, a, false, false)
4050 
4051 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
4052 {
4053     /* For simplicity, we under-decoded the rs2 form. */
4054     if (!imm && rs2_or_imm & ~0x1f) {
4055         return NULL;
4056     }
4057     if (imm || rs2_or_imm == 0) {
4058         return tcg_constant_tl(rs2_or_imm);
4059     } else {
4060         return cpu_regs[rs2_or_imm];
4061     }
4062 }
4063 
4064 static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
4065 {
4066     TCGv dst = gen_load_gpr(dc, rd);
4067     TCGv c2 = tcg_constant_tl(cmp->c2);
4068 
4069     tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
4070     gen_store_gpr(dc, rd, dst);
4071     return advance_pc(dc);
4072 }
4073 
4074 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
4075 {
4076     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4077     DisasCompare cmp;
4078 
4079     if (src2 == NULL) {
4080         return false;
4081     }
4082     gen_compare(&cmp, a->cc, a->cond, dc);
4083     return do_mov_cond(dc, &cmp, a->rd, src2);
4084 }
4085 
4086 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
4087 {
4088     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4089     DisasCompare cmp;
4090 
4091     if (src2 == NULL) {
4092         return false;
4093     }
4094     gen_fcompare(&cmp, a->cc, a->cond);
4095     return do_mov_cond(dc, &cmp, a->rd, src2);
4096 }
4097 
4098 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
4099 {
4100     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4101     DisasCompare cmp;
4102 
4103     if (src2 == NULL) {
4104         return false;
4105     }
4106     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
4107         return false;
4108     }
4109     return do_mov_cond(dc, &cmp, a->rd, src2);
4110 }
4111 
/*
 * Common front end for JMPL/RETT/RETURN/SAVE/RESTORE: compute
 * rs1 + (simm13 or rs2) into a fresh temporary and pass it to 'func'.
 */
static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
                           bool (*func)(DisasContext *dc, int rd, TCGv src))
{
    TCGv src1, sum;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /*
     * Always load the sum into a new temporary.
     * This is required to capture the value across a window change,
     * e.g. SAVE and RESTORE, and may be optimized away otherwise.
     */
    sum = tcg_temp_new();
    src1 = gen_load_gpr(dc, a->rs1);
    if (a->imm || a->rs2_or_imm == 0) {
        tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
    } else {
        tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
    }
    return func(dc, a->rd, sum);
}
4136 
/*
 * JMPL: jump to 'src' (into npc), writing this instruction's own
 * address into %rd.  Traps on a misaligned target.
 */
static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
{
    /*
     * Preserve pc across advance, so that we can delay
     * the writeback to rd until after src is consumed.
     */
    target_ulong cur_pc = dc->pc;

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);
    gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));

    /* Target is only known at runtime: end the TB with a lookup. */
    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}
4155 
4156 TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
4157 
/*
 * RETT (sparc32): privileged return from trap.  The helper adjusts
 * the trap state; pc/npc become dynamic.
 */
static bool do_rett(DisasContext *dc, int rd, TCGv src)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_helper_rett(tcg_env);

    dc->npc = DYNAMIC_PC;
    return true;
}
4173 
4174 TRANS(RETT, 32, do_add_special, a, do_rett)
4175 
/*
 * RETURN (sparc64): restore the caller's register window, then
 * jump to 'src'.  Traps on a misaligned target.
 */
static bool do_return(DisasContext *dc, int rd, TCGv src)
{
    /* Alignment is checked before the window restore. */
    gen_check_align(dc, src, 3);
    gen_helper_restore(tcg_env);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}
4188 
4189 TRANS(RETURN, 64, do_add_special, a, do_return)
4190 
/*
 * SAVE: advance to a new register window, then write the sum
 * (computed in the OLD window by do_add_special) into %rd of the
 * NEW window.
 */
static bool do_save(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_save(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}
4197 
4198 TRANS(SAVE, ALL, do_add_special, a, do_save)
4199 
/*
 * RESTORE: pop back to the previous register window, then write the
 * sum (computed in the OLD window) into %rd of the restored window.
 */
static bool do_restore(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_restore(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}
4206 
4207 TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4208 
/*
 * DONE/RETRY (sparc64): privileged return from a trap handler.
 * The helpers rewrite pc/npc from the trap state, so both become
 * dynamic and the translation block ends here.
 */
static bool do_done_retry(DisasContext *dc, bool done)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    dc->npc = DYNAMIC_PC;
    dc->pc = DYNAMIC_PC;
    /* NOTE(review): io started before the helper, presumably for icount. */
    translator_io_start(&dc->base);
    if (done) {
        gen_helper_done(tcg_env);
    } else {
        gen_helper_retry(tcg_env);
    }
    return true;
}
4224 
/* DONE and RETRY are sparc64-only. */
TRANS(DONE, 64, do_done_retry, true)
TRANS(RETRY, 64, do_done_retry, false)
4227 
4228 /*
4229  * Major opcode 11 -- load and store instructions
4230  */
4231 
/*
 * Compute the effective address rs1 + (simm13 or rs2) for a load or
 * store.  Returns NULL for an unencodable rs2 field.  When AM_CHECK
 * is in effect the address is truncated to 32 bits.
 */
static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
{
    TCGv addr, tmp = NULL;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }

    addr = gen_load_gpr(dc, rs1);
    if (rs2_or_imm) {
        tmp = tcg_temp_new();
        if (imm) {
            tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
        } else {
            tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
        }
        addr = tmp;
    }
    if (AM_CHECK(dc)) {
        if (!tmp) {
            /* rs2/imm was zero: still need a temp for the masked value. */
            tmp = tcg_temp_new();
        }
        tcg_gen_ext32u_tl(tmp, addr);
        addr = tmp;
    }
    return addr;
}
4260 
4261 static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4262 {
4263     TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4264     DisasASI da;
4265 
4266     if (addr == NULL) {
4267         return false;
4268     }
4269     da = resolve_asi(dc, a->asi, mop);
4270 
4271     reg = gen_dest_gpr(dc, a->rd);
4272     gen_ld_asi(dc, &da, reg, addr);
4273     gen_store_gpr(dc, a->rd, reg);
4274     return advance_pc(dc);
4275 }
4276 
/* Integer loads; LDSW and LDX are sparc64-only. */
TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4284 
4285 static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4286 {
4287     TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4288     DisasASI da;
4289 
4290     if (addr == NULL) {
4291         return false;
4292     }
4293     da = resolve_asi(dc, a->asi, mop);
4294 
4295     reg = gen_load_gpr(dc, a->rd);
4296     gen_st_asi(dc, &da, reg, addr);
4297     return advance_pc(dc);
4298 }
4299 
/* Integer stores; STX is sparc64-only. */
TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
TRANS(STB, ALL, do_st_gpr, a, MO_UB)
TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4304 
4305 static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
4306 {
4307     TCGv addr;
4308     DisasASI da;
4309 
4310     if (a->rd & 1) {
4311         return false;
4312     }
4313     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4314     if (addr == NULL) {
4315         return false;
4316     }
4317     da = resolve_asi(dc, a->asi, MO_TEUQ);
4318     gen_ldda_asi(dc, &da, addr, a->rd);
4319     return advance_pc(dc);
4320 }
4321 
4322 static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
4323 {
4324     TCGv addr;
4325     DisasASI da;
4326 
4327     if (a->rd & 1) {
4328         return false;
4329     }
4330     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4331     if (addr == NULL) {
4332         return false;
4333     }
4334     da = resolve_asi(dc, a->asi, MO_TEUQ);
4335     gen_stda_asi(dc, &da, addr, a->rd);
4336     return advance_pc(dc);
4337 }
4338 
4339 static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
4340 {
4341     TCGv addr, reg;
4342     DisasASI da;
4343 
4344     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4345     if (addr == NULL) {
4346         return false;
4347     }
4348     da = resolve_asi(dc, a->asi, MO_UB);
4349 
4350     reg = gen_dest_gpr(dc, a->rd);
4351     gen_ldstub_asi(dc, &da, reg, addr);
4352     gen_store_gpr(dc, a->rd, reg);
4353     return advance_pc(dc);
4354 }
4355 
4356 static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
4357 {
4358     TCGv addr, dst, src;
4359     DisasASI da;
4360 
4361     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4362     if (addr == NULL) {
4363         return false;
4364     }
4365     da = resolve_asi(dc, a->asi, MO_TEUL);
4366 
4367     dst = gen_dest_gpr(dc, a->rd);
4368     src = gen_load_gpr(dc, a->rd);
4369     gen_swap_asi(dc, &da, dst, src, addr);
4370     gen_store_gpr(dc, a->rd, dst);
4371     return advance_pc(dc);
4372 }
4373 
4374 static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4375 {
4376     TCGv addr, o, n, c;
4377     DisasASI da;
4378 
4379     addr = gen_ldst_addr(dc, a->rs1, true, 0);
4380     if (addr == NULL) {
4381         return false;
4382     }
4383     da = resolve_asi(dc, a->asi, mop);
4384 
4385     o = gen_dest_gpr(dc, a->rd);
4386     n = gen_load_gpr(dc, a->rd);
4387     c = gen_load_gpr(dc, a->rs2_or_imm);
4388     gen_cas_asi(dc, &da, o, n, c, addr);
4389     gen_store_gpr(dc, a->rd, o);
4390     return advance_pc(dc);
4391 }
4392 
/* CASA is gated by its own feature bit; CASXA is sparc64-only. */
TRANS(CASA, CASA, do_casa, a, MO_TEUL)
TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4395 
4396 static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4397 {
4398     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4399     DisasASI da;
4400 
4401     if (addr == NULL) {
4402         return false;
4403     }
4404     if (gen_trap_ifnofpu(dc)) {
4405         return true;
4406     }
4407     if (sz == MO_128 && gen_trap_float128(dc)) {
4408         return true;
4409     }
4410     da = resolve_asi(dc, a->asi, MO_TE | sz);
4411     gen_ldf_asi(dc, &da, sz, addr, a->rd);
4412     gen_update_fprs_dirty(dc, a->rd);
4413     return advance_pc(dc);
4414 }
4415 
/* FP loads; the explicit-ASI forms are sparc64-only. */
TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)

TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4423 
4424 static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4425 {
4426     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4427     DisasASI da;
4428 
4429     if (addr == NULL) {
4430         return false;
4431     }
4432     if (gen_trap_ifnofpu(dc)) {
4433         return true;
4434     }
4435     if (sz == MO_128 && gen_trap_float128(dc)) {
4436         return true;
4437     }
4438     da = resolve_asi(dc, a->asi, MO_TE | sz);
4439     gen_stf_asi(dc, &da, sz, addr, a->rd);
4440     return advance_pc(dc);
4441 }
4442 
/* FP stores; the explicit-ASI forms are sparc64-only. */
TRANS(STF, ALL, do_st_fpr, a, MO_32)
TRANS(STDF, ALL, do_st_fpr, a, MO_64)
TRANS(STQF, ALL, do_st_fpr, a, MO_128)

TRANS(STFA, 64, do_st_fpr, a, MO_32)
TRANS(STDFA, 64, do_st_fpr, a, MO_64)
TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4450 
/*
 * STDFQ (sparc32-only, privileged): no floating-point queue is
 * implemented, so raise an fp_exception with a sequence error.
 */
static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
{
    if (!avail_32(dc)) {
        return false;
    }
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    return true;
}
4465 
/* LDFSR: load the 32-bit FSR from memory. */
static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i32 tmp;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);

    /* fcc0 is kept unpacked in cpu_fcc[0]; extract it from the word. */
    tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2);
    /* LDFSR does not change FCC[1-3]. */

    /* Remaining fields, except fcc and ftt, are set by the helper. */
    gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp);
    return advance_pc(dc);
}
4487 
/*
 * LDXFSR / LDXEFSR: load the 64-bit FSR.  'entire' selects the helper
 * that also updates ftt (set_fsr_nofcc) instead of the one that
 * preserves it (set_fsr_nofcc_noftt).
 */
static bool do_ldxfsr(DisasContext *dc, arg_r_r_ri *a, bool entire)
{
#ifdef TARGET_SPARC64
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i64 t64;
    TCGv_i32 lo, hi;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    t64 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN);

    /* fcc0 sits in the low word; fcc1-3 in the high word. */
    lo = tcg_temp_new_i32();
    hi = cpu_fcc[3];
    tcg_gen_extr_i64_i32(lo, hi, t64);
    tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2);
    tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2)
;
    tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2);

    if (entire) {
        gen_helper_set_fsr_nofcc(tcg_env, lo);
    } else {
        gen_helper_set_fsr_nofcc_noftt(tcg_env, lo);
    }
    return advance_pc(dc);
#else
    return false;
#endif
}
4523 
/* LDXFSR is sparc64-only; LDXEFSR requires VIS3B. */
TRANS(LDXFSR, 64, do_ldxfsr, a, false)
TRANS(LDXEFSR, VIS3B, do_ldxfsr, a, true)
4526 
4527 static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
4528 {
4529     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4530     TCGv fsr;
4531 
4532     if (addr == NULL) {
4533         return false;
4534     }
4535     if (gen_trap_ifnofpu(dc)) {
4536         return true;
4537     }
4538 
4539     fsr = tcg_temp_new();
4540     gen_helper_get_fsr(fsr, tcg_env);
4541     tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
4542     return advance_pc(dc);
4543 }
4544 
/* STXFSR (64-bit form) is sparc64-only. */
TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4547 
/* Write the 32-bit constant 'c' into F register rd (FZEROs/FONEs). */
static bool do_fc(DisasContext *dc, int rd, int32_t c)
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_store_fpr_F(dc, rd, tcg_constant_i32(c));
    return advance_pc(dc);
}
4556 
TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
TRANS(FONEs, VIS1, do_fc, a->rd, -1)
4559 
/* Write the 64-bit constant 'c' into D register rd (FZEROd/FONEd). */
static bool do_dc(DisasContext *dc, int rd, int64_t c)
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_store_fpr_D(dc, rd, tcg_constant_i64(c));
    return advance_pc(dc);
}
4568 
TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4571 
4572 static bool do_ff(DisasContext *dc, arg_r_r *a,
4573                   void (*func)(TCGv_i32, TCGv_i32))
4574 {
4575     TCGv_i32 tmp;
4576 
4577     if (gen_trap_ifnofpu(dc)) {
4578         return true;
4579     }
4580 
4581     tmp = gen_load_fpr_F(dc, a->rs);
4582     func(tmp, tmp);
4583     gen_store_fpr_F(dc, a->rd, tmp);
4584     return advance_pc(dc);
4585 }
4586 
/* Single-precision moves and bitwise unary ops. */
TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4592 
4593 static bool do_fd(DisasContext *dc, arg_r_r *a,
4594                   void (*func)(TCGv_i32, TCGv_i64))
4595 {
4596     TCGv_i32 dst;
4597     TCGv_i64 src;
4598 
4599     if (gen_trap_ifnofpu(dc)) {
4600         return true;
4601     }
4602 
4603     dst = tcg_temp_new_i32();
4604     src = gen_load_fpr_D(dc, a->rs);
4605     func(dst, src);
4606     gen_store_fpr_F(dc, a->rd, dst);
4607     return advance_pc(dc);
4608 }
4609 
/* VIS pixel-pack operations (64-bit source, 32-bit result). */
TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4612 
4613 static bool do_env_ff(DisasContext *dc, arg_r_r *a,
4614                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
4615 {
4616     TCGv_i32 tmp;
4617 
4618     if (gen_trap_ifnofpu(dc)) {
4619         return true;
4620     }
4621 
4622     tmp = gen_load_fpr_F(dc, a->rs);
4623     func(tmp, tcg_env, tmp);
4624     gen_store_fpr_F(dc, a->rd, tmp);
4625     return advance_pc(dc);
4626 }
4627 
/* Single-precision ops that can raise IEEE exceptions. */
TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4631 
4632 static bool do_env_fd(DisasContext *dc, arg_r_r *a,
4633                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
4634 {
4635     TCGv_i32 dst;
4636     TCGv_i64 src;
4637 
4638     if (gen_trap_ifnofpu(dc)) {
4639         return true;
4640     }
4641 
4642     dst = tcg_temp_new_i32();
4643     src = gen_load_fpr_D(dc, a->rs);
4644     func(dst, tcg_env, src);
4645     gen_store_fpr_F(dc, a->rd, dst);
4646     return advance_pc(dc);
4647 }
4648 
/* Double-to-single/int conversions; FxTOs is sparc64-only. */
TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4652 
4653 static bool do_dd(DisasContext *dc, arg_r_r *a,
4654                   void (*func)(TCGv_i64, TCGv_i64))
4655 {
4656     TCGv_i64 dst, src;
4657 
4658     if (gen_trap_ifnofpu(dc)) {
4659         return true;
4660     }
4661 
4662     dst = tcg_temp_new_i64();
4663     src = gen_load_fpr_D(dc, a->rs);
4664     func(dst, src);
4665     gen_store_fpr_D(dc, a->rd, dst);
4666     return advance_pc(dc);
4667 }
4668 
/* Double-precision moves and bitwise unary ops. */
TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4674 
4675 static bool do_env_dd(DisasContext *dc, arg_r_r *a,
4676                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
4677 {
4678     TCGv_i64 dst, src;
4679 
4680     if (gen_trap_ifnofpu(dc)) {
4681         return true;
4682     }
4683 
4684     dst = tcg_temp_new_i64();
4685     src = gen_load_fpr_D(dc, a->rs);
4686     func(dst, tcg_env, src);
4687     gen_store_fpr_D(dc, a->rd, dst);
4688     return advance_pc(dc);
4689 }
4690 
/* Double-precision ops that can raise IEEE exceptions. */
TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4694 
4695 static bool do_df(DisasContext *dc, arg_r_r *a,
4696                   void (*func)(TCGv_i64, TCGv_i32))
4697 {
4698     TCGv_i64 dst;
4699     TCGv_i32 src;
4700 
4701     if (gen_trap_ifnofpu(dc)) {
4702         return true;
4703     }
4704 
4705     dst = tcg_temp_new_i64();
4706     src = gen_load_fpr_F(dc, a->rs);
4707     func(dst, src);
4708     gen_store_fpr_D(dc, a->rd, dst);
4709     return advance_pc(dc);
4710 }
4711 
4712 TRANS(FEXPAND, VIS1, do_df, a, gen_helper_fexpand)
4713 
4714 static bool do_env_df(DisasContext *dc, arg_r_r *a,
4715                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
4716 {
4717     TCGv_i64 dst;
4718     TCGv_i32 src;
4719 
4720     if (gen_trap_ifnofpu(dc)) {
4721         return true;
4722     }
4723 
4724     dst = tcg_temp_new_i64();
4725     src = gen_load_fpr_F(dc, a->rs);
4726     func(dst, tcg_env, src);
4727     gen_store_fpr_D(dc, a->rd, dst);
4728     return advance_pc(dc);
4729 }
4730 
/* Single-to-double/long conversions; FsTOx is sparc64-only. */
TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4734 
4735 static bool do_qq(DisasContext *dc, arg_r_r *a,
4736                   void (*func)(TCGv_i128, TCGv_i128))
4737 {
4738     TCGv_i128 t;
4739 
4740     if (gen_trap_ifnofpu(dc)) {
4741         return true;
4742     }
4743     if (gen_trap_float128(dc)) {
4744         return true;
4745     }
4746 
4747     gen_op_clear_ieee_excp_and_FTT();
4748     t = gen_load_fpr_Q(dc, a->rs);
4749     func(t, t);
4750     gen_store_fpr_Q(dc, a->rd, t);
4751     return advance_pc(dc);
4752 }
4753 
/* Quad-precision moves and sign manipulation (sparc64-only). */
TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)
4757 
4758 static bool do_env_qq(DisasContext *dc, arg_r_r *a,
4759                       void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
4760 {
4761     TCGv_i128 t;
4762 
4763     if (gen_trap_ifnofpu(dc)) {
4764         return true;
4765     }
4766     if (gen_trap_float128(dc)) {
4767         return true;
4768     }
4769 
4770     t = gen_load_fpr_Q(dc, a->rs);
4771     func(t, tcg_env, t);
4772     gen_store_fpr_Q(dc, a->rd, t);
4773     return advance_pc(dc);
4774 }
4775 
4776 TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4777 
4778 static bool do_env_fq(DisasContext *dc, arg_r_r *a,
4779                       void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
4780 {
4781     TCGv_i128 src;
4782     TCGv_i32 dst;
4783 
4784     if (gen_trap_ifnofpu(dc)) {
4785         return true;
4786     }
4787     if (gen_trap_float128(dc)) {
4788         return true;
4789     }
4790 
4791     src = gen_load_fpr_Q(dc, a->rs);
4792     dst = tcg_temp_new_i32();
4793     func(dst, tcg_env, src);
4794     gen_store_fpr_F(dc, a->rd, dst);
4795     return advance_pc(dc);
4796 }
4797 
TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4800 
4801 static bool do_env_dq(DisasContext *dc, arg_r_r *a,
4802                       void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
4803 {
4804     TCGv_i128 src;
4805     TCGv_i64 dst;
4806 
4807     if (gen_trap_ifnofpu(dc)) {
4808         return true;
4809     }
4810     if (gen_trap_float128(dc)) {
4811         return true;
4812     }
4813 
4814     src = gen_load_fpr_Q(dc, a->rs);
4815     dst = tcg_temp_new_i64();
4816     func(dst, tcg_env, src);
4817     gen_store_fpr_D(dc, a->rd, dst);
4818     return advance_pc(dc);
4819 }
4820 
/* FqTOx is sparc64-only. */
TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4823 
4824 static bool do_env_qf(DisasContext *dc, arg_r_r *a,
4825                       void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
4826 {
4827     TCGv_i32 src;
4828     TCGv_i128 dst;
4829 
4830     if (gen_trap_ifnofpu(dc)) {
4831         return true;
4832     }
4833     if (gen_trap_float128(dc)) {
4834         return true;
4835     }
4836 
4837     src = gen_load_fpr_F(dc, a->rs);
4838     dst = tcg_temp_new_i128();
4839     func(dst, tcg_env, src);
4840     gen_store_fpr_Q(dc, a->rd, dst);
4841     return advance_pc(dc);
4842 }
4843 
TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4846 
4847 static bool do_env_qd(DisasContext *dc, arg_r_r *a,
4848                       void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
4849 {
4850     TCGv_i64 src;
4851     TCGv_i128 dst;
4852 
4853     if (gen_trap_ifnofpu(dc)) {
4854         return true;
4855     }
4856     if (gen_trap_float128(dc)) {
4857         return true;
4858     }
4859 
4860     src = gen_load_fpr_D(dc, a->rs);
4861     dst = tcg_temp_new_i128();
4862     func(dst, tcg_env, src);
4863     gen_store_fpr_Q(dc, a->rd, dst);
4864     return advance_pc(dc);
4865 }
4866 
/* FxTOq is sparc64-only. */
TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
4869 
4870 static bool do_fff(DisasContext *dc, arg_r_r_r *a,
4871                    void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
4872 {
4873     TCGv_i32 src1, src2;
4874 
4875     if (gen_trap_ifnofpu(dc)) {
4876         return true;
4877     }
4878 
4879     src1 = gen_load_fpr_F(dc, a->rs1);
4880     src2 = gen_load_fpr_F(dc, a->rs2);
4881     func(src1, src1, src2);
4882     gen_store_fpr_F(dc, a->rd, src1);
4883     return advance_pc(dc);
4884 }
4885 
/* 32-bit partitioned add/sub and bitwise logical ops (VIS 1). */
TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)

/* Half/negated-half adds (VIS 3). */
TRANS(FHADDs, VIS3, do_fff, a, gen_op_fhadds)
TRANS(FHSUBs, VIS3, do_fff, a, gen_op_fhsubs)
TRANS(FNHADDs, VIS3, do_fff, a, gen_op_fnhadds)

/* Saturating partitioned add/sub, single-register forms (VIS 3). */
TRANS(FPADDS16s, VIS3, do_fff, a, gen_op_fpadds16s)
TRANS(FPSUBS16s, VIS3, do_fff, a, gen_op_fpsubs16s)
TRANS(FPADDS32s, VIS3, do_fff, a, gen_op_fpadds32s)
TRANS(FPSUBS32s, VIS3, do_fff, a, gen_op_fpsubs32s)
4907 
4908 static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
4909                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
4910 {
4911     TCGv_i32 src1, src2;
4912 
4913     if (gen_trap_ifnofpu(dc)) {
4914         return true;
4915     }
4916 
4917     src1 = gen_load_fpr_F(dc, a->rs1);
4918     src2 = gen_load_fpr_F(dc, a->rs2);
4919     func(src1, tcg_env, src1, src2);
4920     gen_store_fpr_F(dc, a->rd, src1);
4921     return advance_pc(dc);
4922 }
4923 
/* Single-precision arithmetic; FNADDs/FNMULs negate the result (VIS 3). */
TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
TRANS(FNADDs, VIS3, do_env_fff, a, gen_helper_fnadds)
TRANS(FNMULs, VIS3, do_env_fff, a, gen_helper_fnmuls)
4930 
4931 static bool do_dff(DisasContext *dc, arg_r_r_r *a,
4932                    void (*func)(TCGv_i64, TCGv_i32, TCGv_i32))
4933 {
4934     TCGv_i64 dst;
4935     TCGv_i32 src1, src2;
4936 
4937     if (gen_trap_ifnofpu(dc)) {
4938         return true;
4939     }
4940 
4941     dst = tcg_temp_new_i64();
4942     src1 = gen_load_fpr_F(dc, a->rs1);
4943     src2 = gen_load_fpr_F(dc, a->rs2);
4944     func(dst, src1, src2);
4945     gen_store_fpr_D(dc, a->rd, dst);
4946     return advance_pc(dc);
4947 }
4948 
/* VIS1 8x16 partitioned multiplies and pixel merge (F x F -> D). */
TRANS(FMUL8x16AU, VIS1, do_dff, a, gen_op_fmul8x16au)
TRANS(FMUL8x16AL, VIS1, do_dff, a, gen_op_fmul8x16al)
TRANS(FMULD8SUx16, VIS1, do_dff, a, gen_op_fmuld8sux16)
TRANS(FMULD8ULx16, VIS1, do_dff, a, gen_op_fmuld8ulx16)
TRANS(FPMERGE, VIS1, do_dff, a, gen_helper_fpmerge)
4954 
4955 static bool do_dfd(DisasContext *dc, arg_r_r_r *a,
4956                    void (*func)(TCGv_i64, TCGv_i32, TCGv_i64))
4957 {
4958     TCGv_i64 dst, src2;
4959     TCGv_i32 src1;
4960 
4961     if (gen_trap_ifnofpu(dc)) {
4962         return true;
4963     }
4964 
4965     dst = tcg_temp_new_i64();
4966     src1 = gen_load_fpr_F(dc, a->rs1);
4967     src2 = gen_load_fpr_D(dc, a->rs2);
4968     func(dst, src1, src2);
4969     gen_store_fpr_D(dc, a->rd, dst);
4970     return advance_pc(dc);
4971 }
4972 
4973 TRANS(FMUL8x16, VIS1, do_dfd, a, gen_helper_fmul8x16)
4974 
4975 static bool do_gvec_ddd(DisasContext *dc, arg_r_r_r *a, MemOp vece,
4976                         void (*func)(unsigned, uint32_t, uint32_t,
4977                                      uint32_t, uint32_t, uint32_t))
4978 {
4979     if (gen_trap_ifnofpu(dc)) {
4980         return true;
4981     }
4982 
4983     func(vece, gen_offset_fpr_D(a->rd), gen_offset_fpr_D(a->rs1),
4984          gen_offset_fpr_D(a->rs2), 8, 8);
4985     return advance_pc(dc);
4986 }
4987 
/* VIS partitioned add/sub and VIS3 checksum/mean via gvec expanders. */
TRANS(FPADD16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_add)
TRANS(FPADD32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_add)
TRANS(FPSUB16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sub)
TRANS(FPSUB32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sub)
TRANS(FCHKSM16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fchksm16)
TRANS(FMEAN16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fmean16)

/* VIS3 signed saturating partitioned add/subtract. */
TRANS(FPADDS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ssadd)
TRANS(FPADDS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_ssadd)
TRANS(FPSUBS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sssub)
TRANS(FPSUBS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sssub)

/* VIS3 partitioned shifts, per-element shift counts from rs2. */
TRANS(FSLL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shlv)
TRANS(FSLL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shlv)
TRANS(FSRL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shrv)
TRANS(FSRL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shrv)
TRANS(FSRA16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sarv)
TRANS(FSRA32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sarv)
5006 
5007 static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
5008                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
5009 {
5010     TCGv_i64 dst, src1, src2;
5011 
5012     if (gen_trap_ifnofpu(dc)) {
5013         return true;
5014     }
5015 
5016     dst = tcg_temp_new_i64();
5017     src1 = gen_load_fpr_D(dc, a->rs1);
5018     src2 = gen_load_fpr_D(dc, a->rs2);
5019     func(dst, src1, src2);
5020     gen_store_fpr_D(dc, a->rd, dst);
5021     return advance_pc(dc);
5022 }
5023 
/* VIS1 16-bit partitioned multiplies taking 8-bit halves of rs1. */
TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)

/* VIS1 64-bit bitwise ops (plain TCG logicals). */
TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)

/* Pixel pack/align/shuffle ops. */
TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)

/* VIS3 double-precision half/negated-half add and subtract. */
TRANS(FHADDd, VIS3, do_ddd, a, gen_op_fhaddd)
TRANS(FHSUBd, VIS3, do_ddd, a, gen_op_fhsubd)
TRANS(FNHADDd, VIS3, do_ddd, a, gen_op_fnhaddd)

/* VIS3 64-bit integer add/sub and partitioned saturating shifts. */
TRANS(FPADD64, VIS3B, do_ddd, a, tcg_gen_add_i64)
TRANS(FPSUB64, VIS3B, do_ddd, a, tcg_gen_sub_i64)
TRANS(FSLAS16, VIS3, do_ddd, a, gen_helper_fslas16)
TRANS(FSLAS32, VIS3, do_ddd, a, gen_helper_fslas32)
5048 
5049 static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
5050                    void (*func)(TCGv, TCGv_i64, TCGv_i64))
5051 {
5052     TCGv_i64 src1, src2;
5053     TCGv dst;
5054 
5055     if (gen_trap_ifnofpu(dc)) {
5056         return true;
5057     }
5058 
5059     dst = gen_dest_gpr(dc, a->rd);
5060     src1 = gen_load_fpr_D(dc, a->rs1);
5061     src2 = gen_load_fpr_D(dc, a->rs2);
5062     func(dst, src1, src2);
5063     gen_store_gpr(dc, a->rd, dst);
5064     return advance_pc(dc);
5065 }
5066 
/* VIS partitioned compares: result bitmask lands in a GPR, not FPR. */
TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)

TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)

TRANS(FPCMPEQ8, VIS3B, do_rdd, a, gen_helper_fcmpeq8)
TRANS(FPCMPNE8, VIS3B, do_rdd, a, gen_helper_fcmpne8)
TRANS(FPCMPULE8, VIS3B, do_rdd, a, gen_helper_fcmpule8)
TRANS(FPCMPUGT8, VIS3B, do_rdd, a, gen_helper_fcmpugt8)

/* VIS3 pixel distance without accumulation. */
TRANS(PDISTN, VIS3, do_rdd, a, gen_op_pdistn)
5083 
5084 static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
5085                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
5086 {
5087     TCGv_i64 dst, src1, src2;
5088 
5089     if (gen_trap_ifnofpu(dc)) {
5090         return true;
5091     }
5092 
5093     dst = tcg_temp_new_i64();
5094     src1 = gen_load_fpr_D(dc, a->rs1);
5095     src2 = gen_load_fpr_D(dc, a->rs2);
5096     func(dst, tcg_env, src1, src2);
5097     gen_store_fpr_D(dc, a->rd, dst);
5098     return advance_pc(dc);
5099 }
5100 
/* Double-precision arithmetic; FNADDd/FNMULd are the VIS3 negated forms. */
TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
TRANS(FNADDd, VIS3, do_env_ddd, a, gen_helper_fnaddd)
TRANS(FNMULd, VIS3, do_env_ddd, a, gen_helper_fnmuld)
5107 
5108 static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
5109 {
5110     TCGv_i64 dst;
5111     TCGv_i32 src1, src2;
5112 
5113     if (gen_trap_ifnofpu(dc)) {
5114         return true;
5115     }
5116     if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
5117         return raise_unimpfpop(dc);
5118     }
5119 
5120     dst = tcg_temp_new_i64();
5121     src1 = gen_load_fpr_F(dc, a->rs1);
5122     src2 = gen_load_fpr_F(dc, a->rs2);
5123     gen_helper_fsmuld(dst, tcg_env, src1, src2);
5124     gen_store_fpr_D(dc, a->rd, dst);
5125     return advance_pc(dc);
5126 }
5127 
5128 static bool trans_FNsMULd(DisasContext *dc, arg_r_r_r *a)
5129 {
5130     TCGv_i64 dst;
5131     TCGv_i32 src1, src2;
5132 
5133     if (!avail_VIS3(dc)) {
5134         return false;
5135     }
5136     if (gen_trap_ifnofpu(dc)) {
5137         return true;
5138     }
5139     dst = tcg_temp_new_i64();
5140     src1 = gen_load_fpr_F(dc, a->rs1);
5141     src2 = gen_load_fpr_F(dc, a->rs2);
5142     gen_helper_fnsmuld(dst, tcg_env, src1, src2);
5143     gen_store_fpr_D(dc, a->rd, dst);
5144     return advance_pc(dc);
5145 }
5146 
5147 static bool do_ffff(DisasContext *dc, arg_r_r_r_r *a,
5148                     void (*func)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
5149 {
5150     TCGv_i32 dst, src1, src2, src3;
5151 
5152     if (gen_trap_ifnofpu(dc)) {
5153         return true;
5154     }
5155 
5156     src1 = gen_load_fpr_F(dc, a->rs1);
5157     src2 = gen_load_fpr_F(dc, a->rs2);
5158     src3 = gen_load_fpr_F(dc, a->rs3);
5159     dst = tcg_temp_new_i32();
5160     func(dst, src1, src2, src3);
5161     gen_store_fpr_F(dc, a->rd, dst);
5162     return advance_pc(dc);
5163 }
5164 
/* FMAF single-precision fused multiply-add variants. */
TRANS(FMADDs, FMAF, do_ffff, a, gen_op_fmadds)
TRANS(FMSUBs, FMAF, do_ffff, a, gen_op_fmsubs)
TRANS(FNMSUBs, FMAF, do_ffff, a, gen_op_fnmsubs)
TRANS(FNMADDs, FMAF, do_ffff, a, gen_op_fnmadds)
5169 
5170 static bool do_dddd(DisasContext *dc, arg_r_r_r_r *a,
5171                     void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
5172 {
5173     TCGv_i64 dst, src1, src2, src3;
5174 
5175     if (gen_trap_ifnofpu(dc)) {
5176         return true;
5177     }
5178 
5179     dst  = tcg_temp_new_i64();
5180     src1 = gen_load_fpr_D(dc, a->rs1);
5181     src2 = gen_load_fpr_D(dc, a->rs2);
5182     src3 = gen_load_fpr_D(dc, a->rs3);
5183     func(dst, src1, src2, src3);
5184     gen_store_fpr_D(dc, a->rd, dst);
5185     return advance_pc(dc);
5186 }
5187 
/* PDIST accumulates into rd; FMAF double-precision fused variants. */
TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
TRANS(FMADDd, FMAF, do_dddd, a, gen_op_fmaddd)
TRANS(FMSUBd, FMAF, do_dddd, a, gen_op_fmsubd)
TRANS(FNMSUBd, FMAF, do_dddd, a, gen_op_fnmsubd)
TRANS(FNMADDd, FMAF, do_dddd, a, gen_op_fnmaddd)
5193 
5194 static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
5195                        void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
5196 {
5197     TCGv_i128 src1, src2;
5198 
5199     if (gen_trap_ifnofpu(dc)) {
5200         return true;
5201     }
5202     if (gen_trap_float128(dc)) {
5203         return true;
5204     }
5205 
5206     src1 = gen_load_fpr_Q(dc, a->rs1);
5207     src2 = gen_load_fpr_Q(dc, a->rs2);
5208     func(src1, tcg_env, src1, src2);
5209     gen_store_fpr_Q(dc, a->rd, src1);
5210     return advance_pc(dc);
5211 }
5212 
/* Quad-precision arithmetic. */
TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
5217 
5218 static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
5219 {
5220     TCGv_i64 src1, src2;
5221     TCGv_i128 dst;
5222 
5223     if (gen_trap_ifnofpu(dc)) {
5224         return true;
5225     }
5226     if (gen_trap_float128(dc)) {
5227         return true;
5228     }
5229 
5230     src1 = gen_load_fpr_D(dc, a->rs1);
5231     src2 = gen_load_fpr_D(dc, a->rs2);
5232     dst = tcg_temp_new_i128();
5233     gen_helper_fdmulq(dst, tcg_env, src1, src2);
5234     gen_store_fpr_Q(dc, a->rd, dst);
5235     return advance_pc(dc);
5236 }
5237 
/*
 * FMOVR: conditionally move an FP register based on comparing the
 * integer register rs1 against zero.  is_128 gates the quad form
 * behind the float128 availability trap.
 */
static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
                     void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    /* A reserved rcond encoding fails here -> decoded as illegal insn. */
    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}
5257 
/* FMOVR on single/double/quad registers (SPARC64 only). */
TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
5261 
/*
 * FMOVcc: conditionally move an FP register based on the integer
 * condition codes selected by a->cc.
 */
static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
                      void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_compare(&cmp, a->cc, a->cond, dc);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}
5279 
/* FMOVcc on single/double/quad registers (SPARC64 only). */
TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
5283 
/*
 * FMOVfcc: conditionally move an FP register based on the floating
 * point condition codes selected by a->cc.
 */
static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
                       void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_fcompare(&cmp, a->cc, a->cond);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}
5301 
/* FMOVfcc on single/double/quad registers (SPARC64 only). */
TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
5305 
5306 static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
5307 {
5308     TCGv_i32 src1, src2;
5309 
5310     if (avail_32(dc) && a->cc != 0) {
5311         return false;
5312     }
5313     if (gen_trap_ifnofpu(dc)) {
5314         return true;
5315     }
5316 
5317     src1 = gen_load_fpr_F(dc, a->rs1);
5318     src2 = gen_load_fpr_F(dc, a->rs2);
5319     if (e) {
5320         gen_helper_fcmpes(cpu_fcc[a->cc], tcg_env, src1, src2);
5321     } else {
5322         gen_helper_fcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
5323     }
5324     return advance_pc(dc);
5325 }
5326 
/* Non-signaling and signaling single-precision compares. */
TRANS(FCMPs, ALL, do_fcmps, a, false)
TRANS(FCMPEs, ALL, do_fcmps, a, true)
5329 
5330 static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
5331 {
5332     TCGv_i64 src1, src2;
5333 
5334     if (avail_32(dc) && a->cc != 0) {
5335         return false;
5336     }
5337     if (gen_trap_ifnofpu(dc)) {
5338         return true;
5339     }
5340 
5341     src1 = gen_load_fpr_D(dc, a->rs1);
5342     src2 = gen_load_fpr_D(dc, a->rs2);
5343     if (e) {
5344         gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2);
5345     } else {
5346         gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
5347     }
5348     return advance_pc(dc);
5349 }
5350 
/* Non-signaling and signaling double-precision compares. */
TRANS(FCMPd, ALL, do_fcmpd, a, false)
TRANS(FCMPEd, ALL, do_fcmpd, a, true)
5353 
5354 static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
5355 {
5356     TCGv_i128 src1, src2;
5357 
5358     if (avail_32(dc) && a->cc != 0) {
5359         return false;
5360     }
5361     if (gen_trap_ifnofpu(dc)) {
5362         return true;
5363     }
5364     if (gen_trap_float128(dc)) {
5365         return true;
5366     }
5367 
5368     src1 = gen_load_fpr_Q(dc, a->rs1);
5369     src2 = gen_load_fpr_Q(dc, a->rs2);
5370     if (e) {
5371         gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2);
5372     } else {
5373         gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2);
5374     }
5375     return advance_pc(dc);
5376 }
5377 
/* Non-signaling and signaling quad-precision compares. */
TRANS(FCMPq, ALL, do_fcmpq, a, false)
TRANS(FCMPEq, ALL, do_fcmpq, a, true)
5380 
5381 static bool trans_FLCMPs(DisasContext *dc, arg_FLCMPs *a)
5382 {
5383     TCGv_i32 src1, src2;
5384 
5385     if (!avail_VIS3(dc)) {
5386         return false;
5387     }
5388     if (gen_trap_ifnofpu(dc)) {
5389         return true;
5390     }
5391 
5392     src1 = gen_load_fpr_F(dc, a->rs1);
5393     src2 = gen_load_fpr_F(dc, a->rs2);
5394     gen_helper_flcmps(cpu_fcc[a->cc], src1, src2);
5395     return advance_pc(dc);
5396 }
5397 
5398 static bool trans_FLCMPd(DisasContext *dc, arg_FLCMPd *a)
5399 {
5400     TCGv_i64 src1, src2;
5401 
5402     if (!avail_VIS3(dc)) {
5403         return false;
5404     }
5405     if (gen_trap_ifnofpu(dc)) {
5406         return true;
5407     }
5408 
5409     src1 = gen_load_fpr_D(dc, a->rs1);
5410     src2 = gen_load_fpr_D(dc, a->rs2);
5411     gen_helper_flcmpd(cpu_fcc[a->cc], src1, src2);
5412     return advance_pc(dc);
5413 }
5414 
5415 static bool do_movf2r(DisasContext *dc, arg_r_r *a,
5416                       int (*offset)(unsigned int),
5417                       void (*load)(TCGv, TCGv_ptr, tcg_target_long))
5418 {
5419     TCGv dst;
5420 
5421     if (gen_trap_ifnofpu(dc)) {
5422         return true;
5423     }
5424     dst = gen_dest_gpr(dc, a->rd);
5425     load(dst, tcg_env, offset(a->rs));
5426     gen_store_gpr(dc, a->rd, dst);
5427     return advance_pc(dc);
5428 }
5429 
/* VIS3B FPR -> GPR moves: sign-/zero-extended word, or full 64 bits. */
TRANS(MOVsTOsw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32s_tl)
TRANS(MOVsTOuw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32u_tl)
TRANS(MOVdTOx, VIS3B, do_movf2r, a, gen_offset_fpr_D, tcg_gen_ld_tl)
5433 
5434 static bool do_movr2f(DisasContext *dc, arg_r_r *a,
5435                       int (*offset)(unsigned int),
5436                       void (*store)(TCGv, TCGv_ptr, tcg_target_long))
5437 {
5438     TCGv src;
5439 
5440     if (gen_trap_ifnofpu(dc)) {
5441         return true;
5442     }
5443     src = gen_load_gpr(dc, a->rs);
5444     store(src, tcg_env, offset(a->rd));
5445     return advance_pc(dc);
5446 }
5447 
/* VIS3B GPR -> FPR moves: low 32 bits, or full 64 bits. */
TRANS(MOVwTOs, VIS3B, do_movr2f, a, gen_offset_fpr_F, tcg_gen_st32_tl)
TRANS(MOVxTOd, VIS3B, do_movr2f, a, gen_offset_fpr_D, tcg_gen_st_tl)
5450 
/*
 * Translator hook: set up per-TB DisasContext state from the TB flags
 * and cs_base before any instruction is translated.
 */
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    int bound;

    dc->pc = dc->base.pc_first;
    /* npc is carried between TBs in cs_base (delay-slot state). */
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &cpu_env(cs)->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    /* Default ASI for alternate-space accesses, cached in the flags. */
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    /* Bytes remaining in the current page, divided by 4 bytes/insn. */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
5479 
/* Translator hook: nothing to emit at the start of a TB. */
static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
5483 
/*
 * Translator hook: record (pc, npc) for this insn so the state can be
 * reconstructed on exception (see sparc_restore_state_to_opc).  A
 * non-zero low two bits of npc mark one of the symbolic encodings
 * rather than a real address.
 */
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            /* Store the branch target with the JUMP_PC marker bits;
               the fall-through (jump_pc[1]) is implicitly pc + 4.  */
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* Both dynamic forms restore identically. */
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}
5505 
5506 static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
5507 {
5508     DisasContext *dc = container_of(dcbase, DisasContext, base);
5509     unsigned int insn;
5510 
5511     insn = translator_ldl(cpu_env(cs), &dc->base, dc->pc);
5512     dc->base.pc_next += 4;
5513 
5514     if (!decode(dc, insn)) {
5515         gen_exception(dc, TT_ILL_INSN);
5516     }
5517 
5518     if (dc->base.is_jmp == DISAS_NORETURN) {
5519         return;
5520     }
5521     if (dc->pc != dc->base.pc_next) {
5522         dc->base.is_jmp = DISAS_TOO_MANY;
5523     }
5524 }
5525 
/*
 * Translator hook: close the TB.  Materialize pc/npc (which may be
 * static addresses or symbolic DYNAMIC_PC / DYNAMIC_PC_LOOKUP /
 * JUMP_PC markers), choose the exit mechanism, then emit any deferred
 * delay-slot exception stubs accumulated during translation.
 */
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    finishing_insn(dc);

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        /*
         * At least one of pc/npc is symbolic.  Decide whether a TB
         * lookup by (pc, npc) is worthwhile: DYNAMIC_PC means the
         * register values are already current and a lookup cannot
         * help, so fall back to a plain exit.
         */
        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /* Static pc: store it now. */
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                /* Resolve the pending conditional branch into npc. */
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /* Static npc: store it now. */
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
       break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    /* Emit the out-of-line stubs for exceptions raised in delay slots. */
    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        /* npc is stored only when static (low bits clear). */
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}
5607 
/* Hook table consumed by the generic translator_loop(). */
static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
};
5615 
/* Entry point: translate one guest TB into TCG ops via the hook table. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}
5623 
/*
 * One-time creation of the TCG global variables backing the CPU state:
 * condition codes, control registers, and the 32 windowed integer
 * registers.
 */
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };

    /* 32-bit globals: FP condition codes, plus FPRS on sparc64. */
    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" },
        { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" },
        { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" },
        { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" },
#else
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" },
#endif
    };

    /* target_ulong-sized globals: integer CC fields, pc/npc, etc. */
    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
        { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
#endif
        { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
        { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
        { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
        { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    /* %g0 has no backing global; it is special-cased by the users. */
    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    /* Windowed registers %o/%l/%i live behind the regwptr indirection. */
    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }
}
5689 
/*
 * Rebuild env->pc / env->npc from the insn_start data recorded by
 * sparc_tr_insn_start: data[0] is pc, data[1] is npc in the same
 * (possibly symbolic) encoding written there.
 */
void sparc_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    CPUSPARCState *env = cpu_env(cs);
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            /* Branch taken: target is npc with the marker bits masked. */
            env->npc = npc & ~3;
        } else {
            /* Branch not taken: fall through past the delay slot. */
            env->npc = pc + 4;
        }
    } else {
        env->npc = npc;
    }
}
5712