xref: /qemu/target/sparc/translate.c (revision 680af1b4)
/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "fpu/softfloat.h"
#include "asi.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

#ifdef TARGET_SPARC64
# define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdasr17(D, E)               qemu_build_not_reached()
# define gen_helper_rett(E)                     qemu_build_not_reached()
# define gen_helper_power_down(E)               qemu_build_not_reached()
# define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
#else
# define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
# define gen_helper_done(E)                     qemu_build_not_reached()
# define gen_helper_flushw(E)                   qemu_build_not_reached()
# define gen_helper_fmul8x16a(D, S1, S2)        qemu_build_not_reached()
# define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
# define gen_helper_restored(E)                 qemu_build_not_reached()
# define gen_helper_retry(E)                    qemu_build_not_reached()
# define gen_helper_saved(E)                    qemu_build_not_reached()
# define gen_helper_set_softint(E, S)           qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
# define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
# define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
# define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
# define gen_helper_write_softint(E, S)         qemu_build_not_reached()
# define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
# define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
# define gen_helper_cmask8               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_cmask16              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_cmask32              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq8              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne8              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule8             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt8             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fdtox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fexpand              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8sux16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8ulx16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fpmerge              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fqtox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fslas16              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fslas32              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fstox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtod                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtoq                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtos                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_pdist                ({ qemu_build_not_reached(); NULL; })
# define MAXTL_MASK                             0
#endif

/* Dynamic PC, must exit to main loop. */
#define DYNAMIC_PC         1
/* Dynamic PC, one of two values according to jump_pc[T2]. */
#define JUMP_PC            2
/* Dynamic PC, may lookup next TB. */
#define DYNAMIC_PC_LOOKUP  3

#define DISAS_EXIT  DISAS_TARGET_0
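
/*
 * These marker values can share storage with real pc/npc values because
 * SPARC instructions are 4-byte aligned: a genuine address always has its
 * low two bits clear, which is what tests such as "dc->npc & 3" below
 * exploit to tell the dynamic cases apart from literal addresses.
 */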

/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
static TCGv cpu_tbr;
static TCGv cpu_cond;
static TCGv cpu_cc_N;
static TCGv cpu_cc_V;
static TCGv cpu_icc_Z;
static TCGv cpu_icc_C;
#ifdef TARGET_SPARC64
static TCGv cpu_xcc_Z;
static TCGv cpu_xcc_C;
static TCGv_i32 cpu_fprs;
static TCGv cpu_gsr;
#else
# define cpu_fprs               ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr                ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif

#ifdef TARGET_SPARC64
#define cpu_cc_Z  cpu_xcc_Z
#define cpu_cc_C  cpu_xcc_C
#else
#define cpu_cc_Z  cpu_icc_Z
#define cpu_cc_C  cpu_icc_C
#define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
#define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
#endif

/* Floating point comparison registers */
static TCGv_i32 cpu_fcc[TARGET_FCCREGS];

#define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
#ifdef TARGET_SPARC64
# define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X)  env_field_offsetof(X)
#else
# define env32_field_offsetof(X)  env_field_offsetof(X)
# define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
#endif

typedef struct DisasCompare {
    TCGCond cond;
    TCGv c1;
    int c2;
} DisasCompare;

typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */

    /* Used when JUMP_PC value is used. */
    DisasCompare jump;
    target_ulong jump_pc[2];

    int mem_idx;
    bool cpu_cond_live;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;

// This macro uses non-native bit order, i.e. bit 0 is the MSB
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This macro uses the bit order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

#define IS_IMM (insn & (1<<13))
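
/*
 * Worked example of the two numbering schemes: the rd field of a SETHI
 * insn occupies insn bits 29..25 in the manual's (little-endian) order.
 * That is GET_FIELD_SP(insn, 25, 29), which expands to
 * GET_FIELD(insn, 2, 6) == (insn >> 25) & 0x1f in the big-endian
 * numbering used by GET_FIELD, where bit 0 is the MSB.
 */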

static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}

/* floating point registers moves */

static int gen_offset_fpr_F(unsigned int reg)
{
    int ret;

    tcg_debug_assert(reg < 32);
    ret = offsetof(CPUSPARCState, fpr[reg / 2]);
    if (reg & 1) {
        ret += offsetof(CPU_DoubleU, l.lower);
    } else {
        ret += offsetof(CPU_DoubleU, l.upper);
    }
    return ret;
}

static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env, gen_offset_fpr_F(src));
    return ret;
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    tcg_gen_st_i32(v, tcg_env, gen_offset_fpr_F(dst));
    gen_update_fprs_dirty(dc, dst);
}

static int gen_offset_fpr_D(unsigned int reg)
{
    tcg_debug_assert(reg < 64);
    tcg_debug_assert(reg % 2 == 0);
    return offsetof(CPUSPARCState, fpr[reg / 2]);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, gen_offset_fpr_D(src));
    return ret;
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    tcg_gen_st_i64(v, tcg_env, gen_offset_fpr_D(dst));
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
{
    TCGv_i128 ret = tcg_temp_new_i128();
    TCGv_i64 h = gen_load_fpr_D(dc, src);
    TCGv_i64 l = gen_load_fpr_D(dc, src + 2);

    tcg_gen_concat_i64_i128(ret, l, h);
    return ret;
}

static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
{
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 l = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, v);
    gen_store_fpr_D(dc, dst, h);
    gen_store_fpr_D(dc, dst + 2, l);
}
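
/*
 * To recap the register file layout implied above: two single-precision
 * registers overlay each CPU_DoubleU element of fpr[], with the
 * even-numbered register in the upper half; a double occupies one whole
 * (even-numbered) element; and a quad is two consecutive doubles, which
 * is why gen_load_fpr_Q/gen_store_fpr_Q split the TCGv_i128 between
 * registers "src" and "src + 2".
 */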

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0
#endif
#endif

#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc)  false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc)  true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc)  false
#else
# define AM_CHECK(dc)  ((dc)->address_mask_32bit)
#endif

static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}

static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
{
    return AM_CHECK(dc) ? (uint32_t)addr : addr;
}

static TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = tcg_temp_new();
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return tcg_temp_new();
    }
}

static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}

static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc))  {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static TCGv gen_carry32(void)
{
    if (TARGET_LONG_BITS == 64) {
        TCGv t = tcg_temp_new();
        tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
        return t;
    }
    return cpu_icc_C;
}
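
/*
 * On sparc64 the 32-bit carry lives in bit 32 of cpu_icc_C, the natural
 * position for the carry out of a 32-bit addition performed in a 64-bit
 * register, hence the single-bit extract above.
 */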

static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
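
/*
 * The V computation above uses the identity
 *     overflow = (res ^ src2) & ~(src1 ^ src2)
 * i.e. the operands had equal sign bits and the result's sign differs.
 * Only the sign bit of cpu_cc_V (bit 31 for icc) is meaningful; the
 * consumers in gen_compare() test exactly that bit.
 */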

static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}

static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}

static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_addxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, cpu_cc_C);
}

static void gen_op_addxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, cpu_cc_C);
}

static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
    tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
#ifdef TARGET_SPARC64
    tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}

static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}

static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv one = tcg_constant_tl(1);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}
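
/*
 * MULSCC is one step of the V8 iterative multiply: each step shifts the
 * partial-product/multiplier pair right by one bit through %y and
 * conditionally adds rs2.  Guest code typically issues 32 of these
 * (plus a final adjustment step) to build a full 32x32 multiply.
 */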

static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

static void gen_op_umulxhi(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv discard = tcg_temp_new();
    tcg_gen_mulu2_tl(discard, dst, src1, src2);
}

static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}

static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}

static void gen_op_lzcnt(TCGv dst, TCGv src)
{
    tcg_gen_clzi_tl(dst, src, TARGET_LONG_BITS);
}

#ifndef TARGET_SPARC64
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif

static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}

static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpadds16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_add_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}

static void gen_op_fpsubs16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_sub_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}

static void gen_op_fpadds32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_add_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src2);
    tcg_gen_andc_i32(v, v, t);

    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}
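
/*
 * Saturation trick used above: V is negative exactly on signed overflow,
 * and T precomputes the saturated value as INT32_MAX + (r >= 0).  When
 * the raw sum wrapped to a non-negative value the true result was
 * negative, and the addition wraps T to INT32_MIN; otherwise T stays
 * INT32_MAX.  The movcond then selects T on overflow and R otherwise.
 */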

static void gen_op_fpsubs32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_sub_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src1);
    tcg_gen_and_i32(v, v, t);

    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}

static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, cpu_gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_pdistn(TCGv dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_pdist(dst, tcg_constant_i64(0), src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fmul8x16al(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_ext16s_i32(src2, src2);
    gen_helper_fmul8x16a(dst, src1, src2);
}

static void gen_op_fmul8x16au(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_sari_i32(src2, src2, 16);
    gen_helper_fmul8x16a(dst, src1, src2);
}

static void gen_op_fmuld8ulx16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    tcg_gen_ext8u_i32(t0, src1);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_extract_i32(t1, src1, 16, 8);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

static void gen_op_fmuld8sux16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /*
     * The insn description talks about extracting the upper 8 bits
     * of the signed 16-bit input rs1, performing the multiply, then
     * shifting left by 8 bits.  Instead, zap the lower 8 bits of
     * the rs1 input, which avoids the need for two shifts.
     */
    tcg_gen_ext16s_i32(t0, src1);
    tcg_gen_andi_i32(t0, t0, ~0xff);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_sextract_i32(t1, src1, 16, 16);
    tcg_gen_andi_i32(t1, t1, ~0xff);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

#ifdef TARGET_SPARC64
static void gen_vec_fchksm16(unsigned vece, TCGv_vec dst,
                             TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec a = tcg_temp_new_vec_matching(dst);
    TCGv_vec c = tcg_temp_new_vec_matching(dst);

    tcg_gen_add_vec(vece, a, src1, src2);
    tcg_gen_cmp_vec(TCG_COND_LTU, vece, c, a, src1);
    /* Vector cmp produces -1 for true, so subtract to add carry. */
    tcg_gen_sub_vec(vece, dst, a, c);
}

static void gen_op_fchksm16(unsigned vece, uint32_t dofs, uint32_t aofs,
                            uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, INDEX_op_add_vec, INDEX_op_sub_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fchksm16,
        .fniv = gen_vec_fchksm16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}
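
/*
 * FCHKSM16 is a ones-complement (end-around carry) 16-bit add, the
 * primitive used by IP-style checksums: any carry out of a lane is
 * added back into that lane.  The vector comparison above yields -1
 * for true, so subtracting it re-adds the carry.
 */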

static void gen_vec_fmean16(unsigned vece, TCGv_vec dst,
                            TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec t = tcg_temp_new_vec_matching(dst);

    tcg_gen_or_vec(vece, t, src1, src2);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(dst, vece, 1));
    tcg_gen_sari_vec(vece, src1, src1, 1);
    tcg_gen_sari_vec(vece, src2, src2, 1);
    tcg_gen_add_vec(vece, dst, src1, src2);
    tcg_gen_add_vec(vece, dst, dst, t);
}
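
/*
 * This computes (a + b + 1) >> 1 per lane without widening:
 * (a >> 1) + (b >> 1) drops the two low bits, and the rounding term
 * ((a | b) & 1) equals ((a & 1) + (b & 1) + 1) >> 1 exactly.
 */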

static void gen_op_fmean16(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_add_vec, INDEX_op_sari_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fmean16,
        .fniv = gen_vec_fmean16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}
#else
#define gen_op_fchksm16   ({ qemu_build_not_reached(); NULL; })
#define gen_op_fmean16    ({ qemu_build_not_reached(); NULL; })
#endif

static void finishing_insn(DisasContext *dc)
{
    /*
     * From here, there is no future path through an unwinding exception.
     * If the current insn cannot raise an exception, the computation of
     * cpu_cond may be able to be elided.
     */
    if (dc->cpu_cond_live) {
        tcg_gen_discard_tl(cpu_cond);
        dc->cpu_cond_live = false;
    }
}

static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv c2 = tcg_constant_tl(dc->jump.c2);

    tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
}

/* call this function before using the condition register as it may
   have been set for a jump */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}

static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}

static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
{
    DisasDelayException *e = g_new0(DisasDelayException, 1);

    e->next = dc->delay_excp_list;
    dc->delay_excp_list = e;

    e->lab = gen_new_label();
    e->excp = excp;
    e->pc = dc->pc;
    /* Caller must have used flush_cond before branch. */
    assert(dc->npc != JUMP_PC);
    e->npc = dc->npc;

    return e->lab;
}
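
/*
 * A delayed exception lets a conditional trap branch out of the middle
 * of an insn: the branch goes to e->lab, and the code that raises
 * e->excp with the saved pc/npc is emitted once, after the rest of the
 * TB has been translated (in the tb_stop hook, outside this excerpt).
 */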

static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}

static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}

static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}

static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}

static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    TCGv_i32 fcc = cpu_fcc[cc];
    TCGv_i32 c1 = fcc;
    int c2 = 0;
    TCGCond tcond;

    /*
     * FCC values:
     * 0 =
     * 1 <
     * 2 >
     * 3 unordered
     */
    switch (cond & 7) {
    case 0x0: /* fbn */
        tcond = TCG_COND_NEVER;
        break;
    case 0x1: /* fbne : !0 */
        tcond = TCG_COND_NE;
        break;
    case 0x2: /* fblg : 1 or 2 */
        /* fcc in {1,2} - 1 -> fcc in {0,1} */
        c1 = tcg_temp_new_i32();
        tcg_gen_addi_i32(c1, fcc, -1);
        c2 = 1;
        tcond = TCG_COND_LEU;
        break;
    case 0x3: /* fbul : 1 or 3 */
        c1 = tcg_temp_new_i32();
        tcg_gen_andi_i32(c1, fcc, 1);
        tcond = TCG_COND_NE;
        break;
    case 0x4: /* fbl  : 1 */
        c2 = 1;
        tcond = TCG_COND_EQ;
        break;
    case 0x5: /* fbug : 2 or 3 */
        c2 = 2;
        tcond = TCG_COND_GEU;
        break;
    case 0x6: /* fbg  : 2 */
        c2 = 2;
        tcond = TCG_COND_EQ;
        break;
    case 0x7: /* fbu  : 3 */
        c2 = 3;
        tcond = TCG_COND_EQ;
        break;
    }
    if (cond & 8) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c2 = c2;
    cmp->c1 = tcg_temp_new();
    tcg_gen_extu_i32_tl(cmp->c1, c1);
}

static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    static const TCGCond cond_reg[4] = {
        TCG_COND_NEVER,  /* reserved */
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
    };
    TCGCond tcond;

    if ((cond & 3) == 0) {
        return false;
    }
    tcond = cond_reg[cond & 3];
    if (cond & 4) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c1 = tcg_temp_new();
    cmp->c2 = 0;
    tcg_gen_mov_tl(cmp->c1, r_src);
    return true;
}

static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
}

static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}

static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i32(dst, src, 1u << 31);
}

static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i32(dst, src, ~(1u << 31));
}

static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}

static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i64(dst, src, 1ull << 63);
}

static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i64(dst, src, ~(1ull << 63));
}

static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_xori_i64(h, h, 1ull << 63);
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_andi_i64(h, h, ~(1ull << 63));
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}

static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}

static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_c;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_c;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_c | float_muladd_negate_result;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_c | float_muladd_negate_result;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_result;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_result;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

/* Use muladd to compute (1 * src1) + src2 / 2 with one rounding. */
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

/* Use muladd to compute (1 * src1) - src2 / 2 with one rounding. */
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_negate_c | float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_negate_c | float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

/* Use muladd to compute -((1 * src1) + src2 / 2) with one rounding. */
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_negate_result | float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_negate_result | float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fpexception_im(DisasContext *dc, int ftt)
{
    /*
     * CEXC is only set when successfully completing an FPop,
     * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
     * Thus we can simply store FTT into this field.
     */
    tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

/* asi moves */
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_CODE,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    MemOp memop;
} DisasASI;

/*
 * Build DisasASI.
 * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
 */
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect.  */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_USERTXT:     /* User text access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_KERNELTXT:   /* Supervisor text access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below don't have any ASIs
1528        in the restricted [0x30, 0x7f] range, and the check will be
1529        done properly in the helper.  */
1530     if (!supervisor(dc) && asi < 0x80) {
1531         gen_exception(dc, TT_PRIV_ACT);
1532         type = GET_ASI_EXCP;
1533     } else {
1534         switch (asi) {
1535         case ASI_REAL:      /* Bypass */
1536         case ASI_REAL_IO:   /* Bypass, non-cacheable */
1537         case ASI_REAL_L:    /* Bypass LE */
1538         case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1539         case ASI_TWINX_REAL:   /* Real address, twinx */
1540         case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1541         case ASI_QUAD_LDD_PHYS:
1542         case ASI_QUAD_LDD_PHYS_L:
1543             mem_idx = MMU_PHYS_IDX;
1544             break;
1545         case ASI_N:  /* Nucleus */
1546         case ASI_NL: /* Nucleus LE */
1547         case ASI_TWINX_N:
1548         case ASI_TWINX_NL:
1549         case ASI_NUCLEUS_QUAD_LDD:
1550         case ASI_NUCLEUS_QUAD_LDD_L:
1551             if (hypervisor(dc)) {
1552                 mem_idx = MMU_PHYS_IDX;
1553             } else {
1554                 mem_idx = MMU_NUCLEUS_IDX;
1555             }
1556             break;
1557         case ASI_AIUP:  /* As if user primary */
1558         case ASI_AIUPL: /* As if user primary LE */
1559         case ASI_TWINX_AIUP:
1560         case ASI_TWINX_AIUP_L:
1561         case ASI_BLK_AIUP_4V:
1562         case ASI_BLK_AIUP_L_4V:
1563         case ASI_BLK_AIUP:
1564         case ASI_BLK_AIUPL:
1565             mem_idx = MMU_USER_IDX;
1566             break;
1567         case ASI_AIUS:  /* As if user secondary */
1568         case ASI_AIUSL: /* As if user secondary LE */
1569         case ASI_TWINX_AIUS:
1570         case ASI_TWINX_AIUS_L:
1571         case ASI_BLK_AIUS_4V:
1572         case ASI_BLK_AIUS_L_4V:
1573         case ASI_BLK_AIUS:
1574         case ASI_BLK_AIUSL:
1575             mem_idx = MMU_USER_SECONDARY_IDX;
1576             break;
1577         case ASI_S:  /* Secondary */
1578         case ASI_SL: /* Secondary LE */
1579         case ASI_TWINX_S:
1580         case ASI_TWINX_SL:
1581         case ASI_BLK_COMMIT_S:
1582         case ASI_BLK_S:
1583         case ASI_BLK_SL:
1584         case ASI_FL8_S:
1585         case ASI_FL8_SL:
1586         case ASI_FL16_S:
1587         case ASI_FL16_SL:
1588             if (mem_idx == MMU_USER_IDX) {
1589                 mem_idx = MMU_USER_SECONDARY_IDX;
1590             } else if (mem_idx == MMU_KERNEL_IDX) {
1591                 mem_idx = MMU_KERNEL_SECONDARY_IDX;
1592             }
1593             break;
1594         case ASI_P:  /* Primary */
1595         case ASI_PL: /* Primary LE */
1596         case ASI_TWINX_P:
1597         case ASI_TWINX_PL:
1598         case ASI_BLK_COMMIT_P:
1599         case ASI_BLK_P:
1600         case ASI_BLK_PL:
1601         case ASI_FL8_P:
1602         case ASI_FL8_PL:
1603         case ASI_FL16_P:
1604         case ASI_FL16_PL:
1605             break;
1606         }
1607         switch (asi) {
1608         case ASI_REAL:
1609         case ASI_REAL_IO:
1610         case ASI_REAL_L:
1611         case ASI_REAL_IO_L:
1612         case ASI_N:
1613         case ASI_NL:
1614         case ASI_AIUP:
1615         case ASI_AIUPL:
1616         case ASI_AIUS:
1617         case ASI_AIUSL:
1618         case ASI_S:
1619         case ASI_SL:
1620         case ASI_P:
1621         case ASI_PL:
1622             type = GET_ASI_DIRECT;
1623             break;
1624         case ASI_TWINX_REAL:
1625         case ASI_TWINX_REAL_L:
1626         case ASI_TWINX_N:
1627         case ASI_TWINX_NL:
1628         case ASI_TWINX_AIUP:
1629         case ASI_TWINX_AIUP_L:
1630         case ASI_TWINX_AIUS:
1631         case ASI_TWINX_AIUS_L:
1632         case ASI_TWINX_P:
1633         case ASI_TWINX_PL:
1634         case ASI_TWINX_S:
1635         case ASI_TWINX_SL:
1636         case ASI_QUAD_LDD_PHYS:
1637         case ASI_QUAD_LDD_PHYS_L:
1638         case ASI_NUCLEUS_QUAD_LDD:
1639         case ASI_NUCLEUS_QUAD_LDD_L:
1640             type = GET_ASI_DTWINX;
1641             break;
1642         case ASI_BLK_COMMIT_P:
1643         case ASI_BLK_COMMIT_S:
1644         case ASI_BLK_AIUP_4V:
1645         case ASI_BLK_AIUP_L_4V:
1646         case ASI_BLK_AIUP:
1647         case ASI_BLK_AIUPL:
1648         case ASI_BLK_AIUS_4V:
1649         case ASI_BLK_AIUS_L_4V:
1650         case ASI_BLK_AIUS:
1651         case ASI_BLK_AIUSL:
1652         case ASI_BLK_S:
1653         case ASI_BLK_SL:
1654         case ASI_BLK_P:
1655         case ASI_BLK_PL:
1656             type = GET_ASI_BLOCK;
1657             break;
1658         case ASI_FL8_S:
1659         case ASI_FL8_SL:
1660         case ASI_FL8_P:
1661         case ASI_FL8_PL:
1662             memop = MO_UB;
1663             type = GET_ASI_SHORT;
1664             break;
1665         case ASI_FL16_S:
1666         case ASI_FL16_SL:
1667         case ASI_FL16_P:
1668         case ASI_FL16_PL:
1669             memop = MO_TEUW;
1670             type = GET_ASI_SHORT;
1671             break;
1672         }
1673         /* The little-endian asis all have bit 3 set.  */
1674         if (asi & 8) {
1675             memop ^= MO_BSWAP;
1676         }
1677     }
1678 #endif
1679 
1680  done:
1681     return (DisasASI){ type, asi, mem_idx, memop };
1682 }
1683 
1684 #if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
1685 static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
1686                               TCGv_i32 asi, TCGv_i32 mop)
1687 {
1688     g_assert_not_reached();
1689 }
1690 
1691 static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
1692                               TCGv_i32 asi, TCGv_i32 mop)
1693 {
1694     g_assert_not_reached();
1695 }
1696 #endif
1697 
1698 static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1699 {
1700     switch (da->type) {
1701     case GET_ASI_EXCP:
1702         break;
1703     case GET_ASI_DTWINX: /* Reserved for ldda.  */
1704         gen_exception(dc, TT_ILL_INSN);
1705         break;
1706     case GET_ASI_DIRECT:
1707         tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
1708         break;
1709 
1710     case GET_ASI_CODE:
1711 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
1712         {
1713             MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
1714             TCGv_i64 t64 = tcg_temp_new_i64();
1715 
1716             gen_helper_ld_code(t64, tcg_env, addr, tcg_constant_i32(oi));
1717             tcg_gen_trunc_i64_tl(dst, t64);
1718         }
1719         break;
1720 #else
1721         g_assert_not_reached();
1722 #endif
1723 
1724     default:
1725         {
1726             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1727             TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1728 
1729             save_state(dc);
1730 #ifdef TARGET_SPARC64
1731             gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
1732 #else
1733             {
1734                 TCGv_i64 t64 = tcg_temp_new_i64();
1735                 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1736                 tcg_gen_trunc_i64_tl(dst, t64);
1737             }
1738 #endif
1739         }
1740         break;
1741     }
1742 }
1743 
1744 static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
1745 {
1746     switch (da->type) {
1747     case GET_ASI_EXCP:
1748         break;
1749 
1750     case GET_ASI_DTWINX: /* Reserved for stda.  */
1751         if (TARGET_LONG_BITS == 32) {
1752             gen_exception(dc, TT_ILL_INSN);
1753             break;
1754         } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
1755             /* Pre-OpenSPARC CPUs don't have these.  */
1756             gen_exception(dc, TT_ILL_INSN);
1757             break;
1758         }
1759         /* In OpenSPARC T1+ CPUs, TWINX ASIs used in stores are ST_BLKINIT_ ASIs. */
1760         /* fall through */
1761 
1762     case GET_ASI_DIRECT:
1763         tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
1764         break;
1765 
1766     case GET_ASI_BCOPY:
1767         assert(TARGET_LONG_BITS == 32);
1768         /*
1769          * Copy 32 bytes from the address in SRC to ADDR.
1770          *
1771          * From Ross RT625 hyperSPARC manual, section 4.6:
1772          * "Block Copy and Block Fill will work only on cache line boundaries."
1773          *
1774          * It does not specify if an unaligned address is truncated or trapped.
1775          * Previous qemu behaviour was to truncate to 4 byte alignment, which
1776          * is obviously wrong.  The only place I can see this used is in the
1777          * Linux kernel which begins with page alignment, advancing by 32,
1778          * so is always aligned.  Assume truncation as the simpler option.
1779          *
1780          * Since the loads and stores are paired, allow the copy to happen
1781          * in the host endianness.  The copy need not be atomic.
1782          */
1783         {
1784             MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
1785             TCGv saddr = tcg_temp_new();
1786             TCGv daddr = tcg_temp_new();
1787             TCGv_i128 tmp = tcg_temp_new_i128();
1788 
1789             tcg_gen_andi_tl(saddr, src, -32);
1790             tcg_gen_andi_tl(daddr, addr, -32);
1791             tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
1792             tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
1793             tcg_gen_addi_tl(saddr, saddr, 16);
1794             tcg_gen_addi_tl(daddr, daddr, 16);
1795             tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
1796             tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
1797         }
1798         break;
1799 
1800     default:
1801         {
1802             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1803             TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1804 
1805             save_state(dc);
1806 #ifdef TARGET_SPARC64
1807             gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
1808 #else
1809             {
1810                 TCGv_i64 t64 = tcg_temp_new_i64();
1811                 tcg_gen_extu_tl_i64(t64, src);
1812                 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
1813             }
1814 #endif
1815 
1816             /* A write to a TLB register may alter page maps.  End the TB. */
1817             dc->npc = DYNAMIC_PC;
1818         }
1819         break;
1820     }
1821 }
1822 
1823 static void gen_swap_asi(DisasContext *dc, DisasASI *da,
1824                          TCGv dst, TCGv src, TCGv addr)
1825 {
1826     switch (da->type) {
1827     case GET_ASI_EXCP:
1828         break;
1829     case GET_ASI_DIRECT:
1830         tcg_gen_atomic_xchg_tl(dst, addr, src,
1831                                da->mem_idx, da->memop | MO_ALIGN);
1832         break;
1833     default:
1834         /* ??? Should be DAE_invalid_asi.  */
1835         gen_exception(dc, TT_DATA_ACCESS);
1836         break;
1837     }
1838 }
1839 
1840 static void gen_cas_asi(DisasContext *dc, DisasASI *da,
1841                         TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
1842 {
1843     switch (da->type) {
1844     case GET_ASI_EXCP:
1845         return;
1846     case GET_ASI_DIRECT:
1847         tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
1848                                   da->mem_idx, da->memop | MO_ALIGN);
1849         break;
1850     default:
1851         /* ??? Should be DAE_invalid_asi.  */
1852         gen_exception(dc, TT_DATA_ACCESS);
1853         break;
1854     }
1855 }
1856 
1857 static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1858 {
1859     switch (da->type) {
1860     case GET_ASI_EXCP:
1861         break;
1862     case GET_ASI_DIRECT:
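        /* ldstub atomically loads the byte and stores 0xff in its place. */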
1863         tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
1864                                da->mem_idx, MO_UB);
1865         break;
1866     default:
1867         /* ??? In theory, this should raise DAE_invalid_asi.
1868            But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
1869         if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
1870             gen_helper_exit_atomic(tcg_env);
1871         } else {
1872             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1873             TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
1874             TCGv_i64 s64, t64;
1875 
1876             save_state(dc);
1877             t64 = tcg_temp_new_i64();
1878             gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1879 
1880             s64 = tcg_constant_i64(0xff);
1881             gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);
1882 
1883             tcg_gen_trunc_i64_tl(dst, t64);
1884 
1885             /* End the TB.  */
1886             dc->npc = DYNAMIC_PC;
1887         }
1888         break;
1889     }
1890 }
1891 
1892 static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
1893                         TCGv addr, int rd)
1894 {
1895     MemOp memop = da->memop;
1896     MemOp size = memop & MO_SIZE;
1897     TCGv_i32 d32;
1898     TCGv_i64 d64, l64;
1899     TCGv addr_tmp;
1900 
1901     /* TODO: Use 128-bit load/store below. */
1902     if (size == MO_128) {
1903         memop = (memop & ~MO_SIZE) | MO_64;
1904     }
1905 
1906     switch (da->type) {
1907     case GET_ASI_EXCP:
1908         break;
1909 
1910     case GET_ASI_DIRECT:
1911         memop |= MO_ALIGN_4;
1912         switch (size) {
1913         case MO_32:
1914             d32 = tcg_temp_new_i32();
1915             tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
1916             gen_store_fpr_F(dc, rd, d32);
1917             break;
1918 
1919         case MO_64:
1920             d64 = tcg_temp_new_i64();
1921             tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
1922             gen_store_fpr_D(dc, rd, d64);
1923             break;
1924 
1925         case MO_128:
1926             d64 = tcg_temp_new_i64();
1927             l64 = tcg_temp_new_i64();
1928             tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
1929             addr_tmp = tcg_temp_new();
1930             tcg_gen_addi_tl(addr_tmp, addr, 8);
1931             tcg_gen_qemu_ld_i64(l64, addr_tmp, da->mem_idx, memop);
1932             gen_store_fpr_D(dc, rd, d64);
1933             gen_store_fpr_D(dc, rd + 2, l64);
1934             break;
1935         default:
1936             g_assert_not_reached();
1937         }
1938         break;
1939 
1940     case GET_ASI_BLOCK:
1941         /* Valid for lddfa on aligned registers only.  */
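        /* A block ASI access transfers a 64-byte block as eight doubles
           into an 8-register-aligned group, hence the rd check. */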
1942         if (orig_size == MO_64 && (rd & 7) == 0) {
1943             /* The first operation checks required alignment.  */
1944             addr_tmp = tcg_temp_new();
1945             d64 = tcg_temp_new_i64();
1946             for (int i = 0; ; ++i) {
1947                 tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx,
1948                                     memop | (i == 0 ? MO_ALIGN_64 : 0));
1949                 gen_store_fpr_D(dc, rd + 2 * i, d64);
1950                 if (i == 7) {
1951                     break;
1952                 }
1953                 tcg_gen_addi_tl(addr_tmp, addr, 8);
1954                 addr = addr_tmp;
1955             }
1956         } else {
1957             gen_exception(dc, TT_ILL_INSN);
1958         }
1959         break;
1960 
1961     case GET_ASI_SHORT:
1962         /* Valid for lddfa only.  */
1963         if (orig_size == MO_64) {
1964             d64 = tcg_temp_new_i64();
1965             tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
1966             gen_store_fpr_D(dc, rd, d64);
1967         } else {
1968             gen_exception(dc, TT_ILL_INSN);
1969         }
1970         break;
1971 
1972     default:
1973         {
1974             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1975             TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
1976 
1977             save_state(dc);
1978             /* According to the table in the UA2011 manual, the only
1979                other asis that are valid for ldfa/lddfa/ldqfa are
1980                the NO_FAULT asis.  We still need a helper for these,
1981                but we can just use the integer asi helper for them.  */
1982             switch (size) {
1983             case MO_32:
1984                 d64 = tcg_temp_new_i64();
1985                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
1986                 d32 = tcg_temp_new_i32();
1987                 tcg_gen_extrl_i64_i32(d32, d64);
1988                 gen_store_fpr_F(dc, rd, d32);
1989                 break;
1990             case MO_64:
1991                 d64 = tcg_temp_new_i64();
1992                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
1993                 gen_store_fpr_D(dc, rd, d64);
1994                 break;
1995             case MO_128:
1996                 d64 = tcg_temp_new_i64();
1997                 l64 = tcg_temp_new_i64();
1998                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
1999                 addr_tmp = tcg_temp_new();
2000                 tcg_gen_addi_tl(addr_tmp, addr, 8);
2001                 gen_helper_ld_asi(l64, tcg_env, addr_tmp, r_asi, r_mop);
2002                 gen_store_fpr_D(dc, rd, d64);
2003                 gen_store_fpr_D(dc, rd + 2, l64);
2004                 break;
2005             default:
2006                 g_assert_not_reached();
2007             }
2008         }
2009         break;
2010     }
2011 }
2012 
2013 static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
2014                         TCGv addr, int rd)
2015 {
2016     MemOp memop = da->memop;
2017     MemOp size = memop & MO_SIZE;
2018     TCGv_i32 d32;
2019     TCGv_i64 d64;
2020     TCGv addr_tmp;
2021 
2022     /* TODO: Use 128-bit load/store below. */
2023     if (size == MO_128) {
2024         memop = (memop & ~MO_SIZE) | MO_64;
2025     }
2026 
2027     switch (da->type) {
2028     case GET_ASI_EXCP:
2029         break;
2030 
2031     case GET_ASI_DIRECT:
2032         memop |= MO_ALIGN_4;
2033         switch (size) {
2034         case MO_32:
2035             d32 = gen_load_fpr_F(dc, rd);
2036             tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
2037             break;
2038         case MO_64:
2039             d64 = gen_load_fpr_D(dc, rd);
2040             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_4);
2041             break;
2042         case MO_128:
2043             /* Only 4-byte alignment required.  However, it is legal for the
2044                cpu to signal the alignment fault, and the OS trap handler is
2045                required to fix it up.  Requiring 16-byte alignment here avoids
2046                having to probe the second page before performing the first
2047                write.  */
2048             d64 = gen_load_fpr_D(dc, rd);
2049             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_16);
2050             addr_tmp = tcg_temp_new();
2051             tcg_gen_addi_tl(addr_tmp, addr, 8);
2052             d64 = gen_load_fpr_D(dc, rd + 2);
2053             tcg_gen_qemu_st_i64(d64, addr_tmp, da->mem_idx, memop);
2054             break;
2055         default:
2056             g_assert_not_reached();
2057         }
2058         break;
2059 
2060     case GET_ASI_BLOCK:
2061         /* Valid for stdfa on aligned registers only.  */
2062         if (orig_size == MO_64 && (rd & 7) == 0) {
2063             /* The first operation checks required alignment.  */
2064             addr_tmp = tcg_temp_new();
2065             for (int i = 0; ; ++i) {
2066                 d64 = gen_load_fpr_D(dc, rd + 2 * i);
2067                 tcg_gen_qemu_st_i64(d64, addr, da->mem_idx,
2068                                     memop | (i == 0 ? MO_ALIGN_64 : 0));
2069                 if (i == 7) {
2070                     break;
2071                 }
2072                 tcg_gen_addi_tl(addr_tmp, addr, 8);
2073                 addr = addr_tmp;
2074             }
2075         } else {
2076             gen_exception(dc, TT_ILL_INSN);
2077         }
2078         break;
2079 
2080     case GET_ASI_SHORT:
2081         /* Valid for stdfa only.  */
2082         if (orig_size == MO_64) {
2083             d64 = gen_load_fpr_D(dc, rd);
2084             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
2085         } else {
2086             gen_exception(dc, TT_ILL_INSN);
2087         }
2088         break;
2089 
2090     default:
2091         /* According to the table in the UA2011 manual, the only
2092            other asis that are valid for stfa/stdfa/stqfa are
2093            the PST* asis, which aren't currently handled.  */
2094         gen_exception(dc, TT_ILL_INSN);
2095         break;
2096     }
2097 }
2098 
2099 static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2100 {
2101     TCGv hi = gen_dest_gpr(dc, rd);
2102     TCGv lo = gen_dest_gpr(dc, rd + 1);
2103 
2104     switch (da->type) {
2105     case GET_ASI_EXCP:
2106         return;
2107 
2108     case GET_ASI_DTWINX:
2109 #ifdef TARGET_SPARC64
2110         {
2111             MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2112             TCGv_i128 t = tcg_temp_new_i128();
2113 
2114             tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
2115             /*
2116              * Note that LE twinx acts as if each 64-bit register result is
2117              * byte swapped.  We perform one 128-bit LE load, so must swap
2118              * the order of the writebacks.
2119              */
2120             if ((mop & MO_BSWAP) == MO_TE) {
2121                 tcg_gen_extr_i128_i64(lo, hi, t);
2122             } else {
2123                 tcg_gen_extr_i128_i64(hi, lo, t);
2124             }
2125         }
2126         break;
2127 #else
2128         g_assert_not_reached();
2129 #endif
2130 
2131     case GET_ASI_DIRECT:
2132         {
2133             TCGv_i64 tmp = tcg_temp_new_i64();
2134 
2135             tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);
2136 
2137             /* Note that LE ldda acts as if each 32-bit register
2138                result is byte swapped.  Having just performed one
2139                64-bit bswap, we now need to swap the order of the writebacks.  */
2140             if ((da->memop & MO_BSWAP) == MO_TE) {
2141                 tcg_gen_extr_i64_tl(lo, hi, tmp);
2142             } else {
2143                 tcg_gen_extr_i64_tl(hi, lo, tmp);
2144             }
2145         }
2146         break;
2147 
2148     case GET_ASI_CODE:
2149 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
2150         {
2151             MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
2152             TCGv_i64 tmp = tcg_temp_new_i64();
2153 
2154             gen_helper_ld_code(tmp, tcg_env, addr, tcg_constant_i32(oi));
2155 
2156             /* See above.  */
2157             if ((da->memop & MO_BSWAP) == MO_TE) {
2158                 tcg_gen_extr_i64_tl(lo, hi, tmp);
2159             } else {
2160                 tcg_gen_extr_i64_tl(hi, lo, tmp);
2161             }
2162         }
2163         break;
2164 #else
2165         g_assert_not_reached();
2166 #endif
2167 
2168     default:
2169         /* ??? In theory we've handled all of the ASIs that are valid
2170            for ldda, and this should raise DAE_invalid_asi.  However,
2171            real hardware allows others.  This can be seen with e.g.
2172            FreeBSD 10.3 wrt ASI_IC_TAG.  */
2173         {
2174             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2175             TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2176             TCGv_i64 tmp = tcg_temp_new_i64();
2177 
2178             save_state(dc);
2179             gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);
2180 
2181             /* See above.  */
2182             if ((da->memop & MO_BSWAP) == MO_TE) {
2183                 tcg_gen_extr_i64_tl(lo, hi, tmp);
2184             } else {
2185                 tcg_gen_extr_i64_tl(hi, lo, tmp);
2186             }
2187         }
2188         break;
2189     }
2190 
2191     gen_store_gpr(dc, rd, hi);
2192     gen_store_gpr(dc, rd + 1, lo);
2193 }
2194 
2195 static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2196 {
2197     TCGv hi = gen_load_gpr(dc, rd);
2198     TCGv lo = gen_load_gpr(dc, rd + 1);
2199 
2200     switch (da->type) {
2201     case GET_ASI_EXCP:
2202         break;
2203 
2204     case GET_ASI_DTWINX:
2205 #ifdef TARGET_SPARC64
2206         {
2207             MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2208             TCGv_i128 t = tcg_temp_new_i128();
2209 
2210             /*
2211              * Note that LE twinx acts as if each 64-bit register result is
2212              * byte swapped.  We perform one 128-bit LE store, so must swap
2213              * the order of the construction.
2214              */
2215             if ((mop & MO_BSWAP) == MO_TE) {
2216                 tcg_gen_concat_i64_i128(t, lo, hi);
2217             } else {
2218                 tcg_gen_concat_i64_i128(t, hi, lo);
2219             }
2220             tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
2221         }
2222         break;
2223 #else
2224         g_assert_not_reached();
2225 #endif
2226 
2227     case GET_ASI_DIRECT:
2228         {
2229             TCGv_i64 t64 = tcg_temp_new_i64();
2230 
2231             /* Note that LE stda acts as if each 32-bit register result is
2232                byte swapped.  We will perform one 64-bit LE store, so now
2233                we must swap the order of the construction.  */
2234             if ((da->memop & MO_BSWAP) == MO_TE) {
2235                 tcg_gen_concat_tl_i64(t64, lo, hi);
2236             } else {
2237                 tcg_gen_concat_tl_i64(t64, hi, lo);
2238             }
2239             tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
2240         }
2241         break;
2242 
2243     case GET_ASI_BFILL:
2244         assert(TARGET_LONG_BITS == 32);
2245         /*
2246          * Store 32 bytes of [rd:rd+1] to ADDR.
2247          * See comments for GET_ASI_BCOPY above.
2248          */
2249         {
2250             MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
2251             TCGv_i64 t8 = tcg_temp_new_i64();
2252             TCGv_i128 t16 = tcg_temp_new_i128();
2253             TCGv daddr = tcg_temp_new();
2254 
2255             tcg_gen_concat_tl_i64(t8, lo, hi);
2256             tcg_gen_concat_i64_i128(t16, t8, t8);
2257             tcg_gen_andi_tl(daddr, addr, -32);
2258             tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2259             tcg_gen_addi_tl(daddr, daddr, 16);
2260             tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2261         }
2262         break;
2263 
2264     default:
2265         /* ??? In theory we've handled all of the ASIs that are valid
2266            for stda, and this should raise DAE_invalid_asi.  */
2267         {
2268             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2269             TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2270             TCGv_i64 t64 = tcg_temp_new_i64();
2271 
2272             /* See above.  */
2273             if ((da->memop & MO_BSWAP) == MO_TE) {
2274                 tcg_gen_concat_tl_i64(t64, lo, hi);
2275             } else {
2276                 tcg_gen_concat_tl_i64(t64, hi, lo);
2277             }
2278 
2279             save_state(dc);
2280             gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2281         }
2282         break;
2283     }
2284 }
2285 
2286 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2287 {
2288 #ifdef TARGET_SPARC64
2289     TCGv_i32 c32, zero, dst, s1, s2;
2290     TCGv_i64 c64 = tcg_temp_new_i64();
2291 
2292     /* We have two choices here: extend the 32-bit data and use movcond_i64,
2293        or fold the comparison down to 32 bits and use movcond_i32.  Choose
2294        the latter.  */
2295     c32 = tcg_temp_new_i32();
2296     tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2297     tcg_gen_extrl_i64_i32(c32, c64);
2298 
2299     s1 = gen_load_fpr_F(dc, rs);
2300     s2 = gen_load_fpr_F(dc, rd);
2301     dst = tcg_temp_new_i32();
2302     zero = tcg_constant_i32(0);
2303 
2304     tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2305 
2306     gen_store_fpr_F(dc, rd, dst);
2307 #else
2308     qemu_build_not_reached();
2309 #endif
2310 }
2311 
2312 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2313 {
2314 #ifdef TARGET_SPARC64
2315     TCGv_i64 dst = tcg_temp_new_i64();
2316     tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
2317                         gen_load_fpr_D(dc, rs),
2318                         gen_load_fpr_D(dc, rd));
2319     gen_store_fpr_D(dc, rd, dst);
2320 #else
2321     qemu_build_not_reached();
2322 #endif
2323 }
2324 
2325 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2326 {
2327 #ifdef TARGET_SPARC64
2328     TCGv c2 = tcg_constant_tl(cmp->c2);
2329     TCGv_i64 h = tcg_temp_new_i64();
2330     TCGv_i64 l = tcg_temp_new_i64();
2331 
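    /* A float128 value occupies two consecutive double registers,
       so move the halves as rd and rd + 2. */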
2332     tcg_gen_movcond_i64(cmp->cond, h, cmp->c1, c2,
2333                         gen_load_fpr_D(dc, rs),
2334                         gen_load_fpr_D(dc, rd));
2335     tcg_gen_movcond_i64(cmp->cond, l, cmp->c1, c2,
2336                         gen_load_fpr_D(dc, rs + 2),
2337                         gen_load_fpr_D(dc, rd + 2));
2338     gen_store_fpr_D(dc, rd, h);
2339     gen_store_fpr_D(dc, rd + 2, l);
2340 #else
2341     qemu_build_not_reached();
2342 #endif
2343 }
2344 
2345 #ifdef TARGET_SPARC64
2346 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
2347 {
2348     TCGv_i32 r_tl = tcg_temp_new_i32();
2349 
2350     /* load env->tl into r_tl */
2351     tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));
2352 
2353     /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be one less than a power of 2 */
2354     tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2355 
2356     /* calculate offset to current trap state from env->ts, reuse r_tl */
2357     tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2358     tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));
2359 
2360     /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2361     {
2362         TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2363         tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2364         tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2365     }
2366 }
2367 #endif
2368 
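/*
 * SPARC V9 doubles the FP register file by reusing bit 0 of the 5-bit
 * register field as bit 5 of the register number, so that %d32..%d62
 * are encoded with the low bit set.  Pre-V9 CPUs have only the low half.
 */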
2369 static int extract_dfpreg(DisasContext *dc, int x)
2370 {
2371     int r = x & 0x1e;
2372 #ifdef TARGET_SPARC64
2373     r |= (x & 1) << 5;
2374 #endif
2375     return r;
2376 }
2377 
2378 static int extract_qfpreg(DisasContext *dc, int x)
2379 {
2380     int r = x & 0x1c;
2381 #ifdef TARGET_SPARC64
2382     r |= (x & 1) << 5;
2383 #endif
2384     return r;
2385 }
2386 
2387 /* Include the auto-generated decoder.  */
2388 #include "decode-insns.c.inc"
2389 
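/*
 * Each generated trans_* entry point first evaluates the availability
 * predicate for the required cpu feature, then tails into the shared
 * implementation with the decoded arguments.
 */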
2390 #define TRANS(NAME, AVAIL, FUNC, ...) \
2391     static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2392     { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
2393 
2394 #define avail_ALL(C)      true
2395 #ifdef TARGET_SPARC64
2396 # define avail_32(C)      false
2397 # define avail_ASR17(C)   false
2398 # define avail_CASA(C)    true
2399 # define avail_DIV(C)     true
2400 # define avail_MUL(C)     true
2401 # define avail_POWERDOWN(C) false
2402 # define avail_64(C)      true
2403 # define avail_FMAF(C)    ((C)->def->features & CPU_FEATURE_FMAF)
2404 # define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
2405 # define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
2406 # define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
2407 # define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
2408 # define avail_VIS3(C)    ((C)->def->features & CPU_FEATURE_VIS3)
2409 # define avail_VIS3B(C)   avail_VIS3(C)
2410 #else
2411 # define avail_32(C)      true
2412 # define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
2413 # define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
2414 # define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
2415 # define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
2416 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2417 # define avail_64(C)      false
2418 # define avail_FMAF(C)    false
2419 # define avail_GL(C)      false
2420 # define avail_HYPV(C)    false
2421 # define avail_VIS1(C)    false
2422 # define avail_VIS2(C)    false
2423 # define avail_VIS3(C)    false
2424 # define avail_VIS3B(C)   false
2425 #endif
2426 
2427 /* Default case for non-jump instructions. */
2428 static bool advance_pc(DisasContext *dc)
2429 {
2430     TCGLabel *l1;
2431 
2432     finishing_insn(dc);
2433 
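    /* Real PCs are 4-byte aligned, so nonzero low bits in dc->npc mark
       the DYNAMIC_PC/JUMP_PC sentinel values. */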
2434     if (dc->npc & 3) {
2435         switch (dc->npc) {
2436         case DYNAMIC_PC:
2437         case DYNAMIC_PC_LOOKUP:
2438             dc->pc = dc->npc;
2439             tcg_gen_mov_tl(cpu_pc, cpu_npc);
2440             tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2441             break;
2442 
2443         case JUMP_PC:
2444             /* we can do a static jump */
2445             l1 = gen_new_label();
2446             tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);
2447 
2448             /* jump not taken */
2449             gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);
2450 
2451             /* jump taken */
2452             gen_set_label(l1);
2453             gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);
2454 
2455             dc->base.is_jmp = DISAS_NORETURN;
2456             break;
2457 
2458         default:
2459             g_assert_not_reached();
2460         }
2461     } else {
2462         dc->pc = dc->npc;
2463         dc->npc = dc->npc + 4;
2464     }
2465     return true;
2466 }
2467 
2468 /*
2469  * Major opcodes 00 and 01 -- branches, call, and sethi
2470  */
2471 
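/*
 * The annul bit is asymmetric: a conditional branch with a=1 annuls
 * the delay slot only when the branch is untaken, while BA/BN with
 * a=1 annul it unconditionally.  The ALWAYS and NEVER cases below
 * encode exactly that by stepping pc/npc past the slot.
 */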
2472 static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
2473                               bool annul, int disp)
2474 {
2475     target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
2476     target_ulong npc;
2477 
2478     finishing_insn(dc);
2479 
2480     if (cmp->cond == TCG_COND_ALWAYS) {
2481         if (annul) {
2482             dc->pc = dest;
2483             dc->npc = dest + 4;
2484         } else {
2485             gen_mov_pc_npc(dc);
2486             dc->npc = dest;
2487         }
2488         return true;
2489     }
2490 
2491     if (cmp->cond == TCG_COND_NEVER) {
2492         npc = dc->npc;
2493         if (npc & 3) {
2494             gen_mov_pc_npc(dc);
2495             if (annul) {
2496                 tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
2497             }
2498             tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
2499         } else {
2500             dc->pc = npc + (annul ? 4 : 0);
2501             dc->npc = dc->pc + 4;
2502         }
2503         return true;
2504     }
2505 
2506     flush_cond(dc);
2507     npc = dc->npc;
2508 
2509     if (annul) {
2510         TCGLabel *l1 = gen_new_label();
2511 
2512         tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
2513         gen_goto_tb(dc, 0, npc, dest);
2514         gen_set_label(l1);
2515         gen_goto_tb(dc, 1, npc + 4, npc + 8);
2516 
2517         dc->base.is_jmp = DISAS_NORETURN;
2518     } else {
2519         if (npc & 3) {
2520             switch (npc) {
2521             case DYNAMIC_PC:
2522             case DYNAMIC_PC_LOOKUP:
2523                 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2524                 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2525                 tcg_gen_movcond_tl(cmp->cond, cpu_npc,
2526                                    cmp->c1, tcg_constant_tl(cmp->c2),
2527                                    tcg_constant_tl(dest), cpu_npc);
2528                 dc->pc = npc;
2529                 break;
2530             default:
2531                 g_assert_not_reached();
2532             }
2533         } else {
2534             dc->pc = npc;
2535             dc->npc = JUMP_PC;
2536             dc->jump = *cmp;
2537             dc->jump_pc[0] = dest;
2538             dc->jump_pc[1] = npc + 4;
2539 
2540             /* The condition for cpu_cond is always NE -- normalize. */
2541             if (cmp->cond == TCG_COND_NE) {
2542                 tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
2543             } else {
2544                 tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
2545             }
2546             dc->cpu_cond_live = true;
2547         }
2548     }
2549     return true;
2550 }
2551 
2552 static bool raise_priv(DisasContext *dc)
2553 {
2554     gen_exception(dc, TT_PRIV_INSN);
2555     return true;
2556 }
2557 
2558 static bool raise_unimpfpop(DisasContext *dc)
2559 {
2560     gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
2561     return true;
2562 }
2563 
2564 static bool gen_trap_float128(DisasContext *dc)
2565 {
2566     if (dc->def->features & CPU_FEATURE_FLOAT128) {
2567         return false;
2568     }
2569     return raise_unimpfpop(dc);
2570 }
2571 
2572 static bool do_bpcc(DisasContext *dc, arg_bcc *a)
2573 {
2574     DisasCompare cmp;
2575 
2576     gen_compare(&cmp, a->cc, a->cond, dc);
2577     return advance_jump_cond(dc, &cmp, a->a, a->i);
2578 }
2579 
2580 TRANS(Bicc, ALL, do_bpcc, a)
2581 TRANS(BPcc,  64, do_bpcc, a)
2582 
2583 static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
2584 {
2585     DisasCompare cmp;
2586 
2587     if (gen_trap_ifnofpu(dc)) {
2588         return true;
2589     }
2590     gen_fcompare(&cmp, a->cc, a->cond);
2591     return advance_jump_cond(dc, &cmp, a->a, a->i);
2592 }
2593 
2594 TRANS(FBPfcc,  64, do_fbpfcc, a)
2595 TRANS(FBfcc,  ALL, do_fbpfcc, a)
2596 
2597 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2598 {
2599     DisasCompare cmp;
2600 
2601     if (!avail_64(dc)) {
2602         return false;
2603     }
2604     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
2605         return false;
2606     }
2607     return advance_jump_cond(dc, &cmp, a->a, a->i);
2608 }
2609 
2610 static bool trans_CALL(DisasContext *dc, arg_CALL *a)
2611 {
2612     target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2613 
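    /* CALL deposits its own address in %o7 (r15) as the return link. */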
2614     gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
2615     gen_mov_pc_npc(dc);
2616     dc->npc = target;
2617     return true;
2618 }
2619 
2620 static bool trans_NCP(DisasContext *dc, arg_NCP *a)
2621 {
2622     /*
2623      * For sparc32, always generate the no-coprocessor exception.
2624      * For sparc64, always generate illegal instruction.
2625      */
2626 #ifdef TARGET_SPARC64
2627     return false;
2628 #else
2629     gen_exception(dc, TT_NCP_INSN);
2630     return true;
2631 #endif
2632 }
2633 
2634 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2635 {
2636     /* Special-case %g0 because that's the canonical nop.  */
2637     if (a->rd) {
2638         gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2639     }
2640     return advance_pc(dc);
2641 }
2642 
2643 /*
2644  * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2645  */
2646 
2647 static bool do_tcc(DisasContext *dc, int cond, int cc,
2648                    int rs1, bool imm, int rs2_or_imm)
2649 {
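    /* The software trap number is masked to the implemented range:
       7 bits for v8, wider when UA2005 hyperprivileged traps exist. */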
2650     int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2651                 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2652     DisasCompare cmp;
2653     TCGLabel *lab;
2654     TCGv_i32 trap;
2655 
2656     /* Trap never.  */
2657     if (cond == 0) {
2658         return advance_pc(dc);
2659     }
2660 
2661     /*
2662      * Immediate traps are the most common case.  Since this value is
2663      * live across the branch, it really pays to evaluate the constant.
2664      */
2665     if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
2666         trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
2667     } else {
2668         trap = tcg_temp_new_i32();
2669         tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
2670         if (imm) {
2671             tcg_gen_addi_i32(trap, trap, rs2_or_imm);
2672         } else {
2673             TCGv_i32 t2 = tcg_temp_new_i32();
2674             tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
2675             tcg_gen_add_i32(trap, trap, t2);
2676         }
2677         tcg_gen_andi_i32(trap, trap, mask);
2678         tcg_gen_addi_i32(trap, trap, TT_TRAP);
2679     }
2680 
2681     finishing_insn(dc);
2682 
2683     /* Trap always.  */
2684     if (cond == 8) {
2685         save_state(dc);
2686         gen_helper_raise_exception(tcg_env, trap);
2687         dc->base.is_jmp = DISAS_NORETURN;
2688         return true;
2689     }
2690 
2691     /* Conditional trap.  */
2692     flush_cond(dc);
2693     lab = delay_exceptionv(dc, trap);
2694     gen_compare(&cmp, cc, cond, dc);
2695     tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);
2696 
2697     return advance_pc(dc);
2698 }
2699 
2700 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
2701 {
2702     if (avail_32(dc) && a->cc) {
2703         return false;
2704     }
2705     return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
2706 }
2707 
2708 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
2709 {
2710     if (avail_64(dc)) {
2711         return false;
2712     }
2713     return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
2714 }
2715 
2716 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
2717 {
2718     if (avail_32(dc)) {
2719         return false;
2720     }
2721     return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
2722 }
2723 
2724 static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
2725 {
2726     tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2727     return advance_pc(dc);
2728 }
2729 
2730 static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
2731 {
2732     if (avail_32(dc)) {
2733         return false;
2734     }
2735     if (a->mmask) {
2736         /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
2737         tcg_gen_mb(a->mmask | TCG_BAR_SC);
2738     }
2739     if (a->cmask) {
2740         /* For #Sync, etc, end the TB to recognize interrupts. */
2741         dc->base.is_jmp = DISAS_EXIT;
2742     }
2743     return advance_pc(dc);
2744 }
2745 
2746 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
2747                           TCGv (*func)(DisasContext *, TCGv))
2748 {
2749     if (!priv) {
2750         return raise_priv(dc);
2751     }
2752     gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
2753     return advance_pc(dc);
2754 }
2755 
2756 static TCGv do_rdy(DisasContext *dc, TCGv dst)
2757 {
2758     return cpu_y;
2759 }
2760 
2761 static bool trans_RDY(DisasContext *dc, arg_RDY *a)
2762 {
2763     /*
2764      * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
2765      * 32-bit cpus like sparcv7, which ignores the rs1 field.
2766      * This matches after all other ASR, so Leon3 Asr17 is handled first.
2767      */
2768     if (avail_64(dc) && a->rs1 != 0) {
2769         return false;
2770     }
2771     return do_rd_special(dc, true, a->rd, do_rdy);
2772 }
2773 
2774 static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
2775 {
2776     gen_helper_rdasr17(dst, tcg_env);
2777     return dst;
2778 }
2779 
2780 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2781 
2782 static TCGv do_rdccr(DisasContext *dc, TCGv dst)
2783 {
2784     gen_helper_rdccr(dst, tcg_env);
2785     return dst;
2786 }
2787 
2788 TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
2789 
2790 static TCGv do_rdasi(DisasContext *dc, TCGv dst)
2791 {
2792 #ifdef TARGET_SPARC64
2793     return tcg_constant_tl(dc->asi);
2794 #else
2795     qemu_build_not_reached();
2796 #endif
2797 }
2798 
2799 TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2800 
2801 static TCGv do_rdtick(DisasContext *dc, TCGv dst)
2802 {
2803     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2804 
2805     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
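    /* translator_io_start() returns true when icount is active; the TB
       must then end after this I/O access to stay deterministic. */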
2806     if (translator_io_start(&dc->base)) {
2807         dc->base.is_jmp = DISAS_EXIT;
2808     }
2809     gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2810                               tcg_constant_i32(dc->mem_idx));
2811     return dst;
2812 }
2813 
2814 /* TODO: non-priv access only allowed when enabled. */
2815 TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)
2816 
2817 static TCGv do_rdpc(DisasContext *dc, TCGv dst)
2818 {
2819     return tcg_constant_tl(address_mask_i(dc, dc->pc));
2820 }
2821 
2822 TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2823 
2824 static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
2825 {
2826     tcg_gen_ext_i32_tl(dst, cpu_fprs);
2827     return dst;
2828 }
2829 
2830 TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)
2831 
2832 static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
2833 {
2834     gen_trap_ifnofpu(dc);
2835     return cpu_gsr;
2836 }
2837 
2838 TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
2839 
2840 static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
2841 {
2842     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
2843     return dst;
2844 }
2845 
2846 TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
2847 
2848 static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
2849 {
2850     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
2851     return dst;
2852 }
2853 
2854 /* TODO: non-priv access only allowed when enabled. */
2855 TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
2856 
2857 static TCGv do_rdstick(DisasContext *dc, TCGv dst)
2858 {
2859     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2860 
2861     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
2862     if (translator_io_start(&dc->base)) {
2863         dc->base.is_jmp = DISAS_EXIT;
2864     }
2865     gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2866                               tcg_constant_i32(dc->mem_idx));
2867     return dst;
2868 }
2869 
2870 /* TODO: non-priv access only allowed when enabled. */
2871 TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)
2872 
2873 static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
2874 {
2875     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
2876     return dst;
2877 }
2878 
2879 /* TODO: supervisor access only allowed when enabled by hypervisor. */
2880 TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
2881 
2882 /*
2883  * UltraSPARC-T1 Strand status.
2884  * The HYPV check may not be sufficient: UA2005 & UA2007 describe
2885  * this ASR as implementation dependent.
2886  */
2887 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
2888 {
2889     return tcg_constant_tl(1);
2890 }
2891 
2892 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
2893 
2894 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
2895 {
2896     gen_helper_rdpsr(dst, tcg_env);
2897     return dst;
2898 }
2899 
2900 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
2901 
2902 static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
2903 {
2904     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
2905     return dst;
2906 }
2907 
2908 TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
2909 
2910 static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
2911 {
2912     TCGv_i32 tl = tcg_temp_new_i32();
2913     TCGv_ptr tp = tcg_temp_new_ptr();
2914 
2915     tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
2916     tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
2917     tcg_gen_shli_i32(tl, tl, 3);
2918     tcg_gen_ext_i32_ptr(tp, tl);
2919     tcg_gen_add_ptr(tp, tp, tcg_env);
2920 
2921     tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
2922     return dst;
2923 }
2924 
2925 TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
2926 
2927 static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
2928 {
2929     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
2930     return dst;
2931 }
2932 
2933 TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)
2934 
2935 static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
2936 {
2937     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
2938     return dst;
2939 }
2940 
2941 TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)
2942 
2943 static TCGv do_rdhver(DisasContext *dc, TCGv dst)
2944 {
2945     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
2946     return dst;
2947 }
2948 
2949 TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)
2950 
2951 static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
2952 {
2953     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
2954     return dst;
2955 }
2956 
2957 TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
2958       do_rdhstick_cmpr)
2959 
2960 static TCGv do_rdwim(DisasContext *dc, TCGv dst)
2961 {
2962     tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
2963     return dst;
2964 }
2965 
2966 TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
2967 
2968 static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
2969 {
2970 #ifdef TARGET_SPARC64
2971     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2972 
2973     gen_load_trap_state_at_tl(r_tsptr);
2974     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
2975     return dst;
2976 #else
2977     qemu_build_not_reached();
2978 #endif
2979 }
2980 
2981 TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
2982 
2983 static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
2984 {
2985 #ifdef TARGET_SPARC64
2986     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2987 
2988     gen_load_trap_state_at_tl(r_tsptr);
2989     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
2990     return dst;
2991 #else
2992     qemu_build_not_reached();
2993 #endif
2994 }
2995 
2996 TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
2997 
2998 static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
2999 {
3000 #ifdef TARGET_SPARC64
3001     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3002 
3003     gen_load_trap_state_at_tl(r_tsptr);
3004     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
3005     return dst;
3006 #else
3007     qemu_build_not_reached();
3008 #endif
3009 }
3010 
3011 TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
3012 
3013 static TCGv do_rdtt(DisasContext *dc, TCGv dst)
3014 {
3015 #ifdef TARGET_SPARC64
3016     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3017 
3018     gen_load_trap_state_at_tl(r_tsptr);
3019     tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
3020     return dst;
3021 #else
3022     qemu_build_not_reached();
3023 #endif
3024 }
3025 
3026 TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
3027 TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
3028 
3029 static TCGv do_rdtba(DisasContext *dc, TCGv dst)
3030 {
3031     return cpu_tbr;
3032 }
3033 
3034 TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3035 TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3036 
3037 static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
3038 {
3039     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
3040     return dst;
3041 }
3042 
3043 TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
3044 
3045 static TCGv do_rdtl(DisasContext *dc, TCGv dst)
3046 {
3047     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
3048     return dst;
3049 }
3050 
3051 TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
3052 
3053 static TCGv do_rdpil(DisasContext *dc, TCGv dst)
3054 {
3055     tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
3056     return dst;
3057 }
3058 
3059 TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
3060 
3061 static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
3062 {
3063     gen_helper_rdcwp(dst, tcg_env);
3064     return dst;
3065 }
3066 
3067 TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
3068 
3069 static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
3070 {
3071     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
3072     return dst;
3073 }
3074 
3075 TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
3076 
3077 static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
3078 {
3079     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
3080     return dst;
3081 }
3082 
3083 TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
3084       do_rdcanrestore)
3085 
3086 static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
3087 {
3088     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
3089     return dst;
3090 }
3091 
3092 TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
3093 
3094 static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
3095 {
3096     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
3097     return dst;
3098 }
3099 
3100 TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)
3101 
3102 static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
3103 {
3104     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
3105     return dst;
3106 }
3107 
3108 TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)
3109 
3110 static TCGv do_rdgl(DisasContext *dc, TCGv dst)
3111 {
3112     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
3113     return dst;
3114 }
3115 
3116 TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3117 
3118 /* UA2005 strand status */
3119 static TCGv do_rdssr(DisasContext *dc, TCGv dst)
3120 {
3121     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
3122     return dst;
3123 }
3124 
3125 TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)
3126 
3127 static TCGv do_rdver(DisasContext *dc, TCGv dst)
3128 {
3129     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
3130     return dst;
3131 }
3132 
3133 TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3134 
3135 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3136 {
3137     if (avail_64(dc)) {
3138         gen_helper_flushw(tcg_env);
3139         return advance_pc(dc);
3140     }
3141     return false;
3142 }
3143 
3144 static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
3145                           void (*func)(DisasContext *, TCGv))
3146 {
3147     TCGv src;
3148 
3149     /* For simplicity, we under-decoded the rs2 form. */
3150     if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
3151         return false;
3152     }
3153     if (!priv) {
3154         return raise_priv(dc);
3155     }
3156 
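    /*
     * Per the SPARC ISA, the WR* instructions write r[rs1] XOR
     * (r[rs2] or sign_ext(simm13)); with rs1 == %g0 the xor reduces
     * to the operand itself, handled as a constant below.
     */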
3157     if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
3158         src = tcg_constant_tl(a->rs2_or_imm);
3159     } else {
3160         TCGv src1 = gen_load_gpr(dc, a->rs1);
3161         if (a->rs2_or_imm == 0) {
3162             src = src1;
3163         } else {
3164             src = tcg_temp_new();
3165             if (a->imm) {
3166                 tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
3167             } else {
3168                 tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
3169             }
3170         }
3171     }
3172     func(dc, src);
3173     return advance_pc(dc);
3174 }
3175 
3176 static void do_wry(DisasContext *dc, TCGv src)
3177 {
3178     tcg_gen_ext32u_tl(cpu_y, src);
3179 }
3180 
3181 TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
3182 
3183 static void do_wrccr(DisasContext *dc, TCGv src)
3184 {
3185     gen_helper_wrccr(tcg_env, src);
3186 }
3187 
3188 TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
3189 
3190 static void do_wrasi(DisasContext *dc, TCGv src)
3191 {
3192     TCGv tmp = tcg_temp_new();
3193 
3194     tcg_gen_ext8u_tl(tmp, src);
3195     tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
3196     /* End TB to notice changed ASI. */
3197     dc->base.is_jmp = DISAS_EXIT;
3198 }
3199 
3200 TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
3201 
3202 static void do_wrfprs(DisasContext *dc, TCGv src)
3203 {
3204 #ifdef TARGET_SPARC64
3205     tcg_gen_trunc_tl_i32(cpu_fprs, src);
3206     dc->fprs_dirty = 0;
3207     dc->base.is_jmp = DISAS_EXIT;
3208 #else
3209     qemu_build_not_reached();
3210 #endif
3211 }
3212 
3213 TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
3214 
3215 static void do_wrgsr(DisasContext *dc, TCGv src)
3216 {
3217     gen_trap_ifnofpu(dc);
3218     tcg_gen_mov_tl(cpu_gsr, src);
3219 }
3220 
3221 TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3222 
3223 static void do_wrsoftint_set(DisasContext *dc, TCGv src)
3224 {
3225     gen_helper_set_softint(tcg_env, src);
3226 }
3227 
3228 TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)
3229 
3230 static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
3231 {
3232     gen_helper_clear_softint(tcg_env, src);
3233 }
3234 
3235 TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)
3236 
3237 static void do_wrsoftint(DisasContext *dc, TCGv src)
3238 {
3239     gen_helper_write_softint(tcg_env, src);
3240 }
3241 
3242 TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3243 
3244 static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
3245 {
3246     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3247 
3248     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
3249     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3250     translator_io_start(&dc->base);
3251     gen_helper_tick_set_limit(r_tickptr, src);
3252     /* End TB to handle timer interrupt */
3253     dc->base.is_jmp = DISAS_EXIT;
3254 }
3255 
3256 TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3257 
3258 static void do_wrstick(DisasContext *dc, TCGv src)
3259 {
3260 #ifdef TARGET_SPARC64
3261     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3262 
3263     tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3264     translator_io_start(&dc->base);
3265     gen_helper_tick_set_count(r_tickptr, src);
3266     /* End TB to handle timer interrupt */
3267     dc->base.is_jmp = DISAS_EXIT;
3268 #else
3269     qemu_build_not_reached();
3270 #endif
3271 }
3272 
3273 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3274 
3275 static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
3276 {
3277     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3278 
3279     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
3280     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
3281     translator_io_start(&dc->base);
3282     gen_helper_tick_set_limit(r_tickptr, src);
3283     /* End TB to handle timer interrupt */
3284     dc->base.is_jmp = DISAS_EXIT;
3285 }
3286 
3287 TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3288 
3289 static void do_wrpowerdown(DisasContext *dc, TCGv src)
3290 {
3291     finishing_insn(dc);
3292     save_state(dc);
3293     gen_helper_power_down(tcg_env);
3294 }
3295 
3296 TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3297 
3298 static void do_wrpsr(DisasContext *dc, TCGv src)
3299 {
3300     gen_helper_wrpsr(tcg_env, src);
3301     dc->base.is_jmp = DISAS_EXIT;
3302 }
3303 
3304 TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3305 
3306 static void do_wrwim(DisasContext *dc, TCGv src)
3307 {
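    /* WIM implements one bit per register window; the bits above
       NWINDOWS are not writable and read as zero. */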
3308     target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
3309     TCGv tmp = tcg_temp_new();
3310 
3311     tcg_gen_andi_tl(tmp, src, mask);
3312     tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
3313 }
3314 
3315 TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3316 
3317 static void do_wrtpc(DisasContext *dc, TCGv src)
3318 {
3319 #ifdef TARGET_SPARC64
3320     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3321 
3322     gen_load_trap_state_at_tl(r_tsptr);
3323     tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
3324 #else
3325     qemu_build_not_reached();
3326 #endif
3327 }
3328 
3329 TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)
3330 
3331 static void do_wrtnpc(DisasContext *dc, TCGv src)
3332 {
3333 #ifdef TARGET_SPARC64
3334     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3335 
3336     gen_load_trap_state_at_tl(r_tsptr);
3337     tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
3338 #else
3339     qemu_build_not_reached();
3340 #endif
3341 }
3342 
3343 TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)
3344 
3345 static void do_wrtstate(DisasContext *dc, TCGv src)
3346 {
3347 #ifdef TARGET_SPARC64
3348     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3349 
3350     gen_load_trap_state_at_tl(r_tsptr);
3351     tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
3352 #else
3353     qemu_build_not_reached();
3354 #endif
3355 }
3356 
3357 TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)
3358 
3359 static void do_wrtt(DisasContext *dc, TCGv src)
3360 {
3361 #ifdef TARGET_SPARC64
3362     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3363 
3364     gen_load_trap_state_at_tl(r_tsptr);
3365     tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
3366 #else
3367     qemu_build_not_reached();
3368 #endif
3369 }
3370 
3371 TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3372 
3373 static void do_wrtick(DisasContext *dc, TCGv src)
3374 {
3375     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3376 
3377     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3378     translator_io_start(&dc->base);
3379     gen_helper_tick_set_count(r_tickptr, src);
3380     /* End TB to handle timer interrupt */
3381     dc->base.is_jmp = DISAS_EXIT;
3382 }
3383 
3384 TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3385 
3386 static void do_wrtba(DisasContext *dc, TCGv src)
3387 {
3388     tcg_gen_mov_tl(cpu_tbr, src);
3389 }
3390 
3391 TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3392 
3393 static void do_wrpstate(DisasContext *dc, TCGv src)
3394 {
3395     save_state(dc);
3396     if (translator_io_start(&dc->base)) {
3397         dc->base.is_jmp = DISAS_EXIT;
3398     }
3399     gen_helper_wrpstate(tcg_env, src);
3400     dc->npc = DYNAMIC_PC;
3401 }
3402 
3403 TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
3404 
3405 static void do_wrtl(DisasContext *dc, TCGv src)
3406 {
3407     save_state(dc);
3408     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
3409     dc->npc = DYNAMIC_PC;
3410 }
3411 
3412 TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
3413 
3414 static void do_wrpil(DisasContext *dc, TCGv src)
3415 {
3416     if (translator_io_start(&dc->base)) {
3417         dc->base.is_jmp = DISAS_EXIT;
3418     }
3419     gen_helper_wrpil(tcg_env, src);
3420 }
3421 
3422 TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3423 
3424 static void do_wrcwp(DisasContext *dc, TCGv src)
3425 {
3426     gen_helper_wrcwp(tcg_env, src);
3427 }
3428 
3429 TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3430 
3431 static void do_wrcansave(DisasContext *dc, TCGv src)
3432 {
3433     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
3434 }
3435 
3436 TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)
3437 
3438 static void do_wrcanrestore(DisasContext *dc, TCGv src)
3439 {
3440     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
3441 }
3442 
3443 TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)
3444 
3445 static void do_wrcleanwin(DisasContext *dc, TCGv src)
3446 {
3447     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
3448 }
3449 
3450 TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)
3451 
3452 static void do_wrotherwin(DisasContext *dc, TCGv src)
3453 {
3454     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
3455 }
3456 
3457 TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)
3458 
3459 static void do_wrwstate(DisasContext *dc, TCGv src)
3460 {
3461     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
3462 }
3463 
3464 TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
3465 
3466 static void do_wrgl(DisasContext *dc, TCGv src)
3467 {
3468     gen_helper_wrgl(tcg_env, src);
3469 }
3470 
3471 TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3472 
3473 /* UA2005 strand status */
3474 static void do_wrssr(DisasContext *dc, TCGv src)
3475 {
3476     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
3477 }
3478 
3479 TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)
3480 
3481 TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3482 
3483 static void do_wrhpstate(DisasContext *dc, TCGv src)
3484 {
3485     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
3486     dc->base.is_jmp = DISAS_EXIT;
3487 }
3488 
3489 TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3490 
3491 static void do_wrhtstate(DisasContext *dc, TCGv src)
3492 {
3493     TCGv_i32 tl = tcg_temp_new_i32();
3494     TCGv_ptr tp = tcg_temp_new_ptr();
3495 
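    /*
     * Compute &env->htstate[TL]: scale the masked trap level by 8,
     * assuming htstate is an array of 64-bit entries.
     */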
3496     tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3497     tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
3498     tcg_gen_shli_i32(tl, tl, 3);
3499     tcg_gen_ext_i32_ptr(tp, tl);
3500     tcg_gen_add_ptr(tp, tp, tcg_env);
3501 
3502     tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
3503 }
3504 
3505 TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
3506 
3507 static void do_wrhintp(DisasContext *dc, TCGv src)
3508 {
3509     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
3510 }
3511 
3512 TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)
3513 
3514 static void do_wrhtba(DisasContext *dc, TCGv src)
3515 {
3516     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
3517 }
3518 
3519 TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
3520 
3521 static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
3522 {
3523     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3524 
3525     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
3526     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
3527     translator_io_start(&dc->base);
3528     gen_helper_tick_set_limit(r_tickptr, src);
3529     /* End TB to handle timer interrupt */
3530     dc->base.is_jmp = DISAS_EXIT;
3531 }
3532 
3533 TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
3534       do_wrhstick_cmpr)
3535 
3536 static bool do_saved_restored(DisasContext *dc, bool saved)
3537 {
3538     if (!supervisor(dc)) {
3539         return raise_priv(dc);
3540     }
3541     if (saved) {
3542         gen_helper_saved(tcg_env);
3543     } else {
3544         gen_helper_restored(tcg_env);
3545     }
3546     return advance_pc(dc);
3547 }
3548 
3549 TRANS(SAVED, 64, do_saved_restored, true)
3550 TRANS(RESTORED, 64, do_saved_restored, false)
3551 
3552 static bool trans_NOP(DisasContext *dc, arg_NOP *a)
3553 {
3554     return advance_pc(dc);
3555 }
3556 
3557 /*
3558  * TODO: Need a feature bit for sparcv8.
3559  * In the meantime, treat all 32-bit cpus like sparcv7.
3560  */
3561 TRANS(NOP_v7, 32, trans_NOP, a)
3562 TRANS(NOP_v9, 64, trans_NOP, a)
3563 
3564 static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
3565                          void (*func)(TCGv, TCGv, TCGv),
3566                          void (*funci)(TCGv, TCGv, target_long),
3567                          bool logic_cc)
3568 {
3569     TCGv dst, src1;
3570 
3571     /* For simplicity, we under-decoded the rs2 form. */
3572     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3573         return false;
3574     }
3575 
3576     if (logic_cc) {
3577         dst = cpu_cc_N;
3578     } else {
3579         dst = gen_dest_gpr(dc, a->rd);
3580     }
3581     src1 = gen_load_gpr(dc, a->rs1);
3582 
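    /*
     * %g0 reads as constant zero, so route rs2 == %g0 through the
     * immediate path rather than referencing cpu_regs[0].
     */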
3583     if (a->imm || a->rs2_or_imm == 0) {
3584         if (funci) {
3585             funci(dst, src1, a->rs2_or_imm);
3586         } else {
3587             func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
3588         }
3589     } else {
3590         func(dst, src1, cpu_regs[a->rs2_or_imm]);
3591     }
3592 
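    /*
     * For logical ops the result was computed directly into cpu_cc_N;
     * mirror it into the Z fields (Z is tested as "value == 0") and
     * clear carry and overflow.
     */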
3593     if (logic_cc) {
3594         if (TARGET_LONG_BITS == 64) {
3595             tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
3596             tcg_gen_movi_tl(cpu_icc_C, 0);
3597         }
3598         tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
3599         tcg_gen_movi_tl(cpu_cc_C, 0);
3600         tcg_gen_movi_tl(cpu_cc_V, 0);
3601     }
3602 
3603     gen_store_gpr(dc, a->rd, dst);
3604     return advance_pc(dc);
3605 }
3606 
3607 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
3608                      void (*func)(TCGv, TCGv, TCGv),
3609                      void (*funci)(TCGv, TCGv, target_long),
3610                      void (*func_cc)(TCGv, TCGv, TCGv))
3611 {
3612     if (a->cc) {
3613         return do_arith_int(dc, a, func_cc, NULL, false);
3614     }
3615     return do_arith_int(dc, a, func, funci, false);
3616 }
3617 
3618 static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
3619                      void (*func)(TCGv, TCGv, TCGv),
3620                      void (*funci)(TCGv, TCGv, target_long))
3621 {
3622     return do_arith_int(dc, a, func, funci, a->cc);
3623 }
3624 
3625 TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
3626 TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
3627 TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
3628 TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)
3629 
3630 TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
3631 TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
3632 TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
3633 TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)
3634 
3635 TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
3636 TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
3637 TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
3638 TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
3639 TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)
3640 
3641 TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
3642 TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
3643 TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
3644 TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)
3645 
3646 TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
3647 TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)
3648 
3649 /* TODO: Should have a feature bit -- comes in with UltraSparc T2. */
3650 TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3651 
3652 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
3653 {
3654     /* OR with %g0 is the canonical alias for MOV. */
3655     if (!a->cc && a->rs1 == 0) {
3656         if (a->imm || a->rs2_or_imm == 0) {
3657             gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
3658         } else if (a->rs2_or_imm & ~0x1f) {
3659             /* For simplicity, we under-decoded the rs2 form. */
3660             return false;
3661         } else {
3662             gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
3663         }
3664         return advance_pc(dc);
3665     }
3666     return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
3667 }
3668 
3669 static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
3670 {
3671     TCGv_i64 t1, t2;
3672     TCGv dst;
3673 
3674     if (!avail_DIV(dc)) {
3675         return false;
3676     }
3677     /* For simplicity, we under-decoded the rs2 form. */
3678     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3679         return false;
3680     }
3681 
3682     if (unlikely(a->rs2_or_imm == 0)) {
3683         gen_exception(dc, TT_DIV_ZERO);
3684         return true;
3685     }
3686 
3687     if (a->imm) {
3688         t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
3689     } else {
3690         TCGLabel *lab;
3691         TCGv_i32 n2;
3692 
3693         finishing_insn(dc);
3694         flush_cond(dc);
3695 
3696         n2 = tcg_temp_new_i32();
3697         tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);
3698 
3699         lab = delay_exception(dc, TT_DIV_ZERO);
3700         tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);
3701 
3702         t2 = tcg_temp_new_i64();
3703 #ifdef TARGET_SPARC64
3704         tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
3705 #else
3706         tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
3707 #endif
3708     }
3709 
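    /* The 64-bit dividend is Y:rs1, with Y supplying the high half. */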
3710     t1 = tcg_temp_new_i64();
3711     tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);
3712 
3713     tcg_gen_divu_i64(t1, t1, t2);
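    /* Per SPARCv8, an overflowed unsigned quotient saturates to 2**32 - 1. */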
3714     tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));
3715 
3716     dst = gen_dest_gpr(dc, a->rd);
3717     tcg_gen_trunc_i64_tl(dst, t1);
3718     gen_store_gpr(dc, a->rd, dst);
3719     return advance_pc(dc);
3720 }
3721 
3722 static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
3723 {
3724     TCGv dst, src1, src2;
3725 
3726     if (!avail_64(dc)) {
3727         return false;
3728     }
3729     /* For simplicity, we under-decoded the rs2 form. */
3730     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3731         return false;
3732     }
3733 
3734     if (unlikely(a->rs2_or_imm == 0)) {
3735         gen_exception(dc, TT_DIV_ZERO);
3736         return true;
3737     }
3738 
3739     if (a->imm) {
3740         src2 = tcg_constant_tl(a->rs2_or_imm);
3741     } else {
3742         TCGLabel *lab;
3743 
3744         finishing_insn(dc);
3745         flush_cond(dc);
3746 
3747         lab = delay_exception(dc, TT_DIV_ZERO);
3748         src2 = cpu_regs[a->rs2_or_imm];
3749         tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3750     }
3751 
3752     dst = gen_dest_gpr(dc, a->rd);
3753     src1 = gen_load_gpr(dc, a->rs1);
3754 
3755     tcg_gen_divu_tl(dst, src1, src2);
3756     gen_store_gpr(dc, a->rd, dst);
3757     return advance_pc(dc);
3758 }
3759 
3760 static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
3761 {
3762     TCGv dst, src1, src2;
3763 
3764     if (!avail_64(dc)) {
3765         return false;
3766     }
3767     /* For simplicity, we under-decoded the rs2 form. */
3768     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3769         return false;
3770     }
3771 
3772     if (unlikely(a->rs2_or_imm == 0)) {
3773         gen_exception(dc, TT_DIV_ZERO);
3774         return true;
3775     }
3776 
3777     dst = gen_dest_gpr(dc, a->rd);
3778     src1 = gen_load_gpr(dc, a->rs1);
3779 
3780     if (a->imm) {
3781         if (unlikely(a->rs2_or_imm == -1)) {
3782             tcg_gen_neg_tl(dst, src1);
3783             gen_store_gpr(dc, a->rd, dst);
3784             return advance_pc(dc);
3785         }
3786         src2 = tcg_constant_tl(a->rs2_or_imm);
3787     } else {
3788         TCGLabel *lab;
3789         TCGv t1, t2;
3790 
3791         finishing_insn(dc);
3792         flush_cond(dc);
3793 
3794         lab = delay_exception(dc, TT_DIV_ZERO);
3795         src2 = cpu_regs[a->rs2_or_imm];
3796         tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3797 
3798         /*
3799          * Need to avoid INT64_MIN / -1, which will trap on the x86 host.
3800          * Dividing by 1 instead yields INT64_MIN, the expected result.
3801          */
3802         t1 = tcg_temp_new();
3803         t2 = tcg_temp_new();
3804         tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
3805         tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
3806         tcg_gen_and_tl(t1, t1, t2);
3807         tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
3808                            tcg_constant_tl(1), src2);
3809         src2 = t1;
3810     }
3811 
3812     tcg_gen_div_tl(dst, src1, src2);
3813     gen_store_gpr(dc, a->rd, dst);
3814     return advance_pc(dc);
3815 }
3816 
3817 static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
3818                      int width, bool cc, bool little_endian)
3819 {
3820     TCGv dst, s1, s2, l, r, t, m;
3821     uint64_t amask = address_mask_i(dc, -8);
3822 
3823     dst = gen_dest_gpr(dc, a->rd);
3824     s1 = gen_load_gpr(dc, a->rs1);
3825     s2 = gen_load_gpr(dc, a->rs2);
3826 
3827     if (cc) {
3828         gen_op_subcc(cpu_cc_N, s1, s2);
3829     }
3830 
3831     l = tcg_temp_new();
3832     r = tcg_temp_new();
3833     t = tcg_temp_new();
3834 
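    /*
     * Reduce s1 and s2 to element indices within an aligned 8-byte
     * block (r counted from the opposite end) and build the mask m
     * covering all elements of the given width.
     */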
3835     switch (width) {
3836     case 8:
3837         tcg_gen_andi_tl(l, s1, 7);
3838         tcg_gen_andi_tl(r, s2, 7);
3839         tcg_gen_xori_tl(r, r, 7);
3840         m = tcg_constant_tl(0xff);
3841         break;
3842     case 16:
3843         tcg_gen_extract_tl(l, s1, 1, 2);
3844         tcg_gen_extract_tl(r, s2, 1, 2);
3845         tcg_gen_xori_tl(r, r, 3);
3846         m = tcg_constant_tl(0xf);
3847         break;
3848     case 32:
3849         tcg_gen_extract_tl(l, s1, 2, 1);
3850         tcg_gen_extract_tl(r, s2, 2, 1);
3851         tcg_gen_xori_tl(r, r, 1);
3852         m = tcg_constant_tl(0x3);
3853         break;
3854     default:
3855         g_assert_not_reached();
3856     }
3857 
3858     /* Compute Left Edge */
3859     if (little_endian) {
3860         tcg_gen_shl_tl(l, m, l);
3861         tcg_gen_and_tl(l, l, m);
3862     } else {
3863         tcg_gen_shr_tl(l, m, l);
3864     }
3865     /* Compute Right Edge */
3866     if (little_endian) {
3867         tcg_gen_shr_tl(r, m, r);
3868     } else {
3869         tcg_gen_shl_tl(r, m, r);
3870         tcg_gen_and_tl(r, r, m);
3871     }
3872 
3873     /* Compute dst = (s1 == s2 under amask ? l & r : l) */
3874     tcg_gen_xor_tl(t, s1, s2);
3875     tcg_gen_and_tl(r, r, l);
3876     tcg_gen_movcond_tl(TCG_COND_TSTEQ, dst, t, tcg_constant_tl(amask), r, l);
3877 
3878     gen_store_gpr(dc, a->rd, dst);
3879     return advance_pc(dc);
3880 }
3881 
3882 TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
3883 TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
3884 TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
3885 TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
3886 TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
3887 TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)
3888 
3889 TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
3890 TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
3891 TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
3892 TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
3893 TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
3894 TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
3895 
3896 static bool do_rr(DisasContext *dc, arg_r_r *a,
3897                   void (*func)(TCGv, TCGv))
3898 {
3899     TCGv dst = gen_dest_gpr(dc, a->rd);
3900     TCGv src = gen_load_gpr(dc, a->rs);
3901 
3902     func(dst, src);
3903     gen_store_gpr(dc, a->rd, dst);
3904     return advance_pc(dc);
3905 }
3906 
3907 TRANS(LZCNT, VIS3, do_rr, a, gen_op_lzcnt)
3908 
3909 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
3910                    void (*func)(TCGv, TCGv, TCGv))
3911 {
3912     TCGv dst = gen_dest_gpr(dc, a->rd);
3913     TCGv src1 = gen_load_gpr(dc, a->rs1);
3914     TCGv src2 = gen_load_gpr(dc, a->rs2);
3915 
3916     func(dst, src1, src2);
3917     gen_store_gpr(dc, a->rd, dst);
3918     return advance_pc(dc);
3919 }
3920 
3921 TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
3922 TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
3923 TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
3924 
3925 TRANS(ADDXC, VIS3, do_rrr, a, gen_op_addxc)
3926 TRANS(ADDXCcc, VIS3, do_rrr, a, gen_op_addxccc)
3927 
3928 TRANS(UMULXHI, VIS3, do_rrr, a, gen_op_umulxhi)
3929 
3930 static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
3931 {
3932 #ifdef TARGET_SPARC64
3933     TCGv tmp = tcg_temp_new();
3934 
3935     tcg_gen_add_tl(tmp, s1, s2);
3936     tcg_gen_andi_tl(dst, tmp, -8);
3937     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3938 #else
3939     g_assert_not_reached();
3940 #endif
3941 }
3942 
3943 static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
3944 {
3945 #ifdef TARGET_SPARC64
3946     TCGv tmp = tcg_temp_new();
3947 
3948     tcg_gen_add_tl(tmp, s1, s2);
3949     tcg_gen_andi_tl(dst, tmp, -8);
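    /*
     * The little-endian variant stores the negated byte offset in
     * GSR.align (only the low 3 bits are deposited).
     */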
3950     tcg_gen_neg_tl(tmp, tmp);
3951     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3952 #else
3953     g_assert_not_reached();
3954 #endif
3955 }
3956 
3957 TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
3958 TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
3959 
3960 static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
3961 {
3962 #ifdef TARGET_SPARC64
3963     tcg_gen_add_tl(dst, s1, s2);
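    /* GSR.mask occupies bits 63:32; deposit the low 32 bits of the sum. */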
3964     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
3965 #else
3966     g_assert_not_reached();
3967 #endif
3968 }
3969 
3970 TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
3971 
3972 static bool do_cmask(DisasContext *dc, int rs2, void (*func)(TCGv, TCGv, TCGv))
3973 {
3974     func(cpu_gsr, cpu_gsr, gen_load_gpr(dc, rs2));
3975     return true;
3976 }
3977 
3978 TRANS(CMASK8, VIS3, do_cmask, a->rs2, gen_helper_cmask8)
3979 TRANS(CMASK16, VIS3, do_cmask, a->rs2, gen_helper_cmask16)
3980 TRANS(CMASK32, VIS3, do_cmask, a->rs2, gen_helper_cmask32)
3981 
3982 static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
3983 {
3984     TCGv dst, src1, src2;
3985 
3986     /* Reject 64-bit shifts for sparc32. */
3987     if (avail_32(dc) && a->x) {
3988         return false;
3989     }
3990 
3991     src2 = tcg_temp_new();
3992     tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
3993     src1 = gen_load_gpr(dc, a->rs1);
3994     dst = gen_dest_gpr(dc, a->rd);
3995 
3996     if (l) {
3997         tcg_gen_shl_tl(dst, src1, src2);
3998         if (!a->x) {
3999             tcg_gen_ext32u_tl(dst, dst);
4000         }
4001     } else if (u) {
4002         if (!a->x) {
4003             tcg_gen_ext32u_tl(dst, src1);
4004             src1 = dst;
4005         }
4006         tcg_gen_shr_tl(dst, src1, src2);
4007     } else {
4008         if (!a->x) {
4009             tcg_gen_ext32s_tl(dst, src1);
4010             src1 = dst;
4011         }
4012         tcg_gen_sar_tl(dst, src1, src2);
4013     }
4014     gen_store_gpr(dc, a->rd, dst);
4015     return advance_pc(dc);
4016 }
4017 
4018 TRANS(SLL_r, ALL, do_shift_r, a, true, true)
4019 TRANS(SRL_r, ALL, do_shift_r, a, false, true)
4020 TRANS(SRA_r, ALL, do_shift_r, a, false, false)
4021 
4022 static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
4023 {
4024     TCGv dst, src1;
4025 
4026     /* Reject 64-bit shifts for sparc32. */
4027     if (avail_32(dc) && (a->x || a->i >= 32)) {
4028         return false;
4029     }
4030 
4031     src1 = gen_load_gpr(dc, a->rs1);
4032     dst = gen_dest_gpr(dc, a->rd);
4033 
4034     if (avail_32(dc) || a->x) {
4035         if (l) {
4036             tcg_gen_shli_tl(dst, src1, a->i);
4037         } else if (u) {
4038             tcg_gen_shri_tl(dst, src1, a->i);
4039         } else {
4040             tcg_gen_sari_tl(dst, src1, a->i);
4041         }
4042     } else {
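        /*
         * sparc64 with x == 0: shift within the low 32 bits.  The
         * deposit/extract forms yield the shifted value already
         * zero- or sign-extended, in a single operation.
         */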
4043         if (l) {
4044             tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
4045         } else if (u) {
4046             tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
4047         } else {
4048             tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
4049         }
4050     }
4051     gen_store_gpr(dc, a->rd, dst);
4052     return advance_pc(dc);
4053 }
4054 
4055 TRANS(SLL_i, ALL, do_shift_i, a, true, true)
4056 TRANS(SRL_i, ALL, do_shift_i, a, false, true)
4057 TRANS(SRA_i, ALL, do_shift_i, a, false, false)
4058 
4059 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
4060 {
4061     /* For simplicity, we under-decoded the rs2 form. */
4062     if (!imm && rs2_or_imm & ~0x1f) {
4063         return NULL;
4064     }
4065     if (imm || rs2_or_imm == 0) {
4066         return tcg_constant_tl(rs2_or_imm);
4067     } else {
4068         return cpu_regs[rs2_or_imm];
4069     }
4070 }
4071 
4072 static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
4073 {
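    /* Read the old value of rd so movcond can keep it if the test fails. */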
4074     TCGv dst = gen_load_gpr(dc, rd);
4075     TCGv c2 = tcg_constant_tl(cmp->c2);
4076 
4077     tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
4078     gen_store_gpr(dc, rd, dst);
4079     return advance_pc(dc);
4080 }
4081 
4082 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
4083 {
4084     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4085     DisasCompare cmp;
4086 
4087     if (src2 == NULL) {
4088         return false;
4089     }
4090     gen_compare(&cmp, a->cc, a->cond, dc);
4091     return do_mov_cond(dc, &cmp, a->rd, src2);
4092 }
4093 
4094 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
4095 {
4096     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4097     DisasCompare cmp;
4098 
4099     if (src2 == NULL) {
4100         return false;
4101     }
4102     gen_fcompare(&cmp, a->cc, a->cond);
4103     return do_mov_cond(dc, &cmp, a->rd, src2);
4104 }
4105 
4106 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
4107 {
4108     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4109     DisasCompare cmp;
4110 
4111     if (src2 == NULL) {
4112         return false;
4113     }
4114     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
4115         return false;
4116     }
4117     return do_mov_cond(dc, &cmp, a->rd, src2);
4118 }
4119 
4120 static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
4121                            bool (*func)(DisasContext *dc, int rd, TCGv src))
4122 {
4123     TCGv src1, sum;
4124 
4125     /* For simplicity, we under-decoded the rs2 form. */
4126     if (!a->imm && a->rs2_or_imm & ~0x1f) {
4127         return false;
4128     }
4129 
4130     /*
4131      * Always load the sum into a new temporary.  This is required to
4132      * capture the value across a window change, e.g. SAVE and RESTORE:
4133      * a bare register reference would read the new window instead.
4134      */
4135     sum = tcg_temp_new();
4136     src1 = gen_load_gpr(dc, a->rs1);
4137     if (a->imm || a->rs2_or_imm == 0) {
4138         tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
4139     } else {
4140         tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
4141     }
4142     return func(dc, a->rd, sum);
4143 }
4144 
4145 static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
4146 {
4147     /*
4148      * Preserve pc across advance, so that we can delay
4149      * the writeback to rd until after src is consumed.
4150      */
4151     target_ulong cur_pc = dc->pc;
4152 
4153     gen_check_align(dc, src, 3);
4154 
4155     gen_mov_pc_npc(dc);
4156     tcg_gen_mov_tl(cpu_npc, src);
4157     gen_address_mask(dc, cpu_npc);
4158     gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));
4159 
4160     dc->npc = DYNAMIC_PC_LOOKUP;
4161     return true;
4162 }
4163 
4164 TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
4165 
4166 static bool do_rett(DisasContext *dc, int rd, TCGv src)
4167 {
4168     if (!supervisor(dc)) {
4169         return raise_priv(dc);
4170     }
4171 
4172     gen_check_align(dc, src, 3);
4173 
4174     gen_mov_pc_npc(dc);
4175     tcg_gen_mov_tl(cpu_npc, src);
4176     gen_helper_rett(tcg_env);
4177 
4178     dc->npc = DYNAMIC_PC;
4179     return true;
4180 }
4181 
4182 TRANS(RETT, 32, do_add_special, a, do_rett)
4183 
4184 static bool do_return(DisasContext *dc, int rd, TCGv src)
4185 {
4186     gen_check_align(dc, src, 3);
4187     gen_helper_restore(tcg_env);
4188 
4189     gen_mov_pc_npc(dc);
4190     tcg_gen_mov_tl(cpu_npc, src);
4191     gen_address_mask(dc, cpu_npc);
4192 
4193     dc->npc = DYNAMIC_PC_LOOKUP;
4194     return true;
4195 }
4196 
4197 TRANS(RETURN, 64, do_add_special, a, do_return)
4198 
4199 static bool do_save(DisasContext *dc, int rd, TCGv src)
4200 {
4201     gen_helper_save(tcg_env);
4202     gen_store_gpr(dc, rd, src);
4203     return advance_pc(dc);
4204 }
4205 
4206 TRANS(SAVE, ALL, do_add_special, a, do_save)
4207 
4208 static bool do_restore(DisasContext *dc, int rd, TCGv src)
4209 {
4210     gen_helper_restore(tcg_env);
4211     gen_store_gpr(dc, rd, src);
4212     return advance_pc(dc);
4213 }
4214 
4215 TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4216 
4217 static bool do_done_retry(DisasContext *dc, bool done)
4218 {
4219     if (!supervisor(dc)) {
4220         return raise_priv(dc);
4221     }
4222     dc->npc = DYNAMIC_PC;
4223     dc->pc = DYNAMIC_PC;
4224     translator_io_start(&dc->base);
4225     if (done) {
4226         gen_helper_done(tcg_env);
4227     } else {
4228         gen_helper_retry(tcg_env);
4229     }
4230     return true;
4231 }
4232 
4233 TRANS(DONE, 64, do_done_retry, true)
4234 TRANS(RETRY, 64, do_done_retry, false)
4235 
4236 /*
4237  * Major opcode 11 -- load and store instructions
4238  */
4239 
4240 static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
4241 {
4242     TCGv addr, tmp = NULL;
4243 
4244     /* For simplicity, we under-decoded the rs2 form. */
4245     if (!imm && rs2_or_imm & ~0x1f) {
4246         return NULL;
4247     }
4248 
4249     addr = gen_load_gpr(dc, rs1);
4250     if (rs2_or_imm) {
4251         tmp = tcg_temp_new();
4252         if (imm) {
4253             tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
4254         } else {
4255             tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
4256         }
4257         addr = tmp;
4258     }
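    /* If the 32-bit address mask is in effect, zero-extend the address. */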
4259     if (AM_CHECK(dc)) {
4260         if (!tmp) {
4261             tmp = tcg_temp_new();
4262         }
4263         tcg_gen_ext32u_tl(tmp, addr);
4264         addr = tmp;
4265     }
4266     return addr;
4267 }
4268 
4269 static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4270 {
4271     TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4272     DisasASI da;
4273 
4274     if (addr == NULL) {
4275         return false;
4276     }
4277     da = resolve_asi(dc, a->asi, mop);
4278 
4279     reg = gen_dest_gpr(dc, a->rd);
4280     gen_ld_asi(dc, &da, reg, addr);
4281     gen_store_gpr(dc, a->rd, reg);
4282     return advance_pc(dc);
4283 }
4284 
4285 TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
4286 TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
4287 TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
4288 TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
4289 TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
4290 TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
4291 TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4292 
4293 static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4294 {
4295     TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4296     DisasASI da;
4297 
4298     if (addr == NULL) {
4299         return false;
4300     }
4301     da = resolve_asi(dc, a->asi, mop);
4302 
4303     reg = gen_load_gpr(dc, a->rd);
4304     gen_st_asi(dc, &da, reg, addr);
4305     return advance_pc(dc);
4306 }
4307 
4308 TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
4309 TRANS(STB, ALL, do_st_gpr, a, MO_UB)
4310 TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
4311 TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4312 
4313 static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
4314 {
4315     TCGv addr;
4316     DisasASI da;
4317 
4318     if (a->rd & 1) {
4319         return false;
4320     }
4321     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4322     if (addr == NULL) {
4323         return false;
4324     }
4325     da = resolve_asi(dc, a->asi, MO_TEUQ);
4326     gen_ldda_asi(dc, &da, addr, a->rd);
4327     return advance_pc(dc);
4328 }
4329 
4330 static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
4331 {
4332     TCGv addr;
4333     DisasASI da;
4334 
4335     if (a->rd & 1) {
4336         return false;
4337     }
4338     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4339     if (addr == NULL) {
4340         return false;
4341     }
4342     da = resolve_asi(dc, a->asi, MO_TEUQ);
4343     gen_stda_asi(dc, &da, addr, a->rd);
4344     return advance_pc(dc);
4345 }
4346 
4347 static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
4348 {
4349     TCGv addr, reg;
4350     DisasASI da;
4351 
4352     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4353     if (addr == NULL) {
4354         return false;
4355     }
4356     da = resolve_asi(dc, a->asi, MO_UB);
4357 
4358     reg = gen_dest_gpr(dc, a->rd);
4359     gen_ldstub_asi(dc, &da, reg, addr);
4360     gen_store_gpr(dc, a->rd, reg);
4361     return advance_pc(dc);
4362 }
4363 
4364 static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
4365 {
4366     TCGv addr, dst, src;
4367     DisasASI da;
4368 
4369     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4370     if (addr == NULL) {
4371         return false;
4372     }
4373     da = resolve_asi(dc, a->asi, MO_TEUL);
4374 
4375     dst = gen_dest_gpr(dc, a->rd);
4376     src = gen_load_gpr(dc, a->rd);
4377     gen_swap_asi(dc, &da, dst, src, addr);
4378     gen_store_gpr(dc, a->rd, dst);
4379     return advance_pc(dc);
4380 }
4381 
4382 static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4383 {
4384     TCGv addr, o, n, c;
4385     DisasASI da;
4386 
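    /*
     * The CAS address comes from rs1 alone; use the immediate-0 form
     * so that address masking is still applied.
     */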
4387     addr = gen_ldst_addr(dc, a->rs1, true, 0);
4388     if (addr == NULL) {
4389         return false;
4390     }
4391     da = resolve_asi(dc, a->asi, mop);
4392 
4393     o = gen_dest_gpr(dc, a->rd);
4394     n = gen_load_gpr(dc, a->rd);
4395     c = gen_load_gpr(dc, a->rs2_or_imm);
4396     gen_cas_asi(dc, &da, o, n, c, addr);
4397     gen_store_gpr(dc, a->rd, o);
4398     return advance_pc(dc);
4399 }
4400 
4401 TRANS(CASA, CASA, do_casa, a, MO_TEUL)
4402 TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4403 
4404 static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4405 {
4406     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4407     DisasASI da;
4408 
4409     if (addr == NULL) {
4410         return false;
4411     }
4412     if (gen_trap_ifnofpu(dc)) {
4413         return true;
4414     }
4415     if (sz == MO_128 && gen_trap_float128(dc)) {
4416         return true;
4417     }
4418     da = resolve_asi(dc, a->asi, MO_TE | sz);
4419     gen_ldf_asi(dc, &da, sz, addr, a->rd);
4420     gen_update_fprs_dirty(dc, a->rd);
4421     return advance_pc(dc);
4422 }
4423 
4424 TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
4425 TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
4426 TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)
4427 
4428 TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
4429 TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
4430 TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4431 
4432 static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4433 {
4434     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4435     DisasASI da;
4436 
4437     if (addr == NULL) {
4438         return false;
4439     }
4440     if (gen_trap_ifnofpu(dc)) {
4441         return true;
4442     }
4443     if (sz == MO_128 && gen_trap_float128(dc)) {
4444         return true;
4445     }
4446     da = resolve_asi(dc, a->asi, MO_TE | sz);
4447     gen_stf_asi(dc, &da, sz, addr, a->rd);
4448     return advance_pc(dc);
4449 }
4450 
4451 TRANS(STF, ALL, do_st_fpr, a, MO_32)
4452 TRANS(STDF, ALL, do_st_fpr, a, MO_64)
4453 TRANS(STQF, ALL, do_st_fpr, a, MO_128)
4454 
4455 TRANS(STFA, 64, do_st_fpr, a, MO_32)
4456 TRANS(STDFA, 64, do_st_fpr, a, MO_64)
4457 TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4458 
4459 static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
4460 {
4461     if (!avail_32(dc)) {
4462         return false;
4463     }
4464     if (!supervisor(dc)) {
4465         return raise_priv(dc);
4466     }
4467     if (gen_trap_ifnofpu(dc)) {
4468         return true;
4469     }
4470     gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
4471     return true;
4472 }
4473 
4474 static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
4475 {
4476     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4477     TCGv_i32 tmp;
4478 
4479     if (addr == NULL) {
4480         return false;
4481     }
4482     if (gen_trap_ifnofpu(dc)) {
4483         return true;
4484     }
4485 
4486     tmp = tcg_temp_new_i32();
4487     tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);
4488 
4489     tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2);
4490     /* LDFSR does not change FCC[1-3]. */
4491 
4492     gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp);
4493     return advance_pc(dc);
4494 }
4495 
4496 static bool do_ldxfsr(DisasContext *dc, arg_r_r_ri *a, bool entire)
4497 {
4498 #ifdef TARGET_SPARC64
4499     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4500     TCGv_i64 t64;
4501     TCGv_i32 lo, hi;
4502 
4503     if (addr == NULL) {
4504         return false;
4505     }
4506     if (gen_trap_ifnofpu(dc)) {
4507         return true;
4508     }
4509 
4510     t64 = tcg_temp_new_i64();
4511     tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN);
4512 
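    /*
     * fcc0 sits in the low word of FSR, fcc1-3 in the high word
     * (hence the "- 32" shifts).  cpu_fcc[3] doubles as scratch for
     * the high word and is extracted last.
     */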
4513     lo = tcg_temp_new_i32();
4514     hi = cpu_fcc[3];
4515     tcg_gen_extr_i64_i32(lo, hi, t64);
4516     tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2);
4517     tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2);
4518     tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2);
4519     tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2);
4520 
4521     if (entire) {
4522         gen_helper_set_fsr_nofcc(tcg_env, lo);
4523     } else {
4524         gen_helper_set_fsr_nofcc_noftt(tcg_env, lo);
4525     }
4526     return advance_pc(dc);
4527 #else
4528     return false;
4529 #endif
4530 }
4531 
4532 TRANS(LDXFSR, 64, do_ldxfsr, a, false)
4533 TRANS(LDXEFSR, VIS3B, do_ldxfsr, a, true)
4534 
4535 static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
4536 {
4537     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4538     TCGv fsr;
4539 
4540     if (addr == NULL) {
4541         return false;
4542     }
4543     if (gen_trap_ifnofpu(dc)) {
4544         return true;
4545     }
4546 
4547     fsr = tcg_temp_new();
4548     gen_helper_get_fsr(fsr, tcg_env);
4549     tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
4550     return advance_pc(dc);
4551 }
4552 
4553 TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
4554 TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4555 
4556 static bool do_fc(DisasContext *dc, int rd, int32_t c)
4557 {
4558     if (gen_trap_ifnofpu(dc)) {
4559         return true;
4560     }
4561     gen_store_fpr_F(dc, rd, tcg_constant_i32(c));
4562     return advance_pc(dc);
4563 }
4564 
4565 TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
4566 TRANS(FONEs, VIS1, do_fc, a->rd, -1)
4567 
4568 static bool do_dc(DisasContext *dc, int rd, int64_t c)
4569 {
4570     if (gen_trap_ifnofpu(dc)) {
4571         return true;
4572     }
4573     gen_store_fpr_D(dc, rd, tcg_constant_i64(c));
4574     return advance_pc(dc);
4575 }
4576 
4577 TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
4578 TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4579 
4580 static bool do_ff(DisasContext *dc, arg_r_r *a,
4581                   void (*func)(TCGv_i32, TCGv_i32))
4582 {
4583     TCGv_i32 tmp;
4584 
4585     if (gen_trap_ifnofpu(dc)) {
4586         return true;
4587     }
4588 
4589     tmp = gen_load_fpr_F(dc, a->rs);
4590     func(tmp, tmp);
4591     gen_store_fpr_F(dc, a->rd, tmp);
4592     return advance_pc(dc);
4593 }
4594 
4595 TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
4596 TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
4597 TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
4598 TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
4599 TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4600 
4601 static bool do_fd(DisasContext *dc, arg_r_r *a,
4602                   void (*func)(TCGv_i32, TCGv_i64))
4603 {
4604     TCGv_i32 dst;
4605     TCGv_i64 src;
4606 
4607     if (gen_trap_ifnofpu(dc)) {
4608         return true;
4609     }
4610 
4611     dst = tcg_temp_new_i32();
4612     src = gen_load_fpr_D(dc, a->rs);
4613     func(dst, src);
4614     gen_store_fpr_F(dc, a->rd, dst);
4615     return advance_pc(dc);
4616 }
4617 
4618 TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
4619 TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4620 
4621 static bool do_env_ff(DisasContext *dc, arg_r_r *a,
4622                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
4623 {
4624     TCGv_i32 tmp;
4625 
4626     if (gen_trap_ifnofpu(dc)) {
4627         return true;
4628     }
4629 
4630     tmp = gen_load_fpr_F(dc, a->rs);
4631     func(tmp, tcg_env, tmp);
4632     gen_store_fpr_F(dc, a->rd, tmp);
4633     return advance_pc(dc);
4634 }
4635 
4636 TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
4637 TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
4638 TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4639 
4640 static bool do_env_fd(DisasContext *dc, arg_r_r *a,
4641                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
4642 {
4643     TCGv_i32 dst;
4644     TCGv_i64 src;
4645 
4646     if (gen_trap_ifnofpu(dc)) {
4647         return true;
4648     }
4649 
4650     dst = tcg_temp_new_i32();
4651     src = gen_load_fpr_D(dc, a->rs);
4652     func(dst, tcg_env, src);
4653     gen_store_fpr_F(dc, a->rd, dst);
4654     return advance_pc(dc);
4655 }
4656 
4657 TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
4658 TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
4659 TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4660 
4661 static bool do_dd(DisasContext *dc, arg_r_r *a,
4662                   void (*func)(TCGv_i64, TCGv_i64))
4663 {
4664     TCGv_i64 dst, src;
4665 
4666     if (gen_trap_ifnofpu(dc)) {
4667         return true;
4668     }
4669 
4670     dst = tcg_temp_new_i64();
4671     src = gen_load_fpr_D(dc, a->rs);
4672     func(dst, src);
4673     gen_store_fpr_D(dc, a->rd, dst);
4674     return advance_pc(dc);
4675 }
4676 
4677 TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
4678 TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
4679 TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
4680 TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
4681 TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4682 
4683 static bool do_env_dd(DisasContext *dc, arg_r_r *a,
4684                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
4685 {
4686     TCGv_i64 dst, src;
4687 
4688     if (gen_trap_ifnofpu(dc)) {
4689         return true;
4690     }
4691 
4692     dst = tcg_temp_new_i64();
4693     src = gen_load_fpr_D(dc, a->rs);
4694     func(dst, tcg_env, src);
4695     gen_store_fpr_D(dc, a->rd, dst);
4696     return advance_pc(dc);
4697 }
4698 
4699 TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
4700 TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
4701 TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4702 
4703 static bool do_df(DisasContext *dc, arg_r_r *a,
4704                   void (*func)(TCGv_i64, TCGv_i32))
4705 {
4706     TCGv_i64 dst;
4707     TCGv_i32 src;
4708 
4709     if (gen_trap_ifnofpu(dc)) {
4710         return true;
4711     }
4712 
4713     dst = tcg_temp_new_i64();
4714     src = gen_load_fpr_F(dc, a->rs);
4715     func(dst, src);
4716     gen_store_fpr_D(dc, a->rd, dst);
4717     return advance_pc(dc);
4718 }
4719 
4720 TRANS(FEXPAND, VIS1, do_df, a, gen_helper_fexpand)
4721 
4722 static bool do_env_df(DisasContext *dc, arg_r_r *a,
4723                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
4724 {
4725     TCGv_i64 dst;
4726     TCGv_i32 src;
4727 
4728     if (gen_trap_ifnofpu(dc)) {
4729         return true;
4730     }
4731 
4732     dst = tcg_temp_new_i64();
4733     src = gen_load_fpr_F(dc, a->rs);
4734     func(dst, tcg_env, src);
4735     gen_store_fpr_D(dc, a->rd, dst);
4736     return advance_pc(dc);
4737 }
4738 
4739 TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
4740 TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
4741 TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4742 
4743 static bool do_qq(DisasContext *dc, arg_r_r *a,
4744                   void (*func)(TCGv_i128, TCGv_i128))
4745 {
4746     TCGv_i128 t;
4747 
4748     if (gen_trap_ifnofpu(dc)) {
4749         return true;
4750     }
4751     if (gen_trap_float128(dc)) {
4752         return true;
4753     }
4754 
4755     gen_op_clear_ieee_excp_and_FTT();
4756     t = gen_load_fpr_Q(dc, a->rs);
4757     func(t, t);
4758     gen_store_fpr_Q(dc, a->rd, t);
4759     return advance_pc(dc);
4760 }
4761 
4762 TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
4763 TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
4764 TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)
4765 
4766 static bool do_env_qq(DisasContext *dc, arg_r_r *a,
4767                       void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
4768 {
4769     TCGv_i128 t;
4770 
4771     if (gen_trap_ifnofpu(dc)) {
4772         return true;
4773     }
4774     if (gen_trap_float128(dc)) {
4775         return true;
4776     }
4777 
4778     t = gen_load_fpr_Q(dc, a->rs);
4779     func(t, tcg_env, t);
4780     gen_store_fpr_Q(dc, a->rd, t);
4781     return advance_pc(dc);
4782 }
4783 
4784 TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4785 
4786 static bool do_env_fq(DisasContext *dc, arg_r_r *a,
4787                       void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
4788 {
4789     TCGv_i128 src;
4790     TCGv_i32 dst;
4791 
4792     if (gen_trap_ifnofpu(dc)) {
4793         return true;
4794     }
4795     if (gen_trap_float128(dc)) {
4796         return true;
4797     }
4798 
4799     src = gen_load_fpr_Q(dc, a->rs);
4800     dst = tcg_temp_new_i32();
4801     func(dst, tcg_env, src);
4802     gen_store_fpr_F(dc, a->rd, dst);
4803     return advance_pc(dc);
4804 }
4805 
4806 TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
4807 TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4808 
4809 static bool do_env_dq(DisasContext *dc, arg_r_r *a,
4810                       void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
4811 {
4812     TCGv_i128 src;
4813     TCGv_i64 dst;
4814 
4815     if (gen_trap_ifnofpu(dc)) {
4816         return true;
4817     }
4818     if (gen_trap_float128(dc)) {
4819         return true;
4820     }
4821 
4822     src = gen_load_fpr_Q(dc, a->rs);
4823     dst = tcg_temp_new_i64();
4824     func(dst, tcg_env, src);
4825     gen_store_fpr_D(dc, a->rd, dst);
4826     return advance_pc(dc);
4827 }
4828 
4829 TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
4830 TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4831 
4832 static bool do_env_qf(DisasContext *dc, arg_r_r *a,
4833                       void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
4834 {
4835     TCGv_i32 src;
4836     TCGv_i128 dst;
4837 
4838     if (gen_trap_ifnofpu(dc)) {
4839         return true;
4840     }
4841     if (gen_trap_float128(dc)) {
4842         return true;
4843     }
4844 
4845     src = gen_load_fpr_F(dc, a->rs);
4846     dst = tcg_temp_new_i128();
4847     func(dst, tcg_env, src);
4848     gen_store_fpr_Q(dc, a->rd, dst);
4849     return advance_pc(dc);
4850 }
4851 
4852 TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
4853 TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4854 
4855 static bool do_env_qd(DisasContext *dc, arg_r_r *a,
4856                       void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
4857 {
4858     TCGv_i64 src;
4859     TCGv_i128 dst;
4860 
4861     if (gen_trap_ifnofpu(dc)) {
4862         return true;
4863     }
4864     if (gen_trap_float128(dc)) {
4865         return true;
4866     }
4867 
4868     src = gen_load_fpr_D(dc, a->rs);
4869     dst = tcg_temp_new_i128();
4870     func(dst, tcg_env, src);
4871     gen_store_fpr_Q(dc, a->rd, dst);
4872     return advance_pc(dc);
4873 }
4874 
4875 TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
4876 TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
4877 
4878 static bool do_fff(DisasContext *dc, arg_r_r_r *a,
4879                    void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
4880 {
4881     TCGv_i32 src1, src2;
4882 
4883     if (gen_trap_ifnofpu(dc)) {
4884         return true;
4885     }
4886 
4887     src1 = gen_load_fpr_F(dc, a->rs1);
4888     src2 = gen_load_fpr_F(dc, a->rs2);
4889     func(src1, src1, src2);
4890     gen_store_fpr_F(dc, a->rd, src1);
4891     return advance_pc(dc);
4892 }
4893 
4894 TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
4895 TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
4896 TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
4897 TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
4898 TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
4899 TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
4900 TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
4901 TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
4902 TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
4903 TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
4904 TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
4905 TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
4906 
4907 TRANS(FHADDs, VIS3, do_fff, a, gen_op_fhadds)
4908 TRANS(FHSUBs, VIS3, do_fff, a, gen_op_fhsubs)
4909 TRANS(FNHADDs, VIS3, do_fff, a, gen_op_fnhadds)
4910 
4911 TRANS(FPADDS16s, VIS3, do_fff, a, gen_op_fpadds16s)
4912 TRANS(FPSUBS16s, VIS3, do_fff, a, gen_op_fpsubs16s)
4913 TRANS(FPADDS32s, VIS3, do_fff, a, gen_op_fpadds32s)
4914 TRANS(FPSUBS32s, VIS3, do_fff, a, gen_op_fpsubs32s)
4915 
4916 static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
4917                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
4918 {
4919     TCGv_i32 src1, src2;
4920 
4921     if (gen_trap_ifnofpu(dc)) {
4922         return true;
4923     }
4924 
4925     src1 = gen_load_fpr_F(dc, a->rs1);
4926     src2 = gen_load_fpr_F(dc, a->rs2);
4927     func(src1, tcg_env, src1, src2);
4928     gen_store_fpr_F(dc, a->rd, src1);
4929     return advance_pc(dc);
4930 }
4931 
4932 TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
4933 TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
4934 TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
4935 TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
4936 TRANS(FNADDs, VIS3, do_env_fff, a, gen_helper_fnadds)
4937 TRANS(FNMULs, VIS3, do_env_fff, a, gen_helper_fnmuls)
4938 
4939 static bool do_dff(DisasContext *dc, arg_r_r_r *a,
4940                    void (*func)(TCGv_i64, TCGv_i32, TCGv_i32))
4941 {
4942     TCGv_i64 dst;
4943     TCGv_i32 src1, src2;
4944 
4945     if (gen_trap_ifnofpu(dc)) {
4946         return true;
4947     }
4948 
4949     dst = tcg_temp_new_i64();
4950     src1 = gen_load_fpr_F(dc, a->rs1);
4951     src2 = gen_load_fpr_F(dc, a->rs2);
4952     func(dst, src1, src2);
4953     gen_store_fpr_D(dc, a->rd, dst);
4954     return advance_pc(dc);
4955 }
4956 
4957 TRANS(FMUL8x16AU, VIS1, do_dff, a, gen_op_fmul8x16au)
4958 TRANS(FMUL8x16AL, VIS1, do_dff, a, gen_op_fmul8x16al)
4959 TRANS(FMULD8SUx16, VIS1, do_dff, a, gen_op_fmuld8sux16)
4960 TRANS(FMULD8ULx16, VIS1, do_dff, a, gen_op_fmuld8ulx16)
4961 TRANS(FPMERGE, VIS1, do_dff, a, gen_helper_fpmerge)
4962 
4963 static bool do_dfd(DisasContext *dc, arg_r_r_r *a,
4964                    void (*func)(TCGv_i64, TCGv_i32, TCGv_i64))
4965 {
4966     TCGv_i64 dst, src2;
4967     TCGv_i32 src1;
4968 
4969     if (gen_trap_ifnofpu(dc)) {
4970         return true;
4971     }
4972 
4973     dst = tcg_temp_new_i64();
4974     src1 = gen_load_fpr_F(dc, a->rs1);
4975     src2 = gen_load_fpr_D(dc, a->rs2);
4976     func(dst, src1, src2);
4977     gen_store_fpr_D(dc, a->rd, dst);
4978     return advance_pc(dc);
4979 }
4980 
4981 TRANS(FMUL8x16, VIS1, do_dfd, a, gen_helper_fmul8x16)
4982 
4983 static bool do_gvec_ddd(DisasContext *dc, arg_r_r_r *a, MemOp vece,
4984                         void (*func)(unsigned, uint32_t, uint32_t,
4985                                      uint32_t, uint32_t, uint32_t))
4986 {
4987     if (gen_trap_ifnofpu(dc)) {
4988         return true;
4989     }
4990 
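    /*
     * Each D register is 8 bytes, so run the vector op directly on
     * the register file with oprsz = maxsz = 8.
     */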
4991     func(vece, gen_offset_fpr_D(a->rd), gen_offset_fpr_D(a->rs1),
4992          gen_offset_fpr_D(a->rs2), 8, 8);
4993     return advance_pc(dc);
4994 }
4995 
4996 TRANS(FPADD16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_add)
4997 TRANS(FPADD32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_add)
4998 TRANS(FPSUB16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sub)
4999 TRANS(FPSUB32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sub)
5000 TRANS(FCHKSM16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fchksm16)
5001 TRANS(FMEAN16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fmean16)
5002 
5003 TRANS(FPADDS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ssadd)
5004 TRANS(FPADDS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_ssadd)
5005 TRANS(FPSUBS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sssub)
5006 TRANS(FPSUBS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sssub)
5007 
5008 TRANS(FSLL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shlv)
5009 TRANS(FSLL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shlv)
5010 TRANS(FSRL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shrv)
5011 TRANS(FSRL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shrv)
5012 TRANS(FSRA16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sarv)
5013 TRANS(FSRA32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sarv)
5014 
5015 static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
5016                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
5017 {
5018     TCGv_i64 dst, src1, src2;
5019 
5020     if (gen_trap_ifnofpu(dc)) {
5021         return true;
5022     }
5023 
5024     dst = tcg_temp_new_i64();
5025     src1 = gen_load_fpr_D(dc, a->rs1);
5026     src2 = gen_load_fpr_D(dc, a->rs2);
5027     func(dst, src1, src2);
5028     gen_store_fpr_D(dc, a->rd, dst);
5029     return advance_pc(dc);
5030 }
5031 
5032 TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
5033 TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
5034 
5035 TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
5036 TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
5037 TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
5038 TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
5039 TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
5040 TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
5041 TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
5042 TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)
5043 
5044 TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
5045 TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
5046 TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
5047 
5048 TRANS(FHADDd, VIS3, do_ddd, a, gen_op_fhaddd)
5049 TRANS(FHSUBd, VIS3, do_ddd, a, gen_op_fhsubd)
5050 TRANS(FNHADDd, VIS3, do_ddd, a, gen_op_fnhaddd)
5051 
5052 TRANS(FPADD64, VIS3B, do_ddd, a, tcg_gen_add_i64)
5053 TRANS(FPSUB64, VIS3B, do_ddd, a, tcg_gen_sub_i64)
5054 TRANS(FSLAS16, VIS3, do_ddd, a, gen_helper_fslas16)
5055 TRANS(FSLAS32, VIS3, do_ddd, a, gen_helper_fslas32)
5056 
5057 static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
5058                    void (*func)(TCGv, TCGv_i64, TCGv_i64))
5059 {
5060     TCGv_i64 src1, src2;
5061     TCGv dst;
5062 
5063     if (gen_trap_ifnofpu(dc)) {
5064         return true;
5065     }
5066 
5067     dst = gen_dest_gpr(dc, a->rd);
5068     src1 = gen_load_fpr_D(dc, a->rs1);
5069     src2 = gen_load_fpr_D(dc, a->rs2);
5070     func(dst, src1, src2);
5071     gen_store_gpr(dc, a->rd, dst);
5072     return advance_pc(dc);
5073 }
5074 
5075 TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
5076 TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
5077 TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
5078 TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
5079 
5080 TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
5081 TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
5082 TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
5083 TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
5084 
5085 TRANS(FPCMPEQ8, VIS3B, do_rdd, a, gen_helper_fcmpeq8)
5086 TRANS(FPCMPNE8, VIS3B, do_rdd, a, gen_helper_fcmpne8)
5087 TRANS(FPCMPULE8, VIS3B, do_rdd, a, gen_helper_fcmpule8)
5088 TRANS(FPCMPUGT8, VIS3B, do_rdd, a, gen_helper_fcmpugt8)
5089 
5090 TRANS(PDISTN, VIS3, do_rdd, a, gen_op_pdistn)
5091 
5092 static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
5093                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
5094 {
5095     TCGv_i64 dst, src1, src2;
5096 
5097     if (gen_trap_ifnofpu(dc)) {
5098         return true;
5099     }
5100 
5101     dst = tcg_temp_new_i64();
5102     src1 = gen_load_fpr_D(dc, a->rs1);
5103     src2 = gen_load_fpr_D(dc, a->rs2);
5104     func(dst, tcg_env, src1, src2);
5105     gen_store_fpr_D(dc, a->rd, dst);
5106     return advance_pc(dc);
5107 }
5108 
5109 TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
5110 TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
5111 TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
5112 TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
5113 TRANS(FNADDd, VIS3, do_env_ddd, a, gen_helper_fnaddd)
5114 TRANS(FNMULd, VIS3, do_env_ddd, a, gen_helper_fnmuld)
5115 
5116 static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
5117 {
5118     TCGv_i64 dst;
5119     TCGv_i32 src1, src2;
5120 
5121     if (gen_trap_ifnofpu(dc)) {
5122         return true;
5123     }
5124     if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
5125         return raise_unimpfpop(dc);
5126     }
5127 
5128     dst = tcg_temp_new_i64();
5129     src1 = gen_load_fpr_F(dc, a->rs1);
5130     src2 = gen_load_fpr_F(dc, a->rs2);
5131     gen_helper_fsmuld(dst, tcg_env, src1, src2);
5132     gen_store_fpr_D(dc, a->rd, dst);
5133     return advance_pc(dc);
5134 }
5135 
5136 static bool trans_FNsMULd(DisasContext *dc, arg_r_r_r *a)
5137 {
5138     TCGv_i64 dst;
5139     TCGv_i32 src1, src2;
5140 
5141     if (!avail_VIS3(dc)) {
5142         return false;
5143     }
5144     if (gen_trap_ifnofpu(dc)) {
5145         return true;
5146     }
5147     dst = tcg_temp_new_i64();
5148     src1 = gen_load_fpr_F(dc, a->rs1);
5149     src2 = gen_load_fpr_F(dc, a->rs2);
5150     gen_helper_fnsmuld(dst, tcg_env, src1, src2);
5151     gen_store_fpr_D(dc, a->rd, dst);
5152     return advance_pc(dc);
5153 }
5154 
5155 static bool do_ffff(DisasContext *dc, arg_r_r_r_r *a,
5156                     void (*func)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
5157 {
5158     TCGv_i32 dst, src1, src2, src3;
5159 
5160     if (gen_trap_ifnofpu(dc)) {
5161         return true;
5162     }
5163 
5164     src1 = gen_load_fpr_F(dc, a->rs1);
5165     src2 = gen_load_fpr_F(dc, a->rs2);
5166     src3 = gen_load_fpr_F(dc, a->rs3);
5167     dst = tcg_temp_new_i32();
5168     func(dst, src1, src2, src3);
5169     gen_store_fpr_F(dc, a->rd, dst);
5170     return advance_pc(dc);
5171 }
5172 
5173 TRANS(FMADDs, FMAF, do_ffff, a, gen_op_fmadds)
5174 TRANS(FMSUBs, FMAF, do_ffff, a, gen_op_fmsubs)
5175 TRANS(FNMSUBs, FMAF, do_ffff, a, gen_op_fnmsubs)
5176 TRANS(FNMADDs, FMAF, do_ffff, a, gen_op_fnmadds)
5177 
5178 static bool do_dddd(DisasContext *dc, arg_r_r_r_r *a,
5179                     void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
5180 {
5181     TCGv_i64 dst, src1, src2, src3;
5182 
5183     if (gen_trap_ifnofpu(dc)) {
5184         return true;
5185     }
5186 
5187     dst  = tcg_temp_new_i64();
5188     src1 = gen_load_fpr_D(dc, a->rs1);
5189     src2 = gen_load_fpr_D(dc, a->rs2);
5190     src3 = gen_load_fpr_D(dc, a->rs3);
5191     func(dst, src1, src2, src3);
5192     gen_store_fpr_D(dc, a->rd, dst);
5193     return advance_pc(dc);
5194 }
5195 
5196 TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
5197 TRANS(FMADDd, FMAF, do_dddd, a, gen_op_fmaddd)
5198 TRANS(FMSUBd, FMAF, do_dddd, a, gen_op_fmsubd)
5199 TRANS(FNMSUBd, FMAF, do_dddd, a, gen_op_fnmsubd)
5200 TRANS(FNMADDd, FMAF, do_dddd, a, gen_op_fnmaddd)
5201 
5202 static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
5203                        void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
5204 {
5205     TCGv_i128 src1, src2;
5206 
5207     if (gen_trap_ifnofpu(dc)) {
5208         return true;
5209     }
5210     if (gen_trap_float128(dc)) {
5211         return true;
5212     }
5213 
5214     src1 = gen_load_fpr_Q(dc, a->rs1);
5215     src2 = gen_load_fpr_Q(dc, a->rs2);
5216     func(src1, tcg_env, src1, src2);
5217     gen_store_fpr_Q(dc, a->rd, src1);
5218     return advance_pc(dc);
5219 }
5220 
5221 TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
5222 TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
5223 TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
5224 TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
5225 
5226 static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
5227 {
5228     TCGv_i64 src1, src2;
5229     TCGv_i128 dst;
5230 
5231     if (gen_trap_ifnofpu(dc)) {
5232         return true;
5233     }
5234     if (gen_trap_float128(dc)) {
5235         return true;
5236     }
5237 
5238     src1 = gen_load_fpr_D(dc, a->rs1);
5239     src2 = gen_load_fpr_D(dc, a->rs2);
5240     dst = tcg_temp_new_i128();
5241     gen_helper_fdmulq(dst, tcg_env, src1, src2);
5242     gen_store_fpr_Q(dc, a->rd, dst);
5243     return advance_pc(dc);
5244 }
5245 
static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
                     void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)

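/* FMOVcc: conditional FP move on the integer condition codes. */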
static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
                      void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_compare(&cmp, a->cc, a->cond, dc);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)

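/* FMOVfcc: conditional FP move on one of the %fcc fields. */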
static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
                       void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_fcompare(&cmp, a->cc, a->cond);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)

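/*
 * FCMP/FCMPE for each precision.  Pre-v9 (32-bit) CPUs implement only
 * %fcc0, hence the a->cc != 0 reject; 'e' selects the signaling
 * variant, which raises an IEEE invalid exception on quiet NaNs too.
 */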
static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
{
    TCGv_i32 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    if (e) {
        gen_helper_fcmpes(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPs, ALL, do_fcmps, a, false)
TRANS(FCMPEs, ALL, do_fcmps, a, true)

static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
{
    TCGv_i64 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    if (e) {
        gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPd, ALL, do_fcmpd, a, false)
TRANS(FCMPEd, ALL, do_fcmpd, a, true)

static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
{
    TCGv_i128 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    if (e) {
        gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPq, ALL, do_fcmpq, a, false)
TRANS(FCMPEq, ALL, do_fcmpq, a, true)

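/*
 * FLCMP (VIS3): lexicographic floating-point compare.  The helpers
 * take no env pointer, so they cannot raise IEEE exceptions.
 */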
static bool trans_FLCMPs(DisasContext *dc, arg_FLCMPs *a)
{
    TCGv_i32 src1, src2;

    if (!avail_VIS3(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_flcmps(cpu_fcc[a->cc], src1, src2);
    return advance_pc(dc);
}

static bool trans_FLCMPd(DisasContext *dc, arg_FLCMPd *a)
{
    TCGv_i64 src1, src2;

    if (!avail_VIS3(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    gen_helper_flcmpd(cpu_fcc[a->cc], src1, src2);
    return advance_pc(dc);
}

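/*
 * MOVsTOsw/MOVsTOuw/MOVdTOx and their inverses (VIS3B): move bits
 * directly between the FP register file and the general registers,
 * as a plain load/store on the CPU state at the FP register's offset.
 */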
static bool do_movf2r(DisasContext *dc, arg_r_r *a,
                      int (*offset)(unsigned int),
                      void (*load)(TCGv, TCGv_ptr, tcg_target_long))
{
    TCGv dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    dst = gen_dest_gpr(dc, a->rd);
    load(dst, tcg_env, offset(a->rs));
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(MOVsTOsw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32s_tl)
TRANS(MOVsTOuw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32u_tl)
TRANS(MOVdTOx, VIS3B, do_movf2r, a, gen_offset_fpr_D, tcg_gen_ld_tl)

static bool do_movr2f(DisasContext *dc, arg_r_r *a,
                      int (*offset)(unsigned int),
                      void (*store)(TCGv, TCGv_ptr, tcg_target_long))
{
    TCGv src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    src = gen_load_gpr(dc, a->rs);
    store(src, tcg_env, offset(a->rd));
    return advance_pc(dc);
}

TRANS(MOVwTOs, VIS3B, do_movr2f, a, gen_offset_fpr_F, tcg_gen_st32_tl)
TRANS(MOVxTOd, VIS3B, do_movr2f, a, gen_offset_fpr_D, tcg_gen_st_tl)

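/*
 * Translator framework hooks.  Per-TB decode state is unpacked from
 * tb->flags; the incoming npc travels in tb->cs_base.
 */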
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    int bound;

    dc->pc = dc->base.pc_first;
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &cpu_env(cs)->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * If we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page.
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

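/*
 * Record pc/npc for this insn.  An npc with low bits set is one of
 * the symbolic JUMP_PC/DYNAMIC_PC markers and must be canonicalized
 * before being logged as an insn_start argument.
 */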
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}

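/*
 * Fetch and decode one 4-byte instruction.  Words rejected by the
 * generated decoder raise an illegal-instruction trap; if the insn
 * changed control flow, close the translation block.
 */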
static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    unsigned int insn;

    insn = translator_ldl(cpu_env(cs), &dc->base, dc->pc);
    dc->base.pc_next += 4;

    if (!decode(dc, insn)) {
        gen_exception(dc, TT_ILL_INSN);
    }

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}

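/*
 * Close the TB: chain directly when both pc and npc are static,
 * otherwise flush the (possibly symbolic) values and exit via
 * lookup-and-goto or a plain exit.  Finally emit the out-of-line
 * stubs for exceptions raised from delay slots.
 */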
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    finishing_insn(dc);

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
        break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    for (e = dc->delay_excp_list; e; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}

static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
};

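/* Entry point from the core translator: run the generic loop. */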
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}

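/*
 * Allocate the TCG globals that back the CPU state.  %g0 is
 * hardwired to zero and gets no backing storage; the window
 * registers are addressed indirectly through regwptr.
 */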
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };

    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" },
        { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" },
        { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" },
        { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" },
#else
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" },
#endif
    };

    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
        { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
#endif
        { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
        { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
        { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
        { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }
}

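/*
 * Rebuild pc/npc from the insn_start arguments when unwinding after
 * an exception.  A condition-dependent npc (JUMP_PC) is resolved
 * using the 'cond' value already stored in the CPU state.
 */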
void sparc_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    CPUSPARCState *env = cpu_env(cs);
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;
        } else {
            env->npc = pc + 4;
        }
    } else {
        env->npc = npc;
    }
}
5720