xref: /qemu/target/sparc/translate.c (revision 5a165e26)
1 /*
2    SPARC translation
3 
4    Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5    Copyright (C) 2003-2005 Fabrice Bellard
6 
7    This library is free software; you can redistribute it and/or
8    modify it under the terms of the GNU Lesser General Public
9    License as published by the Free Software Foundation; either
10    version 2.1 of the License, or (at your option) any later version.
11 
12    This library is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15    Lesser General Public License for more details.
16 
17    You should have received a copy of the GNU Lesser General Public
18    License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 
23 #include "cpu.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "tcg/tcg-op.h"
27 #include "tcg/tcg-op-gvec.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "exec/log.h"
31 #include "fpu/softfloat.h"
32 #include "asi.h"
33 
34 #define HELPER_H "helper.h"
35 #include "exec/helper-info.c.inc"
36 #undef  HELPER_H
37 
38 #ifdef TARGET_SPARC64
39 # define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
40 # define gen_helper_rdasr17(D, E)               qemu_build_not_reached()
41 # define gen_helper_rett(E)                     qemu_build_not_reached()
42 # define gen_helper_power_down(E)               qemu_build_not_reached()
43 # define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
44 #else
45 # define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
46 # define gen_helper_done(E)                     qemu_build_not_reached()
47 # define gen_helper_flushw(E)                   qemu_build_not_reached()
48 # define gen_helper_fmul8x16a(D, S1, S2)        qemu_build_not_reached()
49 # define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
50 # define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
51 # define gen_helper_restored(E)                 qemu_build_not_reached()
52 # define gen_helper_retry(E)                    qemu_build_not_reached()
53 # define gen_helper_saved(E)                    qemu_build_not_reached()
54 # define gen_helper_set_softint(E, S)           qemu_build_not_reached()
55 # define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
56 # define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
57 # define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
58 # define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
59 # define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
60 # define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
61 # define gen_helper_write_softint(E, S)         qemu_build_not_reached()
62 # define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
63 # define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
64 # define gen_helper_cmask8               ({ qemu_build_not_reached(); NULL; })
65 # define gen_helper_cmask16              ({ qemu_build_not_reached(); NULL; })
66 # define gen_helper_cmask32              ({ qemu_build_not_reached(); NULL; })
67 # define gen_helper_fcmpeq8              ({ qemu_build_not_reached(); NULL; })
68 # define gen_helper_fcmpeq16             ({ qemu_build_not_reached(); NULL; })
69 # define gen_helper_fcmpeq32             ({ qemu_build_not_reached(); NULL; })
70 # define gen_helper_fcmpgt8              ({ qemu_build_not_reached(); NULL; })
71 # define gen_helper_fcmpgt16             ({ qemu_build_not_reached(); NULL; })
72 # define gen_helper_fcmpgt32             ({ qemu_build_not_reached(); NULL; })
73 # define gen_helper_fcmple8              ({ qemu_build_not_reached(); NULL; })
74 # define gen_helper_fcmple16             ({ qemu_build_not_reached(); NULL; })
75 # define gen_helper_fcmple32             ({ qemu_build_not_reached(); NULL; })
76 # define gen_helper_fcmpne8              ({ qemu_build_not_reached(); NULL; })
77 # define gen_helper_fcmpne16             ({ qemu_build_not_reached(); NULL; })
78 # define gen_helper_fcmpne32             ({ qemu_build_not_reached(); NULL; })
79 # define gen_helper_fcmpule8             ({ qemu_build_not_reached(); NULL; })
80 # define gen_helper_fcmpule16            ({ qemu_build_not_reached(); NULL; })
81 # define gen_helper_fcmpule32            ({ qemu_build_not_reached(); NULL; })
82 # define gen_helper_fcmpugt8             ({ qemu_build_not_reached(); NULL; })
83 # define gen_helper_fcmpugt16            ({ qemu_build_not_reached(); NULL; })
84 # define gen_helper_fcmpugt32            ({ qemu_build_not_reached(); NULL; })
85 # define gen_helper_fdtox                ({ qemu_build_not_reached(); NULL; })
86 # define gen_helper_fexpand              ({ qemu_build_not_reached(); NULL; })
87 # define gen_helper_fmul8sux16           ({ qemu_build_not_reached(); NULL; })
88 # define gen_helper_fmul8ulx16           ({ qemu_build_not_reached(); NULL; })
89 # define gen_helper_fmul8x16             ({ qemu_build_not_reached(); NULL; })
90 # define gen_helper_fpmerge              ({ qemu_build_not_reached(); NULL; })
91 # define gen_helper_fqtox                ({ qemu_build_not_reached(); NULL; })
92 # define gen_helper_fslas16              ({ qemu_build_not_reached(); NULL; })
93 # define gen_helper_fslas32              ({ qemu_build_not_reached(); NULL; })
94 # define gen_helper_fstox                ({ qemu_build_not_reached(); NULL; })
95 # define gen_helper_fxtod                ({ qemu_build_not_reached(); NULL; })
96 # define gen_helper_fxtoq                ({ qemu_build_not_reached(); NULL; })
97 # define gen_helper_fxtos                ({ qemu_build_not_reached(); NULL; })
98 # define gen_helper_pdist                ({ qemu_build_not_reached(); NULL; })
99 # define gen_helper_xmulx                ({ qemu_build_not_reached(); NULL; })
100 # define gen_helper_xmulxhi              ({ qemu_build_not_reached(); NULL; })
101 # define MAXTL_MASK                             0
102 #endif
103 
104 /* Dynamic PC, must exit to main loop. */
105 #define DYNAMIC_PC         1
106 /* Dynamic PC, one of two values according to jump_pc[T2]. */
107 #define JUMP_PC            2
108 /* Dynamic PC, may lookup next TB. */
109 #define DYNAMIC_PC_LOOKUP  3
110 
111 #define DISAS_EXIT  DISAS_TARGET_0
112 
113 /* global register indexes */
114 static TCGv_ptr cpu_regwptr;
115 static TCGv cpu_pc, cpu_npc;
116 static TCGv cpu_regs[32];
117 static TCGv cpu_y;
118 static TCGv cpu_tbr;
119 static TCGv cpu_cond;
120 static TCGv cpu_cc_N;
121 static TCGv cpu_cc_V;
122 static TCGv cpu_icc_Z;
123 static TCGv cpu_icc_C;
124 #ifdef TARGET_SPARC64
125 static TCGv cpu_xcc_Z;
126 static TCGv cpu_xcc_C;
127 static TCGv_i32 cpu_fprs;
128 static TCGv cpu_gsr;
129 #else
130 # define cpu_fprs               ({ qemu_build_not_reached(); (TCGv)NULL; })
131 # define cpu_gsr                ({ qemu_build_not_reached(); (TCGv)NULL; })
132 #endif
133 
134 #ifdef TARGET_SPARC64
135 #define cpu_cc_Z  cpu_xcc_Z
136 #define cpu_cc_C  cpu_xcc_C
137 #else
138 #define cpu_cc_Z  cpu_icc_Z
139 #define cpu_cc_C  cpu_icc_C
140 #define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
141 #define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
142 #endif
143 
144 /* Floating point comparison registers */
145 static TCGv_i32 cpu_fcc[TARGET_FCCREGS];
146 
147 #define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
148 #ifdef TARGET_SPARC64
149 # define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
150 # define env64_field_offsetof(X)  env_field_offsetof(X)
151 #else
152 # define env32_field_offsetof(X)  env_field_offsetof(X)
153 # define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
154 #endif
155 
/*
 * A resolved condition: the comparison "cond(c1, c2)", where c2 is a
 * small integer constant materialized only when finally needed.
 */
typedef struct DisasCompare {
    TCGCond cond;
    TCGv c1;
    int c2;
} DisasCompare;

/*
 * An exception to be raised out-of-line for an insn in a delay slot.
 * Entries are chained off DisasContext.delay_excp_list.
 */
typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;          /* exception number to raise */
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;
170 
/* Per-translation-block decoder state. */
typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */

    /* Used when JUMP_PC value is used. */
    DisasCompare jump;
    target_ulong jump_pc[2];

    int mem_idx;                /* MMU index for memory accesses */
    bool cpu_cond_live;         /* cpu_cond holds a live value (see
                                   finishing_insn) */
    bool fpu_enabled;
    bool address_mask_32bit;    /* mask addresses to 32 bits (AM_CHECK) */
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#else
    bool fsr_qne;               /* NOTE(review): presumably FSR.qne (FPU
                                   queue not empty) — confirm at use site */
#endif
#endif

    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;             /* FPRS dirty bits already set in this TB,
                                   to avoid redundant stores */
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;
200 
201 // This function uses non-native bit order
202 #define GET_FIELD(X, FROM, TO)                                  \
203     ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
204 
205 // This function uses the order in the manuals, i.e. bit 0 is 2^0
206 #define GET_FIELD_SP(X, FROM, TO)               \
207     GET_FIELD(X, 31 - (TO), 31 - (FROM))
208 
209 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
210 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
211 
212 #define UA2005_HTRAP_MASK 0xff
213 #define V8_TRAP_MASK 0x7f
214 
215 #define IS_IMM (insn & (1<<13))
216 
/*
 * Mark the half of the FP register file containing rd as dirty in
 * FPRS: bit 0 for rd < 32, bit 1 otherwise.  No-op on 32-bit sparc,
 * which has no FPRS register.
 */
static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
229 
230 /* floating point registers moves */
231 
232 static int gen_offset_fpr_F(unsigned int reg)
233 {
234     int ret;
235 
236     tcg_debug_assert(reg < 32);
237     ret= offsetof(CPUSPARCState, fpr[reg / 2]);
238     if (reg & 1) {
239         ret += offsetof(CPU_DoubleU, l.lower);
240     } else {
241         ret += offsetof(CPU_DoubleU, l.upper);
242     }
243     return ret;
244 }
245 
246 static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
247 {
248     TCGv_i32 ret = tcg_temp_new_i32();
249     tcg_gen_ld_i32(ret, tcg_env, gen_offset_fpr_F(src));
250     return ret;
251 }
252 
253 static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
254 {
255     tcg_gen_st_i32(v, tcg_env, gen_offset_fpr_F(dst));
256     gen_update_fprs_dirty(dc, dst);
257 }
258 
259 static int gen_offset_fpr_D(unsigned int reg)
260 {
261     tcg_debug_assert(reg < 64);
262     tcg_debug_assert(reg % 2 == 0);
263     return offsetof(CPUSPARCState, fpr[reg / 2]);
264 }
265 
266 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
267 {
268     TCGv_i64 ret = tcg_temp_new_i64();
269     tcg_gen_ld_i64(ret, tcg_env, gen_offset_fpr_D(src));
270     return ret;
271 }
272 
273 static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
274 {
275     tcg_gen_st_i64(v, tcg_env, gen_offset_fpr_D(dst));
276     gen_update_fprs_dirty(dc, dst);
277 }
278 
279 static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
280 {
281     TCGv_i128 ret = tcg_temp_new_i128();
282     TCGv_i64 h = gen_load_fpr_D(dc, src);
283     TCGv_i64 l = gen_load_fpr_D(dc, src + 2);
284 
285     tcg_gen_concat_i64_i128(ret, l, h);
286     return ret;
287 }
288 
289 static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
290 {
291     TCGv_i64 h = tcg_temp_new_i64();
292     TCGv_i64 l = tcg_temp_new_i64();
293 
294     tcg_gen_extr_i128_i64(l, h, v);
295     gen_store_fpr_D(dc, dst, h);
296     gen_store_fpr_D(dc, dst + 2, l);
297 }
298 
299 /* moves */
300 #ifdef CONFIG_USER_ONLY
301 #define supervisor(dc) 0
302 #define hypervisor(dc) 0
303 #else
304 #ifdef TARGET_SPARC64
305 #define hypervisor(dc) (dc->hypervisor)
306 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
307 #else
308 #define supervisor(dc) (dc->supervisor)
309 #define hypervisor(dc) 0
310 #endif
311 #endif
312 
313 #if !defined(TARGET_SPARC64)
314 # define AM_CHECK(dc)  false
315 #elif defined(TARGET_ABI32)
316 # define AM_CHECK(dc)  true
317 #elif defined(CONFIG_USER_ONLY)
318 # define AM_CHECK(dc)  false
319 #else
320 # define AM_CHECK(dc)  ((dc)->address_mask_32bit)
321 #endif
322 
323 static void gen_address_mask(DisasContext *dc, TCGv addr)
324 {
325     if (AM_CHECK(dc)) {
326         tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
327     }
328 }
329 
330 static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
331 {
332     return AM_CHECK(dc) ? (uint32_t)addr : addr;
333 }
334 
335 static TCGv gen_load_gpr(DisasContext *dc, int reg)
336 {
337     if (reg > 0) {
338         assert(reg < 32);
339         return cpu_regs[reg];
340     } else {
341         TCGv t = tcg_temp_new();
342         tcg_gen_movi_tl(t, 0);
343         return t;
344     }
345 }
346 
347 static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
348 {
349     if (reg > 0) {
350         assert(reg < 32);
351         tcg_gen_mov_tl(cpu_regs[reg], v);
352     }
353 }
354 
355 static TCGv gen_dest_gpr(DisasContext *dc, int reg)
356 {
357     if (reg > 0) {
358         assert(reg < 32);
359         return cpu_regs[reg];
360     } else {
361         return tcg_temp_new();
362     }
363 }
364 
365 static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
366 {
367     return translator_use_goto_tb(&s->base, pc) &&
368            translator_use_goto_tb(&s->base, npc);
369 }
370 
371 static void gen_goto_tb(DisasContext *s, int tb_num,
372                         target_ulong pc, target_ulong npc)
373 {
374     if (use_goto_tb(s, pc, npc))  {
375         /* jump to same page: we can use a direct jump */
376         tcg_gen_goto_tb(tb_num);
377         tcg_gen_movi_tl(cpu_pc, pc);
378         tcg_gen_movi_tl(cpu_npc, npc);
379         tcg_gen_exit_tb(s->base.tb, tb_num);
380     } else {
381         /* jump to another page: we can use an indirect jump */
382         tcg_gen_movi_tl(cpu_pc, pc);
383         tcg_gen_movi_tl(cpu_npc, npc);
384         tcg_gen_lookup_and_goto_ptr();
385     }
386 }
387 
/*
 * Return the 32-bit (icc) carry as a 0/1 value.  On 64-bit targets
 * the carry-out of bit 31 is kept in bit 32 of cpu_icc_C and must be
 * extracted; on 32-bit targets cpu_icc_C is used directly.
 */
static TCGv gen_carry32(void)
{
    if (TARGET_LONG_BITS == 64) {
        TCGv t = tcg_temp_new();
        tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
        return t;
    }
    return cpu_icc_C;
}
397 
/*
 * Compute dst = src1 + src2 (+ cin) and update the condition codes:
 * N gets the full result, Z mirrors the result (tested as != 0 when
 * used), C gets the raw carry-out, and V is derived from the sign
 * xors below.  cin may be NULL for a plain add.
 */
static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    /* Overflow iff the operands agree in sign and the result differs:
       V = (result ^ src2) & ~(src1 ^ src2).  */
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
422 
/* ADDcc: add and set all condition codes. */
static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}

/* TADDcc: tagged add; nonzero low two bits of either operand set icc.V. */
static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

/* ADDC: add with the 32-bit carry, leaving the flags untouched. */
static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}

/* ADDCcc: add with the 32-bit carry, updating the flags. */
static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}

/* ADDXC: add with the carry in cpu_cc_C, leaving the flags untouched. */
static void gen_op_addxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, cpu_cc_C);
}

/* ADDXCcc: add with the carry in cpu_cc_C, updating the flags. */
static void gen_op_addxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, cpu_cc_C);
}
465 
/*
 * Compute dst = src1 - src2 (- cin) and update the condition codes,
 * mirroring gen_op_addcc_int.  The borrow produced by sub2 is negated
 * so that C holds a positive borrow flag.
 */
static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
    /* Overflow iff the operands differ in sign and the result's sign
       differs from src1: V = (result ^ src1) & (src1 ^ src2).  */
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
    tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
#ifdef TARGET_SPARC64
    /* Carry-in to bit 32 is result ^ src1 ^ src2, as for add. */
    tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
487 
/* SUBcc: subtract and set all condition codes. */
static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}

/* TSUBcc: tagged subtract; nonzero low two bits of either operand
   set icc.V. */
static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

/* SUBC: subtract with the 32-bit borrow, leaving the flags untouched. */
static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}

/* SUBCcc: subtract with the 32-bit borrow, updating the flags. */
static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}

/* SUBXC: subtract with the borrow in cpu_cc_C, flags untouched. */
static void gen_op_subxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, cpu_cc_C);
}

/* SUBXCcc: subtract with the borrow in cpu_cc_C, updating the flags. */
static void gen_op_subxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, cpu_cc_C);
}
530 
/*
 * MULScc: one step of the SPARC 32-bit multiply.  Shifts Y and src1
 * right by one bit (inserting src1's low bit into Y and N^V into the
 * top of src1), adds src2 only if the low bit of Y was set, and sets
 * the condition codes from the add.
 */
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv one = tcg_constant_tl(1);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}
567 
/*
 * 32x32 -> 64-bit multiply, signed iff sign_ext.  On a 32-bit target
 * the low half goes to dst and the high half to Y; on a 64-bit target
 * dst receives the full 64-bit product and Y its high 32 bits.
 */
static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    /* TCGv is 64 bits wide here, so the _i64 ops apply directly. */
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
592 
/* UMUL: unsigned 32x32 -> 64 multiply. */
static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

/* SMUL: signed 32x32 -> 64 multiply. */
static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

/* UMULXHI: high 64 bits of the unsigned 64x64 -> 128 product. */
static void gen_op_umulxhi(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv discard = tcg_temp_new();
    tcg_gen_mulu2_tl(discard, dst, src1, src2);
}
610 
611 static void gen_op_fpmaddx(TCGv_i64 dst, TCGv_i64 src1,
612                            TCGv_i64 src2, TCGv_i64 src3)
613 {
614     TCGv_i64 t = tcg_temp_new_i64();
615 
616     tcg_gen_mul_i64(t, src1, src2);
617     tcg_gen_add_i64(dst, src3, t);
618 }
619 
620 static void gen_op_fpmaddxhi(TCGv_i64 dst, TCGv_i64 src1,
621                              TCGv_i64 src2, TCGv_i64 src3)
622 {
623     TCGv_i64 l = tcg_temp_new_i64();
624     TCGv_i64 h = tcg_temp_new_i64();
625     TCGv_i64 z = tcg_constant_i64(0);
626 
627     tcg_gen_mulu2_i64(l, h, src1, src2);
628     tcg_gen_add2_i64(l, dst, l, h, src3, z);
629 }
630 
/*
 * SDIV: 64/32 signed divide via helper (which can raise the division
 * exception); the 32-bit quotient is sign-extended into dst.
 */
static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    /* The helper returns an i64; truncate to the 32-bit target reg. */
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}
642 
/*
 * UDIVcc: unsigned divide via helper, setting the condition codes.
 * The helper packs the quotient in the low 32 bits and the overflow
 * flag in the high bits of t64; they are unpacked into N and V.
 */
static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    /* TCGv is 64 bits wide; use cpu_cc_V directly as scratch. */
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
667 
/*
 * SDIVcc: signed divide via helper, setting the condition codes.
 * Same flag unpacking as gen_op_udivcc, but the quotient is
 * sign-extended.
 */
static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    /* TCGv is 64 bits wide; use cpu_cc_V directly as scratch. */
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
692 
/* TADDccTV: trapping tagged add; the helper raises any exception. */
static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

/* TSUBccTV: trapping tagged subtract; the helper raises any exception. */
static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

/* POPC: population count of src2; src1 is ignored. */
static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}

/* LZCNT: count leading zeros; a zero input yields TARGET_LONG_BITS. */
static void gen_op_lzcnt(TCGv dst, TCGv src)
{
    tcg_gen_clzi_tl(dst, src, TARGET_LONG_BITS);
}
712 
#ifndef TARGET_SPARC64
/* The array8 helper exists only on sparc64; stub for the 32-bit build. */
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif

/* ARRAY16: the ARRAY8 address scaled for 2-byte elements. */
static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

/* ARRAY32: the ARRAY8 address scaled for 4-byte elements. */
static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}
731 
/* FPACK16: pack via helper using GSR.scale; sparc64 only. */
static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

/* FPACKFIX: pack via helper using GSR.scale; sparc64 only. */
static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

/* FPACK32: pack via helper using GSR.scale; sparc64 only. */
static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
758 
759 static void gen_op_fpadds16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
760 {
761     TCGv_i32 t[2];
762 
763     for (int i = 0; i < 2; i++) {
764         TCGv_i32 u = tcg_temp_new_i32();
765         TCGv_i32 v = tcg_temp_new_i32();
766 
767         tcg_gen_sextract_i32(u, src1, i * 16, 16);
768         tcg_gen_sextract_i32(v, src2, i * 16, 16);
769         tcg_gen_add_i32(u, u, v);
770         tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
771         tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
772         t[i] = u;
773     }
774     tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
775 }
776 
777 static void gen_op_fpsubs16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
778 {
779     TCGv_i32 t[2];
780 
781     for (int i = 0; i < 2; i++) {
782         TCGv_i32 u = tcg_temp_new_i32();
783         TCGv_i32 v = tcg_temp_new_i32();
784 
785         tcg_gen_sextract_i32(u, src1, i * 16, 16);
786         tcg_gen_sextract_i32(v, src2, i * 16, 16);
787         tcg_gen_sub_i32(u, u, v);
788         tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
789         tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
790         t[i] = u;
791     }
792     tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
793 }
794 
/*
 * Saturating signed 32-bit add: on overflow the result becomes
 * INT32_MAX or INT32_MIN according to the direction of overflow.
 */
static void gen_op_fpadds32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_add_i32(r, src1, src2);
    /* v < 0 iff the add overflowed: operands agree in sign, r differs. */
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src2);
    tcg_gen_andc_i32(v, v, t);

    /* t = saturated value: INT32_MAX when r wrapped negative,
       INT32_MIN (INT32_MAX + 1) when r wrapped non-negative.  */
    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}
812 
/*
 * Saturating signed 32-bit subtract: on overflow the result becomes
 * INT32_MAX or INT32_MIN according to the direction of overflow.
 */
static void gen_op_fpsubs32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_sub_i32(r, src1, src2);
    /* v < 0 iff the subtract overflowed: operands differ in sign and
       r's sign differs from src1.  */
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src1);
    tcg_gen_and_i32(v, v, t);

    /* t = saturated value, chosen by the wrapped sign of r. */
    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}
830 
/*
 * FALIGNDATA: view s1:s2 as a 16-byte value and extract the 8 bytes
 * starting at the byte offset given by the low 3 bits of gsr.
 */
static void gen_op_faligndata_i(TCGv_i64 dst, TCGv_i64 s1,
                                TCGv_i64 s2, TCGv gsr)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    /* shift = GSR.align * 8 bits */
    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}
858 
/* FALIGNDATA using the architectural GSR for the alignment offset. */
static void gen_op_faligndata_g(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
    gen_op_faligndata_i(dst, s1, s2, cpu_gsr);
}

/* BSHUFFLE: byte permute via helper, driven by GSR; sparc64 only. */
static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

/* PDISTN: pixel distance with a zero accumulator; sparc64 only. */
static void gen_op_pdistn(TCGv dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_pdist(dst, tcg_constant_i64(0), src1, src2);
#else
    g_assert_not_reached();
#endif
}
881 
/*
 * FMUL8x16AL: multiply by the low 16 bits of src2.
 * NB: clobbers src2 in place before calling the helper.
 */
static void gen_op_fmul8x16al(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_ext16s_i32(src2, src2);
    gen_helper_fmul8x16a(dst, src1, src2);
}

/*
 * FMUL8x16AU: multiply by the high 16 bits of src2.
 * NB: clobbers src2 in place before calling the helper.
 */
static void gen_op_fmul8x16au(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_sari_i32(src2, src2, 16);
    gen_helper_fmul8x16a(dst, src1, src2);
}
893 
/*
 * FMULD8ULX16: multiply the unsigned low byte of each 16-bit lane of
 * src1 by the corresponding signed 16-bit lane of src2, producing two
 * 32-bit products packed into dst.
 */
static void gen_op_fmuld8ulx16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /* Low lane: src1 byte 0 (unsigned) * src2 low half (signed). */
    tcg_gen_ext8u_i32(t0, src1);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    /* High lane: src1 byte 2 (unsigned) * src2 high half (signed). */
    tcg_gen_extract_i32(t1, src1, 16, 8);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}
910 
/*
 * FMULD8SUX16: multiply the signed high byte of each 16-bit lane of
 * src1 by the corresponding signed 16-bit lane of src2, producing two
 * 32-bit products packed into dst.
 */
static void gen_op_fmuld8sux16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /*
     * The insn description talks about extracting the upper 8 bits
     * of the signed 16-bit input rs1, performing the multiply, then
     * shifting left by 8 bits.  Instead, zap the lower 8 bits of
     * the rs1 input, which avoids the need for two shifts.
     */
    tcg_gen_ext16s_i32(t0, src1);
    tcg_gen_andi_i32(t0, t0, ~0xff);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    /* Same for the high lane. */
    tcg_gen_sextract_i32(t1, src1, 16, 16);
    tcg_gen_andi_i32(t1, t1, ~0xff);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}
935 
936 #ifdef TARGET_SPARC64
/* Vector expansion: per-lane 16-bit add with end-around carry. */
static void gen_vec_fchksm16(unsigned vece, TCGv_vec dst,
                             TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec a = tcg_temp_new_vec_matching(dst);
    TCGv_vec c = tcg_temp_new_vec_matching(dst);

    tcg_gen_add_vec(vece, a, src1, src2);
    tcg_gen_cmp_vec(TCG_COND_LTU, vece, c, a, src1);
    /* Vector cmp produces -1 for true, so subtract to add carry. */
    tcg_gen_sub_vec(vece, dst, a, c);
}

/* FCHKSM16 gvec expansion: vector path with helper fallback. */
static void gen_op_fchksm16(unsigned vece, uint32_t dofs, uint32_t aofs,
                            uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, INDEX_op_add_vec, INDEX_op_sub_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fchksm16,
        .fniv = gen_vec_fchksm16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}
963 
/*
 * Vector expansion: per-lane signed average (src1 + src2 + 1) >> 1,
 * computed as (src1 >> 1) + (src2 >> 1) + carry-ins to avoid overflow.
 * NB: clobbers src1 and src2 (permitted for gvec fniv expanders).
 */
static void gen_vec_fmean16(unsigned vece, TCGv_vec dst,
                            TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec t = tcg_temp_new_vec_matching(dst);

    tcg_gen_or_vec(vece, t, src1, src2);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(dst, vece, 1));
    tcg_gen_sari_vec(vece, src1, src1, 1);
    tcg_gen_sari_vec(vece, src2, src2, 1);
    tcg_gen_add_vec(vece, dst, src1, src2);
    tcg_gen_add_vec(vece, dst, dst, t);
}

/* FMEAN16 gvec expansion: vector path with helper fallback. */
static void gen_op_fmean16(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_add_vec, INDEX_op_sari_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fmean16,
        .fniv = gen_vec_fmean16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}
991 #else
992 #define gen_op_fchksm16   ({ qemu_build_not_reached(); NULL; })
993 #define gen_op_fmean16    ({ qemu_build_not_reached(); NULL; })
994 #endif
995 
996 static void finishing_insn(DisasContext *dc)
997 {
998     /*
999      * From here, there is no future path through an unwinding exception.
1000      * If the current insn cannot raise an exception, the computation of
1001      * cpu_cond may be able to be elided.
1002      */
1003     if (dc->cpu_cond_live) {
1004         tcg_gen_discard_tl(cpu_cond);
1005         dc->cpu_cond_live = false;
1006     }
1007 }
1008 
1009 static void gen_generic_branch(DisasContext *dc)
1010 {
1011     TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
1012     TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
1013     TCGv c2 = tcg_constant_tl(dc->jump.c2);
1014 
1015     tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
1016 }
1017 
1018 /* call this function before using the condition register as it may
1019    have been set for a jump */
1020 static void flush_cond(DisasContext *dc)
1021 {
1022     if (dc->npc == JUMP_PC) {
1023         gen_generic_branch(dc);
1024         dc->npc = DYNAMIC_PC_LOOKUP;
1025     }
1026 }
1027 
1028 static void save_npc(DisasContext *dc)
1029 {
1030     if (dc->npc & 3) {
1031         switch (dc->npc) {
1032         case JUMP_PC:
1033             gen_generic_branch(dc);
1034             dc->npc = DYNAMIC_PC_LOOKUP;
1035             break;
1036         case DYNAMIC_PC:
1037         case DYNAMIC_PC_LOOKUP:
1038             break;
1039         default:
1040             g_assert_not_reached();
1041         }
1042     } else {
1043         tcg_gen_movi_tl(cpu_npc, dc->npc);
1044     }
1045 }
1046 
/* Flush the translator's static pc/npc out to the CPU state. */
static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
1052 
/* Raise exception WHICH at the current pc/npc; ends the TB. */
static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}
1060 
1061 static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
1062 {
1063     DisasDelayException *e = g_new0(DisasDelayException, 1);
1064 
1065     e->next = dc->delay_excp_list;
1066     dc->delay_excp_list = e;
1067 
1068     e->lab = gen_new_label();
1069     e->excp = excp;
1070     e->pc = dc->pc;
1071     /* Caller must have used flush_cond before branch. */
1072     assert(e->npc != JUMP_PC);
1073     e->npc = dc->npc;
1074 
1075     return e->lab;
1076 }
1077 
/* As delay_exceptionv, for a constant exception number. */
static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}
1082 
1083 static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
1084 {
1085     TCGv t = tcg_temp_new();
1086     TCGLabel *lab;
1087 
1088     tcg_gen_andi_tl(t, addr, mask);
1089 
1090     flush_cond(dc);
1091     lab = delay_exception(dc, TT_UNALIGNED);
1092     tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
1093 }
1094 
1095 static void gen_mov_pc_npc(DisasContext *dc)
1096 {
1097     finishing_insn(dc);
1098 
1099     if (dc->npc & 3) {
1100         switch (dc->npc) {
1101         case JUMP_PC:
1102             gen_generic_branch(dc);
1103             tcg_gen_mov_tl(cpu_pc, cpu_npc);
1104             dc->pc = DYNAMIC_PC_LOOKUP;
1105             break;
1106         case DYNAMIC_PC:
1107         case DYNAMIC_PC_LOOKUP:
1108             tcg_gen_mov_tl(cpu_pc, cpu_npc);
1109             dc->pc = dc->npc;
1110             break;
1111         default:
1112             g_assert_not_reached();
1113         }
1114     } else {
1115         dc->pc = dc->npc;
1116     }
1117 }
1118 
/*
 * Fill *CMP with a comparison equivalent to integer condition code COND.
 * XCC selects the 64-bit (xcc) view of the flags; otherwise the 32-bit
 * (icc) view is used.  Bit 3 of COND selects the negated condition.
 */
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            /* For icc, the carry is kept in bit 32 of cpu_icc_C. */
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    /* Bit 3 of COND selects the complementary condition. */
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}
1217 
/*
 * Fill *CMP with a comparison equivalent to floating-point condition
 * COND on condition-code field CC.  Bit 3 of COND selects the negated
 * condition.
 */
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    TCGv_i32 fcc = cpu_fcc[cc];
    TCGv_i32 c1 = fcc;
    int c2 = 0;
    TCGCond tcond;

    /*
     * FCC values:
     * 0 =
     * 1 <
     * 2 >
     * 3 unordered
     */
    switch (cond & 7) {
    case 0x0: /* fbn */
        tcond = TCG_COND_NEVER;
        break;
    case 0x1: /* fbne : !0 */
        tcond = TCG_COND_NE;
        break;
    case 0x2: /* fblg : 1 or 2 */
        /* fcc in {1,2} - 1 -> fcc in {0,1} */
        c1 = tcg_temp_new_i32();
        tcg_gen_addi_i32(c1, fcc, -1);
        c2 = 1;
        tcond = TCG_COND_LEU;
        break;
    case 0x3: /* fbul : 1 or 3 */
        /* Both "less" and "unordered" have bit 0 set. */
        c1 = tcg_temp_new_i32();
        tcg_gen_andi_i32(c1, fcc, 1);
        tcond = TCG_COND_NE;
        break;
    case 0x4: /* fbl  : 1 */
        c2 = 1;
        tcond = TCG_COND_EQ;
        break;
    case 0x5: /* fbug : 2 or 3 */
        c2 = 2;
        tcond = TCG_COND_GEU;
        break;
    case 0x6: /* fbg  : 2 */
        c2 = 2;
        tcond = TCG_COND_EQ;
        break;
    case 0x7: /* fbu  : 3 */
        c2 = 3;
        tcond = TCG_COND_EQ;
        break;
    }
    /* Bit 3 of COND selects the complementary condition. */
    if (cond & 8) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c2 = c2;
    cmp->c1 = tcg_temp_new();
    tcg_gen_extu_i32_tl(cmp->c1, c1);
}
1277 
1278 static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1279 {
1280     static const TCGCond cond_reg[4] = {
1281         TCG_COND_NEVER,  /* reserved */
1282         TCG_COND_EQ,
1283         TCG_COND_LE,
1284         TCG_COND_LT,
1285     };
1286     TCGCond tcond;
1287 
1288     if ((cond & 3) == 0) {
1289         return false;
1290     }
1291     tcond = cond_reg[cond & 3];
1292     if (cond & 4) {
1293         tcond = tcg_invert_cond(tcond);
1294     }
1295 
1296     cmp->cond = tcond;
1297     cmp->c1 = tcg_temp_new();
1298     cmp->c2 = 0;
1299     tcg_gen_mov_tl(cmp->c1, r_src);
1300     return true;
1301 }
1302 
/* Zero the combined CEXC/FTT field of the FSR kept in env. */
static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
}
1308 
/* FMOVs: clear pending IEEE exception state, then copy the bits. */
static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}
1314 
/* FNEGs: flip the sign bit; never raises IEEE exceptions. */
static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i32(dst, src, 1u << 31);
}
1320 
/* FABSs: clear the sign bit; never raises IEEE exceptions. */
static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i32(dst, src, ~(1u << 31));
}
1326 
/* FMOVd: clear pending IEEE exception state, then copy the bits. */
static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}
1332 
/* FNEGd: flip the sign bit; never raises IEEE exceptions. */
static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i64(dst, src, 1ull << 63);
}
1338 
/* FABSd: clear the sign bit; never raises IEEE exceptions. */
static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i64(dst, src, ~(1ull << 63));
}
1344 
1345 static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
1346 {
1347     TCGv_i64 l = tcg_temp_new_i64();
1348     TCGv_i64 h = tcg_temp_new_i64();
1349 
1350     tcg_gen_extr_i128_i64(l, h, src);
1351     tcg_gen_xori_i64(h, h, 1ull << 63);
1352     tcg_gen_concat_i64_i128(dst, l, h);
1353 }
1354 
1355 static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
1356 {
1357     TCGv_i64 l = tcg_temp_new_i64();
1358     TCGv_i64 h = tcg_temp_new_i64();
1359 
1360     tcg_gen_extr_i128_i64(l, h, src);
1361     tcg_gen_andi_i64(h, h, ~(1ull << 63));
1362     tcg_gen_concat_i64_i128(dst, l, h);
1363 }
1364 
/* d = s1 * s2 + s3, fused, via the softfloat muladd helper (no flags). */
static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}
1369 
/* d = s1 * s2 + s3, fused, via the softfloat muladd helper (no flags). */
static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}
1374 
/* d = s1 * s2 - s3: negate_c negates the addend per softfloat flags. */
static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_c;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}
1380 
/* d = s1 * s2 - s3: negate_c negates the addend per softfloat flags. */
static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_c;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}
1386 
/* d = -(s1 * s2 - s3): negate both the addend and the final result. */
static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_c | float_muladd_negate_result;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}
1392 
/* d = -(s1 * s2 - s3): negate both the addend and the final result. */
static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_c | float_muladd_negate_result;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}
1398 
/* d = -(s1 * s2 + s3): negate the final result only. */
static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_result;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}
1404 
/* d = -(s1 * s2 + s3): negate the final result only. */
static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_result;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}
1410 
1411 /* Use muladd to compute (1 * src1) + src2 / 2 with one rounding. */
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_halve_result;
    /* fmadds(1.0, s1, s2) == s1 + s2; halve_result yields (s1 + s2) / 2. */
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}
1418 
static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_halve_result;
    /* fmaddd(1.0, s1, s2) == s1 + s2; halve_result yields (s1 + s2) / 2. */
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}
1425 
1426 /* Use muladd to compute (1 * src1) - src2 / 2 with one rounding. */
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_negate_c | float_muladd_halve_result;
    /* negate_c turns the sum into s1 - s2; halve_result divides by 2. */
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}
1433 
static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_negate_c | float_muladd_halve_result;
    /* negate_c turns the sum into s1 - s2; halve_result divides by 2. */
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}
1440 
1441 /* Use muladd to compute -((1 * src1) + src2 / 2) with one rounding. */
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_negate_result | float_muladd_halve_result;
    /* Halve, then negate, the sum s1 + s2: d = -((s1 + s2) / 2). */
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}
1448 
static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_negate_result | float_muladd_halve_result;
    /* Halve, then negate, the sum s1 + s2: d = -((s1 + s2) / 2). */
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}
1455 
/* Raise an FP exception with trap type FTT recorded in the FSR. */
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
{
    /*
     * CEXC is only set when successfully completing an FPop,
     * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
     * Thus we can simply store FTT into this field.
     */
    tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
    gen_exception(dc, TT_FP_EXCP);
}
1467 
/*
 * If the FPU is disabled, raise an fp-disabled trap and return 1;
 * otherwise return 0.  The check is compiled out for user-only builds.
 */
static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}
1478 
1479 /* asi moves */
/* Strategy chosen by resolve_asi for generating one ASI access. */
typedef enum {
    GET_ASI_HELPER,   /* generic case: defer to the ld/st_asi helpers */
    GET_ASI_EXCP,     /* an exception was already generated; emit nothing */
    GET_ASI_DIRECT,   /* plain guest load/store with the chosen mmu_idx */
    GET_ASI_DTWINX,   /* twinx (quad ldd/std) asis */
    GET_ASI_CODE,     /* read through the instruction space (pre-v9) */
    GET_ASI_BLOCK,    /* block transfer asis (ASI_BLK_*) */
    GET_ASI_SHORT,    /* 8/16-bit FP load/store asis (ASI_FL8/FL16) */
    GET_ASI_BCOPY,    /* hyperSPARC 32-byte block copy (ASI_M_BCOPY) */
    GET_ASI_BFILL,    /* hyperSPARC block fill (ASI_M_BFILL) */
} ASIType;
1491 
/* Resolved parameters of one ASI-qualified memory access. */
typedef struct {
    ASIType type;   /* code-gen strategy, see ASIType */
    int asi;        /* raw ASI number, passed through to helpers */
    int mem_idx;    /* MMU index for direct accesses */
    MemOp memop;    /* size/sign/endianness of the access */
} DisasASI;
1498 
1499 /*
1500  * Build DisasASI.
1501  * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
1503  */
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    /* Default: unknown ASIs are handled at runtime by the asi helpers. */
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect.  */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_USERTXT:     /* User text access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_KERNELTXT:   /* Supervisor text access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        /* Non-immediate asi: use the value from the %asi register. */
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below don't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        /* First switch: pick the MMU index implied by the ASI. */
        switch (asi) {
        case ASI_REAL:      /* Bypass */
        case ASI_REAL_IO:   /* Bypass, non-cacheable */
        case ASI_REAL_L:    /* Bypass LE */
        case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_MON_AIUP:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_MON_AIUS:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_MON_S:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
        case ASI_MON_P:
            break;
        }
        /* Second switch: pick the code generation strategy. */
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
        case ASI_MON_P:
        case ASI_MON_S:
        case ASI_MON_AIUP:
        case ASI_MON_AIUS:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

 done:
    return (DisasASI){ type, asi, mem_idx, memop };
}
1738 
1739 #if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
/*
 * Link-time stub for 32-bit user-only builds, which have no asi
 * helpers; reaching it at runtime indicates a translator bug.
 */
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
1745 
/* Link-time stub for 32-bit user-only builds; see gen_helper_ld_asi. */
static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
1751 #endif
1752 
/* Load from ADDR into DST according to the resolved ASI descriptor DA. */
static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        /* An exception was already generated; emit nothing. */
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_CODE:
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 t64 = tcg_temp_new_i64();

            gen_helper_ld_code(t64, tcg_env, addr, tcg_constant_i32(oi));
            tcg_gen_trunc_i64_tl(dst, t64);
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* The helper may raise an exception; sync pc/npc first. */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
            {
                /* The helper always returns 64 bits; truncate to target. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}
1798 
/* Store SRC to ADDR according to the resolved ASI descriptor DA. */
static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        /* An exception was already generated; emit nothing. */
        break;

    case GET_ASI_DTWINX: /* Reserved for stda.  */
        if (TARGET_LONG_BITS == 32) {
            gen_exception(dc, TT_ILL_INSN);
            break;
        } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            break;
        }
        /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
        /* fall through */

    case GET_ASI_DIRECT:
        tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_BCOPY:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Copy 32 bytes from the address in SRC to ADDR.
         *
         * From Ross RT625 hyperSPARC manual, section 4.6:
         * "Block Copy and Block Fill will work only on cache line boundaries."
         *
         * It does not specify if an unaligned address is truncated or trapped.
         * Previous qemu behaviour was to truncate to 4 byte alignment, which
         * is obviously wrong.  The only place I can see this used is in the
         * Linux kernel which begins with page alignment, advancing by 32,
         * so is always aligned.  Assume truncation as the simpler option.
         *
         * Since the loads and stores are paired, allow the copy to happen
         * in the host endianness.  The copy need not be atomic.
         */
        {
            MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv_i128 tmp = tcg_temp_new_i128();

            /* Truncate both addresses to a 32-byte boundary. */
            tcg_gen_andi_tl(saddr, src, -32);
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(saddr, saddr, 16);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* The helper may raise an exception; sync pc/npc first. */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
            {
                /* The helper takes a 64-bit value; zero-extend the source. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1877 
1878 static void gen_swap_asi(DisasContext *dc, DisasASI *da,
1879                          TCGv dst, TCGv src, TCGv addr)
1880 {
1881     switch (da->type) {
1882     case GET_ASI_EXCP:
1883         break;
1884     case GET_ASI_DIRECT:
1885         tcg_gen_atomic_xchg_tl(dst, addr, src,
1886                                da->mem_idx, da->memop | MO_ALIGN);
1887         break;
1888     default:
1889         /* ??? Should be DAE_invalid_asi.  */
1890         gen_exception(dc, TT_DATA_ACCESS);
1891         break;
1892     }
1893 }
1894 
1895 static void gen_cas_asi(DisasContext *dc, DisasASI *da,
1896                         TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
1897 {
1898     switch (da->type) {
1899     case GET_ASI_EXCP:
1900         return;
1901     case GET_ASI_DIRECT:
1902         tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
1903                                   da->mem_idx, da->memop | MO_ALIGN);
1904         break;
1905     default:
1906         /* ??? Should be DAE_invalid_asi.  */
1907         gen_exception(dc, TT_DATA_ACCESS);
1908         break;
1909     }
1910 }
1911 
/* ldstub: atomically load the byte at ADDR into DST and store 0xff. */
static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        /* An exception was already generated; emit nothing. */
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
                               da->mem_idx, MO_UB);
        break;
    default:
        /* ??? In theory, this should be raise DAE_invalid_asi.
           But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
            /* Cannot do the helper sequence atomically; retry serially. */
            gen_helper_exit_atomic(tcg_env);
        } else {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
            TCGv_i64 s64, t64;

            /* The helpers may raise exceptions; sync pc/npc first. */
            save_state(dc);
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);

            s64 = tcg_constant_i64(0xff);
            gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);

            tcg_gen_trunc_i64_tl(dst, t64);

            /* End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1946 
/*
 * Floating-point load via ASI (ldfa/lddfa/ldqfa) into FP register(s)
 * starting at RD.  ORIG_SIZE is the architectural access size, used to
 * validate the block/short-asi forms; 128-bit accesses are split into
 * two 64-bit loads below.
 */
static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64, l64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = tcg_temp_new_i32();
            tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
            gen_store_fpr_F(dc, rd, d32);
            break;

        case MO_64:
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            gen_store_fpr_D(dc, rd, d64);
            break;

        case MO_128:
            /* Two 64-bit halves at ADDR and ADDR+8.  */
            d64 = tcg_temp_new_i64();
            l64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_ld_i64(l64, addr_tmp, da->mem_idx, memop);
            gen_store_fpr_D(dc, rd, d64);
            gen_store_fpr_D(dc, rd + 2, l64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only.  */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment.  */
            addr_tmp = tcg_temp_new();
            d64 = tcg_temp_new_i64();
            /* 64-byte block: eight consecutive 64-bit loads.  */
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                gen_store_fpr_D(dc, rd + 2 * i, d64);
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only.  */
        if (orig_size == MO_64) {
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
            gen_store_fpr_D(dc, rd, d64);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them.  */
            switch (size) {
            case MO_32:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                d32 = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(d32, d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case MO_64:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                gen_store_fpr_D(dc, rd, d64);
                break;
            case MO_128:
                d64 = tcg_temp_new_i64();
                l64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                addr_tmp = tcg_temp_new();
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                gen_helper_ld_asi(l64, tcg_env, addr_tmp, r_asi, r_mop);
                gen_store_fpr_D(dc, rd, d64);
                gen_store_fpr_D(dc, rd + 2, l64);
                break;
            default:
                g_assert_not_reached();
            }
        }
        break;
    }
}
2067 
/*
 * Floating-point store via ASI (stfa/stdfa/stqfa) from FP register(s)
 * starting at RD.  ORIG_SIZE is the architectural access size, used to
 * validate the block/short-asi forms; 128-bit accesses are split into
 * two 64-bit stores below.
 */
static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = gen_load_fpr_F(dc, rd);
            tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
            break;
        case MO_64:
            d64 = gen_load_fpr_D(dc, rd);
            tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_4);
            break;
        case MO_128:
            /* Only 4-byte alignment required.  However, it is legal for the
               cpu to signal the alignment fault, and the OS trap handler is
               required to fix it up.  Requiring 16-byte alignment here avoids
               having to probe the second page before performing the first
               write.  */
            d64 = gen_load_fpr_D(dc, rd);
            /* NOTE(review): memop already carries MO_ALIGN_4 and the
               MO_ALIGN_* encodings are a numeric field rather than
               independent bits — confirm that OR-ing in MO_ALIGN_16
               yields the 16-byte alignment intended above.  */
            tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_16);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            d64 = gen_load_fpr_D(dc, rd + 2);
            tcg_gen_qemu_st_i64(d64, addr_tmp, da->mem_idx, memop);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for stdfa on aligned registers only.  */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment.  */
            addr_tmp = tcg_temp_new();
            /* 64-byte block: eight consecutive 64-bit stores.  */
            for (int i = 0; ; ++i) {
                d64 = gen_load_fpr_D(dc, rd + 2 * i);
                tcg_gen_qemu_st_i64(d64, addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for stdfa only.  */
        if (orig_size == MO_64) {
            d64 = gen_load_fpr_D(dc, rd);
            tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        /* According to the table in the UA2011 manual, the only
           other asis that are valid for stfa/stdfa/stqfa are
           the PST* asis, which aren't currently handled.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    }
}
2153 
/*
 * LDDA: load a doubleword (or a 128-bit twinx pair on sparc64) via ASI
 * into the even/odd register pair RD / RD+1.
 */
static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv lo = gen_dest_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        return;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE load, so must swap
             * the order of the writebacks.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i128_i64(lo, hi, t);
            } else {
                tcg_gen_extr_i128_i64(hi, lo, t);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);

            /* Note that LE ldda acts as if each 32-bit register
               result is byte swapped.  Having just performed one
               64-bit bswap, we need now to swap the writebacks.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;

    case GET_ASI_CODE:
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 tmp = tcg_temp_new_i64();

            /* Instruction-space access must go through the code helper.  */
            gen_helper_ld_code(tmp, tcg_env, addr, tcg_constant_i32(oi));

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for ldda, and this should raise DAE_invalid_asi.  However,
           real hardware allows others.  This can be seen with e.g.
           FreeBSD 10.3 wrt ASI_IC_TAG.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 tmp = tcg_temp_new_i64();

            save_state(dc);
            gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
    }

    gen_store_gpr(dc, rd, hi);
    gen_store_gpr(dc, rd + 1, lo);
}
2249 
/*
 * STDA: store the even/odd register pair RD / RD+1 as a doubleword
 * (or a 128-bit twinx pair on sparc64) via ASI.
 */
static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_load_gpr(dc, rd);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE store, so must swap
             * the order of the construction.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_i64_i128(t, lo, hi);
            } else {
                tcg_gen_concat_i64_i128(t, hi, lo);
            }
            tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }
            tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
        }
        break;

    case GET_ASI_BFILL:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Store 32 bytes of [rd:rd+1] to ADDR.
         * See comments for GET_ASI_COPY above.
         */
        {
            MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv_i64 t8 = tcg_temp_new_i64();
            TCGv_i128 t16 = tcg_temp_new_i128();
            TCGv daddr = tcg_temp_new();

            /* Replicate the pair into a 128-bit value, then store it
               twice at the 32-byte-aligned destination.  */
            tcg_gen_concat_tl_i64(t8, lo, hi);
            tcg_gen_concat_i64_i128(t16, t8, t8);
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }

            save_state(dc);
            gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
2340 
/* FMOVScc: conditionally move single-precision FP register RS to RD. */
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i32 c32, zero, dst, s1, s2;
    TCGv_i64 c64 = tcg_temp_new_i64();

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the later.  */
    c32 = tcg_temp_new_i32();
    tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
    tcg_gen_extrl_i64_i32(c32, c64);

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = tcg_temp_new_i32();
    zero = tcg_constant_i32(0);

    /* dst = (c32 != 0) ? s1 : s2, i.e. keep rd unless the condition holds.  */
    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    gen_store_fpr_F(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}
2366 
2367 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2368 {
2369 #ifdef TARGET_SPARC64
2370     TCGv_i64 dst = tcg_temp_new_i64();
2371     tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
2372                         gen_load_fpr_D(dc, rs),
2373                         gen_load_fpr_D(dc, rd));
2374     gen_store_fpr_D(dc, rd, dst);
2375 #else
2376     qemu_build_not_reached();
2377 #endif
2378 }
2379 
2380 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2381 {
2382 #ifdef TARGET_SPARC64
2383     TCGv c2 = tcg_constant_tl(cmp->c2);
2384     TCGv_i64 h = tcg_temp_new_i64();
2385     TCGv_i64 l = tcg_temp_new_i64();
2386 
2387     tcg_gen_movcond_i64(cmp->cond, h, cmp->c1, c2,
2388                         gen_load_fpr_D(dc, rs),
2389                         gen_load_fpr_D(dc, rd));
2390     tcg_gen_movcond_i64(cmp->cond, l, cmp->c1, c2,
2391                         gen_load_fpr_D(dc, rs + 2),
2392                         gen_load_fpr_D(dc, rd + 2));
2393     gen_store_fpr_D(dc, rd, h);
2394     gen_store_fpr_D(dc, rd + 2, l);
2395 #else
2396     qemu_build_not_reached();
2397 #endif
2398 }
2399 
#ifdef TARGET_SPARC64
/*
 * Set R_TSPTR to point at the trap-state save area for the current
 * trap level: r_tsptr = &env->ts[env->tl & MAXTL_MASK].
 */
static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
    }
}
#endif
2423 
2424 static int extract_dfpreg(DisasContext *dc, int x)
2425 {
2426     int r = x & 0x1e;
2427 #ifdef TARGET_SPARC64
2428     r |= (x & 1) << 5;
2429 #endif
2430     return r;
2431 }
2432 
2433 static int extract_qfpreg(DisasContext *dc, int x)
2434 {
2435     int r = x & 0x1c;
2436 #ifdef TARGET_SPARC64
2437     r |= (x & 1) << 5;
2438 #endif
2439     return r;
2440 }
2441 
2442 /* Include the auto-generated decoder.  */
2443 #include "decode-insns.c.inc"
2444 
2445 #define TRANS(NAME, AVAIL, FUNC, ...) \
2446     static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2447     { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
2448 
2449 #define avail_ALL(C)      true
2450 #ifdef TARGET_SPARC64
2451 # define avail_32(C)      false
2452 # define avail_ASR17(C)   false
2453 # define avail_CASA(C)    true
2454 # define avail_DIV(C)     true
2455 # define avail_MUL(C)     true
2456 # define avail_POWERDOWN(C) false
2457 # define avail_64(C)      true
2458 # define avail_FMAF(C)    ((C)->def->features & CPU_FEATURE_FMAF)
2459 # define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
2460 # define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
2461 # define avail_IMA(C)     ((C)->def->features & CPU_FEATURE_IMA)
2462 # define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
2463 # define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
2464 # define avail_VIS3(C)    ((C)->def->features & CPU_FEATURE_VIS3)
2465 # define avail_VIS3B(C)   avail_VIS3(C)
2466 # define avail_VIS4(C)    ((C)->def->features & CPU_FEATURE_VIS4)
2467 #else
2468 # define avail_32(C)      true
2469 # define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
2470 # define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
2471 # define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
2472 # define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
2473 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2474 # define avail_64(C)      false
2475 # define avail_FMAF(C)    false
2476 # define avail_GL(C)      false
2477 # define avail_HYPV(C)    false
2478 # define avail_IMA(C)     false
2479 # define avail_VIS1(C)    false
2480 # define avail_VIS2(C)    false
2481 # define avail_VIS3(C)    false
2482 # define avail_VIS3B(C)   false
2483 # define avail_VIS4(C)    false
2484 #endif
2485 
/*
 * Default PC advance for non-jump instructions: pc <- npc, npc <- npc+4.
 * Handles the symbolic npc values left behind by preceding jumps.
 * Always returns true, for use as a trans_* tail call.
 */
static bool advance_pc(DisasContext *dc)
{
    TCGLabel *l1;

    finishing_insn(dc);

    if (dc->npc & 3) {
        /* npc holds a symbolic marker rather than a real address.  */
        switch (dc->npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            dc->pc = dc->npc;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
            break;

        case JUMP_PC:
            /* we can do a static jump */
            l1 = gen_new_label();
            tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);

            /* jump not taken */
            gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);

            /* jump taken */
            gen_set_label(l1);
            gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);

            dc->base.is_jmp = DISAS_NORETURN;
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        /* Ordinary sequential advance.  */
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
    return true;
}
2526 
2527 /*
2528  * Major opcodes 00 and 01 -- branches, call, and sethi
2529  */
2530 
/*
 * Update pc/npc for a conditional branch with comparison CMP, annul
 * bit ANNUL, and word displacement DISP.  Handles the always/never
 * degenerate conditions specially, and records the JUMP_PC state for
 * a genuinely conditional branch so the delay slot can resolve it.
 * Always returns true, for use as a trans_* tail call.
 */
static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
                              bool annul, int disp)
{
    target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
    target_ulong npc;

    finishing_insn(dc);

    if (cmp->cond == TCG_COND_ALWAYS) {
        if (annul) {
            /* Branch-always with annul skips the delay slot entirely.  */
            dc->pc = dest;
            dc->npc = dest + 4;
        } else {
            gen_mov_pc_npc(dc);
            dc->npc = dest;
        }
        return true;
    }

    if (cmp->cond == TCG_COND_NEVER) {
        npc = dc->npc;
        if (npc & 3) {
            /* npc is symbolic; advance in generated code.  */
            gen_mov_pc_npc(dc);
            if (annul) {
                tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
            }
            tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
        } else {
            dc->pc = npc + (annul ? 4 : 0);
            dc->npc = dc->pc + 4;
        }
        return true;
    }

    flush_cond(dc);
    npc = dc->npc;

    if (annul) {
        /* Annulled conditional: delay slot executes only when taken,
           so the TB must end here with a two-way goto_tb.  */
        TCGLabel *l1 = gen_new_label();

        tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
        gen_goto_tb(dc, 0, npc, dest);
        gen_set_label(l1);
        gen_goto_tb(dc, 1, npc + 4, npc + 8);

        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        if (npc & 3) {
            switch (npc) {
            case DYNAMIC_PC:
            case DYNAMIC_PC_LOOKUP:
                /* Select the branch target with a movcond on npc.  */
                tcg_gen_mov_tl(cpu_pc, cpu_npc);
                tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
                tcg_gen_movcond_tl(cmp->cond, cpu_npc,
                                   cmp->c1, tcg_constant_tl(cmp->c2),
                                   tcg_constant_tl(dest), cpu_npc);
                dc->pc = npc;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /* Defer the decision: record both targets as JUMP_PC state.  */
            dc->pc = npc;
            dc->npc = JUMP_PC;
            dc->jump = *cmp;
            dc->jump_pc[0] = dest;
            dc->jump_pc[1] = npc + 4;

            /* The condition for cpu_cond is always NE -- normalize. */
            if (cmp->cond == TCG_COND_NE) {
                tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
            } else {
                tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
            }
            dc->cpu_cond_live = true;
        }
    }
    return true;
}
2610 
/* Raise a privileged-instruction trap; returns true ("insn handled"). */
static bool raise_priv(DisasContext *dc)
{
    gen_exception(dc, TT_PRIV_INSN);
    return true;
}
2616 
/* Raise an fp_exception with FTT=unimplemented_FPop; returns true. */
static bool raise_unimpfpop(DisasContext *dc)
{
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    return true;
}
2622 
2623 static bool gen_trap_float128(DisasContext *dc)
2624 {
2625     if (dc->def->features & CPU_FEATURE_FLOAT128) {
2626         return false;
2627     }
2628     return raise_unimpfpop(dc);
2629 }
2630 
/* Bicc/BPcc: branch on integer condition codes. */
static bool do_bpcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    gen_compare(&cmp, a->cc, a->cond, dc);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(Bicc, ALL, do_bpcc, a)
TRANS(BPcc,  64, do_bpcc, a)
2641 
/* FBfcc/FBPfcc: branch on floating-point condition codes. */
static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    /* FP branches trap if the FPU is disabled.  */
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(FBPfcc,  64, do_fbpfcc, a)
TRANS(FBfcc,  ALL, do_fbpfcc, a)
2655 
2656 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2657 {
2658     DisasCompare cmp;
2659 
2660     if (!avail_64(dc)) {
2661         return false;
2662     }
2663     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
2664         return false;
2665     }
2666     return advance_jump_cond(dc, &cmp, a->a, a->i);
2667 }
2668 
/* CALL: save the return address in %o7 (r15) and branch pc-relative. */
static bool trans_CALL(DisasContext *dc, arg_CALL *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);

    gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
    /* The branch target takes effect after the delay slot.  */
    gen_mov_pc_npc(dc);
    dc->npc = target;
    return true;
}
2678 
2679 static bool trans_NCP(DisasContext *dc, arg_NCP *a)
2680 {
2681     /*
2682      * For sparc32, always generate the no-coprocessor exception.
2683      * For sparc64, always generate illegal instruction.
2684      */
2685 #ifdef TARGET_SPARC64
2686     return false;
2687 #else
2688     gen_exception(dc, TT_NCP_INSN);
2689     return true;
2690 #endif
2691 }
2692 
2693 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2694 {
2695     /* Special-case %g0 because that's the canonical nop.  */
2696     if (a->rd) {
2697         gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2698     }
2699     return advance_pc(dc);
2700 }
2701 
2702 /*
2703  * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2704  */
2705 
/*
 * Ticc: conditional software trap.  The trap number is
 * (rs1 + rs2_or_imm) masked to the cpu's trap range, biased by TT_TRAP.
 * COND is the 4-bit condition field; CC selects icc/xcc.
 */
static bool do_tcc(DisasContext *dc, int cond, int cc,
                   int rs1, bool imm, int rs2_or_imm)
{
    /* Hypervisor-capable cpus in supervisor mode expose a wider range.  */
    int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
    DisasCompare cmp;
    TCGLabel *lab;
    TCGv_i32 trap;

    /* Trap never.  */
    if (cond == 0) {
        return advance_pc(dc);
    }

    /*
     * Immediate traps are the most common case.  Since this value is
     * live across the branch, it really pays to evaluate the constant.
     */
    if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
        trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
    } else {
        trap = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
        if (imm) {
            tcg_gen_addi_i32(trap, trap, rs2_or_imm);
        } else {
            TCGv_i32 t2 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
            tcg_gen_add_i32(trap, trap, t2);
        }
        tcg_gen_andi_i32(trap, trap, mask);
        tcg_gen_addi_i32(trap, trap, TT_TRAP);
    }

    finishing_insn(dc);

    /* Trap always.  */
    if (cond == 8) {
        save_state(dc);
        gen_helper_raise_exception(tcg_env, trap);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    /* Conditional trap.  */
    flush_cond(dc);
    lab = delay_exceptionv(dc, trap);
    gen_compare(&cmp, cc, cond, dc);
    tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);

    return advance_pc(dc);
}
2758 
2759 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
2760 {
2761     if (avail_32(dc) && a->cc) {
2762         return false;
2763     }
2764     return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
2765 }
2766 
2767 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
2768 {
2769     if (avail_64(dc)) {
2770         return false;
2771     }
2772     return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
2773 }
2774 
2775 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
2776 {
2777     if (avail_32(dc)) {
2778         return false;
2779     }
2780     return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
2781 }
2782 
/* STBAR: store barrier, modeled as a store-store memory fence. */
static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
{
    tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
    return advance_pc(dc);
}
2788 
/* MEMBAR (sparc64 only): emit a fence for mmask, end the TB for cmask. */
static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
{
    if (avail_32(dc)) {
        return false;
    }
    if (a->mmask) {
        /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
        tcg_gen_mb(a->mmask | TCG_BAR_SC);
    }
    if (a->cmask) {
        /* For #Sync, etc, end the TB to recognize interrupts. */
        dc->base.is_jmp = DISAS_EXIT;
    }
    return advance_pc(dc);
}
2804 
/*
 * Common body for RD-special-register insns: check privilege, then let
 * FUNC produce the value (either into the supplied dest temp or by
 * returning another TCGv) and store it to RD.
 */
static bool do_rd_special(DisasContext *dc, bool priv, int rd,
                          TCGv (*func)(DisasContext *, TCGv))
{
    if (!priv) {
        return raise_priv(dc);
    }
    gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
    return advance_pc(dc);
}
2814 
/* RDY: %y lives in the global cpu_y; return it directly, ignoring dst. */
static TCGv do_rdy(DisasContext *dc, TCGv dst)
{
    return cpu_y;
}
2819 
/* RDY: read the %y register into rd. */
static bool trans_RDY(DisasContext *dc, arg_RDY *a)
{
    /*
     * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
     * 32-bit cpus like sparcv7, which ignores the rs1 field.
     * This matches after all other ASR, so Leon3 Asr17 is handled first.
     */
    if (avail_64(dc) && a->rs1 != 0) {
        return false;
    }
    return do_rd_special(dc, true, a->rd, do_rdy);
}
2832 
/* Leon3 %asr17: read the configuration register via helper. */
static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
{
    gen_helper_rdasr17(dst, tcg_env);
    return dst;
}

TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2840 
/* RDCCR: assemble the %ccr value via helper. */
static TCGv do_rdccr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdccr(dst, tcg_env);
    return dst;
}

TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
2848 
/* RDASI: the current %asi is known at translation time. */
static TCGv do_rdasi(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    return tcg_constant_tl(dc->asi);
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2859 
/* Read ASR %tick from the tick timer device. */
static TCGv do_rdtick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    /* Reading the timer is an I/O operation; exit if icount demands it. */
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)
2875 
/* Read ASR %pc: known at translation time, masked per addressing mode. */
static TCGv do_rdpc(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(address_mask_i(dc, dc->pc));
}

TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2882 
/* Read ASR %fprs; widen the 32-bit global to target width. */
static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
{
    tcg_gen_ext_i32_tl(dst, cpu_fprs);
    return dst;
}

TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)
2890 
/* Read ASR %gsr; traps if the FPU is disabled. */
static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
{
    gen_trap_ifnofpu(dc);
    return cpu_gsr;
}

TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
2898 
/* Read ASR %softint from env. */
static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
    return dst;
}

TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
2906 
/* Read ASR %tick_cmpr from env. */
static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
2915 
/* Read ASR %stick (system tick) from its timer device. */
static TCGv do_rdstick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    /* Reading the timer is an I/O operation; exit if icount demands it. */
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)
2931 
/* Read ASR %stick_cmpr from env. */
static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
    return dst;
}

/* TODO: supervisor access only allowed when enabled by hypervisor. */
TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
2940 
2941 /*
2942  * UltraSPARC-T1 Strand status.
2943  * HYPV check maybe not enough, UA2005 & UA2007 describe
2944  * this ASR as impl. dep
2945  */
2946 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
2947 {
2948     return tcg_constant_tl(1);
2949 }
2950 
2951 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
2952 
/* Read %psr (sparc32); the helper assembles it from env state. */
static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdpsr(dst, tcg_env);
    return dst;
}

TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
2960 
/* Read HPR %hpstate from env. */
static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
    return dst;
}

TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
2968 
/* Read HPR %htstate, indexed at runtime by the current trap level. */
static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    /* tp = env + (env->tl & MAXTL_MASK) * 8, i.e. &env->htstate[tl]. */
    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
    return dst;
}

TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
2985 
/* Read HPR %hintp from env. */
static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
    return dst;
}

TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)
2993 
/* Read HPR %htba from env. */
static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
    return dst;
}

TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)
3001 
/* Read HPR %hver from env. */
static TCGv do_rdhver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
    return dst;
}

TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)
3009 
/* Read HPR %hstick_cmpr from env. */
static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
    return dst;
}

TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
      do_rdhstick_cmpr)
3018 
/* Read %wim (sparc32 window invalid mask) from env. */
static TCGv do_rdwim(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
    return dst;
}

TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
3026 
/* Read PR %tpc from the trap state of the current trap level. */
static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
    return dst;
#else
    /* 32-bit build: the decode can never match; keep the build happy. */
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
3041 
/* Read PR %tnpc from the trap state of the current trap level. */
static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
    return dst;
#else
    /* 32-bit build: the decode can never match; keep the build happy. */
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
3056 
/* Read PR %tstate from the trap state of the current trap level. */
static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
    return dst;
#else
    /* 32-bit build: the decode can never match; keep the build happy. */
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
3071 
/* Read PR %tt from the trap state of the current trap level. */
static TCGv do_rdtt(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
    return dst;
#else
    /* 32-bit build: the decode can never match; keep the build happy. */
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
3087 
/* Read %tbr (sparc32) / PR %tba (sparc64), held in a dedicated global. */
static TCGv do_rdtba(DisasContext *dc, TCGv dst)
{
    return cpu_tbr;
}

TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3095 
/* Read PR %pstate from env. */
static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
    return dst;
}

TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
3103 
/* Read PR %tl (trap level) from env. */
static TCGv do_rdtl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
    return dst;
}

TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
3111 
/* Read PR %pil from env (field shared between 32- and 64-bit cpus). */
static TCGv do_rdpil(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
    return dst;
}

TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
3119 
/* Read PR %cwp via helper. */
static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
{
    gen_helper_rdcwp(dst, tcg_env);
    return dst;
}

TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
3127 
/* Read PR %cansave from env. */
static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
    return dst;
}

TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
3135 
/* Read PR %canrestore from env. */
static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
    return dst;
}

TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
      do_rdcanrestore)
3144 
/* Read PR %cleanwin from env. */
static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
    return dst;
}

TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
3152 
/* Read PR %otherwin from env. */
static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
    return dst;
}

TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)
3160 
/* Read PR %wstate from env. */
static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
    return dst;
}

TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)
3168 
/* Read PR %gl (UA2005 global level) from env. */
static TCGv do_rdgl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
    return dst;
}

TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3176 
/* UA2005 strand status */
static TCGv do_rdssr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
    return dst;
}

TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)
3185 
/* Read PR %ver from env. */
static TCGv do_rdver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
    return dst;
}

TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3193 
3194 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3195 {
3196     if (avail_64(dc)) {
3197         gen_helper_flushw(tcg_env);
3198         return advance_pc(dc);
3199     }
3200     return false;
3201 }
3202 
/*
 * Common helper for writing a special register.
 * Per the ISA, the value written is rs1 ^ rs2 (or rs1 ^ simm13);
 * compute that combination and hand it to FUNC to perform the write.
 * PRIV false raises a privilege exception instead.
 */
static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
                          void (*func)(DisasContext *, TCGv))
{
    TCGv src;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
        return false;
    }
    if (!priv) {
        return raise_priv(dc);
    }

    if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
        /* %g0 ^ constant folds to a constant (also covers %g0 ^ %g0). */
        src = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGv src1 = gen_load_gpr(dc, a->rs1);
        if (a->rs2_or_imm == 0) {
            /* xor with zero: pass rs1 through unmodified. */
            src = src1;
        } else {
            src = tcg_temp_new();
            if (a->imm) {
                tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
            } else {
                tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
            }
        }
    }
    func(dc, src);
    return advance_pc(dc);
}
3234 
/* Write ASR %y; only the low 32 bits are architecturally defined. */
static void do_wry(DisasContext *dc, TCGv src)
{
    tcg_gen_ext32u_tl(cpu_y, src);
}

TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
3241 
/* Write ASR %ccr; the helper scatters it into the cc state. */
static void do_wrccr(DisasContext *dc, TCGv src)
{
    gen_helper_wrccr(tcg_env, src);
}

TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
3248 
/* Write ASR %asi; only 8 bits are kept. */
static void do_wrasi(DisasContext *dc, TCGv src)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_ext8u_tl(tmp, src);
    tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
    /* End TB to notice changed ASI. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
3260 
/* Write ASR %fprs; translation caches fprs state, so end the TB. */
static void do_wrfprs(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    tcg_gen_trunc_tl_i32(cpu_fprs, src);
    dc->fprs_dirty = 0;
    dc->base.is_jmp = DISAS_EXIT;
#else
    /* 32-bit build: the decode can never match; keep the build happy. */
    qemu_build_not_reached();
#endif
}

TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
3273 
/* Write ASR %gsr; traps if the FPU is disabled. */
static void do_wrgsr(DisasContext *dc, TCGv src)
{
    gen_trap_ifnofpu(dc);
    tcg_gen_mov_tl(cpu_gsr, src);
}

TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3281 
/* Write ASR %softint_set: OR bits into softint. */
static void do_wrsoftint_set(DisasContext *dc, TCGv src)
{
    gen_helper_set_softint(tcg_env, src);
}

TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)
3288 
/* Write ASR %softint_clr: clear bits in softint. */
static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
{
    gen_helper_clear_softint(tcg_env, src);
}

TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)
3295 
/* Write ASR %softint: replace the whole register. */
static void do_wrsoftint(DisasContext *dc, TCGv src)
{
    gen_helper_write_softint(tcg_env, src);
}

TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3302 
/* Write ASR %tick_cmpr and reprogram the tick timer limit. */
static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3316 
3317 static void do_wrstick(DisasContext *dc, TCGv src)
3318 {
3319 #ifdef TARGET_SPARC64
3320     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3321 
3322     tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3323     translator_io_start(&dc->base);
3324     gen_helper_tick_set_count(r_tickptr, src);
3325     /* End TB to handle timer interrupt */
3326     dc->base.is_jmp = DISAS_EXIT;
3327 #else
3328     qemu_build_not_reached();
3329 #endif
3330 }
3331 
3332 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3333 
/* Write ASR %stick_cmpr and reprogram the system tick timer limit. */
static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3347 
/* Leon3 power-down; the helper does not return, so sync state first. */
static void do_wrpowerdown(DisasContext *dc, TCGv src)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_power_down(tcg_env);
}

TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3356 
static void do_wrmwait(DisasContext *dc, TCGv src)
{
    /*
     * TODO: This is a stub version of mwait, which merely recognizes
     * interrupts immediately and does not wait.
     */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRMWAIT, VIS4, do_wr_special, a, true, do_wrmwait)
3367 
/* Write %psr (sparc32); may change CWP/ET etc., so end the TB. */
static void do_wrpsr(DisasContext *dc, TCGv src)
{
    gen_helper_wrpsr(tcg_env, src);
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3375 
/* Write %wim (sparc32); only bits for implemented windows are kept. */
static void do_wrwim(DisasContext *dc, TCGv src)
{
    target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
    TCGv tmp = tcg_temp_new();

    tcg_gen_andi_tl(tmp, src, mask);
    tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
}

TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3386 
/* Write PR %tpc into the trap state of the current trap level. */
static void do_wrtpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
#else
    /* 32-bit build: the decode can never match; keep the build happy. */
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)
3400 
/* Write PR %tnpc into the trap state of the current trap level. */
static void do_wrtnpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
#else
    /* 32-bit build: the decode can never match; keep the build happy. */
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)
3414 
/* Write PR %tstate into the trap state of the current trap level. */
static void do_wrtstate(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
#else
    /* 32-bit build: the decode can never match; keep the build happy. */
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)
3428 
/* Write PR %tt into the trap state of the current trap level. */
static void do_wrtt(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
#else
    /* 32-bit build: the decode can never match; keep the build happy. */
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3442 
/* Write PR %tick: reprogram the tick counter. */
static void do_wrtick(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3455 
/* Write PR %tba (also used for sparc32 WRTBR below). */
static void do_wrtba(DisasContext *dc, TCGv src)
{
    tcg_gen_mov_tl(cpu_tbr, src);
}

TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3462 
/* Write PR %pstate; may change privilege/endianness, so npc is dynamic. */
static void do_wrpstate(DisasContext *dc, TCGv src)
{
    save_state(dc);
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpstate(tcg_env, src);
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
3474 
/* Write PR %tl; trap-level-dependent state changes, so npc is dynamic. */
static void do_wrtl(DisasContext *dc, TCGv src)
{
    save_state(dc);
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
3483 
/* Write PR %pil; may unmask a pending interrupt. */
static void do_wrpil(DisasContext *dc, TCGv src)
{
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpil(tcg_env, src);
}

TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3493 
/* Write PR %cwp via helper (rotates the register window). */
static void do_wrcwp(DisasContext *dc, TCGv src)
{
    gen_helper_wrcwp(tcg_env, src);
}

TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3500 
/* Write PR %cansave to env. */
static void do_wrcansave(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
}

TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)
3507 
/* Write PR %canrestore to env. */
static void do_wrcanrestore(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
}

TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)
3514 
/* Write PR %cleanwin to env. */
static void do_wrcleanwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
}

TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)
3521 
/* Write PR %otherwin to env. */
static void do_wrotherwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
}

TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)
3528 
/* Write PR %wstate to env. */
static void do_wrwstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
}

TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
3535 
/* Write PR %gl via helper (switches the global register set). */
static void do_wrgl(DisasContext *dc, TCGv src)
{
    gen_helper_wrgl(tcg_env, src);
}

TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3542 
/* UA2005 strand status */
static void do_wrssr(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
}

TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)

/* sparc32 WRTBR shares the %tba write helper. */
TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3552 
/* Write HPR %hpstate; privilege state changes, so end the TB. */
static void do_wrhpstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3560 
/* Write HPR %htstate, indexed at runtime by the current trap level. */
static void do_wrhtstate(DisasContext *dc, TCGv src)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    /* tp = env + (env->tl & MAXTL_MASK) * 8, i.e. &env->htstate[tl]. */
    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
}

TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
3576 
/* Write HPR %hintp to env. */
static void do_wrhintp(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
}

TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)
3583 
/* Write HPR %htba to env. */
static void do_wrhtba(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
}

TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
3590 
/* Write HPR %hstick_cmpr and reprogram the hstick timer limit. */
static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
      do_wrhstick_cmpr)
3605 
3606 static bool do_saved_restored(DisasContext *dc, bool saved)
3607 {
3608     if (!supervisor(dc)) {
3609         return raise_priv(dc);
3610     }
3611     if (saved) {
3612         gen_helper_saved(tcg_env);
3613     } else {
3614         gen_helper_restored(tcg_env);
3615     }
3616     return advance_pc(dc);
3617 }
3618 
3619 TRANS(SAVED, 64, do_saved_restored, true)
3620 TRANS(RESTORED, 64, do_saved_restored, false)
3621 
/* NOP: nothing to emit; just step the pc. */
static bool trans_NOP(DisasContext *dc, arg_NOP *a)
{
    return advance_pc(dc);
}

/*
 * TODO: Need a feature bit for sparcv8.
 * In the meantime, treat all 32-bit cpus like sparcv7.
 */
TRANS(NOP_v7, 32, trans_NOP, a)
TRANS(NOP_v9, 64, trans_NOP, a)
3633 
/*
 * Common helper for 3-operand arithmetic/logical instructions.
 * FUNC handles the reg-reg form, FUNCI (optional) the reg-imm form.
 * With LOGIC_CC, the result is computed directly into cpu_cc_N and
 * the remaining flags are set to the logical-op pattern (Z mirrors
 * the result, C and V are zero).
 */
static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
                         void (*func)(TCGv, TCGv, TCGv),
                         void (*funci)(TCGv, TCGv, target_long),
                         bool logic_cc)
{
    TCGv dst, src1;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (logic_cc) {
        /* Compute straight into cc_N; it doubles as the result below. */
        dst = cpu_cc_N;
    } else {
        dst = gen_dest_gpr(dc, a->rd);
    }
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm || a->rs2_or_imm == 0) {
        if (funci) {
            funci(dst, src1, a->rs2_or_imm);
        } else {
            func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
        }
    } else {
        func(dst, src1, cpu_regs[a->rs2_or_imm]);
    }

    if (logic_cc) {
        if (TARGET_LONG_BITS == 64) {
            /* Also maintain the 32-bit icc view on sparc64. */
            tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
            tcg_gen_movi_tl(cpu_icc_C, 0);
        }
        tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
        tcg_gen_movi_tl(cpu_cc_C, 0);
        tcg_gen_movi_tl(cpu_cc_V, 0);
    }

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3676 
3677 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
3678                      void (*func)(TCGv, TCGv, TCGv),
3679                      void (*funci)(TCGv, TCGv, target_long),
3680                      void (*func_cc)(TCGv, TCGv, TCGv))
3681 {
3682     if (a->cc) {
3683         return do_arith_int(dc, a, func_cc, NULL, false);
3684     }
3685     return do_arith_int(dc, a, func, funci, false);
3686 }
3687 
/* Logical dispatch: a->cc selects the cheap logic-cc flag pattern. */
static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long))
{
    return do_arith_int(dc, a, func, funci, a->cc);
}
3694 
/* Add/subtract, with and without carry. */
TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)

/* Tagged add/subtract always set the condition codes. */
TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)

/* Bitwise operations; OR is handled separately in trans_OR. */
TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)

/* Multiplication. */
TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)

/* Division with condition codes. */
TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)

/* TODO: Should have feature bit -- comes in with UltraSparc T2. */
TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3721 
3722 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
3723 {
3724     /* OR with %g0 is the canonical alias for MOV. */
3725     if (!a->cc && a->rs1 == 0) {
3726         if (a->imm || a->rs2_or_imm == 0) {
3727             gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
3728         } else if (a->rs2_or_imm & ~0x1f) {
3729             /* For simplicity, we under-decoded the rs2 form. */
3730             return false;
3731         } else {
3732             gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
3733         }
3734         return advance_pc(dc);
3735     }
3736     return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
3737 }
3738 
/*
 * UDIV: unsigned divide of the 64-bit value %y:%rs1 by the 32-bit
 * low half of rs2/imm, with the quotient saturated to 32 bits.
 */
static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv_i64 t1, t2;
    TCGv dst;

    if (!avail_DIV(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /* A constant-zero divisor traps immediately. */
    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv_i32 n2;

        finishing_insn(dc);
        flush_cond(dc);

        /* Register divisor: branch to a delayed exception if zero. */
        n2 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);

        lab = delay_exception(dc, TT_DIV_ZERO);
        tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);

        /* Zero-extend the low 32 bits of the divisor to 64 bits. */
        t2 = tcg_temp_new_i64();
#ifdef TARGET_SPARC64
        tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
#else
        tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
#endif
    }

    /* Dividend is %y (high) : %rs1 (low). */
    t1 = tcg_temp_new_i64();
    tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);

    tcg_gen_divu_i64(t1, t1, t2);
    /* Saturate an overflowing quotient to UINT32_MAX. */
    tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));

    dst = gen_dest_gpr(dc, a->rd);
    tcg_gen_trunc_i64_tl(dst, t1);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3791 
/* UDIVX: 64-bit unsigned divide (v9). */
static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /* A constant-zero divisor traps immediately. */
    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;

        finishing_insn(dc);
        flush_cond(dc);

        /* Register divisor: branch to a delayed exception if zero. */
        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    tcg_gen_divu_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3829 
/* SDIVX: 64-bit signed divide (v9). */
static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /* A constant-zero divisor traps immediately. */
    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm) {
        /* src1 / -1 == -src1, avoiding INT64_MIN / -1 in TCG. */
        if (unlikely(a->rs2_or_imm == -1)) {
            tcg_gen_neg_tl(dst, src1);
            gen_store_gpr(dc, a->rd, dst);
            return advance_pc(dc);
        }
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv t1, t2;

        finishing_insn(dc);
        flush_cond(dc);

        /* Register divisor: branch to a delayed exception if zero. */
        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);

        /*
         * Need to avoid INT64_MIN / -1, which will trap on x86 host.
         * Set SRC2 to 1 as a new divisor, to produce the correct result.
         */
        t1 = tcg_temp_new();
        t2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
        tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
        tcg_gen_and_tl(t1, t1, t2);
        tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
                           tcg_constant_tl(1), src2);
        src2 = t1;
    }

    tcg_gen_div_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3886 
/*
 * Common translation for the EDGE* instructions: compute a left and a
 * right edge mask from the low alignment bits of rs1 and rs2 for the
 * given element WIDTH (8/16/32 bits) and endianness, then combine them.
 * When CC is set, the condition codes are also updated as for a
 * subtracting compare of rs1 and rs2.
 */
static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
                     int width, bool cc, bool little_endian)
{
    TCGv dst, s1, s2, l, r, t, m;
    uint64_t amask = address_mask_i(dc, -8);

    dst = gen_dest_gpr(dc, a->rd);
    s1 = gen_load_gpr(dc, a->rs1);
    s2 = gen_load_gpr(dc, a->rs2);

    if (cc) {
        gen_op_subcc(cpu_cc_N, s1, s2);
    }

    l = tcg_temp_new();
    r = tcg_temp_new();
    t = tcg_temp_new();

    /* Extract the per-element alignment within the 8-byte word. */
    switch (width) {
    case 8:
        tcg_gen_andi_tl(l, s1, 7);
        tcg_gen_andi_tl(r, s2, 7);
        tcg_gen_xori_tl(r, r, 7);
        m = tcg_constant_tl(0xff);
        break;
    case 16:
        tcg_gen_extract_tl(l, s1, 1, 2);
        tcg_gen_extract_tl(r, s2, 1, 2);
        tcg_gen_xori_tl(r, r, 3);
        m = tcg_constant_tl(0xf);
        break;
    case 32:
        tcg_gen_extract_tl(l, s1, 2, 1);
        tcg_gen_extract_tl(r, s2, 2, 1);
        tcg_gen_xori_tl(r, r, 1);
        m = tcg_constant_tl(0x3);
        break;
    default:
        abort();
    }

    /* Compute Left Edge */
    if (little_endian) {
        tcg_gen_shl_tl(l, m, l);
        tcg_gen_and_tl(l, l, m);
    } else {
        tcg_gen_shr_tl(l, m, l);
    }
    /* Compute Right Edge */
    if (little_endian) {
        tcg_gen_shr_tl(r, m, r);
    } else {
        tcg_gen_shl_tl(r, m, r);
        tcg_gen_and_tl(r, r, m);
    }

    /* Compute dst = (s1 == s2 under amask ? l & r : l) */
    tcg_gen_xor_tl(t, s1, s2);
    tcg_gen_and_tl(r, r, l);
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, dst, t, tcg_constant_tl(amask), r, l);

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3951 
/* EDGE*cc update the condition codes; EDGE*N ("no cc") do not. */
TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)

TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
3965 
3966 static bool do_rr(DisasContext *dc, arg_r_r *a,
3967                   void (*func)(TCGv, TCGv))
3968 {
3969     TCGv dst = gen_dest_gpr(dc, a->rd);
3970     TCGv src = gen_load_gpr(dc, a->rs);
3971 
3972     func(dst, src);
3973     gen_store_gpr(dc, a->rd, dst);
3974     return advance_pc(dc);
3975 }
3976 
3977 TRANS(LZCNT, VIS3, do_rr, a, gen_op_lzcnt)
3978 
3979 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
3980                    void (*func)(TCGv, TCGv, TCGv))
3981 {
3982     TCGv dst = gen_dest_gpr(dc, a->rd);
3983     TCGv src1 = gen_load_gpr(dc, a->rs1);
3984     TCGv src2 = gen_load_gpr(dc, a->rs2);
3985 
3986     func(dst, src1, src2);
3987     gen_store_gpr(dc, a->rd, dst);
3988     return advance_pc(dc);
3989 }
3990 
/* Three-operand VIS integer operations sharing the do_rrr template. */
TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)

TRANS(ADDXC, VIS3, do_rrr, a, gen_op_addxc)
TRANS(ADDXCcc, VIS3, do_rrr, a, gen_op_addxccc)

TRANS(SUBXC, VIS4, do_rrr, a, gen_op_subxc)
TRANS(SUBXCcc, VIS4, do_rrr, a, gen_op_subxccc)

TRANS(UMULXHI, VIS3, do_rrr, a, gen_op_umulxhi)
4002 
/*
 * ALIGNADDRESS: dst = (s1 + s2) & ~7; the low 3 bits of the sum are
 * recorded in GSR.align for use by the FALIGNDATA family.
 */
static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}
4015 
/*
 * ALIGNADDRESS_LITTLE: as gen_op_alignaddr, but the negated low bits of
 * the sum are deposited into GSR.align.
 */
static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_neg_tl(tmp, tmp);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}
4029 
/* Address alignment for unaligned FP data access (VIS1). */
TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
4032 
/*
 * BMASK: dst = s1 + s2, with the sum also written to the upper 32 bits
 * of GSR (GSR.mask), consumed later by BSHUFFLE.
 */
static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    tcg_gen_add_tl(dst, s1, s2);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
#else
    g_assert_not_reached();
#endif
}

TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
4044 
/*
 * CMASK*: update GSR from its current value and the rs2 register via
 * the per-width helper.  No GPR result; the PC is not advanced here
 * (handled by the caller's common path).
 */
static bool do_cmask(DisasContext *dc, int rs2, void (*func)(TCGv, TCGv, TCGv))
{
    func(cpu_gsr, cpu_gsr, gen_load_gpr(dc, rs2));
    return true;
}

TRANS(CMASK8, VIS3, do_cmask, a->rs2, gen_helper_cmask8)
TRANS(CMASK16, VIS3, do_cmask, a->rs2, gen_helper_cmask16)
TRANS(CMASK32, VIS3, do_cmask, a->rs2, gen_helper_cmask32)
4054 
/*
 * Shift by register: L selects shift-left, U selects unsigned (logical)
 * right shift, otherwise arithmetic right shift.  a->x selects the
 * 64-bit form; the 32-bit form masks the count to 5 bits and
 * zero/sign-extends as appropriate.
 */
static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
{
    TCGv dst, src1, src2;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && a->x) {
        return false;
    }

    /* Mask the shift count to the operand width. */
    src2 = tcg_temp_new();
    tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (l) {
        tcg_gen_shl_tl(dst, src1, src2);
        if (!a->x) {
            /* 32-bit SLL: the result is zero-extended. */
            tcg_gen_ext32u_tl(dst, dst);
        }
    } else if (u) {
        if (!a->x) {
            /* 32-bit SRL: shift in zeros from bit 31. */
            tcg_gen_ext32u_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_shr_tl(dst, src1, src2);
    } else {
        if (!a->x) {
            /* 32-bit SRA: shift in copies of bit 31. */
            tcg_gen_ext32s_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_sar_tl(dst, src1, src2);
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_r, ALL, do_shift_r, a, true, true)
TRANS(SRL_r, ALL, do_shift_r, a, false, true)
TRANS(SRA_r, ALL, do_shift_r, a, false, false)
4094 
/*
 * Shift by immediate: L/U select the operation as in do_shift_r.
 * For the 64-bit form (or sparc32) this is a plain shift; for the
 * 32-bit form on sparc64 the shift and the zero/sign extension are
 * combined into a single deposit/extract operation.
 */
static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
{
    TCGv dst, src1;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && (a->x || a->i >= 32)) {
        return false;
    }

    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (avail_32(dc) || a->x) {
        if (l) {
            tcg_gen_shli_tl(dst, src1, a->i);
        } else if (u) {
            tcg_gen_shri_tl(dst, src1, a->i);
        } else {
            tcg_gen_sari_tl(dst, src1, a->i);
        }
    } else {
        /* 32-bit shift on sparc64: shift and extension in one op. */
        if (l) {
            tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
        } else if (u) {
            tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
        } else {
            tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
        }
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_i, ALL, do_shift_i, a, true, true)
TRANS(SRL_i, ALL, do_shift_i, a, false, true)
TRANS(SRA_i, ALL, do_shift_i, a, false, false)
4131 
/*
 * Return the second operand for an r_r_ri form: a constant for the
 * immediate form (or %g0), the register otherwise.  Returns NULL for
 * an invalid under-decoded register number, which the caller turns
 * into a decode failure.
 */
static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
{
    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }
    if (imm || rs2_or_imm == 0) {
        return tcg_constant_tl(rs2_or_imm);
    } else {
        return cpu_regs[rs2_or_imm];
    }
}
4144 
/*
 * Common tail for conditional moves: rd = cmp ? src2 : rd.
 * The current rd value is loaded first so the movcond can keep it
 * when the condition is false.
 */
static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
{
    TCGv dst = gen_load_gpr(dc, rd);
    TCGv c2 = tcg_constant_tl(cmp->c2);

    tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
    gen_store_gpr(dc, rd, dst);
    return advance_pc(dc);
}
4154 
/* MOVcc: conditional move on integer condition codes. */
static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_compare(&cmp, a->cc, a->cond, dc);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}
4166 
/* MOVfcc: conditional move on floating-point condition codes. */
static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}
4178 
/* MOVR: conditional move on the contents of register rs1. */
static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    /* gen_compare_reg rejects reserved register-condition encodings. */
    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    return do_mov_cond(dc, &cmp, a->rd, src2);
}
4192 
/*
 * Compute rs1 + (rs2 | simm) into a fresh temporary and hand it to
 * FUNC.  Used by JMPL, RETT, RETURN, SAVE and RESTORE, where the sum
 * must remain valid across a register-window change.
 */
static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
                           bool (*func)(DisasContext *dc, int rd, TCGv src))
{
    TCGv src1, sum;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /*
     * Always load the sum into a new temporary.
     * This is required to capture the value across a window change,
     * e.g. SAVE and RESTORE, and may be optimized away otherwise.
     */
    sum = tcg_temp_new();
    src1 = gen_load_gpr(dc, a->rs1);
    if (a->imm || a->rs2_or_imm == 0) {
        tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
    } else {
        tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
    }
    return func(dc, a->rd, sum);
}
4217 
/*
 * JMPL: jump to SRC, storing the address of the JMPL itself into rd.
 * SRC must be word aligned.
 */
static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
{
    /*
     * Preserve pc across advance, so that we can delay
     * the writeback to rd until after src is consumed.
     */
    target_ulong cur_pc = dc->pc;

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);
    gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
4238 
/*
 * RETT (sparc32): return from trap to SRC.  Privileged; alignment is
 * checked, then the rett helper performs the trap-state restore.
 */
static bool do_rett(DisasContext *dc, int rd, TCGv src)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_helper_rett(tcg_env);

    dc->npc = DYNAMIC_PC;
    return true;
}

TRANS(RETT, 32, do_add_special, a, do_rett)
4256 
/*
 * RETURN (sparc64): restore the register window and jump to SRC.
 */
static bool do_return(DisasContext *dc, int rd, TCGv src)
{
    gen_check_align(dc, src, 3);
    gen_helper_restore(tcg_env);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(RETURN, 64, do_add_special, a, do_return)
4271 
/*
 * SAVE: advance the register window, then write the pre-computed sum
 * (evaluated in the old window) into rd of the new window.
 */
static bool do_save(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_save(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(SAVE, ALL, do_add_special, a, do_save)
4280 
/*
 * RESTORE: roll back the register window, then write the pre-computed
 * sum (evaluated in the old window) into rd of the new window.
 */
static bool do_restore(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_restore(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4289 
/*
 * DONE / RETRY (sparc64): privileged return from a trap handler via
 * the respective helper.  Both leave pc/npc unknown at translation
 * time, hence DYNAMIC_PC for both.
 */
static bool do_done_retry(DisasContext *dc, bool done)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    dc->npc = DYNAMIC_PC;
    dc->pc = DYNAMIC_PC;
    translator_io_start(&dc->base);
    if (done) {
        gen_helper_done(tcg_env);
    } else {
        gen_helper_retry(tcg_env);
    }
    return true;
}

TRANS(DONE, 64, do_done_retry, true)
TRANS(RETRY, 64, do_done_retry, false)
4308 
4309 /*
4310  * Major opcode 11 -- load and store instructions
4311  */
4312 
/*
 * Compute the effective address rs1 + (rs2 | simm) for a load/store.
 * The result is zero-extended to 32 bits when the 32-bit address
 * mask is in effect (AM_CHECK).  Returns NULL for an invalid
 * under-decoded register number.
 */
static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
{
    TCGv addr, tmp = NULL;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }

    addr = gen_load_gpr(dc, rs1);
    if (rs2_or_imm) {
        tmp = tcg_temp_new();
        if (imm) {
            tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
        } else {
            tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
        }
        addr = tmp;
    }
    if (AM_CHECK(dc)) {
        if (!tmp) {
            tmp = tcg_temp_new();
        }
        tcg_gen_ext32u_tl(tmp, addr);
        addr = tmp;
    }
    return addr;
}
4341 
/*
 * Common translation for integer loads, with or without an ASI:
 * rd = *(rs1 + rs2_or_imm) using memory operation MOP.
 */
static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ld_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}

TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4365 
/*
 * Common translation for integer stores, with or without an ASI:
 * *(rs1 + rs2_or_imm) = rd using memory operation MOP.
 */
static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_load_gpr(dc, a->rd);
    gen_st_asi(dc, &da, reg, addr);
    return advance_pc(dc);
}

TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
TRANS(STB, ALL, do_st_gpr, a, MO_UB)
TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4385 
/* LDD: load doubleword into an even/odd register pair; rd must be even. */
static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_ldda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
4402 
/* STD: store doubleword from an even/odd register pair; rd must be even. */
static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_stda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
4419 
/* LDSTUB: atomically load a byte into rd and store 0xff to memory. */
static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, reg;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_UB);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ldstub_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}
4436 
/* SWAP: atomically exchange the 32-bit value in rd with memory. */
static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, dst, src;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUL);

    dst = gen_dest_gpr(dc, a->rd);
    src = gen_load_gpr(dc, a->rd);
    gen_swap_asi(dc, &da, dst, src, addr);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
4454 
/*
 * CASA / CASXA: compare-and-swap at [rs1].  The comparison value comes
 * from rs2, the swap value and the result both use rd.  Note the
 * address has no displacement: rs1 only.
 */
static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv addr, o, n, c;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, true, 0);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    o = gen_dest_gpr(dc, a->rd);      /* old value, written back to rd */
    n = gen_load_gpr(dc, a->rd);      /* new value to store on match */
    c = gen_load_gpr(dc, a->rs2_or_imm);  /* comparison value */
    gen_cas_asi(dc, &da, o, n, c, addr);
    gen_store_gpr(dc, a->rd, o);
    return advance_pc(dc);
}

TRANS(CASA, CASA, do_casa, a, MO_TEUL)
TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4476 
/*
 * Common translation for floating-point loads of size SZ (32/64/128
 * bits), with or without an ASI.  Traps if the FPU is disabled, or if
 * float128 is unimplemented for the 128-bit forms.
 */
static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_ldf_asi(dc, &da, sz, addr, a->rd);
    gen_update_fprs_dirty(dc, a->rd);
    return advance_pc(dc);
}

TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)

TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4504 
/*
 * Common translation for floating-point stores of size SZ (32/64/128
 * bits), with or without an ASI.  Trap conditions as for do_ld_fpr.
 */
static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_stf_asi(dc, &da, sz, addr, a->rd);
    return advance_pc(dc);
}

TRANS(STF, ALL, do_st_fpr, a, MO_32)
TRANS(STDF, ALL, do_st_fpr, a, MO_64)
TRANS(STQF, 64, do_st_fpr, a, MO_128)

TRANS(STFA, 64, do_st_fpr, a, MO_32)
TRANS(STDFA, 64, do_st_fpr, a, MO_64)
TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4531 
/*
 * STDFQ (sparc32, privileged): the FP deferred-trap queue is not
 * implemented, so raise a sequence-error FP exception instead.
 */
static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
{
    if (!avail_32(dc)) {
        return false;
    }
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    return true;
}
4546 
/*
 * LDFSR: load the 32-bit FSR from memory.  fcc0 is extracted directly;
 * the remaining (non-fcc, non-ftt) fields go through the helper.
 */
static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i32 tmp;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);

    tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2);
    /* LDFSR does not change FCC[1-3]. */

    gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp);
    return advance_pc(dc);
}
4568 
/*
 * LDXFSR / LDXEFSR (sparc64): load the 64-bit FSR from memory.
 * All four fcc fields are extracted here; the rest of the register
 * goes through a helper.  ENTIRE selects whether FTT is also loaded
 * (LDXEFSR, VIS3B) or preserved (LDXFSR).
 */
static bool do_ldxfsr(DisasContext *dc, arg_r_r_ri *a, bool entire)
{
#ifdef TARGET_SPARC64
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i64 t64;
    TCGv_i32 lo, hi;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    t64 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN);

    /*
     * cpu_fcc[3] doubles as scratch for the high half; this is safe
     * because it is the last of the fcc fields to be (re)written below.
     */
    lo = tcg_temp_new_i32();
    hi = cpu_fcc[3];
    tcg_gen_extr_i64_i32(lo, hi, t64);
    tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2);
    tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2);

    if (entire) {
        gen_helper_set_fsr_nofcc(tcg_env, lo);
    } else {
        gen_helper_set_fsr_nofcc_noftt(tcg_env, lo);
    }
    return advance_pc(dc);
#else
    return false;
#endif
}

TRANS(LDXFSR, 64, do_ldxfsr, a, false)
TRANS(LDXEFSR, VIS3B, do_ldxfsr, a, true)
4607 
/*
 * STFSR / STXFSR: assemble the current FSR via helper and store it
 * as a 32-bit (sparc32) or 64-bit (sparc64) value.
 */
static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv fsr;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    fsr = tcg_temp_new();
    gen_helper_get_fsr(fsr, tcg_env);
    tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
    return advance_pc(dc);
}

TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4628 
/* Write the 32-bit constant C to float register rd (FZEROs/FONEs). */
static bool do_fc(DisasContext *dc, int rd, int32_t c)
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_store_fpr_F(dc, rd, tcg_constant_i32(c));
    return advance_pc(dc);
}

TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
TRANS(FONEs, VIS1, do_fc, a->rd, -1)
4640 
/* Write the 64-bit constant C to double register rd (FZEROd/FONEd). */
static bool do_dc(DisasContext *dc, int rd, int64_t c)
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_store_fpr_D(dc, rd, tcg_constant_i64(c));
    return advance_pc(dc);
}

TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4652 
/*
 * Template for single-precision unary operations that cannot raise
 * FP exceptions: frd = func(frs).
 */
static bool do_ff(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4673 
/*
 * Template for operations taking a 64-bit FP source and producing a
 * 32-bit FP result without env access: frd(32) = func(frs(64)).
 */
static bool do_fd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4693 
/*
 * Template for single-precision unary operations that may raise FP
 * exceptions and therefore need the cpu env: frd = func(env, frs).
 */
static bool do_env_ff(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tcg_env, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4712 
/*
 * Template for env-using conversions from a 64-bit FP source to a
 * 32-bit FP result: frd(32) = func(env, frs(64)).
 */
static bool do_env_fd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4733 
/*
 * Template for double-precision unary operations that cannot raise
 * FP exceptions: frd = func(frs).
 */
static bool do_dd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4755 
/*
 * Template for env-using double-precision unary operations:
 * frd = func(env, frs).
 */
static bool do_env_dd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4775 
/*
 * Template for operations taking a 32-bit FP source and producing a
 * 64-bit FP result without env access: frd(64) = func(frs(32)).
 */
static bool do_df(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FEXPAND, VIS1, do_df, a, gen_helper_fexpand)
4794 
/*
 * Template for env-using conversions from a 32-bit FP source to a
 * 64-bit FP result: frd(64) = func(env, frs(32)).
 */
static bool do_env_df(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4815 
/*
 * Template for quad-precision unary operations without env access:
 * frd = func(frs).  Since no helper is involved, the IEEE exception
 * state and FTT are cleared explicitly here.
 */
static bool do_qq(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i128, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    t = gen_load_fpr_Q(dc, a->rs);
    func(t, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)
4838 
/*
 * Template for env-using quad-precision unary operations:
 * frd = func(env, frs).
 */
static bool do_env_qq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    t = gen_load_fpr_Q(dc, a->rs);
    func(t, tcg_env, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4858 
/*
 * Template for env-using conversions from a quad FP source to a
 * 32-bit FP result: frd(32) = func(env, frs(128)).
 */
static bool do_env_fq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
{
    TCGv_i128 src;
    TCGv_i32 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_Q(dc, a->rs);
    dst = tcg_temp_new_i32();
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4881 
/*
 * Template for env-using conversions from a quad FP source to a
 * 64-bit FP result: frd(64) = func(env, frs(128)).
 */
static bool do_env_dq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
{
    TCGv_i128 src;
    TCGv_i64 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_Q(dc, a->rs);
    dst = tcg_temp_new_i64();
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4904 
/*
 * Template for env-using conversions from a 32-bit FP source to a
 * quad FP result: frd(128) = func(env, frs(32)).
 */
static bool do_env_qf(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_F(dc, a->rs);
    dst = tcg_temp_new_i128();
    func(dst, tcg_env, src);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4927 
/*
 * Template for env-using conversions from a 64-bit FP source to a
 * quad FP result: frd(128) = func(env, frs(64)).
 */
static bool do_env_qd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
{
    TCGv_i64 src;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_D(dc, a->rs);
    dst = tcg_temp_new_i128();
    func(dst, tcg_env, src);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
4950 
4951 static bool do_fff(DisasContext *dc, arg_r_r_r *a,
4952                    void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
4953 {
4954     TCGv_i32 src1, src2;
4955 
4956     if (gen_trap_ifnofpu(dc)) {
4957         return true;
4958     }
4959 
4960     src1 = gen_load_fpr_F(dc, a->rs1);
4961     src2 = gen_load_fpr_F(dc, a->rs2);
4962     func(src1, src1, src2);
4963     gen_store_fpr_F(dc, a->rd, src1);
4964     return advance_pc(dc);
4965 }
4966 
4967 TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
4968 TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
4969 TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
4970 TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
4971 TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
4972 TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
4973 TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
4974 TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
4975 TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
4976 TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
4977 TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
4978 TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
4979 
4980 TRANS(FHADDs, VIS3, do_fff, a, gen_op_fhadds)
4981 TRANS(FHSUBs, VIS3, do_fff, a, gen_op_fhsubs)
4982 TRANS(FNHADDs, VIS3, do_fff, a, gen_op_fnhadds)
4983 
4984 TRANS(FPADDS16s, VIS3, do_fff, a, gen_op_fpadds16s)
4985 TRANS(FPSUBS16s, VIS3, do_fff, a, gen_op_fpsubs16s)
4986 TRANS(FPADDS32s, VIS3, do_fff, a, gen_op_fpadds32s)
4987 TRANS(FPSUBS32s, VIS3, do_fff, a, gen_op_fpsubs32s)
4988 
4989 static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
4990                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
4991 {
4992     TCGv_i32 src1, src2;
4993 
4994     if (gen_trap_ifnofpu(dc)) {
4995         return true;
4996     }
4997 
4998     src1 = gen_load_fpr_F(dc, a->rs1);
4999     src2 = gen_load_fpr_F(dc, a->rs2);
5000     func(src1, tcg_env, src1, src2);
5001     gen_store_fpr_F(dc, a->rd, src1);
5002     return advance_pc(dc);
5003 }
5004 
5005 TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
5006 TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
5007 TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
5008 TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
5009 TRANS(FNADDs, VIS3, do_env_fff, a, gen_helper_fnadds)
5010 TRANS(FNMULs, VIS3, do_env_fff, a, gen_helper_fnmuls)
5011 
/*
 * Translate rd(D) = func(rs1(F), rs2(F)): two 32-bit sources
 * producing a 64-bit result, no env pointer required.
 */
static bool do_dff(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8x16AU, VIS1, do_dff, a, gen_op_fmul8x16au)
TRANS(FMUL8x16AL, VIS1, do_dff, a, gen_op_fmul8x16al)
TRANS(FMULD8SUx16, VIS1, do_dff, a, gen_op_fmuld8sux16)
TRANS(FMULD8ULx16, VIS1, do_dff, a, gen_op_fmuld8ulx16)
TRANS(FPMERGE, VIS1, do_dff, a, gen_helper_fpmerge)
5035 
5036 static bool do_dfd(DisasContext *dc, arg_r_r_r *a,
5037                    void (*func)(TCGv_i64, TCGv_i32, TCGv_i64))
5038 {
5039     TCGv_i64 dst, src2;
5040     TCGv_i32 src1;
5041 
5042     if (gen_trap_ifnofpu(dc)) {
5043         return true;
5044     }
5045 
5046     dst = tcg_temp_new_i64();
5047     src1 = gen_load_fpr_F(dc, a->rs1);
5048     src2 = gen_load_fpr_D(dc, a->rs2);
5049     func(dst, src1, src2);
5050     gen_store_fpr_D(dc, a->rd, dst);
5051     return advance_pc(dc);
5052 }
5053 
5054 TRANS(FMUL8x16, VIS1, do_dfd, a, gen_helper_fmul8x16)
5055 
/*
 * Expand a VIS operation on double (64-bit) FP registers with the
 * generic vector infrastructure: the registers are treated as 8-byte
 * vectors of @vece-sized elements.  The trailing 8, 8 are the gvec
 * oprsz/maxsz — exactly one D register.
 */
static bool do_gvec_ddd(DisasContext *dc, arg_r_r_r *a, MemOp vece,
                        void (*func)(unsigned, uint32_t, uint32_t,
                                     uint32_t, uint32_t, uint32_t))
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    func(vece, gen_offset_fpr_D(a->rd), gen_offset_fpr_D(a->rs1),
         gen_offset_fpr_D(a->rs2), 8, 8);
    return advance_pc(dc);
}

TRANS(FPADD8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_add)
TRANS(FPADD16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_add)
TRANS(FPADD32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_add)

TRANS(FPSUB8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_sub)
TRANS(FPSUB16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sub)
TRANS(FPSUB32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sub)

TRANS(FCHKSM16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fchksm16)
TRANS(FMEAN16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fmean16)

TRANS(FPADDS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_ssadd)
TRANS(FPADDS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ssadd)
TRANS(FPADDS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_ssadd)
TRANS(FPADDUS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_usadd)
TRANS(FPADDUS16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_usadd)

TRANS(FPSUBS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_sssub)
TRANS(FPSUBS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sssub)
TRANS(FPSUBS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sssub)
TRANS(FPSUBUS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_ussub)
TRANS(FPSUBUS16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ussub)

TRANS(FSLL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shlv)
TRANS(FSLL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shlv)
TRANS(FSRL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shrv)
TRANS(FSRL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shrv)
TRANS(FSRA16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sarv)
TRANS(FSRA32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sarv)

TRANS(FPMIN8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_smin)
TRANS(FPMIN16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_smin)
TRANS(FPMIN32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_smin)
TRANS(FPMINU8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_umin)
TRANS(FPMINU16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_umin)
TRANS(FPMINU32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_umin)

TRANS(FPMAX8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_smax)
TRANS(FPMAX16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_smax)
TRANS(FPMAX32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_smax)
TRANS(FPMAXU8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_umax)
TRANS(FPMAXU16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_umax)
TRANS(FPMAXU32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_umax)
5112 
5113 static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
5114                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
5115 {
5116     TCGv_i64 dst, src1, src2;
5117 
5118     if (gen_trap_ifnofpu(dc)) {
5119         return true;
5120     }
5121 
5122     dst = tcg_temp_new_i64();
5123     src1 = gen_load_fpr_D(dc, a->rs1);
5124     src2 = gen_load_fpr_D(dc, a->rs2);
5125     func(dst, src1, src2);
5126     gen_store_fpr_D(dc, a->rd, dst);
5127     return advance_pc(dc);
5128 }
5129 
5130 TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
5131 TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
5132 
5133 TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
5134 TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
5135 TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
5136 TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
5137 TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
5138 TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
5139 TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
5140 TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)
5141 
5142 TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
5143 TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata_g)
5144 TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
5145 
5146 TRANS(FHADDd, VIS3, do_ddd, a, gen_op_fhaddd)
5147 TRANS(FHSUBd, VIS3, do_ddd, a, gen_op_fhsubd)
5148 TRANS(FNHADDd, VIS3, do_ddd, a, gen_op_fnhaddd)
5149 
5150 TRANS(FPADD64, VIS3B, do_ddd, a, tcg_gen_add_i64)
5151 TRANS(FPSUB64, VIS3B, do_ddd, a, tcg_gen_sub_i64)
5152 TRANS(FSLAS16, VIS3, do_ddd, a, gen_helper_fslas16)
5153 TRANS(FSLAS32, VIS3, do_ddd, a, gen_helper_fslas32)
5154 
/*
 * Translate rd(gpr) = func(rs1(D), rs2(D)): VIS ops that take two FP
 * double registers but write an *integer* register (the FPCMP*
 * family, PDISTN, etc.).
 */
static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;
    TCGv dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
TRANS(FPCMPULE16, VIS4, do_rdd, a, gen_helper_fcmpule16)
TRANS(FPCMPUGT16, VIS4, do_rdd, a, gen_helper_fcmpugt16)

TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
TRANS(FPCMPULE32, VIS4, do_rdd, a, gen_helper_fcmpule32)
TRANS(FPCMPUGT32, VIS4, do_rdd, a, gen_helper_fcmpugt32)

TRANS(FPCMPEQ8, VIS3B, do_rdd, a, gen_helper_fcmpeq8)
TRANS(FPCMPNE8, VIS3B, do_rdd, a, gen_helper_fcmpne8)
TRANS(FPCMPULE8, VIS3B, do_rdd, a, gen_helper_fcmpule8)
TRANS(FPCMPUGT8, VIS3B, do_rdd, a, gen_helper_fcmpugt8)
TRANS(FPCMPLE8, VIS4, do_rdd, a, gen_helper_fcmple8)
TRANS(FPCMPGT8, VIS4, do_rdd, a, gen_helper_fcmpgt8)

TRANS(PDISTN, VIS3, do_rdd, a, gen_op_pdistn)
TRANS(XMULX, VIS3, do_rrr, a, gen_helper_xmulx)
TRANS(XMULXHI, VIS3, do_rrr, a, gen_helper_xmulxhi)
5197 
5198 static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
5199                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
5200 {
5201     TCGv_i64 dst, src1, src2;
5202 
5203     if (gen_trap_ifnofpu(dc)) {
5204         return true;
5205     }
5206 
5207     dst = tcg_temp_new_i64();
5208     src1 = gen_load_fpr_D(dc, a->rs1);
5209     src2 = gen_load_fpr_D(dc, a->rs2);
5210     func(dst, tcg_env, src1, src2);
5211     gen_store_fpr_D(dc, a->rd, dst);
5212     return advance_pc(dc);
5213 }
5214 
5215 TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
5216 TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
5217 TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
5218 TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
5219 TRANS(FNADDd, VIS3, do_env_ddd, a, gen_helper_fnaddd)
5220 TRANS(FNMULd, VIS3, do_env_ddd, a, gen_helper_fnmuld)
5221 
/*
 * FsMULd: rd(D) = rs1(F) * rs2(F), single-precision operands with a
 * double-precision product.  Note the check ordering: the FPU-disabled
 * trap takes priority over the missing-feature (unimplemented FPop)
 * case.
 */
static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
        return raise_unimpfpop(dc);
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_fsmuld(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}
5241 
5242 static bool trans_FNsMULd(DisasContext *dc, arg_r_r_r *a)
5243 {
5244     TCGv_i64 dst;
5245     TCGv_i32 src1, src2;
5246 
5247     if (!avail_VIS3(dc)) {
5248         return false;
5249     }
5250     if (gen_trap_ifnofpu(dc)) {
5251         return true;
5252     }
5253     dst = tcg_temp_new_i64();
5254     src1 = gen_load_fpr_F(dc, a->rs1);
5255     src2 = gen_load_fpr_F(dc, a->rs2);
5256     gen_helper_fnsmuld(dst, tcg_env, src1, src2);
5257     gen_store_fpr_D(dc, a->rd, dst);
5258     return advance_pc(dc);
5259 }
5260 
5261 static bool do_ffff(DisasContext *dc, arg_r_r_r_r *a,
5262                     void (*func)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
5263 {
5264     TCGv_i32 dst, src1, src2, src3;
5265 
5266     if (gen_trap_ifnofpu(dc)) {
5267         return true;
5268     }
5269 
5270     src1 = gen_load_fpr_F(dc, a->rs1);
5271     src2 = gen_load_fpr_F(dc, a->rs2);
5272     src3 = gen_load_fpr_F(dc, a->rs3);
5273     dst = tcg_temp_new_i32();
5274     func(dst, src1, src2, src3);
5275     gen_store_fpr_F(dc, a->rd, dst);
5276     return advance_pc(dc);
5277 }
5278 
5279 TRANS(FMADDs, FMAF, do_ffff, a, gen_op_fmadds)
5280 TRANS(FMSUBs, FMAF, do_ffff, a, gen_op_fmsubs)
5281 TRANS(FNMSUBs, FMAF, do_ffff, a, gen_op_fnmsubs)
5282 TRANS(FNMADDs, FMAF, do_ffff, a, gen_op_fnmadds)
5283 
/*
 * Translate rd(D) = func(rs1(D), rs2(D), rs3(D)): three-source
 * 64-bit ops (double-precision FMA family, PDIST accumulation,
 * FPMADDX*).
 */
static bool do_dddd(DisasContext *dc, arg_r_r_r_r *a,
                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2, src3;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst  = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    src3 = gen_load_fpr_D(dc, a->rs3);
    func(dst, src1, src2, src3);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
TRANS(FMADDd, FMAF, do_dddd, a, gen_op_fmaddd)
TRANS(FMSUBd, FMAF, do_dddd, a, gen_op_fmsubd)
TRANS(FNMSUBd, FMAF, do_dddd, a, gen_op_fnmsubd)
TRANS(FNMADDd, FMAF, do_dddd, a, gen_op_fnmaddd)
TRANS(FPMADDX, IMA, do_dddd, a, gen_op_fpmaddx)
TRANS(FPMADDXHI, IMA, do_dddd, a, gen_op_fpmaddxhi)
5309 
/*
 * FALIGNDATAi (VIS4): variant of FALIGNDATA that takes its alignment
 * operand from integer register rs1 instead of the GSR.  Note that
 * rd is read as the *first* FP source as well as being the
 * destination.
 */
static bool trans_FALIGNDATAi(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst, src1, src2;
    TCGv src3;

    if (!avail_VIS4(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst  = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rd);
    src2 = gen_load_fpr_D(dc, a->rs2);
    src3 = gen_load_gpr(dc, a->rs1);
    gen_op_faligndata_i(dst, src1, src2, src3);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}
5330 
/*
 * Translate rd(Q) = func(env, rs1(Q), rs2(Q)) for float128 arithmetic.
 * The rs1 temporary is reused as the destination.  Returns true with a
 * trap queued if the FPU is disabled or float128 is unavailable.
 */
static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
{
    TCGv_i128 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    func(src1, tcg_env, src1, src2);
    gen_store_fpr_Q(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
5354 
5355 static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
5356 {
5357     TCGv_i64 src1, src2;
5358     TCGv_i128 dst;
5359 
5360     if (gen_trap_ifnofpu(dc)) {
5361         return true;
5362     }
5363     if (gen_trap_float128(dc)) {
5364         return true;
5365     }
5366 
5367     src1 = gen_load_fpr_D(dc, a->rs1);
5368     src2 = gen_load_fpr_D(dc, a->rs2);
5369     dst = tcg_temp_new_i128();
5370     gen_helper_fdmulq(dst, tcg_env, src1, src2);
5371     gen_store_fpr_Q(dc, a->rd, dst);
5372     return advance_pc(dc);
5373 }
5374 
/*
 * FMOVR: conditionally move an FP register based on comparing integer
 * register rs1 against zero.  Returns false (illegal insn) when
 * gen_compare_reg rejects the rcond encoding.
 */
static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
                     void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
5398 
/*
 * FMOVcc: conditionally move an FP register based on the integer
 * condition codes (icc/xcc selected by a->cc).  FTT/IEEE exception
 * state is cleared before the compare is generated.
 */
static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
                      void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_compare(&cmp, a->cc, a->cond, dc);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
5420 
/*
 * FMOVfcc: conditionally move an FP register based on the FP
 * condition codes (%fccN selected by a->cc), analogous to do_fmovcc
 * but using gen_fcompare.
 */
static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
                       void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_fcompare(&cmp, a->cc, a->cond);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
5442 
5443 static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
5444 {
5445     TCGv_i32 src1, src2;
5446 
5447     if (avail_32(dc) && a->cc != 0) {
5448         return false;
5449     }
5450     if (gen_trap_ifnofpu(dc)) {
5451         return true;
5452     }
5453 
5454     src1 = gen_load_fpr_F(dc, a->rs1);
5455     src2 = gen_load_fpr_F(dc, a->rs2);
5456     if (e) {
5457         gen_helper_fcmpes(cpu_fcc[a->cc], tcg_env, src1, src2);
5458     } else {
5459         gen_helper_fcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
5460     }
5461     return advance_pc(dc);
5462 }
5463 
5464 TRANS(FCMPs, ALL, do_fcmps, a, false)
5465 TRANS(FCMPEs, ALL, do_fcmps, a, true)
5466 
/*
 * FCMPd / FCMPEd: double-precision compare into %fcc[a->cc].
 * @e selects the signaling variant (fcmped).  Only %fcc0 exists on
 * sparc32, so a non-zero cc field is an illegal encoding there.
 */
static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
{
    TCGv_i64 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    if (e) {
        gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPd, ALL, do_fcmpd, a, false)
TRANS(FCMPEd, ALL, do_fcmpd, a, true)
5490 
/*
 * FCMPq / FCMPEq: float128 compare into %fcc[a->cc].  @e selects the
 * signaling variant (fcmpeq).  Also traps if float128 is unavailable.
 */
static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
{
    TCGv_i128 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    if (e) {
        gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPq, ALL, do_fcmpq, a, false)
TRANS(FCMPEq, ALL, do_fcmpq, a, true)
5517 
5518 static bool trans_FLCMPs(DisasContext *dc, arg_FLCMPs *a)
5519 {
5520     TCGv_i32 src1, src2;
5521 
5522     if (!avail_VIS3(dc)) {
5523         return false;
5524     }
5525     if (gen_trap_ifnofpu(dc)) {
5526         return true;
5527     }
5528 
5529     src1 = gen_load_fpr_F(dc, a->rs1);
5530     src2 = gen_load_fpr_F(dc, a->rs2);
5531     gen_helper_flcmps(cpu_fcc[a->cc], src1, src2);
5532     return advance_pc(dc);
5533 }
5534 
/*
 * FLCMPd (VIS3): double-precision lexicographic compare into
 * %fcc[a->cc].  The helper takes no env pointer.
 */
static bool trans_FLCMPd(DisasContext *dc, arg_FLCMPd *a)
{
    TCGv_i64 src1, src2;

    if (!avail_VIS3(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    gen_helper_flcmpd(cpu_fcc[a->cc], src1, src2);
    return advance_pc(dc);
}
5551 
5552 static bool do_movf2r(DisasContext *dc, arg_r_r *a,
5553                       int (*offset)(unsigned int),
5554                       void (*load)(TCGv, TCGv_ptr, tcg_target_long))
5555 {
5556     TCGv dst;
5557 
5558     if (gen_trap_ifnofpu(dc)) {
5559         return true;
5560     }
5561     dst = gen_dest_gpr(dc, a->rd);
5562     load(dst, tcg_env, offset(a->rs));
5563     gen_store_gpr(dc, a->rd, dst);
5564     return advance_pc(dc);
5565 }
5566 
5567 TRANS(MOVsTOsw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32s_tl)
5568 TRANS(MOVsTOuw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32u_tl)
5569 TRANS(MOVdTOx, VIS3B, do_movf2r, a, gen_offset_fpr_D, tcg_gen_ld_tl)
5570 
/*
 * MOV*TO* (gpr -> fpr): store integer register rs into the FP
 * register file at offset(a->rd) in env.  @store picks the width
 * (st32/st).
 */
static bool do_movr2f(DisasContext *dc, arg_r_r *a,
                      int (*offset)(unsigned int),
                      void (*store)(TCGv, TCGv_ptr, tcg_target_long))
{
    TCGv src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    src = gen_load_gpr(dc, a->rs);
    store(src, tcg_env, offset(a->rd));
    return advance_pc(dc);
}

TRANS(MOVwTOs, VIS3B, do_movr2f, a, gen_offset_fpr_F, tcg_gen_st32_tl)
TRANS(MOVxTOd, VIS3B, do_movr2f, a, gen_offset_fpr_D, tcg_gen_st_tl)
5587 
/* Per-TB translator setup: decode TB flags into the DisasContext. */
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    int bound;

    dc->pc = dc->base.pc_first;
    /* The npc of the first insn is carried in cs_base. */
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &cpu_env(cs)->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
# ifdef TARGET_SPARC64
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
# else
    dc->fsr_qne = (dc->base.tb->flags & TB_FLAG_FSR_QNE) != 0;
# endif
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
5618 
static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
    /* Nothing to do beyond init_disas_context. */
}
5622 
/*
 * Record (pc, npc) for this insn so the state can be restored on an
 * exception.  An npc with low bits set is one of the symbolic values
 * (JUMP_PC, DYNAMIC_PC, DYNAMIC_PC_LOOKUP), which must be normalized
 * before being stored in the insn-start data.
 */
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            /* Encode the not-taken target in the low bits alongside
               the taken target; restore_state_to_opc decodes this. */
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}
5644 
/* Fetch and translate one fixed-width (4-byte) instruction. */
static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    unsigned int insn;

    insn = translator_ldl(cpu_env(cs), &dc->base, dc->pc);
    dc->base.pc_next += 4;

    /* Decoder rejection means an illegal instruction trap. */
    if (!decode(dc, insn)) {
        gen_exception(dc, TT_ILL_INSN);
    }

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    /* End the TB once dc->pc no longer tracks the sequential pc_next. */
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
5664 
/*
 * Finish the TB: emit the exit sequence appropriate to how pc/npc
 * ended up (static, symbolic, or noreturn), then expand any delayed
 * exception stubs queued during translation.
 */
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    finishing_insn(dc);

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        /*
         * At least one of pc/npc is symbolic.  Materialize both into
         * cpu_pc/cpu_npc; DYNAMIC_PC (as opposed to DYNAMIC_PC_LOOKUP)
         * forbids the TB-lookup fast exit.
         */
        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
       break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    /* Emit the out-of-line code for delay-slot exceptions. */
    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        /* An npc with low bits set is symbolic: leave cpu_npc as-is. */
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}
5746 
/* Hooks consumed by the generic translator loop. */
static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
};
5754 
/* Entry point: translate one TB via the generic translator loop. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}
5762 
/*
 * One-time registration of the TCG globals backing CPU state:
 * condition codes, pc/npc, the window-relative register pointer, and
 * the general registers.
 */
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };

    /* 32-bit globals; sparc64 has four fcc fields, sparc32 one. */
    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" },
        { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" },
        { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" },
        { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" },
#else
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" },
#endif
    };

    /* target_ulong-sized globals. */
    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
        { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
#endif
        { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
        { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
        { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
        { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    /* %g0 reads as zero and is never a real global. */
    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    /* Windowed registers live behind regwptr, not directly in env. */
    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }
}
5828 
/*
 * Rebuild env->pc/env->npc from the insn-start data recorded by
 * sparc_tr_insn_start.  The npc slot may hold a symbolic value:
 * DYNAMIC_PC means cpu_npc was already written at runtime; a value
 * with JUMP_PC set encodes the branch-taken target in its high bits,
 * with the not-taken target being pc + 4.
 */
void sparc_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    CPUSPARCState *env = cpu_env(cs);
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;
        } else {
            env->npc = pc + 4;
        }
    } else {
        env->npc = npc;
    }
}
5851