xref: /qemu/target/sparc/translate.c (revision 02326733)
1 /*
2    SPARC translation
3 
4    Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5    Copyright (C) 2003-2005 Fabrice Bellard
6 
7    This library is free software; you can redistribute it and/or
8    modify it under the terms of the GNU Lesser General Public
9    License as published by the Free Software Foundation; either
10    version 2.1 of the License, or (at your option) any later version.
11 
12    This library is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15    Lesser General Public License for more details.
16 
17    You should have received a copy of the GNU Lesser General Public
18    License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 
23 #include "cpu.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28 #include "tcg/tcg-op-gvec.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
31 #include "exec/log.h"
32 #include "asi.h"
33 
34 #define HELPER_H "helper.h"
35 #include "exec/helper-info.c.inc"
36 #undef  HELPER_H
37 
38 #ifdef TARGET_SPARC64
39 # define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
40 # define gen_helper_rdasr17(D, E)               qemu_build_not_reached()
41 # define gen_helper_rett(E)                     qemu_build_not_reached()
42 # define gen_helper_power_down(E)               qemu_build_not_reached()
43 # define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
44 #else
45 # define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
46 # define gen_helper_done(E)                     qemu_build_not_reached()
47 # define gen_helper_flushw(E)                   qemu_build_not_reached()
48 # define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
49 # define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
50 # define gen_helper_restored(E)                 qemu_build_not_reached()
51 # define gen_helper_retry(E)                    qemu_build_not_reached()
52 # define gen_helper_saved(E)                    qemu_build_not_reached()
53 # define gen_helper_set_softint(E, S)           qemu_build_not_reached()
54 # define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
55 # define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
56 # define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
57 # define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
58 # define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
59 # define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
60 # define gen_helper_write_softint(E, S)         qemu_build_not_reached()
61 # define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
62 # define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
63 # define gen_helper_fcmpeq16             ({ qemu_build_not_reached(); NULL; })
64 # define gen_helper_fcmpeq32             ({ qemu_build_not_reached(); NULL; })
65 # define gen_helper_fcmpgt16             ({ qemu_build_not_reached(); NULL; })
66 # define gen_helper_fcmpgt32             ({ qemu_build_not_reached(); NULL; })
67 # define gen_helper_fcmple16             ({ qemu_build_not_reached(); NULL; })
68 # define gen_helper_fcmple32             ({ qemu_build_not_reached(); NULL; })
69 # define gen_helper_fcmpne16             ({ qemu_build_not_reached(); NULL; })
70 # define gen_helper_fcmpne32             ({ qemu_build_not_reached(); NULL; })
71 # define gen_helper_fdtox                ({ qemu_build_not_reached(); NULL; })
72 # define gen_helper_fexpand              ({ qemu_build_not_reached(); NULL; })
73 # define gen_helper_fmul8sux16           ({ qemu_build_not_reached(); NULL; })
74 # define gen_helper_fmul8ulx16           ({ qemu_build_not_reached(); NULL; })
75 # define gen_helper_fmul8x16al           ({ qemu_build_not_reached(); NULL; })
76 # define gen_helper_fmul8x16au           ({ qemu_build_not_reached(); NULL; })
77 # define gen_helper_fmul8x16             ({ qemu_build_not_reached(); NULL; })
78 # define gen_helper_fmuld8sux16          ({ qemu_build_not_reached(); NULL; })
79 # define gen_helper_fmuld8ulx16          ({ qemu_build_not_reached(); NULL; })
80 # define gen_helper_fpmerge              ({ qemu_build_not_reached(); NULL; })
81 # define gen_helper_fqtox                ({ qemu_build_not_reached(); NULL; })
82 # define gen_helper_fstox                ({ qemu_build_not_reached(); NULL; })
83 # define gen_helper_fxtod                ({ qemu_build_not_reached(); NULL; })
84 # define gen_helper_fxtoq                ({ qemu_build_not_reached(); NULL; })
85 # define gen_helper_fxtos                ({ qemu_build_not_reached(); NULL; })
86 # define gen_helper_pdist                ({ qemu_build_not_reached(); NULL; })
87 # define MAXTL_MASK                             0
88 #endif
89 
90 /* Dynamic PC, must exit to main loop. */
91 #define DYNAMIC_PC         1
92 /* Dynamic PC, one of two values according to jump_pc[T2]. */
93 #define JUMP_PC            2
94 /* Dynamic PC, may lookup next TB. */
95 #define DYNAMIC_PC_LOOKUP  3
96 
97 #define DISAS_EXIT  DISAS_TARGET_0
98 
99 /* global register indexes */
100 static TCGv_ptr cpu_regwptr;
101 static TCGv cpu_pc, cpu_npc;
102 static TCGv cpu_regs[32];
103 static TCGv cpu_y;
104 static TCGv cpu_tbr;
105 static TCGv cpu_cond;
106 static TCGv cpu_cc_N;
107 static TCGv cpu_cc_V;
108 static TCGv cpu_icc_Z;
109 static TCGv cpu_icc_C;
110 #ifdef TARGET_SPARC64
111 static TCGv cpu_xcc_Z;
112 static TCGv cpu_xcc_C;
113 static TCGv_i32 cpu_fprs;
114 static TCGv cpu_gsr;
115 #else
116 # define cpu_fprs               ({ qemu_build_not_reached(); (TCGv)NULL; })
117 # define cpu_gsr                ({ qemu_build_not_reached(); (TCGv)NULL; })
118 #endif
119 
120 #ifdef TARGET_SPARC64
121 #define cpu_cc_Z  cpu_xcc_Z
122 #define cpu_cc_C  cpu_xcc_C
123 #else
124 #define cpu_cc_Z  cpu_icc_Z
125 #define cpu_cc_C  cpu_icc_C
126 #define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
127 #define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
128 #endif
129 
130 /* Floating point registers */
131 static TCGv_i64 cpu_fpr[TARGET_DPREGS];
132 static TCGv_i32 cpu_fcc[TARGET_FCCREGS];
133 
134 #define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
135 #ifdef TARGET_SPARC64
136 # define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
137 # define env64_field_offsetof(X)  env_field_offsetof(X)
138 #else
139 # define env32_field_offsetof(X)  env_field_offsetof(X)
140 # define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
141 #endif
142 
/* A fully-evaluated comparison: "c1 <cond> c2", with c2 an immediate. */
typedef struct DisasCompare {
    TCGCond cond;   /* TCG condition to apply */
    TCGv c1;        /* left-hand operand */
    int c2;         /* right-hand operand, small immediate (often 0) */
} DisasCompare;
148 
/*
 * An exception raised out of line via the label `lab`, with the pc/npc
 * of the insn that requested it saved so state can be restored first.
 */
typedef struct DisasDelayException {
    struct DisasDelayException *next;   /* next entry in dc->delay_excp_list */
    TCGLabel *lab;                      /* branch target that raises excp */
    TCGv_i32 excp;                      /* exception number to raise */
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;
157 
/*
 * Per-translation-block disassembly state.  pc/npc may hold one of the
 * DYNAMIC_PC / JUMP_PC / DYNAMIC_PC_LOOKUP markers defined above
 * instead of a real guest address.
 */
typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */

    /* Used when JUMP_PC value is used. */
    DisasCompare jump;
    target_ulong jump_pc[2];

    int mem_idx;               /* MMU index for memory accesses */
    bool cpu_cond_live;        /* cpu_cond holds a live value; see finishing_insn */
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;            /* FPRS dirty bits already set within this TB */
    int asi;
#endif
    DisasDelayException *delay_excp_list;  /* pending out-of-line exceptions */
} DisasContext;
185 
186 // This function uses non-native bit order
187 #define GET_FIELD(X, FROM, TO)                                  \
188     ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
189 
190 // This function uses the order in the manuals, i.e. bit 0 is 2^0
191 #define GET_FIELD_SP(X, FROM, TO)               \
192     GET_FIELD(X, 31 - (TO), 31 - (FROM))
193 
194 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
195 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
196 
197 #ifdef TARGET_SPARC64
198 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
199 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
200 #else
201 #define DFPREG(r) (r & 0x1e)
202 #define QFPREG(r) (r & 0x1c)
203 #endif
204 
205 #define UA2005_HTRAP_MASK 0xff
206 #define V8_TRAP_MASK 0x7f
207 
208 #define IS_IMM (insn & (1<<13))
209 
/*
 * Mark the written FP register bank dirty in FPRS (sparc64 only).
 * rd < 32 sets bit 0 (lower bank), rd >= 32 sets bit 1 (upper bank).
 * No-op on sparc32, which has no FPRS register.
 */
static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
222 
223 /* floating point registers moves */
224 static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
225 {
226     TCGv_i32 ret = tcg_temp_new_i32();
227     if (src & 1) {
228         tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
229     } else {
230         tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
231     }
232     return ret;
233 }
234 
235 static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
236 {
237     TCGv_i64 t = tcg_temp_new_i64();
238 
239     tcg_gen_extu_i32_i64(t, v);
240     tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
241                         (dst & 1 ? 0 : 32), 32);
242     gen_update_fprs_dirty(dc, dst);
243 }
244 
245 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
246 {
247     src = DFPREG(src);
248     return cpu_fpr[src / 2];
249 }
250 
251 static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
252 {
253     dst = DFPREG(dst);
254     tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
255     gen_update_fprs_dirty(dc, dst);
256 }
257 
258 static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
259 {
260     return cpu_fpr[DFPREG(dst) / 2];
261 }
262 
/* Read quad-precision register %f<src> into a fresh i128. */
static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
{
    TCGv_i128 ret = tcg_temp_new_i128();

    src = QFPREG(src);
    /* fpr[src/2] is the most-significant half of the quad. */
    tcg_gen_concat_i64_i128(ret, cpu_fpr[src / 2 + 1], cpu_fpr[src / 2]);
    return ret;
}
271 
/* Write an i128 value to quad-precision register %f<dst>. */
static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
{
    /*
     * NOTE(review): uses DFPREG where gen_load_fpr_Q uses QFPREG.  For a
     * properly aligned quad register (bit 1 clear) the two macros agree,
     * so this appears intentional -- confirm against insn decode.
     */
    dst = DFPREG(dst);
    tcg_gen_extr_i128_i64(cpu_fpr[dst / 2 + 1], cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}
278 
279 /* moves */
280 #ifdef CONFIG_USER_ONLY
281 #define supervisor(dc) 0
282 #define hypervisor(dc) 0
283 #else
284 #ifdef TARGET_SPARC64
285 #define hypervisor(dc) (dc->hypervisor)
286 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
287 #else
288 #define supervisor(dc) (dc->supervisor)
289 #define hypervisor(dc) 0
290 #endif
291 #endif
292 
293 #if !defined(TARGET_SPARC64)
294 # define AM_CHECK(dc)  false
295 #elif defined(TARGET_ABI32)
296 # define AM_CHECK(dc)  true
297 #elif defined(CONFIG_USER_ONLY)
298 # define AM_CHECK(dc)  false
299 #else
300 # define AM_CHECK(dc)  ((dc)->address_mask_32bit)
301 #endif
302 
303 static void gen_address_mask(DisasContext *dc, TCGv addr)
304 {
305     if (AM_CHECK(dc)) {
306         tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
307     }
308 }
309 
310 static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
311 {
312     return AM_CHECK(dc) ? (uint32_t)addr : addr;
313 }
314 
315 static TCGv gen_load_gpr(DisasContext *dc, int reg)
316 {
317     if (reg > 0) {
318         assert(reg < 32);
319         return cpu_regs[reg];
320     } else {
321         TCGv t = tcg_temp_new();
322         tcg_gen_movi_tl(t, 0);
323         return t;
324     }
325 }
326 
327 static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
328 {
329     if (reg > 0) {
330         assert(reg < 32);
331         tcg_gen_mov_tl(cpu_regs[reg], v);
332     }
333 }
334 
335 static TCGv gen_dest_gpr(DisasContext *dc, int reg)
336 {
337     if (reg > 0) {
338         assert(reg < 32);
339         return cpu_regs[reg];
340     } else {
341         return tcg_temp_new();
342     }
343 }
344 
345 static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
346 {
347     return translator_use_goto_tb(&s->base, pc) &&
348            translator_use_goto_tb(&s->base, npc);
349 }
350 
/*
 * End the TB, transferring control to (pc, npc): chained directly via
 * goto_tb when permitted, otherwise through the indirect TB lookup.
 */
static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc))  {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}
367 
/*
 * Return icc.C as a 0/1 value.  With 64-bit target longs, the 32-bit
 * carry is kept in bit 32 of cpu_icc_C and must be extracted; with
 * 32-bit target longs cpu_icc_C already holds 0 or 1.
 */
static TCGv gen_carry32(void)
{
    if (TARGET_LONG_BITS == 64) {
        TCGv t = tcg_temp_new();
        tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
        return t;
    }
    return cpu_icc_C;
}
377 
/*
 * dst = src1 + src2 (+ cin), computing all condition codes:
 * N gets the raw result, C the carry-out, V the signed overflow,
 * Z a copy of the result (tested against zero by consumers).
 * With 64-bit target longs, the 32-bit (icc) C and Z are derived too.
 */
static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    /* V = (r ^ s1) & ~(s1 ^ s2): overflow iff operands agree in sign
       and the result disagrees.  Z temporarily holds s1 ^ s2.  */
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
402 
/* ADDcc: add without carry-in, setting condition codes. */
static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}
407 
/*
 * TADDcc: tagged add.  Performs a normal ADDcc, then forces icc.V if
 * either operand has nonzero tag bits (the low two bits).
 */
static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}
423 
/* ADDC: dst = src1 + src2 + icc.C, leaving condition codes untouched. */
static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}

/* ADDCcc: add with carry-in, setting condition codes. */
static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}
434 
/*
 * dst = src1 - src2 (- cin), computing all condition codes.
 * The borrow from sub2 is -1/0; it is negated to give C as 0/1.
 * V = (r ^ s1) & (s1 ^ s2): signed overflow on subtraction.
 */
static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
    tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
#ifdef TARGET_SPARC64
    /* Carry-in to bit 32 is result ^ src1 ^ src2 (src xor held in Z). */
    tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
456 
/* SUBcc: subtract without borrow-in, setting condition codes. */
static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}
461 
/*
 * TSUBcc: tagged subtract.  Performs a normal SUBcc, then forces icc.V
 * if either operand has nonzero tag bits (the low two bits).
 */
static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}
477 
/* SUBC: dst = src1 - src2 - icc.C, leaving condition codes untouched. */
static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}

/* SUBCcc: subtract with borrow-in, setting condition codes. */
static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}
488 
/*
 * MULScc: one step of the Booth-style multiply-step sequence.
 * Shifts the multiplier through %y one bit at a time, conditionally
 * adding src2, and ends with an ADDcc that sets the flags.
 */
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv one = tcg_constant_tl(1);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}
525 
/*
 * 32x32 -> 64 multiply of the truncated operands.  The high 32 bits of
 * the product go to %y; dst receives the low 32 bits on sparc32 and the
 * full 64-bit product on sparc64.  sign_ext selects signed vs unsigned.
 */
static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
550 
/* UMUL: unsigned 32x32 multiply; see gen_op_multiply. */
static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

/* SMUL: signed 32x32 multiply; see gen_op_multiply. */
static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
562 
/*
 * SDIV via helper (which may raise a trap, e.g. division by zero);
 * only the low 32 bits of the helper result are the quotient.
 */
static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}
574 
/*
 * UDIVcc: unsigned divide, setting condition codes.  The code below
 * unpacks the helper's 64-bit result as: low 32 bits = quotient (-> N),
 * upper bits = overflow indication (-> V).  Z mirrors N; C is cleared.
 */
static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
599 
/*
 * SDIVcc: signed divide, setting condition codes.  Mirrors
 * gen_op_udivcc but sign-extends the 32-bit quotient into N.
 */
static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
624 
/* TADDccTV: tagged add, trap on overflow; fully handled by the helper. */
static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

/* TSUBccTV: tagged subtract, trap on overflow. */
static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

/*
 * POPC: population count of src2.  src1 is intentionally unused; the
 * parameter is kept for the uniform three-operand gen_op signature.
 */
static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}
639 
#ifndef TARGET_SPARC64
/* Stub: the array8 helper exists only on sparc64; never reached here. */
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif
646 
/* ARRAY16: the array8 result scaled by 2. */
static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

/* ARRAY32: the array8 result scaled by 4. */
static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}
658 
/* FPACK16 (sparc64 VIS helper, parameterized by %gsr); unreachable on sparc32. */
static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

/* FPACKFIX (sparc64 only). */
static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

/* FPACK32 (sparc64 only). */
static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
685 
/*
 * FALIGNDATA: concatenate s1:s2 and extract 8 bytes starting at the
 * byte offset held in the low 3 bits of %gsr (sparc64 only).
 */
static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    /* shift = GSR.align * 8 bits */
    tcg_gen_andi_tl(shift, cpu_gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}
712 
/* BSHUFFLE (sparc64 helper, parameterized by %gsr); unreachable on sparc32. */
static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
721 
/*
 * Called once the current insn can no longer raise an unwinding
 * exception: the lazily-maintained cpu_cond value may be dropped.
 */
static void finishing_insn(DisasContext *dc)
{
    /*
     * From here, there is no future path through an unwinding exception.
     * If the current insn cannot raise an exception, the computation of
     * cpu_cond may be able to be elided.
     */
    if (dc->cpu_cond_live) {
        tcg_gen_discard_tl(cpu_cond);
        dc->cpu_cond_live = false;
    }
}
734 
735 static void gen_generic_branch(DisasContext *dc)
736 {
737     TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
738     TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
739     TCGv c2 = tcg_constant_tl(dc->jump.c2);
740 
741     tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
742 }
743 
744 /* call this function before using the condition register as it may
745    have been set for a jump */
746 static void flush_cond(DisasContext *dc)
747 {
748     if (dc->npc == JUMP_PC) {
749         gen_generic_branch(dc);
750         dc->npc = DYNAMIC_PC_LOOKUP;
751     }
752 }
753 
/* Flush dc->npc into cpu_npc, resolving any pending conditional jump. */
static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            /* Resolve the two-way jump into cpu_npc now. */
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* cpu_npc already holds the correct value. */
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}
772 
/* Flush dc->pc and dc->npc to the CPU state (e.g. before an exception). */
static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
778 
/* Raise exception <which> at the current insn; terminates the TB. */
static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}
786 
787 static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
788 {
789     DisasDelayException *e = g_new0(DisasDelayException, 1);
790 
791     e->next = dc->delay_excp_list;
792     dc->delay_excp_list = e;
793 
794     e->lab = gen_new_label();
795     e->excp = excp;
796     e->pc = dc->pc;
797     /* Caller must have used flush_cond before branch. */
798     assert(e->npc != JUMP_PC);
799     e->npc = dc->npc;
800 
801     return e->lab;
802 }
803 
/* As delay_exceptionv, with a constant exception number. */
static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}
808 
/*
 * Emit a runtime check that addr has none of the bits in <mask> set,
 * branching to a delayed TT_UNALIGNED exception otherwise.
 */
static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}
820 
/* Advance: pc = npc, resolving dynamic npc markers into cpu_pc as needed. */
static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            /* Resolve the two-way jump, then copy it to cpu_pc. */
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}
844 
845 static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
846                         DisasContext *dc)
847 {
848     TCGv t1;
849 
850     cmp->c1 = t1 = tcg_temp_new();
851     cmp->c2 = 0;
852 
853     switch (cond & 7) {
854     case 0x0: /* never */
855         cmp->cond = TCG_COND_NEVER;
856         cmp->c1 = tcg_constant_tl(0);
857         break;
858 
859     case 0x1: /* eq: Z */
860         cmp->cond = TCG_COND_EQ;
861         if (TARGET_LONG_BITS == 32 || xcc) {
862             tcg_gen_mov_tl(t1, cpu_cc_Z);
863         } else {
864             tcg_gen_ext32u_tl(t1, cpu_icc_Z);
865         }
866         break;
867 
868     case 0x2: /* le: Z | (N ^ V) */
869         /*
870          * Simplify:
871          *   cc_Z || (N ^ V) < 0        NE
872          *   cc_Z && !((N ^ V) < 0)     EQ
873          *   cc_Z & ~((N ^ V) >> TLB)   EQ
874          */
875         cmp->cond = TCG_COND_EQ;
876         tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
877         tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
878         tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
879         if (TARGET_LONG_BITS == 64 && !xcc) {
880             tcg_gen_ext32u_tl(t1, t1);
881         }
882         break;
883 
884     case 0x3: /* lt: N ^ V */
885         cmp->cond = TCG_COND_LT;
886         tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
887         if (TARGET_LONG_BITS == 64 && !xcc) {
888             tcg_gen_ext32s_tl(t1, t1);
889         }
890         break;
891 
892     case 0x4: /* leu: Z | C */
893         /*
894          * Simplify:
895          *   cc_Z == 0 || cc_C != 0     NE
896          *   cc_Z != 0 && cc_C == 0     EQ
897          *   cc_Z & (cc_C ? 0 : -1)     EQ
898          *   cc_Z & (cc_C - 1)          EQ
899          */
900         cmp->cond = TCG_COND_EQ;
901         if (TARGET_LONG_BITS == 32 || xcc) {
902             tcg_gen_subi_tl(t1, cpu_cc_C, 1);
903             tcg_gen_and_tl(t1, t1, cpu_cc_Z);
904         } else {
905             tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
906             tcg_gen_subi_tl(t1, t1, 1);
907             tcg_gen_and_tl(t1, t1, cpu_icc_Z);
908             tcg_gen_ext32u_tl(t1, t1);
909         }
910         break;
911 
912     case 0x5: /* ltu: C */
913         cmp->cond = TCG_COND_NE;
914         if (TARGET_LONG_BITS == 32 || xcc) {
915             tcg_gen_mov_tl(t1, cpu_cc_C);
916         } else {
917             tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
918         }
919         break;
920 
921     case 0x6: /* neg: N */
922         cmp->cond = TCG_COND_LT;
923         if (TARGET_LONG_BITS == 32 || xcc) {
924             tcg_gen_mov_tl(t1, cpu_cc_N);
925         } else {
926             tcg_gen_ext32s_tl(t1, cpu_cc_N);
927         }
928         break;
929 
930     case 0x7: /* vs: V */
931         cmp->cond = TCG_COND_LT;
932         if (TARGET_LONG_BITS == 32 || xcc) {
933             tcg_gen_mov_tl(t1, cpu_cc_V);
934         } else {
935             tcg_gen_ext32s_tl(t1, cpu_cc_V);
936         }
937         break;
938     }
939     if (cond & 8) {
940         cmp->cond = tcg_invert_cond(cmp->cond);
941     }
942 }
943 
/*
 * Assemble a DisasCompare for float condition <cond> against fcc<cc>.
 * Conditions 8..15 are the negations of 0..7.
 */
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    TCGv_i32 fcc = cpu_fcc[cc];
    TCGv_i32 c1 = fcc;
    int c2 = 0;
    TCGCond tcond;

    /*
     * FCC values:
     * 0 =
     * 1 <
     * 2 >
     * 3 unordered
     */
    switch (cond & 7) {
    case 0x0: /* fbn */
        tcond = TCG_COND_NEVER;
        break;
    case 0x1: /* fbne : !0 */
        tcond = TCG_COND_NE;
        break;
    case 0x2: /* fblg : 1 or 2 */
        /* fcc in {1,2} - 1 -> fcc in {0,1} */
        c1 = tcg_temp_new_i32();
        tcg_gen_addi_i32(c1, fcc, -1);
        c2 = 1;
        tcond = TCG_COND_LEU;
        break;
    case 0x3: /* fbul : 1 or 3 */
        /* both values have bit 0 set */
        c1 = tcg_temp_new_i32();
        tcg_gen_andi_i32(c1, fcc, 1);
        tcond = TCG_COND_NE;
        break;
    case 0x4: /* fbl  : 1 */
        c2 = 1;
        tcond = TCG_COND_EQ;
        break;
    case 0x5: /* fbug : 2 or 3 */
        c2 = 2;
        tcond = TCG_COND_GEU;
        break;
    case 0x6: /* fbg  : 2 */
        c2 = 2;
        tcond = TCG_COND_EQ;
        break;
    case 0x7: /* fbu  : 3 */
        c2 = 3;
        tcond = TCG_COND_EQ;
        break;
    }
    if (cond & 8) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c2 = c2;
    /* Widen the 32-bit fcc value to a target-long comparison operand. */
    cmp->c1 = tcg_temp_new();
    tcg_gen_extu_i32_tl(cmp->c1, c1);
}
1003 
1004 static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1005 {
1006     static const TCGCond cond_reg[4] = {
1007         TCG_COND_NEVER,  /* reserved */
1008         TCG_COND_EQ,
1009         TCG_COND_LE,
1010         TCG_COND_LT,
1011     };
1012     TCGCond tcond;
1013 
1014     if ((cond & 3) == 0) {
1015         return false;
1016     }
1017     tcond = cond_reg[cond & 3];
1018     if (cond & 4) {
1019         tcond = tcg_invert_cond(tcond);
1020     }
1021 
1022     cmp->cond = tcond;
1023     cmp->c1 = tcg_temp_new();
1024     cmp->c2 = 0;
1025     tcg_gen_mov_tl(cmp->c1, r_src);
1026     return true;
1027 }
1028 
/* Zero the accumulated FSR.cexc and FSR.ftt fields. */
static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
}
1034 
/* FMOVs: 32-bit fp register move; clears cexc/ftt first. */
static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}
1040 
/* FNEGs: flip the sign bit (bit 31); clears cexc/ftt first. */
static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i32(dst, src, 1u << 31);
}
1046 
/* FABSs: clear the sign bit (bit 31); clears cexc/ftt first. */
static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i32(dst, src, ~(1u << 31));
}
1052 
/* FMOVd: 64-bit fp register move; clears cexc/ftt first. */
static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}
1058 
/* FNEGd: flip the sign bit (bit 63); clears cexc/ftt first. */
static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i64(dst, src, 1ull << 63);
}
1064 
/* FABSd: clear the sign bit (bit 63); clears cexc/ftt first. */
static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i64(dst, src, ~(1ull << 63));
}
1070 
/*
 * FNEGq: flip the sign bit in the high half of the 128-bit value.
 * NOTE(review): unlike the 32/64-bit variants, this does not clear
 * cexc/ftt — confirm this is intentional.
 */
static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_xori_i64(h, h, 1ull << 63);
    tcg_gen_concat_i64_i128(dst, l, h);
}
1080 
/*
 * FABSq: clear the sign bit in the high half of the 128-bit value.
 * NOTE(review): unlike the 32/64-bit variants, this does not clear
 * cexc/ftt — confirm this is intentional.
 */
static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_andi_i64(h, h, ~(1ull << 63));
    tcg_gen_concat_i64_i128(dst, l, h);
}
1090 
/* Raise an fp exception (TT_FP_EXCP) with FSR.ftt set to FTT. */
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
{
    /*
     * CEXC is only set when successfully completing an FPop,
     * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
     * Thus we can simply store FTT into this field.
     */
    tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
    gen_exception(dc, TT_FP_EXCP);
}
1102 
/*
 * If the FPU is disabled, raise TT_NFPU_INSN and return nonzero so the
 * caller can stop translating the insn.  The check is compiled out for
 * user-only builds, where the FPU is always available.
 */
static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}
1113 
/* asi moves */
typedef enum {
    GET_ASI_HELPER,  /* not handled inline; call the generic asi helper */
    GET_ASI_EXCP,    /* an exception was generated; emit no access */
    GET_ASI_DIRECT,  /* plain qemu_ld/st with the resolved mmu index */
    GET_ASI_DTWINX,  /* v9 128-bit twin-register load/store */
    GET_ASI_CODE,    /* v8 instruction-space access (user/supervisor text) */
    GET_ASI_BLOCK,   /* 64-byte block load/store of fp registers */
    GET_ASI_SHORT,   /* 8/16-bit "short" fp load/store */
    GET_ASI_BCOPY,   /* hyperSPARC 32-byte block copy */
    GET_ASI_BFILL,   /* hyperSPARC 32-byte block fill */
} ASIType;
1126 
/* Fully resolved description of an ASI-qualified memory access. */
typedef struct {
    ASIType type;    /* how the access is implemented */
    int asi;         /* the resolved asi number, for helper calls */
    int mem_idx;     /* mmu index to use for direct accesses */
    MemOp memop;     /* size/sign/endianness of the access */
} DisasASI;
1133 
1134 /*
1135  * Build DisasASI.
1136  * For asi == -1, treat as non-asi.
1137  * For ask == -2, treat as immediate offset (v8 error, v9 %asi).
1138  */
1139 static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
1140 {
1141     ASIType type = GET_ASI_HELPER;
1142     int mem_idx = dc->mem_idx;
1143 
1144     if (asi == -1) {
1145         /* Artificial "non-asi" case. */
1146         type = GET_ASI_DIRECT;
1147         goto done;
1148     }
1149 
1150 #ifndef TARGET_SPARC64
1151     /* Before v9, all asis are immediate and privileged.  */
1152     if (asi < 0) {
1153         gen_exception(dc, TT_ILL_INSN);
1154         type = GET_ASI_EXCP;
1155     } else if (supervisor(dc)
1156                /* Note that LEON accepts ASI_USERDATA in user mode, for
1157                   use with CASA.  Also note that previous versions of
1158                   QEMU allowed (and old versions of gcc emitted) ASI_P
1159                   for LEON, which is incorrect.  */
1160                || (asi == ASI_USERDATA
1161                    && (dc->def->features & CPU_FEATURE_CASA))) {
1162         switch (asi) {
1163         case ASI_USERDATA:    /* User data access */
1164             mem_idx = MMU_USER_IDX;
1165             type = GET_ASI_DIRECT;
1166             break;
1167         case ASI_KERNELDATA:  /* Supervisor data access */
1168             mem_idx = MMU_KERNEL_IDX;
1169             type = GET_ASI_DIRECT;
1170             break;
1171         case ASI_USERTXT:     /* User text access */
1172             mem_idx = MMU_USER_IDX;
1173             type = GET_ASI_CODE;
1174             break;
1175         case ASI_KERNELTXT:   /* Supervisor text access */
1176             mem_idx = MMU_KERNEL_IDX;
1177             type = GET_ASI_CODE;
1178             break;
1179         case ASI_M_BYPASS:    /* MMU passthrough */
1180         case ASI_LEON_BYPASS: /* LEON MMU passthrough */
1181             mem_idx = MMU_PHYS_IDX;
1182             type = GET_ASI_DIRECT;
1183             break;
1184         case ASI_M_BCOPY: /* Block copy, sta access */
1185             mem_idx = MMU_KERNEL_IDX;
1186             type = GET_ASI_BCOPY;
1187             break;
1188         case ASI_M_BFILL: /* Block fill, stda access */
1189             mem_idx = MMU_KERNEL_IDX;
1190             type = GET_ASI_BFILL;
1191             break;
1192         }
1193 
1194         /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
1195          * permissions check in get_physical_address(..).
1196          */
1197         mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
1198     } else {
1199         gen_exception(dc, TT_PRIV_INSN);
1200         type = GET_ASI_EXCP;
1201     }
1202 #else
1203     if (asi < 0) {
1204         asi = dc->asi;
1205     }
1206     /* With v9, all asis below 0x80 are privileged.  */
1207     /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
1208        down that bit into DisasContext.  For the moment that's ok,
1209        since the direct implementations below doesn't have any ASIs
1210        in the restricted [0x30, 0x7f] range, and the check will be
1211        done properly in the helper.  */
1212     if (!supervisor(dc) && asi < 0x80) {
1213         gen_exception(dc, TT_PRIV_ACT);
1214         type = GET_ASI_EXCP;
1215     } else {
1216         switch (asi) {
1217         case ASI_REAL:      /* Bypass */
1218         case ASI_REAL_IO:   /* Bypass, non-cacheable */
1219         case ASI_REAL_L:    /* Bypass LE */
1220         case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1221         case ASI_TWINX_REAL:   /* Real address, twinx */
1222         case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1223         case ASI_QUAD_LDD_PHYS:
1224         case ASI_QUAD_LDD_PHYS_L:
1225             mem_idx = MMU_PHYS_IDX;
1226             break;
1227         case ASI_N:  /* Nucleus */
1228         case ASI_NL: /* Nucleus LE */
1229         case ASI_TWINX_N:
1230         case ASI_TWINX_NL:
1231         case ASI_NUCLEUS_QUAD_LDD:
1232         case ASI_NUCLEUS_QUAD_LDD_L:
1233             if (hypervisor(dc)) {
1234                 mem_idx = MMU_PHYS_IDX;
1235             } else {
1236                 mem_idx = MMU_NUCLEUS_IDX;
1237             }
1238             break;
1239         case ASI_AIUP:  /* As if user primary */
1240         case ASI_AIUPL: /* As if user primary LE */
1241         case ASI_TWINX_AIUP:
1242         case ASI_TWINX_AIUP_L:
1243         case ASI_BLK_AIUP_4V:
1244         case ASI_BLK_AIUP_L_4V:
1245         case ASI_BLK_AIUP:
1246         case ASI_BLK_AIUPL:
1247             mem_idx = MMU_USER_IDX;
1248             break;
1249         case ASI_AIUS:  /* As if user secondary */
1250         case ASI_AIUSL: /* As if user secondary LE */
1251         case ASI_TWINX_AIUS:
1252         case ASI_TWINX_AIUS_L:
1253         case ASI_BLK_AIUS_4V:
1254         case ASI_BLK_AIUS_L_4V:
1255         case ASI_BLK_AIUS:
1256         case ASI_BLK_AIUSL:
1257             mem_idx = MMU_USER_SECONDARY_IDX;
1258             break;
1259         case ASI_S:  /* Secondary */
1260         case ASI_SL: /* Secondary LE */
1261         case ASI_TWINX_S:
1262         case ASI_TWINX_SL:
1263         case ASI_BLK_COMMIT_S:
1264         case ASI_BLK_S:
1265         case ASI_BLK_SL:
1266         case ASI_FL8_S:
1267         case ASI_FL8_SL:
1268         case ASI_FL16_S:
1269         case ASI_FL16_SL:
1270             if (mem_idx == MMU_USER_IDX) {
1271                 mem_idx = MMU_USER_SECONDARY_IDX;
1272             } else if (mem_idx == MMU_KERNEL_IDX) {
1273                 mem_idx = MMU_KERNEL_SECONDARY_IDX;
1274             }
1275             break;
1276         case ASI_P:  /* Primary */
1277         case ASI_PL: /* Primary LE */
1278         case ASI_TWINX_P:
1279         case ASI_TWINX_PL:
1280         case ASI_BLK_COMMIT_P:
1281         case ASI_BLK_P:
1282         case ASI_BLK_PL:
1283         case ASI_FL8_P:
1284         case ASI_FL8_PL:
1285         case ASI_FL16_P:
1286         case ASI_FL16_PL:
1287             break;
1288         }
1289         switch (asi) {
1290         case ASI_REAL:
1291         case ASI_REAL_IO:
1292         case ASI_REAL_L:
1293         case ASI_REAL_IO_L:
1294         case ASI_N:
1295         case ASI_NL:
1296         case ASI_AIUP:
1297         case ASI_AIUPL:
1298         case ASI_AIUS:
1299         case ASI_AIUSL:
1300         case ASI_S:
1301         case ASI_SL:
1302         case ASI_P:
1303         case ASI_PL:
1304             type = GET_ASI_DIRECT;
1305             break;
1306         case ASI_TWINX_REAL:
1307         case ASI_TWINX_REAL_L:
1308         case ASI_TWINX_N:
1309         case ASI_TWINX_NL:
1310         case ASI_TWINX_AIUP:
1311         case ASI_TWINX_AIUP_L:
1312         case ASI_TWINX_AIUS:
1313         case ASI_TWINX_AIUS_L:
1314         case ASI_TWINX_P:
1315         case ASI_TWINX_PL:
1316         case ASI_TWINX_S:
1317         case ASI_TWINX_SL:
1318         case ASI_QUAD_LDD_PHYS:
1319         case ASI_QUAD_LDD_PHYS_L:
1320         case ASI_NUCLEUS_QUAD_LDD:
1321         case ASI_NUCLEUS_QUAD_LDD_L:
1322             type = GET_ASI_DTWINX;
1323             break;
1324         case ASI_BLK_COMMIT_P:
1325         case ASI_BLK_COMMIT_S:
1326         case ASI_BLK_AIUP_4V:
1327         case ASI_BLK_AIUP_L_4V:
1328         case ASI_BLK_AIUP:
1329         case ASI_BLK_AIUPL:
1330         case ASI_BLK_AIUS_4V:
1331         case ASI_BLK_AIUS_L_4V:
1332         case ASI_BLK_AIUS:
1333         case ASI_BLK_AIUSL:
1334         case ASI_BLK_S:
1335         case ASI_BLK_SL:
1336         case ASI_BLK_P:
1337         case ASI_BLK_PL:
1338             type = GET_ASI_BLOCK;
1339             break;
1340         case ASI_FL8_S:
1341         case ASI_FL8_SL:
1342         case ASI_FL8_P:
1343         case ASI_FL8_PL:
1344             memop = MO_UB;
1345             type = GET_ASI_SHORT;
1346             break;
1347         case ASI_FL16_S:
1348         case ASI_FL16_SL:
1349         case ASI_FL16_P:
1350         case ASI_FL16_PL:
1351             memop = MO_TEUW;
1352             type = GET_ASI_SHORT;
1353             break;
1354         }
1355         /* The little-endian asis all have bit 3 set.  */
1356         if (asi & 8) {
1357             memop ^= MO_BSWAP;
1358         }
1359     }
1360 #endif
1361 
1362  done:
1363     return (DisasASI){ type, asi, mem_idx, memop };
1364 }
1365 
#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
/*
 * Stubs to satisfy references in code below.  For sparc32 user-only,
 * resolve_asi rejects all explicit asis (GET_ASI_EXCP), so these can
 * never actually be called.
 */
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}

static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
#endif
1379 
/* Generate an integer load to DST from ADDR via the resolved asi DA. */
static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_CODE:
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 t64 = tcg_temp_new_i64();

            gen_helper_ld_code(t64, tcg_env, addr, tcg_constant_i32(oi));
            tcg_gen_trunc_i64_tl(dst, t64);
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* The helper may raise an exception; sync pc/npc first. */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
            {
                /* The helper always returns 64 bits; narrow for sparc32. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}
1425 
/* Generate an integer store of SRC to ADDR via the resolved asi DA. */
static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX: /* Reserved for stda.  */
        if (TARGET_LONG_BITS == 32) {
            gen_exception(dc, TT_ILL_INSN);
            break;
        } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            break;
        }
        /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
        /* fall through */

    case GET_ASI_DIRECT:
        tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_BCOPY:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Copy 32 bytes from the address in SRC to ADDR.
         *
         * From Ross RT625 hyperSPARC manual, section 4.6:
         * "Block Copy and Block Fill will work only on cache line boundaries."
         *
         * It does not specify if an unaligned address is truncated or trapped.
         * Previous qemu behaviour was to truncate to 4 byte alignment, which
         * is obviously wrong.  The only place I can see this used is in the
         * Linux kernel which begins with page alignment, advancing by 32,
         * so is always aligned.  Assume truncation as the simpler option.
         *
         * Since the loads and stores are paired, allow the copy to happen
         * in the host endianness.  The copy need not be atomic.
         */
        {
            MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv_i128 tmp = tcg_temp_new_i128();

            /* Truncate both addresses to the 32-byte cache line. */
            tcg_gen_andi_tl(saddr, src, -32);
            tcg_gen_andi_tl(daddr, addr, -32);
            /* Copy the line as two 16-byte halves. */
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(saddr, saddr, 16);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* The helper may raise an exception; sync pc/npc first. */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
            {
                /* The helper takes 64 bits; widen for sparc32. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1504 
1505 static void gen_swap_asi(DisasContext *dc, DisasASI *da,
1506                          TCGv dst, TCGv src, TCGv addr)
1507 {
1508     switch (da->type) {
1509     case GET_ASI_EXCP:
1510         break;
1511     case GET_ASI_DIRECT:
1512         tcg_gen_atomic_xchg_tl(dst, addr, src,
1513                                da->mem_idx, da->memop | MO_ALIGN);
1514         break;
1515     default:
1516         /* ??? Should be DAE_invalid_asi.  */
1517         gen_exception(dc, TT_DATA_ACCESS);
1518         break;
1519     }
1520 }
1521 
1522 static void gen_cas_asi(DisasContext *dc, DisasASI *da,
1523                         TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
1524 {
1525     switch (da->type) {
1526     case GET_ASI_EXCP:
1527         return;
1528     case GET_ASI_DIRECT:
1529         tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
1530                                   da->mem_idx, da->memop | MO_ALIGN);
1531         break;
1532     default:
1533         /* ??? Should be DAE_invalid_asi.  */
1534         gen_exception(dc, TT_DATA_ACCESS);
1535         break;
1536     }
1537 }
1538 
/*
 * LDSTUB via asi: atomically load the byte at ADDR into DST and
 * store 0xff in its place.
 */
static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
                               da->mem_idx, MO_UB);
        break;
    default:
        /* ??? In theory, this should be raise DAE_invalid_asi.
           But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
            /* Cannot emulate atomically via helpers; retry exclusively. */
            gen_helper_exit_atomic(tcg_env);
        } else {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
            TCGv_i64 s64, t64;

            /* The helpers may raise exceptions; sync pc/npc first. */
            save_state(dc);
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);

            s64 = tcg_constant_i64(0xff);
            gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);

            tcg_gen_trunc_i64_tl(dst, t64);

            /* End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1573 
/*
 * Floating-point load via asi (ldfa/lddfa/ldqfa) into register RD.
 * ORIG_SIZE is the operand size from the instruction; a 128-bit access
 * is split into two 64-bit accesses below.
 */
static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = tcg_temp_new_i32();
            tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
            gen_store_fpr_F(dc, rd, d32);
            break;

        case MO_64:
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop);
            break;

        case MO_128:
            /* Stage the first half in a temp so that rd is left
               unmodified if the second load faults.  */
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
            tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only.  */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment.  */
            addr_tmp = tcg_temp_new();
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only.  */
        if (orig_size == MO_64) {
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them.  */
            switch (size) {
            case MO_32:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                d32 = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(d32, d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case MO_64:
                gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr,
                                  r_asi, r_mop);
                break;
            case MO_128:
                /* As above, stage the first half in a temp. */
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                addr_tmp = tcg_temp_new();
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr_tmp,
                                  r_asi, r_mop);
                tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
                break;
            default:
                g_assert_not_reached();
            }
        }
        break;
    }
}
1685 
/*
 * Floating-point store via asi (stfa/stdfa/stqfa) from register RD.
 * ORIG_SIZE is the operand size from the instruction; a 128-bit access
 * is split into two 64-bit accesses below.
 */
static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = gen_load_fpr_F(dc, rd);
            tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
            break;
        case MO_64:
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN_4);
            break;
        case MO_128:
            /* Only 4-byte alignment required.  However, it is legal for the
               cpu to signal the alignment fault, and the OS trap handler is
               required to fix it up.  Requiring 16-byte alignment here avoids
               having to probe the second page before performing the first
               write.  */
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN_16);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for stdfa on aligned registers only.  */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment.  */
            addr_tmp = tcg_temp_new();
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for stdfa only.  */
        if (orig_size == MO_64) {
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        /* According to the table in the UA2011 manual, the only
           other asis that are valid for stfa/stdfa/stqfa are
           the PST* asis, which aren't currently handled.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    }
}
1768 
/*
 * LDDA via asi: load a doubleword into the even/odd register pair
 * RD/RD+1 (or a twin-register 128-bit load on v9).
 */
static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv lo = gen_dest_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        return;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE load, so must swap
             * the order of the writebacks.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i128_i64(lo, hi, t);
            } else {
                tcg_gen_extr_i128_i64(hi, lo, t);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);

            /* Note that LE ldda acts as if each 32-bit register
               result is byte swapped.  Having just performed one
               64-bit bswap, we need now to swap the writebacks.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;

    case GET_ASI_CODE:
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 tmp = tcg_temp_new_i64();

            gen_helper_ld_code(tmp, tcg_env, addr, tcg_constant_i32(oi));

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for ldda, and this should raise DAE_invalid_asi.  However,
           real hardware allows others.  This can be seen with e.g.
           FreeBSD 10.3 wrt ASI_IC_TAG.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 tmp = tcg_temp_new_i64();

            save_state(dc);
            gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
    }

    /* Write back both halves of the register pair. */
    gen_store_gpr(dc, rd, hi);
    gen_store_gpr(dc, rd + 1, lo);
}
1864 
/*
 * STDA via asi: store the even/odd register pair RD/RD+1 as a
 * doubleword (or a twin-register 128-bit store on v9).
 */
static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_load_gpr(dc, rd);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE store, so must swap
             * the order of the construction.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_i64_i128(t, lo, hi);
            } else {
                tcg_gen_concat_i64_i128(t, hi, lo);
            }
            tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }
            tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
        }
        break;

    case GET_ASI_BFILL:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Store 32 bytes of [rd:rd+1] to ADDR.
         * See comments for GET_ASI_BCOPY above.
         */
        {
            MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv_i64 t8 = tcg_temp_new_i64();
            TCGv_i128 t16 = tcg_temp_new_i128();
            TCGv daddr = tcg_temp_new();

            /* Replicate the register pair to a 16-byte fill pattern. */
            tcg_gen_concat_tl_i64(t8, lo, hi);
            tcg_gen_concat_i64_i128(t16, t8, t8);
            /* Truncate to the 32-byte cache line and fill both halves. */
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }

            save_state(dc);
            gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
1955 
/*
 * FMOVScc: conditionally move single-precision %f[rs] into %f[rd].
 * If the comparison does not hold, %f[rd] keeps its old value.
 * sparc64-only; on sparc32 this path is compiled out.
 */
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i32 c32, zero, dst, s1, s2;
    TCGv_i64 c64 = tcg_temp_new_i64();

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter.  */
    c32 = tcg_temp_new_i32();
    tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
    tcg_gen_extrl_i64_i32(c32, c64);

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = tcg_temp_new_i32();
    zero = tcg_constant_i32(0);

    /* dst = (condition != 0) ? new value : old %f[rd] value. */
    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    gen_store_fpr_F(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}
1981 
/*
 * FMOVDcc: conditionally move double-precision %f[rs] into %f[rd],
 * as a single 64-bit movcond.  sparc64-only.
 */
static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}
1994 
/*
 * FMOVQcc: conditionally move quad-precision %f[rs] into %f[rd].
 * A quad is held as two consecutive 64-bit cpu_fpr elements, so the
 * move is performed with two movconds on the same condition.
 * sparc64-only.
 */
static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);
    TCGv c2 = tcg_constant_tl(cmp->c2);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    /* Mark the destination quad dirty in FPRS. */
    gen_update_fprs_dirty(dc, qd);
#else
    qemu_build_not_reached();
#endif
}
2012 
#ifdef TARGET_SPARC64
/*
 * Compute a host pointer to the current trap-state entry,
 * i.e. r_tsptr = &env->ts[env->tl & MAXTL_MASK].
 */
static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
    }
}
#endif
2036 
/* Decoder field translator: map an insn register field to the
   double-precision FP register numbering. */
static int extract_dfpreg(DisasContext *dc, int x)
{
    return DFPREG(x);
}

/* Decoder field translator: map an insn register field to the
   quad-precision FP register numbering. */
static int extract_qfpreg(DisasContext *dc, int x)
{
    return QFPREG(x);
}
2046 
/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

/*
 * Glue an availability predicate onto a decode-generated entry point:
 * the insn is accepted only when avail_AVAIL(dc) holds, then decoding
 * forwards to FUNC with the given extra arguments.
 */
#define TRANS(NAME, AVAIL, FUNC, ...) \
    static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
    { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }

/*
 * Availability predicates.  Per target width, predicates are either
 * compile-time constants or tests of the cpu definition's feature bits.
 */
#define avail_ALL(C)      true
#ifdef TARGET_SPARC64
# define avail_32(C)      false
# define avail_ASR17(C)   false
# define avail_CASA(C)    true
# define avail_DIV(C)     true
# define avail_MUL(C)     true
# define avail_POWERDOWN(C) false
# define avail_64(C)      true
# define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
# define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
# define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
# define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
#else
# define avail_32(C)      true
# define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
# define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
# define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
# define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
# define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
# define avail_64(C)      false
# define avail_GL(C)      false
# define avail_HYPV(C)    false
# define avail_VIS1(C)    false
# define avail_VIS2(C)    false
#endif
2080 
/*
 * Default case for non jump instructions: advance pc/npc by one insn.
 * Handles the symbolic npc values (DYNAMIC_PC, JUMP_PC) that encode a
 * pending dynamic or conditional delay-slot target.
 */
static bool advance_pc(DisasContext *dc)
{
    TCGLabel *l1;

    finishing_insn(dc);

    /* Low bits of npc are used as flags for the symbolic values. */
    if (dc->npc & 3) {
        switch (dc->npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* npc is only known at runtime; advance in generated code. */
            dc->pc = dc->npc;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
            break;

        case JUMP_PC:
            /* we can do a static jump */
            l1 = gen_new_label();
            tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);

            /* jump not taken */
            gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);

            /* jump taken */
            gen_set_label(l1);
            gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);

            dc->base.is_jmp = DISAS_NORETURN;
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        /* Both pc and npc are statically known. */
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
    return true;
}
2121 
2122 /*
2123  * Major opcodes 00 and 01 -- branches, call, and sethi
2124  */
2125 
/*
 * Emit a conditional branch with (optional) annulled delay slot.
 * DISP is the word displacement from the current pc; ANNUL requests
 * that the delay slot be skipped when the branch is not taken.
 * Constant-folds always/never conditions; otherwise defers the
 * decision via JUMP_PC state or an explicit movcond/brcond.
 */
static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
                              bool annul, int disp)
{
    target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
    target_ulong npc;

    finishing_insn(dc);

    /* Branch always: with annul, the delay slot is skipped entirely. */
    if (cmp->cond == TCG_COND_ALWAYS) {
        if (annul) {
            dc->pc = dest;
            dc->npc = dest + 4;
        } else {
            gen_mov_pc_npc(dc);
            dc->npc = dest;
        }
        return true;
    }

    /* Branch never: fall through, optionally annulling the delay slot. */
    if (cmp->cond == TCG_COND_NEVER) {
        npc = dc->npc;
        if (npc & 3) {
            /* npc is symbolic; advance in generated code. */
            gen_mov_pc_npc(dc);
            if (annul) {
                tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
            }
            tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
        } else {
            dc->pc = npc + (annul ? 4 : 0);
            dc->npc = dc->pc + 4;
        }
        return true;
    }

    flush_cond(dc);
    npc = dc->npc;

    if (annul) {
        /* Conditional + annul: must resolve now with a real branch. */
        TCGLabel *l1 = gen_new_label();

        tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
        gen_goto_tb(dc, 0, npc, dest);
        gen_set_label(l1);
        gen_goto_tb(dc, 1, npc + 4, npc + 8);

        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        if (npc & 3) {
            switch (npc) {
            case DYNAMIC_PC:
            case DYNAMIC_PC_LOOKUP:
                /* Delay slot at dynamic npc; select target via movcond. */
                tcg_gen_mov_tl(cpu_pc, cpu_npc);
                tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
                tcg_gen_movcond_tl(cmp->cond, cpu_npc,
                                   cmp->c1, tcg_constant_tl(cmp->c2),
                                   tcg_constant_tl(dest), cpu_npc);
                dc->pc = npc;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /* Defer the decision until after the delay slot executes. */
            dc->pc = npc;
            dc->npc = JUMP_PC;
            dc->jump = *cmp;
            dc->jump_pc[0] = dest;
            dc->jump_pc[1] = npc + 4;

            /* The condition for cpu_cond is always NE -- normalize. */
            if (cmp->cond == TCG_COND_NE) {
                tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
            } else {
                tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
            }
            dc->cpu_cond_live = true;
        }
    }
    return true;
}
2205 
/* Raise a privileged-instruction trap; always accepts the insn. */
static bool raise_priv(DisasContext *dc)
{
    gen_exception(dc, TT_PRIV_INSN);
    return true;
}

/* Raise an fp exception with ftt = unimplemented-FPop. */
static bool raise_unimpfpop(DisasContext *dc)
{
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    return true;
}
2217 
2218 static bool gen_trap_float128(DisasContext *dc)
2219 {
2220     if (dc->def->features & CPU_FEATURE_FLOAT128) {
2221         return false;
2222     }
2223     return raise_unimpfpop(dc);
2224 }
2225 
/* Integer condition-code branch (Bicc / BPcc). */
static bool do_bpcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    gen_compare(&cmp, a->cc, a->cond, dc);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(Bicc, ALL, do_bpcc, a)
TRANS(BPcc,  64, do_bpcc, a)

/* FP condition-code branch (FBfcc / FBPfcc); traps if no FPU. */
static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(FBPfcc,  64, do_fbpfcc, a)
TRANS(FBfcc,  ALL, do_fbpfcc, a)
2250 
2251 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2252 {
2253     DisasCompare cmp;
2254 
2255     if (!avail_64(dc)) {
2256         return false;
2257     }
2258     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
2259         return false;
2260     }
2261     return advance_jump_cond(dc, &cmp, a->a, a->i);
2262 }
2263 
/* CALL: save return address in %o7 (r15) and branch, with delay slot. */
static bool trans_CALL(DisasContext *dc, arg_CALL *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);

    gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
    gen_mov_pc_npc(dc);
    dc->npc = target;
    return true;
}

static bool trans_NCP(DisasContext *dc, arg_NCP *a)
{
    /*
     * For sparc32, always generate the no-coprocessor exception.
     * For sparc64, always generate illegal instruction.
     */
#ifdef TARGET_SPARC64
    return false;
#else
    gen_exception(dc, TT_NCP_INSN);
    return true;
#endif
}

/* SETHI: load a 22-bit immediate into the high bits of rd. */
static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
{
    /* Special-case %g0 because that's the canonical nop.  */
    if (a->rd) {
        gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
    }
    return advance_pc(dc);
}
2296 
2297 /*
2298  * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2299  */
2300 
/*
 * Tcc: conditional trap.  The trap number is ((rs1 + rs2/imm) & mask)
 * + TT_TRAP, where the mask depends on hypervisor mode.  COND 0 is
 * "trap never" and COND 8 is "trap always"; other values emit a
 * conditional delayed exception.
 */
static bool do_tcc(DisasContext *dc, int cond, int cc,
                   int rs1, bool imm, int rs2_or_imm)
{
    int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
    DisasCompare cmp;
    TCGLabel *lab;
    TCGv_i32 trap;

    /* Trap never.  */
    if (cond == 0) {
        return advance_pc(dc);
    }

    /*
     * Immediate traps are the most common case.  Since this value is
     * live across the branch, it really pays to evaluate the constant.
     */
    if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
        trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
    } else {
        trap = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
        if (imm) {
            tcg_gen_addi_i32(trap, trap, rs2_or_imm);
        } else {
            TCGv_i32 t2 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
            tcg_gen_add_i32(trap, trap, t2);
        }
        tcg_gen_andi_i32(trap, trap, mask);
        tcg_gen_addi_i32(trap, trap, TT_TRAP);
    }

    finishing_insn(dc);

    /* Trap always.  */
    if (cond == 8) {
        save_state(dc);
        gen_helper_raise_exception(tcg_env, trap);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    /* Conditional trap.  */
    flush_cond(dc);
    lab = delay_exceptionv(dc, trap);
    gen_compare(&cmp, cc, cond, dc);
    tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);

    return advance_pc(dc);
}
2353 
/* Tcc, register form; %xcc selection (cc != 0) is sparc64-only. */
static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
{
    if (avail_32(dc) && a->cc) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
}

/* Tcc, v7/v8 immediate form; sparc32 only. */
static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
{
    if (avail_64(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
}

/* Tcc, v9 immediate form with cc selector; sparc64 only. */
static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
{
    if (avail_32(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
}
2377 
/* STBAR: store-store barrier. */
static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
{
    tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
    return advance_pc(dc);
}

/* MEMBAR (sparc64): emit a barrier for mmask, end TB for cmask. */
static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
{
    if (avail_32(dc)) {
        return false;
    }
    if (a->mmask) {
        /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
        tcg_gen_mb(a->mmask | TCG_BAR_SC);
    }
    if (a->cmask) {
        /* For #Sync, etc, end the TB to recognize interrupts. */
        dc->base.is_jmp = DISAS_EXIT;
    }
    return advance_pc(dc);
}
2399 
/*
 * Common shape of the RD* (read special register) insns: check the
 * privilege predicate, let FUNC produce the value (either into the
 * provided destination temp or as some other TCGv), store to rd.
 */
static bool do_rd_special(DisasContext *dc, bool priv, int rd,
                          TCGv (*func)(DisasContext *, TCGv))
{
    if (!priv) {
        return raise_priv(dc);
    }
    gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
    return advance_pc(dc);
}
2409 
/* RDY: the %y register is kept in a dedicated global. */
static TCGv do_rdy(DisasContext *dc, TCGv dst)
{
    return cpu_y;
}

static bool trans_RDY(DisasContext *dc, arg_RDY *a)
{
    /*
     * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
     * 32-bit cpus like sparcv7, which ignores the rs1 field.
     * This matches after all other ASR, so Leon3 Asr17 is handled first.
     */
    if (avail_64(dc) && a->rs1 != 0) {
        return false;
    }
    return do_rd_special(dc, true, a->rd, do_rdy);
}

/* Leon3 %asr17 configuration register. */
static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
{
    gen_helper_rdasr17(dst, tcg_env);
    return dst;
}

TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)

/* RDCCR (sparc64): condition codes are assembled by a helper. */
static TCGv do_rdccr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdccr(dst, tcg_env);
    return dst;
}

TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)

/* RDASI: the current ASI is known at translate time. */
static TCGv do_rdasi(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    return tcg_constant_tl(dc->asi);
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2454 
/* RDTICK: read the %tick counter via its timer helper. */
static TCGv do_rdtick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    /* Timer access may depend on icount; exit the TB if io started. */
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)

/* RDPC: the pc of this insn is known at translate time. */
static TCGv do_rdpc(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(address_mask_i(dc, dc->pc));
}

TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)

/* RDFPRS: zero-extend the 32-bit FPRS global. */
static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
{
    tcg_gen_ext_i32_tl(dst, cpu_fprs);
    return dst;
}

TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)

/* RDGSR: requires the FPU; %gsr lives in a global. */
static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
{
    gen_trap_ifnofpu(dc);
    return cpu_gsr;
}

TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)

/* RDSOFTINT: sign-extended load of the 32-bit softint field. */
static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
    return dst;
}

TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)

/* RDTICK_CMPR: plain load of the tick compare register. */
static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
2510 
/* RDSTICK: read the system tick counter via its timer helper. */
static TCGv do_rdstick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    /* Timer access may depend on icount; exit the TB if io started. */
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)

/* RDSTICK_CMPR: plain load of the stick compare register. */
static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
    return dst;
}

/* TODO: supervisor access only allowed when enabled by hypervisor. */
TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)

/*
 * UltraSPARC-T1 Strand status.
 * HYPV check maybe not enough, UA2005 & UA2007 describe
 * this ASR as impl. dep
 */
static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(1);
}

TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)

/* RDPSR (sparc32): the PSR is assembled by a helper. */
static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdpsr(dst, tcg_env);
    return dst;
}

TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
2555 
/* RDHPR %hpstate: plain load from env. */
static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
    return dst;
}

TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)

/* RDHPR %htstate: index env->htstate[] by (tl & MAXTL_MASK). */
static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    /* Scale by 8, the element size of the htstate array. */
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
    return dst;
}

TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)

/* RDHPR %hintp: plain load from env. */
static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
    return dst;
}

TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)

/* RDHPR %htba: plain load from env. */
static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
    return dst;
}

TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)

/* RDHPR %hver: plain load from env. */
static TCGv do_rdhver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
    return dst;
}

TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)

/* RDHPR %hstick_cmpr: plain load from env. */
static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
    return dst;
}

TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
      do_rdhstick_cmpr)
2613 
/* RDWIM (sparc32): plain load of the window-invalid mask. */
static TCGv do_rdwim(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
    return dst;
}

TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)

/* RDPR %tpc: load from the trap state selected by the current %tl. */
static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)

/* RDPR %tnpc: load from the trap state selected by the current %tl. */
static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)

/* RDPR %tstate: load from the trap state selected by the current %tl. */
static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)

/* RDPR %tt: sign-extended load of the 32-bit trap-type field. */
static TCGv do_rdtt(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
2682 
/* RDTBR / RDPR %tba: the trap base register lives in a global. */
static TCGv do_rdtba(DisasContext *dc, TCGv dst)
{
    return cpu_tbr;
}

TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)

/* RDPR %pstate: sign-extended 32-bit load from env. */
static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
    return dst;
}

TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)

/* RDPR %tl: sign-extended 32-bit load from env. */
static TCGv do_rdtl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
    return dst;
}

TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)

/* RDPR %pil: sign-extended 32-bit load from env. */
static TCGv do_rdpil(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
    return dst;
}

TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)

/* RDPR %cwp: the current window pointer is read via a helper. */
static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
{
    gen_helper_rdcwp(dst, tcg_env);
    return dst;
}

TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)

/* RDPR %cansave: sign-extended 32-bit load from env. */
static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
    return dst;
}

TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)

/* RDPR %canrestore: sign-extended 32-bit load from env. */
static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
    return dst;
}

TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
      do_rdcanrestore)

/* RDPR %cleanwin: sign-extended 32-bit load from env. */
static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
    return dst;
}

TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)

/* RDPR %otherwin: sign-extended 32-bit load from env. */
static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
    return dst;
}

TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)

/* RDPR %wstate: sign-extended 32-bit load from env. */
static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
    return dst;
}

TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)

/* RDPR %gl: sign-extended 32-bit load from env. */
static TCGv do_rdgl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
    return dst;
}

TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)

/* UA2005 strand status */
static TCGv do_rdssr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
    return dst;
}

TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)

/* RDPR %ver: plain load of the cpu version register. */
static TCGv do_rdver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
    return dst;
}

TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
2788 
/*
 * FLUSHW: flush all register windows to the stack (sparc64 only).
 * Kept in this if-shape so the 32-bit build can eliminate the call to
 * gen_helper_flushw, which is stubbed with qemu_build_not_reached().
 */
static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
{
    if (avail_64(dc)) {
        gen_helper_flushw(tcg_env);
        return advance_pc(dc);
    }
    return false;
}
2797 
/*
 * Common shape of the WR* (write special register) insns: check the
 * privilege predicate, compute the source value as rs1 XOR rs2/imm
 * (note: XOR, not ADD -- this matches the code below), and hand it
 * to FUNC to perform the actual write.
 */
static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
                          void (*func)(DisasContext *, TCGv))
{
    TCGv src;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
        return false;
    }
    if (!priv) {
        return raise_priv(dc);
    }

    /* With %g0 as rs1, the result is just the constant/other operand. */
    if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
        src = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGv src1 = gen_load_gpr(dc, a->rs1);
        if (a->rs2_or_imm == 0) {
            src = src1;
        } else {
            src = tcg_temp_new();
            if (a->imm) {
                tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
            } else {
                tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
            }
        }
    }
    func(dc, src);
    return advance_pc(dc);
}
2829 
/* WRY: %y holds only 32 significant bits. */
static void do_wry(DisasContext *dc, TCGv src)
{
    tcg_gen_ext32u_tl(cpu_y, src);
}

TRANS(WRY, ALL, do_wr_special, a, true, do_wry)

/* WRCCR (sparc64): unpack the value into the cc state via helper. */
static void do_wrccr(DisasContext *dc, TCGv src)
{
    gen_helper_wrccr(tcg_env, src);
}

TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)

/* WRASI: only 8 bits of ASI; the TB caches dc->asi, so exit. */
static void do_wrasi(DisasContext *dc, TCGv src)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_ext8u_tl(tmp, src);
    tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
    /* End TB to notice changed ASI. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)

/* WRFPRS: FPRS gates FP register access, so invalidate the cached
   dirty state and end the TB. */
static void do_wrfprs(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    tcg_gen_trunc_tl_i32(cpu_fprs, src);
    dc->fprs_dirty = 0;
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)

/* WRGSR: requires the FPU. */
static void do_wrgsr(DisasContext *dc, TCGv src)
{
    gen_trap_ifnofpu(dc);
    tcg_gen_mov_tl(cpu_gsr, src);
}

TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
2876 
/* SET_SOFTINT: or bits into the softint register via helper. */
static void do_wrsoftint_set(DisasContext *dc, TCGv src)
{
    gen_helper_set_softint(tcg_env, src);
}

TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)

/* CLEAR_SOFTINT: clear bits in the softint register via helper. */
static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
{
    gen_helper_clear_softint(tcg_env, src);
}

TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)

/* WRSOFTINT: replace the softint register via helper. */
static void do_wrsoftint(DisasContext *dc, TCGv src)
{
    gen_helper_write_softint(tcg_env, src);
}

TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
2897 
/* WRTICK_CMPR: store the compare value and reprogram the tick timer. */
static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)

/* WRSTICK: set the system tick counter itself. */
static void do_wrstick(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    /* NOTE(review): uses plain offsetof where siblings use
       env64_field_offsetof -- presumably equivalent here; confirm. */
    tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)

/* WRSTICK_CMPR: store the compare value and reprogram the stick timer. */
static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
2942 
/*
 * WRPOWERDOWN: halt the cpu via helper.  PC/NPC must be written back
 * first (finishing_insn + save_state) since the helper does not return
 * to the middle of the TB.
 */
static void do_wrpowerdown(DisasContext *dc, TCGv src)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_power_down(tcg_env);
}

TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)

/*
 * WRPSR (sparc32 only): write the PSR via helper; the helper may change
 * CWP and interrupt state, so end the TB.
 */
static void do_wrpsr(DisasContext *dc, TCGv src)
{
    gen_helper_wrpsr(tcg_env, src);
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
2959 
/*
 * WRWIM (sparc32 only): store SRC into env->wim, keeping only the bits
 * corresponding to the cpu's implemented register windows.
 */
static void do_wrwim(DisasContext *dc, TCGv src)
{
    target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
    TCGv tmp = tcg_temp_new();

    tcg_gen_andi_tl(tmp, src, mask);
    tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
}

TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
2970 
/*
 * WRPR %tpc: store SRC into the tpc field of the trap state selected
 * by the current trap level (pointer computed by
 * gen_load_trap_state_at_tl).  The #ifdef is required because the
 * trap_state type only exists in sparc64 builds.
 */
static void do_wrtpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)

/* WRPR %tnpc: as do_wrtpc, but for the tnpc field. */
static void do_wrtnpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)

/* WRPR %tstate: as do_wrtpc, but for the tstate field. */
static void do_wrtstate(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)

/* WRPR %tt: as do_wrtpc, but tt is a 32-bit field, hence st32. */
static void do_wrtt(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3026 
/*
 * WRPR %tick: set the tick counter to SRC via helper; end the TB so a
 * resulting timer interrupt can be delivered.
 */
static void do_wrtick(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)

/* WRPR %tba: the trap base address lives in the cpu_tbr global. */
static void do_wrtba(DisasContext *dc, TCGv src)
{
    tcg_gen_mov_tl(cpu_tbr, src);
}

TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3046 
/*
 * WRPR %pstate: write PSTATE via helper.  The helper can change the
 * memory/privilege context, so pc/npc are saved first and npc is left
 * dynamic; if icount is active the TB is ended as well.
 */
static void do_wrpstate(DisasContext *dc, TCGv src)
{
    save_state(dc);
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpstate(tcg_env, src);
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)

/*
 * WRPR %tl: store the new trap level.  State is saved and npc made
 * dynamic because the active trap-state window changes.
 */
static void do_wrtl(DisasContext *dc, TCGv src)
{
    save_state(dc);
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)

/*
 * WRPR %pil: write the interrupt level via helper; lowering PIL may
 * unmask a pending interrupt, so end the TB when icount is active.
 */
static void do_wrpil(DisasContext *dc, TCGv src)
{
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpil(tcg_env, src);
}

TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3077 
/* WRPR %cwp: switch the current register window via helper. */
static void do_wrcwp(DisasContext *dc, TCGv src)
{
    gen_helper_wrcwp(tcg_env, src);
}

TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)

/* WRPR %cansave: plain 32-bit store into the env field. */
static void do_wrcansave(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
}

TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)

/* WRPR %canrestore: plain 32-bit store into the env field. */
static void do_wrcanrestore(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
}

TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)

/* WRPR %cleanwin: plain 32-bit store into the env field. */
static void do_wrcleanwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
}

TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)

/* WRPR %otherwin: plain 32-bit store into the env field. */
static void do_wrotherwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
}

TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)

/* WRPR %wstate: plain 32-bit store into the env field. */
static void do_wrwstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
}

TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)

/* WRPR %gl: switch the active global-register set via helper. */
static void do_wrgl(DisasContext *dc, TCGv src)
{
    gen_helper_wrgl(tcg_env, src);
}

TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3126 
/* UA2005 strand status register: plain store into the env field. */
static void do_wrssr(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
}

TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)

/* Sparc32 WRTBR shares the implementation of sparc64 WRPR %tba. */
TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)

/*
 * WRHPR %hpstate: store the hypervisor pstate; end the TB since the
 * execution context may change.
 */
static void do_wrhpstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3144 
/*
 * WRHPR %htstate: store SRC into env->htstate[TL].  The index is
 * computed inline: TL is loaded, masked to the valid range, scaled by
 * 8 (sizeof each htstate entry) and added to the env base.
 */
static void do_wrhtstate(DisasContext *dc, TCGv src)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
}

TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)

/* WRHPR %hintp: plain store into the env field. */
static void do_wrhintp(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
}

TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)

/* WRHPR %htba: plain store into the env field. */
static void do_wrhtba(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
}

TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
3174 
/*
 * WRHPR %hstick_cmpr: store SRC into env->hstick_cmpr and reprogram
 * the hypervisor tick timer's limit; end the TB so a newly pending
 * timer interrupt is taken promptly.
 */
static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
      do_wrhstick_cmpr)
3189 
/*
 * SAVED/RESTORED: privileged window-bookkeeping instructions; all the
 * state updates happen in the respective helpers.
 */
static bool do_saved_restored(DisasContext *dc, bool saved)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (saved) {
        gen_helper_saved(tcg_env);
    } else {
        gen_helper_restored(tcg_env);
    }
    return advance_pc(dc);
}

TRANS(SAVED, 64, do_saved_restored, true)
TRANS(RESTORED, 64, do_saved_restored, false)
3205 
/* NOP: nothing to emit; just step pc/npc forward. */
static bool trans_NOP(DisasContext *dc, arg_NOP *a)
{
    return advance_pc(dc);
}

/*
 * TODO: Need a feature bit for sparcv8.
 * In the meantime, treat all 32-bit cpus like sparcv7.
 */
TRANS(NOP_v7, 32, trans_NOP, a)
TRANS(NOP_v9, 64, trans_NOP, a)
3217 
/*
 * Common generator for two-operand register/immediate arithmetic.
 * FUNC is the reg-reg generator, FUNCI an optional immediate variant.
 * With LOGIC_CC the result is computed directly into cpu_cc_N and the
 * remaining flags are derived from it (the scheme used by the logical
 * ops); otherwise FUNC itself is responsible for any flag updates.
 */
static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
                         void (*func)(TCGv, TCGv, TCGv),
                         void (*funci)(TCGv, TCGv, target_long),
                         bool logic_cc)
{
    TCGv dst, src1;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (logic_cc) {
        /* Compute into N so the flag update below can reuse it. */
        dst = cpu_cc_N;
    } else {
        dst = gen_dest_gpr(dc, a->rd);
    }
    src1 = gen_load_gpr(dc, a->rs1);

    /* rs2_or_imm == 0 in the register form is %g0, i.e. constant 0. */
    if (a->imm || a->rs2_or_imm == 0) {
        if (funci) {
            funci(dst, src1, a->rs2_or_imm);
        } else {
            func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
        }
    } else {
        func(dst, src1, cpu_regs[a->rs2_or_imm]);
    }

    if (logic_cc) {
        if (TARGET_LONG_BITS == 64) {
            /* Mirror into the 32-bit icc copies on sparc64. */
            tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
            tcg_gen_movi_tl(cpu_icc_C, 0);
        }
        /* For logic ops Z tracks the result and C/V are clear. */
        tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
        tcg_gen_movi_tl(cpu_cc_C, 0);
        tcg_gen_movi_tl(cpu_cc_V, 0);
    }

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3260 
3261 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
3262                      void (*func)(TCGv, TCGv, TCGv),
3263                      void (*funci)(TCGv, TCGv, target_long),
3264                      void (*func_cc)(TCGv, TCGv, TCGv))
3265 {
3266     if (a->cc) {
3267         return do_arith_int(dc, a, func_cc, NULL, false);
3268     }
3269     return do_arith_int(dc, a, func, funci, false);
3270 }
3271 
/*
 * Logical ops: when the CC bit is set, use the shared logic_cc flag
 * update (flags derived from the result) in do_arith_int.
 */
static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long))
{
    return do_arith_int(dc, a, func, funci, a->cc);
}

TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)

TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)

TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)

TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)

TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)

/* TODO: Should have feature bit -- comes in with UltraSparc T2. */
TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3305 
/*
 * OR: handled specially so that the MOV alias (OR %g0, x, rd) turns
 * into a plain register/constant copy; everything else falls through
 * to the generic logic-op path.
 */
static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
{
    /* OR with %g0 is the canonical alias for MOV. */
    if (!a->cc && a->rs1 == 0) {
        if (a->imm || a->rs2_or_imm == 0) {
            gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
        } else if (a->rs2_or_imm & ~0x1f) {
            /* For simplicity, we under-decoded the rs2 form. */
            return false;
        } else {
            gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
        }
        return advance_pc(dc);
    }
    return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
}
3322 
/*
 * UDIV: unsigned divide of the 64-bit dividend (Y:rs1) by the 32-bit
 * rs2 operand.  The quotient is clamped to UINT32_MAX on overflow.
 * Division by zero raises a trap: immediately for a zero immediate,
 * or via a delayed-exception branch for the register form.
 */
static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv_i64 t1, t2;
    TCGv dst;

    if (!avail_DIV(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv_i32 n2;

        finishing_insn(dc);
        flush_cond(dc);

        /* Runtime zero check on the 32-bit divisor. */
        n2 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);

        lab = delay_exception(dc, TT_DIV_ZERO);
        tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);

        t2 = tcg_temp_new_i64();
#ifdef TARGET_SPARC64
        tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
#else
        tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
#endif
    }

    /* Form the 64-bit dividend with rs1 in the low half and Y high. */
    t1 = tcg_temp_new_i64();
    tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);

    tcg_gen_divu_i64(t1, t1, t2);
    /* Clamp an overflowing quotient to UINT32_MAX. */
    tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));

    dst = gen_dest_gpr(dc, a->rd);
    tcg_gen_trunc_i64_tl(dst, t1);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3375 
/*
 * UDIVX (v9): full-width unsigned divide.  Division by zero traps:
 * immediately for a zero immediate, or via a delayed-exception branch
 * for the register form.
 */
static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;

        finishing_insn(dc);
        flush_cond(dc);

        /* Runtime zero check on the register divisor. */
        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    tcg_gen_divu_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3413 
/*
 * SDIVX (v9): full-width signed divide.  Division by zero traps, and
 * the INT64_MIN / -1 case (which would trap on some hosts) is handled
 * specially: as a negation for an immediate -1, or by swapping in a
 * divisor of 1 at runtime for the register form.
 */
static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm) {
        if (unlikely(a->rs2_or_imm == -1)) {
            /* x / -1 == -x, and negation cannot trap on the host. */
            tcg_gen_neg_tl(dst, src1);
            gen_store_gpr(dc, a->rd, dst);
            return advance_pc(dc);
        }
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv t1, t2;

        finishing_insn(dc);
        flush_cond(dc);

        /* Runtime zero check on the register divisor. */
        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);

        /*
         * Need to avoid INT64_MIN / -1, which will trap on x86 host.
         * Set SRC2 to 1 as a new divisor, to produce the correct result.
         */
        t1 = tcg_temp_new();
        t2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
        tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
        tcg_gen_and_tl(t1, t1, t2);
        tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
                           tcg_constant_tl(1), src2);
        src2 = t1;
    }

    tcg_gen_div_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3470 
3471 static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
3472                      int width, bool cc, bool left)
3473 {
3474     TCGv dst, s1, s2, lo1, lo2;
3475     uint64_t amask, tabl, tabr;
3476     int shift, imask, omask;
3477 
3478     dst = gen_dest_gpr(dc, a->rd);
3479     s1 = gen_load_gpr(dc, a->rs1);
3480     s2 = gen_load_gpr(dc, a->rs2);
3481 
3482     if (cc) {
3483         gen_op_subcc(cpu_cc_N, s1, s2);
3484     }
3485 
3486     /*
3487      * Theory of operation: there are two tables, left and right (not to
3488      * be confused with the left and right versions of the opcode).  These
3489      * are indexed by the low 3 bits of the inputs.  To make things "easy",
3490      * these tables are loaded into two constants, TABL and TABR below.
3491      * The operation index = (input & imask) << shift calculates the index
3492      * into the constant, while val = (table >> index) & omask calculates
3493      * the value we're looking for.
3494      */
3495     switch (width) {
3496     case 8:
3497         imask = 0x7;
3498         shift = 3;
3499         omask = 0xff;
3500         if (left) {
3501             tabl = 0x80c0e0f0f8fcfeffULL;
3502             tabr = 0xff7f3f1f0f070301ULL;
3503         } else {
3504             tabl = 0x0103070f1f3f7fffULL;
3505             tabr = 0xfffefcf8f0e0c080ULL;
3506         }
3507         break;
3508     case 16:
3509         imask = 0x6;
3510         shift = 1;
3511         omask = 0xf;
3512         if (left) {
3513             tabl = 0x8cef;
3514             tabr = 0xf731;
3515         } else {
3516             tabl = 0x137f;
3517             tabr = 0xfec8;
3518         }
3519         break;
3520     case 32:
3521         imask = 0x4;
3522         shift = 0;
3523         omask = 0x3;
3524         if (left) {
3525             tabl = (2 << 2) | 3;
3526             tabr = (3 << 2) | 1;
3527         } else {
3528             tabl = (1 << 2) | 3;
3529             tabr = (3 << 2) | 2;
3530         }
3531         break;
3532     default:
3533         abort();
3534     }
3535 
3536     lo1 = tcg_temp_new();
3537     lo2 = tcg_temp_new();
3538     tcg_gen_andi_tl(lo1, s1, imask);
3539     tcg_gen_andi_tl(lo2, s2, imask);
3540     tcg_gen_shli_tl(lo1, lo1, shift);
3541     tcg_gen_shli_tl(lo2, lo2, shift);
3542 
3543     tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
3544     tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
3545     tcg_gen_andi_tl(lo1, lo1, omask);
3546     tcg_gen_andi_tl(lo2, lo2, omask);
3547 
3548     amask = address_mask_i(dc, -8);
3549     tcg_gen_andi_tl(s1, s1, amask);
3550     tcg_gen_andi_tl(s2, s2, amask);
3551 
3552     /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
3553     tcg_gen_and_tl(lo2, lo2, lo1);
3554     tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);
3555 
3556     gen_store_gpr(dc, a->rd, dst);
3557     return advance_pc(dc);
3558 }
3559 
3560 TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
3561 TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
3562 TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
3563 TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
3564 TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
3565 TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)
3566 
3567 TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
3568 TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
3569 TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
3570 TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
3571 TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
3572 TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
3573 
3574 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
3575                    void (*func)(TCGv, TCGv, TCGv))
3576 {
3577     TCGv dst = gen_dest_gpr(dc, a->rd);
3578     TCGv src1 = gen_load_gpr(dc, a->rs1);
3579     TCGv src2 = gen_load_gpr(dc, a->rs2);
3580 
3581     func(dst, src1, src2);
3582     gen_store_gpr(dc, a->rd, dst);
3583     return advance_pc(dc);
3584 }
3585 
3586 TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
3587 TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
3588 TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
3589 
/*
 * ALIGNADDRESS: dst = (s1 + s2) & ~7, with the low 3 bits of the sum
 * deposited into GSR.align for use by the FALIGNDATA datapath.
 */
static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

/*
 * ALIGNADDRESS_LITTLE: as above, but the low 3 bits of the *negated*
 * sum are deposited into GSR.align.
 */
static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_neg_tl(tmp, tmp);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
3619 
/*
 * BMASK (VIS2): dst = s1 + s2, with the sum also deposited into the
 * upper 32 bits of GSR (the mask field used by BSHUFFLE).
 */
static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    tcg_gen_add_tl(dst, s1, s2);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
#else
    g_assert_not_reached();
#endif
}

TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
3631 
/*
 * Register-count shifts (SLL/SRL/SRA).  L selects left shift, U
 * selects unsigned (logical) right shift; a->x selects the 64-bit
 * form.  The count is masked to 31 or 63 as appropriate, and 32-bit
 * forms extend the input/result so the upper half stays consistent.
 */
static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
{
    TCGv dst, src1, src2;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && a->x) {
        return false;
    }

    src2 = tcg_temp_new();
    tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (l) {
        tcg_gen_shl_tl(dst, src1, src2);
        if (!a->x) {
            /* 32-bit shift: discard any bits shifted into the top half. */
            tcg_gen_ext32u_tl(dst, dst);
        }
    } else if (u) {
        if (!a->x) {
            /* 32-bit logical shift: zero-extend the input first. */
            tcg_gen_ext32u_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_shr_tl(dst, src1, src2);
    } else {
        if (!a->x) {
            /* 32-bit arithmetic shift: sign-extend the input first. */
            tcg_gen_ext32s_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_sar_tl(dst, src1, src2);
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_r, ALL, do_shift_r, a, true, true)
TRANS(SRL_r, ALL, do_shift_r, a, false, true)
TRANS(SRA_r, ALL, do_shift_r, a, false, false)
3671 
/*
 * Immediate-count shifts (SLL/SRL/SRA).  L selects left shift, U
 * selects unsigned (logical) right shift; a->x selects the 64-bit
 * form.  On a 64-bit cpu the 32-bit forms are implemented with
 * deposit/extract so the result is correctly extended in one op.
 */
static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
{
    TCGv dst, src1;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && (a->x || a->i >= 32)) {
        return false;
    }

    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (avail_32(dc) || a->x) {
        /* Full-width shift on the native register size. */
        if (l) {
            tcg_gen_shli_tl(dst, src1, a->i);
        } else if (u) {
            tcg_gen_shri_tl(dst, src1, a->i);
        } else {
            tcg_gen_sari_tl(dst, src1, a->i);
        }
    } else {
        /* 32-bit shift on a 64-bit cpu: shift and extend in one op. */
        if (l) {
            tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
        } else if (u) {
            tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
        } else {
            tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
        }
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_i, ALL, do_shift_i, a, true, true)
TRANS(SRL_i, ALL, do_shift_i, a, false, true)
TRANS(SRA_i, ALL, do_shift_i, a, false, false)
3708 
3709 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
3710 {
3711     /* For simplicity, we under-decoded the rs2 form. */
3712     if (!imm && rs2_or_imm & ~0x1f) {
3713         return NULL;
3714     }
3715     if (imm || rs2_or_imm == 0) {
3716         return tcg_constant_tl(rs2_or_imm);
3717     } else {
3718         return cpu_regs[rs2_or_imm];
3719     }
3720 }
3721 
/*
 * Common tail for MOVcc/MOVfcc/MOVR: conditionally replace rd with
 * SRC2 using a movcond; rd keeps its old value when CMP is false.
 */
static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
{
    TCGv dst = gen_load_gpr(dc, rd);
    TCGv c2 = tcg_constant_tl(cmp->c2);

    tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
    gen_store_gpr(dc, rd, dst);
    return advance_pc(dc);
}
3731 
/* MOVcc: conditional move on integer condition codes. */
static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_compare(&cmp, a->cc, a->cond, dc);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}

/* MOVfcc: conditional move on floating-point condition codes. */
static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}

/* MOVR: conditional move on the value of register rs1. */
static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    return do_mov_cond(dc, &cmp, a->rd, src2);
}
3769 
/*
 * Common front end for insns of the form "rs1 + (rs2 or simm)" whose
 * result feeds a special operation (JMPL, RETT, RETURN, SAVE,
 * RESTORE): compute the sum, then hand it to FUNC.
 */
static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
                           bool (*func)(DisasContext *dc, int rd, TCGv src))
{
    TCGv src1, sum;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /*
     * Always load the sum into a new temporary.
     * This is required to capture the value across a window change,
     * e.g. SAVE and RESTORE, and may be optimized away otherwise.
     */
    sum = tcg_temp_new();
    src1 = gen_load_gpr(dc, a->rs1);
    if (a->imm || a->rs2_or_imm == 0) {
        tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
    } else {
        tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
    }
    return func(dc, a->rd, sum);
}
3794 
/*
 * JMPL: jump to SRC (which must be 4-byte aligned), writing the
 * address of this instruction into rd.
 */
static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
{
    /*
     * Preserve pc across advance, so that we can delay
     * the writeback to rd until after src is consumed.
     */
    target_ulong cur_pc = dc->pc;

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);
    gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
3815 
/*
 * RETT (sparc32, privileged): return from trap to SRC; the privilege
 * and window state changes happen in the helper.
 */
static bool do_rett(DisasContext *dc, int rd, TCGv src)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_helper_rett(tcg_env);

    dc->npc = DYNAMIC_PC;
    return true;
}

TRANS(RETT, 32, do_add_special, a, do_rett)
3833 
/*
 * RETURN (v9): restore the register window, then jump to SRC (which
 * must be 4-byte aligned).
 */
static bool do_return(DisasContext *dc, int rd, TCGv src)
{
    gen_check_align(dc, src, 3);
    gen_helper_restore(tcg_env);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(RETURN, 64, do_add_special, a, do_return)
3848 
/*
 * SAVE: switch to a new register window via helper, then store the
 * pre-computed sum into rd of the *new* window.
 */
static bool do_save(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_save(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(SAVE, ALL, do_add_special, a, do_save)

/*
 * RESTORE: switch back to the previous register window via helper,
 * then store the pre-computed sum into rd of the restored window.
 */
static bool do_restore(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_restore(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(RESTORE, ALL, do_add_special, a, do_restore)
3866 
/*
 * DONE/RETRY (v9, privileged): return from the current trap via the
 * respective helper, which reloads pc/npc from the trap state; both
 * are therefore marked dynamic here.
 */
static bool do_done_retry(DisasContext *dc, bool done)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    dc->npc = DYNAMIC_PC;
    dc->pc = DYNAMIC_PC;
    translator_io_start(&dc->base);
    if (done) {
        gen_helper_done(tcg_env);
    } else {
        gen_helper_retry(tcg_env);
    }
    return true;
}

TRANS(DONE, 64, do_done_retry, true)
TRANS(RETRY, 64, do_done_retry, false)
3885 
3886 /*
3887  * Major opcode 11 -- load and store instructions
3888  */
3889 
3890 static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
3891 {
3892     TCGv addr, tmp = NULL;
3893 
3894     /* For simplicity, we under-decoded the rs2 form. */
3895     if (!imm && rs2_or_imm & ~0x1f) {
3896         return NULL;
3897     }
3898 
3899     addr = gen_load_gpr(dc, rs1);
3900     if (rs2_or_imm) {
3901         tmp = tcg_temp_new();
3902         if (imm) {
3903             tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
3904         } else {
3905             tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
3906         }
3907         addr = tmp;
3908     }
3909     if (AM_CHECK(dc)) {
3910         if (!tmp) {
3911             tmp = tcg_temp_new();
3912         }
3913         tcg_gen_ext32u_tl(tmp, addr);
3914         addr = tmp;
3915     }
3916     return addr;
3917 }
3918 
/*
 * Integer loads: compute the effective address, resolve the ASI, load
 * with memory operation MOP, and write the result to rd.
 */
static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ld_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}

TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
3942 
/*
 * Integer stores: compute the effective address, resolve the ASI, and
 * store rd with memory operation MOP.
 */
static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_load_gpr(dc, a->rd);
    gen_st_asi(dc, &da, reg, addr);
    return advance_pc(dc);
}

TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
TRANS(STB, ALL, do_st_gpr, a, MO_UB)
TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
3962 
/* LDD: load doubleword into an even/odd register pair; rd must be even. */
static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        /* Odd rd is an illegal encoding for the register pair. */
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_ldda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
3979 
/* STD: store doubleword from an even/odd register pair; rd must be even. */
static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        /* Odd rd is an illegal encoding for the register pair. */
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_stda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
3996 
/* LDSTUB: atomic load-store-unsigned-byte (load old byte, store 0xff). */
static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, reg;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_UB);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ldstub_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}
4013 
/* SWAP: atomically exchange rd with a 32-bit word in memory. */
static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, dst, src;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUL);

    dst = gen_dest_gpr(dc, a->rd);
    src = gen_load_gpr(dc, a->rd);
    gen_swap_asi(dc, &da, dst, src, addr);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
4031 
/*
 * CASA/CASXA: compare-and-swap.  The address is rs1 alone (no offset);
 * rs2 supplies the comparison value and rd both supplies the swap value
 * and receives the old memory contents.
 */
static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv addr, o, n, c;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, true, 0);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    o = gen_dest_gpr(dc, a->rd);
    n = gen_load_gpr(dc, a->rd);
    c = gen_load_gpr(dc, a->rs2_or_imm);
    gen_cas_asi(dc, &da, o, n, c, addr);
    gen_store_gpr(dc, a->rd, o);
    return advance_pc(dc);
}

TRANS(CASA, CASA, do_casa, a, MO_TEUL)
TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4053 
/*
 * Common translation for FP loads of size SZ (MO_32/MO_64/MO_128).
 * Checks FPU availability, and for quad loads that float128 is
 * implemented, before emitting the ASI load into FP register rd.
 */
static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_ldf_asi(dc, &da, sz, addr, a->rd);
    gen_update_fprs_dirty(dc, a->rd);
    return advance_pc(dc);
}

TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)

TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4081 
/*
 * Common translation for FP stores of size SZ (MO_32/MO_64/MO_128).
 * Mirrors do_ld_fpr: FPU and float128 checks, then the ASI store.
 */
static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_stf_asi(dc, &da, sz, addr, a->rd);
    return advance_pc(dc);
}

TRANS(STF, ALL, do_st_fpr, a, MO_32)
TRANS(STDF, ALL, do_st_fpr, a, MO_64)
TRANS(STQF, ALL, do_st_fpr, a, MO_128)

TRANS(STFA, 64, do_st_fpr, a, MO_32)
TRANS(STDFA, 64, do_st_fpr, a, MO_64)
TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4108 
/*
 * STDFQ (32-bit only, privileged): this implementation always raises
 * an fp exception with FSR.ftt = sequence_error.
 */
static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
{
    if (!avail_32(dc)) {
        return false;
    }
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    return true;
}
4123 
/*
 * LDFSR: load the 32-bit FSR from memory.  fcc0 is kept in its own
 * TCG global, so it is extracted here; the rest goes through the
 * set_fsr_nofcc_noftt helper.
 */
static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i32 tmp;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);

    tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2);
    /* LDFSR does not change FCC[1-3]. */

    gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp);
    return advance_pc(dc);
}
4145 
/*
 * LDXFSR (v9): load the 64-bit FSR.  All four fcc fields live in TCG
 * globals and are extracted individually; fcc0 is in the low word,
 * fcc1-3 in the high word.  The remaining low-word bits go through the
 * set_fsr_nofcc_noftt helper.
 */
static bool trans_LDXFSR(DisasContext *dc, arg_r_r_ri *a)
{
#ifdef TARGET_SPARC64
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i64 t64;
    TCGv_i32 lo, hi;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    t64 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN);

    /* hi aliases cpu_fcc[3], which is overwritten last below. */
    lo = tcg_temp_new_i32();
    hi = cpu_fcc[3];
    tcg_gen_extr_i64_i32(lo, hi, t64);
    tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2);
    tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2);

    gen_helper_set_fsr_nofcc_noftt(tcg_env, lo);
    return advance_pc(dc);
#else
    return false;
#endif
}
4177 
/*
 * STFSR/STXFSR: store the (32- or 64-bit) FSR to memory.  The full
 * value is assembled by the get_fsr helper before the store.
 */
static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv fsr;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    fsr = tcg_temp_new();
    gen_helper_get_fsr(fsr, tcg_env);
    tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
    return advance_pc(dc);
}

TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4198 
4199 static bool do_fc(DisasContext *dc, int rd, bool c)
4200 {
4201     uint64_t mask;
4202 
4203     if (gen_trap_ifnofpu(dc)) {
4204         return true;
4205     }
4206 
4207     if (rd & 1) {
4208         mask = MAKE_64BIT_MASK(0, 32);
4209     } else {
4210         mask = MAKE_64BIT_MASK(32, 32);
4211     }
4212     if (c) {
4213         tcg_gen_ori_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], mask);
4214     } else {
4215         tcg_gen_andi_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], ~mask);
4216     }
4217     gen_update_fprs_dirty(dc, rd);
4218     return advance_pc(dc);
4219 }
4220 
/* FZEROs/FONEs: set one single-precision register to all-0 or all-1 bits. */
TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
TRANS(FONEs, VIS1, do_fc, a->rd, 1)
4223 
/* Fill double-precision register rd with the constant C (0 or -1). */
static bool do_dc(DisasContext *dc, int rd, int64_t c)
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tcg_gen_movi_i64(cpu_fpr[rd / 2], c);
    gen_update_fprs_dirty(dc, rd);
    return advance_pc(dc);
}

TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4237 
/* Unary single-precision op with no env access: rd = func(rs). */
static bool do_ff(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4258 
/* Double-to-single op with no env access: F(rd) = func(D(rs)). */
static bool do_fd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4278 
/* Unary single-precision op through a helper taking cpu_env: rd = func(env, rs). */
static bool do_env_ff(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tcg_env, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4297 
/* Double-to-single conversion through an env helper: F(rd) = func(env, D(rs)). */
static bool do_env_fd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4318 
/* Unary double-precision op with no env access: rd = func(rs). */
static bool do_dd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4340 
/* Unary double-precision op through an env helper: rd = func(env, rs). */
static bool do_env_dd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4360 
/* Single-to-double conversion through an env helper: D(rd) = func(env, F(rs)). */
static bool do_env_df(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4381 
/*
 * Unary quad-precision op with no env access: rd = func(rs).
 * Pending IEEE exception state and FTT are cleared first, since no
 * helper is involved to do so.
 */
static bool do_qq(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i128, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    t = gen_load_fpr_Q(dc, a->rs);
    func(t, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)
4404 
/* Unary quad-precision op through an env helper: rd = func(env, rs). */
static bool do_env_qq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    t = gen_load_fpr_Q(dc, a->rs);
    func(t, tcg_env, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4424 
/* Quad-to-single conversion through an env helper: F(rd) = func(env, Q(rs)). */
static bool do_env_fq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
{
    TCGv_i128 src;
    TCGv_i32 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_Q(dc, a->rs);
    dst = tcg_temp_new_i32();
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4447 
/* Quad-to-double conversion through an env helper: D(rd) = func(env, Q(rs)). */
static bool do_env_dq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
{
    TCGv_i128 src;
    TCGv_i64 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_Q(dc, a->rs);
    dst = gen_dest_fpr_D(dc, a->rd);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4470 
/* Single-to-quad conversion through an env helper: Q(rd) = func(env, F(rs)). */
static bool do_env_qf(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_F(dc, a->rs);
    dst = tcg_temp_new_i128();
    func(dst, tcg_env, src);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4493 
/* Double-to-quad conversion through an env helper: Q(rd) = func(env, D(rs)). */
static bool do_env_qd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
{
    TCGv_i64 src;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_D(dc, a->rs);
    dst = tcg_temp_new_i128();
    func(dst, tcg_env, src);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
4516 
/* Binary single-precision op with no env access: rd = func(rs1, rs2). */
static bool do_fff(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    /* src1 is reused as the destination temporary. */
    func(src1, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
4545 
/* Binary single-precision op through an env helper: rd = func(env, rs1, rs2). */
static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, tcg_env, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
4566 
/* Binary double-precision op with no env access: rd = func(rs1, rs2). */
static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8x16, VIS1, do_ddd, a, gen_helper_fmul8x16)
TRANS(FMUL8x16AU, VIS1, do_ddd, a, gen_helper_fmul8x16au)
TRANS(FMUL8x16AL, VIS1, do_ddd, a, gen_helper_fmul8x16al)
TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
TRANS(FMULD8SUx16, VIS1, do_ddd, a, gen_helper_fmuld8sux16)
TRANS(FMULD8ULx16, VIS1, do_ddd, a, gen_helper_fmuld8ulx16)
TRANS(FPMERGE, VIS1, do_ddd, a, gen_helper_fpmerge)
TRANS(FEXPAND, VIS1, do_ddd, a, gen_helper_fexpand)

TRANS(FPADD16, VIS1, do_ddd, a, tcg_gen_vec_add16_i64)
TRANS(FPADD32, VIS1, do_ddd, a, tcg_gen_vec_add32_i64)
TRANS(FPSUB16, VIS1, do_ddd, a, tcg_gen_vec_sub16_i64)
TRANS(FPSUB32, VIS1, do_ddd, a, tcg_gen_vec_sub32_i64)
TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)

TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
4610 
/*
 * VIS compare ops: two double-precision FP sources, integer GPR
 * destination: GPR(rd) = func(D(rs1), D(rs2)).
 */
static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;
    TCGv dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)

TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
4638 
/* Binary double-precision op through an env helper: rd = func(env, rs1, rs2). */
static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
4660 
/*
 * FsMULd: multiply two single-precision values into a double result.
 * Raises an unimplemented-fpop trap on CPUs without CPU_FEATURE_FSMULD.
 */
static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
        return raise_unimpfpop(dc);
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_fsmuld(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}
4680 
/*
 * Three-source double-precision op: rd = func(old_rd, rs1, rs2).
 * Used for ops that accumulate into the destination (PDIST).
 */
static bool do_dddd(DisasContext *dc, arg_r_r_r *a,
                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst  = gen_dest_fpr_D(dc, a->rd);
    src0 = gen_load_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src0, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
4700 
/* Binary quad-precision op through an env helper: rd = func(env, rs1, rs2). */
static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
{
    TCGv_i128 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    func(src1, tcg_env, src1, src2);
    gen_store_fpr_Q(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
4724 
/* FdMULq: multiply two double-precision values into a quad result. */
static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 src1, src2;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    dst = tcg_temp_new_i128();
    gen_helper_fdmulq(dst, tcg_env, src1, src2);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}
4744 
/*
 * FMOVR: conditionally move an FP register based on an integer
 * register comparison against zero.  Returns false for an invalid
 * condition encoding (rejected by gen_compare_reg).
 */
static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
                     void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
4768 
/* FMOVcc: conditionally move an FP register based on integer condition codes. */
static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
                      void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_compare(&cmp, a->cc, a->cond, dc);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
4790 
/* FMOVfcc: conditionally move an FP register based on FP condition codes. */
static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
                       void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_fcompare(&cmp, a->cc, a->cond);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
4812 
/*
 * FCMPs/FCMPEs: single-precision compare into fcc[cc].  The E variant
 * also signals on unordered (quiet NaN) operands.  Only fcc0 exists
 * on 32-bit CPUs.
 */
static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
{
    TCGv_i32 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    if (e) {
        gen_helper_fcmpes(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPs, ALL, do_fcmps, a, false)
TRANS(FCMPEs, ALL, do_fcmps, a, true)
4836 
/* FCMPd/FCMPEd: double-precision compare into fcc[cc]; see do_fcmps. */
static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
{
    TCGv_i64 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    if (e) {
        gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPd, ALL, do_fcmpd, a, false)
TRANS(FCMPEd, ALL, do_fcmpd, a, true)
4860 
/* FCMPq/FCMPEq: quad-precision compare into fcc[cc]; see do_fcmps. */
static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
{
    TCGv_i128 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    if (e) {
        gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPq, ALL, do_fcmpq, a, false)
TRANS(FCMPEq, ALL, do_fcmpq, a, true)
4887 
/*
 * TranslatorOps init hook: populate DisasContext from the TB flags
 * (mmu index, fpu enable, address mask, privilege level, ASI) and
 * bound the TB so it does not cross a page.
 */
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    int bound;

    dc->pc = dc->base.pc_first;
    /* The next-PC is passed in via the TB's cs_base. */
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &cpu_env(cs)->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
4916 
/* TranslatorOps tb_start hook: nothing to do for SPARC. */
static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
4920 
4921 static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4922 {
4923     DisasContext *dc = container_of(dcbase, DisasContext, base);
4924     target_ulong npc = dc->npc;
4925 
4926     if (npc & 3) {
4927         switch (npc) {
4928         case JUMP_PC:
4929             assert(dc->jump_pc[1] == dc->pc + 4);
4930             npc = dc->jump_pc[0] | JUMP_PC;
4931             break;
4932         case DYNAMIC_PC:
4933         case DYNAMIC_PC_LOOKUP:
4934             npc = DYNAMIC_PC;
4935             break;
4936         default:
4937             g_assert_not_reached();
4938         }
4939     }
4940     tcg_gen_insn_start(dc->pc, npc);
4941 }
4942 
/*
 * TranslatorOps translate_insn hook: fetch one 4-byte instruction,
 * decode it, and raise illegal-instruction on decode failure.
 */
static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    unsigned int insn;

    insn = translator_ldl(cpu_env(cs), &dc->base, dc->pc);
    dc->base.pc_next += 4;

    if (!decode(dc, insn)) {
        gen_exception(dc, TT_ILL_INSN);
    }

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    /* If the insn did not simply fall through, end the TB. */
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
4962 
/*
 * TranslatorOps tb_stop hook: emit the TB epilogue.  Static pc/npc
 * pairs chain directly to the next TB; dynamic values fall back to a
 * TB lookup or a plain exit.  Finally, emit the code for any delayed
 * exceptions queued during translation.
 */
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    finishing_insn(dc);

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        /* Low bits set in pc/npc mark dynamic (runtime-only) values. */
        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                /* Resolve the pending conditional branch targets. */
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
       break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    /* Emit out-of-line code for exceptions raised from delay slots. */
    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        /* An npc with low bits set is dynamic; leave cpu_npc untouched. */
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}
5044 
5045 static void sparc_tr_disas_log(const DisasContextBase *dcbase,
5046                                CPUState *cpu, FILE *logfile)
5047 {
5048     fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
5049     target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
5050 }
5051 
/* Hook table wiring the SPARC translator into the generic translator loop. */
static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
    .disas_log          = sparc_tr_disas_log,
};
5060 
5061 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
5062                            vaddr pc, void *host_pc)
5063 {
5064     DisasContext dc = {};
5065 
5066     translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
5067 }
5068 
/*
 * One-time TCG initialization: register every SPARC CPU field that the
 * translator accesses as a TCG global, so generated code can reference
 * it by name instead of emitting explicit loads/stores.
 */
void sparc_tcg_init(void)
{
    /* ABI names for the 32 integer registers: globals, out, local, in. */
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };
    /*
     * FP registers are tracked as 32 doubles, so each name is the
     * even-numbered single-precision register that starts the pair.
     */
    static const char fregnames[32][4] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };

    /* 32-bit globals: sparc64 has fprs and four fcc fields, sparc32 one. */
    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" },
        { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" },
        { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" },
        { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" },
#else
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" },
#endif
    };

    /* target_ulong-sized globals: condition codes, pc/npc, y, tbr, ... */
    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
        { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
#endif
        { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
        { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
        { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
        { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    /* Pointer to the current register window (set on window switches). */
    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    /* %g0 always reads as zero; the translator special-cases NULL. */
    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    /*
     * Windowed registers (%o, %l, %i) are addressed relative to
     * cpu_regwptr, not tcg_env, so they follow the current window.
     */
    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }

    for (i = 0; i < TARGET_DPREGS; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
                                            offsetof(CPUSPARCState, fpr[i]),
                                            fregnames[i]);
    }
}
5146 
5147 void sparc_restore_state_to_opc(CPUState *cs,
5148                                 const TranslationBlock *tb,
5149                                 const uint64_t *data)
5150 {
5151     CPUSPARCState *env = cpu_env(cs);
5152     target_ulong pc = data[0];
5153     target_ulong npc = data[1];
5154 
5155     env->pc = pc;
5156     if (npc == DYNAMIC_PC) {
5157         /* dynamic NPC: already stored */
5158     } else if (npc & JUMP_PC) {
5159         /* jump PC: use 'cond' and the jump targets of the translation */
5160         if (env->cond) {
5161             env->npc = npc & ~3;
5162         } else {
5163             env->npc = pc + 4;
5164         }
5165     } else {
5166         env->npc = npc;
5167     }
5168 }
5169