xref: /qemu/target/sparc/translate.c (revision 4abc8923)
/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "asi.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

#ifdef TARGET_SPARC64
# define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdasr17(D, E)               qemu_build_not_reached()
# define gen_helper_rett(E)                     qemu_build_not_reached()
# define gen_helper_power_down(E)               qemu_build_not_reached()
# define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
#else
# define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
# define gen_helper_done(E)                     qemu_build_not_reached()
# define gen_helper_flushw(E)                   qemu_build_not_reached()
# define gen_helper_fmul8x16a(D, S1, S2)        qemu_build_not_reached()
# define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
# define gen_helper_restored(E)                 qemu_build_not_reached()
# define gen_helper_retry(E)                    qemu_build_not_reached()
# define gen_helper_saved(E)                    qemu_build_not_reached()
# define gen_helper_set_softint(E, S)           qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
# define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
# define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
# define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
# define gen_helper_write_softint(E, S)         qemu_build_not_reached()
# define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
# define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
# define gen_helper_fcmpeq16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fdtox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fexpand              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8sux16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8ulx16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fpmerge              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fqtox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fstox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtod                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtoq                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtos                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_pdist                ({ qemu_build_not_reached(); NULL; })
# define MAXTL_MASK                             0
#endif

/* Dynamic PC, must exit to main loop. */
#define DYNAMIC_PC         1
/* Dynamic PC, one of two values according to jump_pc[T2]. */
#define JUMP_PC            2
/* Dynamic PC, may lookup next TB. */
#define DYNAMIC_PC_LOOKUP  3

#define DISAS_EXIT  DISAS_TARGET_0

/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
static TCGv cpu_tbr;
static TCGv cpu_cond;
static TCGv cpu_cc_N;
static TCGv cpu_cc_V;
static TCGv cpu_icc_Z;
static TCGv cpu_icc_C;
#ifdef TARGET_SPARC64
static TCGv cpu_xcc_Z;
static TCGv cpu_xcc_C;
static TCGv_i32 cpu_fprs;
static TCGv cpu_gsr;
#else
# define cpu_fprs               ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr                ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif

#ifdef TARGET_SPARC64
#define cpu_cc_Z  cpu_xcc_Z
#define cpu_cc_C  cpu_xcc_C
#else
#define cpu_cc_Z  cpu_icc_Z
#define cpu_cc_C  cpu_icc_C
#define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
#define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
#endif

/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];
static TCGv_i32 cpu_fcc[TARGET_FCCREGS];

#define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
#ifdef TARGET_SPARC64
# define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X)  env_field_offsetof(X)
#else
# define env32_field_offsetof(X)  env_field_offsetof(X)
# define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
#endif

typedef struct DisasCompare {
    TCGCond cond;
    TCGv c1;
    int c2;
} DisasCompare;

typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */

    /* Used when JUMP_PC value is used. */
    DisasCompare jump;
    target_ulong jump_pc[2];

    int mem_idx;
    bool cpu_cond_live;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;

// This macro uses non-native bit order: bit 0 is the MSB
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This macro uses the bit order of the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
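
/*
 * Worked example (illustrative): the rd field of an instruction occupies
 * bits 29..25 in the manuals' numbering, so GET_FIELD_SP(insn, 25, 29)
 * expands to GET_FIELD(insn, 2, 6), i.e. (insn >> 25) & 0x1f.
 */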

#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif
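/*
 * Illustrative mapping: V9 encodes bit 5 of a double/quad register number
 * in bit 0 of the rd field, so DFPREG(3) == 34 (%f34) while DFPREG(2) == 2.
 */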

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

#define IS_IMM (insn & (1<<13))

static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
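    /* FPRS.DL (bit 0) covers %f0-%f31; FPRS.DU (bit 1) covers %f32-%f62. */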
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}

/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    if (src & 1) {
        tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
    } else {
        tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
    }
    return ret;
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}

static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
{
    TCGv_i128 ret = tcg_temp_new_i128();

    src = QFPREG(src);
    tcg_gen_concat_i64_i128(ret, cpu_fpr[src / 2 + 1], cpu_fpr[src / 2]);
    return ret;
}

static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
{
    dst = QFPREG(dst);
    tcg_gen_extr_i128_i64(cpu_fpr[dst / 2 + 1], cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0
#endif
#endif

#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc)  false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc)  true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc)  false
#else
# define AM_CHECK(dc)  ((dc)->address_mask_32bit)
#endif
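
/*
 * AM_CHECK is true when 64-bit addresses must be masked to 32 bits
 * (PSTATE.AM).  The compile-time cases above cover configurations where
 * the answer is fixed: v8 never needs it, a 32-bit ABI always does,
 * and 64-bit user mode never does.
 */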

static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}

static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
{
    return AM_CHECK(dc) ? (uint32_t)addr : addr;
}

static TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = tcg_temp_new();
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return tcg_temp_new();
    }
}

static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}

static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static TCGv gen_carry32(void)
{
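    /*
     * On sparc64, icc.C is kept in bit 32 of cpu_icc_C (the carry-in to
     * bit 32 of the result); extract it to a canonical 0/1 value.  On
     * sparc32, cpu_icc_C already holds 0 or 1.
     */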
    if (TARGET_LONG_BITS == 64) {
        TCGv t = tcg_temp_new();
        tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
        return t;
    }
    return cpu_icc_C;
}

static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
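    /*
     * V = (res ^ src2) & ~(src1 ^ src2): overflow occurs when both
     * operands have the same sign and the result's sign differs.
     * The src1 ^ src2 term is left in cpu_cc_Z and reused for icc.C.
     */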
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}

static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}

static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
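    /*
     * tcg_gen_sub2_tl leaves the borrow in cpu_cc_C as 0 or -1; negate
     * it to the canonical 0/1 carry.  Overflow for subtraction is
     * (src1 ^ src2) & (res ^ src1), computed via cpu_cc_Z as scratch.
     */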
    tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
    tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
#ifdef TARGET_SPARC64
    tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}

static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}

static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv one = tcg_constant_tl(1);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}

static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
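    /*
     * UMUL/SMUL produce a full 64-bit product: the low 32 bits go to the
     * destination and the high 32 bits to %y.
     */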
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}

static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}

#ifndef TARGET_SPARC64
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif

static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}

static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, cpu_gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
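    /*
     * Illustrative: for GSR.align == 3 this yields
     * dst = (s1 << 24) | (s2 >> 40), i.e. bytes 3..7 of s1 followed by
     * bytes 0..2 of s2.
     */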
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fmul8x16al(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_ext16s_i32(src2, src2);
    gen_helper_fmul8x16a(dst, src1, src2);
}

static void gen_op_fmul8x16au(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_sari_i32(src2, src2, 16);
    gen_helper_fmul8x16a(dst, src1, src2);
}

static void gen_op_fmuld8ulx16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    tcg_gen_ext8u_i32(t0, src1);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_extract_i32(t1, src1, 16, 8);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

static void gen_op_fmuld8sux16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /*
     * The insn description talks about extracting the upper 8 bits
     * of the signed 16-bit input rs1, performing the multiply, then
     * shifting left by 8 bits.  Instead, zap the lower 8 bits of
     * the rs1 input, which avoids the need for two shifts.
     */
    tcg_gen_ext16s_i32(t0, src1);
    tcg_gen_andi_i32(t0, t0, ~0xff);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_sextract_i32(t1, src1, 16, 16);
    tcg_gen_andi_i32(t1, t1, ~0xff);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

static void finishing_insn(DisasContext *dc)
{
    /*
     * From here, there is no future path through an unwinding exception.
     * If the current insn cannot raise an exception, the computation of
     * cpu_cond may be able to be elided.
     */
    if (dc->cpu_cond_live) {
        tcg_gen_discard_tl(cpu_cond);
        dc->cpu_cond_live = false;
    }
}

static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv c2 = tcg_constant_tl(dc->jump.c2);

    tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
}

/* Call this function before using the condition register, as it may
   have been set for a jump. */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}

static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}

static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
{
    DisasDelayException *e = g_new0(DisasDelayException, 1);

    e->next = dc->delay_excp_list;
    dc->delay_excp_list = e;

    e->lab = gen_new_label();
    e->excp = excp;
    e->pc = dc->pc;
    /* Caller must have used flush_cond before branch. */
    assert(dc->npc != JUMP_PC);
    e->npc = dc->npc;

    return e->lab;
}

static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}

static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}

static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}

static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}

static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    TCGv_i32 fcc = cpu_fcc[cc];
    TCGv_i32 c1 = fcc;
    int c2 = 0;
    TCGCond tcond;

    /*
     * FCC values:
     * 0 =
     * 1 <
     * 2 >
     * 3 unordered
     */
    switch (cond & 7) {
    case 0x0: /* fbn */
        tcond = TCG_COND_NEVER;
        break;
    case 0x1: /* fbne : !0 */
        tcond = TCG_COND_NE;
        break;
    case 0x2: /* fblg : 1 or 2 */
        /* fcc in {1,2} - 1 -> fcc in {0,1} */
        c1 = tcg_temp_new_i32();
        tcg_gen_addi_i32(c1, fcc, -1);
        c2 = 1;
        tcond = TCG_COND_LEU;
        break;
    case 0x3: /* fbul : 1 or 3 */
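        /* fcc in {1,3} <=> low bit of fcc set */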
        c1 = tcg_temp_new_i32();
        tcg_gen_andi_i32(c1, fcc, 1);
        tcond = TCG_COND_NE;
        break;
    case 0x4: /* fbl  : 1 */
        c2 = 1;
        tcond = TCG_COND_EQ;
        break;
    case 0x5: /* fbug : 2 or 3 */
        c2 = 2;
        tcond = TCG_COND_GEU;
        break;
    case 0x6: /* fbg  : 2 */
        c2 = 2;
        tcond = TCG_COND_EQ;
        break;
    case 0x7: /* fbu  : 3 */
        c2 = 3;
        tcond = TCG_COND_EQ;
        break;
    }
    if (cond & 8) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c2 = c2;
    cmp->c1 = tcg_temp_new();
    tcg_gen_extu_i32_tl(cmp->c1, c1);
}

static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    static const TCGCond cond_reg[4] = {
        TCG_COND_NEVER,  /* reserved */
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
    };
    TCGCond tcond;

    if ((cond & 3) == 0) {
        return false;
    }
    tcond = cond_reg[cond & 3];
    if (cond & 4) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c1 = tcg_temp_new();
    cmp->c2 = 0;
    tcg_gen_mov_tl(cmp->c1, r_src);
    return true;
}

static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
}

static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}

static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i32(dst, src, 1u << 31);
}

static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i32(dst, src, ~(1u << 31));
}

static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}

static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i64(dst, src, 1ull << 63);
}

static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i64(dst, src, ~(1ull << 63));
}

static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_xori_i64(h, h, 1ull << 63);
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_andi_i64(h, h, ~(1ull << 63));
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fpexception_im(DisasContext *dc, int ftt)
{
    /*
     * CEXC is only set when successfully completing an FPop,
     * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
     * Thus we can simply store FTT into this field.
     */
    tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

/* asi moves */
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_CODE,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    MemOp memop;
} DisasASI;

/*
 * Build DisasASI.
 * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
 */
1189  */
1190 static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
1191 {
1192     ASIType type = GET_ASI_HELPER;
1193     int mem_idx = dc->mem_idx;
1194 
1195     if (asi == -1) {
1196         /* Artificial "non-asi" case. */
1197         type = GET_ASI_DIRECT;
1198         goto done;
1199     }
1200 
1201 #ifndef TARGET_SPARC64
1202     /* Before v9, all asis are immediate and privileged.  */
1203     if (asi < 0) {
1204         gen_exception(dc, TT_ILL_INSN);
1205         type = GET_ASI_EXCP;
1206     } else if (supervisor(dc)
1207                /* Note that LEON accepts ASI_USERDATA in user mode, for
1208                   use with CASA.  Also note that previous versions of
1209                   QEMU allowed (and old versions of gcc emitted) ASI_P
1210                   for LEON, which is incorrect.  */
1211                || (asi == ASI_USERDATA
1212                    && (dc->def->features & CPU_FEATURE_CASA))) {
1213         switch (asi) {
1214         case ASI_USERDATA:    /* User data access */
1215             mem_idx = MMU_USER_IDX;
1216             type = GET_ASI_DIRECT;
1217             break;
1218         case ASI_KERNELDATA:  /* Supervisor data access */
1219             mem_idx = MMU_KERNEL_IDX;
1220             type = GET_ASI_DIRECT;
1221             break;
1222         case ASI_USERTXT:     /* User text access */
1223             mem_idx = MMU_USER_IDX;
1224             type = GET_ASI_CODE;
1225             break;
1226         case ASI_KERNELTXT:   /* Supervisor text access */
1227             mem_idx = MMU_KERNEL_IDX;
1228             type = GET_ASI_CODE;
1229             break;
1230         case ASI_M_BYPASS:    /* MMU passthrough */
1231         case ASI_LEON_BYPASS: /* LEON MMU passthrough */
1232             mem_idx = MMU_PHYS_IDX;
1233             type = GET_ASI_DIRECT;
1234             break;
1235         case ASI_M_BCOPY: /* Block copy, sta access */
1236             mem_idx = MMU_KERNEL_IDX;
1237             type = GET_ASI_BCOPY;
1238             break;
1239         case ASI_M_BFILL: /* Block fill, stda access */
1240             mem_idx = MMU_KERNEL_IDX;
1241             type = GET_ASI_BFILL;
1242             break;
1243         }
1244 
1245         /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
1246          * permissions check in get_physical_address(..).
1247          */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below don't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        switch (asi) {
        case ASI_REAL:      /* Bypass */
        case ASI_REAL_IO:   /* Bypass, non-cacheable */
        case ASI_REAL_L:    /* Bypass LE */
        case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

 done:
    return (DisasASI){ type, asi, mem_idx, memop };
}

#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}

static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
#endif

static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_CODE:
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 t64 = tcg_temp_new_i64();

            gen_helper_ld_code(t64, tcg_env, addr, tcg_constant_i32(oi));
            tcg_gen_trunc_i64_tl(dst, t64);
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}

static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX: /* Reserved for stda.  */
        if (TARGET_LONG_BITS == 32) {
            gen_exception(dc, TT_ILL_INSN);
            break;
        } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            break;
        }
        /* On OpenSPARC T1+ CPUs, TWINX ASIs used in stores are ST_BLKINIT_ ASIs */
        /* fall through */

    case GET_ASI_DIRECT:
        tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_BCOPY:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Copy 32 bytes from the address in SRC to ADDR.
         *
         * From Ross RT625 hyperSPARC manual, section 4.6:
         * "Block Copy and Block Fill will work only on cache line boundaries."
         *
         * It does not specify whether an unaligned address is truncated or
         * trapped.  Previous QEMU behaviour was to truncate to 4 byte
         * alignment, which
         * is obviously wrong.  The only place I can see this used is in the
         * Linux kernel which begins with page alignment, advancing by 32,
         * so is always aligned.  Assume truncation as the simpler option.
         *
         * Since the loads and stores are paired, allow the copy to happen
         * in the host endianness.  The copy need not be atomic.
         */
        {
            MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv_i128 tmp = tcg_temp_new_i128();

            tcg_gen_andi_tl(saddr, src, -32);
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(saddr, saddr, 16);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}

static void gen_swap_asi(DisasContext *dc, DisasASI *da,
                         TCGv dst, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, src,
                               da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}

static void gen_cas_asi(DisasContext *dc, DisasASI *da,
                        TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        return;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
                                  da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}

static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
                               da->mem_idx, MO_UB);
        break;
    default:
        /* ??? In theory, this should raise DAE_invalid_asi.
           But the SS-20 ROMs do ldstuba [%l0] #ASI_M_CTL, %o1.  */
1602         if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
1603             gen_helper_exit_atomic(tcg_env);
1604         } else {
1605             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1606             TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
1607             TCGv_i64 s64, t64;
1608 
1609             save_state(dc);
1610             t64 = tcg_temp_new_i64();
1611             gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1612 
1613             s64 = tcg_constant_i64(0xff);
1614             gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);
1615 
1616             tcg_gen_trunc_i64_tl(dst, t64);
1617 
1618             /* End the TB.  */
1619             dc->npc = DYNAMIC_PC;
1620         }
1621         break;
1622     }
1623 }
1624 
1625 static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
1626                         TCGv addr, int rd)
1627 {
1628     MemOp memop = da->memop;
1629     MemOp size = memop & MO_SIZE;
1630     TCGv_i32 d32;
1631     TCGv_i64 d64;
1632     TCGv addr_tmp;
1633 
1634     /* TODO: Use 128-bit load/store below. */
1635     if (size == MO_128) {
1636         memop = (memop & ~MO_SIZE) | MO_64;
1637     }
1638 
1639     switch (da->type) {
1640     case GET_ASI_EXCP:
1641         break;
1642 
1643     case GET_ASI_DIRECT:
1644         memop |= MO_ALIGN_4;
1645         switch (size) {
1646         case MO_32:
1647             d32 = tcg_temp_new_i32();
1648             tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
1649             gen_store_fpr_F(dc, rd, d32);
1650             break;
1651 
1652         case MO_64:
1653             tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop);
1654             break;
1655 
1656         case MO_128:
1657             d64 = tcg_temp_new_i64();
1658             tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
1659             addr_tmp = tcg_temp_new();
1660             tcg_gen_addi_tl(addr_tmp, addr, 8);
1661             tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
1662             tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
1663             break;
1664         default:
1665             g_assert_not_reached();
1666         }
1667         break;
1668 
1669     case GET_ASI_BLOCK:
1670         /* Valid for lddfa on aligned registers only.  */
1671         if (orig_size == MO_64 && (rd & 7) == 0) {
1672             /* The first operation checks required alignment.  */
1673             addr_tmp = tcg_temp_new();
1674             for (int i = 0; ; ++i) {
1675                 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
1676                                     memop | (i == 0 ? MO_ALIGN_64 : 0));
1677                 if (i == 7) {
1678                     break;
1679                 }
1680                 tcg_gen_addi_tl(addr_tmp, addr, 8);
1681                 addr = addr_tmp;
1682             }
1683         } else {
1684             gen_exception(dc, TT_ILL_INSN);
1685         }
1686         break;
1687 
1688     case GET_ASI_SHORT:
1689         /* Valid for lddfa only.  */
1690         if (orig_size == MO_64) {
1691             tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
1692                                 memop | MO_ALIGN);
1693         } else {
1694             gen_exception(dc, TT_ILL_INSN);
1695         }
1696         break;
1697 
1698     default:
1699         {
1700             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1701             TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
1702 
1703             save_state(dc);
1704             /* According to the table in the UA2011 manual, the only
1705                other asis that are valid for ldfa/lddfa/ldqfa are
1706                the NO_FAULT asis.  We still need a helper for these,
1707                but we can just use the integer asi helper for them.  */
1708             switch (size) {
1709             case MO_32:
1710                 d64 = tcg_temp_new_i64();
1711                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
1712                 d32 = tcg_temp_new_i32();
1713                 tcg_gen_extrl_i64_i32(d32, d64);
1714                 gen_store_fpr_F(dc, rd, d32);
1715                 break;
1716             case MO_64:
1717                 gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr,
1718                                   r_asi, r_mop);
1719                 break;
1720             case MO_128:
1721                 d64 = tcg_temp_new_i64();
1722                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
1723                 addr_tmp = tcg_temp_new();
1724                 tcg_gen_addi_tl(addr_tmp, addr, 8);
1725                 gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr_tmp,
1726                                   r_asi, r_mop);
1727                 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
1728                 break;
1729             default:
1730                 g_assert_not_reached();
1731             }
1732         }
1733         break;
1734     }
1735 }
1736 
1737 static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
1738                         TCGv addr, int rd)
1739 {
1740     MemOp memop = da->memop;
1741     MemOp size = memop & MO_SIZE;
1742     TCGv_i32 d32;
1743     TCGv addr_tmp;
1744 
1745     /* TODO: Use 128-bit load/store below. */
1746     if (size == MO_128) {
1747         memop = (memop & ~MO_SIZE) | MO_64;
1748     }
1749 
1750     switch (da->type) {
1751     case GET_ASI_EXCP:
1752         break;
1753 
1754     case GET_ASI_DIRECT:
1756         switch (size) {
1757         case MO_32:
1758             d32 = gen_load_fpr_F(dc, rd);
1759             tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
1760             break;
1761         case MO_64:
1762             tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
1763                                 memop | MO_ALIGN_4);
1764             break;
1765         case MO_128:
1766             /* Only 4-byte alignment required.  However, it is legal for the
1767                cpu to signal the alignment fault, and the OS trap handler is
1768                required to fix it up.  Requiring 16-byte alignment here avoids
1769                having to probe the second page before performing the first
1770                write.  */
1771             tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
1772                                 memop | MO_ALIGN_16);
1773             addr_tmp = tcg_temp_new();
1774             tcg_gen_addi_tl(addr_tmp, addr, 8);
1775             tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
1776             break;
1777         default:
1778             g_assert_not_reached();
1779         }
1780         break;
1781 
1782     case GET_ASI_BLOCK:
1783         /* Valid for stdfa on aligned registers only.  */
1784         if (orig_size == MO_64 && (rd & 7) == 0) {
1785             /* The first operation checks required alignment.  */
1786             addr_tmp = tcg_temp_new();
1787             for (int i = 0; ; ++i) {
1788                 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
1789                                     memop | (i == 0 ? MO_ALIGN_64 : 0));
1790                 if (i == 7) {
1791                     break;
1792                 }
1793                 tcg_gen_addi_tl(addr_tmp, addr, 8);
1794                 addr = addr_tmp;
1795             }
1796         } else {
1797             gen_exception(dc, TT_ILL_INSN);
1798         }
1799         break;
1800 
1801     case GET_ASI_SHORT:
1802         /* Valid for stdfa only.  */
1803         if (orig_size == MO_64) {
1804             tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
1805                                 memop | MO_ALIGN);
1806         } else {
1807             gen_exception(dc, TT_ILL_INSN);
1808         }
1809         break;
1810 
1811     default:
1812         /* According to the table in the UA2011 manual, the only
1813            other asis that are valid for stfa/stdfa/stqfa are
1814            the PST* asis, which aren't currently handled.  */
1815         gen_exception(dc, TT_ILL_INSN);
1816         break;
1817     }
1818 }
1819 
1820 static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
1821 {
1822     TCGv hi = gen_dest_gpr(dc, rd);
1823     TCGv lo = gen_dest_gpr(dc, rd + 1);
1824 
1825     switch (da->type) {
1826     case GET_ASI_EXCP:
1827         return;
1828 
1829     case GET_ASI_DTWINX:
1830 #ifdef TARGET_SPARC64
1831         {
1832             MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
1833             TCGv_i128 t = tcg_temp_new_i128();
1834 
1835             tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
1836             /*
1837              * Note that LE twinx acts as if each 64-bit register result is
1838              * byte swapped.  We perform one 128-bit LE load, so must swap
1839              * the order of the writebacks.
1840              */
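            /*
             * Illustration (an editorial example, not from the manual):
             * if ADDR holds bytes 00..0f, the BE twin load returns
             * rd = 0x0001020304050607 and rd+1 = 0x08090a0b0c0d0e0f,
             * while the LE variant must return rd = 0x0706050403020100
             * and rd+1 = 0x0f0e0d0c0b0a0908 -- exactly the halves of the
             * single 128-bit LE load, written back in swapped order.
             */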
1841             if ((mop & MO_BSWAP) == MO_TE) {
1842                 tcg_gen_extr_i128_i64(lo, hi, t);
1843             } else {
1844                 tcg_gen_extr_i128_i64(hi, lo, t);
1845             }
1846         }
1847         break;
1848 #else
1849         g_assert_not_reached();
1850 #endif
1851 
1852     case GET_ASI_DIRECT:
1853         {
1854             TCGv_i64 tmp = tcg_temp_new_i64();
1855 
1856             tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);
1857 
1858             /* Note that LE ldda acts as if each 32-bit register
1859                result is byte swapped.  Having just performed one
1860                64-bit bswap, we now need to swap the order of the writebacks.  */
1861             if ((da->memop & MO_BSWAP) == MO_TE) {
1862                 tcg_gen_extr_i64_tl(lo, hi, tmp);
1863             } else {
1864                 tcg_gen_extr_i64_tl(hi, lo, tmp);
1865             }
1866         }
1867         break;
1868 
1869     case GET_ASI_CODE:
1870 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
1871         {
1872             MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
1873             TCGv_i64 tmp = tcg_temp_new_i64();
1874 
1875             gen_helper_ld_code(tmp, tcg_env, addr, tcg_constant_i32(oi));
1876 
1877             /* See above.  */
1878             if ((da->memop & MO_BSWAP) == MO_TE) {
1879                 tcg_gen_extr_i64_tl(lo, hi, tmp);
1880             } else {
1881                 tcg_gen_extr_i64_tl(hi, lo, tmp);
1882             }
1883         }
1884         break;
1885 #else
1886         g_assert_not_reached();
1887 #endif
1888 
1889     default:
1890         /* ??? In theory we've handled all of the ASIs that are valid
1891            for ldda, and this should raise DAE_invalid_asi.  However,
1892            real hardware allows others.  This can be seen with e.g.
1893            FreeBSD 10.3 wrt ASI_IC_TAG.  */
1894         {
1895             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1896             TCGv_i32 r_mop = tcg_constant_i32(da->memop);
1897             TCGv_i64 tmp = tcg_temp_new_i64();
1898 
1899             save_state(dc);
1900             gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);
1901 
1902             /* See above.  */
1903             if ((da->memop & MO_BSWAP) == MO_TE) {
1904                 tcg_gen_extr_i64_tl(lo, hi, tmp);
1905             } else {
1906                 tcg_gen_extr_i64_tl(hi, lo, tmp);
1907             }
1908         }
1909         break;
1910     }
1911 
1912     gen_store_gpr(dc, rd, hi);
1913     gen_store_gpr(dc, rd + 1, lo);
1914 }
1915 
1916 static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
1917 {
1918     TCGv hi = gen_load_gpr(dc, rd);
1919     TCGv lo = gen_load_gpr(dc, rd + 1);
1920 
1921     switch (da->type) {
1922     case GET_ASI_EXCP:
1923         break;
1924 
1925     case GET_ASI_DTWINX:
1926 #ifdef TARGET_SPARC64
1927         {
1928             MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
1929             TCGv_i128 t = tcg_temp_new_i128();
1930 
1931             /*
1932              * Note that LE twinx acts as if each 64-bit register result is
1933              * byte swapped.  We perform one 128-bit LE store, so must swap
1934              * the order of the construction.
1935              */
1936             if ((mop & MO_BSWAP) == MO_TE) {
1937                 tcg_gen_concat_i64_i128(t, lo, hi);
1938             } else {
1939                 tcg_gen_concat_i64_i128(t, hi, lo);
1940             }
1941             tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
1942         }
1943         break;
1944 #else
1945         g_assert_not_reached();
1946 #endif
1947 
1948     case GET_ASI_DIRECT:
1949         {
1950             TCGv_i64 t64 = tcg_temp_new_i64();
1951 
1952             /* Note that LE stda acts as if each 32-bit register result is
1953                byte swapped.  We will perform one 64-bit LE store, so now
1954                we must swap the order of the construction.  */
1955             if ((da->memop & MO_BSWAP) == MO_TE) {
1956                 tcg_gen_concat_tl_i64(t64, lo, hi);
1957             } else {
1958                 tcg_gen_concat_tl_i64(t64, hi, lo);
1959             }
1960             tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
1961         }
1962         break;
1963 
1964     case GET_ASI_BFILL:
1965         assert(TARGET_LONG_BITS == 32);
1966         /*
1967          * Store 32 bytes of [rd:rd+1] to ADDR.
1968          * See comments for GET_ASI_COPY above.
1969          */
1970         {
1971             MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
1972             TCGv_i64 t8 = tcg_temp_new_i64();
1973             TCGv_i128 t16 = tcg_temp_new_i128();
1974             TCGv daddr = tcg_temp_new();
1975 
1976             tcg_gen_concat_tl_i64(t8, lo, hi);
1977             tcg_gen_concat_i64_i128(t16, t8, t8);
1978             tcg_gen_andi_tl(daddr, addr, -32);
1979             tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
1980             tcg_gen_addi_tl(daddr, daddr, 16);
1981             tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
1982         }
1983         break;
1984 
1985     default:
1986         /* ??? In theory we've handled all of the ASIs that are valid
1987            for stda, and this should raise DAE_invalid_asi.  */
1988         {
1989             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1990             TCGv_i32 r_mop = tcg_constant_i32(da->memop);
1991             TCGv_i64 t64 = tcg_temp_new_i64();
1992 
1993             /* See above.  */
1994             if ((da->memop & MO_BSWAP) == MO_TE) {
1995                 tcg_gen_concat_tl_i64(t64, lo, hi);
1996             } else {
1997                 tcg_gen_concat_tl_i64(t64, hi, lo);
1998             }
1999 
2000             save_state(dc);
2001             gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2002         }
2003         break;
2004     }
2005 }
2006 
2007 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2008 {
2009 #ifdef TARGET_SPARC64
2010     TCGv_i32 c32, zero, dst, s1, s2;
2011     TCGv_i64 c64 = tcg_temp_new_i64();
2012 
2013     /* We have two choices here: extend the 32-bit data and use movcond_i64,
2014        or fold the comparison down to 32 bits and use movcond_i32.  Choose
2015        the latter.  */
2016     c32 = tcg_temp_new_i32();
2017     tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2018     tcg_gen_extrl_i64_i32(c32, c64);
2019 
2020     s1 = gen_load_fpr_F(dc, rs);
2021     s2 = gen_load_fpr_F(dc, rd);
2022     dst = tcg_temp_new_i32();
2023     zero = tcg_constant_i32(0);
2024 
2025     tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2026 
2027     gen_store_fpr_F(dc, rd, dst);
2028 #else
2029     qemu_build_not_reached();
2030 #endif
2031 }
2032 
2033 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2034 {
2035 #ifdef TARGET_SPARC64
2036     TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2037     tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
2038                         gen_load_fpr_D(dc, rs),
2039                         gen_load_fpr_D(dc, rd));
2040     gen_store_fpr_D(dc, rd, dst);
2041 #else
2042     qemu_build_not_reached();
2043 #endif
2044 }
2045 
2046 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2047 {
2048 #ifdef TARGET_SPARC64
2049     int qd = QFPREG(rd);
2050     int qs = QFPREG(rs);
2051     TCGv c2 = tcg_constant_tl(cmp->c2);
2052 
2053     tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, c2,
2054                         cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2055     tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, c2,
2056                         cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2057 
2058     gen_update_fprs_dirty(dc, qd);
2059 #else
2060     qemu_build_not_reached();
2061 #endif
2062 }
2063 
2064 #ifdef TARGET_SPARC64
2065 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
2066 {
2067     TCGv_i32 r_tl = tcg_temp_new_i32();
2068 
2069     /* load env->tl into r_tl */
2070     tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));
2071 
2072     /* tl = [0 ... MAXTL_MASK], where MAXTL_MASK must be a power of 2 minus 1 */
2073     tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2074 
2075     /* calculate offset to current trap state from env->ts, reuse r_tl */
2076     tcg_gen_muli_i32(r_tl, r_tl, sizeof(trap_state));
2077     tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));
2078 
2079     /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2080     {
2081         TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2082         tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2083         tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2084     }
2085 }
2086 #endif
2087 
2088 static int extract_dfpreg(DisasContext *dc, int x)
2089 {
2090     return DFPREG(x);
2091 }
2092 
2093 static int extract_qfpreg(DisasContext *dc, int x)
2094 {
2095     return QFPREG(x);
2096 }
2097 
2098 /* Include the auto-generated decoder.  */
2099 #include "decode-insns.c.inc"
2100 
2101 #define TRANS(NAME, AVAIL, FUNC, ...) \
2102     static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2103     { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
2104 
2105 #define avail_ALL(C)      true
2106 #ifdef TARGET_SPARC64
2107 # define avail_32(C)      false
2108 # define avail_ASR17(C)   false
2109 # define avail_CASA(C)    true
2110 # define avail_DIV(C)     true
2111 # define avail_MUL(C)     true
2112 # define avail_POWERDOWN(C) false
2113 # define avail_64(C)      true
2114 # define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
2115 # define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
2116 # define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
2117 # define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
2118 #else
2119 # define avail_32(C)      true
2120 # define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
2121 # define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
2122 # define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
2123 # define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
2124 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2125 # define avail_64(C)      false
2126 # define avail_GL(C)      false
2127 # define avail_HYPV(C)    false
2128 # define avail_VIS1(C)    false
2129 # define avail_VIS2(C)    false
2130 #endif
2131 
2132 /* Default case for non-jump instructions. */
2133 static bool advance_pc(DisasContext *dc)
2134 {
2135     TCGLabel *l1;
2136 
2137     finishing_insn(dc);
2138 
2139     if (dc->npc & 3) {
2140         switch (dc->npc) {
2141         case DYNAMIC_PC:
2142         case DYNAMIC_PC_LOOKUP:
2143             dc->pc = dc->npc;
2144             tcg_gen_mov_tl(cpu_pc, cpu_npc);
2145             tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2146             break;
2147 
2148         case JUMP_PC:
2149             /* we can do a static jump */
2150             l1 = gen_new_label();
2151             tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);
2152 
2153             /* jump not taken */
2154             gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);
2155 
2156             /* jump taken */
2157             gen_set_label(l1);
2158             gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);
2159 
2160             dc->base.is_jmp = DISAS_NORETURN;
2161             break;
2162 
2163         default:
2164             g_assert_not_reached();
2165         }
2166     } else {
2167         dc->pc = dc->npc;
2168         dc->npc = dc->npc + 4;
2169     }
2170     return true;
2171 }
2172 
2173 /*
2174  * Major opcodes 00 and 01 -- branches, call, and sethi
2175  */
2176 
2177 static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
2178                               bool annul, int disp)
2179 {
2180     target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
2181     target_ulong npc;
2182 
2183     finishing_insn(dc);
2184 
2185     if (cmp->cond == TCG_COND_ALWAYS) {
2186         if (annul) {
2187             dc->pc = dest;
2188             dc->npc = dest + 4;
2189         } else {
2190             gen_mov_pc_npc(dc);
2191             dc->npc = dest;
2192         }
2193         return true;
2194     }
2195 
2196     if (cmp->cond == TCG_COND_NEVER) {
2197         npc = dc->npc;
2198         if (npc & 3) {
2199             gen_mov_pc_npc(dc);
2200             if (annul) {
2201                 tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
2202             }
2203             tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
2204         } else {
2205             dc->pc = npc + (annul ? 4 : 0);
2206             dc->npc = dc->pc + 4;
2207         }
2208         return true;
2209     }
2210 
2211     flush_cond(dc);
2212     npc = dc->npc;
2213 
2214     if (annul) {
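        /*
         * With the annul bit set, the delay slot executes only when the
         * branch is taken: the taken path below flows through the slot at
         * NPC and on to DEST, while the untaken path skips straight to
         * NPC + 4.
         */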
2215         TCGLabel *l1 = gen_new_label();
2216 
2217         tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
2218         gen_goto_tb(dc, 0, npc, dest);
2219         gen_set_label(l1);
2220         gen_goto_tb(dc, 1, npc + 4, npc + 8);
2221 
2222         dc->base.is_jmp = DISAS_NORETURN;
2223     } else {
2224         if (npc & 3) {
2225             switch (npc) {
2226             case DYNAMIC_PC:
2227             case DYNAMIC_PC_LOOKUP:
2228                 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2229                 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2230                 tcg_gen_movcond_tl(cmp->cond, cpu_npc,
2231                                    cmp->c1, tcg_constant_tl(cmp->c2),
2232                                    tcg_constant_tl(dest), cpu_npc);
2233                 dc->pc = npc;
2234                 break;
2235             default:
2236                 g_assert_not_reached();
2237             }
2238         } else {
2239             dc->pc = npc;
2240             dc->npc = JUMP_PC;
2241             dc->jump = *cmp;
2242             dc->jump_pc[0] = dest;
2243             dc->jump_pc[1] = npc + 4;
2244 
2245             /* The condition for cpu_cond is always NE -- normalize. */
2246             if (cmp->cond == TCG_COND_NE) {
2247                 tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
2248             } else {
2249                 tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
2250             }
2251             dc->cpu_cond_live = true;
2252         }
2253     }
2254     return true;
2255 }
2256 
2257 static bool raise_priv(DisasContext *dc)
2258 {
2259     gen_exception(dc, TT_PRIV_INSN);
2260     return true;
2261 }
2262 
2263 static bool raise_unimpfpop(DisasContext *dc)
2264 {
2265     gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
2266     return true;
2267 }
2268 
2269 static bool gen_trap_float128(DisasContext *dc)
2270 {
2271     if (dc->def->features & CPU_FEATURE_FLOAT128) {
2272         return false;
2273     }
2274     return raise_unimpfpop(dc);
2275 }
2276 
2277 static bool do_bpcc(DisasContext *dc, arg_bcc *a)
2278 {
2279     DisasCompare cmp;
2280 
2281     gen_compare(&cmp, a->cc, a->cond, dc);
2282     return advance_jump_cond(dc, &cmp, a->a, a->i);
2283 }
2284 
2285 TRANS(Bicc, ALL, do_bpcc, a)
2286 TRANS(BPcc,  64, do_bpcc, a)
2287 
2288 static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
2289 {
2290     DisasCompare cmp;
2291 
2292     if (gen_trap_ifnofpu(dc)) {
2293         return true;
2294     }
2295     gen_fcompare(&cmp, a->cc, a->cond);
2296     return advance_jump_cond(dc, &cmp, a->a, a->i);
2297 }
2298 
2299 TRANS(FBPfcc,  64, do_fbpfcc, a)
2300 TRANS(FBfcc,  ALL, do_fbpfcc, a)
2301 
2302 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2303 {
2304     DisasCompare cmp;
2305 
2306     if (!avail_64(dc)) {
2307         return false;
2308     }
2309     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
2310         return false;
2311     }
2312     return advance_jump_cond(dc, &cmp, a->a, a->i);
2313 }
2314 
2315 static bool trans_CALL(DisasContext *dc, arg_CALL *a)
2316 {
2317     target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2318 
2319     gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
2320     gen_mov_pc_npc(dc);
2321     dc->npc = target;
2322     return true;
2323 }
2324 
2325 static bool trans_NCP(DisasContext *dc, arg_NCP *a)
2326 {
2327     /*
2328      * For sparc32, always generate the no-coprocessor exception.
2329      * For sparc64, always generate illegal instruction.
2330      */
2331 #ifdef TARGET_SPARC64
2332     return false;
2333 #else
2334     gen_exception(dc, TT_NCP_INSN);
2335     return true;
2336 #endif
2337 }
2338 
2339 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2340 {
2341     /* Special-case %g0 because that's the canonical nop.  */
2342     if (a->rd) {
2343         gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2344     }
2345     return advance_pc(dc);
2346 }
2347 
2348 /*
2349  * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2350  */
2351 
2352 static bool do_tcc(DisasContext *dc, int cond, int cc,
2353                    int rs1, bool imm, int rs2_or_imm)
2354 {
2355     int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2356                 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2357     DisasCompare cmp;
2358     TCGLabel *lab;
2359     TCGv_i32 trap;
2360 
2361     /* Trap never.  */
2362     if (cond == 0) {
2363         return advance_pc(dc);
2364     }
2365 
2366     /*
2367      * Immediate traps are the most common case.  Since this value is
2368      * live across the branch, it really pays to evaluate the constant.
2369      */
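    /* E.g. the common Linux syscall trap "ta 0x10" (rs1 = %g0, imm = 0x10)
       folds to tcg_constant_i32(0x10 + TT_TRAP) here. */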
2370     if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
2371         trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
2372     } else {
2373         trap = tcg_temp_new_i32();
2374         tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
2375         if (imm) {
2376             tcg_gen_addi_i32(trap, trap, rs2_or_imm);
2377         } else {
2378             TCGv_i32 t2 = tcg_temp_new_i32();
2379             tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
2380             tcg_gen_add_i32(trap, trap, t2);
2381         }
2382         tcg_gen_andi_i32(trap, trap, mask);
2383         tcg_gen_addi_i32(trap, trap, TT_TRAP);
2384     }
2385 
2386     finishing_insn(dc);
2387 
2388     /* Trap always.  */
2389     if (cond == 8) {
2390         save_state(dc);
2391         gen_helper_raise_exception(tcg_env, trap);
2392         dc->base.is_jmp = DISAS_NORETURN;
2393         return true;
2394     }
2395 
2396     /* Conditional trap.  */
2397     flush_cond(dc);
2398     lab = delay_exceptionv(dc, trap);
2399     gen_compare(&cmp, cc, cond, dc);
2400     tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);
2401 
2402     return advance_pc(dc);
2403 }
2404 
2405 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
2406 {
2407     if (avail_32(dc) && a->cc) {
2408         return false;
2409     }
2410     return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
2411 }
2412 
2413 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
2414 {
2415     if (avail_64(dc)) {
2416         return false;
2417     }
2418     return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
2419 }
2420 
2421 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
2422 {
2423     if (avail_32(dc)) {
2424         return false;
2425     }
2426     return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
2427 }
2428 
2429 static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
2430 {
2431     tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2432     return advance_pc(dc);
2433 }
2434 
2435 static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
2436 {
2437     if (avail_32(dc)) {
2438         return false;
2439     }
2440     if (a->mmask) {
2441         /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
2442         tcg_gen_mb(a->mmask | TCG_BAR_SC);
2443     }
2444     if (a->cmask) {
2445         /* For #Sync, etc, end the TB to recognize interrupts. */
2446         dc->base.is_jmp = DISAS_EXIT;
2447     }
2448     return advance_pc(dc);
2449 }
2450 
2451 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
2452                           TCGv (*func)(DisasContext *, TCGv))
2453 {
2454     if (!priv) {
2455         return raise_priv(dc);
2456     }
2457     gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
2458     return advance_pc(dc);
2459 }
2460 
2461 static TCGv do_rdy(DisasContext *dc, TCGv dst)
2462 {
2463     return cpu_y;
2464 }
2465 
2466 static bool trans_RDY(DisasContext *dc, arg_RDY *a)
2467 {
2468     /*
2469      * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
2470      * 32-bit cpus like sparcv7, which ignores the rs1 field.
2471      * This matches after all other ASR, so Leon3 Asr17 is handled first.
2472      */
2473     if (avail_64(dc) && a->rs1 != 0) {
2474         return false;
2475     }
2476     return do_rd_special(dc, true, a->rd, do_rdy);
2477 }
2478 
2479 static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
2480 {
2481     gen_helper_rdasr17(dst, tcg_env);
2482     return dst;
2483 }
2484 
2485 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2486 
2487 static TCGv do_rdccr(DisasContext *dc, TCGv dst)
2488 {
2489     gen_helper_rdccr(dst, tcg_env);
2490     return dst;
2491 }
2492 
2493 TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
2494 
2495 static TCGv do_rdasi(DisasContext *dc, TCGv dst)
2496 {
2497 #ifdef TARGET_SPARC64
2498     return tcg_constant_tl(dc->asi);
2499 #else
2500     qemu_build_not_reached();
2501 #endif
2502 }
2503 
2504 TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2505 
2506 static TCGv do_rdtick(DisasContext *dc, TCGv dst)
2507 {
2508     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2509 
2510     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
2511     if (translator_io_start(&dc->base)) {
2512         dc->base.is_jmp = DISAS_EXIT;
2513     }
2514     gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2515                               tcg_constant_i32(dc->mem_idx));
2516     return dst;
2517 }
2518 
2519 /* TODO: non-priv access only allowed when enabled. */
2520 TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)
2521 
2522 static TCGv do_rdpc(DisasContext *dc, TCGv dst)
2523 {
2524     return tcg_constant_tl(address_mask_i(dc, dc->pc));
2525 }
2526 
2527 TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2528 
2529 static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
2530 {
2531     tcg_gen_ext_i32_tl(dst, cpu_fprs);
2532     return dst;
2533 }
2534 
2535 TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)
2536 
2537 static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
2538 {
2539     gen_trap_ifnofpu(dc);
2540     return cpu_gsr;
2541 }
2542 
2543 TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
2544 
2545 static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
2546 {
2547     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
2548     return dst;
2549 }
2550 
2551 TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
2552 
2553 static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
2554 {
2555     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
2556     return dst;
2557 }
2558 
2559 /* TODO: non-priv access only allowed when enabled. */
2560 TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
2561 
2562 static TCGv do_rdstick(DisasContext *dc, TCGv dst)
2563 {
2564     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2565 
2566     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
2567     if (translator_io_start(&dc->base)) {
2568         dc->base.is_jmp = DISAS_EXIT;
2569     }
2570     gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2571                               tcg_constant_i32(dc->mem_idx));
2572     return dst;
2573 }
2574 
2575 /* TODO: non-priv access only allowed when enabled. */
2576 TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)
2577 
2578 static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
2579 {
2580     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
2581     return dst;
2582 }
2583 
2584 /* TODO: supervisor access only allowed when enabled by hypervisor. */
2585 TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
2586 
2587 /*
2588  * UltraSPARC-T1 Strand status.
2589  * The HYPV check may not be sufficient: UA2005 & UA2007 describe
2590  * this ASR as implementation dependent.
2591  */
2592 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
2593 {
2594     return tcg_constant_tl(1);
2595 }
2596 
2597 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
2598 
2599 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
2600 {
2601     gen_helper_rdpsr(dst, tcg_env);
2602     return dst;
2603 }
2604 
2605 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
2606 
2607 static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
2608 {
2609     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
2610     return dst;
2611 }
2612 
2613 TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
2614 
2615 static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
2616 {
2617     TCGv_i32 tl = tcg_temp_new_i32();
2618     TCGv_ptr tp = tcg_temp_new_ptr();
2619 
2620     tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
2621     tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
2622     tcg_gen_shli_i32(tl, tl, 3);
2623     tcg_gen_ext_i32_ptr(tp, tl);
2624     tcg_gen_add_ptr(tp, tp, tcg_env);
2625 
2626     tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
2627     return dst;
2628 }
2629 
2630 TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
2631 
2632 static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
2633 {
2634     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
2635     return dst;
2636 }
2637 
2638 TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)
2639 
2640 static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
2641 {
2642     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
2643     return dst;
2644 }
2645 
2646 TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)
2647 
2648 static TCGv do_rdhver(DisasContext *dc, TCGv dst)
2649 {
2650     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
2651     return dst;
2652 }
2653 
2654 TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)
2655 
2656 static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
2657 {
2658     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
2659     return dst;
2660 }
2661 
2662 TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
2663       do_rdhstick_cmpr)
2664 
2665 static TCGv do_rdwim(DisasContext *dc, TCGv dst)
2666 {
2667     tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
2668     return dst;
2669 }
2670 
2671 TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
2672 
2673 static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
2674 {
2675 #ifdef TARGET_SPARC64
2676     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2677 
2678     gen_load_trap_state_at_tl(r_tsptr);
2679     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
2680     return dst;
2681 #else
2682     qemu_build_not_reached();
2683 #endif
2684 }
2685 
2686 TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
2687 
2688 static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
2689 {
2690 #ifdef TARGET_SPARC64
2691     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2692 
2693     gen_load_trap_state_at_tl(r_tsptr);
2694     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
2695     return dst;
2696 #else
2697     qemu_build_not_reached();
2698 #endif
2699 }
2700 
2701 TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
2702 
2703 static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
2704 {
2705 #ifdef TARGET_SPARC64
2706     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2707 
2708     gen_load_trap_state_at_tl(r_tsptr);
2709     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
2710     return dst;
2711 #else
2712     qemu_build_not_reached();
2713 #endif
2714 }
2715 
2716 TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
2717 
2718 static TCGv do_rdtt(DisasContext *dc, TCGv dst)
2719 {
2720 #ifdef TARGET_SPARC64
2721     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2722 
2723     gen_load_trap_state_at_tl(r_tsptr);
2724     tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
2725     return dst;
2726 #else
2727     qemu_build_not_reached();
2728 #endif
2729 }
2730 
2731 TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
2732 TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
2733 
2734 static TCGv do_rdtba(DisasContext *dc, TCGv dst)
2735 {
2736     return cpu_tbr;
2737 }
2738 
2739 TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
2740 TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
2741 
2742 static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
2743 {
2744     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
2745     return dst;
2746 }
2747 
2748 TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
2749 
2750 static TCGv do_rdtl(DisasContext *dc, TCGv dst)
2751 {
2752     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
2753     return dst;
2754 }
2755 
2756 TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
2757 
2758 static TCGv do_rdpil(DisasContext *dc, TCGv dst)
2759 {
2760     tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
2761     return dst;
2762 }
2763 
2764 TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
2765 
2766 static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
2767 {
2768     gen_helper_rdcwp(dst, tcg_env);
2769     return dst;
2770 }
2771 
2772 TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
2773 
2774 static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
2775 {
2776     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
2777     return dst;
2778 }
2779 
2780 TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
2781 
2782 static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
2783 {
2784     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
2785     return dst;
2786 }
2787 
2788 TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
2789       do_rdcanrestore)
2790 
2791 static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
2792 {
2793     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
2794     return dst;
2795 }
2796 
2797 TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
2798 
2799 static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
2800 {
2801     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
2802     return dst;
2803 }
2804 
2805 TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)
2806 
2807 static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
2808 {
2809     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
2810     return dst;
2811 }
2812 
2813 TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)
2814 
2815 static TCGv do_rdgl(DisasContext *dc, TCGv dst)
2816 {
2817     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
2818     return dst;
2819 }
2820 
2821 TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
2822 
2823 /* UA2005 strand status */
2824 static TCGv do_rdssr(DisasContext *dc, TCGv dst)
2825 {
2826     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
2827     return dst;
2828 }
2829 
2830 TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)
2831 
2832 static TCGv do_rdver(DisasContext *dc, TCGv dst)
2833 {
2834     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
2835     return dst;
2836 }
2837 
2838 TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
2839 
2840 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
2841 {
2842     if (avail_64(dc)) {
2843         gen_helper_flushw(tcg_env);
2844         return advance_pc(dc);
2845     }
2846     return false;
2847 }
2848 
2849 static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
2850                           void (*func)(DisasContext *, TCGv))
2851 {
2852     TCGv src;
2853 
2854     /* For simplicity, we under-decoded the rs2 form. */
2855     if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
2856         return false;
2857     }
2858     if (!priv) {
2859         return raise_priv(dc);
2860     }
2861 
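    /*
     * The architectural source operand for the write-state-register
     * instructions is r[rs1] XOR r[rs2] (or r[rs1] XOR sign_ext(simm13)
     * for the immediate form), which is why the non-trivial cases below
     * compute an xor rather than an add.
     */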
2862     if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
2863         src = tcg_constant_tl(a->rs2_or_imm);
2864     } else {
2865         TCGv src1 = gen_load_gpr(dc, a->rs1);
2866         if (a->rs2_or_imm == 0) {
2867             src = src1;
2868         } else {
2869             src = tcg_temp_new();
2870             if (a->imm) {
2871                 tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
2872             } else {
2873                 tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
2874             }
2875         }
2876     }
2877     func(dc, src);
2878     return advance_pc(dc);
2879 }
2880 
2881 static void do_wry(DisasContext *dc, TCGv src)
2882 {
2883     tcg_gen_ext32u_tl(cpu_y, src);
2884 }
2885 
2886 TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
2887 
2888 static void do_wrccr(DisasContext *dc, TCGv src)
2889 {
2890     gen_helper_wrccr(tcg_env, src);
2891 }
2892 
2893 TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
2894 
2895 static void do_wrasi(DisasContext *dc, TCGv src)
2896 {
2897     TCGv tmp = tcg_temp_new();
2898 
2899     tcg_gen_ext8u_tl(tmp, src);
2900     tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
2901     /* End TB to notice changed ASI. */
2902     dc->base.is_jmp = DISAS_EXIT;
2903 }
2904 
2905 TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
2906 
2907 static void do_wrfprs(DisasContext *dc, TCGv src)
2908 {
2909 #ifdef TARGET_SPARC64
2910     tcg_gen_trunc_tl_i32(cpu_fprs, src);
2911     dc->fprs_dirty = 0;
2912     dc->base.is_jmp = DISAS_EXIT;
2913 #else
2914     qemu_build_not_reached();
2915 #endif
2916 }
2917 
2918 TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
2919 
2920 static void do_wrgsr(DisasContext *dc, TCGv src)
2921 {
2922     gen_trap_ifnofpu(dc);
2923     tcg_gen_mov_tl(cpu_gsr, src);
2924 }
2925 
2926 TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
2927 
2928 static void do_wrsoftint_set(DisasContext *dc, TCGv src)
2929 {
2930     gen_helper_set_softint(tcg_env, src);
2931 }
2932 
2933 TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)
2934 
2935 static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
2936 {
2937     gen_helper_clear_softint(tcg_env, src);
2938 }
2939 
2940 TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)
2941 
2942 static void do_wrsoftint(DisasContext *dc, TCGv src)
2943 {
2944     gen_helper_write_softint(tcg_env, src);
2945 }
2946 
2947 TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
2948 
2949 static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
2950 {
2951     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2952 
2953     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
2954     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
2955     translator_io_start(&dc->base);
2956     gen_helper_tick_set_limit(r_tickptr, src);
2957     /* End TB to handle timer interrupt */
2958     dc->base.is_jmp = DISAS_EXIT;
2959 }
2960 
2961 TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
2962 
2963 static void do_wrstick(DisasContext *dc, TCGv src)
2964 {
2965 #ifdef TARGET_SPARC64
2966     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2967 
2968     tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
2969     translator_io_start(&dc->base);
2970     gen_helper_tick_set_count(r_tickptr, src);
2971     /* End TB to handle timer interrupt */
2972     dc->base.is_jmp = DISAS_EXIT;
2973 #else
2974     qemu_build_not_reached();
2975 #endif
2976 }
2977 
2978 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
2979 
2980 static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
2981 {
2982     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2983 
2984     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
2985     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
2986     translator_io_start(&dc->base);
2987     gen_helper_tick_set_limit(r_tickptr, src);
2988     /* End TB to handle timer interrupt */
2989     dc->base.is_jmp = DISAS_EXIT;
2990 }
2991 
2992 TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
2993 
2994 static void do_wrpowerdown(DisasContext *dc, TCGv src)
2995 {
2996     finishing_insn(dc);
2997     save_state(dc);
2998     gen_helper_power_down(tcg_env);
2999 }
3000 
3001 TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3002 
3003 static void do_wrpsr(DisasContext *dc, TCGv src)
3004 {
3005     gen_helper_wrpsr(tcg_env, src);
3006     dc->base.is_jmp = DISAS_EXIT;
3007 }
3008 
3009 TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3010 
3011 static void do_wrwim(DisasContext *dc, TCGv src)
3012 {
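    /* Only one WIM bit per implemented register window is writable;
       the bits for nonexistent windows always read as zero. */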
3013     target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
3014     TCGv tmp = tcg_temp_new();
3015 
3016     tcg_gen_andi_tl(tmp, src, mask);
3017     tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
3018 }
3019 
3020 TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3021 
3022 static void do_wrtpc(DisasContext *dc, TCGv src)
3023 {
3024 #ifdef TARGET_SPARC64
3025     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3026 
3027     gen_load_trap_state_at_tl(r_tsptr);
3028     tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
3029 #else
3030     qemu_build_not_reached();
3031 #endif
3032 }
3033 
3034 TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)
3035 
3036 static void do_wrtnpc(DisasContext *dc, TCGv src)
3037 {
3038 #ifdef TARGET_SPARC64
3039     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3040 
3041     gen_load_trap_state_at_tl(r_tsptr);
3042     tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
3043 #else
3044     qemu_build_not_reached();
3045 #endif
3046 }
3047 
3048 TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)
3049 
3050 static void do_wrtstate(DisasContext *dc, TCGv src)
3051 {
3052 #ifdef TARGET_SPARC64
3053     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3054 
3055     gen_load_trap_state_at_tl(r_tsptr);
3056     tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
3057 #else
3058     qemu_build_not_reached();
3059 #endif
3060 }
3061 
3062 TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)
3063 
3064 static void do_wrtt(DisasContext *dc, TCGv src)
3065 {
3066 #ifdef TARGET_SPARC64
3067     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3068 
3069     gen_load_trap_state_at_tl(r_tsptr);
3070     tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
3071 #else
3072     qemu_build_not_reached();
3073 #endif
3074 }
3075 
3076 TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3077 
3078 static void do_wrtick(DisasContext *dc, TCGv src)
3079 {
3080     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3081 
3082     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3083     translator_io_start(&dc->base);
3084     gen_helper_tick_set_count(r_tickptr, src);
3085     /* End TB to handle timer interrupt */
3086     dc->base.is_jmp = DISAS_EXIT;
3087 }
3088 
3089 TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3090 
3091 static void do_wrtba(DisasContext *dc, TCGv src)
3092 {
3093     tcg_gen_mov_tl(cpu_tbr, src);
3094 }
3095 
3096 TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3097 
3098 static void do_wrpstate(DisasContext *dc, TCGv src)
3099 {
3100     save_state(dc);
3101     if (translator_io_start(&dc->base)) {
3102         dc->base.is_jmp = DISAS_EXIT;
3103     }
3104     gen_helper_wrpstate(tcg_env, src);
3105     dc->npc = DYNAMIC_PC;
3106 }
3107 
3108 TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
3109 
3110 static void do_wrtl(DisasContext *dc, TCGv src)
3111 {
3112     save_state(dc);
3113     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
3114     dc->npc = DYNAMIC_PC;
3115 }
3116 
3117 TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
3118 
3119 static void do_wrpil(DisasContext *dc, TCGv src)
3120 {
3121     if (translator_io_start(&dc->base)) {
3122         dc->base.is_jmp = DISAS_EXIT;
3123     }
3124     gen_helper_wrpil(tcg_env, src);
3125 }
3126 
3127 TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3128 
3129 static void do_wrcwp(DisasContext *dc, TCGv src)
3130 {
3131     gen_helper_wrcwp(tcg_env, src);
3132 }
3133 
3134 TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3135 
3136 static void do_wrcansave(DisasContext *dc, TCGv src)
3137 {
3138     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
3139 }
3140 
3141 TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)
3142 
3143 static void do_wrcanrestore(DisasContext *dc, TCGv src)
3144 {
3145     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
3146 }
3147 
3148 TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)
3149 
3150 static void do_wrcleanwin(DisasContext *dc, TCGv src)
3151 {
3152     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
3153 }
3154 
3155 TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)
3156 
3157 static void do_wrotherwin(DisasContext *dc, TCGv src)
3158 {
3159     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
3160 }
3161 
3162 TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)
3163 
3164 static void do_wrwstate(DisasContext *dc, TCGv src)
3165 {
3166     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
3167 }
3168 
3169 TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
3170 
3171 static void do_wrgl(DisasContext *dc, TCGv src)
3172 {
3173     gen_helper_wrgl(tcg_env, src);
3174 }
3175 
3176 TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3177 
3178 /* UA2005 strand status */
3179 static void do_wrssr(DisasContext *dc, TCGv src)
3180 {
3181     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
3182 }
3183 
3184 TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)
3185 
3186 TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3187 
3188 static void do_wrhpstate(DisasContext *dc, TCGv src)
3189 {
3190     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
3191     dc->base.is_jmp = DISAS_EXIT;
3192 }
3193 
3194 TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3195 
3196 static void do_wrhtstate(DisasContext *dc, TCGv src)
3197 {
3198     TCGv_i32 tl = tcg_temp_new_i32();
3199     TCGv_ptr tp = tcg_temp_new_ptr();
3200 
3201     tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3202     tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
3203     tcg_gen_shli_i32(tl, tl, 3);
3204     tcg_gen_ext_i32_ptr(tp, tl);
3205     tcg_gen_add_ptr(tp, tp, tcg_env);
3206 
3207     tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
3208 }
3209 
3210 TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
3211 
3212 static void do_wrhintp(DisasContext *dc, TCGv src)
3213 {
3214     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
3215 }
3216 
3217 TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)
3218 
3219 static void do_wrhtba(DisasContext *dc, TCGv src)
3220 {
3221     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
3222 }
3223 
3224 TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
3225 
3226 static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
3227 {
3228     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3229 
3230     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
3231     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
3232     translator_io_start(&dc->base);
3233     gen_helper_tick_set_limit(r_tickptr, src);
3234     /* End TB to handle timer interrupt */
3235     dc->base.is_jmp = DISAS_EXIT;
3236 }
3237 
3238 TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
3239       do_wrhstick_cmpr)
3240 
3241 static bool do_saved_restored(DisasContext *dc, bool saved)
3242 {
3243     if (!supervisor(dc)) {
3244         return raise_priv(dc);
3245     }
3246     if (saved) {
3247         gen_helper_saved(tcg_env);
3248     } else {
3249         gen_helper_restored(tcg_env);
3250     }
3251     return advance_pc(dc);
3252 }
3253 
3254 TRANS(SAVED, 64, do_saved_restored, true)
3255 TRANS(RESTORED, 64, do_saved_restored, false)
3256 
3257 static bool trans_NOP(DisasContext *dc, arg_NOP *a)
3258 {
3259     return advance_pc(dc);
3260 }
3261 
3262 /*
3263  * TODO: Need a feature bit for sparcv8.
3264  * In the meantime, treat all 32-bit cpus like sparcv7.
3265  */
3266 TRANS(NOP_v7, 32, trans_NOP, a)
3267 TRANS(NOP_v9, 64, trans_NOP, a)
3268 
3269 static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
3270                          void (*func)(TCGv, TCGv, TCGv),
3271                          void (*funci)(TCGv, TCGv, target_long),
3272                          bool logic_cc)
3273 {
3274     TCGv dst, src1;
3275 
3276     /* For simplicity, we under-decoded the rs2 form. */
3277     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3278         return false;
3279     }
3280 
3281     if (logic_cc) {
3282         dst = cpu_cc_N;
3283     } else {
3284         dst = gen_dest_gpr(dc, a->rd);
3285     }
3286     src1 = gen_load_gpr(dc, a->rs1);
3287 
3288     if (a->imm || a->rs2_or_imm == 0) {
3289         if (funci) {
3290             funci(dst, src1, a->rs2_or_imm);
3291         } else {
3292             func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
3293         }
3294     } else {
3295         func(dst, src1, cpu_regs[a->rs2_or_imm]);
3296     }
3297 
3298     if (logic_cc) {
3299         if (TARGET_LONG_BITS == 64) {
3300             tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
3301             tcg_gen_movi_tl(cpu_icc_C, 0);
3302         }
3303         tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
3304         tcg_gen_movi_tl(cpu_cc_C, 0);
3305         tcg_gen_movi_tl(cpu_cc_V, 0);
3306     }
3307 
3308     gen_store_gpr(dc, a->rd, dst);
3309     return advance_pc(dc);
3310 }
3311 
3312 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
3313                      void (*func)(TCGv, TCGv, TCGv),
3314                      void (*funci)(TCGv, TCGv, target_long),
3315                      void (*func_cc)(TCGv, TCGv, TCGv))
3316 {
3317     if (a->cc) {
3318         return do_arith_int(dc, a, func_cc, NULL, false);
3319     }
3320     return do_arith_int(dc, a, func, funci, false);
3321 }
3322 
3323 static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
3324                      void (*func)(TCGv, TCGv, TCGv),
3325                      void (*funci)(TCGv, TCGv, target_long))
3326 {
3327     return do_arith_int(dc, a, func, funci, a->cc);
3328 }
3329 
3330 TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
3331 TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
3332 TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
3333 TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)
3334 
3335 TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
3336 TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
3337 TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
3338 TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)
3339 
3340 TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
3341 TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
3342 TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
3343 TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
3344 TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)
3345 
3346 TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
3347 TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
3348 TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
3349 TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)
3350 
3351 TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
3352 TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)
3353 
3354 /* TODO: Should have feature bit -- comes in with UltraSparc T2. */
3355 TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3356 
3357 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
3358 {
3359     /* OR with %g0 is the canonical alias for MOV. */
3360     if (!a->cc && a->rs1 == 0) {
3361         if (a->imm || a->rs2_or_imm == 0) {
3362             gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
3363         } else if (a->rs2_or_imm & ~0x1f) {
3364             /* For simplicity, we under-decoded the rs2 form. */
3365             return false;
3366         } else {
3367             gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
3368         }
3369         return advance_pc(dc);
3370     }
3371     return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
3372 }
3373 
3374 static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
3375 {
3376     TCGv_i64 t1, t2;
3377     TCGv dst;
3378 
3379     if (!avail_DIV(dc)) {
3380         return false;
3381     }
3382     /* For simplicity, we under-decoded the rs2 form. */
3383     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3384         return false;
3385     }
3386 
3387     if (unlikely(a->rs2_or_imm == 0)) {
3388         gen_exception(dc, TT_DIV_ZERO);
3389         return true;
3390     }
3391 
3392     if (a->imm) {
3393         t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
3394     } else {
3395         TCGLabel *lab;
3396         TCGv_i32 n2;
3397 
3398         finishing_insn(dc);
3399         flush_cond(dc);
3400 
3401         n2 = tcg_temp_new_i32();
3402         tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);
3403 
3404         lab = delay_exception(dc, TT_DIV_ZERO);
3405         tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);
3406 
3407         t2 = tcg_temp_new_i64();
3408 #ifdef TARGET_SPARC64
3409         tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
3410 #else
3411         tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
3412 #endif
3413     }
3414 
3415     t1 = tcg_temp_new_i64();
3416     tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);
3417 
3418     tcg_gen_divu_i64(t1, t1, t2);
3419     tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));
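    /* On unsigned divide overflow, V8 specifies the saturated quotient
       0xffffffff as the result, hence the umin above. */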
3420 
3421     dst = gen_dest_gpr(dc, a->rd);
3422     tcg_gen_trunc_i64_tl(dst, t1);
3423     gen_store_gpr(dc, a->rd, dst);
3424     return advance_pc(dc);
3425 }
3426 
3427 static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
3428 {
3429     TCGv dst, src1, src2;
3430 
3431     if (!avail_64(dc)) {
3432         return false;
3433     }
3434     /* For simplicity, we under-decoded the rs2 form. */
3435     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3436         return false;
3437     }
3438 
3439     if (unlikely(a->rs2_or_imm == 0)) {
3440         gen_exception(dc, TT_DIV_ZERO);
3441         return true;
3442     }
3443 
3444     if (a->imm) {
3445         src2 = tcg_constant_tl(a->rs2_or_imm);
3446     } else {
3447         TCGLabel *lab;
3448 
3449         finishing_insn(dc);
3450         flush_cond(dc);
3451 
3452         lab = delay_exception(dc, TT_DIV_ZERO);
3453         src2 = cpu_regs[a->rs2_or_imm];
3454         tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3455     }
3456 
3457     dst = gen_dest_gpr(dc, a->rd);
3458     src1 = gen_load_gpr(dc, a->rs1);
3459 
3460     tcg_gen_divu_tl(dst, src1, src2);
3461     gen_store_gpr(dc, a->rd, dst);
3462     return advance_pc(dc);
3463 }
3464 
3465 static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
3466 {
3467     TCGv dst, src1, src2;
3468 
3469     if (!avail_64(dc)) {
3470         return false;
3471     }
3472     /* For simplicity, we under-decoded the rs2 form. */
3473     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3474         return false;
3475     }
3476 
3477     if (unlikely(a->rs2_or_imm == 0)) {
3478         gen_exception(dc, TT_DIV_ZERO);
3479         return true;
3480     }
3481 
3482     dst = gen_dest_gpr(dc, a->rd);
3483     src1 = gen_load_gpr(dc, a->rs1);
3484 
3485     if (a->imm) {
3486         if (unlikely(a->rs2_or_imm == -1)) {
3487             tcg_gen_neg_tl(dst, src1);
3488             gen_store_gpr(dc, a->rd, dst);
3489             return advance_pc(dc);
3490         }
3491         src2 = tcg_constant_tl(a->rs2_or_imm);
3492     } else {
3493         TCGLabel *lab;
3494         TCGv t1, t2;
3495 
3496         finishing_insn(dc);
3497         flush_cond(dc);
3498 
3499         lab = delay_exception(dc, TT_DIV_ZERO);
3500         src2 = cpu_regs[a->rs2_or_imm];
3501         tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3502 
3503         /*
3504          * Need to avoid INT64_MIN / -1, which would trap on an x86 host.
3505          * Substitute 1 as the divisor: INT64_MIN / 1 is the correct result.
3506          */
3507         t1 = tcg_temp_new();
3508         t2 = tcg_temp_new();
3509         tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
3510         tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
3511         tcg_gen_and_tl(t1, t1, t2);
3512         tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
3513                            tcg_constant_tl(1), src2);
3514         src2 = t1;
3515     }
3516 
3517     tcg_gen_div_tl(dst, src1, src2);
3518     gen_store_gpr(dc, a->rd, dst);
3519     return advance_pc(dc);
3520 }
3521 
3522 static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
3523                      int width, bool cc, bool left)
3524 {
3525     TCGv dst, s1, s2, lo1, lo2;
3526     uint64_t amask, tabl, tabr;
3527     int shift, imask, omask;
3528 
3529     dst = gen_dest_gpr(dc, a->rd);
3530     s1 = gen_load_gpr(dc, a->rs1);
3531     s2 = gen_load_gpr(dc, a->rs2);
3532 
3533     if (cc) {
3534         gen_op_subcc(cpu_cc_N, s1, s2);
3535     }
3536 
3537     /*
3538      * Theory of operation: there are two tables, left and right (not to
3539      * be confused with the left and right versions of the opcode).  These
3540      * are indexed by the low 3 bits of the inputs.  To make things "easy",
3541      * these tables are loaded into two constants, TABL and TABR below.
3542      * The operation index = (input & imask) << shift calculates the index
3543      * into the constant, while val = (table >> index) & omask calculates
3544      * the value we're looking for.
3545      */
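    /*
     * For example, EDGE8 (left == false): an rs1 offset of 0 selects
     * TABL byte 0 = 0xff (all eight bytes participate), while an
     * offset of 7 selects byte 7 = 0x01 (only the final byte).
     */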
3546     switch (width) {
3547     case 8:
3548         imask = 0x7;
3549         shift = 3;
3550         omask = 0xff;
3551         if (left) {
3552             tabl = 0x80c0e0f0f8fcfeffULL;
3553             tabr = 0xff7f3f1f0f070301ULL;
3554         } else {
3555             tabl = 0x0103070f1f3f7fffULL;
3556             tabr = 0xfffefcf8f0e0c080ULL;
3557         }
3558         break;
3559     case 16:
3560         imask = 0x6;
3561         shift = 1;
3562         omask = 0xf;
3563         if (left) {
3564             tabl = 0x8cef;
3565             tabr = 0xf731;
3566         } else {
3567             tabl = 0x137f;
3568             tabr = 0xfec8;
3569         }
3570         break;
3571     case 32:
3572         imask = 0x4;
3573         shift = 0;
3574         omask = 0x3;
3575         if (left) {
3576             tabl = (2 << 2) | 3;
3577             tabr = (3 << 2) | 1;
3578         } else {
3579             tabl = (1 << 2) | 3;
3580             tabr = (3 << 2) | 2;
3581         }
3582         break;
3583     default:
3584         abort();
3585     }
3586 
3587     lo1 = tcg_temp_new();
3588     lo2 = tcg_temp_new();
3589     tcg_gen_andi_tl(lo1, s1, imask);
3590     tcg_gen_andi_tl(lo2, s2, imask);
3591     tcg_gen_shli_tl(lo1, lo1, shift);
3592     tcg_gen_shli_tl(lo2, lo2, shift);
3593 
3594     tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
3595     tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
3596     tcg_gen_andi_tl(lo1, lo1, omask);
3597     tcg_gen_andi_tl(lo2, lo2, omask);
3598 
3599     amask = address_mask_i(dc, -8);
3600     tcg_gen_andi_tl(s1, s1, amask);
3601     tcg_gen_andi_tl(s2, s2, amask);
3602 
3603     /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
3604     tcg_gen_and_tl(lo2, lo2, lo1);
3605     tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);
3606 
3607     gen_store_gpr(dc, a->rd, dst);
3608     return advance_pc(dc);
3609 }
3610 
3611 TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
3612 TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
3613 TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
3614 TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
3615 TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
3616 TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)
3617 
3618 TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
3619 TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
3620 TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
3621 TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
3622 TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
3623 TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
3624 
3625 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
3626                    void (*func)(TCGv, TCGv, TCGv))
3627 {
3628     TCGv dst = gen_dest_gpr(dc, a->rd);
3629     TCGv src1 = gen_load_gpr(dc, a->rs1);
3630     TCGv src2 = gen_load_gpr(dc, a->rs2);
3631 
3632     func(dst, src1, src2);
3633     gen_store_gpr(dc, a->rd, dst);
3634     return advance_pc(dc);
3635 }
3636 
3637 TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
3638 TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
3639 TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
3640 
3641 static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
3642 {
3643 #ifdef TARGET_SPARC64
3644     TCGv tmp = tcg_temp_new();
3645 
3646     tcg_gen_add_tl(tmp, s1, s2);
3647     tcg_gen_andi_tl(dst, tmp, -8);
3648     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3649 #else
3650     g_assert_not_reached();
3651 #endif
3652 }
3653 
3654 static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
3655 {
3656 #ifdef TARGET_SPARC64
3657     TCGv tmp = tcg_temp_new();
3658 
3659     tcg_gen_add_tl(tmp, s1, s2);
3660     tcg_gen_andi_tl(dst, tmp, -8);
3661     tcg_gen_neg_tl(tmp, tmp);
3662     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3663 #else
3664     g_assert_not_reached();
3665 #endif
3666 }
3667 
3668 TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
3669 TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
3670 
3671 static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
3672 {
3673 #ifdef TARGET_SPARC64
3674     tcg_gen_add_tl(dst, s1, s2);
3675     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
3676 #else
3677     g_assert_not_reached();
3678 #endif
3679 }
3680 
3681 TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
3682 
3683 static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
3684 {
3685     TCGv dst, src1, src2;
3686 
3687     /* Reject 64-bit shifts for sparc32. */
3688     if (avail_32(dc) && a->x) {
3689         return false;
3690     }
3691 
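    /*
     * Mask the shift count: SPARC honours only the low 5 (32-bit) or
     * 6 (64-bit) bits, and TCG shifts are undefined for counts >= the
     * operand width.
     */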
3692     src2 = tcg_temp_new();
3693     tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
3694     src1 = gen_load_gpr(dc, a->rs1);
3695     dst = gen_dest_gpr(dc, a->rd);
3696 
3697     if (l) {
3698         tcg_gen_shl_tl(dst, src1, src2);
3699         if (!a->x) {
3700             tcg_gen_ext32u_tl(dst, dst);
3701         }
3702     } else if (u) {
3703         if (!a->x) {
3704             tcg_gen_ext32u_tl(dst, src1);
3705             src1 = dst;
3706         }
3707         tcg_gen_shr_tl(dst, src1, src2);
3708     } else {
3709         if (!a->x) {
3710             tcg_gen_ext32s_tl(dst, src1);
3711             src1 = dst;
3712         }
3713         tcg_gen_sar_tl(dst, src1, src2);
3714     }
3715     gen_store_gpr(dc, a->rd, dst);
3716     return advance_pc(dc);
3717 }
3718 
3719 TRANS(SLL_r, ALL, do_shift_r, a, true, true)
3720 TRANS(SRL_r, ALL, do_shift_r, a, false, true)
3721 TRANS(SRA_r, ALL, do_shift_r, a, false, false)
3722 
3723 static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
3724 {
3725     TCGv dst, src1;
3726 
3727     /* Reject 64-bit shifts for sparc32. */
3728     if (avail_32(dc) && (a->x || a->i >= 32)) {
3729         return false;
3730     }
3731 
3732     src1 = gen_load_gpr(dc, a->rs1);
3733     dst = gen_dest_gpr(dc, a->rd);
3734 
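    /*
     * Otherwise, for a 32-bit shift on a 64-bit target, deposit/extract
     * perform the shift and the required zero- or sign-extension in one op.
     */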
3735     if (avail_32(dc) || a->x) {
3736         if (l) {
3737             tcg_gen_shli_tl(dst, src1, a->i);
3738         } else if (u) {
3739             tcg_gen_shri_tl(dst, src1, a->i);
3740         } else {
3741             tcg_gen_sari_tl(dst, src1, a->i);
3742         }
3743     } else {
3744         if (l) {
3745             tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
3746         } else if (u) {
3747             tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
3748         } else {
3749             tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
3750         }
3751     }
3752     gen_store_gpr(dc, a->rd, dst);
3753     return advance_pc(dc);
3754 }
3755 
3756 TRANS(SLL_i, ALL, do_shift_i, a, true, true)
3757 TRANS(SRL_i, ALL, do_shift_i, a, false, true)
3758 TRANS(SRA_i, ALL, do_shift_i, a, false, false)
3759 
3760 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
3761 {
3762     /* For simplicity, we under-decoded the rs2 form. */
3763     if (!imm && rs2_or_imm & ~0x1f) {
3764         return NULL;
3765     }
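    /* %g0 always reads as zero, so rs2 == %g0 shares the constant path. */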
3766     if (imm || rs2_or_imm == 0) {
3767         return tcg_constant_tl(rs2_or_imm);
3768     } else {
3769         return cpu_regs[rs2_or_imm];
3770     }
3771 }
3772 
3773 static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
3774 {
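    /* Load rd as a source: movcond must preserve it when the condition fails. */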
3775     TCGv dst = gen_load_gpr(dc, rd);
3776     TCGv c2 = tcg_constant_tl(cmp->c2);
3777 
3778     tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
3779     gen_store_gpr(dc, rd, dst);
3780     return advance_pc(dc);
3781 }
3782 
3783 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
3784 {
3785     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
3786     DisasCompare cmp;
3787 
3788     if (src2 == NULL) {
3789         return false;
3790     }
3791     gen_compare(&cmp, a->cc, a->cond, dc);
3792     return do_mov_cond(dc, &cmp, a->rd, src2);
3793 }
3794 
3795 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
3796 {
3797     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
3798     DisasCompare cmp;
3799 
3800     if (src2 == NULL) {
3801         return false;
3802     }
3803     gen_fcompare(&cmp, a->cc, a->cond);
3804     return do_mov_cond(dc, &cmp, a->rd, src2);
3805 }
3806 
3807 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
3808 {
3809     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
3810     DisasCompare cmp;
3811 
3812     if (src2 == NULL) {
3813         return false;
3814     }
3815     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
3816         return false;
3817     }
3818     return do_mov_cond(dc, &cmp, a->rd, src2);
3819 }
3820 
3821 static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
3822                            bool (*func)(DisasContext *dc, int rd, TCGv src))
3823 {
3824     TCGv src1, sum;
3825 
3826     /* For simplicity, we under-decoded the rs2 form. */
3827     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3828         return false;
3829     }
3830 
3831     /*
3832      * Always load the sum into a new temporary.  This is required to
3833      * capture the value across a window change, e.g. SAVE and RESTORE:
3834      * without the copy, the value could be re-read from a rotated window.
3835      */
3836     sum = tcg_temp_new();
3837     src1 = gen_load_gpr(dc, a->rs1);
3838     if (a->imm || a->rs2_or_imm == 0) {
3839         tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
3840     } else {
3841         tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
3842     }
3843     return func(dc, a->rd, sum);
3844 }
3845 
3846 static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
3847 {
3848     /*
3849      * Preserve pc across advance, so that we can delay
3850      * the writeback to rd until after src is consumed.
3851      */
3852     target_ulong cur_pc = dc->pc;
3853 
3854     gen_check_align(dc, src, 3);
3855 
3856     gen_mov_pc_npc(dc);
3857     tcg_gen_mov_tl(cpu_npc, src);
3858     gen_address_mask(dc, cpu_npc);
3859     gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));
3860 
3861     dc->npc = DYNAMIC_PC_LOOKUP;
3862     return true;
3863 }
3864 
3865 TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
3866 
3867 static bool do_rett(DisasContext *dc, int rd, TCGv src)
3868 {
3869     if (!supervisor(dc)) {
3870         return raise_priv(dc);
3871     }
3872 
3873     gen_check_align(dc, src, 3);
3874 
3875     gen_mov_pc_npc(dc);
3876     tcg_gen_mov_tl(cpu_npc, src);
3877     gen_helper_rett(tcg_env);
3878 
3879     dc->npc = DYNAMIC_PC;
3880     return true;
3881 }
3882 
3883 TRANS(RETT, 32, do_add_special, a, do_rett)
3884 
3885 static bool do_return(DisasContext *dc, int rd, TCGv src)
3886 {
3887     gen_check_align(dc, src, 3);
3888     gen_helper_restore(tcg_env);
3889 
3890     gen_mov_pc_npc(dc);
3891     tcg_gen_mov_tl(cpu_npc, src);
3892     gen_address_mask(dc, cpu_npc);
3893 
3894     dc->npc = DYNAMIC_PC_LOOKUP;
3895     return true;
3896 }
3897 
3898 TRANS(RETURN, 64, do_add_special, a, do_return)
3899 
3900 static bool do_save(DisasContext *dc, int rd, TCGv src)
3901 {
3902     gen_helper_save(tcg_env);
3903     gen_store_gpr(dc, rd, src);
3904     return advance_pc(dc);
3905 }
3906 
3907 TRANS(SAVE, ALL, do_add_special, a, do_save)
3908 
3909 static bool do_restore(DisasContext *dc, int rd, TCGv src)
3910 {
3911     gen_helper_restore(tcg_env);
3912     gen_store_gpr(dc, rd, src);
3913     return advance_pc(dc);
3914 }
3915 
3916 TRANS(RESTORE, ALL, do_add_special, a, do_restore)
3917 
3918 static bool do_done_retry(DisasContext *dc, bool done)
3919 {
3920     if (!supervisor(dc)) {
3921         return raise_priv(dc);
3922     }
3923     dc->npc = DYNAMIC_PC;
3924     dc->pc = DYNAMIC_PC;
3925     translator_io_start(&dc->base);
3926     if (done) {
3927         gen_helper_done(tcg_env);
3928     } else {
3929         gen_helper_retry(tcg_env);
3930     }
3931     return true;
3932 }
3933 
3934 TRANS(DONE, 64, do_done_retry, true)
3935 TRANS(RETRY, 64, do_done_retry, false)
3936 
3937 /*
3938  * Major opcode 11 -- load and store instructions
3939  */
3940 
3941 static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
3942 {
3943     TCGv addr, tmp = NULL;
3944 
3945     /* For simplicity, we under-decoded the rs2 form. */
3946     if (!imm && rs2_or_imm & ~0x1f) {
3947         return NULL;
3948     }
3949 
3950     addr = gen_load_gpr(dc, rs1);
3951     if (rs2_or_imm) {
3952         tmp = tcg_temp_new();
3953         if (imm) {
3954             tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
3955         } else {
3956             tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
3957         }
3958         addr = tmp;
3959     }
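    /* With 32-bit address masking in effect, the high 32 bits are ignored. */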
3960     if (AM_CHECK(dc)) {
3961         if (!tmp) {
3962             tmp = tcg_temp_new();
3963         }
3964         tcg_gen_ext32u_tl(tmp, addr);
3965         addr = tmp;
3966     }
3967     return addr;
3968 }
3969 
3970 static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
3971 {
3972     TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
3973     DisasASI da;
3974 
3975     if (addr == NULL) {
3976         return false;
3977     }
3978     da = resolve_asi(dc, a->asi, mop);
3979 
3980     reg = gen_dest_gpr(dc, a->rd);
3981     gen_ld_asi(dc, &da, reg, addr);
3982     gen_store_gpr(dc, a->rd, reg);
3983     return advance_pc(dc);
3984 }
3985 
3986 TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
3987 TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
3988 TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
3989 TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
3990 TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
3991 TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
3992 TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
3993 
3994 static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
3995 {
3996     TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
3997     DisasASI da;
3998 
3999     if (addr == NULL) {
4000         return false;
4001     }
4002     da = resolve_asi(dc, a->asi, mop);
4003 
4004     reg = gen_load_gpr(dc, a->rd);
4005     gen_st_asi(dc, &da, reg, addr);
4006     return advance_pc(dc);
4007 }
4008 
4009 TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
4010 TRANS(STB, ALL, do_st_gpr, a, MO_UB)
4011 TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
4012 TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4013 
4014 static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
4015 {
4016     TCGv addr;
4017     DisasASI da;
4018 
4019     if (a->rd & 1) {
4020         return false;
4021     }
4022     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4023     if (addr == NULL) {
4024         return false;
4025     }
4026     da = resolve_asi(dc, a->asi, MO_TEUQ);
4027     gen_ldda_asi(dc, &da, addr, a->rd);
4028     return advance_pc(dc);
4029 }
4030 
4031 static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
4032 {
4033     TCGv addr;
4034     DisasASI da;
4035 
4036     if (a->rd & 1) {
4037         return false;
4038     }
4039     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4040     if (addr == NULL) {
4041         return false;
4042     }
4043     da = resolve_asi(dc, a->asi, MO_TEUQ);
4044     gen_stda_asi(dc, &da, addr, a->rd);
4045     return advance_pc(dc);
4046 }
4047 
4048 static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
4049 {
4050     TCGv addr, reg;
4051     DisasASI da;
4052 
4053     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4054     if (addr == NULL) {
4055         return false;
4056     }
4057     da = resolve_asi(dc, a->asi, MO_UB);
4058 
4059     reg = gen_dest_gpr(dc, a->rd);
4060     gen_ldstub_asi(dc, &da, reg, addr);
4061     gen_store_gpr(dc, a->rd, reg);
4062     return advance_pc(dc);
4063 }
4064 
4065 static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
4066 {
4067     TCGv addr, dst, src;
4068     DisasASI da;
4069 
4070     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4071     if (addr == NULL) {
4072         return false;
4073     }
4074     da = resolve_asi(dc, a->asi, MO_TEUL);
4075 
4076     dst = gen_dest_gpr(dc, a->rd);
4077     src = gen_load_gpr(dc, a->rd);
4078     gen_swap_asi(dc, &da, dst, src, addr);
4079     gen_store_gpr(dc, a->rd, dst);
4080     return advance_pc(dc);
4081 }
4082 
4083 static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4084 {
4085     TCGv addr, o, n, c;
4086     DisasASI da;
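    /*
     * The address comes from rs1 alone; the rs2 field selects the
     * comparison value, not an address component.
     */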
4087 
4088     addr = gen_ldst_addr(dc, a->rs1, true, 0);
4089     if (addr == NULL) {
4090         return false;
4091     }
4092     da = resolve_asi(dc, a->asi, mop);
4093 
4094     o = gen_dest_gpr(dc, a->rd);
4095     n = gen_load_gpr(dc, a->rd);
4096     c = gen_load_gpr(dc, a->rs2_or_imm);
4097     gen_cas_asi(dc, &da, o, n, c, addr);
4098     gen_store_gpr(dc, a->rd, o);
4099     return advance_pc(dc);
4100 }
4101 
4102 TRANS(CASA, CASA, do_casa, a, MO_TEUL)
4103 TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4104 
4105 static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4106 {
4107     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4108     DisasASI da;
4109 
4110     if (addr == NULL) {
4111         return false;
4112     }
4113     if (gen_trap_ifnofpu(dc)) {
4114         return true;
4115     }
4116     if (sz == MO_128 && gen_trap_float128(dc)) {
4117         return true;
4118     }
4119     da = resolve_asi(dc, a->asi, MO_TE | sz);
4120     gen_ldf_asi(dc, &da, sz, addr, a->rd);
4121     gen_update_fprs_dirty(dc, a->rd);
4122     return advance_pc(dc);
4123 }
4124 
4125 TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
4126 TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
4127 TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)
4128 
4129 TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
4130 TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
4131 TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4132 
4133 static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4134 {
4135     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4136     DisasASI da;
4137 
4138     if (addr == NULL) {
4139         return false;
4140     }
4141     if (gen_trap_ifnofpu(dc)) {
4142         return true;
4143     }
4144     if (sz == MO_128 && gen_trap_float128(dc)) {
4145         return true;
4146     }
4147     da = resolve_asi(dc, a->asi, MO_TE | sz);
4148     gen_stf_asi(dc, &da, sz, addr, a->rd);
4149     return advance_pc(dc);
4150 }
4151 
4152 TRANS(STF, ALL, do_st_fpr, a, MO_32)
4153 TRANS(STDF, ALL, do_st_fpr, a, MO_64)
4154 TRANS(STQF, ALL, do_st_fpr, a, MO_128)
4155 
4156 TRANS(STFA, 64, do_st_fpr, a, MO_32)
4157 TRANS(STDFA, 64, do_st_fpr, a, MO_64)
4158 TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4159 
4160 static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
4161 {
4162     if (!avail_32(dc)) {
4163         return false;
4164     }
4165     if (!supervisor(dc)) {
4166         return raise_priv(dc);
4167     }
4168     if (gen_trap_ifnofpu(dc)) {
4169         return true;
4170     }
4171     gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
4172     return true;
4173 }
4174 
4175 static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
4176 {
4177     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4178     TCGv_i32 tmp;
4179 
4180     if (addr == NULL) {
4181         return false;
4182     }
4183     if (gen_trap_ifnofpu(dc)) {
4184         return true;
4185     }
4186 
4187     tmp = tcg_temp_new_i32();
4188     tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);
4189 
4190     tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2);
4191     /* LDFSR does not change FCC[1-3]. */
4192 
4193     gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp);
4194     return advance_pc(dc);
4195 }
4196 
4197 static bool trans_LDXFSR(DisasContext *dc, arg_r_r_ri *a)
4198 {
4199 #ifdef TARGET_SPARC64
4200     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4201     TCGv_i64 t64;
4202     TCGv_i32 lo, hi;
4203 
4204     if (addr == NULL) {
4205         return false;
4206     }
4207     if (gen_trap_ifnofpu(dc)) {
4208         return true;
4209     }
4210 
4211     t64 = tcg_temp_new_i64();
4212     tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN);
4213 
4214     lo = tcg_temp_new_i32();
4215     hi = cpu_fcc[3];
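    /*
     * hi aliases cpu_fcc[3]: the extracts below read it before the
     * final one overwrites it with the real FCC3 field.
     */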
4216     tcg_gen_extr_i64_i32(lo, hi, t64);
4217     tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2);
4218     tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2);
4219     tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2);
4220     tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2);
4221 
4222     gen_helper_set_fsr_nofcc_noftt(tcg_env, lo);
4223     return advance_pc(dc);
4224 #else
4225     return false;
4226 #endif
4227 }
4228 
4229 static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
4230 {
4231     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4232     TCGv fsr;
4233 
4234     if (addr == NULL) {
4235         return false;
4236     }
4237     if (gen_trap_ifnofpu(dc)) {
4238         return true;
4239     }
4240 
4241     fsr = tcg_temp_new();
4242     gen_helper_get_fsr(fsr, tcg_env);
4243     tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
4244     return advance_pc(dc);
4245 }
4246 
4247 TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
4248 TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4249 
4250 static bool do_fc(DisasContext *dc, int rd, bool c)
4251 {
4252     uint64_t mask;
4253 
4254     if (gen_trap_ifnofpu(dc)) {
4255         return true;
4256     }
4257 
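    /*
     * cpu_fpr[rd / 2] packs an even/odd pair of 32-bit registers,
     * with the even-numbered register in the high half.
     */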
4258     if (rd & 1) {
4259         mask = MAKE_64BIT_MASK(0, 32);
4260     } else {
4261         mask = MAKE_64BIT_MASK(32, 32);
4262     }
4263     if (c) {
4264         tcg_gen_ori_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], mask);
4265     } else {
4266         tcg_gen_andi_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], ~mask);
4267     }
4268     gen_update_fprs_dirty(dc, rd);
4269     return advance_pc(dc);
4270 }
4271 
4272 TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
4273 TRANS(FONEs, VIS1, do_fc, a->rd, 1)
4274 
4275 static bool do_dc(DisasContext *dc, int rd, int64_t c)
4276 {
4277     if (gen_trap_ifnofpu(dc)) {
4278         return true;
4279     }
4280 
4281     tcg_gen_movi_i64(cpu_fpr[rd / 2], c);
4282     gen_update_fprs_dirty(dc, rd);
4283     return advance_pc(dc);
4284 }
4285 
4286 TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
4287 TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4288 
4289 static bool do_ff(DisasContext *dc, arg_r_r *a,
4290                   void (*func)(TCGv_i32, TCGv_i32))
4291 {
4292     TCGv_i32 tmp;
4293 
4294     if (gen_trap_ifnofpu(dc)) {
4295         return true;
4296     }
4297 
4298     tmp = gen_load_fpr_F(dc, a->rs);
4299     func(tmp, tmp);
4300     gen_store_fpr_F(dc, a->rd, tmp);
4301     return advance_pc(dc);
4302 }
4303 
4304 TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
4305 TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
4306 TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
4307 TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
4308 TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4309 
4310 static bool do_fd(DisasContext *dc, arg_r_r *a,
4311                   void (*func)(TCGv_i32, TCGv_i64))
4312 {
4313     TCGv_i32 dst;
4314     TCGv_i64 src;
4315 
4316     if (gen_trap_ifnofpu(dc)) {
4317         return true;
4318     }
4319 
4320     dst = tcg_temp_new_i32();
4321     src = gen_load_fpr_D(dc, a->rs);
4322     func(dst, src);
4323     gen_store_fpr_F(dc, a->rd, dst);
4324     return advance_pc(dc);
4325 }
4326 
4327 TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
4328 TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4329 
4330 static bool do_env_ff(DisasContext *dc, arg_r_r *a,
4331                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
4332 {
4333     TCGv_i32 tmp;
4334 
4335     if (gen_trap_ifnofpu(dc)) {
4336         return true;
4337     }
4338 
4339     tmp = gen_load_fpr_F(dc, a->rs);
4340     func(tmp, tcg_env, tmp);
4341     gen_store_fpr_F(dc, a->rd, tmp);
4342     return advance_pc(dc);
4343 }
4344 
4345 TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
4346 TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
4347 TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4348 
4349 static bool do_env_fd(DisasContext *dc, arg_r_r *a,
4350                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
4351 {
4352     TCGv_i32 dst;
4353     TCGv_i64 src;
4354 
4355     if (gen_trap_ifnofpu(dc)) {
4356         return true;
4357     }
4358 
4359     dst = tcg_temp_new_i32();
4360     src = gen_load_fpr_D(dc, a->rs);
4361     func(dst, tcg_env, src);
4362     gen_store_fpr_F(dc, a->rd, dst);
4363     return advance_pc(dc);
4364 }
4365 
4366 TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
4367 TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
4368 TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4369 
4370 static bool do_dd(DisasContext *dc, arg_r_r *a,
4371                   void (*func)(TCGv_i64, TCGv_i64))
4372 {
4373     TCGv_i64 dst, src;
4374 
4375     if (gen_trap_ifnofpu(dc)) {
4376         return true;
4377     }
4378 
4379     dst = gen_dest_fpr_D(dc, a->rd);
4380     src = gen_load_fpr_D(dc, a->rs);
4381     func(dst, src);
4382     gen_store_fpr_D(dc, a->rd, dst);
4383     return advance_pc(dc);
4384 }
4385 
4386 TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
4387 TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
4388 TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
4389 TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
4390 TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4391 
4392 static bool do_env_dd(DisasContext *dc, arg_r_r *a,
4393                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
4394 {
4395     TCGv_i64 dst, src;
4396 
4397     if (gen_trap_ifnofpu(dc)) {
4398         return true;
4399     }
4400 
4401     dst = gen_dest_fpr_D(dc, a->rd);
4402     src = gen_load_fpr_D(dc, a->rs);
4403     func(dst, tcg_env, src);
4404     gen_store_fpr_D(dc, a->rd, dst);
4405     return advance_pc(dc);
4406 }
4407 
4408 TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
4409 TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
4410 TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4411 
4412 static bool do_df(DisasContext *dc, arg_r_r *a,
4413                   void (*func)(TCGv_i64, TCGv_i32))
4414 {
4415     TCGv_i64 dst;
4416     TCGv_i32 src;
4417 
4418     if (gen_trap_ifnofpu(dc)) {
4419         return true;
4420     }
4421 
4422     dst = tcg_temp_new_i64();
4423     src = gen_load_fpr_F(dc, a->rs);
4424     func(dst, src);
4425     gen_store_fpr_D(dc, a->rd, dst);
4426     return advance_pc(dc);
4427 }
4428 
4429 TRANS(FEXPAND, VIS1, do_df, a, gen_helper_fexpand)
4430 
4431 static bool do_env_df(DisasContext *dc, arg_r_r *a,
4432                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
4433 {
4434     TCGv_i64 dst;
4435     TCGv_i32 src;
4436 
4437     if (gen_trap_ifnofpu(dc)) {
4438         return true;
4439     }
4440 
4441     dst = gen_dest_fpr_D(dc, a->rd);
4442     src = gen_load_fpr_F(dc, a->rs);
4443     func(dst, tcg_env, src);
4444     gen_store_fpr_D(dc, a->rd, dst);
4445     return advance_pc(dc);
4446 }
4447 
4448 TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
4449 TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
4450 TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4451 
4452 static bool do_qq(DisasContext *dc, arg_r_r *a,
4453                   void (*func)(TCGv_i128, TCGv_i128))
4454 {
4455     TCGv_i128 t;
4456 
4457     if (gen_trap_ifnofpu(dc)) {
4458         return true;
4459     }
4460     if (gen_trap_float128(dc)) {
4461         return true;
4462     }
4463 
4464     gen_op_clear_ieee_excp_and_FTT();
4465     t = gen_load_fpr_Q(dc, a->rs);
4466     func(t, t);
4467     gen_store_fpr_Q(dc, a->rd, t);
4468     return advance_pc(dc);
4469 }
4470 
4471 TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
4472 TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
4473 TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)
4474 
4475 static bool do_env_qq(DisasContext *dc, arg_r_r *a,
4476                       void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
4477 {
4478     TCGv_i128 t;
4479 
4480     if (gen_trap_ifnofpu(dc)) {
4481         return true;
4482     }
4483     if (gen_trap_float128(dc)) {
4484         return true;
4485     }
4486 
4487     t = gen_load_fpr_Q(dc, a->rs);
4488     func(t, tcg_env, t);
4489     gen_store_fpr_Q(dc, a->rd, t);
4490     return advance_pc(dc);
4491 }
4492 
4493 TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4494 
4495 static bool do_env_fq(DisasContext *dc, arg_r_r *a,
4496                       void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
4497 {
4498     TCGv_i128 src;
4499     TCGv_i32 dst;
4500 
4501     if (gen_trap_ifnofpu(dc)) {
4502         return true;
4503     }
4504     if (gen_trap_float128(dc)) {
4505         return true;
4506     }
4507 
4508     src = gen_load_fpr_Q(dc, a->rs);
4509     dst = tcg_temp_new_i32();
4510     func(dst, tcg_env, src);
4511     gen_store_fpr_F(dc, a->rd, dst);
4512     return advance_pc(dc);
4513 }
4514 
4515 TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
4516 TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4517 
4518 static bool do_env_dq(DisasContext *dc, arg_r_r *a,
4519                       void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
4520 {
4521     TCGv_i128 src;
4522     TCGv_i64 dst;
4523 
4524     if (gen_trap_ifnofpu(dc)) {
4525         return true;
4526     }
4527     if (gen_trap_float128(dc)) {
4528         return true;
4529     }
4530 
4531     src = gen_load_fpr_Q(dc, a->rs);
4532     dst = gen_dest_fpr_D(dc, a->rd);
4533     func(dst, tcg_env, src);
4534     gen_store_fpr_D(dc, a->rd, dst);
4535     return advance_pc(dc);
4536 }
4537 
4538 TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
4539 TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4540 
4541 static bool do_env_qf(DisasContext *dc, arg_r_r *a,
4542                       void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
4543 {
4544     TCGv_i32 src;
4545     TCGv_i128 dst;
4546 
4547     if (gen_trap_ifnofpu(dc)) {
4548         return true;
4549     }
4550     if (gen_trap_float128(dc)) {
4551         return true;
4552     }
4553 
4554     src = gen_load_fpr_F(dc, a->rs);
4555     dst = tcg_temp_new_i128();
4556     func(dst, tcg_env, src);
4557     gen_store_fpr_Q(dc, a->rd, dst);
4558     return advance_pc(dc);
4559 }
4560 
4561 TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
4562 TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4563 
4564 static bool do_env_qd(DisasContext *dc, arg_r_r *a,
4565                       void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
4566 {
4567     TCGv_i64 src;
4568     TCGv_i128 dst;
4569 
4570     if (gen_trap_ifnofpu(dc)) {
4571         return true;
4572     }
4573     if (gen_trap_float128(dc)) {
4574         return true;
4575     }
4576 
4577     src = gen_load_fpr_D(dc, a->rs);
4578     dst = tcg_temp_new_i128();
4579     func(dst, tcg_env, src);
4580     gen_store_fpr_Q(dc, a->rd, dst);
4581     return advance_pc(dc);
4582 }
4583 
4584 TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
4585 TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
4586 
4587 static bool do_fff(DisasContext *dc, arg_r_r_r *a,
4588                    void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
4589 {
4590     TCGv_i32 src1, src2;
4591 
4592     if (gen_trap_ifnofpu(dc)) {
4593         return true;
4594     }
4595 
4596     src1 = gen_load_fpr_F(dc, a->rs1);
4597     src2 = gen_load_fpr_F(dc, a->rs2);
4598     func(src1, src1, src2);
4599     gen_store_fpr_F(dc, a->rd, src1);
4600     return advance_pc(dc);
4601 }
4602 
4603 TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
4604 TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
4605 TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
4606 TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
4607 TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
4608 TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
4609 TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
4610 TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
4611 TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
4612 TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
4613 TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
4614 TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
4615 
4616 static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
4617                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
4618 {
4619     TCGv_i32 src1, src2;
4620 
4621     if (gen_trap_ifnofpu(dc)) {
4622         return true;
4623     }
4624 
4625     src1 = gen_load_fpr_F(dc, a->rs1);
4626     src2 = gen_load_fpr_F(dc, a->rs2);
4627     func(src1, tcg_env, src1, src2);
4628     gen_store_fpr_F(dc, a->rd, src1);
4629     return advance_pc(dc);
4630 }
4631 
4632 TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
4633 TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
4634 TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
4635 TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
4636 
4637 static bool do_dff(DisasContext *dc, arg_r_r_r *a,
4638                    void (*func)(TCGv_i64, TCGv_i32, TCGv_i32))
4639 {
4640     TCGv_i64 dst;
4641     TCGv_i32 src1, src2;
4642 
4643     if (gen_trap_ifnofpu(dc)) {
4644         return true;
4645     }
4646 
4647     dst = gen_dest_fpr_D(dc, a->rd);
4648     src1 = gen_load_fpr_F(dc, a->rs1);
4649     src2 = gen_load_fpr_F(dc, a->rs2);
4650     func(dst, src1, src2);
4651     gen_store_fpr_D(dc, a->rd, dst);
4652     return advance_pc(dc);
4653 }
4654 
4655 TRANS(FMUL8x16AU, VIS1, do_dff, a, gen_op_fmul8x16au)
4656 TRANS(FMUL8x16AL, VIS1, do_dff, a, gen_op_fmul8x16al)
4657 TRANS(FMULD8SUx16, VIS1, do_dff, a, gen_op_fmuld8sux16)
4658 TRANS(FMULD8ULx16, VIS1, do_dff, a, gen_op_fmuld8ulx16)
4659 TRANS(FPMERGE, VIS1, do_dff, a, gen_helper_fpmerge)
4660 
4661 static bool do_dfd(DisasContext *dc, arg_r_r_r *a,
4662                    void (*func)(TCGv_i64, TCGv_i32, TCGv_i64))
4663 {
4664     TCGv_i64 dst, src2;
4665     TCGv_i32 src1;
4666 
4667     if (gen_trap_ifnofpu(dc)) {
4668         return true;
4669     }
4670 
4671     dst = gen_dest_fpr_D(dc, a->rd);
4672     src1 = gen_load_fpr_F(dc, a->rs1);
4673     src2 = gen_load_fpr_D(dc, a->rs2);
4674     func(dst, src1, src2);
4675     gen_store_fpr_D(dc, a->rd, dst);
4676     return advance_pc(dc);
4677 }
4678 
4679 TRANS(FMUL8x16, VIS1, do_dfd, a, gen_helper_fmul8x16)
4680 
4681 static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
4682                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
4683 {
4684     TCGv_i64 dst, src1, src2;
4685 
4686     if (gen_trap_ifnofpu(dc)) {
4687         return true;
4688     }
4689 
4690     dst = gen_dest_fpr_D(dc, a->rd);
4691     src1 = gen_load_fpr_D(dc, a->rs1);
4692     src2 = gen_load_fpr_D(dc, a->rs2);
4693     func(dst, src1, src2);
4694     gen_store_fpr_D(dc, a->rd, dst);
4695     return advance_pc(dc);
4696 }
4697 
4698 TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
4699 TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
4700 
4701 TRANS(FPADD16, VIS1, do_ddd, a, tcg_gen_vec_add16_i64)
4702 TRANS(FPADD32, VIS1, do_ddd, a, tcg_gen_vec_add32_i64)
4703 TRANS(FPSUB16, VIS1, do_ddd, a, tcg_gen_vec_sub16_i64)
4704 TRANS(FPSUB32, VIS1, do_ddd, a, tcg_gen_vec_sub32_i64)
4705 TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
4706 TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
4707 TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
4708 TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
4709 TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
4710 TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
4711 TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
4712 TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)
4713 
4714 TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
4715 TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
4716 TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
4717 
4718 static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
4719                    void (*func)(TCGv, TCGv_i64, TCGv_i64))
4720 {
4721     TCGv_i64 src1, src2;
4722     TCGv dst;
4723 
4724     if (gen_trap_ifnofpu(dc)) {
4725         return true;
4726     }
4727 
4728     dst = gen_dest_gpr(dc, a->rd);
4729     src1 = gen_load_fpr_D(dc, a->rs1);
4730     src2 = gen_load_fpr_D(dc, a->rs2);
4731     func(dst, src1, src2);
4732     gen_store_gpr(dc, a->rd, dst);
4733     return advance_pc(dc);
4734 }
4735 
4736 TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
4737 TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
4738 TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
4739 TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
4740 
4741 TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
4742 TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
4743 TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
4744 TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
4745 
4746 static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
4747                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
4748 {
4749     TCGv_i64 dst, src1, src2;
4750 
4751     if (gen_trap_ifnofpu(dc)) {
4752         return true;
4753     }
4754 
4755     dst = gen_dest_fpr_D(dc, a->rd);
4756     src1 = gen_load_fpr_D(dc, a->rs1);
4757     src2 = gen_load_fpr_D(dc, a->rs2);
4758     func(dst, tcg_env, src1, src2);
4759     gen_store_fpr_D(dc, a->rd, dst);
4760     return advance_pc(dc);
4761 }
4762 
4763 TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
4764 TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
4765 TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
4766 TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
4767 
4768 static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
4769 {
4770     TCGv_i64 dst;
4771     TCGv_i32 src1, src2;
4772 
4773     if (gen_trap_ifnofpu(dc)) {
4774         return true;
4775     }
4776     if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
4777         return raise_unimpfpop(dc);
4778     }
4779 
4780     dst = gen_dest_fpr_D(dc, a->rd);
4781     src1 = gen_load_fpr_F(dc, a->rs1);
4782     src2 = gen_load_fpr_F(dc, a->rs2);
4783     gen_helper_fsmuld(dst, tcg_env, src1, src2);
4784     gen_store_fpr_D(dc, a->rd, dst);
4785     return advance_pc(dc);
4786 }
4787 
4788 static bool do_dddd(DisasContext *dc, arg_r_r_r *a,
4789                     void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
4790 {
4791     TCGv_i64 dst, src0, src1, src2;
4792 
4793     if (gen_trap_ifnofpu(dc)) {
4794         return true;
4795     }
4796 
4797     dst  = gen_dest_fpr_D(dc, a->rd);
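    /* rd is both accumulator and destination: PDIST sums into rd. */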
4798     src0 = gen_load_fpr_D(dc, a->rd);
4799     src1 = gen_load_fpr_D(dc, a->rs1);
4800     src2 = gen_load_fpr_D(dc, a->rs2);
4801     func(dst, src0, src1, src2);
4802     gen_store_fpr_D(dc, a->rd, dst);
4803     return advance_pc(dc);
4804 }
4805 
4806 TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
4807 
4808 static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
4809                        void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
4810 {
4811     TCGv_i128 src1, src2;
4812 
4813     if (gen_trap_ifnofpu(dc)) {
4814         return true;
4815     }
4816     if (gen_trap_float128(dc)) {
4817         return true;
4818     }
4819 
4820     src1 = gen_load_fpr_Q(dc, a->rs1);
4821     src2 = gen_load_fpr_Q(dc, a->rs2);
4822     func(src1, tcg_env, src1, src2);
4823     gen_store_fpr_Q(dc, a->rd, src1);
4824     return advance_pc(dc);
4825 }
4826 
4827 TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
4828 TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
4829 TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
4830 TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
4831 
4832 static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
4833 {
4834     TCGv_i64 src1, src2;
4835     TCGv_i128 dst;
4836 
4837     if (gen_trap_ifnofpu(dc)) {
4838         return true;
4839     }
4840     if (gen_trap_float128(dc)) {
4841         return true;
4842     }
4843 
4844     src1 = gen_load_fpr_D(dc, a->rs1);
4845     src2 = gen_load_fpr_D(dc, a->rs2);
4846     dst = tcg_temp_new_i128();
4847     gen_helper_fdmulq(dst, tcg_env, src1, src2);
4848     gen_store_fpr_Q(dc, a->rd, dst);
4849     return advance_pc(dc);
4850 }
4851 
4852 static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
4853                      void (*func)(DisasContext *, DisasCompare *, int, int))
4854 {
4855     DisasCompare cmp;
4856 
4857     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
4858         return false;
4859     }
4860     if (gen_trap_ifnofpu(dc)) {
4861         return true;
4862     }
4863     if (is_128 && gen_trap_float128(dc)) {
4864         return true;
4865     }
4866 
4867     gen_op_clear_ieee_excp_and_FTT();
4868     func(dc, &cmp, a->rd, a->rs2);
4869     return advance_pc(dc);
4870 }
4871 
4872 TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
4873 TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
4874 TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
4875 
4876 static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
4877                       void (*func)(DisasContext *, DisasCompare *, int, int))
4878 {
4879     DisasCompare cmp;
4880 
4881     if (gen_trap_ifnofpu(dc)) {
4882         return true;
4883     }
4884     if (is_128 && gen_trap_float128(dc)) {
4885         return true;
4886     }
4887 
4888     gen_op_clear_ieee_excp_and_FTT();
4889     gen_compare(&cmp, a->cc, a->cond, dc);
4890     func(dc, &cmp, a->rd, a->rs2);
4891     return advance_pc(dc);
4892 }
4893 
4894 TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
4895 TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
4896 TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
4897 
4898 static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
4899                        void (*func)(DisasContext *, DisasCompare *, int, int))
4900 {
4901     DisasCompare cmp;
4902 
4903     if (gen_trap_ifnofpu(dc)) {
4904         return true;
4905     }
4906     if (is_128 && gen_trap_float128(dc)) {
4907         return true;
4908     }
4909 
4910     gen_op_clear_ieee_excp_and_FTT();
4911     gen_fcompare(&cmp, a->cc, a->cond);
4912     func(dc, &cmp, a->rd, a->rs2);
4913     return advance_pc(dc);
4914 }
4915 
4916 TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
4917 TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
4918 TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
4919 
4920 static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
4921 {
4922     TCGv_i32 src1, src2;
4923 
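    /* Only v9 has %fcc1-%fcc3; pre-v9 FSRs expose a single fcc field. */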
4924     if (avail_32(dc) && a->cc != 0) {
4925         return false;
4926     }
4927     if (gen_trap_ifnofpu(dc)) {
4928         return true;
4929     }
4930 
4931     src1 = gen_load_fpr_F(dc, a->rs1);
4932     src2 = gen_load_fpr_F(dc, a->rs2);
4933     if (e) {
4934         gen_helper_fcmpes(cpu_fcc[a->cc], tcg_env, src1, src2);
4935     } else {
4936         gen_helper_fcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
4937     }
4938     return advance_pc(dc);
4939 }
4940 
4941 TRANS(FCMPs, ALL, do_fcmps, a, false)
4942 TRANS(FCMPEs, ALL, do_fcmps, a, true)
4943 
4944 static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
4945 {
4946     TCGv_i64 src1, src2;
4947 
4948     if (avail_32(dc) && a->cc != 0) {
4949         return false;
4950     }
4951     if (gen_trap_ifnofpu(dc)) {
4952         return true;
4953     }
4954 
4955     src1 = gen_load_fpr_D(dc, a->rs1);
4956     src2 = gen_load_fpr_D(dc, a->rs2);
4957     if (e) {
4958         gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2);
4959     } else {
4960         gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
4961     }
4962     return advance_pc(dc);
4963 }
4964 
4965 TRANS(FCMPd, ALL, do_fcmpd, a, false)
4966 TRANS(FCMPEd, ALL, do_fcmpd, a, true)
4967 
4968 static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
4969 {
4970     TCGv_i128 src1, src2;
4971 
4972     if (avail_32(dc) && a->cc != 0) {
4973         return false;
4974     }
4975     if (gen_trap_ifnofpu(dc)) {
4976         return true;
4977     }
4978     if (gen_trap_float128(dc)) {
4979         return true;
4980     }
4981 
4982     src1 = gen_load_fpr_Q(dc, a->rs1);
4983     src2 = gen_load_fpr_Q(dc, a->rs2);
4984     if (e) {
4985         gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2);
4986     } else {
4987         gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2);
4988     }
4989     return advance_pc(dc);
4990 }
4991 
4992 TRANS(FCMPq, ALL, do_fcmpq, a, false)
4993 TRANS(FCMPEq, ALL, do_fcmpq, a, true)
4994 
4995 static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4996 {
4997     DisasContext *dc = container_of(dcbase, DisasContext, base);
4998     int bound;
4999 
5000     dc->pc = dc->base.pc_first;
5001     dc->npc = (target_ulong)dc->base.tb->cs_base;
5002     dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
5003     dc->def = &cpu_env(cs)->def;
5004     dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
5005     dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
5006 #ifndef CONFIG_USER_ONLY
5007     dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
5008 #endif
5009 #ifdef TARGET_SPARC64
5010     dc->fprs_dirty = 0;
5011     dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5012 #ifndef CONFIG_USER_ONLY
5013     dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
5014 #endif
5015 #endif
5016     /*
5017      * if we reach a page boundary, we stop generation so that the
5018      * PC of a TT_TFAULT exception is always in the right page
5019      */
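    /* -(pc | TARGET_PAGE_MASK) is the byte distance to the end of the page. */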
5020     bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
5021     dc->base.max_insns = MIN(dc->base.max_insns, bound);
5022 }
5023 
5024 static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
5025 {
5026 }
5027 
5028 static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
5029 {
5030     DisasContext *dc = container_of(dcbase, DisasContext, base);
5031     target_ulong npc = dc->npc;
5032 
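    /* Low bits set in npc encode a symbolic value, not a real address. */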
5033     if (npc & 3) {
5034         switch (npc) {
5035         case JUMP_PC:
5036             assert(dc->jump_pc[1] == dc->pc + 4);
5037             npc = dc->jump_pc[0] | JUMP_PC;
5038             break;
5039         case DYNAMIC_PC:
5040         case DYNAMIC_PC_LOOKUP:
5041             npc = DYNAMIC_PC;
5042             break;
5043         default:
5044             g_assert_not_reached();
5045         }
5046     }
5047     tcg_gen_insn_start(dc->pc, npc);
5048 }
5049 
5050 static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
5051 {
5052     DisasContext *dc = container_of(dcbase, DisasContext, base);
5053     unsigned int insn;
5054 
5055     insn = translator_ldl(cpu_env(cs), &dc->base, dc->pc);
5056     dc->base.pc_next += 4;
5057 
5058     if (!decode(dc, insn)) {
5059         gen_exception(dc, TT_ILL_INSN);
5060     }
5061 
5062     if (dc->base.is_jmp == DISAS_NORETURN) {
5063         return;
5064     }
5065     if (dc->pc != dc->base.pc_next) {
5066         dc->base.is_jmp = DISAS_TOO_MANY;
5067     }
5068 }
5069 
5070 static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
5071 {
5072     DisasContext *dc = container_of(dcbase, DisasContext, base);
5073     DisasDelayException *e, *e_next;
5074     bool may_lookup;
5075 
5076     finishing_insn(dc);
5077 
5078     switch (dc->base.is_jmp) {
5079     case DISAS_NEXT:
5080     case DISAS_TOO_MANY:
5081         if (((dc->pc | dc->npc) & 3) == 0) {
5082             /* static PC and NPC: we can use direct chaining */
5083             gen_goto_tb(dc, 0, dc->pc, dc->npc);
5084             break;
5085         }
5086 
5087         may_lookup = true;
5088         if (dc->pc & 3) {
5089             switch (dc->pc) {
5090             case DYNAMIC_PC_LOOKUP:
5091                 break;
5092             case DYNAMIC_PC:
5093                 may_lookup = false;
5094                 break;
5095             default:
5096                 g_assert_not_reached();
5097             }
5098         } else {
5099             tcg_gen_movi_tl(cpu_pc, dc->pc);
5100         }
5101 
5102         if (dc->npc & 3) {
5103             switch (dc->npc) {
5104             case JUMP_PC:
5105                 gen_generic_branch(dc);
5106                 break;
5107             case DYNAMIC_PC:
5108                 may_lookup = false;
5109                 break;
5110             case DYNAMIC_PC_LOOKUP:
5111                 break;
5112             default:
5113                 g_assert_not_reached();
5114             }
5115         } else {
5116             tcg_gen_movi_tl(cpu_npc, dc->npc);
5117         }
5118         if (may_lookup) {
5119             tcg_gen_lookup_and_goto_ptr();
5120         } else {
5121             tcg_gen_exit_tb(NULL, 0);
5122         }
5123         break;
5124 
5125     case DISAS_NORETURN:
5126         break;
5127 
5128     case DISAS_EXIT:
5129         /* Exit TB */
5130         save_state(dc);
5131         tcg_gen_exit_tb(NULL, 0);
5132         break;
5133 
5134     default:
5135         g_assert_not_reached();
5136     }
5137 
5138     for (e = dc->delay_excp_list; e ; e = e_next) {
5139         gen_set_label(e->lab);
5140 
5141         tcg_gen_movi_tl(cpu_pc, e->pc);
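        /* If npc was not static, cpu_npc already holds the dynamic value. */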
5142         if (e->npc % 4 == 0) {
5143             tcg_gen_movi_tl(cpu_npc, e->npc);
5144         }
5145         gen_helper_raise_exception(tcg_env, e->excp);
5146 
5147         e_next = e->next;
5148         g_free(e);
5149     }
5150 }
5151 
5152 static void sparc_tr_disas_log(const DisasContextBase *dcbase,
5153                                CPUState *cpu, FILE *logfile)
5154 {
5155     fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
5156     target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
5157 }
5158 
5159 static const TranslatorOps sparc_tr_ops = {
5160     .init_disas_context = sparc_tr_init_disas_context,
5161     .tb_start           = sparc_tr_tb_start,
5162     .insn_start         = sparc_tr_insn_start,
5163     .translate_insn     = sparc_tr_translate_insn,
5164     .tb_stop            = sparc_tr_tb_stop,
5165     .disas_log          = sparc_tr_disas_log,
5166 };
5167 
5168 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
5169                            vaddr pc, void *host_pc)
5170 {
5171     DisasContext dc = {};
5172 
5173     translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
5174 }
5175 
5176 void sparc_tcg_init(void)
5177 {
5178     static const char gregnames[32][4] = {
5179         "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5180         "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5181         "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5182         "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5183     };
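    /* Each 64-bit FP global covers an even/odd register pair. */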
5184     static const char fregnames[32][4] = {
5185         "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5186         "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5187         "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5188         "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5189     };
5190 
5191     static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
5192 #ifdef TARGET_SPARC64
5193         { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
5194         { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" },
5195         { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" },
5196         { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" },
5197         { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" },
5198 #else
5199         { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" },
5200 #endif
5201     };
5202 
5203     static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5204 #ifdef TARGET_SPARC64
5205         { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5206         { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
5207         { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
5208 #endif
5209         { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
5210         { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
5211         { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
5212         { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
5213         { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5214         { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5215         { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5216         { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5217         { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5218     };
5219 
5220     unsigned int i;
5221 
5222     cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
5223                                          offsetof(CPUSPARCState, regwptr),
5224                                          "regwptr");
5225 
5226     for (i = 0; i < ARRAY_SIZE(r32); ++i) {
5227         *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
5228     }
5229 
5230     for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5231         *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
5232     }
5233 
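    /* %g0 is hardwired to zero and never allocated as a TCG global. */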
5234     cpu_regs[0] = NULL;
5235     for (i = 1; i < 8; ++i) {
5236         cpu_regs[i] = tcg_global_mem_new(tcg_env,
5237                                          offsetof(CPUSPARCState, gregs[i]),
5238                                          gregnames[i]);
5239     }
5240 
5241     for (i = 8; i < 32; ++i) {
5242         cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5243                                          (i - 8) * sizeof(target_ulong),
5244                                          gregnames[i]);
5245     }
5246 
5247     for (i = 0; i < TARGET_DPREGS; i++) {
5248         cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
5249                                             offsetof(CPUSPARCState, fpr[i]),
5250                                             fregnames[i]);
5251     }
5252 }
5253 
5254 void sparc_restore_state_to_opc(CPUState *cs,
5255                                 const TranslationBlock *tb,
5256                                 const uint64_t *data)
5257 {
5258     CPUSPARCState *env = cpu_env(cs);
5259     target_ulong pc = data[0];
5260     target_ulong npc = data[1];
5261 
5262     env->pc = pc;
5263     if (npc == DYNAMIC_PC) {
5264         /* dynamic NPC: already stored */
5265     } else if (npc & JUMP_PC) {
5266         /* jump PC: use 'cond' and the jump targets of the translation */
5267         if (env->cond) {
5268             env->npc = npc & ~3;
5269         } else {
5270             env->npc = pc + 4;
5271         }
5272     } else {
5273         env->npc = npc;
5274     }
5275 }
5276