/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "fpu/softfloat.h"
#include "asi.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

#ifdef TARGET_SPARC64
# define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdasr17(D, E)               qemu_build_not_reached()
# define gen_helper_rett(E)                     qemu_build_not_reached()
# define gen_helper_power_down(E)               qemu_build_not_reached()
# define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
#else
# define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
# define gen_helper_done(E)                     qemu_build_not_reached()
# define gen_helper_flushw(E)                   qemu_build_not_reached()
# define gen_helper_fmul8x16a(D, S1, S2)        qemu_build_not_reached()
# define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
# define gen_helper_restored(E)                 qemu_build_not_reached()
# define gen_helper_retry(E)                    qemu_build_not_reached()
# define gen_helper_saved(E)                    qemu_build_not_reached()
# define gen_helper_set_softint(E, S)           qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
# define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
# define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
# define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
# define gen_helper_write_softint(E, S)         qemu_build_not_reached()
# define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
# define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
# define gen_helper_cmask8              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_cmask16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_cmask32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq8             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq16            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq32            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt8             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt16            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt32            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple8             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple16            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple32            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne8             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne16            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne32            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule8            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule32           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt8            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt32           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fdtox               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fexpand             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8sux16          ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8ulx16          ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fpmerge             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fqtox               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fslas16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fslas32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fstox               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtod               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtoq               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtos               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_pdist               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_xmulx               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_xmulxhi             ({ qemu_build_not_reached(); NULL; })
# define MAXTL_MASK                     0
#endif

/* Dynamic PC, must exit to main loop. */
#define DYNAMIC_PC         1
/* Dynamic PC, one of two values according to jump_pc[T2]. */
#define JUMP_PC            2
/* Dynamic PC, may lookup next TB. */
#define DYNAMIC_PC_LOOKUP  3

#define DISAS_EXIT  DISAS_TARGET_0

/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
static TCGv cpu_tbr;
static TCGv cpu_cond;
static TCGv cpu_cc_N;
static TCGv cpu_cc_V;
static TCGv cpu_icc_Z;
static TCGv cpu_icc_C;
#ifdef TARGET_SPARC64
static TCGv cpu_xcc_Z;
static TCGv cpu_xcc_C;
static TCGv_i32 cpu_fprs;
static TCGv cpu_gsr;
#else
# define cpu_fprs  ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr   ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif

#ifdef TARGET_SPARC64
#define cpu_cc_Z  cpu_xcc_Z
#define cpu_cc_C  cpu_xcc_C
#else
#define cpu_cc_Z  cpu_icc_Z
#define cpu_cc_C  cpu_icc_C
#define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
#define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
#endif

/* Floating point comparison registers */
static TCGv_i32 cpu_fcc[TARGET_FCCREGS];

#define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
#ifdef TARGET_SPARC64
# define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X)  env_field_offsetof(X)
#else
# define env32_field_offsetof(X)  env_field_offsetof(X)
# define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
#endif

typedef struct DisasCompare {
    TCGCond cond;
    TCGv c1;
    int c2;
} DisasCompare;

typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */

    /* Used when JUMP_PC value is used. */
    DisasCompare jump;
    target_ulong jump_pc[2];

    int mem_idx;
    bool cpu_cond_live;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#else
    bool fsr_qne;
#endif
#endif

    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;

// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
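
/*
 * Worked example (illustrative): both numberings extract the same field,
 * e.g. GET_FIELD(insn, 27, 31) == GET_FIELD_SP(insn, 0, 4) == insn & 0x1f.
 * GET_FIELD counts bit 0 as the MSB (bit 31); GET_FIELD_SP counts bit 0
 * as the LSB (2^0), as in the manuals.
 */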

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

#define IS_IMM (insn & (1<<13))

static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}

/* floating point registers moves */

static int gen_offset_fpr_F(unsigned int reg)
{
    int ret;

    tcg_debug_assert(reg < 32);
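    /*
     * Each fpr[] element is a CPU_DoubleU holding two F registers:
     * the even-numbered register in l.upper, the odd in l.lower.
     */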
    ret = offsetof(CPUSPARCState, fpr[reg / 2]);
    if (reg & 1) {
        ret += offsetof(CPU_DoubleU, l.lower);
    } else {
        ret += offsetof(CPU_DoubleU, l.upper);
    }
    return ret;
}

static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env, gen_offset_fpr_F(src));
    return ret;
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    tcg_gen_st_i32(v, tcg_env, gen_offset_fpr_F(dst));
    gen_update_fprs_dirty(dc, dst);
}

static int gen_offset_fpr_D(unsigned int reg)
{
    tcg_debug_assert(reg < 64);
    tcg_debug_assert(reg % 2 == 0);
    return offsetof(CPUSPARCState, fpr[reg / 2]);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, gen_offset_fpr_D(src));
    return ret;
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    tcg_gen_st_i64(v, tcg_env, gen_offset_fpr_D(dst));
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
{
    TCGv_i128 ret = tcg_temp_new_i128();
    TCGv_i64 h = gen_load_fpr_D(dc, src);
    TCGv_i64 l = gen_load_fpr_D(dc, src + 2);

    tcg_gen_concat_i64_i128(ret, l, h);
    return ret;
}

static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
{
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 l = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, v);
    gen_store_fpr_D(dc, dst, h);
    gen_store_fpr_D(dc, dst + 2, l);
}

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0
#endif
#endif

#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc)  false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc)  true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc)  false
#else
# define AM_CHECK(dc)  ((dc)->address_mask_32bit)
#endif
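
/*
 * When AM_CHECK is true, a 64-bit cpu is running with 32-bit address
 * masking (the v9 PSTATE.AM bit), so effective addresses are truncated
 * to 32 bits before use.
 */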

static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}

static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
{
    return AM_CHECK(dc) ? (uint32_t)addr : addr;
}

static TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = tcg_temp_new();
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return tcg_temp_new();
    }
}

static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}

static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}

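/*
 * On a 64-bit cpu, the icc carry out lives in bit 32 of cpu_icc_C
 * (see gen_op_addcc_int below), so it must be extracted before use;
 * on a 32-bit cpu the flag is already a 0/1 value.
 */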
static TCGv gen_carry32(void)
{
    if (TARGET_LONG_BITS == 64) {
        TCGv t = tcg_temp_new();
        tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
        return t;
    }
    return cpu_icc_C;
}

static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
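    /*
     * Signed overflow occurs when the operands share a sign that differs
     * from the result: V = (N ^ src2) & ~(src1 ^ src2), evaluated in the
     * sign bit.  The source xor is kept in Z for reuse below.
     */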
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}

static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}

static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_addxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, cpu_cc_C);
}

static void gen_op_addxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, cpu_cc_C);
}

static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
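    /* The double-word subtract leaves -1 in the high half on borrow;
       negate it so that C is a 0/1 flag, as for add. */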
    tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
    tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
#ifdef TARGET_SPARC64
    tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}

static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}

static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_subxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, cpu_cc_C);
}

static void gen_op_subxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, cpu_cc_C);
}

static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv one = tcg_constant_tl(1);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}

static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

static void gen_op_umulxhi(TCGv dst, TCGv src1, TCGv src2)
{
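    /* Only the high 64 bits of the product are wanted; mulu2 computes
       both halves and the low half is simply discarded. */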
    TCGv discard = tcg_temp_new();
    tcg_gen_mulu2_tl(discard, dst, src1, src2);
}

static void gen_op_fpmaddx(TCGv_i64 dst, TCGv_i64 src1,
                           TCGv_i64 src2, TCGv_i64 src3)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_mul_i64(t, src1, src2);
    tcg_gen_add_i64(dst, src3, t);
}

static void gen_op_fpmaddxhi(TCGv_i64 dst, TCGv_i64 src1,
                             TCGv_i64 src2, TCGv_i64 src3)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 z = tcg_constant_i64(0);

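    /* 128-bit multiply-accumulate: (l, dst) = src1 * src2 + src3, with
       dst receiving the high 64 bits, including the carry out of the
       low half. */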
    tcg_gen_mulu2_i64(l, h, src1, src2);
    tcg_gen_add2_i64(l, dst, l, h, src3, z);
}

static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}

static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}

static void gen_op_lzcnt(TCGv dst, TCGv src)
{
    tcg_gen_clzi_tl(dst, src, TARGET_LONG_BITS);
}

#ifndef TARGET_SPARC64
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif

static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}

static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpadds16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_add_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}

static void gen_op_fpsubs16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_sub_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}

static void gen_op_fpadds32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_add_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src2);
    tcg_gen_andc_i32(v, v, t);

    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}

static void gen_op_fpsubs32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_sub_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src1);
    tcg_gen_and_i32(v, v, t);

    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}

static void gen_op_faligndata_i(TCGv_i64 dst, TCGv_i64 s1,
                                TCGv_i64 s2, TCGv gsr)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

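    /* GSR.align (the low 3 bits) selects a byte offset: the result is
       bytes [align .. align + 7] of the 16-byte pair s1:s2. */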
    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_faligndata_g(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
    gen_op_faligndata_i(dst, s1, s2, cpu_gsr);
}

static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_pdistn(TCGv dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_pdist(dst, tcg_constant_i64(0), src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fmul8x16al(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_ext16s_i32(src2, src2);
    gen_helper_fmul8x16a(dst, src1, src2);
}

static void gen_op_fmul8x16au(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_sari_i32(src2, src2, 16);
    gen_helper_fmul8x16a(dst, src1, src2);
}

static void gen_op_fmuld8ulx16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

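    /* Multiply the unsigned low byte of each 16-bit lane of src1 by the
       signed 16-bit lane of src2, producing two 32-bit lanes. */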
    tcg_gen_ext8u_i32(t0, src1);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_extract_i32(t1, src1, 16, 8);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

static void gen_op_fmuld8sux16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /*
     * The insn description talks about extracting the upper 8 bits
     * of the signed 16-bit input rs1, performing the multiply, then
     * shifting left by 8 bits.  Instead, zap the lower 8 bits of
     * the rs1 input, which avoids the need for two shifts.
     */
    tcg_gen_ext16s_i32(t0, src1);
    tcg_gen_andi_i32(t0, t0, ~0xff);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_sextract_i32(t1, src1, 16, 16);
    tcg_gen_andi_i32(t1, t1, ~0xff);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

#ifdef TARGET_SPARC64
static void gen_vec_fchksm16(unsigned vece, TCGv_vec dst,
                             TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec a = tcg_temp_new_vec_matching(dst);
    TCGv_vec c = tcg_temp_new_vec_matching(dst);

    tcg_gen_add_vec(vece, a, src1, src2);
    tcg_gen_cmp_vec(TCG_COND_LTU, vece, c, a, src1);
    /* Vector cmp produces -1 for true, so subtract to add carry. */
    tcg_gen_sub_vec(vece, dst, a, c);
}

static void gen_op_fchksm16(unsigned vece, uint32_t dofs, uint32_t aofs,
                            uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, INDEX_op_add_vec, INDEX_op_sub_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fchksm16,
        .fniv = gen_vec_fchksm16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}

static void gen_vec_fmean16(unsigned vece, TCGv_vec dst,
                            TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec t = tcg_temp_new_vec_matching(dst);

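    /* Rounded average without intermediate overflow:
       (a + b + 1) >> 1 == (a >> 1) + (b >> 1) + ((a | b) & 1). */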
    tcg_gen_or_vec(vece, t, src1, src2);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(dst, vece, 1));
    tcg_gen_sari_vec(vece, src1, src1, 1);
    tcg_gen_sari_vec(vece, src2, src2, 1);
    tcg_gen_add_vec(vece, dst, src1, src2);
    tcg_gen_add_vec(vece, dst, dst, t);
}

static void gen_op_fmean16(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_add_vec, INDEX_op_sari_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fmean16,
        .fniv = gen_vec_fmean16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}
#else
#define gen_op_fchksm16   ({ qemu_build_not_reached(); NULL; })
#define gen_op_fmean16    ({ qemu_build_not_reached(); NULL; })
#endif

static void finishing_insn(DisasContext *dc)
{
    /*
     * From here, there is no future path through an unwinding exception.
     * If the current insn cannot raise an exception, the computation of
     * cpu_cond may be able to be elided.
     */
    if (dc->cpu_cond_live) {
        tcg_gen_discard_tl(cpu_cond);
        dc->cpu_cond_live = false;
    }
}

static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv c2 = tcg_constant_tl(dc->jump.c2);

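    /* npc = (c1 <cond> c2) ? jump_pc[0] : jump_pc[1] */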
    tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
}

/* call this function before using the condition register as it may
   have been set for a jump */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}

static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}

static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
{
    DisasDelayException *e = g_new0(DisasDelayException, 1);

    e->next = dc->delay_excp_list;
    dc->delay_excp_list = e;

    e->lab = gen_new_label();
    e->excp = excp;
    e->pc = dc->pc;
    /* Caller must have used flush_cond before branch. */
    assert(dc->npc != JUMP_PC);
    e->npc = dc->npc;

    return e->lab;
}

static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}

static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}

static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}

static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}

static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    TCGv_i32 fcc = cpu_fcc[cc];
    TCGv_i32 c1 = fcc;
    int c2 = 0;
    TCGCond tcond;

    /*
     * FCC values:
     * 0 =
     * 1 <
     * 2 >
     * 3 unordered
     */
    switch (cond & 7) {
    case 0x0: /* fbn */
        tcond = TCG_COND_NEVER;
        break;
    case 0x1: /* fbne : !0 */
        tcond = TCG_COND_NE;
        break;
    case 0x2: /* fblg : 1 or 2 */
        /* fcc in {1,2} - 1 -> fcc in {0,1} */
        c1 = tcg_temp_new_i32();
        tcg_gen_addi_i32(c1, fcc, -1);
        c2 = 1;
        tcond = TCG_COND_LEU;
        break;
    case 0x3: /* fbul : 1 or 3 */
        c1 = tcg_temp_new_i32();
        tcg_gen_andi_i32(c1, fcc, 1);
        tcond = TCG_COND_NE;
        break;
    case 0x4: /* fbl : 1 */
        c2 = 1;
        tcond = TCG_COND_EQ;
        break;
    case 0x5: /* fbug : 2 or 3 */
        c2 = 2;
        tcond = TCG_COND_GEU;
        break;
    case 0x6: /* fbg : 2 */
        c2 = 2;
        tcond = TCG_COND_EQ;
        break;
    case 0x7: /* fbu : 3 */
        c2 = 3;
        tcond = TCG_COND_EQ;
        break;
    }
    if (cond & 8) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c2 = c2;
    cmp->c1 = tcg_temp_new();
    tcg_gen_extu_i32_tl(cmp->c1, c1);
}

static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    static const TCGCond cond_reg[4] = {
        TCG_COND_NEVER,  /* reserved */
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
    };
    TCGCond tcond;

    if ((cond & 3) == 0) {
        return false;
    }
    tcond = cond_reg[cond & 3];
    if (cond & 4) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c1 = tcg_temp_new();
    cmp->c2 = 0;
    tcg_gen_mov_tl(cmp->c1, r_src);
    return true;
}

static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
}

static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}

static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i32(dst, src, 1u << 31);
}

static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i32(dst, src, ~(1u << 31));
}

static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}

static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i64(dst, src, 1ull << 63);
}

static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i64(dst, src, ~(1ull << 63));
}

static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_xori_i64(h, h, 1ull << 63);
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_andi_i64(h, h, ~(1ull << 63));
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}

static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}

static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_c;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_c;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_c | float_muladd_negate_result;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_c | float_muladd_negate_result;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_result;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_result;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

/* Use muladd to compute (1 * src1) + src2 / 2 with one rounding. */
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

/* Use muladd to compute (1 * src1) - src2 / 2 with one rounding. */
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_negate_c | float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_negate_c | float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

/* Use muladd to compute -((1 * src1) + src2 / 2) with one rounding. */
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_negate_result | float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_negate_result | float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fpexception_im(DisasContext *dc, int ftt)
{
    /*
     * CEXC is only set when successfully completing an FPop,
     * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
     * Thus we can simply store FTT into this field.
     */
    tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
    gen_exception(dc, TT_FP_EXCP);
}

static bool gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return true;
    }
#endif
    return false;
}

static bool gen_trap_iffpexception(DisasContext *dc)
{
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
    /*
     * There are 3 states for the sparc32 fpu:
     * Normally the fpu is in fp_execute, and all insns are allowed.
     * When an exception is signaled, it moves to fp_exception_pending state.
     * Upon seeing the next FPop, the fpu moves to fp_exception state,
     * populates the FQ, and generates an fp_exception trap.
     * The fpu remains in fp_exception state until FQ becomes empty
     * after execution of a STDFQ instruction.  While the fpu is in
     * fp_exception state, any FPop, fp load or fp branch insn will
     * return to fp_exception_pending state, set FSR.FTT to sequence_error,
     * and the insn will not be entered into the FQ.
     *
     * In QEMU, we do not model the fp_exception_pending state and
     * instead populate FQ and raise the exception immediately.
     * But we can still honor fp_exception state by noticing when
     * the FQ is not empty.
     */
    if (dc->fsr_qne) {
        gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
        return true;
    }
#endif
    return false;
}

static bool gen_trap_if_nofpu_fpexception(DisasContext *dc)
{
    return gen_trap_ifnofpu(dc) || gen_trap_iffpexception(dc);
}

/* asi moves */
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_CODE,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    MemOp memop;
} DisasASI;

/*
 * Build DisasASI.
 * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
 */
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect. */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_USERTXT:     /* User text access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_KERNELTXT:   /* Supervisor text access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY:     /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL:     /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below don't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper. */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        switch (asi) {
        case ASI_REAL:         /* Bypass */
        case ASI_REAL_IO:      /* Bypass, non-cacheable */
        case ASI_REAL_L:       /* Bypass LE */
        case ASI_REAL_IO_L:    /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_MON_AIUP:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_MON_AIUS:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_MON_S:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
        case ASI_MON_P:
            break;
        }
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
        case ASI_MON_P:
        case ASI_MON_S:
        case ASI_MON_AIUP:
        case ASI_MON_AIUS:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set. */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

done:
    return (DisasASI){ type, asi, mem_idx, memop };
}
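
/*
 * Callers typically build a DisasASI once per insn, e.g.
 *   DisasASI da = resolve_asi(dc, asi, memop);
 * and then dispatch on da.type, as gen_ld_asi and gen_st_asi do below.
 */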
1771
1772 #if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
gen_helper_ld_asi(TCGv_i64 r,TCGv_env e,TCGv a,TCGv_i32 asi,TCGv_i32 mop)1773 static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
1774 TCGv_i32 asi, TCGv_i32 mop)
1775 {
1776 g_assert_not_reached();
1777 }
1778
gen_helper_st_asi(TCGv_env e,TCGv a,TCGv_i64 r,TCGv_i32 asi,TCGv_i32 mop)1779 static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
1780 TCGv_i32 asi, TCGv_i32 mop)
1781 {
1782 g_assert_not_reached();
1783 }
1784 #endif
1785
1786 static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1787 {
1788 switch (da->type) {
1789 case GET_ASI_EXCP:
1790 break;
1791 case GET_ASI_DTWINX: /* Reserved for ldda. */
1792 gen_exception(dc, TT_ILL_INSN);
1793 break;
1794 case GET_ASI_DIRECT:
1795 tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
1796 break;
1797
1798 case GET_ASI_CODE:
1799 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
1800 {
1801 MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
1802 TCGv_i64 t64 = tcg_temp_new_i64();
1803
1804 gen_helper_ld_code(t64, tcg_env, addr, tcg_constant_i32(oi));
1805 tcg_gen_trunc_i64_tl(dst, t64);
1806 }
1807 break;
1808 #else
1809 g_assert_not_reached();
1810 #endif
1811
1812 default:
1813 {
1814 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1815 TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1816
1817 save_state(dc);
1818 #ifdef TARGET_SPARC64
1819 gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
1820 #else
1821 {
1822 TCGv_i64 t64 = tcg_temp_new_i64();
1823 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1824 tcg_gen_trunc_i64_tl(dst, t64);
1825 }
1826 #endif
1827 }
1828 break;
1829 }
1830 }
1831
1832 static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
1833 {
1834 switch (da->type) {
1835 case GET_ASI_EXCP:
1836 break;
1837
1838 case GET_ASI_DTWINX: /* Reserved for stda. */
1839 if (TARGET_LONG_BITS == 32) {
1840 gen_exception(dc, TT_ILL_INSN);
1841 break;
1842 } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
1843 /* Pre-OpenSPARC CPUs don't have these */
1844 gen_exception(dc, TT_ILL_INSN);
1845 break;
1846 }
1847 /* In OpenSPARC T1+ CPUs, TWINX ASIs in store are ST_BLKINIT_ ASIs */
1848 /* fall through */
1849
1850 case GET_ASI_DIRECT:
1851 tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
1852 break;
1853
1854 case GET_ASI_BCOPY:
1855 assert(TARGET_LONG_BITS == 32);
1856 /*
1857 * Copy 32 bytes from the address in SRC to ADDR.
1858 *
1859 * From Ross RT625 hyperSPARC manual, section 4.6:
1860 * "Block Copy and Block Fill will work only on cache line boundaries."
1861 *
1862 * It does not specify if an unaligned address is truncated or trapped.
1863 * Previous qemu behaviour was to truncate to 4 byte alignment, which
1864 * is obviously wrong. The only place I can see this used is in the
1865 * Linux kernel which begins with page alignment, advancing by 32,
1866 * so is always aligned. Assume truncation as the simpler option.
1867 *
1868 * Since the loads and stores are paired, allow the copy to happen
1869 * in the host endianness. The copy need not be atomic.
1870 */
1871 {
1872 MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
1873 TCGv saddr = tcg_temp_new();
1874 TCGv daddr = tcg_temp_new();
1875 TCGv_i128 tmp = tcg_temp_new_i128();
1876
1877 tcg_gen_andi_tl(saddr, src, -32);
1878 tcg_gen_andi_tl(daddr, addr, -32);
1879 tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
1880 tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
1881 tcg_gen_addi_tl(saddr, saddr, 16);
1882 tcg_gen_addi_tl(daddr, daddr, 16);
1883 tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
1884 tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
1885 }
1886 break;
1887
1888 default:
1889 {
1890 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1891 TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1892
1893 save_state(dc);
1894 #ifdef TARGET_SPARC64
1895 gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
1896 #else
1897 {
1898 TCGv_i64 t64 = tcg_temp_new_i64();
1899 tcg_gen_extu_tl_i64(t64, src);
1900 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
1901 }
1902 #endif
1903
1904 /* A write to a TLB register may alter page maps. End the TB. */
1905 dc->npc = DYNAMIC_PC;
1906 }
1907 break;
1908 }
1909 }
1910
1911 static void gen_swap_asi(DisasContext *dc, DisasASI *da,
1912 TCGv dst, TCGv src, TCGv addr)
1913 {
1914 switch (da->type) {
1915 case GET_ASI_EXCP:
1916 break;
1917 case GET_ASI_DIRECT:
1918 tcg_gen_atomic_xchg_tl(dst, addr, src,
1919 da->mem_idx, da->memop | MO_ALIGN);
1920 break;
1921 default:
1922 /* ??? Should be DAE_invalid_asi. */
1923 gen_exception(dc, TT_DATA_ACCESS);
1924 break;
1925 }
1926 }
1927
1928 static void gen_cas_asi(DisasContext *dc, DisasASI *da,
1929 TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
1930 {
1931 switch (da->type) {
1932 case GET_ASI_EXCP:
1933 return;
1934 case GET_ASI_DIRECT:
1935 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
1936 da->mem_idx, da->memop | MO_ALIGN);
1937 break;
1938 default:
1939 /* ??? Should be DAE_invalid_asi. */
1940 gen_exception(dc, TT_DATA_ACCESS);
1941 break;
1942 }
1943 }
1944
1945 static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1946 {
1947 switch (da->type) {
1948 case GET_ASI_EXCP:
1949 break;
1950 case GET_ASI_DIRECT:
1951 tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
1952 da->mem_idx, MO_UB);
1953 break;
1954 default:
1955 /* ??? In theory, this should raise DAE_invalid_asi.
1956 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
1957 if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
1958 gen_helper_exit_atomic(tcg_env);
1959 } else {
1960 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1961 TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
1962 TCGv_i64 s64, t64;
1963
1964 save_state(dc);
1965 t64 = tcg_temp_new_i64();
1966 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1967
1968 s64 = tcg_constant_i64(0xff);
1969 gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);
1970
1971 tcg_gen_trunc_i64_tl(dst, t64);
1972
1973 /* End the TB. */
1974 dc->npc = DYNAMIC_PC;
1975 }
1976 break;
1977 }
1978 }
1979
1980 static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
1981 TCGv addr, int rd)
1982 {
1983 MemOp memop = da->memop;
1984 MemOp size = memop & MO_SIZE;
1985 TCGv_i32 d32;
1986 TCGv_i64 d64, l64;
1987 TCGv addr_tmp;
1988
1989 /* TODO: Use 128-bit load/store below. */
1990 if (size == MO_128) {
1991 memop = (memop & ~MO_SIZE) | MO_64;
1992 }
1993
1994 switch (da->type) {
1995 case GET_ASI_EXCP:
1996 break;
1997
1998 case GET_ASI_DIRECT:
1999 memop |= MO_ALIGN_4;
2000 switch (size) {
2001 case MO_32:
2002 d32 = tcg_temp_new_i32();
2003 tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
2004 gen_store_fpr_F(dc, rd, d32);
2005 break;
2006
2007 case MO_64:
2008 d64 = tcg_temp_new_i64();
2009 tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
2010 gen_store_fpr_D(dc, rd, d64);
2011 break;
2012
2013 case MO_128:
2014 d64 = tcg_temp_new_i64();
2015 l64 = tcg_temp_new_i64();
2016 tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
2017 addr_tmp = tcg_temp_new();
2018 tcg_gen_addi_tl(addr_tmp, addr, 8);
2019 tcg_gen_qemu_ld_i64(l64, addr_tmp, da->mem_idx, memop);
2020 gen_store_fpr_D(dc, rd, d64);
2021 gen_store_fpr_D(dc, rd + 2, l64);
2022 break;
2023 default:
2024 g_assert_not_reached();
2025 }
2026 break;
2027
2028 case GET_ASI_BLOCK:
2029 /* Valid for lddfa on aligned registers only. */
2030 if (orig_size == MO_64 && (rd & 7) == 0) {
2031 /* The first operation checks required alignment. */
2032 addr_tmp = tcg_temp_new();
2033 d64 = tcg_temp_new_i64();
2034 for (int i = 0; ; ++i) {
2035 tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx,
2036 memop | (i == 0 ? MO_ALIGN_64 : 0));
2037 gen_store_fpr_D(dc, rd + 2 * i, d64);
2038 if (i == 7) {
2039 break;
2040 }
2041 tcg_gen_addi_tl(addr_tmp, addr, 8);
2042 addr = addr_tmp;
2043 }
2044 } else {
2045 gen_exception(dc, TT_ILL_INSN);
2046 }
2047 break;
2048
2049 case GET_ASI_SHORT:
2050 /* Valid for lddfa only. */
2051 if (orig_size == MO_64) {
2052 d64 = tcg_temp_new_i64();
2053 tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
2054 gen_store_fpr_D(dc, rd, d64);
2055 } else {
2056 gen_exception(dc, TT_ILL_INSN);
2057 }
2058 break;
2059
2060 default:
2061 {
2062 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2063 TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
2064
2065 save_state(dc);
2066 /* According to the table in the UA2011 manual, the only
2067 other asis that are valid for ldfa/lddfa/ldqfa are
2068 the NO_FAULT asis. We still need a helper for these,
2069 but we can just use the integer asi helper for them. */
2070 switch (size) {
2071 case MO_32:
2072 d64 = tcg_temp_new_i64();
2073 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
2074 d32 = tcg_temp_new_i32();
2075 tcg_gen_extrl_i64_i32(d32, d64);
2076 gen_store_fpr_F(dc, rd, d32);
2077 break;
2078 case MO_64:
2079 d64 = tcg_temp_new_i64();
2080 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
2081 gen_store_fpr_D(dc, rd, d64);
2082 break;
2083 case MO_128:
2084 d64 = tcg_temp_new_i64();
2085 l64 = tcg_temp_new_i64();
2086 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
2087 addr_tmp = tcg_temp_new();
2088 tcg_gen_addi_tl(addr_tmp, addr, 8);
2089 gen_helper_ld_asi(l64, tcg_env, addr_tmp, r_asi, r_mop);
2090 gen_store_fpr_D(dc, rd, d64);
2091 gen_store_fpr_D(dc, rd + 2, l64);
2092 break;
2093 default:
2094 g_assert_not_reached();
2095 }
2096 }
2097 break;
2098 }
2099 }
2100
2101 static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
2102 TCGv addr, int rd)
2103 {
2104 MemOp memop = da->memop;
2105 MemOp size = memop & MO_SIZE;
2106 TCGv_i32 d32;
2107 TCGv_i64 d64;
2108 TCGv addr_tmp;
2109
2110 /* TODO: Use 128-bit load/store below. */
2111 if (size == MO_128) {
2112 memop = (memop & ~MO_SIZE) | MO_64;
2113 }
2114
2115 switch (da->type) {
2116 case GET_ASI_EXCP:
2117 break;
2118
2119 case GET_ASI_DIRECT:
2120 memop |= MO_ALIGN_4;
2121 switch (size) {
2122 case MO_32:
2123 d32 = gen_load_fpr_F(dc, rd);
2124 tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
2125 break;
2126 case MO_64:
2127 d64 = gen_load_fpr_D(dc, rd);
2128 tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_4);
2129 break;
2130 case MO_128:
2131 /* Only 4-byte alignment required. However, it is legal for the
2132 cpu to signal the alignment fault, and the OS trap handler is
2133 required to fix it up. Requiring 16-byte alignment here avoids
2134 having to probe the second page before performing the first
2135 write. */
2136 d64 = gen_load_fpr_D(dc, rd);
2137 tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_16);
2138 addr_tmp = tcg_temp_new();
2139 tcg_gen_addi_tl(addr_tmp, addr, 8);
2140 d64 = gen_load_fpr_D(dc, rd + 2);
2141 tcg_gen_qemu_st_i64(d64, addr_tmp, da->mem_idx, memop);
2142 break;
2143 default:
2144 g_assert_not_reached();
2145 }
2146 break;
2147
2148 case GET_ASI_BLOCK:
2149 /* Valid for stdfa on aligned registers only. */
2150 if (orig_size == MO_64 && (rd & 7) == 0) {
2151 /* The first operation checks required alignment. */
2152 addr_tmp = tcg_temp_new();
2153 for (int i = 0; ; ++i) {
2154 d64 = gen_load_fpr_D(dc, rd + 2 * i);
2155 tcg_gen_qemu_st_i64(d64, addr, da->mem_idx,
2156 memop | (i == 0 ? MO_ALIGN_64 : 0));
2157 if (i == 7) {
2158 break;
2159 }
2160 tcg_gen_addi_tl(addr_tmp, addr, 8);
2161 addr = addr_tmp;
2162 }
2163 } else {
2164 gen_exception(dc, TT_ILL_INSN);
2165 }
2166 break;
2167
2168 case GET_ASI_SHORT:
2169 /* Valid for stdfa only. */
2170 if (orig_size == MO_64) {
2171 d64 = gen_load_fpr_D(dc, rd);
2172 tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
2173 } else {
2174 gen_exception(dc, TT_ILL_INSN);
2175 }
2176 break;
2177
2178 default:
2179 /* According to the table in the UA2011 manual, the only
2180 other asis that are valid for stfa/stdfa/stqfa are
2181 the PST* asis, which aren't currently handled. */
2182 gen_exception(dc, TT_ILL_INSN);
2183 break;
2184 }
2185 }
2186
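/*
 * LDDA targets an even/odd register pair: rd receives the data from
 * the lower address and rd+1 the data from the higher address (full
 * 64-bit halves for the TWINX forms), with the little-endian ASIs
 * additionally byte-swapping each half as noted per case below.
 */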
2187 static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2188 {
2189 TCGv hi = gen_dest_gpr(dc, rd);
2190 TCGv lo = gen_dest_gpr(dc, rd + 1);
2191
2192 switch (da->type) {
2193 case GET_ASI_EXCP:
2194 return;
2195
2196 case GET_ASI_DTWINX:
2197 #ifdef TARGET_SPARC64
2198 {
2199 MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2200 TCGv_i128 t = tcg_temp_new_i128();
2201
2202 tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
2203 /*
2204 * Note that LE twinx acts as if each 64-bit register result is
2205 * byte swapped. We perform one 128-bit LE load, so must swap
2206 * the order of the writebacks.
2207 */
2208 if ((mop & MO_BSWAP) == MO_TE) {
2209 tcg_gen_extr_i128_i64(lo, hi, t);
2210 } else {
2211 tcg_gen_extr_i128_i64(hi, lo, t);
2212 }
2213 }
2214 break;
2215 #else
2216 g_assert_not_reached();
2217 #endif
2218
2219 case GET_ASI_DIRECT:
2220 {
2221 TCGv_i64 tmp = tcg_temp_new_i64();
2222
2223 tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);
2224
2225 /* Note that LE ldda acts as if each 32-bit register
2226 result is byte swapped. Having just performed one
2227 64-bit bswap, we need now to swap the writebacks. */
2228 if ((da->memop & MO_BSWAP) == MO_TE) {
2229 tcg_gen_extr_i64_tl(lo, hi, tmp);
2230 } else {
2231 tcg_gen_extr_i64_tl(hi, lo, tmp);
2232 }
2233 }
2234 break;
2235
2236 case GET_ASI_CODE:
2237 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
2238 {
2239 MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
2240 TCGv_i64 tmp = tcg_temp_new_i64();
2241
2242 gen_helper_ld_code(tmp, tcg_env, addr, tcg_constant_i32(oi));
2243
2244 /* See above. */
2245 if ((da->memop & MO_BSWAP) == MO_TE) {
2246 tcg_gen_extr_i64_tl(lo, hi, tmp);
2247 } else {
2248 tcg_gen_extr_i64_tl(hi, lo, tmp);
2249 }
2250 }
2251 break;
2252 #else
2253 g_assert_not_reached();
2254 #endif
2255
2256 default:
2257 /* ??? In theory we've handled all of the ASIs that are valid
2258 for ldda, and this should raise DAE_invalid_asi. However,
2259 real hardware allows others. This can be seen with e.g.
2260 FreeBSD 10.3 wrt ASI_IC_TAG. */
2261 {
2262 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2263 TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2264 TCGv_i64 tmp = tcg_temp_new_i64();
2265
2266 save_state(dc);
2267 gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);
2268
2269 /* See above. */
2270 if ((da->memop & MO_BSWAP) == MO_TE) {
2271 tcg_gen_extr_i64_tl(lo, hi, tmp);
2272 } else {
2273 tcg_gen_extr_i64_tl(hi, lo, tmp);
2274 }
2275 }
2276 break;
2277 }
2278
2279 gen_store_gpr(dc, rd, hi);
2280 gen_store_gpr(dc, rd + 1, lo);
2281 }
2282
2283 static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2284 {
2285 TCGv hi = gen_load_gpr(dc, rd);
2286 TCGv lo = gen_load_gpr(dc, rd + 1);
2287
2288 switch (da->type) {
2289 case GET_ASI_EXCP:
2290 break;
2291
2292 case GET_ASI_DTWINX:
2293 #ifdef TARGET_SPARC64
2294 {
2295 MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2296 TCGv_i128 t = tcg_temp_new_i128();
2297
2298 /*
2299 * Note that LE twinx acts as if each 64-bit register result is
2300 * byte swapped. We perform one 128-bit LE store, so must swap
2301 * the order of the construction.
2302 */
2303 if ((mop & MO_BSWAP) == MO_TE) {
2304 tcg_gen_concat_i64_i128(t, lo, hi);
2305 } else {
2306 tcg_gen_concat_i64_i128(t, hi, lo);
2307 }
2308 tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
2309 }
2310 break;
2311 #else
2312 g_assert_not_reached();
2313 #endif
2314
2315 case GET_ASI_DIRECT:
2316 {
2317 TCGv_i64 t64 = tcg_temp_new_i64();
2318
2319 /* Note that LE stda acts as if each 32-bit register result is
2320 byte swapped. We will perform one 64-bit LE store, so now
2321 we must swap the order of the construction. */
2322 if ((da->memop & MO_BSWAP) == MO_TE) {
2323 tcg_gen_concat_tl_i64(t64, lo, hi);
2324 } else {
2325 tcg_gen_concat_tl_i64(t64, hi, lo);
2326 }
2327 tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
2328 }
2329 break;
2330
2331 case GET_ASI_BFILL:
2332 assert(TARGET_LONG_BITS == 32);
2333 /*
2334 * Store 32 bytes of [rd:rd+1] to ADDR.
2335 * See comments for GET_ASI_BCOPY above.
2336 */
2337 {
2338 MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
2339 TCGv_i64 t8 = tcg_temp_new_i64();
2340 TCGv_i128 t16 = tcg_temp_new_i128();
2341 TCGv daddr = tcg_temp_new();
2342
2343 tcg_gen_concat_tl_i64(t8, lo, hi);
2344 tcg_gen_concat_i64_i128(t16, t8, t8);
2345 tcg_gen_andi_tl(daddr, addr, -32);
2346 tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2347 tcg_gen_addi_tl(daddr, daddr, 16);
2348 tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2349 }
2350 break;
2351
2352 default:
2353 /* ??? In theory we've handled all of the ASIs that are valid
2354 for stda, and this should raise DAE_invalid_asi. */
2355 {
2356 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2357 TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2358 TCGv_i64 t64 = tcg_temp_new_i64();
2359
2360 /* See above. */
2361 if ((da->memop & MO_BSWAP) == MO_TE) {
2362 tcg_gen_concat_tl_i64(t64, lo, hi);
2363 } else {
2364 tcg_gen_concat_tl_i64(t64, hi, lo);
2365 }
2366
2367 save_state(dc);
2368 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2369 }
2370 break;
2371 }
2372 }
2373
2374 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2375 {
2376 #ifdef TARGET_SPARC64
2377 TCGv_i32 c32, zero, dst, s1, s2;
2378 TCGv_i64 c64 = tcg_temp_new_i64();
2379
2380 /* We have two choices here: extend the 32-bit data and use movcond_i64,
2381 or fold the comparison down to 32 bits and use movcond_i32. Choose
2382 the latter. */
2383 c32 = tcg_temp_new_i32();
2384 tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2385 tcg_gen_extrl_i64_i32(c32, c64);
2386
2387 s1 = gen_load_fpr_F(dc, rs);
2388 s2 = gen_load_fpr_F(dc, rd);
2389 dst = tcg_temp_new_i32();
2390 zero = tcg_constant_i32(0);
2391
2392 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2393
2394 gen_store_fpr_F(dc, rd, dst);
2395 #else
2396 qemu_build_not_reached();
2397 #endif
2398 }
2399
2400 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2401 {
2402 #ifdef TARGET_SPARC64
2403 TCGv_i64 dst = tcg_temp_new_i64();
2404 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
2405 gen_load_fpr_D(dc, rs),
2406 gen_load_fpr_D(dc, rd));
2407 gen_store_fpr_D(dc, rd, dst);
2408 #else
2409 qemu_build_not_reached();
2410 #endif
2411 }
2412
2413 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2414 {
2415 #ifdef TARGET_SPARC64
2416 TCGv c2 = tcg_constant_tl(cmp->c2);
2417 TCGv_i64 h = tcg_temp_new_i64();
2418 TCGv_i64 l = tcg_temp_new_i64();
2419
2420 tcg_gen_movcond_i64(cmp->cond, h, cmp->c1, c2,
2421 gen_load_fpr_D(dc, rs),
2422 gen_load_fpr_D(dc, rd));
2423 tcg_gen_movcond_i64(cmp->cond, l, cmp->c1, c2,
2424 gen_load_fpr_D(dc, rs + 2),
2425 gen_load_fpr_D(dc, rd + 2));
2426 gen_store_fpr_D(dc, rd, h);
2427 gen_store_fpr_D(dc, rd + 2, l);
2428 #else
2429 qemu_build_not_reached();
2430 #endif
2431 }
2432
2433 #ifdef TARGET_SPARC64
2434 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
2435 {
2436 TCGv_i32 r_tl = tcg_temp_new_i32();
2437
2438 /* load env->tl into r_tl */
2439 tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));
2440
2441 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2442 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2443
2444 /* calculate offset to current trap state from env->ts, reuse r_tl */
2445 tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2446 tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));
2447
2448 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2449 {
2450 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2451 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2452 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2453 }
2454 }
2455 #endif
2456
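/*
 * Double- and quad-precision register numbers are encoded in 5 bits
 * with the MSB folded into bit 0: on sparc64 this exposes the upper
 * bank %f32-%f62 (doubles) and %f32-%f60 (quads); on sparc32 bit 0
 * is ignored.
 */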
2457 static int extract_dfpreg(DisasContext *dc, int x)
2458 {
2459 int r = x & 0x1e;
2460 #ifdef TARGET_SPARC64
2461 r |= (x & 1) << 5;
2462 #endif
2463 return r;
2464 }
2465
2466 static int extract_qfpreg(DisasContext *dc, int x)
2467 {
2468 int r = x & 0x1c;
2469 #ifdef TARGET_SPARC64
2470 r |= (x & 1) << 5;
2471 #endif
2472 return r;
2473 }
2474
2475 /* Include the auto-generated decoder. */
2476 #include "decode-insns.c.inc"
2477
2478 #define TRANS(NAME, AVAIL, FUNC, ...) \
2479 static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2480 { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
2481
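/*
 * Each trans_<NAME> wrapper generated here first applies the matching
 * avail_<AVAIL> predicate below, so a decodetree pattern can be
 * rejected (letting an overlapping pattern match instead) based on
 * the configured CPU feature set.
 */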
2482 #define avail_ALL(C) true
2483 #ifdef TARGET_SPARC64
2484 # define avail_32(C) false
2485 # define avail_ASR17(C) false
2486 # define avail_CASA(C) true
2487 # define avail_DIV(C) true
2488 # define avail_MUL(C) true
2489 # define avail_POWERDOWN(C) false
2490 # define avail_64(C) true
2491 # define avail_FMAF(C) ((C)->def->features & CPU_FEATURE_FMAF)
2492 # define avail_GL(C) ((C)->def->features & CPU_FEATURE_GL)
2493 # define avail_HYPV(C) ((C)->def->features & CPU_FEATURE_HYPV)
2494 # define avail_IMA(C) ((C)->def->features & CPU_FEATURE_IMA)
2495 # define avail_VIS1(C) ((C)->def->features & CPU_FEATURE_VIS1)
2496 # define avail_VIS2(C) ((C)->def->features & CPU_FEATURE_VIS2)
2497 # define avail_VIS3(C) ((C)->def->features & CPU_FEATURE_VIS3)
2498 # define avail_VIS3B(C) avail_VIS3(C)
2499 # define avail_VIS4(C) ((C)->def->features & CPU_FEATURE_VIS4)
2500 #else
2501 # define avail_32(C) true
2502 # define avail_ASR17(C) ((C)->def->features & CPU_FEATURE_ASR17)
2503 # define avail_CASA(C) ((C)->def->features & CPU_FEATURE_CASA)
2504 # define avail_DIV(C) ((C)->def->features & CPU_FEATURE_DIV)
2505 # define avail_MUL(C) ((C)->def->features & CPU_FEATURE_MUL)
2506 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2507 # define avail_64(C) false
2508 # define avail_FMAF(C) false
2509 # define avail_GL(C) false
2510 # define avail_HYPV(C) false
2511 # define avail_IMA(C) false
2512 # define avail_VIS1(C) false
2513 # define avail_VIS2(C) false
2514 # define avail_VIS3(C) false
2515 # define avail_VIS3B(C) false
2516 # define avail_VIS4(C) false
2517 #endif
2518
2519 /* Default case for non jump instructions. */
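/*
 * Real pc/npc values are always 4-byte aligned, so values with the
 * low two bits nonzero (DYNAMIC_PC, DYNAMIC_PC_LOOKUP, JUMP_PC) are
 * sentinels meaning npc is not statically known at translation time.
 */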
2520 static bool advance_pc(DisasContext *dc)
2521 {
2522 TCGLabel *l1;
2523
2524 finishing_insn(dc);
2525
2526 if (dc->npc & 3) {
2527 switch (dc->npc) {
2528 case DYNAMIC_PC:
2529 case DYNAMIC_PC_LOOKUP:
2530 dc->pc = dc->npc;
2531 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2532 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2533 break;
2534
2535 case JUMP_PC:
2536 /* we can do a static jump */
2537 l1 = gen_new_label();
2538 tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);
2539
2540 /* jump not taken */
2541 gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);
2542
2543 /* jump taken */
2544 gen_set_label(l1);
2545 gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);
2546
2547 dc->base.is_jmp = DISAS_NORETURN;
2548 break;
2549
2550 default:
2551 g_assert_not_reached();
2552 }
2553 } else {
2554 dc->pc = dc->npc;
2555 dc->npc = dc->npc + 4;
2556 }
2557 return true;
2558 }
2559
2560 /*
2561 * Major opcodes 00 and 01 -- branches, call, and sethi
2562 */
2563
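/*
 * SPARC branch annulment: for a conditional branch, the annul bit
 * cancels the delay-slot insn only when the branch is untaken; for
 * branch-always/branch-never it cancels the delay slot outright.
 * The ALWAYS and NEVER special cases below implement exactly that.
 */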
2564 static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
2565 bool annul, int disp)
2566 {
2567 target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
2568 target_ulong npc;
2569
2570 finishing_insn(dc);
2571
2572 if (cmp->cond == TCG_COND_ALWAYS) {
2573 if (annul) {
2574 dc->pc = dest;
2575 dc->npc = dest + 4;
2576 } else {
2577 gen_mov_pc_npc(dc);
2578 dc->npc = dest;
2579 }
2580 return true;
2581 }
2582
2583 if (cmp->cond == TCG_COND_NEVER) {
2584 npc = dc->npc;
2585 if (npc & 3) {
2586 gen_mov_pc_npc(dc);
2587 if (annul) {
2588 tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
2589 }
2590 tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
2591 } else {
2592 dc->pc = npc + (annul ? 4 : 0);
2593 dc->npc = dc->pc + 4;
2594 }
2595 return true;
2596 }
2597
2598 flush_cond(dc);
2599 npc = dc->npc;
2600
2601 if (annul) {
2602 TCGLabel *l1 = gen_new_label();
2603
2604 tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
2605 gen_goto_tb(dc, 0, npc, dest);
2606 gen_set_label(l1);
2607 gen_goto_tb(dc, 1, npc + 4, npc + 8);
2608
2609 dc->base.is_jmp = DISAS_NORETURN;
2610 } else {
2611 if (npc & 3) {
2612 switch (npc) {
2613 case DYNAMIC_PC:
2614 case DYNAMIC_PC_LOOKUP:
2615 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2616 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2617 tcg_gen_movcond_tl(cmp->cond, cpu_npc,
2618 cmp->c1, tcg_constant_tl(cmp->c2),
2619 tcg_constant_tl(dest), cpu_npc);
2620 dc->pc = npc;
2621 break;
2622 default:
2623 g_assert_not_reached();
2624 }
2625 } else {
2626 dc->pc = npc;
2627 dc->npc = JUMP_PC;
2628 dc->jump = *cmp;
2629 dc->jump_pc[0] = dest;
2630 dc->jump_pc[1] = npc + 4;
2631
2632 /* The condition for cpu_cond is always NE -- normalize. */
2633 if (cmp->cond == TCG_COND_NE) {
2634 tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
2635 } else {
2636 tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
2637 }
2638 dc->cpu_cond_live = true;
2639 }
2640 }
2641 return true;
2642 }
2643
2644 static bool raise_priv(DisasContext *dc)
2645 {
2646 gen_exception(dc, TT_PRIV_INSN);
2647 return true;
2648 }
2649
2650 static bool raise_unimpfpop(DisasContext *dc)
2651 {
2652 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
2653 return true;
2654 }
2655
2656 static bool gen_trap_float128(DisasContext *dc)
2657 {
2658 if (dc->def->features & CPU_FEATURE_FLOAT128) {
2659 return false;
2660 }
2661 return raise_unimpfpop(dc);
2662 }
2663
2664 static bool do_bpcc(DisasContext *dc, arg_bcc *a)
2665 {
2666 DisasCompare cmp;
2667
2668 gen_compare(&cmp, a->cc, a->cond, dc);
2669 return advance_jump_cond(dc, &cmp, a->a, a->i);
2670 }
2671
2672 TRANS(Bicc, ALL, do_bpcc, a)
2673 TRANS(BPcc, 64, do_bpcc, a)
2674
2675 static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
2676 {
2677 DisasCompare cmp;
2678
2679 if (gen_trap_if_nofpu_fpexception(dc)) {
2680 return true;
2681 }
2682 gen_fcompare(&cmp, a->cc, a->cond);
2683 return advance_jump_cond(dc, &cmp, a->a, a->i);
2684 }
2685
2686 TRANS(FBPfcc, 64, do_fbpfcc, a)
2687 TRANS(FBfcc, ALL, do_fbpfcc, a)
2688
2689 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2690 {
2691 DisasCompare cmp;
2692
2693 if (!avail_64(dc)) {
2694 return false;
2695 }
2696 if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
2697 return false;
2698 }
2699 return advance_jump_cond(dc, &cmp, a->a, a->i);
2700 }
2701
2702 static bool trans_CALL(DisasContext *dc, arg_CALL *a)
2703 {
2704 target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2705
2706 gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
2707 gen_mov_pc_npc(dc);
2708 dc->npc = target;
2709 return true;
2710 }
2711
2712 static bool trans_NCP(DisasContext *dc, arg_NCP *a)
2713 {
2714 /*
2715 * For sparc32, always generate the no-coprocessor exception.
2716 * For sparc64, always generate illegal instruction.
2717 */
2718 #ifdef TARGET_SPARC64
2719 return false;
2720 #else
2721 gen_exception(dc, TT_NCP_INSN);
2722 return true;
2723 #endif
2724 }
2725
2726 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2727 {
2728 /* Special-case %g0 because that's the canonical nop. */
2729 if (a->rd) {
2730 gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2731 }
2732 return advance_pc(dc);
2733 }
2734
2735 /*
2736 * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2737 */
2738
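/*
 * The software trap number is computed as rs1 + rs2 (or rs1 + imm),
 * masked to the architected range (wider when the UA2005 hypervisor
 * extension applies to supervisor code), then offset by TT_TRAP.
 */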
2739 static bool do_tcc(DisasContext *dc, int cond, int cc,
2740 int rs1, bool imm, int rs2_or_imm)
2741 {
2742 int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2743 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2744 DisasCompare cmp;
2745 TCGLabel *lab;
2746 TCGv_i32 trap;
2747
2748 /* Trap never. */
2749 if (cond == 0) {
2750 return advance_pc(dc);
2751 }
2752
2753 /*
2754 * Immediate traps are the most common case. Since this value is
2755 * live across the branch, it really pays to evaluate the constant.
2756 */
2757 if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
2758 trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
2759 } else {
2760 trap = tcg_temp_new_i32();
2761 tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
2762 if (imm) {
2763 tcg_gen_addi_i32(trap, trap, rs2_or_imm);
2764 } else {
2765 TCGv_i32 t2 = tcg_temp_new_i32();
2766 tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
2767 tcg_gen_add_i32(trap, trap, t2);
2768 }
2769 tcg_gen_andi_i32(trap, trap, mask);
2770 tcg_gen_addi_i32(trap, trap, TT_TRAP);
2771 }
2772
2773 finishing_insn(dc);
2774
2775 /* Trap always. */
2776 if (cond == 8) {
2777 save_state(dc);
2778 gen_helper_raise_exception(tcg_env, trap);
2779 dc->base.is_jmp = DISAS_NORETURN;
2780 return true;
2781 }
2782
2783 /* Conditional trap. */
2784 flush_cond(dc);
2785 lab = delay_exceptionv(dc, trap);
2786 gen_compare(&cmp, cc, cond, dc);
2787 tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);
2788
2789 return advance_pc(dc);
2790 }
2791
2792 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
2793 {
2794 if (avail_32(dc) && a->cc) {
2795 return false;
2796 }
2797 return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
2798 }
2799
2800 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
2801 {
2802 if (avail_64(dc)) {
2803 return false;
2804 }
2805 return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
2806 }
2807
2808 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
2809 {
2810 if (avail_32(dc)) {
2811 return false;
2812 }
2813 return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
2814 }
2815
2816 static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
2817 {
2818 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2819 return advance_pc(dc);
2820 }
2821
2822 static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
2823 {
2824 if (avail_32(dc)) {
2825 return false;
2826 }
2827 if (a->mmask) {
2828 /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
2829 tcg_gen_mb(a->mmask | TCG_BAR_SC);
2830 }
2831 if (a->cmask) {
2832 /* For #Sync, etc, end the TB to recognize interrupts. */
2833 dc->base.is_jmp = DISAS_EXIT;
2834 }
2835 return advance_pc(dc);
2836 }
2837
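/*
 * Common scaffolding for the RD<asr/pr/hpr> patterns: check the
 * privilege, then let FUNC either fill in the destination temp or
 * return some other live TCGv (e.g. the cpu_y global) to be stored.
 */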
2838 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
2839 TCGv (*func)(DisasContext *, TCGv))
2840 {
2841 if (!priv) {
2842 return raise_priv(dc);
2843 }
2844 gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
2845 return advance_pc(dc);
2846 }
2847
2848 static TCGv do_rdy(DisasContext *dc, TCGv dst)
2849 {
2850 return cpu_y;
2851 }
2852
2853 static bool trans_RDY(DisasContext *dc, arg_RDY *a)
2854 {
2855 /*
2856 * TODO: Need a feature bit for sparcv8. In the meantime, treat all
2857 * 32-bit cpus like sparcv7, which ignores the rs1 field.
2858 * This matches after all other ASRs, so Leon3 %asr17 is handled first.
2859 */
2860 if (avail_64(dc) && a->rs1 != 0) {
2861 return false;
2862 }
2863 return do_rd_special(dc, true, a->rd, do_rdy);
2864 }
2865
2866 static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
2867 {
2868 gen_helper_rdasr17(dst, tcg_env);
2869 return dst;
2870 }
2871
2872 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2873
2874 static TCGv do_rdccr(DisasContext *dc, TCGv dst)
2875 {
2876 gen_helper_rdccr(dst, tcg_env);
2877 return dst;
2878 }
2879
2880 TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
2881
2882 static TCGv do_rdasi(DisasContext *dc, TCGv dst)
2883 {
2884 #ifdef TARGET_SPARC64
2885 return tcg_constant_tl(dc->asi);
2886 #else
2887 qemu_build_not_reached();
2888 #endif
2889 }
2890
2891 TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2892
2893 static TCGv do_rdtick(DisasContext *dc, TCGv dst)
2894 {
2895 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2896
2897 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
2898 if (translator_io_start(&dc->base)) {
2899 dc->base.is_jmp = DISAS_EXIT;
2900 }
2901 gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2902 tcg_constant_i32(dc->mem_idx));
2903 return dst;
2904 }
2905
2906 /* TODO: non-priv access only allowed when enabled. */
2907 TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)
2908
2909 static TCGv do_rdpc(DisasContext *dc, TCGv dst)
2910 {
2911 return tcg_constant_tl(address_mask_i(dc, dc->pc));
2912 }
2913
2914 TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2915
2916 static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
2917 {
2918 tcg_gen_ext_i32_tl(dst, cpu_fprs);
2919 return dst;
2920 }
2921
2922 TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)
2923
2924 static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
2925 {
2926 gen_trap_ifnofpu(dc);
2927 return cpu_gsr;
2928 }
2929
2930 TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
2931
2932 static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
2933 {
2934 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
2935 return dst;
2936 }
2937
2938 TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
2939
2940 static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
2941 {
2942 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
2943 return dst;
2944 }
2945
2946 /* TODO: non-priv access only allowed when enabled. */
2947 TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
2948
2949 static TCGv do_rdstick(DisasContext *dc, TCGv dst)
2950 {
2951 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2952
2953 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
2954 if (translator_io_start(&dc->base)) {
2955 dc->base.is_jmp = DISAS_EXIT;
2956 }
2957 gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2958 tcg_constant_i32(dc->mem_idx));
2959 return dst;
2960 }
2961
2962 /* TODO: non-priv access only allowed when enabled. */
2963 TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)
2964
2965 static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
2966 {
2967 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
2968 return dst;
2969 }
2970
2971 /* TODO: supervisor access only allowed when enabled by hypervisor. */
2972 TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
2973
2974 /*
2975 * UltraSPARC-T1 Strand status.
2976 * The HYPV check may not be sufficient: UA2005 & UA2007 describe
2977 * this ASR as implementation dependent.
2978 */
2979 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
2980 {
2981 return tcg_constant_tl(1);
2982 }
2983
2984 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
2985
2986 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
2987 {
2988 gen_helper_rdpsr(dst, tcg_env);
2989 return dst;
2990 }
2991
2992 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
2993
2994 static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
2995 {
2996 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
2997 return dst;
2998 }
2999
3000 TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
3001
3002 static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
3003 {
3004 TCGv_i32 tl = tcg_temp_new_i32();
3005 TCGv_ptr tp = tcg_temp_new_ptr();
3006
3007 tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3008 tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
3009 tcg_gen_shli_i32(tl, tl, 3);
3010 tcg_gen_ext_i32_ptr(tp, tl);
3011 tcg_gen_add_ptr(tp, tp, tcg_env);
3012
3013 tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
3014 return dst;
3015 }
3016
3017 TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
3018
3019 static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
3020 {
3021 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
3022 return dst;
3023 }
3024
3025 TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)
3026
3027 static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
3028 {
3029 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
3030 return dst;
3031 }
3032
3033 TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)
3034
3035 static TCGv do_rdhver(DisasContext *dc, TCGv dst)
3036 {
3037 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
3038 return dst;
3039 }
3040
3041 TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)
3042
3043 static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
3044 {
3045 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
3046 return dst;
3047 }
3048
3049 TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
3050 do_rdhstick_cmpr)
3051
3052 static TCGv do_rdwim(DisasContext *dc, TCGv dst)
3053 {
3054 tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
3055 return dst;
3056 }
3057
3058 TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
3059
3060 static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
3061 {
3062 #ifdef TARGET_SPARC64
3063 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3064
3065 gen_load_trap_state_at_tl(r_tsptr);
3066 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
3067 return dst;
3068 #else
3069 qemu_build_not_reached();
3070 #endif
3071 }
3072
3073 TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
3074
3075 static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
3076 {
3077 #ifdef TARGET_SPARC64
3078 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3079
3080 gen_load_trap_state_at_tl(r_tsptr);
3081 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
3082 return dst;
3083 #else
3084 qemu_build_not_reached();
3085 #endif
3086 }
3087
3088 TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
3089
3090 static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
3091 {
3092 #ifdef TARGET_SPARC64
3093 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3094
3095 gen_load_trap_state_at_tl(r_tsptr);
3096 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
3097 return dst;
3098 #else
3099 qemu_build_not_reached();
3100 #endif
3101 }
3102
3103 TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
3104
3105 static TCGv do_rdtt(DisasContext *dc, TCGv dst)
3106 {
3107 #ifdef TARGET_SPARC64
3108 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3109
3110 gen_load_trap_state_at_tl(r_tsptr);
3111 tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
3112 return dst;
3113 #else
3114 qemu_build_not_reached();
3115 #endif
3116 }
3117
3118 TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
3119 TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
3120
3121 static TCGv do_rdtba(DisasContext *dc, TCGv dst)
3122 {
3123 return cpu_tbr;
3124 }
3125
3126 TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3127 TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3128
3129 static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
3130 {
3131 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
3132 return dst;
3133 }
3134
3135 TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
3136
3137 static TCGv do_rdtl(DisasContext *dc, TCGv dst)
3138 {
3139 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
3140 return dst;
3141 }
3142
3143 TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
3144
3145 static TCGv do_rdpil(DisasContext *dc, TCGv dst)
3146 {
3147 tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
3148 return dst;
3149 }
3150
3151 TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
3152
3153 static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
3154 {
3155 gen_helper_rdcwp(dst, tcg_env);
3156 return dst;
3157 }
3158
3159 TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
3160
3161 static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
3162 {
3163 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
3164 return dst;
3165 }
3166
3167 TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
3168
3169 static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
3170 {
3171 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
3172 return dst;
3173 }
3174
3175 TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
3176 do_rdcanrestore)
3177
3178 static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
3179 {
3180 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
3181 return dst;
3182 }
3183
3184 TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
3185
3186 static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
3187 {
3188 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
3189 return dst;
3190 }
3191
3192 TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)
3193
3194 static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
3195 {
3196 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
3197 return dst;
3198 }
3199
3200 TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)
3201
3202 static TCGv do_rdgl(DisasContext *dc, TCGv dst)
3203 {
3204 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
3205 return dst;
3206 }
3207
3208 TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3209
3210 /* UA2005 strand status */
3211 static TCGv do_rdssr(DisasContext *dc, TCGv dst)
3212 {
3213 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
3214 return dst;
3215 }
3216
3217 TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)
3218
3219 static TCGv do_rdver(DisasContext *dc, TCGv dst)
3220 {
3221 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
3222 return dst;
3223 }
3224
3225 TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3226
3227 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3228 {
3229 if (avail_64(dc)) {
3230 gen_helper_flushw(tcg_env);
3231 return advance_pc(dc);
3232 }
3233 return false;
3234 }
3235
3236 static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
3237 void (*func)(DisasContext *, TCGv))
3238 {
3239 TCGv src;
3240
3241 /* For simplicity, we under-decoded the rs2 form. */
3242 if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
3243 return false;
3244 }
3245 if (!priv) {
3246 return raise_priv(dc);
3247 }
3248
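    /*
     * The architected source operand for these writes is
     * "r[rs1] xor r[rs2]" (or "r[rs1] xor sign_ext(simm13)"),
     * hence the xor rather than an add below.
     */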
3249 if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
3250 src = tcg_constant_tl(a->rs2_or_imm);
3251 } else {
3252 TCGv src1 = gen_load_gpr(dc, a->rs1);
3253 if (a->rs2_or_imm == 0) {
3254 src = src1;
3255 } else {
3256 src = tcg_temp_new();
3257 if (a->imm) {
3258 tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
3259 } else {
3260 tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
3261 }
3262 }
3263 }
3264 func(dc, src);
3265 return advance_pc(dc);
3266 }
3267
3268 static void do_wry(DisasContext *dc, TCGv src)
3269 {
3270 tcg_gen_ext32u_tl(cpu_y, src);
3271 }
3272
3273 TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
3274
3275 static void do_wrccr(DisasContext *dc, TCGv src)
3276 {
3277 gen_helper_wrccr(tcg_env, src);
3278 }
3279
3280 TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
3281
3282 static void do_wrasi(DisasContext *dc, TCGv src)
3283 {
3284 TCGv tmp = tcg_temp_new();
3285
3286 tcg_gen_ext8u_tl(tmp, src);
3287 tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
3288 /* End TB to notice changed ASI. */
3289 dc->base.is_jmp = DISAS_EXIT;
3290 }
3291
3292 TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
3293
3294 static void do_wrfprs(DisasContext *dc, TCGv src)
3295 {
3296 #ifdef TARGET_SPARC64
3297 tcg_gen_trunc_tl_i32(cpu_fprs, src);
3298 dc->fprs_dirty = 0;
3299 dc->base.is_jmp = DISAS_EXIT;
3300 #else
3301 qemu_build_not_reached();
3302 #endif
3303 }
3304
3305 TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
3306
3307 static void do_wrgsr(DisasContext *dc, TCGv src)
3308 {
3309 gen_trap_ifnofpu(dc);
3310 tcg_gen_mov_tl(cpu_gsr, src);
3311 }
3312
3313 TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3314
3315 static void do_wrsoftint_set(DisasContext *dc, TCGv src)
3316 {
3317 gen_helper_set_softint(tcg_env, src);
3318 }
3319
3320 TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)
3321
3322 static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
3323 {
3324 gen_helper_clear_softint(tcg_env, src);
3325 }
3326
3327 TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)
3328
3329 static void do_wrsoftint(DisasContext *dc, TCGv src)
3330 {
3331 gen_helper_write_softint(tcg_env, src);
3332 }
3333
3334 TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3335
3336 static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
3337 {
3338 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3339
3340 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
3341 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3342 translator_io_start(&dc->base);
3343 gen_helper_tick_set_limit(r_tickptr, src);
3344 /* End TB to handle timer interrupt */
3345 dc->base.is_jmp = DISAS_EXIT;
3346 }
3347
3348 TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3349
3350 static void do_wrstick(DisasContext *dc, TCGv src)
3351 {
3352 #ifdef TARGET_SPARC64
3353 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3354
3355 tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3356 translator_io_start(&dc->base);
3357 gen_helper_tick_set_count(r_tickptr, src);
3358 /* End TB to handle timer interrupt */
3359 dc->base.is_jmp = DISAS_EXIT;
3360 #else
3361 qemu_build_not_reached();
3362 #endif
3363 }
3364
3365 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3366
3367 static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
3368 {
3369 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3370
3371 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
3372 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
3373 translator_io_start(&dc->base);
3374 gen_helper_tick_set_limit(r_tickptr, src);
3375 /* End TB to handle timer interrupt */
3376 dc->base.is_jmp = DISAS_EXIT;
3377 }
3378
3379 TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3380
3381 static void do_wrpowerdown(DisasContext *dc, TCGv src)
3382 {
3383 finishing_insn(dc);
3384 save_state(dc);
3385 gen_helper_power_down(tcg_env);
3386 }
3387
3388 TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3389
3390 static void do_wrmwait(DisasContext *dc, TCGv src)
3391 {
3392 /*
3393 * TODO: This is a stub version of mwait, which merely recognizes
3394 * interrupts immediately and does not wait.
3395 */
3396 dc->base.is_jmp = DISAS_EXIT;
3397 }
3398
3399 TRANS(WRMWAIT, VIS4, do_wr_special, a, true, do_wrmwait)
3400
3401 static void do_wrpsr(DisasContext *dc, TCGv src)
3402 {
3403 gen_helper_wrpsr(tcg_env, src);
3404 dc->base.is_jmp = DISAS_EXIT;
3405 }
3406
3407 TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3408
3409 static void do_wrwim(DisasContext *dc, TCGv src)
3410 {
3411 target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
3412 TCGv tmp = tcg_temp_new();
3413
3414 tcg_gen_andi_tl(tmp, src, mask);
3415 tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
3416 }
3417
3418 TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3419
3420 static void do_wrtpc(DisasContext *dc, TCGv src)
3421 {
3422 #ifdef TARGET_SPARC64
3423 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3424
3425 gen_load_trap_state_at_tl(r_tsptr);
3426 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
3427 #else
3428 qemu_build_not_reached();
3429 #endif
3430 }
3431
3432 TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)
3433
3434 static void do_wrtnpc(DisasContext *dc, TCGv src)
3435 {
3436 #ifdef TARGET_SPARC64
3437 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3438
3439 gen_load_trap_state_at_tl(r_tsptr);
3440 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
3441 #else
3442 qemu_build_not_reached();
3443 #endif
3444 }
3445
3446 TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)
3447
3448 static void do_wrtstate(DisasContext *dc, TCGv src)
3449 {
3450 #ifdef TARGET_SPARC64
3451 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3452
3453 gen_load_trap_state_at_tl(r_tsptr);
3454 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
3455 #else
3456 qemu_build_not_reached();
3457 #endif
3458 }
3459
3460 TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)
3461
3462 static void do_wrtt(DisasContext *dc, TCGv src)
3463 {
3464 #ifdef TARGET_SPARC64
3465 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3466
3467 gen_load_trap_state_at_tl(r_tsptr);
3468 tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
3469 #else
3470 qemu_build_not_reached();
3471 #endif
3472 }
3473
3474 TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3475
3476 static void do_wrtick(DisasContext *dc, TCGv src)
3477 {
3478 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3479
3480 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3481 translator_io_start(&dc->base);
3482 gen_helper_tick_set_count(r_tickptr, src);
3483 /* End TB to handle timer interrupt */
3484 dc->base.is_jmp = DISAS_EXIT;
3485 }
3486
3487 TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3488
3489 static void do_wrtba(DisasContext *dc, TCGv src)
3490 {
3491 tcg_gen_mov_tl(cpu_tbr, src);
3492 }
3493
3494 TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3495
3496 static void do_wrpstate(DisasContext *dc, TCGv src)
3497 {
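    /*
     * PSTATE changes privilege, endianness and address masking, all
     * of which affect how code is translated, so the TB must end.
     */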
3498 save_state(dc);
3499 if (translator_io_start(&dc->base)) {
3500 dc->base.is_jmp = DISAS_EXIT;
3501 }
3502 gen_helper_wrpstate(tcg_env, src);
3503 dc->npc = DYNAMIC_PC;
3504 }
3505
3506 TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
3507
3508 static void do_wrtl(DisasContext *dc, TCGv src)
3509 {
3510 save_state(dc);
3511 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
3512 dc->npc = DYNAMIC_PC;
3513 }
3514
3515 TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
3516
3517 static void do_wrpil(DisasContext *dc, TCGv src)
3518 {
3519 if (translator_io_start(&dc->base)) {
3520 dc->base.is_jmp = DISAS_EXIT;
3521 }
3522 gen_helper_wrpil(tcg_env, src);
3523 }
3524
3525 TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3526
3527 static void do_wrcwp(DisasContext *dc, TCGv src)
3528 {
3529 gen_helper_wrcwp(tcg_env, src);
3530 }
3531
3532 TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3533
3534 static void do_wrcansave(DisasContext *dc, TCGv src)
3535 {
3536 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
3537 }
3538
3539 TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)
3540
3541 static void do_wrcanrestore(DisasContext *dc, TCGv src)
3542 {
3543 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
3544 }
3545
3546 TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)
3547
3548 static void do_wrcleanwin(DisasContext *dc, TCGv src)
3549 {
3550 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
3551 }
3552
3553 TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)
3554
3555 static void do_wrotherwin(DisasContext *dc, TCGv src)
3556 {
3557 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
3558 }
3559
3560 TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)
3561
3562 static void do_wrwstate(DisasContext *dc, TCGv src)
3563 {
3564 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
3565 }
3566
3567 TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
3568
3569 static void do_wrgl(DisasContext *dc, TCGv src)
3570 {
3571 gen_helper_wrgl(tcg_env, src);
3572 }
3573
3574 TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3575
3576 /* UA2005 strand status */
3577 static void do_wrssr(DisasContext *dc, TCGv src)
3578 {
3579 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
3580 }
3581
3582 TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)
3583
3584 TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3585
3586 static void do_wrhpstate(DisasContext *dc, TCGv src)
3587 {
3588 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
3589 dc->base.is_jmp = DISAS_EXIT;
3590 }
3591
3592 TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3593
3594 static void do_wrhtstate(DisasContext *dc, TCGv src)
3595 {
3596 TCGv_i32 tl = tcg_temp_new_i32();
3597 TCGv_ptr tp = tcg_temp_new_ptr();
3598
3599 tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3600 tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
3601 tcg_gen_shli_i32(tl, tl, 3);
3602 tcg_gen_ext_i32_ptr(tp, tl);
3603 tcg_gen_add_ptr(tp, tp, tcg_env);
3604
3605 tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
3606 }
3607
3608 TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
3609
3610 static void do_wrhintp(DisasContext *dc, TCGv src)
3611 {
3612 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
3613 }
3614
3615 TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)
3616
3617 static void do_wrhtba(DisasContext *dc, TCGv src)
3618 {
3619 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
3620 }
3621
3622 TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
3623
3624 static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
3625 {
3626 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3627
3628 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
3629 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
3630 translator_io_start(&dc->base);
3631 gen_helper_tick_set_limit(r_tickptr, src);
3632 /* End TB to handle timer interrupt */
3633 dc->base.is_jmp = DISAS_EXIT;
3634 }
3635
3636 TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
3637 do_wrhstick_cmpr)
3638
3639 static bool do_saved_restored(DisasContext *dc, bool saved)
3640 {
3641 if (!supervisor(dc)) {
3642 return raise_priv(dc);
3643 }
3644 if (saved) {
3645 gen_helper_saved(tcg_env);
3646 } else {
3647 gen_helper_restored(tcg_env);
3648 }
3649 return advance_pc(dc);
3650 }
3651
3652 TRANS(SAVED, 64, do_saved_restored, true)
3653 TRANS(RESTORED, 64, do_saved_restored, false)
3654
3655 static bool trans_NOP(DisasContext *dc, arg_NOP *a)
3656 {
3657 return advance_pc(dc);
3658 }
3659
3660 /*
3661 * TODO: Need a feature bit for sparcv8.
3662 * In the meantime, treat all 32-bit cpus like sparcv7.
3663 */
3664 TRANS(NOP_v7, 32, trans_NOP, a)
3665 TRANS(NOP_v9, 64, trans_NOP, a)
3666
3667 static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
3668 void (*func)(TCGv, TCGv, TCGv),
3669 void (*funci)(TCGv, TCGv, target_long),
3670 bool logic_cc)
3671 {
3672 TCGv dst, src1;
3673
3674 /* For simplicity, we under-decoded the rs2 form. */
3675 if (!a->imm && a->rs2_or_imm & ~0x1f) {
3676 return false;
3677 }
3678
3679 if (logic_cc) {
3680 dst = cpu_cc_N;
3681 } else {
3682 dst = gen_dest_gpr(dc, a->rd);
3683 }
3684 src1 = gen_load_gpr(dc, a->rs1);
3685
3686 if (a->imm || a->rs2_or_imm == 0) {
3687 if (funci) {
3688 funci(dst, src1, a->rs2_or_imm);
3689 } else {
3690 func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
3691 }
3692 } else {
3693 func(dst, src1, cpu_regs[a->rs2_or_imm]);
3694 }
3695
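/*
 * With logic_cc the result was computed directly into cpu_cc_N above;
 * Z mirrors the result (only its zero-ness matters) and C/V are
 * cleared.  On sparc64 the 32-bit icc copies are updated as well.
 */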
3696 if (logic_cc) {
3697 if (TARGET_LONG_BITS == 64) {
3698 tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
3699 tcg_gen_movi_tl(cpu_icc_C, 0);
3700 }
3701 tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
3702 tcg_gen_movi_tl(cpu_cc_C, 0);
3703 tcg_gen_movi_tl(cpu_cc_V, 0);
3704 }
3705
3706 gen_store_gpr(dc, a->rd, dst);
3707 return advance_pc(dc);
3708 }
3709
3710 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
3711 void (*func)(TCGv, TCGv, TCGv),
3712 void (*funci)(TCGv, TCGv, target_long),
3713 void (*func_cc)(TCGv, TCGv, TCGv))
3714 {
3715 if (a->cc) {
3716 return do_arith_int(dc, a, func_cc, NULL, false);
3717 }
3718 return do_arith_int(dc, a, func, funci, false);
3719 }
3720
3721 static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
3722 void (*func)(TCGv, TCGv, TCGv),
3723 void (*funci)(TCGv, TCGv, target_long))
3724 {
3725 return do_arith_int(dc, a, func, funci, a->cc);
3726 }
3727
3728 TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
3729 TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
3730 TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
3731 TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)
3732
3733 TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
3734 TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
3735 TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
3736 TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)
3737
3738 TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
3739 TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
3740 TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
3741 TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
3742 TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)
3743
3744 TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
3745 TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
3746 TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
3747 TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)
3748
3749 TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
3750 TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)
3751
3752 /* TODO: Should have feature bit -- comes in with UltraSparc T2. */
3753 TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3754
3755 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
3756 {
3757 /* OR with %g0 is the canonical alias for MOV. */
3758 if (!a->cc && a->rs1 == 0) {
3759 if (a->imm || a->rs2_or_imm == 0) {
3760 gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
3761 } else if (a->rs2_or_imm & ~0x1f) {
3762 /* For simplicity, we under-decoded the rs2 form. */
3763 return false;
3764 } else {
3765 gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
3766 }
3767 return advance_pc(dc);
3768 }
3769 return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
3770 }
3771
3772 static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
3773 {
3774 TCGv_i64 t1, t2;
3775 TCGv dst;
3776
3777 if (!avail_DIV(dc)) {
3778 return false;
3779 }
3780 /* For simplicity, we under-decoded the rs2 form. */
3781 if (!a->imm && a->rs2_or_imm & ~0x1f) {
3782 return false;
3783 }
3784
3785 if (unlikely(a->rs2_or_imm == 0)) {
3786 gen_exception(dc, TT_DIV_ZERO);
3787 return true;
3788 }
3789
3790 if (a->imm) {
3791 t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
3792 } else {
3793 TCGLabel *lab;
3794 TCGv_i32 n2;
3795
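/*
 * A register divisor can only be tested at runtime: branch to a
 * delayed exception that raises TT_DIV_ZERO if it is zero.
 */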
3796 finishing_insn(dc);
3797 flush_cond(dc);
3798
3799 n2 = tcg_temp_new_i32();
3800 tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);
3801
3802 lab = delay_exception(dc, TT_DIV_ZERO);
3803 tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);
3804
3805 t2 = tcg_temp_new_i64();
3806 #ifdef TARGET_SPARC64
3807 tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
3808 #else
3809 tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
3810 #endif
3811 }
3812
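/*
 * The dividend is the 64-bit concatenation Y:rs1.  An overflowing
 * quotient saturates to UINT32_MAX, the architected UDIV result
 * on overflow.
 */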
3813 t1 = tcg_temp_new_i64();
3814 tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);
3815
3816 tcg_gen_divu_i64(t1, t1, t2);
3817 tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));
3818
3819 dst = gen_dest_gpr(dc, a->rd);
3820 tcg_gen_trunc_i64_tl(dst, t1);
3821 gen_store_gpr(dc, a->rd, dst);
3822 return advance_pc(dc);
3823 }
3824
3825 static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
3826 {
3827 TCGv dst, src1, src2;
3828
3829 if (!avail_64(dc)) {
3830 return false;
3831 }
3832 /* For simplicity, we under-decoded the rs2 form. */
3833 if (!a->imm && a->rs2_or_imm & ~0x1f) {
3834 return false;
3835 }
3836
3837 if (unlikely(a->rs2_or_imm == 0)) {
3838 gen_exception(dc, TT_DIV_ZERO);
3839 return true;
3840 }
3841
3842 if (a->imm) {
3843 src2 = tcg_constant_tl(a->rs2_or_imm);
3844 } else {
3845 TCGLabel *lab;
3846
3847 finishing_insn(dc);
3848 flush_cond(dc);
3849
3850 lab = delay_exception(dc, TT_DIV_ZERO);
3851 src2 = cpu_regs[a->rs2_or_imm];
3852 tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3853 }
3854
3855 dst = gen_dest_gpr(dc, a->rd);
3856 src1 = gen_load_gpr(dc, a->rs1);
3857
3858 tcg_gen_divu_tl(dst, src1, src2);
3859 gen_store_gpr(dc, a->rd, dst);
3860 return advance_pc(dc);
3861 }
3862
3863 static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
3864 {
3865 TCGv dst, src1, src2;
3866
3867 if (!avail_64(dc)) {
3868 return false;
3869 }
3870 /* For simplicity, we under-decoded the rs2 form. */
3871 if (!a->imm && a->rs2_or_imm & ~0x1f) {
3872 return false;
3873 }
3874
3875 if (unlikely(a->rs2_or_imm == 0)) {
3876 gen_exception(dc, TT_DIV_ZERO);
3877 return true;
3878 }
3879
3880 dst = gen_dest_gpr(dc, a->rd);
3881 src1 = gen_load_gpr(dc, a->rs1);
3882
3883 if (a->imm) {
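/*
 * x / -1 is -x for every x, and neg of INT64_MIN wraps back to
 * itself, matching the architected result while avoiding the
 * INT64_MIN / -1 trap on the host.
 */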
3884 if (unlikely(a->rs2_or_imm == -1)) {
3885 tcg_gen_neg_tl(dst, src1);
3886 gen_store_gpr(dc, a->rd, dst);
3887 return advance_pc(dc);
3888 }
3889 src2 = tcg_constant_tl(a->rs2_or_imm);
3890 } else {
3891 TCGLabel *lab;
3892 TCGv t1, t2;
3893
3894 finishing_insn(dc);
3895 flush_cond(dc);
3896
3897 lab = delay_exception(dc, TT_DIV_ZERO);
3898 src2 = cpu_regs[a->rs2_or_imm];
3899 tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3900
3901 /*
3902 * Need to avoid INT64_MIN / -1, which will trap on x86 host.
3903 * Set SRC2 to 1 as a new divisor, to produce the correct result.
3904 */
3905 t1 = tcg_temp_new();
3906 t2 = tcg_temp_new();
3907 tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
3908 tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
3909 tcg_gen_and_tl(t1, t1, t2);
3910 tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
3911 tcg_constant_tl(1), src2);
3912 src2 = t1;
3913 }
3914
3915 tcg_gen_div_tl(dst, src1, src2);
3916 gen_store_gpr(dc, a->rd, dst);
3917 return advance_pc(dc);
3918 }
3919
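/*
 * VIS EDGE instructions: compute left and right partial-store masks
 * from the low bits of rs1 and rs2.  If both addresses fall within
 * the same aligned 8-byte word, the two masks are intersected;
 * otherwise only the left mask is used.  The cc forms also set the
 * integer condition codes from rs1 - rs2.
 */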
3920 static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
3921 int width, bool cc, bool little_endian)
3922 {
3923 TCGv dst, s1, s2, l, r, t, m;
3924 uint64_t amask = address_mask_i(dc, -8);
3925
3926 dst = gen_dest_gpr(dc, a->rd);
3927 s1 = gen_load_gpr(dc, a->rs1);
3928 s2 = gen_load_gpr(dc, a->rs2);
3929
3930 if (cc) {
3931 gen_op_subcc(cpu_cc_N, s1, s2);
3932 }
3933
3934 l = tcg_temp_new();
3935 r = tcg_temp_new();
3936 t = tcg_temp_new();
3937
3938 switch (width) {
3939 case 8:
3940 tcg_gen_andi_tl(l, s1, 7);
3941 tcg_gen_andi_tl(r, s2, 7);
3942 tcg_gen_xori_tl(r, r, 7);
3943 m = tcg_constant_tl(0xff);
3944 break;
3945 case 16:
3946 tcg_gen_extract_tl(l, s1, 1, 2);
3947 tcg_gen_extract_tl(r, s2, 1, 2);
3948 tcg_gen_xori_tl(r, r, 3);
3949 m = tcg_constant_tl(0xf);
3950 break;
3951 case 32:
3952 tcg_gen_extract_tl(l, s1, 2, 1);
3953 tcg_gen_extract_tl(r, s2, 2, 1);
3954 tcg_gen_xori_tl(r, r, 1);
3955 m = tcg_constant_tl(0x3);
3956 break;
3957 default:
3958 abort();
3959 }
3960
3961 /* Compute Left Edge */
3962 if (little_endian) {
3963 tcg_gen_shl_tl(l, m, l);
3964 tcg_gen_and_tl(l, l, m);
3965 } else {
3966 tcg_gen_shr_tl(l, m, l);
3967 }
3968 /* Compute Right Edge */
3969 if (little_endian) {
3970 tcg_gen_shr_tl(r, m, r);
3971 } else {
3972 tcg_gen_shl_tl(r, m, r);
3973 tcg_gen_and_tl(r, r, m);
3974 }
3975
3976 /* Compute dst = (s1 == s2 under amask ? l & r : l) */
3977 tcg_gen_xor_tl(t, s1, s2);
3978 tcg_gen_and_tl(r, r, l);
3979 tcg_gen_movcond_tl(TCG_COND_TSTEQ, dst, t, tcg_constant_tl(amask), r, l);
3980
3981 gen_store_gpr(dc, a->rd, dst);
3982 return advance_pc(dc);
3983 }
3984
3985 TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
3986 TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
3987 TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
3988 TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
3989 TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
3990 TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)
3991
3992 TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
3993 TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
3994 TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
3995 TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
3996 TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
3997 TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
3998
3999 static bool do_rr(DisasContext *dc, arg_r_r *a,
4000 void (*func)(TCGv, TCGv))
4001 {
4002 TCGv dst = gen_dest_gpr(dc, a->rd);
4003 TCGv src = gen_load_gpr(dc, a->rs);
4004
4005 func(dst, src);
4006 gen_store_gpr(dc, a->rd, dst);
4007 return advance_pc(dc);
4008 }
4009
4010 TRANS(LZCNT, VIS3, do_rr, a, gen_op_lzcnt)
4011
4012 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
4013 void (*func)(TCGv, TCGv, TCGv))
4014 {
4015 TCGv dst = gen_dest_gpr(dc, a->rd);
4016 TCGv src1 = gen_load_gpr(dc, a->rs1);
4017 TCGv src2 = gen_load_gpr(dc, a->rs2);
4018
4019 func(dst, src1, src2);
4020 gen_store_gpr(dc, a->rd, dst);
4021 return advance_pc(dc);
4022 }
4023
4024 TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
4025 TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
4026 TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
4027
4028 TRANS(ADDXC, VIS3, do_rrr, a, gen_op_addxc)
4029 TRANS(ADDXCcc, VIS3, do_rrr, a, gen_op_addxccc)
4030
4031 TRANS(SUBXC, VIS4, do_rrr, a, gen_op_subxc)
4032 TRANS(SUBXCcc, VIS4, do_rrr, a, gen_op_subxccc)
4033
4034 TRANS(UMULXHI, VIS3, do_rrr, a, gen_op_umulxhi)
4035
4036 static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
4037 {
4038 #ifdef TARGET_SPARC64
4039 TCGv tmp = tcg_temp_new();
4040
4041 tcg_gen_add_tl(tmp, s1, s2);
4042 tcg_gen_andi_tl(dst, tmp, -8);
4043 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
4044 #else
4045 g_assert_not_reached();
4046 #endif
4047 }
4048
4049 static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
4050 {
4051 #ifdef TARGET_SPARC64
4052 TCGv tmp = tcg_temp_new();
4053
4054 tcg_gen_add_tl(tmp, s1, s2);
4055 tcg_gen_andi_tl(dst, tmp, -8);
4056 tcg_gen_neg_tl(tmp, tmp);
4057 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
4058 #else
4059 g_assert_not_reached();
4060 #endif
4061 }
4062
4063 TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
4064 TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
4065
4066 static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
4067 {
4068 #ifdef TARGET_SPARC64
4069 tcg_gen_add_tl(dst, s1, s2);
4070 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
4071 #else
4072 g_assert_not_reached();
4073 #endif
4074 }
4075
4076 TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
4077
4078 static bool do_cmask(DisasContext *dc, int rs2, void (*func)(TCGv, TCGv, TCGv))
4079 {
4080 func(cpu_gsr, cpu_gsr, gen_load_gpr(dc, rs2));
4081 return true;
4082 }
4083
4084 TRANS(CMASK8, VIS3, do_cmask, a->rs2, gen_helper_cmask8)
4085 TRANS(CMASK16, VIS3, do_cmask, a->rs2, gen_helper_cmask16)
4086 TRANS(CMASK32, VIS3, do_cmask, a->rs2, gen_helper_cmask32)
4087
4088 static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
4089 {
4090 TCGv dst, src1, src2;
4091
4092 /* Reject 64-bit shifts for sparc32. */
4093 if (avail_32(dc) && a->x) {
4094 return false;
4095 }
4096
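/* The shift count is taken modulo 32, or modulo 64 for the x form. */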
4097 src2 = tcg_temp_new();
4098 tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
4099 src1 = gen_load_gpr(dc, a->rs1);
4100 dst = gen_dest_gpr(dc, a->rd);
4101
4102 if (l) {
4103 tcg_gen_shl_tl(dst, src1, src2);
4104 if (!a->x) {
4105 tcg_gen_ext32u_tl(dst, dst);
4106 }
4107 } else if (u) {
4108 if (!a->x) {
4109 tcg_gen_ext32u_tl(dst, src1);
4110 src1 = dst;
4111 }
4112 tcg_gen_shr_tl(dst, src1, src2);
4113 } else {
4114 if (!a->x) {
4115 tcg_gen_ext32s_tl(dst, src1);
4116 src1 = dst;
4117 }
4118 tcg_gen_sar_tl(dst, src1, src2);
4119 }
4120 gen_store_gpr(dc, a->rd, dst);
4121 return advance_pc(dc);
4122 }
4123
4124 TRANS(SLL_r, ALL, do_shift_r, a, true, true)
4125 TRANS(SRL_r, ALL, do_shift_r, a, false, true)
4126 TRANS(SRA_r, ALL, do_shift_r, a, false, false)
4127
4128 static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
4129 {
4130 TCGv dst, src1;
4131
4132 /* Reject 64-bit shifts for sparc32. */
4133 if (avail_32(dc) && (a->x || a->i >= 32)) {
4134 return false;
4135 }
4136
4137 src1 = gen_load_gpr(dc, a->rs1);
4138 dst = gen_dest_gpr(dc, a->rd);
4139
4140 if (avail_32(dc) || a->x) {
4141 if (l) {
4142 tcg_gen_shli_tl(dst, src1, a->i);
4143 } else if (u) {
4144 tcg_gen_shri_tl(dst, src1, a->i);
4145 } else {
4146 tcg_gen_sari_tl(dst, src1, a->i);
4147 }
4148 } else {
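/*
 * Pre-V9 32-bit shift on a 64-bit cpu: fold the shift and the
 * zero- or sign-extension into one deposit/extract operation.
 */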
4149 if (l) {
4150 tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
4151 } else if (u) {
4152 tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
4153 } else {
4154 tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
4155 }
4156 }
4157 gen_store_gpr(dc, a->rd, dst);
4158 return advance_pc(dc);
4159 }
4160
4161 TRANS(SLL_i, ALL, do_shift_i, a, true, true)
4162 TRANS(SRL_i, ALL, do_shift_i, a, false, true)
4163 TRANS(SRA_i, ALL, do_shift_i, a, false, false)
4164
4165 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
4166 {
4167 /* For simplicity, we under-decoded the rs2 form. */
4168 if (!imm && rs2_or_imm & ~0x1f) {
4169 return NULL;
4170 }
4171 if (imm || rs2_or_imm == 0) {
4172 return tcg_constant_tl(rs2_or_imm);
4173 } else {
4174 return cpu_regs[rs2_or_imm];
4175 }
4176 }
4177
4178 static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
4179 {
4180 TCGv dst = gen_load_gpr(dc, rd);
4181 TCGv c2 = tcg_constant_tl(cmp->c2);
4182
4183 tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
4184 gen_store_gpr(dc, rd, dst);
4185 return advance_pc(dc);
4186 }
4187
4188 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
4189 {
4190 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4191 DisasCompare cmp;
4192
4193 if (src2 == NULL) {
4194 return false;
4195 }
4196 gen_compare(&cmp, a->cc, a->cond, dc);
4197 return do_mov_cond(dc, &cmp, a->rd, src2);
4198 }
4199
4200 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
4201 {
4202 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4203 DisasCompare cmp;
4204
4205 if (src2 == NULL) {
4206 return false;
4207 }
4208 gen_fcompare(&cmp, a->cc, a->cond);
4209 return do_mov_cond(dc, &cmp, a->rd, src2);
4210 }
4211
4212 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
4213 {
4214 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4215 DisasCompare cmp;
4216
4217 if (src2 == NULL) {
4218 return false;
4219 }
4220 if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
4221 return false;
4222 }
4223 return do_mov_cond(dc, &cmp, a->rd, src2);
4224 }
4225
4226 static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
4227 bool (*func)(DisasContext *dc, int rd, TCGv src))
4228 {
4229 TCGv src1, sum;
4230
4231 /* For simplicity, we under-decoded the rs2 form. */
4232 if (!a->imm && a->rs2_or_imm & ~0x1f) {
4233 return false;
4234 }
4235
4236 /*
4237 * Always load the sum into a new temporary.
4238 * This is required to capture the value across a window change,
4239 * e.g. SAVE and RESTORE, and may be optimized away otherwise.
4240 */
4241 sum = tcg_temp_new();
4242 src1 = gen_load_gpr(dc, a->rs1);
4243 if (a->imm || a->rs2_or_imm == 0) {
4244 tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
4245 } else {
4246 tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
4247 }
4248 return func(dc, a->rd, sum);
4249 }
4250
4251 static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
4252 {
4253 /*
4254 * Preserve pc across advance, so that we can delay
4255 * the writeback to rd until after src is consumed.
4256 */
4257 target_ulong cur_pc = dc->pc;
4258
4259 gen_check_align(dc, src, 3);
4260
4261 gen_mov_pc_npc(dc);
4262 tcg_gen_mov_tl(cpu_npc, src);
4263 gen_address_mask(dc, cpu_npc);
4264 gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));
4265
4266 dc->npc = DYNAMIC_PC_LOOKUP;
4267 return true;
4268 }
4269
4270 TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
4271
4272 static bool do_rett(DisasContext *dc, int rd, TCGv src)
4273 {
4274 if (!supervisor(dc)) {
4275 return raise_priv(dc);
4276 }
4277
4278 gen_check_align(dc, src, 3);
4279
4280 gen_mov_pc_npc(dc);
4281 tcg_gen_mov_tl(cpu_npc, src);
4282 gen_helper_rett(tcg_env);
4283
4284 dc->npc = DYNAMIC_PC;
4285 return true;
4286 }
4287
4288 TRANS(RETT, 32, do_add_special, a, do_rett)
4289
4290 static bool do_return(DisasContext *dc, int rd, TCGv src)
4291 {
4292 gen_check_align(dc, src, 3);
4293 gen_helper_restore(tcg_env);
4294
4295 gen_mov_pc_npc(dc);
4296 tcg_gen_mov_tl(cpu_npc, src);
4297 gen_address_mask(dc, cpu_npc);
4298
4299 dc->npc = DYNAMIC_PC_LOOKUP;
4300 return true;
4301 }
4302
4303 TRANS(RETURN, 64, do_add_special, a, do_return)
4304
4305 static bool do_save(DisasContext *dc, int rd, TCGv src)
4306 {
4307 gen_helper_save(tcg_env);
4308 gen_store_gpr(dc, rd, src);
4309 return advance_pc(dc);
4310 }
4311
4312 TRANS(SAVE, ALL, do_add_special, a, do_save)
4313
4314 static bool do_restore(DisasContext *dc, int rd, TCGv src)
4315 {
4316 gen_helper_restore(tcg_env);
4317 gen_store_gpr(dc, rd, src);
4318 return advance_pc(dc);
4319 }
4320
4321 TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4322
4323 static bool do_done_retry(DisasContext *dc, bool done)
4324 {
4325 if (!supervisor(dc)) {
4326 return raise_priv(dc);
4327 }
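/*
 * DONE and RETRY reload pc/npc from the trap-state registers,
 * so both become dynamic here.
 */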
4328 dc->npc = DYNAMIC_PC;
4329 dc->pc = DYNAMIC_PC;
4330 translator_io_start(&dc->base);
4331 if (done) {
4332 gen_helper_done(tcg_env);
4333 } else {
4334 gen_helper_retry(tcg_env);
4335 }
4336 return true;
4337 }
4338
4339 TRANS(DONE, 64, do_done_retry, true)
4340 TRANS(RETRY, 64, do_done_retry, false)
4341
4342 /*
4343 * Major opcode 11 -- load and store instructions
4344 */
4345
4346 static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
4347 {
4348 TCGv addr, tmp = NULL;
4349
4350 /* For simplicity, we under-decoded the rs2 form. */
4351 if (!imm && rs2_or_imm & ~0x1f) {
4352 return NULL;
4353 }
4354
4355 addr = gen_load_gpr(dc, rs1);
4356 if (rs2_or_imm) {
4357 tmp = tcg_temp_new();
4358 if (imm) {
4359 tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
4360 } else {
4361 tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
4362 }
4363 addr = tmp;
4364 }
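/*
 * When 32-bit addressing is in effect (sparc32, or PSTATE.AM on
 * sparc64), zero-extend the computed address to 32 bits.
 */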
4365 if (AM_CHECK(dc)) {
4366 if (!tmp) {
4367 tmp = tcg_temp_new();
4368 }
4369 tcg_gen_ext32u_tl(tmp, addr);
4370 addr = tmp;
4371 }
4372 return addr;
4373 }
4374
4375 static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4376 {
4377 TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4378 DisasASI da;
4379
4380 if (addr == NULL) {
4381 return false;
4382 }
4383 da = resolve_asi(dc, a->asi, mop);
4384
4385 reg = gen_dest_gpr(dc, a->rd);
4386 gen_ld_asi(dc, &da, reg, addr);
4387 gen_store_gpr(dc, a->rd, reg);
4388 return advance_pc(dc);
4389 }
4390
4391 TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
4392 TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
4393 TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
4394 TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
4395 TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
4396 TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
4397 TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4398
4399 static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4400 {
4401 TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4402 DisasASI da;
4403
4404 if (addr == NULL) {
4405 return false;
4406 }
4407 da = resolve_asi(dc, a->asi, mop);
4408
4409 reg = gen_load_gpr(dc, a->rd);
4410 gen_st_asi(dc, &da, reg, addr);
4411 return advance_pc(dc);
4412 }
4413
4414 TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
4415 TRANS(STB, ALL, do_st_gpr, a, MO_UB)
4416 TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
4417 TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4418
4419 static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
4420 {
4421 TCGv addr;
4422 DisasASI da;
4423
4424 if (a->rd & 1) {
4425 return false;
4426 }
4427 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4428 if (addr == NULL) {
4429 return false;
4430 }
4431 da = resolve_asi(dc, a->asi, MO_TEUQ);
4432 gen_ldda_asi(dc, &da, addr, a->rd);
4433 return advance_pc(dc);
4434 }
4435
4436 static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
4437 {
4438 TCGv addr;
4439 DisasASI da;
4440
4441 if (a->rd & 1) {
4442 return false;
4443 }
4444 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4445 if (addr == NULL) {
4446 return false;
4447 }
4448 da = resolve_asi(dc, a->asi, MO_TEUQ);
4449 gen_stda_asi(dc, &da, addr, a->rd);
4450 return advance_pc(dc);
4451 }
4452
4453 static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
4454 {
4455 TCGv addr, reg;
4456 DisasASI da;
4457
4458 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4459 if (addr == NULL) {
4460 return false;
4461 }
4462 da = resolve_asi(dc, a->asi, MO_UB);
4463
4464 reg = gen_dest_gpr(dc, a->rd);
4465 gen_ldstub_asi(dc, &da, reg, addr);
4466 gen_store_gpr(dc, a->rd, reg);
4467 return advance_pc(dc);
4468 }
4469
4470 static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
4471 {
4472 TCGv addr, dst, src;
4473 DisasASI da;
4474
4475 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4476 if (addr == NULL) {
4477 return false;
4478 }
4479 da = resolve_asi(dc, a->asi, MO_TEUL);
4480
4481 dst = gen_dest_gpr(dc, a->rd);
4482 src = gen_load_gpr(dc, a->rd);
4483 gen_swap_asi(dc, &da, dst, src, addr);
4484 gen_store_gpr(dc, a->rd, dst);
4485 return advance_pc(dc);
4486 }
4487
4488 static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4489 {
4490 TCGv addr, o, n, c;
4491 DisasASI da;
4492
4493 addr = gen_ldst_addr(dc, a->rs1, true, 0);
4494 if (addr == NULL) {
4495 return false;
4496 }
4497 da = resolve_asi(dc, a->asi, mop);
4498
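/*
 * Compare-and-swap: memory at [rs1] is compared against r[rs2];
 * r[rd] supplies the store data and receives the old memory value.
 */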
4499 o = gen_dest_gpr(dc, a->rd);
4500 n = gen_load_gpr(dc, a->rd);
4501 c = gen_load_gpr(dc, a->rs2_or_imm);
4502 gen_cas_asi(dc, &da, o, n, c, addr);
4503 gen_store_gpr(dc, a->rd, o);
4504 return advance_pc(dc);
4505 }
4506
4507 TRANS(CASA, CASA, do_casa, a, MO_TEUL)
4508 TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4509
4510 static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4511 {
4512 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4513 DisasASI da;
4514
4515 if (addr == NULL) {
4516 return false;
4517 }
4518 if (gen_trap_if_nofpu_fpexception(dc)) {
4519 return true;
4520 }
4521 if (sz == MO_128 && gen_trap_float128(dc)) {
4522 return true;
4523 }
4524 da = resolve_asi(dc, a->asi, MO_TE | sz);
4525 gen_ldf_asi(dc, &da, sz, addr, a->rd);
4526 gen_update_fprs_dirty(dc, a->rd);
4527 return advance_pc(dc);
4528 }
4529
4530 TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
4531 TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
4532 TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)
4533
4534 TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
4535 TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
4536 TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4537
4538 static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4539 {
4540 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4541 DisasASI da;
4542
4543 if (addr == NULL) {
4544 return false;
4545 }
4546 /* Store insns are ok in fp_exception_pending state. */
4547 if (gen_trap_ifnofpu(dc)) {
4548 return true;
4549 }
4550 if (sz == MO_128 && gen_trap_float128(dc)) {
4551 return true;
4552 }
4553 da = resolve_asi(dc, a->asi, MO_TE | sz);
4554 gen_stf_asi(dc, &da, sz, addr, a->rd);
4555 return advance_pc(dc);
4556 }
4557
4558 TRANS(STF, ALL, do_st_fpr, a, MO_32)
4559 TRANS(STDF, ALL, do_st_fpr, a, MO_64)
4560 TRANS(STQF, 64, do_st_fpr, a, MO_128)
4561
4562 TRANS(STFA, 64, do_st_fpr, a, MO_32)
4563 TRANS(STDFA, 64, do_st_fpr, a, MO_64)
4564 TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4565
4566 static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
4567 {
4568 TCGv addr;
4569
4570 if (!avail_32(dc)) {
4571 return false;
4572 }
4573 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4574 if (addr == NULL) {
4575 return false;
4576 }
4577 if (!supervisor(dc)) {
4578 return raise_priv(dc);
4579 }
4580 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
4581 if (gen_trap_ifnofpu(dc)) {
4582 return true;
4583 }
4584 if (!dc->fsr_qne) {
4585 gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
4586 return true;
4587 }
4588
4589 /* Store the single element from the queue. */
4590 TCGv_i64 fq = tcg_temp_new_i64();
4591 tcg_gen_ld_i64(fq, tcg_env, offsetof(CPUSPARCState, fq.d));
4592 tcg_gen_qemu_st_i64(fq, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN_4);
4593
4594 /* Mark the queue empty, transitioning to fp_execute state. */
4595 tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
4596 offsetof(CPUSPARCState, fsr_qne));
4597 dc->fsr_qne = 0;
4598
4599 return advance_pc(dc);
4600 #else
4601 qemu_build_not_reached();
4602 #endif
4603 }
4604
4605 static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
4606 {
4607 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4608 TCGv_i32 tmp;
4609
4610 if (addr == NULL) {
4611 return false;
4612 }
4613 if (gen_trap_if_nofpu_fpexception(dc)) {
4614 return true;
4615 }
4616
4617 tmp = tcg_temp_new_i32();
4618 tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);
4619
4620 tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2);
4621 /* LDFSR does not change FCC[1-3]. */
4622
4623 gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp);
4624 return advance_pc(dc);
4625 }
4626
4627 static bool do_ldxfsr(DisasContext *dc, arg_r_r_ri *a, bool entire)
4628 {
4629 #ifdef TARGET_SPARC64
4630 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4631 TCGv_i64 t64;
4632 TCGv_i32 lo, hi;
4633
4634 if (addr == NULL) {
4635 return false;
4636 }
4637 if (gen_trap_if_nofpu_fpexception(dc)) {
4638 return true;
4639 }
4640
4641 t64 = tcg_temp_new_i64();
4642 tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN);
4643
4644 lo = tcg_temp_new_i32();
4645 hi = cpu_fcc[3];
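/*
 * cpu_fcc[3] doubles as scratch for the high word of FSR; this is
 * safe because its own 2-bit field is extracted from it last.
 */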
4646 tcg_gen_extr_i64_i32(lo, hi, t64);
4647 tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2);
4648 tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2);
4649 tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2);
4650 tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2);
4651
4652 if (entire) {
4653 gen_helper_set_fsr_nofcc(tcg_env, lo);
4654 } else {
4655 gen_helper_set_fsr_nofcc_noftt(tcg_env, lo);
4656 }
4657 return advance_pc(dc);
4658 #else
4659 return false;
4660 #endif
4661 }
4662
4663 TRANS(LDXFSR, 64, do_ldxfsr, a, false)
4664 TRANS(LDXEFSR, VIS3B, do_ldxfsr, a, true)
4665
4666 static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
4667 {
4668 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4669 TCGv fsr;
4670
4671 if (addr == NULL) {
4672 return false;
4673 }
4674 /* Store insns are ok in fp_exception_pending state. */
4675 if (gen_trap_ifnofpu(dc)) {
4676 return true;
4677 }
4678
4679 fsr = tcg_temp_new();
4680 gen_helper_get_fsr(fsr, tcg_env);
4681 tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
4682 return advance_pc(dc);
4683 }
4684
4685 TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
4686 TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4687
4688 static bool do_fc(DisasContext *dc, int rd, int32_t c)
4689 {
4690 if (gen_trap_ifnofpu(dc)) {
4691 return true;
4692 }
4693 gen_store_fpr_F(dc, rd, tcg_constant_i32(c));
4694 return advance_pc(dc);
4695 }
4696
4697 TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
4698 TRANS(FONEs, VIS1, do_fc, a->rd, -1)
4699
4700 static bool do_dc(DisasContext *dc, int rd, int64_t c)
4701 {
4702 if (gen_trap_ifnofpu(dc)) {
4703 return true;
4704 }
4705 gen_store_fpr_D(dc, rd, tcg_constant_i64(c));
4706 return advance_pc(dc);
4707 }
4708
4709 TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
4710 TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4711
4712 static bool do_ff(DisasContext *dc, arg_r_r *a,
4713 void (*func)(TCGv_i32, TCGv_i32))
4714 {
4715 TCGv_i32 tmp;
4716
4717 if (gen_trap_if_nofpu_fpexception(dc)) {
4718 return true;
4719 }
4720
4721 tmp = gen_load_fpr_F(dc, a->rs);
4722 func(tmp, tmp);
4723 gen_store_fpr_F(dc, a->rd, tmp);
4724 return advance_pc(dc);
4725 }
4726
4727 TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
4728 TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
4729 TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
4730 TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
4731 TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4732
4733 static bool do_fd(DisasContext *dc, arg_r_r *a,
4734 void (*func)(TCGv_i32, TCGv_i64))
4735 {
4736 TCGv_i32 dst;
4737 TCGv_i64 src;
4738
4739 if (gen_trap_ifnofpu(dc)) {
4740 return true;
4741 }
4742
4743 dst = tcg_temp_new_i32();
4744 src = gen_load_fpr_D(dc, a->rs);
4745 func(dst, src);
4746 gen_store_fpr_F(dc, a->rd, dst);
4747 return advance_pc(dc);
4748 }
4749
4750 TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
4751 TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4752
4753 static bool do_env_ff(DisasContext *dc, arg_r_r *a,
4754 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
4755 {
4756 TCGv_i32 tmp;
4757
4758 if (gen_trap_if_nofpu_fpexception(dc)) {
4759 return true;
4760 }
4761
4762 tmp = gen_load_fpr_F(dc, a->rs);
4763 func(tmp, tcg_env, tmp);
4764 gen_store_fpr_F(dc, a->rd, tmp);
4765 return advance_pc(dc);
4766 }
4767
4768 TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
4769 TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
4770 TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4771
4772 static bool do_env_fd(DisasContext *dc, arg_r_r *a,
4773 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
4774 {
4775 TCGv_i32 dst;
4776 TCGv_i64 src;
4777
4778 if (gen_trap_if_nofpu_fpexception(dc)) {
4779 return true;
4780 }
4781
4782 dst = tcg_temp_new_i32();
4783 src = gen_load_fpr_D(dc, a->rs);
4784 func(dst, tcg_env, src);
4785 gen_store_fpr_F(dc, a->rd, dst);
4786 return advance_pc(dc);
4787 }
4788
4789 TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
4790 TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
4791 TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4792
4793 static bool do_dd(DisasContext *dc, arg_r_r *a,
4794 void (*func)(TCGv_i64, TCGv_i64))
4795 {
4796 TCGv_i64 dst, src;
4797
4798 if (gen_trap_if_nofpu_fpexception(dc)) {
4799 return true;
4800 }
4801
4802 dst = tcg_temp_new_i64();
4803 src = gen_load_fpr_D(dc, a->rs);
4804 func(dst, src);
4805 gen_store_fpr_D(dc, a->rd, dst);
4806 return advance_pc(dc);
4807 }
4808
4809 TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
4810 TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
4811 TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
4812 TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
4813 TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4814
4815 static bool do_env_dd(DisasContext *dc, arg_r_r *a,
4816 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
4817 {
4818 TCGv_i64 dst, src;
4819
4820 if (gen_trap_if_nofpu_fpexception(dc)) {
4821 return true;
4822 }
4823
4824 dst = tcg_temp_new_i64();
4825 src = gen_load_fpr_D(dc, a->rs);
4826 func(dst, tcg_env, src);
4827 gen_store_fpr_D(dc, a->rd, dst);
4828 return advance_pc(dc);
4829 }
4830
4831 TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
4832 TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
4833 TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4834
4835 static bool do_df(DisasContext *dc, arg_r_r *a,
4836 void (*func)(TCGv_i64, TCGv_i32))
4837 {
4838 TCGv_i64 dst;
4839 TCGv_i32 src;
4840
4841 if (gen_trap_ifnofpu(dc)) {
4842 return true;
4843 }
4844
4845 dst = tcg_temp_new_i64();
4846 src = gen_load_fpr_F(dc, a->rs);
4847 func(dst, src);
4848 gen_store_fpr_D(dc, a->rd, dst);
4849 return advance_pc(dc);
4850 }
4851
4852 TRANS(FEXPAND, VIS1, do_df, a, gen_helper_fexpand)
4853
4854 static bool do_env_df(DisasContext *dc, arg_r_r *a,
4855 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
4856 {
4857 TCGv_i64 dst;
4858 TCGv_i32 src;
4859
4860 if (gen_trap_if_nofpu_fpexception(dc)) {
4861 return true;
4862 }
4863
4864 dst = tcg_temp_new_i64();
4865 src = gen_load_fpr_F(dc, a->rs);
4866 func(dst, tcg_env, src);
4867 gen_store_fpr_D(dc, a->rd, dst);
4868 return advance_pc(dc);
4869 }
4870
4871 TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
4872 TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
4873 TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4874
4875 static bool do_qq(DisasContext *dc, arg_r_r *a,
4876 void (*func)(TCGv_i128, TCGv_i128))
4877 {
4878 TCGv_i128 t;
4879
4880 if (gen_trap_ifnofpu(dc)) {
4881 return true;
4882 }
4883 if (gen_trap_float128(dc)) {
4884 return true;
4885 }
4886
4887 gen_op_clear_ieee_excp_and_FTT();
4888 t = gen_load_fpr_Q(dc, a->rs);
4889 func(t, t);
4890 gen_store_fpr_Q(dc, a->rd, t);
4891 return advance_pc(dc);
4892 }
4893
4894 TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
4895 TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
4896 TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)
4897
4898 static bool do_env_qq(DisasContext *dc, arg_r_r *a,
4899 void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
4900 {
4901 TCGv_i128 t;
4902
4903 if (gen_trap_if_nofpu_fpexception(dc)) {
4904 return true;
4905 }
4906 if (gen_trap_float128(dc)) {
4907 return true;
4908 }
4909
4910 t = gen_load_fpr_Q(dc, a->rs);
4911 func(t, tcg_env, t);
4912 gen_store_fpr_Q(dc, a->rd, t);
4913 return advance_pc(dc);
4914 }
4915
4916 TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4917
4918 static bool do_env_fq(DisasContext *dc, arg_r_r *a,
4919 void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
4920 {
4921 TCGv_i128 src;
4922 TCGv_i32 dst;
4923
4924 if (gen_trap_if_nofpu_fpexception(dc)) {
4925 return true;
4926 }
4927 if (gen_trap_float128(dc)) {
4928 return true;
4929 }
4930
4931 src = gen_load_fpr_Q(dc, a->rs);
4932 dst = tcg_temp_new_i32();
4933 func(dst, tcg_env, src);
4934 gen_store_fpr_F(dc, a->rd, dst);
4935 return advance_pc(dc);
4936 }
4937
4938 TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
4939 TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4940
4941 static bool do_env_dq(DisasContext *dc, arg_r_r *a,
4942 void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
4943 {
4944 TCGv_i128 src;
4945 TCGv_i64 dst;
4946
4947 if (gen_trap_if_nofpu_fpexception(dc)) {
4948 return true;
4949 }
4950 if (gen_trap_float128(dc)) {
4951 return true;
4952 }
4953
4954 src = gen_load_fpr_Q(dc, a->rs);
4955 dst = tcg_temp_new_i64();
4956 func(dst, tcg_env, src);
4957 gen_store_fpr_D(dc, a->rd, dst);
4958 return advance_pc(dc);
4959 }
4960
4961 TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
4962 TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4963
4964 static bool do_env_qf(DisasContext *dc, arg_r_r *a,
4965 void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
4966 {
4967 TCGv_i32 src;
4968 TCGv_i128 dst;
4969
4970 if (gen_trap_if_nofpu_fpexception(dc)) {
4971 return true;
4972 }
4973 if (gen_trap_float128(dc)) {
4974 return true;
4975 }
4976
4977 src = gen_load_fpr_F(dc, a->rs);
4978 dst = tcg_temp_new_i128();
4979 func(dst, tcg_env, src);
4980 gen_store_fpr_Q(dc, a->rd, dst);
4981 return advance_pc(dc);
4982 }
4983
4984 TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
4985 TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4986
4987 static bool do_env_qd(DisasContext *dc, arg_r_r *a,
4988 void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
4989 {
4990 TCGv_i64 src;
4991 TCGv_i128 dst;
4992
4993 if (gen_trap_if_nofpu_fpexception(dc)) {
4994 return true;
4995 }
4996
4997 src = gen_load_fpr_D(dc, a->rs);
4998 dst = tcg_temp_new_i128();
4999 func(dst, tcg_env, src);
5000 gen_store_fpr_Q(dc, a->rd, dst);
5001 return advance_pc(dc);
5002 }
5003
5004 TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
5005 TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
5006
5007 static bool do_fff(DisasContext *dc, arg_r_r_r *a,
5008 void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
5009 {
5010 TCGv_i32 src1, src2;
5011
5012 if (gen_trap_ifnofpu(dc)) {
5013 return true;
5014 }
5015
5016 src1 = gen_load_fpr_F(dc, a->rs1);
5017 src2 = gen_load_fpr_F(dc, a->rs2);
5018 func(src1, src1, src2);
5019 gen_store_fpr_F(dc, a->rd, src1);
5020 return advance_pc(dc);
5021 }
5022
5023 TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
5024 TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
5025 TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
5026 TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
5027 TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
5028 TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
5029 TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
5030 TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
5031 TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
5032 TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
5033 TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
5034 TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
5035
5036 TRANS(FHADDs, VIS3, do_fff, a, gen_op_fhadds)
5037 TRANS(FHSUBs, VIS3, do_fff, a, gen_op_fhsubs)
5038 TRANS(FNHADDs, VIS3, do_fff, a, gen_op_fnhadds)
5039
5040 TRANS(FPADDS16s, VIS3, do_fff, a, gen_op_fpadds16s)
5041 TRANS(FPSUBS16s, VIS3, do_fff, a, gen_op_fpsubs16s)
5042 TRANS(FPADDS32s, VIS3, do_fff, a, gen_op_fpadds32s)
5043 TRANS(FPSUBS32s, VIS3, do_fff, a, gen_op_fpsubs32s)
5044
5045 static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
5046 void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
5047 {
5048 TCGv_i32 src1, src2;
5049
5050 if (gen_trap_if_nofpu_fpexception(dc)) {
5051 return true;
5052 }
5053
5054 src1 = gen_load_fpr_F(dc, a->rs1);
5055 src2 = gen_load_fpr_F(dc, a->rs2);
5056 func(src1, tcg_env, src1, src2);
5057 gen_store_fpr_F(dc, a->rd, src1);
5058 return advance_pc(dc);
5059 }
5060
5061 TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
5062 TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
5063 TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
5064 TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
5065 TRANS(FNADDs, VIS3, do_env_fff, a, gen_helper_fnadds)
5066 TRANS(FNMULs, VIS3, do_env_fff, a, gen_helper_fnmuls)
5067
5068 static bool do_dff(DisasContext *dc, arg_r_r_r *a,
5069 void (*func)(TCGv_i64, TCGv_i32, TCGv_i32))
5070 {
5071 TCGv_i64 dst;
5072 TCGv_i32 src1, src2;
5073
5074 if (gen_trap_ifnofpu(dc)) {
5075 return true;
5076 }
5077
5078 dst = tcg_temp_new_i64();
5079 src1 = gen_load_fpr_F(dc, a->rs1);
5080 src2 = gen_load_fpr_F(dc, a->rs2);
5081 func(dst, src1, src2);
5082 gen_store_fpr_D(dc, a->rd, dst);
5083 return advance_pc(dc);
5084 }
5085
5086 TRANS(FMUL8x16AU, VIS1, do_dff, a, gen_op_fmul8x16au)
5087 TRANS(FMUL8x16AL, VIS1, do_dff, a, gen_op_fmul8x16al)
5088 TRANS(FMULD8SUx16, VIS1, do_dff, a, gen_op_fmuld8sux16)
5089 TRANS(FMULD8ULx16, VIS1, do_dff, a, gen_op_fmuld8ulx16)
5090 TRANS(FPMERGE, VIS1, do_dff, a, gen_helper_fpmerge)
5091
5092 static bool do_dfd(DisasContext *dc, arg_r_r_r *a,
5093 void (*func)(TCGv_i64, TCGv_i32, TCGv_i64))
5094 {
5095 TCGv_i64 dst, src2;
5096 TCGv_i32 src1;
5097
5098 if (gen_trap_ifnofpu(dc)) {
5099 return true;
5100 }
5101
5102 dst = tcg_temp_new_i64();
5103 src1 = gen_load_fpr_F(dc, a->rs1);
5104 src2 = gen_load_fpr_D(dc, a->rs2);
5105 func(dst, src1, src2);
5106 gen_store_fpr_D(dc, a->rd, dst);
5107 return advance_pc(dc);
5108 }
5109
5110 TRANS(FMUL8x16, VIS1, do_dfd, a, gen_helper_fmul8x16)
5111
5112 static bool do_gvec_ddd(DisasContext *dc, arg_r_r_r *a, MemOp vece,
5113 void (*func)(unsigned, uint32_t, uint32_t,
5114 uint32_t, uint32_t, uint32_t))
5115 {
5116 if (gen_trap_ifnofpu(dc)) {
5117 return true;
5118 }
5119
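/* VIS vector ops work on 64-bit fp registers: oprsz = maxsz = 8 bytes. */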
5120 func(vece, gen_offset_fpr_D(a->rd), gen_offset_fpr_D(a->rs1),
5121 gen_offset_fpr_D(a->rs2), 8, 8);
5122 return advance_pc(dc);
5123 }
5124
5125 TRANS(FPADD8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_add)
5126 TRANS(FPADD16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_add)
5127 TRANS(FPADD32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_add)
5128
5129 TRANS(FPSUB8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_sub)
5130 TRANS(FPSUB16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sub)
5131 TRANS(FPSUB32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sub)
5132
5133 TRANS(FCHKSM16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fchksm16)
5134 TRANS(FMEAN16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fmean16)
5135
5136 TRANS(FPADDS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_ssadd)
5137 TRANS(FPADDS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ssadd)
5138 TRANS(FPADDS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_ssadd)
5139 TRANS(FPADDUS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_usadd)
5140 TRANS(FPADDUS16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_usadd)
5141
5142 TRANS(FPSUBS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_sssub)
5143 TRANS(FPSUBS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sssub)
5144 TRANS(FPSUBS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sssub)
5145 TRANS(FPSUBUS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_ussub)
5146 TRANS(FPSUBUS16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ussub)
5147
5148 TRANS(FSLL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shlv)
5149 TRANS(FSLL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shlv)
5150 TRANS(FSRL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shrv)
5151 TRANS(FSRL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shrv)
5152 TRANS(FSRA16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sarv)
5153 TRANS(FSRA32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sarv)
5154
5155 TRANS(FPMIN8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_smin)
5156 TRANS(FPMIN16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_smin)
5157 TRANS(FPMIN32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_smin)
5158 TRANS(FPMINU8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_umin)
5159 TRANS(FPMINU16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_umin)
5160 TRANS(FPMINU32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_umin)
5161
5162 TRANS(FPMAX8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_smax)
5163 TRANS(FPMAX16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_smax)
5164 TRANS(FPMAX32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_smax)
5165 TRANS(FPMAXU8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_umax)
5166 TRANS(FPMAXU16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_umax)
5167 TRANS(FPMAXU32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_umax)
5168
5169 static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
5170 void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
5171 {
5172 TCGv_i64 dst, src1, src2;
5173
5174 if (gen_trap_ifnofpu(dc)) {
5175 return true;
5176 }
5177
5178 dst = tcg_temp_new_i64();
5179 src1 = gen_load_fpr_D(dc, a->rs1);
5180 src2 = gen_load_fpr_D(dc, a->rs2);
5181 func(dst, src1, src2);
5182 gen_store_fpr_D(dc, a->rd, dst);
5183 return advance_pc(dc);
5184 }
5185
5186 TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
5187 TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
5188
5189 TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
5190 TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
5191 TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
5192 TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
5193 TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
5194 TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
5195 TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
5196 TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)
5197
5198 TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
5199 TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata_g)
5200 TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
5201
5202 TRANS(FHADDd, VIS3, do_ddd, a, gen_op_fhaddd)
5203 TRANS(FHSUBd, VIS3, do_ddd, a, gen_op_fhsubd)
5204 TRANS(FNHADDd, VIS3, do_ddd, a, gen_op_fnhaddd)
5205
5206 TRANS(FPADD64, VIS3B, do_ddd, a, tcg_gen_add_i64)
5207 TRANS(FPSUB64, VIS3B, do_ddd, a, tcg_gen_sub_i64)
5208 TRANS(FSLAS16, VIS3, do_ddd, a, gen_helper_fslas16)
5209 TRANS(FSLAS32, VIS3, do_ddd, a, gen_helper_fslas32)
5210
5211 static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
5212 void (*func)(TCGv, TCGv_i64, TCGv_i64))
5213 {
5214 TCGv_i64 src1, src2;
5215 TCGv dst;
5216
5217 if (gen_trap_ifnofpu(dc)) {
5218 return true;
5219 }
5220
5221 dst = gen_dest_gpr(dc, a->rd);
5222 src1 = gen_load_fpr_D(dc, a->rs1);
5223 src2 = gen_load_fpr_D(dc, a->rs2);
5224 func(dst, src1, src2);
5225 gen_store_gpr(dc, a->rd, dst);
5226 return advance_pc(dc);
5227 }
5228
5229 TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
5230 TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
5231 TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
5232 TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
5233 TRANS(FPCMPULE16, VIS4, do_rdd, a, gen_helper_fcmpule16)
5234 TRANS(FPCMPUGT16, VIS4, do_rdd, a, gen_helper_fcmpugt16)
5235
5236 TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
5237 TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
5238 TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
5239 TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
5240 TRANS(FPCMPULE32, VIS4, do_rdd, a, gen_helper_fcmpule32)
5241 TRANS(FPCMPUGT32, VIS4, do_rdd, a, gen_helper_fcmpugt32)
5242
5243 TRANS(FPCMPEQ8, VIS3B, do_rdd, a, gen_helper_fcmpeq8)
5244 TRANS(FPCMPNE8, VIS3B, do_rdd, a, gen_helper_fcmpne8)
5245 TRANS(FPCMPULE8, VIS3B, do_rdd, a, gen_helper_fcmpule8)
5246 TRANS(FPCMPUGT8, VIS3B, do_rdd, a, gen_helper_fcmpugt8)
5247 TRANS(FPCMPLE8, VIS4, do_rdd, a, gen_helper_fcmple8)
5248 TRANS(FPCMPGT8, VIS4, do_rdd, a, gen_helper_fcmpgt8)
5249
5250 TRANS(PDISTN, VIS3, do_rdd, a, gen_op_pdistn)
5251 TRANS(XMULX, VIS3, do_rrr, a, gen_helper_xmulx)
5252 TRANS(XMULXHI, VIS3, do_rrr, a, gen_helper_xmulxhi)
5253
5254 static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
5255 void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
5256 {
5257 TCGv_i64 dst, src1, src2;
5258
5259 if (gen_trap_if_nofpu_fpexception(dc)) {
5260 return true;
5261 }
5262
5263 dst = tcg_temp_new_i64();
5264 src1 = gen_load_fpr_D(dc, a->rs1);
5265 src2 = gen_load_fpr_D(dc, a->rs2);
5266 func(dst, tcg_env, src1, src2);
5267 gen_store_fpr_D(dc, a->rd, dst);
5268 return advance_pc(dc);
5269 }
5270
5271 TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
5272 TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
5273 TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
5274 TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
5275 TRANS(FNADDd, VIS3, do_env_ddd, a, gen_helper_fnaddd)
5276 TRANS(FNMULd, VIS3, do_env_ddd, a, gen_helper_fnmuld)
5277
5278 static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
5279 {
5280 TCGv_i64 dst;
5281 TCGv_i32 src1, src2;
5282
5283 if (gen_trap_if_nofpu_fpexception(dc)) {
5284 return true;
5285 }
5286 if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
5287 return raise_unimpfpop(dc);
5288 }
5289
5290 dst = tcg_temp_new_i64();
5291 src1 = gen_load_fpr_F(dc, a->rs1);
5292 src2 = gen_load_fpr_F(dc, a->rs2);
5293 gen_helper_fsmuld(dst, tcg_env, src1, src2);
5294 gen_store_fpr_D(dc, a->rd, dst);
5295 return advance_pc(dc);
5296 }
5297
5298 static bool trans_FNsMULd(DisasContext *dc, arg_r_r_r *a)
5299 {
5300 TCGv_i64 dst;
5301 TCGv_i32 src1, src2;
5302
5303 if (!avail_VIS3(dc)) {
5304 return false;
5305 }
5306 if (gen_trap_ifnofpu(dc)) {
5307 return true;
5308 }
5309 dst = tcg_temp_new_i64();
5310 src1 = gen_load_fpr_F(dc, a->rs1);
5311 src2 = gen_load_fpr_F(dc, a->rs2);
5312 gen_helper_fnsmuld(dst, tcg_env, src1, src2);
5313 gen_store_fpr_D(dc, a->rd, dst);
5314 return advance_pc(dc);
5315 }
5316
5317 static bool do_ffff(DisasContext *dc, arg_r_r_r_r *a,
5318 void (*func)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
5319 {
5320 TCGv_i32 dst, src1, src2, src3;
5321
5322 if (gen_trap_ifnofpu(dc)) {
5323 return true;
5324 }
5325
5326 src1 = gen_load_fpr_F(dc, a->rs1);
5327 src2 = gen_load_fpr_F(dc, a->rs2);
5328 src3 = gen_load_fpr_F(dc, a->rs3);
5329 dst = tcg_temp_new_i32();
5330 func(dst, src1, src2, src3);
5331 gen_store_fpr_F(dc, a->rd, dst);
5332 return advance_pc(dc);
5333 }
5334
5335 TRANS(FMADDs, FMAF, do_ffff, a, gen_op_fmadds)
5336 TRANS(FMSUBs, FMAF, do_ffff, a, gen_op_fmsubs)
5337 TRANS(FNMSUBs, FMAF, do_ffff, a, gen_op_fnmsubs)
5338 TRANS(FNMADDs, FMAF, do_ffff, a, gen_op_fnmadds)
5339
5340 static bool do_dddd(DisasContext *dc, arg_r_r_r_r *a,
5341 void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
5342 {
5343 TCGv_i64 dst, src1, src2, src3;
5344
5345 if (gen_trap_ifnofpu(dc)) {
5346 return true;
5347 }
5348
5349 dst = tcg_temp_new_i64();
5350 src1 = gen_load_fpr_D(dc, a->rs1);
5351 src2 = gen_load_fpr_D(dc, a->rs2);
5352 src3 = gen_load_fpr_D(dc, a->rs3);
5353 func(dst, src1, src2, src3);
5354 gen_store_fpr_D(dc, a->rd, dst);
5355 return advance_pc(dc);
5356 }
5357
5358 TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
5359 TRANS(FMADDd, FMAF, do_dddd, a, gen_op_fmaddd)
5360 TRANS(FMSUBd, FMAF, do_dddd, a, gen_op_fmsubd)
5361 TRANS(FNMSUBd, FMAF, do_dddd, a, gen_op_fnmsubd)
5362 TRANS(FNMADDd, FMAF, do_dddd, a, gen_op_fnmaddd)
5363 TRANS(FPMADDX, IMA, do_dddd, a, gen_op_fpmaddx)
5364 TRANS(FPMADDXHI, IMA, do_dddd, a, gen_op_fpmaddxhi)
5365
5366 static bool trans_FALIGNDATAi(DisasContext *dc, arg_r_r_r *a)
5367 {
5368 TCGv_i64 dst, src1, src2;
5369 TCGv src3;
5370
5371 if (!avail_VIS4(dc)) {
5372 return false;
5373 }
5374 if (gen_trap_ifnofpu(dc)) {
5375 return true;
5376 }
5377
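/*
 * Unlike the VIS1 form, which takes the alignment from GSR, the
 * i form reads it from gpr rs1; rd doubles as the first data source.
 */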
5378 dst = tcg_temp_new_i64();
5379 src1 = gen_load_fpr_D(dc, a->rd);
5380 src2 = gen_load_fpr_D(dc, a->rs2);
5381 src3 = gen_load_gpr(dc, a->rs1);
5382 gen_op_faligndata_i(dst, src1, src2, src3);
5383 gen_store_fpr_D(dc, a->rd, dst);
5384 return advance_pc(dc);
5385 }
5386
5387 static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
5388 void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
5389 {
5390 TCGv_i128 src1, src2;
5391
5392 if (gen_trap_if_nofpu_fpexception(dc)) {
5393 return true;
5394 }
5395 if (gen_trap_float128(dc)) {
5396 return true;
5397 }
5398
5399 src1 = gen_load_fpr_Q(dc, a->rs1);
5400 src2 = gen_load_fpr_Q(dc, a->rs2);
5401 func(src1, tcg_env, src1, src2);
5402 gen_store_fpr_Q(dc, a->rd, src1);
5403 return advance_pc(dc);
5404 }
5405
5406 TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
5407 TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
5408 TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
5409 TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
5410
5411 static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
5412 {
5413 TCGv_i64 src1, src2;
5414 TCGv_i128 dst;
5415
5416 if (gen_trap_if_nofpu_fpexception(dc)) {
5417 return true;
5418 }
5419 if (gen_trap_float128(dc)) {
5420 return true;
5421 }
5422
5423 src1 = gen_load_fpr_D(dc, a->rs1);
5424 src2 = gen_load_fpr_D(dc, a->rs2);
5425 dst = tcg_temp_new_i128();
5426 gen_helper_fdmulq(dst, tcg_env, src1, src2);
5427 gen_store_fpr_Q(dc, a->rd, dst);
5428 return advance_pc(dc);
5429 }
5430
5431 static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
5432 void (*func)(DisasContext *, DisasCompare *, int, int))
5433 {
5434 DisasCompare cmp;
5435
5436 if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
5437 return false;
5438 }
5439 if (gen_trap_ifnofpu(dc)) {
5440 return true;
5441 }
5442 if (is_128 && gen_trap_float128(dc)) {
5443 return true;
5444 }
5445
5446 gen_op_clear_ieee_excp_and_FTT();
5447 func(dc, &cmp, a->rd, a->rs2);
5448 return advance_pc(dc);
5449 }
5450
5451 TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
5452 TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
5453 TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
5454
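/* FMOVcc: conditional FP move based on the integer condition codes. */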
static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
                      void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_compare(&cmp, a->cc, a->cond, dc);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)

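/* FMOVfcc: conditional FP move based on the FP condition codes. */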
static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
                       void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_fcompare(&cmp, a->cc, a->cond);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)

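/*
 * FCMPs/FCMPEs: compare two float32 operands and set %fcc[cc]; the E
 * variant also signals on quiet NaN operands.  Pre-v9 CPUs implement
 * only %fcc0, so reject cc != 0 there.
 */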
static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
{
    TCGv_i32 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    if (e) {
        gen_helper_fcmpes(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPs, ALL, do_fcmps, a, false)
TRANS(FCMPEs, ALL, do_fcmps, a, true)

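/* As do_fcmps, but for float64 operands. */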
static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
{
    TCGv_i64 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    if (e) {
        gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPd, ALL, do_fcmpd, a, false)
TRANS(FCMPEd, ALL, do_fcmpd, a, true)

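/* As do_fcmps, but for float128 operands. */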
static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
{
    TCGv_i128 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    if (e) {
        gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPq, ALL, do_fcmpq, a, false)
TRANS(FCMPEq, ALL, do_fcmpq, a, true)

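/*
 * FLCMPs (VIS 3): FP compare writing %fcc[cc] directly; the helper
 * takes no env argument, so no FP exception state is touched.
 */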
static bool trans_FLCMPs(DisasContext *dc, arg_FLCMPs *a)
{
    TCGv_i32 src1, src2;

    if (!avail_VIS3(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_flcmps(cpu_fcc[a->cc], src1, src2);
    return advance_pc(dc);
}

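/* As FLCMPs, but for float64 operands. */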
static bool trans_FLCMPd(DisasContext *dc, arg_FLCMPd *a)
{
    TCGv_i64 src1, src2;

    if (!avail_VIS3(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    gen_helper_flcmpd(cpu_fcc[a->cc], src1, src2);
    return advance_pc(dc);
}

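/*
 * MOVsTOsw/MOVsTOuw/MOVdTOx: move an FP register to an integer
 * register by loading directly from env at the FP register's offset,
 * with the load op selecting the width and extension.
 */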
static bool do_movf2r(DisasContext *dc, arg_r_r *a,
                      int (*offset)(unsigned int),
                      void (*load)(TCGv, TCGv_ptr, tcg_target_long))
{
    TCGv dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    dst = gen_dest_gpr(dc, a->rd);
    load(dst, tcg_env, offset(a->rs));
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(MOVsTOsw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32s_tl)
TRANS(MOVsTOuw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32u_tl)
TRANS(MOVdTOx, VIS3B, do_movf2r, a, gen_offset_fpr_D, tcg_gen_ld_tl)

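/* MOVwTOs/MOVxTOd: the inverse direction, integer to FP register. */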
static bool do_movr2f(DisasContext *dc, arg_r_r *a,
                      int (*offset)(unsigned int),
                      void (*store)(TCGv, TCGv_ptr, tcg_target_long))
{
    TCGv src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    src = gen_load_gpr(dc, a->rs);
    store(src, tcg_env, offset(a->rd));
    return advance_pc(dc);
}

TRANS(MOVwTOs, VIS3B, do_movr2f, a, gen_offset_fpr_F, tcg_gen_st32_tl)
TRANS(MOVxTOd, VIS3B, do_movr2f, a, gen_offset_fpr_D, tcg_gen_st_tl)

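/*
 * Translator hook: initialize the DisasContext from the TB flags
 * (MMU index, FPU enable, privilege level, ASI, ...).
 */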
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    int bound;

    dc->pc = dc->base.pc_first;
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &cpu_env(cs)->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
# ifdef TARGET_SPARC64
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
# else
    dc->fsr_qne = (dc->base.tb->flags & TB_FLAG_FSR_QNE) != 0;
# endif
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#endif
    /*
     * If we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page.
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

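/*
 * Translator hook: record (pc, npc) for this insn, folding any
 * not-yet-resolved npc into the symbolic JUMP_PC/DYNAMIC_PC values.
 */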
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}

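/*
 * Translator hook: fetch and decode one instruction.  An undecodable
 * insn raises TT_ILL_INSN; any departure from sequential control flow
 * ends the TB.
 */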
static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    unsigned int insn;

    insn = translator_ldl(cpu_env(cs), &dc->base, dc->pc);
    dc->base.pc_next += 4;

    if (!decode(dc, insn)) {
        gen_exception(dc, TT_ILL_INSN);
    }

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}

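/*
 * Translator hook: close the TB.  Chain directly when both pc and npc
 * are static; otherwise flush them to the CPU state and exit, via a
 * goto_ptr lookup when possible.  Finally, emit the code for any
 * exception paths that were deferred during translation.
 */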
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    finishing_insn(dc);

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
        break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}

static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
};

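/* Main translation entry point: generate TCG ops for one TB. */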
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}

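/*
 * Allocate the TCG globals that shadow CPUSPARCState fields.  %g0 is
 * hard-wired to zero, so cpu_regs[0] stays NULL; the windowed registers
 * (%o, %l, %i) are reached indirectly through regwptr, which tracks the
 * current register window.
 */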
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };

    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" },
        { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" },
        { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" },
        { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" },
#else
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" },
#endif
    };

    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
        { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
#endif
        { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
        { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
        { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
        { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }
}

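/*
 * Restore pc/npc after an exception, from the values recorded at
 * insn_start.  A symbolic JUMP_PC npc is resolved here using the
 * runtime value of the 'cond' global.
 */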
void sparc_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    CPUSPARCState *env = cpu_env(cs);
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;
        } else {
            env->npc = pc + 4;
        }
    } else {
        env->npc = npc;
    }
}