1 /*
2 SPARC translation
3
4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
6
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2 of the License, or (at your option) any later version.
11
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include "unicorn/platform.h"
26
27 #include "cpu.h"
28 #include "exec/helper-proto.h"
29 #include "tcg-op.h"
30 #include "exec/cpu_ldst.h"
31
32 #include "exec/helper-gen.h"
33
34 #include "exec/gen-icount.h"
35
#define DYNAMIC_PC 1 /* dynamic pc value */
#define JUMP_PC 2 /* dynamic pc value which takes only two values
according to jump_pc[T2] */


/* Per-translation-block disassembly state for the SPARC front end. */
typedef struct DisasContext {
    target_ulong pc;      /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;     /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int is_br;            /* NOTE(review): presumably set when the block ends in a branch -- confirm against later uses */
    int mem_idx;          /* MMU index; compared with MMU_KERNEL_IDX/MMU_HYPV_IDX by supervisor()/hypervisor() */
    int fpu_enabled;
    int address_mask_32bit; /* sparc64: mask effective addresses to 32 bits (see AM_CHECK) */
    int singlestep;       /* when set, gen_goto_tb() never chains TBs directly */
    uint32_t cc_op;       /* current CC operation */
    struct TranslationBlock *tb;
    sparc_def_t *def;
    TCGv_i32 t32[3];      /* i32 temporaries handed out by get_temp_i32() */
    TCGv ttl[6];          /* target-long temporaries handed out by get_temp_tl() */
    int n_t32;            /* number of live entries in t32[] */
    int n_ttl;            /* number of live entries in ttl[] */

    // Unicorn engine
    struct uc_struct *uc;
} DisasContext;

/* A decomposed condition: compare c1 <cond> c2.  is_bool means the value is
   already a 0/1 boolean.  NOTE(review): g1/g2 appear to mark c1/c2 as global
   TCG values that the consumer must not free -- confirm against uses. */
typedef struct {
    TCGCond cond;
    bool is_bool;
    bool g1, g2;
    TCGv c1, c2;
} DisasCompare;
68
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO) \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO) \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

/* Sign-extending variants of the field extractors above. */
#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))

#ifdef TARGET_SPARC64
/* sparc64: bit 0 of a double/quad FP register number selects the upper bank
   (it is folded into bit 5 of the effective register index). */
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f
90
/* Sign-extend the low LEN bits of X to a full int (1 <= LEN <= 32). */
static int sign_extend(int x, int len)
{
    unsigned int sign_bit = 1u << (len - 1);
    unsigned int low_bits = (unsigned int)x & ((sign_bit << 1) - 1);

    return (int)((low_bits ^ sign_bit) - sign_bit);
}
96
/* True if bit 13 of the current instruction word is set: immediate form. */
#define IS_IMM (insn & (1<<13))
98
get_temp_i32(DisasContext * dc)99 static inline TCGv_i32 get_temp_i32(DisasContext *dc)
100 {
101 TCGContext *tcg_ctx = dc->uc->tcg_ctx;
102 TCGv_i32 t;
103 assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
104 dc->t32[dc->n_t32++] = t = tcg_temp_new_i32(tcg_ctx);
105 return t;
106 }
107
get_temp_tl(DisasContext * dc)108 static inline TCGv get_temp_tl(DisasContext *dc)
109 {
110 TCGContext *tcg_ctx = dc->uc->tcg_ctx;
111 TCGv t;
112 assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
113 dc->ttl[dc->n_ttl++] = t = tcg_temp_new(tcg_ctx);
114 return t;
115 }
116
/* sparc64 only: mark the FPRS dirty bits -- bit 0 (DL) for the lower FP
   registers (rd < 32), bit 1 (DU) for the upper bank.  No-op on sparc32. */
static inline void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    tcg_gen_ori_i32(tcg_ctx, tcg_ctx->cpu_fprs, tcg_ctx->cpu_fprs, (rd < 32) ? 1 : 2);
#endif
}
124
125 /* floating point registers moves */
/* Read single-precision FP register SRC as an i32.  Two 32-bit registers are
   packed per cpu_fpr[] i64 element: odd register numbers occupy the low half,
   even ones the high half (see gen_store_fpr_F's deposit). */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
#if TCG_TARGET_REG_BITS == 32
    /* 32-bit host: each i64 global is a pair of i32 halves we can alias. */
    if (src & 1) {
        return TCGV_LOW(tcg_ctx->cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(tcg_ctx->cpu_fpr[src / 2]);
    }
#else
    if (src & 1) {
        /* Low half: reinterpret the i64 global directly as an i32 view. */
        return MAKE_TCGV_I32(GET_TCGV_I64(tcg_ctx->cpu_fpr[src / 2]));
    } else {
        /* High half: shift down into a fresh temporary. */
        TCGv_i32 ret = get_temp_i32(dc);
        TCGv_i64 t = tcg_temp_new_i64(tcg_ctx);

        tcg_gen_shri_i64(tcg_ctx, t, tcg_ctx->cpu_fpr[src / 2], 32);
        tcg_gen_trunc_i64_i32(tcg_ctx, ret, t);
        tcg_temp_free_i64(tcg_ctx, t);

        return ret;
    }
#endif
}
150
/* Write i32 value V to single-precision FP register DST, updating only the
   relevant half of the packed cpu_fpr[] element, then mark FPRS dirty. */
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(tcg_ctx, TCGV_LOW(tcg_ctx->cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(tcg_ctx, TCGV_HIGH(tcg_ctx->cpu_fpr[dst / 2]), v);
    }
#else
    /* Deposit into bits [31:0] for odd regs, [63:32] for even regs. */
    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
    tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->cpu_fpr[dst / 2], tcg_ctx->cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dc, dst);
}
167
/* Return a scratch i32 destination for a single-precision result; the caller
   commits it with gen_store_fpr_F(). */
static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}
172
gen_load_fpr_D(DisasContext * dc,unsigned int src)173 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
174 {
175 TCGContext *tcg_ctx = dc->uc->tcg_ctx;
176 src = DFPREG(src);
177 return tcg_ctx->cpu_fpr[src / 2];
178 }
179
/* Write i64 value V to double-precision FP register DST and mark FPRS dirty. */
static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    unsigned int rd = DFPREG(dst);

    tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd / 2], v);
    gen_update_fprs_dirty(dc, rd);
}
187
/* Return the i64 global backing double-precision register DST, to be written
   in place by the caller. */
static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    return tcg_ctx->cpu_fpr[DFPREG(dst) / 2];
}
193
/* Copy quad-precision register pair SRC into the env scratch slot qt0
   (upper then lower 64 bits), for use by quad FP helpers. */
static void gen_op_load_fpr_QT0(DisasContext *dc, unsigned int src)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    tcg_gen_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[src / 2], tcg_ctx->cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[src/2 + 1], tcg_ctx->cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
202
/* Copy quad-precision register pair SRC into the env scratch slot qt1
   (upper then lower 64 bits), for use by quad FP helpers. */
static void gen_op_load_fpr_QT1(DisasContext *dc, unsigned int src)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    tcg_gen_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[src / 2], tcg_ctx->cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[src/2 + 1], tcg_ctx->cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}
211
/* Copy the env scratch slot qt0 back into quad-precision register pair DST
   (upper then lower 64 bits). */
static void gen_op_store_QT0_fpr(DisasContext *dc, unsigned int dst)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    tcg_gen_ld_i64(tcg_ctx, tcg_ctx->cpu_fpr[dst / 2], tcg_ctx->cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(tcg_ctx, tcg_ctx->cpu_fpr[dst/2 + 1], tcg_ctx->cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
220
221 #ifdef TARGET_SPARC64
/* sparc64: move quad-precision register RS to RD (two i64 moves) and mark
   FPRS dirty for the destination bank. */
static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd / 2], tcg_ctx->cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd / 2 + 1], tcg_ctx->cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
}
232 #endif
233
234 /* moves */
/* Privilege-level predicates.  User-mode emulation never runs privileged
   code, so both collapse to 0 there; otherwise they are derived from the
   MMU index selected for this translation block. */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
#else
#endif
#endif

/* sparc64 address masking (PSTATE.AM): 32-bit ABI always masks; otherwise
   masking follows the per-block address_mask_32bit flag. */
#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif
255
/* sparc64: truncate ADDR to 32 bits when address masking is in effect
   (see AM_CHECK).  No-op on sparc32. */
static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    if (AM_CHECK(dc))
        tcg_gen_andi_tl(tcg_ctx, addr, addr, 0xffffffffULL);
#endif
}
264
gen_load_gpr(DisasContext * dc,int reg)265 static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
266 {
267 TCGContext *tcg_ctx = dc->uc->tcg_ctx;
268 if (reg == 0 || reg >= 8) {
269 TCGv t = get_temp_tl(dc);
270 if (reg == 0) {
271 tcg_gen_movi_tl(tcg_ctx, t, 0);
272 } else {
273 tcg_gen_ld_tl(tcg_ctx, t, tcg_ctx->cpu_regwptr, (reg - 8) * sizeof(target_ulong));
274 }
275 return t;
276 } else {
277 TCGv **cpu_gregs = (TCGv **)tcg_ctx->cpu_gregs;
278 return *cpu_gregs[reg];
279 }
280 }
281
/* Write V to general-purpose register REG.  Writes to %g0 are discarded;
   %g1..%g7 go to the TCG globals, windowed registers (reg >= 8) are stored
   through the register-window pointer. */
static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;

    if (reg <= 0) {
        return; /* %g0 is hardwired to zero */
    }
    if (reg < 8) {
        TCGv **cpu_gregs = (TCGv **)tcg_ctx->cpu_gregs;
        tcg_gen_mov_tl(tcg_ctx, *cpu_gregs[reg], v);
    } else {
        tcg_gen_st_tl(tcg_ctx, v, tcg_ctx->cpu_regwptr, (reg - 8) * sizeof(target_ulong));
    }
}
294
gen_dest_gpr(DisasContext * dc,int reg)295 static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
296 {
297 if (reg == 0 || reg >= 8) {
298 return get_temp_tl(dc);
299 } else {
300 TCGContext *tcg_ctx = dc->uc->tcg_ctx;
301 TCGv **cpu_gregs = (TCGv **)tcg_ctx->cpu_gregs;
302 return *cpu_gregs[reg];
303 }
304 }
305
/* Emit a jump to (PC, NPC).  When both targets lie on the same guest page as
   the current TB and we are not single-stepping, chain directly with
   goto_tb/exit_tb(tb|tb_num); otherwise set pc/npc and return to the main
   loop unchained. */
static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TranslationBlock *tb;

    tb = s->tb;
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        !s->singlestep) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tcg_ctx, tb_num);
        tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, pc);
        tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_npc, npc);
        tcg_gen_exit_tb(tcg_ctx, (uintptr_t)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, pc);
        tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_npc, npc);
        tcg_gen_exit_tb(tcg_ctx, 0);
    }
}
328
// XXX suboptimal
/* Extract the PSR N (negative) flag of SRC into REG as 0/1. */
static inline void gen_mov_reg_N(DisasContext *dc, TCGv reg, TCGv_i32 src)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;

    tcg_gen_extu_i32_tl(tcg_ctx, reg, src);
    tcg_gen_shri_tl(tcg_ctx, reg, reg, PSR_NEG_SHIFT);
    tcg_gen_andi_tl(tcg_ctx, reg, reg, 0x1);
}
338
/* Extract the PSR Z (zero) flag of SRC into REG as 0/1. */
static inline void gen_mov_reg_Z(DisasContext *dc, TCGv reg, TCGv_i32 src)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;

    tcg_gen_extu_i32_tl(tcg_ctx, reg, src);
    tcg_gen_shri_tl(tcg_ctx, reg, reg, PSR_ZERO_SHIFT);
    tcg_gen_andi_tl(tcg_ctx, reg, reg, 0x1);
}
347
/* Extract the PSR V (overflow) flag of SRC into REG as 0/1. */
static inline void gen_mov_reg_V(DisasContext *dc, TCGv reg, TCGv_i32 src)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;

    tcg_gen_extu_i32_tl(tcg_ctx, reg, src);
    tcg_gen_shri_tl(tcg_ctx, reg, reg, PSR_OVF_SHIFT);
    tcg_gen_andi_tl(tcg_ctx, reg, reg, 0x1);
}
356
/* Extract the PSR C (carry) flag of SRC into REG as 0/1. */
static inline void gen_mov_reg_C(DisasContext *dc, TCGv reg, TCGv_i32 src)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;

    tcg_gen_extu_i32_tl(tcg_ctx, reg, src);
    tcg_gen_shri_tl(tcg_ctx, reg, reg, PSR_CARRY_SHIFT);
    tcg_gen_andi_tl(tcg_ctx, reg, reg, 0x1);
}
365
/* Disabled: immediate-add with condition codes; kept for reference. */
#if 0
static inline void gen_op_addi_cc(DisasContext *dc, TCGv dst, TCGv src1, target_long src2)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;

    tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, src1);
    tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src2, src2);
    tcg_gen_addi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, *(TCGv *)tcg_ctx->cpu_cc_src, src2);
    tcg_gen_mov_tl(tcg_ctx, dst, *(TCGv *)tcg_ctx->cpu_cc_dst);
}
#endif
377
/* dst = src1 + src2, recording operands and result in cc_src/cc_src2/cc_dst
   so the condition codes can be computed lazily later. */
static inline void gen_op_add_cc(DisasContext *dc, TCGv dst, TCGv src1, TCGv src2)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;

    tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, src1);
    tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src2, src2);
    tcg_gen_add_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, *(TCGv *)tcg_ctx->cpu_cc_src, *(TCGv *)tcg_ctx->cpu_cc_src2);
    tcg_gen_mov_tl(tcg_ctx, dst, *(TCGv *)tcg_ctx->cpu_cc_dst);
}
387
/* Return a new i32 holding the 32-bit carry out of a previously generated
   add, computed as (uint32_t)cc_dst < (uint32_t)cc_src.  Caller frees it. */
static TCGv_i32 gen_add32_carry32(DisasContext *dc)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src) */
#if TARGET_LONG_BITS == 64
    /* 64-bit target: compare only the low 32 bits. */
    cc_src1_32 = tcg_temp_new_i32(tcg_ctx);
    cc_src2_32 = tcg_temp_new_i32(tcg_ctx);
    tcg_gen_trunc_i64_i32(tcg_ctx, cc_src1_32, *(TCGv *)tcg_ctx->cpu_cc_dst);
    tcg_gen_trunc_i64_i32(tcg_ctx, cc_src2_32, *(TCGv *)tcg_ctx->cpu_cc_src);
#else
    cc_src1_32 = *(TCGv *)tcg_ctx->cpu_cc_dst;
    cc_src2_32 = *(TCGv *)tcg_ctx->cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32(tcg_ctx);
    tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(tcg_ctx, cc_src1_32);
    tcg_temp_free_i32(tcg_ctx, cc_src2_32);
#endif

    return carry_32;
}
414
/* Return a new i32 holding the 32-bit borrow out of a previously generated
   subtract, computed as (uint32_t)cc_src < (uint32_t)cc_src2.  Caller frees. */
static TCGv_i32 gen_sub32_carry32(DisasContext *dc)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2) */
#if TARGET_LONG_BITS == 64
    /* 64-bit target: compare only the low 32 bits. */
    cc_src1_32 = tcg_temp_new_i32(tcg_ctx);
    cc_src2_32 = tcg_temp_new_i32(tcg_ctx);
    tcg_gen_trunc_i64_i32(tcg_ctx, cc_src1_32, *(TCGv *)tcg_ctx->cpu_cc_src);
    tcg_gen_trunc_i64_i32(tcg_ctx, cc_src2_32, *(TCGv *)tcg_ctx->cpu_cc_src2);
#else
    cc_src1_32 = *(TCGv *)tcg_ctx->cpu_cc_src;
    cc_src2_32 = *(TCGv *)tcg_ctx->cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32(tcg_ctx);
    tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(tcg_ctx, cc_src1_32);
    tcg_temp_free_i32(tcg_ctx, cc_src2_32);
#endif

    return carry_32;
}
441
/* Generate ADDX/ADDXcc: dst = src1 + src2 + icc.C.  The carry is recovered
   from the tracked cc_op when possible (known-zero, recomputed from a prior
   add/sub, or via the compute_C_icc helper as a last resort).  With
   update_cc, the operands/result are latched and cc_op becomes CC_OP_ADDX. */
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero. Fall back to plain ADD. */
        if (update_cc) {
            gen_op_add_cc(dc, dst, src1, src2);
        } else {
            tcg_gen_add_tl(tcg_ctx, dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode. We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place. */
            carry = tcg_temp_new(tcg_ctx);
            tcg_gen_add2_tl(tcg_ctx, carry, dst, *(TCGv *)tcg_ctx->cpu_cc_src, src1, *(TCGv *)tcg_ctx->cpu_cc_src2, src2);
            tcg_temp_free(tcg_ctx, carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32(dc);
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32(dc);
        break;

    default:
        /* We need external help to produce the carry. */
        carry_32 = tcg_temp_new_i32(tcg_ctx);
        gen_helper_compute_C_icc(tcg_ctx, carry_32, tcg_ctx->cpu_env);
        break;
    }

    /* Widen the carry to target-long width when needed. */
#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new(tcg_ctx);
    tcg_gen_extu_i32_i64(tcg_ctx, carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(tcg_ctx, dst, src1, src2);
    tcg_gen_add_tl(tcg_ctx, dst, dst, carry);

    tcg_temp_free_i32(tcg_ctx, carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(tcg_ctx, carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, src1);
        tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src2, src2);
        tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, dst);
        tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}
513
/* Disabled: immediate-subtract with condition codes; kept for reference. */
#if 0
static inline void gen_op_subi_cc(DisasContext *dc, TCGv dst, TCGv src1, target_long src2)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, src1);
    tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src2, src2);
    if (src2 == 0) {
        tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, src1);
        tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC);
        dc->cc_op = CC_OP_LOGIC;
    } else {
        tcg_gen_subi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, *(TCGv *)tcg_ctx->cpu_cc_src, src2);
        tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }
    tcg_gen_mov_tl(tcg_ctx, dst, *(TCGv *)tcg_ctx->cpu_cc_dst);
}
#endif
532
/* dst = src1 - src2, recording operands and result in cc_src/cc_src2/cc_dst
   so the condition codes can be computed lazily later. */
static inline void gen_op_sub_cc(DisasContext *dc, TCGv dst, TCGv src1, TCGv src2)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, src1);
    tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src2, src2);
    tcg_gen_sub_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, *(TCGv *)tcg_ctx->cpu_cc_src, *(TCGv *)tcg_ctx->cpu_cc_src2);
    tcg_gen_mov_tl(tcg_ctx, dst, *(TCGv *)tcg_ctx->cpu_cc_dst);
}
541
/* Generate SUBX/SUBXcc: dst = src1 - src2 - icc.C.  Mirror image of
   gen_op_addx_int: the borrow is recovered from the tracked cc_op when
   possible, otherwise via the compute_C_icc helper.  With update_cc, the
   operands/result are latched and cc_op becomes CC_OP_SUBX. */
static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero. Fall back to plain SUB. */
        if (update_cc) {
            gen_op_sub_cc(dc, dst, src1, src2);
        } else {
            tcg_gen_sub_tl(tcg_ctx, dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32(dc);
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode. We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place. */
            carry = tcg_temp_new(tcg_ctx);
            tcg_gen_sub2_tl(tcg_ctx, carry, dst, *(TCGv *)tcg_ctx->cpu_cc_src, src1, *(TCGv *)tcg_ctx->cpu_cc_src2, src2);
            tcg_temp_free(tcg_ctx, carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32(dc);
        break;

    default:
        /* We need external help to produce the carry. */
        carry_32 = tcg_temp_new_i32(tcg_ctx);
        gen_helper_compute_C_icc(tcg_ctx, carry_32, tcg_ctx->cpu_env);
        break;
    }

    /* Widen the borrow to target-long width when needed. */
#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new(tcg_ctx);
    tcg_gen_extu_i32_i64(tcg_ctx, carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(tcg_ctx, dst, src1, src2);
    tcg_gen_sub_tl(tcg_ctx, dst, dst, carry);

    tcg_temp_free_i32(tcg_ctx, carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(tcg_ctx, carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, src1);
        tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src2, src2);
        tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, dst);
        tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}
613
/* Generate one MULScc multiply-step: conditionally add src2 (gated by Y bit
   0), shift (N ^ V) into the running partial product, and rotate Y right by
   one with the dropped multiplicand bit.  Leaves operands/result latched in
   cc_src/cc_src2/cc_dst for condition-code computation. */
static inline void gen_op_mulscc(DisasContext *dc, TCGv dst, TCGv src1, TCGv src2)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new(tcg_ctx);
    t0 = tcg_temp_new(tcg_ctx);

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_const_tl(tcg_ctx, 0);
    tcg_gen_andi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(tcg_ctx, r_temp, *(TCGv *)tcg_ctx->cpu_y, 0x1);
    tcg_gen_andi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, *(TCGv *)tcg_ctx->cpu_cc_src2, r_temp, zero,
                       zero, *(TCGv *)tcg_ctx->cpu_cc_src2);
    tcg_temp_free(tcg_ctx, zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_andi_tl(tcg_ctx, r_temp, *(TCGv *)tcg_ctx->cpu_cc_src, 0x1);
    tcg_gen_shli_tl(tcg_ctx, r_temp, r_temp, 31);
    tcg_gen_shri_tl(tcg_ctx, t0, *(TCGv *)tcg_ctx->cpu_y, 1);
    tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x7fffffff);
    tcg_gen_or_tl(tcg_ctx, t0, t0, r_temp);
    tcg_gen_andi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_y, t0, 0xffffffff);

    // b1 = N ^ V;
    gen_mov_reg_N(dc, t0, tcg_ctx->cpu_psr);
    gen_mov_reg_V(dc, r_temp, tcg_ctx->cpu_psr);
    tcg_gen_xor_tl(tcg_ctx, t0, t0, r_temp);
    tcg_temp_free(tcg_ctx, r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(tcg_ctx, t0, t0, 31);
    tcg_gen_shri_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, *(TCGv *)tcg_ctx->cpu_cc_src, 1);
    tcg_gen_or_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, *(TCGv *)tcg_ctx->cpu_cc_src, t0);
    tcg_temp_free(tcg_ctx, t0);

    tcg_gen_add_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, *(TCGv *)tcg_ctx->cpu_cc_src, *(TCGv *)tcg_ctx->cpu_cc_src2);

    tcg_gen_mov_tl(tcg_ctx, dst, *(TCGv *)tcg_ctx->cpu_cc_dst);
}
660
/* 32x32 -> 64 multiply of the low 32 bits of src1/src2, signed when
   sign_ext is set.  On 32-bit targets dst receives the low word and %y the
   high word; on 64-bit targets dst holds the full 64-bit product and %y its
   high 32 bits. */
static inline void gen_op_multiply(DisasContext *dc, TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(tcg_ctx, dst, *(TCGv *)tcg_ctx->cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(tcg_ctx, dst, *(TCGv *)tcg_ctx->cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64(tcg_ctx);
    TCGv t1 = tcg_temp_new_i64(tcg_ctx);

    /* Widen the 32-bit operands according to signedness. */
    if (sign_ext) {
        tcg_gen_ext32s_i64(tcg_ctx, t0, src1);
        tcg_gen_ext32s_i64(tcg_ctx, t1, src2);
    } else {
        tcg_gen_ext32u_i64(tcg_ctx, t0, src1);
        tcg_gen_ext32u_i64(tcg_ctx, t1, src2);
    }

    tcg_gen_mul_i64(tcg_ctx, dst, t0, t1);
    tcg_temp_free(tcg_ctx, t0);
    tcg_temp_free(tcg_ctx, t1);

    tcg_gen_shri_i64(tcg_ctx, *(TCGv *)tcg_ctx->cpu_y, dst, 32);
#endif
}
689
/* UMUL: unsigned 32x32 multiply (see gen_op_multiply). */
static inline void gen_op_umul(DisasContext *dc, TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dc, dst, src1, src2, 0);
}
695
/* SMUL: signed 32x32 multiply (see gen_op_multiply). */
static inline void gen_op_smul(DisasContext *dc, TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dc, dst, src1, src2, 1);
}
701
// 1
/* Branch-always: dst = 1. */
static inline void gen_op_eval_ba(DisasContext *dc, TCGv dst)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    tcg_gen_movi_tl(tcg_ctx, dst, 1);
}
708
// Z
/* Branch-on-equal: dst = Z flag of SRC. */
static inline void gen_op_eval_be(DisasContext *dc, TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dc, dst, src);
}
714
// Z | (N ^ V)
/* Branch-on-less-or-equal (signed): dst = Z | (N ^ V). */
static inline void gen_op_eval_ble(DisasContext *dc, TCGv dst, TCGv_i32 src)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv t0 = tcg_temp_new(tcg_ctx);
    gen_mov_reg_N(dc, t0, src);
    gen_mov_reg_V(dc, dst, src);
    tcg_gen_xor_tl(tcg_ctx, dst, dst, t0);
    gen_mov_reg_Z(dc, t0, src);
    tcg_gen_or_tl(tcg_ctx, dst, dst, t0);
    tcg_temp_free(tcg_ctx, t0);
}
727
// N ^ V
/* Branch-on-less (signed): dst = N ^ V. */
static inline void gen_op_eval_bl(DisasContext *dc, TCGv dst, TCGv_i32 src)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv t0 = tcg_temp_new(tcg_ctx);
    gen_mov_reg_V(dc, t0, src);
    gen_mov_reg_N(dc, dst, src);
    tcg_gen_xor_tl(tcg_ctx, dst, dst, t0);
    tcg_temp_free(tcg_ctx, t0);
}
738
// C | Z
/* Branch-on-less-or-equal-unsigned: dst = C | Z. */
static inline void gen_op_eval_bleu(DisasContext *dc, TCGv dst, TCGv_i32 src)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv t0 = tcg_temp_new(tcg_ctx);
    gen_mov_reg_Z(dc, t0, src);
    gen_mov_reg_C(dc, dst, src);
    tcg_gen_or_tl(tcg_ctx, dst, dst, t0);
    tcg_temp_free(tcg_ctx, t0);
}
749
// C
/* Branch-on-carry-set: dst = C flag of SRC. */
static inline void gen_op_eval_bcs(DisasContext *dc, TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dc, dst, src);
}
755
// V
/* Branch-on-overflow-set: dst = V flag of SRC. */
static inline void gen_op_eval_bvs(DisasContext *dc, TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dc, dst, src);
}
761
// 0
/* Branch-never: dst = 0. */
static inline void gen_op_eval_bn(DisasContext *dc, TCGv dst)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    tcg_gen_movi_tl(tcg_ctx, dst, 0);
}
768
// N
/* Branch-on-negative: dst = N flag of SRC. */
static inline void gen_op_eval_bneg(DisasContext *dc, TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dc, dst, src);
}
774
// !Z
/* Branch-on-not-equal: dst = !Z. */
static inline void gen_op_eval_bne(DisasContext *dc, TCGv dst, TCGv_i32 src)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    gen_mov_reg_Z(dc, dst, src);
    tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1);
}
782
// !(Z | (N ^ V))
/* Branch-on-greater (signed): negation of the ble condition. */
static inline void gen_op_eval_bg(DisasContext *dc, TCGv dst, TCGv_i32 src)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    gen_op_eval_ble(dc, dst, src);
    tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1);
}
790
// !(N ^ V)
/* Branch-on-greater-or-equal (signed): negation of the bl condition. */
static inline void gen_op_eval_bge(DisasContext *dc, TCGv dst, TCGv_i32 src)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    gen_op_eval_bl(dc, dst, src);
    tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1);
}
798
// !(C | Z)
/* Branch-on-greater-unsigned: negation of the bleu condition. */
static inline void gen_op_eval_bgu(DisasContext *dc, TCGv dst, TCGv_i32 src)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    gen_op_eval_bleu(dc, dst, src);
    tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1);
}
806
// !C
/* Branch-on-carry-clear: dst = !C. */
static inline void gen_op_eval_bcc(DisasContext *dc, TCGv dst, TCGv_i32 src)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    gen_mov_reg_C(dc, dst, src);
    tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1);
}
814
// !N
/* Branch-on-positive: dst = !N. */
static inline void gen_op_eval_bpos(DisasContext *dc, TCGv dst, TCGv_i32 src)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    gen_mov_reg_N(dc, dst, src);
    tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1);
}
822
// !V
/* Branch-on-overflow-clear: dst = !V. */
static inline void gen_op_eval_bvc(DisasContext *dc, TCGv dst, TCGv_i32 src)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    gen_mov_reg_V(dc, dst, src);
    tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1);
}
830
831 /*
832 FPSR bit field FCC1 | FCC0:
833 0 =
834 1 <
835 2 >
836 3 unordered
837 */
/* Extract FCC0 (low bit of the selected FCC field of the FSR in SRC) into
   REG as 0/1; fcc_offset selects which of the four FCC fields. */
static inline void gen_mov_reg_FCC0(DisasContext *dc, TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    tcg_gen_shri_tl(tcg_ctx, reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(tcg_ctx, reg, reg, 0x1);
}
845
/* Extract FCC1 (high bit of the selected FCC field of the FSR in SRC) into
   REG as 0/1; fcc_offset selects which of the four FCC fields. */
static inline void gen_mov_reg_FCC1(DisasContext *dc, TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    tcg_gen_shri_tl(tcg_ctx, reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(tcg_ctx, reg, reg, 0x1);
}
853
// !0: FCC0 | FCC1
/* FP branch-not-equal: true for <, > or unordered. */
static inline void gen_op_eval_fbne(DisasContext *dc, TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv t0 = tcg_temp_new(tcg_ctx);
    gen_mov_reg_FCC0(dc, dst, src, fcc_offset);
    gen_mov_reg_FCC1(dc, t0, src, fcc_offset);
    tcg_gen_or_tl(tcg_ctx, dst, dst, t0);
    tcg_temp_free(tcg_ctx, t0);
}
865
// 1 or 2: FCC0 ^ FCC1
/* FP branch-less-or-greater: true for < or >, false for = and unordered. */
static inline void gen_op_eval_fblg(DisasContext *dc, TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv t0 = tcg_temp_new(tcg_ctx);
    gen_mov_reg_FCC0(dc, dst, src, fcc_offset);
    gen_mov_reg_FCC1(dc, t0, src, fcc_offset);
    tcg_gen_xor_tl(tcg_ctx, dst, dst, t0);
    tcg_temp_free(tcg_ctx, t0);
}
877
// 1 or 3: FCC0
/* FP branch-unordered-or-less: true for < or unordered. */
static inline void gen_op_eval_fbul(DisasContext *dc, TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dc, dst, src, fcc_offset);
}
884
// 1: FCC0 & !FCC1
/* FP branch-less: true only for <. */
static inline void gen_op_eval_fbl(DisasContext *dc, TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv t0 = tcg_temp_new(tcg_ctx);
    gen_mov_reg_FCC0(dc, dst, src, fcc_offset);
    gen_mov_reg_FCC1(dc, t0, src, fcc_offset);
    tcg_gen_andc_tl(tcg_ctx, dst, dst, t0);
    tcg_temp_free(tcg_ctx, t0);
}
896
// 2 or 3: FCC1
/* FP branch-unordered-or-greater: true for > or unordered. */
static inline void gen_op_eval_fbug(DisasContext *dc, TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dc, dst, src, fcc_offset);
}
903
// 2: !FCC0 & FCC1
/* FP branch-greater: true only for >. */
static inline void gen_op_eval_fbg(DisasContext *dc, TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv t0 = tcg_temp_new(tcg_ctx);
    gen_mov_reg_FCC0(dc, dst, src, fcc_offset);
    gen_mov_reg_FCC1(dc, t0, src, fcc_offset);
    tcg_gen_andc_tl(tcg_ctx, dst, t0, dst);
    tcg_temp_free(tcg_ctx, t0);
}
915
// 3: FCC0 & FCC1
/* FP branch-unordered: true only for unordered. */
static inline void gen_op_eval_fbu(DisasContext *dc, TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv t0 = tcg_temp_new(tcg_ctx);
    gen_mov_reg_FCC0(dc, dst, src, fcc_offset);
    gen_mov_reg_FCC1(dc, t0, src, fcc_offset);
    tcg_gen_and_tl(tcg_ctx, dst, dst, t0);
    tcg_temp_free(tcg_ctx, t0);
}
927
928 // 0: !(FCC0 | FCC1)
gen_op_eval_fbe(DisasContext * dc,TCGv dst,TCGv src,unsigned int fcc_offset)929 static inline void gen_op_eval_fbe(DisasContext *dc, TCGv dst, TCGv src,
930 unsigned int fcc_offset)
931 {
932 TCGContext *tcg_ctx = dc->uc->tcg_ctx;
933 TCGv t0 = tcg_temp_new(tcg_ctx);
934 gen_mov_reg_FCC0(dc, dst, src, fcc_offset);
935 gen_mov_reg_FCC1(dc, t0, src, fcc_offset);
936 tcg_gen_or_tl(tcg_ctx, dst, dst, t0);
937 tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1);
938 tcg_temp_free(tcg_ctx, t0);
939 }
940
941 // 0 or 3: !(FCC0 ^ FCC1)
gen_op_eval_fbue(DisasContext * dc,TCGv dst,TCGv src,unsigned int fcc_offset)942 static inline void gen_op_eval_fbue(DisasContext *dc, TCGv dst, TCGv src,
943 unsigned int fcc_offset)
944 {
945 TCGContext *tcg_ctx = dc->uc->tcg_ctx;
946 TCGv t0 = tcg_temp_new(tcg_ctx);
947 gen_mov_reg_FCC0(dc, dst, src, fcc_offset);
948 gen_mov_reg_FCC1(dc, t0, src, fcc_offset);
949 tcg_gen_xor_tl(tcg_ctx, dst, dst, t0);
950 tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1);
951 tcg_temp_free(tcg_ctx, t0);
952 }
953
// 0 or 2: !FCC0
/* dst = !FCC0; the xori with 1 inverts the extracted flag bit. */
static inline void gen_op_eval_fbge(DisasContext *dc, TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    gen_mov_reg_FCC0(dc, dst, src, fcc_offset);
    tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1);
}
962
963 // !1: !(FCC0 & !FCC1)
gen_op_eval_fbuge(DisasContext * dc,TCGv dst,TCGv src,unsigned int fcc_offset)964 static inline void gen_op_eval_fbuge(DisasContext *dc, TCGv dst, TCGv src,
965 unsigned int fcc_offset)
966 {
967 TCGContext *tcg_ctx = dc->uc->tcg_ctx;
968 TCGv t0 = tcg_temp_new(tcg_ctx);
969 gen_mov_reg_FCC0(dc, dst, src, fcc_offset);
970 gen_mov_reg_FCC1(dc, t0, src, fcc_offset);
971 tcg_gen_andc_tl(tcg_ctx, dst, dst, t0);
972 tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1);
973 tcg_temp_free(tcg_ctx, t0);
974 }
975
// 0 or 1: !FCC1
/* dst = !FCC1; the xori with 1 inverts the extracted flag bit. */
static inline void gen_op_eval_fble(DisasContext *dc, TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    gen_mov_reg_FCC1(dc, dst, src, fcc_offset);
    tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1);
}
984
985 // !2: !(!FCC0 & FCC1)
gen_op_eval_fbule(DisasContext * dc,TCGv dst,TCGv src,unsigned int fcc_offset)986 static inline void gen_op_eval_fbule(DisasContext *dc, TCGv dst, TCGv src,
987 unsigned int fcc_offset)
988 {
989 TCGContext *tcg_ctx = dc->uc->tcg_ctx;
990 TCGv t0 = tcg_temp_new(tcg_ctx);
991 gen_mov_reg_FCC0(dc, dst, src, fcc_offset);
992 gen_mov_reg_FCC1(dc, t0, src, fcc_offset);
993 tcg_gen_andc_tl(tcg_ctx, dst, t0, dst);
994 tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1);
995 tcg_temp_free(tcg_ctx, t0);
996 }
997
998 // !3: !(FCC0 & FCC1)
gen_op_eval_fbo(DisasContext * dc,TCGv dst,TCGv src,unsigned int fcc_offset)999 static inline void gen_op_eval_fbo(DisasContext *dc, TCGv dst, TCGv src,
1000 unsigned int fcc_offset)
1001 {
1002 TCGContext *tcg_ctx = dc->uc->tcg_ctx;
1003 TCGv t0 = tcg_temp_new(tcg_ctx);
1004 gen_mov_reg_FCC0(dc, dst, src, fcc_offset);
1005 gen_mov_reg_FCC1(dc, t0, src, fcc_offset);
1006 tcg_gen_and_tl(tcg_ctx, dst, dst, t0);
1007 tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1);
1008 tcg_temp_free(tcg_ctx, t0);
1009 }
1010
/* Emit a two-way TB exit: go to pc1 when r_cond != 0, else to pc2. */
static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    int l_false = gen_new_label(tcg_ctx);

    /* Condition zero? skip the taken path. */
    tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, r_cond, 0, l_false);
    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(tcg_ctx, l_false);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}
1026
/* Emit the exits for a branch with the annul bit set: when r_cond != 0,
   execute the delay slot (pc2) then the target (pc1); otherwise skip the
   annulled delay slot entirely. */
static inline void gen_branch_a(DisasContext *dc, target_ulong pc1,
                                target_ulong pc2, TCGv r_cond)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    int l_untaken = gen_new_label(tcg_ctx);

    tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, r_cond, 0, l_untaken);
    gen_goto_tb(dc, 0, pc2, pc1);

    gen_set_label(tcg_ctx, l_untaken);
    gen_goto_tb(dc, 1, pc2 + 4, pc2 + 8);
}
1042
/* Materialize a pending JUMP_PC npc: select between the two recorded
   jump targets based on cpu_cond, writing the result into cpu_npc. */
static inline void gen_generic_branch(DisasContext *dc)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv r_taken = tcg_const_tl(tcg_ctx, dc->jump_pc[0]);
    TCGv r_fallthru = tcg_const_tl(tcg_ctx, dc->jump_pc[1]);
    TCGv r_zero = tcg_const_tl(tcg_ctx, 0);

    /* npc = (cond != 0) ? jump_pc[0] : jump_pc[1] */
    tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, *(TCGv *)tcg_ctx->cpu_npc, *(TCGv *)tcg_ctx->cpu_cond, r_zero, r_taken, r_fallthru);

    tcg_temp_free(tcg_ctx, r_taken);
    tcg_temp_free(tcg_ctx, r_fallthru);
    tcg_temp_free(tcg_ctx, r_zero);
}
1056
/* call this function before using the condition register as it may
   have been set for a jump */
/* Resolve any deferred JUMP_PC state so cpu_cond can be reused: emit the
   movcond that picks the real npc and downgrade npc to DYNAMIC_PC. */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}
1066
/* Make the architectural npc register match dc->npc.  A pending JUMP_PC
   is resolved first; a DYNAMIC_PC npc is already up to date in cpu_npc. */
static inline void save_npc(DisasContext *dc)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;

    switch (dc->npc) {
    case JUMP_PC:
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
        break;
    case DYNAMIC_PC:
        /* cpu_npc already holds the correct value. */
        break;
    default:
        tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_npc, dc->npc);
        break;
    }
}
1077
/* Ensure the PSR condition codes are fully computed (CC_OP_FLAGS):
   call the compute_psr helper once if a lazy cc_op is outstanding. */
static inline void update_psr(DisasContext *dc)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(tcg_ctx, tcg_ctx->cpu_env);
    }
}
1086
/* Flush the translation-time pc and npc into the CPU state registers
   (needed before anything that can raise an exception). */
static inline void save_state(DisasContext *dc)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, dc->pc);
    save_npc(dc);
}
1093
/* Advance pc to npc (delay-slot semantics).  When npc is not a compile-time
   constant, copy the npc register into the pc register and mark pc dynamic. */
static inline void gen_mov_pc_npc(DisasContext *dc)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;

    if (dc->npc == JUMP_PC) {
        /* Resolve the pending two-way npc before copying it. */
        gen_generic_branch(dc);
    }
    if (dc->npc == JUMP_PC || dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, *(TCGv *)tcg_ctx->cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}
1108
/* Emit the default instruction advance: pc = npc; npc += 4. */
static inline void gen_op_next_insn(DisasContext *dc)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, *(TCGv *)tcg_ctx->cpu_npc);
    tcg_gen_addi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_npc, *(TCGv *)tcg_ctx->cpu_npc, 4);
}
1115
/* Release the comparison operands that were locally allocated.
   Operands flagged g1/g2 are global TCG values and must not be freed. */
static void free_compare(TCGContext *tcg_ctx, DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(tcg_ctx, cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(tcg_ctx, cmp->c2);
    }
}
1125
/* Build a DisasCompare for integer condition code 'cond' against the
   cc set selected by 'xcc' (xcc vs icc on sparc64).  Depending on the
   lazily-tracked dc->cc_op, the comparison is expressed either directly
   on the saved operands (CC_OP_SUB/CC_OP_LOGIC fast paths) or as a
   boolean computed from the fully-evaluated flags. */
static void gen_compare(DisasContext *dc, DisasCompare *cmp, bool xcc, unsigned int cond)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    /* TCG condition to use for each SPARC cond when cc came from subcc;
       -1 entries need the full flags (handled via do_dynamic).
       Tables are read-only: declare them const. */
    static const int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    /* TCG condition for each SPARC cond when cc came from a logic op
       (C and V are known zero, so the compare is against cc_dst alone). */
    static const int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = tcg_ctx->cpu_xcc;
    } else {
        r_src = tcg_ctx->cpu_psr;
    }
#else
    r_src = tcg_ctx->cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        /* Compare cc_dst against constant zero. */
        cmp->is_bool = false;
        cmp->g2 = false;
        cmp->c2 = tcg_const_tl(tcg_ctx, 0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            /* icc: only the low 32 bits participate; sign-extend them. */
            cmp->g1 = false;
            cmp->c1 = tcg_temp_new(tcg_ctx);
            tcg_gen_ext32s_tl(tcg_ctx, cmp->c1, *(TCGv *)tcg_ctx->cpu_cc_dst);
            break;
        }
#endif
        cmp->g1 = true;
        cmp->c1 = *(TCGv *)tcg_ctx->cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            /* Sign of the result: compare the subtraction result with 0. */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7:  /* overflow */
        case 15: /* !overflow */
            /* V cannot be recovered from the operands alone. */
            goto do_dynamic;

        default:
            /* Re-run the comparison on the saved subtraction operands. */
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended. */
                cmp->g1 = cmp->g2 = false;
                cmp->c1 = tcg_temp_new(tcg_ctx);
                cmp->c2 = tcg_temp_new(tcg_ctx);
                tcg_gen_ext32s_tl(tcg_ctx, cmp->c1, *(TCGv *)tcg_ctx->cpu_cc_src);
                tcg_gen_ext32s_tl(tcg_ctx, cmp->c2, *(TCGv *)tcg_ctx->cpu_cc_src2);
                break;
            }
#endif
            cmp->g1 = cmp->g2 = true;
            cmp->c1 = *(TCGv *)tcg_ctx->cpu_cc_src;
            cmp->c2 = *(TCGv *)tcg_ctx->cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        /* Evaluate the flags for real, then fall into the generic path. */
        gen_helper_compute_psr(tcg_ctx, tcg_ctx->cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result. */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->g1 = cmp->g2 = false;
        cmp->c1 = r_dst = tcg_temp_new(tcg_ctx);
        cmp->c2 = tcg_const_tl(tcg_ctx, 0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(dc, r_dst);
            break;
        case 0x1:
            gen_op_eval_be(dc, r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(dc, r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(dc, r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(dc, r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(dc, r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(dc, r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(dc, r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(dc, r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(dc, r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(dc, r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(dc, r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(dc, r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(dc, r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(dc, r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(dc, r_dst, r_src);
            break;
        }
        break;
    }
}
1299
/* Build a DisasCompare for floating-point condition 'cond' against the
   FCC field selected by 'cc'.  The result is always produced as a 0/1
   boolean in a fresh temp (cmp->c1) compared NE against 0. */
static void gen_fcompare(DisasContext *dc, DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result. */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->g1 = cmp->g2 = false;
    cmp->c1 = r_dst = tcg_temp_new(tcg_ctx);
    cmp->c2 = tcg_const_tl(tcg_ctx, 0);

    /* Bit offset of the selected FCCn field relative to FCC0 within the
       FSR value; passed through to the gen_op_eval_fb* extractors. */
    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    /* One evaluator per SPARC fcond encoding. */
    switch (cond) {
    case 0x0:
        gen_op_eval_bn(dc, r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(dc, r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
        break;
    }
}
1380
/* Evaluate integer condition 'cond' on cc set 'cc' and leave a 0/1
   boolean in r_dst. */
static void gen_cond(DisasContext *dc, TCGv r_dst, unsigned int cc, unsigned int cond)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    DisasCompare c;

    gen_compare(dc, &c, cc, cond);

    /* Collapse the comparison into a boolean value. */
    if (c.is_bool) {
        tcg_gen_mov_tl(tcg_ctx, r_dst, c.c1);
    } else {
        tcg_gen_setcond_tl(tcg_ctx, c.cond, r_dst, c.c1, c.c2);
    }

    free_compare(tcg_ctx, &c);
}
1396
/* Evaluate floating-point condition 'cond' on FCC field 'cc' and leave
   a 0/1 boolean in r_dst. */
static void gen_fcond(DisasContext *dc, TCGv r_dst, unsigned int cc, unsigned int cond)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    DisasCompare c;

    gen_fcompare(dc, &c, cc, cond);

    /* Collapse the comparison into a boolean value. */
    if (c.is_bool) {
        tcg_gen_mov_tl(tcg_ctx, r_dst, c.c1);
    } else {
        tcg_gen_setcond_tl(tcg_ctx, c.cond, r_dst, c.c1, c.c2);
    }

    free_compare(tcg_ctx, &c);
}
1412
#ifdef TARGET_SPARC64
// Inverted logic
/* TCG conditions for the register-contents branch encodings, indexed by
   the 3-bit cond field.  Each entry is the INVERSE of the architectural
   condition; gen_compare_reg() undoes this with tcg_invert_cond().
   Entries 0 and 4 (-1) are unused encodings. */
static const int gen_tcg_cond_reg[8] = {
    -1,
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};
1425
/* Build a DisasCompare that tests r_src against zero using register
   condition 'cond' (see gen_tcg_cond_reg for the inverted table). */
static void gen_compare_reg(DisasContext *dc, DisasCompare *cmp, int cond, TCGv r_src)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;

    /* The table stores the inverted condition; restore the real one. */
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->c1 = r_src;
    cmp->g1 = true;           /* caller-owned register: never freed */
    cmp->c2 = tcg_const_tl(tcg_ctx, 0);
    cmp->g2 = false;
}
1436
/* Evaluate register condition 'cond' on r_src and leave a 0/1 boolean
   in r_dst. */
static inline void gen_cond_reg(DisasContext *dc, TCGv r_dst, int cond, TCGv r_src)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    DisasCompare c;

    gen_compare_reg(dc, &c, cond, r_src);
    tcg_gen_setcond_tl(tcg_ctx, c.cond, r_dst, c.c1, c.c2);
    free_compare(tcg_ctx, &c);
}
#endif
1449
/* Translate an integer conditional branch.  'offset' is the byte
   displacement from dc->pc, 'cc' selects the condition-code set.
   Unconditional forms (cond 0/8) are resolved at translation time;
   conditional forms either end the TB (annulled) or defer the npc
   choice via the JUMP_PC mechanism. */
static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    /* With the 32-bit address mask active, targets wrap to 32 bits. */
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            /* annul bit set: the delay slot is skipped too */
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            /* annul bit set: go straight to the target */
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, *(TCGv *)tcg_ctx->cpu_npc);
        }
    } else {
        /* truly conditional: compute the condition into cpu_cond */
        flush_cond(dc);
        gen_cond(dc, *(TCGv *)tcg_ctx->cpu_cond, cc, cond);
        if (a) {
            /* annulled conditional branch terminates the TB */
            gen_branch_a(dc, target, dc->npc, *(TCGv *)tcg_ctx->cpu_cond);
            dc->is_br = 1;
        } else {
            dc->pc = dc->npc;
            dc->jump_pc[0] = target;
            if (unlikely(dc->npc == DYNAMIC_PC)) {
                /* npc not known at translation time: pc = npc + 4 */
                dc->jump_pc[1] = DYNAMIC_PC;
                tcg_gen_addi_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, *(TCGv *)tcg_ctx->cpu_npc, 4);
            } else {
                /* record both possible npcs; resolved later via JUMP_PC */
                dc->jump_pc[1] = dc->npc + 4;
                dc->npc = JUMP_PC;
            }
        }
    }
}
1499
/* Translate a floating-point conditional branch.  Identical control
   structure to do_branch(), but the condition comes from the FCC field
   selected by 'cc' via gen_fcond(). */
static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    /* With the 32-bit address mask active, targets wrap to 32 bits. */
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            /* annul bit set: the delay slot is skipped too */
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            /* annul bit set: go straight to the target */
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, *(TCGv *)tcg_ctx->cpu_npc);
        }
    } else {
        /* truly conditional: compute the FP condition into cpu_cond */
        flush_cond(dc);
        gen_fcond(dc, *(TCGv *)tcg_ctx->cpu_cond, cc, cond);
        if (a) {
            /* annulled conditional branch terminates the TB */
            gen_branch_a(dc, target, dc->npc, *(TCGv *)tcg_ctx->cpu_cond);
            dc->is_br = 1;
        } else {
            dc->pc = dc->npc;
            dc->jump_pc[0] = target;
            if (unlikely(dc->npc == DYNAMIC_PC)) {
                /* npc not known at translation time: pc = npc + 4 */
                dc->jump_pc[1] = DYNAMIC_PC;
                tcg_gen_addi_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, *(TCGv *)tcg_ctx->cpu_npc, 4);
            } else {
                /* record both possible npcs; resolved later via JUMP_PC */
                dc->jump_pc[1] = dc->npc + 4;
                dc->npc = JUMP_PC;
            }
        }
    }
}
1549
#ifdef TARGET_SPARC64
/* Translate a branch-on-register-contents instruction (sparc64 only).
   The condition tests r_reg against zero; there is no unconditional
   encoding, so the JUMP_PC / annul handling mirrors the conditional
   tail of do_branch(). */
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    /* With the 32-bit address mask active, targets wrap to 32 bits. */
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(dc, *(TCGv *)tcg_ctx->cpu_cond, cond, r_reg);
    if (a) {
        /* annulled conditional branch terminates the TB */
        gen_branch_a(dc, target, dc->npc, *(TCGv *)tcg_ctx->cpu_cond);
        dc->is_br = 1;
    } else {
        dc->pc = dc->npc;
        dc->jump_pc[0] = target;
        if (unlikely(dc->npc == DYNAMIC_PC)) {
            /* npc not known at translation time: pc = npc + 4 */
            dc->jump_pc[1] = DYNAMIC_PC;
            tcg_gen_addi_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, *(TCGv *)tcg_ctx->cpu_npc, 4);
        } else {
            /* record both possible npcs; resolved later via JUMP_PC */
            dc->jump_pc[1] = dc->npc + 4;
            dc->npc = JUMP_PC;
        }
    }
}
1578
/* Emit a single-precision FP compare, dispatching to the helper for the
   FCC field selected by fccno (0-3). */
static inline void gen_op_fcmps(DisasContext *dc, int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    switch (fccno) {
    case 0:
        gen_helper_fcmps(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    }
}
1597
/* Emit a double-precision FP compare, dispatching to the helper for the
   FCC field selected by fccno (0-3). */
static inline void gen_op_fcmpd(DisasContext *dc, int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    }
}
1616
/* Emit a quad-precision FP compare (operands staged in QT0/QT1 by the
   caller), dispatching to the helper for the FCC field fccno (0-3). */
static inline void gen_op_fcmpq(DisasContext *dc, int fccno)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(tcg_ctx, tcg_ctx->cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(tcg_ctx, tcg_ctx->cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(tcg_ctx, tcg_ctx->cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(tcg_ctx, tcg_ctx->cpu_env);
        break;
    }
}
1635
/* Emit a single-precision FP compare-with-exception (FCMPEs variant),
   dispatching to the helper for the FCC field fccno (0-3). */
static inline void gen_op_fcmpes(DisasContext *dc, int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    }
}
1654
/* Emit a double-precision FP compare-with-exception (FCMPEd variant),
   dispatching to the helper for the FCC field fccno (0-3). */
static inline void gen_op_fcmped(DisasContext *dc, int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    switch (fccno) {
    case 0:
        gen_helper_fcmped(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    }
}
1673
/* Emit a quad-precision FP compare-with-exception (operands staged in
   QT0/QT1), dispatching to the helper for the FCC field fccno (0-3). */
static inline void gen_op_fcmpeq(DisasContext *dc, int fccno)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(tcg_ctx, tcg_ctx->cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(tcg_ctx, tcg_ctx->cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(tcg_ctx, tcg_ctx->cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(tcg_ctx, tcg_ctx->cpu_env);
        break;
    }
}
1692
#else
1694
/* 32-bit SPARC: only fcc0 exists, so fccno is ignored. */
static inline void gen_op_fcmps(DisasContext *dc, int fccno, TCGv r_rs1, TCGv r_rs2)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    gen_helper_fcmps(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
}
1700
/* 32-bit SPARC: only fcc0 exists, so fccno is ignored. */
static inline void gen_op_fcmpd(DisasContext *dc, int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    gen_helper_fcmpd(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
}
1706
/* 32-bit SPARC quad compare (operands in QT0/QT1); fccno is ignored. */
static inline void gen_op_fcmpq(DisasContext *dc, int fccno)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    gen_helper_fcmpq(tcg_ctx, tcg_ctx->cpu_env);
}
1712
/* 32-bit SPARC compare-with-exception variant; fccno is ignored. */
static inline void gen_op_fcmpes(DisasContext *dc, int fccno, TCGv r_rs1, TCGv r_rs2)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    gen_helper_fcmpes(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
}
1718
/* 32-bit SPARC compare-with-exception variant; fccno is ignored. */
static inline void gen_op_fcmped(DisasContext *dc, int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    gen_helper_fcmped(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
}
1724
/* 32-bit SPARC quad compare-with-exception (operands in QT0/QT1);
   fccno is ignored. */
static inline void gen_op_fcmpeq(DisasContext *dc, int fccno)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    gen_helper_fcmpeq(tcg_ctx, tcg_ctx->cpu_env);
}
#endif
1731
/* Replace the FTT field of the FSR with fsr_flags, then raise the
   TT_FP_EXCP trap. */
static inline void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 r_tt;

    tcg_gen_andi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_fsr, *(TCGv *)tcg_ctx->cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_fsr, *(TCGv *)tcg_ctx->cpu_fsr, fsr_flags);

    r_tt = tcg_const_i32(tcg_ctx, TT_FP_EXCP);
    gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, r_tt);
    tcg_temp_free_i32(tcg_ctx, r_tt);
}
1743
/* If the FPU is disabled, raise a TT_NFPU_INSN trap and end the TB.
   Returns 1 when the trap was emitted (caller must abort translating
   the current insn), 0 otherwise.  User-only builds never trap here. */
static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    if (!dc->fpu_enabled) {
        TCGv_i32 r_const;

        /* pc/npc must be in the CPU state before raising the exception */
        save_state(dc);
        r_const = tcg_const_i32(tcg_ctx, TT_NFPU_INSN);
        gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, r_const);
        tcg_temp_free_i32(tcg_ctx, r_const);
        dc->is_br = 1;
        return 1;
    }
#endif
    return 0;
}
1761
/* Clear the FTT and current-exception (CEXC) fields of the FSR. */
static inline void gen_op_clear_ieee_excp_and_FTT(DisasContext *dc)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    tcg_gen_andi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_fsr, *(TCGv *)tcg_ctx->cpu_fsr, FSR_FTT_CEXC_NMASK);
}
1767
/* Unary single-precision FP op via an env-taking helper: rd = gen(rs). */
static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGContext *tcg_ctx, TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 v_src = gen_load_fpr_F(dc, rs);
    TCGv_i32 v_dst = gen_dest_fpr_F(dc);

    gen(tcg_ctx, v_dst, tcg_ctx->cpu_env, v_src);
    gen_store_fpr_F(dc, rd, v_dst);
}
1781
/* Unary single-precision FP op via a helper that takes no env
   ("ne" = no exception): rd = gen(rs). */
static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGContext *tcg_ctx, TCGv_i32, TCGv_i32))
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 v_src = gen_load_fpr_F(dc, rs);
    TCGv_i32 v_dst = gen_dest_fpr_F(dc);

    gen(tcg_ctx, v_dst, v_src);
    gen_store_fpr_F(dc, rd, v_dst);
}
1795
/* Binary single-precision FP op via an env-taking helper:
   rd = gen(rs1, rs2). */
static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGContext *tcg_ctx, TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 v_a = gen_load_fpr_F(dc, rs1);
    TCGv_i32 v_b = gen_load_fpr_F(dc, rs2);
    TCGv_i32 v_dst = gen_dest_fpr_F(dc);

    gen(tcg_ctx, v_dst, tcg_ctx->cpu_env, v_a, v_b);
    gen_store_fpr_F(dc, rd, v_dst);
}
1810
#ifdef TARGET_SPARC64
/* Binary single-precision FP op via a helper that takes no env:
   rd = gen(rs1, rs2). */
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGContext *tcg_ctx, TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 v_a = gen_load_fpr_F(dc, rs1);
    TCGv_i32 v_b = gen_load_fpr_F(dc, rs2);
    TCGv_i32 v_dst = gen_dest_fpr_F(dc);

    gen(tcg_ctx, v_dst, v_a, v_b);
    gen_store_fpr_F(dc, rd, v_dst);
}
#endif
1827
/* Unary double-precision FP op via an env-taking helper: rd = gen(rs). */
static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i64 v_src = gen_load_fpr_D(dc, rs);
    TCGv_i64 v_dst = gen_dest_fpr_D(dc, rd);

    gen(tcg_ctx, v_dst, tcg_ctx->cpu_env, v_src);
    gen_store_fpr_D(dc, rd, v_dst);
}
1841
#ifdef TARGET_SPARC64
/* Unary double-precision FP op via a helper that takes no env:
   rd = gen(rs). */
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64))
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i64 v_src = gen_load_fpr_D(dc, rs);
    TCGv_i64 v_dst = gen_dest_fpr_D(dc, rd);

    gen(tcg_ctx, v_dst, v_src);
    gen_store_fpr_D(dc, rd, v_dst);
}
#endif
1857
/* Binary double-precision FP op via an env-taking helper:
   rd = gen(rs1, rs2). */
static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i64 v_a = gen_load_fpr_D(dc, rs1);
    TCGv_i64 v_b = gen_load_fpr_D(dc, rs2);
    TCGv_i64 v_dst = gen_dest_fpr_D(dc, rd);

    gen(tcg_ctx, v_dst, tcg_ctx->cpu_env, v_a, v_b);
    gen_store_fpr_D(dc, rd, v_dst);
}
1872
#ifdef TARGET_SPARC64
/* Binary double-precision FP op via a helper that takes no env:
   rd = gen(rs1, rs2). */
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i64 v_a = gen_load_fpr_D(dc, rs1);
    TCGv_i64 v_b = gen_load_fpr_D(dc, rs2);
    TCGv_i64 v_dst = gen_dest_fpr_D(dc, rd);

    gen(tcg_ctx, v_dst, v_a, v_b);
    gen_store_fpr_D(dc, rd, v_dst);
}
1888
/* Binary double-precision FP op that also reads the GSR register:
   rd = gen(gsr, rs1, rs2). */
static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i64 v_a = gen_load_fpr_D(dc, rs1);
    TCGv_i64 v_b = gen_load_fpr_D(dc, rs2);
    TCGv_i64 v_dst = gen_dest_fpr_D(dc, rd);

    gen(tcg_ctx, v_dst, *(TCGv *)tcg_ctx->cpu_gsr, v_a, v_b);
    gen_store_fpr_D(dc, rd, v_dst);
}
1903
/* Three-operand double-precision FP op where rd is both an input and
   the destination: rd = gen(rd, rs1, rs2). */
static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i64 v_a = gen_load_fpr_D(dc, rs1);
    TCGv_i64 v_b = gen_load_fpr_D(dc, rs2);
    TCGv_i64 v_acc = gen_load_fpr_D(dc, rd);
    TCGv_i64 v_dst = gen_dest_fpr_D(dc, rd);

    gen(tcg_ctx, v_dst, v_acc, v_a, v_b);
    gen_store_fpr_D(dc, rd, v_dst);
}
1919 #endif
1920
/* One-source quad FP op.  Quad values are passed through the fixed
   env temporaries QT0/QT1: the source is staged in QT1, the helper
   writes its result to QT0, which is then stored back to rd. */
static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGContext *tcg_ctx, TCGv_ptr))
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    gen_op_load_fpr_QT1(dc, QFPREG(rs));

    gen(tcg_ctx, tcg_ctx->cpu_env);

    gen_op_store_QT0_fpr(dc, QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
1932
1933 #ifdef TARGET_SPARC64
/* No-exception variant of gen_fop_QQ; the generated code sequence is
   identical (source via QT1, result via QT0). */
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGContext *tcg_ctx, TCGv_ptr))
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    gen_op_load_fpr_QT1(dc, QFPREG(rs));

    gen(tcg_ctx, tcg_ctx->cpu_env);

    gen_op_store_QT0_fpr(dc, QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
1945 #endif
1946
/* Two-source quad FP op: rs1 staged in QT0, rs2 in QT1; the helper
   leaves its result in QT0, which is stored back to rd. */
static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGContext *tcg_ctx, TCGv_ptr))
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    gen_op_load_fpr_QT0(dc, QFPREG(rs1));
    gen_op_load_fpr_QT1(dc, QFPREG(rs2));

    gen(tcg_ctx, tcg_ctx->cpu_env);

    gen_op_store_QT0_fpr(dc, QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
1959
/* FP op with two single-precision (32-bit) sources producing a
   double-precision (64-bit) result: frd = gen(env, frs1, frs2). */
static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(tcg_ctx, dst, tcg_ctx->cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
1975
/* FP op with two double sources and a quad result; the helper leaves
   the quad result in the QT0 env temporary, stored back to rd. */
static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGContext *tcg_ctx, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(tcg_ctx, tcg_ctx->cpu_env, src1, src2);

    gen_op_store_QT0_fpr(dc, QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
1990
1991 #ifdef TARGET_SPARC64
/* FP op with a single-precision source and a double-precision result:
   frd = gen(env, frs). */
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(tcg_ctx, dst, tcg_ctx->cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
2006 #endif
2007
/* "ne" (no-exception) variant of gen_fop_DF.  NOTE(review): despite the
   name, the callback here still receives cpu_env, exactly like
   gen_fop_DF — the two bodies are identical in this version. */
static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(tcg_ctx, dst, tcg_ctx->cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
2022
/* FP op with a double-precision source and a single-precision result:
   frd = gen(env, frs). */
static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGContext *tcg_ctx, TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(tcg_ctx, dst, tcg_ctx->cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}
2037
/* FP op with a quad source (staged via QT1) and a single-precision
   result written directly by the helper. */
static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGContext *tcg_ctx, TCGv_i32, TCGv_ptr))
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(dc, QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(tcg_ctx, dst, tcg_ctx->cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}
2051
/* FP op with a quad source (staged via QT1) and a double-precision
   result written directly by the helper. */
static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_ptr))
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(dc, QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(tcg_ctx, dst, tcg_ctx->cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}
2065
/* No-exception FP op with a single-precision source and a quad result;
   the helper leaves the result in QT0, stored back to rd. */
static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGContext *tcg_ctx, TCGv_ptr, TCGv_i32))
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(tcg_ctx, tcg_ctx->cpu_env, src);

    gen_op_store_QT0_fpr(dc, QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
2079
/* No-exception FP op with a double-precision source and a quad result;
   the helper leaves the result in QT0, stored back to rd. */
static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGContext *tcg_ctx, TCGv_ptr, TCGv_i64))
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(tcg_ctx, tcg_ctx->cpu_env, src);

    gen_op_store_QT0_fpr(dc, QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
2093
2094 /* asi moves */
2095 #ifdef TARGET_SPARC64
gen_get_asi(DisasContext * dc,int insn,TCGv r_addr)2096 static inline TCGv_i32 gen_get_asi(DisasContext *dc, int insn, TCGv r_addr)
2097 {
2098 TCGContext *tcg_ctx = dc->uc->tcg_ctx;
2099 int asi;
2100 TCGv_i32 r_asi;
2101
2102 if (IS_IMM) {
2103 r_asi = tcg_temp_new_i32(tcg_ctx);
2104 tcg_gen_mov_i32(tcg_ctx, r_asi, tcg_ctx->cpu_asi);
2105 } else {
2106 asi = GET_FIELD(insn, 19, 26);
2107 r_asi = tcg_const_i32(tcg_ctx, asi);
2108 }
2109 return r_asi;
2110 }
2111
/* Emit a load of @size bytes (sign-extended if @sign) from @addr in the
   instruction's ASI space into @dst, via the ld_asi helper. */
static inline void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 r_asi, r_size, r_sign;

    r_asi = gen_get_asi(dc, insn, addr);
    r_size = tcg_const_i32(tcg_ctx, size);
    r_sign = tcg_const_i32(tcg_ctx, sign);
    gen_helper_ld_asi(tcg_ctx, dst, tcg_ctx->cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(tcg_ctx, r_sign);
    tcg_temp_free_i32(tcg_ctx, r_size);
    tcg_temp_free_i32(tcg_ctx, r_asi);
}
2126
/* Emit a store of @size bytes of @src to @addr in the instruction's
   ASI space, via the st_asi helper. */
static inline void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, int insn, int size)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 r_asi, r_size;

    r_asi = gen_get_asi(dc, insn, addr);
    r_size = tcg_const_i32(tcg_ctx, size);
    gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(tcg_ctx, r_size);
    tcg_temp_free_i32(tcg_ctx, r_asi);
}
2138
/* Emit an FP-register load from an alternate address space; the helper
   receives the destination FP register number @rd and writes CPU FP
   state itself. */
static inline void gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(dc, insn, addr);
    r_size = tcg_const_i32(tcg_ctx, size);
    r_rd = tcg_const_i32(tcg_ctx, rd);
    gen_helper_ldf_asi(tcg_ctx, tcg_ctx->cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(tcg_ctx, r_rd);
    tcg_temp_free_i32(tcg_ctx, r_size);
    tcg_temp_free_i32(tcg_ctx, r_asi);
}
2152
/* Emit an FP-register store to an alternate address space; mirrors
   gen_ldf_asi with the stf_asi helper. */
static inline void gen_stf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(dc, insn, addr);
    r_size = tcg_const_i32(tcg_ctx, size);
    r_rd = tcg_const_i32(tcg_ctx, rd);
    gen_helper_stf_asi(tcg_ctx, tcg_ctx->cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(tcg_ctx, r_rd);
    tcg_temp_free_i32(tcg_ctx, r_size);
    tcg_temp_free_i32(tcg_ctx, r_asi);
}
2166
/* Emit a SWAP in an alternate address space: load the old 32-bit word
   from @addr into @dst and store @src to the same address.  NOTE: the
   load and store are emitted as two separate helper calls, not as an
   atomic exchange. */
static inline void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx);

    r_asi = gen_get_asi(dc, insn, addr);
    r_size = tcg_const_i32(tcg_ctx, 4);   /* word access */
    r_sign = tcg_const_i32(tcg_ctx, 0);   /* zero-extend */
    gen_helper_ld_asi(tcg_ctx, t64, tcg_ctx->cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(tcg_ctx, r_sign);
    /* r_asi/r_size are reused for the store before being freed. */
    gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(tcg_ctx, r_size);
    tcg_temp_free_i32(tcg_ctx, r_asi);
    tcg_gen_trunc_i64_tl(tcg_ctx, dst, t64);
    tcg_temp_free_i64(tcg_ctx, t64);
}
2184
/* Emit LDDA (load doubleword to register pair) from an alternate
   address space; the helper receives @rd and writes the register pair
   itself. */
static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 r_asi, r_rd;

    r_asi = gen_get_asi(dc, insn, addr);
    r_rd = tcg_const_i32(tcg_ctx, rd);
    gen_helper_ldda_asi(tcg_ctx, tcg_ctx->cpu_env, addr, r_asi, r_rd);
    tcg_temp_free_i32(tcg_ctx, r_rd);
    tcg_temp_free_i32(tcg_ctx, r_asi);
}
2197
/* Emit STDA (store doubleword from register pair) to an alternate
   address space: concatenate rd+1 (low) and @hi into a 64-bit value
   and store it with one 8-byte helper call. */
static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx);

    tcg_gen_concat_tl_i64(tcg_ctx, t64, lo, hi);
    r_asi = gen_get_asi(dc, insn, addr);
    r_size = tcg_const_i32(tcg_ctx, 8);
    gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(tcg_ctx, r_size);
    tcg_temp_free_i32(tcg_ctx, r_asi);
    tcg_temp_free_i64(tcg_ctx, t64);
}
2214
/* Emit CASXA (64-bit compare-and-swap in an alternate address space):
   dst = casx(addr, old=rd, new=val2), result written back to rd. */
static inline void gen_casx_asi(DisasContext *dc, TCGv addr,
                                TCGv val2, int insn, int rd)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
    TCGv_i32 r_asi = gen_get_asi(dc, insn, addr);

    gen_helper_casx_asi(tcg_ctx, dst, tcg_ctx->cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(tcg_ctx, r_asi);
    gen_store_gpr(dc, rd, dst);
}
2227
2228 #elif !defined(CONFIG_USER_ONLY)
2229
/* sparc32 system-mode variant: load @size bytes (sign-extended if
   @sign) from @addr in the ASI encoded in the instruction into @dst.
   The helper returns a 64-bit value which is truncated to target width. */
static inline void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx);

    /* sparc32 has no %asi register form; the ASI is always immediate. */
    r_asi = tcg_const_i32(tcg_ctx, GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(tcg_ctx, size);
    r_sign = tcg_const_i32(tcg_ctx, sign);
    gen_helper_ld_asi(tcg_ctx, t64, tcg_ctx->cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(tcg_ctx, r_sign);
    tcg_temp_free_i32(tcg_ctx, r_size);
    tcg_temp_free_i32(tcg_ctx, r_asi);
    tcg_gen_trunc_i64_tl(tcg_ctx, dst, t64);
    tcg_temp_free_i64(tcg_ctx, t64);
}
2247
/* sparc32 system-mode variant: store @size bytes of @src to @addr in
   the ASI encoded in the instruction (value widened to 64 bits for the
   helper's argument type). */
static inline void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, int insn, int size)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 r_asi, r_size;
    TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx);

    tcg_gen_extu_tl_i64(tcg_ctx, t64, src);
    r_asi = tcg_const_i32(tcg_ctx, GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(tcg_ctx, size);
    gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(tcg_ctx, r_size);
    tcg_temp_free_i32(tcg_ctx, r_asi);
    tcg_temp_free_i64(tcg_ctx, t64);
}
2262
/* sparc32 system-mode SWAP in an alternate address space: load the old
   32-bit word from @addr into @dst and store @src to the same address.
   Emitted as separate load/store helper calls, not an atomic exchange. */
static inline void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 r_val, t64;

    r_asi = tcg_const_i32(tcg_ctx, GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(tcg_ctx, 4);   /* word access */
    r_sign = tcg_const_i32(tcg_ctx, 0);   /* zero-extend */
    t64 = tcg_temp_new_i64(tcg_ctx);
    gen_helper_ld_asi(tcg_ctx, t64, tcg_ctx->cpu_env, addr, r_asi, r_size, r_sign);
    /* Fix: r_sign is a TCGv_i32, so free it with the _i32 variant.  The
       generic tcg_temp_free frees a TCGv and is only coincidentally
       correct when the target word size is 32 bits; every sibling
       function here uses tcg_temp_free_i32. */
    tcg_temp_free_i32(tcg_ctx, r_sign);
    r_val = tcg_temp_new_i64(tcg_ctx);
    tcg_gen_extu_tl_i64(tcg_ctx, r_val, src);
    gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i64(tcg_ctx, r_val);
    tcg_temp_free_i32(tcg_ctx, r_size);
    tcg_temp_free_i32(tcg_ctx, r_asi);
    tcg_gen_trunc_i64_tl(tcg_ctx, dst, t64);
    tcg_temp_free_i64(tcg_ctx, t64);
}
2284
/* sparc32 system-mode LDDA: load one 64-bit value from @addr and split
   it across the register pair — low word into rd+1, high word into rd
   (via @hi). */
static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv t;
    TCGv_i64 t64;

    r_asi = tcg_const_i32(tcg_ctx, GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(tcg_ctx, 8);
    r_sign = tcg_const_i32(tcg_ctx, 0);
    t64 = tcg_temp_new_i64(tcg_ctx);
    gen_helper_ld_asi(tcg_ctx, t64, tcg_ctx->cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(tcg_ctx, r_sign);
    tcg_temp_free_i32(tcg_ctx, r_size);
    tcg_temp_free_i32(tcg_ctx, r_asi);

    /* Low 32 bits go to the odd register of the pair (rd + 1). */
    t = gen_dest_gpr(dc, rd + 1);
    tcg_gen_trunc_i64_tl(tcg_ctx, t, t64);
    gen_store_gpr(dc, rd + 1, t);

    /* High 32 bits go to the even register (rd). */
    tcg_gen_shri_i64(tcg_ctx, t64, t64, 32);
    tcg_gen_trunc_i64_tl(tcg_ctx, hi, t64);
    tcg_temp_free_i64(tcg_ctx, t64);
    gen_store_gpr(dc, rd, hi);
}
2311
/* sparc32 system-mode STDA: concatenate rd+1 (low) and @hi (high) into
   a 64-bit value and store it to @addr with one 8-byte helper call. */
static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx);

    tcg_gen_concat_tl_i64(tcg_ctx, t64, lo, hi);
    r_asi = tcg_const_i32(tcg_ctx, GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(tcg_ctx, 8);
    gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(tcg_ctx, r_size);
    tcg_temp_free_i32(tcg_ctx, r_asi);
    tcg_temp_free_i64(tcg_ctx, t64);
}
2328 #endif
2329
2330 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
/* Emit CASA (32-bit compare-and-swap in an alternate address space):
   dst = cas(addr, old=rd, new=val2), result written back to rd.
   The ASI comes from gen_get_asi on sparc64, or directly from the
   instruction field on sparc32. */
static inline void gen_cas_asi(DisasContext *dc, TCGv addr,
                               TCGv val2, int insn, int rd)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
#ifdef TARGET_SPARC64
    TCGv_i32 r_asi = gen_get_asi(dc, insn, addr);
#else
    TCGv_i32 r_asi = tcg_const_i32(tcg_ctx, GET_FIELD(insn, 19, 26));
#endif

    gen_helper_cas_asi(tcg_ctx, dst, tcg_ctx->cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(tcg_ctx, r_asi);
    gen_store_gpr(dc, rd, dst);
}
2347
/* Emit LDSTUBA: load the byte at @addr into @dst, then store 0xff to
   the same address.  Emitted as a separate load and store, not an
   atomic test-and-set. */
static inline void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i64 r_val;
    TCGv_i32 r_asi, r_size;

    /* 1-byte zero-extended load of the old value. */
    gen_ld_asi(dc, dst, addr, insn, 1, 0);

    r_val = tcg_const_i64(tcg_ctx, 0xffULL);
    r_asi = tcg_const_i32(tcg_ctx, GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(tcg_ctx, 1);
    gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i32(tcg_ctx, r_size);
    tcg_temp_free_i32(tcg_ctx, r_asi);
    tcg_temp_free_i64(tcg_ctx, r_val);
}
2364 #endif
2365
get_src1(DisasContext * dc,unsigned int insn)2366 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2367 {
2368 unsigned int rs1 = GET_FIELD(insn, 13, 17);
2369 return gen_load_gpr(dc, rs1);
2370 }
2371
get_src2(DisasContext * dc,unsigned int insn)2372 static TCGv get_src2(DisasContext *dc, unsigned int insn)
2373 {
2374 TCGContext *tcg_ctx = dc->uc->tcg_ctx;
2375 if (IS_IMM) { /* immediate */
2376 target_long simm = GET_FIELDs(insn, 19, 31);
2377 TCGv t = get_temp_tl(dc);
2378 tcg_gen_movi_tl(tcg_ctx, t, simm);
2379 return t;
2380 } else { /* register */
2381 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2382 return gen_load_gpr(dc, rs2);
2383 }
2384 }
2385
2386 #ifdef TARGET_SPARC64
/* Emit FMOVScc: conditionally move single-precision FP register rs to
   rd based on the comparison described by @cmp. */
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 c32, zero, dst, s1, s2;

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the later. */
    c32 = tcg_temp_new_i32(tcg_ctx);
    if (cmp->is_bool) {
        /* Comparison already evaluated to 0/1 in c1; just narrow it. */
        tcg_gen_trunc_i64_i32(tcg_ctx, c32, cmp->c1);
    } else {
        /* Evaluate the condition into a 64-bit boolean, then narrow. */
        TCGv_i64 c64 = tcg_temp_new_i64(tcg_ctx);
        tcg_gen_setcond_i64(tcg_ctx, cmp->cond, c64, cmp->c1, cmp->c2);
        tcg_gen_trunc_i64_i32(tcg_ctx, c32, c64);
        tcg_temp_free_i64(tcg_ctx, c64);
    }

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = gen_dest_fpr_F(dc);
    zero = tcg_const_i32(tcg_ctx, 0);

    /* dst = (c32 != 0) ? s1 : s2 — i.e. keep rd when condition is false. */
    tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, dst, c32, zero, s1, s2);

    tcg_temp_free_i32(tcg_ctx, c32);
    tcg_temp_free_i32(tcg_ctx, zero);
    gen_store_fpr_F(dc, rd, dst);
}
2416
/* Emit FMOVDcc: conditionally move double-precision FP register rs to
   rd; a single 64-bit movcond keeps rd's value when the condition is
   false. */
static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
    tcg_gen_movcond_i64(tcg_ctx, cmp->cond, dst, cmp->c1, cmp->c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
}
2426
/* Emit FMOVQcc: conditionally move a quad FP register as two 64-bit
   movconds over the backing cpu_fpr halves, then mark rd dirty. */
static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);

    /* Each quad register occupies two consecutive cpu_fpr i64 slots. */
    tcg_gen_movcond_i64(tcg_ctx, cmp->cond, tcg_ctx->cpu_fpr[qd / 2], cmp->c1, cmp->c2,
                        tcg_ctx->cpu_fpr[qs / 2], tcg_ctx->cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(tcg_ctx, cmp->cond, tcg_ctx->cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
                        tcg_ctx->cpu_fpr[qs / 2 + 1], tcg_ctx->cpu_fpr[qd / 2 + 1]);

    gen_update_fprs_dirty(dc, qd);
}
2440
/* Compute a pointer to the current trap-level state:
   r_tsptr = &env->ts[env->tl & MAXTL_MASK]. */
static inline void gen_load_trap_state_at_tl(DisasContext *dc, TCGv_ptr r_tsptr, TCGv_ptr cpu_env)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 r_tl = tcg_temp_new_i32(tcg_ctx);

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(tcg_ctx, r_tl, cpu_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(tcg_ctx, r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(tcg_ctx, r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(tcg_ctx, r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        /* Widen the i32 byte offset to pointer width before adding. */
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr(tcg_ctx);
        tcg_gen_ext_i32_ptr(tcg_ctx, r_tl_tmp, r_tl);
        tcg_gen_add_ptr(tcg_ctx, r_tsptr, r_tsptr, r_tl_tmp);
        tcg_temp_free_ptr(tcg_ctx, r_tl_tmp);
    }

    tcg_temp_free_i32(tcg_ctx, r_tl);
}
2466
/* Emit code for the VIS EDGE{8,16,32}[L][cc] instructions: compute the
   edge mask for a partial store of @width-bit elements given addresses
   s1/s2, optionally setting the condition codes from s1 - s2.
   NOTE: s1 and s2 are modified in place (masked with the address mask)
   as part of the final equality test. */
static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
                     int width, bool cc, bool left)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv lo1, lo2, t1, t2;
    uint64_t amask, tabl, tabr;
    int shift, imask, omask;

    if (cc) {
        /* The "cc" forms also set the integer condition codes as for
           subcc: record s1 - s2 in the lazy-cc state. */
        tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, s1);
        tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src2, s2);
        tcg_gen_sub_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, s1, s2);
        tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }

    /* Theory of operation: there are two tables, left and right (not to
       be confused with the left and right versions of the opcode). These
       are indexed by the low 3 bits of the inputs. To make things "easy",
       these tables are loaded into two constants, TABL and TABR below.
       The operation index = (input & imask) << shift calculates the index
       into the constant, while val = (table >> index) & omask calculates
       the value we're looking for. */
    switch (width) {
    case 8:
        imask = 0x7;
        shift = 3;
        omask = 0xff;
        if (left) {
            tabl = 0x80c0e0f0f8fcfeffULL;
            tabr = 0xff7f3f1f0f070301ULL;
        } else {
            tabl = 0x0103070f1f3f7fffULL;
            tabr = 0xfffefcf8f0e0c080ULL;
        }
        break;
    case 16:
        imask = 0x6;
        shift = 1;
        omask = 0xf;
        if (left) {
            tabl = 0x8cef;
            tabr = 0xf731;
        } else {
            tabl = 0x137f;
            tabr = 0xfec8;
        }
        break;
    case 32:
        imask = 0x4;
        shift = 0;
        omask = 0x3;
        if (left) {
            tabl = (2 << 2) | 3;
            tabr = (3 << 2) | 1;
        } else {
            tabl = (1 << 2) | 3;
            tabr = (3 << 2) | 2;
        }
        break;
    default:
        abort();
    }

    /* lo1/lo2 become the table indices derived from the low bits. */
    lo1 = tcg_temp_new(tcg_ctx);
    lo2 = tcg_temp_new(tcg_ctx);
    tcg_gen_andi_tl(tcg_ctx, lo1, s1, imask);
    tcg_gen_andi_tl(tcg_ctx, lo2, s2, imask);
    tcg_gen_shli_tl(tcg_ctx, lo1, lo1, shift);
    tcg_gen_shli_tl(tcg_ctx, lo2, lo2, shift);

    /* Look up both masks: dst = left-table entry, lo2 = right-table entry. */
    t1 = tcg_const_tl(tcg_ctx, tabl);
    t2 = tcg_const_tl(tcg_ctx, tabr);
    tcg_gen_shr_tl(tcg_ctx, lo1, t1, lo1);
    tcg_gen_shr_tl(tcg_ctx, lo2, t2, lo2);
    tcg_gen_andi_tl(tcg_ctx, dst, lo1, omask);
    tcg_gen_andi_tl(tcg_ctx, lo2, lo2, omask);

    /* Address-compare mask: ignore the low 3 bits, and on sparc64 with
       32-bit address masking, the high 32 bits too. */
    amask = -8;
    if (AM_CHECK(dc)) {
        amask &= 0xffffffffULL;
    }
    tcg_gen_andi_tl(tcg_ctx, s1, s1, amask);
    tcg_gen_andi_tl(tcg_ctx, s2, s2, amask);

    /* We want to compute
        dst = (s1 == s2 ? lo1 : lo1 & lo2).
       We've already done dst = lo1, so this reduces to
        dst &= (s1 == s2 ? -1 : lo2)
       Which we perform by
        lo2 |= -(s1 == s2)
        dst &= lo2
    */
    tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, t1, s1, s2);
    tcg_gen_neg_tl(tcg_ctx, t1, t1);
    tcg_gen_or_tl(tcg_ctx, lo2, lo2, t1);
    tcg_gen_and_tl(tcg_ctx, dst, dst, lo2);

    tcg_temp_free(tcg_ctx, lo1);
    tcg_temp_free(tcg_ctx, lo2);
    tcg_temp_free(tcg_ctx, t1);
    tcg_temp_free(tcg_ctx, t2);
}
2570
/* Emit VIS ALIGNADDR[ESS_LITTLE]: dst = (s1 + s2) & ~7, and deposit the
   low 3 bits of the sum (negated for the "little" form) into GSR's
   alignment field. */
static void gen_alignaddr(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2, bool left)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv tmp = tcg_temp_new(tcg_ctx);

    tcg_gen_add_tl(tcg_ctx, tmp, s1, s2);
    tcg_gen_andi_tl(tcg_ctx, dst, tmp, -8);
    if (left) {
        /* "little" variant stores the negated offset. */
        tcg_gen_neg_tl(tcg_ctx, tmp, tmp);
    }
    /* GSR.align = low 3 bits of the sum. */
    tcg_gen_deposit_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_gsr, *(TCGv *)tcg_ctx->cpu_gsr, tmp, 0, 3);

    tcg_temp_free(tcg_ctx, tmp);
}
2585
/* Emit VIS FALIGNDATA: concatenate s1:s2 and extract 8 bytes starting
   at the byte offset held in GSR's low 3 bits —
   dst = (s1 << (8*align)) | (s2 >> (64 - 8*align)). */
static void gen_faligndata(TCGContext *tcg_ctx, TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
{
    TCGv t1, t2, shift;

    t1 = tcg_temp_new(tcg_ctx);
    t2 = tcg_temp_new(tcg_ctx);
    shift = tcg_temp_new(tcg_ctx);

    /* shift = GSR.align * 8 (bit count). */
    tcg_gen_andi_tl(tcg_ctx, shift, gsr, 7);
    tcg_gen_shli_tl(tcg_ctx, shift, shift, 3);
    tcg_gen_shl_tl(tcg_ctx, t1, s1, shift);

    /* A shift of 64 does not produce 0 in TCG.  Divide this into a
       shift of (up to 63) followed by a constant shift of 1. */
    tcg_gen_xori_tl(tcg_ctx, shift, shift, 63);
    tcg_gen_shr_tl(tcg_ctx, t2, s2, shift);
    tcg_gen_shri_tl(tcg_ctx, t2, t2, 1);

    tcg_gen_or_tl(tcg_ctx, dst, t1, t2);

    tcg_temp_free(tcg_ctx, t1);
    tcg_temp_free(tcg_ctx, t2);
    tcg_temp_free(tcg_ctx, shift);
}
2610 #endif
2611
2612 #define CHECK_IU_FEATURE(dc, FEATURE) \
2613 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2614 goto illegal_insn;
2615 #define CHECK_FPU_FEATURE(dc, FEATURE) \
2616 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2617 goto nfpu_insn;
2618
2619 /* before an instruction, dc->pc must be static */
disas_sparc_insn(DisasContext * dc,unsigned int insn,bool hook_insn)2620 static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_insn)
2621 {
2622 TCGContext *tcg_ctx = dc->uc->tcg_ctx;
2623 unsigned int opc, rs1, rs2, rd;
2624 TCGv cpu_src1, cpu_src2;
2625 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
2626 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
2627 target_long simm;
2628
2629 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
2630 tcg_gen_debug_insn_start(tcg_ctx, dc->pc);
2631 }
2632
2633 // Unicorn: trace this instruction on request
2634 if (hook_insn && HOOK_EXISTS_BOUNDED(dc->uc, UC_HOOK_CODE, dc->pc)) {
2635 gen_uc_tracecode(tcg_ctx, 4, UC_HOOK_CODE_IDX, dc->uc, dc->pc);
2636 // the callback might want to stop emulation immediately
2637 check_exit_request(tcg_ctx);
2638 }
2639
2640 opc = GET_FIELD(insn, 0, 1);
2641 rd = GET_FIELD(insn, 2, 6);
2642
2643 switch (opc) {
2644 case 0: /* branches/sethi */
2645 {
2646 unsigned int xop = GET_FIELD(insn, 7, 9);
2647 int32_t target;
2648 switch (xop) {
2649 #ifdef TARGET_SPARC64
2650 case 0x1: /* V9 BPcc */
2651 {
2652 int cc;
2653
2654 target = GET_FIELD_SP(insn, 0, 18);
2655 target = sign_extend(target, 19);
2656 target <<= 2;
2657 cc = GET_FIELD_SP(insn, 20, 21);
2658 if (cc == 0)
2659 do_branch(dc, target, insn, 0);
2660 else if (cc == 2)
2661 do_branch(dc, target, insn, 1);
2662 else
2663 goto illegal_insn;
2664 goto jmp_insn;
2665 }
2666 case 0x3: /* V9 BPr */
2667 {
2668 target = GET_FIELD_SP(insn, 0, 13) |
2669 (GET_FIELD_SP(insn, 20, 21) << 14);
2670 target = sign_extend(target, 16);
2671 target = (int32_t)((uint32_t)target << 2);
2672 cpu_src1 = get_src1(dc, insn);
2673 do_branch_reg(dc, target, insn, cpu_src1);
2674 goto jmp_insn;
2675 }
2676 case 0x5: /* V9 FBPcc */
2677 {
2678 int cc = GET_FIELD_SP(insn, 20, 21);
2679 if (gen_trap_ifnofpu(dc)) {
2680 goto jmp_insn;
2681 }
2682 target = GET_FIELD_SP(insn, 0, 18);
2683 target = sign_extend(target, 19);
2684 target = (int32_t)((uint32_t)target << 2);
2685 do_fbranch(dc, target, insn, cc);
2686 goto jmp_insn;
2687 }
2688 #else
2689 case 0x7: /* CBN+x */
2690 {
2691 goto ncp_insn;
2692 }
2693 #endif
2694 case 0x2: /* BN+x */
2695 {
2696 target = GET_FIELD(insn, 10, 31);
2697 target = sign_extend(target, 22);
2698 target = (int32_t)((uint32_t)target << 2);
2699 do_branch(dc, target, insn, 0);
2700 goto jmp_insn;
2701 }
2702 case 0x6: /* FBN+x */
2703 {
2704 if (gen_trap_ifnofpu(dc)) {
2705 goto jmp_insn;
2706 }
2707 target = GET_FIELD(insn, 10, 31);
2708 target = sign_extend(target, 22);
2709 target = (int32_t)((uint32_t)target << 2);
2710 do_fbranch(dc, target, insn, 0);
2711 goto jmp_insn;
2712 }
2713 case 0x4: /* SETHI */
2714 /* Special-case %g0 because that's the canonical nop. */
2715 if (rd) {
2716 uint32_t value = GET_FIELD(insn, 10, 31);
2717 TCGv t = gen_dest_gpr(dc, rd);
2718 tcg_gen_movi_tl(tcg_ctx, t, value << 10);
2719 gen_store_gpr(dc, rd, t);
2720 }
2721 break;
2722 case 0x0: /* UNIMPL */
2723 default:
2724 goto illegal_insn;
2725 }
2726 break;
2727 }
2728 break;
2729 case 1: /*CALL*/
2730 {
2731 target_long target = (int)(((unsigned int)(GET_FIELDs(insn, 2, 31))) << 2);
2732 TCGv o7 = gen_dest_gpr(dc, 15);
2733
2734 tcg_gen_movi_tl(tcg_ctx, o7, dc->pc);
2735 gen_store_gpr(dc, 15, o7);
2736 target += dc->pc;
2737 gen_mov_pc_npc(dc);
2738 #ifdef TARGET_SPARC64
2739 if (unlikely(AM_CHECK(dc))) {
2740 target &= 0xffffffffULL;
2741 }
2742 #endif
2743 dc->npc = target;
2744 }
2745 goto jmp_insn;
2746 case 2: /* FPU & Logical Operations */
2747 {
2748 unsigned int xop = GET_FIELD(insn, 7, 12);
2749 TCGv cpu_dst = get_temp_tl(dc);
2750 TCGv cpu_tmp0;
2751
2752 if (xop == 0x3a) { /* generate trap */
2753 int cond = GET_FIELD(insn, 3, 6);
2754 TCGv_i32 trap;
2755 int l1 = -1, mask;
2756
2757 if (cond == 0) {
2758 /* Trap never. */
2759 break;
2760 }
2761
2762 save_state(dc);
2763
2764 if (cond != 8) {
2765 /* Conditional trap. */
2766 DisasCompare cmp;
2767 #ifdef TARGET_SPARC64
2768 /* V9 icc/xcc */
2769 int cc = GET_FIELD_SP(insn, 11, 12);
2770 if (cc == 0) {
2771 gen_compare(dc, &cmp, 0, cond);
2772 } else if (cc == 2) {
2773 gen_compare(dc, &cmp, 1, cond);
2774 } else {
2775 goto illegal_insn;
2776 }
2777 #else
2778 gen_compare(dc, &cmp, 0, cond);
2779 #endif
2780 l1 = gen_new_label(tcg_ctx);
2781 tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(cmp.cond),
2782 cmp.c1, cmp.c2, l1);
2783 free_compare(tcg_ctx, &cmp);
2784 }
2785
2786 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2787 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2788
2789 /* Don't use the normal temporaries, as they may well have
2790 gone out of scope with the branch above. While we're
2791 doing that we might as well pre-truncate to 32-bit. */
2792 trap = tcg_temp_new_i32(tcg_ctx);
2793
2794 rs1 = GET_FIELD_SP(insn, 14, 18);
2795 if (IS_IMM) {
2796 rs2 = GET_FIELD_SP(insn, 0, 6);
2797 if (rs1 == 0) {
2798 tcg_gen_movi_i32(tcg_ctx, trap, (rs2 & mask) + TT_TRAP);
2799 /* Signal that the trap value is fully constant. */
2800 mask = 0;
2801 } else {
2802 TCGv t1 = gen_load_gpr(dc, rs1);
2803 tcg_gen_trunc_tl_i32(tcg_ctx, trap, t1);
2804 tcg_gen_addi_i32(tcg_ctx, trap, trap, rs2);
2805 }
2806 } else {
2807 TCGv t1, t2;
2808 rs2 = GET_FIELD_SP(insn, 0, 4);
2809 t1 = gen_load_gpr(dc, rs1);
2810 t2 = gen_load_gpr(dc, rs2);
2811 tcg_gen_add_tl(tcg_ctx, t1, t1, t2);
2812 tcg_gen_trunc_tl_i32(tcg_ctx, trap, t1);
2813 }
2814 if (mask != 0) {
2815 tcg_gen_andi_i32(tcg_ctx, trap, trap, mask);
2816 tcg_gen_addi_i32(tcg_ctx, trap, trap, TT_TRAP);
2817 }
2818
2819 gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, trap);
2820 tcg_temp_free_i32(tcg_ctx, trap);
2821
2822 if (cond == 8) {
2823 /* An unconditional trap ends the TB. */
2824 dc->is_br = 1;
2825 goto jmp_insn;
2826 } else {
2827 /* A conditional trap falls through to the next insn. */
2828 gen_set_label(tcg_ctx, l1);
2829 break;
2830 }
2831 } else if (xop == 0x28) {
2832 rs1 = GET_FIELD(insn, 13, 17);
2833 switch(rs1) {
2834 case 0: /* rdy */
2835 #ifndef TARGET_SPARC64
2836 /* undefined in the SPARCv8 manual, rdy on the microSPARC II */
2837 case 0x01: case 0x02: case 0x03: case 0x04: case 0x05: case 0x06: case 0x07:
2838 case 0x08: case 0x09: case 0x0a: case 0x0b: case 0x0c: case 0x0d: case 0x0e:
2839 /* stbar in the SPARCv8 manual, rdy on the microSPARC II */
2840 case 0x0f:
2841 /* implementation-dependent in the SPARCv8 manual, rdy on the microSPARC II */
2842 case 0x10: case 0x11: case 0x12: case 0x13: case 0x14: case 0x15: case 0x16: case 0x17:
2843 case 0x18: case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d: case 0x1e: case 0x1f:
2844 /* Read Asr17 */
2845 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
2846 TCGv t = gen_dest_gpr(dc, rd);
2847 /* Read Asr17 for a Leon3 monoprocessor */
2848 tcg_gen_movi_tl(tcg_ctx, t, (1 << 8) | (dc->def->nwindows - 1));
2849 gen_store_gpr(dc, rd, t);
2850 break;
2851 }
2852 #endif
2853 gen_store_gpr(dc, rd, *(TCGv *)tcg_ctx->cpu_y);
2854 break;
2855 #ifdef TARGET_SPARC64
2856 case 0x2: /* V9 rdccr */
2857 update_psr(dc);
2858 gen_helper_rdccr(tcg_ctx, cpu_dst, tcg_ctx->cpu_env);
2859 gen_store_gpr(dc, rd, cpu_dst);
2860 break;
2861 case 0x3: /* V9 rdasi */
2862 tcg_gen_ext_i32_tl(tcg_ctx, cpu_dst, tcg_ctx->cpu_asi);
2863 gen_store_gpr(dc, rd, cpu_dst);
2864 break;
2865 case 0x4: /* V9 rdtick */
2866 {
2867 TCGv_ptr r_tickptr;
2868
2869 r_tickptr = tcg_temp_new_ptr(tcg_ctx);
2870 tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env,
2871 offsetof(CPUSPARCState, tick));
2872 gen_helper_tick_get_count(tcg_ctx, cpu_dst, r_tickptr);
2873 tcg_temp_free_ptr(tcg_ctx, r_tickptr);
2874 gen_store_gpr(dc, rd, cpu_dst);
2875 }
2876 break;
2877 case 0x5: /* V9 rdpc */
2878 {
2879 TCGv t = gen_dest_gpr(dc, rd);
2880 if (unlikely(AM_CHECK(dc))) {
2881 tcg_gen_movi_tl(tcg_ctx, t, dc->pc & 0xffffffffULL);
2882 } else {
2883 tcg_gen_movi_tl(tcg_ctx, t, dc->pc);
2884 }
2885 gen_store_gpr(dc, rd, t);
2886 }
2887 break;
2888 case 0x6: /* V9 rdfprs */
2889 tcg_gen_ext_i32_tl(tcg_ctx, cpu_dst, tcg_ctx->cpu_fprs);
2890 gen_store_gpr(dc, rd, cpu_dst);
2891 break;
2892 case 0xf: /* V9 membar */
2893 break; /* no effect */
2894 case 0x13: /* Graphics Status */
2895 if (gen_trap_ifnofpu(dc)) {
2896 goto jmp_insn;
2897 }
2898 gen_store_gpr(dc, rd, *(TCGv *)tcg_ctx->cpu_gsr);
2899 break;
2900 case 0x16: /* Softint */
2901 tcg_gen_ext_i32_tl(tcg_ctx, cpu_dst, tcg_ctx->cpu_softint);
2902 gen_store_gpr(dc, rd, cpu_dst);
2903 break;
2904 case 0x17: /* Tick compare */
2905 gen_store_gpr(dc, rd, *(TCGv *)tcg_ctx->cpu_tick_cmpr);
2906 break;
2907 case 0x18: /* System tick */
2908 {
2909 TCGv_ptr r_tickptr;
2910
2911 r_tickptr = tcg_temp_new_ptr(tcg_ctx);
2912 tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env,
2913 offsetof(CPUSPARCState, stick));
2914 gen_helper_tick_get_count(tcg_ctx, cpu_dst, r_tickptr);
2915 tcg_temp_free_ptr(tcg_ctx, r_tickptr);
2916 gen_store_gpr(dc, rd, cpu_dst);
2917 }
2918 break;
2919 case 0x19: /* System tick compare */
2920 gen_store_gpr(dc, rd, *(TCGv *)tcg_ctx->cpu_stick_cmpr);
2921 break;
2922 case 0x10: /* Performance Control */
2923 case 0x11: /* Performance Instrumentation Counter */
2924 case 0x12: /* Dispatch Control */
2925 case 0x14: /* Softint set, WO */
2926 case 0x15: /* Softint clear, WO */
2927 #endif
2928 default:
2929 goto illegal_insn;
2930 }
#if !defined(CONFIG_USER_ONLY)
            } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
#ifndef TARGET_SPARC64
                /* rdpsr is privileged on SPARCv8. */
                if (!supervisor(dc)) {
                    goto priv_insn;
                }
                /* Fold the lazily-evaluated condition codes into the PSR
                   image before the helper reads it. */
                update_psr(dc);
                gen_helper_rdpsr(tcg_ctx, cpu_dst, tcg_ctx->cpu_env);
#else
                /* V9: this opcode is the UA2005 rdhpr, hypervisor-only. */
                CHECK_IU_FEATURE(dc, HYPV);
                if (!hypervisor(dc))
                    goto priv_insn;
                rs1 = GET_FIELD(insn, 13, 17);
                switch (rs1) {
                case 0: // hpstate -- not implemented, reads as nop
                    // gen_op_rdhpstate();
                    break;
                case 1: // htstate -- not implemented, reads as nop
                    // gen_op_rdhtstate();
                    break;
                case 3: // hintp
                    tcg_gen_mov_tl(tcg_ctx, cpu_dst, *(TCGv *)tcg_ctx->cpu_hintp);
                    break;
                case 5: // htba
                    tcg_gen_mov_tl(tcg_ctx, cpu_dst, *(TCGv *)tcg_ctx->cpu_htba);
                    break;
                case 6: // hver
                    tcg_gen_mov_tl(tcg_ctx, cpu_dst, *(TCGv *)tcg_ctx->cpu_hver);
                    break;
                case 31: // hstick_cmpr
                    tcg_gen_mov_tl(tcg_ctx, cpu_dst, *(TCGv *)tcg_ctx->cpu_hstick_cmpr);
                    break;
                default:
                    goto illegal_insn;
                }
#endif
                /* Common tail: write the value read above into rd. */
                gen_store_gpr(dc, rd, cpu_dst);
                break;
2969 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
2970 if (!supervisor(dc)) {
2971 goto priv_insn;
2972 }
2973 cpu_tmp0 = get_temp_tl(dc);
2974 #ifdef TARGET_SPARC64
2975 rs1 = GET_FIELD(insn, 13, 17);
2976 switch (rs1) {
2977 case 0: // tpc
2978 {
2979 TCGv_ptr r_tsptr;
2980
2981 r_tsptr = tcg_temp_new_ptr(tcg_ctx);
2982 gen_load_trap_state_at_tl(dc, r_tsptr, tcg_ctx->cpu_env);
2983 tcg_gen_ld_tl(tcg_ctx, cpu_tmp0, r_tsptr,
2984 offsetof(trap_state, tpc));
2985 tcg_temp_free_ptr(tcg_ctx, r_tsptr);
2986 }
2987 break;
2988 case 1: // tnpc
2989 {
2990 TCGv_ptr r_tsptr;
2991
2992 r_tsptr = tcg_temp_new_ptr(tcg_ctx);
2993 gen_load_trap_state_at_tl(dc, r_tsptr, tcg_ctx->cpu_env);
2994 tcg_gen_ld_tl(tcg_ctx, cpu_tmp0, r_tsptr,
2995 offsetof(trap_state, tnpc));
2996 tcg_temp_free_ptr(tcg_ctx, r_tsptr);
2997 }
2998 break;
2999 case 2: // tstate
3000 {
3001 TCGv_ptr r_tsptr;
3002
3003 r_tsptr = tcg_temp_new_ptr(tcg_ctx);
3004 gen_load_trap_state_at_tl(dc, r_tsptr, tcg_ctx->cpu_env);
3005 tcg_gen_ld_tl(tcg_ctx, cpu_tmp0, r_tsptr,
3006 offsetof(trap_state, tstate));
3007 tcg_temp_free_ptr(tcg_ctx, r_tsptr);
3008 }
3009 break;
3010 case 3: // tt
3011 {
3012 TCGv_ptr r_tsptr = tcg_temp_new_ptr(tcg_ctx);
3013
3014 gen_load_trap_state_at_tl(dc, r_tsptr, tcg_ctx->cpu_env);
3015 tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, r_tsptr,
3016 offsetof(trap_state, tt));
3017 tcg_temp_free_ptr(tcg_ctx, r_tsptr);
3018 }
3019 break;
3020 case 4: // tick
3021 {
3022 TCGv_ptr r_tickptr;
3023
3024 r_tickptr = tcg_temp_new_ptr(tcg_ctx);
3025 tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env,
3026 offsetof(CPUSPARCState, tick));
3027 gen_helper_tick_get_count(tcg_ctx, cpu_tmp0, r_tickptr);
3028 tcg_temp_free_ptr(tcg_ctx, r_tickptr);
3029 }
3030 break;
3031 case 5: // tba
3032 tcg_gen_mov_tl(tcg_ctx, cpu_tmp0, *(TCGv *)tcg_ctx->cpu_tbr);
3033 break;
3034 case 6: // pstate
3035 tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env,
3036 offsetof(CPUSPARCState, pstate));
3037 break;
3038 case 7: // tl
3039 tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env,
3040 offsetof(CPUSPARCState, tl));
3041 break;
3042 case 8: // pil
3043 tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env,
3044 offsetof(CPUSPARCState, psrpil));
3045 break;
3046 case 9: // cwp
3047 gen_helper_rdcwp(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env);
3048 break;
3049 case 10: // cansave
3050 tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env,
3051 offsetof(CPUSPARCState, cansave));
3052 break;
3053 case 11: // canrestore
3054 tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env,
3055 offsetof(CPUSPARCState, canrestore));
3056 break;
3057 case 12: // cleanwin
3058 tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env,
3059 offsetof(CPUSPARCState, cleanwin));
3060 break;
3061 case 13: // otherwin
3062 tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env,
3063 offsetof(CPUSPARCState, otherwin));
3064 break;
3065 case 14: // wstate
3066 tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env,
3067 offsetof(CPUSPARCState, wstate));
3068 break;
3069 case 16: // UA2005 gl
3070 CHECK_IU_FEATURE(dc, GL);
3071 tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env,
3072 offsetof(CPUSPARCState, gl));
3073 break;
3074 case 26: // UA2005 strand status
3075 CHECK_IU_FEATURE(dc, HYPV);
3076 if (!hypervisor(dc))
3077 goto priv_insn;
3078 tcg_gen_mov_tl(tcg_ctx, cpu_tmp0, *(TCGv *)tcg_ctx->cpu_ssr);
3079 break;
3080 case 31: // ver
3081 tcg_gen_mov_tl(tcg_ctx, cpu_tmp0, *(TCGv *)tcg_ctx->cpu_ver);
3082 break;
3083 case 15: // fq
3084 default:
3085 goto illegal_insn;
3086 }
3087 #else
3088 tcg_gen_ext_i32_tl(tcg_ctx, cpu_tmp0, *(TCGv *)tcg_ctx->cpu_wim);
3089 #endif
3090 gen_store_gpr(dc, rd, cpu_tmp0);
3091 break;
            } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
#ifdef TARGET_SPARC64
                /* Sync pc/npc to env first: the flushw helper may raise a
                   window-spill trap (NOTE(review): presumed from save_state
                   usage — confirm against helper_flushw). */
                save_state(dc);
                gen_helper_flushw(tcg_ctx, tcg_ctx->cpu_env);
#else
                /* SPARCv8: read the privileged Trap Base Register. */
                if (!supervisor(dc))
                    goto priv_insn;
                gen_store_gpr(dc, rd, *(TCGv *)tcg_ctx->cpu_tbr);
#endif
                break;
#endif
3103 } else if (xop == 0x34) { /* FPU Operations */
3104 if (gen_trap_ifnofpu(dc)) {
3105 goto jmp_insn;
3106 }
3107 gen_op_clear_ieee_excp_and_FTT(dc);
3108 rs1 = GET_FIELD(insn, 13, 17);
3109 rs2 = GET_FIELD(insn, 27, 31);
3110 xop = GET_FIELD(insn, 18, 26);
3111 save_state(dc);
3112 switch (xop) {
3113 case 0x1: /* fmovs */
3114 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
3115 gen_store_fpr_F(dc, rd, cpu_src1_32);
3116 break;
3117 case 0x5: /* fnegs */
3118 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
3119 break;
3120 case 0x9: /* fabss */
3121 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
3122 break;
3123 case 0x29: /* fsqrts */
3124 CHECK_FPU_FEATURE(dc, FSQRT);
3125 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
3126 break;
3127 case 0x2a: /* fsqrtd */
3128 CHECK_FPU_FEATURE(dc, FSQRT);
3129 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
3130 break;
3131 case 0x2b: /* fsqrtq */
3132 CHECK_FPU_FEATURE(dc, FLOAT128);
3133 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
3134 break;
3135 case 0x41: /* fadds */
3136 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
3137 break;
3138 case 0x42: /* faddd */
3139 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
3140 break;
3141 case 0x43: /* faddq */
3142 CHECK_FPU_FEATURE(dc, FLOAT128);
3143 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
3144 break;
3145 case 0x45: /* fsubs */
3146 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
3147 break;
3148 case 0x46: /* fsubd */
3149 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3150 break;
3151 case 0x47: /* fsubq */
3152 CHECK_FPU_FEATURE(dc, FLOAT128);
3153 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3154 break;
3155 case 0x49: /* fmuls */
3156 CHECK_FPU_FEATURE(dc, FMUL);
3157 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3158 break;
3159 case 0x4a: /* fmuld */
3160 CHECK_FPU_FEATURE(dc, FMUL);
3161 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3162 break;
3163 case 0x4b: /* fmulq */
3164 CHECK_FPU_FEATURE(dc, FLOAT128);
3165 CHECK_FPU_FEATURE(dc, FMUL);
3166 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3167 break;
3168 case 0x4d: /* fdivs */
3169 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3170 break;
3171 case 0x4e: /* fdivd */
3172 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3173 break;
3174 case 0x4f: /* fdivq */
3175 CHECK_FPU_FEATURE(dc, FLOAT128);
3176 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3177 break;
3178 case 0x69: /* fsmuld */
3179 CHECK_FPU_FEATURE(dc, FSMULD);
3180 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3181 break;
3182 case 0x6e: /* fdmulq */
3183 CHECK_FPU_FEATURE(dc, FLOAT128);
3184 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3185 break;
3186 case 0xc4: /* fitos */
3187 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3188 break;
3189 case 0xc6: /* fdtos */
3190 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3191 break;
3192 case 0xc7: /* fqtos */
3193 CHECK_FPU_FEATURE(dc, FLOAT128);
3194 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3195 break;
3196 case 0xc8: /* fitod */
3197 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3198 break;
3199 case 0xc9: /* fstod */
3200 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3201 break;
3202 case 0xcb: /* fqtod */
3203 CHECK_FPU_FEATURE(dc, FLOAT128);
3204 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3205 break;
3206 case 0xcc: /* fitoq */
3207 CHECK_FPU_FEATURE(dc, FLOAT128);
3208 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3209 break;
3210 case 0xcd: /* fstoq */
3211 CHECK_FPU_FEATURE(dc, FLOAT128);
3212 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3213 break;
3214 case 0xce: /* fdtoq */
3215 CHECK_FPU_FEATURE(dc, FLOAT128);
3216 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3217 break;
3218 case 0xd1: /* fstoi */
3219 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3220 break;
3221 case 0xd2: /* fdtoi */
3222 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3223 break;
3224 case 0xd3: /* fqtoi */
3225 CHECK_FPU_FEATURE(dc, FLOAT128);
3226 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3227 break;
3228 #ifdef TARGET_SPARC64
3229 case 0x2: /* V9 fmovd */
3230 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3231 gen_store_fpr_D(dc, rd, cpu_src1_64);
3232 break;
3233 case 0x3: /* V9 fmovq */
3234 CHECK_FPU_FEATURE(dc, FLOAT128);
3235 gen_move_Q(dc, rd, rs2);
3236 break;
3237 case 0x6: /* V9 fnegd */
3238 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3239 break;
3240 case 0x7: /* V9 fnegq */
3241 CHECK_FPU_FEATURE(dc, FLOAT128);
3242 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3243 break;
3244 case 0xa: /* V9 fabsd */
3245 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3246 break;
3247 case 0xb: /* V9 fabsq */
3248 CHECK_FPU_FEATURE(dc, FLOAT128);
3249 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3250 break;
3251 case 0x81: /* V9 fstox */
3252 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3253 break;
3254 case 0x82: /* V9 fdtox */
3255 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3256 break;
3257 case 0x83: /* V9 fqtox */
3258 CHECK_FPU_FEATURE(dc, FLOAT128);
3259 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3260 break;
3261 case 0x84: /* V9 fxtos */
3262 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3263 break;
3264 case 0x88: /* V9 fxtod */
3265 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3266 break;
3267 case 0x8c: /* V9 fxtoq */
3268 CHECK_FPU_FEATURE(dc, FLOAT128);
3269 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3270 break;
3271 #endif
3272 default:
3273 goto illegal_insn;
3274 }
3275 } else if (xop == 0x35) { /* FPU Operations */
3276 #ifdef TARGET_SPARC64
3277 int cond;
3278 #endif
3279 if (gen_trap_ifnofpu(dc)) {
3280 goto jmp_insn;
3281 }
3282 gen_op_clear_ieee_excp_and_FTT(dc);
3283 rs1 = GET_FIELD(insn, 13, 17);
3284 rs2 = GET_FIELD(insn, 27, 31);
3285 xop = GET_FIELD(insn, 18, 26);
3286 save_state(dc);
3287
3288 #ifdef TARGET_SPARC64
3289 #define FMOVR(sz) \
3290 do { \
3291 DisasCompare cmp; \
3292 cond = GET_FIELD_SP(insn, 10, 12); \
3293 cpu_src1 = get_src1(dc, insn); \
3294 gen_compare_reg(dc, &cmp, cond, cpu_src1); \
3295 gen_fmov##sz(dc, &cmp, rd, rs2); \
3296 free_compare(tcg_ctx, &cmp); \
3297 } while (0)
3298
3299 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3300 FMOVR(s);
3301 break;
3302 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3303 FMOVR(d);
3304 break;
3305 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3306 CHECK_FPU_FEATURE(dc, FLOAT128);
3307 FMOVR(q);
3308 break;
3309 }
3310 #undef FMOVR
3311 #endif
3312 switch (xop) {
3313 #ifdef TARGET_SPARC64
3314 #define FMOVCC(fcc, sz) \
3315 do { \
3316 DisasCompare cmp; \
3317 cond = GET_FIELD_SP(insn, 14, 17); \
3318 gen_fcompare(dc, &cmp, fcc, cond); \
3319 gen_fmov##sz(dc, &cmp, rd, rs2); \
3320 free_compare(tcg_ctx, &cmp); \
3321 } while (0)
3322
3323 case 0x001: /* V9 fmovscc %fcc0 */
3324 FMOVCC(0, s);
3325 break;
3326 case 0x002: /* V9 fmovdcc %fcc0 */
3327 FMOVCC(0, d);
3328 break;
3329 case 0x003: /* V9 fmovqcc %fcc0 */
3330 CHECK_FPU_FEATURE(dc, FLOAT128);
3331 FMOVCC(0, q);
3332 break;
3333 case 0x041: /* V9 fmovscc %fcc1 */
3334 FMOVCC(1, s);
3335 break;
3336 case 0x042: /* V9 fmovdcc %fcc1 */
3337 FMOVCC(1, d);
3338 break;
3339 case 0x043: /* V9 fmovqcc %fcc1 */
3340 CHECK_FPU_FEATURE(dc, FLOAT128);
3341 FMOVCC(1, q);
3342 break;
3343 case 0x081: /* V9 fmovscc %fcc2 */
3344 FMOVCC(2, s);
3345 break;
3346 case 0x082: /* V9 fmovdcc %fcc2 */
3347 FMOVCC(2, d);
3348 break;
3349 case 0x083: /* V9 fmovqcc %fcc2 */
3350 CHECK_FPU_FEATURE(dc, FLOAT128);
3351 FMOVCC(2, q);
3352 break;
3353 case 0x0c1: /* V9 fmovscc %fcc3 */
3354 FMOVCC(3, s);
3355 break;
3356 case 0x0c2: /* V9 fmovdcc %fcc3 */
3357 FMOVCC(3, d);
3358 break;
3359 case 0x0c3: /* V9 fmovqcc %fcc3 */
3360 CHECK_FPU_FEATURE(dc, FLOAT128);
3361 FMOVCC(3, q);
3362 break;
3363 #undef FMOVCC
3364 #define FMOVCC(xcc, sz) \
3365 do { \
3366 DisasCompare cmp; \
3367 cond = GET_FIELD_SP(insn, 14, 17); \
3368 gen_compare(dc, &cmp, xcc, cond); \
3369 gen_fmov##sz(dc, &cmp, rd, rs2); \
3370 free_compare(tcg_ctx, &cmp); \
3371 } while (0)
3372
3373 case 0x101: /* V9 fmovscc %icc */
3374 FMOVCC(0, s);
3375 break;
3376 case 0x102: /* V9 fmovdcc %icc */
3377 FMOVCC(0, d);
3378 break;
3379 case 0x103: /* V9 fmovqcc %icc */
3380 CHECK_FPU_FEATURE(dc, FLOAT128);
3381 FMOVCC(0, q);
3382 break;
3383 case 0x181: /* V9 fmovscc %xcc */
3384 FMOVCC(1, s);
3385 break;
3386 case 0x182: /* V9 fmovdcc %xcc */
3387 FMOVCC(1, d);
3388 break;
3389 case 0x183: /* V9 fmovqcc %xcc */
3390 CHECK_FPU_FEATURE(dc, FLOAT128);
3391 FMOVCC(1, q);
3392 break;
3393 #undef FMOVCC
3394 #endif
3395 case 0x51: /* fcmps, V9 %fcc */
3396 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3397 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3398 gen_op_fcmps(dc, rd & 3, cpu_src1_32, cpu_src2_32);
3399 break;
3400 case 0x52: /* fcmpd, V9 %fcc */
3401 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3402 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3403 gen_op_fcmpd(dc, rd & 3, cpu_src1_64, cpu_src2_64);
3404 break;
3405 case 0x53: /* fcmpq, V9 %fcc */
3406 CHECK_FPU_FEATURE(dc, FLOAT128);
3407 gen_op_load_fpr_QT0(dc, QFPREG(rs1));
3408 gen_op_load_fpr_QT1(dc, QFPREG(rs2));
3409 gen_op_fcmpq(dc, rd & 3);
3410 break;
3411 case 0x55: /* fcmpes, V9 %fcc */
3412 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3413 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3414 gen_op_fcmpes(dc, rd & 3, cpu_src1_32, cpu_src2_32);
3415 break;
3416 case 0x56: /* fcmped, V9 %fcc */
3417 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3418 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3419 gen_op_fcmped(dc, rd & 3, cpu_src1_64, cpu_src2_64);
3420 break;
3421 case 0x57: /* fcmpeq, V9 %fcc */
3422 CHECK_FPU_FEATURE(dc, FLOAT128);
3423 gen_op_load_fpr_QT0(dc, QFPREG(rs1));
3424 gen_op_load_fpr_QT1(dc, QFPREG(rs2));
3425 gen_op_fcmpeq(dc, rd & 3);
3426 break;
3427 default:
3428 goto illegal_insn;
3429 }
            } else if (xop == 0x2) { /* or (also clr/mov synthetic insns) */
                TCGv dst = gen_dest_gpr(dc, rd);
                rs1 = GET_FIELD(insn, 13, 17);
                if (rs1 == 0) {
                    /* clr/mov shortcut : or %g0, x, y -> mov x, y */
                    if (IS_IMM) {       /* immediate */
                        simm = GET_FIELDs(insn, 19, 31);
                        tcg_gen_movi_tl(tcg_ctx, dst, simm);
                        gen_store_gpr(dc, rd, dst);
                    } else {            /* register */
                        rs2 = GET_FIELD(insn, 27, 31);
                        if (rs2 == 0) {
                            /* or %g0, %g0, y -> clr y */
                            tcg_gen_movi_tl(tcg_ctx, dst, 0);
                            gen_store_gpr(dc, rd, dst);
                        } else {
                            /* or %g0, x, y -> mov x, y */
                            cpu_src2 = gen_load_gpr(dc, rs2);
                            gen_store_gpr(dc, rd, cpu_src2);
                        }
                    }
                } else {
                    cpu_src1 = get_src1(dc, insn);
                    if (IS_IMM) {       /* immediate */
                        simm = GET_FIELDs(insn, 19, 31);
                        tcg_gen_ori_tl(tcg_ctx, dst, cpu_src1, simm);
                        gen_store_gpr(dc, rd, dst);
                    } else {            /* register */
                        rs2 = GET_FIELD(insn, 27, 31);
                        if (rs2 == 0) {
                            /* mov shortcut:  or x, %g0, y -> mov x, y */
                            gen_store_gpr(dc, rd, cpu_src1);
                        } else {
                            /* general case: y = x | rs2 */
                            cpu_src2 = gen_load_gpr(dc, rs2);
                            tcg_gen_or_tl(tcg_ctx, dst, cpu_src1, cpu_src2);
                            gen_store_gpr(dc, rd, dst);
                        }
                    }
                }
#ifdef TARGET_SPARC64
            } else if (xop == 0x25) { /* sll, V9 sllx */
                /* Bit 12 of the insn selects the 64-bit (x) form: shift
                   count is taken mod 64 instead of mod 32. */
                cpu_src1 = get_src1(dc, insn);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 20, 31);
                    if (insn & (1 << 12)) {
                        /* sllx: 6-bit shift count */
                        tcg_gen_shli_i64(tcg_ctx, cpu_dst, cpu_src1, simm & 0x3f);
                    } else {
                        /* sll: 5-bit shift count */
                        tcg_gen_shli_i64(tcg_ctx, cpu_dst, cpu_src1, simm & 0x1f);
                    }
                } else {                /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    cpu_tmp0 = get_temp_tl(dc);
                    if (insn & (1 << 12)) {
                        tcg_gen_andi_i64(tcg_ctx, cpu_tmp0, cpu_src2, 0x3f);
                    } else {
                        tcg_gen_andi_i64(tcg_ctx, cpu_tmp0, cpu_src2, 0x1f);
                    }
                    tcg_gen_shl_i64(tcg_ctx, cpu_dst, cpu_src1, cpu_tmp0);
                }
                gen_store_gpr(dc, rd, cpu_dst);
            } else if (xop == 0x26) { /* srl, V9 srlx */
                cpu_src1 = get_src1(dc, insn);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 20, 31);
                    if (insn & (1 << 12)) {
                        tcg_gen_shri_i64(tcg_ctx, cpu_dst, cpu_src1, simm & 0x3f);
                    } else {
                        /* 32-bit srl: zero-extend the low word first, then
                           shift, so high-half bits never leak in. */
                        tcg_gen_andi_i64(tcg_ctx, cpu_dst, cpu_src1, 0xffffffffULL);
                        tcg_gen_shri_i64(tcg_ctx, cpu_dst, cpu_dst, simm & 0x1f);
                    }
                } else {                /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    cpu_tmp0 = get_temp_tl(dc);
                    if (insn & (1 << 12)) {
                        tcg_gen_andi_i64(tcg_ctx, cpu_tmp0, cpu_src2, 0x3f);
                        tcg_gen_shr_i64(tcg_ctx, cpu_dst, cpu_src1, cpu_tmp0);
                    } else {
                        tcg_gen_andi_i64(tcg_ctx, cpu_tmp0, cpu_src2, 0x1f);
                        tcg_gen_andi_i64(tcg_ctx, cpu_dst, cpu_src1, 0xffffffffULL);
                        tcg_gen_shr_i64(tcg_ctx, cpu_dst, cpu_dst, cpu_tmp0);
                    }
                }
                gen_store_gpr(dc, rd, cpu_dst);
            } else if (xop == 0x27) { /* sra, V9 srax */
                cpu_src1 = get_src1(dc, insn);
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 20, 31);
                    if (insn & (1 << 12)) {
                        tcg_gen_sari_i64(tcg_ctx, cpu_dst, cpu_src1, simm & 0x3f);
                    } else {
                        /* 32-bit sra: sign-extend from bit 31 before the
                           arithmetic shift. */
                        tcg_gen_ext32s_i64(tcg_ctx, cpu_dst, cpu_src1);
                        tcg_gen_sari_i64(tcg_ctx, cpu_dst, cpu_dst, simm & 0x1f);
                    }
                } else {                /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    cpu_tmp0 = get_temp_tl(dc);
                    if (insn & (1 << 12)) {
                        tcg_gen_andi_i64(tcg_ctx, cpu_tmp0, cpu_src2, 0x3f);
                        tcg_gen_sar_i64(tcg_ctx, cpu_dst, cpu_src1, cpu_tmp0);
                    } else {
                        tcg_gen_andi_i64(tcg_ctx, cpu_tmp0, cpu_src2, 0x1f);
                        tcg_gen_ext32s_i64(tcg_ctx, cpu_dst, cpu_src1);
                        tcg_gen_sar_i64(tcg_ctx, cpu_dst, cpu_dst, cpu_tmp0);
                    }
                }
                gen_store_gpr(dc, rd, cpu_dst);
#endif
3538 } else if (xop < 0x36) {
3539 if (xop < 0x20) {
3540 cpu_src1 = get_src1(dc, insn);
3541 cpu_src2 = get_src2(dc, insn);
3542 switch (xop & ~0x10) {
3543 case 0x0: /* add */
3544 if (xop & 0x10) {
3545 gen_op_add_cc(dc, cpu_dst, cpu_src1, cpu_src2);
3546 tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_ADD);
3547 dc->cc_op = CC_OP_ADD;
3548 } else {
3549 tcg_gen_add_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2);
3550 }
3551 break;
3552 case 0x1: /* and */
3553 tcg_gen_and_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2);
3554 if (xop & 0x10) {
3555 tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, cpu_dst);
3556 tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC);
3557 dc->cc_op = CC_OP_LOGIC;
3558 }
3559 break;
3560 case 0x2: /* or */
3561 tcg_gen_or_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2);
3562 if (xop & 0x10) {
3563 tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, cpu_dst);
3564 tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC);
3565 dc->cc_op = CC_OP_LOGIC;
3566 }
3567 break;
3568 case 0x3: /* xor */
3569 tcg_gen_xor_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2);
3570 if (xop & 0x10) {
3571 tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, cpu_dst);
3572 tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC);
3573 dc->cc_op = CC_OP_LOGIC;
3574 }
3575 break;
3576 case 0x4: /* sub */
3577 if (xop & 0x10) {
3578 gen_op_sub_cc(dc, cpu_dst, cpu_src1, cpu_src2);
3579 tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_SUB);
3580 dc->cc_op = CC_OP_SUB;
3581 } else {
3582 tcg_gen_sub_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2);
3583 }
3584 break;
3585 case 0x5: /* andn */
3586 tcg_gen_andc_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2);
3587 if (xop & 0x10) {
3588 tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, cpu_dst);
3589 tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC);
3590 dc->cc_op = CC_OP_LOGIC;
3591 }
3592 break;
3593 case 0x6: /* orn */
3594 tcg_gen_orc_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2);
3595 if (xop & 0x10) {
3596 tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, cpu_dst);
3597 tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC);
3598 dc->cc_op = CC_OP_LOGIC;
3599 }
3600 break;
3601 case 0x7: /* xorn */
3602 tcg_gen_eqv_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2);
3603 if (xop & 0x10) {
3604 tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, cpu_dst);
3605 tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC);
3606 dc->cc_op = CC_OP_LOGIC;
3607 }
3608 break;
3609 case 0x8: /* addx, V9 addc */
3610 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3611 (xop & 0x10));
3612 break;
3613 #ifdef TARGET_SPARC64
3614 case 0x9: /* V9 mulx */
3615 tcg_gen_mul_i64(tcg_ctx, cpu_dst, cpu_src1, cpu_src2);
3616 break;
3617 #endif
3618 case 0xa: /* umul */
3619 CHECK_IU_FEATURE(dc, MUL);
3620 gen_op_umul(dc, cpu_dst, cpu_src1, cpu_src2);
3621 if (xop & 0x10) {
3622 tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, cpu_dst);
3623 tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC);
3624 dc->cc_op = CC_OP_LOGIC;
3625 }
3626 break;
3627 case 0xb: /* smul */
3628 CHECK_IU_FEATURE(dc, MUL);
3629 gen_op_smul(dc, cpu_dst, cpu_src1, cpu_src2);
3630 if (xop & 0x10) {
3631 tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, cpu_dst);
3632 tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC);
3633 dc->cc_op = CC_OP_LOGIC;
3634 }
3635 break;
3636 case 0xc: /* subx, V9 subc */
3637 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3638 (xop & 0x10));
3639 break;
3640 #ifdef TARGET_SPARC64
3641 case 0xd: /* V9 udivx */
3642 gen_helper_udivx(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, cpu_src1, cpu_src2);
3643 break;
3644 #endif
3645 case 0xe: /* udiv */
3646 CHECK_IU_FEATURE(dc, DIV);
3647 if (xop & 0x10) {
3648 gen_helper_udiv_cc(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, cpu_src1,
3649 cpu_src2);
3650 dc->cc_op = CC_OP_DIV;
3651 } else {
3652 gen_helper_udiv(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, cpu_src1,
3653 cpu_src2);
3654 }
3655 break;
3656 case 0xf: /* sdiv */
3657 CHECK_IU_FEATURE(dc, DIV);
3658 if (xop & 0x10) {
3659 gen_helper_sdiv_cc(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, cpu_src1,
3660 cpu_src2);
3661 dc->cc_op = CC_OP_DIV;
3662 } else {
3663 gen_helper_sdiv(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, cpu_src1,
3664 cpu_src2);
3665 }
3666 break;
3667 default:
3668 goto illegal_insn;
3669 }
3670 gen_store_gpr(dc, rd, cpu_dst);
            } else {
                /* xop in [0x20, 0x36): tagged arithmetic, mulscc, shifts
                   and write-state-register insns. */
                cpu_src1 = get_src1(dc, insn);
                cpu_src2 = get_src2(dc, insn);
                switch (xop) {
                case 0x20: /* taddcc */
                    gen_op_add_cc(dc, cpu_dst, cpu_src1, cpu_src2);
                    gen_store_gpr(dc, rd, cpu_dst);
                    /* Record the cc op both in the TB state (dc->cc_op) and
                       in the CPU state, for lazy flag evaluation. */
                    tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_TADD);
                    dc->cc_op = CC_OP_TADD;
                    break;
                case 0x21: /* tsubcc */
                    gen_op_sub_cc(dc, cpu_dst, cpu_src1, cpu_src2);
                    gen_store_gpr(dc, rd, cpu_dst);
                    tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_TSUB);
                    dc->cc_op = CC_OP_TSUB;
                    break;
                case 0x22: /* taddcctv */
                    /* Trapping variant: the helper raises a tag-overflow
                       exception itself, so no cpu_cc_op store is emitted
                       here (NOTE(review): presumed handled by the helper —
                       confirm against helper_taddcctv). */
                    gen_helper_taddcctv(tcg_ctx, cpu_dst, tcg_ctx->cpu_env,
                                        cpu_src1, cpu_src2);
                    gen_store_gpr(dc, rd, cpu_dst);
                    dc->cc_op = CC_OP_TADDTV;
                    break;
                case 0x23: /* tsubcctv */
                    gen_helper_tsubcctv(tcg_ctx, cpu_dst, tcg_ctx->cpu_env,
                                        cpu_src1, cpu_src2);
                    gen_store_gpr(dc, rd, cpu_dst);
                    dc->cc_op = CC_OP_TSUBTV;
                    break;
                case 0x24: /* mulscc */
                    /* mulscc reads the live PSR (icc and Y), so force the
                       lazy condition codes to be computed first. */
                    update_psr(dc);
                    gen_op_mulscc(dc, cpu_dst, cpu_src1, cpu_src2);
                    gen_store_gpr(dc, rd, cpu_dst);
                    tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_ADD);
                    dc->cc_op = CC_OP_ADD;
                    break;
3706 #ifndef TARGET_SPARC64
3707 case 0x25: /* sll */
3708 if (IS_IMM) { /* immediate */
3709 simm = GET_FIELDs(insn, 20, 31);
3710 tcg_gen_shli_tl(tcg_ctx, cpu_dst, cpu_src1, simm & 0x1f);
3711 } else { /* register */
3712 cpu_tmp0 = get_temp_tl(dc);
3713 tcg_gen_andi_tl(tcg_ctx, cpu_tmp0, cpu_src2, 0x1f);
3714 tcg_gen_shl_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_tmp0);
3715 }
3716 gen_store_gpr(dc, rd, cpu_dst);
3717 break;
3718 case 0x26: /* srl */
3719 if (IS_IMM) { /* immediate */
3720 simm = GET_FIELDs(insn, 20, 31);
3721 tcg_gen_shri_tl(tcg_ctx, cpu_dst, cpu_src1, simm & 0x1f);
3722 } else { /* register */
3723 cpu_tmp0 = get_temp_tl(dc);
3724 tcg_gen_andi_tl(tcg_ctx, cpu_tmp0, cpu_src2, 0x1f);
3725 tcg_gen_shr_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_tmp0);
3726 }
3727 gen_store_gpr(dc, rd, cpu_dst);
3728 break;
3729 case 0x27: /* sra */
3730 if (IS_IMM) { /* immediate */
3731 simm = GET_FIELDs(insn, 20, 31);
3732 tcg_gen_sari_tl(tcg_ctx, cpu_dst, cpu_src1, simm & 0x1f);
3733 } else { /* register */
3734 cpu_tmp0 = get_temp_tl(dc);
3735 tcg_gen_andi_tl(tcg_ctx, cpu_tmp0, cpu_src2, 0x1f);
3736 tcg_gen_sar_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_tmp0);
3737 }
3738 gen_store_gpr(dc, rd, cpu_dst);
3739 break;
3740 #endif
3741 case 0x30:
3742 {
3743 cpu_tmp0 = get_temp_tl(dc);
3744 switch(rd) {
3745 case 0: /* wry */
3746 tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2);
3747 tcg_gen_andi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_y, cpu_tmp0, 0xffffffff);
3748 break;
3749 #ifndef TARGET_SPARC64
3750 /* undefined in the SPARCv8 manual, nop on the microSPARC II */
3751 case 0x01: case 0x02: case 0x03: case 0x04: case 0x05: case 0x06: case 0x07:
3752 case 0x08: case 0x09: case 0x0a: case 0x0b: case 0x0c: case 0x0d: case 0x0e: case 0x0f:
3753
3754 /* implementation-dependent in the SPARCv8 manual, nop on the microSPARC II */
3755 case 0x10: case 0x11: case 0x12: case 0x13: case 0x14: case 0x15: case 0x16: case 0x17:
3756 case 0x18: case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d: case 0x1e: case 0x1f:
3757 if ((rd == 0x13) && (dc->def->features &
3758 CPU_FEATURE_POWERDOWN)) {
3759 /* LEON3 power-down */
3760 save_state(dc);
3761 gen_helper_power_down(tcg_ctx, tcg_ctx->cpu_env);
3762 }
3763 break;
3764 #else
3765 case 0x2: /* V9 wrccr */
3766 tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2);
3767 gen_helper_wrccr(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0);
3768 tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_FLAGS);
3769 dc->cc_op = CC_OP_FLAGS;
3770 break;
3771 case 0x3: /* V9 wrasi */
3772 tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2);
3773 tcg_gen_andi_tl(tcg_ctx, cpu_tmp0, cpu_tmp0, 0xff);
3774 tcg_gen_trunc_tl_i32(tcg_ctx, tcg_ctx->cpu_asi, cpu_tmp0);
3775 break;
3776 case 0x6: /* V9 wrfprs */
3777 tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2);
3778 tcg_gen_trunc_tl_i32(tcg_ctx, tcg_ctx->cpu_fprs, cpu_tmp0);
3779 save_state(dc);
3780 gen_op_next_insn(dc);
3781 tcg_gen_exit_tb(tcg_ctx, 0);
3782 dc->is_br = 1;
3783 break;
3784 case 0xf: /* V9 sir, nop if user */
3785 #if !defined(CONFIG_USER_ONLY)
3786 if (supervisor(dc)) {
3787 ; // XXX
3788 }
3789 #endif
3790 break;
3791 case 0x13: /* Graphics Status */
3792 if (gen_trap_ifnofpu(dc)) {
3793 goto jmp_insn;
3794 }
3795 tcg_gen_xor_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_gsr, cpu_src1, cpu_src2);
3796 break;
3797 case 0x14: /* Softint set */
3798 if (!supervisor(dc))
3799 goto illegal_insn;
3800 tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2);
3801 gen_helper_set_softint(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0);
3802 break;
3803 case 0x15: /* Softint clear */
3804 if (!supervisor(dc))
3805 goto illegal_insn;
3806 tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2);
3807 gen_helper_clear_softint(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0);
3808 break;
3809 case 0x16: /* Softint write */
3810 if (!supervisor(dc))
3811 goto illegal_insn;
3812 tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2);
3813 gen_helper_write_softint(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0);
3814 break;
3815 case 0x17: /* Tick compare */
3816 #if !defined(CONFIG_USER_ONLY)
3817 if (!supervisor(dc))
3818 goto illegal_insn;
3819 #endif
3820 {
3821 TCGv_ptr r_tickptr;
3822
3823 tcg_gen_xor_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_tick_cmpr, cpu_src1,
3824 cpu_src2);
3825 r_tickptr = tcg_temp_new_ptr(tcg_ctx);
3826 tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env,
3827 offsetof(CPUSPARCState, tick));
3828 gen_helper_tick_set_limit(tcg_ctx, r_tickptr,
3829 *(TCGv *)tcg_ctx->cpu_tick_cmpr);
3830 tcg_temp_free_ptr(tcg_ctx, r_tickptr);
3831 }
3832 break;
3833 case 0x18: /* System tick */
3834 #if !defined(CONFIG_USER_ONLY)
3835 if (!supervisor(dc))
3836 goto illegal_insn;
3837 #endif
3838 {
3839 TCGv_ptr r_tickptr;
3840
3841 tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1,
3842 cpu_src2);
3843 r_tickptr = tcg_temp_new_ptr(tcg_ctx);
3844 tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env,
3845 offsetof(CPUSPARCState, stick));
3846 gen_helper_tick_set_count(tcg_ctx, r_tickptr,
3847 cpu_tmp0);
3848 tcg_temp_free_ptr(tcg_ctx, r_tickptr);
3849 }
3850 break;
3851 case 0x19: /* System tick compare */
3852 #if !defined(CONFIG_USER_ONLY)
3853 if (!supervisor(dc))
3854 goto illegal_insn;
3855 #endif
3856 {
3857 TCGv_ptr r_tickptr;
3858
3859 tcg_gen_xor_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_stick_cmpr, cpu_src1,
3860 cpu_src2);
3861 r_tickptr = tcg_temp_new_ptr(tcg_ctx);
3862 tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env,
3863 offsetof(CPUSPARCState, stick));
3864 gen_helper_tick_set_limit(tcg_ctx, r_tickptr,
3865 *(TCGv *)tcg_ctx->cpu_stick_cmpr);
3866 tcg_temp_free_ptr(tcg_ctx, r_tickptr);
3867 }
3868 break;
3869
3870 case 0x10: /* Performance Control */
3871 case 0x11: /* Performance Instrumentation
3872 Counter */
3873 case 0x12: /* Dispatch Control */
3874 #endif
3875 default:
3876 goto illegal_insn;
3877 }
3878 }
3879 break;
3880 #if !defined(CONFIG_USER_ONLY)
            case 0x31: /* wrpsr, V9 saved, restored */
                {
                    /* Privileged: writable only in supervisor mode.  */
                    if (!supervisor(dc))
                        goto priv_insn;
#ifdef TARGET_SPARC64
                    /* V9 re-uses this opcode for SAVED/RESTORED (selected
                       by the rd field); there is no PSR on V9.  */
                    switch (rd) {
                    case 0:
                        gen_helper_saved(tcg_ctx, tcg_ctx->cpu_env);
                        break;
                    case 1:
                        gen_helper_restored(tcg_ctx, tcg_ctx->cpu_env);
                        break;
                    case 2: /* UA2005 allclean */
                    case 3: /* UA2005 otherw */
                    case 4: /* UA2005 normalw */
                    case 5: /* UA2005 invalw */
                        // XXX
                    default:
                        goto illegal_insn;
                    }
#else
                    /* V8 WRPSR: the written value is rs1 XOR operand2
                       (the XOR is architectural for the WR* group).  */
                    cpu_tmp0 = get_temp_tl(dc);
                    tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2);
                    gen_helper_wrpsr(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0);
                    /* PSR carries the condition codes, so switch the
                       lazy-CC state to "flags are live in the PSR".  */
                    tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_FLAGS);
                    dc->cc_op = CC_OP_FLAGS;
                    /* A PSR write can change privilege/traps, so end the
                       TB here and resume at the next instruction.  */
                    save_state(dc);
                    gen_op_next_insn(dc);
                    tcg_gen_exit_tb(tcg_ctx, 0);
                    dc->is_br = 1;
#endif
                }
                break;
3914 case 0x32: /* wrwim, V9 wrpr */
3915 {
3916 if (!supervisor(dc))
3917 goto priv_insn;
3918 cpu_tmp0 = get_temp_tl(dc);
3919 tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2);
3920 #ifdef TARGET_SPARC64
3921 switch (rd) {
3922 case 0: // tpc
3923 {
3924 TCGv_ptr r_tsptr;
3925
3926 r_tsptr = tcg_temp_new_ptr(tcg_ctx);
3927 gen_load_trap_state_at_tl(dc, r_tsptr, tcg_ctx->cpu_env);
3928 tcg_gen_st_tl(tcg_ctx, cpu_tmp0, r_tsptr,
3929 offsetof(trap_state, tpc));
3930 tcg_temp_free_ptr(tcg_ctx, r_tsptr);
3931 }
3932 break;
3933 case 1: // tnpc
3934 {
3935 TCGv_ptr r_tsptr;
3936
3937 r_tsptr = tcg_temp_new_ptr(tcg_ctx);
3938 gen_load_trap_state_at_tl(dc, r_tsptr, tcg_ctx->cpu_env);
3939 tcg_gen_st_tl(tcg_ctx, cpu_tmp0, r_tsptr,
3940 offsetof(trap_state, tnpc));
3941 tcg_temp_free_ptr(tcg_ctx, r_tsptr);
3942 }
3943 break;
3944 case 2: // tstate
3945 {
3946 TCGv_ptr r_tsptr;
3947
3948 r_tsptr = tcg_temp_new_ptr(tcg_ctx);
3949 gen_load_trap_state_at_tl(dc, r_tsptr, tcg_ctx->cpu_env);
3950 tcg_gen_st_tl(tcg_ctx, cpu_tmp0, r_tsptr,
3951 offsetof(trap_state,
3952 tstate));
3953 tcg_temp_free_ptr(tcg_ctx, r_tsptr);
3954 }
3955 break;
3956 case 3: // tt
3957 {
3958 TCGv_ptr r_tsptr;
3959
3960 r_tsptr = tcg_temp_new_ptr(tcg_ctx);
3961 gen_load_trap_state_at_tl(dc, r_tsptr, tcg_ctx->cpu_env);
3962 tcg_gen_st32_tl(tcg_ctx, cpu_tmp0, r_tsptr,
3963 offsetof(trap_state, tt));
3964 tcg_temp_free_ptr(tcg_ctx, r_tsptr);
3965 }
3966 break;
3967 case 4: // tick
3968 {
3969 TCGv_ptr r_tickptr;
3970
3971 r_tickptr = tcg_temp_new_ptr(tcg_ctx);
3972 tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env,
3973 offsetof(CPUSPARCState, tick));
3974 gen_helper_tick_set_count(tcg_ctx, r_tickptr,
3975 cpu_tmp0);
3976 tcg_temp_free_ptr(tcg_ctx, r_tickptr);
3977 }
3978 break;
3979 case 5: // tba
3980 tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_tbr, cpu_tmp0);
3981 break;
3982 case 6: // pstate
3983 save_state(dc);
3984 gen_helper_wrpstate(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0);
3985 dc->npc = DYNAMIC_PC;
3986 break;
3987 case 7: // tl
3988 save_state(dc);
3989 tcg_gen_st32_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env,
3990 offsetof(CPUSPARCState, tl));
3991 dc->npc = DYNAMIC_PC;
3992 break;
3993 case 8: // pil
3994 gen_helper_wrpil(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0);
3995 break;
3996 case 9: // cwp
3997 gen_helper_wrcwp(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0);
3998 break;
3999 case 10: // cansave
4000 tcg_gen_st32_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env,
4001 offsetof(CPUSPARCState,
4002 cansave));
4003 break;
4004 case 11: // canrestore
4005 tcg_gen_st32_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env,
4006 offsetof(CPUSPARCState,
4007 canrestore));
4008 break;
4009 case 12: // cleanwin
4010 tcg_gen_st32_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env,
4011 offsetof(CPUSPARCState,
4012 cleanwin));
4013 break;
4014 case 13: // otherwin
4015 tcg_gen_st32_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env,
4016 offsetof(CPUSPARCState,
4017 otherwin));
4018 break;
4019 case 14: // wstate
4020 tcg_gen_st32_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env,
4021 offsetof(CPUSPARCState,
4022 wstate));
4023 break;
4024 case 16: // UA2005 gl
4025 CHECK_IU_FEATURE(dc, GL);
4026 tcg_gen_st32_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env,
4027 offsetof(CPUSPARCState, gl));
4028 break;
4029 case 26: // UA2005 strand status
4030 CHECK_IU_FEATURE(dc, HYPV);
4031 if (!hypervisor(dc))
4032 goto priv_insn;
4033 tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_ssr, cpu_tmp0);
4034 break;
4035 default:
4036 goto illegal_insn;
4037 }
4038 #else
4039 tcg_gen_trunc_tl_i32(tcg_ctx, *(TCGv *)tcg_ctx->cpu_wim, cpu_tmp0);
4040 if (dc->def->nwindows != 32) {
4041 tcg_gen_andi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_wim, *(TCGv *)tcg_ctx->cpu_wim,
4042 (1 << dc->def->nwindows) - 1);
4043 }
4044 #endif
4045 }
4046 break;
            case 0x33: /* wrtbr, UA2005 wrhpr */
                {
#ifndef TARGET_SPARC64
                    /* V8 WRTBR: supervisor-only write of the trap base
                       register; value is rs1 XOR operand2.  */
                    if (!supervisor(dc))
                        goto priv_insn;
                    tcg_gen_xor_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_tbr, cpu_src1, cpu_src2);
#else
                    /* UA2005 WRHPR: hyperprivileged register write,
                       register selected by rd.  */
                    CHECK_IU_FEATURE(dc, HYPV);
                    if (!hypervisor(dc))
                        goto priv_insn;
                    cpu_tmp0 = get_temp_tl(dc);
                    tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2);
                    switch (rd) {
                    case 0: // hpstate
                        // XXX gen_op_wrhpstate();
                        /* Not modelled yet; still end the TB since an
                           HPSTATE write could change execution mode.  */
                        save_state(dc);
                        gen_op_next_insn(dc);
                        tcg_gen_exit_tb(tcg_ctx, 0);
                        dc->is_br = 1;
                        break;
                    case 1: // htstate
                        // XXX gen_op_wrhtstate();
                        break;
                    case 3: // hintp
                        tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_hintp, cpu_tmp0);
                        break;
                    case 5: // htba
                        tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_htba, cpu_tmp0);
                        break;
                    case 31: // hstick_cmpr
                        {
                            TCGv_ptr r_tickptr;

                            /* Update the compare register, then push the
                               new limit into the hstick timer object.  */
                            tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_hstick_cmpr, cpu_tmp0);
                            r_tickptr = tcg_temp_new_ptr(tcg_ctx);
                            tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env,
                                           offsetof(CPUSPARCState, hstick));
                            gen_helper_tick_set_limit(tcg_ctx, r_tickptr,
                                                      *(TCGv *)tcg_ctx->cpu_hstick_cmpr);
                            tcg_temp_free_ptr(tcg_ctx, r_tickptr);
                        }
                        break;
                    case 6: // hver readonly
                    default:
                        goto illegal_insn;
                    }
#endif
                }
                break;
4096 #endif
4097 #ifdef TARGET_SPARC64
            case 0x2c: /* V9 movcc */
                {
                    /* cc selects the condition-code set, cond the test.  */
                    int cc = GET_FIELD_SP(insn, 11, 12);
                    int cond = GET_FIELD_SP(insn, 14, 17);
                    DisasCompare cmp;
                    TCGv dst;

                    if (insn & (1 << 18)) {
                        /* Integer condition codes: cc==0 -> icc,
                           cc==2 -> xcc; other encodings are illegal.  */
                        if (cc == 0) {
                            gen_compare(dc, &cmp, 0, cond);
                        } else if (cc == 2) {
                            gen_compare(dc, &cmp, 1, cond);
                        } else {
                            goto illegal_insn;
                        }
                    } else {
                        /* Floating-point condition codes fcc0..fcc3.  */
                        gen_fcompare(dc, &cmp, cc, cond);
                    }

                    /* The get_src2 above loaded the normal 13-bit
                       immediate field, not the 11-bit field we have
                       in movcc. But it did handle the reg case. */
                    if (IS_IMM) {
                        simm = GET_FIELD_SPs(insn, 0, 10);
                        tcg_gen_movi_tl(tcg_ctx, cpu_src2, simm);
                    }

                    /* Conditional move: rd = cond ? src2 : rd.  Loading
                       rd first gives the "unchanged" value for the
                       false arm of movcond.  */
                    dst = gen_load_gpr(dc, rd);
                    tcg_gen_movcond_tl(tcg_ctx, cmp.cond, dst,
                                       cmp.c1, cmp.c2,
                                       cpu_src2, dst);
                    free_compare(tcg_ctx, &cmp);
                    gen_store_gpr(dc, rd, dst);
                    break;
                }
4133 case 0x2d: /* V9 sdivx */
4134 gen_helper_sdivx(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, cpu_src1, cpu_src2);
4135 gen_store_gpr(dc, rd, cpu_dst);
4136 break;
4137 case 0x2e: /* V9 popc */
4138 gen_helper_popc(tcg_ctx, cpu_dst, cpu_src2);
4139 gen_store_gpr(dc, rd, cpu_dst);
4140 break;
            case 0x2f: /* V9 movr */
                {
                    /* MOVR: conditional move on the value of rs1
                       (register contents, not condition codes).  */
                    int cond = GET_FIELD_SP(insn, 10, 12);
                    DisasCompare cmp;
                    TCGv dst;

                    gen_compare_reg(dc, &cmp, cond, cpu_src1);

                    /* The get_src2 above loaded the normal 13-bit
                       immediate field, not the 10-bit field we have
                       in movr. But it did handle the reg case. */
                    if (IS_IMM) {
                        simm = GET_FIELD_SPs(insn, 0, 9);
                        tcg_gen_movi_tl(tcg_ctx, cpu_src2, simm);
                    }

                    /* rd = cond(rs1) ? src2 : rd — same movcond shape
                       as the movcc case above.  */
                    dst = gen_load_gpr(dc, rd);
                    tcg_gen_movcond_tl(tcg_ctx, cmp.cond, dst,
                                       cmp.c1, cmp.c2,
                                       cpu_src2, dst);
                    free_compare(tcg_ctx, &cmp);
                    gen_store_gpr(dc, rd, dst);
                    break;
                }
4165 #endif
4166 default:
4167 goto illegal_insn;
4168 }
4169 }
4170 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4171 #ifdef TARGET_SPARC64
4172 int opf = GET_FIELD_SP(insn, 5, 13);
4173 rs1 = GET_FIELD(insn, 13, 17);
4174 rs2 = GET_FIELD(insn, 27, 31);
4175 if (gen_trap_ifnofpu(dc)) {
4176 goto jmp_insn;
4177 }
4178
4179 switch (opf) {
4180 case 0x000: /* VIS I edge8cc */
4181 CHECK_FPU_FEATURE(dc, VIS1);
4182 cpu_src1 = gen_load_gpr(dc, rs1);
4183 cpu_src2 = gen_load_gpr(dc, rs2);
4184 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4185 gen_store_gpr(dc, rd, cpu_dst);
4186 break;
4187 case 0x001: /* VIS II edge8n */
4188 CHECK_FPU_FEATURE(dc, VIS2);
4189 cpu_src1 = gen_load_gpr(dc, rs1);
4190 cpu_src2 = gen_load_gpr(dc, rs2);
4191 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4192 gen_store_gpr(dc, rd, cpu_dst);
4193 break;
4194 case 0x002: /* VIS I edge8lcc */
4195 CHECK_FPU_FEATURE(dc, VIS1);
4196 cpu_src1 = gen_load_gpr(dc, rs1);
4197 cpu_src2 = gen_load_gpr(dc, rs2);
4198 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4199 gen_store_gpr(dc, rd, cpu_dst);
4200 break;
4201 case 0x003: /* VIS II edge8ln */
4202 CHECK_FPU_FEATURE(dc, VIS2);
4203 cpu_src1 = gen_load_gpr(dc, rs1);
4204 cpu_src2 = gen_load_gpr(dc, rs2);
4205 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4206 gen_store_gpr(dc, rd, cpu_dst);
4207 break;
4208 case 0x004: /* VIS I edge16cc */
4209 CHECK_FPU_FEATURE(dc, VIS1);
4210 cpu_src1 = gen_load_gpr(dc, rs1);
4211 cpu_src2 = gen_load_gpr(dc, rs2);
4212 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4213 gen_store_gpr(dc, rd, cpu_dst);
4214 break;
4215 case 0x005: /* VIS II edge16n */
4216 CHECK_FPU_FEATURE(dc, VIS2);
4217 cpu_src1 = gen_load_gpr(dc, rs1);
4218 cpu_src2 = gen_load_gpr(dc, rs2);
4219 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4220 gen_store_gpr(dc, rd, cpu_dst);
4221 break;
4222 case 0x006: /* VIS I edge16lcc */
4223 CHECK_FPU_FEATURE(dc, VIS1);
4224 cpu_src1 = gen_load_gpr(dc, rs1);
4225 cpu_src2 = gen_load_gpr(dc, rs2);
4226 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4227 gen_store_gpr(dc, rd, cpu_dst);
4228 break;
4229 case 0x007: /* VIS II edge16ln */
4230 CHECK_FPU_FEATURE(dc, VIS2);
4231 cpu_src1 = gen_load_gpr(dc, rs1);
4232 cpu_src2 = gen_load_gpr(dc, rs2);
4233 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4234 gen_store_gpr(dc, rd, cpu_dst);
4235 break;
4236 case 0x008: /* VIS I edge32cc */
4237 CHECK_FPU_FEATURE(dc, VIS1);
4238 cpu_src1 = gen_load_gpr(dc, rs1);
4239 cpu_src2 = gen_load_gpr(dc, rs2);
4240 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4241 gen_store_gpr(dc, rd, cpu_dst);
4242 break;
4243 case 0x009: /* VIS II edge32n */
4244 CHECK_FPU_FEATURE(dc, VIS2);
4245 cpu_src1 = gen_load_gpr(dc, rs1);
4246 cpu_src2 = gen_load_gpr(dc, rs2);
4247 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4248 gen_store_gpr(dc, rd, cpu_dst);
4249 break;
4250 case 0x00a: /* VIS I edge32lcc */
4251 CHECK_FPU_FEATURE(dc, VIS1);
4252 cpu_src1 = gen_load_gpr(dc, rs1);
4253 cpu_src2 = gen_load_gpr(dc, rs2);
4254 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4255 gen_store_gpr(dc, rd, cpu_dst);
4256 break;
4257 case 0x00b: /* VIS II edge32ln */
4258 CHECK_FPU_FEATURE(dc, VIS2);
4259 cpu_src1 = gen_load_gpr(dc, rs1);
4260 cpu_src2 = gen_load_gpr(dc, rs2);
4261 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4262 gen_store_gpr(dc, rd, cpu_dst);
4263 break;
4264 case 0x010: /* VIS I array8 */
4265 CHECK_FPU_FEATURE(dc, VIS1);
4266 cpu_src1 = gen_load_gpr(dc, rs1);
4267 cpu_src2 = gen_load_gpr(dc, rs2);
4268 gen_helper_array8(tcg_ctx, cpu_dst, cpu_src1, cpu_src2);
4269 gen_store_gpr(dc, rd, cpu_dst);
4270 break;
4271 case 0x012: /* VIS I array16 */
4272 CHECK_FPU_FEATURE(dc, VIS1);
4273 cpu_src1 = gen_load_gpr(dc, rs1);
4274 cpu_src2 = gen_load_gpr(dc, rs2);
4275 gen_helper_array8(tcg_ctx, cpu_dst, cpu_src1, cpu_src2);
4276 tcg_gen_shli_i64(tcg_ctx, cpu_dst, cpu_dst, 1);
4277 gen_store_gpr(dc, rd, cpu_dst);
4278 break;
4279 case 0x014: /* VIS I array32 */
4280 CHECK_FPU_FEATURE(dc, VIS1);
4281 cpu_src1 = gen_load_gpr(dc, rs1);
4282 cpu_src2 = gen_load_gpr(dc, rs2);
4283 gen_helper_array8(tcg_ctx, cpu_dst, cpu_src1, cpu_src2);
4284 tcg_gen_shli_i64(tcg_ctx, cpu_dst, cpu_dst, 2);
4285 gen_store_gpr(dc, rd, cpu_dst);
4286 break;
4287 case 0x018: /* VIS I alignaddr */
4288 CHECK_FPU_FEATURE(dc, VIS1);
4289 cpu_src1 = gen_load_gpr(dc, rs1);
4290 cpu_src2 = gen_load_gpr(dc, rs2);
4291 gen_alignaddr(dc, cpu_dst, cpu_src1, cpu_src2, 0);
4292 gen_store_gpr(dc, rd, cpu_dst);
4293 break;
4294 case 0x01a: /* VIS I alignaddrl */
4295 CHECK_FPU_FEATURE(dc, VIS1);
4296 cpu_src1 = gen_load_gpr(dc, rs1);
4297 cpu_src2 = gen_load_gpr(dc, rs2);
4298 gen_alignaddr(dc, cpu_dst, cpu_src1, cpu_src2, 1);
4299 gen_store_gpr(dc, rd, cpu_dst);
4300 break;
4301 case 0x019: /* VIS II bmask */
4302 CHECK_FPU_FEATURE(dc, VIS2);
4303 cpu_src1 = gen_load_gpr(dc, rs1);
4304 cpu_src2 = gen_load_gpr(dc, rs2);
4305 tcg_gen_add_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2);
4306 tcg_gen_deposit_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_gsr, *(TCGv *)tcg_ctx->cpu_gsr, cpu_dst, 32, 32);
4307 gen_store_gpr(dc, rd, cpu_dst);
4308 break;
4309 case 0x020: /* VIS I fcmple16 */
4310 CHECK_FPU_FEATURE(dc, VIS1);
4311 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4312 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4313 gen_helper_fcmple16(tcg_ctx, cpu_dst, cpu_src1_64, cpu_src2_64);
4314 gen_store_gpr(dc, rd, cpu_dst);
4315 break;
4316 case 0x022: /* VIS I fcmpne16 */
4317 CHECK_FPU_FEATURE(dc, VIS1);
4318 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4319 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4320 gen_helper_fcmpne16(tcg_ctx, cpu_dst, cpu_src1_64, cpu_src2_64);
4321 gen_store_gpr(dc, rd, cpu_dst);
4322 break;
4323 case 0x024: /* VIS I fcmple32 */
4324 CHECK_FPU_FEATURE(dc, VIS1);
4325 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4326 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4327 gen_helper_fcmple32(tcg_ctx, cpu_dst, cpu_src1_64, cpu_src2_64);
4328 gen_store_gpr(dc, rd, cpu_dst);
4329 break;
4330 case 0x026: /* VIS I fcmpne32 */
4331 CHECK_FPU_FEATURE(dc, VIS1);
4332 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4333 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4334 gen_helper_fcmpne32(tcg_ctx, cpu_dst, cpu_src1_64, cpu_src2_64);
4335 gen_store_gpr(dc, rd, cpu_dst);
4336 break;
4337 case 0x028: /* VIS I fcmpgt16 */
4338 CHECK_FPU_FEATURE(dc, VIS1);
4339 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4340 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4341 gen_helper_fcmpgt16(tcg_ctx, cpu_dst, cpu_src1_64, cpu_src2_64);
4342 gen_store_gpr(dc, rd, cpu_dst);
4343 break;
4344 case 0x02a: /* VIS I fcmpeq16 */
4345 CHECK_FPU_FEATURE(dc, VIS1);
4346 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4347 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4348 gen_helper_fcmpeq16(tcg_ctx, cpu_dst, cpu_src1_64, cpu_src2_64);
4349 gen_store_gpr(dc, rd, cpu_dst);
4350 break;
4351 case 0x02c: /* VIS I fcmpgt32 */
4352 CHECK_FPU_FEATURE(dc, VIS1);
4353 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4354 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4355 gen_helper_fcmpgt32(tcg_ctx, cpu_dst, cpu_src1_64, cpu_src2_64);
4356 gen_store_gpr(dc, rd, cpu_dst);
4357 break;
4358 case 0x02e: /* VIS I fcmpeq32 */
4359 CHECK_FPU_FEATURE(dc, VIS1);
4360 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4361 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4362 gen_helper_fcmpeq32(tcg_ctx, cpu_dst, cpu_src1_64, cpu_src2_64);
4363 gen_store_gpr(dc, rd, cpu_dst);
4364 break;
4365 case 0x031: /* VIS I fmul8x16 */
4366 CHECK_FPU_FEATURE(dc, VIS1);
4367 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4368 break;
4369 case 0x033: /* VIS I fmul8x16au */
4370 CHECK_FPU_FEATURE(dc, VIS1);
4371 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4372 break;
4373 case 0x035: /* VIS I fmul8x16al */
4374 CHECK_FPU_FEATURE(dc, VIS1);
4375 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4376 break;
4377 case 0x036: /* VIS I fmul8sux16 */
4378 CHECK_FPU_FEATURE(dc, VIS1);
4379 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4380 break;
4381 case 0x037: /* VIS I fmul8ulx16 */
4382 CHECK_FPU_FEATURE(dc, VIS1);
4383 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4384 break;
4385 case 0x038: /* VIS I fmuld8sux16 */
4386 CHECK_FPU_FEATURE(dc, VIS1);
4387 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4388 break;
4389 case 0x039: /* VIS I fmuld8ulx16 */
4390 CHECK_FPU_FEATURE(dc, VIS1);
4391 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4392 break;
4393 case 0x03a: /* VIS I fpack32 */
4394 CHECK_FPU_FEATURE(dc, VIS1);
4395 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4396 break;
4397 case 0x03b: /* VIS I fpack16 */
4398 CHECK_FPU_FEATURE(dc, VIS1);
4399 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4400 cpu_dst_32 = gen_dest_fpr_F(dc);
4401 gen_helper_fpack16(tcg_ctx, cpu_dst_32, *(TCGv *)tcg_ctx->cpu_gsr, cpu_src1_64);
4402 gen_store_fpr_F(dc, rd, cpu_dst_32);
4403 break;
4404 case 0x03d: /* VIS I fpackfix */
4405 CHECK_FPU_FEATURE(dc, VIS1);
4406 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4407 cpu_dst_32 = gen_dest_fpr_F(dc);
4408 gen_helper_fpackfix(tcg_ctx, cpu_dst_32, *(TCGv *)tcg_ctx->cpu_gsr, cpu_src1_64);
4409 gen_store_fpr_F(dc, rd, cpu_dst_32);
4410 break;
4411 case 0x03e: /* VIS I pdist */
4412 CHECK_FPU_FEATURE(dc, VIS1);
4413 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4414 break;
4415 case 0x048: /* VIS I faligndata */
4416 CHECK_FPU_FEATURE(dc, VIS1);
4417 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4418 break;
4419 case 0x04b: /* VIS I fpmerge */
4420 CHECK_FPU_FEATURE(dc, VIS1);
4421 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4422 break;
4423 case 0x04c: /* VIS II bshuffle */
4424 CHECK_FPU_FEATURE(dc, VIS2);
4425 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4426 break;
4427 case 0x04d: /* VIS I fexpand */
4428 CHECK_FPU_FEATURE(dc, VIS1);
4429 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
4430 break;
4431 case 0x050: /* VIS I fpadd16 */
4432 CHECK_FPU_FEATURE(dc, VIS1);
4433 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
4434 break;
4435 case 0x051: /* VIS I fpadd16s */
4436 CHECK_FPU_FEATURE(dc, VIS1);
4437 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
4438 break;
4439 case 0x052: /* VIS I fpadd32 */
4440 CHECK_FPU_FEATURE(dc, VIS1);
4441 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
4442 break;
4443 case 0x053: /* VIS I fpadd32s */
4444 CHECK_FPU_FEATURE(dc, VIS1);
4445 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
4446 break;
4447 case 0x054: /* VIS I fpsub16 */
4448 CHECK_FPU_FEATURE(dc, VIS1);
4449 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
4450 break;
4451 case 0x055: /* VIS I fpsub16s */
4452 CHECK_FPU_FEATURE(dc, VIS1);
4453 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
4454 break;
4455 case 0x056: /* VIS I fpsub32 */
4456 CHECK_FPU_FEATURE(dc, VIS1);
4457 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
4458 break;
4459 case 0x057: /* VIS I fpsub32s */
4460 CHECK_FPU_FEATURE(dc, VIS1);
4461 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
4462 break;
4463 case 0x060: /* VIS I fzero */
4464 CHECK_FPU_FEATURE(dc, VIS1);
4465 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4466 tcg_gen_movi_i64(tcg_ctx, cpu_dst_64, 0);
4467 gen_store_fpr_D(dc, rd, cpu_dst_64);
4468 break;
4469 case 0x061: /* VIS I fzeros */
4470 CHECK_FPU_FEATURE(dc, VIS1);
4471 cpu_dst_32 = gen_dest_fpr_F(dc);
4472 tcg_gen_movi_i32(tcg_ctx, cpu_dst_32, 0);
4473 gen_store_fpr_F(dc, rd, cpu_dst_32);
4474 break;
4475 case 0x062: /* VIS I fnor */
4476 CHECK_FPU_FEATURE(dc, VIS1);
4477 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
4478 break;
4479 case 0x063: /* VIS I fnors */
4480 CHECK_FPU_FEATURE(dc, VIS1);
4481 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
4482 break;
4483 case 0x064: /* VIS I fandnot2 */
4484 CHECK_FPU_FEATURE(dc, VIS1);
4485 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
4486 break;
4487 case 0x065: /* VIS I fandnot2s */
4488 CHECK_FPU_FEATURE(dc, VIS1);
4489 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
4490 break;
4491 case 0x066: /* VIS I fnot2 */
4492 CHECK_FPU_FEATURE(dc, VIS1);
4493 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
4494 break;
4495 case 0x067: /* VIS I fnot2s */
4496 CHECK_FPU_FEATURE(dc, VIS1);
4497 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
4498 break;
4499 case 0x068: /* VIS I fandnot1 */
4500 CHECK_FPU_FEATURE(dc, VIS1);
4501 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
4502 break;
4503 case 0x069: /* VIS I fandnot1s */
4504 CHECK_FPU_FEATURE(dc, VIS1);
4505 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
4506 break;
4507 case 0x06a: /* VIS I fnot1 */
4508 CHECK_FPU_FEATURE(dc, VIS1);
4509 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
4510 break;
4511 case 0x06b: /* VIS I fnot1s */
4512 CHECK_FPU_FEATURE(dc, VIS1);
4513 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
4514 break;
4515 case 0x06c: /* VIS I fxor */
4516 CHECK_FPU_FEATURE(dc, VIS1);
4517 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
4518 break;
4519 case 0x06d: /* VIS I fxors */
4520 CHECK_FPU_FEATURE(dc, VIS1);
4521 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
4522 break;
4523 case 0x06e: /* VIS I fnand */
4524 CHECK_FPU_FEATURE(dc, VIS1);
4525 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
4526 break;
4527 case 0x06f: /* VIS I fnands */
4528 CHECK_FPU_FEATURE(dc, VIS1);
4529 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
4530 break;
4531 case 0x070: /* VIS I fand */
4532 CHECK_FPU_FEATURE(dc, VIS1);
4533 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
4534 break;
4535 case 0x071: /* VIS I fands */
4536 CHECK_FPU_FEATURE(dc, VIS1);
4537 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
4538 break;
4539 case 0x072: /* VIS I fxnor */
4540 CHECK_FPU_FEATURE(dc, VIS1);
4541 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
4542 break;
4543 case 0x073: /* VIS I fxnors */
4544 CHECK_FPU_FEATURE(dc, VIS1);
4545 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
4546 break;
4547 case 0x074: /* VIS I fsrc1 */
4548 CHECK_FPU_FEATURE(dc, VIS1);
4549 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4550 gen_store_fpr_D(dc, rd, cpu_src1_64);
4551 break;
4552 case 0x075: /* VIS I fsrc1s */
4553 CHECK_FPU_FEATURE(dc, VIS1);
4554 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
4555 gen_store_fpr_F(dc, rd, cpu_src1_32);
4556 break;
4557 case 0x076: /* VIS I fornot2 */
4558 CHECK_FPU_FEATURE(dc, VIS1);
4559 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
4560 break;
4561 case 0x077: /* VIS I fornot2s */
4562 CHECK_FPU_FEATURE(dc, VIS1);
4563 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
4564 break;
4565 case 0x078: /* VIS I fsrc2 */
4566 CHECK_FPU_FEATURE(dc, VIS1);
4567 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4568 gen_store_fpr_D(dc, rd, cpu_src1_64);
4569 break;
4570 case 0x079: /* VIS I fsrc2s */
4571 CHECK_FPU_FEATURE(dc, VIS1);
4572 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
4573 gen_store_fpr_F(dc, rd, cpu_src1_32);
4574 break;
4575 case 0x07a: /* VIS I fornot1 */
4576 CHECK_FPU_FEATURE(dc, VIS1);
4577 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
4578 break;
4579 case 0x07b: /* VIS I fornot1s */
4580 CHECK_FPU_FEATURE(dc, VIS1);
4581 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
4582 break;
4583 case 0x07c: /* VIS I for */
4584 CHECK_FPU_FEATURE(dc, VIS1);
4585 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
4586 break;
4587 case 0x07d: /* VIS I fors */
4588 CHECK_FPU_FEATURE(dc, VIS1);
4589 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
4590 break;
4591 case 0x07e: /* VIS I fone */
4592 CHECK_FPU_FEATURE(dc, VIS1);
4593 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4594 tcg_gen_movi_i64(tcg_ctx, cpu_dst_64, -1);
4595 gen_store_fpr_D(dc, rd, cpu_dst_64);
4596 break;
4597 case 0x07f: /* VIS I fones */
4598 CHECK_FPU_FEATURE(dc, VIS1);
4599 cpu_dst_32 = gen_dest_fpr_F(dc);
4600 tcg_gen_movi_i32(tcg_ctx, cpu_dst_32, -1);
4601 gen_store_fpr_F(dc, rd, cpu_dst_32);
4602 break;
4603 case 0x080: /* VIS I shutdown */
4604 case 0x081: /* VIS II siam */
4605 // XXX
4606 goto illegal_insn;
4607 default:
4608 goto illegal_insn;
4609 }
4610 #else
4611 goto ncp_insn;
4612 #endif
4613 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
4614 #ifdef TARGET_SPARC64
4615 goto illegal_insn;
4616 #else
4617 goto ncp_insn;
4618 #endif
4619 #ifdef TARGET_SPARC64
        } else if (xop == 0x39) { /* V9 return */
            TCGv_i32 r_const;

            /* Target address = rs1 + (simm13 | rs2), computed into a
               temp before the window restore touches the registers.  */
            save_state(dc);
            cpu_src1 = get_src1(dc, insn);
            cpu_tmp0 = get_temp_tl(dc);
            if (IS_IMM) { /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                tcg_gen_addi_tl(tcg_ctx, cpu_tmp0, cpu_src1, simm);
            } else { /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                if (rs2) {
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    tcg_gen_add_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2);
                } else {
                    tcg_gen_mov_tl(tcg_ctx, cpu_tmp0, cpu_src1);
                }
            }
            /* RETURN restores the register window, then branches.  */
            gen_helper_restore(tcg_ctx, tcg_ctx->cpu_env);
            gen_mov_pc_npc(dc);
            /* Check 4-byte alignment of the target (mask 3).  */
            r_const = tcg_const_i32(tcg_ctx, 3);
            gen_helper_check_align(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0, r_const);
            tcg_temp_free_i32(tcg_ctx, r_const);
            tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_npc, cpu_tmp0);
            /* npc is now only known at run time.  */
            dc->npc = DYNAMIC_PC;
            goto jmp_insn;
4646 #endif
4647 } else {
4648 cpu_src1 = get_src1(dc, insn);
4649 cpu_tmp0 = get_temp_tl(dc);
4650 if (IS_IMM) { /* immediate */
4651 simm = GET_FIELDs(insn, 19, 31);
4652 tcg_gen_addi_tl(tcg_ctx, cpu_tmp0, cpu_src1, simm);
4653 } else { /* register */
4654 rs2 = GET_FIELD(insn, 27, 31);
4655 if (rs2) {
4656 cpu_src2 = gen_load_gpr(dc, rs2);
4657 tcg_gen_add_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2);
4658 } else {
4659 tcg_gen_mov_tl(tcg_ctx, cpu_tmp0, cpu_src1);
4660 }
4661 }
4662 switch (xop) {
            case 0x38: /* jmpl */
                {
                    TCGv t;
                    TCGv_i32 r_const;

                    /* Write the link value (address of this insn) to rd
                       before redirecting control flow.  */
                    t = gen_dest_gpr(dc, rd);
                    tcg_gen_movi_tl(tcg_ctx, t, dc->pc);
                    gen_store_gpr(dc, rd, t);
                    gen_mov_pc_npc(dc);
                    /* Target must be 4-byte aligned (mask 3).  */
                    r_const = tcg_const_i32(tcg_ctx, 3);
                    gen_helper_check_align(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0, r_const);
                    tcg_temp_free_i32(tcg_ctx, r_const);
                    gen_address_mask(dc, cpu_tmp0);
                    tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_npc, cpu_tmp0);
                    dc->npc = DYNAMIC_PC;
                }
                goto jmp_insn;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
            case 0x39: /* rett, V9 return */
                {
                    TCGv_i32 r_const;

                    /* V8 RETT: supervisor-only return from trap.  */
                    if (!supervisor(dc))
                        goto priv_insn;
                    gen_mov_pc_npc(dc);
                    r_const = tcg_const_i32(tcg_ctx, 3);
                    gen_helper_check_align(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0, r_const);
                    tcg_temp_free_i32(tcg_ctx, r_const);
                    tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_npc, cpu_tmp0);
                    dc->npc = DYNAMIC_PC;
                    /* Helper handles the PSR/window side effects.  */
                    gen_helper_rett(tcg_ctx, tcg_ctx->cpu_env);
                }
                goto jmp_insn;
#endif
            case 0x3b: /* flush */
                /* No I-cache to flush in the emulator; only validate
                   that the CPU model implements the instruction.  */
                if (!((dc)->def->features & CPU_FEATURE_FLUSH))
                    goto unimp_flush;
                /* nop */
                break;
            case 0x3c: /* save */
                /* Window save; the computed address (cpu_tmp0) becomes
                   the new rd in the fresh window.  */
                save_state(dc);
                gen_helper_save(tcg_ctx, tcg_ctx->cpu_env);
                gen_store_gpr(dc, rd, cpu_tmp0);
                break;
            case 0x3d: /* restore */
                /* Window restore; mirrors the save case above.  */
                save_state(dc);
                gen_helper_restore(tcg_ctx, tcg_ctx->cpu_env);
                gen_store_gpr(dc, rd, cpu_tmp0);
                break;
4712 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
4713 case 0x3e: /* V9 done/retry */
4714 {
4715 switch (rd) {
4716 case 0:
4717 if (!supervisor(dc))
4718 goto priv_insn;
4719 dc->npc = DYNAMIC_PC;
4720 dc->pc = DYNAMIC_PC;
4721 gen_helper_done(tcg_ctx, tcg_ctx->cpu_env);
4722 goto jmp_insn;
4723 case 1:
4724 if (!supervisor(dc))
4725 goto priv_insn;
4726 dc->npc = DYNAMIC_PC;
4727 dc->pc = DYNAMIC_PC;
4728 gen_helper_retry(tcg_ctx, tcg_ctx->cpu_env);
4729 goto jmp_insn;
4730 default:
4731 goto illegal_insn;
4732 }
4733 }
4734 break;
4735 #endif
4736 default:
4737 goto illegal_insn;
4738 }
4739 }
4740 break;
4741 }
4742 break;
    case 3: /* load/store instructions */
        {
            unsigned int xop = GET_FIELD(insn, 7, 12);
            /* ??? gen_address_mask prevents us from using a source
               register directly. Always generate a temporary. */
            TCGv cpu_addr = get_temp_tl(dc);

            /* Effective address = rs1 + (simm13 | rs2), except for
               casa/casxa which take the address from rs1 alone.  */
            tcg_gen_mov_tl(tcg_ctx, cpu_addr, get_src1(dc, insn));
            if (xop == 0x3c || xop == 0x3e) {
                /* V9 casa/casxa : no offset */
            } else if (IS_IMM) { /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                if (simm != 0) {
                    tcg_gen_addi_tl(tcg_ctx, cpu_addr, cpu_addr, simm);
                }
            } else { /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                if (rs2 != 0) {
                    /* %g0 as rs2 contributes nothing; skip the add.  */
                    tcg_gen_add_tl(tcg_ctx, cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
                }
            }
4764 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
4765 (xop > 0x17 && xop <= 0x1d ) ||
4766 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
4767 TCGv cpu_val = gen_dest_gpr(dc, rd);
4768
4769 switch (xop) {
4770 case 0x0: /* ld, V9 lduw, load unsigned word */
4771 gen_address_mask(dc, cpu_addr);
4772 tcg_gen_qemu_ld32u(dc->uc, cpu_val, cpu_addr, dc->mem_idx);
4773 break;
4774 case 0x1: /* ldub, load unsigned byte */
4775 gen_address_mask(dc, cpu_addr);
4776 tcg_gen_qemu_ld8u(dc->uc, cpu_val, cpu_addr, dc->mem_idx);
4777 break;
4778 case 0x2: /* lduh, load unsigned halfword */
4779 gen_address_mask(dc, cpu_addr);
4780 tcg_gen_qemu_ld16u(dc->uc, cpu_val, cpu_addr, dc->mem_idx);
4781 break;
            case 0x3: /* ldd, load double word */
                /* rd must be even: the 64-bit value fills the pair
                   rd (high word) / rd+1 (low word).  */
                if (rd & 1)
                    goto illegal_insn;
                else {
                    TCGv_i32 r_const;
                    TCGv_i64 t64;

                    save_state(dc);
                    /* 8-byte alignment check (mask 7).  */
                    r_const = tcg_const_i32(tcg_ctx, 7);
                    /* XXX remove alignment check */
                    gen_helper_check_align(tcg_ctx, tcg_ctx->cpu_env, cpu_addr, r_const);
                    tcg_temp_free_i32(tcg_ctx, r_const);
                    gen_address_mask(dc, cpu_addr);
                    /* One 64-bit load, then split: low 32 bits go to
                       rd+1 first, high 32 bits are left in cpu_val for
                       the common gen_store_gpr(rd) after the switch.  */
                    t64 = tcg_temp_new_i64(tcg_ctx);
                    tcg_gen_qemu_ld64(dc->uc, t64, cpu_addr, dc->mem_idx);
                    tcg_gen_trunc_i64_tl(tcg_ctx, cpu_val, t64);
                    tcg_gen_ext32u_tl(tcg_ctx, cpu_val, cpu_val);
                    gen_store_gpr(dc, rd + 1, cpu_val);
                    tcg_gen_shri_i64(tcg_ctx, t64, t64, 32);
                    tcg_gen_trunc_i64_tl(tcg_ctx, cpu_val, t64);
                    tcg_temp_free_i64(tcg_ctx, t64);
                    tcg_gen_ext32u_tl(tcg_ctx, cpu_val, cpu_val);
                }
                break;
4806 case 0x9: /* ldsb, load signed byte */
4807 gen_address_mask(dc, cpu_addr);
4808 tcg_gen_qemu_ld8s(dc->uc, cpu_val, cpu_addr, dc->mem_idx);
4809 break;
4810 case 0xa: /* ldsh, load signed halfword */
4811 gen_address_mask(dc, cpu_addr);
4812 tcg_gen_qemu_ld16s(dc->uc, cpu_val, cpu_addr, dc->mem_idx);
4813 break;
            case 0xd: /* ldstub -- XXX: should be atomically */
                {
                    TCGv r_const;

                    /* Load the old byte, then store 0xff back.  Emitted
                       as two separate memory ops, so NOT atomic with
                       respect to other vCPUs (see XXX above).  */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld8s(dc->uc, cpu_val, cpu_addr, dc->mem_idx);
                    r_const = tcg_const_tl(tcg_ctx, 0xff);
                    tcg_gen_qemu_st8(dc->uc, r_const, cpu_addr, dc->mem_idx);
                    tcg_temp_free(tcg_ctx, r_const);
                }
                break;
            case 0x0f:
                /* swap, swap register with memory. Also atomically */
                {
                    TCGv t0 = get_temp_tl(dc);
                    CHECK_IU_FEATURE(dc, SWAP);
                    /* Read memory into t0, write rd's old value out,
                       then hand t0 to the common rd store below.  Same
                       non-atomic caveat as ldstub.  */
                    cpu_src1 = gen_load_gpr(dc, rd);
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld32u(dc->uc, t0, cpu_addr, dc->mem_idx);
                    tcg_gen_qemu_st32(dc->uc, cpu_src1, cpu_addr, dc->mem_idx);
                    tcg_gen_mov_tl(tcg_ctx, cpu_val, t0);
                }
                break;
4837 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4838 case 0x10: /* lda, V9 lduwa, load word alternate */
4839 #ifndef TARGET_SPARC64
4840 if (IS_IMM)
4841 goto illegal_insn;
4842 if (!supervisor(dc))
4843 goto priv_insn;
4844 #endif
4845 save_state(dc);
4846 gen_ld_asi(dc, cpu_val, cpu_addr, insn, 4, 0);
4847 break;
4848 case 0x11: /* lduba, load unsigned byte alternate */
4849 #ifndef TARGET_SPARC64
4850 if (IS_IMM)
4851 goto illegal_insn;
4852 if (!supervisor(dc))
4853 goto priv_insn;
4854 #endif
4855 save_state(dc);
4856 gen_ld_asi(dc, cpu_val, cpu_addr, insn, 1, 0);
4857 break;
4858 case 0x12: /* lduha, load unsigned halfword alternate */
4859 #ifndef TARGET_SPARC64
4860 if (IS_IMM)
4861 goto illegal_insn;
4862 if (!supervisor(dc))
4863 goto priv_insn;
4864 #endif
4865 save_state(dc);
4866 gen_ld_asi(dc, cpu_val, cpu_addr, insn, 2, 0);
4867 break;
4868 case 0x13: /* ldda, load double word alternate */
4869 #ifndef TARGET_SPARC64
4870 if (IS_IMM)
4871 goto illegal_insn;
4872 if (!supervisor(dc))
4873 goto priv_insn;
4874 #endif
4875 if (rd & 1)
4876 goto illegal_insn;
4877 save_state(dc);
4878 gen_ldda_asi(dc, cpu_val, cpu_addr, insn, rd);
4879 goto skip_move;
4880 case 0x19: /* ldsba, load signed byte alternate */
4881 #ifndef TARGET_SPARC64
4882 if (IS_IMM)
4883 goto illegal_insn;
4884 if (!supervisor(dc))
4885 goto priv_insn;
4886 #endif
4887 save_state(dc);
4888 gen_ld_asi(dc, cpu_val, cpu_addr, insn, 1, 1);
4889 break;
4890 case 0x1a: /* ldsha, load signed halfword alternate */
4891 #ifndef TARGET_SPARC64
4892 if (IS_IMM)
4893 goto illegal_insn;
4894 if (!supervisor(dc))
4895 goto priv_insn;
4896 #endif
4897 save_state(dc);
4898 gen_ld_asi(dc, cpu_val, cpu_addr, insn, 2, 1);
4899 break;
4900 case 0x1d: /* ldstuba -- XXX: should be atomically */
4901 #ifndef TARGET_SPARC64
4902 if (IS_IMM)
4903 goto illegal_insn;
4904 if (!supervisor(dc))
4905 goto priv_insn;
4906 #endif
4907 save_state(dc);
4908 gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
4909 break;
4910 case 0x1f: /* swapa, swap reg with alt. memory. Also
4911 atomically */
4912 CHECK_IU_FEATURE(dc, SWAP);
4913 #ifndef TARGET_SPARC64
4914 if (IS_IMM)
4915 goto illegal_insn;
4916 if (!supervisor(dc))
4917 goto priv_insn;
4918 #endif
4919 save_state(dc);
4920 cpu_src1 = gen_load_gpr(dc, rd);
4921 gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
4922 break;
4923
4924 #ifndef TARGET_SPARC64
4925 case 0x30: /* ldc */
4926 case 0x31: /* ldcsr */
4927 case 0x33: /* lddc */
4928 goto ncp_insn;
4929 #endif
4930 #endif
4931 #ifdef TARGET_SPARC64
4932 case 0x08: /* V9 ldsw */
4933 gen_address_mask(dc, cpu_addr);
4934 tcg_gen_qemu_ld32s(dc->uc, cpu_val, cpu_addr, dc->mem_idx);
4935 break;
4936 case 0x0b: /* V9 ldx */
4937 gen_address_mask(dc, cpu_addr);
4938 tcg_gen_qemu_ld64(dc->uc, cpu_val, cpu_addr, dc->mem_idx);
4939 break;
4940 case 0x18: /* V9 ldswa */
4941 save_state(dc);
4942 gen_ld_asi(dc, cpu_val, cpu_addr, insn, 4, 1);
4943 break;
4944 case 0x1b: /* V9 ldxa */
4945 save_state(dc);
4946 gen_ld_asi(dc, cpu_val, cpu_addr, insn, 8, 0);
4947 break;
4948 case 0x2d: /* V9 prefetch, no effect */
4949 goto skip_move;
4950 case 0x30: /* V9 ldfa */
4951 if (gen_trap_ifnofpu(dc)) {
4952 goto jmp_insn;
4953 }
4954 save_state(dc);
4955 gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
4956 gen_update_fprs_dirty(dc, rd);
4957 goto skip_move;
4958 case 0x33: /* V9 lddfa */
4959 if (gen_trap_ifnofpu(dc)) {
4960 goto jmp_insn;
4961 }
4962 save_state(dc);
4963 gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
4964 gen_update_fprs_dirty(dc, DFPREG(rd));
4965 goto skip_move;
4966 case 0x3d: /* V9 prefetcha, no effect */
4967 goto skip_move;
4968 case 0x32: /* V9 ldqfa */
4969 CHECK_FPU_FEATURE(dc, FLOAT128);
4970 if (gen_trap_ifnofpu(dc)) {
4971 goto jmp_insn;
4972 }
4973 save_state(dc);
4974 gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
4975 gen_update_fprs_dirty(dc, QFPREG(rd));
4976 goto skip_move;
4977 #endif
4978 default:
4979 goto illegal_insn;
4980 }
4981 gen_store_gpr(dc, rd, cpu_val);
4982 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4983 skip_move: ;
4984 #endif
4985 } else if (xop >= 0x20 && xop < 0x24) {
4986 TCGv t0;
4987
4988 if (gen_trap_ifnofpu(dc)) {
4989 goto jmp_insn;
4990 }
4991 save_state(dc);
4992 switch (xop) {
4993 case 0x20: /* ldf, load fpreg */
4994 gen_address_mask(dc, cpu_addr);
4995 t0 = get_temp_tl(dc);
4996 tcg_gen_qemu_ld32u(dc->uc, t0, cpu_addr, dc->mem_idx);
4997 cpu_dst_32 = gen_dest_fpr_F(dc);
4998 tcg_gen_trunc_tl_i32(tcg_ctx, cpu_dst_32, t0);
4999 gen_store_fpr_F(dc, rd, cpu_dst_32);
5000 break;
5001 case 0x21: /* ldfsr, V9 ldxfsr */
5002 #ifdef TARGET_SPARC64
5003 gen_address_mask(dc, cpu_addr);
5004 if (rd == 1) {
5005 TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx);
5006 tcg_gen_qemu_ld64(dc->uc, t64, cpu_addr, dc->mem_idx);
5007 gen_helper_ldxfsr(tcg_ctx, tcg_ctx->cpu_env, t64);
5008 tcg_temp_free_i64(tcg_ctx, t64);
5009 break;
5010 }
5011 #endif
5012 cpu_dst_32 = get_temp_i32(dc);
5013 t0 = get_temp_tl(dc);
5014 tcg_gen_qemu_ld32u(dc->uc, t0, cpu_addr, dc->mem_idx);
5015 tcg_gen_trunc_tl_i32(tcg_ctx, cpu_dst_32, t0);
5016 gen_helper_ldfsr(tcg_ctx, tcg_ctx->cpu_env, cpu_dst_32);
5017 break;
5018 case 0x22: /* ldqf, load quad fpreg */
5019 {
5020 TCGv_i32 r_const;
5021
5022 CHECK_FPU_FEATURE(dc, FLOAT128);
5023 r_const = tcg_const_i32(tcg_ctx, dc->mem_idx);
5024 gen_address_mask(dc, cpu_addr);
5025 gen_helper_ldqf(tcg_ctx, tcg_ctx->cpu_env, cpu_addr, r_const);
5026 tcg_temp_free_i32(tcg_ctx, r_const);
5027 gen_op_store_QT0_fpr(dc, QFPREG(rd));
5028 gen_update_fprs_dirty(dc, QFPREG(rd));
5029 }
5030 break;
5031 case 0x23: /* lddf, load double fpreg */
5032 gen_address_mask(dc, cpu_addr);
5033 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5034 tcg_gen_qemu_ld64(dc->uc, cpu_dst_64, cpu_addr, dc->mem_idx);
5035 gen_store_fpr_D(dc, rd, cpu_dst_64);
5036 break;
5037 default:
5038 goto illegal_insn;
5039 }
5040 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
5041 xop == 0xe || xop == 0x1e) {
5042 TCGv cpu_val = gen_load_gpr(dc, rd);
5043
5044 switch (xop) {
5045 case 0x4: /* st, store word */
5046 gen_address_mask(dc, cpu_addr);
5047 tcg_gen_qemu_st32(dc->uc, cpu_val, cpu_addr, dc->mem_idx);
5048 break;
5049 case 0x5: /* stb, store byte */
5050 gen_address_mask(dc, cpu_addr);
5051 tcg_gen_qemu_st8(dc->uc, cpu_val, cpu_addr, dc->mem_idx);
5052 break;
5053 case 0x6: /* sth, store halfword */
5054 gen_address_mask(dc, cpu_addr);
5055 tcg_gen_qemu_st16(dc->uc, cpu_val, cpu_addr, dc->mem_idx);
5056 break;
5057 case 0x7: /* std, store double word */
5058 if (rd & 1)
5059 goto illegal_insn;
5060 else {
5061 TCGv_i32 r_const;
5062 TCGv_i64 t64;
5063 TCGv lo;
5064
5065 save_state(dc);
5066 gen_address_mask(dc, cpu_addr);
5067 r_const = tcg_const_i32(tcg_ctx, 7);
5068 /* XXX remove alignment check */
5069 gen_helper_check_align(tcg_ctx, tcg_ctx->cpu_env, cpu_addr, r_const);
5070 tcg_temp_free_i32(tcg_ctx, r_const);
5071 lo = gen_load_gpr(dc, rd + 1);
5072
5073 t64 = tcg_temp_new_i64(tcg_ctx);
5074 tcg_gen_concat_tl_i64(tcg_ctx, t64, lo, cpu_val);
5075 tcg_gen_qemu_st64(dc->uc, t64, cpu_addr, dc->mem_idx);
5076 tcg_temp_free_i64(tcg_ctx, t64);
5077 }
5078 break;
5079 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5080 case 0x14: /* sta, V9 stwa, store word alternate */
5081 #ifndef TARGET_SPARC64
5082 if (IS_IMM)
5083 goto illegal_insn;
5084 if (!supervisor(dc))
5085 goto priv_insn;
5086 #endif
5087 save_state(dc);
5088 gen_st_asi(dc, cpu_val, cpu_addr, insn, 4);
5089 dc->npc = DYNAMIC_PC;
5090 break;
5091 case 0x15: /* stba, store byte alternate */
5092 #ifndef TARGET_SPARC64
5093 if (IS_IMM)
5094 goto illegal_insn;
5095 if (!supervisor(dc))
5096 goto priv_insn;
5097 #endif
5098 save_state(dc);
5099 gen_st_asi(dc, cpu_val, cpu_addr, insn, 1);
5100 dc->npc = DYNAMIC_PC;
5101 break;
5102 case 0x16: /* stha, store halfword alternate */
5103 #ifndef TARGET_SPARC64
5104 if (IS_IMM)
5105 goto illegal_insn;
5106 if (!supervisor(dc))
5107 goto priv_insn;
5108 #endif
5109 save_state(dc);
5110 gen_st_asi(dc, cpu_val, cpu_addr, insn, 2);
5111 dc->npc = DYNAMIC_PC;
5112 break;
5113 case 0x17: /* stda, store double word alternate */
5114 #ifndef TARGET_SPARC64
5115 if (IS_IMM)
5116 goto illegal_insn;
5117 if (!supervisor(dc))
5118 goto priv_insn;
5119 #endif
5120 if (rd & 1)
5121 goto illegal_insn;
5122 else {
5123 save_state(dc);
5124 gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
5125 }
5126 break;
5127 #endif
5128 #ifdef TARGET_SPARC64
5129 case 0x0e: /* V9 stx */
5130 gen_address_mask(dc, cpu_addr);
5131 tcg_gen_qemu_st64(dc->uc, cpu_val, cpu_addr, dc->mem_idx);
5132 break;
5133 case 0x1e: /* V9 stxa */
5134 save_state(dc);
5135 gen_st_asi(dc, cpu_val, cpu_addr, insn, 8);
5136 dc->npc = DYNAMIC_PC;
5137 break;
5138 #endif
5139 default:
5140 goto illegal_insn;
5141 }
5142 } else if (xop > 0x23 && xop < 0x28) {
5143 if (gen_trap_ifnofpu(dc)) {
5144 goto jmp_insn;
5145 }
5146 save_state(dc);
5147 switch (xop) {
5148 case 0x24: /* stf, store fpreg */
5149 {
5150 TCGv t = get_temp_tl(dc);
5151 gen_address_mask(dc, cpu_addr);
5152 cpu_src1_32 = gen_load_fpr_F(dc, rd);
5153 tcg_gen_ext_i32_tl(tcg_ctx, t, cpu_src1_32);
5154 tcg_gen_qemu_st32(dc->uc, t, cpu_addr, dc->mem_idx);
5155 }
5156 break;
5157 case 0x25: /* stfsr, V9 stxfsr */
5158 {
5159 TCGv t = get_temp_tl(dc);
5160
5161 tcg_gen_ld_tl(tcg_ctx, t, tcg_ctx->cpu_env, offsetof(CPUSPARCState, fsr));
5162 #ifdef TARGET_SPARC64
5163 gen_address_mask(dc, cpu_addr);
5164 if (rd == 1) {
5165 tcg_gen_qemu_st64(dc->uc, t, cpu_addr, dc->mem_idx);
5166 break;
5167 }
5168 #endif
5169 tcg_gen_qemu_st32(dc->uc, t, cpu_addr, dc->mem_idx);
5170 }
5171 break;
5172 case 0x26:
5173 #ifdef TARGET_SPARC64
5174 /* V9 stqf, store quad fpreg */
5175 {
5176 TCGv_i32 r_const;
5177
5178 CHECK_FPU_FEATURE(dc, FLOAT128);
5179 gen_op_load_fpr_QT0(dc, QFPREG(rd));
5180 r_const = tcg_const_i32(tcg_ctx, dc->mem_idx);
5181 gen_address_mask(dc, cpu_addr);
5182 gen_helper_stqf(tcg_ctx, tcg_ctx->cpu_env, cpu_addr, r_const);
5183 tcg_temp_free_i32(tcg_ctx, r_const);
5184 }
5185 break;
5186 #else /* !TARGET_SPARC64 */
5187 /* stdfq, store floating point queue */
5188 #if defined(CONFIG_USER_ONLY)
5189 goto illegal_insn;
5190 #else
5191 if (!supervisor(dc))
5192 goto priv_insn;
5193 if (gen_trap_ifnofpu(dc)) {
5194 goto jmp_insn;
5195 }
5196 goto nfq_insn;
5197 #endif
5198 #endif
5199 case 0x27: /* stdf, store double fpreg */
5200 gen_address_mask(dc, cpu_addr);
5201 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5202 tcg_gen_qemu_st64(dc->uc, cpu_src1_64, cpu_addr, dc->mem_idx);
5203 break;
5204 default:
5205 goto illegal_insn;
5206 }
5207 } else if (xop > 0x33 && xop < 0x3f) {
5208 save_state(dc);
5209 switch (xop) {
5210 #ifdef TARGET_SPARC64
5211 case 0x34: /* V9 stfa */
5212 if (gen_trap_ifnofpu(dc)) {
5213 goto jmp_insn;
5214 }
5215 gen_stf_asi(dc, cpu_addr, insn, 4, rd);
5216 break;
5217 case 0x36: /* V9 stqfa */
5218 {
5219 TCGv_i32 r_const;
5220
5221 CHECK_FPU_FEATURE(dc, FLOAT128);
5222 if (gen_trap_ifnofpu(dc)) {
5223 goto jmp_insn;
5224 }
5225 r_const = tcg_const_i32(tcg_ctx, 7);
5226 gen_helper_check_align(tcg_ctx, tcg_ctx->cpu_env, cpu_addr, r_const);
5227 tcg_temp_free_i32(tcg_ctx, r_const);
5228 gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5229 }
5230 break;
5231 case 0x37: /* V9 stdfa */
5232 if (gen_trap_ifnofpu(dc)) {
5233 goto jmp_insn;
5234 }
5235 gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5236 break;
5237 case 0x3e: /* V9 casxa */
5238 rs2 = GET_FIELD(insn, 27, 31);
5239 cpu_src2 = gen_load_gpr(dc, rs2);
5240 gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
5241 break;
5242 #else
5243 case 0x34: /* stc */
5244 case 0x35: /* stcsr */
5245 case 0x36: /* stdcq */
5246 case 0x37: /* stdc */
5247 goto ncp_insn;
5248 #endif
5249 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5250 case 0x3c: /* V9 or LEON3 casa */
5251 #ifndef TARGET_SPARC64
5252 CHECK_IU_FEATURE(dc, CASA);
5253 if (IS_IMM) {
5254 goto illegal_insn;
5255 }
5256 if (!supervisor(dc)) {
5257 goto priv_insn;
5258 }
5259 #endif
5260 rs2 = GET_FIELD(insn, 27, 31);
5261 cpu_src2 = gen_load_gpr(dc, rs2);
5262 gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
5263 break;
5264 #endif
5265 default:
5266 goto illegal_insn;
5267 }
5268 } else {
5269 goto illegal_insn;
5270 }
5271 }
5272 break;
5273 }
5274 /* default case for non jump instructions */
5275 if (dc->npc == DYNAMIC_PC) {
5276 dc->pc = DYNAMIC_PC;
5277 gen_op_next_insn(dc);
5278 } else if (dc->npc == JUMP_PC) {
5279 /* we can do a static jump */
5280 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], *(TCGv *)tcg_ctx->cpu_cond);
5281 dc->is_br = 1;
5282 } else {
5283 dc->pc = dc->npc;
5284 dc->npc = dc->npc + 4;
5285 }
5286 jmp_insn:
5287 goto egress;
5288 illegal_insn:
5289 {
5290 TCGv_i32 r_const;
5291
5292 save_state(dc);
5293 r_const = tcg_const_i32(tcg_ctx, TT_ILL_INSN);
5294 gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, r_const);
5295 tcg_temp_free_i32(tcg_ctx, r_const);
5296 dc->is_br = 1;
5297 }
5298 goto egress;
5299 unimp_flush:
5300 {
5301 TCGv_i32 r_const;
5302
5303 save_state(dc);
5304 r_const = tcg_const_i32(tcg_ctx, TT_UNIMP_FLUSH);
5305 gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, r_const);
5306 tcg_temp_free_i32(tcg_ctx, r_const);
5307 dc->is_br = 1;
5308 }
5309 goto egress;
5310 #if !defined(CONFIG_USER_ONLY)
5311 priv_insn:
5312 {
5313 TCGv_i32 r_const;
5314
5315 save_state(dc);
5316 r_const = tcg_const_i32(tcg_ctx, TT_PRIV_INSN);
5317 gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, r_const);
5318 tcg_temp_free_i32(tcg_ctx, r_const);
5319 dc->is_br = 1;
5320 }
5321 goto egress;
5322 #endif
5323 nfpu_insn:
5324 save_state(dc);
5325 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
5326 dc->is_br = 1;
5327 goto egress;
5328 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5329 nfq_insn:
5330 save_state(dc);
5331 gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
5332 dc->is_br = 1;
5333 goto egress;
5334 #endif
5335 #ifndef TARGET_SPARC64
5336 ncp_insn:
5337 {
5338 TCGv r_const;
5339
5340 save_state(dc);
5341 r_const = tcg_const_i32(tcg_ctx, TT_NCP_INSN);
5342 gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, r_const);
5343 tcg_temp_free(tcg_ctx, r_const);
5344 dc->is_br = 1;
5345 }
5346 goto egress;
5347 #endif
5348 egress:
5349 if (dc->n_t32 != 0) {
5350 int i;
5351 for (i = dc->n_t32 - 1; i >= 0; --i) {
5352 tcg_temp_free_i32(tcg_ctx, dc->t32[i]);
5353 }
5354 dc->n_t32 = 0;
5355 }
5356 if (dc->n_ttl != 0) {
5357 int i;
5358 for (i = dc->n_ttl - 1; i >= 0; --i) {
5359 tcg_temp_free(tcg_ctx, dc->ttl[i]);
5360 }
5361 dc->n_ttl = 0;
5362 }
5363 }
5364
/* Translate one block of SPARC guest code starting at tb->pc into TCG ops.
   When 'spc' is true (search-PC mode) it additionally records, per generated
   op, the guest pc/npc and icount so host state can be mapped back to guest
   state later (consumed by restore_state_to_opc).  Unicorn additions: the
   "until address" (uc->addr_end) stops translation with a power-down helper,
   and UC_HOOK_BLOCK tracing is emitted at block entry. */
static inline void gen_intermediate_code_internal(SPARCCPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool spc)
{
    CPUState *cs = CPU(cpu);
    CPUSPARCState *env = &cpu->env;
    target_ulong pc_start, last_pc;
    uint16_t *gen_opc_end;
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    int j, lj = -1;
    int num_insns = 0;
    int max_insns;
    unsigned int insn;
    TCGContext *tcg_ctx = env->uc->tcg_ctx;
    bool block_full = false;

    /* Seed the disassembly context from the TB: pc/npc pair (SPARC delay
       slots), condition-code state, MMU index and FPU/address-mask flags
       are all encoded in tb->flags/cs_base by the code that built the TB. */
    memset(dc, 0, sizeof(DisasContext));
    dc->uc = env->uc;
    dc->tb = tb;
    pc_start = tb->pc;
    dc->pc = pc_start;
    last_pc = dc->pc;
    dc->npc = (target_ulong) tb->cs_base;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->mem_idx = cpu_mmu_index(env);
    dc->def = env->def;
    dc->fpu_enabled = tb_fpu_enabled(tb->flags);
    dc->address_mask_32bit = tb_am_enabled(tb->flags);
    dc->singlestep = (cs->singlestep_enabled); // || singlestep);
    gen_opc_end = tcg_ctx->gen_opc_buf + OPC_MAX_SIZE;


    // early check to see if the address of this block is the until address
    if (pc_start == env->uc->addr_end) {
        gen_tb_start(tcg_ctx);
        gen_helper_power_down(tcg_ctx, tcg_ctx->cpu_env);
        goto done_generating;
    }

    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    // Unicorn: early check to see if the address of this block is the until address
    /* NOTE(review): this second check is identical to the one above
       (tb->pc == pc_start), so it appears unreachable; it differs only in
       calling save_state() first.  Presumably the first check should also
       save_state() before raising power_down -- confirm against upstream
       Unicorn before consolidating. */
    if (tb->pc == env->uc->addr_end) {
        gen_tb_start(tcg_ctx);
        save_state(dc);
        gen_helper_power_down(tcg_ctx, tcg_ctx->cpu_env);
        goto done_generating;
    }

    // Unicorn: trace this block on request
    // Only hook this block if it is not broken from previous translation due to
    // full translation cache
    if (!env->uc->block_full && HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_BLOCK, pc_start)) {
        // save block address to see if we need to patch block size later
        env->uc->block_addr = pc_start;
        env->uc->size_arg = tcg_ctx->gen_opparam_buf - tcg_ctx->gen_opparam_ptr + 1;
        /* 0xf8f8f8f8 is a placeholder size argument, patched once the real
           block size is known. */
        gen_uc_tracecode(tcg_ctx, 0xf8f8f8f8, UC_HOOK_BLOCK_IDX, env->uc, pc_start);
    }

    gen_tb_start(tcg_ctx);
    do {
        /* Debug breakpoints: emit a debug exception and end the block. */
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    if (dc->pc != pc_start)
                        save_state(dc);
                    gen_helper_debug(tcg_ctx, tcg_ctx->cpu_env);
                    tcg_gen_exit_tb(tcg_ctx, 0);
                    dc->is_br = 1;
                    goto exit_gen_loop;
                }
            }
        }
        /* Search-PC mode: pad the instr_start table up to the current op
           index and record pc/npc/icount for this instruction. */
        if (spc) {
            qemu_log("Search PC...\n");
            j = tcg_ctx->gen_opc_ptr - tcg_ctx->gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx->gen_opc_instr_start[lj++] = 0;
                tcg_ctx->gen_opc_pc[lj] = dc->pc;
                tcg_ctx->gen_opc_npc[lj] = dc->npc;
                tcg_ctx->gen_opc_instr_start[lj] = 1;
                tcg_ctx->gen_opc_icount[lj] = num_insns;
            }
        }
        //if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
        //    gen_io_start();
        // Unicorn: end address tells us to stop emulation
        if (dc->pc == dc->uc->addr_end) {
            save_state(dc);
            gen_helper_power_down(tcg_ctx, tcg_ctx->cpu_env);
            break;
        } else {
            last_pc = dc->pc;
            insn = cpu_ldl_code(env, dc->pc);
        }

        disas_sparc_insn(dc, insn, true);
        num_insns++;

        if (dc->is_br)
            break;
        /* if the next PC is different, we abort now */
        if (dc->pc != (last_pc + 4))
            break;

        /* if we reach a page boundary, we stop generation so that the
           PC of a TT_TFAULT exception is always in the right page */
        if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        /* if single step mode, we generate only one instruction and
           generate an exception */
        if (dc->singlestep) {
            break;
        }
    } while ((tcg_ctx->gen_opc_ptr < gen_opc_end) &&
             (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
             num_insns < max_insns);

    /* if too long translation, save this info */
    if (tcg_ctx->gen_opc_ptr >= gen_opc_end || num_insns >= max_insns)
        block_full = true;

 exit_gen_loop:
    //if (tb->cflags & CF_LAST_IO) {
    //    gen_io_end();
    //}
    /* Close the block: chain directly when both pc and npc are static,
       otherwise materialize them into the CPU state and exit to the
       main loop. */
    if (!dc->is_br) {
        if (dc->pc != DYNAMIC_PC &&
            (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
        } else {
            if (dc->pc != DYNAMIC_PC) {
                tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, dc->pc);
            }
            save_npc(dc);
            tcg_gen_exit_tb(tcg_ctx, 0);
        }
    }

 done_generating:
    gen_tb_end(tcg_ctx, tb, num_insns);
    *tcg_ctx->gen_opc_ptr = INDEX_op_end;
    if (spc) {
        /* Zero-fill the tail of the instr_start table and remember the
           two possible jump targets for JUMP_PC reconstruction. */
        j = tcg_ctx->gen_opc_ptr - tcg_ctx->gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx->gen_opc_instr_start[lj++] = 0;
#if 0
        log_page_dump();
#endif
        tcg_ctx->gen_opc_jump_pc[0] = dc->jump_pc[0];
        tcg_ctx->gen_opc_jump_pc[1] = dc->jump_pc[1];
    } else {
        tb->size = last_pc + 4 - pc_start;
        tb->icount = num_insns;
    }

    /* Propagate "translation cache full" so the next block's entry hook
       knows this block was cut short. */
    env->uc->block_full = block_full;
}
5530
/* Standard translation entry point: emit TCG ops for the block at tb->pc
   without recording per-op PC metadata. */
void gen_intermediate_code(CPUSPARCState * env, TranslationBlock * tb)
{
    SPARCCPU *cpu = sparc_env_get_cpu(env);

    gen_intermediate_code_internal(cpu, tb, false);
}
5535
/* Search-PC translation entry point: same as gen_intermediate_code, but
   records per-op guest pc/npc so restore_state_to_opc can map back. */
void gen_intermediate_code_pc(CPUSPARCState * env, TranslationBlock * tb)
{
    SPARCCPU *cpu = sparc_env_get_cpu(env);

    gen_intermediate_code_internal(cpu, tb, true);
}
5540
/* Register every SPARC TCG global (env pointer, register-window pointer,
   condition codes, pc/npc, window/global registers, FPU registers, ...)
   with the per-instance TCG context.
   Unicorn twist: the TCGv handles live in heap-allocated slots hung off
   tcg_ctx; those slots are allocated only on the first call, guarded by
   uc->init_tcg, while the globals themselves are created on every call.
   NOTE(review): some registrations (xcc/asi/fprs, cc_op, psr, softint and
   the cpu_fpr loop) are NOT guarded by init_tcg and thus re-run on
   re-initialization -- presumably safe because the TCG context is reset
   between runs; confirm against upstream Unicorn. */
void gen_intermediate_code_init(CPUSPARCState *env)
{
    TCGContext *tcg_ctx = env->uc->tcg_ctx;
    struct uc_struct *uc = env->uc;
    unsigned int i;
    static const char * const gregnames[8] = {
        NULL, // g0 not used
        "g1",
        "g2",
        "g3",
        "g4",
        "g5",
        "g6",
        "g7",
    };
    /* FPU registers are modeled as 32 x 64-bit double-precision pairs,
       hence the even-numbered names (TARGET_DPREGS entries). */
    static const char * const fregnames[32] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };

    /* init various static tables */
    tcg_ctx->cpu_env = tcg_global_reg_new_ptr(tcg_ctx, TCG_AREG0, "env");
    tcg_ctx->cpu_regwptr = tcg_global_mem_new_ptr(tcg_ctx, TCG_AREG0,
                                                  offsetof(CPUSPARCState, regwptr),
                                                  "regwptr");
#ifdef TARGET_SPARC64
    /* SPARC64-only state: extended condition codes, ASI, FPRS, and the
       hypervisor/tick compare registers. */
    tcg_ctx->cpu_xcc = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, xcc),
                                              "xcc");
    tcg_ctx->cpu_asi = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, asi),
                                              "asi");
    tcg_ctx->cpu_fprs = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, fprs),
                                               "fprs");

    /* Each target_ulong-sized global gets a heap slot on first init;
       the slot is then refilled with a fresh TCG global every init. */
    if (!uc->init_tcg)
        tcg_ctx->cpu_gsr = g_malloc0(sizeof(TCGv));
    *(TCGv *)tcg_ctx->cpu_gsr = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, gsr),
                                                   "gsr");

    if (!uc->init_tcg)
        tcg_ctx->cpu_tick_cmpr = g_malloc0(sizeof(TCGv));
    *(TCGv *)tcg_ctx->cpu_tick_cmpr = tcg_global_mem_new(tcg_ctx, TCG_AREG0,
                                                         offsetof(CPUSPARCState, tick_cmpr),
                                                         "tick_cmpr");

    if (!uc->init_tcg)
        tcg_ctx->cpu_stick_cmpr = g_malloc0(sizeof(TCGv));
    *(TCGv *)tcg_ctx->cpu_stick_cmpr = tcg_global_mem_new(tcg_ctx, TCG_AREG0,
                                                          offsetof(CPUSPARCState, stick_cmpr),
                                                          "stick_cmpr");

    if (!uc->init_tcg)
        tcg_ctx->cpu_hstick_cmpr = g_malloc0(sizeof(TCGv));
    *(TCGv *)tcg_ctx->cpu_hstick_cmpr = tcg_global_mem_new(tcg_ctx, TCG_AREG0,
                                                           offsetof(CPUSPARCState, hstick_cmpr),
                                                           "hstick_cmpr");

    if (!uc->init_tcg)
        tcg_ctx->cpu_hintp = g_malloc0(sizeof(TCGv));
    *(TCGv *)tcg_ctx->cpu_hintp = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, hintp),
                                                     "hintp");

    if (!uc->init_tcg)
        tcg_ctx->cpu_htba = g_malloc0(sizeof(TCGv));
    *(TCGv *)tcg_ctx->cpu_htba = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, htba),
                                                    "htba");

    if (!uc->init_tcg)
        tcg_ctx->cpu_hver = g_malloc0(sizeof(TCGv));
    *(TCGv *)tcg_ctx->cpu_hver = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, hver),
                                                    "hver");

    if (!uc->init_tcg)
        tcg_ctx->cpu_ssr = g_malloc0(sizeof(TCGv));
    *(TCGv *)tcg_ctx->cpu_ssr = tcg_global_mem_new(tcg_ctx, TCG_AREG0,
                                                   offsetof(CPUSPARCState, ssr), "ssr");

    if (!uc->init_tcg)
        tcg_ctx->cpu_ver = g_malloc0(sizeof(TCGv));
    *(TCGv *)tcg_ctx->cpu_ver = tcg_global_mem_new(tcg_ctx, TCG_AREG0,
                                                   offsetof(CPUSPARCState, version), "ver");

    tcg_ctx->cpu_softint = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0,
                                                  offsetof(CPUSPARCState, softint),
                                                  "softint");
#else
    /* SPARC32-only: window invalid mask. */
    if (!uc->init_tcg)
        tcg_ctx->cpu_wim = g_malloc0(sizeof(TCGv));
    *(TCGv *)tcg_ctx->cpu_wim = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, wim),
                                                   "wim");
#endif

    /* Common state: branch condition, condition-code inputs/outputs,
       FSR, pc/npc and the Y multiply/divide register. */
    if (!uc->init_tcg)
        tcg_ctx->cpu_cond = g_malloc0(sizeof(TCGv));
    *(TCGv *)tcg_ctx->cpu_cond = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, cond),
                                                    "cond");

    if (!uc->init_tcg)
        tcg_ctx->cpu_cc_src = g_malloc0(sizeof(TCGv));
    *((TCGv *)tcg_ctx->cpu_cc_src) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, cc_src),
                                                        "cc_src");

    if (!uc->init_tcg)
        tcg_ctx->cpu_cc_src2 = g_malloc0(sizeof(TCGv));
    *((TCGv *)tcg_ctx->cpu_cc_src2) = tcg_global_mem_new(tcg_ctx, TCG_AREG0,
                                                         offsetof(CPUSPARCState, cc_src2),
                                                         "cc_src2");

    if (!uc->init_tcg)
        tcg_ctx->cpu_cc_dst = g_malloc0(sizeof(TCGv));
    *(TCGv *)tcg_ctx->cpu_cc_dst = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, cc_dst),
                                                      "cc_dst");

    tcg_ctx->cpu_cc_op = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, cc_op),
                                                "cc_op");
    tcg_ctx->cpu_psr = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, psr),
                                              "psr");

    if (!uc->init_tcg)
        tcg_ctx->cpu_fsr = g_malloc0(sizeof(TCGv));
    *((TCGv *)tcg_ctx->cpu_fsr) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, fsr),
                                                     "fsr");

    if (!uc->init_tcg)
        tcg_ctx->sparc_cpu_pc = g_malloc0(sizeof(TCGv));
    *(TCGv *)tcg_ctx->sparc_cpu_pc = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, pc),
                                                        "pc");

    if (!uc->init_tcg)
        tcg_ctx->cpu_npc = g_malloc0(sizeof(TCGv));
    *(TCGv *)tcg_ctx->cpu_npc = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, npc),
                                                   "npc");

    if (!uc->init_tcg)
        tcg_ctx->cpu_y = g_malloc0(sizeof(TCGv));
    *(TCGv *)tcg_ctx->cpu_y = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, y), "y");
#ifndef CONFIG_USER_ONLY
    if (!uc->init_tcg)
        tcg_ctx->cpu_tbr = g_malloc0(sizeof(TCGv));
    *(TCGv *)tcg_ctx->cpu_tbr = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, tbr),
                                                   "tbr");
#endif
    /* Global registers g0..g7: g0 is hardwired to zero, so slot 0 keeps a
       NULL name and is never registered by the translator's accessors.
       Unlike the scalars above, the whole loop is first-init-only. */
    if (!uc->init_tcg) {
        for (i = 0; i < 8; i++) {
            tcg_ctx->cpu_gregs[i] = g_malloc0(sizeof(TCGv));
            *((TCGv *)tcg_ctx->cpu_gregs[i]) = tcg_global_mem_new(tcg_ctx, TCG_AREG0,
                                                                  offsetof(CPUSPARCState, gregs[i]),
                                                                  gregnames[i]);
        }
    }

    for (i = 0; i < TARGET_DPREGS; i++) {
        tcg_ctx->cpu_fpr[i] = tcg_global_mem_new_i64(tcg_ctx, TCG_AREG0,
                                                     offsetof(CPUSPARCState, fpr[i]),
                                                     fregnames[i]);
    }

    /* Mark the one-time heap slots as allocated for subsequent calls. */
    uc->init_tcg = true;
}
5701
/* Restore env->npc from the per-op metadata recorded during search-PC
   translation (gen_intermediate_code_pc).
   gen_opc_npc[pc_pos] holds either a concrete next-PC value or one of the
   DYNAMIC_PC/JUMP_PC sentinels defined at the top of this file.
   'tb' is unused here but kept for the common restore_state_to_opc
   interface shared across targets. */
void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb, int pc_pos)
{
    TCGContext *tcg_ctx = env->uc->tcg_ctx;
    target_ulong npc;

    npc = tcg_ctx->gen_opc_npc[pc_pos];
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored in env->npc by the generated code */
    } else if (npc == JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = tcg_ctx->gen_opc_jump_pc[0];
        } else {
            env->npc = tcg_ctx->gen_opc_jump_pc[1];
        }
    } else {
        env->npc = npc;
    }
}
5720