1 /*
2 * RX translation
3 *
4 * Copyright (c) 2019 Yoshinori Sato
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2 or later, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19 #include "qemu/osdep.h"
20 #include "qemu/bswap.h"
21 #include "qemu/qemu-print.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "tcg/tcg-op.h"
25 #include "exec/helper-proto.h"
26 #include "exec/helper-gen.h"
27 #include "exec/translator.h"
28 #include "exec/log.h"
29
30 #define HELPER_H "helper.h"
31 #include "exec/helper-info.c.inc"
32 #undef HELPER_H
33
34
/* Per-translation-block disassembly state. */
typedef struct DisasContext {
    DisasContextBase base;
    /* CPU state pointer, used for reading instruction bytes at translate time. */
    CPURXState *env;
    uint32_t pc;        /* address of the instruction currently being translated */
    uint32_t tb_flags;  /* cached PSW bits (PM, U, ...) valid for this TB */
} DisasContext;
41
/* A comparison to be generated: "value <cond> 0". */
typedef struct DisasCompare {
    TCGv value;   /* the value compared against zero */
    TCGv temp;    /* scratch temporary, allocated by the caller */
    TCGCond cond;
} DisasCompare;
47
/* Return the mnemonic for control register @cr, or "illegal" if out of range. */
const char *rx_crname(uint8_t cr)
{
    static const char *names[16] = {
        [0] = "psw",  [1] = "pc",  [2] = "usp",  [3] = "fpsw",
        [4] = "",     [5] = "",    [6] = "",     [7] = "",
        [8] = "bpsw", [9] = "bpc", [10] = "isp", [11] = "fintv",
        [12] = "intb", [13] = "",  [14] = "",    [15] = ""
    };

    return cr < 16 ? names[cr] : "illegal";
}
59
60 /* Target-specific values for dc->base.is_jmp. */
61 #define DISAS_JUMP DISAS_TARGET_0
62 #define DISAS_UPDATE DISAS_TARGET_1
63 #define DISAS_EXIT DISAS_TARGET_2
64
65 /* global register indexes */
66 static TCGv cpu_regs[16];
67 static TCGv cpu_psw_o, cpu_psw_s, cpu_psw_z, cpu_psw_c;
68 static TCGv cpu_psw_i, cpu_psw_pm, cpu_psw_u, cpu_psw_ipl;
69 static TCGv cpu_usp, cpu_fpsw, cpu_bpsw, cpu_bpc, cpu_isp;
70 static TCGv cpu_fintv, cpu_intb, cpu_pc;
71 static TCGv_i64 cpu_acc;
72
73 #define cpu_sp cpu_regs[0]
74
75 /* decoder helper */
/*
 * Fetch instruction bytes i+1 .. n into @insn, advancing pc_next.
 * Byte k is placed at bit position 32 - k*8 (big-endian packing).
 */
static uint32_t decode_load_bytes(DisasContext *ctx, uint32_t insn,
                                  int i, int n)
{
    for (i++; i <= n; i++) {
        uint8_t b = translator_ldub(ctx->env, &ctx->base, ctx->base.pc_next++);
        insn |= b << (32 - i * 8);
    }
    return insn;
}
85
li(DisasContext * ctx,int sz)86 static uint32_t li(DisasContext *ctx, int sz)
87 {
88 target_ulong addr;
89 uint32_t tmp;
90 CPURXState *env = ctx->env;
91 addr = ctx->base.pc_next;
92
93 switch (sz) {
94 case 1:
95 ctx->base.pc_next += 1;
96 return (int8_t)translator_ldub(env, &ctx->base, addr);
97 case 2:
98 ctx->base.pc_next += 2;
99 return (int16_t)translator_lduw(env, &ctx->base, addr);
100 case 3:
101 ctx->base.pc_next += 3;
102 tmp = (int8_t)translator_ldub(env, &ctx->base, addr + 2);
103 tmp <<= 16;
104 tmp |= translator_lduw(env, &ctx->base, addr);
105 return tmp;
106 case 0:
107 ctx->base.pc_next += 4;
108 return translator_ldl(env, &ctx->base, addr);
109 default:
110 g_assert_not_reached();
111 }
112 return 0;
113 }
114
/*
 * Decode the 3-bit short branch displacement:
 * encodings 0..2 mean distances 8..10; encodings 3..7 are literal.
 */
static int bdsp_s(DisasContext *ctx, int d)
{
    return d < 3 ? d + 8 : d;
}
130
131 /* Include the auto-generated decoder. */
132 #include "decode-insns.c.inc"
133
/* Dump PC, packed PSW and all 16 general registers (4 per line). */
void rx_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    CPURXState *env = cpu_env(cs);
    int i;
    uint32_t psw;

    psw = rx_cpu_pack_psw(env);
    qemu_fprintf(f, "pc=0x%08x psw=0x%08x\n",
                 env->pc, psw);
    for (i = 0; i < 16; i += 4) {
        qemu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
                     i, env->regs[i], i + 1, env->regs[i + 1],
                     i + 2, env->regs[i + 2], i + 3, env->regs[i + 3]);
    }
}
149
/*
 * Emit a jump to @dest, chaining to the next TB directly when allowed,
 * otherwise falling back to a TB lookup.  Ends the current TB.
 */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&dc->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}
162
163 /* generic load wrapper */
/* generic load wrapper: sign-extending target-endian load of @size into @reg */
static inline void rx_gen_ld(unsigned int size, TCGv reg, TCGv mem)
{
    tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_SIGN | MO_TE);
}
168
169 /* unsigned load wrapper */
/* unsigned load wrapper: zero-extending target-endian load of @size into @reg */
static inline void rx_gen_ldu(unsigned int size, TCGv reg, TCGv mem)
{
    tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_TE);
}
174
175 /* generic store wrapper */
/* generic store wrapper: target-endian store of the low @size bits of @reg */
static inline void rx_gen_st(unsigned int size, TCGv reg, TCGv mem)
{
    tcg_gen_qemu_st_i32(reg, mem, 0, size | MO_TE);
}
180
181 /* [ri, rb] */
/* [ri, rb] : compute mem = rb + (ri << size) for register-indexed addressing */
static inline void rx_gen_regindex(DisasContext *ctx, TCGv mem,
                                   int size, int ri, int rb)
{
    tcg_gen_shli_i32(mem, cpu_regs[ri], size);
    tcg_gen_add_i32(mem, mem, cpu_regs[rb]);
}
188
189 /* dsp[reg] */
/*
 * dsp[reg] : return the effective address for load/store form @ld.
 * ld == 0: plain [reg] -- cpu_regs[reg] itself is returned, no temp used.
 * ld == 1/2: an 8/16-bit displacement follows in the insn stream; it is
 * scaled by the access size, added into @mem, and pc_next is advanced.
 */
static inline TCGv rx_index_addr(DisasContext *ctx, TCGv mem,
                                 int ld, int size, int reg)
{
    uint32_t dsp;

    switch (ld) {
    case 0:
        return cpu_regs[reg];
    case 1:
        dsp = translator_ldub(ctx->env, &ctx->base, ctx->base.pc_next) << size;
        tcg_gen_addi_i32(mem, cpu_regs[reg], dsp);
        ctx->base.pc_next += 1;
        return mem;
    case 2:
        dsp = translator_lduw(ctx->env, &ctx->base, ctx->base.pc_next) << size;
        tcg_gen_addi_i32(mem, cpu_regs[reg], dsp);
        ctx->base.pc_next += 2;
        return mem;
    default:
        g_assert_not_reached();
    }
}
212
/* Map the insn "mi" memory-operand field (b/w/l/uw/ub) to a MemOp. */
static inline MemOp mi_to_mop(unsigned mi)
{
    static const MemOp mop[5] = { MO_SB, MO_SW, MO_UL, MO_UW, MO_UB };
    tcg_debug_assert(mi < 5);
    return mop[mi];
}
219
220 /* load source operand */
/*
 * load source operand: for ld < 3 read memory (dsp[rs], size/sign per @mi)
 * into @mem and return it; for ld == 3 the operand is register rs itself.
 */
static inline TCGv rx_load_source(DisasContext *ctx, TCGv mem,
                                  int ld, int mi, int rs)
{
    TCGv addr;
    MemOp mop;
    if (ld < 3) {
        mop = mi_to_mop(mi);
        addr = rx_index_addr(ctx, mem, ld, mop & MO_SIZE, rs);
        tcg_gen_qemu_ld_i32(mem, addr, 0, mop | MO_TE);
        return mem;
    } else {
        return cpu_regs[rs];
    }
}
235
236 /* Processor mode check */
/*
 * Processor mode check: return 1 in supervisor mode (PSW.PM clear).
 * In user mode return 0, raising a privilege violation first when
 * @is_exception is set.
 */
static int is_privileged(DisasContext *ctx, int is_exception)
{
    if (!FIELD_EX32(ctx->tb_flags, PSW, PM)) {
        return 1;
    }
    if (is_exception) {
        gen_helper_raise_privilege_violation(tcg_env);
    }
    return 0;
}
248
249 /* generate QEMU condition */
/*
 * generate QEMU condition: map RX condition code @cond (0..15) onto a
 * TCG comparison of dc->value against zero.
 *
 * Flag representation used throughout this file: cpu_psw_z and
 * cpu_psw_s hold the last result (Z set <=> value == 0, S is the sign
 * bit), cpu_psw_o keeps O in its sign bit, and cpu_psw_c is a 0/1 value.
 */
static void psw_cond(DisasCompare *dc, uint32_t cond)
{
    tcg_debug_assert(cond < 16);
    switch (cond) {
    case 0: /* z */
        dc->cond = TCG_COND_EQ;
        dc->value = cpu_psw_z;
        break;
    case 1: /* nz */
        dc->cond = TCG_COND_NE;
        dc->value = cpu_psw_z;
        break;
    case 2: /* c */
        dc->cond = TCG_COND_NE;
        dc->value = cpu_psw_c;
        break;
    case 3: /* nc */
        dc->cond = TCG_COND_EQ;
        dc->value = cpu_psw_c;
        break;
    case 4: /* gtu (C& ~Z) == 1 */
    case 5: /* leu (C& ~Z) == 0 */
        /* temp = (Z clear) & C, then test temp != 0 / == 0 */
        tcg_gen_setcondi_i32(TCG_COND_NE, dc->temp, cpu_psw_z, 0);
        tcg_gen_and_i32(dc->temp, dc->temp, cpu_psw_c);
        dc->cond = (cond == 4) ? TCG_COND_NE : TCG_COND_EQ;
        dc->value = dc->temp;
        break;
    case 6: /* pz (S == 0) */
        dc->cond = TCG_COND_GE;
        dc->value = cpu_psw_s;
        break;
    case 7: /* n (S == 1) */
        dc->cond = TCG_COND_LT;
        dc->value = cpu_psw_s;
        break;
    case 8: /* ge (S^O)==0 */
    case 9: /* lt (S^O)==1 */
        /* S and O both live in the sign bit, so xor then test the sign */
        tcg_gen_xor_i32(dc->temp, cpu_psw_o, cpu_psw_s);
        dc->cond = (cond == 8) ? TCG_COND_GE : TCG_COND_LT;
        dc->value = dc->temp;
        break;
    case 10: /* gt ((S^O)|Z)==0 */
    case 11: /* le ((S^O)|Z)==1 */
        /* temp = Z-value with all bits cleared when (S^O) is set */
        tcg_gen_xor_i32(dc->temp, cpu_psw_o, cpu_psw_s);
        tcg_gen_sari_i32(dc->temp, dc->temp, 31);
        tcg_gen_andc_i32(dc->temp, cpu_psw_z, dc->temp);
        dc->cond = (cond == 10) ? TCG_COND_NE : TCG_COND_EQ;
        dc->value = dc->temp;
        break;
    case 12: /* o */
        dc->cond = TCG_COND_LT;
        dc->value = cpu_psw_o;
        break;
    case 13: /* no */
        dc->cond = TCG_COND_GE;
        dc->value = cpu_psw_o;
        break;
    case 14: /* always true */
        dc->cond = TCG_COND_ALWAYS;
        dc->value = dc->temp;
        break;
    case 15: /* always false */
        dc->cond = TCG_COND_NEVER;
        dc->value = dc->temp;
        break;
    }
}
317
/*
 * Read control register @cr into @ret.  @pc is the value to return for
 * CR 1 (PC), supplied by the caller since cpu_pc is not kept current
 * during translation.  USP/ISP are banked against SP on PSW.U.
 */
static void move_from_cr(DisasContext *ctx, TCGv ret, int cr, uint32_t pc)
{
    switch (cr) {
    case 0: /* PSW */
        gen_helper_pack_psw(ret, tcg_env);
        break;
    case 1: /* PC */
        tcg_gen_movi_i32(ret, pc);
        break;
    case 2: /* USP */
        /* When PSW.U is set, SP *is* the user stack pointer. */
        if (FIELD_EX32(ctx->tb_flags, PSW, U)) {
            tcg_gen_mov_i32(ret, cpu_sp);
        } else {
            tcg_gen_mov_i32(ret, cpu_usp);
        }
        break;
    case 3: /* FPSW */
        tcg_gen_mov_i32(ret, cpu_fpsw);
        break;
    case 8: /* BPSW */
        tcg_gen_mov_i32(ret, cpu_bpsw);
        break;
    case 9: /* BPC */
        tcg_gen_mov_i32(ret, cpu_bpc);
        break;
    case 10: /* ISP */
        /* When PSW.U is clear, SP *is* the interrupt stack pointer. */
        if (FIELD_EX32(ctx->tb_flags, PSW, U)) {
            tcg_gen_mov_i32(ret, cpu_isp);
        } else {
            tcg_gen_mov_i32(ret, cpu_sp);
        }
        break;
    case 11: /* FINTV */
        tcg_gen_mov_i32(ret, cpu_fintv);
        break;
    case 12: /* INTB */
        tcg_gen_mov_i32(ret, cpu_intb);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Unimplement control register %d", cr);
        /* Unimplement registers return 0 */
        tcg_gen_movi_i32(ret, 0);
        break;
    }
}
363
/*
 * Write @val to control register @cr.  Writes to CR >= 8 are silently
 * dropped (with a guest-error log) in user mode; writes to PC (CR 1)
 * are not supported.  A PSW write may change PSW.I/U, so the TB ends.
 */
static void move_to_cr(DisasContext *ctx, TCGv val, int cr)
{
    if (cr >= 8 && !is_privileged(ctx, 0)) {
        /* Some control registers can only be written in privileged mode. */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "disallow control register write %s", rx_crname(cr));
        return;
    }
    switch (cr) {
    case 0: /* PSW */
        gen_helper_set_psw(tcg_env, val);
        if (is_privileged(ctx, 0)) {
            /* PSW.{I,U} may be updated here. exit TB. */
            ctx->base.is_jmp = DISAS_UPDATE;
        }
        break;
    /* case 1: to PC not supported */
    case 2: /* USP */
        /* When PSW.U is set, SP *is* the user stack pointer. */
        if (FIELD_EX32(ctx->tb_flags, PSW, U)) {
            tcg_gen_mov_i32(cpu_sp, val);
        } else {
            tcg_gen_mov_i32(cpu_usp, val);
        }
        break;
    case 3: /* FPSW */
        gen_helper_set_fpsw(tcg_env, val);
        break;
    case 8: /* BPSW */
        tcg_gen_mov_i32(cpu_bpsw, val);
        break;
    case 9: /* BPC */
        tcg_gen_mov_i32(cpu_bpc, val);
        break;
    case 10: /* ISP */
        /* When PSW.U is clear, SP *is* the interrupt stack pointer. */
        if (FIELD_EX32(ctx->tb_flags, PSW, U)) {
            tcg_gen_mov_i32(cpu_isp, val);
        } else {
            tcg_gen_mov_i32(cpu_sp, val);
        }
        break;
    case 11: /* FINTV */
        tcg_gen_mov_i32(cpu_fintv, val);
        break;
    case 12: /* INTB */
        tcg_gen_mov_i32(cpu_intb, val);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Unimplement control register %d", cr);
        break;
    }
}
416
/* Pre-decrement SP by 4 and store @val as a longword. */
static void push(TCGv val)
{
    tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
    rx_gen_st(MO_32, val, cpu_sp);
}
422
/* Load a longword from SP into @ret, then post-increment SP by 4. */
static void pop(TCGv ret)
{
    rx_gen_ld(MO_32, ret, cpu_sp);
    tcg_gen_addi_i32(cpu_sp, cpu_sp, 4);
}
428
429 /* mov.<bwl> rs,dsp5[rd] */
trans_MOV_rm(DisasContext * ctx,arg_MOV_rm * a)430 static bool trans_MOV_rm(DisasContext *ctx, arg_MOV_rm *a)
431 {
432 TCGv mem;
433 mem = tcg_temp_new();
434 tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
435 rx_gen_st(a->sz, cpu_regs[a->rs], mem);
436 return true;
437 }
438
439 /* mov.<bwl> dsp5[rs],rd */
trans_MOV_mr(DisasContext * ctx,arg_MOV_mr * a)440 static bool trans_MOV_mr(DisasContext *ctx, arg_MOV_mr *a)
441 {
442 TCGv mem;
443 mem = tcg_temp_new();
444 tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz);
445 rx_gen_ld(a->sz, cpu_regs[a->rd], mem);
446 return true;
447 }
448
449 /* mov.l #uimm4,rd */
450 /* mov.l #uimm8,rd */
451 /* mov.l #imm,rd */
static bool trans_MOV_ir(DisasContext *ctx, arg_MOV_ir *a)
{
    /* Immediate load; flags are unaffected. */
    tcg_gen_movi_i32(cpu_regs[a->rd], a->imm);
    return true;
}
457
458 /* mov.<bwl> #uimm8,dsp[rd] */
459 /* mov.<bwl> #imm, dsp[rd] */
trans_MOV_im(DisasContext * ctx,arg_MOV_im * a)460 static bool trans_MOV_im(DisasContext *ctx, arg_MOV_im *a)
461 {
462 TCGv imm, mem;
463 imm = tcg_constant_i32(a->imm);
464 mem = tcg_temp_new();
465 tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
466 rx_gen_st(a->sz, imm, mem);
467 return true;
468 }
469
470 /* mov.<bwl> [ri,rb],rd */
trans_MOV_ar(DisasContext * ctx,arg_MOV_ar * a)471 static bool trans_MOV_ar(DisasContext *ctx, arg_MOV_ar *a)
472 {
473 TCGv mem;
474 mem = tcg_temp_new();
475 rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
476 rx_gen_ld(a->sz, cpu_regs[a->rd], mem);
477 return true;
478 }
479
480 /* mov.<bwl> rd,[ri,rb] */
trans_MOV_ra(DisasContext * ctx,arg_MOV_ra * a)481 static bool trans_MOV_ra(DisasContext *ctx, arg_MOV_ra *a)
482 {
483 TCGv mem;
484 mem = tcg_temp_new();
485 rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
486 rx_gen_st(a->sz, cpu_regs[a->rs], mem);
487 return true;
488 }
489
490 /* mov.<bwl> dsp[rs],dsp[rd] */
491 /* mov.<bwl> rs,dsp[rd] */
492 /* mov.<bwl> dsp[rs],rd */
493 /* mov.<bwl> rs,rd */
/*
 * General mov form: lds/ldd select register (3) vs. dsp-indexed memory
 * (<3) for source and destination respectively.
 */
static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a)
{
    TCGv tmp, mem, addr;

    if (a->lds == 3 && a->ldd == 3) {
        /* mov.<bwl> rs,rd -- register-to-register, sign-extended to @sz */
        tcg_gen_ext_i32(cpu_regs[a->rd], cpu_regs[a->rs], a->sz | MO_SIGN);
        return true;
    }

    mem = tcg_temp_new();
    if (a->lds == 3) {
        /* mov.<bwl> rs,dsp[rd] */
        addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rs);
        rx_gen_st(a->sz, cpu_regs[a->rd], addr);
    } else if (a->ldd == 3) {
        /* mov.<bwl> dsp[rs],rd */
        addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs);
        rx_gen_ld(a->sz, cpu_regs[a->rd], addr);
    } else {
        /* mov.<bwl> dsp[rs],dsp[rd] -- memory-to-memory via a temp */
        tmp = tcg_temp_new();
        addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs);
        rx_gen_ld(a->sz, tmp, addr);
        addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rd);
        rx_gen_st(a->sz, tmp, addr);
    }
    return true;
}
523
524 /* mov.<bwl> rs,[rd+] */
525 /* mov.<bwl> rs,[-rd] */
/*
 * Store with auto-adjust: ad == 1 is pre-decrement [-rd], ad == 0 is
 * post-increment [rd+].  The source is copied first so that rs == rd
 * stores the original value even when rd is adjusted.
 */
static bool trans_MOV_rp(DisasContext *ctx, arg_MOV_rp *a)
{
    TCGv val;
    val = tcg_temp_new();
    tcg_gen_mov_i32(val, cpu_regs[a->rs]);
    if (a->ad == 1) {
        tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    rx_gen_st(a->sz, val, cpu_regs[a->rd]);
    if (a->ad == 0) {
        tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    return true;
}
540
541 /* mov.<bwl> [rd+],rs */
542 /* mov.<bwl> [-rd],rs */
/*
 * Load with auto-adjust: ad == 1 pre-decrements rd, ad == 0
 * post-increments it.  The loaded value goes to a temp first so that
 * the destination register write happens after the address update.
 */
static bool trans_MOV_pr(DisasContext *ctx, arg_MOV_pr *a)
{
    TCGv val;
    val = tcg_temp_new();
    if (a->ad == 1) {
        tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    rx_gen_ld(a->sz, val, cpu_regs[a->rd]);
    if (a->ad == 0) {
        tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    tcg_gen_mov_i32(cpu_regs[a->rs], val);
    return true;
}
557
558 /* movu.<bw> dsp5[rs],rd */
559 /* movu.<bw> dsp[rs],rd */
trans_MOVU_mr(DisasContext * ctx,arg_MOVU_mr * a)560 static bool trans_MOVU_mr(DisasContext *ctx, arg_MOVU_mr *a)
561 {
562 TCGv mem;
563 mem = tcg_temp_new();
564 tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz);
565 rx_gen_ldu(a->sz, cpu_regs[a->rd], mem);
566 return true;
567 }
568
569 /* movu.<bw> rs,rd */
static bool trans_MOVU_rr(DisasContext *ctx, arg_MOVU_rr *a)
{
    /* Zero-extend (no MO_SIGN) the low @sz bits of rs into rd. */
    tcg_gen_ext_i32(cpu_regs[a->rd], cpu_regs[a->rs], a->sz);
    return true;
}
575
576 /* movu.<bw> [ri,rb],rd */
trans_MOVU_ar(DisasContext * ctx,arg_MOVU_ar * a)577 static bool trans_MOVU_ar(DisasContext *ctx, arg_MOVU_ar *a)
578 {
579 TCGv mem;
580 mem = tcg_temp_new();
581 rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
582 rx_gen_ldu(a->sz, cpu_regs[a->rd], mem);
583 return true;
584 }
585
586 /* movu.<bw> [rd+],rs */
587 /* mov.<bw> [-rd],rs */
/*
 * Zero-extending load with auto-adjust: ad == 1 pre-decrements rd,
 * ad == 0 post-increments it; result is written to rs afterwards.
 */
static bool trans_MOVU_pr(DisasContext *ctx, arg_MOVU_pr *a)
{
    TCGv val;
    val = tcg_temp_new();
    if (a->ad == 1) {
        tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    rx_gen_ldu(a->sz, val, cpu_regs[a->rd]);
    if (a->ad == 0) {
        tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    tcg_gen_mov_i32(cpu_regs[a->rs], val);
    return true;
}
602
603
604 /* pop rd */
trans_POP(DisasContext * ctx,arg_POP * a)605 static bool trans_POP(DisasContext *ctx, arg_POP *a)
606 {
607 /* mov.l [r0+], rd */
608 arg_MOV_rp mov_a;
609 mov_a.rd = 0;
610 mov_a.rs = a->rd;
611 mov_a.ad = 0;
612 mov_a.sz = MO_32;
613 trans_MOV_pr(ctx, &mov_a);
614 return true;
615 }
616
617 /* popc cr */
trans_POPC(DisasContext * ctx,arg_POPC * a)618 static bool trans_POPC(DisasContext *ctx, arg_POPC *a)
619 {
620 TCGv val;
621 val = tcg_temp_new();
622 pop(val);
623 move_to_cr(ctx, val, a->cr);
624 return true;
625 }
626
627 /* popm rd-rd2 */
/*
 * popm rd-rd2 -- pop registers rd..rd2 in ascending order.  An invalid
 * range is logged but still executed as far as the bounds allow.
 */
static bool trans_POPM(DisasContext *ctx, arg_POPM *a)
{
    int r;
    if (a->rd == 0 || a->rd >= a->rd2) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid  register ranges r%d-r%d", a->rd, a->rd2);
    }
    r = a->rd;
    while (r <= a->rd2 && r < 16) {
        pop(cpu_regs[r++]);
    }
    return true;
}
641
642
643 /* push.<bwl> rs */
static bool trans_PUSH_r(DisasContext *ctx, arg_PUSH_r *a)
{
    TCGv val;
    val = tcg_temp_new();
    /* Copy first so that pushing SP itself stores the pre-adjust value. */
    tcg_gen_mov_i32(val, cpu_regs[a->rs]);
    /* A full longword slot is always reserved, even for .b/.w pushes. */
    tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
    rx_gen_st(a->sz, val, cpu_sp);
    return true;
}
653
654 /* push.<bwl> dsp[rs] */
static bool trans_PUSH_m(DisasContext *ctx, arg_PUSH_m *a)
{
    TCGv mem, val, addr;
    mem = tcg_temp_new();
    val = tcg_temp_new();
    /* Load the operand before SP is adjusted. */
    addr = rx_index_addr(ctx, mem, a->ld, a->sz, a->rs);
    rx_gen_ld(a->sz, val, addr);
    /* A full longword slot is always reserved, even for .b/.w pushes. */
    tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
    rx_gen_st(a->sz, val, cpu_sp);
    return true;
}
666
667 /* pushc rx */
trans_PUSHC(DisasContext * ctx,arg_PUSHC * a)668 static bool trans_PUSHC(DisasContext *ctx, arg_PUSHC *a)
669 {
670 TCGv val;
671 val = tcg_temp_new();
672 move_from_cr(ctx, val, a->cr, ctx->pc);
673 push(val);
674 return true;
675 }
676
677 /* pushm rs-rs2 */
/*
 * pushm rs-rs2 -- push registers rs2..rs in descending order (so they
 * pop back in ascending order).  An invalid range is logged but still
 * executed as far as the bounds allow.
 */
static bool trans_PUSHM(DisasContext *ctx, arg_PUSHM *a)
{
    int r;

    if (a->rs == 0 || a->rs >= a->rs2) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid  register ranges r%d-r%d", a->rs, a->rs2);
    }
    r = a->rs2;
    while (r >= a->rs && r >= 0) {
        push(cpu_regs[r--]);
    }
    return true;
}
692
693 /* xchg rs,rd */
trans_XCHG_rr(DisasContext * ctx,arg_XCHG_rr * a)694 static bool trans_XCHG_rr(DisasContext *ctx, arg_XCHG_rr *a)
695 {
696 TCGv tmp;
697 tmp = tcg_temp_new();
698 tcg_gen_mov_i32(tmp, cpu_regs[a->rs]);
699 tcg_gen_mov_i32(cpu_regs[a->rs], cpu_regs[a->rd]);
700 tcg_gen_mov_i32(cpu_regs[a->rd], tmp);
701 return true;
702 }
703
704 /* xchg dsp[rs].<mi>,rd */
/*
 * Atomically exchange rd with the memory operand.  The address is
 * computed with the access size in bytes-log2: mi 0/1/2 are b/w/l
 * directly; mi 3 (uw) and 4 (ub) map to sizes 1 and 0 via "4 - mi".
 */
static bool trans_XCHG_mr(DisasContext *ctx, arg_XCHG_mr *a)
{
    TCGv mem, addr;
    mem = tcg_temp_new();
    switch (a->mi) {
    case 0: /* dsp[rs].b */
    case 1: /* dsp[rs].w */
    case 2: /* dsp[rs].l */
        addr = rx_index_addr(ctx, mem, a->ld, a->mi, a->rs);
        break;
    case 3: /* dsp[rs].uw */
    case 4: /* dsp[rs].ub */
        addr = rx_index_addr(ctx, mem, a->ld, 4 - a->mi, a->rs);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_gen_atomic_xchg_i32(cpu_regs[a->rd], addr, cpu_regs[a->rd],
                            0, mi_to_mop(a->mi));
    return true;
}
726
/* Conditionally replace rd with #imm when "psw_z <cond> 0" holds. */
static inline void stcond(TCGCond cond, int rd, int imm)
{
    TCGv zero = tcg_constant_i32(0);
    TCGv value = tcg_constant_i32(imm);

    tcg_gen_movcond_i32(cond, cpu_regs[rd], cpu_psw_z, zero,
                        value, cpu_regs[rd]);
}
736
737 /* stz #imm,rd */
static bool trans_STZ(DisasContext *ctx, arg_STZ *a)
{
    /* rd = imm if Z is set (psw_z == 0). */
    stcond(TCG_COND_EQ, a->rd, a->imm);
    return true;
}
743
744 /* stnz #imm,rd */
static bool trans_STNZ(DisasContext *ctx, arg_STNZ *a)
{
    /* rd = imm if Z is clear (psw_z != 0). */
    stcond(TCG_COND_NE, a->rd, a->imm);
    return true;
}
750
751 /* sccnd.<bwl> rd */
752 /* sccnd.<bwl> dsp:[rd] */
trans_SCCnd(DisasContext * ctx,arg_SCCnd * a)753 static bool trans_SCCnd(DisasContext *ctx, arg_SCCnd *a)
754 {
755 DisasCompare dc;
756 TCGv val, mem, addr;
757 dc.temp = tcg_temp_new();
758 psw_cond(&dc, a->cd);
759 if (a->ld < 3) {
760 val = tcg_temp_new();
761 mem = tcg_temp_new();
762 tcg_gen_setcondi_i32(dc.cond, val, dc.value, 0);
763 addr = rx_index_addr(ctx, mem, a->sz, a->ld, a->rd);
764 rx_gen_st(a->sz, val, addr);
765 } else {
766 tcg_gen_setcondi_i32(dc.cond, cpu_regs[a->rd], dc.value, 0);
767 }
768 return true;
769 }
770
771 /* rtsd #imm */
static bool trans_RTSD_i(DisasContext *ctx, arg_RTSD_i *a)
{
    /* Deallocate imm longwords of locals, then pop the return address. */
    tcg_gen_addi_i32(cpu_sp, cpu_sp, a->imm << 2);
    pop(cpu_pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
779
780 /* rtsd #imm, rd-rd2 */
static bool trans_RTSD_irr(DisasContext *ctx, arg_RTSD_irr *a)
{
    int dst;
    int adj;

    /*
     * imm counts total longwords to release; subtract the slots that
     * the register pops below will consume.  rd2 < rd is treated as a
     * range running up to r15.
     */
    if (a->rd2 >= a->rd) {
        adj = a->imm - (a->rd2 - a->rd + 1);
    } else {
        adj = a->imm - (15 - a->rd + 1);
    }

    tcg_gen_addi_i32(cpu_sp, cpu_sp, adj << 2);
    dst = a->rd;
    while (dst <= a->rd2 && dst < 16) {
        pop(cpu_regs[dst++]);
    }
    pop(cpu_pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
801
/* Generator callbacks for one- and two-source ALU operations. */
typedef void (*op2fn)(TCGv ret, TCGv arg1);
typedef void (*op3fn)(TCGv ret, TCGv arg1, TCGv arg2);
804
/* Apply a unary op: regs[dst] = opr(regs[src]). */
static inline void rx_gen_op_rr(op2fn opr, int dst, int src)
{
    opr(cpu_regs[dst], cpu_regs[src]);
}
809
/* Apply a binary op on registers: regs[dst] = opr(regs[src], regs[src2]). */
static inline void rx_gen_op_rrr(op3fn opr, int dst, int src, int src2)
{
    opr(cpu_regs[dst], cpu_regs[src], cpu_regs[src2]);
}
814
/* Apply a binary op with an immediate: regs[dst] = opr(regs[src], src2). */
static inline void rx_gen_op_irr(op3fn opr, int dst, int src, uint32_t src2)
{
    TCGv imm = tcg_constant_i32(src2);
    opr(cpu_regs[dst], cpu_regs[src], imm);
}
820
/* Apply a binary op with a register/memory source selected by @ld/@mi. */
static inline void rx_gen_op_mr(op3fn opr, DisasContext *ctx,
                                int dst, int src, int ld, int mi)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, ld, mi, src);
    opr(cpu_regs[dst], cpu_regs[dst], val);
}
829
/* ret = arg1 & arg2; Z and S track the result (soft-flag representation). */
static void rx_and(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    /* ret may alias an input, so compute into psw_s first. */
    tcg_gen_mov_i32(ret, cpu_psw_s);
}
836
837 /* and #uimm:4, rd */
838 /* and #imm, rd */
static bool trans_AND_ir(DisasContext *ctx, arg_AND_ir *a)
{
    /* rd &= imm; Z/S updated by rx_and(). */
    rx_gen_op_irr(rx_and, a->rd, a->rd, a->imm);
    return true;
}
844
845 /* and dsp[rs], rd */
846 /* and rs,rd */
static bool trans_AND_mr(DisasContext *ctx, arg_AND_mr *a)
{
    /* rd &= (rs or dsp[rs]); Z/S updated by rx_and(). */
    rx_gen_op_mr(rx_and, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
852
853 /* and rs,rs2,rd */
static bool trans_AND_rrr(DisasContext *ctx, arg_AND_rrr *a)
{
    /* rd = rs & rs2; Z/S updated by rx_and(). */
    rx_gen_op_rrr(rx_and, a->rd, a->rs, a->rs2);
    return true;
}
859
/* ret = arg1 | arg2; Z and S track the result (soft-flag representation). */
static void rx_or(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_or_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_mov_i32(ret, cpu_psw_s);
}
866
867 /* or #uimm:4, rd */
868 /* or #imm, rd */
static bool trans_OR_ir(DisasContext *ctx, arg_OR_ir *a)
{
    /* rd |= imm; Z/S updated by rx_or(). */
    rx_gen_op_irr(rx_or, a->rd, a->rd, a->imm);
    return true;
}
874
875 /* or dsp[rs], rd */
876 /* or rs,rd */
static bool trans_OR_mr(DisasContext *ctx, arg_OR_mr *a)
{
    /* rd |= (rs or dsp[rs]); Z/S updated by rx_or(). */
    rx_gen_op_mr(rx_or, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
882
883 /* or rs,rs2,rd */
static bool trans_OR_rrr(DisasContext *ctx, arg_OR_rrr *a)
{
    /* rd = rs | rs2; Z/S updated by rx_or(). */
    rx_gen_op_rrr(rx_or, a->rd, a->rs, a->rs2);
    return true;
}
889
/* ret = arg1 ^ arg2; Z and S track the result (soft-flag representation). */
static void rx_xor(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_xor_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_mov_i32(ret, cpu_psw_s);
}
896
897 /* xor #imm, rd */
static bool trans_XOR_ir(DisasContext *ctx, arg_XOR_ir *a)
{
    /* rd ^= imm; Z/S updated by rx_xor(). */
    rx_gen_op_irr(rx_xor, a->rd, a->rd, a->imm);
    return true;
}
903
904 /* xor dsp[rs], rd */
905 /* xor rs,rd */
static bool trans_XOR_mr(DisasContext *ctx, arg_XOR_mr *a)
{
    /* rd ^= (rs or dsp[rs]); Z/S updated by rx_xor(). */
    rx_gen_op_mr(rx_xor, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
911
/* Set Z/S from arg1 & arg2 without writing a destination (@ret unused). */
static void rx_tst(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
}
917
918 /* tst #imm, rd */
static bool trans_TST_ir(DisasContext *ctx, arg_TST_ir *a)
{
    /* Flags-only AND of rd with imm (rd is not written). */
    rx_gen_op_irr(rx_tst, a->rd, a->rd, a->imm);
    return true;
}
924
925 /* tst dsp[rs], rd */
926 /* tst rs, rd */
static bool trans_TST_mr(DisasContext *ctx, arg_TST_mr *a)
{
    /* Flags-only AND of rd with the rs/memory operand (rd not written). */
    rx_gen_op_mr(rx_tst, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
932
/* ret = ~arg1; Z and S track the result. */
static void rx_not(TCGv ret, TCGv arg1)
{
    tcg_gen_not_i32(ret, arg1);
    tcg_gen_mov_i32(cpu_psw_z, ret);
    tcg_gen_mov_i32(cpu_psw_s, ret);
}
939
940 /* not rd */
941 /* not rs, rd */
static bool trans_NOT_rr(DisasContext *ctx, arg_NOT_rr *a)
{
    /* rd = ~rs; Z/S updated by rx_not(). */
    rx_gen_op_rr(rx_not, a->rd, a->rs);
    return true;
}
947
/*
 * ret = -arg1.  O is set only for the INT_MIN input (the one value whose
 * negation overflows); C is set when the result is zero.  The flags on
 * arg1 are computed before ret is written in case ret aliases arg1.
 */
static void rx_neg(TCGv ret, TCGv arg1)
{
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, arg1, 0x80000000);
    tcg_gen_neg_i32(ret, arg1);
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_c, ret, 0);
    tcg_gen_mov_i32(cpu_psw_z, ret);
    tcg_gen_mov_i32(cpu_psw_s, ret);
}
956
957
958 /* neg rd */
959 /* neg rs, rd */
static bool trans_NEG_rr(DisasContext *ctx, arg_NEG_rr *a)
{
    /* rd = -rs; C/Z/S/O updated by rx_neg(). */
    rx_gen_op_rr(rx_neg, a->rd, a->rs);
    return true;
}
965
966 /* ret = arg1 + arg2 + psw_c */
/* ret = arg1 + arg2 + psw_c */
static void rx_adc(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv z = tcg_constant_i32(0);
    /* Two wide adds: first arg1 + C, then + arg2; C accumulates the carry. */
    tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, cpu_psw_c, z);
    tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, cpu_psw_s, cpu_psw_c, arg2, z);
    /* Signed overflow: O = (res ^ arg1) & ~(arg1 ^ arg2); psw_z is scratch. */
    tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
    tcg_gen_xor_i32(cpu_psw_z, arg1, arg2);
    tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    /* ret written last so it may alias either input. */
    tcg_gen_mov_i32(ret, cpu_psw_s);
}
978
979 /* adc #imm, rd */
static bool trans_ADC_ir(DisasContext *ctx, arg_ADC_ir *a)
{
    /* rd = rd + imm + C. */
    rx_gen_op_irr(rx_adc, a->rd, a->rd, a->imm);
    return true;
}
985
986 /* adc rs, rd */
static bool trans_ADC_rr(DisasContext *ctx, arg_ADC_rr *a)
{
    /* rd = rd + rs + C. */
    rx_gen_op_rrr(rx_adc, a->rd, a->rd, a->rs);
    return true;
}
992
993 /* adc dsp[rs], rd */
static bool trans_ADC_mr(DisasContext *ctx, arg_ADC_mr *a)
{
    /* mi only 2: the memory operand must be a longword (.l). */
    if (a->mi != 2) {
        return false;
    }
    rx_gen_op_mr(rx_adc, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1003
1004 /* ret = arg1 + arg2 */
/* ret = arg1 + arg2 */
static void rx_add(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv z = tcg_constant_i32(0);
    /* Wide add leaves the carry in psw_c and the 32-bit sum in psw_s. */
    tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, arg2, z);
    /* Signed overflow: O = (res ^ arg1) & ~(arg1 ^ arg2); psw_z is scratch. */
    tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
    tcg_gen_xor_i32(cpu_psw_z, arg1, arg2);
    tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    /* ret written last so it may alias either input. */
    tcg_gen_mov_i32(ret, cpu_psw_s);
}
1015
1016 /* add #uimm4, rd */
1017 /* add #imm, rs, rd */
static bool trans_ADD_irr(DisasContext *ctx, arg_ADD_irr *a)
{
    /* rd = rs2 + imm (three-operand immediate form). */
    rx_gen_op_irr(rx_add, a->rd, a->rs2, a->imm);
    return true;
}
1023
1024 /* add rs, rd */
1025 /* add dsp[rs], rd */
static bool trans_ADD_mr(DisasContext *ctx, arg_ADD_mr *a)
{
    /* rd += (rs or dsp[rs]); flags updated by rx_add(). */
    rx_gen_op_mr(rx_add, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1031
1032 /* add rs, rs2, rd */
static bool trans_ADD_rrr(DisasContext *ctx, arg_ADD_rrr *a)
{
    /* rd = rs + rs2. */
    rx_gen_op_rrr(rx_add, a->rd, a->rs, a->rs2);
    return true;
}
1038
1039 /* ret = arg1 - arg2 */
/* ret = arg1 - arg2 */
static void rx_sub(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_sub_i32(cpu_psw_s, arg1, arg2);
    /* C is the no-borrow flag: set when arg1 >= arg2 unsigned. */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_psw_c, arg1, arg2);
    /* Signed overflow: O = (res ^ arg1) & (arg1 ^ arg2); psw_z is scratch. */
    tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
    tcg_gen_xor_i32(cpu_psw_z, arg1, arg2);
    tcg_gen_and_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    /* CMP not required return */
    if (ret) {
        tcg_gen_mov_i32(ret, cpu_psw_s);
    }
}
1053
/* Flags-only subtract: arg1 - arg2 with no destination (@dummy unused). */
static void rx_cmp(TCGv dummy, TCGv arg1, TCGv arg2)
{
    rx_sub(NULL, arg1, arg2);
}
1058
1059 /* ret = arg1 - arg2 - !psw_c */
1060 /* -> ret = arg1 + ~arg2 + psw_c */
/* ret = arg1 - arg2 - !psw_c */
/* -> ret = arg1 + ~arg2 + psw_c */
static void rx_sbb(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv temp;
    temp = tcg_temp_new();
    /* Subtract-with-borrow rewritten as add-with-carry of the complement. */
    tcg_gen_not_i32(temp, arg2);
    rx_adc(ret, arg1, temp);
}
1068
1069 /* cmp #imm4, rs2 */
1070 /* cmp #imm8, rs2 */
1071 /* cmp #imm, rs2 */
static bool trans_CMP_ir(DisasContext *ctx, arg_CMP_ir *a)
{
    /* Flags-only rs2 - imm; dst index 0 is a dummy, rx_cmp ignores it. */
    rx_gen_op_irr(rx_cmp, 0, a->rs2, a->imm);
    return true;
}
1077
1078 /* cmp rs, rs2 */
1079 /* cmp dsp[rs], rs2 */
static bool trans_CMP_mr(DisasContext *ctx, arg_CMP_mr *a)
{
    /* Flags-only rd - (rs or dsp[rs]); rx_cmp writes no destination. */
    rx_gen_op_mr(rx_cmp, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1085
1086 /* sub #imm4, rd */
static bool trans_SUB_ir(DisasContext *ctx, arg_SUB_ir *a)
{
    /* rd -= imm; flags updated by rx_sub(). */
    rx_gen_op_irr(rx_sub, a->rd, a->rd, a->imm);
    return true;
}
1092
/* sub rs, rd */
/* sub dsp[rs], rd */
static bool trans_SUB_mr(DisasContext *ctx, arg_SUB_mr *a)
{
    /* rd -= src, where src is selected by the ld/mi addressing fields. */
    rx_gen_op_mr(rx_sub, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1100
/* sub rs2, rs, rd */
static bool trans_SUB_rrr(DisasContext *ctx, arg_SUB_rrr *a)
{
    /* Note the operand order: rd = rs2 - rs (rx_sub is arg1 - arg2). */
    rx_gen_op_rrr(rx_sub, a->rd, a->rs2, a->rs);
    return true;
}
1107
/* sbb rs, rd */
static bool trans_SBB_rr(DisasContext *ctx, arg_SBB_rr *a)
{
    /* rd = rd - rs - !C (see rx_sbb). */
    rx_gen_op_rrr(rx_sbb, a->rd, a->rd, a->rs);
    return true;
}
1114
/* sbb dsp[rs], rd */
static bool trans_SBB_mr(DisasContext *ctx, arg_SBB_mr *a)
{
    /* mi only 2 */
    /* The memory form of SBB only exists with a 32-bit operand (mi == 2);
       any other encoding is rejected so the decoder reports it illegal. */
    if (a->mi != 2) {
        return false;
    }
    rx_gen_op_mr(rx_sbb, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1125
/* abs rd */
/* abs rs, rd */
static bool trans_ABS_rr(DisasContext *ctx, arg_ABS_rr *a)
{
    /* rd = |rs|; the one-operand form is decoded with rs == rd. */
    rx_gen_op_rr(tcg_gen_abs_i32, a->rd, a->rs);
    return true;
}
1133
/* max #imm, rd */
static bool trans_MAX_ir(DisasContext *ctx, arg_MAX_ir *a)
{
    /* rd = max(rd, imm), signed comparison. */
    rx_gen_op_irr(tcg_gen_smax_i32, a->rd, a->rd, a->imm);
    return true;
}
1140
/* max rs, rd */
/* max dsp[rs], rd */
static bool trans_MAX_mr(DisasContext *ctx, arg_MAX_mr *a)
{
    /* rd = max(rd, src), signed. */
    rx_gen_op_mr(tcg_gen_smax_i32, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1148
/* min #imm, rd */
static bool trans_MIN_ir(DisasContext *ctx, arg_MIN_ir *a)
{
    /* rd = min(rd, imm), signed comparison. */
    rx_gen_op_irr(tcg_gen_smin_i32, a->rd, a->rd, a->imm);
    return true;
}
1155
/* min rs, rd */
/* min dsp[rs], rd */
static bool trans_MIN_mr(DisasContext *ctx, arg_MIN_mr *a)
{
    /* rd = min(rd, src), signed. */
    rx_gen_op_mr(tcg_gen_smin_i32, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1163
/* mul #uimm4, rd */
/* mul #imm, rd */
static bool trans_MUL_ir(DisasContext *ctx, arg_MUL_ir *a)
{
    /* rd = rd * imm (low 32 bits only; no PSW flags touched here). */
    rx_gen_op_irr(tcg_gen_mul_i32, a->rd, a->rd, a->imm);
    return true;
}
1171
/* mul rs, rd */
/* mul dsp[rs], rd */
static bool trans_MUL_mr(DisasContext *ctx, arg_MUL_mr *a)
{
    /* rd = rd * src (low 32 bits only). */
    rx_gen_op_mr(tcg_gen_mul_i32, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1179
/* mul rs, rs2, rd */
static bool trans_MUL_rrr(DisasContext *ctx, arg_MUL_rrr *a)
{
    /* Three-operand form: rd = rs * rs2 (low 32 bits). */
    rx_gen_op_rrr(tcg_gen_mul_i32, a->rd, a->rs, a->rs2);
    return true;
}
1186
/* emul #imm, rd */
static bool trans_EMUL_ir(DisasContext *ctx, arg_EMUL_ir *a)
{
    TCGv imm = tcg_constant_i32(a->imm);
    /*
     * The 64-bit product lands in the pair rd:rd+1, so rd == 15 is
     * invalid.  The guest error is logged but the operation is still
     * emitted; (rd + 1) & 15 wraps the high half into r0 in that case.
     */
    if (a->rd > 14) {
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    /* Signed widening multiply: rd = low 32 bits, rd+1 = high 32 bits. */
    tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], imm);
    return true;
}
1198
/* emul rs, rd */
/* emul dsp[rs], rd */
static bool trans_EMUL_mr(DisasContext *ctx, arg_EMUL_mr *a)
{
    TCGv val, mem;
    /* rd == 15 is invalid (result pair rd:rd+1); log and continue,
       with (rd + 1) & 15 wrapping the high half to r0. */
    if (a->rd > 14) {
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    mem = tcg_temp_new();
    /* Fetch the second operand per the ld/mi addressing fields. */
    val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
    /* Signed widening multiply: rd = low 32 bits, rd+1 = high 32 bits. */
    tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], val);
    return true;
}
1213
/* emulu #imm, rd */
static bool trans_EMULU_ir(DisasContext *ctx, arg_EMULU_ir *a)
{
    TCGv imm = tcg_constant_i32(a->imm);
    /* rd == 15 is invalid (result pair rd:rd+1); log and continue,
       with (rd + 1) & 15 wrapping the high half to r0. */
    if (a->rd > 14) {
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    /* Unsigned widening multiply: rd = low 32 bits, rd+1 = high 32 bits. */
    tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], imm);
    return true;
}
1225
/* emulu rs, rd */
/* emulu dsp[rs], rd */
static bool trans_EMULU_mr(DisasContext *ctx, arg_EMULU_mr *a)
{
    TCGv val, mem;
    /* rd == 15 is invalid (result pair rd:rd+1); log and continue. */
    if (a->rd > 14) {
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    mem = tcg_temp_new();
    /* Fetch the second operand per the ld/mi addressing fields. */
    val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
    /* Unsigned widening multiply: rd = low 32 bits, rd+1 = high 32 bits. */
    tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], val);
    return true;
}
1240
/* Signed divide via helper (handles divide-by-zero/overflow in C code). */
static void rx_div(TCGv ret, TCGv arg1, TCGv arg2)
{
    gen_helper_div(ret, tcg_env, arg1, arg2);
}
1245
/* Unsigned divide via helper (handles divide-by-zero in C code). */
static void rx_divu(TCGv ret, TCGv arg1, TCGv arg2)
{
    gen_helper_divu(ret, tcg_env, arg1, arg2);
}
1250
/* div #imm, rd */
static bool trans_DIV_ir(DisasContext *ctx, arg_DIV_ir *a)
{
    /* rd = rd / imm, signed. */
    rx_gen_op_irr(rx_div, a->rd, a->rd, a->imm);
    return true;
}
1257
/* div rs, rd */
/* div dsp[rs], rd */
static bool trans_DIV_mr(DisasContext *ctx, arg_DIV_mr *a)
{
    /* rd = rd / src, signed. */
    rx_gen_op_mr(rx_div, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1265
/* divu #imm, rd */
static bool trans_DIVU_ir(DisasContext *ctx, arg_DIVU_ir *a)
{
    /* rd = rd / imm, unsigned. */
    rx_gen_op_irr(rx_divu, a->rd, a->rd, a->imm);
    return true;
}
1272
/* divu rs, rd */
/* divu dsp[rs], rd */
static bool trans_DIVU_mr(DisasContext *ctx, arg_DIVU_mr *a)
{
    /* rd = rd / src, unsigned. */
    rx_gen_op_mr(rx_divu, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1280
1281
/* shll #imm:5, rd */
/* shll #imm:5, rs2, rd */
static bool trans_SHLL_irr(DisasContext *ctx, arg_SHLL_irr *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    if (a->imm) {
        /*
         * Arithmetic-shift the bits about to fall off the top into psw_c:
         * after sari by (32 - imm) it holds 0 or -1 only if all shifted-out
         * bits (plus the new sign bit) agreed.
         */
        tcg_gen_sari_i32(cpu_psw_c, cpu_regs[a->rs2], 32 - a->imm);
        tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rs2], a->imm);
        /* O is set when the shifted-out bits were all 0 or all 1
           (i.e. no signed overflow occurred) -- note the encoding here:
           psw_c == 0 or psw_c == -1. */
        tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0);
        tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_psw_c, 0xffffffff);
        tcg_gen_or_i32(cpu_psw_o, cpu_psw_o, tmp);
        /* C is the last bit shifted out: nonzero psw_c means it was set. */
        tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, cpu_psw_c, 0);
    } else {
        /* Zero shift count: plain move, C and O cleared. */
        tcg_gen_mov_i32(cpu_regs[a->rd], cpu_regs[a->rs2]);
        tcg_gen_movi_i32(cpu_psw_c, 0);
        tcg_gen_movi_i32(cpu_psw_o, 0);
    }
    /* Z and S track the result value. */
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    return true;
}
1304
/* shll rs, rd */
static bool trans_SHLL_rr(DisasContext *ctx, arg_SHLL_rr *a)
{
    TCGLabel *noshift, *done;
    TCGv count, tmp;

    /* Run-time shift count: branch on zero count, as in the immediate
       form, but computed with TCG branches instead of at translate time. */
    noshift = gen_new_label();
    done = gen_new_label();
    /* if (cpu_regs[a->rs]) { */
    tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[a->rs], 0, noshift);
    count = tcg_temp_new();
    tmp = tcg_temp_new();
    /* Only the low 5 bits of rs are used as the shift count. */
    tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 31);
    tcg_gen_sub_i32(count, tcg_constant_i32(32), tmp);
    /* Collect the shifted-out bits (sign-extended) for the C/O flags. */
    tcg_gen_sar_i32(cpu_psw_c, cpu_regs[a->rd], count);
    tcg_gen_shl_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp);
    /* O set when shifted-out bits were homogeneous (no signed overflow). */
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0);
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_psw_c, 0xffffffff);
    tcg_gen_or_i32(cpu_psw_o, cpu_psw_o, tmp);
    /* C is the last bit shifted out. */
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, cpu_psw_c, 0);
    tcg_gen_br(done);
    /* } else { */
    gen_set_label(noshift);
    tcg_gen_movi_i32(cpu_psw_c, 0);
    tcg_gen_movi_i32(cpu_psw_o, 0);
    /* } */
    gen_set_label(done);
    /* Z and S track the result value. */
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    return true;
}
1336
/*
 * Common body for SHLR/SHAR with an immediate count.
 * alith selects the shift kind: 0 = logical (shri), 1 = arithmetic (sari).
 * The shift is split as (imm - 1) + 1 so that the last bit shifted out
 * can be captured in psw_c before it is lost.
 */
static inline void shiftr_imm(uint32_t rd, uint32_t rs, uint32_t imm,
                              unsigned int alith)
{
    static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = {
        tcg_gen_shri_i32, tcg_gen_sari_i32,
    };
    tcg_debug_assert(alith < 2);
    if (imm) {
        gen_sXri[alith](cpu_regs[rd], cpu_regs[rs], imm - 1);
        /* C = last bit to be shifted out. */
        tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
        gen_sXri[alith](cpu_regs[rd], cpu_regs[rd], 1);
    } else {
        /* Zero count: move only, C cleared. */
        tcg_gen_mov_i32(cpu_regs[rd], cpu_regs[rs]);
        tcg_gen_movi_i32(cpu_psw_c, 0);
    }
    /* Right shifts never overflow; Z/S track the result. */
    tcg_gen_movi_i32(cpu_psw_o, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}
1356
/*
 * Common body for SHLR/SHAR with a register count.
 * alith selects the shift kind: 0 = logical, 1 = arithmetic.
 * Shift by (count - 1), grab bit 0 into psw_c, then shift the final bit;
 * a zero count is handled by a run-time branch that just clears C.
 */
static inline void shiftr_reg(uint32_t rd, uint32_t rs, unsigned int alith)
{
    TCGLabel *noshift, *done;
    TCGv count;
    static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = {
        tcg_gen_shri_i32, tcg_gen_sari_i32,
    };
    static void (* const gen_sXr[])(TCGv ret, TCGv arg1, TCGv arg2) = {
        tcg_gen_shr_i32, tcg_gen_sar_i32,
    };
    tcg_debug_assert(alith < 2);
    noshift = gen_new_label();
    done = gen_new_label();
    count = tcg_temp_new();
    /* if (cpu_regs[rs]) { */
    tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[rs], 0, noshift);
    /* Only the low 5 bits of rs are used as the shift count. */
    tcg_gen_andi_i32(count, cpu_regs[rs], 31);
    tcg_gen_subi_i32(count, count, 1);
    gen_sXr[alith](cpu_regs[rd], cpu_regs[rd], count);
    /* C = last bit to be shifted out. */
    tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
    gen_sXri[alith](cpu_regs[rd], cpu_regs[rd], 1);
    tcg_gen_br(done);
    /* } else { */
    gen_set_label(noshift);
    tcg_gen_movi_i32(cpu_psw_c, 0);
    /* } */
    gen_set_label(done);
    /* Right shifts never overflow; Z/S track the result. */
    tcg_gen_movi_i32(cpu_psw_o, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}
1388
/* shar #imm:5, rd */
/* shar #imm:5, rs2, rd */
static bool trans_SHAR_irr(DisasContext *ctx, arg_SHAR_irr *a)
{
    /* Arithmetic right shift (alith == 1). */
    shiftr_imm(a->rd, a->rs2, a->imm, 1);
    return true;
}
1396
/* shar rs, rd */
static bool trans_SHAR_rr(DisasContext *ctx, arg_SHAR_rr *a)
{
    /* Arithmetic right shift by register count (alith == 1). */
    shiftr_reg(a->rd, a->rs, 1);
    return true;
}
1403
/* shlr #imm:5, rd */
/* shlr #imm:5, rs2, rd */
static bool trans_SHLR_irr(DisasContext *ctx, arg_SHLR_irr *a)
{
    /* Logical right shift (alith == 0). */
    shiftr_imm(a->rd, a->rs2, a->imm, 0);
    return true;
}
1411
/* shlr rs, rd */
static bool trans_SHLR_rr(DisasContext *ctx, arg_SHLR_rr *a)
{
    /* Logical right shift by register count (alith == 0). */
    shiftr_reg(a->rd, a->rs, 0);
    return true;
}
1418
/* rolc rd */
static bool trans_ROLC(DisasContext *ctx, arg_ROLC *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    /* 33-bit rotate left through carry: save bit 31, shift in old C. */
    tcg_gen_shri_i32(tmp, cpu_regs[a->rd], 31);
    tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1);
    tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c);
    /* New C is the bit rotated out of the top. */
    tcg_gen_mov_i32(cpu_psw_c, tmp);
    /* Z and S track the result value. */
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    return true;
}
1432
/* rorc rd */
static bool trans_RORC(DisasContext *ctx, arg_RORC *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    /* 33-bit rotate right through carry: save bit 0, shift old C into
       bit 31.  Note psw_c is clobbered by the shli and then rewritten. */
    tcg_gen_andi_i32(tmp, cpu_regs[a->rd], 0x00000001);
    tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1);
    tcg_gen_shli_i32(cpu_psw_c, cpu_psw_c, 31);
    tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c);
    /* New C is the bit rotated out of the bottom. */
    tcg_gen_mov_i32(cpu_psw_c, tmp);
    /* Z and S track the result value. */
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    return true;
}
1447
enum {ROTR = 0, ROTL = 1};
enum {ROT_IMM = 0, ROT_REG = 1};
/*
 * Common body for ROTL/ROTR.
 * ir:  ROT_IMM -> src is an immediate count, ROT_REG -> src is a register
 *      number whose value supplies the count.
 * dir: rotate direction; C receives the bit that wrapped around
 *      (bit 0 after ROTL, bit 31 after ROTR).
 */
static inline void rx_rot(int ir, int dir, int rd, int src)
{
    switch (dir) {
    case ROTL:
        if (ir == ROT_IMM) {
            tcg_gen_rotli_i32(cpu_regs[rd], cpu_regs[rd], src);
        } else {
            tcg_gen_rotl_i32(cpu_regs[rd], cpu_regs[rd], cpu_regs[src]);
        }
        tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
        break;
    case ROTR:
        if (ir == ROT_IMM) {
            tcg_gen_rotri_i32(cpu_regs[rd], cpu_regs[rd], src);
        } else {
            tcg_gen_rotr_i32(cpu_regs[rd], cpu_regs[rd], cpu_regs[src]);
        }
        tcg_gen_shri_i32(cpu_psw_c, cpu_regs[rd], 31);
        break;
    }
    /* Z and S track the result value. */
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}
1473
/* rotl #imm, rd */
static bool trans_ROTL_ir(DisasContext *ctx, arg_ROTL_ir *a)
{
    /* Rotate left by immediate count. */
    rx_rot(ROT_IMM, ROTL, a->rd, a->imm);
    return true;
}
1480
/* rotl rs, rd */
static bool trans_ROTL_rr(DisasContext *ctx, arg_ROTL_rr *a)
{
    /* Rotate left by count in register rs. */
    rx_rot(ROT_REG, ROTL, a->rd, a->rs);
    return true;
}
1487
/* rotr #imm, rd */
static bool trans_ROTR_ir(DisasContext *ctx, arg_ROTR_ir *a)
{
    /* Rotate right by immediate count. */
    rx_rot(ROT_IMM, ROTR, a->rd, a->imm);
    return true;
}
1494
/* rotr rs, rd */
static bool trans_ROTR_rr(DisasContext *ctx, arg_ROTR_rr *a)
{
    /* Rotate right by count in register rs. */
    rx_rot(ROT_REG, ROTR, a->rd, a->rs);
    return true;
}
1501
/* revl rs, rd */
static bool trans_REVL(DisasContext *ctx, arg_REVL *a)
{
    /* Full 32-bit byte swap: rd = bswap32(rs). */
    tcg_gen_bswap32_i32(cpu_regs[a->rd], cpu_regs[a->rs]);
    return true;
}
1508
/* revw rs, rd */
static bool trans_REVW(DisasContext *ctx, arg_REVW *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    /* Swap the bytes within each 16-bit half independently:
       rd = ((rs & 0x00ff00ff) << 8) | ((rs >> 8) & 0x00ff00ff). */
    tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 0x00ff00ff);
    tcg_gen_shli_i32(tmp, tmp, 8);
    tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rs], 8);
    tcg_gen_andi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 0x00ff00ff);
    tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp);
    return true;
}
1521
/* conditional branch helper */
/*
 * Emit a conditional branch for condition code cd to ctx->pc + dst.
 * cd 0..13 are real conditions evaluated via psw_cond(); cd 14 is the
 * always-taken case (BRA/BSR) and cd 15 is architecturally never taken.
 */
static void rx_bcnd_main(DisasContext *ctx, int cd, int dst)
{
    DisasCompare dc;
    TCGLabel *t, *done;

    switch (cd) {
    case 0 ... 13:
        dc.temp = tcg_temp_new();
        /* psw_cond fills dc.cond/dc.value from the PSW flag registers. */
        psw_cond(&dc, cd);
        t = gen_new_label();
        done = gen_new_label();
        tcg_gen_brcondi_i32(dc.cond, dc.value, 0, t);
        /* Not taken: fall through to the next instruction. */
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        tcg_gen_br(done);
        gen_set_label(t);
        /* Taken: displacement is relative to the insn's own pc. */
        gen_goto_tb(ctx, 1, ctx->pc + dst);
        gen_set_label(done);
        break;
    case 14:
        /* always true case */
        gen_goto_tb(ctx, 0, ctx->pc + dst);
        break;
    case 15:
        /* always false case */
        /* Nothing do */
        break;
    }
}
1551
/* beq dsp:3 / bne dsp:3 */
/* beq dsp:8 / bne dsp:8 */
/* bc dsp:8 / bnc dsp:8 */
/* bgtu dsp:8 / bleu dsp:8 */
/* bpz dsp:8 / bn dsp:8 */
/* bge dsp:8 / blt dsp:8 */
/* bgt dsp:8 / ble dsp:8 */
/* bo dsp:8 / bno dsp:8 */
/* beq dsp:16 / bne dsp:16 */
static bool trans_BCnd(DisasContext *ctx, arg_BCnd *a)
{
    /* All conditional-branch encodings funnel into the common helper. */
    rx_bcnd_main(ctx, a->cd, a->dsp);
    return true;
}
1566
/* bra dsp:3 */
/* bra dsp:8 */
/* bra dsp:16 */
/* bra dsp:24 */
static bool trans_BRA(DisasContext *ctx, arg_BRA *a)
{
    /* Unconditional branch == condition code 14 (always true). */
    rx_bcnd_main(ctx, 14, a->dsp);
    return true;
}
1576
/* bra rs */
static bool trans_BRA_l(DisasContext *ctx, arg_BRA_l *a)
{
    /* Register-relative branch: pc = insn pc + rd (decoded into a->rd). */
    tcg_gen_addi_i32(cpu_pc, cpu_regs[a->rd], ctx->pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
1584
/* Push the return address (the pc of the next instruction) on the stack. */
static inline void rx_save_pc(DisasContext *ctx)
{
    TCGv pc = tcg_constant_i32(ctx->base.pc_next);
    push(pc);
}
1590
/* jmp rs */
static bool trans_JMP(DisasContext *ctx, arg_JMP *a)
{
    /* Absolute indirect jump: pc = rs. */
    tcg_gen_mov_i32(cpu_pc, cpu_regs[a->rs]);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
1598
/* jsr rs */
static bool trans_JSR(DisasContext *ctx, arg_JSR *a)
{
    /* Subroutine call: push return address, then jump indirect. */
    rx_save_pc(ctx);
    tcg_gen_mov_i32(cpu_pc, cpu_regs[a->rs]);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
1607
/* bsr dsp:16 */
/* bsr dsp:24 */
static bool trans_BSR(DisasContext *ctx, arg_BSR *a)
{
    /* Relative call: push return address, then branch unconditionally. */
    rx_save_pc(ctx);
    rx_bcnd_main(ctx, 14, a->dsp);
    return true;
}
1616
/* bsr rs */
static bool trans_BSR_l(DisasContext *ctx, arg_BSR_l *a)
{
    /* Register-relative call: push return address, pc = insn pc + rd. */
    rx_save_pc(ctx);
    tcg_gen_addi_i32(cpu_pc, cpu_regs[a->rd], ctx->pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
1625
/* rts */
static bool trans_RTS(DisasContext *ctx, arg_RTS *a)
{
    /* Return: pop the saved return address straight into pc. */
    pop(cpu_pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
1633
/* nop */
static bool trans_NOP(DisasContext *ctx, arg_NOP *a)
{
    /* No operation: nothing to emit. */
    return true;
}
1639
/* scmpu */
/* String compare; the whole loop runs in the helper. */
static bool trans_SCMPU(DisasContext *ctx, arg_SCMPU *a)
{
    gen_helper_scmpu(tcg_env);
    return true;
}
1646
/* smovu */
/* String move; the whole loop runs in the helper. */
static bool trans_SMOVU(DisasContext *ctx, arg_SMOVU *a)
{
    gen_helper_smovu(tcg_env);
    return true;
}
1653
/* smovf */
/* String move forward; implemented entirely in the helper. */
static bool trans_SMOVF(DisasContext *ctx, arg_SMOVF *a)
{
    gen_helper_smovf(tcg_env);
    return true;
}
1660
/* smovb */
/* String move backward; implemented entirely in the helper. */
static bool trans_SMOVB(DisasContext *ctx, arg_SMOVB *a)
{
    gen_helper_smovb(tcg_env);
    return true;
}
1667
/*
 * Emit a call to string-operation helper 'op', passing the decoded
 * element size (a->sz) as its only argument.
 */
#define STRING(op) \
    do { \
        TCGv size = tcg_constant_i32(a->sz); \
        gen_helper_##op(tcg_env, size); \
    } while (0)
1673
/* suntil.<bwl> */
static bool trans_SUNTIL(DisasContext *ctx, arg_SUNTIL *a)
{
    /* Search-until string op, looped in the helper. */
    STRING(suntil);
    return true;
}
1680
/* swhile.<bwl> */
static bool trans_SWHILE(DisasContext *ctx, arg_SWHILE *a)
{
    /* Search-while string op, looped in the helper. */
    STRING(swhile);
    return true;
}
/* sstr.<bwl> */
static bool trans_SSTR(DisasContext *ctx, arg_SSTR *a)
{
    /* Store-string op, looped in the helper. */
    STRING(sstr);
    return true;
}
1693
/* rmpa.<bwl> */
static bool trans_RMPA(DisasContext *ctx, arg_RMPA *a)
{
    /* Repeated multiply-accumulate, looped in the helper. */
    STRING(rmpa);
    return true;
}
1700
/*
 * ret = (upper 16 bits of rs as s16) * (upper 16 bits of rs2 as s16) << 16.
 * Sign-extending to 64 bits and then sari by 16 extracts the high
 * halfword of each operand with its sign.  Used by MULHI/MACHI.
 */
static void rx_mul64hi(TCGv_i64 ret, int rs, int rs2)
{
    TCGv_i64 tmp0, tmp1;
    tmp0 = tcg_temp_new_i64();
    tmp1 = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(tmp0, cpu_regs[rs]);
    tcg_gen_sari_i64(tmp0, tmp0, 16);
    tcg_gen_ext_i32_i64(tmp1, cpu_regs[rs2]);
    tcg_gen_sari_i64(tmp1, tmp1, 16);
    tcg_gen_mul_i64(ret, tmp0, tmp1);
    /* Product is positioned 16 bits up in the 64-bit accumulator format. */
    tcg_gen_shli_i64(ret, ret, 16);
}
1713
/*
 * ret = (lower 16 bits of rs as s16) * (lower 16 bits of rs2 as s16) << 16.
 * ext16s extracts the low halfword of each operand with sign.
 * Used by MULLO/MACLO.
 */
static void rx_mul64lo(TCGv_i64 ret, int rs, int rs2)
{
    TCGv_i64 tmp0, tmp1;
    tmp0 = tcg_temp_new_i64();
    tmp1 = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(tmp0, cpu_regs[rs]);
    tcg_gen_ext16s_i64(tmp0, tmp0);
    tcg_gen_ext_i32_i64(tmp1, cpu_regs[rs2]);
    tcg_gen_ext16s_i64(tmp1, tmp1);
    tcg_gen_mul_i64(ret, tmp0, tmp1);
    /* Product is positioned 16 bits up in the 64-bit accumulator format. */
    tcg_gen_shli_i64(ret, ret, 16);
}
1726
/* mulhi rs,rs2 */
static bool trans_MULHI(DisasContext *ctx, arg_MULHI *a)
{
    /* acc = product of the operands' high halfwords (see rx_mul64hi). */
    rx_mul64hi(cpu_acc, a->rs, a->rs2);
    return true;
}
1733
/* mullo rs,rs2 */
static bool trans_MULLO(DisasContext *ctx, arg_MULLO *a)
{
    /* acc = product of the operands' low halfwords (see rx_mul64lo). */
    rx_mul64lo(cpu_acc, a->rs, a->rs2);
    return true;
}
1740
/* machi rs,rs2 */
static bool trans_MACHI(DisasContext *ctx, arg_MACHI *a)
{
    TCGv_i64 tmp;
    tmp = tcg_temp_new_i64();
    /* acc += high-halfword product. */
    rx_mul64hi(tmp, a->rs, a->rs2);
    tcg_gen_add_i64(cpu_acc, cpu_acc, tmp);
    return true;
}
1750
/* maclo rs,rs2 */
static bool trans_MACLO(DisasContext *ctx, arg_MACLO *a)
{
    TCGv_i64 tmp;
    tmp = tcg_temp_new_i64();
    /* acc += low-halfword product. */
    rx_mul64lo(tmp, a->rs, a->rs2);
    tcg_gen_add_i64(cpu_acc, cpu_acc, tmp);
    return true;
}
1760
/* mvfachi rd */
static bool trans_MVFACHI(DisasContext *ctx, arg_MVFACHI *a)
{
    /* rd = bits 63..32 of the accumulator. */
    tcg_gen_extrh_i64_i32(cpu_regs[a->rd], cpu_acc);
    return true;
}
1767
/* mvfacmi rd */
static bool trans_MVFACMI(DisasContext *ctx, arg_MVFACMI *a)
{
    TCGv_i64 rd64;
    rd64 = tcg_temp_new_i64();
    /* rd = the middle 32 bits (47..16) of the accumulator. */
    tcg_gen_extract_i64(rd64, cpu_acc, 16, 32);
    tcg_gen_extrl_i64_i32(cpu_regs[a->rd], rd64);
    return true;
}
1777
/* mvtachi rs */
static bool trans_MVTACHI(DisasContext *ctx, arg_MVTACHI *a)
{
    TCGv_i64 rs64;
    rs64 = tcg_temp_new_i64();
    /* Replace bits 63..32 of the accumulator with rs; low half unchanged. */
    tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]);
    tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 32, 32);
    return true;
}
1787
/* mvtaclo rs */
static bool trans_MVTACLO(DisasContext *ctx, arg_MVTACLO *a)
{
    TCGv_i64 rs64;
    rs64 = tcg_temp_new_i64();
    /* Replace bits 31..0 of the accumulator with rs; high half unchanged. */
    tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]);
    tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 0, 32);
    return true;
}
1797
/* racw #imm */
static bool trans_RACW(DisasContext *ctx, arg_RACW *a)
{
    /* The encoded immediate is the shift count minus one, hence + 1. */
    TCGv imm = tcg_constant_i32(a->imm + 1);
    gen_helper_racw(tcg_env, imm);
    return true;
}
1805
/* sat rd */
static bool trans_SAT(DisasContext *ctx, arg_SAT *a)
{
    TCGv tmp, z;
    tmp = tcg_temp_new();
    z = tcg_constant_i32(0);
    /* S == 1 -> 0xffffffff / S == 0 -> 0x00000000 */
    tcg_gen_sari_i32(tmp, cpu_psw_s, 31);
    /* S == 1 -> 0x7fffffff / S == 0 -> 0x80000000 */
    tcg_gen_xori_i32(tmp, tmp, 0x80000000);
    /* If overflow is flagged (psw_o bit 31 set, i.e. psw_o < 0), clamp
       rd to the saturation value picked above; otherwise leave rd alone. */
    tcg_gen_movcond_i32(TCG_COND_LT, cpu_regs[a->rd],
                        cpu_psw_o, z, tmp, cpu_regs[a->rd]);
    return true;
}
1820
/* satr */
/* Saturate the accumulator register set; logic lives in the helper. */
static bool trans_SATR(DisasContext *ctx, arg_SATR *a)
{
    gen_helper_satr(tcg_env);
    return true;
}
1827
#define cat3(a, b, c) a##b##c
/*
 * Generate the immediate and memory forms of a two-operand FPU op:
 *   trans_<name>_ir: rd = op(rd, #imm32)  -- li() fetches the literal
 *                    that follows the instruction (NOTE(review): assumed
 *                    from the call shape; confirm against li()).
 *   trans_<name>_mr: rd = op(rd, src)     -- src from register or dsp[rs].
 * All FPU arithmetic goes through helpers so FPSW/exceptions are handled
 * in C code.
 */
#define FOP(name, op) \
    static bool cat3(trans_, name, _ir)(DisasContext *ctx, \
                                        cat3(arg_, name, _ir) * a) \
    { \
        TCGv imm = tcg_constant_i32(li(ctx, 0)); \
        gen_helper_##op(cpu_regs[a->rd], tcg_env, \
                        cpu_regs[a->rd], imm); \
        return true; \
    } \
    static bool cat3(trans_, name, _mr)(DisasContext *ctx, \
                                        cat3(arg_, name, _mr) * a) \
    { \
        TCGv val, mem; \
        mem = tcg_temp_new(); \
        val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \
        gen_helper_##op(cpu_regs[a->rd], tcg_env, \
                        cpu_regs[a->rd], val); \
        return true; \
    }
1848
/*
 * Generate a one-operand FPU conversion: rd = op(src), src from register
 * or dsp[rs].  Conversion goes through a helper for FPSW handling.
 */
#define FCONVOP(name, op) \
    static bool trans_##name(DisasContext *ctx, arg_##name * a) \
    { \
        TCGv val, mem; \
        mem = tcg_temp_new(); \
        val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \
        gen_helper_##op(cpu_regs[a->rd], tcg_env, val); \
        return true; \
    }
1858
/* Instantiate the _ir/_mr translators for the four FPU arithmetic ops. */
FOP(FADD, fadd)
FOP(FSUB, fsub)
FOP(FMUL, fmul)
FOP(FDIV, fdiv)
1863
/* fcmp #imm, rd */
static bool trans_FCMP_ir(DisasContext *ctx, arg_FCMP_ir * a)
{
    /* Flags-only FPU compare; the helper returns nothing. */
    TCGv imm = tcg_constant_i32(li(ctx, 0));
    gen_helper_fcmp(tcg_env, cpu_regs[a->rd], imm);
    return true;
}
1871
/* fcmp dsp[rs], rd */
/* fcmp rs, rd */
static bool trans_FCMP_mr(DisasContext *ctx, arg_FCMP_mr *a)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    /* Flags-only FPU compare against a 32-bit memory/register source. */
    val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs);
    gen_helper_fcmp(tcg_env, cpu_regs[a->rd], val);
    return true;
}
1882
/* Instantiate the float-to-int conversion translators. */
FCONVOP(FTOI, ftoi)
FCONVOP(ROUND, round)
1885
/* itof rs, rd */
/* itof dsp[rs], rd */
static bool trans_ITOF(DisasContext *ctx, arg_ITOF * a)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    /* Unlike FCONVOP, the source size comes from mi (not fixed MO_32). */
    val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
    gen_helper_itof(cpu_regs[a->rd], tcg_env, val);
    return true;
}
1896
/* Set the mask bits in the byte at mem (read-modify-write). */
static void rx_bsetm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_or_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
}
1905
/* Clear the mask bits in the byte at mem (read-modify-write). */
static void rx_bclrm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_andc_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
}
1914
/* Test the mask bits in the byte at mem: C = (byte & mask) != 0,
   and psw_z mirrors C (Z is set when psw_z == 0 in this encoding). */
static void rx_btstm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_and_i32(val, val, mask);
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, val, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
}
1924
/* Toggle the mask bits in the byte at mem (read-modify-write). */
static void rx_bnotm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_xor_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
}
1933
/* Register form of bset: reg |= mask. */
static void rx_bsetr(TCGv reg, TCGv mask)
{
    tcg_gen_or_i32(reg, reg, mask);
}
1938
/* Register form of bclr: reg &= ~mask. */
static void rx_bclrr(TCGv reg, TCGv mask)
{
    tcg_gen_andc_i32(reg, reg, mask);
}
1943
/* Register form of btst: C = (reg & mask) != 0, psw_z mirrors C. */
static inline void rx_btstr(TCGv reg, TCGv mask)
{
    TCGv t0;
    t0 = tcg_temp_new();
    tcg_gen_and_i32(t0, reg, mask);
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, t0, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
}
1952
/* Register form of bnot: reg ^= mask. */
static inline void rx_bnotr(TCGv reg, TCGv mask)
{
    tcg_gen_xor_i32(reg, reg, mask);
}
1957
/*
 * Generate the four addressing forms of a bit operation:
 *   _im: immediate bit number, memory operand (byte at dsp[rs])
 *   _ir: immediate bit number, register operand (bit 0..31 of rd)
 *   _rr: bit number in rs (mod 32), register operand rd
 *   _rm: bit number in rd (mod 8),  memory operand (byte at dsp[rs])
 * Memory forms dispatch to rx_<op>m, register forms to rx_<op>r.
 */
#define BITOP(name, op) \
    static bool cat3(trans_, name, _im)(DisasContext *ctx, \
                                        cat3(arg_, name, _im) * a) \
    { \
        TCGv mask, mem, addr; \
        mem = tcg_temp_new(); \
        mask = tcg_constant_i32(1 << a->imm); \
        addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \
        cat3(rx_, op, m)(addr, mask); \
        return true; \
    } \
    static bool cat3(trans_, name, _ir)(DisasContext *ctx, \
                                        cat3(arg_, name, _ir) * a) \
    { \
        TCGv mask; \
        mask = tcg_constant_i32(1 << a->imm); \
        cat3(rx_, op, r)(cpu_regs[a->rd], mask); \
        return true; \
    } \
    static bool cat3(trans_, name, _rr)(DisasContext *ctx, \
                                        cat3(arg_, name, _rr) * a) \
    { \
        TCGv mask, b; \
        mask = tcg_temp_new(); \
        b = tcg_temp_new(); \
        tcg_gen_andi_i32(b, cpu_regs[a->rs], 31); \
        tcg_gen_shl_i32(mask, tcg_constant_i32(1), b); \
        cat3(rx_, op, r)(cpu_regs[a->rd], mask); \
        return true; \
    } \
    static bool cat3(trans_, name, _rm)(DisasContext *ctx, \
                                        cat3(arg_, name, _rm) * a) \
    { \
        TCGv mask, mem, addr, b; \
        mask = tcg_temp_new(); \
        b = tcg_temp_new(); \
        tcg_gen_andi_i32(b, cpu_regs[a->rd], 7); \
        tcg_gen_shl_i32(mask, tcg_constant_i32(1), b); \
        mem = tcg_temp_new(); \
        addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \
        cat3(rx_, op, m)(addr, mask); \
        return true; \
    }
2001
/* Instantiate the four addressing forms for each bit operation. */
BITOP(BSET, bset)
BITOP(BCLR, bclr)
BITOP(BTST, btst)
BITOP(BNOT, bnot)
2006
/*
 * Deposit the boolean value of condition 'cond' into bit 'pos' of val.
 * Used by the BMCnd (bit-move-condition) translators below.
 */
static inline void bmcnd_op(TCGv val, TCGCond cond, int pos)
{
    TCGv bit;
    DisasCompare dc;
    dc.temp = tcg_temp_new();
    bit = tcg_temp_new();
    /* psw_cond derives (dc.cond, dc.value) from the PSW flag registers. */
    psw_cond(&dc, cond);
    tcg_gen_andi_i32(val, val, ~(1 << pos));
    tcg_gen_setcondi_i32(dc.cond, bit, dc.value, 0);
    tcg_gen_deposit_i32(val, val, bit, pos, 1);
}
2018
/* bmcnd #imm, dsp[rd] */
static bool trans_BMCnd_im(DisasContext *ctx, arg_BMCnd_im *a)
{
    TCGv val, mem, addr;
    val = tcg_temp_new();
    mem = tcg_temp_new();
    /* Read-modify-write of one byte: set/clear bit a->imm per cond a->cd. */
    addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rd);
    rx_gen_ld(MO_8, val, addr);
    bmcnd_op(val, a->cd, a->imm);
    rx_gen_st(MO_8, val, addr);
    return true;
}
2031
/* bmcond #imm, rd */
static bool trans_BMCnd_ir(DisasContext *ctx, arg_BMCnd_ir *a)
{
    /* Register form: deposit the condition result into bit a->imm of rd. */
    bmcnd_op(cpu_regs[a->rd], a->cd, a->imm);
    return true;
}
2038
/* PSW bit numbers as encoded in the CLRPSW/SETPSW cb field. */
enum {
    PSW_C = 0,
    PSW_Z = 1,
    PSW_S = 2,
    PSW_O = 3,
    PSW_I = 8,
    PSW_U = 9,
};
2047
/*
 * Common body for CLRPSW/SETPSW: write 'val' (0 or 1) to PSW bit 'cb'.
 * Flag bits (cb < 8) use the lazy flag encoding of this translator:
 *   C: stored directly; Z: zero iff flag set, hence 'val == 0';
 *   S: stored as sign, hence -1/0; O: stored in bit 31, hence 'val << 31'.
 * I and U are privileged; U additionally swaps the active stack pointer.
 */
static inline void clrsetpsw(DisasContext *ctx, int cb, int val)
{
    if (cb < 8) {
        switch (cb) {
        case PSW_C:
            tcg_gen_movi_i32(cpu_psw_c, val);
            break;
        case PSW_Z:
            tcg_gen_movi_i32(cpu_psw_z, val == 0);
            break;
        case PSW_S:
            tcg_gen_movi_i32(cpu_psw_s, val ? -1 : 0);
            break;
        case PSW_O:
            tcg_gen_movi_i32(cpu_psw_o, val << 31);
            break;
        default:
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid destination %d", cb);
            break;
        }
    } else if (is_privileged(ctx, 0)) {
        switch (cb) {
        case PSW_I:
            tcg_gen_movi_i32(cpu_psw_i, val);
            /* Interrupt enable changed: end the TB so it takes effect. */
            ctx->base.is_jmp = DISAS_UPDATE;
            break;
        case PSW_U:
            /* Only act when U actually changes; swap SP with the bank
               (ISP or USP) being switched away from / to. */
            if (FIELD_EX32(ctx->tb_flags, PSW, U) != val) {
                ctx->tb_flags = FIELD_DP32(ctx->tb_flags, PSW, U, val);
                tcg_gen_movi_i32(cpu_psw_u, val);
                tcg_gen_mov_i32(val ? cpu_isp : cpu_usp, cpu_sp);
                tcg_gen_mov_i32(cpu_sp, val ? cpu_usp : cpu_isp);
            }
            break;
        default:
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid destination %d", cb);
            break;
        }
    }
}
2088
/* clrpsw psw */
static bool trans_CLRPSW(DisasContext *ctx, arg_CLRPSW *a)
{
    /* Clear PSW bit a->cb (privilege-checked in clrsetpsw). */
    clrsetpsw(ctx, a->cb, 0);
    return true;
}
2095
/* setpsw psw */
static bool trans_SETPSW(DisasContext *ctx, arg_SETPSW *a)
{
    /* Set PSW bit a->cb (privilege-checked in clrsetpsw). */
    clrsetpsw(ctx, a->cb, 1);
    return true;
}
2102
/* mvtipl #imm */
static bool trans_MVTIPL(DisasContext *ctx, arg_MVTIPL *a)
{
    /* Privileged: set the interrupt priority level; silently ignored in
       user mode (is_privileged handles the exception/ignore policy). */
    if (is_privileged(ctx, 1)) {
        tcg_gen_movi_i32(cpu_psw_ipl, a->imm);
        /* IPL change affects interrupt delivery: end the TB. */
        ctx->base.is_jmp = DISAS_UPDATE;
    }
    return true;
}
2112
2113 /* mvtc #imm, rd */
trans_MVTC_i(DisasContext * ctx,arg_MVTC_i * a)2114 static bool trans_MVTC_i(DisasContext *ctx, arg_MVTC_i *a)
2115 {
2116 TCGv imm;
2117
2118 imm = tcg_constant_i32(a->imm);
2119 move_to_cr(ctx, imm, a->cr);
2120 return true;
2121 }
2122
2123 /* mvtc rs, rd */
trans_MVTC_r(DisasContext * ctx,arg_MVTC_r * a)2124 static bool trans_MVTC_r(DisasContext *ctx, arg_MVTC_r *a)
2125 {
2126 move_to_cr(ctx, cpu_regs[a->rs], a->cr);
2127 return true;
2128 }
2129
2130 /* mvfc rs, rd */
trans_MVFC(DisasContext * ctx,arg_MVFC * a)2131 static bool trans_MVFC(DisasContext *ctx, arg_MVFC *a)
2132 {
2133 move_from_cr(ctx, cpu_regs[a->rd], a->cr, ctx->pc);
2134 return true;
2135 }
2136
2137 /* rtfi */
trans_RTFI(DisasContext * ctx,arg_RTFI * a)2138 static bool trans_RTFI(DisasContext *ctx, arg_RTFI *a)
2139 {
2140 TCGv psw;
2141 if (is_privileged(ctx, 1)) {
2142 psw = tcg_temp_new();
2143 tcg_gen_mov_i32(cpu_pc, cpu_bpc);
2144 tcg_gen_mov_i32(psw, cpu_bpsw);
2145 gen_helper_set_psw_rte(tcg_env, psw);
2146 ctx->base.is_jmp = DISAS_EXIT;
2147 }
2148 return true;
2149 }
2150
2151 /* rte */
trans_RTE(DisasContext * ctx,arg_RTE * a)2152 static bool trans_RTE(DisasContext *ctx, arg_RTE *a)
2153 {
2154 TCGv psw;
2155 if (is_privileged(ctx, 1)) {
2156 psw = tcg_temp_new();
2157 pop(cpu_pc);
2158 pop(psw);
2159 gen_helper_set_psw_rte(tcg_env, psw);
2160 ctx->base.is_jmp = DISAS_EXIT;
2161 }
2162 return true;
2163 }
2164
/* brk */
static bool trans_BRK(DisasContext *ctx, arg_BRK *a)
{
    /*
     * Unconditional trap.  PC must be stored before the helper runs,
     * because the helper raises an exception and never returns.
     */
    tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    gen_helper_rxbrk(tcg_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
2173
2174 /* int #imm */
trans_INT(DisasContext * ctx,arg_INT * a)2175 static bool trans_INT(DisasContext *ctx, arg_INT *a)
2176 {
2177 TCGv vec;
2178
2179 tcg_debug_assert(a->imm < 0x100);
2180 vec = tcg_constant_i32(a->imm);
2181 tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
2182 gen_helper_rxint(tcg_env, vec);
2183 ctx->base.is_jmp = DISAS_NORETURN;
2184 return true;
2185 }
2186
2187 /* wait */
trans_WAIT(DisasContext * ctx,arg_WAIT * a)2188 static bool trans_WAIT(DisasContext *ctx, arg_WAIT *a)
2189 {
2190 if (is_privileged(ctx, 1)) {
2191 tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
2192 gen_helper_wait(tcg_env);
2193 }
2194 return true;
2195 }
2196
/* Translator hook: set up the per-TB disassembly context. */
static void rx_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->env = cpu_env(cs);
    /* Snapshot the TB flags; clrsetpsw() tracks PSW.U changes in here. */
    dc->tb_flags = dc->base.tb->flags;
}
2203
/* Translator hook: no per-TB setup is needed for RX. */
static void rx_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
}
2207
/* Translator hook: record the start PC of each guest instruction. */
static void rx_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    tcg_gen_insn_start(dcbase->pc_next);
}
2214
/* Translator hook: decode and translate one guest instruction. */
static void rx_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    ctx->pc = ctx->base.pc_next;
    /* decode_load() fetches the insn bytes and advances pc_next. */
    if (!decode(ctx, decode_load(ctx))) {
        /* Not a valid encoding: raise an illegal-instruction exception. */
        gen_helper_raise_illegal_instruction(tcg_env);
    }
}
2226
/* Translator hook: emit the TB epilogue according to how we stopped. */
static void rx_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        /* Static successor: direct-chain to the next TB. */
        gen_goto_tb(ctx, 0, dcbase->pc_next);
        break;
    case DISAS_JUMP:
        /* cpu_pc was set at runtime; try the TB-lookup fast path. */
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_UPDATE:
        /* CPU state changed mid-TB: store PC, then exit as for EXIT. */
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
        /* fall through */
    case DISAS_EXIT:
        /* Return to the main loop so pending state is re-examined. */
        tcg_gen_exit_tb(NULL, 0);
        break;
    case DISAS_NORETURN:
        /* An exception was raised; this point is never reached. */
        break;
    default:
        g_assert_not_reached();
    }
}
2251
/* Callback table handed to the generic translator loop. */
static const TranslatorOps rx_tr_ops = {
    .init_disas_context = rx_tr_init_disas_context,
    .tb_start           = rx_tr_tb_start,
    .insn_start         = rx_tr_insn_start,
    .translate_insn     = rx_tr_translate_insn,
    .tb_stop            = rx_tr_tb_stop,
};
2259
/*
 * Entry point from the common translator: translate one TB starting
 * at @pc.  dc is intentionally left uninitialized; translator_loop()
 * fills in dc.base and our hooks fill in the rest.
 */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cs, tb, max_insns, pc, host_pc, &rx_tr_ops, &dc.base);
}
2267
/*
 * Bind the TCG global cpu_<sym> to the CPURXState field of the same
 * name; @name is the label shown in TCG dumps.
 */
#define ALLOC_REGISTER(sym, name) \
    cpu_##sym = tcg_global_mem_new_i32(tcg_env, \
                                       offsetof(CPURXState, sym), name)
2271
rx_translate_init(void)2272 void rx_translate_init(void)
2273 {
2274 static const char * const regnames[NUM_REGS] = {
2275 "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
2276 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15"
2277 };
2278 int i;
2279
2280 for (i = 0; i < NUM_REGS; i++) {
2281 cpu_regs[i] = tcg_global_mem_new_i32(tcg_env,
2282 offsetof(CPURXState, regs[i]),
2283 regnames[i]);
2284 }
2285 ALLOC_REGISTER(pc, "PC");
2286 ALLOC_REGISTER(psw_o, "PSW(O)");
2287 ALLOC_REGISTER(psw_s, "PSW(S)");
2288 ALLOC_REGISTER(psw_z, "PSW(Z)");
2289 ALLOC_REGISTER(psw_c, "PSW(C)");
2290 ALLOC_REGISTER(psw_u, "PSW(U)");
2291 ALLOC_REGISTER(psw_i, "PSW(I)");
2292 ALLOC_REGISTER(psw_pm, "PSW(PM)");
2293 ALLOC_REGISTER(psw_ipl, "PSW(IPL)");
2294 ALLOC_REGISTER(usp, "USP");
2295 ALLOC_REGISTER(fpsw, "FPSW");
2296 ALLOC_REGISTER(bpsw, "BPSW");
2297 ALLOC_REGISTER(bpc, "BPC");
2298 ALLOC_REGISTER(isp, "ISP");
2299 ALLOC_REGISTER(fintv, "FINTV");
2300 ALLOC_REGISTER(intb, "INTB");
2301 cpu_acc = tcg_global_mem_new_i64(tcg_env,
2302 offsetof(CPURXState, acc), "ACC");
2303 }
2304