xref: /qemu/target/riscv/translate.c (revision 92eecfff)
1 /*
2  * RISC-V emulation for qemu: main translation routines.
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2 or later, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18 
19 #include "qemu/osdep.h"
20 #include "qemu/log.h"
21 #include "cpu.h"
22 #include "tcg/tcg-op.h"
23 #include "disas/disas.h"
24 #include "exec/cpu_ldst.h"
25 #include "exec/exec-all.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
28 
29 #include "exec/translator.h"
30 #include "exec/log.h"
31 
32 #include "instmap.h"
33 
34 /* global register indices */
35 static TCGv cpu_gpr[32], cpu_pc, cpu_vl;
36 static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
37 static TCGv load_res;
38 static TCGv load_val;
39 
40 #include "exec/gen-icount.h"
41 
/*
 * Per-translation-block decode state.  Fields are snapshots taken from
 * CPURISCVState / tb->flags in riscv_tr_init_disas_context and must not
 * change mid-TB except where noted (mstatus_fs, frm).
 */
typedef struct DisasContext {
    DisasContextBase base;
    /* pc_succ_insn points to the instruction following base.pc_next */
    target_ulong pc_succ_insn;
    target_ulong priv_ver;   /* copied from env->priv_ver */
    bool virt_enabled;       /* V=1 (guest virtualization on), see init */
    uint32_t opcode;
    uint32_t mstatus_fs;     /* cached mstatus.FS; updated by mark_fs_dirty */
    uint32_t misa;           /* copied misa CSR; queried via has_ext() */
    uint32_t mem_idx;        /* MMU index for data accesses (from tb flags) */
    /* Remember the rounding mode encoded in the previous fp instruction,
       which we have already installed into env->fp_status.  Or -1 for
       no previous fp instruction.  Note that we exit the TB when writing
       to any system register, which includes CSR_FRM, so we do not have
       to reset this known value.  */
    int frm;
    bool ext_ifencei;        /* Zifencei available (cpu cfg) */
    bool hlsx;               /* hypervisor load/store permitted (tb flags) */
    /* vector extension */
    bool vill;               /* vtype.vill: vector config is illegal */
    uint8_t lmul;            /* vtype LMUL field (tb flags) */
    uint8_t sew;             /* vtype SEW field (tb flags) */
    uint16_t vlen;           /* VLEN from cpu cfg */
    uint16_t mlen;           /* derived: 1 << (sew + 3 - lmul) */
    bool vl_eq_vlmax;        /* vl equals vlmax for this TB (tb flags) */
} DisasContext;
68 
#ifdef TARGET_RISCV64
/* convert riscv funct3 to qemu memop for load/store */
static const int tcg_memop_lookup[8] = {
    [0 ... 7] = -1,     /* default: reserved encoding (only 7 remains -1) */
    [0] = MO_SB,        /* signed byte */
    [1] = MO_TESW,      /* signed halfword */
    [2] = MO_TESL,      /* signed word */
    [3] = MO_TEQ,       /* doubleword */
    [4] = MO_UB,        /* unsigned byte */
    [5] = MO_TEUW,      /* unsigned halfword */
    [6] = MO_TEUL,      /* unsigned word */
};
#endif

#ifdef TARGET_RISCV64
/* Match both the XLEN opcode and its 32-bit "W" variant in one label. */
#define CASE_OP_32_64(X) case X: case glue(X, W)
#else
#define CASE_OP_32_64(X) case X
#endif
88 
89 static inline bool has_ext(DisasContext *ctx, uint32_t ext)
90 {
91     return ctx->misa & ext;
92 }
93 
/*
 * RISC-V requires NaN-boxing of narrower width floating point values.
 * This applies when a 32-bit value is assigned to a 64-bit FP register.
 * For consistency and simplicity, we nanbox results even when the RVD
 * extension is not present.
 */
static void gen_nanbox_s(TCGv_i64 out, TCGv_i64 in)
{
    /* Set the upper 32 bits to all-ones; the low 32 bits are the value. */
    tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(32, 32));
}

/*
 * A narrow n-bit operation, where n < FLEN, checks that input operands
 * are correctly Nan-boxed, i.e., all upper FLEN - n bits are 1.
 * If so, the least-significant bits of the input are used, otherwise the
 * input value is treated as an n-bit canonical NaN (v2.2 section 9.2).
 *
 * Here, the result is always nan-boxed, even the canonical nan.
 */
static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in)
{
    /* t_max = smallest properly boxed value; t_nan = boxed canonical NaN */
    TCGv_i64 t_max = tcg_const_i64(0xffffffff00000000ull);
    TCGv_i64 t_nan = tcg_const_i64(0xffffffff7fc00000ull);

    /* out = (in >= t_max, unsigned) ? in : canonical NaN */
    tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan);
    tcg_temp_free_i64(t_max);
    tcg_temp_free_i64(t_nan);
}
122 
/* Raise exception 'excp' at the current instruction; ends the TB. */
static void generate_exception(DisasContext *ctx, int excp)
{
    /* Synchronize pc so the exception reports the faulting insn. */
    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    TCGv_i32 helper_tmp = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
    ctx->base.is_jmp = DISAS_NORETURN;
}

/* As generate_exception, but also record the faulting address in badaddr. */
static void generate_exception_mbadaddr(DisasContext *ctx, int excp)
{
    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    /* badaddr = pc: used for misaligned-fetch style exceptions. */
    tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr));
    TCGv_i32 helper_tmp = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
    ctx->base.is_jmp = DISAS_NORETURN;
}

/* Raise EXCP_DEBUG (used for single-step and breakpoints). */
static void gen_exception_debug(void)
{
    TCGv_i32 helper_tmp = tcg_const_i32(EXCP_DEBUG);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
}
148 
/* Wrapper around tcg_gen_exit_tb that handles single stepping */
static void exit_tb(DisasContext *ctx)
{
    if (ctx->base.singlestep_enabled) {
        /* Debugger stepping: stop after every instruction. */
        gen_exception_debug();
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}

/* Wrapper around tcg_gen_lookup_and_goto_ptr that handles single stepping */
static void lookup_and_goto_ptr(DisasContext *ctx)
{
    if (ctx->base.singlestep_enabled) {
        gen_exception_debug();
    } else {
        tcg_gen_lookup_and_goto_ptr();
    }
}
168 
/* Raise an illegal-instruction exception for the current insn. */
static void gen_exception_illegal(DisasContext *ctx)
{
    generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
}

/* Raise an instruction-address-misaligned exception (badaddr = pc). */
static void gen_exception_inst_addr_mis(DisasContext *ctx)
{
    generate_exception_mbadaddr(ctx, RISCV_EXCP_INST_ADDR_MIS);
}
178 
/* Whether a direct TB-to-TB jump to 'dest' is permitted. */
static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    if (unlikely(ctx->base.singlestep_enabled)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    /* Direct chaining only within the same guest page (softmmu). */
    return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

/* Emit a jump to 'dest', chaining TBs directly when allowed. */
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        /* chaining is only allowed when the jump is to the same page */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_pc, dest);

        /* No need to check for single stepping here as use_goto_tb() will
         * return false in case of single stepping.
         */
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_tl(cpu_pc, dest);
        lookup_and_goto_ptr(ctx);
    }
}
208 
209 /* Wrapper for getting reg values - need to check of reg is zero since
210  * cpu_gpr[0] is not actually allocated
211  */
212 static inline void gen_get_gpr(TCGv t, int reg_num)
213 {
214     if (reg_num == 0) {
215         tcg_gen_movi_tl(t, 0);
216     } else {
217         tcg_gen_mov_tl(t, cpu_gpr[reg_num]);
218     }
219 }
220 
221 /* Wrapper for setting reg values - need to check of reg is zero since
222  * cpu_gpr[0] is not actually allocated. this is more for safety purposes,
223  * since we usually avoid calling the OP_TYPE_gen function if we see a write to
224  * $zero
225  */
226 static inline void gen_set_gpr(int reg_num_dst, TCGv t)
227 {
228     if (reg_num_dst != 0) {
229         tcg_gen_mov_tl(cpu_gpr[reg_num_dst], t);
230     }
231 }
232 
/*
 * ret = high XLEN bits of signed(arg1) * unsigned(arg2)  (MULHSU).
 *
 * Start from the unsigned full product; when arg1 is negative its signed
 * value is arg1 - 2^XLEN, so the signed high word is the unsigned high
 * word minus arg2.
 */
static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv rl = tcg_temp_new();
    TCGv rh = tcg_temp_new();

    tcg_gen_mulu2_tl(rl, rh, arg1, arg2);
    /* fix up for one negative: rl = (arg1 < 0) ? arg2 : 0 */
    tcg_gen_sari_tl(rl, arg1, TARGET_LONG_BITS - 1);
    tcg_gen_and_tl(rl, rl, arg2);
    tcg_gen_sub_tl(ret, rh, rl);

    tcg_temp_free(rl);
    tcg_temp_free(rh);
}
247 
/*
 * ret = source1 / source2 (signed), with RISC-V DIV semantics:
 *   divide by zero  -> ret = -1
 *   INT_MIN / -1    -> ret = INT_MIN (no trap)
 * Implemented branch-free by patching the operands with movcond.
 * NOTE: source1/source2 are clobbered.
 */
static void gen_div(TCGv ret, TCGv source1, TCGv source2)
{
    TCGv cond1, cond2, zeroreg, resultopt1;
    /*
     * Handle by altering args to tcg_gen_div to produce req'd results:
     * For overflow: want source1 in source1 and 1 in source2
     * For div by zero: want -1 in source1 and 1 in source2 -> -1 result
     */
    cond1 = tcg_temp_new();
    cond2 = tcg_temp_new();
    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)(~0L));
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
                        ((target_ulong)1) << (TARGET_LONG_BITS - 1));
    tcg_gen_and_tl(cond1, cond1, cond2); /* cond1 = overflow */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, 0); /* cond2 = div 0 */
    /* if div by zero, set source1 to -1, otherwise don't change */
    tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond2, zeroreg, source1,
            resultopt1);
    /* if overflow or div by zero, set source2 to 1, else don't change */
    tcg_gen_or_tl(cond1, cond1, cond2);
    tcg_gen_movi_tl(resultopt1, (target_ulong)1);
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
            resultopt1);
    tcg_gen_div_tl(ret, source1, source2);

    tcg_temp_free(cond1);
    tcg_temp_free(cond2);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}
282 
/*
 * ret = source1 / source2 (unsigned), with RISC-V DIVU semantics:
 *   divide by zero -> ret = all-ones (-1).
 * Branch-free: on zero divisor, patch dividend to -1 and divisor to 1.
 * NOTE: source1/source2 are clobbered.
 */
static void gen_divu(TCGv ret, TCGv source1, TCGv source2)
{
    TCGv cond1, zeroreg, resultopt1;
    cond1 = tcg_temp_new();

    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0); /* cond1 = div 0 */
    tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
    /* if div by zero, set source1 to -1, else don't change */
    tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, source1,
            resultopt1);
    /* if div by zero, set source2 to 1, else don't change */
    tcg_gen_movi_tl(resultopt1, (target_ulong)1);
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
            resultopt1);
    tcg_gen_divu_tl(ret, source1, source2);

    tcg_temp_free(cond1);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}
304 
/*
 * ret = source1 % source2 (signed), with RISC-V REM semantics:
 *   divide by zero  -> ret = source1 (the dividend)
 *   INT_MIN % -1    -> ret = 0 (overflow case: divisor patched to 1)
 * NOTE: unlike gen_div, here cond2 holds the overflow condition and
 * cond1 holds the divide-by-zero condition.
 * source1/source2 are clobbered.
 */
static void gen_rem(TCGv ret, TCGv source1, TCGv source2)
{
    TCGv cond1, cond2, zeroreg, resultopt1;

    cond1 = tcg_temp_new();
    cond2 = tcg_temp_new();
    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    tcg_gen_movi_tl(resultopt1, 1L);
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)-1);
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
                        (target_ulong)1 << (TARGET_LONG_BITS - 1));
    tcg_gen_and_tl(cond2, cond1, cond2); /* cond2 = overflow */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0); /* cond1 = div 0 */
    /* if overflow or div by zero, set source2 to 1, else don't change */
    tcg_gen_or_tl(cond2, cond1, cond2);
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond2, zeroreg, source2,
            resultopt1);
    tcg_gen_rem_tl(resultopt1, source1, source2);
    /* if div by zero, just return the original dividend */
    tcg_gen_movcond_tl(TCG_COND_EQ, ret, cond1, zeroreg, resultopt1,
            source1);

    tcg_temp_free(cond1);
    tcg_temp_free(cond2);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}
334 
/*
 * ret = source1 % source2 (unsigned), with RISC-V REMU semantics:
 *   divide by zero -> ret = source1 (the dividend).
 * On zero divisor the divisor is patched to 1 so the remu itself is safe,
 * then the final movcond selects the original dividend.
 * NOTE: source2 is clobbered.
 */
static void gen_remu(TCGv ret, TCGv source1, TCGv source2)
{
    TCGv cond1, zeroreg, resultopt1;
    cond1 = tcg_temp_new();
    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    tcg_gen_movi_tl(resultopt1, (target_ulong)1);
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0); /* cond1 = div 0 */
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
            resultopt1);
    tcg_gen_remu_tl(resultopt1, source1, source2);
    /* if div by zero, just return the original dividend */
    tcg_gen_movcond_tl(TCG_COND_EQ, ret, cond1, zeroreg, resultopt1,
            source1);

    tcg_temp_free(cond1);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}
355 
356 static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
357 {
358     target_ulong next_pc;
359 
360     /* check misaligned: */
361     next_pc = ctx->base.pc_next + imm;
362     if (!has_ext(ctx, RVC)) {
363         if ((next_pc & 0x3) != 0) {
364             gen_exception_inst_addr_mis(ctx);
365             return;
366         }
367     }
368     if (rd != 0) {
369         tcg_gen_movi_tl(cpu_gpr[rd], ctx->pc_succ_insn);
370     }
371 
372     gen_goto_tb(ctx, 0, ctx->base.pc_next + imm); /* must use this for safety */
373     ctx->base.is_jmp = DISAS_NORETURN;
374 }
375 
376 #ifdef TARGET_RISCV64
377 static void gen_load_c(DisasContext *ctx, uint32_t opc, int rd, int rs1,
378         target_long imm)
379 {
380     TCGv t0 = tcg_temp_new();
381     TCGv t1 = tcg_temp_new();
382     gen_get_gpr(t0, rs1);
383     tcg_gen_addi_tl(t0, t0, imm);
384     int memop = tcg_memop_lookup[(opc >> 12) & 0x7];
385 
386     if (memop < 0) {
387         gen_exception_illegal(ctx);
388         return;
389     }
390 
391     tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, memop);
392     gen_set_gpr(rd, t1);
393     tcg_temp_free(t0);
394     tcg_temp_free(t1);
395 }
396 
397 static void gen_store_c(DisasContext *ctx, uint32_t opc, int rs1, int rs2,
398         target_long imm)
399 {
400     TCGv t0 = tcg_temp_new();
401     TCGv dat = tcg_temp_new();
402     gen_get_gpr(t0, rs1);
403     tcg_gen_addi_tl(t0, t0, imm);
404     gen_get_gpr(dat, rs2);
405     int memop = tcg_memop_lookup[(opc >> 12) & 0x7];
406 
407     if (memop < 0) {
408         gen_exception_illegal(ctx);
409         return;
410     }
411 
412     tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx, memop);
413     tcg_temp_free(t0);
414     tcg_temp_free(dat);
415 }
416 #endif
417 
#ifndef CONFIG_USER_ONLY
/* The states of mstatus_fs are:
 * 0 = disabled, 1 = initial, 2 = clean, 3 = dirty
 * We will have already diagnosed disabled state,
 * and need to turn initial/clean into dirty.
 */
static void mark_fs_dirty(DisasContext *ctx)
{
    TCGv tmp;
    /* Already marked dirty earlier in this TB -- nothing to emit. */
    if (ctx->mstatus_fs == MSTATUS_FS) {
        return;
    }
    /* Remember the state change for the rest of the TB.  */
    ctx->mstatus_fs = MSTATUS_FS;

    tmp = tcg_temp_new();
    tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
    tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS | MSTATUS_SD);
    tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));

    if (ctx->virt_enabled) {
        /* With V=1, also set FS/SD in the mstatus_hs copy. */
        tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
        tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS | MSTATUS_SD);
        tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
    }
    tcg_temp_free(tmp);
}
#else
/* User-mode emulation has no mstatus to track. */
static inline void mark_fs_dirty(DisasContext *ctx) { }
#endif
448 
449 #if !defined(TARGET_RISCV64)
450 static void gen_fp_load(DisasContext *ctx, uint32_t opc, int rd,
451         int rs1, target_long imm)
452 {
453     TCGv t0;
454 
455     if (ctx->mstatus_fs == 0) {
456         gen_exception_illegal(ctx);
457         return;
458     }
459 
460     t0 = tcg_temp_new();
461     gen_get_gpr(t0, rs1);
462     tcg_gen_addi_tl(t0, t0, imm);
463 
464     switch (opc) {
465     case OPC_RISC_FLW:
466         if (!has_ext(ctx, RVF)) {
467             goto do_illegal;
468         }
469         tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEUL);
470         /* RISC-V requires NaN-boxing of narrower width floating point values */
471         tcg_gen_ori_i64(cpu_fpr[rd], cpu_fpr[rd], 0xffffffff00000000ULL);
472         break;
473     case OPC_RISC_FLD:
474         if (!has_ext(ctx, RVD)) {
475             goto do_illegal;
476         }
477         tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEQ);
478         break;
479     do_illegal:
480     default:
481         gen_exception_illegal(ctx);
482         break;
483     }
484     tcg_temp_free(t0);
485 
486     mark_fs_dirty(ctx);
487 }
488 
/*
 * Legacy decoder helper (RV32 only) for FSW/FSD: mem[rs1 + imm] = fpr[rs2].
 * Illegal when FP is disabled (mstatus.FS == 0) or the relevant
 * extension (F or D) is absent.  A store reads no FP state back, so it
 * does not need to mark FS dirty.
 */
static void gen_fp_store(DisasContext *ctx, uint32_t opc, int rs1,
        int rs2, target_long imm)
{
    TCGv t0;

    if (ctx->mstatus_fs == 0) {
        gen_exception_illegal(ctx);
        return;
    }

    t0 = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);

    switch (opc) {
    case OPC_RISC_FSW:
        if (!has_ext(ctx, RVF)) {
            goto do_illegal;
        }
        /* Only the low 32 bits of the register are stored. */
        tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEUL);
        break;
    case OPC_RISC_FSD:
        if (!has_ext(ctx, RVD)) {
            goto do_illegal;
        }
        tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEQ);
        break;
    do_illegal:
    default:
        gen_exception_illegal(ctx);
        break;
    }

    tcg_temp_free(t0);
}
524 #endif
525 
/*
 * Install rounding mode 'rm' into env->fp_status via helper, skipping
 * the call if the previous fp insn in this TB already installed it
 * (tracked in ctx->frm; -1 means unknown).
 */
static void gen_set_rm(DisasContext *ctx, int rm)
{
    TCGv_i32 t0;

    if (ctx->frm == rm) {
        return;
    }
    ctx->frm = rm;
    t0 = tcg_const_i32(rm);
    gen_helper_set_rounding_mode(cpu_env, t0);
    tcg_temp_free_i32(t0);
}
538 
/*
 * Legacy decoder for compressed quadrant-0 insns not handled by
 * decode_insn16: C.LD/C.SD on RV64, C.FLW/C.FSW on RV32.
 * Other funct3 values fall through silently (already decoded elsewhere).
 */
static void decode_RV32_64C0(DisasContext *ctx, uint16_t opcode)
{
    uint8_t funct3 = extract16(opcode, 13, 3);
    uint8_t rd_rs2 = GET_C_RS2S(opcode);
    uint8_t rs1s = GET_C_RS1S(opcode);

    switch (funct3) {
    case 3:
#if defined(TARGET_RISCV64)
        /* C.LD(RV64/128) -> ld rd', offset[7:3](rs1')*/
        gen_load_c(ctx, OPC_RISC_LD, rd_rs2, rs1s,
                 GET_C_LD_IMM(opcode));
#else
        /* C.FLW (RV32) -> flw rd', offset[6:2](rs1')*/
        gen_fp_load(ctx, OPC_RISC_FLW, rd_rs2, rs1s,
                    GET_C_LW_IMM(opcode));
#endif
        break;
    case 7:
#if defined(TARGET_RISCV64)
        /* C.SD (RV64/128) -> sd rs2', offset[7:3](rs1')*/
        gen_store_c(ctx, OPC_RISC_SD, rs1s, rd_rs2,
                  GET_C_LD_IMM(opcode));
#else
        /* C.FSW (RV32) -> fsw rs2', offset[6:2](rs1')*/
        gen_fp_store(ctx, OPC_RISC_FSW, rs1s, rd_rs2,
                     GET_C_LW_IMM(opcode));
#endif
        break;
    }
}
570 
571 static void decode_RV32_64C(DisasContext *ctx, uint16_t opcode)
572 {
573     uint8_t op = extract16(opcode, 0, 2);
574 
575     switch (op) {
576     case 0:
577         decode_RV32_64C0(ctx, opcode);
578         break;
579     }
580 }
581 
/* Decode-table immediate transform: encoded value + 1 (e.g. nf fields). */
static int ex_plus_1(DisasContext *ctx, int nf)
{
    return nf + 1;
}

/* Decode-table immediate transforms: shift the encoded value left. */
#define EX_SH(amount) \
    static int ex_shift_##amount(DisasContext *ctx, int imm) \
    {                                         \
        return imm << amount;                 \
    }
EX_SH(1)
EX_SH(2)
EX_SH(3)
EX_SH(4)
EX_SH(12)
597 
/* Bail out of a trans_* function (returning false) if 'ext' is absent. */
#define REQUIRE_EXT(ctx, ext) do { \
    if (!has_ext(ctx, ext)) {      \
        return false;              \
    }                              \
} while (0)

/* Map a 3-bit compressed register field to the full x8..x15 index. */
static int ex_rvc_register(DisasContext *ctx, int reg)
{
    return 8 + reg;
}

/* Decode a compressed shift amount. */
static int ex_rvc_shifti(DisasContext *ctx, int imm)
{
    /* For RV128 a shamt of 0 means a shift by 64. */
    return imm ? imm : 64;
}
614 
615 /* Include the auto-generated decoder for 32 bit insn */
616 #include "decode-insn32.c.inc"
617 
618 static bool gen_arith_imm_fn(DisasContext *ctx, arg_i *a,
619                              void (*func)(TCGv, TCGv, target_long))
620 {
621     TCGv source1;
622     source1 = tcg_temp_new();
623 
624     gen_get_gpr(source1, a->rs1);
625 
626     (*func)(source1, source1, a->imm);
627 
628     gen_set_gpr(a->rd, source1);
629     tcg_temp_free(source1);
630     return true;
631 }
632 
633 static bool gen_arith_imm_tl(DisasContext *ctx, arg_i *a,
634                              void (*func)(TCGv, TCGv, TCGv))
635 {
636     TCGv source1, source2;
637     source1 = tcg_temp_new();
638     source2 = tcg_temp_new();
639 
640     gen_get_gpr(source1, a->rs1);
641     tcg_gen_movi_tl(source2, a->imm);
642 
643     (*func)(source1, source1, source2);
644 
645     gen_set_gpr(a->rd, source1);
646     tcg_temp_free(source1);
647     tcg_temp_free(source2);
648     return true;
649 }
650 
651 #ifdef TARGET_RISCV64
/* ADDW: 32-bit add, result sign-extended to XLEN. */
static void gen_addw(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_add_tl(ret, arg1, arg2);
    tcg_gen_ext32s_tl(ret, ret);
}

/* SUBW: 32-bit subtract, result sign-extended to XLEN. */
static void gen_subw(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_sub_tl(ret, arg1, arg2);
    tcg_gen_ext32s_tl(ret, ret);
}

/* MULW: 32-bit multiply, result sign-extended to XLEN. */
static void gen_mulw(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_mul_tl(ret, arg1, arg2);
    tcg_gen_ext32s_tl(ret, ret);
}
669 
670 static bool gen_arith_div_w(DisasContext *ctx, arg_r *a,
671                             void(*func)(TCGv, TCGv, TCGv))
672 {
673     TCGv source1, source2;
674     source1 = tcg_temp_new();
675     source2 = tcg_temp_new();
676 
677     gen_get_gpr(source1, a->rs1);
678     gen_get_gpr(source2, a->rs2);
679     tcg_gen_ext32s_tl(source1, source1);
680     tcg_gen_ext32s_tl(source2, source2);
681 
682     (*func)(source1, source1, source2);
683 
684     tcg_gen_ext32s_tl(source1, source1);
685     gen_set_gpr(a->rd, source1);
686     tcg_temp_free(source1);
687     tcg_temp_free(source2);
688     return true;
689 }
690 
691 static bool gen_arith_div_uw(DisasContext *ctx, arg_r *a,
692                             void(*func)(TCGv, TCGv, TCGv))
693 {
694     TCGv source1, source2;
695     source1 = tcg_temp_new();
696     source2 = tcg_temp_new();
697 
698     gen_get_gpr(source1, a->rs1);
699     gen_get_gpr(source2, a->rs2);
700     tcg_gen_ext32u_tl(source1, source1);
701     tcg_gen_ext32u_tl(source2, source2);
702 
703     (*func)(source1, source1, source2);
704 
705     tcg_gen_ext32s_tl(source1, source1);
706     gen_set_gpr(a->rd, source1);
707     tcg_temp_free(source1);
708     tcg_temp_free(source2);
709     return true;
710 }
711 
712 #endif
713 
714 static bool gen_arith(DisasContext *ctx, arg_r *a,
715                       void(*func)(TCGv, TCGv, TCGv))
716 {
717     TCGv source1, source2;
718     source1 = tcg_temp_new();
719     source2 = tcg_temp_new();
720 
721     gen_get_gpr(source1, a->rs1);
722     gen_get_gpr(source2, a->rs2);
723 
724     (*func)(source1, source1, source2);
725 
726     gen_set_gpr(a->rd, source1);
727     tcg_temp_free(source1);
728     tcg_temp_free(source2);
729     return true;
730 }
731 
732 static bool gen_shift(DisasContext *ctx, arg_r *a,
733                         void(*func)(TCGv, TCGv, TCGv))
734 {
735     TCGv source1 = tcg_temp_new();
736     TCGv source2 = tcg_temp_new();
737 
738     gen_get_gpr(source1, a->rs1);
739     gen_get_gpr(source2, a->rs2);
740 
741     tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
742     (*func)(source1, source1, source2);
743 
744     gen_set_gpr(a->rd, source1);
745     tcg_temp_free(source1);
746     tcg_temp_free(source2);
747     return true;
748 }
749 
750 /* Include insn module translation function */
751 #include "insn_trans/trans_rvi.c.inc"
752 #include "insn_trans/trans_rvm.c.inc"
753 #include "insn_trans/trans_rva.c.inc"
754 #include "insn_trans/trans_rvf.c.inc"
755 #include "insn_trans/trans_rvd.c.inc"
756 #include "insn_trans/trans_rvh.c.inc"
757 #include "insn_trans/trans_rvv.c.inc"
758 #include "insn_trans/trans_privileged.c.inc"
759 
760 /* Include the auto-generated decoder for 16 bit insn */
761 #include "decode-insn16.c.inc"
762 
/*
 * Decode one instruction.  'opcode' holds the first 16 bits already
 * fetched; a 32-bit insn (low two bits == 0b11) gets its upper half
 * fetched here.  Sets ctx->pc_succ_insn to the following insn's address
 * and raises illegal-instruction if nothing decodes.
 */
static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
{
    /* check for compressed insn */
    if (extract16(opcode, 0, 2) != 3) {
        if (!has_ext(ctx, RVC)) {
            gen_exception_illegal(ctx);
        } else {
            ctx->pc_succ_insn = ctx->base.pc_next + 2;
            if (!decode_insn16(ctx, opcode)) {
                /* fall back to old decoder */
                decode_RV32_64C(ctx, opcode);
            }
        }
    } else {
        uint32_t opcode32 = opcode;
        opcode32 = deposit32(opcode32, 16, 16,
                             translator_lduw(env, ctx->base.pc_next + 2));
        ctx->pc_succ_insn = ctx->base.pc_next + 4;
        if (!decode_insn32(ctx, opcode32)) {
            gen_exception_illegal(ctx);
        }
    }
}
786 
/* Populate the DisasContext from CPU state and tb->flags for a new TB. */
static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPURISCVState *env = cs->env_ptr;
    RISCVCPU *cpu = RISCV_CPU(cs);
    uint32_t tb_flags = ctx->base.tb->flags;

    ctx->pc_succ_insn = ctx->base.pc_first;
    ctx->mem_idx = tb_flags & TB_FLAGS_MMU_MASK;
    ctx->mstatus_fs = tb_flags & TB_FLAGS_MSTATUS_FS;
    ctx->priv_ver = env->priv_ver;
#if !defined(CONFIG_USER_ONLY)
    /* Virtualization state is only meaningful when RVH is present. */
    if (riscv_has_ext(env, RVH)) {
        ctx->virt_enabled = riscv_cpu_virt_enabled(env);
    } else {
        ctx->virt_enabled = false;
    }
#else
    ctx->virt_enabled = false;
#endif
    ctx->misa = env->misa;
    ctx->frm = -1;  /* unknown rounding mode */
    ctx->ext_ifencei = cpu->cfg.ext_ifencei;
    ctx->vlen = cpu->cfg.vlen;
    ctx->hlsx = FIELD_EX32(tb_flags, TB_FLAGS, HLSX);
    /* Vector configuration snapshot, encoded into the TB flags. */
    ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL);
    ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW);
    ctx->lmul = FIELD_EX32(tb_flags, TB_FLAGS, LMUL);
    ctx->mlen = 1 << (ctx->sew  + 3 - ctx->lmul);
    ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
}
818 
/* No per-TB prologue needed for RISC-V. */
static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

/* Record the insn's pc so exceptions/interrupts can restore state. */
static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next);
}
829 
/* Emit a debug exception when a guest breakpoint is hit; ends the TB. */
static bool riscv_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                      const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    ctx->base.is_jmp = DISAS_NORETURN;
    gen_exception_debug();
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order to for it to be
       properly cleared -- thus we increment the PC here so that
       the logic setting tb->size below does the right thing.  */
    ctx->base.pc_next += 4;
    return true;
}
845 
/* Fetch and translate a single instruction, then advance pc_next. */
static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPURISCVState *env = cpu->env_ptr;
    uint16_t opcode16 = translator_lduw(env, ctx->base.pc_next);

    decode_opc(env, ctx, opcode16);
    ctx->base.pc_next = ctx->pc_succ_insn;

    if (ctx->base.is_jmp == DISAS_NEXT) {
        target_ulong page_start;

        /* Stop translating at the guest page boundary. */
        page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
        if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE) {
            ctx->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
864 
/* Finish the TB: chain to the next pc unless an insn already ended it. */
static void riscv_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        break;
    case DISAS_NORETURN:
        /* Exception or jump already emitted -- nothing to do. */
        break;
    default:
        g_assert_not_reached();
    }
}
879 
/* Log the disassembly of the TB (plus priv/virt state in softmmu). */
static void riscv_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *rvcpu = RISCV_CPU(cpu);
    CPURISCVState *env = &rvcpu->env;
#endif

    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
#ifndef CONFIG_USER_ONLY
    qemu_log("Priv: "TARGET_FMT_ld"; Virt: "TARGET_FMT_ld"\n", env->priv, env->virt);
#endif
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}
893 
/* Hooks wiring the RISC-V frontend into the generic translator loop. */
static const TranslatorOps riscv_tr_ops = {
    .init_disas_context = riscv_tr_init_disas_context,
    .tb_start           = riscv_tr_tb_start,
    .insn_start         = riscv_tr_insn_start,
    .breakpoint_check   = riscv_tr_breakpoint_check,
    .translate_insn     = riscv_tr_translate_insn,
    .tb_stop            = riscv_tr_tb_stop,
    .disas_log          = riscv_tr_disas_log,
};
903 
/* Entry point: translate one TB using the generic translator loop. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext ctx;

    translator_loop(&riscv_tr_ops, &ctx.base, cs, tb, max_insns);
}
910 
/* One-time init: create the TCG globals backing the CPU register state. */
void riscv_translate_init(void)
{
    int i;

    /* cpu_gpr[0] is a placeholder for the zero register. Do not use it. */
    /* Use the gen_set_gpr and gen_get_gpr helper functions when accessing */
    /* registers, unless you specifically block reads/writes to reg 0 */
    cpu_gpr[0] = NULL;

    for (i = 1; i < 32; i++) {
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]);
    }

    for (i = 0; i < 32; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
            offsetof(CPURISCVState, fpr[i]), riscv_fpr_regnames[i]);
    }

    cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, pc), "pc");
    cpu_vl = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, vl), "vl");
    /* LR/SC reservation address and value for atomic support. */
    load_res = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_res),
                             "load_res");
    load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val),
                             "load_val");
}
937