xref: /qemu/target/riscv/translate.c (revision 2e8f72ac)
1 /*
2  * RISC-V emulation for qemu: main translation routines.
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2 or later, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18 
19 #include "qemu/osdep.h"
20 #include "qemu/log.h"
21 #include "cpu.h"
22 #include "tcg/tcg-op.h"
23 #include "disas/disas.h"
24 #include "exec/cpu_ldst.h"
25 #include "exec/exec-all.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
28 
29 #include "exec/translator.h"
30 #include "exec/log.h"
31 
32 #include "instmap.h"
33 
/* global register indices */
static TCGv cpu_gpr[32], cpu_pc, cpu_vl; /* x0..x31, program counter, vector length */
static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
static TCGv load_res; /* LR/SC: reserved address (-1 when no reservation) */
static TCGv load_val; /* LR/SC: value loaded by LR, compared by SC */
39 
40 #include "exec/gen-icount.h"
41 
typedef struct DisasContext {
    DisasContextBase base;
    /* pc_succ_insn points to the instruction following base.pc_next */
    target_ulong pc_succ_insn;
    target_ulong priv_ver;      /* privileged-spec version from env */
    bool virt_enabled;          /* executing in virtualized (VS/VU) mode */
    uint32_t opcode;
    uint32_t mstatus_fs;        /* cached mstatus.FS field from tb->flags */
    uint32_t misa;              /* cached misa for extension checks */
    uint32_t mem_idx;           /* MMU index for loads/stores */
    /* Remember the rounding mode encoded in the previous fp instruction,
       which we have already installed into env->fp_status.  Or -1 for
       no previous fp instruction.  Note that we exit the TB when writing
       to any system register, which includes CSR_FRM, so we do not have
       to reset this known value.  */
    int frm;
    bool ext_ifencei;           /* Zifencei extension present */
    bool hlsx;                  /* hypervisor HLV/HSV access permitted */
    /* vector extension */
    bool vill;                  /* vtype is illegal */
    uint8_t lmul;               /* vector register group multiplier (log2) */
    uint8_t sew;                /* selected element width (log2, encoded) */
    uint16_t vlen;              /* VLEN in bits, from cpu cfg */
    uint16_t mlen;              /* mask element length, derived from sew/lmul */
    bool vl_eq_vlmax;           /* vl == vlmax, enables whole-register fast paths */
    CPUState *cs;
} DisasContext;
69 
#ifdef TARGET_RISCV64
/* convert riscv funct3 to qemu memop for load/store.
 * Unlisted encodings stay -1 and are rejected as illegal by the callers. */
static const int tcg_memop_lookup[8] = {
    [0 ... 7] = -1,
    [0] = MO_SB,    /* LB/SB */
    [1] = MO_TESW,  /* LH/SH */
    [2] = MO_TESL,  /* LW/SW */
    [3] = MO_TEQ,   /* LD/SD */
    [4] = MO_UB,    /* LBU */
    [5] = MO_TEUW,  /* LHU */
    [6] = MO_TEUL,  /* LWU */
};
#endif
83 
/* Expand to both the base case label and its -W variant on RV64,
 * just the base case on RV32 (where no W-form opcodes exist). */
#ifdef TARGET_RISCV64
#define CASE_OP_32_64(X) case X: case glue(X, W)
#else
#define CASE_OP_32_64(X) case X
#endif
89 
90 static inline bool has_ext(DisasContext *ctx, uint32_t ext)
91 {
92     return ctx->misa & ext;
93 }
94 
/*
 * RISC-V requires NaN-boxing of narrower width floating point values.
 * This applies when a 32-bit value is assigned to a 64-bit FP register.
 * For consistency and simplicity, we nanbox results even when the RVD
 * extension is not present.
 */
static void gen_nanbox_s(TCGv_i64 out, TCGv_i64 in)
{
    /* Set all upper 32 bits to 1, keeping the low 32 bits as-is. */
    tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(32, 32));
}
105 
/*
 * A narrow n-bit operation, where n < FLEN, checks that input operands
 * are correctly Nan-boxed, i.e., all upper FLEN - n bits are 1.
 * If so, the least-significant bits of the input are used, otherwise the
 * input value is treated as an n-bit canonical NaN (v2.2 section 9.2).
 *
 * Here, the result is always nan-boxed, even the canonical nan.
 */
static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in)
{
    /* t_nan is the nan-boxed single-precision canonical NaN (0x7fc00000). */
    TCGv_i64 t_max = tcg_const_i64(0xffffffff00000000ull);
    TCGv_i64 t_nan = tcg_const_i64(0xffffffff7fc00000ull);

    /* in >= t_max (unsigned) iff the upper 32 bits are all ones. */
    tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan);
    tcg_temp_free_i64(t_max);
    tcg_temp_free_i64(t_nan);
}
123 
124 static void generate_exception(DisasContext *ctx, int excp)
125 {
126     tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
127     TCGv_i32 helper_tmp = tcg_const_i32(excp);
128     gen_helper_raise_exception(cpu_env, helper_tmp);
129     tcg_temp_free_i32(helper_tmp);
130     ctx->base.is_jmp = DISAS_NORETURN;
131 }
132 
133 static void generate_exception_mbadaddr(DisasContext *ctx, int excp)
134 {
135     tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
136     tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr));
137     TCGv_i32 helper_tmp = tcg_const_i32(excp);
138     gen_helper_raise_exception(cpu_env, helper_tmp);
139     tcg_temp_free_i32(helper_tmp);
140     ctx->base.is_jmp = DISAS_NORETURN;
141 }
142 
143 static void gen_exception_debug(void)
144 {
145     TCGv_i32 helper_tmp = tcg_const_i32(EXCP_DEBUG);
146     gen_helper_raise_exception(cpu_env, helper_tmp);
147     tcg_temp_free_i32(helper_tmp);
148 }
149 
150 /* Wrapper around tcg_gen_exit_tb that handles single stepping */
151 static void exit_tb(DisasContext *ctx)
152 {
153     if (ctx->base.singlestep_enabled) {
154         gen_exception_debug();
155     } else {
156         tcg_gen_exit_tb(NULL, 0);
157     }
158 }
159 
160 /* Wrapper around tcg_gen_lookup_and_goto_ptr that handles single stepping */
161 static void lookup_and_goto_ptr(DisasContext *ctx)
162 {
163     if (ctx->base.singlestep_enabled) {
164         gen_exception_debug();
165     } else {
166         tcg_gen_lookup_and_goto_ptr();
167     }
168 }
169 
/* Raise an illegal-instruction exception at the current pc. */
static void gen_exception_illegal(DisasContext *ctx)
{
    generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
}
174 
/* Raise instruction-address-misaligned; badaddr is set to the current pc. */
static void gen_exception_inst_addr_mis(DisasContext *ctx)
{
    generate_exception_mbadaddr(ctx, RISCV_EXCP_INST_ADDR_MIS);
}
179 
180 static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
181 {
182     if (unlikely(ctx->base.singlestep_enabled)) {
183         return false;
184     }
185 
186 #ifndef CONFIG_USER_ONLY
187     return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
188 #else
189     return true;
190 #endif
191 }
192 
193 static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
194 {
195     if (use_goto_tb(ctx, dest)) {
196         /* chaining is only allowed when the jump is to the same page */
197         tcg_gen_goto_tb(n);
198         tcg_gen_movi_tl(cpu_pc, dest);
199 
200         /* No need to check for single stepping here as use_goto_tb() will
201          * return false in case of single stepping.
202          */
203         tcg_gen_exit_tb(ctx->base.tb, n);
204     } else {
205         tcg_gen_movi_tl(cpu_pc, dest);
206         lookup_and_goto_ptr(ctx);
207     }
208 }
209 
210 /* Wrapper for getting reg values - need to check of reg is zero since
211  * cpu_gpr[0] is not actually allocated
212  */
213 static inline void gen_get_gpr(TCGv t, int reg_num)
214 {
215     if (reg_num == 0) {
216         tcg_gen_movi_tl(t, 0);
217     } else {
218         tcg_gen_mov_tl(t, cpu_gpr[reg_num]);
219     }
220 }
221 
222 /* Wrapper for setting reg values - need to check of reg is zero since
223  * cpu_gpr[0] is not actually allocated. this is more for safety purposes,
224  * since we usually avoid calling the OP_TYPE_gen function if we see a write to
225  * $zero
226  */
227 static inline void gen_set_gpr(int reg_num_dst, TCGv t)
228 {
229     if (reg_num_dst != 0) {
230         tcg_gen_mov_tl(cpu_gpr[reg_num_dst], t);
231     }
232 }
233 
/*
 * ret = high XLEN bits of (signed)arg1 * (unsigned)arg2 (MULHSU).
 * Computed as an unsigned full multiply, then corrected: if arg1 is
 * negative, the unsigned high half overstates the signed result by
 * arg2, so subtract arg2 in that case.
 */
static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv rl = tcg_temp_new();
    TCGv rh = tcg_temp_new();

    tcg_gen_mulu2_tl(rl, rh, arg1, arg2);
    /* fix up for one negative */
    /* rl is reused as scratch: sign mask of arg1 (all ones if negative) */
    tcg_gen_sari_tl(rl, arg1, TARGET_LONG_BITS - 1);
    tcg_gen_and_tl(rl, rl, arg2);      /* arg2 if arg1 < 0, else 0 */
    tcg_gen_sub_tl(ret, rh, rl);

    tcg_temp_free(rl);
    tcg_temp_free(rh);
}
248 
/*
 * Signed division with RISC-V semantics folded into the host divide:
 * div-by-zero yields -1, and TARGET_LONG_MIN / -1 yields the dividend
 * (no trap).  See RISC-V unprivileged spec, M extension.
 */
static void gen_div(TCGv ret, TCGv source1, TCGv source2)
{
    TCGv cond1, cond2, zeroreg, resultopt1;
    /*
     * Handle by altering args to tcg_gen_div to produce req'd results:
     * For overflow: want source1 in source1 and 1 in source2
     * For div by zero: want -1 in source1 and 1 in source2 -> -1 result
     */
    cond1 = tcg_temp_new();
    cond2 = tcg_temp_new();
    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
    /* cond2 = (source2 == -1) */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)(~0L));
    /* cond1 = (source1 == TARGET_LONG_MIN) */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
                        ((target_ulong)1) << (TARGET_LONG_BITS - 1));
    tcg_gen_and_tl(cond1, cond1, cond2); /* cond1 = overflow */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, 0); /* cond2 = div 0 */
    /* if div by zero, set source1 to -1, otherwise don't change */
    tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond2, zeroreg, source1,
            resultopt1);
    /* if overflow or div by zero, set source2 to 1, else don't change */
    tcg_gen_or_tl(cond1, cond1, cond2);
    tcg_gen_movi_tl(resultopt1, (target_ulong)1);
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
            resultopt1);
    tcg_gen_div_tl(ret, source1, source2);

    tcg_temp_free(cond1);
    tcg_temp_free(cond2);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}
283 
/*
 * Unsigned division with RISC-V semantics: div-by-zero yields all-ones
 * (2^XLEN - 1).  Implemented by rewriting the operands to -1 / 1 in the
 * zero-divisor case so the host divide never sees a zero divisor.
 */
static void gen_divu(TCGv ret, TCGv source1, TCGv source2)
{
    TCGv cond1, zeroreg, resultopt1;
    cond1 = tcg_temp_new();

    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    /* cond1 = (source2 == 0) */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
    tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
    /* if div by zero, set source1 to -1 (the required result) */
    tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, source1,
            resultopt1);
    tcg_gen_movi_tl(resultopt1, (target_ulong)1);
    /* if div by zero, set source2 to 1 so -1 / 1 = -1 */
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
            resultopt1);
    tcg_gen_divu_tl(ret, source1, source2);

    tcg_temp_free(cond1);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}
305 
/*
 * Signed remainder with RISC-V semantics: rem-by-zero yields the
 * dividend, and TARGET_LONG_MIN rem -1 (overflow) yields 0.
 * NOTE: the original comments on the two setcond lines below had the
 * register names swapped; corrected to match the code.
 */
static void gen_rem(TCGv ret, TCGv source1, TCGv source2)
{
    TCGv cond1, cond2, zeroreg, resultopt1;

    cond1 = tcg_temp_new();
    cond2 = tcg_temp_new();
    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    tcg_gen_movi_tl(resultopt1, 1L);
    /* cond2 = (source2 == -1) */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)-1);
    /* cond1 = (source1 == TARGET_LONG_MIN) */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
                        (target_ulong)1 << (TARGET_LONG_BITS - 1));
    tcg_gen_and_tl(cond2, cond1, cond2); /* cond2 = overflow */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0); /* cond1 = div 0 */
    /* if overflow or div by zero, set source2 to 1, else don't change */
    tcg_gen_or_tl(cond2, cond1, cond2);
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond2, zeroreg, source2,
            resultopt1);
    /* rem by 1 in the overflow case gives the required 0 */
    tcg_gen_rem_tl(resultopt1, source1, source2);
    /* if div by zero, just return the original dividend */
    tcg_gen_movcond_tl(TCG_COND_EQ, ret, cond1, zeroreg, resultopt1,
            source1);

    tcg_temp_free(cond1);
    tcg_temp_free(cond2);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}
335 
/*
 * Unsigned remainder with RISC-V semantics: rem-by-zero yields the
 * original dividend.  The divisor is patched to 1 to avoid a host
 * divide-by-zero, then the final movcond selects the dividend.
 */
static void gen_remu(TCGv ret, TCGv source1, TCGv source2)
{
    TCGv cond1, zeroreg, resultopt1;
    cond1 = tcg_temp_new();
    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    tcg_gen_movi_tl(resultopt1, (target_ulong)1);
    /* cond1 = (source2 == 0) */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
            resultopt1);
    tcg_gen_remu_tl(resultopt1, source1, source2);
    /* if div by zero, just return the original dividend */
    tcg_gen_movcond_tl(TCG_COND_EQ, ret, cond1, zeroreg, resultopt1,
            source1);

    tcg_temp_free(cond1);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}
356 
357 static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
358 {
359     target_ulong next_pc;
360 
361     /* check misaligned: */
362     next_pc = ctx->base.pc_next + imm;
363     if (!has_ext(ctx, RVC)) {
364         if ((next_pc & 0x3) != 0) {
365             gen_exception_inst_addr_mis(ctx);
366             return;
367         }
368     }
369     if (rd != 0) {
370         tcg_gen_movi_tl(cpu_gpr[rd], ctx->pc_succ_insn);
371     }
372 
373     gen_goto_tb(ctx, 0, ctx->base.pc_next + imm); /* must use this for safety */
374     ctx->base.is_jmp = DISAS_NORETURN;
375 }
376 
377 #ifdef TARGET_RISCV64
378 static void gen_load_c(DisasContext *ctx, uint32_t opc, int rd, int rs1,
379         target_long imm)
380 {
381     TCGv t0 = tcg_temp_new();
382     TCGv t1 = tcg_temp_new();
383     gen_get_gpr(t0, rs1);
384     tcg_gen_addi_tl(t0, t0, imm);
385     int memop = tcg_memop_lookup[(opc >> 12) & 0x7];
386 
387     if (memop < 0) {
388         gen_exception_illegal(ctx);
389         return;
390     }
391 
392     tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, memop);
393     gen_set_gpr(rd, t1);
394     tcg_temp_free(t0);
395     tcg_temp_free(t1);
396 }
397 
398 static void gen_store_c(DisasContext *ctx, uint32_t opc, int rs1, int rs2,
399         target_long imm)
400 {
401     TCGv t0 = tcg_temp_new();
402     TCGv dat = tcg_temp_new();
403     gen_get_gpr(t0, rs1);
404     tcg_gen_addi_tl(t0, t0, imm);
405     gen_get_gpr(dat, rs2);
406     int memop = tcg_memop_lookup[(opc >> 12) & 0x7];
407 
408     if (memop < 0) {
409         gen_exception_illegal(ctx);
410         return;
411     }
412 
413     tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx, memop);
414     tcg_temp_free(t0);
415     tcg_temp_free(dat);
416 }
417 #endif
418 
#ifndef CONFIG_USER_ONLY
/* The states of mstatus_fs are:
 * 0 = disabled, 1 = initial, 2 = clean, 3 = dirty
 * We will have already diagnosed disabled state,
 * and need to turn initial/clean into dirty.
 */
static void mark_fs_dirty(DisasContext *ctx)
{
    TCGv tmp;
    if (ctx->mstatus_fs == MSTATUS_FS) {
        return; /* already known dirty for this TB */
    }
    /* Remember the state change for the rest of the TB.  */
    ctx->mstatus_fs = MSTATUS_FS;

    tmp = tcg_temp_new();
    tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
    tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS | MSTATUS_SD);
    tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));

    /* NOTE(review): when V=1 this also dirties the HS-level copy in
       mstatus_hs — presumably so the state survives a virt-mode switch;
       confirm against the hypervisor-extension CSR swap logic. */
    if (ctx->virt_enabled) {
        tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
        tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS | MSTATUS_SD);
        tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
    }
    tcg_temp_free(tmp);
}
#else
/* User-mode emulation has no mstatus.FS tracking. */
static inline void mark_fs_dirty(DisasContext *ctx) { }
#endif
449 
450 #if !defined(TARGET_RISCV64)
451 static void gen_fp_load(DisasContext *ctx, uint32_t opc, int rd,
452         int rs1, target_long imm)
453 {
454     TCGv t0;
455 
456     if (ctx->mstatus_fs == 0) {
457         gen_exception_illegal(ctx);
458         return;
459     }
460 
461     t0 = tcg_temp_new();
462     gen_get_gpr(t0, rs1);
463     tcg_gen_addi_tl(t0, t0, imm);
464 
465     switch (opc) {
466     case OPC_RISC_FLW:
467         if (!has_ext(ctx, RVF)) {
468             goto do_illegal;
469         }
470         tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEUL);
471         /* RISC-V requires NaN-boxing of narrower width floating point values */
472         tcg_gen_ori_i64(cpu_fpr[rd], cpu_fpr[rd], 0xffffffff00000000ULL);
473         break;
474     case OPC_RISC_FLD:
475         if (!has_ext(ctx, RVD)) {
476             goto do_illegal;
477         }
478         tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEQ);
479         break;
480     do_illegal:
481     default:
482         gen_exception_illegal(ctx);
483         break;
484     }
485     tcg_temp_free(t0);
486 
487     mark_fs_dirty(ctx);
488 }
489 
/*
 * Legacy-decoder FP store (FSW/FSD): mem[rs1 + imm] = cpu_fpr[rs2].
 * Raises illegal-instruction if FP is disabled in mstatus or the
 * required extension (F/D) is absent.  Does not dirty FS: memory is
 * written, not the FP state.
 */
static void gen_fp_store(DisasContext *ctx, uint32_t opc, int rs1,
        int rs2, target_long imm)
{
    TCGv t0;

    if (ctx->mstatus_fs == 0) {
        gen_exception_illegal(ctx);
        return;
    }

    t0 = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);

    switch (opc) {
    case OPC_RISC_FSW:
        if (!has_ext(ctx, RVF)) {
            goto do_illegal;
        }
        /* low 32 bits of the (possibly nan-boxed) register are stored */
        tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEUL);
        break;
    case OPC_RISC_FSD:
        if (!has_ext(ctx, RVD)) {
            goto do_illegal;
        }
        tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEQ);
        break;
    do_illegal:
    default:
        gen_exception_illegal(ctx);
        break;
    }

    tcg_temp_free(t0);
}
525 #endif
526 
527 static void gen_set_rm(DisasContext *ctx, int rm)
528 {
529     TCGv_i32 t0;
530 
531     if (ctx->frm == rm) {
532         return;
533     }
534     ctx->frm = rm;
535     t0 = tcg_const_i32(rm);
536     gen_helper_set_rounding_mode(cpu_env, t0);
537     tcg_temp_free_i32(t0);
538 }
539 
/*
 * Legacy fallback decoder for quadrant-0 compressed insns that the
 * decodetree decoder does not handle: the encodings whose meaning
 * differs between RV32 (FP load/store) and RV64 (64-bit load/store).
 */
static void decode_RV32_64C0(DisasContext *ctx, uint16_t opcode)
{
    uint8_t funct3 = extract16(opcode, 13, 3);
    uint8_t rd_rs2 = GET_C_RS2S(opcode);
    uint8_t rs1s = GET_C_RS1S(opcode);

    switch (funct3) {
    case 3:
#if defined(TARGET_RISCV64)
        /* C.LD(RV64/128) -> ld rd', offset[7:3](rs1')*/
        gen_load_c(ctx, OPC_RISC_LD, rd_rs2, rs1s,
                 GET_C_LD_IMM(opcode));
#else
        /* C.FLW (RV32) -> flw rd', offset[6:2](rs1')*/
        gen_fp_load(ctx, OPC_RISC_FLW, rd_rs2, rs1s,
                    GET_C_LW_IMM(opcode));
#endif
        break;
    case 7:
#if defined(TARGET_RISCV64)
        /* C.SD (RV64/128) -> sd rs2', offset[7:3](rs1')*/
        gen_store_c(ctx, OPC_RISC_SD, rs1s, rd_rs2,
                  GET_C_LD_IMM(opcode));
#else
        /* C.FSW (RV32) -> fsw rs2', offset[6:2](rs1')*/
        gen_fp_store(ctx, OPC_RISC_FSW, rs1s, rd_rs2,
                     GET_C_LW_IMM(opcode));
#endif
        break;
    }
}
571 
572 static void decode_RV32_64C(DisasContext *ctx, uint16_t opcode)
573 {
574     uint8_t op = extract16(opcode, 0, 2);
575 
576     switch (op) {
577     case 0:
578         decode_RV32_64C0(ctx, opcode);
579         break;
580     }
581 }
582 
583 static int ex_plus_1(DisasContext *ctx, int nf)
584 {
585     return nf + 1;
586 }
587 
/* Immediate extractors: scale a decoded field by 2^amount. */
#define EX_SH(amount) \
    static int ex_shift_##amount(DisasContext *ctx, int imm) \
    {                                         \
        return imm << amount;                 \
    }
EX_SH(1)
EX_SH(2)
EX_SH(3)
EX_SH(4)
EX_SH(12)
598 
/* In a trans_* function, fail decode (return false) if @ext is absent. */
#define REQUIRE_EXT(ctx, ext) do { \
    if (!has_ext(ctx, ext)) {      \
        return false;              \
    }                              \
} while (0)
604 
605 static int ex_rvc_register(DisasContext *ctx, int reg)
606 {
607     return 8 + reg;
608 }
609 
610 static int ex_rvc_shifti(DisasContext *ctx, int imm)
611 {
612     /* For RV128 a shamt of 0 means a shift by 64. */
613     return imm ? imm : 64;
614 }
615 
616 /* Include the auto-generated decoder for 32 bit insn */
617 #include "decode-insn32.c.inc"
618 
619 static bool gen_arith_imm_fn(DisasContext *ctx, arg_i *a,
620                              void (*func)(TCGv, TCGv, target_long))
621 {
622     TCGv source1;
623     source1 = tcg_temp_new();
624 
625     gen_get_gpr(source1, a->rs1);
626 
627     (*func)(source1, source1, a->imm);
628 
629     gen_set_gpr(a->rd, source1);
630     tcg_temp_free(source1);
631     return true;
632 }
633 
634 static bool gen_arith_imm_tl(DisasContext *ctx, arg_i *a,
635                              void (*func)(TCGv, TCGv, TCGv))
636 {
637     TCGv source1, source2;
638     source1 = tcg_temp_new();
639     source2 = tcg_temp_new();
640 
641     gen_get_gpr(source1, a->rs1);
642     tcg_gen_movi_tl(source2, a->imm);
643 
644     (*func)(source1, source1, source2);
645 
646     gen_set_gpr(a->rd, source1);
647     tcg_temp_free(source1);
648     tcg_temp_free(source2);
649     return true;
650 }
651 
652 #ifdef TARGET_RISCV64
/* ADDW: 32-bit add, result sign-extended to 64 bits. */
static void gen_addw(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_add_tl(ret, arg1, arg2);
    tcg_gen_ext32s_tl(ret, ret);
}
658 
/* SUBW: 32-bit subtract, result sign-extended to 64 bits. */
static void gen_subw(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_sub_tl(ret, arg1, arg2);
    tcg_gen_ext32s_tl(ret, ret);
}
664 
/* MULW: 32-bit multiply, result sign-extended to 64 bits. */
static void gen_mulw(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_mul_tl(ret, arg1, arg2);
    tcg_gen_ext32s_tl(ret, ret);
}
670 
671 static bool gen_arith_div_w(DisasContext *ctx, arg_r *a,
672                             void(*func)(TCGv, TCGv, TCGv))
673 {
674     TCGv source1, source2;
675     source1 = tcg_temp_new();
676     source2 = tcg_temp_new();
677 
678     gen_get_gpr(source1, a->rs1);
679     gen_get_gpr(source2, a->rs2);
680     tcg_gen_ext32s_tl(source1, source1);
681     tcg_gen_ext32s_tl(source2, source2);
682 
683     (*func)(source1, source1, source2);
684 
685     tcg_gen_ext32s_tl(source1, source1);
686     gen_set_gpr(a->rd, source1);
687     tcg_temp_free(source1);
688     tcg_temp_free(source2);
689     return true;
690 }
691 
692 static bool gen_arith_div_uw(DisasContext *ctx, arg_r *a,
693                             void(*func)(TCGv, TCGv, TCGv))
694 {
695     TCGv source1, source2;
696     source1 = tcg_temp_new();
697     source2 = tcg_temp_new();
698 
699     gen_get_gpr(source1, a->rs1);
700     gen_get_gpr(source2, a->rs2);
701     tcg_gen_ext32u_tl(source1, source1);
702     tcg_gen_ext32u_tl(source2, source2);
703 
704     (*func)(source1, source1, source2);
705 
706     tcg_gen_ext32s_tl(source1, source1);
707     gen_set_gpr(a->rd, source1);
708     tcg_temp_free(source1);
709     tcg_temp_free(source2);
710     return true;
711 }
712 
713 #endif
714 
715 static bool gen_arith(DisasContext *ctx, arg_r *a,
716                       void(*func)(TCGv, TCGv, TCGv))
717 {
718     TCGv source1, source2;
719     source1 = tcg_temp_new();
720     source2 = tcg_temp_new();
721 
722     gen_get_gpr(source1, a->rs1);
723     gen_get_gpr(source2, a->rs2);
724 
725     (*func)(source1, source1, source2);
726 
727     gen_set_gpr(a->rd, source1);
728     tcg_temp_free(source1);
729     tcg_temp_free(source2);
730     return true;
731 }
732 
733 static bool gen_shift(DisasContext *ctx, arg_r *a,
734                         void(*func)(TCGv, TCGv, TCGv))
735 {
736     TCGv source1 = tcg_temp_new();
737     TCGv source2 = tcg_temp_new();
738 
739     gen_get_gpr(source1, a->rs1);
740     gen_get_gpr(source2, a->rs2);
741 
742     tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
743     (*func)(source1, source1, source2);
744 
745     gen_set_gpr(a->rd, source1);
746     tcg_temp_free(source1);
747     tcg_temp_free(source2);
748     return true;
749 }
750 
751 static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
752 {
753     DisasContext *ctx = container_of(dcbase, DisasContext, base);
754     CPUState *cpu = ctx->cs;
755     CPURISCVState *env = cpu->env_ptr;
756 
757     return cpu_ldl_code(env, pc);
758 }
759 
760 /* Include insn module translation function */
761 #include "insn_trans/trans_rvi.c.inc"
762 #include "insn_trans/trans_rvm.c.inc"
763 #include "insn_trans/trans_rva.c.inc"
764 #include "insn_trans/trans_rvf.c.inc"
765 #include "insn_trans/trans_rvd.c.inc"
766 #include "insn_trans/trans_rvh.c.inc"
767 #include "insn_trans/trans_rvv.c.inc"
768 #include "insn_trans/trans_privileged.c.inc"
769 
770 /* Include the auto-generated decoder for 16 bit insn */
771 #include "decode-insn16.c.inc"
772 
773 static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
774 {
775     /* check for compressed insn */
776     if (extract16(opcode, 0, 2) != 3) {
777         if (!has_ext(ctx, RVC)) {
778             gen_exception_illegal(ctx);
779         } else {
780             ctx->pc_succ_insn = ctx->base.pc_next + 2;
781             if (!decode_insn16(ctx, opcode)) {
782                 /* fall back to old decoder */
783                 decode_RV32_64C(ctx, opcode);
784             }
785         }
786     } else {
787         uint32_t opcode32 = opcode;
788         opcode32 = deposit32(opcode32, 16, 16,
789                              translator_lduw(env, ctx->base.pc_next + 2));
790         ctx->pc_succ_insn = ctx->base.pc_next + 4;
791         if (!decode_insn32(ctx, opcode32)) {
792             gen_exception_illegal(ctx);
793         }
794     }
795 }
796 
/* Initialize per-TB translation state from tb->flags and the CPU config. */
static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPURISCVState *env = cs->env_ptr;
    RISCVCPU *cpu = RISCV_CPU(cs);
    uint32_t tb_flags = ctx->base.tb->flags;

    ctx->pc_succ_insn = ctx->base.pc_first;
    ctx->mem_idx = tb_flags & TB_FLAGS_MMU_MASK;
    ctx->mstatus_fs = tb_flags & TB_FLAGS_MSTATUS_FS;
    ctx->priv_ver = env->priv_ver;
#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        ctx->virt_enabled = riscv_cpu_virt_enabled(env);
    } else {
        ctx->virt_enabled = false;
    }
#else
    /* user-mode emulation never runs virtualized */
    ctx->virt_enabled = false;
#endif
    ctx->misa = env->misa;
    ctx->frm = -1;  /* unknown rounding mode */
    ctx->ext_ifencei = cpu->cfg.ext_ifencei;
    ctx->vlen = cpu->cfg.vlen;
    ctx->hlsx = FIELD_EX32(tb_flags, TB_FLAGS, HLSX);
    /* vector state is cached from tb->flags, set by cpu_get_tb_cpu_state */
    ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL);
    ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW);
    ctx->lmul = FIELD_EX32(tb_flags, TB_FLAGS, LMUL);
    ctx->mlen = 1 << (ctx->sew  + 3 - ctx->lmul);
    ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
    ctx->cs = cs;
}
829 
/* No per-TB prologue needed for RISC-V. */
static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
833 
/* Record the guest pc of the insn about to be translated (for restore). */
static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next);
}
840 
/* Emit a debug exception for a guest breakpoint at the current pc. */
static bool riscv_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                      const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    ctx->base.is_jmp = DISAS_NORETURN;
    gen_exception_debug();
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order to for it to be
       properly cleared -- thus we increment the PC here so that
       the logic setting tb->size below does the right thing.  */
    ctx->base.pc_next += 4;
    return true;
}
856 
857 static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
858 {
859     DisasContext *ctx = container_of(dcbase, DisasContext, base);
860     CPURISCVState *env = cpu->env_ptr;
861     uint16_t opcode16 = translator_lduw(env, ctx->base.pc_next);
862 
863     decode_opc(env, ctx, opcode16);
864     ctx->base.pc_next = ctx->pc_succ_insn;
865 
866     if (ctx->base.is_jmp == DISAS_NEXT) {
867         target_ulong page_start;
868 
869         page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
870         if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE) {
871             ctx->base.is_jmp = DISAS_TOO_MANY;
872         }
873     }
874 }
875 
/* Finish the TB according to how translation stopped. */
static void riscv_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_TOO_MANY:
        /* hit the insn/page limit: chain to the next pc */
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        break;
    case DISAS_NORETURN:
        /* exception or explicit TB exit already emitted */
        break;
    default:
        g_assert_not_reached();
    }
}
890 
/* Log the disassembly of the TB (in_asm logging). */
static void riscv_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *rvcpu = RISCV_CPU(cpu);
    CPURISCVState *env = &rvcpu->env;
#endif

    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
#ifndef CONFIG_USER_ONLY
    /* system mode: also record privilege level and virtualization state */
    qemu_log("Priv: "TARGET_FMT_ld"; Virt: "TARGET_FMT_ld"\n", env->priv, env->virt);
#endif
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}
904 
/* Hooks consumed by the generic translator_loop(). */
static const TranslatorOps riscv_tr_ops = {
    .init_disas_context = riscv_tr_init_disas_context,
    .tb_start           = riscv_tr_tb_start,
    .insn_start         = riscv_tr_insn_start,
    .breakpoint_check   = riscv_tr_breakpoint_check,
    .translate_insn     = riscv_tr_translate_insn,
    .tb_stop            = riscv_tr_tb_stop,
    .disas_log          = riscv_tr_disas_log,
};
914 
/* Entry point: translate one TB for the RISC-V target. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext ctx;

    translator_loop(&riscv_tr_ops, &ctx.base, cs, tb, max_insns);
}
921 
922 void riscv_translate_init(void)
923 {
924     int i;
925 
926     /* cpu_gpr[0] is a placeholder for the zero register. Do not use it. */
927     /* Use the gen_set_gpr and gen_get_gpr helper functions when accessing */
928     /* registers, unless you specifically block reads/writes to reg 0 */
929     cpu_gpr[0] = NULL;
930 
931     for (i = 1; i < 32; i++) {
932         cpu_gpr[i] = tcg_global_mem_new(cpu_env,
933             offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]);
934     }
935 
936     for (i = 0; i < 32; i++) {
937         cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
938             offsetof(CPURISCVState, fpr[i]), riscv_fpr_regnames[i]);
939     }
940 
941     cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, pc), "pc");
942     cpu_vl = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, vl), "vl");
943     load_res = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_res),
944                              "load_res");
945     load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val),
946                              "load_val");
947 }
948