xref: /qemu/target/riscv/translate.c (revision c7bb41b4)
1 /*
2  * RISC-V emulation for qemu: main translation routines.
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2 or later, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18 
19 #include "qemu/osdep.h"
20 #include "qemu/log.h"
21 #include "cpu.h"
22 #include "tcg/tcg-op.h"
23 #include "disas/disas.h"
24 #include "exec/cpu_ldst.h"
25 #include "exec/exec-all.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
28 
29 #include "exec/translator.h"
30 #include "exec/log.h"
31 
32 #include "instmap.h"
33 
/* global register indices */
static TCGv cpu_gpr[32], cpu_pc, cpu_vl;
static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
static TCGv load_res;        /* reservation address for LR/SC (presumably; see trans_rva) */
static TCGv load_val;        /* value loaded by LR, compared by SC */
39 
40 #include "exec/gen-icount.h"
41 
typedef struct DisasContext {
    DisasContextBase base;
    /* pc_succ_insn points to the instruction following base.pc_next */
    target_ulong pc_succ_insn;
    target_ulong priv_ver;      /* privileged spec version, from env */
    bool virt_enabled;          /* virtualization (V) mode active; softmmu+RVH only */
    uint32_t opcode;
    uint32_t mstatus_fs;        /* cached mstatus.FS; see mark_fs_dirty() */
    uint32_t misa;              /* cached misa, queried via has_ext() */
    uint32_t mem_idx;           /* MMU index for data accesses */
    /* Remember the rounding mode encoded in the previous fp instruction,
       which we have already installed into env->fp_status.  Or -1 for
       no previous fp instruction.  Note that we exit the TB when writing
       to any system register, which includes CSR_FRM, so we do not have
       to reset this known value.  */
    int frm;
    bool ext_ifencei;           /* Zifencei extension available */
    bool hlsx;                  /* hypervisor load/store allowed (TB flag HLSX) */
    /* vector extension */
    bool vill;                  /* vtype is illegal */
    uint8_t lmul;               /* vector group multiplier field, from TB flags */
    uint8_t sew;                /* selected element width field, from TB flags */
    uint16_t vlen;              /* VLEN in bits, from cpu config */
    uint16_t mlen;              /* mask element length: 1 << (sew + 3 - lmul) */
    bool vl_eq_vlmax;           /* vl == vlmax: permits simpler vector codegen */
    CPUState *cs;
} DisasContext;
69 
/* True if all ISA extension bits in @ext are set in the cached misa. */
static inline bool has_ext(DisasContext *ctx, uint32_t ext)
{
    return ctx->misa & ext;
}
74 
/*
 * is_32bit: true when translating RV32 code.  The answer is fixed at
 * build time for riscv32 targets and for user-mode emulation; only a
 * softmmu riscv64 binary must consult misa at translate time.
 */
#ifdef TARGET_RISCV32
# define is_32bit(ctx)  true
#elif defined(CONFIG_USER_ONLY)
# define is_32bit(ctx)  false
#else
static inline bool is_32bit(DisasContext *ctx)
{
    return (ctx->misa & RV32) == RV32;
}
#endif
85 
86 /*
87  * RISC-V requires NaN-boxing of narrower width floating point values.
88  * This applies when a 32-bit value is assigned to a 64-bit FP register.
89  * For consistency and simplicity, we nanbox results even when the RVD
90  * extension is not present.
91  */
static void gen_nanbox_s(TCGv_i64 out, TCGv_i64 in)
{
    /* Setting all upper 32 bits produces the boxed single-precision form. */
    tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(32, 32));
}
96 
97 /*
98  * A narrow n-bit operation, where n < FLEN, checks that input operands
99  * are correctly Nan-boxed, i.e., all upper FLEN - n bits are 1.
100  * If so, the least-significant bits of the input are used, otherwise the
101  * input value is treated as an n-bit canonical NaN (v2.2 section 9.2).
102  *
103  * Here, the result is always nan-boxed, even the canonical nan.
104  */
105 static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in)
106 {
107     TCGv_i64 t_max = tcg_const_i64(0xffffffff00000000ull);
108     TCGv_i64 t_nan = tcg_const_i64(0xffffffff7fc00000ull);
109 
110     tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan);
111     tcg_temp_free_i64(t_max);
112     tcg_temp_free_i64(t_nan);
113 }
114 
115 static void generate_exception(DisasContext *ctx, int excp)
116 {
117     tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
118     TCGv_i32 helper_tmp = tcg_const_i32(excp);
119     gen_helper_raise_exception(cpu_env, helper_tmp);
120     tcg_temp_free_i32(helper_tmp);
121     ctx->base.is_jmp = DISAS_NORETURN;
122 }
123 
124 static void generate_exception_mtval(DisasContext *ctx, int excp)
125 {
126     tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
127     tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr));
128     TCGv_i32 helper_tmp = tcg_const_i32(excp);
129     gen_helper_raise_exception(cpu_env, helper_tmp);
130     tcg_temp_free_i32(helper_tmp);
131     ctx->base.is_jmp = DISAS_NORETURN;
132 }
133 
134 static void gen_exception_debug(void)
135 {
136     TCGv_i32 helper_tmp = tcg_const_i32(EXCP_DEBUG);
137     gen_helper_raise_exception(cpu_env, helper_tmp);
138     tcg_temp_free_i32(helper_tmp);
139 }
140 
141 /* Wrapper around tcg_gen_exit_tb that handles single stepping */
142 static void exit_tb(DisasContext *ctx)
143 {
144     if (ctx->base.singlestep_enabled) {
145         gen_exception_debug();
146     } else {
147         tcg_gen_exit_tb(NULL, 0);
148     }
149 }
150 
151 /* Wrapper around tcg_gen_lookup_and_goto_ptr that handles single stepping */
152 static void lookup_and_goto_ptr(DisasContext *ctx)
153 {
154     if (ctx->base.singlestep_enabled) {
155         gen_exception_debug();
156     } else {
157         tcg_gen_lookup_and_goto_ptr();
158     }
159 }
160 
/* Raise an illegal-instruction exception at the current pc. */
static void gen_exception_illegal(DisasContext *ctx)
{
    generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
}
165 
/* Raise instruction-address-misaligned; the bad pc is stored as mtval. */
static void gen_exception_inst_addr_mis(DisasContext *ctx)
{
    generate_exception_mtval(ctx, RISCV_EXCP_INST_ADDR_MIS);
}
170 
/*
 * Decide whether a direct TB-to-TB jump to @dest may be used.
 * Disallowed while single stepping; in softmmu the jump must also stay
 * within the current guest page, since the mapping of other pages may
 * change underneath us.
 */
static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    if (unlikely(ctx->base.singlestep_enabled)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
183 
/* Jump to @dest, using a chained TB exit when permitted, otherwise a
 * TB lookup via the jump cache.  @n selects the goto_tb slot (0 or 1). */
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        /* chaining is only allowed when the jump is to the same page */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_pc, dest);

        /* No need to check for single stepping here as use_goto_tb() will
         * return false in case of single stepping.
         */
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_tl(cpu_pc, dest);
        lookup_and_goto_ptr(ctx);
    }
}
200 
201 /* Wrapper for getting reg values - need to check of reg is zero since
202  * cpu_gpr[0] is not actually allocated
203  */
204 static inline void gen_get_gpr(TCGv t, int reg_num)
205 {
206     if (reg_num == 0) {
207         tcg_gen_movi_tl(t, 0);
208     } else {
209         tcg_gen_mov_tl(t, cpu_gpr[reg_num]);
210     }
211 }
212 
213 /* Wrapper for setting reg values - need to check of reg is zero since
214  * cpu_gpr[0] is not actually allocated. this is more for safety purposes,
215  * since we usually avoid calling the OP_TYPE_gen function if we see a write to
216  * $zero
217  */
218 static inline void gen_set_gpr(int reg_num_dst, TCGv t)
219 {
220     if (reg_num_dst != 0) {
221         tcg_gen_mov_tl(cpu_gpr[reg_num_dst], t);
222     }
223 }
224 
/*
 * MULHSU: high half of signed(arg1) * unsigned(arg2).
 * Computed as the unsigned high product, then corrected: when arg1 is
 * negative, signed*unsigned differs from unsigned*unsigned by exactly
 * arg2 << XLEN, so subtract arg2 from the high half in that case.
 */
static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv rl = tcg_temp_new();
    TCGv rh = tcg_temp_new();

    tcg_gen_mulu2_tl(rl, rh, arg1, arg2);
    /* fix up for one negative: rl = (arg1 < 0 ? -1 : 0) & arg2 */
    tcg_gen_sari_tl(rl, arg1, TARGET_LONG_BITS - 1);
    tcg_gen_and_tl(rl, rl, arg2);
    tcg_gen_sub_tl(ret, rh, rl);

    tcg_temp_free(rl);
    tcg_temp_free(rh);
}
239 
/*
 * Signed division with RISC-V semantics: x / 0 == -1 and
 * INT_MIN / -1 == INT_MIN, implemented branchlessly by patching the
 * operands so the host division produces the required result.
 */
static void gen_div(TCGv ret, TCGv source1, TCGv source2)
{
    TCGv cond1, cond2, zeroreg, resultopt1;
    /*
     * Handle by altering args to tcg_gen_div to produce req'd results:
     * For overflow: want source1 in source1 and 1 in source2
     * For div by zero: want -1 in source1 and 1 in source2 -> -1 result
     */
    cond1 = tcg_temp_new();
    cond2 = tcg_temp_new();
    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)(~0L));
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
                        ((target_ulong)1) << (TARGET_LONG_BITS - 1));
    tcg_gen_and_tl(cond1, cond1, cond2); /* cond1 = overflow */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, 0); /* cond2 = div 0 */
    /* if div by zero, set source1 to -1, otherwise don't change */
    tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond2, zeroreg, source1,
            resultopt1);
    /* if overflow or div by zero, set source2 to 1, else don't change */
    tcg_gen_or_tl(cond1, cond1, cond2);
    tcg_gen_movi_tl(resultopt1, (target_ulong)1);
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
            resultopt1);
    tcg_gen_div_tl(ret, source1, source2);

    tcg_temp_free(cond1);
    tcg_temp_free(cond2);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}
274 
/*
 * Unsigned division with RISC-V semantics: x / 0 == UINT_MAX (all ones).
 * On divide-by-zero the operands are patched to (-1) / 1 so the host
 * division cannot trap and yields the architected result.
 */
static void gen_divu(TCGv ret, TCGv source1, TCGv source2)
{
    TCGv cond1, zeroreg, resultopt1;
    cond1 = tcg_temp_new();

    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    /* cond1 = (source2 == 0) */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
    tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
    /* if div by zero, set source1 to -1, otherwise don't change */
    tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, source1,
            resultopt1);
    /* if div by zero, set source2 to 1, otherwise don't change */
    tcg_gen_movi_tl(resultopt1, (target_ulong)1);
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
            resultopt1);
    tcg_gen_divu_tl(ret, source1, source2);

    tcg_temp_free(cond1);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}
296 
/*
 * Signed remainder with RISC-V semantics: x % 0 == x and
 * INT_MIN % -1 == 0.  The divisor is patched to 1 for both special
 * cases, and the div-by-zero case additionally selects the original
 * dividend as the final result.
 */
static void gen_rem(TCGv ret, TCGv source1, TCGv source2)
{
    TCGv cond1, cond2, zeroreg, resultopt1;

    cond1 = tcg_temp_new();
    cond2 = tcg_temp_new();
    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    tcg_gen_movi_tl(resultopt1, 1L);
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)-1);
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
                        (target_ulong)1 << (TARGET_LONG_BITS - 1));
    tcg_gen_and_tl(cond2, cond1, cond2); /* cond2 = overflow */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0); /* cond1 = div 0 */
    /* if overflow or div by zero, set source2 to 1, else don't change */
    tcg_gen_or_tl(cond2, cond1, cond2);
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond2, zeroreg, source2,
            resultopt1);
    tcg_gen_rem_tl(resultopt1, source1, source2);
    /* if div by zero, just return the original dividend */
    tcg_gen_movcond_tl(TCG_COND_EQ, ret, cond1, zeroreg, resultopt1,
            source1);

    tcg_temp_free(cond1);
    tcg_temp_free(cond2);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}
326 
/*
 * Unsigned remainder with RISC-V semantics: x % 0 == x.  On
 * divide-by-zero the divisor is patched to 1 so the host op cannot
 * trap, and the original dividend is selected as the result.
 */
static void gen_remu(TCGv ret, TCGv source1, TCGv source2)
{
    TCGv cond1, zeroreg, resultopt1;
    cond1 = tcg_temp_new();
    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    tcg_gen_movi_tl(resultopt1, (target_ulong)1);
    /* cond1 = (source2 == 0) */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
    /* if div by zero, set source2 to 1, else don't change */
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
            resultopt1);
    tcg_gen_remu_tl(resultopt1, source1, source2);
    /* if div by zero, just return the original dividend */
    tcg_gen_movcond_tl(TCG_COND_EQ, ret, cond1, zeroreg, resultopt1,
            source1);

    tcg_temp_free(cond1);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}
347 
348 static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
349 {
350     target_ulong next_pc;
351 
352     /* check misaligned: */
353     next_pc = ctx->base.pc_next + imm;
354     if (!has_ext(ctx, RVC)) {
355         if ((next_pc & 0x3) != 0) {
356             gen_exception_inst_addr_mis(ctx);
357             return;
358         }
359     }
360     if (rd != 0) {
361         tcg_gen_movi_tl(cpu_gpr[rd], ctx->pc_succ_insn);
362     }
363 
364     gen_goto_tb(ctx, 0, ctx->base.pc_next + imm); /* must use this for safety */
365     ctx->base.is_jmp = DISAS_NORETURN;
366 }
367 
#ifndef CONFIG_USER_ONLY
/* The states of mstatus_fs are:
 * 0 = disabled, 1 = initial, 2 = clean, 3 = dirty
 * We will have already diagnosed disabled state,
 * and need to turn initial/clean into dirty.
 */
static void mark_fs_dirty(DisasContext *ctx)
{
    TCGv tmp;
    target_ulong sd;

    /* Already known dirty for the rest of this TB: nothing to do. */
    if (ctx->mstatus_fs == MSTATUS_FS) {
        return;
    }
    /* Remember the state change for the rest of the TB.  */
    ctx->mstatus_fs = MSTATUS_FS;

    tmp = tcg_temp_new();
    /* SD is the summary-dirty bit; its position depends on XLEN. */
    sd = is_32bit(ctx) ? MSTATUS32_SD : MSTATUS64_SD;

    tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
    tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS | sd);
    tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));

    /* In a guest (V=1) the HS-level mstatus shadow must be dirtied too. */
    if (ctx->virt_enabled) {
        tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
        tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS | sd);
        tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
    }
    tcg_temp_free(tmp);
}
#else
static inline void mark_fs_dirty(DisasContext *ctx) { }
#endif
402 
403 static void gen_set_rm(DisasContext *ctx, int rm)
404 {
405     TCGv_i32 t0;
406 
407     if (ctx->frm == rm) {
408         return;
409     }
410     ctx->frm = rm;
411     t0 = tcg_const_i32(rm);
412     gen_helper_set_rounding_mode(cpu_env, t0);
413     tcg_temp_free_i32(t0);
414 }
415 
/* Decode-time immediate transform: value is encoded as (value - 1). */
static int ex_plus_1(DisasContext *ctx, int nf)
{
    return nf + 1;
}
420 
/* Decode-time immediate transforms: left-shift the raw field by a
 * fixed amount (used for scaled immediates in the decode tables). */
#define EX_SH(amount) \
    static int ex_shift_##amount(DisasContext *ctx, int imm) \
    {                                         \
        return imm << amount;                 \
    }
EX_SH(1)
EX_SH(2)
EX_SH(3)
EX_SH(4)
EX_SH(12)
431 
/* Fail translation (decode returns false -> illegal insn) unless the
 * required ISA extension is present. */
#define REQUIRE_EXT(ctx, ext) do { \
    if (!has_ext(ctx, ext)) {      \
        return false;              \
    }                              \
} while (0)

/* Fail translation unless we are executing in RV64 mode. */
#define REQUIRE_64BIT(ctx) do { \
    if (is_32bit(ctx)) {        \
        return false;           \
    }                           \
} while (0)
443 
/* RVC 3-bit register fields encode x8..x15; map to the full index. */
static int ex_rvc_register(DisasContext *ctx, int reg)
{
    return 8 + reg;
}
448 
static int ex_rvc_shifti(DisasContext *ctx, int imm)
{
    /* For RV128 a shamt of 0 means a shift by 64. */
    return imm ? imm : 64;
}
454 
455 /* Include the auto-generated decoder for 32 bit insn */
456 #include "decode-insn32.c.inc"
457 
458 static bool gen_arith_imm_fn(DisasContext *ctx, arg_i *a,
459                              void (*func)(TCGv, TCGv, target_long))
460 {
461     TCGv source1;
462     source1 = tcg_temp_new();
463 
464     gen_get_gpr(source1, a->rs1);
465 
466     (*func)(source1, source1, a->imm);
467 
468     gen_set_gpr(a->rd, source1);
469     tcg_temp_free(source1);
470     return true;
471 }
472 
473 static bool gen_arith_imm_tl(DisasContext *ctx, arg_i *a,
474                              void (*func)(TCGv, TCGv, TCGv))
475 {
476     TCGv source1, source2;
477     source1 = tcg_temp_new();
478     source2 = tcg_temp_new();
479 
480     gen_get_gpr(source1, a->rs1);
481     tcg_gen_movi_tl(source2, a->imm);
482 
483     (*func)(source1, source1, source2);
484 
485     gen_set_gpr(a->rd, source1);
486     tcg_temp_free(source1);
487     tcg_temp_free(source2);
488     return true;
489 }
490 
/* ADDW: 32-bit add, result sign-extended to the register width. */
static void gen_addw(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_add_tl(ret, arg1, arg2);
    tcg_gen_ext32s_tl(ret, ret);
}
496 
/* SUBW: 32-bit subtract, result sign-extended to the register width. */
static void gen_subw(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_sub_tl(ret, arg1, arg2);
    tcg_gen_ext32s_tl(ret, ret);
}
502 
/* MULW: 32-bit multiply, result sign-extended to the register width. */
static void gen_mulw(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_mul_tl(ret, arg1, arg2);
    tcg_gen_ext32s_tl(ret, ret);
}
508 
509 static bool gen_arith_div_w(DisasContext *ctx, arg_r *a,
510                             void(*func)(TCGv, TCGv, TCGv))
511 {
512     TCGv source1, source2;
513     source1 = tcg_temp_new();
514     source2 = tcg_temp_new();
515 
516     gen_get_gpr(source1, a->rs1);
517     gen_get_gpr(source2, a->rs2);
518     tcg_gen_ext32s_tl(source1, source1);
519     tcg_gen_ext32s_tl(source2, source2);
520 
521     (*func)(source1, source1, source2);
522 
523     tcg_gen_ext32s_tl(source1, source1);
524     gen_set_gpr(a->rd, source1);
525     tcg_temp_free(source1);
526     tcg_temp_free(source2);
527     return true;
528 }
529 
530 static bool gen_arith_div_uw(DisasContext *ctx, arg_r *a,
531                             void(*func)(TCGv, TCGv, TCGv))
532 {
533     TCGv source1, source2;
534     source1 = tcg_temp_new();
535     source2 = tcg_temp_new();
536 
537     gen_get_gpr(source1, a->rs1);
538     gen_get_gpr(source2, a->rs2);
539     tcg_gen_ext32u_tl(source1, source1);
540     tcg_gen_ext32u_tl(source2, source2);
541 
542     (*func)(source1, source1, source2);
543 
544     tcg_gen_ext32s_tl(source1, source1);
545     gen_set_gpr(a->rd, source1);
546     tcg_temp_free(source1);
547     tcg_temp_free(source2);
548     return true;
549 }
550 
/* PACK: lower half of arg1 in the low bits, lower half of arg2 above. */
static void gen_pack(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_deposit_tl(ret, arg1, arg2,
                       TARGET_LONG_BITS / 2,
                       TARGET_LONG_BITS / 2);
}
557 
558 static void gen_packu(TCGv ret, TCGv arg1, TCGv arg2)
559 {
560     TCGv t = tcg_temp_new();
561     tcg_gen_shri_tl(t, arg1, TARGET_LONG_BITS / 2);
562     tcg_gen_deposit_tl(ret, arg2, t, 0, TARGET_LONG_BITS / 2);
563     tcg_temp_free(t);
564 }
565 
566 static void gen_packh(TCGv ret, TCGv arg1, TCGv arg2)
567 {
568     TCGv t = tcg_temp_new();
569     tcg_gen_ext8u_tl(t, arg2);
570     tcg_gen_deposit_tl(ret, arg1, t, 8, TARGET_LONG_BITS - 8);
571     tcg_temp_free(t);
572 }
573 
/* Build the single-bit mask (1 << shamt) used by the bset/bclr/binv
 * family; shamt is assumed to be pre-masked by the caller. */
static void gen_sbop_mask(TCGv ret, TCGv shamt)
{
    tcg_gen_movi_tl(ret, 1);
    tcg_gen_shl_tl(ret, ret, shamt);
}
579 
580 static void gen_bset(TCGv ret, TCGv arg1, TCGv shamt)
581 {
582     TCGv t = tcg_temp_new();
583 
584     gen_sbop_mask(t, shamt);
585     tcg_gen_or_tl(ret, arg1, t);
586 
587     tcg_temp_free(t);
588 }
589 
590 static void gen_bclr(TCGv ret, TCGv arg1, TCGv shamt)
591 {
592     TCGv t = tcg_temp_new();
593 
594     gen_sbop_mask(t, shamt);
595     tcg_gen_andc_tl(ret, arg1, t);
596 
597     tcg_temp_free(t);
598 }
599 
600 static void gen_binv(TCGv ret, TCGv arg1, TCGv shamt)
601 {
602     TCGv t = tcg_temp_new();
603 
604     gen_sbop_mask(t, shamt);
605     tcg_gen_xor_tl(ret, arg1, t);
606 
607     tcg_temp_free(t);
608 }
609 
/* BEXT: extract bit @shamt of arg1 into bit 0 of ret. */
static void gen_bext(TCGv ret, TCGv arg1, TCGv shamt)
{
    tcg_gen_shr_tl(ret, arg1, shamt);
    tcg_gen_andi_tl(ret, ret, 1);
}
615 
/* SLO: shift left, shifting in ones: ret = ~(~arg1 << arg2). */
static void gen_slo(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_not_tl(ret, arg1);
    tcg_gen_shl_tl(ret, ret, arg2);
    tcg_gen_not_tl(ret, ret);
}
622 
/* SRO: shift right, shifting in ones: ret = ~(~arg1 >> arg2). */
static void gen_sro(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_not_tl(ret, arg1);
    tcg_gen_shr_tl(ret, ret, arg2);
    tcg_gen_not_tl(ret, ret);
}
629 
/*
 * GREVI: generalized reverse with an immediate control.  The one case
 * with direct TCG support — full byte swap (shamt == XLEN - 8, i.e.
 * rev8) — is special-cased; everything else goes through the helper.
 */
static bool gen_grevi(DisasContext *ctx, arg_grevi *a)
{
    TCGv source1 = tcg_temp_new();
    TCGv source2;

    gen_get_gpr(source1, a->rs1);

    if (a->shamt == (TARGET_LONG_BITS - 8)) {
        /* rev8, byte swaps */
        tcg_gen_bswap_tl(source1, source1);
    } else {
        source2 = tcg_temp_new();
        tcg_gen_movi_tl(source2, a->shamt);
        gen_helper_grev(source1, source1, source2);
        tcg_temp_free(source2);
    }

    gen_set_gpr(a->rd, source1);
    tcg_temp_free(source1);
    return true;
}
651 
/* SH1ADD/SH2ADD/SH3ADD: ret = (arg1 << SHAMT) + arg2. */
#define GEN_SHADD(SHAMT)                                       \
static void gen_sh##SHAMT##add(TCGv ret, TCGv arg1, TCGv arg2) \
{                                                              \
    TCGv t = tcg_temp_new();                                   \
                                                               \
    tcg_gen_shli_tl(t, arg1, SHAMT);                           \
    tcg_gen_add_tl(ret, t, arg2);                              \
                                                               \
    tcg_temp_free(t);                                          \
}

GEN_SHADD(1)
GEN_SHADD(2)
GEN_SHADD(3)
666 
/* CTZW: count trailing zeros of the low 32 bits.  Forcing the upper
 * bits to 1 caps the 64-bit ctz result at 32 when the low half is
 * zero.  NOTE(review): 64-bit-only codegen — presumably reached only
 * under REQUIRE_64BIT in trans_rvb; confirm at the call site. */
static void gen_ctzw(TCGv ret, TCGv arg1)
{
    tcg_gen_ori_tl(ret, arg1, (target_ulong)MAKE_64BIT_MASK(32, 32));
    tcg_gen_ctzi_tl(ret, ret, 64);
}
672 
/* CLZW: count leading zeros of the low 32 bits.  Zero-extend, take the
 * 64-bit clz (which over-counts by exactly 32), then correct. */
static void gen_clzw(TCGv ret, TCGv arg1)
{
    tcg_gen_ext32u_tl(ret, arg1);
    tcg_gen_clzi_tl(ret, ret, 64);
    tcg_gen_subi_tl(ret, ret, 32);
}
679 
680 static void gen_cpopw(TCGv ret, TCGv arg1)
681 {
682     tcg_gen_ext32u_tl(arg1, arg1);
683     tcg_gen_ctpop_tl(ret, arg1);
684 }
685 
686 static void gen_packw(TCGv ret, TCGv arg1, TCGv arg2)
687 {
688     TCGv t = tcg_temp_new();
689     tcg_gen_ext16s_tl(t, arg2);
690     tcg_gen_deposit_tl(ret, arg1, t, 16, 48);
691     tcg_temp_free(t);
692 }
693 
694 static void gen_packuw(TCGv ret, TCGv arg1, TCGv arg2)
695 {
696     TCGv t = tcg_temp_new();
697     tcg_gen_shri_tl(t, arg1, 16);
698     tcg_gen_deposit_tl(ret, arg2, t, 0, 16);
699     tcg_gen_ext32s_tl(ret, ret);
700     tcg_temp_free(t);
701 }
702 
703 static void gen_rorw(TCGv ret, TCGv arg1, TCGv arg2)
704 {
705     TCGv_i32 t1 = tcg_temp_new_i32();
706     TCGv_i32 t2 = tcg_temp_new_i32();
707 
708     /* truncate to 32-bits */
709     tcg_gen_trunc_tl_i32(t1, arg1);
710     tcg_gen_trunc_tl_i32(t2, arg2);
711 
712     tcg_gen_rotr_i32(t1, t1, t2);
713 
714     /* sign-extend 64-bits */
715     tcg_gen_ext_i32_tl(ret, t1);
716 
717     tcg_temp_free_i32(t1);
718     tcg_temp_free_i32(t2);
719 }
720 
721 static void gen_rolw(TCGv ret, TCGv arg1, TCGv arg2)
722 {
723     TCGv_i32 t1 = tcg_temp_new_i32();
724     TCGv_i32 t2 = tcg_temp_new_i32();
725 
726     /* truncate to 32-bits */
727     tcg_gen_trunc_tl_i32(t1, arg1);
728     tcg_gen_trunc_tl_i32(t2, arg2);
729 
730     tcg_gen_rotl_i32(t1, t1, t2);
731 
732     /* sign-extend 64-bits */
733     tcg_gen_ext_i32_tl(ret, t1);
734 
735     tcg_temp_free_i32(t1);
736     tcg_temp_free_i32(t2);
737 }
738 
739 static void gen_grevw(TCGv ret, TCGv arg1, TCGv arg2)
740 {
741     tcg_gen_ext32u_tl(arg1, arg1);
742     gen_helper_grev(ret, arg1, arg2);
743 }
744 
745 static void gen_gorcw(TCGv ret, TCGv arg1, TCGv arg2)
746 {
747     tcg_gen_ext32u_tl(arg1, arg1);
748     gen_helper_gorcw(ret, arg1, arg2);
749 }
750 
/* SH1ADD.UW/SH2ADD.UW/SH3ADD.UW: ret = (zext32(arg1) << SHAMT) + arg2. */
#define GEN_SHADD_UW(SHAMT)                                       \
static void gen_sh##SHAMT##add_uw(TCGv ret, TCGv arg1, TCGv arg2) \
{                                                                 \
    TCGv t = tcg_temp_new();                                      \
                                                                  \
    tcg_gen_ext32u_tl(t, arg1);                                   \
                                                                  \
    tcg_gen_shli_tl(t, t, SHAMT);                                 \
    tcg_gen_add_tl(ret, t, arg2);                                 \
                                                                  \
    tcg_temp_free(t);                                             \
}

GEN_SHADD_UW(1)
GEN_SHADD_UW(2)
GEN_SHADD_UW(3)
767 
768 static void gen_add_uw(TCGv ret, TCGv arg1, TCGv arg2)
769 {
770     tcg_gen_ext32u_tl(arg1, arg1);
771     tcg_gen_add_tl(ret, arg1, arg2);
772 }
773 
774 static bool gen_arith(DisasContext *ctx, arg_r *a,
775                       void(*func)(TCGv, TCGv, TCGv))
776 {
777     TCGv source1, source2;
778     source1 = tcg_temp_new();
779     source2 = tcg_temp_new();
780 
781     gen_get_gpr(source1, a->rs1);
782     gen_get_gpr(source2, a->rs2);
783 
784     (*func)(source1, source1, source2);
785 
786     gen_set_gpr(a->rd, source1);
787     tcg_temp_free(source1);
788     tcg_temp_free(source2);
789     return true;
790 }
791 
792 static bool gen_shift(DisasContext *ctx, arg_r *a,
793                         void(*func)(TCGv, TCGv, TCGv))
794 {
795     TCGv source1 = tcg_temp_new();
796     TCGv source2 = tcg_temp_new();
797 
798     gen_get_gpr(source1, a->rs1);
799     gen_get_gpr(source2, a->rs2);
800 
801     tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
802     (*func)(source1, source1, source2);
803 
804     gen_set_gpr(a->rd, source1);
805     tcg_temp_free(source1);
806     tcg_temp_free(source2);
807     return true;
808 }
809 
810 static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
811 {
812     DisasContext *ctx = container_of(dcbase, DisasContext, base);
813     CPUState *cpu = ctx->cs;
814     CPURISCVState *env = cpu->env_ptr;
815 
816     return cpu_ldl_code(env, pc);
817 }
818 
819 static bool gen_shifti(DisasContext *ctx, arg_shift *a,
820                        void(*func)(TCGv, TCGv, TCGv))
821 {
822     if (a->shamt >= TARGET_LONG_BITS) {
823         return false;
824     }
825 
826     TCGv source1 = tcg_temp_new();
827     TCGv source2 = tcg_temp_new();
828 
829     gen_get_gpr(source1, a->rs1);
830 
831     tcg_gen_movi_tl(source2, a->shamt);
832     (*func)(source1, source1, source2);
833 
834     gen_set_gpr(a->rd, source1);
835     tcg_temp_free(source1);
836     tcg_temp_free(source2);
837     return true;
838 }
839 
840 static bool gen_shiftw(DisasContext *ctx, arg_r *a,
841                        void(*func)(TCGv, TCGv, TCGv))
842 {
843     TCGv source1 = tcg_temp_new();
844     TCGv source2 = tcg_temp_new();
845 
846     gen_get_gpr(source1, a->rs1);
847     gen_get_gpr(source2, a->rs2);
848 
849     tcg_gen_andi_tl(source2, source2, 31);
850     (*func)(source1, source1, source2);
851     tcg_gen_ext32s_tl(source1, source1);
852 
853     gen_set_gpr(a->rd, source1);
854     tcg_temp_free(source1);
855     tcg_temp_free(source2);
856     return true;
857 }
858 
859 static bool gen_shiftiw(DisasContext *ctx, arg_shift *a,
860                         void(*func)(TCGv, TCGv, TCGv))
861 {
862     TCGv source1 = tcg_temp_new();
863     TCGv source2 = tcg_temp_new();
864 
865     gen_get_gpr(source1, a->rs1);
866     tcg_gen_movi_tl(source2, a->shamt);
867 
868     (*func)(source1, source1, source2);
869     tcg_gen_ext32s_tl(source1, source1);
870 
871     gen_set_gpr(a->rd, source1);
872     tcg_temp_free(source1);
873     tcg_temp_free(source2);
874     return true;
875 }
876 
/* CTZ: count trailing zeros; a zero input yields XLEN. */
static void gen_ctz(TCGv ret, TCGv arg1)
{
    tcg_gen_ctzi_tl(ret, arg1, TARGET_LONG_BITS);
}
881 
/* CLZ: count leading zeros; a zero input yields XLEN. */
static void gen_clz(TCGv ret, TCGv arg1)
{
    tcg_gen_clzi_tl(ret, arg1, TARGET_LONG_BITS);
}
886 
887 static bool gen_unary(DisasContext *ctx, arg_r2 *a,
888                       void(*func)(TCGv, TCGv))
889 {
890     TCGv source = tcg_temp_new();
891 
892     gen_get_gpr(source, a->rs1);
893 
894     (*func)(source, source);
895 
896     gen_set_gpr(a->rd, source);
897     tcg_temp_free(source);
898     return true;
899 }
900 
901 /* Include insn module translation function */
902 #include "insn_trans/trans_rvi.c.inc"
903 #include "insn_trans/trans_rvm.c.inc"
904 #include "insn_trans/trans_rva.c.inc"
905 #include "insn_trans/trans_rvf.c.inc"
906 #include "insn_trans/trans_rvd.c.inc"
907 #include "insn_trans/trans_rvh.c.inc"
908 #include "insn_trans/trans_rvv.c.inc"
909 #include "insn_trans/trans_rvb.c.inc"
910 #include "insn_trans/trans_privileged.c.inc"
911 
912 /* Include the auto-generated decoder for 16 bit insn */
913 #include "decode-insn16.c.inc"
914 
/*
 * Decode and translate one instruction.  @opcode holds the first 16
 * bits, already fetched by the caller; for 32-bit encodings the upper
 * half is fetched here.  Advances pc_succ_insn past the instruction
 * and raises an illegal-instruction exception on any decode failure.
 */
static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
{
    /* check for compressed insn */
    if (extract16(opcode, 0, 2) != 3) {
        if (!has_ext(ctx, RVC)) {
            gen_exception_illegal(ctx);
        } else {
            ctx->pc_succ_insn = ctx->base.pc_next + 2;
            if (!decode_insn16(ctx, opcode)) {
                gen_exception_illegal(ctx);
            }
        }
    } else {
        uint32_t opcode32 = opcode;
        opcode32 = deposit32(opcode32, 16, 16,
                             translator_lduw(env, ctx->base.pc_next + 2));
        ctx->pc_succ_insn = ctx->base.pc_next + 4;
        if (!decode_insn32(ctx, opcode32)) {
            gen_exception_illegal(ctx);
        }
    }
}
937 
/* Per-TB translator setup: cache the pieces of CPU state the decoder
 * needs, mostly unpacked from the TB flags computed at lookup time. */
static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPURISCVState *env = cs->env_ptr;
    RISCVCPU *cpu = RISCV_CPU(cs);
    uint32_t tb_flags = ctx->base.tb->flags;

    ctx->pc_succ_insn = ctx->base.pc_first;
    ctx->mem_idx = tb_flags & TB_FLAGS_MMU_MASK;
    ctx->mstatus_fs = tb_flags & TB_FLAGS_MSTATUS_FS;
    ctx->priv_ver = env->priv_ver;
#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        ctx->virt_enabled = riscv_cpu_virt_enabled(env);
    } else {
        ctx->virt_enabled = false;
    }
#else
    ctx->virt_enabled = false;
#endif
    ctx->misa = env->misa;
    ctx->frm = -1;  /* unknown rounding mode */
    ctx->ext_ifencei = cpu->cfg.ext_ifencei;
    ctx->vlen = cpu->cfg.vlen;
    ctx->hlsx = FIELD_EX32(tb_flags, TB_FLAGS, HLSX);
    /* vector extension configuration, see DisasContext field comments */
    ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL);
    ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW);
    ctx->lmul = FIELD_EX32(tb_flags, TB_FLAGS, LMUL);
    ctx->mlen = 1 << (ctx->sew  + 3 - ctx->lmul);
    ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
    ctx->cs = cs;
}
970 
/* No per-TB prologue needed for RISC-V. */
static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
974 
/* Record the guest pc of each insn for exception restore/unwinding. */
static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next);
}
981 
982 static bool riscv_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
983                                       const CPUBreakpoint *bp)
984 {
985     DisasContext *ctx = container_of(dcbase, DisasContext, base);
986 
987     tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
988     ctx->base.is_jmp = DISAS_NORETURN;
989     gen_exception_debug();
990     /* The address covered by the breakpoint must be included in
991        [tb->pc, tb->pc + tb->size) in order to for it to be
992        properly cleared -- thus we increment the PC here so that
993        the logic setting tb->size below does the right thing.  */
994     ctx->base.pc_next += 4;
995     return true;
996 }
997 
/* Translate one instruction, then stop the TB if it would cross a
 * guest page boundary (the next page may have different contents or
 * permissions by the time it executes). */
static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPURISCVState *env = cpu->env_ptr;
    /* fetch the first 16 bits; decode_opc() fetches the rest if needed */
    uint16_t opcode16 = translator_lduw(env, ctx->base.pc_next);

    decode_opc(env, ctx, opcode16);
    ctx->base.pc_next = ctx->pc_succ_insn;

    if (ctx->base.is_jmp == DISAS_NEXT) {
        target_ulong page_start;

        page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
        if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE) {
            ctx->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
1016 
/* Finish the TB.  DISAS_NORETURN means an exception or jump already
 * ended the block; DISAS_TOO_MANY needs an explicit goto_tb to the
 * next sequential pc.  Other is_jmp values should never reach here. */
static void riscv_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        break;
    case DISAS_NORETURN:
        break;
    default:
        g_assert_not_reached();
    }
}
1031 
/* Log the guest code of this TB (with priv/virt state in softmmu). */
static void riscv_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *rvcpu = RISCV_CPU(cpu);
    CPURISCVState *env = &rvcpu->env;
#endif

    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
#ifndef CONFIG_USER_ONLY
    qemu_log("Priv: "TARGET_FMT_ld"; Virt: "TARGET_FMT_ld"\n", env->priv, env->virt);
#endif
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}
1045 
/* Hooks wiring this target into the generic translator loop. */
static const TranslatorOps riscv_tr_ops = {
    .init_disas_context = riscv_tr_init_disas_context,
    .tb_start           = riscv_tr_tb_start,
    .insn_start         = riscv_tr_insn_start,
    .breakpoint_check   = riscv_tr_breakpoint_check,
    .translate_insn     = riscv_tr_translate_insn,
    .tb_stop            = riscv_tr_tb_stop,
    .disas_log          = riscv_tr_disas_log,
};
1055 
/* Entry point from the common code generator: translate one TB. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext ctx;

    translator_loop(&riscv_tr_ops, &ctx.base, cs, tb, max_insns);
}
1062 
/* One-time init: create the TCG globals backing the guest registers
 * and the other bits of CPU state the translator touches directly. */
void riscv_translate_init(void)
{
    int i;

    /* cpu_gpr[0] is a placeholder for the zero register. Do not use it. */
    /* Use the gen_set_gpr and gen_get_gpr helper functions when accessing */
    /* registers, unless you specifically block reads/writes to reg 0 */
    cpu_gpr[0] = NULL;

    for (i = 1; i < 32; i++) {
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]);
    }

    for (i = 0; i < 32; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
            offsetof(CPURISCVState, fpr[i]), riscv_fpr_regnames[i]);
    }

    cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, pc), "pc");
    cpu_vl = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, vl), "vl");
    load_res = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_res),
                             "load_res");
    load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val),
                             "load_val");
}
1089