/*
 * RISC-V translation routines for the RVXI Base Integer Instruction Set.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
 *                    Bastian Koppelmann, kbastian@mail.uni-paderborn.de
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

static bool trans_illegal(DisasContext *ctx, arg_empty *a)
{
    gen_exception_illegal(ctx);
    return true;
}

static bool trans_c64_illegal(DisasContext *ctx, arg_empty *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return trans_illegal(ctx, a);
}

static bool trans_lui(DisasContext *ctx, arg_lui *a)
{
    if (a->rd != 0) {
        gen_set_gpri(ctx, a->rd, a->imm);
    }
    return true;
}

static bool trans_auipc(DisasContext *ctx, arg_auipc *a)
{
    if (a->rd != 0) {
        gen_set_gpri(ctx, a->rd, a->imm + ctx->base.pc_next);
    }
    return true;
}

static bool trans_jal(DisasContext *ctx, arg_jal *a)
{
    gen_jal(ctx, a->rd, a->imm);
    return true;
}

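/*
 * JALR: jump to (rs1 + imm) with the low bit cleared.  When the C
 * extension is absent, a target that is not 4-byte aligned must raise an
 * instruction-address-misaligned exception; the check branches to the
 * "misaligned" label before the link value is written, so rd is left
 * untouched on the faulting path.
 */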
static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
{
    TCGLabel *misaligned = NULL;

    tcg_gen_addi_tl(cpu_pc, get_gpr(ctx, a->rs1, EXT_NONE), a->imm);
    tcg_gen_andi_tl(cpu_pc, cpu_pc, (target_ulong)-2);

    gen_set_pc(ctx, cpu_pc);
    if (!has_ext(ctx, RVC)) {
        TCGv t0 = tcg_temp_new();

        misaligned = gen_new_label();
        tcg_gen_andi_tl(t0, cpu_pc, 0x2);
        tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
        tcg_temp_free(t0);
    }

    gen_set_gpri(ctx, a->rd, ctx->pc_succ_insn);
    tcg_gen_lookup_and_goto_ptr();

    if (misaligned) {
        gen_set_label(misaligned);
        gen_exception_inst_addr_mis(ctx);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}

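/*
 * Compare two 128-bit values (al/ah vs bl/bh) by folding them into a single
 * word in rl, which the caller then tests against zero with the returned
 * TCGCond.  "bz" flags the case where the second operand is known to be
 * zero, allowing a cheaper sequence.  Rough sketch of the unsigned case
 * (GEU/LTU): the borrow out of the low-word subtraction is propagated
 * through the high words into a third word seeded with 1, so rl becomes 1
 * when a >= b (unsigned) and 0 otherwise; LTU just inverts the returned
 * condition.
 */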
static TCGCond gen_compare_i128(bool bz, TCGv rl,
                                TCGv al, TCGv ah, TCGv bl, TCGv bh,
                                TCGCond cond)
{
    TCGv rh = tcg_temp_new();
    bool invert = false;

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        if (bz) {
            tcg_gen_or_tl(rl, al, ah);
        } else {
            tcg_gen_xor_tl(rl, al, bl);
            tcg_gen_xor_tl(rh, ah, bh);
            tcg_gen_or_tl(rl, rl, rh);
        }
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        if (bz) {
            tcg_gen_mov_tl(rl, ah);
        } else {
            TCGv tmp = tcg_temp_new();

            tcg_gen_sub2_tl(rl, rh, al, ah, bl, bh);
            tcg_gen_xor_tl(rl, rh, ah);
            tcg_gen_xor_tl(tmp, ah, bh);
            tcg_gen_and_tl(rl, rl, tmp);
            tcg_gen_xor_tl(rl, rh, rl);

            tcg_temp_free(tmp);
        }
        break;

    case TCG_COND_LTU:
        invert = true;
        /* fallthrough */
    case TCG_COND_GEU:
        {
            TCGv tmp = tcg_temp_new();
            TCGv zero = tcg_constant_tl(0);
            TCGv one = tcg_constant_tl(1);

            cond = TCG_COND_NE;
            /* borrow in to second word */
            tcg_gen_setcond_tl(TCG_COND_LTU, tmp, al, bl);
            /* seed third word with 1, which will be result */
            tcg_gen_sub2_tl(tmp, rh, ah, one, tmp, zero);
            tcg_gen_sub2_tl(tmp, rl, tmp, rh, bh, zero);

            tcg_temp_free(tmp);
        }
        break;

    default:
        g_assert_not_reached();
    }

    if (invert) {
        cond = tcg_invert_cond(cond);
    }

    tcg_temp_free(rh);
    return cond;
}

static void gen_setcond_i128(TCGv rl, TCGv rh,
                             TCGv src1l, TCGv src1h,
                             TCGv src2l, TCGv src2h,
                             TCGCond cond)
{
    cond = gen_compare_i128(false, rl, src1l, src1h, src2l, src2h, cond);
    tcg_gen_setcondi_tl(cond, rl, rl, 0);
    tcg_gen_movi_tl(rh, 0);
}

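/*
 * Conditional branches: the not-taken path chains to the next instruction
 * (goto_tb slot 1); the taken path either raises an
 * instruction-address-misaligned exception (no C extension and a target
 * that is not 4-byte aligned) or chains to pc + imm (goto_tb slot 0).
 * For RV128 the comparison is first folded into a single word with
 * gen_compare_i128() and then tested against zero.
 */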
static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
{
    TCGLabel *l = gen_new_label();
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_SIGN);
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_SIGN);

    if (get_xl(ctx) == MXL_RV128) {
        TCGv src1h = get_gprh(ctx, a->rs1);
        TCGv src2h = get_gprh(ctx, a->rs2);
        TCGv tmp = tcg_temp_new();

        cond = gen_compare_i128(a->rs2 == 0,
                                tmp, src1, src1h, src2, src2h, cond);
        tcg_gen_brcondi_tl(cond, tmp, 0, l);

        tcg_temp_free(tmp);
    } else {
        tcg_gen_brcond_tl(cond, src1, src2, l);
    }
    gen_goto_tb(ctx, 1, ctx->pc_succ_insn);

    gen_set_label(l); /* branch taken */

    if (!has_ext(ctx, RVC) && ((ctx->base.pc_next + a->imm) & 0x3)) {
        /* misaligned */
        gen_exception_inst_addr_mis(ctx);
    } else {
        gen_goto_tb(ctx, 0, ctx->base.pc_next + a->imm);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}

static bool trans_beq(DisasContext *ctx, arg_beq *a)
{
    return gen_branch(ctx, a, TCG_COND_EQ);
}

static bool trans_bne(DisasContext *ctx, arg_bne *a)
{
    return gen_branch(ctx, a, TCG_COND_NE);
}

static bool trans_blt(DisasContext *ctx, arg_blt *a)
{
    return gen_branch(ctx, a, TCG_COND_LT);
}

static bool trans_bge(DisasContext *ctx, arg_bge *a)
{
    return gen_branch(ctx, a, TCG_COND_GE);
}

static bool trans_bltu(DisasContext *ctx, arg_bltu *a)
{
    return gen_branch(ctx, a, TCG_COND_LTU);
}

static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a)
{
    return gen_branch(ctx, a, TCG_COND_GEU);
}

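/*
 * Loads: the common path (gen_load_tl) issues a single qemu_ld at the
 * requested width.  The RV128 path (gen_load_i128) keeps the address
 * computation at 64 bits, sign- or zero-extends sub-quad results into the
 * high half of the register pair, and implements LQ as two 64-bit
 * little-endian loads.
 */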
static bool gen_load_tl(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv addr = get_address(ctx, a->rs1, a->imm);

    tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, memop);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

/* Compute only 64-bit addresses to use the address translation mechanism */
static bool gen_load_i128(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv destl = dest_gpr(ctx, a->rd);
    TCGv desth = dest_gprh(ctx, a->rd);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_tl(desth, destl, 63);
        } else {
            tcg_gen_movi_tl(desth, 0);
        }
    } else {
        /* assume little-endian memory access for now */
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_ld_tl(desth, addrl, ctx->mem_idx, MO_TEUQ);
    }

    gen_set_gpr128(ctx, a->rd, destl, desth);

    tcg_temp_free(addrl);
    return true;
}

static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    if (get_xl(ctx) == MXL_RV128) {
        return gen_load_i128(ctx, a, memop);
    } else {
        return gen_load_tl(ctx, a, memop);
    }
}

static bool trans_lb(DisasContext *ctx, arg_lb *a)
{
    return gen_load(ctx, a, MO_SB);
}

static bool trans_lh(DisasContext *ctx, arg_lh *a)
{
    return gen_load(ctx, a, MO_TESW);
}

static bool trans_lw(DisasContext *ctx, arg_lw *a)
{
    return gen_load(ctx, a, MO_TESL);
}

static bool trans_ld(DisasContext *ctx, arg_ld *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TESQ);
}

static bool trans_lq(DisasContext *ctx, arg_lq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUO);
}

static bool trans_lbu(DisasContext *ctx, arg_lbu *a)
{
    return gen_load(ctx, a, MO_UB);
}

static bool trans_lhu(DisasContext *ctx, arg_lhu *a)
{
    return gen_load(ctx, a, MO_TEUW);
}

static bool trans_lwu(DisasContext *ctx, arg_lwu *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUL);
}

static bool trans_ldu(DisasContext *ctx, arg_ldu *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUQ);
}

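/*
 * Stores mirror the loads above: a single qemu_st on the common path, and
 * for RV128 a 64-bit address computation with SQ emitted as two 64-bit
 * little-endian stores.
 */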
static bool gen_store_tl(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv addr = get_address(ctx, a->rs1, a->imm);
    TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);

    tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
    return true;
}

static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv src2l = get_gpr(ctx, a->rs2, EXT_NONE);
    TCGv src2h = get_gprh(ctx, a->rs2);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, memop);
    } else {
        /* little-endian memory access assumed for now */
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_st_tl(src2h, addrl, ctx->mem_idx, MO_TEUQ);
    }

    tcg_temp_free(addrl);
    return true;
}

static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    if (get_xl(ctx) == MXL_RV128) {
        return gen_store_i128(ctx, a, memop);
    } else {
        return gen_store_tl(ctx, a, memop);
    }
}

static bool trans_sb(DisasContext *ctx, arg_sb *a)
{
    return gen_store(ctx, a, MO_SB);
}

static bool trans_sh(DisasContext *ctx, arg_sh *a)
{
    return gen_store(ctx, a, MO_TESW);
}

static bool trans_sw(DisasContext *ctx, arg_sw *a)
{
    return gen_store(ctx, a, MO_TESL);
}

static bool trans_sd(DisasContext *ctx, arg_sd *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUQ);
}

static bool trans_sq(DisasContext *ctx, arg_sq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUO);
}

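/*
 * RV128 "double-word" ops (ADDD, ADDID, SUBD, ...) reuse the generic
 * helpers but narrow the operation length to 64 bits by setting ctx->ol,
 * in the same way the *W instructions narrow it to 32 bits on RV64.
 */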
static bool trans_addd(DisasContext *ctx, arg_addd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

static bool trans_addid(DisasContext *ctx, arg_addid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

static bool trans_subd(DisasContext *ctx, arg_subd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}

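/*
 * 128-bit add-immediate: the immediate is materialized as a 128-bit
 * constant by sign-extending it into the high word (-(imm < 0) is all
 * ones for a negative immediate, zero otherwise) and then added with a
 * single add2.
 */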
static void gen_addi2_i128(TCGv retl, TCGv reth,
                           TCGv srcl, TCGv srch, target_long imm)
{
    TCGv imml  = tcg_constant_tl(imm);
    TCGv immh  = tcg_constant_tl(-(imm < 0));
    tcg_gen_add2_tl(retl, reth, srcl, srch, imml, immh);
}

static bool trans_addi(DisasContext *ctx, arg_addi *a)
{
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, gen_addi2_i128);
}

static void gen_slt(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LT, ret, s1, s2);
}

static void gen_slt_i128(TCGv retl, TCGv reth,
                         TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LT);
}

static void gen_sltu(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LTU, ret, s1, s2);
}

static void gen_sltu_i128(TCGv retl, TCGv reth,
                          TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LTU);
}

static bool trans_slti(DisasContext *ctx, arg_slti *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}

static bool trans_xori(DisasContext *ctx, arg_xori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_xori_tl);
}

static bool trans_ori(DisasContext *ctx, arg_ori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_ori_tl);
}

static bool trans_andi(DisasContext *ctx, arg_andi *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_andi_tl);
}

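/*
 * 128-bit shifts by an immediate split on the 64-bit boundary: for
 * shamt >= 64 only one input word contributes, otherwise extract2 stitches
 * together the bits that cross between the two halves.  The same pattern
 * is used for SLLI, SRLI and SRAI below.
 */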
static void gen_slli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_shli_tl(reth, src1l, shamt - 64);
        tcg_gen_movi_tl(retl, 0);
    } else {
        tcg_gen_extract2_tl(reth, src1l, src1h, 64 - shamt);
        tcg_gen_shli_tl(retl, src1l, shamt);
    }
}

static bool trans_slli(DisasContext *ctx, arg_slli *a)
{
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, gen_slli_i128);
}

static void gen_srliw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_extract_tl(dst, src, shamt, 32 - shamt);
}

static void gen_srli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_shri_tl(retl, src1h, shamt - 64);
        tcg_gen_movi_tl(reth, 0);
    } else {
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_shri_tl(reth, src1h, shamt);
    }
}

static bool trans_srli(DisasContext *ctx, arg_srli *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_shri_tl, gen_srliw, gen_srli_i128);
}

static void gen_sraiw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_sextract_tl(dst, src, shamt, 32 - shamt);
}

static void gen_srai_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_sari_tl(retl, src1h, shamt - 64);
        tcg_gen_sari_tl(reth, src1h, 63);
    } else {
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_sari_tl(reth, src1h, shamt);
    }
}

static bool trans_srai(DisasContext *ctx, arg_srai *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_sari_tl, gen_sraiw, gen_srai_i128);
}

static bool trans_add(DisasContext *ctx, arg_add *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, tcg_gen_add2_tl);
}

static bool trans_sub(DisasContext *ctx, arg_sub *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, tcg_gen_sub2_tl);
}

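/*
 * 128-bit shifts by a register amount cannot branch at translation time,
 * so both the "shamt < 64" and "shamt >= 64" results are computed and the
 * final values are selected with movcond on bit 6 of the shift amount
 * (hs).  A second movcond handles the shamt == 0 case, where shifting the
 * other half by (64 - shamt) & 63 would otherwise leak stale bits into
 * the result.  SLL, SRL and SRA below all follow this pattern.
 */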
static void gen_sll_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(ls, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(rs, shamt, 63);

    tcg_gen_shl_tl(ll, src1l, ls);
    tcg_gen_shl_tl(h0, src1h, ls);
    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_movcond_tl(TCG_COND_NE, lr, shamt, zero, lr, zero);
    tcg_gen_or_tl(h1, h0, lr);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, zero, ll);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, ll, h1);

    tcg_temp_free(ls);
    tcg_temp_free(rs);
    tcg_temp_free(hs);
    tcg_temp_free(ll);
    tcg_temp_free(lr);
    tcg_temp_free(h0);
    tcg_temp_free(h1);
}

static bool trans_sll(DisasContext *ctx, arg_sll *a)
{
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, gen_sll_i128);
}

static bool trans_slt(DisasContext *ctx, arg_slt *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

static bool trans_sltu(DisasContext *ctx, arg_sltu *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}

static void gen_srl_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_shr_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, zero, h1);

    tcg_temp_free(ls);
    tcg_temp_free(rs);
    tcg_temp_free(hs);
    tcg_temp_free(ll);
    tcg_temp_free(lr);
    tcg_temp_free(h0);
    tcg_temp_free(h1);
}

static bool trans_srl(DisasContext *ctx, arg_srl *a)
{
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, gen_srl_i128);
}

static void gen_sra_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_sar_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);
    tcg_gen_sari_tl(lr, src1h, 63);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, lr, h1);

    tcg_temp_free(ls);
    tcg_temp_free(rs);
    tcg_temp_free(hs);
    tcg_temp_free(ll);
    tcg_temp_free(lr);
    tcg_temp_free(h0);
    tcg_temp_free(h1);
}

static bool trans_sra(DisasContext *ctx, arg_sra *a)
{
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, gen_sra_i128);
}

static bool trans_xor(DisasContext *ctx, arg_xor *a)
{
    return gen_logic(ctx, a, tcg_gen_xor_tl);
}

static bool trans_or(DisasContext *ctx, arg_or *a)
{
    return gen_logic(ctx, a, tcg_gen_or_tl);
}

static bool trans_and(DisasContext *ctx, arg_and *a)
{
    return gen_logic(ctx, a, tcg_gen_and_tl);
}

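/*
 * The *W instructions operate on 32-bit values; setting ctx->ol to
 * MXL_RV32 makes the generic arithmetic and shift helpers narrow the
 * operation and sign-extend the result as required on RV64 and RV128.
 */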
static bool trans_addiw(DisasContext *ctx, arg_addiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

static bool trans_slliw(DisasContext *ctx, arg_slliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

static bool trans_srliw(DisasContext *ctx, arg_srliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_srliw, NULL);
}

static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_sraiw, NULL);
}

static bool trans_sllid(DisasContext *ctx, arg_sllid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

static bool trans_srlid(DisasContext *ctx, arg_srlid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shri_tl, NULL);
}

static bool trans_sraid(DisasContext *ctx, arg_sraid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_sari_tl, NULL);
}

static bool trans_addw(DisasContext *ctx, arg_addw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

static bool trans_subw(DisasContext *ctx, arg_subw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}

static bool trans_sllw(DisasContext *ctx, arg_sllw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

static bool trans_srlw(DisasContext *ctx, arg_srlw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

static bool trans_sraw(DisasContext *ctx, arg_sraw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}

static bool trans_slld(DisasContext *ctx, arg_slld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

static bool trans_srld(DisasContext *ctx, arg_srld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

static bool trans_srad(DisasContext *ctx, arg_srad *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}

static bool trans_fence(DisasContext *ctx, arg_fence *a)
{
    /* FENCE is a full memory barrier. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    return true;
}

static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
{
    if (!ctx->cfg_ptr->ext_ifencei) {
        return false;
    }

    /*
     * FENCE_I is a no-op in QEMU; however, we need to end the
     * translation block.
     */
    gen_set_pc_imm(ctx, ctx->pc_succ_insn);
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

static bool do_csr_post(DisasContext *ctx)
{
    /* We may have changed important cpu state -- exit to main loop. */
    gen_set_pc_imm(ctx, ctx->pc_succ_insn);
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

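/*
 * CSR accesses go through helpers that may raise exceptions and may touch
 * I/O state, so gen_io_start() is emitted when icount is in use and every
 * access ends the translation block via do_csr_post() above.  The _i128
 * variants return the low half from the helper and pick up the high half
 * from env->retxh.
 */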
static bool do_csrr(DisasContext *ctx, int rd, int rc)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrr(dest, cpu_env, csr);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}

static bool do_csrw(DisasContext *ctx, int rc, TCGv src)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrw(cpu_env, csr, src);
    return do_csr_post(ctx);
}

static bool do_csrrw(DisasContext *ctx, int rd, int rc, TCGv src, TCGv mask)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrrw(dest, cpu_env, csr, src, mask);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}

static bool do_csrr_i128(DisasContext *ctx, int rd, int rc)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrr_i128(destl, cpu_env, csr);
    tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}

static bool do_csrw_i128(DisasContext *ctx, int rc, TCGv srcl, TCGv srch)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrw_i128(cpu_env, csr, srcl, srch);
    return do_csr_post(ctx);
}

static bool do_csrrw_i128(DisasContext *ctx, int rd, int rc,
                          TCGv srcl, TCGv srch, TCGv maskl, TCGv maskh)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrrw_i128(destl, cpu_env, csr, srcl, srch, maskl, maskh);
    tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}

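/*
 * CSRRW and friends are built on a read-modify-write helper that takes a
 * write mask: a plain write uses an all-ones mask (UINT32_MAX on RV32),
 * while the set/clear forms pass the rs1/uimm value as the mask.  When
 * rd is x0 (for CSRRW/CSRRWI) or rs1 is x0 (for the set/clear forms), the
 * simpler write-only or read-only helpers are used so that the side
 * effects of the skipped access are not triggered, as the spec requires.
 */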
static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a)
{
    RISCVMXL xl = get_xl(ctx);
    if (xl < MXL_RV128) {
        TCGv src = get_gpr(ctx, a->rs1, EXT_NONE);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw(ctx, a->csr, src);
        }

        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
                                                     (target_ulong)-1);
        return do_csrrw(ctx, a->rd, a->csr, src, mask);
    } else {
        TCGv srcl = get_gpr(ctx, a->rs1, EXT_NONE);
        TCGv srch = get_gprh(ctx, a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw_i128(ctx, a->csr, srcl, srch);
        }

        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw_i128(ctx, a->rd, a->csr, srcl, srch, mask, mask);
    }
}

static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
        TCGv maskh = get_gprh(ctx, a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, maskl, maskh);
    }
}

static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
        TCGv maskh = get_gprh(ctx, a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr,
                             ctx->zero, ctx->zero, maskl, maskh);
    }
}

static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a)
{
    RISCVMXL xl = get_xl(ctx);
    if (xl < MXL_RV128) {
        TCGv src = tcg_constant_tl(a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw(ctx, a->csr, src);
        }

        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
                                                     (target_ulong)-1);
        return do_csrrw(ctx, a->rd, a->csr, src, mask);
    } else {
        TCGv src = tcg_constant_tl(a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw_i128(ctx, a->csr, src, ctx->zero);
        }

        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw_i128(ctx, a->rd, a->csr, src, ctx->zero, mask, mask);
    }
}

static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, mask, ctx->zero);
    }
}

static bool trans_csrrci(DisasContext *ctx, arg_csrrci *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr,
                             ctx->zero, ctx->zero, mask, ctx->zero);
    }
}