/*
 * RISC-V translation routines for the RVXI Base Integer Instruction Set.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
 *                    Bastian Koppelmann, kbastian@mail.uni-paderborn.de
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

static bool trans_illegal(DisasContext *ctx, arg_empty *a)
{
    gen_exception_illegal(ctx);
    return true;
}

static bool trans_c64_illegal(DisasContext *ctx, arg_empty *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return trans_illegal(ctx, a);
}

static bool trans_lui(DisasContext *ctx, arg_lui *a)
{
    gen_set_gpri(ctx, a->rd, a->imm);
    return true;
}

static bool trans_auipc(DisasContext *ctx, arg_auipc *a)
{
    gen_set_gpri(ctx, a->rd, a->imm + ctx->base.pc_next);
    return true;
}

static bool trans_jal(DisasContext *ctx, arg_jal *a)
{
    gen_jal(ctx, a->rd, a->imm);
    return true;
}

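/*
 * JALR: the target is rs1 + imm with bit 0 cleared, per the ISA spec.
 * When the C extension is absent, a target with bit 1 set is a
 * misaligned fetch, so branch to a slow path that raises the
 * instruction-address-misaligned exception; the link register is only
 * written on the non-faulting path.
 */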
static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
{
    TCGLabel *misaligned = NULL;

    tcg_gen_addi_tl(cpu_pc, get_gpr(ctx, a->rs1, EXT_NONE), a->imm);
    tcg_gen_andi_tl(cpu_pc, cpu_pc, (target_ulong)-2);

    gen_set_pc(ctx, cpu_pc);
    if (!has_ext(ctx, RVC)) {
        TCGv t0 = tcg_temp_new();

        misaligned = gen_new_label();
        tcg_gen_andi_tl(t0, cpu_pc, 0x2);
        tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
        tcg_temp_free(t0);
    }

    gen_set_gpri(ctx, a->rd, ctx->pc_succ_insn);
    tcg_gen_lookup_and_goto_ptr();

    if (misaligned) {
        gen_set_label(misaligned);
        gen_exception_inst_addr_mis(ctx);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}

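/*
 * Reduce a 128-bit comparison (operands split into 64-bit halves) to a
 * test against zero on the 64-bit temporary 'rl', returning the TCGCond
 * to apply to that test.  'bz' is set when the second operand is known
 * to be zero (rs2 == x0), which allows a cheaper sequence.  The unsigned
 * cases are computed via a borrow chain and may invert the condition.
 */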
static TCGCond gen_compare_i128(bool bz, TCGv rl,
                                TCGv al, TCGv ah, TCGv bl, TCGv bh,
                                TCGCond cond)
{
    TCGv rh = tcg_temp_new();
    bool invert = false;

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        if (bz) {
            tcg_gen_or_tl(rl, al, ah);
        } else {
            tcg_gen_xor_tl(rl, al, bl);
            tcg_gen_xor_tl(rh, ah, bh);
            tcg_gen_or_tl(rl, rl, rh);
        }
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        if (bz) {
            tcg_gen_mov_tl(rl, ah);
        } else {
            TCGv tmp = tcg_temp_new();

            tcg_gen_sub2_tl(rl, rh, al, ah, bl, bh);
            tcg_gen_xor_tl(rl, rh, ah);
            tcg_gen_xor_tl(tmp, ah, bh);
            tcg_gen_and_tl(rl, rl, tmp);
            tcg_gen_xor_tl(rl, rh, rl);

            tcg_temp_free(tmp);
        }
        break;

    case TCG_COND_LTU:
        invert = true;
        /* fallthrough */
    case TCG_COND_GEU:
        {
            TCGv tmp = tcg_temp_new();
            TCGv zero = tcg_constant_tl(0);
            TCGv one = tcg_constant_tl(1);

            cond = TCG_COND_NE;
            /* borrow in to second word */
            tcg_gen_setcond_tl(TCG_COND_LTU, tmp, al, bl);
            /* seed third word with 1, which will be result */
            tcg_gen_sub2_tl(tmp, rh, ah, one, tmp, zero);
            tcg_gen_sub2_tl(tmp, rl, tmp, rh, bh, zero);

            tcg_temp_free(tmp);
        }
        break;

    default:
        g_assert_not_reached();
    }

    if (invert) {
        cond = tcg_invert_cond(cond);
    }

    tcg_temp_free(rh);
    return cond;
}

static void gen_setcond_i128(TCGv rl, TCGv rh,
                             TCGv src1l, TCGv src1h,
                             TCGv src2l, TCGv src2h,
                             TCGCond cond)
{
    cond = gen_compare_i128(false, rl, src1l, src1h, src2l, src2h, cond);
    tcg_gen_setcondi_tl(cond, rl, rl, 0);
    tcg_gen_movi_tl(rh, 0);
}

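/*
 * Conditional branches: the not-taken path chains to the next insn via
 * goto_tb slot 1; the taken path uses slot 0 after checking that the
 * target is aligned (only required when the C extension is absent).
 * For RV128 the comparison is first folded to a 64-bit test by
 * gen_compare_i128(), with the rs2 == x0 case handled specially.
 */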
static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
{
    TCGLabel *l = gen_new_label();
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_SIGN);
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_SIGN);

    if (get_xl(ctx) == MXL_RV128) {
        TCGv src1h = get_gprh(ctx, a->rs1);
        TCGv src2h = get_gprh(ctx, a->rs2);
        TCGv tmp = tcg_temp_new();

        cond = gen_compare_i128(a->rs2 == 0,
                                tmp, src1, src1h, src2, src2h, cond);
        tcg_gen_brcondi_tl(cond, tmp, 0, l);

        tcg_temp_free(tmp);
    } else {
        tcg_gen_brcond_tl(cond, src1, src2, l);
    }
    gen_goto_tb(ctx, 1, ctx->pc_succ_insn);

    gen_set_label(l); /* branch taken */

    if (!has_ext(ctx, RVC) && ((ctx->base.pc_next + a->imm) & 0x3)) {
        /* misaligned */
        gen_exception_inst_addr_mis(ctx);
    } else {
        gen_goto_tb(ctx, 0, ctx->base.pc_next + a->imm);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}

static bool trans_beq(DisasContext *ctx, arg_beq *a)
{
    return gen_branch(ctx, a, TCG_COND_EQ);
}

static bool trans_bne(DisasContext *ctx, arg_bne *a)
{
    return gen_branch(ctx, a, TCG_COND_NE);
}

static bool trans_blt(DisasContext *ctx, arg_blt *a)
{
    return gen_branch(ctx, a, TCG_COND_LT);
}

static bool trans_bge(DisasContext *ctx, arg_bge *a)
{
    return gen_branch(ctx, a, TCG_COND_GE);
}

static bool trans_bltu(DisasContext *ctx, arg_bltu *a)
{
    return gen_branch(ctx, a, TCG_COND_LTU);
}

static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a)
{
    return gen_branch(ctx, a, TCG_COND_GEU);
}

static bool gen_load_tl(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv addr = get_address(ctx, a->rs1, a->imm);

    tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, memop);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

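/*
 * RV128 loads: accesses up to 64 bits load the low half and then
 * sign- or zero-extend it into the high half; a full 128-bit load (LQ)
 * is split into two little-endian 64-bit loads at addr and addr + 8.
 */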
/* Compute only 64-bit addresses to use the address translation mechanism */
static bool gen_load_i128(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv destl = dest_gpr(ctx, a->rd);
    TCGv desth = dest_gprh(ctx, a->rd);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_tl(desth, destl, 63);
        } else {
            tcg_gen_movi_tl(desth, 0);
        }
    } else {
        /* assume little-endian memory access for now */
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_ld_tl(desth, addrl, ctx->mem_idx, MO_TEUQ);
    }

    gen_set_gpr128(ctx, a->rd, destl, desth);

    tcg_temp_free(addrl);
    return true;
}

static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    if (get_xl(ctx) == MXL_RV128) {
        return gen_load_i128(ctx, a, memop);
    } else {
        return gen_load_tl(ctx, a, memop);
    }
}

static bool trans_lb(DisasContext *ctx, arg_lb *a)
{
    return gen_load(ctx, a, MO_SB);
}

static bool trans_lh(DisasContext *ctx, arg_lh *a)
{
    return gen_load(ctx, a, MO_TESW);
}

static bool trans_lw(DisasContext *ctx, arg_lw *a)
{
    return gen_load(ctx, a, MO_TESL);
}

static bool trans_ld(DisasContext *ctx, arg_ld *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TESQ);
}

static bool trans_lq(DisasContext *ctx, arg_lq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUO);
}

static bool trans_lbu(DisasContext *ctx, arg_lbu *a)
{
    return gen_load(ctx, a, MO_UB);
}

static bool trans_lhu(DisasContext *ctx, arg_lhu *a)
{
    return gen_load(ctx, a, MO_TEUW);
}

static bool trans_lwu(DisasContext *ctx, arg_lwu *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUL);
}

static bool trans_ldu(DisasContext *ctx, arg_ldu *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUQ);
}

static bool gen_store_tl(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv addr = get_address(ctx, a->rs1, a->imm);
    TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);

    tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
    return true;
}

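/*
 * RV128 stores mirror the loads: accesses up to 64 bits store only the
 * low half, while SQ is split into two little-endian 64-bit stores at
 * addr and addr + 8.
 */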
static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv src2l = get_gpr(ctx, a->rs2, EXT_NONE);
    TCGv src2h = get_gprh(ctx, a->rs2);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, memop);
    } else {
        /* little-endian memory access assumed for now */
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_st_tl(src2h, addrl, ctx->mem_idx, MO_TEUQ);
    }

    tcg_temp_free(addrl);
    return true;
}

static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    if (get_xl(ctx) == MXL_RV128) {
        return gen_store_i128(ctx, a, memop);
    } else {
        return gen_store_tl(ctx, a, memop);
    }
}

static bool trans_sb(DisasContext *ctx, arg_sb *a)
{
    return gen_store(ctx, a, MO_SB);
}

static bool trans_sh(DisasContext *ctx, arg_sh *a)
{
    return gen_store(ctx, a, MO_TESW);
}

static bool trans_sw(DisasContext *ctx, arg_sw *a)
{
    return gen_store(ctx, a, MO_TESL);
}

static bool trans_sd(DisasContext *ctx, arg_sd *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUQ);
}

static bool trans_sq(DisasContext *ctx, arg_sq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUO);
}

static bool trans_addd(DisasContext *ctx, arg_addd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

static bool trans_addid(DisasContext *ctx, arg_addid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

static bool trans_subd(DisasContext *ctx, arg_subd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}

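/*
 * 128-bit add-immediate: sign-extend the 64-bit immediate into a high
 * word (-1 for negative values, 0 otherwise) and let add2 propagate the
 * carry from the low half into the high half.
 */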
static void gen_addi2_i128(TCGv retl, TCGv reth,
                           TCGv srcl, TCGv srch, target_long imm)
{
    TCGv imml  = tcg_constant_tl(imm);
    TCGv immh  = tcg_constant_tl(-(imm < 0));
    tcg_gen_add2_tl(retl, reth, srcl, srch, imml, immh);
}

static bool trans_addi(DisasContext *ctx, arg_addi *a)
{
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, gen_addi2_i128);
}

static void gen_slt(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LT, ret, s1, s2);
}

static void gen_slt_i128(TCGv retl, TCGv reth,
                         TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LT);
}

static void gen_sltu(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LTU, ret, s1, s2);
}

static void gen_sltu_i128(TCGv retl, TCGv reth,
                          TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LTU);
}

static bool trans_slti(DisasContext *ctx, arg_slti *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}

static bool trans_xori(DisasContext *ctx, arg_xori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_xori_tl);
}

static bool trans_ori(DisasContext *ctx, arg_ori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_ori_tl);
}

static bool trans_andi(DisasContext *ctx, arg_andi *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_andi_tl);
}

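/*
 * Immediate 128-bit shifts: the shift amount is a translation-time
 * constant below 128.  Shifts of 64 or more move bits wholly across
 * the two halves; smaller shifts use extract2 as a funnel shift to
 * carry bits between the low and high words.
 */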
static void gen_slli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_shli_tl(reth, src1l, shamt - 64);
        tcg_gen_movi_tl(retl, 0);
    } else {
        tcg_gen_extract2_tl(reth, src1l, src1h, 64 - shamt);
        tcg_gen_shli_tl(retl, src1l, shamt);
    }
}

static bool trans_slli(DisasContext *ctx, arg_slli *a)
{
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, gen_slli_i128);
}

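/*
 * 32-bit logical right shift: extract pulls bits [shamt, 32) and
 * zero-extends them in a single TCG op; gen_set_gpr() later
 * sign-extends the 32-bit result as the W-form instructions require.
 */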
static void gen_srliw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_extract_tl(dst, src, shamt, 32 - shamt);
}

static void gen_srli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_shri_tl(retl, src1h, shamt - 64);
        tcg_gen_movi_tl(reth, 0);
    } else {
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_shri_tl(reth, src1h, shamt);
    }
}

static bool trans_srli(DisasContext *ctx, arg_srli *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_shri_tl, gen_srliw, gen_srli_i128);
}

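/*
 * 32-bit arithmetic right shift: sextract shifts and sign-extends the
 * low 32 bits in one TCG op.
 */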
static void gen_sraiw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_sextract_tl(dst, src, shamt, 32 - shamt);
}

static void gen_srai_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_sari_tl(retl, src1h, shamt - 64);
        tcg_gen_sari_tl(reth, src1h, 63);
    } else {
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_sari_tl(reth, src1h, shamt);
    }
}

static bool trans_srai(DisasContext *ctx, arg_srai *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_sari_tl, gen_sraiw, gen_srai_i128);
}

static bool trans_add(DisasContext *ctx, arg_add *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, tcg_gen_add2_tl);
}

static bool trans_sub(DisasContext *ctx, arg_sub *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, tcg_gen_sub2_tl);
}

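/*
 * Variable 128-bit left shift.  The shift amount has already been
 * masked to 7 bits: bit 6 (hs) selects whether the halves swap
 * completely, the low 6 bits (ls) shift within a half, and the bits
 * shifted out of the low half are recovered with a right shift by
 * (-shamt & 63) and OR-ed into the high half.  The movcond forces that
 * cross-half contribution to zero when shamt == 0, where the right
 * shift would otherwise reproduce the whole low word.
 */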
static void gen_sll_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(ls, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(rs, shamt, 63);

    tcg_gen_shl_tl(ll, src1l, ls);
    tcg_gen_shl_tl(h0, src1h, ls);
    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_movcond_tl(TCG_COND_NE, lr, shamt, zero, lr, zero);
    tcg_gen_or_tl(h1, h0, lr);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, zero, ll);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, ll, h1);

    tcg_temp_free(ls);
    tcg_temp_free(rs);
    tcg_temp_free(hs);
    tcg_temp_free(ll);
    tcg_temp_free(lr);
    tcg_temp_free(h0);
    tcg_temp_free(h1);
}

static bool trans_sll(DisasContext *ctx, arg_sll *a)
{
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, gen_sll_i128);
}

static bool trans_slt(DisasContext *ctx, arg_slt *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

static bool trans_sltu(DisasContext *ctx, arg_sltu *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}

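/*
 * Variable 128-bit logical right shift, the mirror image of
 * gen_sll_i128(): bits leaving the high half are shifted left by
 * (-shamt & 63) and OR-ed into the low half, with the same movcond
 * guards for shamt == 0 and for shift amounts of 64 or more.
 */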
static void gen_srl_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_shr_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, zero, h1);

    tcg_temp_free(ls);
    tcg_temp_free(rs);
    tcg_temp_free(hs);
    tcg_temp_free(ll);
    tcg_temp_free(lr);
    tcg_temp_free(h0);
    tcg_temp_free(h1);
}

static bool trans_srl(DisasContext *ctx, arg_srl *a)
{
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, gen_srl_i128);
}

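/*
 * Variable 128-bit arithmetic right shift: as gen_srl_i128(), but the
 * high half is shifted with sar and, for shift amounts of 64 or more,
 * the new high half is filled with copies of the sign bit.
 */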
static void gen_sra_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_sar_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);
    tcg_gen_sari_tl(lr, src1h, 63);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, lr, h1);

    tcg_temp_free(ls);
    tcg_temp_free(rs);
    tcg_temp_free(hs);
    tcg_temp_free(ll);
    tcg_temp_free(lr);
    tcg_temp_free(h0);
    tcg_temp_free(h1);
}

static bool trans_sra(DisasContext *ctx, arg_sra *a)
{
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, gen_sra_i128);
}

static bool trans_xor(DisasContext *ctx, arg_xor *a)
{
    return gen_logic(ctx, a, tcg_gen_xor_tl);
}

static bool trans_or(DisasContext *ctx, arg_or *a)
{
    return gen_logic(ctx, a, tcg_gen_or_tl);
}

static bool trans_and(DisasContext *ctx, arg_and *a)
{
    return gen_logic(ctx, a, tcg_gen_and_tl);
}

static bool trans_addiw(DisasContext *ctx, arg_addiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

static bool trans_slliw(DisasContext *ctx, arg_slliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

static bool trans_srliw(DisasContext *ctx, arg_srliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_srliw, NULL);
}

static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_sraiw, NULL);
}

static bool trans_sllid(DisasContext *ctx, arg_sllid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

static bool trans_srlid(DisasContext *ctx, arg_srlid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shri_tl, NULL);
}

static bool trans_sraid(DisasContext *ctx, arg_sraid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_sari_tl, NULL);
}

static bool trans_addw(DisasContext *ctx, arg_addw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

static bool trans_subw(DisasContext *ctx, arg_subw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}

static bool trans_sllw(DisasContext *ctx, arg_sllw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

static bool trans_srlw(DisasContext *ctx, arg_srlw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

static bool trans_sraw(DisasContext *ctx, arg_sraw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}

static bool trans_slld(DisasContext *ctx, arg_slld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

static bool trans_srld(DisasContext *ctx, arg_srld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

static bool trans_srad(DisasContext *ctx, arg_srad *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}

static bool trans_pause(DisasContext *ctx, arg_pause *a)
{
    if (!ctx->cfg_ptr->ext_zihintpause) {
        return false;
    }

    /*
     * PAUSE is a no-op in QEMU,
     * end the TB and return to main loop
     */
    gen_set_pc_imm(ctx, ctx->pc_succ_insn);
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}

static bool trans_fence(DisasContext *ctx, arg_fence *a)
{
    /* FENCE is a full memory barrier. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    return true;
}

static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
{
    if (!ctx->cfg_ptr->ext_ifencei) {
        return false;
    }

    /*
     * FENCE_I is a no-op in QEMU,
     * however we need to end the translation block
     */
    gen_set_pc_imm(ctx, ctx->pc_succ_insn);
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

static bool do_csr_post(DisasContext *ctx)
{
    /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
    decode_save_opc(ctx);
    /* We may have changed important cpu state -- exit to main loop. */
    gen_set_pc_imm(ctx, ctx->pc_succ_insn);
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

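/*
 * CSR accesses always go through helpers, which may raise exceptions
 * and may touch timer or other I/O-like state (e.g. the cycle, time and
 * instret CSRs), so open the I/O window when icount is in use;
 * do_csr_post() then exits to the main loop because global state may
 * have changed.
 */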
static bool do_csrr(DisasContext *ctx, int rd, int rc)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrr(dest, cpu_env, csr);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}

static bool do_csrw(DisasContext *ctx, int rc, TCGv src)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrw(cpu_env, csr, src);
    return do_csr_post(ctx);
}

static bool do_csrrw(DisasContext *ctx, int rd, int rc, TCGv src, TCGv mask)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrrw(dest, cpu_env, csr, src, mask);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}

static bool do_csrr_i128(DisasContext *ctx, int rd, int rc)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrr_i128(destl, cpu_env, csr);
    tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}

static bool do_csrw_i128(DisasContext *ctx, int rc, TCGv srcl, TCGv srch)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrw_i128(cpu_env, csr, srcl, srch);
    return do_csr_post(ctx);
}

static bool do_csrrw_i128(DisasContext *ctx, int rd, int rc,
                          TCGv srcl, TCGv srch, TCGv maskl, TCGv maskh)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrrw_i128(destl, cpu_env, csr, srcl, srch, maskl, maskh);
    tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}

static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a)
{
    RISCVMXL xl = get_xl(ctx);
    if (xl < MXL_RV128) {
        TCGv src = get_gpr(ctx, a->rs1, EXT_NONE);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw(ctx, a->csr, src);
        }

        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
                                                     (target_ulong)-1);
        return do_csrrw(ctx, a->rd, a->csr, src, mask);
    } else {
        TCGv srcl = get_gpr(ctx, a->rs1, EXT_NONE);
        TCGv srch = get_gprh(ctx, a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw_i128(ctx, a->csr, srcl, srch);
        }

        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw_i128(ctx, a->rd, a->csr, srcl, srch, mask, mask);
    }
}

static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
        TCGv maskh = get_gprh(ctx, a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, maskl, maskh);
    }
}

static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
        TCGv maskh = get_gprh(ctx, a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr,
                             ctx->zero, ctx->zero, maskl, maskh);
    }
}

static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a)
{
    RISCVMXL xl = get_xl(ctx);
    if (xl < MXL_RV128) {
        TCGv src = tcg_constant_tl(a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw(ctx, a->csr, src);
        }

        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
                                                     (target_ulong)-1);
        return do_csrrw(ctx, a->rd, a->csr, src, mask);
    } else {
        TCGv src = tcg_constant_tl(a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw_i128(ctx, a->csr, src, ctx->zero);
        }

        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw_i128(ctx, a->rd, a->csr, src, ctx->zero, mask, mask);
    }
}

static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, mask, ctx->zero);
    }
}

static bool trans_csrrci(DisasContext *ctx, arg_csrrci *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr,
                             ctx->zero, ctx->zero, mask, ctx->zero);
    }
}