/*
 * RISC-V translation routines for the RVXI Base Integer Instruction Set.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
 *                    Bastian Koppelmann, kbastian@mail.uni-paderborn.de
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

static bool trans_illegal(DisasContext *ctx, arg_empty *a)
{
    gen_exception_illegal(ctx);
    return true;
}

static bool trans_c64_illegal(DisasContext *ctx, arg_empty *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return trans_illegal(ctx, a);
}

static bool trans_lui(DisasContext *ctx, arg_lui *a)
{
    gen_set_gpri(ctx, a->rd, a->imm);
    return true;
}

static bool trans_auipc(DisasContext *ctx, arg_auipc *a)
{
    gen_set_gpri(ctx, a->rd, a->imm + ctx->base.pc_next);
    return true;
}

static bool trans_jal(DisasContext *ctx, arg_jal *a)
{
    gen_jal(ctx, a->rd, a->imm);
    return true;
}

static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
{
    TCGLabel *misaligned = NULL;

    tcg_gen_addi_tl(cpu_pc, get_gpr(ctx, a->rs1, EXT_NONE), a->imm);
    tcg_gen_andi_tl(cpu_pc, cpu_pc, (target_ulong)-2);

    gen_set_pc(ctx, cpu_pc);
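    /*
     * Without the C extension the target must be 4-byte aligned: if bit 1
     * of the new pc is set, divert to the misaligned-fetch exception below.
     */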
    if (!has_ext(ctx, RVC)) {
        TCGv t0 = tcg_temp_new();

        misaligned = gen_new_label();
        tcg_gen_andi_tl(t0, cpu_pc, 0x2);
        tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
        tcg_temp_free(t0);
    }

    gen_set_gpri(ctx, a->rd, ctx->pc_succ_insn);
    tcg_gen_lookup_and_goto_ptr();

    if (misaligned) {
        gen_set_label(misaligned);
        gen_exception_inst_addr_mis(ctx);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}

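/*
 * Reduce a 128-bit comparison to a 64-bit one: combine the two halves of
 * each operand into a single value in 'rl' and return the condition with
 * which 'rl' must then be compared against zero.  'bz' is true when the
 * second operand is known to be zero (rs2 == x0), allowing a cheaper
 * sequence.
 */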
static TCGCond gen_compare_i128(bool bz, TCGv rl,
                                TCGv al, TCGv ah, TCGv bl, TCGv bh,
                                TCGCond cond)
{
    TCGv rh = tcg_temp_new();
    bool invert = false;

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        if (bz) {
            tcg_gen_or_tl(rl, al, ah);
        } else {
            tcg_gen_xor_tl(rl, al, bl);
            tcg_gen_xor_tl(rh, ah, bh);
            tcg_gen_or_tl(rl, rl, rh);
        }
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        if (bz) {
            tcg_gen_mov_tl(rl, ah);
        } else {
            TCGv tmp = tcg_temp_new();

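            /*
             * Compute the sign of (a - b): take the sign bit of the raw
             * 128-bit difference and correct it for signed overflow of
             * the high word.
             */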
            tcg_gen_sub2_tl(rl, rh, al, ah, bl, bh);
            tcg_gen_xor_tl(rl, rh, ah);
            tcg_gen_xor_tl(tmp, ah, bh);
            tcg_gen_and_tl(rl, rl, tmp);
            tcg_gen_xor_tl(rl, rh, rl);

            tcg_temp_free(tmp);
        }
        break;

    case TCG_COND_LTU:
        invert = true;
        /* fallthrough */
    case TCG_COND_GEU:
        {
            TCGv tmp = tcg_temp_new();
            TCGv zero = tcg_constant_tl(0);
            TCGv one = tcg_constant_tl(1);

            cond = TCG_COND_NE;
            /* borrow in to second word */
            tcg_gen_setcond_tl(TCG_COND_LTU, tmp, al, bl);
            /* seed third word with 1, which will be result */
            tcg_gen_sub2_tl(tmp, rh, ah, one, tmp, zero);
            tcg_gen_sub2_tl(tmp, rl, tmp, rh, bh, zero);

            tcg_temp_free(tmp);
        }
        break;

    default:
        g_assert_not_reached();
    }

    if (invert) {
        cond = tcg_invert_cond(cond);
    }

    tcg_temp_free(rh);
    return cond;
}

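/* Set 'rl' to the boolean result of a 128-bit comparison; 'rh' is always 0. */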
static void gen_setcond_i128(TCGv rl, TCGv rh,
                             TCGv src1l, TCGv src1h,
                             TCGv src2l, TCGv src2h,
                             TCGCond cond)
{
    cond = gen_compare_i128(false, rl, src1l, src1h, src2l, src2h, cond);
    tcg_gen_setcondi_tl(cond, rl, rl, 0);
    tcg_gen_movi_tl(rh, 0);
}

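/*
 * Conditional branch: the not-taken path falls through to pc_succ_insn;
 * the taken path checks target alignment (when RVC is absent) and jumps to
 * pc_next + imm.  For RV128 the comparison is first narrowed to a single
 * 64-bit value by gen_compare_i128.
 */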
static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
{
    TCGLabel *l = gen_new_label();
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_SIGN);
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_SIGN);

    if (get_xl(ctx) == MXL_RV128) {
        TCGv src1h = get_gprh(ctx, a->rs1);
        TCGv src2h = get_gprh(ctx, a->rs2);
        TCGv tmp = tcg_temp_new();

        cond = gen_compare_i128(a->rs2 == 0,
                                tmp, src1, src1h, src2, src2h, cond);
        tcg_gen_brcondi_tl(cond, tmp, 0, l);

        tcg_temp_free(tmp);
    } else {
        tcg_gen_brcond_tl(cond, src1, src2, l);
    }
    gen_goto_tb(ctx, 1, ctx->pc_succ_insn);

    gen_set_label(l); /* branch taken */

    if (!has_ext(ctx, RVC) && ((ctx->base.pc_next + a->imm) & 0x3)) {
        /* misaligned */
        gen_exception_inst_addr_mis(ctx);
    } else {
        gen_goto_tb(ctx, 0, ctx->base.pc_next + a->imm);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}

static bool trans_beq(DisasContext *ctx, arg_beq *a)
{
    return gen_branch(ctx, a, TCG_COND_EQ);
}

static bool trans_bne(DisasContext *ctx, arg_bne *a)
{
    return gen_branch(ctx, a, TCG_COND_NE);
}

static bool trans_blt(DisasContext *ctx, arg_blt *a)
{
    return gen_branch(ctx, a, TCG_COND_LT);
}

static bool trans_bge(DisasContext *ctx, arg_bge *a)
{
    return gen_branch(ctx, a, TCG_COND_GE);
}

static bool trans_bltu(DisasContext *ctx, arg_bltu *a)
{
    return gen_branch(ctx, a, TCG_COND_LTU);
}

static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a)
{
    return gen_branch(ctx, a, TCG_COND_GEU);
}

static bool gen_load_tl(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv addr = get_address(ctx, a->rs1, a->imm);

    tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, memop);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

/* Compute only 64-bit addresses to use the address translation mechanism */
static bool gen_load_i128(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv destl = dest_gpr(ctx, a->rd);
    TCGv desth = dest_gprh(ctx, a->rd);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_tl(desth, destl, 63);
        } else {
            tcg_gen_movi_tl(desth, 0);
        }
    } else {
        /* assume little-endian memory access for now */
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_ld_tl(desth, addrl, ctx->mem_idx, MO_TEUQ);
    }

    gen_set_gpr128(ctx, a->rd, destl, desth);

    tcg_temp_free(addrl);
    return true;
}

static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    if (get_xl(ctx) == MXL_RV128) {
        return gen_load_i128(ctx, a, memop);
    } else {
        return gen_load_tl(ctx, a, memop);
    }
}

static bool trans_lb(DisasContext *ctx, arg_lb *a)
{
    return gen_load(ctx, a, MO_SB);
}

static bool trans_lh(DisasContext *ctx, arg_lh *a)
{
    return gen_load(ctx, a, MO_TESW);
}

static bool trans_lw(DisasContext *ctx, arg_lw *a)
{
    return gen_load(ctx, a, MO_TESL);
}

static bool trans_ld(DisasContext *ctx, arg_ld *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TESQ);
}

static bool trans_lq(DisasContext *ctx, arg_lq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUO);
}

static bool trans_lbu(DisasContext *ctx, arg_lbu *a)
{
    return gen_load(ctx, a, MO_UB);
}

static bool trans_lhu(DisasContext *ctx, arg_lhu *a)
{
    return gen_load(ctx, a, MO_TEUW);
}

static bool trans_lwu(DisasContext *ctx, arg_lwu *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUL);
}

static bool trans_ldu(DisasContext *ctx, arg_ldu *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUQ);
}

static bool gen_store_tl(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv addr = get_address(ctx, a->rs1, a->imm);
    TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);

    tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
    return true;
}

static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv src2l = get_gpr(ctx, a->rs2, EXT_NONE);
    TCGv src2h = get_gprh(ctx, a->rs2);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, memop);
    } else {
        /* little-endian memory access assumed for now */
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_st_tl(src2h, addrl, ctx->mem_idx, MO_TEUQ);
    }

    tcg_temp_free(addrl);
    return true;
}

static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    if (get_xl(ctx) == MXL_RV128) {
        return gen_store_i128(ctx, a, memop);
    } else {
        return gen_store_tl(ctx, a, memop);
    }
}

static bool trans_sb(DisasContext *ctx, arg_sb *a)
{
    return gen_store(ctx, a, MO_SB);
}

static bool trans_sh(DisasContext *ctx, arg_sh *a)
{
    return gen_store(ctx, a, MO_TESW);
}

static bool trans_sw(DisasContext *ctx, arg_sw *a)
{
    return gen_store(ctx, a, MO_TESL);
}

static bool trans_sd(DisasContext *ctx, arg_sd *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUQ);
}

static bool trans_sq(DisasContext *ctx, arg_sq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUO);
}

static bool trans_addd(DisasContext *ctx, arg_addd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

static bool trans_addid(DisasContext *ctx, arg_addid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

static bool trans_subd(DisasContext *ctx, arg_subd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}

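/* 128-bit add-immediate: the immediate is sign-extended into the high word. */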
static void gen_addi2_i128(TCGv retl, TCGv reth,
                           TCGv srcl, TCGv srch, target_long imm)
{
    TCGv imml  = tcg_constant_tl(imm);
    TCGv immh  = tcg_constant_tl(-(imm < 0));
    tcg_gen_add2_tl(retl, reth, srcl, srch, imml, immh);
}

static bool trans_addi(DisasContext *ctx, arg_addi *a)
{
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, gen_addi2_i128);
}

static void gen_slt(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LT, ret, s1, s2);
}

static void gen_slt_i128(TCGv retl, TCGv reth,
                         TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LT);
}

static void gen_sltu(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LTU, ret, s1, s2);
}

static void gen_sltu_i128(TCGv retl, TCGv reth,
                          TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LTU);
}

static bool trans_slti(DisasContext *ctx, arg_slti *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}

static bool trans_xori(DisasContext *ctx, arg_xori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_xori_tl);
}

static bool trans_ori(DisasContext *ctx, arg_ori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_ori_tl);
}

static bool trans_andi(DisasContext *ctx, arg_andi *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_andi_tl);
}

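/*
 * 128-bit left shift by a constant: for shamt >= 64 only the high word
 * receives bits; otherwise extract2 assembles the new high word from the
 * top of the low word and the shifted high word.
 */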
static void gen_slli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_shli_tl(reth, src1l, shamt - 64);
        tcg_gen_movi_tl(retl, 0);
    } else {
        tcg_gen_extract2_tl(reth, src1l, src1h, 64 - shamt);
        tcg_gen_shli_tl(retl, src1l, shamt);
    }
}

static bool trans_slli(DisasContext *ctx, arg_slli *a)
{
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, gen_slli_i128);
}

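/* srliw: shift within the low 32 bits; extract also zeroes the upper bits. */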
static void gen_srliw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_extract_tl(dst, src, shamt, 32 - shamt);
}

static void gen_srli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_shri_tl(retl, src1h, shamt - 64);
        tcg_gen_movi_tl(reth, 0);
    } else {
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_shri_tl(reth, src1h, shamt);
    }
}

static bool trans_srli(DisasContext *ctx, arg_srli *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_shri_tl, gen_srliw, gen_srli_i128);
}

static void gen_sraiw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_sextract_tl(dst, src, shamt, 32 - shamt);
}

static void gen_srai_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_sari_tl(retl, src1h, shamt - 64);
        tcg_gen_sari_tl(reth, src1h, 63);
    } else {
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_sari_tl(reth, src1h, shamt);
    }
}

static bool trans_srai(DisasContext *ctx, arg_srai *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_sari_tl, gen_sraiw, gen_srai_i128);
}

static bool trans_add(DisasContext *ctx, arg_add *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, tcg_gen_add2_tl);
}

static bool trans_sub(DisasContext *ctx, arg_sub *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, tcg_gen_sub2_tl);
}

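/*
 * 128-bit left shift by a register amount (0..127): 'hs' selects the
 * shamt >= 64 case, 'ls' and 'rs' are the amount mod 64 and its
 * complement, and the movcond zeroes the bits carried across the word
 * boundary when the shift amount is 0, since the complementary shift of
 * 64 cannot be expressed.
 */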
static void gen_sll_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(ls, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(rs, shamt, 63);

    tcg_gen_shl_tl(ll, src1l, ls);
    tcg_gen_shl_tl(h0, src1h, ls);
    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_movcond_tl(TCG_COND_NE, lr, shamt, zero, lr, zero);
    tcg_gen_or_tl(h1, h0, lr);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, zero, ll);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, ll, h1);

    tcg_temp_free(ls);
    tcg_temp_free(rs);
    tcg_temp_free(hs);
    tcg_temp_free(ll);
    tcg_temp_free(lr);
    tcg_temp_free(h0);
    tcg_temp_free(h1);
}

static bool trans_sll(DisasContext *ctx, arg_sll *a)
{
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, gen_sll_i128);
}

static bool trans_slt(DisasContext *ctx, arg_slt *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

static bool trans_sltu(DisasContext *ctx, arg_sltu *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}

static void gen_srl_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_shr_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, zero, h1);

    tcg_temp_free(ls);
    tcg_temp_free(rs);
    tcg_temp_free(hs);
    tcg_temp_free(ll);
    tcg_temp_free(lr);
    tcg_temp_free(h0);
    tcg_temp_free(h1);
}

static bool trans_srl(DisasContext *ctx, arg_srl *a)
{
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, gen_srl_i128);
}

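/* As gen_srl_i128, but the vacated high bits are filled with the sign bit. */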
static void gen_sra_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_sar_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);
    tcg_gen_sari_tl(lr, src1h, 63);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, lr, h1);

    tcg_temp_free(ls);
    tcg_temp_free(rs);
    tcg_temp_free(hs);
    tcg_temp_free(ll);
    tcg_temp_free(lr);
    tcg_temp_free(h0);
    tcg_temp_free(h1);
}

static bool trans_sra(DisasContext *ctx, arg_sra *a)
{
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, gen_sra_i128);
}

static bool trans_xor(DisasContext *ctx, arg_xor *a)
{
    return gen_logic(ctx, a, tcg_gen_xor_tl);
}

static bool trans_or(DisasContext *ctx, arg_or *a)
{
    return gen_logic(ctx, a, tcg_gen_or_tl);
}

static bool trans_and(DisasContext *ctx, arg_and *a)
{
    return gen_logic(ctx, a, tcg_gen_and_tl);
}

static bool trans_addiw(DisasContext *ctx, arg_addiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

static bool trans_slliw(DisasContext *ctx, arg_slliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

static bool trans_srliw(DisasContext *ctx, arg_srliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_srliw, NULL);
}

static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_sraiw, NULL);
}

static bool trans_sllid(DisasContext *ctx, arg_sllid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

static bool trans_srlid(DisasContext *ctx, arg_srlid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shri_tl, NULL);
}

static bool trans_sraid(DisasContext *ctx, arg_sraid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_sari_tl, NULL);
}

static bool trans_addw(DisasContext *ctx, arg_addw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

static bool trans_subw(DisasContext *ctx, arg_subw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}

static bool trans_sllw(DisasContext *ctx, arg_sllw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

static bool trans_srlw(DisasContext *ctx, arg_srlw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

static bool trans_sraw(DisasContext *ctx, arg_sraw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}

static bool trans_slld(DisasContext *ctx, arg_slld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

static bool trans_srld(DisasContext *ctx, arg_srld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

static bool trans_srad(DisasContext *ctx, arg_srad *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}

static bool trans_fence(DisasContext *ctx, arg_fence *a)
{
    /* FENCE is a full memory barrier. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    return true;
}

static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
{
    if (!ctx->cfg_ptr->ext_ifencei) {
        return false;
    }

    /*
     * FENCE_I is a no-op in QEMU,
     * however we need to end the translation block
     */
    gen_set_pc_imm(ctx, ctx->pc_succ_insn);
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

static bool do_csr_post(DisasContext *ctx)
{
    /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
    decode_save_opc(ctx);
    /* We may have changed important cpu state -- exit to main loop. */
    gen_set_pc_imm(ctx, ctx->pc_succ_insn);
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

static bool do_csrr(DisasContext *ctx, int rd, int rc)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrr(dest, cpu_env, csr);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}

static bool do_csrw(DisasContext *ctx, int rc, TCGv src)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrw(cpu_env, csr, src);
    return do_csr_post(ctx);
}

static bool do_csrrw(DisasContext *ctx, int rd, int rc, TCGv src, TCGv mask)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrrw(dest, cpu_env, csr, src, mask);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}

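/*
 * The RV128 CSR helpers return the low 64 bits of the result directly and
 * pass the high 64 bits back through env->retxh.
 */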
static bool do_csrr_i128(DisasContext *ctx, int rd, int rc)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrr_i128(destl, cpu_env, csr);
    tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}

static bool do_csrw_i128(DisasContext *ctx, int rc, TCGv srcl, TCGv srch)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrw_i128(cpu_env, csr, srcl, srch);
    return do_csr_post(ctx);
}

static bool do_csrrw_i128(DisasContext *ctx, int rd, int rc,
                          TCGv srcl, TCGv srch, TCGv maskl, TCGv maskh)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrrw_i128(destl, cpu_env, csr, srcl, srch, maskl, maskh);
    tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}

static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a)
{
    RISCVMXL xl = get_xl(ctx);
    if (xl < MXL_RV128) {
        TCGv src = get_gpr(ctx, a->rs1, EXT_NONE);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw(ctx, a->csr, src);
        }

        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
                                                     (target_ulong)-1);
        return do_csrrw(ctx, a->rd, a->csr, src, mask);
    } else {
        TCGv srcl = get_gpr(ctx, a->rs1, EXT_NONE);
        TCGv srch = get_gprh(ctx, a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw_i128(ctx, a->csr, srcl, srch);
        }

        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw_i128(ctx, a->rd, a->csr, srcl, srch, mask, mask);
    }
}

static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
        TCGv maskh = get_gprh(ctx, a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, maskl, maskh);
    }
}

static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
        TCGv maskh = get_gprh(ctx, a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr,
                             ctx->zero, ctx->zero, maskl, maskh);
    }
}

static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a)
{
    RISCVMXL xl = get_xl(ctx);
    if (xl < MXL_RV128) {
        TCGv src = tcg_constant_tl(a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw(ctx, a->csr, src);
        }

        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
                                                     (target_ulong)-1);
        return do_csrrw(ctx, a->rd, a->csr, src, mask);
    } else {
        TCGv src = tcg_constant_tl(a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw_i128(ctx, a->csr, src, ctx->zero);
        }

        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw_i128(ctx, a->rd, a->csr, src, ctx->zero, mask, mask);
    }
}

static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, mask, ctx->zero);
    }
}

static bool trans_csrrci(DisasContext *ctx, arg_csrrci *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr,
                             ctx->zero, ctx->zero, mask, ctx->zero);
    }
}