1/*
2 * RISC-V translation routines for the RVXI Base Integer Instruction Set.
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
6 *                    Bastian Koppelmann, kbastian@mail.uni-paderborn.de
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2 or later, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program.  If not, see <http://www.gnu.org/licenses/>.
19 */
20
/* Raise an illegal-instruction exception for a reserved encoding. */
static bool trans_illegal(DisasContext *ctx, arg_empty *a)
{
    gen_exception_illegal(ctx);
    return true;
}

/* As trans_illegal, but the encoding is only illegal on RV64/RV128. */
static bool trans_c64_illegal(DisasContext *ctx, arg_empty *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return trans_illegal(ctx, a);
}
32
/* LUI: load the (pre-shifted) immediate into rd. */
static bool trans_lui(DisasContext *ctx, arg_lui *a)
{
    gen_set_gpri(ctx, a->rd, a->imm);
    return true;
}

/* AUIPC: rd = pc of this insn + immediate. */
static bool trans_auipc(DisasContext *ctx, arg_auipc *a)
{
    gen_set_gpri(ctx, a->rd, a->imm + ctx->base.pc_next);
    return true;
}

/* JAL: pc-relative jump with link; gen_jal handles the link write and TB exit. */
static bool trans_jal(DisasContext *ctx, arg_jal *a)
{
    gen_jal(ctx, a->rd, a->imm);
    return true;
}
50
/*
 * JALR: indirect jump with link.  Computes the target into cpu_pc,
 * optionally checks alignment, writes the link register, and exits
 * the translation block via an indirect lookup.
 */
static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
{
    TCGLabel *misaligned = NULL;

    /* Target = rs1 + imm with bit 0 cleared, as JALR requires. */
    tcg_gen_addi_tl(cpu_pc, get_gpr(ctx, a->rs1, EXT_NONE), a->imm);
    tcg_gen_andi_tl(cpu_pc, cpu_pc, (target_ulong)-2);

    gen_set_pc(ctx, cpu_pc);
    if (!ctx->cfg_ptr->ext_zca) {
        /*
         * Without the compressed-insn extension the target must be
         * 4-byte aligned: branch out if bit 1 of the new pc is set.
         */
        TCGv t0 = tcg_temp_new();

        misaligned = gen_new_label();
        tcg_gen_andi_tl(t0, cpu_pc, 0x2);
        tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
    }

    /*
     * Write the link register only after the target has been computed,
     * so the write cannot clobber the source when rd == rs1.
     */
    gen_set_gpri(ctx, a->rd, ctx->pc_succ_insn);
    lookup_and_goto_ptr(ctx);

    if (misaligned) {
        gen_set_label(misaligned);
        gen_exception_inst_addr_mis(ctx);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}
78
/*
 * Emit code comparing the 128-bit values [ah:al] and [bh:bl] under 'cond'.
 * On return, 'rl' holds a value which, when compared against zero with the
 * returned condition, yields the 128-bit comparison result.  'bz' indicates
 * the second operand is known to be zero, enabling cheaper sequences.
 */
static TCGCond gen_compare_i128(bool bz, TCGv rl,
                                TCGv al, TCGv ah, TCGv bl, TCGv bh,
                                TCGCond cond)
{
    TCGv rh = tcg_temp_new();
    bool invert = false;

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* rl == 0 iff both halves are equal (or both zero when bz). */
        if (bz) {
            tcg_gen_or_tl(rl, al, ah);
        } else {
            tcg_gen_xor_tl(rl, al, bl);
            tcg_gen_xor_tl(rh, ah, bh);
            tcg_gen_or_tl(rl, rl, rh);
        }
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        if (bz) {
            /* Sign of a alone decides the signed comparison against 0. */
            tcg_gen_mov_tl(rl, ah);
        } else {
            TCGv tmp = tcg_temp_new();

            /*
             * rl = corrected sign of (a - b): the sign bit of the high
             * word of the difference, flipped when signed overflow
             * occurred (operand signs differ and result sign differs
             * from ah).
             */
            tcg_gen_sub2_tl(rl, rh, al, ah, bl, bh);
            tcg_gen_xor_tl(rl, rh, ah);
            tcg_gen_xor_tl(tmp, ah, bh);
            tcg_gen_and_tl(rl, rl, tmp);
            tcg_gen_xor_tl(rl, rh, rl);
        }
        break;

    case TCG_COND_LTU:
        invert = true;
        /* fallthrough */
    case TCG_COND_GEU:
        {
            TCGv tmp = tcg_temp_new();
            TCGv zero = tcg_constant_tl(0);
            TCGv one = tcg_constant_tl(1);

            /*
             * Propagate the borrow of the full 128-bit subtract into a
             * seeded third word: rl ends up 1 when no borrow occurred
             * (a >= b unsigned), 0 otherwise.  The LTU result is the
             * inversion applied below.
             */
            cond = TCG_COND_NE;
            /* borrow in to second word */
            tcg_gen_setcond_tl(TCG_COND_LTU, tmp, al, bl);
            /* seed third word with 1, which will be result */
            tcg_gen_sub2_tl(tmp, rh, ah, one, tmp, zero);
            tcg_gen_sub2_tl(tmp, rl, tmp, rh, bh, zero);
        }
        break;

    default:
        g_assert_not_reached();
    }

    if (invert) {
        cond = tcg_invert_cond(cond);
    }
    return cond;
}
140
/*
 * 128-bit setcond: [rh:rl] = (src1 cond src2) ? 1 : 0.
 * The comparison result is materialized into the low word; the
 * high word of a boolean is always zero.
 */
static void gen_setcond_i128(TCGv rl, TCGv rh,
                             TCGv src1l, TCGv src1h,
                             TCGv src2l, TCGv src2h,
                             TCGCond cond)
{
    cond = gen_compare_i128(false, rl, src1l, src1h, src2l, src2h, cond);
    tcg_gen_setcondi_tl(cond, rl, rl, 0);
    tcg_gen_movi_tl(rh, 0);
}
150
/*
 * Common helper for all conditional branches: fall through to the next
 * insn when the condition fails, otherwise jump to pc + imm (raising a
 * misaligned-instruction exception when compressed insns are disabled
 * and the target is not 4-byte aligned).
 */
static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
{
    TCGLabel *l = gen_new_label();
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_SIGN);
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_SIGN);

    if (get_xl(ctx) == MXL_RV128) {
        TCGv src1h = get_gprh(ctx, a->rs1);
        TCGv src2h = get_gprh(ctx, a->rs2);
        TCGv tmp = tcg_temp_new();

        /* Reduce the 128-bit compare to a compare of tmp against zero. */
        cond = gen_compare_i128(a->rs2 == 0,
                                tmp, src1, src1h, src2, src2h, cond);
        tcg_gen_brcondi_tl(cond, tmp, 0, l);
    } else {
        tcg_gen_brcond_tl(cond, src1, src2, l);
    }
    gen_goto_tb(ctx, 1, ctx->pc_succ_insn);

    gen_set_label(l); /* branch taken */

    if (!ctx->cfg_ptr->ext_zca && ((ctx->base.pc_next + a->imm) & 0x3)) {
        /* misaligned */
        gen_exception_inst_addr_mis(ctx);
    } else {
        gen_goto_tb(ctx, 0, ctx->base.pc_next + a->imm);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}
182
/* BEQ: branch if equal. */
static bool trans_beq(DisasContext *ctx, arg_beq *a)
{
    return gen_branch(ctx, a, TCG_COND_EQ);
}

/* BNE: branch if not equal. */
static bool trans_bne(DisasContext *ctx, arg_bne *a)
{
    return gen_branch(ctx, a, TCG_COND_NE);
}

/* BLT: branch if signed less-than. */
static bool trans_blt(DisasContext *ctx, arg_blt *a)
{
    return gen_branch(ctx, a, TCG_COND_LT);
}

/* BGE: branch if signed greater-or-equal. */
static bool trans_bge(DisasContext *ctx, arg_bge *a)
{
    return gen_branch(ctx, a, TCG_COND_GE);
}

/* BLTU: branch if unsigned less-than. */
static bool trans_bltu(DisasContext *ctx, arg_bltu *a)
{
    return gen_branch(ctx, a, TCG_COND_LTU);
}

/* BGEU: branch if unsigned greater-or-equal. */
static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a)
{
    return gen_branch(ctx, a, TCG_COND_GEU);
}
212
/* Load of up to target_long width: rd = mem[rs1 + imm], per 'memop'. */
static bool gen_load_tl(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv addr = get_address(ctx, a->rs1, a->imm);

    tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, memop);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}
222
/* Compute only 64-bit addresses to use the address translation mechanism */
static bool gen_load_i128(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv destl = dest_gpr(ctx, a->rd);
    TCGv desth = dest_gprh(ctx, a->rd);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        /* Narrow load: fill the high half by sign- or zero-extension. */
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_tl(desth, destl, 63);
        } else {
            tcg_gen_movi_tl(desth, 0);
        }
    } else {
        /* assume little-endian memory access for now */
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_ld_tl(desth, addrl, ctx->mem_idx, MO_TEUQ);
    }

    gen_set_gpr128(ctx, a->rd, destl, desth);
    return true;
}
250
251static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
252{
253    decode_save_opc(ctx);
254    if (get_xl(ctx) == MXL_RV128) {
255        return gen_load_i128(ctx, a, memop);
256    } else {
257        return gen_load_tl(ctx, a, memop);
258    }
259}
260
/* LB: sign-extended byte load. */
static bool trans_lb(DisasContext *ctx, arg_lb *a)
{
    return gen_load(ctx, a, MO_SB);
}

/* LH: sign-extended halfword load. */
static bool trans_lh(DisasContext *ctx, arg_lh *a)
{
    return gen_load(ctx, a, MO_TESW);
}

/* LW: sign-extended word load. */
static bool trans_lw(DisasContext *ctx, arg_lw *a)
{
    return gen_load(ctx, a, MO_TESL);
}

/* LD: doubleword load (RV64/RV128 only). */
static bool trans_ld(DisasContext *ctx, arg_ld *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TESQ);
}

/* LQ: quadword load (RV128 only). */
static bool trans_lq(DisasContext *ctx, arg_lq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUO);
}

/* LBU: zero-extended byte load. */
static bool trans_lbu(DisasContext *ctx, arg_lbu *a)
{
    return gen_load(ctx, a, MO_UB);
}

/* LHU: zero-extended halfword load. */
static bool trans_lhu(DisasContext *ctx, arg_lhu *a)
{
    return gen_load(ctx, a, MO_TEUW);
}

/* LWU: zero-extended word load (RV64/RV128 only). */
static bool trans_lwu(DisasContext *ctx, arg_lwu *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUL);
}

/* LDU: zero-extended doubleword load (RV128 only). */
static bool trans_ldu(DisasContext *ctx, arg_ldu *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUQ);
}
309
/* Store of up to target_long width: mem[rs1 + imm] = rs2, per 'memop'. */
static bool gen_store_tl(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv addr = get_address(ctx, a->rs1, a->imm);
    TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);

    tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
    return true;
}
318
/* RV128 store: only the low 64 bits of the address are used (see gen_load_i128). */
static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv src2l = get_gpr(ctx, a->rs2, EXT_NONE);
    TCGv src2h = get_gprh(ctx, a->rs2);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, memop);
    } else {
        /* little-endian memory access assumed for now */
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_st_tl(src2h, addrl, ctx->mem_idx, MO_TEUQ);
    }
    return true;
}
338
339static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
340{
341    decode_save_opc(ctx);
342    if (get_xl(ctx) == MXL_RV128) {
343        return gen_store_i128(ctx, a, memop);
344    } else {
345        return gen_store_tl(ctx, a, memop);
346    }
347}
348
/* SB: byte store. */
static bool trans_sb(DisasContext *ctx, arg_sb *a)
{
    return gen_store(ctx, a, MO_SB);
}

/* SH: halfword store. */
static bool trans_sh(DisasContext *ctx, arg_sh *a)
{
    return gen_store(ctx, a, MO_TESW);
}

/* SW: word store. */
static bool trans_sw(DisasContext *ctx, arg_sw *a)
{
    return gen_store(ctx, a, MO_TESL);
}

/* SD: doubleword store (RV64/RV128 only). */
static bool trans_sd(DisasContext *ctx, arg_sd *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUQ);
}

/* SQ: quadword store (RV128 only). */
static bool trans_sq(DisasContext *ctx, arg_sq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUO);
}
375
/* ADDD (RV128): 64-bit add; ctx->ol narrows the operation length to RV64. */
static bool trans_addd(DisasContext *ctx, arg_addd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

/* ADDID (RV128): 64-bit add-immediate. */
static bool trans_addid(DisasContext *ctx, arg_addid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

/* SUBD (RV128): 64-bit subtract. */
static bool trans_subd(DisasContext *ctx, arg_subd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}
396
/*
 * 128-bit add-immediate: [reth:retl] = [srch:srcl] + sext128(imm).
 * The immediate's high word is all-ones when imm is negative.
 */
static void gen_addi2_i128(TCGv retl, TCGv reth,
                           TCGv srcl, TCGv srch, target_long imm)
{
    TCGv imml  = tcg_constant_tl(imm);
    TCGv immh  = tcg_constant_tl(-(imm < 0));
    tcg_gen_add2_tl(retl, reth, srcl, srch, imml, immh);
}

/* ADDI: rd = rs1 + imm (128-bit variant via gen_addi2_i128). */
static bool trans_addi(DisasContext *ctx, arg_addi *a)
{
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, gen_addi2_i128);
}
409
/* ret = (s1 < s2) signed, as 0/1. */
static void gen_slt(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LT, ret, s1, s2);
}

/* 128-bit signed set-less-than. */
static void gen_slt_i128(TCGv retl, TCGv reth,
                         TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LT);
}

/* ret = (s1 < s2) unsigned, as 0/1. */
static void gen_sltu(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LTU, ret, s1, s2);
}

/* 128-bit unsigned set-less-than. */
static void gen_sltu_i128(TCGv retl, TCGv reth,
                          TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LTU);
}
430}
431
/* SLTI: rd = (rs1 < imm) signed.  EXT_SIGN so narrower XLs compare correctly. */
static bool trans_slti(DisasContext *ctx, arg_slti *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

/* SLTIU: rd = (rs1 < imm) unsigned (imm is still sign-extended per the ISA). */
static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}

/* XORI: rd = rs1 ^ imm. */
static bool trans_xori(DisasContext *ctx, arg_xori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_xori_tl);
}

/* ORI: rd = rs1 | imm. */
static bool trans_ori(DisasContext *ctx, arg_ori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_ori_tl);
}

/* ANDI: rd = rs1 & imm. */
static bool trans_andi(DisasContext *ctx, arg_andi *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_andi_tl);
}
455}
456
/*
 * 128-bit shift-left-immediate.  For shamt >= 64 the low word is shifted
 * into the high word and the low word becomes zero; otherwise extract2
 * assembles the new high word from both source halves.
 * Note: shamt == 0 is handled by the caller, so 64 - shamt is in range.
 */
static void gen_slli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_shli_tl(reth, src1l, shamt - 64);
        tcg_gen_movi_tl(retl, 0);
    } else {
        tcg_gen_extract2_tl(reth, src1l, src1h, 64 - shamt);
        tcg_gen_shli_tl(retl, src1l, shamt);
    }
}

/* SLLI: shift left logical immediate. */
static bool trans_slli(DisasContext *ctx, arg_slli *a)
{
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, gen_slli_i128);
}
474
/* 32-bit logical right shift: extract bits [shamt, 32) of the low word. */
static void gen_srliw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_extract_tl(dst, src, shamt, 32 - shamt);
}

/*
 * 128-bit shift-right-logical-immediate.  For shamt >= 64 the high word
 * is shifted into the low word; otherwise extract2 assembles the new
 * low word from both source halves.
 */
static void gen_srli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_shri_tl(retl, src1h, shamt - 64);
        tcg_gen_movi_tl(reth, 0);
    } else {
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_shri_tl(reth, src1h, shamt);
    }
}

/* SRLI: per-operation-length dispatch (RV32 form, target_long form, RV128 form). */
static bool trans_srli(DisasContext *ctx, arg_srli *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_shri_tl, gen_srliw, gen_srli_i128);
}
498
/* 32-bit arithmetic right shift: sign-extract bits [shamt, 32) of the low word. */
static void gen_sraiw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_sextract_tl(dst, src, shamt, 32 - shamt);
}

/*
 * 128-bit shift-right-arithmetic-immediate.  Like gen_srli_i128 but the
 * vacated high bits replicate the sign bit of the high word.
 */
static void gen_srai_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_sari_tl(retl, src1h, shamt - 64);
        tcg_gen_sari_tl(reth, src1h, 63);
    } else {
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_sari_tl(reth, src1h, shamt);
    }
}

/* SRAI: per-operation-length dispatch (RV32 form, target_long form, RV128 form). */
static bool trans_srai(DisasContext *ctx, arg_srai *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_sari_tl, gen_sraiw, gen_srai_i128);
}
522
/* ADD: rd = rs1 + rs2 (add2/sub2 provide the 128-bit forms). */
static bool trans_add(DisasContext *ctx, arg_add *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, tcg_gen_add2_tl);
}

/* SUB: rd = rs1 - rs2. */
static bool trans_sub(DisasContext *ctx, arg_sub *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, tcg_gen_sub2_tl);
}
532
/*
 * 128-bit variable shift left.  'hs' selects the >=64 case, 'ls' is the
 * in-word shift amount, and 'rs' the complementary right shift used to
 * carry bits between words.  The movcond on 'lr' zeroes the carried bits
 * when shamt is 0 (a right shift by 64 would otherwise be undefined).
 * Note: this clobbers the 'shamt' temporary (negated in place).
 */
static void gen_sll_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(ls, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(rs, shamt, 63);

    tcg_gen_shl_tl(ll, src1l, ls);
    tcg_gen_shl_tl(h0, src1h, ls);
    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_movcond_tl(TCG_COND_NE, lr, shamt, zero, lr, zero);
    tcg_gen_or_tl(h1, h0, lr);

    /* If shifting by >= 64, the low result goes to the high word. */
    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, zero, ll);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, ll, h1);
}

/* SLL: shift left logical by register. */
static bool trans_sll(DisasContext *ctx, arg_sll *a)
{
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, gen_sll_i128);
}
564
/* SLT: rd = (rs1 < rs2) signed. */
static bool trans_slt(DisasContext *ctx, arg_slt *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

/* SLTU: rd = (rs1 < rs2) unsigned. */
static bool trans_sltu(DisasContext *ctx, arg_sltu *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}
574
/*
 * 128-bit variable shift right logical.  Mirror image of gen_sll_i128:
 * bits from the high word are shifted left by the complementary amount
 * and merged into the low result; the movcond on 'll' guards shamt == 0.
 * Note: this clobbers the 'shamt' temporary (negated in place).
 */
static void gen_srl_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_shr_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);

    /* If shifting by >= 64, the shifted high word becomes the low result. */
    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, zero, h1);
}

/* SRL: shift right logical by register. */
static bool trans_srl(DisasContext *ctx, arg_srl *a)
{
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, gen_srl_i128);
}
606
/*
 * 128-bit variable shift right arithmetic.  As gen_srl_i128, but the
 * high word shifts arithmetically and, for shamt >= 64, the new high
 * word is the replicated sign bit ('lr' reused to hold it).
 * Note: this clobbers the 'shamt' temporary (negated in place).
 */
static void gen_sra_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_sar_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);
    tcg_gen_sari_tl(lr, src1h, 63);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, lr, h1);
}

/* SRA: shift right arithmetic by register. */
static bool trans_sra(DisasContext *ctx, arg_sra *a)
{
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, gen_sra_i128);
}
639
/* XOR: rd = rs1 ^ rs2. */
static bool trans_xor(DisasContext *ctx, arg_xor *a)
{
    return gen_logic(ctx, a, tcg_gen_xor_tl);
}

/* OR: rd = rs1 | rs2. */
static bool trans_or(DisasContext *ctx, arg_or *a)
{
    return gen_logic(ctx, a, tcg_gen_or_tl);
}

/* AND: rd = rs1 & rs2. */
static bool trans_and(DisasContext *ctx, arg_and *a)
{
    return gen_logic(ctx, a, tcg_gen_and_tl);
}
654
/*
 * RV64/RV128 word-width (".W") and RV128 doubleword-width (".D")
 * immediate forms: each narrows ctx->ol so the shared gen_* helpers
 * operate (and extend results) at the reduced width.
 */
static bool trans_addiw(DisasContext *ctx, arg_addiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

static bool trans_slliw(DisasContext *ctx, arg_slliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

static bool trans_srliw(DisasContext *ctx, arg_srliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_srliw, NULL);
}

static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_sraiw, NULL);
}

static bool trans_sllid(DisasContext *ctx, arg_sllid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

static bool trans_srlid(DisasContext *ctx, arg_srlid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shri_tl, NULL);
}

static bool trans_sraid(DisasContext *ctx, arg_sraid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_sari_tl,  NULL);
}
703
/*
 * Register-register ".W" (RV64/RV128) and ".D" (RV128) forms; as with
 * the immediate variants, ctx->ol narrows the operation length.
 */
static bool trans_addw(DisasContext *ctx, arg_addw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

static bool trans_subw(DisasContext *ctx, arg_subw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}

static bool trans_sllw(DisasContext *ctx, arg_sllw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

static bool trans_srlw(DisasContext *ctx, arg_srlw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

static bool trans_sraw(DisasContext *ctx, arg_sraw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}

static bool trans_slld(DisasContext *ctx, arg_slld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

static bool trans_srld(DisasContext *ctx, arg_srld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

static bool trans_srad(DisasContext *ctx, arg_srad *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}
759
/* PAUSE hint (Zihintpause): decoded only when the extension is enabled. */
static bool trans_pause(DisasContext *ctx, arg_pause *a)
{
    if (!ctx->cfg_ptr->ext_zihintpause) {
        return false;
    }

    /*
     * PAUSE is a no-op in QEMU,
     * end the TB and return to main loop
     */
    gen_set_pc_imm(ctx, ctx->pc_succ_insn);
    exit_tb(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}
776
static bool trans_fence(DisasContext *ctx, arg_fence *a)
{
    /* FENCE is a full memory barrier. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    return true;
}
783
/* FENCE.I (Zifencei): instruction-fetch fence. */
static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
{
    if (!ctx->cfg_ptr->ext_ifencei) {
        return false;
    }

    /*
     * FENCE_I is a no-op in QEMU,
     * however we need to end the translation block
     */
    gen_set_pc_imm(ctx, ctx->pc_succ_insn);
    exit_tb(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
799
/* Common tail for all CSR accesses. */
static bool do_csr_post(DisasContext *ctx)
{
    /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
    decode_save_opc(ctx);
    /* We may have changed important cpu state -- exit to main loop. */
    gen_set_pc_imm(ctx, ctx->pc_succ_insn);
    exit_tb(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
810
/* CSR read only: rd = csr.  gen_io_start covers icount-sensitive CSRs. */
static bool do_csrr(DisasContext *ctx, int rd, int rc)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrr(dest, cpu_env, csr);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}

/* CSR write only: csr = src, no read side effects. */
static bool do_csrw(DisasContext *ctx, int rc, TCGv src)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrw(cpu_env, csr, src);
    return do_csr_post(ctx);
}
834
/* CSR read-modify-write: rd = csr; csr = (csr & ~mask) | (src & mask). */
static bool do_csrrw(DisasContext *ctx, int rd, int rc, TCGv src, TCGv mask)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrrw(dest, cpu_env, csr, src, mask);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}
847
/*
 * RV128 CSR accessors: the helpers return/accept the low 64 bits
 * directly, with the high half passed through env->retxh.
 */
static bool do_csrr_i128(DisasContext *ctx, int rd, int rc)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrr_i128(destl, cpu_env, csr);
    tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}

/* 128-bit CSR write only. */
static bool do_csrw_i128(DisasContext *ctx, int rc, TCGv srcl, TCGv srch)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrw_i128(cpu_env, csr, srcl, srch);
    return do_csr_post(ctx);
}

/* 128-bit CSR read-modify-write under a 128-bit mask. */
static bool do_csrrw_i128(DisasContext *ctx, int rd, int rc,
                          TCGv srcl, TCGv srch, TCGv maskl, TCGv maskh)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrrw_i128(destl, cpu_env, csr, srcl, srch, maskl, maskh);
    tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}
889
/* CSRRW: swap rs1 into the CSR; suppress the read when rd == x0. */
static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a)
{
    RISCVMXL xl = get_xl(ctx);
    if (xl < MXL_RV128) {
        TCGv src = get_gpr(ctx, a->rs1, EXT_NONE);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw(ctx, a->csr, src);
        }

        /* Full-width write mask (only the low 32 bits on RV32). */
        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
                                                     (target_ulong)-1);
        return do_csrrw(ctx, a->rd, a->csr, src, mask);
    } else {
        TCGv srcl = get_gpr(ctx, a->rs1, EXT_NONE);
        TCGv srch = get_gprh(ctx, a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw_i128(ctx, a->csr, srcl, srch);
        }

        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw_i128(ctx, a->rd, a->csr, srcl, srch, mask, mask);
    }
}
923
/* CSRRS: set the CSR bits selected by rs1; pure read when rs1 == x0. */
static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        /* Write all-ones through a mask of rs1: sets exactly rs1's bits. */
        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
        TCGv maskh = get_gprh(ctx, a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, maskl, maskh);
    }
}
952
/* CSRRC: clear the CSR bits selected by rs1; pure read when rs1 == x0. */
static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        /* Write zero through a mask of rs1: clears exactly rs1's bits. */
        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
        TCGv maskh = get_gprh(ctx, a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr,
                             ctx->zero, ctx->zero, maskl, maskh);
    }
}
980
/* CSRRWI: as CSRRW but the source is the 5-bit zimm encoded in rs1. */
static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a)
{
    RISCVMXL xl = get_xl(ctx);
    if (xl < MXL_RV128) {
        TCGv src = tcg_constant_tl(a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw(ctx, a->csr, src);
        }

        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
                                                     (target_ulong)-1);
        return do_csrrw(ctx, a->rd, a->csr, src, mask);
    } else {
        TCGv src = tcg_constant_tl(a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw_i128(ctx, a->csr, src, ctx->zero);
        }

        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw_i128(ctx, a->rd, a->csr, src, ctx->zero, mask, mask);
    }
}
1013
/* CSRRSI: set the CSR bits selected by the 5-bit zimm; pure read when zimm == 0. */
static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        /* zimm has no high half, so the high mask word is zero. */
        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, mask, ctx->zero);
    }
}
1041
/* CSRRCI: clear the CSR bits selected by the 5-bit zimm; pure read when zimm == 0. */
static bool trans_csrrci(DisasContext *ctx, arg_csrrci * a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr,
                             ctx->zero, ctx->zero, mask, ctx->zero);
    }
}
1068