1/*
2 * RISC-V translation routines for the RV64A Standard Extension.
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
6 *                    Bastian Koppelmann, kbastian@mail.uni-paderborn.de
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2 or later, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program.  If not, see <http://www.gnu.org/licenses/>.
19 */
20
/*
 * Load-reserved: load from [rs1] into rd and record the reservation for
 * a later SC -- address in load_res, loaded data in load_val.
 * Returns true (instruction successfully translated).
 */
static inline bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop)
{
    TCGv src1 = tcg_temp_new();
    /* Put addr in load_res, data in load_val.  */
    gen_get_gpr(src1, a->rs1);
    if (a->rl) {
        /* Release semantics: barrier must precede the memory access.  */
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    }
    tcg_gen_qemu_ld_tl(load_val, src1, ctx->mem_idx, mop);
    if (a->aq) {
        /* Acquire semantics: barrier must follow the memory access.  */
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    }
    /* Record the reserved address only after the load has succeeded.  */
    tcg_gen_mov_tl(load_res, src1);
    gen_set_gpr(a->rd, load_val);

    tcg_temp_free(src1);
    return true;
}
39
/*
 * Store-conditional: store rs2 to [rs1] iff the reservation taken by a
 * prior LR (load_res/load_val) is still intact.  Writes 0 to rd on
 * success, 1 on failure.  Returns true (instruction translated).
 */
static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop)
{
    TCGv src1 = tcg_temp_new();
    TCGv src2 = tcg_temp_new();
    TCGv dat = tcg_temp_new();
    TCGLabel *l1 = gen_new_label(); /* taken: address mismatch, SC fails */
    TCGLabel *l2 = gen_new_label(); /* common exit */

    gen_get_gpr(src1, a->rs1);
    /* Fail fast if the SC address differs from the reserved address.  */
    tcg_gen_brcond_tl(TCG_COND_NE, load_res, src1, l1);

    gen_get_gpr(src2, a->rs2);
    /*
     * Note that the TCG atomic primitives are SC,
     * so we can ignore AQ/RL along this path.
     */
    tcg_gen_atomic_cmpxchg_tl(src1, load_res, load_val, src2,
                              ctx->mem_idx, mop);
    /*
     * cmpxchg returns the prior memory value in src1: if it still equals
     * load_val the store happened (rd = 0), otherwise it failed (rd = 1).
     */
    tcg_gen_setcond_tl(TCG_COND_NE, dat, src1, load_val);
    gen_set_gpr(a->rd, dat);
    tcg_gen_br(l2);

    gen_set_label(l1);
    /*
     * Address comparison failure.  However, we still need to
     * provide the memory barrier implied by AQ/RL.
     */
    tcg_gen_mb(TCG_MO_ALL + a->aq * TCG_BAR_LDAQ + a->rl * TCG_BAR_STRL);
    tcg_gen_movi_tl(dat, 1);
    gen_set_gpr(a->rd, dat);

    gen_set_label(l2);
    /*
     * Clear the load reservation, since an SC must fail if there is
     * an SC to any address, in between an LR and SC pair.
     */
    tcg_gen_movi_tl(load_res, -1);

    tcg_temp_free(dat);
    tcg_temp_free(src1);
    tcg_temp_free(src2);
    return true;
}
83
84static bool gen_amo(DisasContext *ctx, arg_atomic *a,
85                    void(*func)(TCGv, TCGv, TCGv, TCGArg, MemOp),
86                    MemOp mop)
87{
88    TCGv src1 = tcg_temp_new();
89    TCGv src2 = tcg_temp_new();
90
91    gen_get_gpr(src1, a->rs1);
92    gen_get_gpr(src2, a->rs2);
93
94    (*func)(src2, src1, src2, ctx->mem_idx, mop);
95
96    gen_set_gpr(a->rd, src2);
97    tcg_temp_free(src1);
98    tcg_temp_free(src2);
99    return true;
100}
101
102static bool trans_lr_w(DisasContext *ctx, arg_lr_w *a)
103{
104    REQUIRE_EXT(ctx, RVA);
105    return gen_lr(ctx, a, (MO_ALIGN | MO_TESL));
106}
107
108static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a)
109{
110    REQUIRE_EXT(ctx, RVA);
111    return gen_sc(ctx, a, (MO_ALIGN | MO_TESL));
112}
113
114static bool trans_amoswap_w(DisasContext *ctx, arg_amoswap_w *a)
115{
116    REQUIRE_EXT(ctx, RVA);
117    return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TESL));
118}
119
120static bool trans_amoadd_w(DisasContext *ctx, arg_amoadd_w *a)
121{
122    REQUIRE_EXT(ctx, RVA);
123    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TESL));
124}
125
126static bool trans_amoxor_w(DisasContext *ctx, arg_amoxor_w *a)
127{
128    REQUIRE_EXT(ctx, RVA);
129    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TESL));
130}
131
132static bool trans_amoand_w(DisasContext *ctx, arg_amoand_w *a)
133{
134    REQUIRE_EXT(ctx, RVA);
135    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TESL));
136}
137
138static bool trans_amoor_w(DisasContext *ctx, arg_amoor_w *a)
139{
140    REQUIRE_EXT(ctx, RVA);
141    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TESL));
142}
143
144static bool trans_amomin_w(DisasContext *ctx, arg_amomin_w *a)
145{
146    REQUIRE_EXT(ctx, RVA);
147    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TESL));
148}
149
150static bool trans_amomax_w(DisasContext *ctx, arg_amomax_w *a)
151{
152    REQUIRE_EXT(ctx, RVA);
153    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TESL));
154}
155
156static bool trans_amominu_w(DisasContext *ctx, arg_amominu_w *a)
157{
158    REQUIRE_EXT(ctx, RVA);
159    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TESL));
160}
161
162static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a)
163{
164    REQUIRE_EXT(ctx, RVA);
165    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TESL));
166}
167
168static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a)
169{
170    REQUIRE_64BIT(ctx);
171    return gen_lr(ctx, a, MO_ALIGN | MO_TEQ);
172}
173
174static bool trans_sc_d(DisasContext *ctx, arg_sc_d *a)
175{
176    REQUIRE_64BIT(ctx);
177    return gen_sc(ctx, a, (MO_ALIGN | MO_TEQ));
178}
179
180static bool trans_amoswap_d(DisasContext *ctx, arg_amoswap_d *a)
181{
182    REQUIRE_64BIT(ctx);
183    return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEQ));
184}
185
186static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a)
187{
188    REQUIRE_64BIT(ctx);
189    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TEQ));
190}
191
192static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a)
193{
194    REQUIRE_64BIT(ctx);
195    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TEQ));
196}
197
198static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a)
199{
200    REQUIRE_64BIT(ctx);
201    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TEQ));
202}
203
204static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a)
205{
206    REQUIRE_64BIT(ctx);
207    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TEQ));
208}
209
210static bool trans_amomin_d(DisasContext *ctx, arg_amomin_d *a)
211{
212    REQUIRE_64BIT(ctx);
213    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TEQ));
214}
215
216static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a)
217{
218    REQUIRE_64BIT(ctx);
219    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TEQ));
220}
221
222static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a)
223{
224    REQUIRE_64BIT(ctx);
225    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TEQ));
226}
227
228static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a)
229{
230    REQUIRE_64BIT(ctx);
231    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TEQ));
232}
233