/*
 * RISC-V translation routines for the RV64D Standard Extension.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
 *                    Bastian Koppelmann, kbastian@mail.uni-paderborn.de
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

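/*
 * FLD: load a 64-bit value from memory into fpr[rd]. In this tree the
 * effective address (rs1 + imm) comes from the capability-mode dependent
 * helper and the load is emitted through the checked-address variant of
 * the TCG load op.
 */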
static bool trans_fld(DisasContext *ctx, arg_fld *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);
    const MemOp mop = MO_TEQ;
    TCGv_cap_checked_ptr t0 =
        get_capmode_dependent_load_addr(ctx, a->rs1, a->imm, mop);
    tcg_gen_qemu_ld_i64_with_checked_addr(cpu_fpr[a->rd], t0, ctx->mem_idx,
                                          mop);

    mark_fs_dirty(ctx);
    tcg_temp_free_cap_checked(t0);
    return true;
}

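/*
 * FSD: store the 64 bits of fpr[rs2] to memory, again through the
 * capability-mode dependent address helper. No FP register is written,
 * so mark_fs_dirty() is not needed here.
 */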
static bool trans_fsd(DisasContext *ctx, arg_fsd *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);
    const MemOp mop = MO_TEQ;
    TCGv_cap_checked_ptr t0 =
        get_capmode_dependent_store_addr(ctx, a->rs1, a->imm, mop);
    tcg_gen_qemu_st_i64_with_checked_addr(cpu_fpr[a->rs2], t0, ctx->mem_idx,
                                          mop);
    tcg_temp_free_cap_checked(t0);
    return true;
}

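/*
 * Fused multiply-add family. All four variants follow the same pattern:
 * select the rounding mode encoded in the instruction, call the softfloat
 * helper, and mark the FP state dirty:
 *   FMADD.D   rd =  (rs1 * rs2) + rs3
 *   FMSUB.D   rd =  (rs1 * rs2) - rs3
 *   FNMSUB.D  rd = -(rs1 * rs2) + rs3
 *   FNMADD.D  rd = -(rs1 * rs2) - rs3
 */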
static bool trans_fmadd_d(DisasContext *ctx, arg_fmadd_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);
    gen_set_rm(ctx, a->rm);
    gen_helper_fmadd_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
                       cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fmsub_d(DisasContext *ctx, arg_fmsub_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);
    gen_set_rm(ctx, a->rm);
    gen_helper_fmsub_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
                       cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fnmsub_d(DisasContext *ctx, arg_fnmsub_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);
    gen_set_rm(ctx, a->rm);
    gen_helper_fnmsub_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
                        cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fnmadd_d(DisasContext *ctx, arg_fnmadd_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);
    gen_set_rm(ctx, a->rm);
    gen_helper_fnmadd_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
                        cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
    mark_fs_dirty(ctx);
    return true;
}

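/*
 * Basic double-precision arithmetic (FADD.D, FSUB.D, FMUL.D, FDIV.D,
 * FSQRT.D). Each one sets the requested rounding mode and defers the
 * computation and exception-flag handling to the softfloat helpers.
 */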
static bool trans_fadd_d(DisasContext *ctx, arg_fadd_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    gen_set_rm(ctx, a->rm);
    gen_helper_fadd_d(cpu_fpr[a->rd], cpu_env,
                      cpu_fpr[a->rs1], cpu_fpr[a->rs2]);

    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fsub_d(DisasContext *ctx, arg_fsub_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    gen_set_rm(ctx, a->rm);
    gen_helper_fsub_d(cpu_fpr[a->rd], cpu_env,
                      cpu_fpr[a->rs1], cpu_fpr[a->rs2]);

    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fmul_d(DisasContext *ctx, arg_fmul_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    gen_set_rm(ctx, a->rm);
    gen_helper_fmul_d(cpu_fpr[a->rd], cpu_env,
                      cpu_fpr[a->rs1], cpu_fpr[a->rs2]);

    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fdiv_d(DisasContext *ctx, arg_fdiv_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    gen_set_rm(ctx, a->rm);
    gen_helper_fdiv_d(cpu_fpr[a->rd], cpu_env,
                      cpu_fpr[a->rs1], cpu_fpr[a->rs2]);

    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fsqrt_d(DisasContext *ctx, arg_fsqrt_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    gen_set_rm(ctx, a->rm);
    gen_helper_fsqrt_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);

    mark_fs_dirty(ctx);
    return true;
}

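/*
 * FSGNJ.D: rd takes the sign bit of rs2 and the exponent/mantissa of rs1.
 * When rs1 == rs2 this is the canonical FMOV.D encoding, so a plain move
 * is emitted; otherwise deposit copies rs1[62:0] under rs2's sign bit.
 */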
static bool trans_fsgnj_d(DisasContext *ctx, arg_fsgnj_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);
    if (a->rs1 == a->rs2) { /* FMOV */
        tcg_gen_mov_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1]);
    } else {
        tcg_gen_deposit_i64(cpu_fpr[a->rd], cpu_fpr[a->rs2],
                            cpu_fpr[a->rs1], 0, 63);
    }
    mark_fs_dirty(ctx);
    return true;
}

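/*
 * FSGNJN.D: rd takes the complement of rs2's sign bit and the
 * exponent/mantissa of rs1. rs1 == rs2 is the FNEG.D idiom, implemented
 * by XORing the sign bit (INT64_MIN) directly.
 */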
static bool trans_fsgnjn_d(DisasContext *ctx, arg_fsgnjn_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);
    if (a->rs1 == a->rs2) { /* FNEG */
        tcg_gen_xori_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1], INT64_MIN);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_not_i64(t0, cpu_fpr[a->rs2]);
        tcg_gen_deposit_i64(cpu_fpr[a->rd], t0, cpu_fpr[a->rs1], 0, 63);
        tcg_temp_free_i64(t0);
    }
    mark_fs_dirty(ctx);
    return true;
}

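/*
 * FSGNJX.D: rd's sign bit is the XOR of rs1's and rs2's sign bits, with
 * rs1's exponent/mantissa. rs1 == rs2 is the FABS.D idiom, implemented
 * by masking off the sign bit.
 */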
static bool trans_fsgnjx_d(DisasContext *ctx, arg_fsgnjx_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);
    if (a->rs1 == a->rs2) { /* FABS */
        tcg_gen_andi_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1], ~INT64_MIN);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_andi_i64(t0, cpu_fpr[a->rs2], INT64_MIN);
        tcg_gen_xor_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1], t0);
        tcg_temp_free_i64(t0);
    }
    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fmin_d(DisasContext *ctx, arg_fmin_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    gen_helper_fmin_d(cpu_fpr[a->rd], cpu_env,
                      cpu_fpr[a->rs1], cpu_fpr[a->rs2]);

    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fmax_d(DisasContext *ctx, arg_fmax_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    gen_helper_fmax_d(cpu_fpr[a->rd], cpu_env,
                      cpu_fpr[a->rs1], cpu_fpr[a->rs2]);

    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fcvt_s_d(DisasContext *ctx, arg_fcvt_s_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_s_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);

    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fcvt_d_s(DisasContext *ctx, arg_fcvt_d_s *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_d_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);

    mark_fs_dirty(ctx);
    return true;
}

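/*
 * Comparisons (FEQ.D, FLT.D, FLE.D) write a 0/1 result to the integer
 * register rd. They only read the FP registers, so the FS state is not
 * marked dirty.
 */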
static bool trans_feq_d(DisasContext *ctx, arg_feq_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv t0 = tcg_temp_new();
    gen_helper_feq_d(t0, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
    gen_set_gpr(a->rd, t0);
    tcg_temp_free(t0);

    return true;
}

static bool trans_flt_d(DisasContext *ctx, arg_flt_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv t0 = tcg_temp_new();
    gen_helper_flt_d(t0, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
    gen_set_gpr(a->rd, t0);
    tcg_temp_free(t0);

    return true;
}

static bool trans_fle_d(DisasContext *ctx, arg_fle_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv t0 = tcg_temp_new();
    gen_helper_fle_d(t0, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
    gen_set_gpr(a->rd, t0);
    tcg_temp_free(t0);

    return true;
}

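/*
 * FCLASS.D: write a classification mask for fpr[rs1] (infinities, normal,
 * subnormal, zero, NaN variants) to rd. It needs no rounding mode and
 * leaves the FP registers untouched.
 */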
static bool trans_fclass_d(DisasContext *ctx, arg_fclass_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv t0 = tcg_temp_new();
    gen_helper_fclass_d(t0, cpu_fpr[a->rs1]);
    gen_set_gpr(a->rd, t0);
    tcg_temp_free(t0);
    return true;
}

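/*
 * FCVT.W.D / FCVT.WU.D: convert the double in fpr[rs1] to a signed or
 * unsigned 32-bit integer and write it to the integer register rd, using
 * the rounding mode encoded in the instruction.
 */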
static bool trans_fcvt_w_d(DisasContext *ctx, arg_fcvt_w_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv t0 = tcg_temp_new();
    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_w_d(t0, cpu_env, cpu_fpr[a->rs1]);
    gen_set_gpr(a->rd, t0);
    tcg_temp_free(t0);

    return true;
}

static bool trans_fcvt_wu_d(DisasContext *ctx, arg_fcvt_wu_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv t0 = tcg_temp_new();
    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_wu_d(t0, cpu_env, cpu_fpr[a->rs1]);
    gen_set_gpr(a->rd, t0);
    tcg_temp_free(t0);

    return true;
}

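/*
 * FCVT.D.W / FCVT.D.WU: convert a signed or unsigned 32-bit integer read
 * from the integer register rs1 into a double in fpr[rd].
 */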
static bool trans_fcvt_d_w(DisasContext *ctx, arg_fcvt_d_w *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv t0 = tcg_temp_new();
    gen_get_gpr(t0, a->rs1);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_d_w(cpu_fpr[a->rd], cpu_env, t0);
    tcg_temp_free(t0);

    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fcvt_d_wu(DisasContext *ctx, arg_fcvt_d_wu *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv t0 = tcg_temp_new();
    gen_get_gpr(t0, a->rs1);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_d_wu(cpu_fpr[a->rd], cpu_env, t0);
    tcg_temp_free(t0);

    mark_fs_dirty(ctx);
    return true;
}

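/*
 * The remaining instructions (FCVT.L[U].D, FMV.X.D, FCVT.D.L[U], FMV.D.X)
 * exchange full 64-bit values with the integer registers and are therefore
 * only available on RV64.
 */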
#ifdef TARGET_RISCV64

static bool trans_fcvt_l_d(DisasContext *ctx, arg_fcvt_l_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv t0 = tcg_temp_new();
    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_l_d(t0, cpu_env, cpu_fpr[a->rs1]);
    gen_set_gpr(a->rd, t0);
    tcg_temp_free(t0);
    return true;
}

static bool trans_fcvt_lu_d(DisasContext *ctx, arg_fcvt_lu_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv t0 = tcg_temp_new();
    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_lu_d(t0, cpu_env, cpu_fpr[a->rs1]);
    gen_set_gpr(a->rd, t0);
    tcg_temp_free(t0);
    return true;
}

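/*
 * FMV.X.D: copy the raw IEEE 754 bit pattern of fpr[rs1] into the integer
 * register rd without any conversion.
 */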
static bool trans_fmv_x_d(DisasContext *ctx, arg_fmv_x_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    gen_set_gpr(a->rd, cpu_fpr[a->rs1]);
    return true;
}

static bool trans_fcvt_d_l(DisasContext *ctx, arg_fcvt_d_l *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv t0 = tcg_temp_new();
    gen_get_gpr(t0, a->rs1);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_d_l(cpu_fpr[a->rd], cpu_env, t0);
    tcg_temp_free(t0);
    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fcvt_d_lu(DisasContext *ctx, arg_fcvt_d_lu *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv t0 = tcg_temp_new();
    gen_get_gpr(t0, a->rs1);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_d_lu(cpu_fpr[a->rd], cpu_env, t0);
    tcg_temp_free(t0);
    mark_fs_dirty(ctx);
    return true;
}

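/*
 * FMV.D.X: copy the raw bit pattern of the integer register rs1 into
 * fpr[rd] without any conversion.
 */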
static bool trans_fmv_d_x(DisasContext *ctx, arg_fmv_d_x *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv t0 = tcg_temp_new();
    gen_get_gpr(t0, a->rs1);

    tcg_gen_mov_tl(cpu_fpr[a->rd], t0);
    tcg_temp_free(t0);
    mark_fs_dirty(ctx);
    return true;
}
#endif