/*
 * AArch64 specific helpers
 *
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "internals.h"
#include "qemu/crc32c.h"
#include <zlib.h> /* for crc32(), used by HELPER(crc32_64) below */

/* C2.4.7 Multiply and divide */
/* special cases for 0 and LLONG_MIN are mandated by the standard */
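/* In C, integer division by zero and LLONG_MIN / -1 are undefined
 * behaviour, so both cases must be filtered out before using the host
 * '/' operator.
 */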
uint64_t HELPER(udiv64)(uint64_t num, uint64_t den)
{
    if (den == 0) {
        return 0;
    }
    return num / den;
}

int64_t HELPER(sdiv64)(int64_t num, int64_t den)
{
    if (den == 0) {
        return 0;
    }
    if (num == LLONG_MIN && den == -1) {
        return LLONG_MIN;
    }
    return num / den;
}

uint64_t HELPER(clz64)(uint64_t x)
{
    return clz64(x);
}

uint64_t HELPER(cls64)(uint64_t x)
{
    return clrsb64(x);
}

uint32_t HELPER(cls32)(uint32_t x)
{
    return clrsb32(x);
}

uint32_t HELPER(clz32)(uint32_t x)
{
    return clz32(x);
}

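/* Reverse the 64 bits of x by swapping successively smaller groups:
 * bytes, then nibbles within each byte, then bits within each nibble.
 * For example, rbit64(0x0000000000000001ULL) == 0x8000000000000000ULL.
 */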
uint64_t HELPER(rbit64)(uint64_t x)
{
    /* assign the correct byte position */
    x = bswap64(x);

    /* assign the correct nibble position */
    x = ((x & 0xf0f0f0f0f0f0f0f0ULL) >> 4)
      | ((x & 0x0f0f0f0f0f0f0f0fULL) << 4);

    /* assign the correct bit position */
    x = ((x & 0x8888888888888888ULL) >> 3)
      | ((x & 0x4444444444444444ULL) >> 1)
      | ((x & 0x2222222222222222ULL) << 1)
      | ((x & 0x1111111111111111ULL) << 3);

    return x;
}

/* Convert a softfloat float_relation_ (as returned by
 * the float*_compare functions) to the correct ARM
 * NZCV flag state.
 */
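/* The mapping follows the ARM ARM definition of FCMP:
 *   equal        -> N=0 Z=1 C=1 V=0
 *   less than    -> N=1 Z=0 C=0 V=0
 *   greater than -> N=0 Z=0 C=1 V=0
 *   unordered    -> N=0 Z=0 C=1 V=1
 */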
static inline uint32_t float_rel_to_flags(int res)
{
    uint64_t flags;
    switch (res) {
    case float_relation_equal:
        flags = PSTATE_Z | PSTATE_C;
        break;
    case float_relation_less:
        flags = PSTATE_N;
        break;
    case float_relation_greater:
        flags = PSTATE_C;
        break;
    case float_relation_unordered:
    default:
        flags = PSTATE_C | PSTATE_V;
        break;
    }
    return flags;
}

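/* FCMP (the cmps/cmpd helpers) uses a quiet compare; FCMPE (the
 * cmpes/cmped helpers) uses a signaling compare, which also raises
 * Invalid Operation for quiet NaN inputs.
 */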
uint64_t HELPER(vfp_cmps_a64)(float32 x, float32 y, void *fp_status)
{
    return float_rel_to_flags(float32_compare_quiet(x, y, fp_status));
}

uint64_t HELPER(vfp_cmpes_a64)(float32 x, float32 y, void *fp_status)
{
    return float_rel_to_flags(float32_compare(x, y, fp_status));
}

uint64_t HELPER(vfp_cmpd_a64)(float64 x, float64 y, void *fp_status)
{
    return float_rel_to_flags(float64_compare_quiet(x, y, fp_status));
}

uint64_t HELPER(vfp_cmped_a64)(float64 x, float64 y, void *fp_status)
{
    return float_rel_to_flags(float64_compare(x, y, fp_status));
}

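/* FMULX differs from FMUL only in the 0 * infinity case: instead of the
 * default NaN it returns 2.0 with the XOR of the operand signs (see
 * FPMulX in the ARM ARM).
 */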
float32 HELPER(vfp_mulxs)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    if ((float32_is_zero(a) && float32_is_infinity(b)) ||
        (float32_is_infinity(a) && float32_is_zero(b))) {
        /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
        return make_float32((1U << 30) |
                            ((float32_val(a) ^ float32_val(b)) & (1U << 31)));
    }
    return float32_mul(a, b, fpst);
}

float64 HELPER(vfp_mulxd)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    if ((float64_is_zero(a) && float64_is_infinity(b)) ||
        (float64_is_infinity(a) && float64_is_zero(b))) {
        /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
        return make_float64((1ULL << 62) |
                            ((float64_val(a) ^ float64_val(b)) & (1ULL << 63)));
    }
    return float64_mul(a, b, fpst);
}

uint64_t HELPER(simd_tbl)(CPUARMState *env, uint64_t result, uint64_t indices,
                          uint32_t rn, uint32_t numregs)
{
    /* Helper function for SIMD TBL and TBX. We have to do the table
     * lookup part for the 64 bits worth of indices we're passed in.
     * result is the initial results vector (either zeroes for TBL
     * or some guest values for TBX), rn the register number where
     * the table starts, and numregs the number of registers in the table.
     * We return the results of the lookups.
     */
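    /* Out-of-range indices leave the corresponding result byte unchanged:
     * zero for TBL, the old destination byte for TBX.
     */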
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        int index = extract64(indices, shift, 8);
        if (index < 16 * numregs) {
            /* Convert index (a byte offset into the virtual table
             * which is a series of 128-bit vectors concatenated)
             * into the correct vfp.regs[] element plus a bit offset
             * into that element, bearing in mind that the table
             * can wrap around from V31 to V0.
             */
            int elt = (rn * 2 + (index >> 3)) % 64;
            int bitidx = (index & 7) * 8;
            uint64_t val = extract64(env->vfp.regs[elt], bitidx, 8);

            result = deposit64(result, shift, 8, val);
        }
    }
    return result;
}

/* 64bit/double versions of the neon float compare functions */
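/* Each returns 0 or all-ones: negating the 0/1 boolean from the softfloat
 * predicate yields the all-zeroes/all-ones mask that Neon compares produce.
 */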
uint64_t HELPER(neon_ceq_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_eq_quiet(a, b, fpst);
}

uint64_t HELPER(neon_cge_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_le(b, a, fpst);
}

uint64_t HELPER(neon_cgt_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_lt(b, a, fpst);
}

/* Reciprocal step and sqrt step. Note that unlike the A32/T32
 * versions, these do a fully fused multiply-add or
 * multiply-add-and-halve.
 */
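/* FRECPS computes 2 - a * b, one step of the Newton-Raphson iteration
 * for 1/x; FRSQRTS computes (3 - a * b) / 2, one step of the iteration
 * for 1/sqrt(x). Hence the 2.0, 3.0 and 1.5 constants below.
 */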
#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)

#define float64_two make_float64(0x4000000000000000ULL)
#define float64_three make_float64(0x4008000000000000ULL)
#define float64_one_point_five make_float64(0x3FF8000000000000ULL)

float32 HELPER(recpsf_f32)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float32_chs(a);
    if ((float32_is_infinity(a) && float32_is_zero(b)) ||
        (float32_is_infinity(b) && float32_is_zero(a))) {
        return float32_two;
    }
    return float32_muladd(a, b, float32_two, 0, fpst);
}

float64 HELPER(recpsf_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float64_chs(a);
    if ((float64_is_infinity(a) && float64_is_zero(b)) ||
        (float64_is_infinity(b) && float64_is_zero(a))) {
        return float64_two;
    }
    return float64_muladd(a, b, float64_two, 0, fpst);
}

float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float32_chs(a);
    if ((float32_is_infinity(a) && float32_is_zero(b)) ||
        (float32_is_infinity(b) && float32_is_zero(a))) {
        return float32_one_point_five;
    }
    return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
}

float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float64_chs(a);
    if ((float64_is_infinity(a) && float64_is_zero(b)) ||
        (float64_is_infinity(b) && float64_is_zero(a))) {
        return float64_one_point_five;
    }
    return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
}

/* Pairwise long add: add pairs of adjacent elements into
 * double-width elements in the result (eg _s8 is an 8x8->16 op)
 */
uint64_t HELPER(neon_addlp_s8)(uint64_t a)
{
    uint64_t nsignmask = 0x0080008000800080ULL;
    uint64_t wsignmask = 0x8000800080008000ULL;
    uint64_t elementmask = 0x00ff00ff00ff00ffULL;
    uint64_t tmp1, tmp2;
    uint64_t res, signres;

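    /* SWAR sign extension: for an 8-bit value v in a 16-bit lane,
     * (v ^ 0x80) - 0x80 sign-extends it to 16 bits. Setting bit 15
     * before the subtraction keeps the borrow from propagating into
     * the neighbouring lane.
     */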
    /* Extract odd elements, sign extend each to a 16 bit field */
    tmp1 = a & elementmask;
    tmp1 ^= nsignmask;
    tmp1 |= wsignmask;
    tmp1 = (tmp1 - nsignmask) ^ wsignmask;
    /* Ditto for the even elements */
    tmp2 = (a >> 8) & elementmask;
    tmp2 ^= nsignmask;
    tmp2 |= wsignmask;
    tmp2 = (tmp2 - nsignmask) ^ wsignmask;

    /* calculate the result by summing bits 0..14, 16..22, etc,
     * and then adjusting the sign bits 15, 23, etc manually.
     * This ensures the addition can't overflow the 16 bit field.
     */
    signres = (tmp1 ^ tmp2) & wsignmask;
    res = (tmp1 & ~wsignmask) + (tmp2 & ~wsignmask);
    res ^= signres;

    return res;
}

uint64_t HELPER(neon_addlp_u8)(uint64_t a)
{
    uint64_t tmp;

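    /* Each 16-bit lane receives the sum of two bytes (at most 0x1fe),
     * so the lanes cannot carry into each other.
     */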
    tmp = a & 0x00ff00ff00ff00ffULL;
    tmp += (a >> 8) & 0x00ff00ff00ff00ffULL;
    return tmp;
}

uint64_t HELPER(neon_addlp_s16)(uint64_t a)
{
    int32_t reslo, reshi;

    reslo = (int32_t)(int16_t)a + (int32_t)(int16_t)(a >> 16);
    reshi = (int32_t)(int16_t)(a >> 32) + (int32_t)(int16_t)(a >> 48);

    return (uint32_t)reslo | (((uint64_t)reshi) << 32);
}

uint64_t HELPER(neon_addlp_u16)(uint64_t a)
{
    uint64_t tmp;

    tmp = a & 0x0000ffff0000ffffULL;
    tmp += (a >> 16) & 0x0000ffff0000ffffULL;
    return tmp;
}

/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
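/* FRECPX inverts the biased exponent and keeps the sign: a normal input
 * with biased exponent e produces a power of two with biased exponent
 * ~e, so e.g. frecpx(1.0f) == 2.0f. Zeros and denormals map to the
 * largest normal exponent; NaNs are silenced and propagated.
 */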
float32 HELPER(frecpx_f32)(float32 a, void *fpstp)
{
    float_status *fpst = fpstp;
    uint32_t val32, sbit;
    int32_t exp;

    if (float32_is_any_nan(a)) {
        float32 nan = a;
        if (float32_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, fpst);
            nan = float32_maybe_silence_nan(a);
        }
        if (fpst->default_nan_mode) {
            nan = float32_default_nan;
        }
        return nan;
    }

    val32 = float32_val(a);
    sbit = 0x80000000ULL & val32;
    exp = extract32(val32, 23, 8);

    if (exp == 0) {
        return make_float32(sbit | (0xfe << 23));
    } else {
        return make_float32(sbit | (~exp & 0xff) << 23);
    }
}

float64 HELPER(frecpx_f64)(float64 a, void *fpstp)
{
    float_status *fpst = fpstp;
    uint64_t val64, sbit;
    int64_t exp;

    if (float64_is_any_nan(a)) {
        float64 nan = a;
        if (float64_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, fpst);
            nan = float64_maybe_silence_nan(a);
        }
        if (fpst->default_nan_mode) {
            nan = float64_default_nan;
        }
        return nan;
    }

    val64 = float64_val(a);
    sbit = 0x8000000000000000ULL & val64;
    exp = extract64(val64, 52, 11);

    if (exp == 0) {
        return make_float64(sbit | (0x7feULL << 52));
    } else {
        return make_float64(sbit | (~exp & 0x7ffULL) << 52);
    }
}

float32 HELPER(fcvtx_f64_to_f32)(float64 a, CPUARMState *env)
{
    /* Von Neumann rounding is implemented by using round-to-zero
     * and then setting the LSB of the result if Inexact was raised.
     */
    float32 r;
    float_status *fpst = &env->vfp.fp_status;
    float_status tstat = *fpst;
    int exflags;

    set_float_rounding_mode(float_round_to_zero, &tstat);
    set_float_exception_flags(0, &tstat);
    r = float64_to_float32(a, &tstat);
    r = float32_maybe_silence_nan(r);
    exflags = get_float_exception_flags(&tstat);
    if (exflags & float_flag_inexact) {
        r = make_float32(float32_val(r) | 1);
    }
    exflags |= get_float_exception_flags(fpst);
    set_float_exception_flags(exflags, fpst);
    return r;
}

/* 64-bit versions of the CRC helpers. The operation (and the prototypes
 * of crc32c() and crc32()) means that only the bottom 32 bits of the
 * accumulator and result are used, but we pass and return uint64_t for
 * convenience of the generated code. Unlike the 32-bit instruction set
 * versions, val may genuinely have 64 bits of data in it. The upper
 * bytes of val (above the number specified by 'bytes') must have been
 * zeroed out by the caller.
 */
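/* CRC32X and friends use the IEEE 802.3 polynomial 0x04c11db7, the same
 * one zlib's crc32() implements; the CRC32CX variants use the Castagnoli
 * polynomial 0x1edc6f41, as implemented by crc32c().
 */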
uint64_t HELPER(crc32_64)(uint64_t acc, uint64_t val, uint32_t bytes)
{
    uint8_t buf[8];

    stq_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement. */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint64_t HELPER(crc32c_64)(uint64_t acc, uint64_t val, uint32_t bytes)
{
    uint8_t buf[8];

    stq_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement. */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}

#if !defined(CONFIG_USER_ONLY)

/* Handle a CPU exception. */
void aarch64_cpu_do_interrupt(CPUState *cs)
{
    CPUARMState *env = cs->env_ptr;
    ARMCPU *cpu = ARM_CPU(env->uc, cs);
    unsigned int new_el = arm_excp_target_el(cs, cs->exception_index);
    target_ulong addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
    int i;

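    /* The vector table at VBAR_ELx holds four 0x200-byte groups of entries:
     *   +0x000: current EL, using SP_EL0    +0x200: current EL, using SP_ELx
     *   +0x400: lower EL, AArch64           +0x600: lower EL, AArch32
     * Within each group the entry is selected by exception type:
     * +0x000 synchronous, +0x080 IRQ, +0x100 FIQ, +0x180 SError.
     */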
    if (arm_current_el(env) < new_el) {
        if (env->aarch64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }

    arm_log_exception(cs->exception_index);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d\n", arm_current_el(env));
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%" PRIx32 "\n",
                      env->exception.syndrome);
    }

    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through */
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
    case EXCP_SMC:
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    case EXCP_IRQ:
    case EXCP_VIRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
        addr += 0x100;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;
    } else {
        env->banked_spsr[0] = cpsr_read(env);
        if (!env->thumb) {
            env->cp15.esr_el[new_el] |= 1 << 25;
        }
        env->elr_el[new_el] = env->regs[15];

        for (i = 0; i < 15; i++) {
            env->xregs[i] = env->regs[i];
        }

        env->condexec_bits = 0;
    }

    pstate_write(env, PSTATE_DAIF | new_mode);
    env->aarch64 = 1;
    aarch64_restore_sp(env, new_el);

    env->pc = addr;
    cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
#endif