1 /*
2 * AArch64 specific helpers
3 *
4 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "cpu.h"
23 #include "gdbstub/helpers.h"
24 #include "exec/helper-proto.h"
25 #include "qemu/host-utils.h"
26 #include "qemu/log.h"
27 #include "qemu/main-loop.h"
28 #include "qemu/bitops.h"
29 #include "internals.h"
30 #include "qemu/crc32c.h"
31 #include "exec/exec-all.h"
32 #include "exec/cpu_ldst.h"
33 #include "qemu/int128.h"
34 #include "qemu/atomic128.h"
35 #include "fpu/softfloat.h"
36 #include <zlib.h> /* For crc32 */
37
38 /* C2.4.7 Multiply and divide */
39 /* special cases for 0 and LLONG_MIN are mandated by the standard */
uint64_t HELPER(udiv64)(uint64_t num, uint64_t den)
41 {
42 if (den == 0) {
43 return 0;
44 }
45 return num / den;
46 }
47
int64_t HELPER(sdiv64)(int64_t num, int64_t den)
49 {
50 if (den == 0) {
51 return 0;
52 }
53 if (num == LLONG_MIN && den == -1) {
54 return LLONG_MIN;
55 }
56 return num / den;
57 }
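
/*
 * For example, these semantics mean UDIV/SDIV never trap: dividing any
 * value by zero returns 0, and the single overflowing signed case
 * LLONG_MIN / -1 wraps back to LLONG_MIN with no flags set.
 */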
58
uint64_t HELPER(rbit64)(uint64_t x)
60 {
61 return revbit64(x);
62 }
63
void HELPER(msr_i_spsel)(CPUARMState *env, uint32_t imm)
65 {
66 update_spsel(env, imm);
67 }
68
void HELPER(msr_set_allint_el1)(CPUARMState *env)
70 {
71 /* ALLINT update to PSTATE. */
72 if (arm_hcrx_el2_eff(env) & HCRX_TALLINT) {
73 raise_exception_ra(env, EXCP_UDEF,
74 syn_aa64_sysregtrap(0, 1, 0, 4, 1, 0x1f, 0), 2,
75 GETPC());
76 }
77
78 env->pstate |= PSTATE_ALLINT;
79 }
80
static void daif_check(CPUARMState *env, uint32_t op,
                       uint32_t imm, uintptr_t ra)
83 {
84 /* DAIF update to PSTATE. This is OK from EL0 only if UMA is set. */
85 if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
86 raise_exception_ra(env, EXCP_UDEF,
87 syn_aa64_sysregtrap(0, extract32(op, 0, 3),
88 extract32(op, 3, 3), 4,
89 imm, 0x1f, 0),
90 exception_target_el(env), ra);
91 }
92 }
93
void HELPER(msr_i_daifset)(CPUARMState *env, uint32_t imm)
95 {
96 daif_check(env, 0x1e, imm, GETPC());
97 env->daif |= (imm << 6) & PSTATE_DAIF;
98 arm_rebuild_hflags(env);
99 }
100
void HELPER(msr_i_daifclear)(CPUARMState *env, uint32_t imm)
102 {
103 daif_check(env, 0x1f, imm, GETPC());
104 env->daif &= ~((imm << 6) & PSTATE_DAIF);
105 arm_rebuild_hflags(env);
106 }
107
108 /* Convert a softfloat float_relation_ (as returned by
109 * the float*_compare functions) to the correct ARM
110 * NZCV flag state.
111 */
static inline uint32_t float_rel_to_flags(int res)
113 {
114 uint64_t flags;
115 switch (res) {
116 case float_relation_equal:
117 flags = PSTATE_Z | PSTATE_C;
118 break;
119 case float_relation_less:
120 flags = PSTATE_N;
121 break;
122 case float_relation_greater:
123 flags = PSTATE_C;
124 break;
125 case float_relation_unordered:
126 default:
127 flags = PSTATE_C | PSTATE_V;
128 break;
129 }
130 return flags;
131 }
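
/*
 * The mapping above matches the NZCV results architecturally defined
 * for FCMP/FCMPE:
 *   equal     -> 0110 (Z, C)
 *   less      -> 1000 (N)
 *   greater   -> 0010 (C)
 *   unordered -> 0011 (C, V)
 */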
132
uint64_t HELPER(vfp_cmph_a64)(uint32_t x, uint32_t y, void *fp_status)
134 {
135 return float_rel_to_flags(float16_compare_quiet(x, y, fp_status));
136 }
137
uint64_t HELPER(vfp_cmpeh_a64)(uint32_t x, uint32_t y, void *fp_status)
139 {
140 return float_rel_to_flags(float16_compare(x, y, fp_status));
141 }
142
uint64_t HELPER(vfp_cmps_a64)(float32 x, float32 y, void *fp_status)
144 {
145 return float_rel_to_flags(float32_compare_quiet(x, y, fp_status));
146 }
147
uint64_t HELPER(vfp_cmpes_a64)(float32 x, float32 y, void *fp_status)
149 {
150 return float_rel_to_flags(float32_compare(x, y, fp_status));
151 }
152
uint64_t HELPER(vfp_cmpd_a64)(float64 x, float64 y, void *fp_status)
154 {
155 return float_rel_to_flags(float64_compare_quiet(x, y, fp_status));
156 }
157
uint64_t HELPER(vfp_cmped_a64)(float64 x, float64 y, void *fp_status)
159 {
160 return float_rel_to_flags(float64_compare(x, y, fp_status));
161 }
162
float32 HELPER(vfp_mulxs)(float32 a, float32 b, void *fpstp)
164 {
165 float_status *fpst = fpstp;
166
167 a = float32_squash_input_denormal(a, fpst);
168 b = float32_squash_input_denormal(b, fpst);
169
170 if ((float32_is_zero(a) && float32_is_infinity(b)) ||
171 (float32_is_infinity(a) && float32_is_zero(b))) {
172 /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
173 return make_float32((1U << 30) |
174 ((float32_val(a) ^ float32_val(b)) & (1U << 31)));
175 }
176 return float32_mul(a, b, fpst);
177 }
178
float64 HELPER(vfp_mulxd)(float64 a, float64 b, void *fpstp)
180 {
181 float_status *fpst = fpstp;
182
183 a = float64_squash_input_denormal(a, fpst);
184 b = float64_squash_input_denormal(b, fpst);
185
186 if ((float64_is_zero(a) && float64_is_infinity(b)) ||
187 (float64_is_infinity(a) && float64_is_zero(b))) {
188 /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
189 return make_float64((1ULL << 62) |
190 ((float64_val(a) ^ float64_val(b)) & (1ULL << 63)));
191 }
192 return float64_mul(a, b, fpst);
193 }
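
/*
 * For example, FMULX of +0.0 by -Inf returns -2.0 here, where a plain
 * FMUL would raise Invalid Operation and return the default NaN; all
 * other operand combinations behave exactly like float*_mul().
 */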
194
195 /* 64bit/double versions of the neon float compare functions */
uint64_t HELPER(neon_ceq_f64)(float64 a, float64 b, void *fpstp)
197 {
198 float_status *fpst = fpstp;
199 return -float64_eq_quiet(a, b, fpst);
200 }
201
uint64_t HELPER(neon_cge_f64)(float64 a, float64 b, void *fpstp)
203 {
204 float_status *fpst = fpstp;
205 return -float64_le(b, a, fpst);
206 }
207
uint64_t HELPER(neon_cgt_f64)(float64 a, float64 b, void *fpstp)
209 {
210 float_status *fpst = fpstp;
211 return -float64_lt(b, a, fpst);
212 }
213
214 /* Reciprocal step and sqrt step. Note that unlike the A32/T32
215 * versions, these do a fully fused multiply-add or
216 * multiply-add-and-halve.
217 */
218
uint32_t HELPER(recpsf_f16)(uint32_t a, uint32_t b, void *fpstp)
220 {
221 float_status *fpst = fpstp;
222
223 a = float16_squash_input_denormal(a, fpst);
224 b = float16_squash_input_denormal(b, fpst);
225
226 a = float16_chs(a);
227 if ((float16_is_infinity(a) && float16_is_zero(b)) ||
228 (float16_is_infinity(b) && float16_is_zero(a))) {
229 return float16_two;
230 }
231 return float16_muladd(a, b, float16_two, 0, fpst);
232 }
233
float32 HELPER(recpsf_f32)(float32 a, float32 b, void *fpstp)
235 {
236 float_status *fpst = fpstp;
237
238 a = float32_squash_input_denormal(a, fpst);
239 b = float32_squash_input_denormal(b, fpst);
240
241 a = float32_chs(a);
242 if ((float32_is_infinity(a) && float32_is_zero(b)) ||
243 (float32_is_infinity(b) && float32_is_zero(a))) {
244 return float32_two;
245 }
246 return float32_muladd(a, b, float32_two, 0, fpst);
247 }
248
float64 HELPER(recpsf_f64)(float64 a, float64 b, void *fpstp)
250 {
251 float_status *fpst = fpstp;
252
253 a = float64_squash_input_denormal(a, fpst);
254 b = float64_squash_input_denormal(b, fpst);
255
256 a = float64_chs(a);
257 if ((float64_is_infinity(a) && float64_is_zero(b)) ||
258 (float64_is_infinity(b) && float64_is_zero(a))) {
259 return float64_two;
260 }
261 return float64_muladd(a, b, float64_two, 0, fpst);
262 }
263
uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, void *fpstp)
265 {
266 float_status *fpst = fpstp;
267
268 a = float16_squash_input_denormal(a, fpst);
269 b = float16_squash_input_denormal(b, fpst);
270
271 a = float16_chs(a);
272 if ((float16_is_infinity(a) && float16_is_zero(b)) ||
273 (float16_is_infinity(b) && float16_is_zero(a))) {
274 return float16_one_point_five;
275 }
276 return float16_muladd(a, b, float16_three, float_muladd_halve_result, fpst);
277 }
278
float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, void *fpstp)
280 {
281 float_status *fpst = fpstp;
282
283 a = float32_squash_input_denormal(a, fpst);
284 b = float32_squash_input_denormal(b, fpst);
285
286 a = float32_chs(a);
287 if ((float32_is_infinity(a) && float32_is_zero(b)) ||
288 (float32_is_infinity(b) && float32_is_zero(a))) {
289 return float32_one_point_five;
290 }
291 return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
292 }
293
float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, void *fpstp)
295 {
296 float_status *fpst = fpstp;
297
298 a = float64_squash_input_denormal(a, fpst);
299 b = float64_squash_input_denormal(b, fpst);
300
301 a = float64_chs(a);
302 if ((float64_is_infinity(a) && float64_is_zero(b)) ||
303 (float64_is_infinity(b) && float64_is_zero(a))) {
304 return float64_one_point_five;
305 }
306 return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
307 }
308
309 /* Pairwise long add: add pairs of adjacent elements into
310 * double-width elements in the result (eg _s8 is an 8x8->16 op)
311 */
uint64_t HELPER(neon_addlp_s8)(uint64_t a)
313 {
314 uint64_t nsignmask = 0x0080008000800080ULL;
315 uint64_t wsignmask = 0x8000800080008000ULL;
316 uint64_t elementmask = 0x00ff00ff00ff00ffULL;
317 uint64_t tmp1, tmp2;
318 uint64_t res, signres;
319
320 /* Extract odd elements, sign extend each to a 16 bit field */
321 tmp1 = a & elementmask;
322 tmp1 ^= nsignmask;
323 tmp1 |= wsignmask;
324 tmp1 = (tmp1 - nsignmask) ^ wsignmask;
325 /* Ditto for the even elements */
326 tmp2 = (a >> 8) & elementmask;
327 tmp2 ^= nsignmask;
328 tmp2 |= wsignmask;
329 tmp2 = (tmp2 - nsignmask) ^ wsignmask;
330
    /* calculate the result by summing bits 0..14, 16..30, etc,
     * and then adjusting the sign bits 15, 31, etc manually.
333 * This ensures the addition can't overflow the 16 bit field.
334 */
335 signres = (tmp1 ^ tmp2) & wsignmask;
336 res = (tmp1 & ~wsignmask) + (tmp2 & ~wsignmask);
337 res ^= signres;
338
339 return res;
340 }
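
/*
 * For example, if the low 16 bits of the input are 0x807f (bytes 0x7f
 * and 0x80), the low 16-bit lane of the result is 127 + (-128) = -1,
 * i.e. 0xffff.
 */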
341
uint64_t HELPER(neon_addlp_u8)(uint64_t a)
343 {
344 uint64_t tmp;
345
346 tmp = a & 0x00ff00ff00ff00ffULL;
347 tmp += (a >> 8) & 0x00ff00ff00ff00ffULL;
348 return tmp;
349 }
350
uint64_t HELPER(neon_addlp_s16)(uint64_t a)
352 {
353 int32_t reslo, reshi;
354
355 reslo = (int32_t)(int16_t)a + (int32_t)(int16_t)(a >> 16);
356 reshi = (int32_t)(int16_t)(a >> 32) + (int32_t)(int16_t)(a >> 48);
357
358 return (uint32_t)reslo | (((uint64_t)reshi) << 32);
359 }
360
uint64_t HELPER(neon_addlp_u16)(uint64_t a)
362 {
363 uint64_t tmp;
364
365 tmp = a & 0x0000ffff0000ffffULL;
366 tmp += (a >> 16) & 0x0000ffff0000ffffULL;
367 return tmp;
368 }
369
370 /* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
uint32_t HELPER(frecpx_f16)(uint32_t a, void *fpstp)
372 {
373 float_status *fpst = fpstp;
374 uint16_t val16, sbit;
375 int16_t exp;
376
377 if (float16_is_any_nan(a)) {
378 float16 nan = a;
379 if (float16_is_signaling_nan(a, fpst)) {
380 float_raise(float_flag_invalid, fpst);
381 if (!fpst->default_nan_mode) {
382 nan = float16_silence_nan(a, fpst);
383 }
384 }
385 if (fpst->default_nan_mode) {
386 nan = float16_default_nan(fpst);
387 }
388 return nan;
389 }
390
391 a = float16_squash_input_denormal(a, fpst);
392
393 val16 = float16_val(a);
394 sbit = 0x8000 & val16;
395 exp = extract32(val16, 10, 5);
396
397 if (exp == 0) {
398 return make_float16(deposit32(sbit, 10, 5, 0x1e));
399 } else {
400 return make_float16(deposit32(sbit, 10, 5, ~exp));
401 }
402 }
403
float32 HELPER(frecpx_f32)(float32 a, void *fpstp)
405 {
406 float_status *fpst = fpstp;
407 uint32_t val32, sbit;
408 int32_t exp;
409
410 if (float32_is_any_nan(a)) {
411 float32 nan = a;
412 if (float32_is_signaling_nan(a, fpst)) {
413 float_raise(float_flag_invalid, fpst);
414 if (!fpst->default_nan_mode) {
415 nan = float32_silence_nan(a, fpst);
416 }
417 }
418 if (fpst->default_nan_mode) {
419 nan = float32_default_nan(fpst);
420 }
421 return nan;
422 }
423
424 a = float32_squash_input_denormal(a, fpst);
425
426 val32 = float32_val(a);
427 sbit = 0x80000000ULL & val32;
428 exp = extract32(val32, 23, 8);
429
430 if (exp == 0) {
431 return make_float32(sbit | (0xfe << 23));
432 } else {
433 return make_float32(sbit | (~exp & 0xff) << 23);
434 }
435 }
436
float64 HELPER(frecpx_f64)(float64 a, void *fpstp)
438 {
439 float_status *fpst = fpstp;
440 uint64_t val64, sbit;
441 int64_t exp;
442
443 if (float64_is_any_nan(a)) {
444 float64 nan = a;
445 if (float64_is_signaling_nan(a, fpst)) {
446 float_raise(float_flag_invalid, fpst);
447 if (!fpst->default_nan_mode) {
448 nan = float64_silence_nan(a, fpst);
449 }
450 }
451 if (fpst->default_nan_mode) {
452 nan = float64_default_nan(fpst);
453 }
454 return nan;
455 }
456
457 a = float64_squash_input_denormal(a, fpst);
458
459 val64 = float64_val(a);
460 sbit = 0x8000000000000000ULL & val64;
461 exp = extract64(float64_val(a), 52, 11);
462
463 if (exp == 0) {
464 return make_float64(sbit | (0x7feULL << 52));
465 } else {
466 return make_float64(sbit | (~exp & 0x7ffULL) << 52);
467 }
468 }
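
/*
 * Since only the exponent field is inverted, FRECPX produces a
 * power-of-two scale factor rather than a full reciprocal estimate:
 * e.g. FRECPX(4.0) == 0.5 and FRECPX(1.0) == 2.0 for doubles.
 */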
469
float32 HELPER(fcvtx_f64_to_f32)(float64 a, CPUARMState *env)
471 {
472 /* Von Neumann rounding is implemented by using round-to-zero
473 * and then setting the LSB of the result if Inexact was raised.
474 */
475 float32 r;
476 float_status *fpst = &env->vfp.fp_status;
477 float_status tstat = *fpst;
478 int exflags;
479
480 set_float_rounding_mode(float_round_to_zero, &tstat);
481 set_float_exception_flags(0, &tstat);
482 r = float64_to_float32(a, &tstat);
483 exflags = get_float_exception_flags(&tstat);
484 if (exflags & float_flag_inexact) {
485 r = make_float32(float32_val(r) | 1);
486 }
487 exflags |= get_float_exception_flags(fpst);
488 set_float_exception_flags(exflags, fpst);
489 return r;
490 }
491
/* 64-bit versions of the CRC helpers. Note that although the operation
 * (and the prototypes of crc32c() and crc32()) means that only the bottom
 * 32 bits of the accumulator and result are used, we pass and return
 * uint64_t for convenience of the generated code. Unlike the 32-bit
 * instruction set versions, val may genuinely have 64 bits of data in it.
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint64_t HELPER(crc32_64)(uint64_t acc, uint64_t val, uint32_t bytes)
501 {
502 uint8_t buf[8];
503
504 stq_le_p(buf, val);
505
506 /* zlib crc32 converts the accumulator and output to one's complement. */
507 return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
508 }
509
uint64_t HELPER(crc32c_64)(uint64_t acc, uint64_t val, uint32_t bytes)
511 {
512 uint8_t buf[8];
513
514 stq_le_p(buf, val);
515
516 /* Linux crc32c converts the output to one's complement. */
517 return crc32c(acc, buf, bytes) ^ 0xffffffff;
518 }
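
/*
 * The 'bytes' argument selects the instruction form: CRC32B/CRC32CB pass 1,
 * CRC32H/CRC32CH pass 2, CRC32W/CRC32CW pass 4 and CRC32X/CRC32CX pass 8,
 * with the value always presented in the low-order bytes of 'val'.
 */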
519
520 /*
521 * AdvSIMD half-precision
522 */
523
524 #define ADVSIMD_HELPER(name, suffix) HELPER(glue(glue(advsimd_, name), suffix))
525
526 #define ADVSIMD_HALFOP(name) \
527 uint32_t ADVSIMD_HELPER(name, h)(uint32_t a, uint32_t b, void *fpstp) \
528 { \
529 float_status *fpst = fpstp; \
530 return float16_ ## name(a, b, fpst); \
531 }
532
533 ADVSIMD_HALFOP(add)
ADVSIMD_HALFOP(sub)
535 ADVSIMD_HALFOP(mul)
536 ADVSIMD_HALFOP(div)
537 ADVSIMD_HALFOP(min)
538 ADVSIMD_HALFOP(max)
539 ADVSIMD_HALFOP(minnum)
540 ADVSIMD_HALFOP(maxnum)
541
542 #define ADVSIMD_TWOHALFOP(name) \
543 uint32_t ADVSIMD_HELPER(name, 2h)(uint32_t two_a, uint32_t two_b, void *fpstp) \
544 { \
545 float16 a1, a2, b1, b2; \
546 uint32_t r1, r2; \
547 float_status *fpst = fpstp; \
548 a1 = extract32(two_a, 0, 16); \
549 a2 = extract32(two_a, 16, 16); \
550 b1 = extract32(two_b, 0, 16); \
551 b2 = extract32(two_b, 16, 16); \
552 r1 = float16_ ## name(a1, b1, fpst); \
553 r2 = float16_ ## name(a2, b2, fpst); \
554 return deposit32(r1, 16, 16, r2); \
555 }
556
557 ADVSIMD_TWOHALFOP(add)
558 ADVSIMD_TWOHALFOP(sub)
559 ADVSIMD_TWOHALFOP(mul)
560 ADVSIMD_TWOHALFOP(div)
561 ADVSIMD_TWOHALFOP(min)
562 ADVSIMD_TWOHALFOP(max)
563 ADVSIMD_TWOHALFOP(minnum)
564 ADVSIMD_TWOHALFOP(maxnum)
565
566 /* Data processing - scalar floating-point and advanced SIMD */
567 static float16 float16_mulx(float16 a, float16 b, void *fpstp)
568 {
569 float_status *fpst = fpstp;
570
571 a = float16_squash_input_denormal(a, fpst);
572 b = float16_squash_input_denormal(b, fpst);
573
574 if ((float16_is_zero(a) && float16_is_infinity(b)) ||
575 (float16_is_infinity(a) && float16_is_zero(b))) {
576 /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
577 return make_float16((1U << 14) |
578 ((float16_val(a) ^ float16_val(b)) & (1U << 15)));
579 }
580 return float16_mul(a, b, fpst);
581 }
582
583 ADVSIMD_HALFOP(mulx)
ADVSIMD_TWOHALFOP(mulx)
585
586 /* fused multiply-accumulate */
587 uint32_t HELPER(advsimd_muladdh)(uint32_t a, uint32_t b, uint32_t c,
588 void *fpstp)
589 {
590 float_status *fpst = fpstp;
591 return float16_muladd(a, b, c, 0, fpst);
592 }
593
uint32_t HELPER(advsimd_muladd2h)(uint32_t two_a, uint32_t two_b,
                                  uint32_t two_c, void *fpstp)
596 {
597 float_status *fpst = fpstp;
598 float16 a1, a2, b1, b2, c1, c2;
599 uint32_t r1, r2;
600 a1 = extract32(two_a, 0, 16);
601 a2 = extract32(two_a, 16, 16);
602 b1 = extract32(two_b, 0, 16);
603 b2 = extract32(two_b, 16, 16);
604 c1 = extract32(two_c, 0, 16);
605 c2 = extract32(two_c, 16, 16);
606 r1 = float16_muladd(a1, b1, c1, 0, fpst);
607 r2 = float16_muladd(a2, b2, c2, 0, fpst);
608 return deposit32(r1, 16, 16, r2);
609 }
610
611 /*
612 * Floating point comparisons produce an integer result. Softfloat
613 * routines return float_relation types which we convert to the 0/-1
614 * Neon requires.
615 */
616
617 #define ADVSIMD_CMPRES(test) (test) ? 0xffff : 0
618
uint32_t HELPER(advsimd_ceq_f16)(uint32_t a, uint32_t b, void *fpstp)
620 {
621 float_status *fpst = fpstp;
622 int compare = float16_compare_quiet(a, b, fpst);
623 return ADVSIMD_CMPRES(compare == float_relation_equal);
624 }
625
uint32_t HELPER(advsimd_cge_f16)(uint32_t a, uint32_t b, void *fpstp)
627 {
628 float_status *fpst = fpstp;
629 int compare = float16_compare(a, b, fpst);
630 return ADVSIMD_CMPRES(compare == float_relation_greater ||
631 compare == float_relation_equal);
632 }
633
uint32_t HELPER(advsimd_cgt_f16)(uint32_t a, uint32_t b, void *fpstp)
635 {
636 float_status *fpst = fpstp;
637 int compare = float16_compare(a, b, fpst);
638 return ADVSIMD_CMPRES(compare == float_relation_greater);
639 }
640
uint32_t HELPER(advsimd_acge_f16)(uint32_t a, uint32_t b, void *fpstp)
642 {
643 float_status *fpst = fpstp;
644 float16 f0 = float16_abs(a);
645 float16 f1 = float16_abs(b);
646 int compare = float16_compare(f0, f1, fpst);
647 return ADVSIMD_CMPRES(compare == float_relation_greater ||
648 compare == float_relation_equal);
649 }
650
uint32_t HELPER(advsimd_acgt_f16)(uint32_t a, uint32_t b, void *fpstp)
652 {
653 float_status *fpst = fpstp;
654 float16 f0 = float16_abs(a);
655 float16 f1 = float16_abs(b);
656 int compare = float16_compare(f0, f1, fpst);
657 return ADVSIMD_CMPRES(compare == float_relation_greater);
658 }
659
660 /* round to integral */
uint32_t HELPER(advsimd_rinth_exact)(uint32_t x, void *fp_status)
662 {
663 return float16_round_to_int(x, fp_status);
664 }
665
uint32_t HELPER(advsimd_rinth)(uint32_t x, void *fp_status)
667 {
668 int old_flags = get_float_exception_flags(fp_status), new_flags;
669 float16 ret;
670
671 ret = float16_round_to_int(x, fp_status);
672
673 /* Suppress any inexact exceptions the conversion produced */
674 if (!(old_flags & float_flag_inexact)) {
675 new_flags = get_float_exception_flags(fp_status);
676 set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
677 }
678
679 return ret;
680 }
681
682 /*
683 * Half-precision floating point conversion functions
684 *
685 * There are a multitude of conversion functions with various
686 * different rounding modes. This is dealt with by the calling code
687 * setting the mode appropriately before calling the helper.
688 */
689
uint32_t HELPER(advsimd_f16tosinth)(uint32_t a, void *fpstp)
691 {
692 float_status *fpst = fpstp;
693
694 /* Invalid if we are passed a NaN */
695 if (float16_is_any_nan(a)) {
696 float_raise(float_flag_invalid, fpst);
697 return 0;
698 }
699 return float16_to_int16(a, fpst);
700 }
701
uint32_t HELPER(advsimd_f16touinth)(uint32_t a, void *fpstp)
703 {
704 float_status *fpst = fpstp;
705
706 /* Invalid if we are passed a NaN */
707 if (float16_is_any_nan(a)) {
708 float_raise(float_flag_invalid, fpst);
709 return 0;
710 }
711 return float16_to_uint16(a, fpst);
712 }
713
static int el_from_spsr(uint32_t spsr)
715 {
716 /* Return the exception level that this SPSR is requesting a return to,
717 * or -1 if it is invalid (an illegal return)
718 */
719 if (spsr & PSTATE_nRW) {
720 switch (spsr & CPSR_M) {
721 case ARM_CPU_MODE_USR:
722 return 0;
723 case ARM_CPU_MODE_HYP:
724 return 2;
725 case ARM_CPU_MODE_FIQ:
726 case ARM_CPU_MODE_IRQ:
727 case ARM_CPU_MODE_SVC:
728 case ARM_CPU_MODE_ABT:
729 case ARM_CPU_MODE_UND:
730 case ARM_CPU_MODE_SYS:
731 return 1;
732 case ARM_CPU_MODE_MON:
733 /* Returning to Mon from AArch64 is never possible,
734 * so this is an illegal return.
735 */
736 default:
737 return -1;
738 }
739 } else {
740 if (extract32(spsr, 1, 1)) {
741 /* Return with reserved M[1] bit set */
742 return -1;
743 }
744 if (extract32(spsr, 0, 4) == 1) {
745 /* return to EL0 with M[0] bit set */
746 return -1;
747 }
748 return extract32(spsr, 2, 2);
749 }
750 }
751
static void cpsr_write_from_spsr_elx(CPUARMState *env,
                                     uint32_t val)
754 {
755 uint32_t mask;
756
757 /* Save SPSR_ELx.SS into PSTATE. */
758 env->pstate = (env->pstate & ~PSTATE_SS) | (val & PSTATE_SS);
759 val &= ~PSTATE_SS;
760
761 /* Move DIT to the correct location for CPSR */
762 if (val & PSTATE_DIT) {
763 val &= ~PSTATE_DIT;
764 val |= CPSR_DIT;
765 }
766
767 mask = aarch32_cpsr_valid_mask(env->features, \
768 &env_archcpu(env)->isar);
769 cpsr_write(env, val, mask, CPSRWriteRaw);
770 }
771
void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
773 {
774 int cur_el = arm_current_el(env);
775 unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
776 uint32_t spsr = env->banked_spsr[spsr_idx];
777 int new_el;
778 bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;
779
780 aarch64_save_sp(env, cur_el);
781
782 arm_clear_exclusive(env);
783
784 /* We must squash the PSTATE.SS bit to zero unless both of the
785 * following hold:
786 * 1. debug exceptions are currently disabled
787 * 2. singlestep will be active in the EL we return to
788 * We check 1 here and 2 after we've done the pstate/cpsr write() to
789 * transition to the EL we're going to.
790 */
791 if (arm_generate_debug_exceptions(env)) {
792 spsr &= ~PSTATE_SS;
793 }
794
795 /*
796 * FEAT_RME forbids return from EL3 with an invalid security state.
797 * We don't need an explicit check for FEAT_RME here because we enforce
798 * in scr_write() that you can't set the NSE bit without it.
799 */
800 if (cur_el == 3 && (env->cp15.scr_el3 & (SCR_NS | SCR_NSE)) == SCR_NSE) {
801 goto illegal_return;
802 }
803
804 new_el = el_from_spsr(spsr);
805 if (new_el == -1) {
806 goto illegal_return;
807 }
808 if (new_el > cur_el || (new_el == 2 && !arm_is_el2_enabled(env))) {
809 /* Disallow return to an EL which is unimplemented or higher
810 * than the current one.
811 */
812 goto illegal_return;
813 }
814
815 if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
816 /* Return to an EL which is configured for a different register width */
817 goto illegal_return;
818 }
819
820 if (new_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
821 goto illegal_return;
822 }
823
824 bql_lock();
825 arm_call_pre_el_change_hook(env_archcpu(env));
826 bql_unlock();
827
828 if (!return_to_aa64) {
829 env->aarch64 = false;
830 /* We do a raw CPSR write because aarch64_sync_64_to_32()
831 * will sort the register banks out for us, and we've already
832 * caught all the bad-mode cases in el_from_spsr().
833 */
834 cpsr_write_from_spsr_elx(env, spsr);
835 if (!arm_singlestep_active(env)) {
836 env->pstate &= ~PSTATE_SS;
837 }
838 aarch64_sync_64_to_32(env);
839
840 if (spsr & CPSR_T) {
841 env->regs[15] = new_pc & ~0x1;
842 } else {
843 env->regs[15] = new_pc & ~0x3;
844 }
845 helper_rebuild_hflags_a32(env, new_el);
846 qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
847 "AArch32 EL%d PC 0x%" PRIx32 "\n",
848 cur_el, new_el, env->regs[15]);
849 } else {
850 int tbii;
851
852 env->aarch64 = true;
853 spsr &= aarch64_pstate_valid_mask(&env_archcpu(env)->isar);
854 pstate_write(env, spsr);
855 if (!arm_singlestep_active(env)) {
856 env->pstate &= ~PSTATE_SS;
857 }
858 aarch64_restore_sp(env, new_el);
859 helper_rebuild_hflags_a64(env, new_el);
860
861 /*
862 * Apply TBI to the exception return address. We had to delay this
863 * until after we selected the new EL, so that we could select the
864 * correct TBI+TBID bits. This is made easier by waiting until after
865 * the hflags rebuild, since we can pull the composite TBII field
866 * from there.
867 */
868 tbii = EX_TBFLAG_A64(env->hflags, TBII);
869 if ((tbii >> extract64(new_pc, 55, 1)) & 1) {
870 /* TBI is enabled. */
871 int core_mmu_idx = arm_env_mmu_index(env);
872 if (regime_has_2_ranges(core_to_aa64_mmu_idx(core_mmu_idx))) {
873 new_pc = sextract64(new_pc, 0, 56);
874 } else {
875 new_pc = extract64(new_pc, 0, 56);
876 }
877 }
878 env->pc = new_pc;
879
880 qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
881 "AArch64 EL%d PC 0x%" PRIx64 "\n",
882 cur_el, new_el, env->pc);
883 }
884
885 /*
886 * Note that cur_el can never be 0. If new_el is 0, then
887 * el0_a64 is return_to_aa64, else el0_a64 is ignored.
888 */
889 aarch64_sve_change_el(env, cur_el, new_el, return_to_aa64);
890
891 bql_lock();
892 arm_call_el_change_hook(env_archcpu(env));
893 bql_unlock();
894
895 return;
896
897 illegal_return:
898 /* Illegal return events of various kinds have architecturally
899 * mandated behaviour:
900 * restore NZCV and DAIF from SPSR_ELx
901 * set PSTATE.IL
902 * restore PC from ELR_ELx
903 * no change to exception level, execution state or stack pointer
904 */
905 env->pstate |= PSTATE_IL;
906 env->pc = new_pc;
907 spsr &= PSTATE_NZCV | PSTATE_DAIF | PSTATE_ALLINT;
908 spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF | PSTATE_ALLINT);
909 pstate_write(env, spsr);
910 if (!arm_singlestep_active(env)) {
911 env->pstate &= ~PSTATE_SS;
912 }
913 helper_rebuild_hflags_a64(env, cur_el);
914 qemu_log_mask(LOG_GUEST_ERROR, "Illegal exception return at EL%d: "
915 "resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc);
916 }
917
918 /*
919 * Square Root and Reciprocal square root
920 */
921
uint32_t HELPER(sqrt_f16)(uint32_t a, void *fpstp)
923 {
924 float_status *s = fpstp;
925
926 return float16_sqrt(a, s);
927 }
928
void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
930 {
931 /*
932 * Implement DC ZVA, which zeroes a fixed-length block of memory.
933 * Note that we do not implement the (architecturally mandated)
934 * alignment fault for attempts to use this on Device memory
935 * (which matches the usual QEMU behaviour of not implementing either
936 * alignment faults or any memory attribute handling).
937 */
938 int blocklen = 4 << env_archcpu(env)->dcz_blocksize;
939 uint64_t vaddr = vaddr_in & ~(blocklen - 1);
940 int mmu_idx = arm_env_mmu_index(env);
941 void *mem;
942
943 /*
944 * Trapless lookup. In addition to actual invalid page, may
945 * return NULL for I/O, watchpoints, clean pages, etc.
946 */
947 mem = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
948
949 #ifndef CONFIG_USER_ONLY
950 if (unlikely(!mem)) {
951 uintptr_t ra = GETPC();
952
953 /*
954 * Trap if accessing an invalid page. DC_ZVA requires that we supply
955 * the original pointer for an invalid page. But watchpoints require
956 * that we probe the actual space. So do both.
957 */
958 (void) probe_write(env, vaddr_in, 1, mmu_idx, ra);
959 mem = probe_write(env, vaddr, blocklen, mmu_idx, ra);
960
961 if (unlikely(!mem)) {
962 /*
963 * The only remaining reason for mem == NULL is I/O.
964 * Just do a series of byte writes as the architecture demands.
965 */
966 for (int i = 0; i < blocklen; i++) {
967 cpu_stb_mmuidx_ra(env, vaddr + i, 0, mmu_idx, ra);
968 }
969 return;
970 }
971 }
972 #endif
973
974 memset(mem, 0, blocklen);
975 }
976
void HELPER(unaligned_access)(CPUARMState *env, uint64_t addr,
                              uint32_t access_type, uint32_t mmu_idx)
979 {
980 arm_cpu_do_unaligned_access(env_cpu(env), addr, access_type,
981 mmu_idx, GETPC());
982 }
983
984 /* Memory operations (memset, memmove, memcpy) */
985
986 /*
987 * Return true if the CPY* and SET* insns can execute; compare
988 * pseudocode CheckMOPSEnabled(), though we refactor it a little.
989 */
static bool mops_enabled(CPUARMState *env)
991 {
992 int el = arm_current_el(env);
993
994 if (el < 2 &&
995 (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
996 !(arm_hcrx_el2_eff(env) & HCRX_MSCEN)) {
997 return false;
998 }
999
1000 if (el == 0) {
1001 if (!el_is_in_host(env, 0)) {
1002 return env->cp15.sctlr_el[1] & SCTLR_MSCEN;
1003 } else {
1004 return env->cp15.sctlr_el[2] & SCTLR_MSCEN;
1005 }
1006 }
1007 return true;
1008 }
1009
static void check_mops_enabled(CPUARMState *env, uintptr_t ra)
1011 {
1012 if (!mops_enabled(env)) {
1013 raise_exception_ra(env, EXCP_UDEF, syn_uncategorized(),
1014 exception_target_el(env), ra);
1015 }
1016 }
1017
1018 /*
1019 * Return the target exception level for an exception due
1020 * to mismatched arguments in a FEAT_MOPS copy or set.
1021 * Compare pseudocode MismatchedCpySetTargetEL()
1022 */
static int mops_mismatch_exception_target_el(CPUARMState *env)
1024 {
1025 int el = arm_current_el(env);
1026
1027 if (el > 1) {
1028 return el;
1029 }
1030 if (el == 0 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
1031 return 2;
1032 }
1033 if (el == 1 && (arm_hcrx_el2_eff(env) & HCRX_MCE2)) {
1034 return 2;
1035 }
1036 return 1;
1037 }
1038
1039 /*
1040 * Check whether an M or E instruction was executed with a CF value
1041 * indicating the wrong option for this implementation.
1042 * Assumes we are always Option A.
1043 */
static void check_mops_wrong_option(CPUARMState *env, uint32_t syndrome,
                                    uintptr_t ra)
1046 {
1047 if (env->CF != 0) {
1048 syndrome |= 1 << 17; /* Set the wrong-option bit */
1049 raise_exception_ra(env, EXCP_UDEF, syndrome,
1050 mops_mismatch_exception_target_el(env), ra);
1051 }
1052 }
1053
1054 /*
1055 * Return the maximum number of bytes we can transfer starting at addr
1056 * without crossing a page boundary.
1057 */
static uint64_t page_limit(uint64_t addr)
1059 {
1060 return TARGET_PAGE_ALIGN(addr + 1) - addr;
1061 }
1062
1063 /*
1064 * Return the number of bytes we can copy starting from addr and working
1065 * backwards without crossing a page boundary.
1066 */
static uint64_t page_limit_rev(uint64_t addr)
1068 {
1069 return (addr & ~TARGET_PAGE_MASK) + 1;
1070 }
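
/*
 * Worked example, assuming a 4K target page size: page_limit(0x1ffd) is 3
 * (bytes 0x1ffd..0x1fff lie on the page) and page_limit_rev(0x2002) is
 * also 3 (bytes 0x2000..0x2002).
 */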
1071
1072 /*
1073 * Perform part of a memory set on an area of guest memory starting at
1074 * toaddr (a dirty address) and extending for setsize bytes.
1075 *
1076 * Returns the number of bytes actually set, which might be less than
1077 * setsize; the caller should loop until the whole set has been done.
1078 * The caller should ensure that the guest registers are correct
1079 * for the possibility that the first byte of the set encounters
1080 * an exception or watchpoint. We guarantee not to take any faults
1081 * for bytes other than the first.
1082 */
static uint64_t set_step(CPUARMState *env, uint64_t toaddr,
                         uint64_t setsize, uint32_t data, int memidx,
                         uint32_t *mtedesc, uintptr_t ra)
1086 {
1087 void *mem;
1088
1089 setsize = MIN(setsize, page_limit(toaddr));
1090 if (*mtedesc) {
1091 uint64_t mtesize = mte_mops_probe(env, toaddr, setsize, *mtedesc);
1092 if (mtesize == 0) {
1093 /* Trap, or not. All CPU state is up to date */
1094 mte_check_fail(env, *mtedesc, toaddr, ra);
1095 /* Continue, with no further MTE checks required */
1096 *mtedesc = 0;
1097 } else {
1098 /* Advance to the end, or to the tag mismatch */
1099 setsize = MIN(setsize, mtesize);
1100 }
1101 }
1102
1103 toaddr = useronly_clean_ptr(toaddr);
1104 /*
1105 * Trapless lookup: returns NULL for invalid page, I/O,
1106 * watchpoints, clean pages, etc.
1107 */
1108 mem = tlb_vaddr_to_host(env, toaddr, MMU_DATA_STORE, memidx);
1109
1110 #ifndef CONFIG_USER_ONLY
1111 if (unlikely(!mem)) {
1112 /*
1113 * Slow-path: just do one byte write. This will handle the
1114 * watchpoint, invalid page, etc handling correctly.
1115 * For clean code pages, the next iteration will see
1116 * the page dirty and will use the fast path.
1117 */
1118 cpu_stb_mmuidx_ra(env, toaddr, data, memidx, ra);
1119 return 1;
1120 }
1121 #endif
1122 /* Easy case: just memset the host memory */
1123 memset(mem, data, setsize);
1124 return setsize;
1125 }
1126
1127 /*
1128 * Similar, but setting tags. The architecture requires us to do this
1129 * in 16-byte chunks. SETP accesses are not tag checked; they set
1130 * the tags.
1131 */
static uint64_t set_step_tags(CPUARMState *env, uint64_t toaddr,
                              uint64_t setsize, uint32_t data, int memidx,
                              uint32_t *mtedesc, uintptr_t ra)
1135 {
1136 void *mem;
1137 uint64_t cleanaddr;
1138
1139 setsize = MIN(setsize, page_limit(toaddr));
1140
1141 cleanaddr = useronly_clean_ptr(toaddr);
1142 /*
1143 * Trapless lookup: returns NULL for invalid page, I/O,
1144 * watchpoints, clean pages, etc.
1145 */
1146 mem = tlb_vaddr_to_host(env, cleanaddr, MMU_DATA_STORE, memidx);
1147
1148 #ifndef CONFIG_USER_ONLY
1149 if (unlikely(!mem)) {
1150 /*
1151 * Slow-path: just do one write. This will handle the
1152 * watchpoint, invalid page, etc handling correctly.
1153 * The architecture requires that we do 16 bytes at a time,
1154 * and we know both ptr and size are 16 byte aligned.
1155 * For clean code pages, the next iteration will see
1156 * the page dirty and will use the fast path.
1157 */
1158 uint64_t repldata = data * 0x0101010101010101ULL;
1159 MemOpIdx oi16 = make_memop_idx(MO_TE | MO_128, memidx);
1160 cpu_st16_mmu(env, toaddr, int128_make128(repldata, repldata), oi16, ra);
1161 mte_mops_set_tags(env, toaddr, 16, *mtedesc);
1162 return 16;
1163 }
1164 #endif
1165 /* Easy case: just memset the host memory */
1166 memset(mem, data, setsize);
1167 mte_mops_set_tags(env, toaddr, setsize, *mtedesc);
1168 return setsize;
1169 }
1170
1171 typedef uint64_t StepFn(CPUARMState *env, uint64_t toaddr,
1172 uint64_t setsize, uint32_t data,
1173 int memidx, uint32_t *mtedesc, uintptr_t ra);
1174
1175 /* Extract register numbers from a MOPS exception syndrome value */
static int mops_destreg(uint32_t syndrome)
1177 {
1178 return extract32(syndrome, 10, 5);
1179 }
1180
static int mops_srcreg(uint32_t syndrome)
1182 {
1183 return extract32(syndrome, 5, 5);
1184 }
1185
static int mops_sizereg(uint32_t syndrome)
1187 {
1188 return extract32(syndrome, 0, 5);
1189 }
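
/*
 * i.e. the translator packs the register numbers into the syndrome as
 * destreg = bits [14:10], srcreg = bits [9:5] and sizereg = bits [4:0],
 * which is where the extract32() calls above pull them from.
 */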
1190
1191 /*
1192 * Return true if TCMA and TBI bits mean we need to do MTE checks.
1193 * We only need to do this once per MOPS insn, not for every page.
1194 */
static bool mte_checks_needed(uint64_t ptr, uint32_t desc)
1196 {
1197 int bit55 = extract64(ptr, 55, 1);
1198
1199 /*
1200 * Note that tbi_check() returns true for "access checked" but
1201 * tcma_check() returns true for "access unchecked".
1202 */
1203 if (!tbi_check(desc, bit55)) {
1204 return false;
1205 }
1206 return !tcma_check(desc, bit55, allocation_tag_from_addr(ptr));
1207 }
1208
1209 /* Take an exception if the SETG addr/size are not granule aligned */
static void check_setg_alignment(CPUARMState *env, uint64_t ptr, uint64_t size,
                                 uint32_t memidx, uintptr_t ra)
1212 {
1213 if ((size != 0 && !QEMU_IS_ALIGNED(ptr, TAG_GRANULE)) ||
1214 !QEMU_IS_ALIGNED(size, TAG_GRANULE)) {
1215 arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
1216 memidx, ra);
1217
1218 }
1219 }
1220
static uint64_t arm_reg_or_xzr(CPUARMState *env, int reg)
1222 {
1223 /*
1224 * Runtime equivalent of cpu_reg() -- return the CPU register value,
1225 * for contexts when index 31 means XZR (not SP).
1226 */
1227 return reg == 31 ? 0 : env->xregs[reg];
1228 }
1229
1230 /*
1231 * For the Memory Set operation, our implementation chooses
1232 * always to use "option A", where we update Xd to the final
1233 * address in the SETP insn, and set Xn to be -(bytes remaining).
1234 * On SETM and SETE insns we only need update Xn.
1235 *
1236 * @env: CPU
1237 * @syndrome: syndrome value for mismatch exceptions
1238 * (also contains the register numbers we need to use)
1239 * @mtedesc: MTE descriptor word
1240 * @stepfn: function which does a single part of the set operation
1241 * @is_setg: true if this is the tag-setting SETG variant
1242 */
static void do_setp(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc,
                    StepFn *stepfn, bool is_setg, uintptr_t ra)
1245 {
1246 /* Prologue: we choose to do up to the next page boundary */
1247 int rd = mops_destreg(syndrome);
1248 int rs = mops_srcreg(syndrome);
1249 int rn = mops_sizereg(syndrome);
1250 uint8_t data = arm_reg_or_xzr(env, rs);
1251 uint32_t memidx = FIELD_EX32(mtedesc, MTEDESC, MIDX);
1252 uint64_t toaddr = env->xregs[rd];
1253 uint64_t setsize = env->xregs[rn];
1254 uint64_t stagesetsize, step;
1255
1256 check_mops_enabled(env, ra);
1257
1258 if (setsize > INT64_MAX) {
1259 setsize = INT64_MAX;
1260 if (is_setg) {
1261 setsize &= ~0xf;
1262 }
1263 }
1264
1265 if (unlikely(is_setg)) {
1266 check_setg_alignment(env, toaddr, setsize, memidx, ra);
1267 } else if (!mte_checks_needed(toaddr, mtedesc)) {
1268 mtedesc = 0;
1269 }
1270
1271 stagesetsize = MIN(setsize, page_limit(toaddr));
1272 while (stagesetsize) {
1273 env->xregs[rd] = toaddr;
1274 env->xregs[rn] = setsize;
1275 step = stepfn(env, toaddr, stagesetsize, data, memidx, &mtedesc, ra);
1276 toaddr += step;
1277 setsize -= step;
1278 stagesetsize -= step;
1279 }
1280 /* Insn completed, so update registers to the Option A format */
1281 env->xregs[rd] = toaddr + setsize;
1282 env->xregs[rn] = -setsize;
1283
1284 /* Set NZCV = 0000 to indicate we are an Option A implementation */
1285 env->NF = 0;
1286 env->ZF = 1; /* our env->ZF encoding is inverted */
1287 env->CF = 0;
1288 env->VF = 0;
1289 return;
1290 }
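
/*
 * Worked example of the Option A state, assuming a 4K page size: SETP
 * entered with Xd = 0x8ff0 and Xn = 0x30 stops at the page boundary
 * after 0x10 bytes, leaving Xd = 0x9020 (the final address),
 * Xn = -0x20 (minus the bytes still to set) and NZCV = 0000 for the
 * following SETM/SETE.
 */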
1291
void HELPER(setp)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
1293 {
1294 do_setp(env, syndrome, mtedesc, set_step, false, GETPC());
1295 }
1296
void HELPER(setgp)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
1298 {
1299 do_setp(env, syndrome, mtedesc, set_step_tags, true, GETPC());
1300 }
1301
static void do_setm(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc,
                    StepFn *stepfn, bool is_setg, uintptr_t ra)
1304 {
1305 /* Main: we choose to do all the full-page chunks */
1306 CPUState *cs = env_cpu(env);
1307 int rd = mops_destreg(syndrome);
1308 int rs = mops_srcreg(syndrome);
1309 int rn = mops_sizereg(syndrome);
1310 uint8_t data = arm_reg_or_xzr(env, rs);
1311 uint64_t toaddr = env->xregs[rd] + env->xregs[rn];
1312 uint64_t setsize = -env->xregs[rn];
1313 uint32_t memidx = FIELD_EX32(mtedesc, MTEDESC, MIDX);
1314 uint64_t step, stagesetsize;
1315
1316 check_mops_enabled(env, ra);
1317
1318 /*
1319 * We're allowed to NOP out "no data to copy" before the consistency
1320 * checks; we choose to do so.
1321 */
1322 if (env->xregs[rn] == 0) {
1323 return;
1324 }
1325
1326 check_mops_wrong_option(env, syndrome, ra);
1327
1328 /*
1329 * Our implementation will work fine even if we have an unaligned
1330 * destination address, and because we update Xn every time around
1331 * the loop below and the return value from stepfn() may be less
1332 * than requested, we might find toaddr is unaligned. So we don't
1333 * have an IMPDEF check for alignment here.
1334 */
1335
1336 if (unlikely(is_setg)) {
1337 check_setg_alignment(env, toaddr, setsize, memidx, ra);
1338 } else if (!mte_checks_needed(toaddr, mtedesc)) {
1339 mtedesc = 0;
1340 }
1341
1342 /* Do the actual memset: we leave the last partial page to SETE */
1343 stagesetsize = setsize & TARGET_PAGE_MASK;
1344 while (stagesetsize > 0) {
1345 step = stepfn(env, toaddr, setsize, data, memidx, &mtedesc, ra);
1346 toaddr += step;
1347 setsize -= step;
1348 stagesetsize -= step;
1349 env->xregs[rn] = -setsize;
1350 if (stagesetsize > 0 && unlikely(cpu_loop_exit_requested(cs))) {
1351 cpu_loop_exit_restore(cs, ra);
1352 }
1353 }
1354 }
1355
void HELPER(setm)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
1357 {
1358 do_setm(env, syndrome, mtedesc, set_step, false, GETPC());
1359 }
1360
void HELPER(setgm)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
1362 {
1363 do_setm(env, syndrome, mtedesc, set_step_tags, true, GETPC());
1364 }
1365
static void do_sete(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc,
                    StepFn *stepfn, bool is_setg, uintptr_t ra)
1368 {
1369 /* Epilogue: do the last partial page */
1370 int rd = mops_destreg(syndrome);
1371 int rs = mops_srcreg(syndrome);
1372 int rn = mops_sizereg(syndrome);
1373 uint8_t data = arm_reg_or_xzr(env, rs);
1374 uint64_t toaddr = env->xregs[rd] + env->xregs[rn];
1375 uint64_t setsize = -env->xregs[rn];
1376 uint32_t memidx = FIELD_EX32(mtedesc, MTEDESC, MIDX);
1377 uint64_t step;
1378
1379 check_mops_enabled(env, ra);
1380
1381 /*
1382 * We're allowed to NOP out "no data to copy" before the consistency
1383 * checks; we choose to do so.
1384 */
1385 if (setsize == 0) {
1386 return;
1387 }
1388
1389 check_mops_wrong_option(env, syndrome, ra);
1390
1391 /*
1392 * Our implementation has no address alignment requirements, but
1393 * we do want to enforce the "less than a page" size requirement,
1394 * so we don't need to have the "check for interrupts" here.
1395 */
1396 if (setsize >= TARGET_PAGE_SIZE) {
1397 raise_exception_ra(env, EXCP_UDEF, syndrome,
1398 mops_mismatch_exception_target_el(env), ra);
1399 }
1400
1401 if (unlikely(is_setg)) {
1402 check_setg_alignment(env, toaddr, setsize, memidx, ra);
1403 } else if (!mte_checks_needed(toaddr, mtedesc)) {
1404 mtedesc = 0;
1405 }
1406
1407 /* Do the actual memset */
1408 while (setsize > 0) {
1409 step = stepfn(env, toaddr, setsize, data, memidx, &mtedesc, ra);
1410 toaddr += step;
1411 setsize -= step;
1412 env->xregs[rn] = -setsize;
1413 }
1414 }
1415
void HELPER(sete)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
1417 {
1418 do_sete(env, syndrome, mtedesc, set_step, false, GETPC());
1419 }
1420
void HELPER(setge)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
1422 {
1423 do_sete(env, syndrome, mtedesc, set_step_tags, true, GETPC());
1424 }
1425
1426 /*
1427 * Perform part of a memory copy from the guest memory at fromaddr
1428 * and extending for copysize bytes, to the guest memory at
1429 * toaddr. Both addresses are dirty.
1430 *
 * Returns the number of bytes actually copied, which might be less than
1432 * copysize; the caller should loop until the whole copy has been done.
1433 * The caller should ensure that the guest registers are correct
1434 * for the possibility that the first byte of the copy encounters
1435 * an exception or watchpoint. We guarantee not to take any faults
1436 * for bytes other than the first.
1437 */
static uint64_t copy_step(CPUARMState *env, uint64_t toaddr, uint64_t fromaddr,
                          uint64_t copysize, int wmemidx, int rmemidx,
                          uint32_t *wdesc, uint32_t *rdesc, uintptr_t ra)
1441 {
1442 void *rmem;
1443 void *wmem;
1444
1445 /* Don't cross a page boundary on either source or destination */
1446 copysize = MIN(copysize, page_limit(toaddr));
1447 copysize = MIN(copysize, page_limit(fromaddr));
1448 /*
1449 * Handle MTE tag checks: either handle the tag mismatch for byte 0,
1450 * or else copy up to but not including the byte with the mismatch.
1451 */
1452 if (*rdesc) {
1453 uint64_t mtesize = mte_mops_probe(env, fromaddr, copysize, *rdesc);
1454 if (mtesize == 0) {
1455 mte_check_fail(env, *rdesc, fromaddr, ra);
1456 *rdesc = 0;
1457 } else {
1458 copysize = MIN(copysize, mtesize);
1459 }
1460 }
1461 if (*wdesc) {
1462 uint64_t mtesize = mte_mops_probe(env, toaddr, copysize, *wdesc);
1463 if (mtesize == 0) {
1464 mte_check_fail(env, *wdesc, toaddr, ra);
1465 *wdesc = 0;
1466 } else {
1467 copysize = MIN(copysize, mtesize);
1468 }
1469 }
1470
1471 toaddr = useronly_clean_ptr(toaddr);
1472 fromaddr = useronly_clean_ptr(fromaddr);
1473 /* Trapless lookup of whether we can get a host memory pointer */
1474 wmem = tlb_vaddr_to_host(env, toaddr, MMU_DATA_STORE, wmemidx);
1475 rmem = tlb_vaddr_to_host(env, fromaddr, MMU_DATA_LOAD, rmemidx);
1476
1477 #ifndef CONFIG_USER_ONLY
1478 /*
1479 * If we don't have host memory for both source and dest then just
1480 * do a single byte copy. This will handle watchpoints, invalid pages,
1481 * etc correctly. For clean code pages, the next iteration will see
1482 * the page dirty and will use the fast path.
1483 */
1484 if (unlikely(!rmem || !wmem)) {
1485 uint8_t byte;
1486 if (rmem) {
1487 byte = *(uint8_t *)rmem;
1488 } else {
1489 byte = cpu_ldub_mmuidx_ra(env, fromaddr, rmemidx, ra);
1490 }
1491 if (wmem) {
1492 *(uint8_t *)wmem = byte;
1493 } else {
1494 cpu_stb_mmuidx_ra(env, toaddr, byte, wmemidx, ra);
1495 }
1496 return 1;
1497 }
1498 #endif
1499 /* Easy case: just memmove the host memory */
1500 memmove(wmem, rmem, copysize);
1501 return copysize;
1502 }
1503
1504 /*
1505 * Do part of a backwards memory copy. Here toaddr and fromaddr point
1506 * to the *last* byte to be copied.
1507 */
static uint64_t copy_step_rev(CPUARMState *env, uint64_t toaddr,
                              uint64_t fromaddr,
                              uint64_t copysize, int wmemidx, int rmemidx,
                              uint32_t *wdesc, uint32_t *rdesc, uintptr_t ra)
1512 {
1513 void *rmem;
1514 void *wmem;
1515
1516 /* Don't cross a page boundary on either source or destination */
1517 copysize = MIN(copysize, page_limit_rev(toaddr));
1518 copysize = MIN(copysize, page_limit_rev(fromaddr));
1519
1520 /*
1521 * Handle MTE tag checks: either handle the tag mismatch for byte 0,
1522 * or else copy up to but not including the byte with the mismatch.
1523 */
1524 if (*rdesc) {
1525 uint64_t mtesize = mte_mops_probe_rev(env, fromaddr, copysize, *rdesc);
1526 if (mtesize == 0) {
1527 mte_check_fail(env, *rdesc, fromaddr, ra);
1528 *rdesc = 0;
1529 } else {
1530 copysize = MIN(copysize, mtesize);
1531 }
1532 }
1533 if (*wdesc) {
1534 uint64_t mtesize = mte_mops_probe_rev(env, toaddr, copysize, *wdesc);
1535 if (mtesize == 0) {
1536 mte_check_fail(env, *wdesc, toaddr, ra);
1537 *wdesc = 0;
1538 } else {
1539 copysize = MIN(copysize, mtesize);
1540 }
1541 }
1542
1543 toaddr = useronly_clean_ptr(toaddr);
1544 fromaddr = useronly_clean_ptr(fromaddr);
1545 /* Trapless lookup of whether we can get a host memory pointer */
1546 wmem = tlb_vaddr_to_host(env, toaddr, MMU_DATA_STORE, wmemidx);
1547 rmem = tlb_vaddr_to_host(env, fromaddr, MMU_DATA_LOAD, rmemidx);
1548
1549 #ifndef CONFIG_USER_ONLY
1550 /*
1551 * If we don't have host memory for both source and dest then just
1552 * do a single byte copy. This will handle watchpoints, invalid pages,
1553 * etc correctly. For clean code pages, the next iteration will see
1554 * the page dirty and will use the fast path.
1555 */
1556 if (unlikely(!rmem || !wmem)) {
1557 uint8_t byte;
1558 if (rmem) {
1559 byte = *(uint8_t *)rmem;
1560 } else {
1561 byte = cpu_ldub_mmuidx_ra(env, fromaddr, rmemidx, ra);
1562 }
1563 if (wmem) {
1564 *(uint8_t *)wmem = byte;
1565 } else {
1566 cpu_stb_mmuidx_ra(env, toaddr, byte, wmemidx, ra);
1567 }
1568 return 1;
1569 }
1570 #endif
1571 /*
1572 * Easy case: just memmove the host memory. Note that wmem and
1573 * rmem here point to the *last* byte to copy.
1574 */
1575 memmove(wmem - (copysize - 1), rmem - (copysize - 1), copysize);
1576 return copysize;
1577 }
1578
1579 /*
 * For the Memory Copy operation, our implementation chooses always
1581 * to use "option A", where we update Xd and Xs to the final addresses
1582 * in the CPYP insn, and then in CPYM and CPYE only need to update Xn.
1583 *
1584 * @env: CPU
1585 * @syndrome: syndrome value for mismatch exceptions
1586 * (also contains the register numbers we need to use)
1587 * @wdesc: MTE descriptor for the writes (destination)
1588 * @rdesc: MTE descriptor for the reads (source)
1589 * @move: true if this is CPY (memmove), false for CPYF (memcpy forwards)
1590 */
static void do_cpyp(CPUARMState *env, uint32_t syndrome, uint32_t wdesc,
                    uint32_t rdesc, uint32_t move, uintptr_t ra)
1593 {
1594 int rd = mops_destreg(syndrome);
1595 int rs = mops_srcreg(syndrome);
1596 int rn = mops_sizereg(syndrome);
1597 uint32_t rmemidx = FIELD_EX32(rdesc, MTEDESC, MIDX);
1598 uint32_t wmemidx = FIELD_EX32(wdesc, MTEDESC, MIDX);
1599 bool forwards = true;
1600 uint64_t toaddr = env->xregs[rd];
1601 uint64_t fromaddr = env->xregs[rs];
1602 uint64_t copysize = env->xregs[rn];
1603 uint64_t stagecopysize, step;
1604
1605 check_mops_enabled(env, ra);
1606
1607
1608 if (move) {
1609 /*
1610 * Copy backwards if necessary. The direction for a non-overlapping
1611 * copy is IMPDEF; we choose forwards.
1612 */
1613 if (copysize > 0x007FFFFFFFFFFFFFULL) {
1614 copysize = 0x007FFFFFFFFFFFFFULL;
1615 }
1616 uint64_t fs = extract64(fromaddr, 0, 56);
1617 uint64_t ts = extract64(toaddr, 0, 56);
1618 uint64_t fe = extract64(fromaddr + copysize, 0, 56);
1619
1620 if (fs < ts && fe > ts) {
1621 forwards = false;
1622 }
1623 } else {
1624 if (copysize > INT64_MAX) {
1625 copysize = INT64_MAX;
1626 }
1627 }
1628
1629 if (!mte_checks_needed(fromaddr, rdesc)) {
1630 rdesc = 0;
1631 }
1632 if (!mte_checks_needed(toaddr, wdesc)) {
1633 wdesc = 0;
1634 }
1635
1636 if (forwards) {
1637 stagecopysize = MIN(copysize, page_limit(toaddr));
1638 stagecopysize = MIN(stagecopysize, page_limit(fromaddr));
1639 while (stagecopysize) {
1640 env->xregs[rd] = toaddr;
1641 env->xregs[rs] = fromaddr;
1642 env->xregs[rn] = copysize;
1643 step = copy_step(env, toaddr, fromaddr, stagecopysize,
1644 wmemidx, rmemidx, &wdesc, &rdesc, ra);
1645 toaddr += step;
1646 fromaddr += step;
1647 copysize -= step;
1648 stagecopysize -= step;
1649 }
1650 /* Insn completed, so update registers to the Option A format */
1651 env->xregs[rd] = toaddr + copysize;
1652 env->xregs[rs] = fromaddr + copysize;
1653 env->xregs[rn] = -copysize;
1654 } else {
1655 /*
1656 * In a reverse copy the to and from addrs in Xs and Xd are the start
1657 * of the range, but it's more convenient for us to work with pointers
1658 * to the last byte being copied.
1659 */
1660 toaddr += copysize - 1;
1661 fromaddr += copysize - 1;
1662 stagecopysize = MIN(copysize, page_limit_rev(toaddr));
1663 stagecopysize = MIN(stagecopysize, page_limit_rev(fromaddr));
1664 while (stagecopysize) {
1665 env->xregs[rn] = copysize;
1666 step = copy_step_rev(env, toaddr, fromaddr, stagecopysize,
1667 wmemidx, rmemidx, &wdesc, &rdesc, ra);
1668 copysize -= step;
1669 stagecopysize -= step;
1670 toaddr -= step;
1671 fromaddr -= step;
1672 }
1673 /*
1674 * Insn completed, so update registers to the Option A format.
1675 * For a reverse copy this is no different to the CPYP input format.
1676 */
1677 env->xregs[rn] = copysize;
1678 }
1679
1680 /* Set NZCV = 0000 to indicate we are an Option A implementation */
1681 env->NF = 0;
1682 env->ZF = 1; /* our env->ZF encoding is inverted */
1683 env->CF = 0;
1684 env->VF = 0;
1685 return;
1686 }
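
/*
 * For example, CPY with Xs = 0x1000, Xd = 0x1004 and Xn = 0x10 overlaps
 * destructively when copying forwards (source 0x1000..0x100f vs
 * destination 0x1004..0x1013), so the check above selects the backwards
 * direction; with Xd = 0x2000 the same copy proceeds forwards.
 */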
1687
void HELPER(cpyp)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc,
                  uint32_t rdesc)
1690 {
1691 do_cpyp(env, syndrome, wdesc, rdesc, true, GETPC());
1692 }
1693
void HELPER(cpyfp)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc,
                   uint32_t rdesc)
1696 {
1697 do_cpyp(env, syndrome, wdesc, rdesc, false, GETPC());
1698 }
1699
static void do_cpym(CPUARMState *env, uint32_t syndrome, uint32_t wdesc,
                    uint32_t rdesc, uint32_t move, uintptr_t ra)
1702 {
1703 /* Main: we choose to copy until less than a page remaining */
1704 CPUState *cs = env_cpu(env);
1705 int rd = mops_destreg(syndrome);
1706 int rs = mops_srcreg(syndrome);
1707 int rn = mops_sizereg(syndrome);
1708 uint32_t rmemidx = FIELD_EX32(rdesc, MTEDESC, MIDX);
1709 uint32_t wmemidx = FIELD_EX32(wdesc, MTEDESC, MIDX);
1710 bool forwards = true;
1711 uint64_t toaddr, fromaddr, copysize, step;
1712
1713 check_mops_enabled(env, ra);
1714
1715 /* We choose to NOP out "no data to copy" before consistency checks */
1716 if (env->xregs[rn] == 0) {
1717 return;
1718 }
1719
1720 check_mops_wrong_option(env, syndrome, ra);
1721
1722 if (move) {
1723 forwards = (int64_t)env->xregs[rn] < 0;
1724 }
1725
1726 if (forwards) {
1727 toaddr = env->xregs[rd] + env->xregs[rn];
1728 fromaddr = env->xregs[rs] + env->xregs[rn];
1729 copysize = -env->xregs[rn];
1730 } else {
1731 copysize = env->xregs[rn];
1732 /* This toaddr and fromaddr point to the *last* byte to copy */
1733 toaddr = env->xregs[rd] + copysize - 1;
1734 fromaddr = env->xregs[rs] + copysize - 1;
1735 }
1736
1737 if (!mte_checks_needed(fromaddr, rdesc)) {
1738 rdesc = 0;
1739 }
1740 if (!mte_checks_needed(toaddr, wdesc)) {
1741 wdesc = 0;
1742 }
1743
1744 /* Our implementation has no particular parameter requirements for CPYM */
1745
1746 /* Do the actual memmove */
1747 if (forwards) {
1748 while (copysize >= TARGET_PAGE_SIZE) {
1749 step = copy_step(env, toaddr, fromaddr, copysize,
1750 wmemidx, rmemidx, &wdesc, &rdesc, ra);
1751 toaddr += step;
1752 fromaddr += step;
1753 copysize -= step;
1754 env->xregs[rn] = -copysize;
1755 if (copysize >= TARGET_PAGE_SIZE &&
1756 unlikely(cpu_loop_exit_requested(cs))) {
1757 cpu_loop_exit_restore(cs, ra);
1758 }
1759 }
1760 } else {
1761 while (copysize >= TARGET_PAGE_SIZE) {
1762 step = copy_step_rev(env, toaddr, fromaddr, copysize,
1763 wmemidx, rmemidx, &wdesc, &rdesc, ra);
1764 toaddr -= step;
1765 fromaddr -= step;
1766 copysize -= step;
1767 env->xregs[rn] = copysize;
1768 if (copysize >= TARGET_PAGE_SIZE &&
1769 unlikely(cpu_loop_exit_requested(cs))) {
1770 cpu_loop_exit_restore(cs, ra);
1771 }
1772 }
1773 }
1774 }
1775
void HELPER(cpym)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc,
                  uint32_t rdesc)
1778 {
1779 do_cpym(env, syndrome, wdesc, rdesc, true, GETPC());
1780 }
1781
void HELPER(cpyfm)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc,
                   uint32_t rdesc)
1784 {
1785 do_cpym(env, syndrome, wdesc, rdesc, false, GETPC());
1786 }
1787
static void do_cpye(CPUARMState *env, uint32_t syndrome, uint32_t wdesc,
                    uint32_t rdesc, uint32_t move, uintptr_t ra)
1790 {
1791 /* Epilogue: do the last partial page */
1792 int rd = mops_destreg(syndrome);
1793 int rs = mops_srcreg(syndrome);
1794 int rn = mops_sizereg(syndrome);
1795 uint32_t rmemidx = FIELD_EX32(rdesc, MTEDESC, MIDX);
1796 uint32_t wmemidx = FIELD_EX32(wdesc, MTEDESC, MIDX);
1797 bool forwards = true;
1798 uint64_t toaddr, fromaddr, copysize, step;
1799
1800 check_mops_enabled(env, ra);
1801
1802 /* We choose to NOP out "no data to copy" before consistency checks */
1803 if (env->xregs[rn] == 0) {
1804 return;
1805 }
1806
1807 check_mops_wrong_option(env, syndrome, ra);
1808
1809 if (move) {
1810 forwards = (int64_t)env->xregs[rn] < 0;
1811 }
1812
1813 if (forwards) {
1814 toaddr = env->xregs[rd] + env->xregs[rn];
1815 fromaddr = env->xregs[rs] + env->xregs[rn];
1816 copysize = -env->xregs[rn];
1817 } else {
1818 copysize = env->xregs[rn];
1819 /* This toaddr and fromaddr point to the *last* byte to copy */
1820 toaddr = env->xregs[rd] + copysize - 1;
1821 fromaddr = env->xregs[rs] + copysize - 1;
1822 }
1823
1824 if (!mte_checks_needed(fromaddr, rdesc)) {
1825 rdesc = 0;
1826 }
1827 if (!mte_checks_needed(toaddr, wdesc)) {
1828 wdesc = 0;
1829 }
1830
    /* Check the size; we don't want to have to do a check-for-interrupts */
1832 if (copysize >= TARGET_PAGE_SIZE) {
1833 raise_exception_ra(env, EXCP_UDEF, syndrome,
1834 mops_mismatch_exception_target_el(env), ra);
1835 }
1836
1837 /* Do the actual memmove */
1838 if (forwards) {
1839 while (copysize > 0) {
1840 step = copy_step(env, toaddr, fromaddr, copysize,
1841 wmemidx, rmemidx, &wdesc, &rdesc, ra);
1842 toaddr += step;
1843 fromaddr += step;
1844 copysize -= step;
1845 env->xregs[rn] = -copysize;
1846 }
1847 } else {
1848 while (copysize > 0) {
1849 step = copy_step_rev(env, toaddr, fromaddr, copysize,
1850 wmemidx, rmemidx, &wdesc, &rdesc, ra);
1851 toaddr -= step;
1852 fromaddr -= step;
1853 copysize -= step;
1854 env->xregs[rn] = copysize;
1855 }
1856 }
1857 }
1858
void HELPER(cpye)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc,
                  uint32_t rdesc)
1861 {
1862 do_cpye(env, syndrome, wdesc, rdesc, true, GETPC());
1863 }
1864
void HELPER(cpyfe)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc,
                   uint32_t rdesc)
1867 {
1868 do_cpye(env, syndrome, wdesc, rdesc, false, GETPC());
1869 }
1870