/*
 * M-profile MVE Operations
 *
 * Copyright (c) 2021 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "vec_internal.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "fpu/softfloat.h"
#include "crypto/clmul.h"

static uint16_t mve_eci_mask(CPUARMState *env)
{
    /*
     * Return the mask of which elements in the MVE vector correspond
     * to beats being executed. The mask has 1 bits for executed lanes
     * and 0 bits where ECI says this beat was already executed.
     */
    int eci;

    if ((env->condexec_bits & 0xf) != 0) {
        return 0xffff;
    }

    eci = env->condexec_bits >> 4;
    switch (eci) {
    case ECI_NONE:
        return 0xffff;
    case ECI_A0:
        return 0xfff0;
    case ECI_A0A1:
        return 0xff00;
    case ECI_A0A1A2:
    case ECI_A0A1A2B0:
        return 0xf000;
    default:
        g_assert_not_reached();
    }
}

static uint16_t mve_element_mask(CPUARMState *env)
{
    /*
     * Return the mask of which elements in the MVE vector should be
     * updated. This is a combination of multiple things:
     *  (1) by default, we update every lane in the vector
     *  (2) VPT predication stores its state in the VPR register;
     *  (3) low-overhead-branch tail predication will mask out part
     *      of the vector on the final iteration of the loop
     *  (4) if EPSR.ECI is set then we must execute only some beats
     *      of the insn
     * We combine all these into a 16-bit result with the same semantics
     * as VPR.P0: 0 to mask the lane, 1 if it is active.
     * 8-bit vector ops will look at all bits of the result;
     * 16-bit ops will look at bits 0, 2, 4, ...;
     * 32-bit ops will look at bits 0, 4, 8 and 12.
     * Compare pseudocode GetCurInstrBeat(), though that only returns
     * the 4-bit slice of the mask corresponding to a single beat.
     */
    uint16_t mask = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0);

    if (!(env->v7m.vpr & R_V7M_VPR_MASK01_MASK)) {
        mask |= 0xff;
    }
    if (!(env->v7m.vpr & R_V7M_VPR_MASK23_MASK)) {
        mask |= 0xff00;
    }

    if (env->v7m.ltpsize < 4 &&
        env->regs[14] <= (1 << (4 - env->v7m.ltpsize))) {
        /*
         * Tail predication active, and this is the last loop iteration.
         * The element size is (1 << ltpsize), and we only want to process
         * loopcount elements, so we want to retain the least significant
         * (loopcount * esize) predicate bits and zero out bits above that.
         */
        int masklen = env->regs[14] << env->v7m.ltpsize;
        assert(masklen <= 16);
        uint16_t ltpmask = masklen ? MAKE_64BIT_MASK(0, masklen) : 0;
        mask &= ltpmask;
    }

    /*
     * ECI bits indicate which beats are already executed;
     * we handle this by effectively predicating them out.
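     * As an illustrative example (not from the pseudocode): if an insn was
     * interrupted after beats 0 and 1, EPSR.ECI holds ECI_A0A1 and
     * mve_eci_mask() returns 0xff00, so on re-execution only the lanes in
     * the two remaining beats are touched.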
105 */ 106 mask &= mve_eci_mask(env); 107 return mask; 108 } 109 110 static void mve_advance_vpt(CPUARMState *env) 111 { 112 /* Advance the VPT and ECI state if necessary */ 113 uint32_t vpr = env->v7m.vpr; 114 unsigned mask01, mask23; 115 uint16_t inv_mask; 116 uint16_t eci_mask = mve_eci_mask(env); 117 118 if ((env->condexec_bits & 0xf) == 0) { 119 env->condexec_bits = (env->condexec_bits == (ECI_A0A1A2B0 << 4)) ? 120 (ECI_A0 << 4) : (ECI_NONE << 4); 121 } 122 123 if (!(vpr & (R_V7M_VPR_MASK01_MASK | R_V7M_VPR_MASK23_MASK))) { 124 /* VPT not enabled, nothing to do */ 125 return; 126 } 127 128 /* Invert P0 bits if needed, but only for beats we actually executed */ 129 mask01 = FIELD_EX32(vpr, V7M_VPR, MASK01); 130 mask23 = FIELD_EX32(vpr, V7M_VPR, MASK23); 131 /* Start by assuming we invert all bits corresponding to executed beats */ 132 inv_mask = eci_mask; 133 if (mask01 <= 8) { 134 /* MASK01 says don't invert low half of P0 */ 135 inv_mask &= ~0xff; 136 } 137 if (mask23 <= 8) { 138 /* MASK23 says don't invert high half of P0 */ 139 inv_mask &= ~0xff00; 140 } 141 vpr ^= inv_mask; 142 /* Only update MASK01 if beat 1 executed */ 143 if (eci_mask & 0xf0) { 144 vpr = FIELD_DP32(vpr, V7M_VPR, MASK01, mask01 << 1); 145 } 146 /* Beat 3 always executes, so update MASK23 */ 147 vpr = FIELD_DP32(vpr, V7M_VPR, MASK23, mask23 << 1); 148 env->v7m.vpr = vpr; 149 } 150 151 /* For loads, predicated lanes are zeroed instead of keeping their old values */ 152 #define DO_VLDR(OP, MSIZE, LDTYPE, ESIZE, TYPE) \ 153 void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \ 154 { \ 155 TYPE *d = vd; \ 156 uint16_t mask = mve_element_mask(env); \ 157 uint16_t eci_mask = mve_eci_mask(env); \ 158 unsigned b, e; \ 159 /* \ 160 * R_SXTM allows the dest reg to become UNKNOWN for abandoned \ 161 * beats so we don't care if we update part of the dest and \ 162 * then take an exception. \ 163 */ \ 164 for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \ 165 if (eci_mask & (1 << b)) { \ 166 d[H##ESIZE(e)] = (mask & (1 << b)) ? \ 167 cpu_##LDTYPE##_data_ra(env, addr, GETPC()) : 0; \ 168 } \ 169 addr += MSIZE; \ 170 } \ 171 mve_advance_vpt(env); \ 172 } 173 174 #define DO_VSTR(OP, MSIZE, STTYPE, ESIZE, TYPE) \ 175 void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \ 176 { \ 177 TYPE *d = vd; \ 178 uint16_t mask = mve_element_mask(env); \ 179 unsigned b, e; \ 180 for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \ 181 if (mask & (1 << b)) { \ 182 cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \ 183 } \ 184 addr += MSIZE; \ 185 } \ 186 mve_advance_vpt(env); \ 187 } 188 189 DO_VLDR(vldrb, 1, ldub, 1, uint8_t) 190 DO_VLDR(vldrh, 2, lduw, 2, uint16_t) 191 DO_VLDR(vldrw, 4, ldl, 4, uint32_t) 192 193 DO_VSTR(vstrb, 1, stb, 1, uint8_t) 194 DO_VSTR(vstrh, 2, stw, 2, uint16_t) 195 DO_VSTR(vstrw, 4, stl, 4, uint32_t) 196 197 DO_VLDR(vldrb_sh, 1, ldsb, 2, int16_t) 198 DO_VLDR(vldrb_sw, 1, ldsb, 4, int32_t) 199 DO_VLDR(vldrb_uh, 1, ldub, 2, uint16_t) 200 DO_VLDR(vldrb_uw, 1, ldub, 4, uint32_t) 201 DO_VLDR(vldrh_sw, 2, ldsw, 4, int32_t) 202 DO_VLDR(vldrh_uw, 2, lduw, 4, uint32_t) 203 204 DO_VSTR(vstrb_h, 1, stb, 2, int16_t) 205 DO_VSTR(vstrb_w, 1, stb, 4, int32_t) 206 DO_VSTR(vstrh_w, 2, stw, 4, int32_t) 207 208 #undef DO_VLDR 209 #undef DO_VSTR 210 211 /* 212 * Gather loads/scatter stores. Here each element of Qm specifies 213 * an offset to use from the base register Rm. In the _os_ versions 214 * that offset is scaled by the element size. 
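 * (Illustrative example: for a halfword _os_ gather, an offset element
 * of 3 yields an access at base + 6, since ADDR_ADD_OSH scales the
 * offset by 2.)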
215 * For loads, predicated lanes are zeroed instead of retaining 216 * their previous values. 217 */ 218 #define DO_VLDR_SG(OP, LDTYPE, ESIZE, TYPE, OFFTYPE, ADDRFN, WB) \ 219 void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \ 220 uint32_t base) \ 221 { \ 222 TYPE *d = vd; \ 223 OFFTYPE *m = vm; \ 224 uint16_t mask = mve_element_mask(env); \ 225 uint16_t eci_mask = mve_eci_mask(env); \ 226 unsigned e; \ 227 uint32_t addr; \ 228 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) { \ 229 if (!(eci_mask & 1)) { \ 230 continue; \ 231 } \ 232 addr = ADDRFN(base, m[H##ESIZE(e)]); \ 233 d[H##ESIZE(e)] = (mask & 1) ? \ 234 cpu_##LDTYPE##_data_ra(env, addr, GETPC()) : 0; \ 235 if (WB) { \ 236 m[H##ESIZE(e)] = addr; \ 237 } \ 238 } \ 239 mve_advance_vpt(env); \ 240 } 241 242 /* We know here TYPE is unsigned so always the same as the offset type */ 243 #define DO_VSTR_SG(OP, STTYPE, ESIZE, TYPE, ADDRFN, WB) \ 244 void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \ 245 uint32_t base) \ 246 { \ 247 TYPE *d = vd; \ 248 TYPE *m = vm; \ 249 uint16_t mask = mve_element_mask(env); \ 250 uint16_t eci_mask = mve_eci_mask(env); \ 251 unsigned e; \ 252 uint32_t addr; \ 253 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) { \ 254 if (!(eci_mask & 1)) { \ 255 continue; \ 256 } \ 257 addr = ADDRFN(base, m[H##ESIZE(e)]); \ 258 if (mask & 1) { \ 259 cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \ 260 } \ 261 if (WB) { \ 262 m[H##ESIZE(e)] = addr; \ 263 } \ 264 } \ 265 mve_advance_vpt(env); \ 266 } 267 268 /* 269 * 64-bit accesses are slightly different: they are done as two 32-bit 270 * accesses, controlled by the predicate mask for the relevant beat, 271 * and with a single 32-bit offset in the first of the two Qm elements. 272 * Note that for QEMU our IMPDEF AIRCR.ENDIANNESS is always 0 (little). 273 * Address writeback happens on the odd beats and updates the address 274 * stored in the even-beat element. 275 */ 276 #define DO_VLDR64_SG(OP, ADDRFN, WB) \ 277 void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \ 278 uint32_t base) \ 279 { \ 280 uint32_t *d = vd; \ 281 uint32_t *m = vm; \ 282 uint16_t mask = mve_element_mask(env); \ 283 uint16_t eci_mask = mve_eci_mask(env); \ 284 unsigned e; \ 285 uint32_t addr; \ 286 for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) { \ 287 if (!(eci_mask & 1)) { \ 288 continue; \ 289 } \ 290 addr = ADDRFN(base, m[H4(e & ~1)]); \ 291 addr += 4 * (e & 1); \ 292 d[H4(e)] = (mask & 1) ? 
cpu_ldl_data_ra(env, addr, GETPC()) : 0; \ 293 if (WB && (e & 1)) { \ 294 m[H4(e & ~1)] = addr - 4; \ 295 } \ 296 } \ 297 mve_advance_vpt(env); \ 298 } 299 300 #define DO_VSTR64_SG(OP, ADDRFN, WB) \ 301 void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \ 302 uint32_t base) \ 303 { \ 304 uint32_t *d = vd; \ 305 uint32_t *m = vm; \ 306 uint16_t mask = mve_element_mask(env); \ 307 uint16_t eci_mask = mve_eci_mask(env); \ 308 unsigned e; \ 309 uint32_t addr; \ 310 for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) { \ 311 if (!(eci_mask & 1)) { \ 312 continue; \ 313 } \ 314 addr = ADDRFN(base, m[H4(e & ~1)]); \ 315 addr += 4 * (e & 1); \ 316 if (mask & 1) { \ 317 cpu_stl_data_ra(env, addr, d[H4(e)], GETPC()); \ 318 } \ 319 if (WB && (e & 1)) { \ 320 m[H4(e & ~1)] = addr - 4; \ 321 } \ 322 } \ 323 mve_advance_vpt(env); \ 324 } 325 326 #define ADDR_ADD(BASE, OFFSET) ((BASE) + (OFFSET)) 327 #define ADDR_ADD_OSH(BASE, OFFSET) ((BASE) + ((OFFSET) << 1)) 328 #define ADDR_ADD_OSW(BASE, OFFSET) ((BASE) + ((OFFSET) << 2)) 329 #define ADDR_ADD_OSD(BASE, OFFSET) ((BASE) + ((OFFSET) << 3)) 330 331 DO_VLDR_SG(vldrb_sg_sh, ldsb, 2, int16_t, uint16_t, ADDR_ADD, false) 332 DO_VLDR_SG(vldrb_sg_sw, ldsb, 4, int32_t, uint32_t, ADDR_ADD, false) 333 DO_VLDR_SG(vldrh_sg_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD, false) 334 335 DO_VLDR_SG(vldrb_sg_ub, ldub, 1, uint8_t, uint8_t, ADDR_ADD, false) 336 DO_VLDR_SG(vldrb_sg_uh, ldub, 2, uint16_t, uint16_t, ADDR_ADD, false) 337 DO_VLDR_SG(vldrb_sg_uw, ldub, 4, uint32_t, uint32_t, ADDR_ADD, false) 338 DO_VLDR_SG(vldrh_sg_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD, false) 339 DO_VLDR_SG(vldrh_sg_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD, false) 340 DO_VLDR_SG(vldrw_sg_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD, false) 341 DO_VLDR64_SG(vldrd_sg_ud, ADDR_ADD, false) 342 343 DO_VLDR_SG(vldrh_sg_os_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD_OSH, false) 344 DO_VLDR_SG(vldrh_sg_os_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD_OSH, false) 345 DO_VLDR_SG(vldrh_sg_os_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD_OSH, false) 346 DO_VLDR_SG(vldrw_sg_os_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD_OSW, false) 347 DO_VLDR64_SG(vldrd_sg_os_ud, ADDR_ADD_OSD, false) 348 349 DO_VSTR_SG(vstrb_sg_ub, stb, 1, uint8_t, ADDR_ADD, false) 350 DO_VSTR_SG(vstrb_sg_uh, stb, 2, uint16_t, ADDR_ADD, false) 351 DO_VSTR_SG(vstrb_sg_uw, stb, 4, uint32_t, ADDR_ADD, false) 352 DO_VSTR_SG(vstrh_sg_uh, stw, 2, uint16_t, ADDR_ADD, false) 353 DO_VSTR_SG(vstrh_sg_uw, stw, 4, uint32_t, ADDR_ADD, false) 354 DO_VSTR_SG(vstrw_sg_uw, stl, 4, uint32_t, ADDR_ADD, false) 355 DO_VSTR64_SG(vstrd_sg_ud, ADDR_ADD, false) 356 357 DO_VSTR_SG(vstrh_sg_os_uh, stw, 2, uint16_t, ADDR_ADD_OSH, false) 358 DO_VSTR_SG(vstrh_sg_os_uw, stw, 4, uint32_t, ADDR_ADD_OSH, false) 359 DO_VSTR_SG(vstrw_sg_os_uw, stl, 4, uint32_t, ADDR_ADD_OSW, false) 360 DO_VSTR64_SG(vstrd_sg_os_ud, ADDR_ADD_OSD, false) 361 362 DO_VLDR_SG(vldrw_sg_wb_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD, true) 363 DO_VLDR64_SG(vldrd_sg_wb_ud, ADDR_ADD, true) 364 DO_VSTR_SG(vstrw_sg_wb_uw, stl, 4, uint32_t, ADDR_ADD, true) 365 DO_VSTR64_SG(vstrd_sg_wb_ud, ADDR_ADD, true) 366 367 /* 368 * Deinterleaving loads/interleaving stores. 369 * 370 * For these helpers we are passed the index of the first Qreg 371 * (VLD2/VST2 will also access Qn+1, VLD4/VST4 access Qn .. Qn+3) 372 * and the value of the base address register Rn. 373 * The helpers are specialized for pattern and element size, so 374 * for instance vld42h is VLD4 with pattern 2, element size MO_16. 
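 * Each pattern covers a quarter of the whole VLD4/VST4 transfer (or half
 * of a VLD2/VST2), so the full de-interleave or interleave is the
 * combination of all four (or both) patterns; see the offset tables in
 * the helpers below.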
375 * 376 * These insns are beatwise but not predicated, so we must honour ECI, 377 * but need not look at mve_element_mask(). 378 * 379 * The pseudocode implements these insns with multiple memory accesses 380 * of the element size, but rules R_VVVG and R_FXDM permit us to make 381 * one 32-bit memory access per beat. 382 */ 383 #define DO_VLD4B(OP, O1, O2, O3, O4) \ 384 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ 385 uint32_t base) \ 386 { \ 387 int beat, e; \ 388 uint16_t mask = mve_eci_mask(env); \ 389 static const uint8_t off[4] = { O1, O2, O3, O4 }; \ 390 uint32_t addr, data; \ 391 for (beat = 0; beat < 4; beat++, mask >>= 4) { \ 392 if ((mask & 1) == 0) { \ 393 /* ECI says skip this beat */ \ 394 continue; \ 395 } \ 396 addr = base + off[beat] * 4; \ 397 data = cpu_ldl_le_data_ra(env, addr, GETPC()); \ 398 for (e = 0; e < 4; e++, data >>= 8) { \ 399 uint8_t *qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + e); \ 400 qd[H1(off[beat])] = data; \ 401 } \ 402 } \ 403 } 404 405 #define DO_VLD4H(OP, O1, O2) \ 406 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ 407 uint32_t base) \ 408 { \ 409 int beat; \ 410 uint16_t mask = mve_eci_mask(env); \ 411 static const uint8_t off[4] = { O1, O1, O2, O2 }; \ 412 uint32_t addr, data; \ 413 int y; /* y counts 0 2 0 2 */ \ 414 uint16_t *qd; \ 415 for (beat = 0, y = 0; beat < 4; beat++, mask >>= 4, y ^= 2) { \ 416 if ((mask & 1) == 0) { \ 417 /* ECI says skip this beat */ \ 418 continue; \ 419 } \ 420 addr = base + off[beat] * 8 + (beat & 1) * 4; \ 421 data = cpu_ldl_le_data_ra(env, addr, GETPC()); \ 422 qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y); \ 423 qd[H2(off[beat])] = data; \ 424 data >>= 16; \ 425 qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y + 1); \ 426 qd[H2(off[beat])] = data; \ 427 } \ 428 } 429 430 #define DO_VLD4W(OP, O1, O2, O3, O4) \ 431 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ 432 uint32_t base) \ 433 { \ 434 int beat; \ 435 uint16_t mask = mve_eci_mask(env); \ 436 static const uint8_t off[4] = { O1, O2, O3, O4 }; \ 437 uint32_t addr, data; \ 438 uint32_t *qd; \ 439 int y; \ 440 for (beat = 0; beat < 4; beat++, mask >>= 4) { \ 441 if ((mask & 1) == 0) { \ 442 /* ECI says skip this beat */ \ 443 continue; \ 444 } \ 445 addr = base + off[beat] * 4; \ 446 data = cpu_ldl_le_data_ra(env, addr, GETPC()); \ 447 y = (beat + (O1 & 2)) & 3; \ 448 qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + y); \ 449 qd[H4(off[beat] >> 2)] = data; \ 450 } \ 451 } 452 453 DO_VLD4B(vld40b, 0, 1, 10, 11) 454 DO_VLD4B(vld41b, 2, 3, 12, 13) 455 DO_VLD4B(vld42b, 4, 5, 14, 15) 456 DO_VLD4B(vld43b, 6, 7, 8, 9) 457 458 DO_VLD4H(vld40h, 0, 5) 459 DO_VLD4H(vld41h, 1, 6) 460 DO_VLD4H(vld42h, 2, 7) 461 DO_VLD4H(vld43h, 3, 4) 462 463 DO_VLD4W(vld40w, 0, 1, 10, 11) 464 DO_VLD4W(vld41w, 2, 3, 12, 13) 465 DO_VLD4W(vld42w, 4, 5, 14, 15) 466 DO_VLD4W(vld43w, 6, 7, 8, 9) 467 468 #define DO_VLD2B(OP, O1, O2, O3, O4) \ 469 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ 470 uint32_t base) \ 471 { \ 472 int beat, e; \ 473 uint16_t mask = mve_eci_mask(env); \ 474 static const uint8_t off[4] = { O1, O2, O3, O4 }; \ 475 uint32_t addr, data; \ 476 uint8_t *qd; \ 477 for (beat = 0; beat < 4; beat++, mask >>= 4) { \ 478 if ((mask & 1) == 0) { \ 479 /* ECI says skip this beat */ \ 480 continue; \ 481 } \ 482 addr = base + off[beat] * 2; \ 483 data = cpu_ldl_le_data_ra(env, addr, GETPC()); \ 484 for (e = 0; e < 4; e++, data >>= 8) { \ 485 qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + (e & 1)); \ 486 qd[H1(off[beat] + (e >> 1))] = data; \ 487 } \ 488 
} \ 489 } 490 491 #define DO_VLD2H(OP, O1, O2, O3, O4) \ 492 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ 493 uint32_t base) \ 494 { \ 495 int beat; \ 496 uint16_t mask = mve_eci_mask(env); \ 497 static const uint8_t off[4] = { O1, O2, O3, O4 }; \ 498 uint32_t addr, data; \ 499 int e; \ 500 uint16_t *qd; \ 501 for (beat = 0; beat < 4; beat++, mask >>= 4) { \ 502 if ((mask & 1) == 0) { \ 503 /* ECI says skip this beat */ \ 504 continue; \ 505 } \ 506 addr = base + off[beat] * 4; \ 507 data = cpu_ldl_le_data_ra(env, addr, GETPC()); \ 508 for (e = 0; e < 2; e++, data >>= 16) { \ 509 qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + e); \ 510 qd[H2(off[beat])] = data; \ 511 } \ 512 } \ 513 } 514 515 #define DO_VLD2W(OP, O1, O2, O3, O4) \ 516 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ 517 uint32_t base) \ 518 { \ 519 int beat; \ 520 uint16_t mask = mve_eci_mask(env); \ 521 static const uint8_t off[4] = { O1, O2, O3, O4 }; \ 522 uint32_t addr, data; \ 523 uint32_t *qd; \ 524 for (beat = 0; beat < 4; beat++, mask >>= 4) { \ 525 if ((mask & 1) == 0) { \ 526 /* ECI says skip this beat */ \ 527 continue; \ 528 } \ 529 addr = base + off[beat]; \ 530 data = cpu_ldl_le_data_ra(env, addr, GETPC()); \ 531 qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + (beat & 1)); \ 532 qd[H4(off[beat] >> 3)] = data; \ 533 } \ 534 } 535 536 DO_VLD2B(vld20b, 0, 2, 12, 14) 537 DO_VLD2B(vld21b, 4, 6, 8, 10) 538 539 DO_VLD2H(vld20h, 0, 1, 6, 7) 540 DO_VLD2H(vld21h, 2, 3, 4, 5) 541 542 DO_VLD2W(vld20w, 0, 4, 24, 28) 543 DO_VLD2W(vld21w, 8, 12, 16, 20) 544 545 #define DO_VST4B(OP, O1, O2, O3, O4) \ 546 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ 547 uint32_t base) \ 548 { \ 549 int beat, e; \ 550 uint16_t mask = mve_eci_mask(env); \ 551 static const uint8_t off[4] = { O1, O2, O3, O4 }; \ 552 uint32_t addr, data; \ 553 for (beat = 0; beat < 4; beat++, mask >>= 4) { \ 554 if ((mask & 1) == 0) { \ 555 /* ECI says skip this beat */ \ 556 continue; \ 557 } \ 558 addr = base + off[beat] * 4; \ 559 data = 0; \ 560 for (e = 3; e >= 0; e--) { \ 561 uint8_t *qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + e); \ 562 data = (data << 8) | qd[H1(off[beat])]; \ 563 } \ 564 cpu_stl_le_data_ra(env, addr, data, GETPC()); \ 565 } \ 566 } 567 568 #define DO_VST4H(OP, O1, O2) \ 569 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ 570 uint32_t base) \ 571 { \ 572 int beat; \ 573 uint16_t mask = mve_eci_mask(env); \ 574 static const uint8_t off[4] = { O1, O1, O2, O2 }; \ 575 uint32_t addr, data; \ 576 int y; /* y counts 0 2 0 2 */ \ 577 uint16_t *qd; \ 578 for (beat = 0, y = 0; beat < 4; beat++, mask >>= 4, y ^= 2) { \ 579 if ((mask & 1) == 0) { \ 580 /* ECI says skip this beat */ \ 581 continue; \ 582 } \ 583 addr = base + off[beat] * 8 + (beat & 1) * 4; \ 584 qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y); \ 585 data = qd[H2(off[beat])]; \ 586 qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y + 1); \ 587 data |= qd[H2(off[beat])] << 16; \ 588 cpu_stl_le_data_ra(env, addr, data, GETPC()); \ 589 } \ 590 } 591 592 #define DO_VST4W(OP, O1, O2, O3, O4) \ 593 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ 594 uint32_t base) \ 595 { \ 596 int beat; \ 597 uint16_t mask = mve_eci_mask(env); \ 598 static const uint8_t off[4] = { O1, O2, O3, O4 }; \ 599 uint32_t addr, data; \ 600 uint32_t *qd; \ 601 int y; \ 602 for (beat = 0; beat < 4; beat++, mask >>= 4) { \ 603 if ((mask & 1) == 0) { \ 604 /* ECI says skip this beat */ \ 605 continue; \ 606 } \ 607 addr = base + off[beat] * 4; \ 608 y = (beat + (O1 & 2)) & 3; \ 
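            /* y picks which of the 4 Qregs supplies this beat's word */   \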
609 qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + y); \ 610 data = qd[H4(off[beat] >> 2)]; \ 611 cpu_stl_le_data_ra(env, addr, data, GETPC()); \ 612 } \ 613 } 614 615 DO_VST4B(vst40b, 0, 1, 10, 11) 616 DO_VST4B(vst41b, 2, 3, 12, 13) 617 DO_VST4B(vst42b, 4, 5, 14, 15) 618 DO_VST4B(vst43b, 6, 7, 8, 9) 619 620 DO_VST4H(vst40h, 0, 5) 621 DO_VST4H(vst41h, 1, 6) 622 DO_VST4H(vst42h, 2, 7) 623 DO_VST4H(vst43h, 3, 4) 624 625 DO_VST4W(vst40w, 0, 1, 10, 11) 626 DO_VST4W(vst41w, 2, 3, 12, 13) 627 DO_VST4W(vst42w, 4, 5, 14, 15) 628 DO_VST4W(vst43w, 6, 7, 8, 9) 629 630 #define DO_VST2B(OP, O1, O2, O3, O4) \ 631 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ 632 uint32_t base) \ 633 { \ 634 int beat, e; \ 635 uint16_t mask = mve_eci_mask(env); \ 636 static const uint8_t off[4] = { O1, O2, O3, O4 }; \ 637 uint32_t addr, data; \ 638 uint8_t *qd; \ 639 for (beat = 0; beat < 4; beat++, mask >>= 4) { \ 640 if ((mask & 1) == 0) { \ 641 /* ECI says skip this beat */ \ 642 continue; \ 643 } \ 644 addr = base + off[beat] * 2; \ 645 data = 0; \ 646 for (e = 3; e >= 0; e--) { \ 647 qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + (e & 1)); \ 648 data = (data << 8) | qd[H1(off[beat] + (e >> 1))]; \ 649 } \ 650 cpu_stl_le_data_ra(env, addr, data, GETPC()); \ 651 } \ 652 } 653 654 #define DO_VST2H(OP, O1, O2, O3, O4) \ 655 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ 656 uint32_t base) \ 657 { \ 658 int beat; \ 659 uint16_t mask = mve_eci_mask(env); \ 660 static const uint8_t off[4] = { O1, O2, O3, O4 }; \ 661 uint32_t addr, data; \ 662 int e; \ 663 uint16_t *qd; \ 664 for (beat = 0; beat < 4; beat++, mask >>= 4) { \ 665 if ((mask & 1) == 0) { \ 666 /* ECI says skip this beat */ \ 667 continue; \ 668 } \ 669 addr = base + off[beat] * 4; \ 670 data = 0; \ 671 for (e = 1; e >= 0; e--) { \ 672 qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + e); \ 673 data = (data << 16) | qd[H2(off[beat])]; \ 674 } \ 675 cpu_stl_le_data_ra(env, addr, data, GETPC()); \ 676 } \ 677 } 678 679 #define DO_VST2W(OP, O1, O2, O3, O4) \ 680 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ 681 uint32_t base) \ 682 { \ 683 int beat; \ 684 uint16_t mask = mve_eci_mask(env); \ 685 static const uint8_t off[4] = { O1, O2, O3, O4 }; \ 686 uint32_t addr, data; \ 687 uint32_t *qd; \ 688 for (beat = 0; beat < 4; beat++, mask >>= 4) { \ 689 if ((mask & 1) == 0) { \ 690 /* ECI says skip this beat */ \ 691 continue; \ 692 } \ 693 addr = base + off[beat]; \ 694 qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + (beat & 1)); \ 695 data = qd[H4(off[beat] >> 3)]; \ 696 cpu_stl_le_data_ra(env, addr, data, GETPC()); \ 697 } \ 698 } 699 700 DO_VST2B(vst20b, 0, 2, 12, 14) 701 DO_VST2B(vst21b, 4, 6, 8, 10) 702 703 DO_VST2H(vst20h, 0, 1, 6, 7) 704 DO_VST2H(vst21h, 2, 3, 4, 5) 705 706 DO_VST2W(vst20w, 0, 4, 24, 28) 707 DO_VST2W(vst21w, 8, 12, 16, 20) 708 709 /* 710 * The mergemask(D, R, M) macro performs the operation "*D = R" but 711 * storing only the bytes which correspond to 1 bits in M, 712 * leaving other bytes in *D unchanged. We use _Generic 713 * to select the correct implementation based on the type of D. 
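 * For example (illustrative): a 16-bit element whose two predicate bits
 * are 0b01 gets a byte mask of 0x00ff from expand_pred_b(), so
 * mergemask_uh() writes only the low byte of *D, giving the byte-granular
 * predication MVE requires.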
714 */ 715 716 static void mergemask_ub(uint8_t *d, uint8_t r, uint16_t mask) 717 { 718 if (mask & 1) { 719 *d = r; 720 } 721 } 722 723 static void mergemask_sb(int8_t *d, int8_t r, uint16_t mask) 724 { 725 mergemask_ub((uint8_t *)d, r, mask); 726 } 727 728 static void mergemask_uh(uint16_t *d, uint16_t r, uint16_t mask) 729 { 730 uint16_t bmask = expand_pred_b(mask); 731 *d = (*d & ~bmask) | (r & bmask); 732 } 733 734 static void mergemask_sh(int16_t *d, int16_t r, uint16_t mask) 735 { 736 mergemask_uh((uint16_t *)d, r, mask); 737 } 738 739 static void mergemask_uw(uint32_t *d, uint32_t r, uint16_t mask) 740 { 741 uint32_t bmask = expand_pred_b(mask); 742 *d = (*d & ~bmask) | (r & bmask); 743 } 744 745 static void mergemask_sw(int32_t *d, int32_t r, uint16_t mask) 746 { 747 mergemask_uw((uint32_t *)d, r, mask); 748 } 749 750 static void mergemask_uq(uint64_t *d, uint64_t r, uint16_t mask) 751 { 752 uint64_t bmask = expand_pred_b(mask); 753 *d = (*d & ~bmask) | (r & bmask); 754 } 755 756 static void mergemask_sq(int64_t *d, int64_t r, uint16_t mask) 757 { 758 mergemask_uq((uint64_t *)d, r, mask); 759 } 760 761 #define mergemask(D, R, M) \ 762 _Generic(D, \ 763 uint8_t *: mergemask_ub, \ 764 int8_t *: mergemask_sb, \ 765 uint16_t *: mergemask_uh, \ 766 int16_t *: mergemask_sh, \ 767 uint32_t *: mergemask_uw, \ 768 int32_t *: mergemask_sw, \ 769 uint64_t *: mergemask_uq, \ 770 int64_t *: mergemask_sq)(D, R, M) 771 772 void HELPER(mve_vdup)(CPUARMState *env, void *vd, uint32_t val) 773 { 774 /* 775 * The generated code already replicated an 8 or 16 bit constant 776 * into the 32-bit value, so we only need to write the 32-bit 777 * value to all elements of the Qreg, allowing for predication. 778 */ 779 uint32_t *d = vd; 780 uint16_t mask = mve_element_mask(env); 781 unsigned e; 782 for (e = 0; e < 16 / 4; e++, mask >>= 4) { 783 mergemask(&d[H4(e)], val, mask); 784 } 785 mve_advance_vpt(env); 786 } 787 788 #define DO_1OP(OP, ESIZE, TYPE, FN) \ 789 void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \ 790 { \ 791 TYPE *d = vd, *m = vm; \ 792 uint16_t mask = mve_element_mask(env); \ 793 unsigned e; \ 794 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 795 mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)]), mask); \ 796 } \ 797 mve_advance_vpt(env); \ 798 } 799 800 #define DO_CLS_B(N) (clrsb32(N) - 24) 801 #define DO_CLS_H(N) (clrsb32(N) - 16) 802 803 DO_1OP(vclsb, 1, int8_t, DO_CLS_B) 804 DO_1OP(vclsh, 2, int16_t, DO_CLS_H) 805 DO_1OP(vclsw, 4, int32_t, clrsb32) 806 807 #define DO_CLZ_B(N) (clz32(N) - 24) 808 #define DO_CLZ_H(N) (clz32(N) - 16) 809 810 DO_1OP(vclzb, 1, uint8_t, DO_CLZ_B) 811 DO_1OP(vclzh, 2, uint16_t, DO_CLZ_H) 812 DO_1OP(vclzw, 4, uint32_t, clz32) 813 814 DO_1OP(vrev16b, 2, uint16_t, bswap16) 815 DO_1OP(vrev32b, 4, uint32_t, bswap32) 816 DO_1OP(vrev32h, 4, uint32_t, hswap32) 817 DO_1OP(vrev64b, 8, uint64_t, bswap64) 818 DO_1OP(vrev64h, 8, uint64_t, hswap64) 819 DO_1OP(vrev64w, 8, uint64_t, wswap64) 820 821 #define DO_NOT(N) (~(N)) 822 823 DO_1OP(vmvn, 8, uint64_t, DO_NOT) 824 825 #define DO_ABS(N) ((N) < 0 ? 
-(N) : (N)) 826 #define DO_FABSH(N) ((N) & dup_const(MO_16, 0x7fff)) 827 #define DO_FABSS(N) ((N) & dup_const(MO_32, 0x7fffffff)) 828 829 DO_1OP(vabsb, 1, int8_t, DO_ABS) 830 DO_1OP(vabsh, 2, int16_t, DO_ABS) 831 DO_1OP(vabsw, 4, int32_t, DO_ABS) 832 833 /* We can do these 64 bits at a time */ 834 DO_1OP(vfabsh, 8, uint64_t, DO_FABSH) 835 DO_1OP(vfabss, 8, uint64_t, DO_FABSS) 836 837 #define DO_NEG(N) (-(N)) 838 #define DO_FNEGH(N) ((N) ^ dup_const(MO_16, 0x8000)) 839 #define DO_FNEGS(N) ((N) ^ dup_const(MO_32, 0x80000000)) 840 841 DO_1OP(vnegb, 1, int8_t, DO_NEG) 842 DO_1OP(vnegh, 2, int16_t, DO_NEG) 843 DO_1OP(vnegw, 4, int32_t, DO_NEG) 844 845 /* We can do these 64 bits at a time */ 846 DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH) 847 DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS) 848 849 /* 850 * 1 operand immediates: Vda is destination and possibly also one source. 851 * All these insns work at 64-bit widths. 852 */ 853 #define DO_1OP_IMM(OP, FN) \ 854 void HELPER(mve_##OP)(CPUARMState *env, void *vda, uint64_t imm) \ 855 { \ 856 uint64_t *da = vda; \ 857 uint16_t mask = mve_element_mask(env); \ 858 unsigned e; \ 859 for (e = 0; e < 16 / 8; e++, mask >>= 8) { \ 860 mergemask(&da[H8(e)], FN(da[H8(e)], imm), mask); \ 861 } \ 862 mve_advance_vpt(env); \ 863 } 864 865 #define DO_MOVI(N, I) (I) 866 #define DO_ANDI(N, I) ((N) & (I)) 867 #define DO_ORRI(N, I) ((N) | (I)) 868 869 DO_1OP_IMM(vmovi, DO_MOVI) 870 DO_1OP_IMM(vandi, DO_ANDI) 871 DO_1OP_IMM(vorri, DO_ORRI) 872 873 #define DO_2OP(OP, ESIZE, TYPE, FN) \ 874 void HELPER(glue(mve_, OP))(CPUARMState *env, \ 875 void *vd, void *vn, void *vm) \ 876 { \ 877 TYPE *d = vd, *n = vn, *m = vm; \ 878 uint16_t mask = mve_element_mask(env); \ 879 unsigned e; \ 880 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 881 mergemask(&d[H##ESIZE(e)], \ 882 FN(n[H##ESIZE(e)], m[H##ESIZE(e)]), mask); \ 883 } \ 884 mve_advance_vpt(env); \ 885 } 886 887 /* provide unsigned 2-op helpers for all sizes */ 888 #define DO_2OP_U(OP, FN) \ 889 DO_2OP(OP##b, 1, uint8_t, FN) \ 890 DO_2OP(OP##h, 2, uint16_t, FN) \ 891 DO_2OP(OP##w, 4, uint32_t, FN) 892 893 /* provide signed 2-op helpers for all sizes */ 894 #define DO_2OP_S(OP, FN) \ 895 DO_2OP(OP##b, 1, int8_t, FN) \ 896 DO_2OP(OP##h, 2, int16_t, FN) \ 897 DO_2OP(OP##w, 4, int32_t, FN) 898 899 /* 900 * "Long" operations where two half-sized inputs (taken from either the 901 * top or the bottom of the input vector) produce a double-width result. 902 * Here ESIZE, TYPE are for the input, and LESIZE, LTYPE for the output. 
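 * For example, vmullbsh below multiplies the even-numbered (bottom)
 * int16_t elements of the inputs to produce int32_t results, i.e.
 * ESIZE/TYPE are 2/int16_t and LESIZE/LTYPE are 4/int32_t.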
903 */ 904 #define DO_2OP_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \ 905 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \ 906 { \ 907 LTYPE *d = vd; \ 908 TYPE *n = vn, *m = vm; \ 909 uint16_t mask = mve_element_mask(env); \ 910 unsigned le; \ 911 for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ 912 LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)], \ 913 m[H##ESIZE(le * 2 + TOP)]); \ 914 mergemask(&d[H##LESIZE(le)], r, mask); \ 915 } \ 916 mve_advance_vpt(env); \ 917 } 918 919 #define DO_2OP_SAT(OP, ESIZE, TYPE, FN) \ 920 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \ 921 { \ 922 TYPE *d = vd, *n = vn, *m = vm; \ 923 uint16_t mask = mve_element_mask(env); \ 924 unsigned e; \ 925 bool qc = false; \ 926 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 927 bool sat = false; \ 928 TYPE r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], &sat); \ 929 mergemask(&d[H##ESIZE(e)], r, mask); \ 930 qc |= sat & mask & 1; \ 931 } \ 932 if (qc) { \ 933 env->vfp.qc[0] = qc; \ 934 } \ 935 mve_advance_vpt(env); \ 936 } 937 938 /* provide unsigned 2-op helpers for all sizes */ 939 #define DO_2OP_SAT_U(OP, FN) \ 940 DO_2OP_SAT(OP##b, 1, uint8_t, FN) \ 941 DO_2OP_SAT(OP##h, 2, uint16_t, FN) \ 942 DO_2OP_SAT(OP##w, 4, uint32_t, FN) 943 944 /* provide signed 2-op helpers for all sizes */ 945 #define DO_2OP_SAT_S(OP, FN) \ 946 DO_2OP_SAT(OP##b, 1, int8_t, FN) \ 947 DO_2OP_SAT(OP##h, 2, int16_t, FN) \ 948 DO_2OP_SAT(OP##w, 4, int32_t, FN) 949 950 #define DO_AND(N, M) ((N) & (M)) 951 #define DO_BIC(N, M) ((N) & ~(M)) 952 #define DO_ORR(N, M) ((N) | (M)) 953 #define DO_ORN(N, M) ((N) | ~(M)) 954 #define DO_EOR(N, M) ((N) ^ (M)) 955 956 DO_2OP(vand, 8, uint64_t, DO_AND) 957 DO_2OP(vbic, 8, uint64_t, DO_BIC) 958 DO_2OP(vorr, 8, uint64_t, DO_ORR) 959 DO_2OP(vorn, 8, uint64_t, DO_ORN) 960 DO_2OP(veor, 8, uint64_t, DO_EOR) 961 962 #define DO_ADD(N, M) ((N) + (M)) 963 #define DO_SUB(N, M) ((N) - (M)) 964 #define DO_MUL(N, M) ((N) * (M)) 965 966 DO_2OP_U(vadd, DO_ADD) 967 DO_2OP_U(vsub, DO_SUB) 968 DO_2OP_U(vmul, DO_MUL) 969 970 DO_2OP_L(vmullbsb, 0, 1, int8_t, 2, int16_t, DO_MUL) 971 DO_2OP_L(vmullbsh, 0, 2, int16_t, 4, int32_t, DO_MUL) 972 DO_2OP_L(vmullbsw, 0, 4, int32_t, 8, int64_t, DO_MUL) 973 DO_2OP_L(vmullbub, 0, 1, uint8_t, 2, uint16_t, DO_MUL) 974 DO_2OP_L(vmullbuh, 0, 2, uint16_t, 4, uint32_t, DO_MUL) 975 DO_2OP_L(vmullbuw, 0, 4, uint32_t, 8, uint64_t, DO_MUL) 976 977 DO_2OP_L(vmulltsb, 1, 1, int8_t, 2, int16_t, DO_MUL) 978 DO_2OP_L(vmulltsh, 1, 2, int16_t, 4, int32_t, DO_MUL) 979 DO_2OP_L(vmulltsw, 1, 4, int32_t, 8, int64_t, DO_MUL) 980 DO_2OP_L(vmulltub, 1, 1, uint8_t, 2, uint16_t, DO_MUL) 981 DO_2OP_L(vmulltuh, 1, 2, uint16_t, 4, uint32_t, DO_MUL) 982 DO_2OP_L(vmulltuw, 1, 4, uint32_t, 8, uint64_t, DO_MUL) 983 984 /* 985 * Polynomial multiply. We can always do this generating 64 bits 986 * of the result at a time, so we don't need to use DO_2OP_L. 987 */ 988 #define VMULLPW_MASK 0x0000ffff0000ffffULL 989 #define DO_VMULLPBW(N, M) pmull_w((N) & VMULLPW_MASK, (M) & VMULLPW_MASK) 990 #define DO_VMULLPTW(N, M) DO_VMULLPBW((N) >> 16, (M) >> 16) 991 992 DO_2OP(vmullpbh, 8, uint64_t, clmul_8x4_even) 993 DO_2OP(vmullpth, 8, uint64_t, clmul_8x4_odd) 994 DO_2OP(vmullpbw, 8, uint64_t, DO_VMULLPBW) 995 DO_2OP(vmullptw, 8, uint64_t, DO_VMULLPTW) 996 997 /* 998 * Because the computation type is at least twice as large as required, 999 * these work for both signed and unsigned source types. 
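 * For example, do_mulh_b(-1, -1) is 1 >> 8 == 0 for the signed helpers,
 * while the unsigned helpers pass byte operands that promote to
 * non-negative int32_t values, e.g. (255 * 255) >> 8 == 254.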
1000 */ 1001 static inline uint8_t do_mulh_b(int32_t n, int32_t m) 1002 { 1003 return (n * m) >> 8; 1004 } 1005 1006 static inline uint16_t do_mulh_h(int32_t n, int32_t m) 1007 { 1008 return (n * m) >> 16; 1009 } 1010 1011 static inline uint32_t do_mulh_w(int64_t n, int64_t m) 1012 { 1013 return (n * m) >> 32; 1014 } 1015 1016 static inline uint8_t do_rmulh_b(int32_t n, int32_t m) 1017 { 1018 return (n * m + (1U << 7)) >> 8; 1019 } 1020 1021 static inline uint16_t do_rmulh_h(int32_t n, int32_t m) 1022 { 1023 return (n * m + (1U << 15)) >> 16; 1024 } 1025 1026 static inline uint32_t do_rmulh_w(int64_t n, int64_t m) 1027 { 1028 return (n * m + (1U << 31)) >> 32; 1029 } 1030 1031 DO_2OP(vmulhsb, 1, int8_t, do_mulh_b) 1032 DO_2OP(vmulhsh, 2, int16_t, do_mulh_h) 1033 DO_2OP(vmulhsw, 4, int32_t, do_mulh_w) 1034 DO_2OP(vmulhub, 1, uint8_t, do_mulh_b) 1035 DO_2OP(vmulhuh, 2, uint16_t, do_mulh_h) 1036 DO_2OP(vmulhuw, 4, uint32_t, do_mulh_w) 1037 1038 DO_2OP(vrmulhsb, 1, int8_t, do_rmulh_b) 1039 DO_2OP(vrmulhsh, 2, int16_t, do_rmulh_h) 1040 DO_2OP(vrmulhsw, 4, int32_t, do_rmulh_w) 1041 DO_2OP(vrmulhub, 1, uint8_t, do_rmulh_b) 1042 DO_2OP(vrmulhuh, 2, uint16_t, do_rmulh_h) 1043 DO_2OP(vrmulhuw, 4, uint32_t, do_rmulh_w) 1044 1045 #define DO_MAX(N, M) ((N) >= (M) ? (N) : (M)) 1046 #define DO_MIN(N, M) ((N) >= (M) ? (M) : (N)) 1047 1048 DO_2OP_S(vmaxs, DO_MAX) 1049 DO_2OP_U(vmaxu, DO_MAX) 1050 DO_2OP_S(vmins, DO_MIN) 1051 DO_2OP_U(vminu, DO_MIN) 1052 1053 #define DO_ABD(N, M) ((N) >= (M) ? (N) - (M) : (M) - (N)) 1054 1055 DO_2OP_S(vabds, DO_ABD) 1056 DO_2OP_U(vabdu, DO_ABD) 1057 1058 static inline uint32_t do_vhadd_u(uint32_t n, uint32_t m) 1059 { 1060 return ((uint64_t)n + m) >> 1; 1061 } 1062 1063 static inline int32_t do_vhadd_s(int32_t n, int32_t m) 1064 { 1065 return ((int64_t)n + m) >> 1; 1066 } 1067 1068 static inline uint32_t do_vhsub_u(uint32_t n, uint32_t m) 1069 { 1070 return ((uint64_t)n - m) >> 1; 1071 } 1072 1073 static inline int32_t do_vhsub_s(int32_t n, int32_t m) 1074 { 1075 return ((int64_t)n - m) >> 1; 1076 } 1077 1078 DO_2OP_S(vhadds, do_vhadd_s) 1079 DO_2OP_U(vhaddu, do_vhadd_u) 1080 DO_2OP_S(vhsubs, do_vhsub_s) 1081 DO_2OP_U(vhsubu, do_vhsub_u) 1082 1083 #define DO_VSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL) 1084 #define DO_VSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL) 1085 #define DO_VRSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL) 1086 #define DO_VRSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL) 1087 1088 DO_2OP_S(vshls, DO_VSHLS) 1089 DO_2OP_U(vshlu, DO_VSHLU) 1090 DO_2OP_S(vrshls, DO_VRSHLS) 1091 DO_2OP_U(vrshlu, DO_VRSHLU) 1092 1093 #define DO_RHADD_S(N, M) (((int64_t)(N) + (M) + 1) >> 1) 1094 #define DO_RHADD_U(N, M) (((uint64_t)(N) + (M) + 1) >> 1) 1095 1096 DO_2OP_S(vrhadds, DO_RHADD_S) 1097 DO_2OP_U(vrhaddu, DO_RHADD_U) 1098 1099 static void do_vadc(CPUARMState *env, uint32_t *d, uint32_t *n, uint32_t *m, 1100 uint32_t inv, uint32_t carry_in, bool update_flags) 1101 { 1102 uint16_t mask = mve_element_mask(env); 1103 unsigned e; 1104 1105 /* If any additions trigger, we will update flags. */ 1106 if (mask & 0x1111) { 1107 update_flags = true; 1108 } 1109 1110 for (e = 0; e < 16 / 4; e++, mask >>= 4) { 1111 uint64_t r = carry_in; 1112 r += n[H4(e)]; 1113 r += m[H4(e)] ^ inv; 1114 if (mask & 1) { 1115 carry_in = r >> 32; 1116 } 1117 mergemask(&d[H4(e)], r, mask); 1118 } 1119 1120 if (update_flags) { 1121 /* Store C, clear NZV. 
*/ 1122 env->vfp.xregs[ARM_VFP_FPSCR] &= ~FPCR_NZCV_MASK; 1123 env->vfp.xregs[ARM_VFP_FPSCR] |= carry_in * FPCR_C; 1124 } 1125 mve_advance_vpt(env); 1126 } 1127 1128 void HELPER(mve_vadc)(CPUARMState *env, void *vd, void *vn, void *vm) 1129 { 1130 bool carry_in = env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_C; 1131 do_vadc(env, vd, vn, vm, 0, carry_in, false); 1132 } 1133 1134 void HELPER(mve_vsbc)(CPUARMState *env, void *vd, void *vn, void *vm) 1135 { 1136 bool carry_in = env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_C; 1137 do_vadc(env, vd, vn, vm, -1, carry_in, false); 1138 } 1139 1140 1141 void HELPER(mve_vadci)(CPUARMState *env, void *vd, void *vn, void *vm) 1142 { 1143 do_vadc(env, vd, vn, vm, 0, 0, true); 1144 } 1145 1146 void HELPER(mve_vsbci)(CPUARMState *env, void *vd, void *vn, void *vm) 1147 { 1148 do_vadc(env, vd, vn, vm, -1, 1, true); 1149 } 1150 1151 #define DO_VCADD(OP, ESIZE, TYPE, FN0, FN1) \ 1152 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \ 1153 { \ 1154 TYPE *d = vd, *n = vn, *m = vm; \ 1155 uint16_t mask = mve_element_mask(env); \ 1156 unsigned e; \ 1157 TYPE r[16 / ESIZE]; \ 1158 /* Calculate all results first to avoid overwriting inputs */ \ 1159 for (e = 0; e < 16 / ESIZE; e++) { \ 1160 if (!(e & 1)) { \ 1161 r[e] = FN0(n[H##ESIZE(e)], m[H##ESIZE(e + 1)]); \ 1162 } else { \ 1163 r[e] = FN1(n[H##ESIZE(e)], m[H##ESIZE(e - 1)]); \ 1164 } \ 1165 } \ 1166 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 1167 mergemask(&d[H##ESIZE(e)], r[e], mask); \ 1168 } \ 1169 mve_advance_vpt(env); \ 1170 } 1171 1172 #define DO_VCADD_ALL(OP, FN0, FN1) \ 1173 DO_VCADD(OP##b, 1, int8_t, FN0, FN1) \ 1174 DO_VCADD(OP##h, 2, int16_t, FN0, FN1) \ 1175 DO_VCADD(OP##w, 4, int32_t, FN0, FN1) 1176 1177 DO_VCADD_ALL(vcadd90, DO_SUB, DO_ADD) 1178 DO_VCADD_ALL(vcadd270, DO_ADD, DO_SUB) 1179 DO_VCADD_ALL(vhcadd90, do_vhsub_s, do_vhadd_s) 1180 DO_VCADD_ALL(vhcadd270, do_vhadd_s, do_vhsub_s) 1181 1182 static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s) 1183 { 1184 if (val > max) { 1185 *s = true; 1186 return max; 1187 } else if (val < min) { 1188 *s = true; 1189 return min; 1190 } 1191 return val; 1192 } 1193 1194 #define DO_SQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, INT8_MIN, INT8_MAX, s) 1195 #define DO_SQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, INT16_MIN, INT16_MAX, s) 1196 #define DO_SQADD_W(n, m, s) do_sat_bhw((int64_t)n + m, INT32_MIN, INT32_MAX, s) 1197 1198 #define DO_UQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT8_MAX, s) 1199 #define DO_UQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT16_MAX, s) 1200 #define DO_UQADD_W(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT32_MAX, s) 1201 1202 #define DO_SQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, INT8_MIN, INT8_MAX, s) 1203 #define DO_SQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, INT16_MIN, INT16_MAX, s) 1204 #define DO_SQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, INT32_MIN, INT32_MAX, s) 1205 1206 #define DO_UQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT8_MAX, s) 1207 #define DO_UQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT16_MAX, s) 1208 #define DO_UQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT32_MAX, s) 1209 1210 /* 1211 * For QDMULH and QRDMULH we simplify "double and shift by esize" into 1212 * "shift by esize-1", adjusting the QRDMULH rounding constant to match. 
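 * That is, (2 * n * m + (1 << (esize - 1))) >> esize is computed as
 * (n * m + (1 << (esize - 2))) >> (esize - 1), so for bytes the rounding
 * constant is 1 << 6 rather than 1 << 7.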
1213 */ 1214 #define DO_QDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m) >> 7, \ 1215 INT8_MIN, INT8_MAX, s) 1216 #define DO_QDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m) >> 15, \ 1217 INT16_MIN, INT16_MAX, s) 1218 #define DO_QDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m) >> 31, \ 1219 INT32_MIN, INT32_MAX, s) 1220 1221 #define DO_QRDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 6)) >> 7, \ 1222 INT8_MIN, INT8_MAX, s) 1223 #define DO_QRDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 14)) >> 15, \ 1224 INT16_MIN, INT16_MAX, s) 1225 #define DO_QRDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 30)) >> 31, \ 1226 INT32_MIN, INT32_MAX, s) 1227 1228 DO_2OP_SAT(vqdmulhb, 1, int8_t, DO_QDMULH_B) 1229 DO_2OP_SAT(vqdmulhh, 2, int16_t, DO_QDMULH_H) 1230 DO_2OP_SAT(vqdmulhw, 4, int32_t, DO_QDMULH_W) 1231 1232 DO_2OP_SAT(vqrdmulhb, 1, int8_t, DO_QRDMULH_B) 1233 DO_2OP_SAT(vqrdmulhh, 2, int16_t, DO_QRDMULH_H) 1234 DO_2OP_SAT(vqrdmulhw, 4, int32_t, DO_QRDMULH_W) 1235 1236 DO_2OP_SAT(vqaddub, 1, uint8_t, DO_UQADD_B) 1237 DO_2OP_SAT(vqadduh, 2, uint16_t, DO_UQADD_H) 1238 DO_2OP_SAT(vqadduw, 4, uint32_t, DO_UQADD_W) 1239 DO_2OP_SAT(vqaddsb, 1, int8_t, DO_SQADD_B) 1240 DO_2OP_SAT(vqaddsh, 2, int16_t, DO_SQADD_H) 1241 DO_2OP_SAT(vqaddsw, 4, int32_t, DO_SQADD_W) 1242 1243 DO_2OP_SAT(vqsubub, 1, uint8_t, DO_UQSUB_B) 1244 DO_2OP_SAT(vqsubuh, 2, uint16_t, DO_UQSUB_H) 1245 DO_2OP_SAT(vqsubuw, 4, uint32_t, DO_UQSUB_W) 1246 DO_2OP_SAT(vqsubsb, 1, int8_t, DO_SQSUB_B) 1247 DO_2OP_SAT(vqsubsh, 2, int16_t, DO_SQSUB_H) 1248 DO_2OP_SAT(vqsubsw, 4, int32_t, DO_SQSUB_W) 1249 1250 /* 1251 * This wrapper fixes up the impedance mismatch between do_sqrshl_bhs() 1252 * and friends wanting a uint32_t* sat and our needing a bool*. 1253 */ 1254 #define WRAP_QRSHL_HELPER(FN, N, M, ROUND, satp) \ 1255 ({ \ 1256 uint32_t su32 = 0; \ 1257 typeof(N) r = FN(N, (int8_t)(M), sizeof(N) * 8, ROUND, &su32); \ 1258 if (su32) { \ 1259 *satp = true; \ 1260 } \ 1261 r; \ 1262 }) 1263 1264 #define DO_SQSHL_OP(N, M, satp) \ 1265 WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, false, satp) 1266 #define DO_UQSHL_OP(N, M, satp) \ 1267 WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, false, satp) 1268 #define DO_SQRSHL_OP(N, M, satp) \ 1269 WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, true, satp) 1270 #define DO_UQRSHL_OP(N, M, satp) \ 1271 WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, true, satp) 1272 #define DO_SUQSHL_OP(N, M, satp) \ 1273 WRAP_QRSHL_HELPER(do_suqrshl_bhs, N, M, false, satp) 1274 1275 DO_2OP_SAT_S(vqshls, DO_SQSHL_OP) 1276 DO_2OP_SAT_U(vqshlu, DO_UQSHL_OP) 1277 DO_2OP_SAT_S(vqrshls, DO_SQRSHL_OP) 1278 DO_2OP_SAT_U(vqrshlu, DO_UQRSHL_OP) 1279 1280 /* 1281 * Multiply add dual returning high half 1282 * The 'FN' here takes four inputs A, B, C, D, a 0/1 indicator of 1283 * whether to add the rounding constant, and the pointer to the 1284 * saturation flag, and should do "(A * B + C * D) * 2 + rounding constant", 1285 * saturate to twice the input size and return the high half; or 1286 * (A * B - C * D) etc for VQDMLSDH. 
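 * For instance, in the 16-bit VQDMLADH case the helper computes
 * (A * B + C * D) * 2 + (round << 15), saturates that to 32 bits and
 * returns bits [31:16]; see do_vqdmladh_h() below.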
 */
#define DO_VQDMLADH_OP(OP, ESIZE, TYPE, XCHG, ROUND, FN)                \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                void *vm)                               \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        bool qc = false;                                                \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            bool sat = false;                                           \
            if ((e & 1) == XCHG) {                                      \
                TYPE r = FN(n[H##ESIZE(e)],                             \
                            m[H##ESIZE(e - XCHG)],                      \
                            n[H##ESIZE(e + (1 - 2 * XCHG))],            \
                            m[H##ESIZE(e + (1 - XCHG))],                \
                            ROUND, &sat);                               \
                mergemask(&d[H##ESIZE(e)], r, mask);                    \
                qc |= sat & mask & 1;                                   \
            }                                                           \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

static int8_t do_vqdmladh_b(int8_t a, int8_t b, int8_t c, int8_t d,
                            int round, bool *sat)
{
    int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 7);
    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
}

static int16_t do_vqdmladh_h(int16_t a, int16_t b, int16_t c, int16_t d,
                             int round, bool *sat)
{
    int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 15);
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
}

static int32_t do_vqdmladh_w(int32_t a, int32_t b, int32_t c, int32_t d,
                             int round, bool *sat)
{
    int64_t m1 = (int64_t)a * b;
    int64_t m2 = (int64_t)c * d;
    int64_t r;
    /*
     * Architecturally we should do the entire add, double, round
     * and then check for saturation. We do three saturating adds,
     * but we need to be careful about the order. If the first
     * m1 + m2 saturates then it's impossible for the *2+rc to
     * bring it back into the non-saturated range. However, if
     * m1 + m2 is negative then it's possible that doing the doubling
     * would take the intermediate result below INT64_MIN and the
     * addition of the rounding constant then brings it back in range.
     * So we add half the rounding constant before doubling rather
     * than adding the rounding constant after the doubling.
     */
    if (sadd64_overflow(m1, m2, &r) ||
        sadd64_overflow(r, (round << 30), &r) ||
        sadd64_overflow(r, r, &r)) {
        *sat = true;
        return r < 0 ? INT32_MAX : INT32_MIN;
    }
    return r >> 32;
}

static int8_t do_vqdmlsdh_b(int8_t a, int8_t b, int8_t c, int8_t d,
                            int round, bool *sat)
{
    int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 7);
    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
}

static int16_t do_vqdmlsdh_h(int16_t a, int16_t b, int16_t c, int16_t d,
                             int round, bool *sat)
{
    int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 15);
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
}

static int32_t do_vqdmlsdh_w(int32_t a, int32_t b, int32_t c, int32_t d,
                             int round, bool *sat)
{
    int64_t m1 = (int64_t)a * b;
    int64_t m2 = (int64_t)c * d;
    int64_t r;
    /* The same ordering issue as in do_vqdmladh_w applies here too */
    if (ssub64_overflow(m1, m2, &r) ||
        sadd64_overflow(r, (round << 30), &r) ||
        sadd64_overflow(r, r, &r)) {
        *sat = true;
        return r < 0 ?
INT32_MAX : INT32_MIN; 1381 } 1382 return r >> 32; 1383 } 1384 1385 DO_VQDMLADH_OP(vqdmladhb, 1, int8_t, 0, 0, do_vqdmladh_b) 1386 DO_VQDMLADH_OP(vqdmladhh, 2, int16_t, 0, 0, do_vqdmladh_h) 1387 DO_VQDMLADH_OP(vqdmladhw, 4, int32_t, 0, 0, do_vqdmladh_w) 1388 DO_VQDMLADH_OP(vqdmladhxb, 1, int8_t, 1, 0, do_vqdmladh_b) 1389 DO_VQDMLADH_OP(vqdmladhxh, 2, int16_t, 1, 0, do_vqdmladh_h) 1390 DO_VQDMLADH_OP(vqdmladhxw, 4, int32_t, 1, 0, do_vqdmladh_w) 1391 1392 DO_VQDMLADH_OP(vqrdmladhb, 1, int8_t, 0, 1, do_vqdmladh_b) 1393 DO_VQDMLADH_OP(vqrdmladhh, 2, int16_t, 0, 1, do_vqdmladh_h) 1394 DO_VQDMLADH_OP(vqrdmladhw, 4, int32_t, 0, 1, do_vqdmladh_w) 1395 DO_VQDMLADH_OP(vqrdmladhxb, 1, int8_t, 1, 1, do_vqdmladh_b) 1396 DO_VQDMLADH_OP(vqrdmladhxh, 2, int16_t, 1, 1, do_vqdmladh_h) 1397 DO_VQDMLADH_OP(vqrdmladhxw, 4, int32_t, 1, 1, do_vqdmladh_w) 1398 1399 DO_VQDMLADH_OP(vqdmlsdhb, 1, int8_t, 0, 0, do_vqdmlsdh_b) 1400 DO_VQDMLADH_OP(vqdmlsdhh, 2, int16_t, 0, 0, do_vqdmlsdh_h) 1401 DO_VQDMLADH_OP(vqdmlsdhw, 4, int32_t, 0, 0, do_vqdmlsdh_w) 1402 DO_VQDMLADH_OP(vqdmlsdhxb, 1, int8_t, 1, 0, do_vqdmlsdh_b) 1403 DO_VQDMLADH_OP(vqdmlsdhxh, 2, int16_t, 1, 0, do_vqdmlsdh_h) 1404 DO_VQDMLADH_OP(vqdmlsdhxw, 4, int32_t, 1, 0, do_vqdmlsdh_w) 1405 1406 DO_VQDMLADH_OP(vqrdmlsdhb, 1, int8_t, 0, 1, do_vqdmlsdh_b) 1407 DO_VQDMLADH_OP(vqrdmlsdhh, 2, int16_t, 0, 1, do_vqdmlsdh_h) 1408 DO_VQDMLADH_OP(vqrdmlsdhw, 4, int32_t, 0, 1, do_vqdmlsdh_w) 1409 DO_VQDMLADH_OP(vqrdmlsdhxb, 1, int8_t, 1, 1, do_vqdmlsdh_b) 1410 DO_VQDMLADH_OP(vqrdmlsdhxh, 2, int16_t, 1, 1, do_vqdmlsdh_h) 1411 DO_VQDMLADH_OP(vqrdmlsdhxw, 4, int32_t, 1, 1, do_vqdmlsdh_w) 1412 1413 #define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN) \ 1414 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \ 1415 uint32_t rm) \ 1416 { \ 1417 TYPE *d = vd, *n = vn; \ 1418 TYPE m = rm; \ 1419 uint16_t mask = mve_element_mask(env); \ 1420 unsigned e; \ 1421 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 1422 mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m), mask); \ 1423 } \ 1424 mve_advance_vpt(env); \ 1425 } 1426 1427 #define DO_2OP_SAT_SCALAR(OP, ESIZE, TYPE, FN) \ 1428 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \ 1429 uint32_t rm) \ 1430 { \ 1431 TYPE *d = vd, *n = vn; \ 1432 TYPE m = rm; \ 1433 uint16_t mask = mve_element_mask(env); \ 1434 unsigned e; \ 1435 bool qc = false; \ 1436 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 1437 bool sat = false; \ 1438 mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m, &sat), \ 1439 mask); \ 1440 qc |= sat & mask & 1; \ 1441 } \ 1442 if (qc) { \ 1443 env->vfp.qc[0] = qc; \ 1444 } \ 1445 mve_advance_vpt(env); \ 1446 } 1447 1448 /* "accumulating" version where FN takes d as well as n and m */ 1449 #define DO_2OP_ACC_SCALAR(OP, ESIZE, TYPE, FN) \ 1450 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \ 1451 uint32_t rm) \ 1452 { \ 1453 TYPE *d = vd, *n = vn; \ 1454 TYPE m = rm; \ 1455 uint16_t mask = mve_element_mask(env); \ 1456 unsigned e; \ 1457 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 1458 mergemask(&d[H##ESIZE(e)], \ 1459 FN(d[H##ESIZE(e)], n[H##ESIZE(e)], m), mask); \ 1460 } \ 1461 mve_advance_vpt(env); \ 1462 } 1463 1464 #define DO_2OP_SAT_ACC_SCALAR(OP, ESIZE, TYPE, FN) \ 1465 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \ 1466 uint32_t rm) \ 1467 { \ 1468 TYPE *d = vd, *n = vn; \ 1469 TYPE m = rm; \ 1470 uint16_t mask = mve_element_mask(env); \ 1471 unsigned e; \ 1472 bool qc = false; \ 1473 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 
            bool sat = false;                                           \
            mergemask(&d[H##ESIZE(e)],                                  \
                      FN(d[H##ESIZE(e)], n[H##ESIZE(e)], m, &sat),      \
                      mask);                                            \
            qc |= sat & mask & 1;                                       \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* provide unsigned 2-op scalar helpers for all sizes */
#define DO_2OP_SCALAR_U(OP, FN)                 \
    DO_2OP_SCALAR(OP##b, 1, uint8_t, FN)        \
    DO_2OP_SCALAR(OP##h, 2, uint16_t, FN)       \
    DO_2OP_SCALAR(OP##w, 4, uint32_t, FN)
#define DO_2OP_SCALAR_S(OP, FN)                 \
    DO_2OP_SCALAR(OP##b, 1, int8_t, FN)         \
    DO_2OP_SCALAR(OP##h, 2, int16_t, FN)        \
    DO_2OP_SCALAR(OP##w, 4, int32_t, FN)

#define DO_2OP_ACC_SCALAR_U(OP, FN)             \
    DO_2OP_ACC_SCALAR(OP##b, 1, uint8_t, FN)    \
    DO_2OP_ACC_SCALAR(OP##h, 2, uint16_t, FN)   \
    DO_2OP_ACC_SCALAR(OP##w, 4, uint32_t, FN)

DO_2OP_SCALAR_U(vadd_scalar, DO_ADD)
DO_2OP_SCALAR_U(vsub_scalar, DO_SUB)
DO_2OP_SCALAR_U(vmul_scalar, DO_MUL)
DO_2OP_SCALAR_S(vhadds_scalar, do_vhadd_s)
DO_2OP_SCALAR_U(vhaddu_scalar, do_vhadd_u)
DO_2OP_SCALAR_S(vhsubs_scalar, do_vhsub_s)
DO_2OP_SCALAR_U(vhsubu_scalar, do_vhsub_u)

DO_2OP_SAT_SCALAR(vqaddu_scalarb, 1, uint8_t, DO_UQADD_B)
DO_2OP_SAT_SCALAR(vqaddu_scalarh, 2, uint16_t, DO_UQADD_H)
DO_2OP_SAT_SCALAR(vqaddu_scalarw, 4, uint32_t, DO_UQADD_W)
DO_2OP_SAT_SCALAR(vqadds_scalarb, 1, int8_t, DO_SQADD_B)
DO_2OP_SAT_SCALAR(vqadds_scalarh, 2, int16_t, DO_SQADD_H)
DO_2OP_SAT_SCALAR(vqadds_scalarw, 4, int32_t, DO_SQADD_W)

DO_2OP_SAT_SCALAR(vqsubu_scalarb, 1, uint8_t, DO_UQSUB_B)
DO_2OP_SAT_SCALAR(vqsubu_scalarh, 2, uint16_t, DO_UQSUB_H)
DO_2OP_SAT_SCALAR(vqsubu_scalarw, 4, uint32_t, DO_UQSUB_W)
DO_2OP_SAT_SCALAR(vqsubs_scalarb, 1, int8_t, DO_SQSUB_B)
DO_2OP_SAT_SCALAR(vqsubs_scalarh, 2, int16_t, DO_SQSUB_H)
DO_2OP_SAT_SCALAR(vqsubs_scalarw, 4, int32_t, DO_SQSUB_W)

DO_2OP_SAT_SCALAR(vqdmulh_scalarb, 1, int8_t, DO_QDMULH_B)
DO_2OP_SAT_SCALAR(vqdmulh_scalarh, 2, int16_t, DO_QDMULH_H)
DO_2OP_SAT_SCALAR(vqdmulh_scalarw, 4, int32_t, DO_QDMULH_W)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarb, 1, int8_t, DO_QRDMULH_B)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarh, 2, int16_t, DO_QRDMULH_H)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarw, 4, int32_t, DO_QRDMULH_W)

static int8_t do_vqdmlah_b(int8_t a, int8_t b, int8_t c, int round, bool *sat)
{
    int64_t r = (int64_t)a * b * 2 + ((int64_t)c << 8) + (round << 7);
    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
}

static int16_t do_vqdmlah_h(int16_t a, int16_t b, int16_t c,
                            int round, bool *sat)
{
    int64_t r = (int64_t)a * b * 2 + ((int64_t)c << 16) + (round << 15);
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
}

static int32_t do_vqdmlah_w(int32_t a, int32_t b, int32_t c,
                            int round, bool *sat)
{
    /*
     * Architecturally we should do the entire add, double, round
     * and then check for saturation. We do three saturating adds,
     * but we need to be careful about the order. If the first
     * m1 + m2 saturates then it's impossible for the *2+rc to
     * bring it back into the non-saturated range. However, if
     * m1 + m2 is negative then it's possible that doing the doubling
     * would take the intermediate result below INT64_MIN and the
     * addition of the rounding constant then brings it back in range.
1555 * So we add half the rounding constant and half the "c << esize" 1556 * before doubling rather than adding the rounding constant after 1557 * the doubling. 1558 */ 1559 int64_t m1 = (int64_t)a * b; 1560 int64_t m2 = (int64_t)c << 31; 1561 int64_t r; 1562 if (sadd64_overflow(m1, m2, &r) || 1563 sadd64_overflow(r, (round << 30), &r) || 1564 sadd64_overflow(r, r, &r)) { 1565 *sat = true; 1566 return r < 0 ? INT32_MAX : INT32_MIN; 1567 } 1568 return r >> 32; 1569 } 1570 1571 /* 1572 * The *MLAH insns are vector * scalar + vector; 1573 * the *MLASH insns are vector * vector + scalar 1574 */ 1575 #define DO_VQDMLAH_B(D, N, M, S) do_vqdmlah_b(N, M, D, 0, S) 1576 #define DO_VQDMLAH_H(D, N, M, S) do_vqdmlah_h(N, M, D, 0, S) 1577 #define DO_VQDMLAH_W(D, N, M, S) do_vqdmlah_w(N, M, D, 0, S) 1578 #define DO_VQRDMLAH_B(D, N, M, S) do_vqdmlah_b(N, M, D, 1, S) 1579 #define DO_VQRDMLAH_H(D, N, M, S) do_vqdmlah_h(N, M, D, 1, S) 1580 #define DO_VQRDMLAH_W(D, N, M, S) do_vqdmlah_w(N, M, D, 1, S) 1581 1582 #define DO_VQDMLASH_B(D, N, M, S) do_vqdmlah_b(N, D, M, 0, S) 1583 #define DO_VQDMLASH_H(D, N, M, S) do_vqdmlah_h(N, D, M, 0, S) 1584 #define DO_VQDMLASH_W(D, N, M, S) do_vqdmlah_w(N, D, M, 0, S) 1585 #define DO_VQRDMLASH_B(D, N, M, S) do_vqdmlah_b(N, D, M, 1, S) 1586 #define DO_VQRDMLASH_H(D, N, M, S) do_vqdmlah_h(N, D, M, 1, S) 1587 #define DO_VQRDMLASH_W(D, N, M, S) do_vqdmlah_w(N, D, M, 1, S) 1588 1589 DO_2OP_SAT_ACC_SCALAR(vqdmlahb, 1, int8_t, DO_VQDMLAH_B) 1590 DO_2OP_SAT_ACC_SCALAR(vqdmlahh, 2, int16_t, DO_VQDMLAH_H) 1591 DO_2OP_SAT_ACC_SCALAR(vqdmlahw, 4, int32_t, DO_VQDMLAH_W) 1592 DO_2OP_SAT_ACC_SCALAR(vqrdmlahb, 1, int8_t, DO_VQRDMLAH_B) 1593 DO_2OP_SAT_ACC_SCALAR(vqrdmlahh, 2, int16_t, DO_VQRDMLAH_H) 1594 DO_2OP_SAT_ACC_SCALAR(vqrdmlahw, 4, int32_t, DO_VQRDMLAH_W) 1595 1596 DO_2OP_SAT_ACC_SCALAR(vqdmlashb, 1, int8_t, DO_VQDMLASH_B) 1597 DO_2OP_SAT_ACC_SCALAR(vqdmlashh, 2, int16_t, DO_VQDMLASH_H) 1598 DO_2OP_SAT_ACC_SCALAR(vqdmlashw, 4, int32_t, DO_VQDMLASH_W) 1599 DO_2OP_SAT_ACC_SCALAR(vqrdmlashb, 1, int8_t, DO_VQRDMLASH_B) 1600 DO_2OP_SAT_ACC_SCALAR(vqrdmlashh, 2, int16_t, DO_VQRDMLASH_H) 1601 DO_2OP_SAT_ACC_SCALAR(vqrdmlashw, 4, int32_t, DO_VQRDMLASH_W) 1602 1603 /* Vector by scalar plus vector */ 1604 #define DO_VMLA(D, N, M) ((N) * (M) + (D)) 1605 1606 DO_2OP_ACC_SCALAR_U(vmla, DO_VMLA) 1607 1608 /* Vector by vector plus scalar */ 1609 #define DO_VMLAS(D, N, M) ((N) * (D) + (M)) 1610 1611 DO_2OP_ACC_SCALAR_U(vmlas, DO_VMLAS) 1612 1613 /* 1614 * Long saturating scalar ops. As with DO_2OP_L, TYPE and H are for the 1615 * input (smaller) type and LESIZE, LTYPE, LH for the output (long) type. 1616 * SATMASK specifies which bits of the predicate mask matter for determining 1617 * whether to propagate a saturation indication into FPSCR.QC -- for 1618 * the 16x16->32 case we must check only the bit corresponding to the T or B 1619 * half that we used, but for the 32x32->64 case we propagate if the mask 1620 * bit is set for either half. 
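 * For example, SATMASK16T is (1 << 2) because the T half of a 32-bit lane
 * is predicated by bit 2 of that lane's 4-bit mask slice, while SATMASK32
 * is ((1 << 4) | 1) so that either 32-bit half of a 64-bit result lane
 * propagates saturation.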
1621 */ 1622 #define DO_2OP_SAT_SCALAR_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK) \ 1623 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \ 1624 uint32_t rm) \ 1625 { \ 1626 LTYPE *d = vd; \ 1627 TYPE *n = vn; \ 1628 TYPE m = rm; \ 1629 uint16_t mask = mve_element_mask(env); \ 1630 unsigned le; \ 1631 bool qc = false; \ 1632 for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ 1633 bool sat = false; \ 1634 LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)], m, &sat); \ 1635 mergemask(&d[H##LESIZE(le)], r, mask); \ 1636 qc |= sat && (mask & SATMASK); \ 1637 } \ 1638 if (qc) { \ 1639 env->vfp.qc[0] = qc; \ 1640 } \ 1641 mve_advance_vpt(env); \ 1642 } 1643 1644 static inline int32_t do_qdmullh(int16_t n, int16_t m, bool *sat) 1645 { 1646 int64_t r = ((int64_t)n * m) * 2; 1647 return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat); 1648 } 1649 1650 static inline int64_t do_qdmullw(int32_t n, int32_t m, bool *sat) 1651 { 1652 /* The multiply can't overflow, but the doubling might */ 1653 int64_t r = (int64_t)n * m; 1654 if (r > INT64_MAX / 2) { 1655 *sat = true; 1656 return INT64_MAX; 1657 } else if (r < INT64_MIN / 2) { 1658 *sat = true; 1659 return INT64_MIN; 1660 } else { 1661 return r * 2; 1662 } 1663 } 1664 1665 #define SATMASK16B 1 1666 #define SATMASK16T (1 << 2) 1667 #define SATMASK32 ((1 << 4) | 1) 1668 1669 DO_2OP_SAT_SCALAR_L(vqdmullb_scalarh, 0, 2, int16_t, 4, int32_t, \ 1670 do_qdmullh, SATMASK16B) 1671 DO_2OP_SAT_SCALAR_L(vqdmullb_scalarw, 0, 4, int32_t, 8, int64_t, \ 1672 do_qdmullw, SATMASK32) 1673 DO_2OP_SAT_SCALAR_L(vqdmullt_scalarh, 1, 2, int16_t, 4, int32_t, \ 1674 do_qdmullh, SATMASK16T) 1675 DO_2OP_SAT_SCALAR_L(vqdmullt_scalarw, 1, 4, int32_t, 8, int64_t, \ 1676 do_qdmullw, SATMASK32) 1677 1678 /* 1679 * Long saturating ops 1680 */ 1681 #define DO_2OP_SAT_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK) \ 1682 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \ 1683 void *vm) \ 1684 { \ 1685 LTYPE *d = vd; \ 1686 TYPE *n = vn, *m = vm; \ 1687 uint16_t mask = mve_element_mask(env); \ 1688 unsigned le; \ 1689 bool qc = false; \ 1690 for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ 1691 bool sat = false; \ 1692 LTYPE op1 = n[H##ESIZE(le * 2 + TOP)]; \ 1693 LTYPE op2 = m[H##ESIZE(le * 2 + TOP)]; \ 1694 mergemask(&d[H##LESIZE(le)], FN(op1, op2, &sat), mask); \ 1695 qc |= sat && (mask & SATMASK); \ 1696 } \ 1697 if (qc) { \ 1698 env->vfp.qc[0] = qc; \ 1699 } \ 1700 mve_advance_vpt(env); \ 1701 } 1702 1703 DO_2OP_SAT_L(vqdmullbh, 0, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16B) 1704 DO_2OP_SAT_L(vqdmullbw, 0, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32) 1705 DO_2OP_SAT_L(vqdmullth, 1, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16T) 1706 DO_2OP_SAT_L(vqdmulltw, 1, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32) 1707 1708 static inline uint32_t do_vbrsrb(uint32_t n, uint32_t m) 1709 { 1710 m &= 0xff; 1711 if (m == 0) { 1712 return 0; 1713 } 1714 n = revbit8(n); 1715 if (m < 8) { 1716 n >>= 8 - m; 1717 } 1718 return n; 1719 } 1720 1721 static inline uint32_t do_vbrsrh(uint32_t n, uint32_t m) 1722 { 1723 m &= 0xff; 1724 if (m == 0) { 1725 return 0; 1726 } 1727 n = revbit16(n); 1728 if (m < 16) { 1729 n >>= 16 - m; 1730 } 1731 return n; 1732 } 1733 1734 static inline uint32_t do_vbrsrw(uint32_t n, uint32_t m) 1735 { 1736 m &= 0xff; 1737 if (m == 0) { 1738 return 0; 1739 } 1740 n = revbit32(n); 1741 if (m < 32) { 1742 n >>= 32 - m; 1743 } 1744 return n; 1745 } 1746 1747 DO_2OP_SCALAR(vbrsrb, 1, uint8_t, do_vbrsrb) 1748 
DO_2OP_SCALAR(vbrsrh, 2, uint16_t, do_vbrsrh) 1749 DO_2OP_SCALAR(vbrsrw, 4, uint32_t, do_vbrsrw) 1750 1751 /* 1752 * Multiply add long dual accumulate ops. 1753 */ 1754 #define DO_LDAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC) \ 1755 uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \ 1756 void *vm, uint64_t a) \ 1757 { \ 1758 uint16_t mask = mve_element_mask(env); \ 1759 unsigned e; \ 1760 TYPE *n = vn, *m = vm; \ 1761 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 1762 if (mask & 1) { \ 1763 if (e & 1) { \ 1764 a ODDACC \ 1765 (int64_t)n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)]; \ 1766 } else { \ 1767 a EVENACC \ 1768 (int64_t)n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)]; \ 1769 } \ 1770 } \ 1771 } \ 1772 mve_advance_vpt(env); \ 1773 return a; \ 1774 } 1775 1776 DO_LDAV(vmlaldavsh, 2, int16_t, false, +=, +=) 1777 DO_LDAV(vmlaldavxsh, 2, int16_t, true, +=, +=) 1778 DO_LDAV(vmlaldavsw, 4, int32_t, false, +=, +=) 1779 DO_LDAV(vmlaldavxsw, 4, int32_t, true, +=, +=) 1780 1781 DO_LDAV(vmlaldavuh, 2, uint16_t, false, +=, +=) 1782 DO_LDAV(vmlaldavuw, 4, uint32_t, false, +=, +=) 1783 1784 DO_LDAV(vmlsldavsh, 2, int16_t, false, +=, -=) 1785 DO_LDAV(vmlsldavxsh, 2, int16_t, true, +=, -=) 1786 DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=) 1787 DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=) 1788 1789 /* 1790 * Multiply add dual accumulate ops 1791 */ 1792 #define DO_DAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC) \ 1793 uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \ 1794 void *vm, uint32_t a) \ 1795 { \ 1796 uint16_t mask = mve_element_mask(env); \ 1797 unsigned e; \ 1798 TYPE *n = vn, *m = vm; \ 1799 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 1800 if (mask & 1) { \ 1801 if (e & 1) { \ 1802 a ODDACC \ 1803 n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)]; \ 1804 } else { \ 1805 a EVENACC \ 1806 n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)]; \ 1807 } \ 1808 } \ 1809 } \ 1810 mve_advance_vpt(env); \ 1811 return a; \ 1812 } 1813 1814 #define DO_DAV_S(INSN, XCHG, EVENACC, ODDACC) \ 1815 DO_DAV(INSN##b, 1, int8_t, XCHG, EVENACC, ODDACC) \ 1816 DO_DAV(INSN##h, 2, int16_t, XCHG, EVENACC, ODDACC) \ 1817 DO_DAV(INSN##w, 4, int32_t, XCHG, EVENACC, ODDACC) 1818 1819 #define DO_DAV_U(INSN, XCHG, EVENACC, ODDACC) \ 1820 DO_DAV(INSN##b, 1, uint8_t, XCHG, EVENACC, ODDACC) \ 1821 DO_DAV(INSN##h, 2, uint16_t, XCHG, EVENACC, ODDACC) \ 1822 DO_DAV(INSN##w, 4, uint32_t, XCHG, EVENACC, ODDACC) 1823 1824 DO_DAV_S(vmladavs, false, +=, +=) 1825 DO_DAV_U(vmladavu, false, +=, +=) 1826 DO_DAV_S(vmlsdav, false, +=, -=) 1827 DO_DAV_S(vmladavsx, true, +=, +=) 1828 DO_DAV_S(vmlsdavx, true, +=, -=) 1829 1830 /* 1831 * Rounding multiply add long dual accumulate high. In the pseudocode 1832 * this is implemented with a 72-bit internal accumulator value of which 1833 * the top 64 bits are returned. We optimize this to avoid having to 1834 * use 128-bit arithmetic -- we can do this because the 74-bit accumulator 1835 * is squashed back into 64-bits after each beat. 
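 * Concretely, each beat rounds away the low 8 bits of the 64-bit product
 * (mul = (mul >> 8) + ((mul >> 7) & 1)) before adding it to the running
 * 64-bit total, so the accumulator itself never needs to be wider than
 * 64 bits.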
1836 */ 1837 #define DO_LDAVH(OP, TYPE, LTYPE, XCHG, SUB) \ 1838 uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \ 1839 void *vm, uint64_t a) \ 1840 { \ 1841 uint16_t mask = mve_element_mask(env); \ 1842 unsigned e; \ 1843 TYPE *n = vn, *m = vm; \ 1844 for (e = 0; e < 16 / 4; e++, mask >>= 4) { \ 1845 if (mask & 1) { \ 1846 LTYPE mul; \ 1847 if (e & 1) { \ 1848 mul = (LTYPE)n[H4(e - 1 * XCHG)] * m[H4(e)]; \ 1849 if (SUB) { \ 1850 mul = -mul; \ 1851 } \ 1852 } else { \ 1853 mul = (LTYPE)n[H4(e + 1 * XCHG)] * m[H4(e)]; \ 1854 } \ 1855 mul = (mul >> 8) + ((mul >> 7) & 1); \ 1856 a += mul; \ 1857 } \ 1858 } \ 1859 mve_advance_vpt(env); \ 1860 return a; \ 1861 } 1862 1863 DO_LDAVH(vrmlaldavhsw, int32_t, int64_t, false, false) 1864 DO_LDAVH(vrmlaldavhxsw, int32_t, int64_t, true, false) 1865 1866 DO_LDAVH(vrmlaldavhuw, uint32_t, uint64_t, false, false) 1867 1868 DO_LDAVH(vrmlsldavhsw, int32_t, int64_t, false, true) 1869 DO_LDAVH(vrmlsldavhxsw, int32_t, int64_t, true, true) 1870 1871 /* Vector add across vector */ 1872 #define DO_VADDV(OP, ESIZE, TYPE) \ 1873 uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \ 1874 uint32_t ra) \ 1875 { \ 1876 uint16_t mask = mve_element_mask(env); \ 1877 unsigned e; \ 1878 TYPE *m = vm; \ 1879 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 1880 if (mask & 1) { \ 1881 ra += m[H##ESIZE(e)]; \ 1882 } \ 1883 } \ 1884 mve_advance_vpt(env); \ 1885 return ra; \ 1886 } \ 1887 1888 DO_VADDV(vaddvsb, 1, int8_t) 1889 DO_VADDV(vaddvsh, 2, int16_t) 1890 DO_VADDV(vaddvsw, 4, int32_t) 1891 DO_VADDV(vaddvub, 1, uint8_t) 1892 DO_VADDV(vaddvuh, 2, uint16_t) 1893 DO_VADDV(vaddvuw, 4, uint32_t) 1894 1895 /* 1896 * Vector max/min across vector. Unlike VADDV, we must 1897 * read ra as the element size, not its full width. 1898 * We work with int64_t internally for simplicity. 1899 */ 1900 #define DO_VMAXMINV(OP, ESIZE, TYPE, RATYPE, FN) \ 1901 uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \ 1902 uint32_t ra_in) \ 1903 { \ 1904 uint16_t mask = mve_element_mask(env); \ 1905 unsigned e; \ 1906 TYPE *m = vm; \ 1907 int64_t ra = (RATYPE)ra_in; \ 1908 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 1909 if (mask & 1) { \ 1910 ra = FN(ra, m[H##ESIZE(e)]); \ 1911 } \ 1912 } \ 1913 mve_advance_vpt(env); \ 1914 return ra; \ 1915 } \ 1916 1917 #define DO_VMAXMINV_U(INSN, FN) \ 1918 DO_VMAXMINV(INSN##b, 1, uint8_t, uint8_t, FN) \ 1919 DO_VMAXMINV(INSN##h, 2, uint16_t, uint16_t, FN) \ 1920 DO_VMAXMINV(INSN##w, 4, uint32_t, uint32_t, FN) 1921 #define DO_VMAXMINV_S(INSN, FN) \ 1922 DO_VMAXMINV(INSN##b, 1, int8_t, int8_t, FN) \ 1923 DO_VMAXMINV(INSN##h, 2, int16_t, int16_t, FN) \ 1924 DO_VMAXMINV(INSN##w, 4, int32_t, int32_t, FN) 1925 1926 /* 1927 * Helpers for max and min of absolute values across vector: 1928 * note that we only take the absolute value of 'm', not 'n' 1929 */ 1930 static int64_t do_maxa(int64_t n, int64_t m) 1931 { 1932 if (m < 0) { 1933 m = -m; 1934 } 1935 return MAX(n, m); 1936 } 1937 1938 static int64_t do_mina(int64_t n, int64_t m) 1939 { 1940 if (m < 0) { 1941 m = -m; 1942 } 1943 return MIN(n, m); 1944 } 1945 1946 DO_VMAXMINV_S(vmaxvs, DO_MAX) 1947 DO_VMAXMINV_U(vmaxvu, DO_MAX) 1948 DO_VMAXMINV_S(vminvs, DO_MIN) 1949 DO_VMAXMINV_U(vminvu, DO_MIN) 1950 /* 1951 * VMAXAV, VMINAV treat the general purpose input as unsigned 1952 * and the vector elements as signed. 
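 * For example, for VMAXAV.S8 an incoming Rda value of 200 is treated as the
 * unsigned value 200, and an element of -100 contributes its absolute value
 * 100, so the returned maximum is still 200.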
1953 */ 1954 DO_VMAXMINV(vmaxavb, 1, int8_t, uint8_t, do_maxa) 1955 DO_VMAXMINV(vmaxavh, 2, int16_t, uint16_t, do_maxa) 1956 DO_VMAXMINV(vmaxavw, 4, int32_t, uint32_t, do_maxa) 1957 DO_VMAXMINV(vminavb, 1, int8_t, uint8_t, do_mina) 1958 DO_VMAXMINV(vminavh, 2, int16_t, uint16_t, do_mina) 1959 DO_VMAXMINV(vminavw, 4, int32_t, uint32_t, do_mina) 1960 1961 #define DO_VABAV(OP, ESIZE, TYPE) \ 1962 uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \ 1963 void *vm, uint32_t ra) \ 1964 { \ 1965 uint16_t mask = mve_element_mask(env); \ 1966 unsigned e; \ 1967 TYPE *m = vm, *n = vn; \ 1968 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 1969 if (mask & 1) { \ 1970 int64_t n0 = n[H##ESIZE(e)]; \ 1971 int64_t m0 = m[H##ESIZE(e)]; \ 1972 uint32_t r = n0 >= m0 ? (n0 - m0) : (m0 - n0); \ 1973 ra += r; \ 1974 } \ 1975 } \ 1976 mve_advance_vpt(env); \ 1977 return ra; \ 1978 } 1979 1980 DO_VABAV(vabavsb, 1, int8_t) 1981 DO_VABAV(vabavsh, 2, int16_t) 1982 DO_VABAV(vabavsw, 4, int32_t) 1983 DO_VABAV(vabavub, 1, uint8_t) 1984 DO_VABAV(vabavuh, 2, uint16_t) 1985 DO_VABAV(vabavuw, 4, uint32_t) 1986 1987 #define DO_VADDLV(OP, TYPE, LTYPE) \ 1988 uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \ 1989 uint64_t ra) \ 1990 { \ 1991 uint16_t mask = mve_element_mask(env); \ 1992 unsigned e; \ 1993 TYPE *m = vm; \ 1994 for (e = 0; e < 16 / 4; e++, mask >>= 4) { \ 1995 if (mask & 1) { \ 1996 ra += (LTYPE)m[H4(e)]; \ 1997 } \ 1998 } \ 1999 mve_advance_vpt(env); \ 2000 return ra; \ 2001 } \ 2002 2003 DO_VADDLV(vaddlv_s, int32_t, int64_t) 2004 DO_VADDLV(vaddlv_u, uint32_t, uint64_t) 2005 2006 /* Shifts by immediate */ 2007 #define DO_2SHIFT(OP, ESIZE, TYPE, FN) \ 2008 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \ 2009 void *vm, uint32_t shift) \ 2010 { \ 2011 TYPE *d = vd, *m = vm; \ 2012 uint16_t mask = mve_element_mask(env); \ 2013 unsigned e; \ 2014 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 2015 mergemask(&d[H##ESIZE(e)], \ 2016 FN(m[H##ESIZE(e)], shift), mask); \ 2017 } \ 2018 mve_advance_vpt(env); \ 2019 } 2020 2021 #define DO_2SHIFT_SAT(OP, ESIZE, TYPE, FN) \ 2022 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \ 2023 void *vm, uint32_t shift) \ 2024 { \ 2025 TYPE *d = vd, *m = vm; \ 2026 uint16_t mask = mve_element_mask(env); \ 2027 unsigned e; \ 2028 bool qc = false; \ 2029 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 2030 bool sat = false; \ 2031 mergemask(&d[H##ESIZE(e)], \ 2032 FN(m[H##ESIZE(e)], shift, &sat), mask); \ 2033 qc |= sat & mask & 1; \ 2034 } \ 2035 if (qc) { \ 2036 env->vfp.qc[0] = qc; \ 2037 } \ 2038 mve_advance_vpt(env); \ 2039 } 2040 2041 /* provide unsigned 2-op shift helpers for all sizes */ 2042 #define DO_2SHIFT_U(OP, FN) \ 2043 DO_2SHIFT(OP##b, 1, uint8_t, FN) \ 2044 DO_2SHIFT(OP##h, 2, uint16_t, FN) \ 2045 DO_2SHIFT(OP##w, 4, uint32_t, FN) 2046 #define DO_2SHIFT_S(OP, FN) \ 2047 DO_2SHIFT(OP##b, 1, int8_t, FN) \ 2048 DO_2SHIFT(OP##h, 2, int16_t, FN) \ 2049 DO_2SHIFT(OP##w, 4, int32_t, FN) 2050 2051 #define DO_2SHIFT_SAT_U(OP, FN) \ 2052 DO_2SHIFT_SAT(OP##b, 1, uint8_t, FN) \ 2053 DO_2SHIFT_SAT(OP##h, 2, uint16_t, FN) \ 2054 DO_2SHIFT_SAT(OP##w, 4, uint32_t, FN) 2055 #define DO_2SHIFT_SAT_S(OP, FN) \ 2056 DO_2SHIFT_SAT(OP##b, 1, int8_t, FN) \ 2057 DO_2SHIFT_SAT(OP##h, 2, int16_t, FN) \ 2058 DO_2SHIFT_SAT(OP##w, 4, int32_t, FN) 2059 2060 DO_2SHIFT_U(vshli_u, DO_VSHLU) 2061 DO_2SHIFT_S(vshli_s, DO_VSHLS) 2062 DO_2SHIFT_SAT_U(vqshli_u, DO_UQSHL_OP) 2063 DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP) 2064 DO_2SHIFT_SAT_S(vqshlui_s, 
DO_SUQSHL_OP) 2065 DO_2SHIFT_U(vrshli_u, DO_VRSHLU) 2066 DO_2SHIFT_S(vrshli_s, DO_VRSHLS) 2067 DO_2SHIFT_SAT_U(vqrshli_u, DO_UQRSHL_OP) 2068 DO_2SHIFT_SAT_S(vqrshli_s, DO_SQRSHL_OP) 2069 2070 /* Shift-and-insert; we always work with 64 bits at a time */ 2071 #define DO_2SHIFT_INSERT(OP, ESIZE, SHIFTFN, MASKFN) \ 2072 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \ 2073 void *vm, uint32_t shift) \ 2074 { \ 2075 uint64_t *d = vd, *m = vm; \ 2076 uint16_t mask; \ 2077 uint64_t shiftmask; \ 2078 unsigned e; \ 2079 if (shift == ESIZE * 8) { \ 2080 /* \ 2081 * Only VSRI can shift by <dt>; it should mean "don't \ 2082 * update the destination". The generic logic can't handle \ 2083 * this because it would try to shift by an out-of-range \ 2084 * amount, so special case it here. \ 2085 */ \ 2086 goto done; \ 2087 } \ 2088 assert(shift < ESIZE * 8); \ 2089 mask = mve_element_mask(env); \ 2090 /* ESIZE / 2 gives the MO_* value if ESIZE is in [1,2,4] */ \ 2091 shiftmask = dup_const(ESIZE / 2, MASKFN(ESIZE * 8, shift)); \ 2092 for (e = 0; e < 16 / 8; e++, mask >>= 8) { \ 2093 uint64_t r = (SHIFTFN(m[H8(e)], shift) & shiftmask) | \ 2094 (d[H8(e)] & ~shiftmask); \ 2095 mergemask(&d[H8(e)], r, mask); \ 2096 } \ 2097 done: \ 2098 mve_advance_vpt(env); \ 2099 } 2100 2101 #define DO_SHL(N, SHIFT) ((N) << (SHIFT)) 2102 #define DO_SHR(N, SHIFT) ((N) >> (SHIFT)) 2103 #define SHL_MASK(EBITS, SHIFT) MAKE_64BIT_MASK((SHIFT), (EBITS) - (SHIFT)) 2104 #define SHR_MASK(EBITS, SHIFT) MAKE_64BIT_MASK(0, (EBITS) - (SHIFT)) 2105 2106 DO_2SHIFT_INSERT(vsrib, 1, DO_SHR, SHR_MASK) 2107 DO_2SHIFT_INSERT(vsrih, 2, DO_SHR, SHR_MASK) 2108 DO_2SHIFT_INSERT(vsriw, 4, DO_SHR, SHR_MASK) 2109 DO_2SHIFT_INSERT(vslib, 1, DO_SHL, SHL_MASK) 2110 DO_2SHIFT_INSERT(vslih, 2, DO_SHL, SHL_MASK) 2111 DO_2SHIFT_INSERT(vsliw, 4, DO_SHL, SHL_MASK) 2112 2113 /* 2114 * Long shifts taking half-sized inputs from top or bottom of the input 2115 * vector and producing a double-width result. ESIZE, TYPE are for 2116 * the input, and LESIZE, LTYPE for the output. 2117 * Unlike the normal shift helpers, we do not handle negative shift counts, 2118 * because the long shift is strictly left-only. 2119 */ 2120 #define DO_VSHLL(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE) \ 2121 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \ 2122 void *vm, uint32_t shift) \ 2123 { \ 2124 LTYPE *d = vd; \ 2125 TYPE *m = vm; \ 2126 uint16_t mask = mve_element_mask(env); \ 2127 unsigned le; \ 2128 assert(shift <= 16); \ 2129 for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ 2130 LTYPE r = (LTYPE)m[H##ESIZE(le * 2 + TOP)] << shift; \ 2131 mergemask(&d[H##LESIZE(le)], r, mask); \ 2132 } \ 2133 mve_advance_vpt(env); \ 2134 } 2135 2136 #define DO_VSHLL_ALL(OP, TOP) \ 2137 DO_VSHLL(OP##sb, TOP, 1, int8_t, 2, int16_t) \ 2138 DO_VSHLL(OP##ub, TOP, 1, uint8_t, 2, uint16_t) \ 2139 DO_VSHLL(OP##sh, TOP, 2, int16_t, 4, int32_t) \ 2140 DO_VSHLL(OP##uh, TOP, 2, uint16_t, 4, uint32_t) \ 2141 2142 DO_VSHLL_ALL(vshllb, false) 2143 DO_VSHLL_ALL(vshllt, true) 2144 2145 /* 2146 * Narrowing right shifts, taking a double sized input, shifting it 2147 * and putting the result in either the top or bottom half of the output. 2148 * ESIZE, TYPE are the output, and LESIZE, LTYPE the input. 
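 * For example, a 32-to-16 narrow with TOP set shifts each 32-bit input lane
 * right and writes the 16-bit result into the top half of the corresponding
 * output lane; the initial "mask >>= ESIZE * TOP" lines the predicate bits
 * up with whichever half is being written.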
2149 */ 2150 #define DO_VSHRN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \ 2151 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \ 2152 void *vm, uint32_t shift) \ 2153 { \ 2154 LTYPE *m = vm; \ 2155 TYPE *d = vd; \ 2156 uint16_t mask = mve_element_mask(env); \ 2157 unsigned le; \ 2158 mask >>= ESIZE * TOP; \ 2159 for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ 2160 TYPE r = FN(m[H##LESIZE(le)], shift); \ 2161 mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \ 2162 } \ 2163 mve_advance_vpt(env); \ 2164 } 2165 2166 #define DO_VSHRN_ALL(OP, FN) \ 2167 DO_VSHRN(OP##bb, false, 1, uint8_t, 2, uint16_t, FN) \ 2168 DO_VSHRN(OP##bh, false, 2, uint16_t, 4, uint32_t, FN) \ 2169 DO_VSHRN(OP##tb, true, 1, uint8_t, 2, uint16_t, FN) \ 2170 DO_VSHRN(OP##th, true, 2, uint16_t, 4, uint32_t, FN) 2171 2172 static inline uint64_t do_urshr(uint64_t x, unsigned sh) 2173 { 2174 if (likely(sh < 64)) { 2175 return (x >> sh) + ((x >> (sh - 1)) & 1); 2176 } else if (sh == 64) { 2177 return x >> 63; 2178 } else { 2179 return 0; 2180 } 2181 } 2182 2183 static inline int64_t do_srshr(int64_t x, unsigned sh) 2184 { 2185 if (likely(sh < 64)) { 2186 return (x >> sh) + ((x >> (sh - 1)) & 1); 2187 } else { 2188 /* Rounding the sign bit always produces 0. */ 2189 return 0; 2190 } 2191 } 2192 2193 DO_VSHRN_ALL(vshrn, DO_SHR) 2194 DO_VSHRN_ALL(vrshrn, do_urshr) 2195 2196 static inline int32_t do_sat_bhs(int64_t val, int64_t min, int64_t max, 2197 bool *satp) 2198 { 2199 if (val > max) { 2200 *satp = true; 2201 return max; 2202 } else if (val < min) { 2203 *satp = true; 2204 return min; 2205 } else { 2206 return val; 2207 } 2208 } 2209 2210 /* Saturating narrowing right shifts */ 2211 #define DO_VSHRN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \ 2212 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \ 2213 void *vm, uint32_t shift) \ 2214 { \ 2215 LTYPE *m = vm; \ 2216 TYPE *d = vd; \ 2217 uint16_t mask = mve_element_mask(env); \ 2218 bool qc = false; \ 2219 unsigned le; \ 2220 mask >>= ESIZE * TOP; \ 2221 for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ 2222 bool sat = false; \ 2223 TYPE r = FN(m[H##LESIZE(le)], shift, &sat); \ 2224 mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \ 2225 qc |= sat & mask & 1; \ 2226 } \ 2227 if (qc) { \ 2228 env->vfp.qc[0] = qc; \ 2229 } \ 2230 mve_advance_vpt(env); \ 2231 } 2232 2233 #define DO_VSHRN_SAT_UB(BOP, TOP, FN) \ 2234 DO_VSHRN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN) \ 2235 DO_VSHRN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN) 2236 2237 #define DO_VSHRN_SAT_UH(BOP, TOP, FN) \ 2238 DO_VSHRN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN) \ 2239 DO_VSHRN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN) 2240 2241 #define DO_VSHRN_SAT_SB(BOP, TOP, FN) \ 2242 DO_VSHRN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN) \ 2243 DO_VSHRN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN) 2244 2245 #define DO_VSHRN_SAT_SH(BOP, TOP, FN) \ 2246 DO_VSHRN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN) \ 2247 DO_VSHRN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN) 2248 2249 #define DO_SHRN_SB(N, M, SATP) \ 2250 do_sat_bhs((int64_t)(N) >> (M), INT8_MIN, INT8_MAX, SATP) 2251 #define DO_SHRN_UB(N, M, SATP) \ 2252 do_sat_bhs((uint64_t)(N) >> (M), 0, UINT8_MAX, SATP) 2253 #define DO_SHRUN_B(N, M, SATP) \ 2254 do_sat_bhs((int64_t)(N) >> (M), 0, UINT8_MAX, SATP) 2255 2256 #define DO_SHRN_SH(N, M, SATP) \ 2257 do_sat_bhs((int64_t)(N) >> (M), INT16_MIN, INT16_MAX, SATP) 2258 #define DO_SHRN_UH(N, M, SATP) \ 2259 do_sat_bhs((uint64_t)(N) >> (M), 0, UINT16_MAX, SATP) 2260 #define DO_SHRUN_H(N, M, SATP) \ 
2261 do_sat_bhs((int64_t)(N) >> (M), 0, UINT16_MAX, SATP) 2262 2263 #define DO_RSHRN_SB(N, M, SATP) \ 2264 do_sat_bhs(do_srshr(N, M), INT8_MIN, INT8_MAX, SATP) 2265 #define DO_RSHRN_UB(N, M, SATP) \ 2266 do_sat_bhs(do_urshr(N, M), 0, UINT8_MAX, SATP) 2267 #define DO_RSHRUN_B(N, M, SATP) \ 2268 do_sat_bhs(do_srshr(N, M), 0, UINT8_MAX, SATP) 2269 2270 #define DO_RSHRN_SH(N, M, SATP) \ 2271 do_sat_bhs(do_srshr(N, M), INT16_MIN, INT16_MAX, SATP) 2272 #define DO_RSHRN_UH(N, M, SATP) \ 2273 do_sat_bhs(do_urshr(N, M), 0, UINT16_MAX, SATP) 2274 #define DO_RSHRUN_H(N, M, SATP) \ 2275 do_sat_bhs(do_srshr(N, M), 0, UINT16_MAX, SATP) 2276 2277 DO_VSHRN_SAT_SB(vqshrnb_sb, vqshrnt_sb, DO_SHRN_SB) 2278 DO_VSHRN_SAT_SH(vqshrnb_sh, vqshrnt_sh, DO_SHRN_SH) 2279 DO_VSHRN_SAT_UB(vqshrnb_ub, vqshrnt_ub, DO_SHRN_UB) 2280 DO_VSHRN_SAT_UH(vqshrnb_uh, vqshrnt_uh, DO_SHRN_UH) 2281 DO_VSHRN_SAT_SB(vqshrunbb, vqshruntb, DO_SHRUN_B) 2282 DO_VSHRN_SAT_SH(vqshrunbh, vqshrunth, DO_SHRUN_H) 2283 2284 DO_VSHRN_SAT_SB(vqrshrnb_sb, vqrshrnt_sb, DO_RSHRN_SB) 2285 DO_VSHRN_SAT_SH(vqrshrnb_sh, vqrshrnt_sh, DO_RSHRN_SH) 2286 DO_VSHRN_SAT_UB(vqrshrnb_ub, vqrshrnt_ub, DO_RSHRN_UB) 2287 DO_VSHRN_SAT_UH(vqrshrnb_uh, vqrshrnt_uh, DO_RSHRN_UH) 2288 DO_VSHRN_SAT_SB(vqrshrunbb, vqrshruntb, DO_RSHRUN_B) 2289 DO_VSHRN_SAT_SH(vqrshrunbh, vqrshrunth, DO_RSHRUN_H) 2290 2291 #define DO_VMOVN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE) \ 2292 void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \ 2293 { \ 2294 LTYPE *m = vm; \ 2295 TYPE *d = vd; \ 2296 uint16_t mask = mve_element_mask(env); \ 2297 unsigned le; \ 2298 mask >>= ESIZE * TOP; \ 2299 for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ 2300 mergemask(&d[H##ESIZE(le * 2 + TOP)], \ 2301 m[H##LESIZE(le)], mask); \ 2302 } \ 2303 mve_advance_vpt(env); \ 2304 } 2305 2306 DO_VMOVN(vmovnbb, false, 1, uint8_t, 2, uint16_t) 2307 DO_VMOVN(vmovnbh, false, 2, uint16_t, 4, uint32_t) 2308 DO_VMOVN(vmovntb, true, 1, uint8_t, 2, uint16_t) 2309 DO_VMOVN(vmovnth, true, 2, uint16_t, 4, uint32_t) 2310 2311 #define DO_VMOVN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \ 2312 void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \ 2313 { \ 2314 LTYPE *m = vm; \ 2315 TYPE *d = vd; \ 2316 uint16_t mask = mve_element_mask(env); \ 2317 bool qc = false; \ 2318 unsigned le; \ 2319 mask >>= ESIZE * TOP; \ 2320 for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ 2321 bool sat = false; \ 2322 TYPE r = FN(m[H##LESIZE(le)], &sat); \ 2323 mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \ 2324 qc |= sat & mask & 1; \ 2325 } \ 2326 if (qc) { \ 2327 env->vfp.qc[0] = qc; \ 2328 } \ 2329 mve_advance_vpt(env); \ 2330 } 2331 2332 #define DO_VMOVN_SAT_UB(BOP, TOP, FN) \ 2333 DO_VMOVN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN) \ 2334 DO_VMOVN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN) 2335 2336 #define DO_VMOVN_SAT_UH(BOP, TOP, FN) \ 2337 DO_VMOVN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN) \ 2338 DO_VMOVN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN) 2339 2340 #define DO_VMOVN_SAT_SB(BOP, TOP, FN) \ 2341 DO_VMOVN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN) \ 2342 DO_VMOVN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN) 2343 2344 #define DO_VMOVN_SAT_SH(BOP, TOP, FN) \ 2345 DO_VMOVN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN) \ 2346 DO_VMOVN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN) 2347 2348 #define DO_VQMOVN_SB(N, SATP) \ 2349 do_sat_bhs((int64_t)(N), INT8_MIN, INT8_MAX, SATP) 2350 #define DO_VQMOVN_UB(N, SATP) \ 2351 do_sat_bhs((uint64_t)(N), 0, UINT8_MAX, SATP) 2352 #define DO_VQMOVUN_B(N, SATP) \ 2353 
do_sat_bhs((int64_t)(N), 0, UINT8_MAX, SATP) 2354 2355 #define DO_VQMOVN_SH(N, SATP) \ 2356 do_sat_bhs((int64_t)(N), INT16_MIN, INT16_MAX, SATP) 2357 #define DO_VQMOVN_UH(N, SATP) \ 2358 do_sat_bhs((uint64_t)(N), 0, UINT16_MAX, SATP) 2359 #define DO_VQMOVUN_H(N, SATP) \ 2360 do_sat_bhs((int64_t)(N), 0, UINT16_MAX, SATP) 2361 2362 DO_VMOVN_SAT_SB(vqmovnbsb, vqmovntsb, DO_VQMOVN_SB) 2363 DO_VMOVN_SAT_SH(vqmovnbsh, vqmovntsh, DO_VQMOVN_SH) 2364 DO_VMOVN_SAT_UB(vqmovnbub, vqmovntub, DO_VQMOVN_UB) 2365 DO_VMOVN_SAT_UH(vqmovnbuh, vqmovntuh, DO_VQMOVN_UH) 2366 DO_VMOVN_SAT_SB(vqmovunbb, vqmovuntb, DO_VQMOVUN_B) 2367 DO_VMOVN_SAT_SH(vqmovunbh, vqmovunth, DO_VQMOVUN_H) 2368 2369 uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm, 2370 uint32_t shift) 2371 { 2372 uint32_t *d = vd; 2373 uint16_t mask = mve_element_mask(env); 2374 unsigned e; 2375 uint32_t r; 2376 2377 /* 2378 * For each 32-bit element, we shift it left, bringing in the 2379 * low 'shift' bits of rdm at the bottom. Bits shifted out at 2380 * the top become the new rdm, if the predicate mask permits. 2381 * The final rdm value is returned to update the register. 2382 * shift == 0 here means "shift by 32 bits". 2383 */ 2384 if (shift == 0) { 2385 for (e = 0; e < 16 / 4; e++, mask >>= 4) { 2386 r = rdm; 2387 if (mask & 1) { 2388 rdm = d[H4(e)]; 2389 } 2390 mergemask(&d[H4(e)], r, mask); 2391 } 2392 } else { 2393 uint32_t shiftmask = MAKE_64BIT_MASK(0, shift); 2394 2395 for (e = 0; e < 16 / 4; e++, mask >>= 4) { 2396 r = (d[H4(e)] << shift) | (rdm & shiftmask); 2397 if (mask & 1) { 2398 rdm = d[H4(e)] >> (32 - shift); 2399 } 2400 mergemask(&d[H4(e)], r, mask); 2401 } 2402 } 2403 mve_advance_vpt(env); 2404 return rdm; 2405 } 2406 2407 uint64_t HELPER(mve_sshrl)(CPUARMState *env, uint64_t n, uint32_t shift) 2408 { 2409 return do_sqrshl_d(n, -(int8_t)shift, false, NULL); 2410 } 2411 2412 uint64_t HELPER(mve_ushll)(CPUARMState *env, uint64_t n, uint32_t shift) 2413 { 2414 return do_uqrshl_d(n, (int8_t)shift, false, NULL); 2415 } 2416 2417 uint64_t HELPER(mve_sqshll)(CPUARMState *env, uint64_t n, uint32_t shift) 2418 { 2419 return do_sqrshl_d(n, (int8_t)shift, false, &env->QF); 2420 } 2421 2422 uint64_t HELPER(mve_uqshll)(CPUARMState *env, uint64_t n, uint32_t shift) 2423 { 2424 return do_uqrshl_d(n, (int8_t)shift, false, &env->QF); 2425 } 2426 2427 uint64_t HELPER(mve_sqrshrl)(CPUARMState *env, uint64_t n, uint32_t shift) 2428 { 2429 return do_sqrshl_d(n, -(int8_t)shift, true, &env->QF); 2430 } 2431 2432 uint64_t HELPER(mve_uqrshll)(CPUARMState *env, uint64_t n, uint32_t shift) 2433 { 2434 return do_uqrshl_d(n, (int8_t)shift, true, &env->QF); 2435 } 2436 2437 /* Operate on 64-bit values, but saturate at 48 bits */ 2438 static inline int64_t do_sqrshl48_d(int64_t src, int64_t shift, 2439 bool round, uint32_t *sat) 2440 { 2441 int64_t val, extval; 2442 2443 if (shift <= -48) { 2444 /* Rounding the sign bit always produces 0. */ 2445 if (round) { 2446 return 0; 2447 } 2448 return src >> 63; 2449 } else if (shift < 0) { 2450 if (round) { 2451 src >>= -shift - 1; 2452 val = (src >> 1) + (src & 1); 2453 } else { 2454 val = src >> -shift; 2455 } 2456 extval = sextract64(val, 0, 48); 2457 if (!sat || val == extval) { 2458 return extval; 2459 } 2460 } else if (shift < 48) { 2461 int64_t extval = sextract64(src << shift, 0, 48); 2462 if (!sat || src == (extval >> shift)) { 2463 return extval; 2464 } 2465 } else if (!sat || src == 0) { 2466 return 0; 2467 } 2468 2469 *sat = 1; 2470 return src >= 0 ? 
MAKE_64BIT_MASK(0, 47) : MAKE_64BIT_MASK(47, 17); 2471 } 2472 2473 /* Operate on 64-bit values, but saturate at 48 bits */ 2474 static inline uint64_t do_uqrshl48_d(uint64_t src, int64_t shift, 2475 bool round, uint32_t *sat) 2476 { 2477 uint64_t val, extval; 2478 2479 if (shift <= -(48 + round)) { 2480 return 0; 2481 } else if (shift < 0) { 2482 if (round) { 2483 val = src >> (-shift - 1); 2484 val = (val >> 1) + (val & 1); 2485 } else { 2486 val = src >> -shift; 2487 } 2488 extval = extract64(val, 0, 48); 2489 if (!sat || val == extval) { 2490 return extval; 2491 } 2492 } else if (shift < 48) { 2493 uint64_t extval = extract64(src << shift, 0, 48); 2494 if (!sat || src == (extval >> shift)) { 2495 return extval; 2496 } 2497 } else if (!sat || src == 0) { 2498 return 0; 2499 } 2500 2501 *sat = 1; 2502 return MAKE_64BIT_MASK(0, 48); 2503 } 2504 2505 uint64_t HELPER(mve_sqrshrl48)(CPUARMState *env, uint64_t n, uint32_t shift) 2506 { 2507 return do_sqrshl48_d(n, -(int8_t)shift, true, &env->QF); 2508 } 2509 2510 uint64_t HELPER(mve_uqrshll48)(CPUARMState *env, uint64_t n, uint32_t shift) 2511 { 2512 return do_uqrshl48_d(n, (int8_t)shift, true, &env->QF); 2513 } 2514 2515 uint32_t HELPER(mve_uqshl)(CPUARMState *env, uint32_t n, uint32_t shift) 2516 { 2517 return do_uqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF); 2518 } 2519 2520 uint32_t HELPER(mve_sqshl)(CPUARMState *env, uint32_t n, uint32_t shift) 2521 { 2522 return do_sqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF); 2523 } 2524 2525 uint32_t HELPER(mve_uqrshl)(CPUARMState *env, uint32_t n, uint32_t shift) 2526 { 2527 return do_uqrshl_bhs(n, (int8_t)shift, 32, true, &env->QF); 2528 } 2529 2530 uint32_t HELPER(mve_sqrshr)(CPUARMState *env, uint32_t n, uint32_t shift) 2531 { 2532 return do_sqrshl_bhs(n, -(int8_t)shift, 32, true, &env->QF); 2533 } 2534 2535 #define DO_VIDUP(OP, ESIZE, TYPE, FN) \ 2536 uint32_t HELPER(mve_##OP)(CPUARMState *env, void *vd, \ 2537 uint32_t offset, uint32_t imm) \ 2538 { \ 2539 TYPE *d = vd; \ 2540 uint16_t mask = mve_element_mask(env); \ 2541 unsigned e; \ 2542 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 2543 mergemask(&d[H##ESIZE(e)], offset, mask); \ 2544 offset = FN(offset, imm); \ 2545 } \ 2546 mve_advance_vpt(env); \ 2547 return offset; \ 2548 } 2549 2550 #define DO_VIWDUP(OP, ESIZE, TYPE, FN) \ 2551 uint32_t HELPER(mve_##OP)(CPUARMState *env, void *vd, \ 2552 uint32_t offset, uint32_t wrap, \ 2553 uint32_t imm) \ 2554 { \ 2555 TYPE *d = vd; \ 2556 uint16_t mask = mve_element_mask(env); \ 2557 unsigned e; \ 2558 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 2559 mergemask(&d[H##ESIZE(e)], offset, mask); \ 2560 offset = FN(offset, wrap, imm); \ 2561 } \ 2562 mve_advance_vpt(env); \ 2563 return offset; \ 2564 } 2565 2566 #define DO_VIDUP_ALL(OP, FN) \ 2567 DO_VIDUP(OP##b, 1, int8_t, FN) \ 2568 DO_VIDUP(OP##h, 2, int16_t, FN) \ 2569 DO_VIDUP(OP##w, 4, int32_t, FN) 2570 2571 #define DO_VIWDUP_ALL(OP, FN) \ 2572 DO_VIWDUP(OP##b, 1, int8_t, FN) \ 2573 DO_VIWDUP(OP##h, 2, int16_t, FN) \ 2574 DO_VIWDUP(OP##w, 4, int32_t, FN) 2575 2576 static uint32_t do_add_wrap(uint32_t offset, uint32_t wrap, uint32_t imm) 2577 { 2578 offset += imm; 2579 if (offset == wrap) { 2580 offset = 0; 2581 } 2582 return offset; 2583 } 2584 2585 static uint32_t do_sub_wrap(uint32_t offset, uint32_t wrap, uint32_t imm) 2586 { 2587 if (offset == 0) { 2588 offset = wrap; 2589 } 2590 offset -= imm; 2591 return offset; 2592 } 2593 2594 DO_VIDUP_ALL(vidup, DO_ADD) 2595 DO_VIWDUP_ALL(viwdup, do_add_wrap) 2596 
DO_VIWDUP_ALL(vdwdup, do_sub_wrap) 2597 2598 /* 2599 * Vector comparison. 2600 * P0 bits for non-executed beats (where eci_mask is 0) are unchanged. 2601 * P0 bits for predicated lanes in executed beats (where mask is 0) are 0. 2602 * P0 bits otherwise are updated with the results of the comparisons. 2603 * We must also keep unchanged the MASK fields at the top of v7m.vpr. 2604 */ 2605 #define DO_VCMP(OP, ESIZE, TYPE, FN) \ 2606 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, void *vm) \ 2607 { \ 2608 TYPE *n = vn, *m = vm; \ 2609 uint16_t mask = mve_element_mask(env); \ 2610 uint16_t eci_mask = mve_eci_mask(env); \ 2611 uint16_t beatpred = 0; \ 2612 uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \ 2613 unsigned e; \ 2614 for (e = 0; e < 16 / ESIZE; e++) { \ 2615 bool r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)]); \ 2616 /* Comparison sets 0/1 bits for each byte in the element */ \ 2617 beatpred |= r * emask; \ 2618 emask <<= ESIZE; \ 2619 } \ 2620 beatpred &= mask; \ 2621 env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \ 2622 (beatpred & eci_mask); \ 2623 mve_advance_vpt(env); \ 2624 } 2625 2626 #define DO_VCMP_SCALAR(OP, ESIZE, TYPE, FN) \ 2627 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \ 2628 uint32_t rm) \ 2629 { \ 2630 TYPE *n = vn; \ 2631 uint16_t mask = mve_element_mask(env); \ 2632 uint16_t eci_mask = mve_eci_mask(env); \ 2633 uint16_t beatpred = 0; \ 2634 uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \ 2635 unsigned e; \ 2636 for (e = 0; e < 16 / ESIZE; e++) { \ 2637 bool r = FN(n[H##ESIZE(e)], (TYPE)rm); \ 2638 /* Comparison sets 0/1 bits for each byte in the element */ \ 2639 beatpred |= r * emask; \ 2640 emask <<= ESIZE; \ 2641 } \ 2642 beatpred &= mask; \ 2643 env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \ 2644 (beatpred & eci_mask); \ 2645 mve_advance_vpt(env); \ 2646 } 2647 2648 #define DO_VCMP_S(OP, FN) \ 2649 DO_VCMP(OP##b, 1, int8_t, FN) \ 2650 DO_VCMP(OP##h, 2, int16_t, FN) \ 2651 DO_VCMP(OP##w, 4, int32_t, FN) \ 2652 DO_VCMP_SCALAR(OP##_scalarb, 1, int8_t, FN) \ 2653 DO_VCMP_SCALAR(OP##_scalarh, 2, int16_t, FN) \ 2654 DO_VCMP_SCALAR(OP##_scalarw, 4, int32_t, FN) 2655 2656 #define DO_VCMP_U(OP, FN) \ 2657 DO_VCMP(OP##b, 1, uint8_t, FN) \ 2658 DO_VCMP(OP##h, 2, uint16_t, FN) \ 2659 DO_VCMP(OP##w, 4, uint32_t, FN) \ 2660 DO_VCMP_SCALAR(OP##_scalarb, 1, uint8_t, FN) \ 2661 DO_VCMP_SCALAR(OP##_scalarh, 2, uint16_t, FN) \ 2662 DO_VCMP_SCALAR(OP##_scalarw, 4, uint32_t, FN) 2663 2664 #define DO_EQ(N, M) ((N) == (M)) 2665 #define DO_NE(N, M) ((N) != (M)) 2666 #define DO_EQ(N, M) ((N) == (M)) 2667 #define DO_EQ(N, M) ((N) == (M)) 2668 #define DO_GE(N, M) ((N) >= (M)) 2669 #define DO_LT(N, M) ((N) < (M)) 2670 #define DO_GT(N, M) ((N) > (M)) 2671 #define DO_LE(N, M) ((N) <= (M)) 2672 2673 DO_VCMP_U(vcmpeq, DO_EQ) 2674 DO_VCMP_U(vcmpne, DO_NE) 2675 DO_VCMP_U(vcmpcs, DO_GE) 2676 DO_VCMP_U(vcmphi, DO_GT) 2677 DO_VCMP_S(vcmpge, DO_GE) 2678 DO_VCMP_S(vcmplt, DO_LT) 2679 DO_VCMP_S(vcmpgt, DO_GT) 2680 DO_VCMP_S(vcmple, DO_LE) 2681 2682 void HELPER(mve_vpsel)(CPUARMState *env, void *vd, void *vn, void *vm) 2683 { 2684 /* 2685 * Qd[n] = VPR.P0[n] ? Qn[n] : Qm[n] 2686 * but note that whether bytes are written to Qd is still subject 2687 * to (all forms of) predication in the usual way. 
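 * So the helper works on 64-bit chunks: P0 selects byte-by-byte between Qn
 * and Qm to form a candidate result, and the normal element mask then
 * decides which of those bytes are actually written back to Qd.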
2688 */ 2689 uint64_t *d = vd, *n = vn, *m = vm; 2690 uint16_t mask = mve_element_mask(env); 2691 uint16_t p0 = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0); 2692 unsigned e; 2693 for (e = 0; e < 16 / 8; e++, mask >>= 8, p0 >>= 8) { 2694 uint64_t r = m[H8(e)]; 2695 mergemask(&r, n[H8(e)], p0); 2696 mergemask(&d[H8(e)], r, mask); 2697 } 2698 mve_advance_vpt(env); 2699 } 2700 2701 void HELPER(mve_vpnot)(CPUARMState *env) 2702 { 2703 /* 2704 * P0 bits for unexecuted beats (where eci_mask is 0) are unchanged. 2705 * P0 bits for predicated lanes in executed bits (where mask is 0) are 0. 2706 * P0 bits otherwise are inverted. 2707 * (This is the same logic as VCMP.) 2708 * This insn is itself subject to predication and to beat-wise execution, 2709 * and after it executes VPT state advances in the usual way. 2710 */ 2711 uint16_t mask = mve_element_mask(env); 2712 uint16_t eci_mask = mve_eci_mask(env); 2713 uint16_t beatpred = ~env->v7m.vpr & mask; 2714 env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | (beatpred & eci_mask); 2715 mve_advance_vpt(env); 2716 } 2717 2718 /* 2719 * VCTP: P0 unexecuted bits unchanged, predicated bits zeroed, 2720 * otherwise set according to value of Rn. The calculation of 2721 * newmask here works in the same way as the calculation of the 2722 * ltpmask in mve_element_mask(), but we have pre-calculated 2723 * the masklen in the generated code. 2724 */ 2725 void HELPER(mve_vctp)(CPUARMState *env, uint32_t masklen) 2726 { 2727 uint16_t mask = mve_element_mask(env); 2728 uint16_t eci_mask = mve_eci_mask(env); 2729 uint16_t newmask; 2730 2731 assert(masklen <= 16); 2732 newmask = masklen ? MAKE_64BIT_MASK(0, masklen) : 0; 2733 newmask &= mask; 2734 env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | (newmask & eci_mask); 2735 mve_advance_vpt(env); 2736 } 2737 2738 #define DO_1OP_SAT(OP, ESIZE, TYPE, FN) \ 2739 void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \ 2740 { \ 2741 TYPE *d = vd, *m = vm; \ 2742 uint16_t mask = mve_element_mask(env); \ 2743 unsigned e; \ 2744 bool qc = false; \ 2745 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 2746 bool sat = false; \ 2747 mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)], &sat), mask); \ 2748 qc |= sat & mask & 1; \ 2749 } \ 2750 if (qc) { \ 2751 env->vfp.qc[0] = qc; \ 2752 } \ 2753 mve_advance_vpt(env); \ 2754 } 2755 2756 #define DO_VQABS_B(N, SATP) \ 2757 do_sat_bhs(DO_ABS((int64_t)N), INT8_MIN, INT8_MAX, SATP) 2758 #define DO_VQABS_H(N, SATP) \ 2759 do_sat_bhs(DO_ABS((int64_t)N), INT16_MIN, INT16_MAX, SATP) 2760 #define DO_VQABS_W(N, SATP) \ 2761 do_sat_bhs(DO_ABS((int64_t)N), INT32_MIN, INT32_MAX, SATP) 2762 2763 #define DO_VQNEG_B(N, SATP) do_sat_bhs(-(int64_t)N, INT8_MIN, INT8_MAX, SATP) 2764 #define DO_VQNEG_H(N, SATP) do_sat_bhs(-(int64_t)N, INT16_MIN, INT16_MAX, SATP) 2765 #define DO_VQNEG_W(N, SATP) do_sat_bhs(-(int64_t)N, INT32_MIN, INT32_MAX, SATP) 2766 2767 DO_1OP_SAT(vqabsb, 1, int8_t, DO_VQABS_B) 2768 DO_1OP_SAT(vqabsh, 2, int16_t, DO_VQABS_H) 2769 DO_1OP_SAT(vqabsw, 4, int32_t, DO_VQABS_W) 2770 2771 DO_1OP_SAT(vqnegb, 1, int8_t, DO_VQNEG_B) 2772 DO_1OP_SAT(vqnegh, 2, int16_t, DO_VQNEG_H) 2773 DO_1OP_SAT(vqnegw, 4, int32_t, DO_VQNEG_W) 2774 2775 /* 2776 * VMAXA, VMINA: vd is unsigned; vm is signed, and we take its 2777 * absolute value; we then do an unsigned comparison. 
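 * Taking the absolute value into the unsigned element type also means the
 * most negative input is representable: e.g. an int8_t element of -128
 * contributes 128 to the unsigned comparison rather than overflowing.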
2778 */ 2779 #define DO_VMAXMINA(OP, ESIZE, STYPE, UTYPE, FN) \ 2780 void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \ 2781 { \ 2782 UTYPE *d = vd; \ 2783 STYPE *m = vm; \ 2784 uint16_t mask = mve_element_mask(env); \ 2785 unsigned e; \ 2786 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 2787 UTYPE r = DO_ABS(m[H##ESIZE(e)]); \ 2788 r = FN(d[H##ESIZE(e)], r); \ 2789 mergemask(&d[H##ESIZE(e)], r, mask); \ 2790 } \ 2791 mve_advance_vpt(env); \ 2792 } 2793 2794 DO_VMAXMINA(vmaxab, 1, int8_t, uint8_t, DO_MAX) 2795 DO_VMAXMINA(vmaxah, 2, int16_t, uint16_t, DO_MAX) 2796 DO_VMAXMINA(vmaxaw, 4, int32_t, uint32_t, DO_MAX) 2797 DO_VMAXMINA(vminab, 1, int8_t, uint8_t, DO_MIN) 2798 DO_VMAXMINA(vminah, 2, int16_t, uint16_t, DO_MIN) 2799 DO_VMAXMINA(vminaw, 4, int32_t, uint32_t, DO_MIN) 2800 2801 /* 2802 * 2-operand floating point. Note that if an element is partially 2803 * predicated we must do the FP operation to update the non-predicated 2804 * bytes, but we must be careful to avoid updating the FP exception 2805 * state unless byte 0 of the element was unpredicated. 2806 */ 2807 #define DO_2OP_FP(OP, ESIZE, TYPE, FN) \ 2808 void HELPER(glue(mve_, OP))(CPUARMState *env, \ 2809 void *vd, void *vn, void *vm) \ 2810 { \ 2811 TYPE *d = vd, *n = vn, *m = vm; \ 2812 TYPE r; \ 2813 uint16_t mask = mve_element_mask(env); \ 2814 unsigned e; \ 2815 float_status *fpst; \ 2816 float_status scratch_fpst; \ 2817 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 2818 if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ 2819 continue; \ 2820 } \ 2821 fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ 2822 &env->vfp.standard_fp_status; \ 2823 if (!(mask & 1)) { \ 2824 /* We need the result but without updating flags */ \ 2825 scratch_fpst = *fpst; \ 2826 fpst = &scratch_fpst; \ 2827 } \ 2828 r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], fpst); \ 2829 mergemask(&d[H##ESIZE(e)], r, mask); \ 2830 } \ 2831 mve_advance_vpt(env); \ 2832 } 2833 2834 #define DO_2OP_FP_ALL(OP, FN) \ 2835 DO_2OP_FP(OP##h, 2, float16, float16_##FN) \ 2836 DO_2OP_FP(OP##s, 4, float32, float32_##FN) 2837 2838 DO_2OP_FP_ALL(vfadd, add) 2839 DO_2OP_FP_ALL(vfsub, sub) 2840 DO_2OP_FP_ALL(vfmul, mul) 2841 2842 static inline float16 float16_abd(float16 a, float16 b, float_status *s) 2843 { 2844 return float16_abs(float16_sub(a, b, s)); 2845 } 2846 2847 static inline float32 float32_abd(float32 a, float32 b, float_status *s) 2848 { 2849 return float32_abs(float32_sub(a, b, s)); 2850 } 2851 2852 DO_2OP_FP_ALL(vfabd, abd) 2853 DO_2OP_FP_ALL(vmaxnm, maxnum) 2854 DO_2OP_FP_ALL(vminnm, minnum) 2855 2856 static inline float16 float16_maxnuma(float16 a, float16 b, float_status *s) 2857 { 2858 return float16_maxnum(float16_abs(a), float16_abs(b), s); 2859 } 2860 2861 static inline float32 float32_maxnuma(float32 a, float32 b, float_status *s) 2862 { 2863 return float32_maxnum(float32_abs(a), float32_abs(b), s); 2864 } 2865 2866 static inline float16 float16_minnuma(float16 a, float16 b, float_status *s) 2867 { 2868 return float16_minnum(float16_abs(a), float16_abs(b), s); 2869 } 2870 2871 static inline float32 float32_minnuma(float32 a, float32 b, float_status *s) 2872 { 2873 return float32_minnum(float32_abs(a), float32_abs(b), s); 2874 } 2875 2876 DO_2OP_FP_ALL(vmaxnma, maxnuma) 2877 DO_2OP_FP_ALL(vminnma, minnuma) 2878 2879 #define DO_VCADD_FP(OP, ESIZE, TYPE, FN0, FN1) \ 2880 void HELPER(glue(mve_, OP))(CPUARMState *env, \ 2881 void *vd, void *vn, void *vm) \ 2882 { \ 2883 TYPE *d = vd, *n = vn, *m = vm; \ 2884 TYPE r[16 / ESIZE]; \ 2885 
uint16_t tm, mask = mve_element_mask(env); \ 2886 unsigned e; \ 2887 float_status *fpst; \ 2888 float_status scratch_fpst; \ 2889 /* Calculate all results first to avoid overwriting inputs */ \ 2890 for (e = 0, tm = mask; e < 16 / ESIZE; e++, tm >>= ESIZE) { \ 2891 if ((tm & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ 2892 r[e] = 0; \ 2893 continue; \ 2894 } \ 2895 fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ 2896 &env->vfp.standard_fp_status; \ 2897 if (!(tm & 1)) { \ 2898 /* We need the result but without updating flags */ \ 2899 scratch_fpst = *fpst; \ 2900 fpst = &scratch_fpst; \ 2901 } \ 2902 if (!(e & 1)) { \ 2903 r[e] = FN0(n[H##ESIZE(e)], m[H##ESIZE(e + 1)], fpst); \ 2904 } else { \ 2905 r[e] = FN1(n[H##ESIZE(e)], m[H##ESIZE(e - 1)], fpst); \ 2906 } \ 2907 } \ 2908 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 2909 mergemask(&d[H##ESIZE(e)], r[e], mask); \ 2910 } \ 2911 mve_advance_vpt(env); \ 2912 } 2913 2914 DO_VCADD_FP(vfcadd90h, 2, float16, float16_sub, float16_add) 2915 DO_VCADD_FP(vfcadd90s, 4, float32, float32_sub, float32_add) 2916 DO_VCADD_FP(vfcadd270h, 2, float16, float16_add, float16_sub) 2917 DO_VCADD_FP(vfcadd270s, 4, float32, float32_add, float32_sub) 2918 2919 #define DO_VFMA(OP, ESIZE, TYPE, CHS) \ 2920 void HELPER(glue(mve_, OP))(CPUARMState *env, \ 2921 void *vd, void *vn, void *vm) \ 2922 { \ 2923 TYPE *d = vd, *n = vn, *m = vm; \ 2924 TYPE r; \ 2925 uint16_t mask = mve_element_mask(env); \ 2926 unsigned e; \ 2927 float_status *fpst; \ 2928 float_status scratch_fpst; \ 2929 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 2930 if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ 2931 continue; \ 2932 } \ 2933 fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ 2934 &env->vfp.standard_fp_status; \ 2935 if (!(mask & 1)) { \ 2936 /* We need the result but without updating flags */ \ 2937 scratch_fpst = *fpst; \ 2938 fpst = &scratch_fpst; \ 2939 } \ 2940 r = n[H##ESIZE(e)]; \ 2941 if (CHS) { \ 2942 r = TYPE##_chs(r); \ 2943 } \ 2944 r = TYPE##_muladd(r, m[H##ESIZE(e)], d[H##ESIZE(e)], \ 2945 0, fpst); \ 2946 mergemask(&d[H##ESIZE(e)], r, mask); \ 2947 } \ 2948 mve_advance_vpt(env); \ 2949 } 2950 2951 DO_VFMA(vfmah, 2, float16, false) 2952 DO_VFMA(vfmas, 4, float32, false) 2953 DO_VFMA(vfmsh, 2, float16, true) 2954 DO_VFMA(vfmss, 4, float32, true) 2955 2956 #define DO_VCMLA(OP, ESIZE, TYPE, ROT, FN) \ 2957 void HELPER(glue(mve_, OP))(CPUARMState *env, \ 2958 void *vd, void *vn, void *vm) \ 2959 { \ 2960 TYPE *d = vd, *n = vn, *m = vm; \ 2961 TYPE r0, r1, e1, e2, e3, e4; \ 2962 uint16_t mask = mve_element_mask(env); \ 2963 unsigned e; \ 2964 float_status *fpst0, *fpst1; \ 2965 float_status scratch_fpst; \ 2966 /* We loop through pairs of elements at a time */ \ 2967 for (e = 0; e < 16 / ESIZE; e += 2, mask >>= ESIZE * 2) { \ 2968 if ((mask & MAKE_64BIT_MASK(0, ESIZE * 2)) == 0) { \ 2969 continue; \ 2970 } \ 2971 fpst0 = (ESIZE == 2) ? 
&env->vfp.standard_fp_status_f16 : \ 2972 &env->vfp.standard_fp_status; \ 2973 fpst1 = fpst0; \ 2974 if (!(mask & 1)) { \ 2975 scratch_fpst = *fpst0; \ 2976 fpst0 = &scratch_fpst; \ 2977 } \ 2978 if (!(mask & (1 << ESIZE))) { \ 2979 scratch_fpst = *fpst1; \ 2980 fpst1 = &scratch_fpst; \ 2981 } \ 2982 switch (ROT) { \ 2983 case 0: \ 2984 e1 = m[H##ESIZE(e)]; \ 2985 e2 = n[H##ESIZE(e)]; \ 2986 e3 = m[H##ESIZE(e + 1)]; \ 2987 e4 = n[H##ESIZE(e)]; \ 2988 break; \ 2989 case 1: \ 2990 e1 = TYPE##_chs(m[H##ESIZE(e + 1)]); \ 2991 e2 = n[H##ESIZE(e + 1)]; \ 2992 e3 = m[H##ESIZE(e)]; \ 2993 e4 = n[H##ESIZE(e + 1)]; \ 2994 break; \ 2995 case 2: \ 2996 e1 = TYPE##_chs(m[H##ESIZE(e)]); \ 2997 e2 = n[H##ESIZE(e)]; \ 2998 e3 = TYPE##_chs(m[H##ESIZE(e + 1)]); \ 2999 e4 = n[H##ESIZE(e)]; \ 3000 break; \ 3001 case 3: \ 3002 e1 = m[H##ESIZE(e + 1)]; \ 3003 e2 = n[H##ESIZE(e + 1)]; \ 3004 e3 = TYPE##_chs(m[H##ESIZE(e)]); \ 3005 e4 = n[H##ESIZE(e + 1)]; \ 3006 break; \ 3007 default: \ 3008 g_assert_not_reached(); \ 3009 } \ 3010 r0 = FN(e2, e1, d[H##ESIZE(e)], fpst0); \ 3011 r1 = FN(e4, e3, d[H##ESIZE(e + 1)], fpst1); \ 3012 mergemask(&d[H##ESIZE(e)], r0, mask); \ 3013 mergemask(&d[H##ESIZE(e + 1)], r1, mask >> ESIZE); \ 3014 } \ 3015 mve_advance_vpt(env); \ 3016 } 3017 3018 #define DO_VCMULH(N, M, D, S) float16_mul(N, M, S) 3019 #define DO_VCMULS(N, M, D, S) float32_mul(N, M, S) 3020 3021 #define DO_VCMLAH(N, M, D, S) float16_muladd(N, M, D, 0, S) 3022 #define DO_VCMLAS(N, M, D, S) float32_muladd(N, M, D, 0, S) 3023 3024 DO_VCMLA(vcmul0h, 2, float16, 0, DO_VCMULH) 3025 DO_VCMLA(vcmul0s, 4, float32, 0, DO_VCMULS) 3026 DO_VCMLA(vcmul90h, 2, float16, 1, DO_VCMULH) 3027 DO_VCMLA(vcmul90s, 4, float32, 1, DO_VCMULS) 3028 DO_VCMLA(vcmul180h, 2, float16, 2, DO_VCMULH) 3029 DO_VCMLA(vcmul180s, 4, float32, 2, DO_VCMULS) 3030 DO_VCMLA(vcmul270h, 2, float16, 3, DO_VCMULH) 3031 DO_VCMLA(vcmul270s, 4, float32, 3, DO_VCMULS) 3032 3033 DO_VCMLA(vcmla0h, 2, float16, 0, DO_VCMLAH) 3034 DO_VCMLA(vcmla0s, 4, float32, 0, DO_VCMLAS) 3035 DO_VCMLA(vcmla90h, 2, float16, 1, DO_VCMLAH) 3036 DO_VCMLA(vcmla90s, 4, float32, 1, DO_VCMLAS) 3037 DO_VCMLA(vcmla180h, 2, float16, 2, DO_VCMLAH) 3038 DO_VCMLA(vcmla180s, 4, float32, 2, DO_VCMLAS) 3039 DO_VCMLA(vcmla270h, 2, float16, 3, DO_VCMLAH) 3040 DO_VCMLA(vcmla270s, 4, float32, 3, DO_VCMLAS) 3041 3042 #define DO_2OP_FP_SCALAR(OP, ESIZE, TYPE, FN) \ 3043 void HELPER(glue(mve_, OP))(CPUARMState *env, \ 3044 void *vd, void *vn, uint32_t rm) \ 3045 { \ 3046 TYPE *d = vd, *n = vn; \ 3047 TYPE r, m = rm; \ 3048 uint16_t mask = mve_element_mask(env); \ 3049 unsigned e; \ 3050 float_status *fpst; \ 3051 float_status scratch_fpst; \ 3052 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 3053 if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ 3054 continue; \ 3055 } \ 3056 fpst = (ESIZE == 2) ? 
&env->vfp.standard_fp_status_f16 : \ 3057 &env->vfp.standard_fp_status; \ 3058 if (!(mask & 1)) { \ 3059 /* We need the result but without updating flags */ \ 3060 scratch_fpst = *fpst; \ 3061 fpst = &scratch_fpst; \ 3062 } \ 3063 r = FN(n[H##ESIZE(e)], m, fpst); \ 3064 mergemask(&d[H##ESIZE(e)], r, mask); \ 3065 } \ 3066 mve_advance_vpt(env); \ 3067 } 3068 3069 #define DO_2OP_FP_SCALAR_ALL(OP, FN) \ 3070 DO_2OP_FP_SCALAR(OP##h, 2, float16, float16_##FN) \ 3071 DO_2OP_FP_SCALAR(OP##s, 4, float32, float32_##FN) 3072 3073 DO_2OP_FP_SCALAR_ALL(vfadd_scalar, add) 3074 DO_2OP_FP_SCALAR_ALL(vfsub_scalar, sub) 3075 DO_2OP_FP_SCALAR_ALL(vfmul_scalar, mul) 3076 3077 #define DO_2OP_FP_ACC_SCALAR(OP, ESIZE, TYPE, FN) \ 3078 void HELPER(glue(mve_, OP))(CPUARMState *env, \ 3079 void *vd, void *vn, uint32_t rm) \ 3080 { \ 3081 TYPE *d = vd, *n = vn; \ 3082 TYPE r, m = rm; \ 3083 uint16_t mask = mve_element_mask(env); \ 3084 unsigned e; \ 3085 float_status *fpst; \ 3086 float_status scratch_fpst; \ 3087 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 3088 if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ 3089 continue; \ 3090 } \ 3091 fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ 3092 &env->vfp.standard_fp_status; \ 3093 if (!(mask & 1)) { \ 3094 /* We need the result but without updating flags */ \ 3095 scratch_fpst = *fpst; \ 3096 fpst = &scratch_fpst; \ 3097 } \ 3098 r = FN(n[H##ESIZE(e)], m, d[H##ESIZE(e)], 0, fpst); \ 3099 mergemask(&d[H##ESIZE(e)], r, mask); \ 3100 } \ 3101 mve_advance_vpt(env); \ 3102 } 3103 3104 /* VFMAS is vector * vector + scalar, so swap op2 and op3 */ 3105 #define DO_VFMAS_SCALARH(N, M, D, F, S) float16_muladd(N, D, M, F, S) 3106 #define DO_VFMAS_SCALARS(N, M, D, F, S) float32_muladd(N, D, M, F, S) 3107 3108 /* VFMA is vector * scalar + vector */ 3109 DO_2OP_FP_ACC_SCALAR(vfma_scalarh, 2, float16, float16_muladd) 3110 DO_2OP_FP_ACC_SCALAR(vfma_scalars, 4, float32, float32_muladd) 3111 DO_2OP_FP_ACC_SCALAR(vfmas_scalarh, 2, float16, DO_VFMAS_SCALARH) 3112 DO_2OP_FP_ACC_SCALAR(vfmas_scalars, 4, float32, DO_VFMAS_SCALARS) 3113 3114 /* Floating point max/min across vector. */ 3115 #define DO_FP_VMAXMINV(OP, ESIZE, TYPE, ABS, FN) \ 3116 uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \ 3117 uint32_t ra_in) \ 3118 { \ 3119 uint16_t mask = mve_element_mask(env); \ 3120 unsigned e; \ 3121 TYPE *m = vm; \ 3122 TYPE ra = (TYPE)ra_in; \ 3123 float_status *fpst = (ESIZE == 2) ? 
\ 3124 &env->vfp.standard_fp_status_f16 : \ 3125 &env->vfp.standard_fp_status; \ 3126 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 3127 if (mask & 1) { \ 3128 TYPE v = m[H##ESIZE(e)]; \ 3129 if (TYPE##_is_signaling_nan(ra, fpst)) { \ 3130 ra = TYPE##_silence_nan(ra, fpst); \ 3131 float_raise(float_flag_invalid, fpst); \ 3132 } \ 3133 if (TYPE##_is_signaling_nan(v, fpst)) { \ 3134 v = TYPE##_silence_nan(v, fpst); \ 3135 float_raise(float_flag_invalid, fpst); \ 3136 } \ 3137 if (ABS) { \ 3138 v = TYPE##_abs(v); \ 3139 } \ 3140 ra = FN(ra, v, fpst); \ 3141 } \ 3142 } \ 3143 mve_advance_vpt(env); \ 3144 return ra; \ 3145 } \ 3146 3147 #define NOP(X) (X) 3148 3149 DO_FP_VMAXMINV(vmaxnmvh, 2, float16, false, float16_maxnum) 3150 DO_FP_VMAXMINV(vmaxnmvs, 4, float32, false, float32_maxnum) 3151 DO_FP_VMAXMINV(vminnmvh, 2, float16, false, float16_minnum) 3152 DO_FP_VMAXMINV(vminnmvs, 4, float32, false, float32_minnum) 3153 DO_FP_VMAXMINV(vmaxnmavh, 2, float16, true, float16_maxnum) 3154 DO_FP_VMAXMINV(vmaxnmavs, 4, float32, true, float32_maxnum) 3155 DO_FP_VMAXMINV(vminnmavh, 2, float16, true, float16_minnum) 3156 DO_FP_VMAXMINV(vminnmavs, 4, float32, true, float32_minnum) 3157 3158 /* FP compares; note that all comparisons signal InvalidOp for QNaNs */ 3159 #define DO_VCMP_FP(OP, ESIZE, TYPE, FN) \ 3160 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, void *vm) \ 3161 { \ 3162 TYPE *n = vn, *m = vm; \ 3163 uint16_t mask = mve_element_mask(env); \ 3164 uint16_t eci_mask = mve_eci_mask(env); \ 3165 uint16_t beatpred = 0; \ 3166 uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \ 3167 unsigned e; \ 3168 float_status *fpst; \ 3169 float_status scratch_fpst; \ 3170 bool r; \ 3171 for (e = 0; e < 16 / ESIZE; e++, emask <<= ESIZE) { \ 3172 if ((mask & emask) == 0) { \ 3173 continue; \ 3174 } \ 3175 fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ 3176 &env->vfp.standard_fp_status; \ 3177 if (!(mask & (1 << (e * ESIZE)))) { \ 3178 /* We need the result but without updating flags */ \ 3179 scratch_fpst = *fpst; \ 3180 fpst = &scratch_fpst; \ 3181 } \ 3182 r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], fpst); \ 3183 /* Comparison sets 0/1 bits for each byte in the element */ \ 3184 beatpred |= r * emask; \ 3185 } \ 3186 beatpred &= mask; \ 3187 env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \ 3188 (beatpred & eci_mask); \ 3189 mve_advance_vpt(env); \ 3190 } 3191 3192 #define DO_VCMP_FP_SCALAR(OP, ESIZE, TYPE, FN) \ 3193 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \ 3194 uint32_t rm) \ 3195 { \ 3196 TYPE *n = vn; \ 3197 uint16_t mask = mve_element_mask(env); \ 3198 uint16_t eci_mask = mve_eci_mask(env); \ 3199 uint16_t beatpred = 0; \ 3200 uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \ 3201 unsigned e; \ 3202 float_status *fpst; \ 3203 float_status scratch_fpst; \ 3204 bool r; \ 3205 for (e = 0; e < 16 / ESIZE; e++, emask <<= ESIZE) { \ 3206 if ((mask & emask) == 0) { \ 3207 continue; \ 3208 } \ 3209 fpst = (ESIZE == 2) ? 
&env->vfp.standard_fp_status_f16 : \ 3210 &env->vfp.standard_fp_status; \ 3211 if (!(mask & (1 << (e * ESIZE)))) { \ 3212 /* We need the result but without updating flags */ \ 3213 scratch_fpst = *fpst; \ 3214 fpst = &scratch_fpst; \ 3215 } \ 3216 r = FN(n[H##ESIZE(e)], (TYPE)rm, fpst); \ 3217 /* Comparison sets 0/1 bits for each byte in the element */ \ 3218 beatpred |= r * emask; \ 3219 } \ 3220 beatpred &= mask; \ 3221 env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \ 3222 (beatpred & eci_mask); \ 3223 mve_advance_vpt(env); \ 3224 } 3225 3226 #define DO_VCMP_FP_BOTH(VOP, SOP, ESIZE, TYPE, FN) \ 3227 DO_VCMP_FP(VOP, ESIZE, TYPE, FN) \ 3228 DO_VCMP_FP_SCALAR(SOP, ESIZE, TYPE, FN) 3229 3230 /* 3231 * Some care is needed here to get the correct result for the unordered case. 3232 * Architecturally EQ, GE and GT are defined to be false for unordered, but 3233 * the NE, LT and LE comparisons are defined as simple logical inverses of 3234 * EQ, GE and GT and so they must return true for unordered. The softfloat 3235 * comparison functions float*_{eq,le,lt} all return false for unordered. 3236 */ 3237 #define DO_GE16(X, Y, S) float16_le(Y, X, S) 3238 #define DO_GE32(X, Y, S) float32_le(Y, X, S) 3239 #define DO_GT16(X, Y, S) float16_lt(Y, X, S) 3240 #define DO_GT32(X, Y, S) float32_lt(Y, X, S) 3241 3242 DO_VCMP_FP_BOTH(vfcmpeqh, vfcmpeq_scalarh, 2, float16, float16_eq) 3243 DO_VCMP_FP_BOTH(vfcmpeqs, vfcmpeq_scalars, 4, float32, float32_eq) 3244 3245 DO_VCMP_FP_BOTH(vfcmpneh, vfcmpne_scalarh, 2, float16, !float16_eq) 3246 DO_VCMP_FP_BOTH(vfcmpnes, vfcmpne_scalars, 4, float32, !float32_eq) 3247 3248 DO_VCMP_FP_BOTH(vfcmpgeh, vfcmpge_scalarh, 2, float16, DO_GE16) 3249 DO_VCMP_FP_BOTH(vfcmpges, vfcmpge_scalars, 4, float32, DO_GE32) 3250 3251 DO_VCMP_FP_BOTH(vfcmplth, vfcmplt_scalarh, 2, float16, !DO_GE16) 3252 DO_VCMP_FP_BOTH(vfcmplts, vfcmplt_scalars, 4, float32, !DO_GE32) 3253 3254 DO_VCMP_FP_BOTH(vfcmpgth, vfcmpgt_scalarh, 2, float16, DO_GT16) 3255 DO_VCMP_FP_BOTH(vfcmpgts, vfcmpgt_scalars, 4, float32, DO_GT32) 3256 3257 DO_VCMP_FP_BOTH(vfcmpleh, vfcmple_scalarh, 2, float16, !DO_GT16) 3258 DO_VCMP_FP_BOTH(vfcmples, vfcmple_scalars, 4, float32, !DO_GT32) 3259 3260 #define DO_VCVT_FIXED(OP, ESIZE, TYPE, FN) \ 3261 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vm, \ 3262 uint32_t shift) \ 3263 { \ 3264 TYPE *d = vd, *m = vm; \ 3265 TYPE r; \ 3266 uint16_t mask = mve_element_mask(env); \ 3267 unsigned e; \ 3268 float_status *fpst; \ 3269 float_status scratch_fpst; \ 3270 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 3271 if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ 3272 continue; \ 3273 } \ 3274 fpst = (ESIZE == 2) ? 
&env->vfp.standard_fp_status_f16 : \ 3275 &env->vfp.standard_fp_status; \ 3276 if (!(mask & 1)) { \ 3277 /* We need the result but without updating flags */ \ 3278 scratch_fpst = *fpst; \ 3279 fpst = &scratch_fpst; \ 3280 } \ 3281 r = FN(m[H##ESIZE(e)], shift, fpst); \ 3282 mergemask(&d[H##ESIZE(e)], r, mask); \ 3283 } \ 3284 mve_advance_vpt(env); \ 3285 } 3286 3287 DO_VCVT_FIXED(vcvt_sh, 2, int16_t, helper_vfp_shtoh) 3288 DO_VCVT_FIXED(vcvt_uh, 2, uint16_t, helper_vfp_uhtoh) 3289 DO_VCVT_FIXED(vcvt_hs, 2, int16_t, helper_vfp_toshh_round_to_zero) 3290 DO_VCVT_FIXED(vcvt_hu, 2, uint16_t, helper_vfp_touhh_round_to_zero) 3291 DO_VCVT_FIXED(vcvt_sf, 4, int32_t, helper_vfp_sltos) 3292 DO_VCVT_FIXED(vcvt_uf, 4, uint32_t, helper_vfp_ultos) 3293 DO_VCVT_FIXED(vcvt_fs, 4, int32_t, helper_vfp_tosls_round_to_zero) 3294 DO_VCVT_FIXED(vcvt_fu, 4, uint32_t, helper_vfp_touls_round_to_zero) 3295 3296 /* VCVT with specified rmode */ 3297 #define DO_VCVT_RMODE(OP, ESIZE, TYPE, FN) \ 3298 void HELPER(glue(mve_, OP))(CPUARMState *env, \ 3299 void *vd, void *vm, uint32_t rmode) \ 3300 { \ 3301 TYPE *d = vd, *m = vm; \ 3302 TYPE r; \ 3303 uint16_t mask = mve_element_mask(env); \ 3304 unsigned e; \ 3305 float_status *fpst; \ 3306 float_status scratch_fpst; \ 3307 float_status *base_fpst = (ESIZE == 2) ? \ 3308 &env->vfp.standard_fp_status_f16 : \ 3309 &env->vfp.standard_fp_status; \ 3310 uint32_t prev_rmode = get_float_rounding_mode(base_fpst); \ 3311 set_float_rounding_mode(rmode, base_fpst); \ 3312 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 3313 if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ 3314 continue; \ 3315 } \ 3316 fpst = base_fpst; \ 3317 if (!(mask & 1)) { \ 3318 /* We need the result but without updating flags */ \ 3319 scratch_fpst = *fpst; \ 3320 fpst = &scratch_fpst; \ 3321 } \ 3322 r = FN(m[H##ESIZE(e)], 0, fpst); \ 3323 mergemask(&d[H##ESIZE(e)], r, mask); \ 3324 } \ 3325 set_float_rounding_mode(prev_rmode, base_fpst); \ 3326 mve_advance_vpt(env); \ 3327 } 3328 3329 DO_VCVT_RMODE(vcvt_rm_sh, 2, uint16_t, helper_vfp_toshh) 3330 DO_VCVT_RMODE(vcvt_rm_uh, 2, uint16_t, helper_vfp_touhh) 3331 DO_VCVT_RMODE(vcvt_rm_ss, 4, uint32_t, helper_vfp_tosls) 3332 DO_VCVT_RMODE(vcvt_rm_us, 4, uint32_t, helper_vfp_touls) 3333 3334 #define DO_VRINT_RM_H(M, F, S) helper_rinth(M, S) 3335 #define DO_VRINT_RM_S(M, F, S) helper_rints(M, S) 3336 3337 DO_VCVT_RMODE(vrint_rm_h, 2, uint16_t, DO_VRINT_RM_H) 3338 DO_VCVT_RMODE(vrint_rm_s, 4, uint32_t, DO_VRINT_RM_S) 3339 3340 /* 3341 * VCVT between halfprec and singleprec. As usual for halfprec 3342 * conversions, FZ16 is ignored and AHP is observed. 
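 * In these helpers that means temporarily clearing flush-to-zero around the
 * single-to-half conversion (and flush-inputs-to-zero around the
 * half-to-single conversion) on the standard FP status, so half-precision
 * values are never flushed regardless of FZ16, and choosing IEEE or
 * Alternative Half Precision format from the FPSCR.AHP bit.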
3343 */ 3344 static void do_vcvt_sh(CPUARMState *env, void *vd, void *vm, int top) 3345 { 3346 uint16_t *d = vd; 3347 uint32_t *m = vm; 3348 uint16_t r; 3349 uint16_t mask = mve_element_mask(env); 3350 bool ieee = !(env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_AHP); 3351 unsigned e; 3352 float_status *fpst; 3353 float_status scratch_fpst; 3354 float_status *base_fpst = &env->vfp.standard_fp_status; 3355 bool old_fz = get_flush_to_zero(base_fpst); 3356 set_flush_to_zero(false, base_fpst); 3357 for (e = 0; e < 16 / 4; e++, mask >>= 4) { 3358 if ((mask & MAKE_64BIT_MASK(0, 4)) == 0) { 3359 continue; 3360 } 3361 fpst = base_fpst; 3362 if (!(mask & 1)) { 3363 /* We need the result but without updating flags */ 3364 scratch_fpst = *fpst; 3365 fpst = &scratch_fpst; 3366 } 3367 r = float32_to_float16(m[H4(e)], ieee, fpst); 3368 mergemask(&d[H2(e * 2 + top)], r, mask >> (top * 2)); 3369 } 3370 set_flush_to_zero(old_fz, base_fpst); 3371 mve_advance_vpt(env); 3372 } 3373 3374 static void do_vcvt_hs(CPUARMState *env, void *vd, void *vm, int top) 3375 { 3376 uint32_t *d = vd; 3377 uint16_t *m = vm; 3378 uint32_t r; 3379 uint16_t mask = mve_element_mask(env); 3380 bool ieee = !(env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_AHP); 3381 unsigned e; 3382 float_status *fpst; 3383 float_status scratch_fpst; 3384 float_status *base_fpst = &env->vfp.standard_fp_status; 3385 bool old_fiz = get_flush_inputs_to_zero(base_fpst); 3386 set_flush_inputs_to_zero(false, base_fpst); 3387 for (e = 0; e < 16 / 4; e++, mask >>= 4) { 3388 if ((mask & MAKE_64BIT_MASK(0, 4)) == 0) { 3389 continue; 3390 } 3391 fpst = base_fpst; 3392 if (!(mask & (1 << (top * 2)))) { 3393 /* We need the result but without updating flags */ 3394 scratch_fpst = *fpst; 3395 fpst = &scratch_fpst; 3396 } 3397 r = float16_to_float32(m[H2(e * 2 + top)], ieee, fpst); 3398 mergemask(&d[H4(e)], r, mask); 3399 } 3400 set_flush_inputs_to_zero(old_fiz, base_fpst); 3401 mve_advance_vpt(env); 3402 } 3403 3404 void HELPER(mve_vcvtb_sh)(CPUARMState *env, void *vd, void *vm) 3405 { 3406 do_vcvt_sh(env, vd, vm, 0); 3407 } 3408 void HELPER(mve_vcvtt_sh)(CPUARMState *env, void *vd, void *vm) 3409 { 3410 do_vcvt_sh(env, vd, vm, 1); 3411 } 3412 void HELPER(mve_vcvtb_hs)(CPUARMState *env, void *vd, void *vm) 3413 { 3414 do_vcvt_hs(env, vd, vm, 0); 3415 } 3416 void HELPER(mve_vcvtt_hs)(CPUARMState *env, void *vd, void *vm) 3417 { 3418 do_vcvt_hs(env, vd, vm, 1); 3419 } 3420 3421 #define DO_1OP_FP(OP, ESIZE, TYPE, FN) \ 3422 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vm) \ 3423 { \ 3424 TYPE *d = vd, *m = vm; \ 3425 TYPE r; \ 3426 uint16_t mask = mve_element_mask(env); \ 3427 unsigned e; \ 3428 float_status *fpst; \ 3429 float_status scratch_fpst; \ 3430 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 3431 if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ 3432 continue; \ 3433 } \ 3434 fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ 3435 &env->vfp.standard_fp_status; \ 3436 if (!(mask & 1)) { \ 3437 /* We need the result but without updating flags */ \ 3438 scratch_fpst = *fpst; \ 3439 fpst = &scratch_fpst; \ 3440 } \ 3441 r = FN(m[H##ESIZE(e)], fpst); \ 3442 mergemask(&d[H##ESIZE(e)], r, mask); \ 3443 } \ 3444 mve_advance_vpt(env); \ 3445 } 3446 3447 DO_1OP_FP(vrintx_h, 2, float16, float16_round_to_int) 3448 DO_1OP_FP(vrintx_s, 4, float32, float32_round_to_int) 3449
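/*
 * Note that VRINTX (the vrintx_h/vrintx_s helpers above) calls
 * float16/32_round_to_int directly, so it rounds in the current FPSCR
 * rounding mode and signals Inexact when the input was not already
 * integral: e.g. under round-to-nearest-even a float32 value of 1.5
 * becomes 2.0 with FPSCR.IXC set (provided byte 0 of the element is
 * unpredicated, so the real float_status rather than the scratch copy
 * is used). The VRINT variants with an explicit rounding mode are
 * handled via DO_VCVT_RMODE further up, using helper_rinth/helper_rints,
 * and do not raise Inexact.
 */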