//=- AArch64InstrInfo.td - Describe the AArch64 Instructions -*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// AArch64 Instruction definitions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// AArch64 Instruction Predicate Definitions.
//
def HasV8_1a         : Predicate<"Subtarget->hasV8_1aOps()">,
                       AssemblerPredicate<(all_of HasV8_1aOps), "armv8.1a">;
def HasV8_2a         : Predicate<"Subtarget->hasV8_2aOps()">,
                       AssemblerPredicate<(all_of HasV8_2aOps), "armv8.2a">;
def HasV8_3a         : Predicate<"Subtarget->hasV8_3aOps()">,
                       AssemblerPredicate<(all_of HasV8_3aOps), "armv8.3a">;
def HasV8_4a         : Predicate<"Subtarget->hasV8_4aOps()">,
                       AssemblerPredicate<(all_of HasV8_4aOps), "armv8.4a">;
def HasV8_5a         : Predicate<"Subtarget->hasV8_5aOps()">,
                       AssemblerPredicate<(all_of HasV8_5aOps), "armv8.5a">;
def HasV8_6a         : Predicate<"Subtarget->hasV8_6aOps()">,
                       AssemblerPredicate<(all_of HasV8_6aOps), "armv8.6a">;
def HasV8_7a         : Predicate<"Subtarget->hasV8_7aOps()">,
                       AssemblerPredicate<(all_of HasV8_7aOps), "armv8.7a">;
def HasV9_0a         : Predicate<"Subtarget->hasV9_0aOps()">,
                       AssemblerPredicate<(all_of HasV9_0aOps), "armv9-a">;
def HasV9_1a         : Predicate<"Subtarget->hasV9_1aOps()">,
                       AssemblerPredicate<(all_of HasV9_1aOps), "armv9.1a">;
def HasV9_2a         : Predicate<"Subtarget->hasV9_2aOps()">,
                       AssemblerPredicate<(all_of HasV9_2aOps), "armv9.2a">;
def HasV9_3a         : Predicate<"Subtarget->hasV9_3aOps()">,
                       AssemblerPredicate<(all_of HasV9_3aOps), "armv9.3a">;
def HasV8_0r         : Predicate<"Subtarget->hasV8_0rOps()">,
                       AssemblerPredicate<(all_of HasV8_0rOps), "armv8-r">;

def HasEL2VMSA       : Predicate<"Subtarget->hasEL2VMSA()">,
                       AssemblerPredicate<(all_of FeatureEL2VMSA), "el2vmsa">;

def HasEL3           : Predicate<"Subtarget->hasEL3()">,
                       AssemblerPredicate<(all_of FeatureEL3), "el3">;

def HasVH            : Predicate<"Subtarget->hasVH()">,
                       AssemblerPredicate<(all_of FeatureVH), "vh">;

def HasLOR           : Predicate<"Subtarget->hasLOR()">,
                       AssemblerPredicate<(all_of FeatureLOR), "lor">;

def HasPAuth         : Predicate<"Subtarget->hasPAuth()">,
                       AssemblerPredicate<(all_of FeaturePAuth), "pauth">;

def HasJS            : Predicate<"Subtarget->hasJS()">,
                       AssemblerPredicate<(all_of FeatureJS), "jsconv">;

def HasCCIDX         : Predicate<"Subtarget->hasCCIDX()">,
                       AssemblerPredicate<(all_of FeatureCCIDX), "ccidx">;

def HasComplxNum     : Predicate<"Subtarget->hasComplxNum()">,
                       AssemblerPredicate<(all_of FeatureComplxNum), "complxnum">;

def HasNV            : Predicate<"Subtarget->hasNV()">,
                       AssemblerPredicate<(all_of FeatureNV), "nv">;

def HasMPAM          : Predicate<"Subtarget->hasMPAM()">,
                       AssemblerPredicate<(all_of FeatureMPAM), "mpam">;

def HasDIT           : Predicate<"Subtarget->hasDIT()">,
                       AssemblerPredicate<(all_of FeatureDIT), "dit">;

def HasTRACEV8_4     : Predicate<"Subtarget->hasTRACEV8_4()">,
                       AssemblerPredicate<(all_of FeatureTRACEV8_4), "tracev8.4">;

def HasAM            : Predicate<"Subtarget->hasAM()">,
                       AssemblerPredicate<(all_of FeatureAM), "am">;

def HasSEL2          : Predicate<"Subtarget->hasSEL2()">,
                       AssemblerPredicate<(all_of FeatureSEL2), "sel2">;
Predicate<"Subtarget->hasTLB_RMI()">, 84 AssemblerPredicate<(all_of FeatureTLB_RMI), "tlb-rmi">; 85 86def HasFlagM : Predicate<"Subtarget->hasFlagM()">, 87 AssemblerPredicate<(all_of FeatureFlagM), "flagm">; 88 89def HasRCPC_IMMO : Predicate<"Subtarget->hasRCPCImm()">, 90 AssemblerPredicate<(all_of FeatureRCPC_IMMO), "rcpc-immo">; 91 92def HasFPARMv8 : Predicate<"Subtarget->hasFPARMv8()">, 93 AssemblerPredicate<(all_of FeatureFPARMv8), "fp-armv8">; 94def HasNEON : Predicate<"Subtarget->hasNEON()">, 95 AssemblerPredicate<(all_of FeatureNEON), "neon">; 96def HasCrypto : Predicate<"Subtarget->hasCrypto()">, 97 AssemblerPredicate<(all_of FeatureCrypto), "crypto">; 98def HasSM4 : Predicate<"Subtarget->hasSM4()">, 99 AssemblerPredicate<(all_of FeatureSM4), "sm4">; 100def HasSHA3 : Predicate<"Subtarget->hasSHA3()">, 101 AssemblerPredicate<(all_of FeatureSHA3), "sha3">; 102def HasSHA2 : Predicate<"Subtarget->hasSHA2()">, 103 AssemblerPredicate<(all_of FeatureSHA2), "sha2">; 104def HasAES : Predicate<"Subtarget->hasAES()">, 105 AssemblerPredicate<(all_of FeatureAES), "aes">; 106def HasDotProd : Predicate<"Subtarget->hasDotProd()">, 107 AssemblerPredicate<(all_of FeatureDotProd), "dotprod">; 108def HasCRC : Predicate<"Subtarget->hasCRC()">, 109 AssemblerPredicate<(all_of FeatureCRC), "crc">; 110def HasLSE : Predicate<"Subtarget->hasLSE()">, 111 AssemblerPredicate<(all_of FeatureLSE), "lse">; 112def HasNoLSE : Predicate<"!Subtarget->hasLSE()">; 113def HasRAS : Predicate<"Subtarget->hasRAS()">, 114 AssemblerPredicate<(all_of FeatureRAS), "ras">; 115def HasRDM : Predicate<"Subtarget->hasRDM()">, 116 AssemblerPredicate<(all_of FeatureRDM), "rdm">; 117def HasPerfMon : Predicate<"Subtarget->hasPerfMon()">; 118def HasFullFP16 : Predicate<"Subtarget->hasFullFP16()">, 119 AssemblerPredicate<(all_of FeatureFullFP16), "fullfp16">; 120def HasFP16FML : Predicate<"Subtarget->hasFP16FML()">, 121 AssemblerPredicate<(all_of FeatureFP16FML), "fp16fml">; 122def HasSPE : Predicate<"Subtarget->hasSPE()">, 123 AssemblerPredicate<(all_of FeatureSPE), "spe">; 124def HasFuseAES : Predicate<"Subtarget->hasFuseAES()">, 125 AssemblerPredicate<(all_of FeatureFuseAES), 126 "fuse-aes">; 127def HasSVE : Predicate<"Subtarget->hasSVE()">, 128 AssemblerPredicate<(all_of FeatureSVE), "sve">; 129def HasSVE2 : Predicate<"Subtarget->hasSVE2()">, 130 AssemblerPredicate<(all_of FeatureSVE2), "sve2">; 131def HasSVE2AES : Predicate<"Subtarget->hasSVE2AES()">, 132 AssemblerPredicate<(all_of FeatureSVE2AES), "sve2-aes">; 133def HasSVE2SM4 : Predicate<"Subtarget->hasSVE2SM4()">, 134 AssemblerPredicate<(all_of FeatureSVE2SM4), "sve2-sm4">; 135def HasSVE2SHA3 : Predicate<"Subtarget->hasSVE2SHA3()">, 136 AssemblerPredicate<(all_of FeatureSVE2SHA3), "sve2-sha3">; 137def HasSVE2BitPerm : Predicate<"Subtarget->hasSVE2BitPerm()">, 138 AssemblerPredicate<(all_of FeatureSVE2BitPerm), "sve2-bitperm">; 139def HasSME : Predicate<"Subtarget->hasSME()">, 140 AssemblerPredicate<(all_of FeatureSME), "sme">; 141def HasSMEF64 : Predicate<"Subtarget->hasSMEF64()">, 142 AssemblerPredicate<(all_of FeatureSMEF64), "sme-f64">; 143def HasSMEI64 : Predicate<"Subtarget->hasSMEI64()">, 144 AssemblerPredicate<(all_of FeatureSMEI64), "sme-i64">; 145def HasStreamingSVE : Predicate<"Subtarget->hasStreamingSVE()">, 146 AssemblerPredicate<(all_of FeatureStreamingSVE), "streaming-sve">; 147// A subset of SVE(2) instructions are legal in Streaming SVE execution mode, 148// they should be enabled if either has been specified. 
def HasSVEorStreamingSVE
    : Predicate<"Subtarget->hasSVE() || Subtarget->hasStreamingSVE()">,
      AssemblerPredicate<(any_of FeatureSVE, FeatureStreamingSVE),
                         "streaming-sve or sve">;
def HasSVE2orStreamingSVE
    : Predicate<"Subtarget->hasSVE2() || Subtarget->hasStreamingSVE()">,
      AssemblerPredicate<(any_of FeatureSVE2, FeatureStreamingSVE),
                         "streaming-sve or sve2">;
// A subset of NEON instructions are legal in Streaming SVE execution mode;
// they should be enabled if either feature has been specified.
def HasNEONorStreamingSVE
    : Predicate<"Subtarget->hasNEON() || Subtarget->hasStreamingSVE()">,
      AssemblerPredicate<(any_of FeatureNEON, FeatureStreamingSVE),
                         "streaming-sve or neon">;
def HasRCPC          : Predicate<"Subtarget->hasRCPC()">,
                       AssemblerPredicate<(all_of FeatureRCPC), "rcpc">;
def HasAltNZCV       : Predicate<"Subtarget->hasAlternativeNZCV()">,
                       AssemblerPredicate<(all_of FeatureAltFPCmp), "altnzcv">;
def HasFRInt3264     : Predicate<"Subtarget->hasFRInt3264()">,
                       AssemblerPredicate<(all_of FeatureFRInt3264), "frint3264">;
def HasSB            : Predicate<"Subtarget->hasSB()">,
                       AssemblerPredicate<(all_of FeatureSB), "sb">;
def HasPredRes       : Predicate<"Subtarget->hasPredRes()">,
                       AssemblerPredicate<(all_of FeaturePredRes), "predres">;
def HasCCDP          : Predicate<"Subtarget->hasCCDP()">,
                       AssemblerPredicate<(all_of FeatureCacheDeepPersist), "ccdp">;
def HasBTI           : Predicate<"Subtarget->hasBTI()">,
                       AssemblerPredicate<(all_of FeatureBranchTargetId), "bti">;
def HasMTE           : Predicate<"Subtarget->hasMTE()">,
                       AssemblerPredicate<(all_of FeatureMTE), "mte">;
def HasTME           : Predicate<"Subtarget->hasTME()">,
                       AssemblerPredicate<(all_of FeatureTME), "tme">;
def HasETE           : Predicate<"Subtarget->hasETE()">,
                       AssemblerPredicate<(all_of FeatureETE), "ete">;
def HasTRBE          : Predicate<"Subtarget->hasTRBE()">,
                       AssemblerPredicate<(all_of FeatureTRBE), "trbe">;
def HasBF16          : Predicate<"Subtarget->hasBF16()">,
                       AssemblerPredicate<(all_of FeatureBF16), "bf16">;
def HasMatMulInt8    : Predicate<"Subtarget->hasMatMulInt8()">,
                       AssemblerPredicate<(all_of FeatureMatMulInt8), "i8mm">;
def HasMatMulFP32    : Predicate<"Subtarget->hasMatMulFP32()">,
                       AssemblerPredicate<(all_of FeatureMatMulFP32), "f32mm">;
def HasMatMulFP64    : Predicate<"Subtarget->hasMatMulFP64()">,
                       AssemblerPredicate<(all_of FeatureMatMulFP64), "f64mm">;
def HasXS            : Predicate<"Subtarget->hasXS()">,
                       AssemblerPredicate<(all_of FeatureXS), "xs">;
def HasWFxT          : Predicate<"Subtarget->hasWFxT()">,
                       AssemblerPredicate<(all_of FeatureWFxT), "wfxt">;
def HasLS64          : Predicate<"Subtarget->hasLS64()">,
                       AssemblerPredicate<(all_of FeatureLS64), "ls64">;
def HasBRBE          : Predicate<"Subtarget->hasBRBE()">,
                       AssemblerPredicate<(all_of FeatureBRBE), "brbe">;
def HasSPE_EEF       : Predicate<"Subtarget->hasSPE_EEF()">,
                       AssemblerPredicate<(all_of FeatureSPE_EEF), "spe-eef">;
def HasHBC           : Predicate<"Subtarget->hasHBC()">,
                       AssemblerPredicate<(all_of FeatureHBC), "hbc">;
def HasMOPS          : Predicate<"Subtarget->hasMOPS()">,
                       AssemblerPredicate<(all_of FeatureMOPS), "mops">;
def IsLE             : Predicate<"Subtarget->isLittleEndian()">;
def IsBE             : Predicate<"!Subtarget->isLittleEndian()">;
def IsWindows        : Predicate<"Subtarget->isTargetWindows()">;
def UseExperimentalZeroingPseudos
    : Predicate<"Subtarget->useExperimentalZeroingPseudos()">;
def UseAlternateSExtLoadCVTF32
    : Predicate<"Subtarget->useAlternateSExtLoadCVTF32Pattern()">;
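// Illustrative sketch of how the predicates above are consumed (the
// instruction below is hypothetical, not defined in this file): a Predicate
// gates instruction selection through its C++ condition, while the attached
// AssemblerPredicate gates assembly/disassembly and supplies the quoted
// feature name for diagnostics.
//
//   let Predicates = [HasLSE] in
//   def MyAtomicOp : ...;   // only selected/accepted with +lse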
def UseNegativeImmediates
    : Predicate<"false">, AssemblerPredicate<(all_of (not FeatureNoNegativeImmediates)),
                                             "NegativeImmediates">;

def UseScalarIncVL : Predicate<"Subtarget->useScalarIncVL()">;

def AArch64LocalRecover : SDNode<"ISD::LOCAL_RECOVER",
                                 SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
                                                      SDTCisInt<1>]>>;


//===----------------------------------------------------------------------===//
// AArch64-specific DAG Nodes.
//

// SDTBinaryArithWithFlagsOut - RES, FLAGS = op LHS, RHS
def SDTBinaryArithWithFlagsOut : SDTypeProfile<2, 2,
                                               [SDTCisSameAs<0, 2>,
                                                SDTCisSameAs<0, 3>,
                                                SDTCisInt<0>, SDTCisVT<1, i32>]>;

// SDTBinaryArithWithFlagsIn - RES = op LHS, RHS, FLAGS
def SDTBinaryArithWithFlagsIn : SDTypeProfile<1, 3,
                                              [SDTCisSameAs<0, 1>,
                                               SDTCisSameAs<0, 2>,
                                               SDTCisInt<0>,
                                               SDTCisVT<3, i32>]>;

// SDTBinaryArithWithFlagsInOut - RES, FLAGS = op LHS, RHS, FLAGS
def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
                                                 [SDTCisSameAs<0, 2>,
                                                  SDTCisSameAs<0, 3>,
                                                  SDTCisInt<0>,
                                                  SDTCisVT<1, i32>,
                                                  SDTCisVT<4, i32>]>;

def SDT_AArch64Brcond : SDTypeProfile<0, 3,
                                      [SDTCisVT<0, OtherVT>, SDTCisVT<1, i32>,
                                       SDTCisVT<2, i32>]>;
def SDT_AArch64cbz : SDTypeProfile<0, 2, [SDTCisInt<0>, SDTCisVT<1, OtherVT>]>;
def SDT_AArch64tbz : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>,
                                          SDTCisVT<2, OtherVT>]>;


def SDT_AArch64CSel : SDTypeProfile<1, 4,
                                    [SDTCisSameAs<0, 1>,
                                     SDTCisSameAs<0, 2>,
                                     SDTCisInt<3>,
                                     SDTCisVT<4, i32>]>;
def SDT_AArch64CCMP : SDTypeProfile<1, 5,
                                    [SDTCisVT<0, i32>,
                                     SDTCisInt<1>,
                                     SDTCisSameAs<1, 2>,
                                     SDTCisInt<3>,
                                     SDTCisInt<4>,
                                     SDTCisVT<5, i32>]>;
def SDT_AArch64FCCMP : SDTypeProfile<1, 5,
                                     [SDTCisVT<0, i32>,
                                      SDTCisFP<1>,
                                      SDTCisSameAs<1, 2>,
                                      SDTCisInt<3>,
                                      SDTCisInt<4>,
                                      SDTCisVT<5, i32>]>;
def SDT_AArch64FCmp : SDTypeProfile<0, 2,
                                    [SDTCisFP<0>,
                                     SDTCisSameAs<0, 1>]>;
def SDT_AArch64Dup : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
def SDT_AArch64DupLane : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisInt<2>]>;
def SDT_AArch64Insr : SDTypeProfile<1, 2, [SDTCisVec<0>]>;
def SDT_AArch64Zip : SDTypeProfile<1, 2, [SDTCisVec<0>,
                                          SDTCisSameAs<0, 1>,
                                          SDTCisSameAs<0, 2>]>;
def SDT_AArch64MOVIedit : SDTypeProfile<1, 1, [SDTCisInt<1>]>;
def SDT_AArch64MOVIshift : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
def SDT_AArch64vecimm : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                             SDTCisInt<2>, SDTCisInt<3>]>;
def SDT_AArch64UnaryVec: SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDT_AArch64ExtVec: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                            SDTCisSameAs<0,2>, SDTCisInt<3>]>;
def SDT_AArch64vshift : SDTypeProfile<1, 2, [SDTCisSameAs<0,1>, SDTCisInt<2>]>;
def SDT_AArch64Dot: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                         SDTCisVec<2>, SDTCisSameAs<2,3>]>;

def SDT_AArch64vshiftinsert : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<3>,
                                                   SDTCisSameAs<0,1>,
                                                   SDTCisSameAs<0,2>]>;

def SDT_AArch64unvec : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDT_AArch64fcmpz : SDTypeProfile<1, 1, []>;
def SDT_AArch64fcmp : SDTypeProfile<1, 2, [SDTCisSameAs<1,2>]>;
def SDT_AArch64binvec : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                             SDTCisSameAs<0,2>]>;
def SDT_AArch64trivec : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                             SDTCisSameAs<0,2>,
                                             SDTCisSameAs<0,3>]>;
def SDT_AArch64TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>]>;
def SDT_AArch64PREFETCH : SDTypeProfile<0, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<1>]>;

def SDT_AArch64ITOF : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisSameAs<0,1>]>;

def SDT_AArch64TLSDescCall : SDTypeProfile<0, -2, [SDTCisPtrTy<0>,
                                                   SDTCisPtrTy<1>]>;

def SDT_AArch64uaddlp : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>]>;

def SDT_AArch64ldp : SDTypeProfile<2, 1, [SDTCisVT<0, i64>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
def SDT_AArch64stp : SDTypeProfile<0, 3, [SDTCisVT<0, i64>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
def SDT_AArch64stnp : SDTypeProfile<0, 3, [SDTCisVT<0, v4i32>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;

// Generates the general dynamic sequences, i.e.
//  adrp  x0, :tlsdesc:var
//  ldr   x1, [x0, #:tlsdesc_lo12:var]
//  add   x0, x0, #:tlsdesc_lo12:var
//  .tlsdesccall var
//  blr   x1

// (the TPIDR_EL0 offset is put directly in X0, hence no "result" here)
// The profile's single operand is the variable.
def SDT_AArch64TLSDescCallSeq : SDTypeProfile<0, 1,
                                              [SDTCisPtrTy<0>]>;

def SDT_AArch64WrapperLarge : SDTypeProfile<1, 4,
                                            [SDTCisVT<0, i64>, SDTCisVT<1, i32>,
                                             SDTCisSameAs<1, 2>, SDTCisSameAs<1, 3>,
                                             SDTCisSameAs<1, 4>]>;

def SDT_AArch64TBL : SDTypeProfile<1, 2, [
  SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisInt<2>
]>;

// non-extending masked load fragment.
def nonext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD &&
         cast<MaskedLoadSDNode>(N)->isUnindexed() &&
         !cast<MaskedLoadSDNode>(N)->isNonTemporal();
}]>;
// sign extending masked load fragments.
def asext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
  return (cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD ||
          cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD) &&
         cast<MaskedLoadSDNode>(N)->isUnindexed();
}]>;
def asext_masked_load_i8 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (asext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def asext_masked_load_i16 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (asext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def asext_masked_load_i32 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (asext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;
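// Illustrative sketch (a hypothetical pattern; the vector/predicate types and
// the instruction name are placeholders, not definitions from this file):
// fragments such as asext_masked_load_i8 are intended to be used by patterns
// that select extending vector loads, e.g.
//
//   def : Pat<(nxv8i16 (asext_masked_load_i8 GPR64sp:$base, (nxv8i1 PPR:$pg),
//                                            (nxv8i16 undef))),
//             (SOME_SEXT_LOAD_INSTR PPR:$pg, GPR64sp:$base)>;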
// zero extending masked load fragments.
def zext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD &&
         cast<MaskedLoadSDNode>(N)->isUnindexed();
}]>;
def zext_masked_load_i8 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (zext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def zext_masked_load_i16 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (zext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def zext_masked_load_i32 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (zext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;

def non_temporal_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD &&
         cast<MaskedLoadSDNode>(N)->isUnindexed() &&
         cast<MaskedLoadSDNode>(N)->isNonTemporal();
}]>;

// non-truncating masked store fragment.
def nontrunc_masked_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return !cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed() &&
         !cast<MaskedStoreSDNode>(N)->isNonTemporal();
}]>;
// truncating masked store fragments.
def trunc_masked_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed();
}]>;
def trunc_masked_store_i8 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def trunc_masked_store_i16 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def trunc_masked_store_i32 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;

def non_temporal_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return !cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed() &&
         cast<MaskedStoreSDNode>(N)->isNonTemporal();
}]>;

// top16Zero - return true if the upper 16 bits of $src are 0, false otherwise
def top16Zero: PatLeaf<(i32 GPR32:$src), [{
  return SDValue(N,0)->getValueType(0) == MVT::i32 &&
         CurDAG->MaskedValueIsZero(SDValue(N,0), APInt::getHighBitsSet(32, 16));
  }]>;

// top32Zero - return true if the upper 32 bits of $src are 0, false otherwise
def top32Zero: PatLeaf<(i64 GPR64:$src), [{
  return SDValue(N,0)->getValueType(0) == MVT::i64 &&
         CurDAG->MaskedValueIsZero(SDValue(N,0), APInt::getHighBitsSet(64, 32));
  }]>;
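// Illustrative sketch (a hypothetical pattern, not from this file): these
// leaves let patterns prove that an extension is already free. A pattern such
// as
//
//   def : Pat<(i64 (zext (i32 top16Zero:$src))), ...>;
//
// would only match when known-bits analysis (MaskedValueIsZero) shows the
// upper 16 bits of $src are already clear.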
// Node definitions.
def AArch64adrp : SDNode<"AArch64ISD::ADRP", SDTIntUnaryOp, []>;
def AArch64adr : SDNode<"AArch64ISD::ADR", SDTIntUnaryOp, []>;
def AArch64addlow : SDNode<"AArch64ISD::ADDlow", SDTIntBinOp, []>;
def AArch64LOADgot : SDNode<"AArch64ISD::LOADgot", SDTIntUnaryOp>;
def AArch64callseq_start : SDNode<"ISD::CALLSEQ_START",
                                  SDCallSeqStart<[ SDTCisVT<0, i32>,
                                                   SDTCisVT<1, i32> ]>,
                                  [SDNPHasChain, SDNPOutGlue]>;
def AArch64callseq_end : SDNode<"ISD::CALLSEQ_END",
                                SDCallSeqEnd<[ SDTCisVT<0, i32>,
                                               SDTCisVT<1, i32> ]>,
                                [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def AArch64call : SDNode<"AArch64ISD::CALL",
                         SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
                         [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                          SDNPVariadic]>;

def AArch64call_rvmarker: SDNode<"AArch64ISD::CALL_RVMARKER",
                                 SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
                                 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                                  SDNPVariadic]>;

def AArch64brcond : SDNode<"AArch64ISD::BRCOND", SDT_AArch64Brcond,
                           [SDNPHasChain]>;
def AArch64cbz : SDNode<"AArch64ISD::CBZ", SDT_AArch64cbz,
                        [SDNPHasChain]>;
def AArch64cbnz : SDNode<"AArch64ISD::CBNZ", SDT_AArch64cbz,
                         [SDNPHasChain]>;
def AArch64tbz : SDNode<"AArch64ISD::TBZ", SDT_AArch64tbz,
                        [SDNPHasChain]>;
def AArch64tbnz : SDNode<"AArch64ISD::TBNZ", SDT_AArch64tbz,
                         [SDNPHasChain]>;


def AArch64csel : SDNode<"AArch64ISD::CSEL", SDT_AArch64CSel>;
def AArch64csinv : SDNode<"AArch64ISD::CSINV", SDT_AArch64CSel>;
def AArch64csneg : SDNode<"AArch64ISD::CSNEG", SDT_AArch64CSel>;
def AArch64csinc : SDNode<"AArch64ISD::CSINC", SDT_AArch64CSel>;
def AArch64retflag : SDNode<"AArch64ISD::RET_FLAG", SDTNone,
                            [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def AArch64adc : SDNode<"AArch64ISD::ADC", SDTBinaryArithWithFlagsIn>;
def AArch64sbc : SDNode<"AArch64ISD::SBC", SDTBinaryArithWithFlagsIn>;
def AArch64add_flag : SDNode<"AArch64ISD::ADDS", SDTBinaryArithWithFlagsOut,
                             [SDNPCommutative]>;
def AArch64sub_flag : SDNode<"AArch64ISD::SUBS", SDTBinaryArithWithFlagsOut>;
def AArch64and_flag : SDNode<"AArch64ISD::ANDS", SDTBinaryArithWithFlagsOut,
                             [SDNPCommutative]>;
def AArch64adc_flag : SDNode<"AArch64ISD::ADCS", SDTBinaryArithWithFlagsInOut>;
def AArch64sbc_flag : SDNode<"AArch64ISD::SBCS", SDTBinaryArithWithFlagsInOut>;

def AArch64ccmp : SDNode<"AArch64ISD::CCMP", SDT_AArch64CCMP>;
def AArch64ccmn : SDNode<"AArch64ISD::CCMN", SDT_AArch64CCMP>;
def AArch64fccmp : SDNode<"AArch64ISD::FCCMP", SDT_AArch64FCCMP>;

def AArch64threadpointer : SDNode<"AArch64ISD::THREAD_POINTER", SDTPtrLeaf>;

def AArch64fcmp : SDNode<"AArch64ISD::FCMP", SDT_AArch64FCmp>;
def AArch64strict_fcmp : SDNode<"AArch64ISD::STRICT_FCMP", SDT_AArch64FCmp,
                                [SDNPHasChain]>;
def AArch64strict_fcmpe : SDNode<"AArch64ISD::STRICT_FCMPE", SDT_AArch64FCmp,
                                 [SDNPHasChain]>;
def AArch64any_fcmp : PatFrags<(ops node:$lhs, node:$rhs),
                               [(AArch64strict_fcmp node:$lhs, node:$rhs),
                                (AArch64fcmp node:$lhs, node:$rhs)]>;

def AArch64dup : SDNode<"AArch64ISD::DUP", SDT_AArch64Dup>;
def AArch64duplane8 : SDNode<"AArch64ISD::DUPLANE8", SDT_AArch64DupLane>;
def AArch64duplane16 : SDNode<"AArch64ISD::DUPLANE16", SDT_AArch64DupLane>;
def AArch64duplane32 : SDNode<"AArch64ISD::DUPLANE32", SDT_AArch64DupLane>;
def AArch64duplane64 : SDNode<"AArch64ISD::DUPLANE64", SDT_AArch64DupLane>;

def AArch64insr : SDNode<"AArch64ISD::INSR", SDT_AArch64Insr>;

SDNode<"AArch64ISD::ZIP1", SDT_AArch64Zip>; 533def AArch64zip2 : SDNode<"AArch64ISD::ZIP2", SDT_AArch64Zip>; 534def AArch64uzp1 : SDNode<"AArch64ISD::UZP1", SDT_AArch64Zip>; 535def AArch64uzp2 : SDNode<"AArch64ISD::UZP2", SDT_AArch64Zip>; 536def AArch64trn1 : SDNode<"AArch64ISD::TRN1", SDT_AArch64Zip>; 537def AArch64trn2 : SDNode<"AArch64ISD::TRN2", SDT_AArch64Zip>; 538 539def AArch64movi_edit : SDNode<"AArch64ISD::MOVIedit", SDT_AArch64MOVIedit>; 540def AArch64movi_shift : SDNode<"AArch64ISD::MOVIshift", SDT_AArch64MOVIshift>; 541def AArch64movi_msl : SDNode<"AArch64ISD::MOVImsl", SDT_AArch64MOVIshift>; 542def AArch64mvni_shift : SDNode<"AArch64ISD::MVNIshift", SDT_AArch64MOVIshift>; 543def AArch64mvni_msl : SDNode<"AArch64ISD::MVNImsl", SDT_AArch64MOVIshift>; 544def AArch64movi : SDNode<"AArch64ISD::MOVI", SDT_AArch64MOVIedit>; 545def AArch64fmov : SDNode<"AArch64ISD::FMOV", SDT_AArch64MOVIedit>; 546 547def AArch64rev16 : SDNode<"AArch64ISD::REV16", SDT_AArch64UnaryVec>; 548def AArch64rev32 : SDNode<"AArch64ISD::REV32", SDT_AArch64UnaryVec>; 549def AArch64rev64 : SDNode<"AArch64ISD::REV64", SDT_AArch64UnaryVec>; 550def AArch64ext : SDNode<"AArch64ISD::EXT", SDT_AArch64ExtVec>; 551 552def AArch64vashr : SDNode<"AArch64ISD::VASHR", SDT_AArch64vshift>; 553def AArch64vlshr : SDNode<"AArch64ISD::VLSHR", SDT_AArch64vshift>; 554def AArch64vshl : SDNode<"AArch64ISD::VSHL", SDT_AArch64vshift>; 555def AArch64sqshli : SDNode<"AArch64ISD::SQSHL_I", SDT_AArch64vshift>; 556def AArch64uqshli : SDNode<"AArch64ISD::UQSHL_I", SDT_AArch64vshift>; 557def AArch64sqshlui : SDNode<"AArch64ISD::SQSHLU_I", SDT_AArch64vshift>; 558def AArch64srshri : SDNode<"AArch64ISD::SRSHR_I", SDT_AArch64vshift>; 559def AArch64urshri : SDNode<"AArch64ISD::URSHR_I", SDT_AArch64vshift>; 560def AArch64vsli : SDNode<"AArch64ISD::VSLI", SDT_AArch64vshiftinsert>; 561def AArch64vsri : SDNode<"AArch64ISD::VSRI", SDT_AArch64vshiftinsert>; 562 563def AArch64bit: SDNode<"AArch64ISD::BIT", SDT_AArch64trivec>; 564def AArch64bsp: SDNode<"AArch64ISD::BSP", SDT_AArch64trivec>; 565 566def AArch64cmeq: SDNode<"AArch64ISD::CMEQ", SDT_AArch64binvec>; 567def AArch64cmge: SDNode<"AArch64ISD::CMGE", SDT_AArch64binvec>; 568def AArch64cmgt: SDNode<"AArch64ISD::CMGT", SDT_AArch64binvec>; 569def AArch64cmhi: SDNode<"AArch64ISD::CMHI", SDT_AArch64binvec>; 570def AArch64cmhs: SDNode<"AArch64ISD::CMHS", SDT_AArch64binvec>; 571 572def AArch64fcmeq: SDNode<"AArch64ISD::FCMEQ", SDT_AArch64fcmp>; 573def AArch64fcmge: SDNode<"AArch64ISD::FCMGE", SDT_AArch64fcmp>; 574def AArch64fcmgt: SDNode<"AArch64ISD::FCMGT", SDT_AArch64fcmp>; 575 576def AArch64cmeqz: SDNode<"AArch64ISD::CMEQz", SDT_AArch64unvec>; 577def AArch64cmgez: SDNode<"AArch64ISD::CMGEz", SDT_AArch64unvec>; 578def AArch64cmgtz: SDNode<"AArch64ISD::CMGTz", SDT_AArch64unvec>; 579def AArch64cmlez: SDNode<"AArch64ISD::CMLEz", SDT_AArch64unvec>; 580def AArch64cmltz: SDNode<"AArch64ISD::CMLTz", SDT_AArch64unvec>; 581def AArch64cmtst : PatFrag<(ops node:$LHS, node:$RHS), 582 (vnot (AArch64cmeqz (and node:$LHS, node:$RHS)))>; 583 584def AArch64fcmeqz: SDNode<"AArch64ISD::FCMEQz", SDT_AArch64fcmpz>; 585def AArch64fcmgez: SDNode<"AArch64ISD::FCMGEz", SDT_AArch64fcmpz>; 586def AArch64fcmgtz: SDNode<"AArch64ISD::FCMGTz", SDT_AArch64fcmpz>; 587def AArch64fcmlez: SDNode<"AArch64ISD::FCMLEz", SDT_AArch64fcmpz>; 588def AArch64fcmltz: SDNode<"AArch64ISD::FCMLTz", SDT_AArch64fcmpz>; 589 590def AArch64bici: SDNode<"AArch64ISD::BICi", SDT_AArch64vecimm>; 591def AArch64orri: SDNode<"AArch64ISD::ORRi", SDT_AArch64vecimm>; 

def AArch64tcret: SDNode<"AArch64ISD::TC_RETURN", SDT_AArch64TCRET,
                         [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;

def AArch64Prefetch : SDNode<"AArch64ISD::PREFETCH", SDT_AArch64PREFETCH,
                             [SDNPHasChain, SDNPSideEffect]>;

def AArch64sitof: SDNode<"AArch64ISD::SITOF", SDT_AArch64ITOF>;
def AArch64uitof: SDNode<"AArch64ISD::UITOF", SDT_AArch64ITOF>;

def AArch64tlsdesc_callseq : SDNode<"AArch64ISD::TLSDESC_CALLSEQ",
                                    SDT_AArch64TLSDescCallSeq,
                                    [SDNPInGlue, SDNPOutGlue, SDNPHasChain,
                                     SDNPVariadic]>;


def AArch64WrapperLarge : SDNode<"AArch64ISD::WrapperLarge",
                                 SDT_AArch64WrapperLarge>;

def AArch64NvCast : SDNode<"AArch64ISD::NVCAST", SDTUnaryOp>;

def SDT_AArch64mull : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
                                           SDTCisSameAs<1, 2>]>;
def AArch64smull : SDNode<"AArch64ISD::SMULL", SDT_AArch64mull>;
def AArch64umull : SDNode<"AArch64ISD::UMULL", SDT_AArch64mull>;

def AArch64frecpe : SDNode<"AArch64ISD::FRECPE", SDTFPUnaryOp>;
def AArch64frecps : SDNode<"AArch64ISD::FRECPS", SDTFPBinOp>;
def AArch64frsqrte : SDNode<"AArch64ISD::FRSQRTE", SDTFPUnaryOp>;
def AArch64frsqrts : SDNode<"AArch64ISD::FRSQRTS", SDTFPBinOp>;

def AArch64sdot : SDNode<"AArch64ISD::SDOT", SDT_AArch64Dot>;
def AArch64udot : SDNode<"AArch64ISD::UDOT", SDT_AArch64Dot>;

def AArch64saddv : SDNode<"AArch64ISD::SADDV", SDT_AArch64UnaryVec>;
def AArch64uaddv : SDNode<"AArch64ISD::UADDV", SDT_AArch64UnaryVec>;
def AArch64sminv : SDNode<"AArch64ISD::SMINV", SDT_AArch64UnaryVec>;
def AArch64uminv : SDNode<"AArch64ISD::UMINV", SDT_AArch64UnaryVec>;
def AArch64smaxv : SDNode<"AArch64ISD::SMAXV", SDT_AArch64UnaryVec>;
def AArch64umaxv : SDNode<"AArch64ISD::UMAXV", SDT_AArch64UnaryVec>;

def AArch64srhadd : SDNode<"AArch64ISD::SRHADD", SDT_AArch64binvec>;
def AArch64urhadd : SDNode<"AArch64ISD::URHADD", SDT_AArch64binvec>;
def AArch64shadd : SDNode<"AArch64ISD::SHADD", SDT_AArch64binvec>;
def AArch64uhadd : SDNode<"AArch64ISD::UHADD", SDT_AArch64binvec>;

def AArch64uabd : PatFrags<(ops node:$lhs, node:$rhs),
                           [(abdu node:$lhs, node:$rhs),
                            (int_aarch64_neon_uabd node:$lhs, node:$rhs)]>;
def AArch64sabd : PatFrags<(ops node:$lhs, node:$rhs),
                           [(abds node:$lhs, node:$rhs),
                            (int_aarch64_neon_sabd node:$lhs, node:$rhs)]>;

def AArch64uaddlp_n : SDNode<"AArch64ISD::UADDLP", SDT_AArch64uaddlp>;
def AArch64uaddlp : PatFrags<(ops node:$src),
                             [(AArch64uaddlp_n node:$src),
                              (int_aarch64_neon_uaddlp node:$src)]>;

def SDT_AArch64SETTAG : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisPtrTy<1>]>;
def AArch64stg : SDNode<"AArch64ISD::STG", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stzg : SDNode<"AArch64ISD::STZG", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64st2g : SDNode<"AArch64ISD::ST2G", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stz2g : SDNode<"AArch64ISD::STZ2G", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;

def SDT_AArch64unpk : SDTypeProfile<1, 1, [
  SDTCisInt<0>, SDTCisInt<1>, SDTCisOpSmallerThanOp<1, 0>
]>;
def AArch64sunpkhi : SDNode<"AArch64ISD::SUNPKHI", SDT_AArch64unpk>;
def AArch64sunpklo : SDNode<"AArch64ISD::SUNPKLO", SDT_AArch64unpk>;
def AArch64uunpkhi : SDNode<"AArch64ISD::UUNPKHI", SDT_AArch64unpk>;
def AArch64uunpklo : SDNode<"AArch64ISD::UUNPKLO", SDT_AArch64unpk>;

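// Worked example for the unpack nodes (SVE semantics, summarized here for
// orientation): SDTCisOpSmallerThanOp<1, 0> forces a widening, e.g. UUNPKLO
// applied to an nxv16i8 zero-extends the low half's i8 elements into an
// nxv8i16 result, and UUNPKHI does the same for the high half; the SUNPK*
// variants sign-extend instead.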
def AArch64ldp : SDNode<"AArch64ISD::LDP", SDT_AArch64ldp, [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def AArch64stp : SDNode<"AArch64ISD::STP", SDT_AArch64stp, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stnp : SDNode<"AArch64ISD::STNP", SDT_AArch64stnp, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;

def AArch64tbl : SDNode<"AArch64ISD::TBL", SDT_AArch64TBL>;
def AArch64mrs : SDNode<"AArch64ISD::MRS",
                        SDTypeProfile<1, 1, [SDTCisVT<0, i64>, SDTCisVT<1, i32>]>,
                        [SDNPHasChain, SDNPOutGlue]>;
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//

// AArch64 Instruction Predicate Definitions.
// We could compute these on a per-module basis but doing so requires accessing
// the Function object through the <Target>Subtarget and objections were raised
// to that (see post-commit review comments for r301750).
let RecomputePerFunction = 1 in {
  def ForCodeSize    : Predicate<"shouldOptForSize(MF)">;
  def NotForCodeSize : Predicate<"!shouldOptForSize(MF)">;
  // Avoid generating STRQro if it is slow, unless we're optimizing for code size.
  def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || shouldOptForSize(MF)">;

  def UseBTI : Predicate<[{ MF->getInfo<AArch64FunctionInfo>()->branchTargetEnforcement() }]>;
  def NotUseBTI : Predicate<[{ !MF->getInfo<AArch64FunctionInfo>()->branchTargetEnforcement() }]>;

  def SLSBLRMitigation : Predicate<[{ MF->getSubtarget<AArch64Subtarget>().hardenSlsBlr() }]>;
  def NoSLSBLRMitigation : Predicate<[{ !MF->getSubtarget<AArch64Subtarget>().hardenSlsBlr() }]>;
  // Toggles patterns which aren't beneficial in GlobalISel when we aren't
  // optimizing. This allows us to selectively use patterns without impacting
  // SelectionDAG's behaviour.
  // FIXME: One day there will probably be a nicer way to check for this, but
  // today is not that day.
  def OptimizedGISelOrOtherSelector : Predicate<"!MF->getFunction().hasOptNone() || MF->getProperties().hasProperty(MachineFunctionProperties::Property::FailedISel) || !MF->getProperties().hasProperty(MachineFunctionProperties::Property::Legalized)">;
}

include "AArch64InstrFormats.td"
include "SVEInstrFormats.td"
include "SMEInstrFormats.td"

//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Miscellaneous instructions.
//===----------------------------------------------------------------------===//

let Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1 in {
// We set Sched to an empty list because we expect these instructions to simply
// get removed in most cases.
def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                              [(AArch64callseq_start timm:$amt1, timm:$amt2)]>,
                       Sched<[]>;
def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                            [(AArch64callseq_end timm:$amt1, timm:$amt2)]>,
                     Sched<[]>;
} // Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1

let isReMaterializable = 1, isCodeGenOnly = 1 in {
// FIXME: The following pseudo instructions are only needed because remat
// cannot handle multiple instructions. When that changes, they can be
// removed, along with the AArch64Wrapper node.

let AddedComplexity = 10 in
def LOADgot : Pseudo<(outs GPR64common:$dst), (ins i64imm:$addr),
                     [(set GPR64common:$dst, (AArch64LOADgot tglobaladdr:$addr))]>,
              Sched<[WriteLDAdr]>;

// The MOVaddr instruction should match only when the add is not folded
// into a load or store address.
def MOVaddr
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tglobaladdr:$hi),
                                                    tglobaladdr:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrJT
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tjumptable:$hi),
                                                    tjumptable:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrCP
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tconstpool:$hi),
                                                    tconstpool:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrBA
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tblockaddress:$hi),
                                                    tblockaddress:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrTLS
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tglobaltlsaddr:$hi),
                                                    tglobaltlsaddr:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrEXT
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp texternalsym:$hi),
                                                    texternalsym:$low))]>,
      Sched<[WriteAdrAdr]>;
// Normally AArch64addlow either gets folded into a following ldr/str,
// or together with an adrp into MOVaddr above. For cases with TLS, it
// might appear without either of them, so allow lowering it into a plain
// add.
def ADDlowTLS
    : Pseudo<(outs GPR64sp:$dst), (ins GPR64sp:$src, i64imm:$low),
             [(set GPR64sp:$dst, (AArch64addlow GPR64sp:$src,
                                                tglobaltlsaddr:$low))]>,
      Sched<[WriteAdr]>;

} // isReMaterializable, isCodeGenOnly

def : Pat<(AArch64LOADgot tglobaltlsaddr:$addr),
          (LOADgot tglobaltlsaddr:$addr)>;

def : Pat<(AArch64LOADgot texternalsym:$addr),
          (LOADgot texternalsym:$addr)>;

def : Pat<(AArch64LOADgot tconstpool:$addr),
          (LOADgot tconstpool:$addr)>;

// A 32-bit jump table destination is actually only 2 instructions, since we
// can use the table itself as a PC-relative base. But optimization occurs
// after branch relaxation, so be pessimistic.
let Size = 12, Constraints = "@earlyclobber $dst,@earlyclobber $scratch",
    isNotDuplicable = 1 in {
def JumpTableDest32 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                             (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                      Sched<[]>;
def JumpTableDest16 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                             (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                      Sched<[]>;
def JumpTableDest8 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                            (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                     Sched<[]>;
}

// Space-consuming pseudo to aid testing of placement and reachability
// algorithms. Immediate operand is the number of bytes this "instruction"
// occupies; register operands can be used to enforce dependency and constrain
// the scheduler.
let hasSideEffects = 1, mayLoad = 1, mayStore = 1 in
def SPACE : Pseudo<(outs GPR64:$Rd), (ins i32imm:$size, GPR64:$Rn),
                   [(set GPR64:$Rd, (int_aarch64_space imm:$size, GPR64:$Rn))]>,
            Sched<[]>;

let hasSideEffects = 1, isCodeGenOnly = 1 in {
  def SpeculationSafeValueX
      : Pseudo<(outs GPR64:$dst), (ins GPR64:$src), []>, Sched<[]>;
  def SpeculationSafeValueW
      : Pseudo<(outs GPR32:$dst), (ins GPR32:$src), []>, Sched<[]>;
}

// SpeculationBarrierEndBB must only be used after an unconditional control
// flow, i.e. after a terminator for which isBarrier is True.
let hasSideEffects = 1, isCodeGenOnly = 1, isTerminator = 1, isBarrier = 1 in {
  def SpeculationBarrierISBDSBEndBB
      : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SpeculationBarrierSBEndBB
      : Pseudo<(outs), (ins), []>, Sched<[]>;
}

//===----------------------------------------------------------------------===//
// System instructions.
//===----------------------------------------------------------------------===//

def HINT : HintI<"hint">;
def : InstAlias<"nop",   (HINT 0b000)>;
def : InstAlias<"yield", (HINT 0b001)>;
def : InstAlias<"wfe",   (HINT 0b010)>;
def : InstAlias<"wfi",   (HINT 0b011)>;
def : InstAlias<"sev",   (HINT 0b100)>;
def : InstAlias<"sevl",  (HINT 0b101)>;
def : InstAlias<"dgh",   (HINT 0b110)>;
def : InstAlias<"esb",   (HINT 0b10000)>, Requires<[HasRAS]>;
def : InstAlias<"csdb",  (HINT 20)>;
// In order to be able to write readable assembly, LLVM should accept assembly
// inputs that use Branch Target Identification mnemonics, even with BTI disabled.
// However, in order to be compatible with other assemblers (e.g. GAS), LLVM
// should not emit these mnemonics unless BTI is enabled.
def : InstAlias<"bti",  (HINT 32), 0>;
def : InstAlias<"bti $op", (HINT btihint_op:$op), 0>;
def : InstAlias<"bti",  (HINT 32)>, Requires<[HasBTI]>;
def : InstAlias<"bti $op", (HINT btihint_op:$op)>, Requires<[HasBTI]>;

// v8.2a Statistical Profiling extension
def : InstAlias<"psb $op", (HINT psbhint_op:$op)>, Requires<[HasSPE]>;

// As far as LLVM is concerned this writes to the system's exclusive monitors.
let mayLoad = 1, mayStore = 1 in
def CLREX : CRmSystemI<imm0_15, 0b010, "clrex">;
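// For orientation (encodings summarized from the architecture, not defined
// here): the barrier instructions below carry their option in the CRm field,
// e.g. "dmb ish" is DMB with CRm = 0b1011 and "dmb sy" is DMB with
// CRm = 0b1111; the "ssbb"/"pssbb" aliases further down reuse DSB with CRm
// values 0 and 4.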

// NOTE: ideally, this would have mayStore = 0, mayLoad = 0, but we cannot
// model patterns with sufficiently fine granularity.
let mayLoad = ?, mayStore = ? in {
def DMB : CRmSystemI<barrier_op, 0b101, "dmb",
                     [(int_aarch64_dmb (i32 imm32_0_15:$CRm))]>;

def DSB : CRmSystemI<barrier_op, 0b100, "dsb",
                     [(int_aarch64_dsb (i32 imm32_0_15:$CRm))]>;

def ISB : CRmSystemI<barrier_op, 0b110, "isb",
                     [(int_aarch64_isb (i32 imm32_0_15:$CRm))]>;

def TSB : CRmSystemI<barrier_op, 0b010, "tsb", []> {
  let CRm = 0b0010;
  let Inst{12} = 0;
  let Predicates = [HasTRACEV8_4];
}

def DSBnXS : CRmSystemI<barrier_nxs_op, 0b001, "dsb"> {
  let CRm{1-0} = 0b11;
  let Inst{9-8} = 0b10;
  let Predicates = [HasXS];
}

let Predicates = [HasWFxT] in {
def WFET : RegInputSystemI<0b0000, 0b000, "wfet">;
def WFIT : RegInputSystemI<0b0000, 0b001, "wfit">;
}

// Branch Record Buffer two-word mnemonic instructions
class BRBEI<bits<3> op2, string keyword>
    : SimpleSystemI<0, (ins), "brb", keyword>, Sched<[WriteSys]> {
  let Inst{31-8} = 0b110101010000100101110010;
  let Inst{7-5} = op2;
  let Predicates = [HasBRBE];
}
def BRB_IALL: BRBEI<0b100, "\tiall">;
def BRB_INJ:  BRBEI<0b101, "\tinj">;

}

// Allow uppercase and lowercase keyword arguments for BRB IALL and BRB INJ
def : TokenAlias<"INJ", "inj">;
def : TokenAlias<"IALL", "iall">;

// ARMv8.2-A Dot Product
let Predicates = [HasDotProd] in {
defm SDOT : SIMDThreeSameVectorDot<0, 0, "sdot", AArch64sdot>;
defm UDOT : SIMDThreeSameVectorDot<1, 0, "udot", AArch64udot>;
defm SDOTlane : SIMDThreeSameVectorDotIndex<0, 0, 0b10, "sdot", AArch64sdot>;
defm UDOTlane : SIMDThreeSameVectorDotIndex<1, 0, 0b10, "udot", AArch64udot>;
}

// ARMv8.6-A BFloat
let Predicates = [HasNEON, HasBF16] in {
defm BFDOT       : SIMDThreeSameVectorBFDot<1, "bfdot">;
defm BF16DOTlane : SIMDThreeSameVectorBF16DotI<0, "bfdot">;
def BFMMLA       : SIMDThreeSameVectorBF16MatrixMul<"bfmmla">;
def BFMLALB      : SIMDBF16MLAL<0, "bfmlalb", int_aarch64_neon_bfmlalb>;
def BFMLALT      : SIMDBF16MLAL<1, "bfmlalt", int_aarch64_neon_bfmlalt>;
def BFMLALBIdx   : SIMDBF16MLALIndex<0, "bfmlalb", int_aarch64_neon_bfmlalb>;
def BFMLALTIdx   : SIMDBF16MLALIndex<1, "bfmlalt", int_aarch64_neon_bfmlalt>;
def BFCVTN       : SIMD_BFCVTN;
def BFCVTN2      : SIMD_BFCVTN2;
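// For orientation (semantics summarized from the architecture, not from this
// file): BFDOT multiplies pairs of adjacent bf16 elements from the two
// sources and accumulates into the corresponding f32 lane, i.e. each result
// lane computes d[i] + n[2i]*m[2i] + n[2i+1]*m[2i+1].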

// Vector-scalar BFDOT:
// The second source operand of the 64-bit variant of BF16DOTlane is a 128-bit
// register (the instruction uses a single 32-bit lane from it), so the pattern
// is a bit tricky.
def : Pat<(v2f32 (int_aarch64_neon_bfdot
                    (v2f32 V64:$Rd), (v4bf16 V64:$Rn),
                    (v4bf16 (bitconvert
                      (v2i32 (AArch64duplane32
                        (v4i32 (bitconvert
                          (v8bf16 (insert_subvector undef,
                                                    (v4bf16 V64:$Rm),
                                                    (i64 0))))),
                        VectorIndexS:$idx)))))),
          (BF16DOTlanev4bf16 (v2f32 V64:$Rd), (v4bf16 V64:$Rn),
                             (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                             VectorIndexS:$idx)>;
}

let Predicates = [HasNEONorStreamingSVE, HasBF16] in {
def BFCVT : BF16ToSinglePrecision<"bfcvt">;
}

// ARMv8.6A AArch64 matrix multiplication
let Predicates = [HasMatMulInt8] in {
def  SMMLA : SIMDThreeSameVectorMatMul<0, 0, "smmla", int_aarch64_neon_smmla>;
def  UMMLA : SIMDThreeSameVectorMatMul<0, 1, "ummla", int_aarch64_neon_ummla>;
def USMMLA : SIMDThreeSameVectorMatMul<1, 0, "usmmla", int_aarch64_neon_usmmla>;
defm USDOT : SIMDThreeSameVectorDot<0, 1, "usdot", int_aarch64_neon_usdot>;
defm USDOTlane : SIMDThreeSameVectorDotIndex<0, 1, 0b10, "usdot", int_aarch64_neon_usdot>;

// sudot lane has a pattern where usdot is expected (there is no sudot).
// The second operand is used in the dup operation to repeat the indexed
// element.
class BaseSIMDSUDOTIndex<bit Q, string dst_kind, string lhs_kind,
                         string rhs_kind, RegisterOperand RegType,
                         ValueType AccumType, ValueType InputType>
    : BaseSIMDThreeSameVectorDotIndex<Q, 0, 1, 0b00, "sudot", dst_kind,
                                      lhs_kind, rhs_kind, RegType, AccumType,
                                      InputType, null_frag> {
  let Pattern = [(set (AccumType RegType:$dst),
                      (AccumType (int_aarch64_neon_usdot (AccumType RegType:$Rd),
                                   (InputType (bitconvert (AccumType
                                     (AArch64duplane32 (v4i32 V128:$Rm),
                                       VectorIndexS:$idx)))),
                                   (InputType RegType:$Rn))))];
}

multiclass SIMDSUDOTIndex {
  def v8i8  : BaseSIMDSUDOTIndex<0, ".2s", ".8b", ".4b", V64, v2i32, v8i8>;
  def v16i8 : BaseSIMDSUDOTIndex<1, ".4s", ".16b", ".4b", V128, v4i32, v16i8>;
}

defm SUDOTlane : SIMDSUDOTIndex;

}

// ARMv8.2-A FP16 Fused Multiply-Add Long
let Predicates = [HasNEON, HasFP16FML] in {
defm FMLAL  : SIMDThreeSameVectorFML<0, 1, 0b001, "fmlal", int_aarch64_neon_fmlal>;
defm FMLSL  : SIMDThreeSameVectorFML<0, 1, 0b101, "fmlsl", int_aarch64_neon_fmlsl>;
defm FMLAL2 : SIMDThreeSameVectorFML<1, 0, 0b001, "fmlal2", int_aarch64_neon_fmlal2>;
defm FMLSL2 : SIMDThreeSameVectorFML<1, 0, 0b101, "fmlsl2", int_aarch64_neon_fmlsl2>;
defm FMLALlane  : SIMDThreeSameVectorFMLIndex<0, 0b0000, "fmlal", int_aarch64_neon_fmlal>;
defm FMLSLlane  : SIMDThreeSameVectorFMLIndex<0, 0b0100, "fmlsl", int_aarch64_neon_fmlsl>;
defm FMLAL2lane : SIMDThreeSameVectorFMLIndex<1, 0b1000, "fmlal2", int_aarch64_neon_fmlal2>;
defm FMLSL2lane : SIMDThreeSameVectorFMLIndex<1, 0b1100, "fmlsl2", int_aarch64_neon_fmlsl2>;
}

// Armv8.2-A Crypto extensions
let Predicates = [HasSHA3] in {
def SHA512H   : CryptoRRRTied<0b0, 0b00, "sha512h">;
def SHA512H2  : CryptoRRRTied<0b0, 0b01, "sha512h2">;
def SHA512SU0 : CryptoRRTied_2D<0b0, 0b00, "sha512su0">;
def SHA512SU1 : CryptoRRRTied_2D<0b0, 0b10, "sha512su1">;
def RAX1      : CryptoRRR_2D<0b0, 0b11, "rax1">;
def EOR3      : CryptoRRRR_16B<0b00, "eor3">;
def BCAX      : CryptoRRRR_16B<0b01, "bcax">;
def XAR       : CryptoRRRi6<"xar">;

class SHA3_pattern<Instruction INST, Intrinsic OpNode, ValueType VecTy>
    : Pat<(VecTy (OpNode (VecTy V128:$Vd), (VecTy V128:$Vn), (VecTy V128:$Vm))),
          (INST (VecTy V128:$Vd), (VecTy V128:$Vn), (VecTy V128:$Vm))>;

def : Pat<(v2i64 (int_aarch64_crypto_sha512su0 (v2i64 V128:$Vn), (v2i64 V128:$Vm))),
          (SHA512SU0 (v2i64 V128:$Vn), (v2i64 V128:$Vm))>;

def : SHA3_pattern<SHA512H,   int_aarch64_crypto_sha512h,   v2i64>;
def : SHA3_pattern<SHA512H2,  int_aarch64_crypto_sha512h2,  v2i64>;
def : SHA3_pattern<SHA512SU1, int_aarch64_crypto_sha512su1, v2i64>;

def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v16i8>;
def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v8i16>;
def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v4i32>;
def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v2i64>;

class EOR3_pattern<ValueType VecTy>
    : Pat<(xor (xor (VecTy V128:$Vn), (VecTy V128:$Vm)), (VecTy V128:$Va)),
          (EOR3 (VecTy V128:$Vn), (VecTy V128:$Vm), (VecTy V128:$Va))>;

def : EOR3_pattern<v16i8>;
def : EOR3_pattern<v8i16>;
def : EOR3_pattern<v4i32>;
def : EOR3_pattern<v2i64>;

def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v16i8>;
def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v8i16>;
def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v4i32>;
def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v2i64>;

def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v16i8>;
def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v8i16>;
def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v4i32>;
def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v2i64>;

def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v16i8>;
def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v8i16>;
def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v4i32>;
def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v2i64>;

def : Pat<(v2i64 (int_aarch64_crypto_rax1 (v2i64 V128:$Vn), (v2i64 V128:$Vm))),
          (RAX1 (v2i64 V128:$Vn), (v2i64 V128:$Vm))>;

def : Pat<(v2i64 (int_aarch64_crypto_xar (v2i64 V128:$Vn), (v2i64 V128:$Vm), (i64 timm0_63:$imm))),
          (XAR (v2i64 V128:$Vn), (v2i64 V128:$Vm), (timm0_63:$imm))>;


} // HasSHA3

let Predicates = [HasSM4] in {
def SM3TT1A   : CryptoRRRi2Tied<0b0, 0b00, "sm3tt1a">;
def SM3TT1B   : CryptoRRRi2Tied<0b0, 0b01, "sm3tt1b">;
def SM3TT2A   : CryptoRRRi2Tied<0b0, 0b10, "sm3tt2a">;
def SM3TT2B   : CryptoRRRi2Tied<0b0, 0b11, "sm3tt2b">;
def SM3SS1    : CryptoRRRR_4S<0b10, "sm3ss1">;
def SM3PARTW1 : CryptoRRRTied_4S<0b1, 0b00, "sm3partw1">;
def SM3PARTW2 : CryptoRRRTied_4S<0b1, 0b01, "sm3partw2">;
def SM4ENCKEY : CryptoRRR_4S<0b1, 0b10, "sm4ekey">;
def SM4E      : CryptoRRTied_4S<0b0, 0b01, "sm4e">;

def : Pat<(v4i32 (int_aarch64_crypto_sm3ss1 (v4i32 V128:$Vn), (v4i32 V128:$Vm), (v4i32 V128:$Va))),
          (SM3SS1 (v4i32 V128:$Vn), (v4i32 V128:$Vm), (v4i32 V128:$Va))>;

class SM3PARTW_pattern<Instruction INST, Intrinsic OpNode>
    : Pat<(v4i32 (OpNode (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm))),
          (INST (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm))>;

class SM3TT_pattern<Instruction INST, Intrinsic OpNode>
    : Pat<(v4i32 (OpNode (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm), (i64 VectorIndexS_timm:$imm))),
          (INST (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm), (VectorIndexS_timm:$imm))>;

class SM4_pattern<Instruction INST, Intrinsic OpNode>
    : Pat<(v4i32 (OpNode (v4i32 V128:$Vn), (v4i32 V128:$Vm))),
          (INST (v4i32 V128:$Vn), (v4i32 V128:$Vm))>;

def : SM3PARTW_pattern<SM3PARTW1, int_aarch64_crypto_sm3partw1>;
def : SM3PARTW_pattern<SM3PARTW2, int_aarch64_crypto_sm3partw2>;

def : SM3TT_pattern<SM3TT1A, int_aarch64_crypto_sm3tt1a>;
def : SM3TT_pattern<SM3TT1B, int_aarch64_crypto_sm3tt1b>;
def : SM3TT_pattern<SM3TT2A, int_aarch64_crypto_sm3tt2a>;
def : SM3TT_pattern<SM3TT2B, int_aarch64_crypto_sm3tt2b>;

def : SM4_pattern<SM4ENCKEY, int_aarch64_crypto_sm4ekey>;
def : SM4_pattern<SM4E,      int_aarch64_crypto_sm4e>;
} // HasSM4

let Predicates = [HasRCPC] in {
  // v8.3 RCpc (Release Consistent processor consistent) support, optional in v8.2.
  def LDAPRB : RCPCLoad<0b00, "ldaprb", GPR32>;
  def LDAPRH : RCPCLoad<0b01, "ldaprh", GPR32>;
  def LDAPRW : RCPCLoad<0b10, "ldapr", GPR32>;
  def LDAPRX : RCPCLoad<0b11, "ldapr", GPR64>;
}

// v8.3a complex add and multiply-accumulate. No predicate here; that is done
// inside the multiclass, as the FP16 versions need different predicates.
defm FCMLA : SIMDThreeSameVectorTiedComplexHSD<1, 0b110, complexrotateop,
                                               "fcmla", null_frag>;
defm FCADD : SIMDThreeSameVectorComplexHSD<1, 0b111, complexrotateopodd,
                                           "fcadd", null_frag>;
defm FCMLA : SIMDIndexedTiedComplexHSD<0, 1, complexrotateop, "fcmla">;

let Predicates = [HasComplxNum, HasNEON, HasFullFP16] in {
  def : Pat<(v4f16 (int_aarch64_neon_vcadd_rot90 (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
            (FCADDv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm), (i32 0))>;
  def : Pat<(v4f16 (int_aarch64_neon_vcadd_rot270 (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
            (FCADDv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm), (i32 1))>;
  def : Pat<(v8f16 (int_aarch64_neon_vcadd_rot90 (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
            (FCADDv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 0))>;
  def : Pat<(v8f16 (int_aarch64_neon_vcadd_rot270 (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
            (FCADDv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 1))>;
}

let Predicates = [HasComplxNum, HasNEON] in {
  def : Pat<(v2f32 (int_aarch64_neon_vcadd_rot90 (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
            (FCADDv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 0))>;
  def : Pat<(v2f32 (int_aarch64_neon_vcadd_rot270 (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
            (FCADDv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 1))>;
  foreach Ty = [v4f32, v2f64] in {
    def : Pat<(Ty (int_aarch64_neon_vcadd_rot90 (Ty V128:$Rn), (Ty V128:$Rm))),
              (!cast<Instruction>("FCADD"#Ty) (Ty V128:$Rn), (Ty V128:$Rm), (i32 0))>;
    def : Pat<(Ty (int_aarch64_neon_vcadd_rot270 (Ty V128:$Rn), (Ty V128:$Rm))),
              (!cast<Instruction>("FCADD"#Ty) (Ty V128:$Rn), (Ty V128:$Rm), (i32 1))>;
  }
}

multiclass FCMLA_PATS<ValueType ty, DAGOperand Reg> {
  def : Pat<(ty (int_aarch64_neon_vcmla_rot0 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 0)>;
  def : Pat<(ty (int_aarch64_neon_vcmla_rot90 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 1)>;
  def : Pat<(ty (int_aarch64_neon_vcmla_rot180 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 2)>;
  def : Pat<(ty (int_aarch64_neon_vcmla_rot270 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 3)>;
}
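// For orientation: in FCMLA_PATS above (and FCMLA_LANE_PATS below), the
// rotation intrinsics map onto the instruction's rotate operand as
// rot0 -> 0, rot90 -> 1, rot180 -> 2, rot270 -> 3, i.e. the immediate counts
// multiples of 90 degrees.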

multiclass FCMLA_LANE_PATS<ValueType ty, DAGOperand Reg, dag RHSDup> {
  def : Pat<(ty (int_aarch64_neon_vcmla_rot0 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 0)>;
  def : Pat<(ty (int_aarch64_neon_vcmla_rot90 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 1)>;
  def : Pat<(ty (int_aarch64_neon_vcmla_rot180 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 2)>;
  def : Pat<(ty (int_aarch64_neon_vcmla_rot270 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 3)>;
}


let Predicates = [HasComplxNum, HasNEON, HasFullFP16] in {
  defm : FCMLA_PATS<v4f16, V64>;
  defm : FCMLA_PATS<v8f16, V128>;

  defm : FCMLA_LANE_PATS<v4f16, V64,
                         (v4f16 (bitconvert (v2i32 (AArch64duplane32 (v4i32 V128:$Rm), VectorIndexD:$idx))))>;
  defm : FCMLA_LANE_PATS<v8f16, V128,
                         (v8f16 (bitconvert (v4i32 (AArch64duplane32 (v4i32 V128:$Rm), VectorIndexS:$idx))))>;
}
let Predicates = [HasComplxNum, HasNEON] in {
  defm : FCMLA_PATS<v2f32, V64>;
  defm : FCMLA_PATS<v4f32, V128>;
  defm : FCMLA_PATS<v2f64, V128>;

  defm : FCMLA_LANE_PATS<v4f32, V128,
                         (v4f32 (bitconvert (v2i64 (AArch64duplane64 (v2i64 V128:$Rm), VectorIndexD:$idx))))>;
}

// v8.3a Pointer Authentication
// These instructions inhabit part of the hint space and so can be used with
// plain armv8 targets. Keeping the old HINT mnemonic when compiling without PA
// is important for compatibility with other assemblers (e.g. GAS) when
// building software that must run both on CPUs that implement PA and on those
// that don't.
let Uses = [LR], Defs = [LR] in {
  def PACIAZ   : SystemNoOperands<0b000, "hint\t#24">;
  def PACIBZ   : SystemNoOperands<0b010, "hint\t#26">;
  let isAuthenticated = 1 in {
    def AUTIAZ   : SystemNoOperands<0b100, "hint\t#28">;
    def AUTIBZ   : SystemNoOperands<0b110, "hint\t#30">;
  }
}
let Uses = [LR, SP], Defs = [LR] in {
  def PACIASP  : SystemNoOperands<0b001, "hint\t#25">;
  def PACIBSP  : SystemNoOperands<0b011, "hint\t#27">;
  let isAuthenticated = 1 in {
    def AUTIASP  : SystemNoOperands<0b101, "hint\t#29">;
    def AUTIBSP  : SystemNoOperands<0b111, "hint\t#31">;
  }
}
let Uses = [X16, X17], Defs = [X17], CRm = 0b0001 in {
  def PACIA1716  : SystemNoOperands<0b000, "hint\t#8">;
  def PACIB1716  : SystemNoOperands<0b010, "hint\t#10">;
  let isAuthenticated = 1 in {
    def AUTIA1716  : SystemNoOperands<0b100, "hint\t#12">;
    def AUTIB1716  : SystemNoOperands<0b110, "hint\t#14">;
  }
}

let Uses = [LR], Defs = [LR], CRm = 0b0000 in {
  def XPACLRI   : SystemNoOperands<0b111, "hint\t#7">;
}

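// Note on the 1716 forms above: they operate on fixed registers, signing or
// authenticating the pointer in X17 using the modifier in X16, which is why
// they are modeled with Uses = [X16, X17] and Defs = [X17].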
// In order to be able to write readable assembly, LLVM should accept assembly
// inputs that use pointer authentication mnemonics, even with PA disabled.
// However, in order to be compatible with other assemblers (e.g. GAS), LLVM
// should not emit these mnemonics unless PA is enabled.
def : InstAlias<"paciaz", (PACIAZ), 0>;
def : InstAlias<"pacibz", (PACIBZ), 0>;
def : InstAlias<"autiaz", (AUTIAZ), 0>;
def : InstAlias<"autibz", (AUTIBZ), 0>;
def : InstAlias<"paciasp", (PACIASP), 0>;
def : InstAlias<"pacibsp", (PACIBSP), 0>;
def : InstAlias<"autiasp", (AUTIASP), 0>;
def : InstAlias<"autibsp", (AUTIBSP), 0>;
def : InstAlias<"pacia1716", (PACIA1716), 0>;
def : InstAlias<"pacib1716", (PACIB1716), 0>;
def : InstAlias<"autia1716", (AUTIA1716), 0>;
def : InstAlias<"autib1716", (AUTIB1716), 0>;
def : InstAlias<"xpaclri", (XPACLRI), 0>;

// These pointer authentication instructions require armv8.3a
let Predicates = [HasPAuth] in {

  // When PA is enabled, a better mnemonic should be emitted.
  def : InstAlias<"paciaz", (PACIAZ), 1>;
  def : InstAlias<"pacibz", (PACIBZ), 1>;
  def : InstAlias<"autiaz", (AUTIAZ), 1>;
  def : InstAlias<"autibz", (AUTIBZ), 1>;
  def : InstAlias<"paciasp", (PACIASP), 1>;
  def : InstAlias<"pacibsp", (PACIBSP), 1>;
  def : InstAlias<"autiasp", (AUTIASP), 1>;
  def : InstAlias<"autibsp", (AUTIBSP), 1>;
  def : InstAlias<"pacia1716", (PACIA1716), 1>;
  def : InstAlias<"pacib1716", (PACIB1716), 1>;
  def : InstAlias<"autia1716", (AUTIA1716), 1>;
  def : InstAlias<"autib1716", (AUTIB1716), 1>;
  def : InstAlias<"xpaclri", (XPACLRI), 1>;

  multiclass SignAuth<bits<3> prefix, bits<3> prefix_z, string asm,
                      SDPatternOperator op> {
    def IA   : SignAuthOneData<prefix, 0b00, !strconcat(asm, "ia"), op>;
    def IB   : SignAuthOneData<prefix, 0b01, !strconcat(asm, "ib"), op>;
    def DA   : SignAuthOneData<prefix, 0b10, !strconcat(asm, "da"), op>;
    def DB   : SignAuthOneData<prefix, 0b11, !strconcat(asm, "db"), op>;
    def IZA  : SignAuthZero<prefix_z, 0b00, !strconcat(asm, "iza"), op>;
    def DZA  : SignAuthZero<prefix_z, 0b10, !strconcat(asm, "dza"), op>;
    def IZB  : SignAuthZero<prefix_z, 0b01, !strconcat(asm, "izb"), op>;
    def DZB  : SignAuthZero<prefix_z, 0b11, !strconcat(asm, "dzb"), op>;
  }

  defm PAC : SignAuth<0b000, 0b010, "pac", int_ptrauth_sign>;
  defm AUT : SignAuth<0b001, 0b011, "aut", null_frag>;

  def XPACI : ClearAuth<0, "xpaci">;
  def XPACD : ClearAuth<1, "xpacd">;

  def PACGA : SignAuthTwoOperand<0b1100, "pacga", int_ptrauth_sign_generic>;

  // Combined Instructions
  let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
    def BRAA    : AuthBranchTwoOperands<0, 0, "braa">;
    def BRAB    : AuthBranchTwoOperands<0, 1, "brab">;
  }
  let isCall = 1, Defs = [LR], Uses = [SP] in {
    def BLRAA   : AuthBranchTwoOperands<1, 0, "blraa">;
    def BLRAB   : AuthBranchTwoOperands<1, 1, "blrab">;
  }

  let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
    def BRAAZ   : AuthOneOperand<0b000, 0, "braaz">;
    def BRABZ   : AuthOneOperand<0b000, 1, "brabz">;
  }
  let isCall = 1, Defs = [LR], Uses = [SP] in {
    def BLRAAZ  : AuthOneOperand<0b001, 0, "blraaz">;
    def BLRABZ  : AuthOneOperand<0b001, 1, "blrabz">;
  }

  let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
    def RETAA  : AuthReturn<0b010, 0, "retaa">;
    def RETAB  : AuthReturn<0b010, 1, "retab">;
    def ERETAA : AuthReturn<0b100, 0, "eretaa">;
    def ERETAB : AuthReturn<0b100, 1, "eretab">;
  }

  defm LDRAA : AuthLoad<0, "ldraa", simm10Scaled>;
  defm LDRAB : AuthLoad<1, "ldrab", simm10Scaled>;

}

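// Worked example of the hint-space encoding: PACIASP is defined above as
// SystemNoOperands<0b001, "hint\t#25">, so on pre-v8.3 targets it assembles
// and disassembles as "hint #25", while with +pauth the emit bit on the alias
// pairs makes the printer prefer the "paciasp" spelling instead.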
// v8.3a floating point conversion for JavaScript
let Predicates = [HasJS, HasFPARMv8], Defs = [NZCV] in
def FJCVTZS  : BaseFPToIntegerUnscaled<0b01, 0b11, 0b110, FPR64, GPR32,
                                       "fjcvtzs",
                                       [(set GPR32:$Rd,
                                         (int_aarch64_fjcvtzs FPR64:$Rn))]> {
  let Inst{31} = 0;
} // HasJS, HasFPARMv8

// v8.4 flag manipulation instructions
let Predicates = [HasFlagM], Defs = [NZCV], Uses = [NZCV] in {
def CFINV : SimpleSystemI<0, (ins), "cfinv", "">, Sched<[WriteSys]> {
  let Inst{20-5} = 0b0000001000000000;
}
def SETF8  : BaseFlagManipulation<0, 0, (ins GPR32:$Rn), "setf8", "{\t$Rn}">;
def SETF16 : BaseFlagManipulation<0, 1, (ins GPR32:$Rn), "setf16", "{\t$Rn}">;
def RMIF   : FlagRotate<(ins GPR64:$Rn, uimm6:$imm, imm0_15:$mask), "rmif",
                        "{\t$Rn, $imm, $mask}">;
} // HasFlagM

// v8.5 flag manipulation instructions
let Predicates = [HasAltNZCV], Uses = [NZCV], Defs = [NZCV] in {

def XAFLAG : PstateWriteSimple<(ins), "xaflag", "">, Sched<[WriteSys]> {
  let Inst{18-16} = 0b000;
  let Inst{11-8} = 0b0000;
  let Unpredictable{11-8} = 0b1111;
  let Inst{7-5} = 0b001;
}

def AXFLAG : PstateWriteSimple<(ins), "axflag", "">, Sched<[WriteSys]> {
  let Inst{18-16} = 0b000;
  let Inst{11-8} = 0b0000;
  let Unpredictable{11-8} = 0b1111;
  let Inst{7-5} = 0b010;
}
} // HasAltNZCV


// Armv8.5-A speculation barrier
def SB : SimpleSystemI<0, (ins), "sb", "">, Sched<[]> {
  let Inst{20-5} = 0b0001100110000111;
  let Unpredictable{11-8} = 0b1111;
  let Predicates = [HasSB];
  let hasSideEffects = 1;
}

def : InstAlias<"clrex", (CLREX 0xf)>;
def : InstAlias<"isb", (ISB 0xf)>;
def : InstAlias<"ssbb", (DSB 0)>;
def : InstAlias<"pssbb", (DSB 4)>;
def : InstAlias<"dfb", (DSB 0b1100)>, Requires<[HasV8_0r]>;

def MRS    : MRSI;
def MSR    : MSRI;
def MSRpstateImm1 : MSRpstateImm0_1;
def MSRpstateImm4 : MSRpstateImm0_15;

def : Pat<(AArch64mrs imm:$id),
          (MRS imm:$id)>;

// The thread pointer (on Linux, at least, where this has been implemented) is
// TPIDR_EL0.
def MOVbaseTLS : Pseudo<(outs GPR64:$dst), (ins),
                        [(set GPR64:$dst, AArch64threadpointer)]>, Sched<[WriteSys]>;

let Uses = [ X9 ], Defs = [ X16, X17, LR, NZCV ] in {
def HWASAN_CHECK_MEMACCESS : Pseudo<
  (outs), (ins GPR64noip:$ptr, i32imm:$accessinfo),
  [(int_hwasan_check_memaccess X9, GPR64noip:$ptr, (i32 timm:$accessinfo))]>,
  Sched<[]>;
}

let Uses = [ X20 ], Defs = [ X16, X17, LR, NZCV ] in {
def HWASAN_CHECK_MEMACCESS_SHORTGRANULES : Pseudo<
  (outs), (ins GPR64noip:$ptr, i32imm:$accessinfo),
  [(int_hwasan_check_memaccess_shortgranules X20, GPR64noip:$ptr, (i32 timm:$accessinfo))]>,
  Sched<[]>;
}

// The cycle counter PMC register is PMCCNTR_EL0.
let Predicates = [HasPerfMon] in
def : Pat<(readcyclecounter), (MRS 0xdce8)>;

// FPCR register
def : Pat<(i64 (int_aarch64_get_fpcr)), (MRS 0xda20)>;
def : Pat<(int_aarch64_set_fpcr i64:$val), (MSR 0xda20, GPR64:$val)>;
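// Worked example of the MRS/MSR immediates above: the 16-bit operand packs
// (op0, op1, CRn, CRm, op2) as op0[15:14] op1[13:11] CRn[10:7] CRm[6:3]
// op2[2:0]. PMCCNTR_EL0 is (3, 3, 9, 13, 0), i.e.
// 0b11'011'1001'1101'000 = 0xdce8, and FPCR is (3, 3, 4, 4, 0) = 0xda20.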
// Generic system instructions
def SYSxt  : SystemXtI<0, "sys">;
def SYSLxt : SystemLXtI<1, "sysl">;

def : InstAlias<"sys $op1, $Cn, $Cm, $op2",
                (SYSxt imm0_7:$op1, sys_cr_op:$Cn,
                 sys_cr_op:$Cm, imm0_7:$op2, XZR)>;


let Predicates = [HasTME] in {

def TSTART : TMSystemI<0b0000, "tstart",
                       [(set GPR64:$Rt, (int_aarch64_tstart))]>;

def TCOMMIT : TMSystemINoOperand<0b0000, "tcommit", [(int_aarch64_tcommit)]>;

def TCANCEL : TMSystemException<0b011, "tcancel",
                                [(int_aarch64_tcancel timm64_0_65535:$imm)]>;

def TTEST : TMSystemI<0b0001, "ttest", [(set GPR64:$Rt, (int_aarch64_ttest))]> {
  let mayLoad = 0;
  let mayStore = 0;
}
} // HasTME

//===----------------------------------------------------------------------===//
// Move immediate instructions.
//===----------------------------------------------------------------------===//

defm MOVK : InsertImmediate<0b11, "movk">;
defm MOVN : MoveImmediate<0b00, "movn">;

let PostEncoderMethod = "fixMOVZ" in
defm MOVZ : MoveImmediate<0b10, "movz">;

// First group of aliases covers an implicit "lsl #0".
def : InstAlias<"movk $dst, $imm", (MOVKWi GPR32:$dst, timm32_0_65535:$imm, 0), 0>;
def : InstAlias<"movk $dst, $imm", (MOVKXi GPR64:$dst, timm32_0_65535:$imm, 0), 0>;
def : InstAlias<"movn $dst, $imm", (MOVNWi GPR32:$dst, timm32_0_65535:$imm, 0)>;
def : InstAlias<"movn $dst, $imm", (MOVNXi GPR64:$dst, timm32_0_65535:$imm, 0)>;
def : InstAlias<"movz $dst, $imm", (MOVZWi GPR32:$dst, timm32_0_65535:$imm, 0)>;
def : InstAlias<"movz $dst, $imm", (MOVZXi GPR64:$dst, timm32_0_65535:$imm, 0)>;

// Next, we have various ELF relocations with the ":XYZ_g0:sym" syntax.
1419def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g3:$sym, 48)>; 1420def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g2:$sym, 32)>; 1421def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g1:$sym, 16)>; 1422def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g0:$sym, 0)>; 1423 1424def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g3:$sym, 48)>; 1425def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g2:$sym, 32)>; 1426def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g1:$sym, 16)>; 1427def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g0:$sym, 0)>; 1428 1429def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g3:$sym, 48), 0>; 1430def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g2:$sym, 32), 0>; 1431def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g1:$sym, 16), 0>; 1432def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g0:$sym, 0), 0>; 1433 1434def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movw_symbol_g1:$sym, 16)>; 1435def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movw_symbol_g0:$sym, 0)>; 1436 1437def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movw_symbol_g1:$sym, 16)>; 1438def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movw_symbol_g0:$sym, 0)>; 1439 1440def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movw_symbol_g1:$sym, 16), 0>; 1441def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movw_symbol_g0:$sym, 0), 0>; 1442 1443// Final group of aliases covers true "mov $Rd, $imm" cases. 1444multiclass movw_mov_alias<string basename,Instruction INST, RegisterClass GPR, 1445 int width, int shift> { 1446 def _asmoperand : AsmOperandClass { 1447 let Name = basename # width # "_lsl" # shift # "MovAlias"; 1448 let PredicateMethod = "is" # basename # "MovAlias<" # width # ", " 1449 # shift # ">"; 1450 let RenderMethod = "add" # basename # "MovAliasOperands<" # shift # ">"; 1451 } 1452 1453 def _movimm : Operand<i32> { 1454 let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_asmoperand"); 1455 } 1456 1457 def : InstAlias<"mov $Rd, $imm", 1458 (INST GPR:$Rd, !cast<Operand>(NAME # "_movimm"):$imm, shift)>; 1459} 1460 1461defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 0>; 1462defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 16>; 1463 1464defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 0>; 1465defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 16>; 1466defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 32>; 1467defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 48>; 1468 1469defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 0>; 1470defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 16>; 1471 1472defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 0>; 1473defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 16>; 1474defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 32>; 1475defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 48>; 1476 1477let isReMaterializable = 1, isCodeGenOnly = 1, isMoveImm = 1, 1478 isAsCheapAsAMove = 1 in { 1479// FIXME: The following pseudo instructions are only needed because remat 1480// cannot handle multiple instructions. When that changes, we can select 1481// directly to the real instructions and get rid of these pseudos. 
let isReMaterializable = 1, isCodeGenOnly = 1, isMoveImm = 1,
    isAsCheapAsAMove = 1 in {
// FIXME: The following pseudo instructions are only needed because remat
// cannot handle multiple instructions. When that changes, we can select
// directly to the real instructions and get rid of these pseudos.

def MOVi32imm
    : Pseudo<(outs GPR32:$dst), (ins i32imm:$src),
             [(set GPR32:$dst, imm:$src)]>,
      Sched<[WriteImm]>;
def MOVi64imm
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$src),
             [(set GPR64:$dst, imm:$src)]>,
      Sched<[WriteImm]>;
} // isReMaterializable, isCodeGenOnly

// If possible, we want to use MOVi32imm even for 64-bit moves. This gives the
// eventual expansion code fewer bits to worry about getting right. Marshalling
// the types is a little tricky though:
def i64imm_32bit : ImmLeaf<i64, [{
  return (Imm & 0xffffffffULL) == static_cast<uint64_t>(Imm);
}]>;

def s64imm_32bit : ImmLeaf<i64, [{
  int64_t Imm64 = static_cast<int64_t>(Imm);
  return Imm64 >= std::numeric_limits<int32_t>::min() &&
         Imm64 <= std::numeric_limits<int32_t>::max();
}]>;

def trunc_imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i32);
}]>;

def gi_trunc_imm : GICustomOperandRenderer<"renderTruncImm">,
                   GISDNodeXFormEquiv<trunc_imm>;

let Predicates = [OptimizedGISelOrOtherSelector] in {
// The SUBREG_TO_REG isn't eliminated at -O0, which can result in pointless
// copies.
def : Pat<(i64 i64imm_32bit:$src),
          (SUBREG_TO_REG (i64 0), (MOVi32imm (trunc_imm imm:$src)), sub_32)>;
}

// Materialize FP constants via MOVi32imm/MOVi64imm (MachO large code model).
def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{
  return CurDAG->getTargetConstant(
      N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i32);
}]>;

def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{
  return CurDAG->getTargetConstant(
      N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i64);
}]>;


def : Pat<(f32 fpimm:$in),
          (COPY_TO_REGCLASS (MOVi32imm (bitcast_fpimm_to_i32 f32:$in)), FPR32)>;
def : Pat<(f64 fpimm:$in),
          (COPY_TO_REGCLASS (MOVi64imm (bitcast_fpimm_to_i64 f64:$in)), FPR64)>;


// Deal with the various forms of (ELF) large addressing with MOVZ/MOVK
// sequences.
def : Pat<(AArch64WrapperLarge tglobaladdr:$g3, tglobaladdr:$g2,
                               tglobaladdr:$g1, tglobaladdr:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tglobaladdr:$g0, 0),
                                  tglobaladdr:$g1, 16),
                          tglobaladdr:$g2, 32),
                  tglobaladdr:$g3, 48)>;

def : Pat<(AArch64WrapperLarge tblockaddress:$g3, tblockaddress:$g2,
                               tblockaddress:$g1, tblockaddress:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tblockaddress:$g0, 0),
                                  tblockaddress:$g1, 16),
                          tblockaddress:$g2, 32),
                  tblockaddress:$g3, 48)>;

def : Pat<(AArch64WrapperLarge tconstpool:$g3, tconstpool:$g2,
                               tconstpool:$g1, tconstpool:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tconstpool:$g0, 0),
                                  tconstpool:$g1, 16),
                          tconstpool:$g2, 32),
                  tconstpool:$g3, 48)>;

def : Pat<(AArch64WrapperLarge tjumptable:$g3, tjumptable:$g2,
                               tjumptable:$g1, tjumptable:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tjumptable:$g0, 0),
                                  tjumptable:$g1, 16),
                          tjumptable:$g2, 32),
                  tjumptable:$g3, 48)>;
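// As an illustration (assuming the usual ELF relocation specifiers), for a
// global "var" the patterns above emit something like:
//   movz x0, #:abs_g0_nc:var
//   movk x0, #:abs_g1_nc:var
//   movk x0, #:abs_g2_nc:var
//   movk x0, #:abs_g3:var
// building the full 64-bit address 16 bits at a time.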
//===----------------------------------------------------------------------===//
// Arithmetic instructions.
//===----------------------------------------------------------------------===//

// Add/subtract with carry.
defm ADC : AddSubCarry<0, "adc", "adcs", AArch64adc, AArch64adc_flag>;
defm SBC : AddSubCarry<1, "sbc", "sbcs", AArch64sbc, AArch64sbc_flag>;

def : InstAlias<"ngc $dst, $src",  (SBCWr  GPR32:$dst, WZR, GPR32:$src)>;
def : InstAlias<"ngc $dst, $src",  (SBCXr  GPR64:$dst, XZR, GPR64:$src)>;
def : InstAlias<"ngcs $dst, $src", (SBCSWr GPR32:$dst, WZR, GPR32:$src)>;
def : InstAlias<"ngcs $dst, $src", (SBCSXr GPR64:$dst, XZR, GPR64:$src)>;

// Add/subtract
defm ADD : AddSub<0, "add", "sub", add>;
defm SUB : AddSub<1, "sub", "add">;

def : InstAlias<"mov $dst, $src",
                (ADDWri GPR32sponly:$dst, GPR32sp:$src, 0, 0)>;
def : InstAlias<"mov $dst, $src",
                (ADDWri GPR32sp:$dst, GPR32sponly:$src, 0, 0)>;
def : InstAlias<"mov $dst, $src",
                (ADDXri GPR64sponly:$dst, GPR64sp:$src, 0, 0)>;
def : InstAlias<"mov $dst, $src",
                (ADDXri GPR64sp:$dst, GPR64sponly:$src, 0, 0)>;

defm ADDS : AddSubS<0, "adds", AArch64add_flag, "cmn", "subs", "cmp">;
defm SUBS : AddSubS<1, "subs", AArch64sub_flag, "cmp", "adds", "cmn">;

// Use SUBS instead of SUB to enable CSE between SUBS and SUB.
def : Pat<(sub GPR32sp:$Rn, addsub_shifted_imm32:$imm),
          (SUBSWri GPR32sp:$Rn, addsub_shifted_imm32:$imm)>;
def : Pat<(sub GPR64sp:$Rn, addsub_shifted_imm64:$imm),
          (SUBSXri GPR64sp:$Rn, addsub_shifted_imm64:$imm)>;
def : Pat<(sub GPR32:$Rn, GPR32:$Rm),
          (SUBSWrr GPR32:$Rn, GPR32:$Rm)>;
def : Pat<(sub GPR64:$Rn, GPR64:$Rm),
          (SUBSXrr GPR64:$Rn, GPR64:$Rm)>;
def : Pat<(sub GPR32:$Rn, arith_shifted_reg32:$Rm),
          (SUBSWrs GPR32:$Rn, arith_shifted_reg32:$Rm)>;
def : Pat<(sub GPR64:$Rn, arith_shifted_reg64:$Rm),
          (SUBSXrs GPR64:$Rn, arith_shifted_reg64:$Rm)>;
let AddedComplexity = 1 in {
def : Pat<(sub GPR32sp:$R2, arith_extended_reg32_i32:$R3),
          (SUBSWrx GPR32sp:$R2, arith_extended_reg32_i32:$R3)>;
def : Pat<(sub GPR64sp:$R2, arith_extended_reg32to64_i64:$R3),
          (SUBSXrx GPR64sp:$R2, arith_extended_reg32to64_i64:$R3)>;
}

// Because of the immediate format for add/sub-imm instructions, the
// expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1).
// These patterns capture that transformation.
let AddedComplexity = 1 in {
def : Pat<(add GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(add GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
def : Pat<(sub GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (ADDWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(sub GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (ADDXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
}
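// For example, an (add x, -17) node cannot be encoded as ADDWri, so the
// patterns above select it as "subs w0, w1, #17" (SUBS rather than SUB, so
// it can CSE with a flag-setting subtract of the same operands).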
// The same transformation applies to the flag-setting add/sub variants.
let AddedComplexity = 1 in {
def : Pat<(AArch64add_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(AArch64add_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
def : Pat<(AArch64sub_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (ADDSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(AArch64sub_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (ADDSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
}

def : InstAlias<"neg $dst, $src", (SUBWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
def : InstAlias<"neg $dst, $src", (SUBXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
def : InstAlias<"neg $dst, $src$shift",
                (SUBWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
def : InstAlias<"neg $dst, $src$shift",
                (SUBXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;

def : InstAlias<"negs $dst, $src", (SUBSWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
def : InstAlias<"negs $dst, $src", (SUBSXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
def : InstAlias<"negs $dst, $src$shift",
                (SUBSWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
def : InstAlias<"negs $dst, $src$shift",
                (SUBSXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;


// Unsigned/Signed divide
defm UDIV : Div<0, "udiv", udiv>;
defm SDIV : Div<1, "sdiv", sdiv>;

def : Pat<(int_aarch64_udiv GPR32:$Rn, GPR32:$Rm), (UDIVWr GPR32:$Rn, GPR32:$Rm)>;
def : Pat<(int_aarch64_udiv GPR64:$Rn, GPR64:$Rm), (UDIVXr GPR64:$Rn, GPR64:$Rm)>;
def : Pat<(int_aarch64_sdiv GPR32:$Rn, GPR32:$Rm), (SDIVWr GPR32:$Rn, GPR32:$Rm)>;
def : Pat<(int_aarch64_sdiv GPR64:$Rn, GPR64:$Rm), (SDIVXr GPR64:$Rn, GPR64:$Rm)>;

// Variable shift
defm ASRV : Shift<0b10, "asr", sra>;
defm LSLV : Shift<0b00, "lsl", shl>;
defm LSRV : Shift<0b01, "lsr", srl>;
defm RORV : Shift<0b11, "ror", rotr>;

def : ShiftAlias<"asrv", ASRVWr, GPR32>;
def : ShiftAlias<"asrv", ASRVXr, GPR64>;
def : ShiftAlias<"lslv", LSLVWr, GPR32>;
def : ShiftAlias<"lslv", LSLVXr, GPR64>;
def : ShiftAlias<"lsrv", LSRVWr, GPR32>;
def : ShiftAlias<"lsrv", LSRVXr, GPR64>;
def : ShiftAlias<"rorv", RORVWr, GPR32>;
def : ShiftAlias<"rorv", RORVXr, GPR64>;

// Multiply-add
let AddedComplexity = 5 in {
defm MADD : MulAccum<0, "madd">;
defm MSUB : MulAccum<1, "msub">;

def : Pat<(i32 (mul GPR32:$Rn, GPR32:$Rm)),
          (MADDWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
def : Pat<(i64 (mul GPR64:$Rn, GPR64:$Rm)),
          (MADDXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;

def : Pat<(i32 (ineg (mul GPR32:$Rn, GPR32:$Rm))),
          (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
def : Pat<(i64 (ineg (mul GPR64:$Rn, GPR64:$Rm))),
          (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
def : Pat<(i32 (mul (ineg GPR32:$Rn), GPR32:$Rm)),
          (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
def : Pat<(i64 (mul (ineg GPR64:$Rn), GPR64:$Rm)),
          (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
} // AddedComplexity = 5

let AddedComplexity = 5 in {
def SMADDLrrr : WideMulAccum<0, 0b001, "smaddl", add, sext>;
def SMSUBLrrr : WideMulAccum<1, 0b001, "smsubl", sub, sext>;
def UMADDLrrr : WideMulAccum<0, 0b101, "umaddl", add, zext>;
def UMSUBLrrr : WideMulAccum<1, 0b101, "umsubl", sub, zext>;

def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (sext_inreg GPR64:$Rm, i32))),
          (SMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), (EXTRACT_SUBREG $Rm, sub_32), XZR)>;
def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (sext GPR32:$Rm))),
          (SMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), $Rm, XZR)>;
def : Pat<(i64 (mul (sext GPR32:$Rn), (sext GPR32:$Rm))),
          (SMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
def : Pat<(i64 (mul (and GPR64:$Rn, 0xFFFFFFFF), (and GPR64:$Rm, 0xFFFFFFFF))),
          (UMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), (EXTRACT_SUBREG $Rm, sub_32), XZR)>;
def : Pat<(i64 (mul (and GPR64:$Rn, 0xFFFFFFFF), (zext GPR32:$Rm))),
          (UMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), $Rm, XZR)>;
def : Pat<(i64 (mul (zext GPR32:$Rn), (zext GPR32:$Rm))),
          (UMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;

def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (sext GPR32:$Rm)))),
          (SMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (zext GPR32:$Rm)))),
          (UMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;

def : Pat<(i64 (mul (sext GPR32:$Rn), (s64imm_32bit:$C))),
          (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
def : Pat<(i64 (mul (zext GPR32:$Rn), (i64imm_32bit:$C))),
          (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C))),
          (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
                     (MOVi32imm (trunc_imm imm:$C)), XZR)>;

def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
          (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
          (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
def : Pat<(i64 (ineg (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)))),
          (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
                     (MOVi32imm (trunc_imm imm:$C)), XZR)>;

def : Pat<(i64 (add (mul (sext GPR32:$Rn), (s64imm_32bit:$C)), GPR64:$Ra)),
          (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
def : Pat<(i64 (add (mul (zext GPR32:$Rn), (i64imm_32bit:$C)), GPR64:$Ra)),
          (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
def : Pat<(i64 (add (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)),
                    GPR64:$Ra)),
          (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
                     (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;

def : Pat<(i64 (sub GPR64:$Ra, (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
          (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
def : Pat<(i64 (sub GPR64:$Ra, (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
          (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
def : Pat<(i64 (sub GPR64:$Ra, (mul (sext_inreg GPR64:$Rn, i32),
                                    (s64imm_32bit:$C)))),
          (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
                     (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
} // AddedComplexity = 5
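// For example, (i64 (mul (sext GPR32:$a), (sext GPR32:$b))) selects to
// SMADDLrrr $a, $b, XZR, which prints as "smull x0, w1, w2" through the
// WideMulAccumAlias below.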
int_aarch64_crc32h, "crc32h">; 1780def CRC32Wrr : BaseCRC32<0, 0b10, 0, GPR32, int_aarch64_crc32w, "crc32w">; 1781def CRC32Xrr : BaseCRC32<1, 0b11, 0, GPR64, int_aarch64_crc32x, "crc32x">; 1782 1783def CRC32CBrr : BaseCRC32<0, 0b00, 1, GPR32, int_aarch64_crc32cb, "crc32cb">; 1784def CRC32CHrr : BaseCRC32<0, 0b01, 1, GPR32, int_aarch64_crc32ch, "crc32ch">; 1785def CRC32CWrr : BaseCRC32<0, 0b10, 1, GPR32, int_aarch64_crc32cw, "crc32cw">; 1786def CRC32CXrr : BaseCRC32<1, 0b11, 1, GPR64, int_aarch64_crc32cx, "crc32cx">; 1787 1788// v8.1 atomic CAS 1789defm CAS : CompareAndSwap<0, 0, "">; 1790defm CASA : CompareAndSwap<1, 0, "a">; 1791defm CASL : CompareAndSwap<0, 1, "l">; 1792defm CASAL : CompareAndSwap<1, 1, "al">; 1793 1794// v8.1 atomic CASP 1795defm CASP : CompareAndSwapPair<0, 0, "">; 1796defm CASPA : CompareAndSwapPair<1, 0, "a">; 1797defm CASPL : CompareAndSwapPair<0, 1, "l">; 1798defm CASPAL : CompareAndSwapPair<1, 1, "al">; 1799 1800// v8.1 atomic SWP 1801defm SWP : Swap<0, 0, "">; 1802defm SWPA : Swap<1, 0, "a">; 1803defm SWPL : Swap<0, 1, "l">; 1804defm SWPAL : Swap<1, 1, "al">; 1805 1806// v8.1 atomic LD<OP>(register). Performs load and then ST<OP>(register) 1807defm LDADD : LDOPregister<0b000, "add", 0, 0, "">; 1808defm LDADDA : LDOPregister<0b000, "add", 1, 0, "a">; 1809defm LDADDL : LDOPregister<0b000, "add", 0, 1, "l">; 1810defm LDADDAL : LDOPregister<0b000, "add", 1, 1, "al">; 1811 1812defm LDCLR : LDOPregister<0b001, "clr", 0, 0, "">; 1813defm LDCLRA : LDOPregister<0b001, "clr", 1, 0, "a">; 1814defm LDCLRL : LDOPregister<0b001, "clr", 0, 1, "l">; 1815defm LDCLRAL : LDOPregister<0b001, "clr", 1, 1, "al">; 1816 1817defm LDEOR : LDOPregister<0b010, "eor", 0, 0, "">; 1818defm LDEORA : LDOPregister<0b010, "eor", 1, 0, "a">; 1819defm LDEORL : LDOPregister<0b010, "eor", 0, 1, "l">; 1820defm LDEORAL : LDOPregister<0b010, "eor", 1, 1, "al">; 1821 1822defm LDSET : LDOPregister<0b011, "set", 0, 0, "">; 1823defm LDSETA : LDOPregister<0b011, "set", 1, 0, "a">; 1824defm LDSETL : LDOPregister<0b011, "set", 0, 1, "l">; 1825defm LDSETAL : LDOPregister<0b011, "set", 1, 1, "al">; 1826 1827defm LDSMAX : LDOPregister<0b100, "smax", 0, 0, "">; 1828defm LDSMAXA : LDOPregister<0b100, "smax", 1, 0, "a">; 1829defm LDSMAXL : LDOPregister<0b100, "smax", 0, 1, "l">; 1830defm LDSMAXAL : LDOPregister<0b100, "smax", 1, 1, "al">; 1831 1832defm LDSMIN : LDOPregister<0b101, "smin", 0, 0, "">; 1833defm LDSMINA : LDOPregister<0b101, "smin", 1, 0, "a">; 1834defm LDSMINL : LDOPregister<0b101, "smin", 0, 1, "l">; 1835defm LDSMINAL : LDOPregister<0b101, "smin", 1, 1, "al">; 1836 1837defm LDUMAX : LDOPregister<0b110, "umax", 0, 0, "">; 1838defm LDUMAXA : LDOPregister<0b110, "umax", 1, 0, "a">; 1839defm LDUMAXL : LDOPregister<0b110, "umax", 0, 1, "l">; 1840defm LDUMAXAL : LDOPregister<0b110, "umax", 1, 1, "al">; 1841 1842defm LDUMIN : LDOPregister<0b111, "umin", 0, 0, "">; 1843defm LDUMINA : LDOPregister<0b111, "umin", 1, 0, "a">; 1844defm LDUMINL : LDOPregister<0b111, "umin", 0, 1, "l">; 1845defm LDUMINAL : LDOPregister<0b111, "umin", 1, 1, "al">; 1846 1847// v8.1 atomic ST<OP>(register) as aliases to "LD<OP>(register) when Rt=xZR" 1848defm : STOPregister<"stadd","LDADD">; // STADDx 1849defm : STOPregister<"stclr","LDCLR">; // STCLRx 1850defm : STOPregister<"steor","LDEOR">; // STEORx 1851defm : STOPregister<"stset","LDSET">; // STSETx 1852defm : STOPregister<"stsmax","LDSMAX">;// STSMAXx 1853defm : STOPregister<"stsmin","LDSMIN">;// STSMINx 1854defm : STOPregister<"stumax","LDUMAX">;// STUMAXx 1855defm : 
STOPregister<"stumin","LDUMIN">;// STUMINx 1856 1857// v8.5 Memory Tagging Extension 1858let Predicates = [HasMTE] in { 1859 1860def IRG : BaseTwoOperand<0b0100, GPR64sp, "irg", int_aarch64_irg, GPR64sp, GPR64>, 1861 Sched<[]>{ 1862 let Inst{31} = 1; 1863} 1864def GMI : BaseTwoOperand<0b0101, GPR64, "gmi", int_aarch64_gmi, GPR64sp>, Sched<[]>{ 1865 let Inst{31} = 1; 1866 let isNotDuplicable = 1; 1867} 1868def ADDG : AddSubG<0, "addg", null_frag>; 1869def SUBG : AddSubG<1, "subg", null_frag>; 1870 1871def : InstAlias<"irg $dst, $src", (IRG GPR64sp:$dst, GPR64sp:$src, XZR), 1>; 1872 1873def SUBP : SUBP<0, "subp", int_aarch64_subp>, Sched<[]>; 1874def SUBPS : SUBP<1, "subps", null_frag>, Sched<[]>{ 1875 let Defs = [NZCV]; 1876} 1877 1878def : InstAlias<"cmpp $lhs, $rhs", (SUBPS XZR, GPR64sp:$lhs, GPR64sp:$rhs), 0>; 1879 1880def LDG : MemTagLoad<"ldg", "\t$Rt, [$Rn, $offset]">; 1881 1882def : Pat<(int_aarch64_addg (am_indexedu6s128 GPR64sp:$Rn, uimm6s16:$imm6), imm0_15:$imm4), 1883 (ADDG GPR64sp:$Rn, imm0_63:$imm6, imm0_15:$imm4)>; 1884def : Pat<(int_aarch64_ldg GPR64:$Rt, (am_indexeds9s128 GPR64sp:$Rn, simm9s16:$offset)), 1885 (LDG GPR64:$Rt, GPR64sp:$Rn, simm9s16:$offset)>; 1886 1887def : InstAlias<"ldg $Rt, [$Rn]", (LDG GPR64:$Rt, GPR64sp:$Rn, 0), 1>; 1888 1889def LDGM : MemTagVector<1, "ldgm", "\t$Rt, [$Rn]", 1890 (outs GPR64:$Rt), (ins GPR64sp:$Rn)>; 1891def STGM : MemTagVector<0, "stgm", "\t$Rt, [$Rn]", 1892 (outs), (ins GPR64:$Rt, GPR64sp:$Rn)>; 1893def STZGM : MemTagVector<0, "stzgm", "\t$Rt, [$Rn]", 1894 (outs), (ins GPR64:$Rt, GPR64sp:$Rn)> { 1895 let Inst{23} = 0; 1896} 1897 1898defm STG : MemTagStore<0b00, "stg">; 1899defm STZG : MemTagStore<0b01, "stzg">; 1900defm ST2G : MemTagStore<0b10, "st2g">; 1901defm STZ2G : MemTagStore<0b11, "stz2g">; 1902 1903def : Pat<(AArch64stg GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)), 1904 (STGOffset $Rn, $Rm, $imm)>; 1905def : Pat<(AArch64stzg GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)), 1906 (STZGOffset $Rn, $Rm, $imm)>; 1907def : Pat<(AArch64st2g GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)), 1908 (ST2GOffset $Rn, $Rm, $imm)>; 1909def : Pat<(AArch64stz2g GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)), 1910 (STZ2GOffset $Rn, $Rm, $imm)>; 1911 1912defm STGP : StorePairOffset <0b01, 0, GPR64z, simm7s16, "stgp">; 1913def STGPpre : StorePairPreIdx <0b01, 0, GPR64z, simm7s16, "stgp">; 1914def STGPpost : StorePairPostIdx<0b01, 0, GPR64z, simm7s16, "stgp">; 1915 1916def : Pat<(int_aarch64_stg GPR64:$Rt, (am_indexeds9s128 GPR64sp:$Rn, simm9s16:$offset)), 1917 (STGOffset GPR64:$Rt, GPR64sp:$Rn, simm9s16:$offset)>; 1918 1919def : Pat<(int_aarch64_stgp (am_indexed7s128 GPR64sp:$Rn, simm7s16:$imm), GPR64:$Rt, GPR64:$Rt2), 1920 (STGPi $Rt, $Rt2, $Rn, $imm)>; 1921 1922def IRGstack 1923 : Pseudo<(outs GPR64sp:$Rd), (ins GPR64sp:$Rsp, GPR64:$Rm), []>, 1924 Sched<[]>; 1925def TAGPstack 1926 : Pseudo<(outs GPR64sp:$Rd), (ins GPR64sp:$Rn, uimm6s16:$imm6, GPR64sp:$Rm, imm0_15:$imm4), []>, 1927 Sched<[]>; 1928 1929// Explicit SP in the first operand prevents ShrinkWrap optimization 1930// from leaving this instruction out of the stack frame. When IRGstack 1931// is transformed into IRG, this operand is replaced with the actual 1932// register / expression for the tagged base pointer of the current function. 1933def : Pat<(int_aarch64_irg_sp i64:$Rm), (IRGstack SP, i64:$Rm)>; 1934 1935// Large STG to be expanded into a loop. $sz is the size, $Rn is start address. 
// Large STG to be expanded into a loop. $sz is the size, $Rn is the start
// address. $Rn_wback is one past the end of the range. $Rm is the loop counter.
let isCodeGenOnly=1, mayStore=1 in {
def STGloop_wback
    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn_wback), (ins i64imm:$sz, GPR64sp:$Rn),
             [], "$Rn = $Rn_wback,@earlyclobber $Rn_wback,@earlyclobber $Rm" >,
      Sched<[WriteAdr, WriteST]>;

def STZGloop_wback
    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn_wback), (ins i64imm:$sz, GPR64sp:$Rn),
             [], "$Rn = $Rn_wback,@earlyclobber $Rn_wback,@earlyclobber $Rm" >,
      Sched<[WriteAdr, WriteST]>;

// Variants of the above where $Rn2 is an independent register, not tied to the
// input register $Rn. Their purpose is to allow a FrameIndex operand as $Rn
// (which of course cannot be written back).
def STGloop
    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn2), (ins i64imm:$sz, GPR64sp:$Rn),
             [], "@earlyclobber $Rn2,@earlyclobber $Rm" >,
      Sched<[WriteAdr, WriteST]>;

def STZGloop
    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn2), (ins i64imm:$sz, GPR64sp:$Rn),
             [], "@earlyclobber $Rn2,@earlyclobber $Rm" >,
      Sched<[WriteAdr, WriteST]>;
}

} // Predicates = [HasMTE]

//===----------------------------------------------------------------------===//
// Logical instructions.
//===----------------------------------------------------------------------===//

// (immediate)
defm ANDS : LogicalImmS<0b11, "ands", AArch64and_flag, "bics">;
defm AND  : LogicalImm<0b00, "and", and, "bic">;
defm EOR  : LogicalImm<0b10, "eor", xor, "eon">;
defm ORR  : LogicalImm<0b01, "orr", or, "orn">;

// FIXME: these aliases *are* sometimes canonical (when movz can't be used).
// It seems to be working right now, but accepting logical_immXX here is a bit
// dodgy on the AsmParser side too.
1976def : InstAlias<"mov $dst, $imm", (ORRWri GPR32sp:$dst, WZR, 1977 logical_imm32:$imm), 0>; 1978def : InstAlias<"mov $dst, $imm", (ORRXri GPR64sp:$dst, XZR, 1979 logical_imm64:$imm), 0>; 1980 1981 1982// (register) 1983defm ANDS : LogicalRegS<0b11, 0, "ands", AArch64and_flag>; 1984defm BICS : LogicalRegS<0b11, 1, "bics", 1985 BinOpFrag<(AArch64and_flag node:$LHS, (not node:$RHS))>>; 1986defm AND : LogicalReg<0b00, 0, "and", and>; 1987defm BIC : LogicalReg<0b00, 1, "bic", 1988 BinOpFrag<(and node:$LHS, (not node:$RHS))>>; 1989defm EON : LogicalReg<0b10, 1, "eon", 1990 BinOpFrag<(not (xor node:$LHS, node:$RHS))>>; 1991defm EOR : LogicalReg<0b10, 0, "eor", xor>; 1992defm ORN : LogicalReg<0b01, 1, "orn", 1993 BinOpFrag<(or node:$LHS, (not node:$RHS))>>; 1994defm ORR : LogicalReg<0b01, 0, "orr", or>; 1995 1996def : InstAlias<"mov $dst, $src", (ORRWrs GPR32:$dst, WZR, GPR32:$src, 0), 2>; 1997def : InstAlias<"mov $dst, $src", (ORRXrs GPR64:$dst, XZR, GPR64:$src, 0), 2>; 1998 1999def : InstAlias<"mvn $Wd, $Wm", (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, 0), 3>; 2000def : InstAlias<"mvn $Xd, $Xm", (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, 0), 3>; 2001 2002def : InstAlias<"mvn $Wd, $Wm$sh", 2003 (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, logical_shift32:$sh), 2>; 2004def : InstAlias<"mvn $Xd, $Xm$sh", 2005 (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, logical_shift64:$sh), 2>; 2006 2007def : InstAlias<"tst $src1, $src2", 2008 (ANDSWri WZR, GPR32:$src1, logical_imm32:$src2), 2>; 2009def : InstAlias<"tst $src1, $src2", 2010 (ANDSXri XZR, GPR64:$src1, logical_imm64:$src2), 2>; 2011 2012def : InstAlias<"tst $src1, $src2", 2013 (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, 0), 3>; 2014def : InstAlias<"tst $src1, $src2", 2015 (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, 0), 3>; 2016 2017def : InstAlias<"tst $src1, $src2$sh", 2018 (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, logical_shift32:$sh), 2>; 2019def : InstAlias<"tst $src1, $src2$sh", 2020 (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, logical_shift64:$sh), 2>; 2021 2022 2023def : Pat<(not GPR32:$Wm), (ORNWrr WZR, GPR32:$Wm)>; 2024def : Pat<(not GPR64:$Xm), (ORNXrr XZR, GPR64:$Xm)>; 2025 2026 2027//===----------------------------------------------------------------------===// 2028// One operand data processing instructions. 2029//===----------------------------------------------------------------------===// 2030 2031defm CLS : OneOperandData<0b101, "cls">; 2032defm CLZ : OneOperandData<0b100, "clz", ctlz>; 2033defm RBIT : OneOperandData<0b000, "rbit", bitreverse>; 2034 2035def REV16Wr : OneWRegData<0b001, "rev16", 2036 UnOpFrag<(rotr (bswap node:$LHS), (i64 16))>>; 2037def REV16Xr : OneXRegData<0b001, "rev16", null_frag>; 2038 2039def : Pat<(cttz GPR32:$Rn), 2040 (CLZWr (RBITWr GPR32:$Rn))>; 2041def : Pat<(cttz GPR64:$Rn), 2042 (CLZXr (RBITXr GPR64:$Rn))>; 2043def : Pat<(ctlz (or (shl (xor (sra GPR32:$Rn, (i64 31)), GPR32:$Rn), (i64 1)), 2044 (i32 1))), 2045 (CLSWr GPR32:$Rn)>; 2046def : Pat<(ctlz (or (shl (xor (sra GPR64:$Rn, (i64 63)), GPR64:$Rn), (i64 1)), 2047 (i64 1))), 2048 (CLSXr GPR64:$Rn)>; 2049def : Pat<(int_aarch64_cls GPR32:$Rn), (CLSWr GPR32:$Rn)>; 2050def : Pat<(int_aarch64_cls64 GPR64:$Rm), (EXTRACT_SUBREG (CLSXr GPR64:$Rm), sub_32)>; 2051 2052// Unlike the other one operand instructions, the instructions with the "rev" 2053// mnemonic do *not* just different in the size bit, but actually use different 2054// opcode bits for the different sizes. 
def REVWr   : OneWRegData<0b010, "rev", bswap>;
def REVXr   : OneXRegData<0b011, "rev", bswap>;
def REV32Xr : OneXRegData<0b010, "rev32",
                          UnOpFrag<(rotr (bswap node:$LHS), (i64 32))>>;

def : InstAlias<"rev64 $Rd, $Rn", (REVXr GPR64:$Rd, GPR64:$Rn), 0>;

// The bswap commutes with the rotr so we want a pattern for both possible
// orders.
def : Pat<(bswap (rotr GPR32:$Rn, (i64 16))), (REV16Wr GPR32:$Rn)>;
def : Pat<(bswap (rotr GPR64:$Rn, (i64 32))), (REV32Xr GPR64:$Rn)>;

// Match (srl (bswap x), C) -> revC if the upper bswap bits are known zero.
def : Pat<(srl (bswap top16Zero:$Rn), (i64 16)), (REV16Wr GPR32:$Rn)>;
def : Pat<(srl (bswap top32Zero:$Rn), (i64 32)), (REV32Xr GPR64:$Rn)>;

//===----------------------------------------------------------------------===//
// Bitfield immediate extraction instruction.
//===----------------------------------------------------------------------===//
let hasSideEffects = 0 in
defm EXTR : ExtractImm<"extr">;
def : InstAlias<"ror $dst, $src, $shift",
                (EXTRWrri GPR32:$dst, GPR32:$src, GPR32:$src, imm0_31:$shift)>;
def : InstAlias<"ror $dst, $src, $shift",
                (EXTRXrri GPR64:$dst, GPR64:$src, GPR64:$src, imm0_63:$shift)>;

def : Pat<(rotr GPR32:$Rn, (i64 imm0_31:$imm)),
          (EXTRWrri GPR32:$Rn, GPR32:$Rn, imm0_31:$imm)>;
def : Pat<(rotr GPR64:$Rn, (i64 imm0_63:$imm)),
          (EXTRXrri GPR64:$Rn, GPR64:$Rn, imm0_63:$imm)>;
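// For example, "ror w0, w1, #8" is EXTRWrri w0, w1, w1, #8: extracting 32
// bits starting at bit 8 of the 64-bit concatenation w1:w1 is exactly a
// rotate right by 8.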
//===----------------------------------------------------------------------===//
// Other bitfield immediate instructions.
//===----------------------------------------------------------------------===//
let hasSideEffects = 0 in {
defm BFM  : BitfieldImmWith2RegArgs<0b01, "bfm">;
defm SBFM : BitfieldImm<0b00, "sbfm">;
defm UBFM : BitfieldImm<0b10, "ubfm">;
}

def i32shift_a : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = (32 - N->getZExtValue()) & 0x1f;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

def i32shift_b : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 31 - N->getZExtValue();
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(7, 31 - shift_amt)
def i32shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 31 - N->getZExtValue();
  enc = enc > 7 ? 7 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(15, 31 - shift_amt)
def i32shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 31 - N->getZExtValue();
  enc = enc > 15 ? 15 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

def i64shift_a : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = (64 - N->getZExtValue()) & 0x3f;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

def i64shift_b : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(7, 63 - shift_amt)
def i64shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  enc = enc > 7 ? 7 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(15, 63 - shift_amt)
def i64shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  enc = enc > 15 ? 15 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(31, 63 - shift_amt)
def i64shift_sext_i32 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  enc = enc > 31 ? 31 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

def : Pat<(shl GPR32:$Rn, (i64 imm0_31:$imm)),
          (UBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
                              (i64 (i32shift_b imm0_31:$imm)))>;
def : Pat<(shl GPR64:$Rn, (i64 imm0_63:$imm)),
          (UBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
                              (i64 (i64shift_b imm0_63:$imm)))>;
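// For example, "lsl w0, w1, #3" becomes UBFMWri w0, w1, #29, #28:
// immr = (32 - 3) & 31 = 29 (the i32shift_a encoding) and
// imms = 31 - 3 = 28 (i32shift_b), the UBFM form of a left shift by 3.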
let AddedComplexity = 10 in {
def : Pat<(sra GPR32:$Rn, (i64 imm0_31:$imm)),
          (SBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
def : Pat<(sra GPR64:$Rn, (i64 imm0_63:$imm)),
          (SBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
}

def : InstAlias<"asr $dst, $src, $shift",
                (SBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
def : InstAlias<"asr $dst, $src, $shift",
                (SBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
def : InstAlias<"sxtb $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
def : InstAlias<"sxtb $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
def : InstAlias<"sxth $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
def : InstAlias<"sxth $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
def : InstAlias<"sxtw $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;

def : Pat<(srl GPR32:$Rn, (i64 imm0_31:$imm)),
          (UBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
def : Pat<(srl GPR64:$Rn, (i64 imm0_63:$imm)),
          (UBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;

def : InstAlias<"lsr $dst, $src, $shift",
                (UBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
def : InstAlias<"lsr $dst, $src, $shift",
                (UBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
def : InstAlias<"uxtb $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
def : InstAlias<"uxtb $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
def : InstAlias<"uxth $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
def : InstAlias<"uxth $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
def : InstAlias<"uxtw $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;

//===----------------------------------------------------------------------===//
// Conditional comparison instructions.
//===----------------------------------------------------------------------===//
defm CCMN : CondComparison<0, "ccmn", AArch64ccmn>;
defm CCMP : CondComparison<1, "ccmp", AArch64ccmp>;

//===----------------------------------------------------------------------===//
// Conditional select instructions.
//===----------------------------------------------------------------------===//
defm CSEL : CondSelect<0, 0b00, "csel">;

def inc : PatFrag<(ops node:$in), (add node:$in, 1)>;
defm CSINC : CondSelectOp<0, 0b01, "csinc", inc>;
defm CSINV : CondSelectOp<1, 0b00, "csinv", not>;
defm CSNEG : CondSelectOp<1, 0b01, "csneg", ineg>;

def : Pat<(AArch64csinv GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINVWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csinv GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINVXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csneg GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSNEGWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csneg GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSNEGXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csinc GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINCWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csinc GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINCXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;

def : Pat<(AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV),
          (CSINCWr WZR, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i64 0), (i64 1), (i32 imm:$cc), NZCV),
          (CSINCXr XZR, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR32:$tval, (i32 1), (i32 imm:$cc), NZCV),
          (CSINCWr GPR32:$tval, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR64:$tval, (i64 1), (i32 imm:$cc), NZCV),
          (CSINCXr GPR64:$tval, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i32 1), GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINCWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
def : Pat<(AArch64csel (i64 1), GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINCXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;
def : Pat<(AArch64csel (i32 0), (i32 -1), (i32 imm:$cc), NZCV),
          (CSINVWr WZR, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i64 0), (i64 -1), (i32 imm:$cc), NZCV),
          (CSINVXr XZR, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR32:$tval, (i32 -1), (i32 imm:$cc), NZCV),
          (CSINVWr GPR32:$tval, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR64:$tval, (i64 -1), (i32 imm:$cc), NZCV),
          (CSINVXr GPR64:$tval, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i32 -1), GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINVWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
def : Pat<(AArch64csel (i64 -1), GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINVXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;

def : Pat<(add GPR32:$val, (AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV)),
          (CSINCWr GPR32:$val, GPR32:$val, (i32 imm:$cc))>;
def : Pat<(add GPR64:$val, (zext (AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV))),
          (CSINCXr GPR64:$val, GPR64:$val, (i32 imm:$cc))>;
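// For example, selecting between 0 and 1 needs no source registers at all:
// CSINCWr WZR, WZR above yields 0 when the condition holds and 0 + 1 = 1
// otherwise, which is exactly what the "cset" alias below expands to.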
2251def : InstAlias<"cset $dst, $cc", 2252 (CSINCWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>; 2253def : InstAlias<"cset $dst, $cc", 2254 (CSINCXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>; 2255 2256def : InstAlias<"csetm $dst, $cc", 2257 (CSINVWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>; 2258def : InstAlias<"csetm $dst, $cc", 2259 (CSINVXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>; 2260 2261def : InstAlias<"cinc $dst, $src, $cc", 2262 (CSINCWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>; 2263def : InstAlias<"cinc $dst, $src, $cc", 2264 (CSINCXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>; 2265 2266def : InstAlias<"cinv $dst, $src, $cc", 2267 (CSINVWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>; 2268def : InstAlias<"cinv $dst, $src, $cc", 2269 (CSINVXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>; 2270 2271def : InstAlias<"cneg $dst, $src, $cc", 2272 (CSNEGWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>; 2273def : InstAlias<"cneg $dst, $src, $cc", 2274 (CSNEGXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>; 2275 2276//===----------------------------------------------------------------------===// 2277// PC-relative instructions. 2278//===----------------------------------------------------------------------===// 2279let isReMaterializable = 1 in { 2280let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in { 2281def ADR : ADRI<0, "adr", adrlabel, 2282 [(set GPR64:$Xd, (AArch64adr tglobaladdr:$label))]>; 2283} // hasSideEffects = 0 2284 2285def ADRP : ADRI<1, "adrp", adrplabel, 2286 [(set GPR64:$Xd, (AArch64adrp tglobaladdr:$label))]>; 2287} // isReMaterializable = 1 2288 2289// page address of a constant pool entry, block address 2290def : Pat<(AArch64adr tconstpool:$cp), (ADR tconstpool:$cp)>; 2291def : Pat<(AArch64adr tblockaddress:$cp), (ADR tblockaddress:$cp)>; 2292def : Pat<(AArch64adr texternalsym:$sym), (ADR texternalsym:$sym)>; 2293def : Pat<(AArch64adr tjumptable:$sym), (ADR tjumptable:$sym)>; 2294def : Pat<(AArch64adrp tconstpool:$cp), (ADRP tconstpool:$cp)>; 2295def : Pat<(AArch64adrp tblockaddress:$cp), (ADRP tblockaddress:$cp)>; 2296def : Pat<(AArch64adrp texternalsym:$sym), (ADRP texternalsym:$sym)>; 2297 2298//===----------------------------------------------------------------------===// 2299// Unconditional branch (register) instructions. 2300//===----------------------------------------------------------------------===// 2301 2302let isReturn = 1, isTerminator = 1, isBarrier = 1 in { 2303def RET : BranchReg<0b0010, "ret", []>; 2304def DRPS : SpecialReturn<0b0101, "drps">; 2305def ERET : SpecialReturn<0b0100, "eret">; 2306} // isReturn = 1, isTerminator = 1, isBarrier = 1 2307 2308// Default to the LR register. 
2309def : InstAlias<"ret", (RET LR)>; 2310 2311let isCall = 1, Defs = [LR], Uses = [SP] in { 2312 def BLR : BranchReg<0b0001, "blr", []>; 2313 def BLRNoIP : Pseudo<(outs), (ins GPR64noip:$Rn), []>, 2314 Sched<[WriteBrReg]>, 2315 PseudoInstExpansion<(BLR GPR64:$Rn)>; 2316 def BLR_RVMARKER : Pseudo<(outs), (ins variable_ops), []>, 2317 Sched<[WriteBrReg]>; 2318} // isCall 2319 2320def : Pat<(AArch64call GPR64:$Rn), 2321 (BLR GPR64:$Rn)>, 2322 Requires<[NoSLSBLRMitigation]>; 2323def : Pat<(AArch64call GPR64noip:$Rn), 2324 (BLRNoIP GPR64noip:$Rn)>, 2325 Requires<[SLSBLRMitigation]>; 2326 2327def : Pat<(AArch64call_rvmarker GPR64:$Rn), 2328 (BLR_RVMARKER GPR64:$Rn)>, 2329 Requires<[NoSLSBLRMitigation]>; 2330 2331let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in { 2332def BR : BranchReg<0b0000, "br", [(brind GPR64:$Rn)]>; 2333} // isBranch, isTerminator, isBarrier, isIndirectBranch 2334 2335// Create a separate pseudo-instruction for codegen to use so that we don't 2336// flag lr as used in every function. It'll be restored before the RET by the 2337// epilogue if it's legitimately used. 2338def RET_ReallyLR : Pseudo<(outs), (ins), [(AArch64retflag)]>, 2339 Sched<[WriteBrReg]> { 2340 let isTerminator = 1; 2341 let isBarrier = 1; 2342 let isReturn = 1; 2343} 2344 2345// This is a directive-like pseudo-instruction. The purpose is to insert an 2346// R_AARCH64_TLSDESC_CALL relocation at the offset of the following instruction 2347// (which in the usual case is a BLR). 2348let hasSideEffects = 1 in 2349def TLSDESCCALL : Pseudo<(outs), (ins i64imm:$sym), []>, Sched<[]> { 2350 let AsmString = ".tlsdesccall $sym"; 2351} 2352 2353// Pseudo instruction to tell the streamer to emit a 'B' character into the 2354// augmentation string. 2355def EMITBKEY : Pseudo<(outs), (ins), []>, Sched<[]> {} 2356 2357// FIXME: maybe the scratch register used shouldn't be fixed to X1? 2358// FIXME: can "hasSideEffects be dropped? 2359let isCall = 1, Defs = [LR, X0, X1], hasSideEffects = 1, 2360 isCodeGenOnly = 1 in 2361def TLSDESC_CALLSEQ 2362 : Pseudo<(outs), (ins i64imm:$sym), 2363 [(AArch64tlsdesc_callseq tglobaltlsaddr:$sym)]>, 2364 Sched<[WriteI, WriteLD, WriteI, WriteBrReg]>; 2365def : Pat<(AArch64tlsdesc_callseq texternalsym:$sym), 2366 (TLSDESC_CALLSEQ texternalsym:$sym)>; 2367 2368//===----------------------------------------------------------------------===// 2369// Conditional branch (immediate) instruction. 2370//===----------------------------------------------------------------------===// 2371def Bcc : BranchCond<0, "b">; 2372 2373// Armv8.8-A variant form which hints to the branch predictor that 2374// this branch is very likely to go the same way nearly all the time 2375// (even though it is not known at compile time _which_ way that is). 2376def BCcc : BranchCond<1, "bc">, Requires<[HasHBC]>; 2377 2378//===----------------------------------------------------------------------===// 2379// Compare-and-branch instructions. 2380//===----------------------------------------------------------------------===// 2381defm CBZ : CmpBranch<0, "cbz", AArch64cbz>; 2382defm CBNZ : CmpBranch<1, "cbnz", AArch64cbnz>; 2383 2384//===----------------------------------------------------------------------===// 2385// Test-bit-and-branch instructions. 
//===----------------------------------------------------------------------===//
defm TBZ  : TestBranch<0, "tbz", AArch64tbz>;
defm TBNZ : TestBranch<1, "tbnz", AArch64tbnz>;

//===----------------------------------------------------------------------===//
// Unconditional branch (immediate) instructions.
//===----------------------------------------------------------------------===//
let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
def B : BranchImm<0, "b", [(br bb:$addr)]>;
} // isBranch, isTerminator, isBarrier

let isCall = 1, Defs = [LR], Uses = [SP] in {
def BL : CallImm<1, "bl", [(AArch64call tglobaladdr:$addr)]>;
} // isCall
def : Pat<(AArch64call texternalsym:$func), (BL texternalsym:$func)>;

//===----------------------------------------------------------------------===//
// Exception generation instructions.
//===----------------------------------------------------------------------===//
let isTrap = 1 in {
def BRK : ExceptionGeneration<0b001, 0b00, "brk">;
}
def DCPS1 : ExceptionGeneration<0b101, 0b01, "dcps1">;
def DCPS2 : ExceptionGeneration<0b101, 0b10, "dcps2">;
def DCPS3 : ExceptionGeneration<0b101, 0b11, "dcps3">, Requires<[HasEL3]>;
def HLT   : ExceptionGeneration<0b010, 0b00, "hlt">;
def HVC   : ExceptionGeneration<0b000, 0b10, "hvc">;
def SMC   : ExceptionGeneration<0b000, 0b11, "smc">, Requires<[HasEL3]>;
def SVC   : ExceptionGeneration<0b000, 0b01, "svc">;

// DCPSn defaults to an immediate operand of zero if unspecified.
def : InstAlias<"dcps1", (DCPS1 0)>;
def : InstAlias<"dcps2", (DCPS2 0)>;
def : InstAlias<"dcps3", (DCPS3 0)>, Requires<[HasEL3]>;

def UDF : UDFType<0, "udf">;

//===----------------------------------------------------------------------===//
// Load instructions.
//===----------------------------------------------------------------------===//

// Pair (indexed, offset)
defm LDPW : LoadPairOffset<0b00, 0, GPR32z, simm7s4, "ldp">;
defm LDPX : LoadPairOffset<0b10, 0, GPR64z, simm7s8, "ldp">;
defm LDPS : LoadPairOffset<0b00, 1, FPR32Op, simm7s4, "ldp">;
defm LDPD : LoadPairOffset<0b01, 1, FPR64Op, simm7s8, "ldp">;
defm LDPQ : LoadPairOffset<0b10, 1, FPR128Op, simm7s16, "ldp">;

defm LDPSW : LoadPairOffset<0b01, 0, GPR64z, simm7s4, "ldpsw">;

// Pair (pre-indexed)
def LDPWpre : LoadPairPreIdx<0b00, 0, GPR32z, simm7s4, "ldp">;
def LDPXpre : LoadPairPreIdx<0b10, 0, GPR64z, simm7s8, "ldp">;
def LDPSpre : LoadPairPreIdx<0b00, 1, FPR32Op, simm7s4, "ldp">;
def LDPDpre : LoadPairPreIdx<0b01, 1, FPR64Op, simm7s8, "ldp">;
def LDPQpre : LoadPairPreIdx<0b10, 1, FPR128Op, simm7s16, "ldp">;

def LDPSWpre : LoadPairPreIdx<0b01, 0, GPR64z, simm7s4, "ldpsw">;

// Pair (post-indexed)
def LDPWpost : LoadPairPostIdx<0b00, 0, GPR32z, simm7s4, "ldp">;
def LDPXpost : LoadPairPostIdx<0b10, 0, GPR64z, simm7s8, "ldp">;
def LDPSpost : LoadPairPostIdx<0b00, 1, FPR32Op, simm7s4, "ldp">;
def LDPDpost : LoadPairPostIdx<0b01, 1, FPR64Op, simm7s8, "ldp">;
def LDPQpost : LoadPairPostIdx<0b10, 1, FPR128Op, simm7s16, "ldp">;

def LDPSWpost : LoadPairPostIdx<0b01, 0, GPR64z, simm7s4, "ldpsw">;


// Pair (no allocate)
defm LDNPW : LoadPairNoAlloc<0b00, 0, GPR32z, simm7s4, "ldnp">;
defm LDNPX : LoadPairNoAlloc<0b10, 0, GPR64z, simm7s8, "ldnp">;
defm LDNPS : LoadPairNoAlloc<0b00, 1, FPR32Op, simm7s4, "ldnp">;
defm LDNPD : LoadPairNoAlloc<0b01, 1, FPR64Op, simm7s8, "ldnp">;
defm LDNPQ : LoadPairNoAlloc<0b10, 1, FPR128Op, simm7s16, "ldnp">;

def : Pat<(AArch64ldp (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
          (LDPXi GPR64sp:$Rn, simm7s8:$offset)>;

//---
// (register offset)
//---

// Integer
defm LDRBB : Load8RO<0b00,  0, 0b01, GPR32, "ldrb", i32, zextloadi8>;
defm LDRHH : Load16RO<0b01, 0, 0b01, GPR32, "ldrh", i32, zextloadi16>;
defm LDRW  : Load32RO<0b10, 0, 0b01, GPR32, "ldr", i32, load>;
defm LDRX  : Load64RO<0b11, 0, 0b01, GPR64, "ldr", i64, load>;

// Floating-point
defm LDRB : Load8RO<0b00,   1, 0b01, FPR8Op,   "ldr", untyped, load>;
defm LDRH : Load16RO<0b01,  1, 0b01, FPR16Op,  "ldr", f16, load>;
defm LDRS : Load32RO<0b10,  1, 0b01, FPR32Op,  "ldr", f32, load>;
defm LDRD : Load64RO<0b11,  1, 0b01, FPR64Op,  "ldr", f64, load>;
defm LDRQ : Load128RO<0b00, 1, 0b11, FPR128Op, "ldr", f128, load>;

// Load sign-extended half-word
defm LDRSHW : Load16RO<0b01, 0, 0b11, GPR32, "ldrsh", i32, sextloadi16>;
defm LDRSHX : Load16RO<0b01, 0, 0b10, GPR64, "ldrsh", i64, sextloadi16>;

// Load sign-extended byte
defm LDRSBW : Load8RO<0b00, 0, 0b11, GPR32, "ldrsb", i32, sextloadi8>;
defm LDRSBX : Load8RO<0b00, 0, 0b10, GPR64, "ldrsb", i64, sextloadi8>;

// Load sign-extended word
defm LDRSW  : Load32RO<0b10, 0, 0b10, GPR64, "ldrsw", i64, sextloadi32>;

// Pre-fetch.
defm PRFM : PrefetchRO<0b11, 0, 0b10, "prfm">;

// Regular loads have no alignment requirement, so it is safe to map vector
// loads directly onto these interesting addressing modes.
// FIXME: We could do the same for bitconvert to floating point vectors.
multiclass ScalToVecROLoadPat<ROAddrMode ro, SDPatternOperator loadop,
                              ValueType ScalTy, ValueType VecTy,
                              Instruction LOADW, Instruction LOADX,
                              SubRegIndex sub> {
  def : Pat<(VecTy (scalar_to_vector (ScalTy
              (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset))))),
            (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
                           (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset),
                           sub)>;

  def : Pat<(VecTy (scalar_to_vector (ScalTy
              (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset))))),
            (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
                           (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset),
                           sub)>;
}

let AddedComplexity = 10 in {
defm : ScalToVecROLoadPat<ro8,  extloadi8,  i32, v8i8,  LDRBroW, LDRBroX, bsub>;
defm : ScalToVecROLoadPat<ro8,  extloadi8,  i32, v16i8, LDRBroW, LDRBroX, bsub>;

defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v4i16, LDRHroW, LDRHroX, hsub>;
defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v8i16, LDRHroW, LDRHroX, hsub>;

defm : ScalToVecROLoadPat<ro16, load,       i32, v4f16, LDRHroW, LDRHroX, hsub>;
defm : ScalToVecROLoadPat<ro16, load,       i32, v8f16, LDRHroW, LDRHroX, hsub>;

defm : ScalToVecROLoadPat<ro32, load,       i32, v2i32, LDRSroW, LDRSroX, ssub>;
defm : ScalToVecROLoadPat<ro32, load,       i32, v4i32, LDRSroW, LDRSroX, ssub>;

defm : ScalToVecROLoadPat<ro32, load,       f32, v2f32, LDRSroW, LDRSroX, ssub>;
defm : ScalToVecROLoadPat<ro32, load,       f32, v4f32, LDRSroW, LDRSroX, ssub>;

defm : ScalToVecROLoadPat<ro64, load,       i64, v2i64, LDRDroW, LDRDroX, dsub>;

defm : ScalToVecROLoadPat<ro64, load,       f64, v2f64, LDRDroW, LDRDroX, dsub>;


def : Pat <(v1i64 (scalar_to_vector (i64
                      (load (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
                                           ro_Wextend64:$extend))))),
           (LDRDroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;

def : Pat <(v1i64 (scalar_to_vector (i64
                      (load (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
                                           ro_Xextend64:$extend))))),
           (LDRDroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
}
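// For example, (v8i8 (scalar_to_vector (extloadi8 addr))) above becomes a
// single "ldr b0, [x0, x1]" straight into the low lane of the vector
// register, avoiding a separate integer load plus a GPR-to-FPR copy.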
  defm : VecROLoadPat<ro128, v2i64,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v2f64,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v4i32,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v4f32,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v8i16,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v8f16,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v8bf16, LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v16i8,  LDRQroW, LDRQroX>;
}
} // AddedComplexity = 10

// zextload -> i64
multiclass ExtLoadTo64ROPat<ROAddrMode ro, SDPatternOperator loadop,
                            Instruction INSTW, Instruction INSTX> {
  def : Pat<(i64 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
            (SUBREG_TO_REG (i64 0),
                           (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
                           sub_32)>;

  def : Pat<(i64 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (SUBREG_TO_REG (i64 0),
                           (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
                           sub_32)>;
}

let AddedComplexity = 10 in {
  defm : ExtLoadTo64ROPat<ro8,  zextloadi8,  LDRBBroW, LDRBBroX>;
  defm : ExtLoadTo64ROPat<ro16, zextloadi16, LDRHHroW, LDRHHroX>;
  defm : ExtLoadTo64ROPat<ro32, zextloadi32, LDRWroW,  LDRWroX>;

  // zextloadi1 -> zextloadi8
  defm : ExtLoadTo64ROPat<ro8,  zextloadi1,  LDRBBroW, LDRBBroX>;

  // extload -> zextload
  defm : ExtLoadTo64ROPat<ro8,  extloadi8,   LDRBBroW, LDRBBroX>;
  defm : ExtLoadTo64ROPat<ro16, extloadi16,  LDRHHroW, LDRHHroX>;
  defm : ExtLoadTo64ROPat<ro32, extloadi32,  LDRWroW,  LDRWroX>;

  // extloadi1 -> zextloadi8
  defm : ExtLoadTo64ROPat<ro8,  extloadi1,   LDRBBroW, LDRBBroX>;
}


// extload/zextload -> i32
multiclass ExtLoadTo32ROPat<ROAddrMode ro, SDPatternOperator loadop,
                            Instruction INSTW, Instruction INSTX> {
  def : Pat<(i32 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
            (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(i32 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;

}

let AddedComplexity = 10 in {
  // extload -> zextload
  defm : ExtLoadTo32ROPat<ro8,  extloadi8,  LDRBBroW, LDRBBroX>;
  defm : ExtLoadTo32ROPat<ro16, extloadi16, LDRHHroW, LDRHHroX>;
  defm : ExtLoadTo32ROPat<ro32, extloadi32, LDRWroW,  LDRWroX>;

  // zextloadi1 -> zextloadi8
  defm : ExtLoadTo32ROPat<ro8,  zextloadi1, LDRBBroW, LDRBBroX>;
}

//---
// (unsigned immediate)
//---
defm LDRX : LoadUI<0b11, 0, 0b01, GPR64z, uimm12s8, "ldr",
                   [(set GPR64z:$Rt,
                         (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
defm LDRW : LoadUI<0b10, 0, 0b01, GPR32z, uimm12s4, "ldr",
                   [(set GPR32z:$Rt,
                         (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
defm LDRB : LoadUI<0b00, 1, 0b01, FPR8Op, uimm12s1, "ldr",
                   [(set FPR8Op:$Rt,
                         (load (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)))]>;
defm LDRH : LoadUI<0b01, 1, 0b01, FPR16Op, uimm12s2, "ldr",
                   [(set (f16 FPR16Op:$Rt),
                         (load (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)))]>;
defm LDRS : LoadUI<0b10, 1, 0b01, FPR32Op, uimm12s4, "ldr",
                   [(set (f32 FPR32Op:$Rt),
                         (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
defm LDRD : LoadUI<0b11, 1, 0b01, FPR64Op, uimm12s8, "ldr",
                   [(set (f64 FPR64Op:$Rt),
                         (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
defm LDRQ : LoadUI<0b00, 1, 0b11, FPR128Op, uimm12s16, "ldr",
                   [(set (f128 FPR128Op:$Rt),
                         (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)))]>;
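// For reference: the uimm12s<N> operands above encode an unsigned byte offset
// that must be a multiple of the access size N (giving a range of 0 to
// 4095*N); offsets outside that form are handled by the unscaled LDUR
// instructions defined further down.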
// bf16 load pattern
def : Pat <(bf16 (load (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;

// Regular loads have no alignment requirement, so it is safe to map vector
// loads with interesting addressing modes directly onto these instructions.
// FIXME: We could do the same for bitconvert to floating point vectors.
def : Pat <(v8i8 (scalar_to_vector (i32
               (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
def : Pat <(v16i8 (scalar_to_vector (i32
               (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
def : Pat <(v4i16 (scalar_to_vector (i32
               (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
def : Pat <(v8i16 (scalar_to_vector (i32
               (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
def : Pat <(v2i32 (scalar_to_vector (i32
               (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
           (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
def : Pat <(v4i32 (scalar_to_vector (i32
               (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
           (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
def : Pat <(v1i64 (scalar_to_vector (i64
               (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
           (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat <(v2i64 (scalar_to_vector (i64
               (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
           (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
                          (LDRDui GPR64sp:$Rn, uimm12s8:$offset), dsub)>;

// Match all 64-bit-wide loads whose type is compatible with FPR64.
let Predicates = [IsLE] in {
  // In big-endian mode vector loads must be done with LD1, so restrict these
  // patterns to little-endian.
  def : Pat<(v2f32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v8i8 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v4i16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v2i32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v4f16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v4bf16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
}
def : Pat<(v1f64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
          (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat<(v1i64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
          (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;

// Match all 128-bit-wide loads whose type is compatible with FPR128.
let Predicates = [IsLE] in {
  // In big-endian mode vector loads must be done with LD1, so restrict these
  // patterns to little-endian.
  def : Pat<(v4f32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v2f64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v16i8 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v8i16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v4i32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v2i64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v8f16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v8bf16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
}
def : Pat<(f128 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
          (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;

defm LDRHH : LoadUI<0b01, 0, 0b01, GPR32, uimm12s2, "ldrh",
                    [(set GPR32:$Rt,
                          (zextloadi16 (am_indexed16 GPR64sp:$Rn,
                                                     uimm12s2:$offset)))]>;
defm LDRBB : LoadUI<0b00, 0, 0b01, GPR32, uimm12s1, "ldrb",
                    [(set GPR32:$Rt,
                          (zextloadi8 (am_indexed8 GPR64sp:$Rn,
                                                   uimm12s1:$offset)))]>;
// zextload -> i64
def : Pat<(i64 (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
def : Pat<(i64 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;

// zextloadi1 -> zextloadi8
def : Pat<(i32 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(i64 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;

// extload -> zextload
def : Pat<(i32 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
          (LDRHHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : Pat<(i32 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(i32 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(i64 (extloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
def : Pat<(i64 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;
def : Pat<(i64 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
def : Pat<(i64 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;

// load sign-extended half-word
defm LDRSHW : LoadUI<0b01, 0, 0b11, GPR32, uimm12s2, "ldrsh",
                     [(set GPR32:$Rt,
                           (sextloadi16 (am_indexed16 GPR64sp:$Rn,
                                                      uimm12s2:$offset)))]>;
defm LDRSHX : LoadUI<0b01, 0, 0b10, GPR64, uimm12s2, "ldrsh",
                     [(set GPR64:$Rt,
                           (sextloadi16 (am_indexed16 GPR64sp:$Rn,
                                                      uimm12s2:$offset)))]>;

// load sign-extended byte
defm LDRSBW : LoadUI<0b00, 0, 0b11, GPR32, uimm12s1, "ldrsb",
                     [(set GPR32:$Rt,
                           (sextloadi8 (am_indexed8 GPR64sp:$Rn,
                                                    uimm12s1:$offset)))]>;
defm LDRSBX : LoadUI<0b00, 0, 0b10, GPR64, uimm12s1, "ldrsb",
                     [(set GPR64:$Rt,
                           (sextloadi8 (am_indexed8 GPR64sp:$Rn,
                                                    uimm12s1:$offset)))]>;

// load sign-extended word
defm LDRSW : LoadUI<0b10, 0, 0b10, GPR64, uimm12s4, "ldrsw",
                    [(set GPR64:$Rt,
                          (sextloadi32 (am_indexed32 GPR64sp:$Rn,
                                                     uimm12s4:$offset)))]>;

// load zero-extended word
def : Pat<(i64 (zextloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;

// Pre-fetch.
def PRFMui : PrefetchUI<0b11, 0, 0b10, "prfm",
                        [(AArch64Prefetch imm:$Rt,
                                          (am_indexed64 GPR64sp:$Rn,
                                                        uimm12s8:$offset))]>;

def : InstAlias<"prfm $Rt, [$Rn]", (PRFMui prfop:$Rt, GPR64sp:$Rn, 0)>;

//---
// (literal)

def alignedglobal : PatLeaf<(iPTR iPTR:$label), [{
  if (auto *G = dyn_cast<GlobalAddressSDNode>(N)) {
    const DataLayout &DL = MF->getDataLayout();
    Align Align = G->getGlobal()->getPointerAlignment(DL);
    return Align >= 4 && G->getOffset() % 4 == 0;
  }
  if (auto *C = dyn_cast<ConstantPoolSDNode>(N))
    return C->getAlign() >= 4 && C->getOffset() % 4 == 0;
  return false;
}]>;

def LDRWl : LoadLiteral<0b00, 0, GPR32z, "ldr",
                        [(set GPR32z:$Rt, (load (AArch64adr alignedglobal:$label)))]>;
def LDRXl : LoadLiteral<0b01, 0, GPR64z, "ldr",
                        [(set GPR64z:$Rt, (load (AArch64adr alignedglobal:$label)))]>;
def LDRSl : LoadLiteral<0b00, 1, FPR32Op, "ldr",
                        [(set (f32 FPR32Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
def LDRDl : LoadLiteral<0b01, 1, FPR64Op, "ldr",
                        [(set (f64 FPR64Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
def LDRQl : LoadLiteral<0b10, 1, FPR128Op, "ldr",
                        [(set (f128 FPR128Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;

// load sign-extended word
def LDRSWl : LoadLiteral<0b10, 0, GPR64z, "ldrsw",
                         [(set GPR64z:$Rt, (sextloadi32 (AArch64adr alignedglobal:$label)))]>;

let AddedComplexity = 20 in {
def : Pat<(i64 (zextloadi32 (AArch64adr alignedglobal:$label))),
          (SUBREG_TO_REG (i64 0), (LDRWl $label), sub_32)>;
}

// prefetch
def PRFMl : PrefetchLiteral<0b11, 0, "prfm", []>;
//                   [(AArch64Prefetch imm:$Rt, tglobaladdr:$label)]>;

//---
// (unscaled immediate)
defm LDURX : LoadUnscaled<0b11, 0, 0b01, GPR64z, "ldur",
                    [(set GPR64z:$Rt,
                          (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURW : LoadUnscaled<0b10, 0, 0b01, GPR32z, "ldur",
                    [(set GPR32z:$Rt,
                          (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURB : LoadUnscaled<0b00, 1, 0b01, FPR8Op, "ldur",
                    [(set FPR8Op:$Rt,
                          (load (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURH : LoadUnscaled<0b01, 1, 0b01, FPR16Op, "ldur",
                    [(set (f16 FPR16Op:$Rt),
                          (load (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURS : LoadUnscaled<0b10, 1, 0b01, FPR32Op, "ldur",
                    [(set (f32 FPR32Op:$Rt),
                          (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURD : LoadUnscaled<0b11, 1, 0b01, FPR64Op, "ldur",
                    [(set (f64 FPR64Op:$Rt),
                          (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURQ : LoadUnscaled<0b00, 1, 0b11, FPR128Op, "ldur",
                    [(set (f128 FPR128Op:$Rt),
                          (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset)))]>;
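// For reference: the am_unscaled<N> addressing used by these LDUR forms takes
// a raw signed 9-bit byte offset (-256 to 255) with no scaling, e.g.
//   ldur x0, [x1, #-8]
// which covers the offsets the scaled LDR forms cannot encode.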
defm LDURHH
    : LoadUnscaled<0b01, 0, 0b01, GPR32, "ldurh",
             [(set GPR32:$Rt,
                    (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURBB
    : LoadUnscaled<0b00, 0, 0b01, GPR32, "ldurb",
             [(set GPR32:$Rt,
                    (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;

// Match all 64-bit-wide loads whose type is compatible with FPR64.
let Predicates = [IsLE] in {
  def : Pat<(v2f32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v2i32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4i16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v8i8 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4f16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
}
def : Pat<(v1f64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
          (LDURDi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(v1i64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
          (LDURDi GPR64sp:$Rn, simm9:$offset)>;

// Match all 128-bit-wide loads whose type is compatible with FPR128.
let Predicates = [IsLE] in {
  def : Pat<(v2f64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v2i64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4f32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4i32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v8i16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v16i8 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v8f16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
}

// anyext -> zext
def : Pat<(i32 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
          (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i64 (extloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
// unscaled zext
def : Pat<(i32 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
          (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i64 (zextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;


//---
// LDR mnemonics fall back to LDUR for negative or unaligned offsets.

// Define new assembler match classes as we want to only match these when
// they don't otherwise match the scaled addressing mode for LDR/STR. Don't
// associate a DiagnosticType either, as we want the diagnostic for the
// canonical form (the scaled operand) to take precedence.
class SImm9OffsetOperand<int Width> : AsmOperandClass {
  let Name = "SImm9OffsetFB" # Width;
  let PredicateMethod = "isSImm9OffsetFB<" # Width # ">";
  let RenderMethod = "addImmOperands";
}

def SImm9OffsetFB8Operand   : SImm9OffsetOperand<8>;
def SImm9OffsetFB16Operand  : SImm9OffsetOperand<16>;
def SImm9OffsetFB32Operand  : SImm9OffsetOperand<32>;
def SImm9OffsetFB64Operand  : SImm9OffsetOperand<64>;
def SImm9OffsetFB128Operand : SImm9OffsetOperand<128>;

def simm9_offset_fb8 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB8Operand;
}
def simm9_offset_fb16 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB16Operand;
}
def simm9_offset_fb32 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB32Operand;
}
def simm9_offset_fb64 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB64Operand;
}
def simm9_offset_fb128 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB128Operand;
}

def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURBi FPR8Op:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURHi FPR16Op:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURSi FPR32Op:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURDi FPR64Op:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURQi FPR128Op:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;

// load sign-extended half-word
defm LDURSHW
    : LoadUnscaled<0b01, 0, 0b11, GPR32, "ldursh",
             [(set GPR32:$Rt,
                    (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURSHX
    : LoadUnscaled<0b01, 0, 0b10, GPR64, "ldursh",
             [(set GPR64:$Rt,
                    (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
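// Note that the W forms above sign-extend the loaded half-word to 32 bits and
// the X forms to 64 bits, matching the i32 and i64 result types of the
// sextloadi16 patterns.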
// load sign-extended byte
defm LDURSBW
    : LoadUnscaled<0b00, 0, 0b11, GPR32, "ldursb",
             [(set GPR32:$Rt,
                    (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURSBX
    : LoadUnscaled<0b00, 0, 0b10, GPR64, "ldursb",
             [(set GPR64:$Rt,
                    (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;

// load sign-extended word
defm LDURSW
    : LoadUnscaled<0b10, 0, 0b10, GPR64, "ldursw",
             [(set GPR64:$Rt,
                    (sextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;

// Zero- and sign-extending aliases from generic LDR* mnemonics to LDUR*.
def : InstAlias<"ldrb $Rt, [$Rn, $offset]",
                (LDURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldrh $Rt, [$Rn, $offset]",
                (LDURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
                (LDURSBWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
                (LDURSBXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
                (LDURSHWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
                (LDURSHXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldrsw $Rt, [$Rn, $offset]",
                (LDURSWi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;

// Pre-fetch.
defm PRFUM : PrefetchUnscaled<0b11, 0, 0b10, "prfum",
                  [(AArch64Prefetch imm:$Rt,
                                    (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;

//---
// (unscaled immediate, unprivileged)
defm LDTRX : LoadUnprivileged<0b11, 0, 0b01, GPR64, "ldtr">;
defm LDTRW : LoadUnprivileged<0b10, 0, 0b01, GPR32, "ldtr">;

defm LDTRH : LoadUnprivileged<0b01, 0, 0b01, GPR32, "ldtrh">;
defm LDTRB : LoadUnprivileged<0b00, 0, 0b01, GPR32, "ldtrb">;

// load sign-extended half-word
defm LDTRSHW : LoadUnprivileged<0b01, 0, 0b11, GPR32, "ldtrsh">;
defm LDTRSHX : LoadUnprivileged<0b01, 0, 0b10, GPR64, "ldtrsh">;

// load sign-extended byte
defm LDTRSBW : LoadUnprivileged<0b00, 0, 0b11, GPR32, "ldtrsb">;
defm LDTRSBX : LoadUnprivileged<0b00, 0, 0b10, GPR64, "ldtrsb">;

// load sign-extended word
defm LDTRSW  : LoadUnprivileged<0b10, 0, 0b10, GPR64, "ldtrsw">;

//---
// (immediate pre-indexed)
def LDRWpre : LoadPreIdx<0b10, 0, 0b01, GPR32z, "ldr">;
def LDRXpre : LoadPreIdx<0b11, 0, 0b01, GPR64z, "ldr">;
def LDRBpre : LoadPreIdx<0b00, 1, 0b01, FPR8Op, "ldr">;
def LDRHpre : LoadPreIdx<0b01, 1, 0b01, FPR16Op, "ldr">;
def LDRSpre : LoadPreIdx<0b10, 1, 0b01, FPR32Op, "ldr">;
def LDRDpre : LoadPreIdx<0b11, 1, 0b01, FPR64Op, "ldr">;
def LDRQpre : LoadPreIdx<0b00, 1, 0b11, FPR128Op, "ldr">;

// load sign-extended half-word
def LDRSHWpre : LoadPreIdx<0b01, 0, 0b11, GPR32z, "ldrsh">;
def LDRSHXpre : LoadPreIdx<0b01, 0, 0b10, GPR64z, "ldrsh">;

// load sign-extended byte
def LDRSBWpre : LoadPreIdx<0b00, 0, 0b11, GPR32z, "ldrsb">;
def LDRSBXpre : LoadPreIdx<0b00, 0, 0b10, GPR64z, "ldrsb">;

// load zero-extended byte and half-word
def LDRBBpre : LoadPreIdx<0b00, 0, 0b01, GPR32z, "ldrb">;
def LDRHHpre : LoadPreIdx<0b01, 0, 0b01, GPR32z, "ldrh">;

// load sign-extended word
def LDRSWpre : LoadPreIdx<0b10, 0, 0b10, GPR64z, "ldrsw">;
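// For reference, the pre-indexed forms above write the updated address back
// to the base register before the access, while the post-indexed forms below
// write it back after the access, e.g.
//   ldr x0, [x1, #16]!   // x1 += 16, then load from the new x1
//   ldr x0, [x1], #16    // load from x1, then x1 += 16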
//---
// (immediate post-indexed)
def LDRWpost : LoadPostIdx<0b10, 0, 0b01, GPR32z, "ldr">;
def LDRXpost : LoadPostIdx<0b11, 0, 0b01, GPR64z, "ldr">;
def LDRBpost : LoadPostIdx<0b00, 1, 0b01, FPR8Op, "ldr">;
def LDRHpost : LoadPostIdx<0b01, 1, 0b01, FPR16Op, "ldr">;
def LDRSpost : LoadPostIdx<0b10, 1, 0b01, FPR32Op, "ldr">;
def LDRDpost : LoadPostIdx<0b11, 1, 0b01, FPR64Op, "ldr">;
def LDRQpost : LoadPostIdx<0b00, 1, 0b11, FPR128Op, "ldr">;

// load sign-extended half-word
def LDRSHWpost : LoadPostIdx<0b01, 0, 0b11, GPR32z, "ldrsh">;
def LDRSHXpost : LoadPostIdx<0b01, 0, 0b10, GPR64z, "ldrsh">;

// load sign-extended byte
def LDRSBWpost : LoadPostIdx<0b00, 0, 0b11, GPR32z, "ldrsb">;
def LDRSBXpost : LoadPostIdx<0b00, 0, 0b10, GPR64z, "ldrsb">;

// load zero-extended byte and half-word
def LDRBBpost : LoadPostIdx<0b00, 0, 0b01, GPR32z, "ldrb">;
def LDRHHpost : LoadPostIdx<0b01, 0, 0b01, GPR32z, "ldrh">;

// load sign-extended word
def LDRSWpost : LoadPostIdx<0b10, 0, 0b10, GPR64z, "ldrsw">;

//===----------------------------------------------------------------------===//
// Store instructions.
//===----------------------------------------------------------------------===//

// Pair (indexed, offset)
// FIXME: Use dedicated range-checked addressing mode operand here.
defm STPW : StorePairOffset<0b00, 0, GPR32z, simm7s4, "stp">;
defm STPX : StorePairOffset<0b10, 0, GPR64z, simm7s8, "stp">;
defm STPS : StorePairOffset<0b00, 1, FPR32Op, simm7s4, "stp">;
defm STPD : StorePairOffset<0b01, 1, FPR64Op, simm7s8, "stp">;
defm STPQ : StorePairOffset<0b10, 1, FPR128Op, simm7s16, "stp">;

// Pair (pre-indexed)
def STPWpre : StorePairPreIdx<0b00, 0, GPR32z, simm7s4, "stp">;
def STPXpre : StorePairPreIdx<0b10, 0, GPR64z, simm7s8, "stp">;
def STPSpre : StorePairPreIdx<0b00, 1, FPR32Op, simm7s4, "stp">;
def STPDpre : StorePairPreIdx<0b01, 1, FPR64Op, simm7s8, "stp">;
def STPQpre : StorePairPreIdx<0b10, 1, FPR128Op, simm7s16, "stp">;

// Pair (post-indexed)
def STPWpost : StorePairPostIdx<0b00, 0, GPR32z, simm7s4, "stp">;
def STPXpost : StorePairPostIdx<0b10, 0, GPR64z, simm7s8, "stp">;
def STPSpost : StorePairPostIdx<0b00, 1, FPR32Op, simm7s4, "stp">;
def STPDpost : StorePairPostIdx<0b01, 1, FPR64Op, simm7s8, "stp">;
def STPQpost : StorePairPostIdx<0b10, 1, FPR128Op, simm7s16, "stp">;

// Pair (no allocate)
defm STNPW : StorePairNoAlloc<0b00, 0, GPR32z, simm7s4, "stnp">;
defm STNPX : StorePairNoAlloc<0b10, 0, GPR64z, simm7s8, "stnp">;
defm STNPS : StorePairNoAlloc<0b00, 1, FPR32Op, simm7s4, "stnp">;
defm STNPD : StorePairNoAlloc<0b01, 1, FPR64Op, simm7s8, "stnp">;
defm STNPQ : StorePairNoAlloc<0b10, 1, FPR128Op, simm7s16, "stnp">;

def : Pat<(AArch64stp GPR64z:$Rt, GPR64z:$Rt2,
                      (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
          (STPXi GPR64z:$Rt, GPR64z:$Rt2, GPR64sp:$Rn, simm7s8:$offset)>;

def : Pat<(AArch64stnp FPR128:$Rt, FPR128:$Rt2,
                       (am_indexed7s128 GPR64sp:$Rn, simm7s16:$offset)),
          (STNPQi FPR128:$Rt, FPR128:$Rt2, GPR64sp:$Rn, simm7s16:$offset)>;


//---
// (Register offset)

// Integer
defm STRBB : Store8RO< 0b00, 0, 0b00, GPR32, "strb", i32, truncstorei8>;
defm STRHH : Store16RO<0b01, 0, 0b00, GPR32, "strh", i32, truncstorei16>;
defm STRW  : Store32RO<0b10, 0, 0b00, GPR32, "str",  i32, store>;
defm STRX  : Store64RO<0b11, 0, 0b00, GPR64, "str",  i64, store>;

// Floating-point
defm STRB : Store8RO< 0b00,  1, 0b00, FPR8Op,   "str", untyped, store>;
defm STRH : Store16RO<0b01,  1, 0b00, FPR16Op,  "str", f16,     store>;
defm STRS : Store32RO<0b10,  1, 0b00, FPR32Op,  "str", f32,     store>;
defm STRD : Store64RO<0b11,  1, 0b00, FPR64Op,  "str", f64,     store>;
defm STRQ : Store128RO<0b00, 1, 0b10, FPR128Op, "str">;

let Predicates = [UseSTRQro], AddedComplexity = 10 in {
  def : Pat<(store (f128 FPR128:$Rt),
                   (ro_Windexed128 GPR64sp:$Rn, GPR32:$Rm,
                                   ro_Wextend128:$extend)),
            (STRQroW FPR128:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend128:$extend)>;
  def : Pat<(store (f128 FPR128:$Rt),
                   (ro_Xindexed128 GPR64sp:$Rn, GPR64:$Rm,
                                   ro_Xextend128:$extend)),
            (STRQroX FPR128:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend128:$extend)>;
}

multiclass TruncStoreFrom64ROPat<ROAddrMode ro, SDPatternOperator storeop,
                                 Instruction STRW, Instruction STRX> {

  def : Pat<(storeop GPR64:$Rt,
                     (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
            (STRW (EXTRACT_SUBREG GPR64:$Rt, sub_32),
                  GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(storeop GPR64:$Rt,
                     (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
            (STRX (EXTRACT_SUBREG GPR64:$Rt, sub_32),
                  GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 10 in {
  // truncstore i64
  defm : TruncStoreFrom64ROPat<ro8,  truncstorei8,  STRBBroW, STRBBroX>;
  defm : TruncStoreFrom64ROPat<ro16, truncstorei16, STRHHroW, STRHHroX>;
  defm : TruncStoreFrom64ROPat<ro32, truncstorei32, STRWroW,  STRWroX>;
}

multiclass VecROStorePat<ROAddrMode ro, ValueType VecTy, RegisterClass FPR,
                         Instruction STRW, Instruction STRX> {
  def : Pat<(store (VecTy FPR:$Rt),
                   (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
            (STRW FPR:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(store (VecTy FPR:$Rt),
                   (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
            (STRX FPR:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 10 in {
// Match all 64-bit-wide stores whose type is compatible with FPR64.
let Predicates = [IsLE] in {
  // In big-endian mode vector stores must be done with ST1, so restrict these
  // patterns to little-endian.
  defm : VecROStorePat<ro64, v2i32,  FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v2f32,  FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v4i16,  FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v8i8,   FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v4f16,  FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v4bf16, FPR64, STRDroW, STRDroX>;
}

defm : VecROStorePat<ro64, v1i64, FPR64, STRDroW, STRDroX>;
defm : VecROStorePat<ro64, v1f64, FPR64, STRDroW, STRDroX>;

// Match all 128-bit-wide stores whose type is compatible with FPR128.
let Predicates = [IsLE, UseSTRQro] in {
  // In big-endian mode vector stores must be done with ST1, so restrict these
  // patterns to little-endian.
  defm : VecROStorePat<ro128, v2i64,  FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v2f64,  FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v4i32,  FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v4f32,  FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v8i16,  FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v16i8,  FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v8f16,  FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v8bf16, FPR128, STRQroW, STRQroX>;
}
} // AddedComplexity = 10

// Match stores from lane 0 to the appropriate subreg's store.
multiclass VecROStoreLane0Pat<ROAddrMode ro, SDPatternOperator storeop,
                              ValueType VecTy, ValueType STy,
                              SubRegIndex SubRegIdx,
                              Instruction STRW, Instruction STRX> {

  def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
                     (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
            (STRW (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
                  GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
                     (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
            (STRX (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
                  GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 19 in {
  defm : VecROStoreLane0Pat<ro16, truncstorei16, v8i16, i32, hsub, STRHroW, STRHroX>;
  defm : VecROStoreLane0Pat<ro16, store,         v8f16, f16, hsub, STRHroW, STRHroX>;
  defm : VecROStoreLane0Pat<ro32, store,         v4i32, i32, ssub, STRSroW, STRSroX>;
  defm : VecROStoreLane0Pat<ro32, store,         v4f32, f32, ssub, STRSroW, STRSroX>;
  defm : VecROStoreLane0Pat<ro64, store,         v2i64, i64, dsub, STRDroW, STRDroX>;
  defm : VecROStoreLane0Pat<ro64, store,         v2f64, f64, dsub, STRDroW, STRDroX>;
}

//---
// (unsigned immediate)
defm STRX : StoreUIz<0b11, 0, 0b00, GPR64z, uimm12s8, "str",
                     [(store GPR64z:$Rt,
                             (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
defm STRW : StoreUIz<0b10, 0, 0b00, GPR32z, uimm12s4, "str",
                     [(store GPR32z:$Rt,
                             (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
defm STRB : StoreUI<0b00, 1, 0b00, FPR8Op, uimm12s1, "str",
                    [(store FPR8Op:$Rt,
                            (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))]>;
defm STRH : StoreUI<0b01, 1, 0b00, FPR16Op, uimm12s2, "str",
                    [(store (f16 FPR16Op:$Rt),
                            (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))]>;
defm STRS : StoreUI<0b10, 1, 0b00, FPR32Op, uimm12s4, "str",
                    [(store (f32 FPR32Op:$Rt),
                            (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
defm STRD : StoreUI<0b11, 1, 0b00, FPR64Op, uimm12s8, "str",
                    [(store (f64 FPR64Op:$Rt),
                            (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
defm STRQ : StoreUI<0b00, 1, 0b10, FPR128Op, uimm12s16, "str", []>;

defm STRHH : StoreUIz<0b01, 0, 0b00, GPR32z, uimm12s2, "strh",
                      [(truncstorei16 GPR32z:$Rt,
                                      (am_indexed16 GPR64sp:$Rn,
                                                    uimm12s2:$offset))]>;
defm STRBB : StoreUIz<0b00, 0, 0b00, GPR32z, uimm12s1, "strb",
                      [(truncstorei8 GPR32z:$Rt,
                                     (am_indexed8 GPR64sp:$Rn,
                                                  uimm12s1:$offset))]>;

// bf16 store pattern
def : Pat<(store (bf16 FPR16Op:$Rt),
                 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)),
          (STRHui FPR16:$Rt, GPR64sp:$Rn, uimm12s2:$offset)>;

let AddedComplexity = 10 in {

// Match all 64-bit-wide stores whose type is compatible with FPR64.
def : Pat<(store (v1i64 FPR64:$Rt),
                 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
          (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat<(store (v1f64 FPR64:$Rt),
                 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
          (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;

let Predicates = [IsLE] in {
  // In big-endian mode vector stores must be done with ST1, so restrict these
  // patterns to little-endian.
  def : Pat<(store (v2f32 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v8i8 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v4i16 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v2i32 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v4f16 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v4bf16 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
}

// Match all 128-bit-wide stores whose type is compatible with FPR128.
def : Pat<(store (f128 FPR128:$Rt),
                 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
          (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;

let Predicates = [IsLE] in {
  // In big-endian mode vector stores must be done with ST1, so restrict these
  // patterns to little-endian.
  def : Pat<(store (v4f32 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v2f64 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v16i8 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v8i16 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v4i32 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v2i64 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v8f16 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v8bf16 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
}

// truncstore i64
def : Pat<(truncstorei32 GPR64:$Rt,
                         (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)),
  (STRWui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s4:$offset)>;
def : Pat<(truncstorei16 GPR64:$Rt,
                         (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)),
  (STRHHui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s2:$offset)>;
def : Pat<(truncstorei8 GPR64:$Rt, (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)),
  (STRBBui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s1:$offset)>;

} // AddedComplexity = 10

// Match stores from lane 0 to the appropriate subreg's store.
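// For example, the <am_indexed32, store, v4i32, i32, ssub, uimm12s4, STRSui>
// instantiation below rewrites a store of lane 0 of a v4i32 as a plain STR of
// the register's s-subregister:
//   (store (i32 (vector_extract (v4i32 V128:$Vt), 0)), addr)
//     ==> (STRSui (EXTRACT_SUBREG $Vt, ssub), ...)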
multiclass VecStoreLane0Pat<ComplexPattern UIAddrMode, SDPatternOperator storeop,
                            ValueType VTy, ValueType STy,
                            SubRegIndex SubRegIdx, Operand IndexType,
                            Instruction STR> {
  def : Pat<(storeop (STy (vector_extract (VTy VecListOne128:$Vt), 0)),
                     (UIAddrMode GPR64sp:$Rn, IndexType:$offset)),
            (STR (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
                 GPR64sp:$Rn, IndexType:$offset)>;
}

let AddedComplexity = 19 in {
  defm : VecStoreLane0Pat<am_indexed16, truncstorei16, v8i16, i32, hsub, uimm12s2, STRHui>;
  defm : VecStoreLane0Pat<am_indexed16, store,         v8f16, f16, hsub, uimm12s2, STRHui>;
  defm : VecStoreLane0Pat<am_indexed32, store,         v4i32, i32, ssub, uimm12s4, STRSui>;
  defm : VecStoreLane0Pat<am_indexed32, store,         v4f32, f32, ssub, uimm12s4, STRSui>;
  defm : VecStoreLane0Pat<am_indexed64, store,         v2i64, i64, dsub, uimm12s8, STRDui>;
  defm : VecStoreLane0Pat<am_indexed64, store,         v2f64, f64, dsub, uimm12s8, STRDui>;
}

//---
// (unscaled immediate)
defm STURX : StoreUnscaled<0b11, 0, 0b00, GPR64z, "stur",
                           [(store GPR64z:$Rt,
                                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
defm STURW : StoreUnscaled<0b10, 0, 0b00, GPR32z, "stur",
                           [(store GPR32z:$Rt,
                                   (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
defm STURB : StoreUnscaled<0b00, 1, 0b00, FPR8Op, "stur",
                           [(store FPR8Op:$Rt,
                                   (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
defm STURH : StoreUnscaled<0b01, 1, 0b00, FPR16Op, "stur",
                           [(store (f16 FPR16Op:$Rt),
                                   (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
defm STURS : StoreUnscaled<0b10, 1, 0b00, FPR32Op, "stur",
                           [(store (f32 FPR32Op:$Rt),
                                   (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
defm STURD : StoreUnscaled<0b11, 1, 0b00, FPR64Op, "stur",
                           [(store (f64 FPR64Op:$Rt),
                                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
defm STURQ : StoreUnscaled<0b00, 1, 0b10, FPR128Op, "stur",
                           [(store (f128 FPR128Op:$Rt),
                                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset))]>;
defm STURHH : StoreUnscaled<0b01, 0, 0b00, GPR32z, "sturh",
                            [(truncstorei16 GPR32z:$Rt,
                                            (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
defm STURBB : StoreUnscaled<0b00, 0, 0b00, GPR32z, "sturb",
                            [(truncstorei8 GPR32z:$Rt,
                                           (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;

// Armv8.4 Weaker Release Consistency enhancements
// LDAPR & STLR with Immediate Offset instructions
let Predicates = [HasRCPC_IMMO] in {
defm STLURB    : BaseStoreUnscaleV84<"stlurb",  0b00, 0b00, GPR32>;
defm STLURH    : BaseStoreUnscaleV84<"stlurh",  0b01, 0b00, GPR32>;
defm STLURW    : BaseStoreUnscaleV84<"stlur",   0b10, 0b00, GPR32>;
defm STLURX    : BaseStoreUnscaleV84<"stlur",   0b11, 0b00, GPR64>;
defm LDAPURB   : BaseLoadUnscaleV84<"ldapurb",  0b00, 0b01, GPR32>;
defm LDAPURSBW : BaseLoadUnscaleV84<"ldapursb", 0b00, 0b11, GPR32>;
defm LDAPURSBX : BaseLoadUnscaleV84<"ldapursb", 0b00, 0b10, GPR64>;
defm LDAPURH   : BaseLoadUnscaleV84<"ldapurh",  0b01, 0b01, GPR32>;
defm LDAPURSHW : BaseLoadUnscaleV84<"ldapursh", 0b01, 0b11, GPR32>;
defm LDAPURSHX : BaseLoadUnscaleV84<"ldapursh", 0b01, 0b10, GPR64>;
defm LDAPUR    : BaseLoadUnscaleV84<"ldapur",   0b10, 0b01, GPR32>;
defm LDAPURSW  : BaseLoadUnscaleV84<"ldapursw", 0b10, 0b10, GPR64>;
defm LDAPURX   : BaseLoadUnscaleV84<"ldapur",   0b11, 0b01, GPR64>;
}

// Match all 64-bit-wide stores whose type is compatible with FPR64.
def : Pat<(store (v1f64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
          (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(store (v1i64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
          (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;

let AddedComplexity = 10 in {

let Predicates = [IsLE] in {
  // In big-endian mode vector stores must be done with ST1, so restrict these
  // patterns to little-endian.
  def : Pat<(store (v2f32 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8i8 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4i16 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v2i32 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4f16 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4bf16 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
}

// Match all 128-bit-wide stores whose type is compatible with FPR128.
def : Pat<(store (f128 FPR128:$Rt), (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
          (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;

let Predicates = [IsLE] in {
  // In big-endian mode vector stores must be done with ST1, so restrict these
  // patterns to little-endian.
  def : Pat<(store (v4f32 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v2f64 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v16i8 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8i16 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4i32 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v2i64 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8f16 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8bf16 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
}

} // AddedComplexity = 10

// unscaled i64 truncating stores
def : Pat<(truncstorei32 GPR64:$Rt, (am_unscaled32 GPR64sp:$Rn, simm9:$offset)),
  (STURWi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(truncstorei16 GPR64:$Rt, (am_unscaled16 GPR64sp:$Rn, simm9:$offset)),
  (STURHHi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(truncstorei8 GPR64:$Rt, (am_unscaled8 GPR64sp:$Rn, simm9:$offset)),
  (STURBBi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;

// Match stores from lane 0 to the appropriate subreg's store.
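// The unscaled variants below reuse VecStoreLane0Pat with the am_unscaled128
// addressing mode and a simm9 offset, so the same lane-0 rewrite applies to
// the STUR-form stores.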
multiclass VecStoreULane0Pat<SDPatternOperator StoreOp,
                             ValueType VTy, ValueType STy,
                             SubRegIndex SubRegIdx, Instruction STR> {
  defm : VecStoreLane0Pat<am_unscaled128, StoreOp, VTy, STy, SubRegIdx, simm9, STR>;
}

let AddedComplexity = 19 in {
  defm : VecStoreULane0Pat<truncstorei16, v8i16, i32, hsub, STURHi>;
  defm : VecStoreULane0Pat<store,         v8f16, f16, hsub, STURHi>;
  defm : VecStoreULane0Pat<store,         v4i32, i32, ssub, STURSi>;
  defm : VecStoreULane0Pat<store,         v4f32, f32, ssub, STURSi>;
  defm : VecStoreULane0Pat<store,         v2i64, i64, dsub, STURDi>;
  defm : VecStoreULane0Pat<store,         v2f64, f64, dsub, STURDi>;
}

//---
// STR mnemonics fall back to STUR for negative or unaligned offsets.
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURBi FPR8Op:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURHi FPR16Op:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURSi FPR32Op:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURDi FPR64Op:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURQi FPR128Op:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;

def : InstAlias<"strb $Rt, [$Rn, $offset]",
                (STURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"strh $Rt, [$Rn, $offset]",
                (STURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;

//---
// (unscaled immediate, unprivileged)
defm STTRW : StoreUnprivileged<0b10, 0, 0b00, GPR32, "sttr">;
defm STTRX : StoreUnprivileged<0b11, 0, 0b00, GPR64, "sttr">;

defm STTRH : StoreUnprivileged<0b01, 0, 0b00, GPR32, "sttrh">;
defm STTRB : StoreUnprivileged<0b00, 0, 0b00, GPR32, "sttrb">;

//---
// (immediate pre-indexed)
def STRWpre : StorePreIdx<0b10, 0, 0b00, GPR32z, "str", pre_store, i32>;
def STRXpre : StorePreIdx<0b11, 0, 0b00, GPR64z, "str", pre_store, i64>;
def STRBpre : StorePreIdx<0b00, 1, 0b00, FPR8Op, "str", pre_store, untyped>;
def STRHpre : StorePreIdx<0b01, 1, 0b00, FPR16Op, "str", pre_store, f16>;
def STRSpre : StorePreIdx<0b10, 1, 0b00, FPR32Op, "str", pre_store, f32>;
def STRDpre : StorePreIdx<0b11, 1, 0b00, FPR64Op, "str", pre_store, f64>;
def STRQpre : StorePreIdx<0b00, 1, 0b10, FPR128Op, "str", pre_store, f128>;

def STRBBpre : StorePreIdx<0b00, 0, 0b00, GPR32z, "strb", pre_truncsti8, i32>;
def STRHHpre : StorePreIdx<0b01, 0, 0b00, GPR32z, "strh", pre_truncsti16, i32>;

// truncstore i64
def : Pat<(pre_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
          (STRWpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
                   simm9:$off)>;
def : Pat<(pre_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
          (STRHHpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
                    simm9:$off)>;
def : Pat<(pre_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
          (STRBBpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
                    simm9:$off)>;

def : Pat<(pre_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;

def : Pat<(pre_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;

//---
// (immediate post-indexed)
def STRWpost : StorePostIdx<0b10, 0, 0b00, GPR32z, "str", post_store, i32>;
def STRXpost : StorePostIdx<0b11, 0, 0b00, GPR64z, "str", post_store, i64>;
def STRBpost : StorePostIdx<0b00, 1, 0b00, FPR8Op, "str", post_store, untyped>;
def STRHpost : StorePostIdx<0b01, 1, 0b00, FPR16Op, "str", post_store, f16>;
def STRSpost : StorePostIdx<0b10, 1, 0b00, FPR32Op, "str", post_store, f32>;
def STRDpost : StorePostIdx<0b11, 1, 0b00, FPR64Op, "str", post_store, f64>;
def STRQpost : StorePostIdx<0b00, 1, 0b10, FPR128Op, "str", post_store, f128>;

def STRBBpost : StorePostIdx<0b00, 0, 0b00, GPR32z, "strb", post_truncsti8, i32>;
def STRHHpost : StorePostIdx<0b01, 0, 0b00, GPR32z, "strh", post_truncsti16, i32>;

// truncstore i64
def : Pat<(post_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
          (STRWpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
                    simm9:$off)>;
def : Pat<(post_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
          (STRHHpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
                     simm9:$off)>;
def : Pat<(post_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
          (STRBBpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
                     simm9:$off)>;

def : Pat<(post_store (bf16 FPR16:$Rt), GPR64sp:$addr, simm9:$off),
          (STRHpost FPR16:$Rt, GPR64sp:$addr, simm9:$off)>;

def : Pat<(post_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4bf16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;

def : Pat<(post_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v8bf16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;

//===----------------------------------------------------------------------===//
// Load/store exclusive instructions.
//===----------------------------------------------------------------------===//

def LDARW  : LoadAcquire<0b10, 1, 1, 0, 1, GPR32, "ldar">;
def LDARX  : LoadAcquire<0b11, 1, 1, 0, 1, GPR64, "ldar">;
def LDARB  : LoadAcquire<0b00, 1, 1, 0, 1, GPR32, "ldarb">;
def LDARH  : LoadAcquire<0b01, 1, 1, 0, 1, GPR32, "ldarh">;

def LDAXRW : LoadExclusive<0b10, 0, 1, 0, 1, GPR32, "ldaxr">;
def LDAXRX : LoadExclusive<0b11, 0, 1, 0, 1, GPR64, "ldaxr">;
def LDAXRB : LoadExclusive<0b00, 0, 1, 0, 1, GPR32, "ldaxrb">;
def LDAXRH : LoadExclusive<0b01, 0, 1, 0, 1, GPR32, "ldaxrh">;

def LDXRW  : LoadExclusive<0b10, 0, 1, 0, 0, GPR32, "ldxr">;
def LDXRX  : LoadExclusive<0b11, 0, 1, 0, 0, GPR64, "ldxr">;
def LDXRB  : LoadExclusive<0b00, 0, 1, 0, 0, GPR32, "ldxrb">;
def LDXRH  : LoadExclusive<0b01, 0, 1, 0, 0, GPR32, "ldxrh">;

def STLRW  : StoreRelease<0b10, 1, 0, 0, 1, GPR32, "stlr">;
def STLRX  : StoreRelease<0b11, 1, 0, 0, 1, GPR64, "stlr">;
def STLRB  : StoreRelease<0b00, 1, 0, 0, 1, GPR32, "stlrb">;
def STLRH  : StoreRelease<0b01, 1, 0, 0, 1, GPR32, "stlrh">;

def STLXRW : StoreExclusive<0b10, 0, 0, 0, 1, GPR32, "stlxr">;
def STLXRX : StoreExclusive<0b11, 0, 0, 0, 1, GPR64, "stlxr">;
def STLXRB : StoreExclusive<0b00, 0, 0, 0, 1, GPR32, "stlxrb">;
def STLXRH : StoreExclusive<0b01, 0, 0, 0, 1, GPR32, "stlxrh">;

def STXRW  : StoreExclusive<0b10, 0, 0, 0, 0, GPR32, "stxr">;
def STXRX  : StoreExclusive<0b11, 0, 0, 0, 0, GPR64, "stxr">;
def STXRB  : StoreExclusive<0b00, 0, 0, 0, 0, GPR32, "stxrb">;
def STXRH  : StoreExclusive<0b01, 0, 0, 0, 0, GPR32, "stxrh">;

def LDAXPW : LoadExclusivePair<0b10, 0, 1, 1, 1, GPR32, "ldaxp">;
def LDAXPX : LoadExclusivePair<0b11, 0, 1, 1, 1, GPR64, "ldaxp">;
"ldaxp">; 3743 3744def LDXPW : LoadExclusivePair<0b10, 0, 1, 1, 0, GPR32, "ldxp">; 3745def LDXPX : LoadExclusivePair<0b11, 0, 1, 1, 0, GPR64, "ldxp">; 3746 3747def STLXPW : StoreExclusivePair<0b10, 0, 0, 1, 1, GPR32, "stlxp">; 3748def STLXPX : StoreExclusivePair<0b11, 0, 0, 1, 1, GPR64, "stlxp">; 3749 3750def STXPW : StoreExclusivePair<0b10, 0, 0, 1, 0, GPR32, "stxp">; 3751def STXPX : StoreExclusivePair<0b11, 0, 0, 1, 0, GPR64, "stxp">; 3752 3753let Predicates = [HasLOR] in { 3754 // v8.1a "Limited Order Region" extension load-acquire instructions 3755 def LDLARW : LoadAcquire <0b10, 1, 1, 0, 0, GPR32, "ldlar">; 3756 def LDLARX : LoadAcquire <0b11, 1, 1, 0, 0, GPR64, "ldlar">; 3757 def LDLARB : LoadAcquire <0b00, 1, 1, 0, 0, GPR32, "ldlarb">; 3758 def LDLARH : LoadAcquire <0b01, 1, 1, 0, 0, GPR32, "ldlarh">; 3759 3760 // v8.1a "Limited Order Region" extension store-release instructions 3761 def STLLRW : StoreRelease <0b10, 1, 0, 0, 0, GPR32, "stllr">; 3762 def STLLRX : StoreRelease <0b11, 1, 0, 0, 0, GPR64, "stllr">; 3763 def STLLRB : StoreRelease <0b00, 1, 0, 0, 0, GPR32, "stllrb">; 3764 def STLLRH : StoreRelease <0b01, 1, 0, 0, 0, GPR32, "stllrh">; 3765} 3766 3767//===----------------------------------------------------------------------===// 3768// Scaled floating point to integer conversion instructions. 3769//===----------------------------------------------------------------------===// 3770 3771defm FCVTAS : FPToIntegerUnscaled<0b00, 0b100, "fcvtas", int_aarch64_neon_fcvtas>; 3772defm FCVTAU : FPToIntegerUnscaled<0b00, 0b101, "fcvtau", int_aarch64_neon_fcvtau>; 3773defm FCVTMS : FPToIntegerUnscaled<0b10, 0b000, "fcvtms", int_aarch64_neon_fcvtms>; 3774defm FCVTMU : FPToIntegerUnscaled<0b10, 0b001, "fcvtmu", int_aarch64_neon_fcvtmu>; 3775defm FCVTNS : FPToIntegerUnscaled<0b00, 0b000, "fcvtns", int_aarch64_neon_fcvtns>; 3776defm FCVTNU : FPToIntegerUnscaled<0b00, 0b001, "fcvtnu", int_aarch64_neon_fcvtnu>; 3777defm FCVTPS : FPToIntegerUnscaled<0b01, 0b000, "fcvtps", int_aarch64_neon_fcvtps>; 3778defm FCVTPU : FPToIntegerUnscaled<0b01, 0b001, "fcvtpu", int_aarch64_neon_fcvtpu>; 3779defm FCVTZS : FPToIntegerUnscaled<0b11, 0b000, "fcvtzs", any_fp_to_sint>; 3780defm FCVTZU : FPToIntegerUnscaled<0b11, 0b001, "fcvtzu", any_fp_to_uint>; 3781defm FCVTZS : FPToIntegerScaled<0b11, 0b000, "fcvtzs", any_fp_to_sint>; 3782defm FCVTZU : FPToIntegerScaled<0b11, 0b001, "fcvtzu", any_fp_to_uint>; 3783 3784// AArch64's FCVT instructions saturate when out of range. 
multiclass FPToIntegerSatPats<SDNode to_int_sat, string INST> {
  let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (to_int_sat f16:$Rn, i32)),
            (!cast<Instruction>(INST # UWHr) f16:$Rn)>;
  def : Pat<(i64 (to_int_sat f16:$Rn, i64)),
            (!cast<Instruction>(INST # UXHr) f16:$Rn)>;
  }
  def : Pat<(i32 (to_int_sat f32:$Rn, i32)),
            (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
  def : Pat<(i64 (to_int_sat f32:$Rn, i64)),
            (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
  def : Pat<(i32 (to_int_sat f64:$Rn, i32)),
            (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
  def : Pat<(i64 (to_int_sat f64:$Rn, i64)),
            (!cast<Instruction>(INST # UXDr) f64:$Rn)>;

  let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (to_int_sat (fmul f16:$Rn, fixedpoint_f16_i32:$scale), i32)),
            (!cast<Instruction>(INST # SWHri) $Rn, $scale)>;
  def : Pat<(i64 (to_int_sat (fmul f16:$Rn, fixedpoint_f16_i64:$scale), i64)),
            (!cast<Instruction>(INST # SXHri) $Rn, $scale)>;
  }
  def : Pat<(i32 (to_int_sat (fmul f32:$Rn, fixedpoint_f32_i32:$scale), i32)),
            (!cast<Instruction>(INST # SWSri) $Rn, $scale)>;
  def : Pat<(i64 (to_int_sat (fmul f32:$Rn, fixedpoint_f32_i64:$scale), i64)),
            (!cast<Instruction>(INST # SXSri) $Rn, $scale)>;
  def : Pat<(i32 (to_int_sat (fmul f64:$Rn, fixedpoint_f64_i32:$scale), i32)),
            (!cast<Instruction>(INST # SWDri) $Rn, $scale)>;
  def : Pat<(i64 (to_int_sat (fmul f64:$Rn, fixedpoint_f64_i64:$scale), i64)),
            (!cast<Instruction>(INST # SXDri) $Rn, $scale)>;
}

defm : FPToIntegerSatPats<fp_to_sint_sat, "FCVTZS">;
defm : FPToIntegerSatPats<fp_to_uint_sat, "FCVTZU">;

multiclass FPToIntegerIntPats<Intrinsic round, string INST> {
  let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (round f16:$Rn)), (!cast<Instruction>(INST # UWHr) $Rn)>;
  def : Pat<(i64 (round f16:$Rn)), (!cast<Instruction>(INST # UXHr) $Rn)>;
  }
  def : Pat<(i32 (round f32:$Rn)), (!cast<Instruction>(INST # UWSr) $Rn)>;
  def : Pat<(i64 (round f32:$Rn)), (!cast<Instruction>(INST # UXSr) $Rn)>;
  def : Pat<(i32 (round f64:$Rn)), (!cast<Instruction>(INST # UWDr) $Rn)>;
  def : Pat<(i64 (round f64:$Rn)), (!cast<Instruction>(INST # UXDr) $Rn)>;

  let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (round (fmul f16:$Rn, fixedpoint_f16_i32:$scale))),
            (!cast<Instruction>(INST # SWHri) $Rn, $scale)>;
  def : Pat<(i64 (round (fmul f16:$Rn, fixedpoint_f16_i64:$scale))),
            (!cast<Instruction>(INST # SXHri) $Rn, $scale)>;
  }
  def : Pat<(i32 (round (fmul f32:$Rn, fixedpoint_f32_i32:$scale))),
            (!cast<Instruction>(INST # SWSri) $Rn, $scale)>;
  def : Pat<(i64 (round (fmul f32:$Rn, fixedpoint_f32_i64:$scale))),
            (!cast<Instruction>(INST # SXSri) $Rn, $scale)>;
  def : Pat<(i32 (round (fmul f64:$Rn, fixedpoint_f64_i32:$scale))),
            (!cast<Instruction>(INST # SWDri) $Rn, $scale)>;
  def : Pat<(i64 (round (fmul f64:$Rn, fixedpoint_f64_i64:$scale))),
            (!cast<Instruction>(INST # SXDri) $Rn, $scale)>;
}

defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzs, "FCVTZS">;
defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzu, "FCVTZU">;
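// The multiclass below folds an explicit rounding node into the conversion:
// e.g. (i32 (fp_to_sint (ffloor f32:$x))) can select a single
// fcvtms w0, s0 instead of an frintm followed by fcvtzs.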
multiclass FPToIntegerPats<SDNode to_int, SDNode to_int_sat, SDNode round, string INST> {
  def : Pat<(i32 (to_int (round f32:$Rn))),
            (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
  def : Pat<(i64 (to_int (round f32:$Rn))),
            (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
  def : Pat<(i32 (to_int (round f64:$Rn))),
            (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
  def : Pat<(i64 (to_int (round f64:$Rn))),
            (!cast<Instruction>(INST # UXDr) f64:$Rn)>;

  // These instructions saturate like fp_to_[su]int_sat.
  let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (to_int_sat (round f16:$Rn), i32)),
            (!cast<Instruction>(INST # UWHr) f16:$Rn)>;
  def : Pat<(i64 (to_int_sat (round f16:$Rn), i64)),
            (!cast<Instruction>(INST # UXHr) f16:$Rn)>;
  }
  def : Pat<(i32 (to_int_sat (round f32:$Rn), i32)),
            (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
  def : Pat<(i64 (to_int_sat (round f32:$Rn), i64)),
            (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
  def : Pat<(i32 (to_int_sat (round f64:$Rn), i32)),
            (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
  def : Pat<(i64 (to_int_sat (round f64:$Rn), i64)),
            (!cast<Instruction>(INST # UXDr) f64:$Rn)>;
}

defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, fceil,  "FCVTPS">;
defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, fceil,  "FCVTPU">;
defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, ffloor, "FCVTMS">;
defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, ffloor, "FCVTMU">;
defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, ftrunc, "FCVTZS">;
defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, ftrunc, "FCVTZU">;
defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, fround, "FCVTAS">;
defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, fround, "FCVTAU">;

let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (lround f16:$Rn)),
            (!cast<Instruction>(FCVTASUWHr) f16:$Rn)>;
  def : Pat<(i64 (lround f16:$Rn)),
            (!cast<Instruction>(FCVTASUXHr) f16:$Rn)>;
  def : Pat<(i64 (llround f16:$Rn)),
            (!cast<Instruction>(FCVTASUXHr) f16:$Rn)>;
}
def : Pat<(i32 (lround f32:$Rn)),
          (!cast<Instruction>(FCVTASUWSr) f32:$Rn)>;
def : Pat<(i32 (lround f64:$Rn)),
          (!cast<Instruction>(FCVTASUWDr) f64:$Rn)>;
def : Pat<(i64 (lround f32:$Rn)),
          (!cast<Instruction>(FCVTASUXSr) f32:$Rn)>;
def : Pat<(i64 (lround f64:$Rn)),
          (!cast<Instruction>(FCVTASUXDr) f64:$Rn)>;
def : Pat<(i64 (llround f32:$Rn)),
          (!cast<Instruction>(FCVTASUXSr) f32:$Rn)>;
def : Pat<(i64 (llround f64:$Rn)),
          (!cast<Instruction>(FCVTASUXDr) f64:$Rn)>;

//===----------------------------------------------------------------------===//
// Scaled integer to floating point conversion instructions.
//===----------------------------------------------------------------------===//

defm SCVTF : IntegerToFP<0, "scvtf", any_sint_to_fp>;
defm UCVTF : IntegerToFP<1, "ucvtf", any_uint_to_fp>;
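// The scaled (fixed-point) forms divide by 2^fbits on the way in, so e.g.
// scvtf s0, w0, #16 treats w0 as a Q16.16 fixed-point value and converts it
// to float in a single instruction.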
//===----------------------------------------------------------------------===//
// Unscaled integer to floating point conversion instruction.
//===----------------------------------------------------------------------===//

defm FMOV : UnscaledConversion<"fmov">;

// Add pseudo ops for FMOV 0 so we can mark them as isReMaterializable
let isReMaterializable = 1, isCodeGenOnly = 1, isAsCheapAsAMove = 1 in {
def FMOVH0 : Pseudo<(outs FPR16:$Rd), (ins), [(set f16:$Rd, (fpimm0))]>,
             Sched<[WriteF]>, Requires<[HasFullFP16]>;
def FMOVS0 : Pseudo<(outs FPR32:$Rd), (ins), [(set f32:$Rd, (fpimm0))]>,
             Sched<[WriteF]>;
def FMOVD0 : Pseudo<(outs FPR64:$Rd), (ins), [(set f64:$Rd, (fpimm0))]>,
             Sched<[WriteF]>;
}
// Similarly add aliases
def : InstAlias<"fmov $Rd, #0.0", (FMOVWHr FPR16:$Rd, WZR), 0>,
      Requires<[HasFullFP16]>;
def : InstAlias<"fmov $Rd, #0.0", (FMOVWSr FPR32:$Rd, WZR), 0>;
def : InstAlias<"fmov $Rd, #0.0", (FMOVXDr FPR64:$Rd, XZR), 0>;

//===----------------------------------------------------------------------===//
// Floating point conversion instruction.
//===----------------------------------------------------------------------===//

defm FCVT : FPConversion<"fcvt">;

//===----------------------------------------------------------------------===//
// Floating point single operand instructions.
//===----------------------------------------------------------------------===//

defm FABS   : SingleOperandFPData<0b0001, "fabs", fabs>;
defm FMOV   : SingleOperandFPData<0b0000, "fmov">;
defm FNEG   : SingleOperandFPData<0b0010, "fneg", fneg>;
defm FRINTA : SingleOperandFPData<0b1100, "frinta", fround>;
defm FRINTI : SingleOperandFPData<0b1111, "frinti", fnearbyint>;
defm FRINTM : SingleOperandFPData<0b1010, "frintm", ffloor>;
defm FRINTN : SingleOperandFPData<0b1000, "frintn", froundeven>;
defm FRINTP : SingleOperandFPData<0b1001, "frintp", fceil>;

defm FRINTX : SingleOperandFPData<0b1110, "frintx", frint>;
defm FRINTZ : SingleOperandFPData<0b1011, "frintz", ftrunc>;

let SchedRW = [WriteFDiv] in {
defm FSQRT  : SingleOperandFPData<0b0011, "fsqrt", fsqrt>;
}

let Predicates = [HasFRInt3264] in {
  defm FRINT32Z : FRIntNNT<0b00, "frint32z", int_aarch64_frint32z>;
  defm FRINT64Z : FRIntNNT<0b10, "frint64z", int_aarch64_frint64z>;
  defm FRINT32X : FRIntNNT<0b01, "frint32x", int_aarch64_frint32x>;
  defm FRINT64X : FRIntNNT<0b11, "frint64x", int_aarch64_frint64x>;
} // HasFRInt3264

let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (lrint f16:$Rn)),
            (FCVTZSUWHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
  def : Pat<(i64 (lrint f16:$Rn)),
            (FCVTZSUXHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
  def : Pat<(i64 (llrint f16:$Rn)),
            (FCVTZSUXHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
}
def : Pat<(i32 (lrint f32:$Rn)),
          (FCVTZSUWSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
def : Pat<(i32 (lrint f64:$Rn)),
          (FCVTZSUWDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
def : Pat<(i64 (lrint f32:$Rn)),
          (FCVTZSUXSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
def : Pat<(i64 (lrint f64:$Rn)),
          (FCVTZSUXDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
def : Pat<(i64 (llrint f32:$Rn)),
          (FCVTZSUXSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
def : Pat<(i64 (llrint f64:$Rn)),
          (FCVTZSUXDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
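// lrint/llrint round using the current FPCR rounding mode, so the patterns
// above lower them to frintx (which rounds in the current mode and signals
// Inexact, as lrint requires) followed by fcvtzs on the already-integral
// value.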
//===----------------------------------------------------------------------===//
// Floating point two operand instructions.
//===----------------------------------------------------------------------===//

defm FADD   : TwoOperandFPData<0b0010, "fadd", fadd>;
let SchedRW = [WriteFDiv] in {
defm FDIV   : TwoOperandFPData<0b0001, "fdiv", fdiv>;
}
defm FMAXNM : TwoOperandFPData<0b0110, "fmaxnm", fmaxnum>;
defm FMAX   : TwoOperandFPData<0b0100, "fmax", fmaximum>;
defm FMINNM : TwoOperandFPData<0b0111, "fminnm", fminnum>;
defm FMIN   : TwoOperandFPData<0b0101, "fmin", fminimum>;
let SchedRW = [WriteFMul] in {
defm FMUL   : TwoOperandFPData<0b0000, "fmul", fmul>;
defm FNMUL  : TwoOperandFPDataNeg<0b1000, "fnmul", fmul>;
}
defm FSUB   : TwoOperandFPData<0b0011, "fsub", fsub>;

def : Pat<(v1f64 (fmaximum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMAXDrr FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v1f64 (fminimum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMINDrr FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v1f64 (fmaxnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMAXNMDrr FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v1f64 (fminnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMINNMDrr FPR64:$Rn, FPR64:$Rm)>;

//===----------------------------------------------------------------------===//
// Floating point three operand instructions.
//===----------------------------------------------------------------------===//

defm FMADD  : ThreeOperandFPData<0, 0, "fmadd", fma>;
defm FMSUB  : ThreeOperandFPData<0, 1, "fmsub",
     TriOpFrag<(fma node:$LHS, (fneg node:$MHS), node:$RHS)> >;
defm FNMADD : ThreeOperandFPData<1, 0, "fnmadd",
     TriOpFrag<(fneg (fma node:$LHS, node:$MHS, node:$RHS))> >;
defm FNMSUB : ThreeOperandFPData<1, 1, "fnmsub",
     TriOpFrag<(fma node:$LHS, node:$MHS, (fneg node:$RHS))> >;

// The following def pats catch the case where the LHS of an FMA is negated.
// The TriOpFrag above catches the case where the middle operand is negated.

// N.b. FMSUB etc have the accumulator at the *end* of (outs), unlike
// the NEON variant.

// Here we first handle (-a)*b + c, which maps to FMSUB:

let Predicates = [HasNEON, HasFullFP16] in
def : Pat<(f16 (fma (fneg FPR16:$Rn), FPR16:$Rm, FPR16:$Ra)),
          (FMSUBHrrr FPR16:$Rn, FPR16:$Rm, FPR16:$Ra)>;

def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, FPR32:$Ra)),
          (FMSUBSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;

def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, FPR64:$Ra)),
          (FMSUBDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;

// Now it's time for "(-a)*b + (-c)", which maps to FNMADD:

let Predicates = [HasNEON, HasFullFP16] in
def : Pat<(f16 (fma (fneg FPR16:$Rn), FPR16:$Rm, (fneg FPR16:$Ra))),
          (FNMADDHrrr FPR16:$Rn, FPR16:$Rm, FPR16:$Ra)>;

def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, (fneg FPR32:$Ra))),
          (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;

def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, (fneg FPR64:$Ra))),
          (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
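// A quick sanity check on the algebra: fma(-n, m, a) = a - n*m, which is
// exactly what fmsub computes, and fma(-n, m, -a) = -(a + n*m), which is
// fnmadd. The patterns above only re-associate the negations; they do not
// change the fused rounding behaviour.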
//===----------------------------------------------------------------------===//
// Floating point comparison instructions.
//===----------------------------------------------------------------------===//

defm FCMPE : FPComparison<1, "fcmpe", AArch64strict_fcmpe>;
defm FCMP  : FPComparison<0, "fcmp", AArch64any_fcmp>;

//===----------------------------------------------------------------------===//
// Floating point conditional comparison instructions.
//===----------------------------------------------------------------------===//

defm FCCMPE : FPCondComparison<1, "fccmpe">;
defm FCCMP  : FPCondComparison<0, "fccmp", AArch64fccmp>;

//===----------------------------------------------------------------------===//
// Floating point conditional select instruction.
//===----------------------------------------------------------------------===//

defm FCSEL : FPCondSelect<"fcsel">;

// CSEL instructions providing f128 types need to be handled by a
// pseudo-instruction since the eventual code will need to introduce basic
// blocks and control flow.
def F128CSEL : Pseudo<(outs FPR128:$Rd),
                      (ins FPR128:$Rn, FPR128:$Rm, ccode:$cond),
                      [(set (f128 FPR128:$Rd),
                            (AArch64csel FPR128:$Rn, FPR128:$Rm,
                                         (i32 imm:$cond), NZCV))]> {
  let Uses = [NZCV];
  let usesCustomInserter = 1;
  let hasNoSchedulingInfo = 1;
}

//===----------------------------------------------------------------------===//
// Instructions used for emitting unwind opcodes on ARM64 Windows.
//===----------------------------------------------------------------------===//
let isPseudo = 1 in {
  def SEH_StackAlloc : Pseudo<(outs), (ins i32imm:$size), []>, Sched<[]>;
  def SEH_SaveFPLR : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFPLR_X : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveReg_X : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveRegP : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveRegP_X : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFReg_X : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFRegP : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFRegP_X : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SetFP : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_AddFP : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
  def SEH_Nop : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_PrologEnd : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_EpilogStart : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_EpilogEnd : Pseudo<(outs), (ins), []>, Sched<[]>;
}

// Pseudo instructions for Windows EH
//===----------------------------------------------------------------------===//
let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
    isCodeGenOnly = 1, isReturn = 1, isEHScopeReturn = 1, isPseudo = 1 in {
   def CLEANUPRET : Pseudo<(outs), (ins), [(cleanupret)]>, Sched<[]>;
   let usesCustomInserter = 1 in
     def CATCHRET : Pseudo<(outs), (ins am_brcond:$dst, am_brcond:$src), [(catchret bb:$dst, bb:$src)]>,
                    Sched<[]>;
}

// Pseudo instructions for homogeneous prolog/epilog
let isPseudo = 1 in {
  // Save CSRs in order, {FPOffset}
  def HOM_Prolog : Pseudo<(outs), (ins variable_ops), []>, Sched<[]>;
  // Restore CSRs in order
  def HOM_Epilog : Pseudo<(outs), (ins variable_ops), []>, Sched<[]>;
}
//===----------------------------------------------------------------------===//
// Floating point immediate move.
//===----------------------------------------------------------------------===//

let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
defm FMOV : FPMoveImmediate<"fmov">;
}

//===----------------------------------------------------------------------===//
// Advanced SIMD two vector instructions.
//===----------------------------------------------------------------------===//

defm UABDL   : SIMDLongThreeVectorBHSabdl<1, 0b0111, "uabdl",
                                          AArch64uabd>;
// Match UABDL in log2-shuffle patterns.
def : Pat<(abs (v8i16 (sub (zext (v8i8 V64:$opA)),
                           (zext (v8i8 V64:$opB))))),
          (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
               (v8i16 (add (sub (zext (v8i8 V64:$opA)),
                                (zext (v8i8 V64:$opB))),
                           (AArch64vashr v8i16:$src, (i32 15))))),
          (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
def : Pat<(abs (v8i16 (sub (zext (extract_high_v16i8 V128:$opA)),
                           (zext (extract_high_v16i8 V128:$opB))))),
          (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
               (v8i16 (add (sub (zext (extract_high_v16i8 V128:$opA)),
                                (zext (extract_high_v16i8 V128:$opB))),
                           (AArch64vashr v8i16:$src, (i32 15))))),
          (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
def : Pat<(abs (v4i32 (sub (zext (v4i16 V64:$opA)),
                           (zext (v4i16 V64:$opB))))),
          (UABDLv4i16_v4i32 V64:$opA, V64:$opB)>;
def : Pat<(abs (v4i32 (sub (zext (extract_high_v8i16 V128:$opA)),
                           (zext (extract_high_v8i16 V128:$opB))))),
          (UABDLv8i16_v4i32 V128:$opA, V128:$opB)>;
def : Pat<(abs (v2i64 (sub (zext (v2i32 V64:$opA)),
                           (zext (v2i32 V64:$opB))))),
          (UABDLv2i32_v2i64 V64:$opA, V64:$opB)>;
def : Pat<(abs (v2i64 (sub (zext (extract_high_v4i32 V128:$opA)),
                           (zext (extract_high_v4i32 V128:$opB))))),
          (UABDLv4i32_v2i64 V128:$opA, V128:$opB)>;

defm ABS    : SIMDTwoVectorBHSD<0, 0b01011, "abs", abs>;
defm CLS    : SIMDTwoVectorBHS<0, 0b00100, "cls", int_aarch64_neon_cls>;
defm CLZ    : SIMDTwoVectorBHS<1, 0b00100, "clz", ctlz>;
defm CMEQ   : SIMDCmpTwoVector<0, 0b01001, "cmeq", AArch64cmeqz>;
defm CMGE   : SIMDCmpTwoVector<1, 0b01000, "cmge", AArch64cmgez>;
defm CMGT   : SIMDCmpTwoVector<0, 0b01000, "cmgt", AArch64cmgtz>;
defm CMLE   : SIMDCmpTwoVector<1, 0b01001, "cmle", AArch64cmlez>;
defm CMLT   : SIMDCmpTwoVector<0, 0b01010, "cmlt", AArch64cmltz>;
defm CNT    : SIMDTwoVectorB<0, 0b00, 0b00101, "cnt", ctpop>;
defm FABS   : SIMDTwoVectorFP<0, 1, 0b01111, "fabs", fabs>;

def : Pat<(v8i8 (AArch64vashr (v8i8 V64:$Rn), (i32 7))),
          (CMLTv8i8rz V64:$Rn)>;
def : Pat<(v4i16 (AArch64vashr (v4i16 V64:$Rn), (i32 15))),
          (CMLTv4i16rz V64:$Rn)>;
def : Pat<(v2i32 (AArch64vashr (v2i32 V64:$Rn), (i32 31))),
          (CMLTv2i32rz V64:$Rn)>;
def : Pat<(v16i8 (AArch64vashr (v16i8 V128:$Rn), (i32 7))),
          (CMLTv16i8rz V128:$Rn)>;
def : Pat<(v8i16 (AArch64vashr (v8i16 V128:$Rn), (i32 15))),
          (CMLTv8i16rz V128:$Rn)>;
def : Pat<(v4i32 (AArch64vashr (v4i32 V128:$Rn), (i32 31))),
          (CMLTv4i32rz V128:$Rn)>;
def : Pat<(v2i64 (AArch64vashr (v2i64 V128:$Rn), (i32 63))),
          (CMLTv2i64rz V128:$Rn)>;
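// The patterns above rely on a simple identity: an arithmetic shift right by
// (element width - 1) replicates the sign bit across the lane, producing
// all-ones for negative elements and all-zeros otherwise, which is exactly
// the mask cmlt-against-zero computes; e.g. sshr v0.4s, v0.4s, #31 is
// equivalent to cmlt v0.4s, v0.4s, #0.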
"fcmle", AArch64fcmlez>; 4207defm FCMLT : SIMDFPCmpTwoVector<0, 1, 0b01110, "fcmlt", AArch64fcmltz>; 4208defm FCVTAS : SIMDTwoVectorFPToInt<0,0,0b11100, "fcvtas",int_aarch64_neon_fcvtas>; 4209defm FCVTAU : SIMDTwoVectorFPToInt<1,0,0b11100, "fcvtau",int_aarch64_neon_fcvtau>; 4210defm FCVTL : SIMDFPWidenTwoVector<0, 0, 0b10111, "fcvtl">; 4211def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (v4i16 V64:$Rn))), 4212 (FCVTLv4i16 V64:$Rn)>; 4213def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (extract_subvector (v8i16 V128:$Rn), 4214 (i64 4)))), 4215 (FCVTLv8i16 V128:$Rn)>; 4216def : Pat<(v2f64 (fpextend (v2f32 V64:$Rn))), (FCVTLv2i32 V64:$Rn)>; 4217 4218def : Pat<(v4f32 (fpextend (v4f16 V64:$Rn))), (FCVTLv4i16 V64:$Rn)>; 4219 4220defm FCVTMS : SIMDTwoVectorFPToInt<0,0,0b11011, "fcvtms",int_aarch64_neon_fcvtms>; 4221defm FCVTMU : SIMDTwoVectorFPToInt<1,0,0b11011, "fcvtmu",int_aarch64_neon_fcvtmu>; 4222defm FCVTNS : SIMDTwoVectorFPToInt<0,0,0b11010, "fcvtns",int_aarch64_neon_fcvtns>; 4223defm FCVTNU : SIMDTwoVectorFPToInt<1,0,0b11010, "fcvtnu",int_aarch64_neon_fcvtnu>; 4224defm FCVTN : SIMDFPNarrowTwoVector<0, 0, 0b10110, "fcvtn">; 4225def : Pat<(v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn))), 4226 (FCVTNv4i16 V128:$Rn)>; 4227def : Pat<(concat_vectors V64:$Rd, 4228 (v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn)))), 4229 (FCVTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>; 4230def : Pat<(v2f32 (fpround (v2f64 V128:$Rn))), (FCVTNv2i32 V128:$Rn)>; 4231def : Pat<(v4f16 (fpround (v4f32 V128:$Rn))), (FCVTNv4i16 V128:$Rn)>; 4232def : Pat<(concat_vectors V64:$Rd, (v2f32 (fpround (v2f64 V128:$Rn)))), 4233 (FCVTNv4i32 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>; 4234defm FCVTPS : SIMDTwoVectorFPToInt<0,1,0b11010, "fcvtps",int_aarch64_neon_fcvtps>; 4235defm FCVTPU : SIMDTwoVectorFPToInt<1,1,0b11010, "fcvtpu",int_aarch64_neon_fcvtpu>; 4236defm FCVTXN : SIMDFPInexactCvtTwoVector<1, 0, 0b10110, "fcvtxn", 4237 int_aarch64_neon_fcvtxn>; 4238defm FCVTZS : SIMDTwoVectorFPToInt<0, 1, 0b11011, "fcvtzs", fp_to_sint>; 4239defm FCVTZU : SIMDTwoVectorFPToInt<1, 1, 0b11011, "fcvtzu", fp_to_uint>; 4240 4241// AArch64's FCVT instructions saturate when out of range. 
multiclass SIMDTwoVectorFPToIntSatPats<SDNode to_int_sat, string INST> {
  def : Pat<(v4i16 (to_int_sat v4f16:$Rn, i16)),
            (!cast<Instruction>(INST # v4f16) v4f16:$Rn)>;
  def : Pat<(v8i16 (to_int_sat v8f16:$Rn, i16)),
            (!cast<Instruction>(INST # v8f16) v8f16:$Rn)>;
  def : Pat<(v2i32 (to_int_sat v2f32:$Rn, i32)),
            (!cast<Instruction>(INST # v2f32) v2f32:$Rn)>;
  def : Pat<(v4i32 (to_int_sat v4f32:$Rn, i32)),
            (!cast<Instruction>(INST # v4f32) v4f32:$Rn)>;
  def : Pat<(v2i64 (to_int_sat v2f64:$Rn, i64)),
            (!cast<Instruction>(INST # v2f64) v2f64:$Rn)>;
}
defm : SIMDTwoVectorFPToIntSatPats<fp_to_sint_sat, "FCVTZS">;
defm : SIMDTwoVectorFPToIntSatPats<fp_to_uint_sat, "FCVTZU">;

def : Pat<(v4i16 (int_aarch64_neon_fcvtzs v4f16:$Rn)), (FCVTZSv4f16 $Rn)>;
def : Pat<(v8i16 (int_aarch64_neon_fcvtzs v8f16:$Rn)), (FCVTZSv8f16 $Rn)>;
def : Pat<(v2i32 (int_aarch64_neon_fcvtzs v2f32:$Rn)), (FCVTZSv2f32 $Rn)>;
def : Pat<(v4i32 (int_aarch64_neon_fcvtzs v4f32:$Rn)), (FCVTZSv4f32 $Rn)>;
def : Pat<(v2i64 (int_aarch64_neon_fcvtzs v2f64:$Rn)), (FCVTZSv2f64 $Rn)>;

def : Pat<(v4i16 (int_aarch64_neon_fcvtzu v4f16:$Rn)), (FCVTZUv4f16 $Rn)>;
def : Pat<(v8i16 (int_aarch64_neon_fcvtzu v8f16:$Rn)), (FCVTZUv8f16 $Rn)>;
def : Pat<(v2i32 (int_aarch64_neon_fcvtzu v2f32:$Rn)), (FCVTZUv2f32 $Rn)>;
def : Pat<(v4i32 (int_aarch64_neon_fcvtzu v4f32:$Rn)), (FCVTZUv4f32 $Rn)>;
def : Pat<(v2i64 (int_aarch64_neon_fcvtzu v2f64:$Rn)), (FCVTZUv2f64 $Rn)>;

defm FNEG   : SIMDTwoVectorFP<1, 1, 0b01111, "fneg", fneg>;
defm FRECPE : SIMDTwoVectorFP<0, 1, 0b11101, "frecpe", int_aarch64_neon_frecpe>;
defm FRINTA : SIMDTwoVectorFP<1, 0, 0b11000, "frinta", fround>;
defm FRINTI : SIMDTwoVectorFP<1, 1, 0b11001, "frinti", fnearbyint>;
defm FRINTM : SIMDTwoVectorFP<0, 0, 0b11001, "frintm", ffloor>;
defm FRINTN : SIMDTwoVectorFP<0, 0, 0b11000, "frintn", froundeven>;
defm FRINTP : SIMDTwoVectorFP<0, 1, 0b11000, "frintp", fceil>;
defm FRINTX : SIMDTwoVectorFP<1, 0, 0b11001, "frintx", frint>;
defm FRINTZ : SIMDTwoVectorFP<0, 1, 0b11001, "frintz", ftrunc>;

let Predicates = [HasFRInt3264] in {
  defm FRINT32Z : FRIntNNTVector<0, 0, "frint32z", int_aarch64_neon_frint32z>;
  defm FRINT64Z : FRIntNNTVector<0, 1, "frint64z", int_aarch64_neon_frint64z>;
  defm FRINT32X : FRIntNNTVector<1, 0, "frint32x", int_aarch64_neon_frint32x>;
  defm FRINT64X : FRIntNNTVector<1, 1, "frint64x", int_aarch64_neon_frint64x>;
} // HasFRInt3264

defm FRSQRTE: SIMDTwoVectorFP<1, 1, 0b11101, "frsqrte", int_aarch64_neon_frsqrte>;
defm FSQRT  : SIMDTwoVectorFP<1, 1, 0b11111, "fsqrt", fsqrt>;
defm NEG    : SIMDTwoVectorBHSD<1, 0b01011, "neg",
                                UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
defm NOT    : SIMDTwoVectorB<1, 0b00, 0b00101, "not", vnot>;

// Aliases for MVN -> NOT.
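// mvn has no encoding of its own at the vector level; e.g.
// "mvn v0.8b, v1.8b" assembles to "not v0.8b, v1.8b".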
def : InstAlias<"mvn{ $Vd.8b, $Vn.8b|.8b $Vd, $Vn}",
                (NOTv8i8 V64:$Vd, V64:$Vn)>;
def : InstAlias<"mvn{ $Vd.16b, $Vn.16b|.16b $Vd, $Vn}",
                (NOTv16i8 V128:$Vd, V128:$Vn)>;

def : Pat<(vnot (v4i16 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
def : Pat<(vnot (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
def : Pat<(vnot (v2i32 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
def : Pat<(vnot (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
def : Pat<(vnot (v1i64 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
def : Pat<(vnot (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>;

defm RBIT   : SIMDTwoVectorB<1, 0b01, 0b00101, "rbit", bitreverse>;
defm REV16  : SIMDTwoVectorB<0, 0b00, 0b00001, "rev16", AArch64rev16>;
defm REV32  : SIMDTwoVectorBH<1, 0b00000, "rev32", AArch64rev32>;
defm REV64  : SIMDTwoVectorBHS<0, 0b00000, "rev64", AArch64rev64>;
defm SADALP : SIMDLongTwoVectorTied<0, 0b00110, "sadalp",
       BinOpFrag<(add node:$LHS, (int_aarch64_neon_saddlp node:$RHS))> >;
defm SADDLP : SIMDLongTwoVector<0, 0b00010, "saddlp", int_aarch64_neon_saddlp>;
defm SCVTF  : SIMDTwoVectorIntToFP<0, 0, 0b11101, "scvtf", sint_to_fp>;
defm SHLL   : SIMDVectorLShiftLongBySizeBHS;
defm SQABS  : SIMDTwoVectorBHSD<0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
defm SQNEG  : SIMDTwoVectorBHSD<1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
defm SQXTN  : SIMDMixedTwoVector<0, 0b10100, "sqxtn", int_aarch64_neon_sqxtn>;
defm SQXTUN : SIMDMixedTwoVector<1, 0b10010, "sqxtun", int_aarch64_neon_sqxtun>;
defm SUQADD : SIMDTwoVectorBHSDTied<0, 0b00011, "suqadd",int_aarch64_neon_suqadd>;
defm UADALP : SIMDLongTwoVectorTied<1, 0b00110, "uadalp",
       BinOpFrag<(add node:$LHS, (AArch64uaddlp node:$RHS))> >;
defm UADDLP : SIMDLongTwoVector<1, 0b00010, "uaddlp", AArch64uaddlp>;
defm UCVTF  : SIMDTwoVectorIntToFP<1, 0, 0b11101, "ucvtf", uint_to_fp>;
defm UQXTN  : SIMDMixedTwoVector<1, 0b10100, "uqxtn", int_aarch64_neon_uqxtn>;
defm URECPE : SIMDTwoVectorS<0, 1, 0b11100, "urecpe", int_aarch64_neon_urecpe>;
defm URSQRTE: SIMDTwoVectorS<1, 1, 0b11100, "ursqrte", int_aarch64_neon_ursqrte>;
defm USQADD : SIMDTwoVectorBHSDTied<1, 0b00011, "usqadd",int_aarch64_neon_usqadd>;
defm XTN    : SIMDMixedTwoVector<0, 0b10010, "xtn", trunc>;

def : Pat<(v4f16  (AArch64rev32 V64:$Rn)),  (REV32v4i16 V64:$Rn)>;
def : Pat<(v4f16  (AArch64rev64 V64:$Rn)),  (REV64v4i16 V64:$Rn)>;
def : Pat<(v4bf16 (AArch64rev32 V64:$Rn)),  (REV32v4i16 V64:$Rn)>;
def : Pat<(v4bf16 (AArch64rev64 V64:$Rn)),  (REV64v4i16 V64:$Rn)>;
def : Pat<(v8f16  (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>;
def : Pat<(v8f16  (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>;
def : Pat<(v8bf16 (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>;
def : Pat<(v8bf16 (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>;
def : Pat<(v2f32  (AArch64rev64 V64:$Rn)),  (REV64v2i32 V64:$Rn)>;
def : Pat<(v4f32  (AArch64rev64 V128:$Rn)), (REV64v4i32 V128:$Rn)>;

// Patterns for vector long shift (by element width). These need to match all
// three of zext, sext and anyext so it's easier to pull the patterns out of the
// definition.
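// For reference, shll shifts each extended element left by exactly the source
// element width, so e.g. shll v0.8h, v1.8b, #8 computes (extend(b) << 8) per
// lane; all three extension kinds produce the same bits because the
// shifted-in low half is zero either way.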
multiclass SIMDVectorLShiftLongBySizeBHSPats<SDPatternOperator ext> {
  def : Pat<(AArch64vshl (v8i16 (ext (v8i8 V64:$Rn))), (i32 8)),
            (SHLLv8i8 V64:$Rn)>;
  def : Pat<(AArch64vshl (v8i16 (ext (extract_high_v16i8 V128:$Rn))), (i32 8)),
            (SHLLv16i8 V128:$Rn)>;
  def : Pat<(AArch64vshl (v4i32 (ext (v4i16 V64:$Rn))), (i32 16)),
            (SHLLv4i16 V64:$Rn)>;
  def : Pat<(AArch64vshl (v4i32 (ext (extract_high_v8i16 V128:$Rn))), (i32 16)),
            (SHLLv8i16 V128:$Rn)>;
  def : Pat<(AArch64vshl (v2i64 (ext (v2i32 V64:$Rn))), (i32 32)),
            (SHLLv2i32 V64:$Rn)>;
  def : Pat<(AArch64vshl (v2i64 (ext (extract_high_v4i32 V128:$Rn))), (i32 32)),
            (SHLLv4i32 V128:$Rn)>;
}

defm : SIMDVectorLShiftLongBySizeBHSPats<anyext>;
defm : SIMDVectorLShiftLongBySizeBHSPats<zext>;
defm : SIMDVectorLShiftLongBySizeBHSPats<sext>;

// Constant vector values, used in the S/UQXTN patterns below.
def VImmFF:   PatLeaf<(AArch64NvCast (v2i64 (AArch64movi_edit (i32 85))))>;
def VImmFFFF: PatLeaf<(AArch64NvCast (v2i64 (AArch64movi_edit (i32 51))))>;
def VImm7F:   PatLeaf<(AArch64movi_shift (i32 127), (i32 0))>;
def VImm80:   PatLeaf<(AArch64mvni_shift (i32 127), (i32 0))>;
def VImm7FFF: PatLeaf<(AArch64movi_msl (i32 127), (i32 264))>;
def VImm8000: PatLeaf<(AArch64mvni_msl (i32 127), (i32 264))>;

// trunc(umin(X, 255)) -> UQXTN v8i8
def : Pat<(v8i8 (trunc (umin (v8i16 V128:$Vn), (v8i16 VImmFF)))),
          (UQXTNv8i8 V128:$Vn)>;
// trunc(umin(X, 65535)) -> UQXTN v4i16
def : Pat<(v4i16 (trunc (umin (v4i32 V128:$Vn), (v4i32 VImmFFFF)))),
          (UQXTNv4i16 V128:$Vn)>;
// trunc(smin(smax(X, -128), 127)) -> SQXTN
//  with reversed min/max
def : Pat<(v8i8 (trunc (smin (smax (v8i16 V128:$Vn), (v8i16 VImm80)),
                             (v8i16 VImm7F)))),
          (SQXTNv8i8 V128:$Vn)>;
def : Pat<(v8i8 (trunc (smax (smin (v8i16 V128:$Vn), (v8i16 VImm7F)),
                             (v8i16 VImm80)))),
          (SQXTNv8i8 V128:$Vn)>;
// trunc(smin(smax(X, -32768), 32767)) -> SQXTN
//  with reversed min/max
def : Pat<(v4i16 (trunc (smin (smax (v4i32 V128:$Vn), (v4i32 VImm8000)),
                              (v4i32 VImm7FFF)))),
          (SQXTNv4i16 V128:$Vn)>;
def : Pat<(v4i16 (trunc (smax (smin (v4i32 V128:$Vn), (v4i32 VImm7FFF)),
                              (v4i32 VImm8000)))),
          (SQXTNv4i16 V128:$Vn)>;

// concat_vectors(Vd, trunc(smin(smax Vm, -128), 127)) ~> SQXTN2(Vd, Vn)
//  with reversed min/max
def : Pat<(v16i8 (concat_vectors
                 (v8i8 V64:$Vd),
                 (v8i8 (trunc (smin (smax (v8i16 V128:$Vn), (v8i16 VImm80)),
                                    (v8i16 VImm7F)))))),
          (SQXTNv16i8 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn)>;
def : Pat<(v16i8 (concat_vectors
                 (v8i8 V64:$Vd),
                 (v8i8 (trunc (smax (smin (v8i16 V128:$Vn), (v8i16 VImm7F)),
                                    (v8i16 VImm80)))))),
          (SQXTNv16i8 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn)>;

// concat_vectors(Vd, trunc(smin(smax Vm, -32768), 32767)) ~> SQXTN2(Vd, Vn)
//  with reversed min/max
def : Pat<(v8i16 (concat_vectors
                 (v4i16 V64:$Vd),
                 (v4i16 (trunc (smin (smax (v4i32 V128:$Vn), (v4i32 VImm8000)),
                                     (v4i32 VImm7FFF)))))),
          (SQXTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn)>;
def : Pat<(v8i16 (concat_vectors
                 (v4i16 V64:$Vd),
                 (v4i16 (trunc (smax (smin (v4i32 V128:$Vn), (v4i32 VImm7FFF)),
                                     (v4i32 VImm8000)))))),
          (SQXTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn)>;
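// Roughly speaking, these folds turn the idiomatic clamp-then-narrow C
// sequence, e.g. (uint8_t)(x > 255 ? 255 : x) applied lane-wise to a v8i16,
// into a single uqxtn (and the signed clamp to [-128, 127] into sqxtn)
// instead of separate min/max instructions followed by xtn.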
//===----------------------------------------------------------------------===//
// Advanced SIMD three vector instructions.
//===----------------------------------------------------------------------===//

defm ADD     : SIMDThreeSameVector<0, 0b10000, "add", add>;
defm ADDP    : SIMDThreeSameVector<0, 0b10111, "addp", int_aarch64_neon_addp>;
defm CMEQ    : SIMDThreeSameVector<1, 0b10001, "cmeq", AArch64cmeq>;
defm CMGE    : SIMDThreeSameVector<0, 0b00111, "cmge", AArch64cmge>;
defm CMGT    : SIMDThreeSameVector<0, 0b00110, "cmgt", AArch64cmgt>;
defm CMHI    : SIMDThreeSameVector<1, 0b00110, "cmhi", AArch64cmhi>;
defm CMHS    : SIMDThreeSameVector<1, 0b00111, "cmhs", AArch64cmhs>;
defm CMTST   : SIMDThreeSameVector<0, 0b10001, "cmtst", AArch64cmtst>;
foreach VT = [ v8i8, v16i8, v4i16, v8i16, v2i32, v4i32, v2i64 ] in {
def : Pat<(vnot (AArch64cmeqz VT:$Rn)), (!cast<Instruction>("CMTST"#VT) VT:$Rn, VT:$Rn)>;
}
defm FABD    : SIMDThreeSameVectorFP<1,1,0b010,"fabd", int_aarch64_neon_fabd>;
let Predicates = [HasNEON] in {
foreach VT = [ v2f32, v4f32, v2f64 ] in
def : Pat<(fabs (fsub VT:$Rn, VT:$Rm)), (!cast<Instruction>("FABD"#VT) VT:$Rn, VT:$Rm)>;
}
let Predicates = [HasNEON, HasFullFP16] in {
foreach VT = [ v4f16, v8f16 ] in
def : Pat<(fabs (fsub VT:$Rn, VT:$Rm)), (!cast<Instruction>("FABD"#VT) VT:$Rn, VT:$Rm)>;
}
defm FACGE   : SIMDThreeSameVectorFPCmp<1,0,0b101,"facge",int_aarch64_neon_facge>;
defm FACGT   : SIMDThreeSameVectorFPCmp<1,1,0b101,"facgt",int_aarch64_neon_facgt>;
defm FADDP   : SIMDThreeSameVectorFP<1,0,0b010,"faddp",int_aarch64_neon_faddp>;
defm FADD    : SIMDThreeSameVectorFP<0,0,0b010,"fadd", fadd>;
defm FCMEQ   : SIMDThreeSameVectorFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
defm FCMGE   : SIMDThreeSameVectorFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
defm FCMGT   : SIMDThreeSameVectorFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
defm FDIV    : SIMDThreeSameVectorFP<1,0,0b111,"fdiv", fdiv>;
defm FMAXNMP : SIMDThreeSameVectorFP<1,0,0b000,"fmaxnmp", int_aarch64_neon_fmaxnmp>;
defm FMAXNM  : SIMDThreeSameVectorFP<0,0,0b000,"fmaxnm", fmaxnum>;
defm FMAXP   : SIMDThreeSameVectorFP<1,0,0b110,"fmaxp", int_aarch64_neon_fmaxp>;
defm FMAX    : SIMDThreeSameVectorFP<0,0,0b110,"fmax", fmaximum>;
defm FMINNMP : SIMDThreeSameVectorFP<1,1,0b000,"fminnmp", int_aarch64_neon_fminnmp>;
defm FMINNM  : SIMDThreeSameVectorFP<0,1,0b000,"fminnm", fminnum>;
defm FMINP   : SIMDThreeSameVectorFP<1,1,0b110,"fminp", int_aarch64_neon_fminp>;
defm FMIN    : SIMDThreeSameVectorFP<0,1,0b110,"fmin", fminimum>;

// NOTE: The operands of the PatFrag are reordered on FMLA/FMLS because the
// instruction expects the addend first, while the fma intrinsic puts it last.
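// Concretely, fmla vd, vn, vm computes vd += vn * vm: the tied accumulator is
// the instruction's first (destination) operand, whereas ISD::FMA is
// (fma multiplicand, multiplier, addend), hence the $RHS/$MHS/$LHS swap in
// the fragments below.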
defm FMLA    : SIMDThreeSameVectorFPTied<0, 0, 0b001, "fmla",
               TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
defm FMLS    : SIMDThreeSameVectorFPTied<0, 1, 0b001, "fmls",
               TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;

defm FMULX   : SIMDThreeSameVectorFP<0,0,0b011,"fmulx", int_aarch64_neon_fmulx>;
defm FMUL    : SIMDThreeSameVectorFP<1,0,0b011,"fmul", fmul>;
defm FRECPS  : SIMDThreeSameVectorFP<0,0,0b111,"frecps", int_aarch64_neon_frecps>;
defm FRSQRTS : SIMDThreeSameVectorFP<0,1,0b111,"frsqrts", int_aarch64_neon_frsqrts>;
defm FSUB    : SIMDThreeSameVectorFP<0,1,0b010,"fsub", fsub>;

// MLA and MLS are generated in MachineCombine
defm MLA     : SIMDThreeSameVectorBHSTied<0, 0b10010, "mla", null_frag>;
defm MLS     : SIMDThreeSameVectorBHSTied<1, 0b10010, "mls", null_frag>;

defm MUL     : SIMDThreeSameVectorBHS<0, 0b10011, "mul", mul>;
defm PMUL    : SIMDThreeSameVectorB<1, 0b10011, "pmul", int_aarch64_neon_pmul>;
defm SABA    : SIMDThreeSameVectorBHSTied<0, 0b01111, "saba",
      TriOpFrag<(add node:$LHS, (AArch64sabd node:$MHS, node:$RHS))> >;
defm SABD    : SIMDThreeSameVectorBHS<0,0b01110,"sabd", AArch64sabd>;
defm SHADD   : SIMDThreeSameVectorBHS<0,0b00000,"shadd", AArch64shadd>;
defm SHSUB   : SIMDThreeSameVectorBHS<0,0b00100,"shsub", int_aarch64_neon_shsub>;
defm SMAXP   : SIMDThreeSameVectorBHS<0,0b10100,"smaxp", int_aarch64_neon_smaxp>;
defm SMAX    : SIMDThreeSameVectorBHS<0,0b01100,"smax", smax>;
defm SMINP   : SIMDThreeSameVectorBHS<0,0b10101,"sminp", int_aarch64_neon_sminp>;
defm SMIN    : SIMDThreeSameVectorBHS<0,0b01101,"smin", smin>;
defm SQADD   : SIMDThreeSameVector<0,0b00001,"sqadd", int_aarch64_neon_sqadd>;
defm SQDMULH : SIMDThreeSameVectorHS<0,0b10110,"sqdmulh",int_aarch64_neon_sqdmulh>;
defm SQRDMULH: SIMDThreeSameVectorHS<1,0b10110,"sqrdmulh",int_aarch64_neon_sqrdmulh>;
defm SQRSHL  : SIMDThreeSameVector<0,0b01011,"sqrshl", int_aarch64_neon_sqrshl>;
defm SQSHL   : SIMDThreeSameVector<0,0b01001,"sqshl", int_aarch64_neon_sqshl>;
defm SQSUB   : SIMDThreeSameVector<0,0b00101,"sqsub", int_aarch64_neon_sqsub>;
defm SRHADD  : SIMDThreeSameVectorBHS<0,0b00010,"srhadd", AArch64srhadd>;
defm SRSHL   : SIMDThreeSameVector<0,0b01010,"srshl", int_aarch64_neon_srshl>;
defm SSHL    : SIMDThreeSameVector<0,0b01000,"sshl", int_aarch64_neon_sshl>;
defm SUB     : SIMDThreeSameVector<1,0b10000,"sub", sub>;
defm UABA    : SIMDThreeSameVectorBHSTied<1, 0b01111, "uaba",
      TriOpFrag<(add node:$LHS, (AArch64uabd node:$MHS, node:$RHS))> >;
defm UABD    : SIMDThreeSameVectorBHS<1,0b01110,"uabd", AArch64uabd>;
defm UHADD   : SIMDThreeSameVectorBHS<1,0b00000,"uhadd", AArch64uhadd>;
defm UHSUB   : SIMDThreeSameVectorBHS<1,0b00100,"uhsub", int_aarch64_neon_uhsub>;
defm UMAXP   : SIMDThreeSameVectorBHS<1,0b10100,"umaxp", int_aarch64_neon_umaxp>;
defm UMAX    : SIMDThreeSameVectorBHS<1,0b01100,"umax", umax>;
defm UMINP   : SIMDThreeSameVectorBHS<1,0b10101,"uminp", int_aarch64_neon_uminp>;
defm UMIN    : SIMDThreeSameVectorBHS<1,0b01101,"umin", umin>;
defm UQADD   : SIMDThreeSameVector<1,0b00001,"uqadd", int_aarch64_neon_uqadd>;
defm UQRSHL  : SIMDThreeSameVector<1,0b01011,"uqrshl", int_aarch64_neon_uqrshl>;
defm UQSHL   : SIMDThreeSameVector<1,0b01001,"uqshl", int_aarch64_neon_uqshl>;
defm UQSUB   : SIMDThreeSameVector<1,0b00101,"uqsub", int_aarch64_neon_uqsub>;
defm URHADD  : SIMDThreeSameVectorBHS<1,0b00010,"urhadd", AArch64urhadd>;
defm URSHL   : SIMDThreeSameVector<1,0b01010,"urshl", int_aarch64_neon_urshl>;
defm USHL    : SIMDThreeSameVector<1,0b01000,"ushl", int_aarch64_neon_ushl>;
defm SQRDMLAH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10000,"sqrdmlah",
                                                  int_aarch64_neon_sqrdmlah>;
defm SQRDMLSH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10001,"sqrdmlsh",
                                                  int_aarch64_neon_sqrdmlsh>;

// Extra saturate patterns, other than the intrinsics matches above
defm : SIMDThreeSameVectorExtraPatterns<"SQADD", saddsat>;
defm : SIMDThreeSameVectorExtraPatterns<"UQADD", uaddsat>;
defm : SIMDThreeSameVectorExtraPatterns<"SQSUB", ssubsat>;
defm : SIMDThreeSameVectorExtraPatterns<"UQSUB", usubsat>;

defm AND : SIMDLogicalThreeVector<0, 0b00, "and", and>;
defm BIC : SIMDLogicalThreeVector<0, 0b01, "bic",
                                  BinOpFrag<(and node:$LHS, (vnot node:$RHS))> >;
defm EOR : SIMDLogicalThreeVector<1, 0b00, "eor", xor>;
defm ORN : SIMDLogicalThreeVector<0, 0b11, "orn",
                                  BinOpFrag<(or node:$LHS, (vnot node:$RHS))> >;
defm ORR : SIMDLogicalThreeVector<0, 0b10, "orr", or>;

// Pseudo bitwise select pattern BSP.
// It is expanded into BSL/BIT/BIF after register allocation.
defm BSP : SIMDLogicalThreeVectorPseudo<TriOpFrag<(or (and node:$LHS, node:$MHS),
                                                      (and (vnot node:$LHS), node:$RHS))>>;
defm BSL : SIMDLogicalThreeVectorTied<1, 0b01, "bsl">;
defm BIT : SIMDLogicalThreeVectorTied<1, 0b10, "bit", AArch64bit>;
defm BIF : SIMDLogicalThreeVectorTied<1, 0b11, "bif">;

def : Pat<(AArch64bsp (v8i8 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsp (v4i16 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsp (v2i32 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsp (v1i64 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;

def : Pat<(AArch64bsp (v16i8 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsp (v8i16 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsp (v4i32 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsp (v2i64 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;

def : InstAlias<"mov{\t$dst.16b, $src.16b|.16b\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 1>;
def : InstAlias<"mov{\t$dst.8h, $src.8h|.8h\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
def : InstAlias<"mov{\t$dst.4s, $src.4s|.4s\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
def : InstAlias<"mov{\t$dst.2d, $src.2d|.2d\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;

def : InstAlias<"mov{\t$dst.8b, $src.8b|.8b\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 1>;
def : InstAlias<"mov{\t$dst.4h, $src.4h|.4h\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
def : InstAlias<"mov{\t$dst.2s, $src.2s|.2s\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
def : InstAlias<"mov{\t$dst.1d, $src.1d|.1d\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
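// The "missing" vector compares below (cmls, cmlo, cmle, cmlt between two
// registers) have no encodings of their own; each alias simply swaps the
// source operands of the opposite comparison, e.g. cmls a, b, c assembles as
// cmhs a, c, b.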
"|cmls.16b\t$dst, $src1, $src2}", 4581 (CMHSv16i8 V128:$dst, V128:$src2, V128:$src1), 0>; 4582def : InstAlias<"{cmls\t$dst.4h, $src1.4h, $src2.4h" # 4583 "|cmls.4h\t$dst, $src1, $src2}", 4584 (CMHSv4i16 V64:$dst, V64:$src2, V64:$src1), 0>; 4585def : InstAlias<"{cmls\t$dst.8h, $src1.8h, $src2.8h" # 4586 "|cmls.8h\t$dst, $src1, $src2}", 4587 (CMHSv8i16 V128:$dst, V128:$src2, V128:$src1), 0>; 4588def : InstAlias<"{cmls\t$dst.2s, $src1.2s, $src2.2s" # 4589 "|cmls.2s\t$dst, $src1, $src2}", 4590 (CMHSv2i32 V64:$dst, V64:$src2, V64:$src1), 0>; 4591def : InstAlias<"{cmls\t$dst.4s, $src1.4s, $src2.4s" # 4592 "|cmls.4s\t$dst, $src1, $src2}", 4593 (CMHSv4i32 V128:$dst, V128:$src2, V128:$src1), 0>; 4594def : InstAlias<"{cmls\t$dst.2d, $src1.2d, $src2.2d" # 4595 "|cmls.2d\t$dst, $src1, $src2}", 4596 (CMHSv2i64 V128:$dst, V128:$src2, V128:$src1), 0>; 4597 4598def : InstAlias<"{cmlo\t$dst.8b, $src1.8b, $src2.8b" # 4599 "|cmlo.8b\t$dst, $src1, $src2}", 4600 (CMHIv8i8 V64:$dst, V64:$src2, V64:$src1), 0>; 4601def : InstAlias<"{cmlo\t$dst.16b, $src1.16b, $src2.16b" # 4602 "|cmlo.16b\t$dst, $src1, $src2}", 4603 (CMHIv16i8 V128:$dst, V128:$src2, V128:$src1), 0>; 4604def : InstAlias<"{cmlo\t$dst.4h, $src1.4h, $src2.4h" # 4605 "|cmlo.4h\t$dst, $src1, $src2}", 4606 (CMHIv4i16 V64:$dst, V64:$src2, V64:$src1), 0>; 4607def : InstAlias<"{cmlo\t$dst.8h, $src1.8h, $src2.8h" # 4608 "|cmlo.8h\t$dst, $src1, $src2}", 4609 (CMHIv8i16 V128:$dst, V128:$src2, V128:$src1), 0>; 4610def : InstAlias<"{cmlo\t$dst.2s, $src1.2s, $src2.2s" # 4611 "|cmlo.2s\t$dst, $src1, $src2}", 4612 (CMHIv2i32 V64:$dst, V64:$src2, V64:$src1), 0>; 4613def : InstAlias<"{cmlo\t$dst.4s, $src1.4s, $src2.4s" # 4614 "|cmlo.4s\t$dst, $src1, $src2}", 4615 (CMHIv4i32 V128:$dst, V128:$src2, V128:$src1), 0>; 4616def : InstAlias<"{cmlo\t$dst.2d, $src1.2d, $src2.2d" # 4617 "|cmlo.2d\t$dst, $src1, $src2}", 4618 (CMHIv2i64 V128:$dst, V128:$src2, V128:$src1), 0>; 4619 4620def : InstAlias<"{cmle\t$dst.8b, $src1.8b, $src2.8b" # 4621 "|cmle.8b\t$dst, $src1, $src2}", 4622 (CMGEv8i8 V64:$dst, V64:$src2, V64:$src1), 0>; 4623def : InstAlias<"{cmle\t$dst.16b, $src1.16b, $src2.16b" # 4624 "|cmle.16b\t$dst, $src1, $src2}", 4625 (CMGEv16i8 V128:$dst, V128:$src2, V128:$src1), 0>; 4626def : InstAlias<"{cmle\t$dst.4h, $src1.4h, $src2.4h" # 4627 "|cmle.4h\t$dst, $src1, $src2}", 4628 (CMGEv4i16 V64:$dst, V64:$src2, V64:$src1), 0>; 4629def : InstAlias<"{cmle\t$dst.8h, $src1.8h, $src2.8h" # 4630 "|cmle.8h\t$dst, $src1, $src2}", 4631 (CMGEv8i16 V128:$dst, V128:$src2, V128:$src1), 0>; 4632def : InstAlias<"{cmle\t$dst.2s, $src1.2s, $src2.2s" # 4633 "|cmle.2s\t$dst, $src1, $src2}", 4634 (CMGEv2i32 V64:$dst, V64:$src2, V64:$src1), 0>; 4635def : InstAlias<"{cmle\t$dst.4s, $src1.4s, $src2.4s" # 4636 "|cmle.4s\t$dst, $src1, $src2}", 4637 (CMGEv4i32 V128:$dst, V128:$src2, V128:$src1), 0>; 4638def : InstAlias<"{cmle\t$dst.2d, $src1.2d, $src2.2d" # 4639 "|cmle.2d\t$dst, $src1, $src2}", 4640 (CMGEv2i64 V128:$dst, V128:$src2, V128:$src1), 0>; 4641 4642def : InstAlias<"{cmlt\t$dst.8b, $src1.8b, $src2.8b" # 4643 "|cmlt.8b\t$dst, $src1, $src2}", 4644 (CMGTv8i8 V64:$dst, V64:$src2, V64:$src1), 0>; 4645def : InstAlias<"{cmlt\t$dst.16b, $src1.16b, $src2.16b" # 4646 "|cmlt.16b\t$dst, $src1, $src2}", 4647 (CMGTv16i8 V128:$dst, V128:$src2, V128:$src1), 0>; 4648def : InstAlias<"{cmlt\t$dst.4h, $src1.4h, $src2.4h" # 4649 "|cmlt.4h\t$dst, $src1, $src2}", 4650 (CMGTv4i16 V64:$dst, V64:$src2, V64:$src1), 0>; 4651def : InstAlias<"{cmlt\t$dst.8h, $src1.8h, $src2.8h" # 4652 "|cmlt.8h\t$dst, $src1, $src2}", 4653 
def : InstAlias<"{cmlt\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmlt.2s\t$dst, $src1, $src2}",
                (CMGTv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmlt.4s\t$dst, $src1, $src2}",
                (CMGTv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmlt.2d\t$dst, $src1, $src2}",
                (CMGTv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{fcmle\t$dst.4h, $src1.4h, $src2.4h" #
                "|fcmle.4h\t$dst, $src1, $src2}",
                (FCMGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.8h, $src1.8h, $src2.8h" #
                "|fcmle.8h\t$dst, $src1, $src2}",
                (FCMGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{fcmle\t$dst.2s, $src1.2s, $src2.2s" #
                "|fcmle.2s\t$dst, $src1, $src2}",
                (FCMGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.4s, $src1.4s, $src2.4s" #
                "|fcmle.4s\t$dst, $src1, $src2}",
                (FCMGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.2d, $src1.2d, $src2.2d" #
                "|fcmle.2d\t$dst, $src1, $src2}",
                (FCMGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{fcmlt\t$dst.4h, $src1.4h, $src2.4h" #
                "|fcmlt.4h\t$dst, $src1, $src2}",
                (FCMGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.8h, $src1.8h, $src2.8h" #
                "|fcmlt.8h\t$dst, $src1, $src2}",
                (FCMGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{fcmlt\t$dst.2s, $src1.2s, $src2.2s" #
                "|fcmlt.2s\t$dst, $src1, $src2}",
                (FCMGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.4s, $src1.4s, $src2.4s" #
                "|fcmlt.4s\t$dst, $src1, $src2}",
                (FCMGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.2d, $src1.2d, $src2.2d" #
                "|fcmlt.2d\t$dst, $src1, $src2}",
                (FCMGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{facle\t$dst.4h, $src1.4h, $src2.4h" #
                "|facle.4h\t$dst, $src1, $src2}",
                (FACGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{facle\t$dst.8h, $src1.8h, $src2.8h" #
                "|facle.8h\t$dst, $src1, $src2}",
                (FACGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{facle\t$dst.2s, $src1.2s, $src2.2s" #
                "|facle.2s\t$dst, $src1, $src2}",
                (FACGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{facle\t$dst.4s, $src1.4s, $src2.4s" #
                "|facle.4s\t$dst, $src1, $src2}",
                (FACGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{facle\t$dst.2d, $src1.2d, $src2.2d" #
                "|facle.2d\t$dst, $src1, $src2}",
                (FACGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{faclt\t$dst.4h, $src1.4h, $src2.4h" #
                "|faclt.4h\t$dst, $src1, $src2}",
                (FACGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{faclt\t$dst.8h, $src1.8h, $src2.8h" #
                "|faclt.8h\t$dst, $src1, $src2}",
                (FACGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{faclt\t$dst.2s, $src1.2s, $src2.2s" #
                "|faclt.2s\t$dst, $src1, $src2}",
                (FACGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{faclt\t$dst.4s, $src1.4s, $src2.4s" #
                "|faclt.4s\t$dst, $src1, $src2}",
                (FACGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{faclt\t$dst.2d, $src1.2d, $src2.2d" #
                "|faclt.2d\t$dst, $src1, $src2}",
                (FACGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

//===----------------------------------------------------------------------===//
// Advanced SIMD three scalar instructions.
//===----------------------------------------------------------------------===//

defm ADD      : SIMDThreeScalarD<0, 0b10000, "add", add>;
defm CMEQ     : SIMDThreeScalarD<1, 0b10001, "cmeq", AArch64cmeq>;
defm CMGE     : SIMDThreeScalarD<0, 0b00111, "cmge", AArch64cmge>;
defm CMGT     : SIMDThreeScalarD<0, 0b00110, "cmgt", AArch64cmgt>;
defm CMHI     : SIMDThreeScalarD<1, 0b00110, "cmhi", AArch64cmhi>;
defm CMHS     : SIMDThreeScalarD<1, 0b00111, "cmhs", AArch64cmhs>;
defm CMTST    : SIMDThreeScalarD<0, 0b10001, "cmtst", AArch64cmtst>;
defm FABD     : SIMDFPThreeScalar<1, 1, 0b010, "fabd", int_aarch64_sisd_fabd>;
def : Pat<(v1f64 (int_aarch64_neon_fabd (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FABD64 FPR64:$Rn, FPR64:$Rm)>;
let Predicates = [HasFullFP16] in {
def : Pat<(fabs (fsub f16:$Rn, f16:$Rm)), (FABD16 f16:$Rn, f16:$Rm)>;
}
def : Pat<(fabs (fsub f32:$Rn, f32:$Rm)), (FABD32 f32:$Rn, f32:$Rm)>;
def : Pat<(fabs (fsub f64:$Rn, f64:$Rm)), (FABD64 f64:$Rn, f64:$Rm)>;
defm FACGE    : SIMDThreeScalarFPCmp<1, 0, 0b101, "facge",
                                     int_aarch64_neon_facge>;
defm FACGT    : SIMDThreeScalarFPCmp<1, 1, 0b101, "facgt",
                                     int_aarch64_neon_facgt>;
defm FCMEQ    : SIMDThreeScalarFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
defm FCMGE    : SIMDThreeScalarFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
defm FCMGT    : SIMDThreeScalarFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
defm FMULX    : SIMDFPThreeScalar<0, 0, 0b011, "fmulx", int_aarch64_neon_fmulx, HasNEONorStreamingSVE>;
defm FRECPS   : SIMDFPThreeScalar<0, 0, 0b111, "frecps", int_aarch64_neon_frecps, HasNEONorStreamingSVE>;
defm FRSQRTS  : SIMDFPThreeScalar<0, 1, 0b111, "frsqrts", int_aarch64_neon_frsqrts, HasNEONorStreamingSVE>;
defm SQADD    : SIMDThreeScalarBHSD<0, 0b00001, "sqadd", int_aarch64_neon_sqadd>;
defm SQDMULH  : SIMDThreeScalarHS< 0, 0b10110, "sqdmulh", int_aarch64_neon_sqdmulh>;
defm SQRDMULH : SIMDThreeScalarHS< 1, 0b10110, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
defm SQRSHL   : SIMDThreeScalarBHSD<0, 0b01011, "sqrshl",int_aarch64_neon_sqrshl>;
defm SQSHL    : SIMDThreeScalarBHSD<0, 0b01001, "sqshl", int_aarch64_neon_sqshl>;
defm SQSUB    : SIMDThreeScalarBHSD<0, 0b00101, "sqsub", int_aarch64_neon_sqsub>;
defm SRSHL    : SIMDThreeScalarD< 0, 0b01010, "srshl", int_aarch64_neon_srshl>;
defm SSHL     : SIMDThreeScalarD< 0, 0b01000, "sshl", int_aarch64_neon_sshl>;
defm SUB      : SIMDThreeScalarD< 1, 0b10000, "sub", sub>;
defm UQADD    : SIMDThreeScalarBHSD<1, 0b00001, "uqadd", int_aarch64_neon_uqadd>;
defm UQRSHL   : SIMDThreeScalarBHSD<1, 0b01011, "uqrshl",int_aarch64_neon_uqrshl>;
defm UQSHL    : SIMDThreeScalarBHSD<1, 0b01001, "uqshl", int_aarch64_neon_uqshl>;
defm UQSUB    : SIMDThreeScalarBHSD<1, 0b00101, "uqsub", int_aarch64_neon_uqsub>;
defm URSHL    : SIMDThreeScalarD< 1, 0b01010, "urshl", int_aarch64_neon_urshl>;
defm USHL     : SIMDThreeScalarD< 1, 0b01000, "ushl", int_aarch64_neon_ushl>;
let Predicates = [HasRDM] in {
  defm SQRDMLAH : SIMDThreeScalarHSTied<1, 0, 0b10000, "sqrdmlah">;
  defm SQRDMLSH : SIMDThreeScalarHSTied<1, 0, 0b10001, "sqrdmlsh">;
  def : Pat<(i32 (int_aarch64_neon_sqrdmlah (i32 FPR32:$Rd), (i32 FPR32:$Rn),
                                            (i32 FPR32:$Rm))),
            (SQRDMLAHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
  def : Pat<(i32 (int_aarch64_neon_sqrdmlsh (i32 FPR32:$Rd), (i32 FPR32:$Rn),
                                            (i32 FPR32:$Rm))),
            (SQRDMLSHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
}

def : InstAlias<"cmls $dst, $src1, $src2",
                (CMHSv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmle $dst, $src1, $src2",
                (CMGEv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmlo $dst, $src1, $src2",
                (CMHIv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmlt $dst, $src1, $src2",
                (CMGTv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"fcmle $dst, $src1, $src2",
                (FCMGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"fcmle $dst, $src1, $src2",
                (FCMGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"fcmlt $dst, $src1, $src2",
                (FCMGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"fcmlt $dst, $src1, $src2",
                (FCMGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"facle $dst, $src1, $src2",
                (FACGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"facle $dst, $src1, $src2",
                (FACGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"faclt $dst, $src1, $src2",
                (FACGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"faclt $dst, $src1, $src2",
                (FACGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;

//===----------------------------------------------------------------------===//
// Advanced SIMD three scalar instructions (mixed operands).
//===----------------------------------------------------------------------===//
defm SQDMULL  : SIMDThreeScalarMixedHS<0, 0b11010, "sqdmull",
                                       int_aarch64_neon_sqdmulls_scalar>;
defm SQDMLAL  : SIMDThreeScalarMixedTiedHS<0, 0b10010, "sqdmlal">;
defm SQDMLSL  : SIMDThreeScalarMixedTiedHS<0, 0b10110, "sqdmlsl">;

def : Pat<(i64 (int_aarch64_neon_sqadd (i64 FPR64:$Rd),
                   (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                                          (i32 FPR32:$Rm))))),
          (SQDMLALi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_sqsub (i64 FPR64:$Rd),
                   (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                                          (i32 FPR32:$Rm))))),
          (SQDMLSLi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
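// These folds work because sqdmlal/sqdmlsl are widening: e.g.
// sqdmlal d0, s1, s2 forms the saturating doubled 64-bit product of two
// 32-bit inputs and saturating-accumulates it into d0, so the separate
// sqadd/sqsub of an sqdmulls_scalar result collapses into one instruction.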
//===----------------------------------------------------------------------===//
// Advanced SIMD two scalar instructions.
//===----------------------------------------------------------------------===//

defm ABS    : SIMDTwoScalarD<    0, 0b01011, "abs", abs>;
defm CMEQ   : SIMDCmpTwoScalarD< 0, 0b01001, "cmeq", AArch64cmeqz>;
defm CMGE   : SIMDCmpTwoScalarD< 1, 0b01000, "cmge", AArch64cmgez>;
defm CMGT   : SIMDCmpTwoScalarD< 0, 0b01000, "cmgt", AArch64cmgtz>;
defm CMLE   : SIMDCmpTwoScalarD< 1, 0b01001, "cmle", AArch64cmlez>;
defm CMLT   : SIMDCmpTwoScalarD< 0, 0b01010, "cmlt", AArch64cmltz>;
defm FCMEQ  : SIMDFPCmpTwoScalar<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
defm FCMGE  : SIMDFPCmpTwoScalar<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
defm FCMGT  : SIMDFPCmpTwoScalar<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
defm FCMLE  : SIMDFPCmpTwoScalar<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
defm FCMLT  : SIMDFPCmpTwoScalar<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
defm FCVTAS : SIMDFPTwoScalar<   0, 0, 0b11100, "fcvtas">;
defm FCVTAU : SIMDFPTwoScalar<   1, 0, 0b11100, "fcvtau">;
defm FCVTMS : SIMDFPTwoScalar<   0, 0, 0b11011, "fcvtms">;
defm FCVTMU : SIMDFPTwoScalar<   1, 0, 0b11011, "fcvtmu">;
defm FCVTNS : SIMDFPTwoScalar<   0, 0, 0b11010, "fcvtns">;
defm FCVTNU : SIMDFPTwoScalar<   1, 0, 0b11010, "fcvtnu">;
defm FCVTPS : SIMDFPTwoScalar<   0, 1, 0b11010, "fcvtps">;
defm FCVTPU : SIMDFPTwoScalar<   1, 1, 0b11010, "fcvtpu">;
def  FCVTXNv1i64 : SIMDInexactCvtTwoScalar<0b10110, "fcvtxn">;
defm FCVTZS : SIMDFPTwoScalar<   0, 1, 0b11011, "fcvtzs">;
defm FCVTZU : SIMDFPTwoScalar<   1, 1, 0b11011, "fcvtzu">;
defm FRECPE : SIMDFPTwoScalar<   0, 1, 0b11101, "frecpe", HasNEONorStreamingSVE>;
defm FRECPX : SIMDFPTwoScalar<   0, 1, 0b11111, "frecpx", HasNEONorStreamingSVE>;
defm FRSQRTE : SIMDFPTwoScalar<  1, 1, 0b11101, "frsqrte", HasNEONorStreamingSVE>;
defm NEG    : SIMDTwoScalarD<    1, 0b01011, "neg",
                                 UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
defm SCVTF  : SIMDFPTwoScalarCVT<   0, 0, 0b11101, "scvtf", AArch64sitof>;
defm SQABS  : SIMDTwoScalarBHSD< 0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
defm SQNEG  : SIMDTwoScalarBHSD< 1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
defm SQXTN  : SIMDTwoScalarMixedBHS< 0, 0b10100, "sqxtn", int_aarch64_neon_scalar_sqxtn>;
defm SQXTUN : SIMDTwoScalarMixedBHS< 1, 0b10010, "sqxtun", int_aarch64_neon_scalar_sqxtun>;
defm SUQADD : SIMDTwoScalarBHSDTied< 0, 0b00011, "suqadd",
                                     int_aarch64_neon_suqadd>;
defm UCVTF  : SIMDFPTwoScalarCVT<   1, 0, 0b11101, "ucvtf", AArch64uitof>;
defm UQXTN  : SIMDTwoScalarMixedBHS<1, 0b10100, "uqxtn", int_aarch64_neon_scalar_uqxtn>;
defm USQADD : SIMDTwoScalarBHSDTied< 1, 0b00011, "usqadd",
                                     int_aarch64_neon_usqadd>;

def : Pat<(v1i64 (AArch64vashr (v1i64 V64:$Rn), (i32 63))),
          (CMLTv1i64rz V64:$Rn)>;

def : Pat<(v1i64 (int_aarch64_neon_fcvtas (v1f64 FPR64:$Rn))),
          (FCVTASv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtau (v1f64 FPR64:$Rn))),
          (FCVTAUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtms (v1f64 FPR64:$Rn))),
          (FCVTMSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtmu (v1f64 FPR64:$Rn))),
          (FCVTMUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtns (v1f64 FPR64:$Rn))),
          (FCVTNSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtnu (v1f64 FPR64:$Rn))),
          (FCVTNUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtps (v1f64 FPR64:$Rn))),
          (FCVTPSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtzs (v1f64 FPR64:$Rn))),
          (FCVTZSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtzu (v1f64 FPR64:$Rn))),
          (FCVTZUv1i64 FPR64:$Rn)>;

def : Pat<(f16 (int_aarch64_neon_frecpe (f16 FPR16:$Rn))),
          (FRECPEv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frecpe (f32 FPR32:$Rn))),
          (FRECPEv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frecpe (f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (int_aarch64_neon_frecpe (v1f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;

def : Pat<(f32 (AArch64frecpe (f32 FPR32:$Rn))),
          (FRECPEv1i32 FPR32:$Rn)>;
def : Pat<(v2f32 (AArch64frecpe (v2f32 V64:$Rn))),
          (FRECPEv2f32 V64:$Rn)>;
def : Pat<(v4f32 (AArch64frecpe (v4f32 FPR128:$Rn))),
          (FRECPEv4f32 FPR128:$Rn)>;
def : Pat<(f64 (AArch64frecpe (f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (AArch64frecpe (v1f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v2f64 (AArch64frecpe (v2f64 FPR128:$Rn))),
          (FRECPEv2f64 FPR128:$Rn)>;

def : Pat<(f32 (AArch64frecps (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
          (FRECPS32 FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(v2f32 (AArch64frecps (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
          (FRECPSv2f32 V64:$Rn, V64:$Rm)>;
def : Pat<(v4f32 (AArch64frecps (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
          (FRECPSv4f32 FPR128:$Rn, FPR128:$Rm)>;
def : Pat<(f64 (AArch64frecps (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
          (FRECPS64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v2f64 (AArch64frecps (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
          (FRECPSv2f64 FPR128:$Rn, FPR128:$Rm)>;

def : Pat<(f16 (int_aarch64_neon_frecpx (f16 FPR16:$Rn))),
          (FRECPXv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frecpx (f32 FPR32:$Rn))),
          (FRECPXv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frecpx (f64 FPR64:$Rn))),
          (FRECPXv1i64 FPR64:$Rn)>;

def : Pat<(f16 (int_aarch64_neon_frsqrte (f16 FPR16:$Rn))),
          (FRSQRTEv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frsqrte (f32 FPR32:$Rn))),
          (FRSQRTEv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frsqrte (f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (int_aarch64_neon_frsqrte (v1f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;

def : Pat<(f32 (AArch64frsqrte (f32 FPR32:$Rn))),
          (FRSQRTEv1i32 FPR32:$Rn)>;
def : Pat<(v2f32 (AArch64frsqrte (v2f32 V64:$Rn))),
          (FRSQRTEv2f32 V64:$Rn)>;
def : Pat<(v4f32 (AArch64frsqrte (v4f32 FPR128:$Rn))),
          (FRSQRTEv4f32 FPR128:$Rn)>;
def : Pat<(f64 (AArch64frsqrte (f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (AArch64frsqrte (v1f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v2f64 (AArch64frsqrte (v2f64 FPR128:$Rn))),
          (FRSQRTEv2f64 FPR128:$Rn)>;

def : Pat<(f32 (AArch64frsqrts (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
          (FRSQRTS32 FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(v2f32 (AArch64frsqrts (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
          (FRSQRTSv2f32 V64:$Rn, V64:$Rm)>;
def : Pat<(v4f32 (AArch64frsqrts (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
          (FRSQRTSv4f32 FPR128:$Rn, FPR128:$Rm)>;
def : Pat<(f64 (AArch64frsqrts (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
          (FRSQRTS64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v2f64 (AArch64frsqrts (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
          (FRSQRTSv2f64 FPR128:$Rn, FPR128:$Rm)>;
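// Editorial sketch (not from the original source): FRECPE/FRSQRTE only give
// an initial estimate, which codegen refines with FRECPS/FRSQRTS
// Newton-Raphson steps. With frsqrts(x, y) = (3 - x*y)/2, one refinement of
// r ~= 1/sqrt(a) is
//   r' = r * frsqrts(a * r, r)
// and with frecps(x, y) = 2 - x*y, one refinement of r ~= 1/a is
//   r' = r * frecps(a, r)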
// Some float -> int -> float conversion patterns for which we want to keep
// the int values in FP registers using the corresponding NEON instructions to
// avoid more costly int <-> fp register transfers.
let Predicates = [HasNEON] in {
def : Pat<(f64 (sint_to_fp (i64 (fp_to_sint f64:$Rn)))),
          (SCVTFv1i64 (i64 (FCVTZSv1i64 f64:$Rn)))>;
def : Pat<(f32 (sint_to_fp (i32 (fp_to_sint f32:$Rn)))),
          (SCVTFv1i32 (i32 (FCVTZSv1i32 f32:$Rn)))>;
def : Pat<(f64 (uint_to_fp (i64 (fp_to_uint f64:$Rn)))),
          (UCVTFv1i64 (i64 (FCVTZUv1i64 f64:$Rn)))>;
def : Pat<(f32 (uint_to_fp (i32 (fp_to_uint f32:$Rn)))),
          (UCVTFv1i32 (i32 (FCVTZUv1i32 f32:$Rn)))>;

let Predicates = [HasFullFP16] in {
def : Pat<(f16 (sint_to_fp (i32 (fp_to_sint f16:$Rn)))),
          (SCVTFv1i16 (f16 (FCVTZSv1f16 f16:$Rn)))>;
def : Pat<(f16 (uint_to_fp (i32 (fp_to_uint f16:$Rn)))),
          (UCVTFv1i16 (f16 (FCVTZUv1f16 f16:$Rn)))>;
}
}

// If an integer is about to be converted to a floating point value,
// just load it on the floating point unit.
// Here are the patterns for 8 and 16-bits to float.
// 8-bits -> float.
multiclass UIntToFPROLoadPat<ValueType DstTy, ValueType SrcTy,
                             SDPatternOperator loadop, Instruction UCVTF,
                             ROAddrMode ro, Instruction LDRW, Instruction LDRX,
                             SubRegIndex sub> {
  def : Pat<(DstTy (uint_to_fp (SrcTy
                     (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm,
                                      ro.Wext:$extend))))),
            (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
                                  (LDRW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
                                  sub))>;

  def : Pat<(DstTy (uint_to_fp (SrcTy
                     (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm,
                                      ro.Wext:$extend))))),
            (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
                                  (LDRX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
                                  sub))>;
}

defm : UIntToFPROLoadPat<f32, i32, zextloadi8,
                         UCVTFv1i32, ro8, LDRBroW, LDRBroX, bsub>;
def : Pat <(f32 (uint_to_fp (i32
                  (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
def : Pat <(f32 (uint_to_fp (i32
                  (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
// 16-bits -> float.
defm : UIntToFPROLoadPat<f32, i32, zextloadi16,
                         UCVTFv1i32, ro16, LDRHroW, LDRHroX, hsub>;
def : Pat <(f32 (uint_to_fp (i32
                  (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
def : Pat <(f32 (uint_to_fp (i32
                  (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
// 32-bits are handled in target specific dag combine:
// performIntToFpCombine.
// 64-bits integer to 32-bits floating point, not possible with
// UCVTF on floating point registers (both source and destination
// must have the same size).
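// Worked example (an editorial assumption, not from the original source): for
// C like
//   float f(const unsigned char *p) { return *p; }
// the patterns above load the byte straight into an FP/SIMD register and
// convert in place, e.g.
//   ldr   b0, [x0]
//   ucvtf s0, s0
// instead of ldrb into a GPR followed by a GPR-to-FPR ucvtf.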
// Here are the patterns for 8, 16, 32, and 64-bits to double.
// 8-bits -> double.
defm : UIntToFPROLoadPat<f64, i32, zextloadi8,
                         UCVTFv1i64, ro8, LDRBroW, LDRBroX, bsub>;
def : Pat <(f64 (uint_to_fp (i32
                  (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
def : Pat <(f64 (uint_to_fp (i32
                  (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
// 16-bits -> double.
defm : UIntToFPROLoadPat<f64, i32, zextloadi16,
                         UCVTFv1i64, ro16, LDRHroW, LDRHroX, hsub>;
def : Pat <(f64 (uint_to_fp (i32
                  (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
def : Pat <(f64 (uint_to_fp (i32
                  (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
// 32-bits -> double.
defm : UIntToFPROLoadPat<f64, i32, load,
                         UCVTFv1i64, ro32, LDRSroW, LDRSroX, ssub>;
def : Pat <(f64 (uint_to_fp (i32
                  (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub))>;
def : Pat <(f64 (uint_to_fp (i32
                  (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURSi GPR64sp:$Rn, simm9:$offset), ssub))>;
// 64-bits -> double are handled in target specific dag combine:
// performIntToFpCombine.
//===----------------------------------------------------------------------===//
// Advanced SIMD three different-sized vector instructions.
//===----------------------------------------------------------------------===//

defm ADDHN  : SIMDNarrowThreeVectorBHS<0,0b0100,"addhn", int_aarch64_neon_addhn>;
defm SUBHN  : SIMDNarrowThreeVectorBHS<0,0b0110,"subhn", int_aarch64_neon_subhn>;
defm RADDHN : SIMDNarrowThreeVectorBHS<1,0b0100,"raddhn",int_aarch64_neon_raddhn>;
defm RSUBHN : SIMDNarrowThreeVectorBHS<1,0b0110,"rsubhn",int_aarch64_neon_rsubhn>;
defm PMULL  : SIMDDifferentThreeVectorBD<0,0b1110,"pmull",int_aarch64_neon_pmull>;
defm SABAL  : SIMDLongThreeVectorTiedBHSabal<0,0b0101,"sabal",
                                             AArch64sabd>;
defm SABDL   : SIMDLongThreeVectorBHSabdl<0, 0b0111, "sabdl",
                                          AArch64sabd>;
defm SADDL   : SIMDLongThreeVectorBHS<0, 0b0000, "saddl",
                 BinOpFrag<(add (sext node:$LHS), (sext node:$RHS))>>;
defm SADDW   : SIMDWideThreeVectorBHS<0, 0b0001, "saddw",
                 BinOpFrag<(add node:$LHS, (sext node:$RHS))>>;
defm SMLAL   : SIMDLongThreeVectorTiedBHS<0, 0b1000, "smlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMLSL   : SIMDLongThreeVectorTiedBHS<0, 0b1010, "smlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMULL   : SIMDLongThreeVectorBHS<0, 0b1100, "smull", int_aarch64_neon_smull>;
defm SQDMLAL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1001, "sqdmlal",
                                               int_aarch64_neon_sqadd>;
defm SQDMLSL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1011, "sqdmlsl",
                                               int_aarch64_neon_sqsub>;
defm SQDMULL : SIMDLongThreeVectorHS<0, 0b1101, "sqdmull",
                                     int_aarch64_neon_sqdmull>;
defm SSUBL   : SIMDLongThreeVectorBHS<0, 0b0010, "ssubl",
                 BinOpFrag<(sub (sext node:$LHS), (sext node:$RHS))>>;
defm SSUBW   : SIMDWideThreeVectorBHS<0, 0b0011, "ssubw",
                 BinOpFrag<(sub node:$LHS, (sext node:$RHS))>>;
defm UABAL   : SIMDLongThreeVectorTiedBHSabal<1, 0b0101, "uabal",
                                              AArch64uabd>;
defm UADDL   : SIMDLongThreeVectorBHS<1, 0b0000, "uaddl",
                 BinOpFrag<(add (zanyext node:$LHS), (zanyext node:$RHS))>>;
defm UADDW   : SIMDWideThreeVectorBHS<1, 0b0001, "uaddw",
                 BinOpFrag<(add node:$LHS, (zanyext node:$RHS))>>;
defm UMLAL   : SIMDLongThreeVectorTiedBHS<1, 0b1000, "umlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMLSL   : SIMDLongThreeVectorTiedBHS<1, 0b1010, "umlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMULL   : SIMDLongThreeVectorBHS<1, 0b1100, "umull", int_aarch64_neon_umull>;
defm USUBL   : SIMDLongThreeVectorBHS<1, 0b0010, "usubl",
                 BinOpFrag<(sub (zanyext node:$LHS), (zanyext node:$RHS))>>;
defm USUBW   : SIMDWideThreeVectorBHS<1, 0b0011, "usubw",
                 BinOpFrag<(sub node:$LHS, (zanyext node:$RHS))>>;
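// Editorial sketch (not from the original source): the tied long forms match
// a widening multiply feeding an accumulate. For the 4h -> 4s case, IR such as
//   %m = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %a, <4 x i16> %b)
//   %r = add <4 x i32> %acc, %m
// selects to a single
//   smlal v0.4s, v1.4h, v2.4h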
// Additional patterns for [SU]ML[AS]L
multiclass Neon_mul_acc_widen_patterns<SDPatternOperator opnode, SDPatternOperator vecopnode,
                                       Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v4i16 (opnode
                    V64:$Ra,
                    (v4i16 (extract_subvector
                            (vecopnode (v8i8 V64:$Rn),(v8i8 V64:$Rm)),
                            (i64 0))))),
            (EXTRACT_SUBREG (v8i16 (INST8B
                             (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), V64:$Ra, dsub),
                             V64:$Rn, V64:$Rm)), dsub)>;
  def : Pat<(v2i32 (opnode
                    V64:$Ra,
                    (v2i32 (extract_subvector
                            (vecopnode (v4i16 V64:$Rn),(v4i16 V64:$Rm)),
                            (i64 0))))),
            (EXTRACT_SUBREG (v4i32 (INST4H
                             (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), V64:$Ra, dsub),
                             V64:$Rn, V64:$Rm)), dsub)>;
  def : Pat<(v1i64 (opnode
                    V64:$Ra,
                    (v1i64 (extract_subvector
                            (vecopnode (v2i32 V64:$Rn),(v2i32 V64:$Rm)),
                            (i64 0))))),
            (EXTRACT_SUBREG (v2i64 (INST2S
                             (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), V64:$Ra, dsub),
                             V64:$Rn, V64:$Rm)), dsub)>;
}

defm : Neon_mul_acc_widen_patterns<add, int_aarch64_neon_umull,
     UMLALv8i8_v8i16, UMLALv4i16_v4i32, UMLALv2i32_v2i64>;
defm : Neon_mul_acc_widen_patterns<add, int_aarch64_neon_smull,
     SMLALv8i8_v8i16, SMLALv4i16_v4i32, SMLALv2i32_v2i64>;
defm : Neon_mul_acc_widen_patterns<sub, int_aarch64_neon_umull,
     UMLSLv8i8_v8i16, UMLSLv4i16_v4i32, UMLSLv2i32_v2i64>;
defm : Neon_mul_acc_widen_patterns<sub, int_aarch64_neon_smull,
     SMLSLv8i8_v8i16, SMLSLv4i16_v4i32, SMLSLv2i32_v2i64>;

// Additional patterns for SMULL and UMULL
multiclass Neon_mul_widen_patterns<SDPatternOperator opnode,
  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v8i16 (opnode (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
            (INST8B V64:$Rn, V64:$Rm)>;
  def : Pat<(v4i32 (opnode (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
            (INST4H V64:$Rn, V64:$Rm)>;
  def : Pat<(v2i64 (opnode (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
            (INST2S V64:$Rn, V64:$Rm)>;
}

defm : Neon_mul_widen_patterns<AArch64smull, SMULLv8i8_v8i16,
                               SMULLv4i16_v4i32, SMULLv2i32_v2i64>;
defm : Neon_mul_widen_patterns<AArch64umull, UMULLv8i8_v8i16,
                               UMULLv4i16_v4i32, UMULLv2i32_v2i64>;

// Patterns for smull2/umull2.
multiclass Neon_mul_high_patterns<SDPatternOperator opnode,
  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v8i16 (opnode (extract_high_v16i8 V128:$Rn),
                           (extract_high_v16i8 V128:$Rm))),
            (INST8B V128:$Rn, V128:$Rm)>;
  def : Pat<(v4i32 (opnode (extract_high_v8i16 V128:$Rn),
                           (extract_high_v8i16 V128:$Rm))),
            (INST4H V128:$Rn, V128:$Rm)>;
  def : Pat<(v2i64 (opnode (extract_high_v4i32 V128:$Rn),
                           (extract_high_v4i32 V128:$Rm))),
            (INST2S V128:$Rn, V128:$Rm)>;
}

defm : Neon_mul_high_patterns<AArch64smull, SMULLv16i8_v8i16,
                              SMULLv8i16_v4i32, SMULLv4i32_v2i64>;
defm : Neon_mul_high_patterns<AArch64umull, UMULLv16i8_v8i16,
                              UMULLv8i16_v4i32, UMULLv4i32_v2i64>;

// Additional patterns for SMLAL/SMLSL and UMLAL/UMLSL
multiclass Neon_mulacc_widen_patterns<SDPatternOperator opnode,
  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v8i16 (opnode (v8i16 V128:$Rd), (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
            (INST8B V128:$Rd, V64:$Rn, V64:$Rm)>;
  def : Pat<(v4i32 (opnode (v4i32 V128:$Rd), (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
            (INST4H V128:$Rd, V64:$Rn, V64:$Rm)>;
  def : Pat<(v2i64 (opnode (v2i64 V128:$Rd), (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
            (INST2S V128:$Rd, V64:$Rn, V64:$Rm)>;
}

defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(add node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
  SMLALv8i8_v8i16, SMLALv4i16_v4i32, SMLALv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(add node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
  UMLALv8i8_v8i16, UMLALv4i16_v4i32, UMLALv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(sub node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
  SMLSLv8i8_v8i16, SMLSLv4i16_v4i32, SMLSLv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(sub node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
  UMLSLv8i8_v8i16, UMLSLv4i16_v4i32, UMLSLv2i32_v2i64>;
// Patterns for 64-bit pmull
def : Pat<(int_aarch64_neon_pmull64 V64:$Rn, V64:$Rm),
          (PMULLv1i64 V64:$Rn, V64:$Rm)>;
def : Pat<(int_aarch64_neon_pmull64 (extractelt (v2i64 V128:$Rn), (i64 1)),
                                    (extractelt (v2i64 V128:$Rm), (i64 1))),
          (PMULLv2i64 V128:$Rn, V128:$Rm)>;

// CodeGen patterns for addhn and subhn instructions, which can actually be
// written in LLVM IR without too much difficulty.

// ADDHN
def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm), (i32 8))))),
          (ADDHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                             (i32 16))))),
          (ADDHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                             (i32 32))))),
          (ADDHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v8i8 V64:$Rd),
                          (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                      (i32 8))))),
          (ADDHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v4i16 V64:$Rd),
                          (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                      (i32 16))))),
          (ADDHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v2i32 V64:$Rd),
                          (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                      (i32 32))))),
          (ADDHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;

// SUBHN
def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm), (i32 8))))),
          (SUBHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                             (i32 16))))),
          (SUBHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                             (i32 32))))),
          (SUBHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v8i8 V64:$Rd),
                          (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                      (i32 8))))),
          (SUBHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v4i16 V64:$Rd),
                          (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                      (i32 16))))),
          (SUBHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v2i32 V64:$Rd),
                          (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                      (i32 32))))),
          (SUBHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
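// Worked example (editorial, not from the original source): the first ADDHN
// pattern recognises the plain-IR spelling of "add, then keep the high
// halves":
//   %s = add <8 x i16> %a, %b
//   %h = lshr <8 x i16> %s, <i16 8, i16 8, i16 8, i16 8,
//                            i16 8, i16 8, i16 8, i16 8>
//   %t = trunc <8 x i16> %h to <8 x i8>
// which selects to
//   addhn v0.8b, v1.8h, v2.8h
// and the concat_vectors forms map the same computation onto addhn2/subhn2.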
//----------------------------------------------------------------------------
// AdvSIMD bitwise extract from vector instruction.
//----------------------------------------------------------------------------

defm EXT : SIMDBitwiseExtract<"ext">;

def AdjustExtImm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(8 + N->getZExtValue(), SDLoc(N), MVT::i32);
}]>;
multiclass ExtPat<ValueType VT64, ValueType VT128, int N> {
  def : Pat<(VT64 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
            (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
  def : Pat<(VT128 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
            (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
  // We use EXT to handle extract_subvector to copy the upper 64-bits of a
  // 128-bit vector.
  def : Pat<(VT64 (extract_subvector V128:$Rn, (i64 N))),
            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
  // A 64-bit EXT of two halves of the same 128-bit register can be done as a
  // single 128-bit EXT.
  def : Pat<(VT64 (AArch64ext (extract_subvector V128:$Rn, (i64 0)),
                              (extract_subvector V128:$Rn, (i64 N)),
                              (i32 imm:$imm))),
            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, imm:$imm), dsub)>;
  // A 64-bit EXT of the high half of a 128-bit register can be done using a
  // 128-bit EXT of the whole register with an adjustment to the immediate. The
  // top half of the other operand will be unset, but that doesn't matter as it
  // will not be used.
  def : Pat<(VT64 (AArch64ext (extract_subvector V128:$Rn, (i64 N)),
                              V64:$Rm,
                              (i32 imm:$imm))),
            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn,
                                      (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                                      (AdjustExtImm imm:$imm)), dsub)>;
}

defm : ExtPat<v8i8, v16i8, 8>;
defm : ExtPat<v4i16, v8i16, 4>;
defm : ExtPat<v4f16, v8f16, 4>;
defm : ExtPat<v4bf16, v8bf16, 4>;
defm : ExtPat<v2i32, v4i32, 2>;
defm : ExtPat<v2f32, v4f32, 2>;
defm : ExtPat<v1i64, v2i64, 1>;
defm : ExtPat<v1f64, v2f64, 1>;
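// Worked example (editorial, not from the original source): for the
// AdjustExtImm case above, a 64-bit AArch64ext whose first operand is the
// high half of a 128-bit register with immediate #3 is emitted as a 128-bit
// ext with the immediate rebased by 8:
//   ext v0.16b, v1.16b, v2.16b, #11    // 8 + 3; only the low 64 bits are
//                                      // kept via the dsub extract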
//----------------------------------------------------------------------------
// AdvSIMD zip vector
//----------------------------------------------------------------------------

defm TRN1 : SIMDZipVector<0b010, "trn1", AArch64trn1>;
defm TRN2 : SIMDZipVector<0b110, "trn2", AArch64trn2>;
defm UZP1 : SIMDZipVector<0b001, "uzp1", AArch64uzp1>;
defm UZP2 : SIMDZipVector<0b101, "uzp2", AArch64uzp2>;
defm ZIP1 : SIMDZipVector<0b011, "zip1", AArch64zip1>;
defm ZIP2 : SIMDZipVector<0b111, "zip2", AArch64zip2>;

def : Pat<(v16i8 (concat_vectors (v8i8 (trunc (v8i16 V128:$Vn))),
                                 (v8i8 (trunc (v8i16 V128:$Vm))))),
          (UZP1v16i8 V128:$Vn, V128:$Vm)>;
def : Pat<(v8i16 (concat_vectors (v4i16 (trunc (v4i32 V128:$Vn))),
                                 (v4i16 (trunc (v4i32 V128:$Vm))))),
          (UZP1v8i16 V128:$Vn, V128:$Vm)>;
def : Pat<(v4i32 (concat_vectors (v2i32 (trunc (v2i64 V128:$Vn))),
                                 (v2i32 (trunc (v2i64 V128:$Vm))))),
          (UZP1v4i32 V128:$Vn, V128:$Vm)>;

//----------------------------------------------------------------------------
// AdvSIMD TBL/TBX instructions
//----------------------------------------------------------------------------

defm TBL : SIMDTableLookup<    0, "tbl">;
defm TBX : SIMDTableLookupTied<1, "tbx">;

def : Pat<(v8i8 (int_aarch64_neon_tbl1 (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
          (TBLv8i8One VecListOne128:$Rn, V64:$Ri)>;
def : Pat<(v16i8 (int_aarch64_neon_tbl1 (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
          (TBLv16i8One V128:$Ri, V128:$Rn)>;

def : Pat<(v8i8 (int_aarch64_neon_tbx1 (v8i8 V64:$Rd),
                  (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
          (TBXv8i8One V64:$Rd, VecListOne128:$Rn, V64:$Ri)>;
def : Pat<(v16i8 (int_aarch64_neon_tbx1 (v16i8 V128:$Rd),
                   (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
          (TBXv16i8One V128:$Rd, V128:$Ri, V128:$Rn)>;


//----------------------------------------------------------------------------
// AdvSIMD scalar DUP instruction
//----------------------------------------------------------------------------

defm DUP : SIMDScalarDUP<"mov">;

//----------------------------------------------------------------------------
// AdvSIMD scalar pairwise instructions
//----------------------------------------------------------------------------

defm ADDP    : SIMDPairwiseScalarD<0, 0b11011, "addp">;
defm FADDP   : SIMDFPPairwiseScalar<0, 0b01101, "faddp">;
defm FMAXNMP : SIMDFPPairwiseScalar<0, 0b01100, "fmaxnmp">;
defm FMAXP   : SIMDFPPairwiseScalar<0, 0b01111, "fmaxp">;
defm FMINNMP : SIMDFPPairwiseScalar<1, 0b01100, "fminnmp">;
defm FMINP   : SIMDFPPairwiseScalar<1, 0b01111, "fminp">;

let Predicates = [HasFullFP16] in {
def : Pat<(f16 (vecreduce_fadd (v8f16 V128:$Rn))),
          (FADDPv2i16p
            (EXTRACT_SUBREG
              (FADDPv8f16 (FADDPv8f16 V128:$Rn, (v8f16 (IMPLICIT_DEF))), (v8f16 (IMPLICIT_DEF))),
              dsub))>;
def : Pat<(f16 (vecreduce_fadd (v4f16 V64:$Rn))),
          (FADDPv2i16p (FADDPv4f16 V64:$Rn, (v4f16 (IMPLICIT_DEF))))>;
}
def : Pat<(f32 (vecreduce_fadd (v4f32 V128:$Rn))),
          (FADDPv2i32p
            (EXTRACT_SUBREG
              (FADDPv4f32 V128:$Rn, (v4f32 (IMPLICIT_DEF))),
              dsub))>;
def : Pat<(f32 (vecreduce_fadd (v2f32 V64:$Rn))),
          (FADDPv2i32p V64:$Rn)>;
def : Pat<(f64 (vecreduce_fadd (v2f64 V128:$Rn))),
          (FADDPv2i64p V128:$Rn)>;

def : Pat<(v2i64 (AArch64saddv V128:$Rn)),
          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
def : Pat<(v2i64 (AArch64uaddv V128:$Rn)),
          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
def : Pat<(f32 (int_aarch64_neon_faddv (v2f32 V64:$Rn))),
          (FADDPv2i32p V64:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_faddv (v4f32 V128:$Rn))),
          (FADDPv2i32p (EXTRACT_SUBREG (FADDPv4f32 V128:$Rn, V128:$Rn), dsub))>;
def : Pat<(f64 (int_aarch64_neon_faddv (v2f64 V128:$Rn))),
          (FADDPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fmaxnmv (v2f32 V64:$Rn))),
          (FMAXNMPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fmaxnmv (v2f64 V128:$Rn))),
          (FMAXNMPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fmaxv (v2f32 V64:$Rn))),
          (FMAXPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fmaxv (v2f64 V128:$Rn))),
          (FMAXPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fminnmv (v2f32 V64:$Rn))),
          (FMINNMPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fminnmv (v2f64 V128:$Rn))),
          (FMINNMPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fminv (v2f32 V64:$Rn))),
          (FMINPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fminv (v2f64 V128:$Rn))),
          (FMINPv2i64p V128:$Rn)>;

//----------------------------------------------------------------------------
// AdvSIMD INS/DUP instructions
//----------------------------------------------------------------------------

def DUPv8i8gpr  : SIMDDupFromMain<0, {?,?,?,?,1}, ".8b", v8i8, V64, GPR32>;
def DUPv16i8gpr : SIMDDupFromMain<1, {?,?,?,?,1}, ".16b", v16i8, V128, GPR32>;
def DUPv4i16gpr : SIMDDupFromMain<0, {?,?,?,1,0}, ".4h", v4i16, V64, GPR32>;
def DUPv8i16gpr : SIMDDupFromMain<1, {?,?,?,1,0}, ".8h", v8i16, V128, GPR32>;
def DUPv2i32gpr : SIMDDupFromMain<0, {?,?,1,0,0}, ".2s", v2i32, V64, GPR32>;
def DUPv4i32gpr : SIMDDupFromMain<1, {?,?,1,0,0}, ".4s", v4i32, V128, GPR32>;
def DUPv2i64gpr : SIMDDupFromMain<1, {?,1,0,0,0}, ".2d", v2i64, V128, GPR64>;

def DUPv2i64lane : SIMDDup64FromElement;
def DUPv2i32lane : SIMDDup32FromElement<0, ".2s", v2i32, V64>;
def DUPv4i32lane : SIMDDup32FromElement<1, ".4s", v4i32, V128>;
def DUPv4i16lane : SIMDDup16FromElement<0, ".4h", v4i16, V64>;
def DUPv8i16lane : SIMDDup16FromElement<1, ".8h", v8i16, V128>;
".8h", v8i16, V128>; 5452def DUPv8i8lane : SIMDDup8FromElement <0, ".8b", v8i8, V64>; 5453def DUPv16i8lane : SIMDDup8FromElement <1, ".16b", v16i8, V128>; 5454 5455// DUP from a 64-bit register to a 64-bit register is just a copy 5456def : Pat<(v1i64 (AArch64dup (i64 GPR64:$Rn))), 5457 (COPY_TO_REGCLASS GPR64:$Rn, FPR64)>; 5458def : Pat<(v1f64 (AArch64dup (f64 FPR64:$Rn))), 5459 (COPY_TO_REGCLASS FPR64:$Rn, FPR64)>; 5460 5461def : Pat<(v2f32 (AArch64dup (f32 FPR32:$Rn))), 5462 (v2f32 (DUPv2i32lane 5463 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub), 5464 (i64 0)))>; 5465def : Pat<(v4f32 (AArch64dup (f32 FPR32:$Rn))), 5466 (v4f32 (DUPv4i32lane 5467 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub), 5468 (i64 0)))>; 5469def : Pat<(v2f64 (AArch64dup (f64 FPR64:$Rn))), 5470 (v2f64 (DUPv2i64lane 5471 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rn, dsub), 5472 (i64 0)))>; 5473def : Pat<(v4f16 (AArch64dup (f16 FPR16:$Rn))), 5474 (v4f16 (DUPv4i16lane 5475 (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub), 5476 (i64 0)))>; 5477def : Pat<(v4bf16 (AArch64dup (bf16 FPR16:$Rn))), 5478 (v4bf16 (DUPv4i16lane 5479 (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub), 5480 (i64 0)))>; 5481def : Pat<(v8f16 (AArch64dup (f16 FPR16:$Rn))), 5482 (v8f16 (DUPv8i16lane 5483 (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub), 5484 (i64 0)))>; 5485def : Pat<(v8bf16 (AArch64dup (bf16 FPR16:$Rn))), 5486 (v8bf16 (DUPv8i16lane 5487 (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub), 5488 (i64 0)))>; 5489 5490def : Pat<(v4f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)), 5491 (DUPv4i16lane V128:$Rn, VectorIndexH:$imm)>; 5492def : Pat<(v8f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)), 5493 (DUPv8i16lane V128:$Rn, VectorIndexH:$imm)>; 5494 5495def : Pat<(v4bf16 (AArch64duplane16 (v8bf16 V128:$Rn), VectorIndexH:$imm)), 5496 (DUPv4i16lane V128:$Rn, VectorIndexH:$imm)>; 5497def : Pat<(v8bf16 (AArch64duplane16 (v8bf16 V128:$Rn), VectorIndexH:$imm)), 5498 (DUPv8i16lane V128:$Rn, VectorIndexH:$imm)>; 5499 5500def : Pat<(v2f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)), 5501 (DUPv2i32lane V128:$Rn, VectorIndexS:$imm)>; 5502def : Pat<(v4f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)), 5503 (DUPv4i32lane V128:$Rn, VectorIndexS:$imm)>; 5504def : Pat<(v2f64 (AArch64duplane64 (v2f64 V128:$Rn), VectorIndexD:$imm)), 5505 (DUPv2i64lane V128:$Rn, VectorIndexD:$imm)>; 5506 5507// If there's an (AArch64dup (vector_extract ...) ...), we can use a duplane 5508// instruction even if the types don't match: we just have to remap the lane 5509// carefully. N.b. this trick only applies to truncations. 
def VecIndex_x2 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(2 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;
def VecIndex_x4 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(4 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;
def VecIndex_x8 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(8 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;

multiclass DUPWithTruncPats<ValueType ResVT, ValueType Src64VT,
                            ValueType Src128VT, ValueType ScalVT,
                            Instruction DUP, SDNodeXForm IdxXFORM> {
  def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src128VT V128:$Rn),
                                                        imm:$idx)))),
            (DUP V128:$Rn, (IdxXFORM imm:$idx))>;

  def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src64VT V64:$Rn),
                                                        imm:$idx)))),
            (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
}

defm : DUPWithTruncPats<v8i8,  v4i16, v8i16, i32, DUPv8i8lane,  VecIndex_x2>;
defm : DUPWithTruncPats<v8i8,  v2i32, v4i32, i32, DUPv8i8lane,  VecIndex_x4>;
defm : DUPWithTruncPats<v4i16, v2i32, v4i32, i32, DUPv4i16lane, VecIndex_x2>;

defm : DUPWithTruncPats<v16i8, v4i16, v8i16, i32, DUPv16i8lane, VecIndex_x2>;
defm : DUPWithTruncPats<v16i8, v2i32, v4i32, i32, DUPv16i8lane, VecIndex_x4>;
defm : DUPWithTruncPats<v8i16, v2i32, v4i32, i32, DUPv8i16lane, VecIndex_x2>;

multiclass DUPWithTrunci64Pats<ValueType ResVT, Instruction DUP,
                               SDNodeXForm IdxXFORM> {
  def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v2i64 V128:$Rn),
                                                       imm:$idx))))),
            (DUP V128:$Rn, (IdxXFORM imm:$idx))>;

  def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v1i64 V64:$Rn),
                                                       imm:$idx))))),
            (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
}

defm : DUPWithTrunci64Pats<v8i8,  DUPv8i8lane,  VecIndex_x8>;
defm : DUPWithTrunci64Pats<v4i16, DUPv4i16lane, VecIndex_x4>;
defm : DUPWithTrunci64Pats<v2i32, DUPv2i32lane, VecIndex_x2>;

defm : DUPWithTrunci64Pats<v16i8, DUPv16i8lane, VecIndex_x8>;
defm : DUPWithTrunci64Pats<v8i16, DUPv8i16lane, VecIndex_x4>;
defm : DUPWithTrunci64Pats<v4i32, DUPv4i32lane, VecIndex_x2>;

// SMOV and UMOV definitions, with some extra patterns for convenience
defm SMOV : SMov;
defm UMOV : UMov;

def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
          (i32 (SMOVvi8to32 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
          (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
          (i32 (SMOVvi16to32 V128:$Rn, VectorIndexH:$idx))>;
def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
          (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;
def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
          (i32 (SMOVvi16to32 V128:$Rn, VectorIndexH:$idx))>;
def : Pat<(sext (i32 (vector_extract (v4i32 V128:$Rn), VectorIndexS:$idx))),
          (i64 (SMOVvi32to64 V128:$Rn, VectorIndexS:$idx))>;

def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v16i8 V128:$Rn),
                                                         VectorIndexB:$idx)))), i8),
          (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v8i16 V128:$Rn),
                                                         VectorIndexH:$idx)))), i16),
          (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;
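// Illustrative example (editorial, not from the original source): a
// sign-extending lane extract such as
//   %e = extractelement <16 x i8> %v, i64 3
//   %s = sext i8 %e to i32
// reaches isel as sext_inreg of the extracted value and matches the patterns
// above, selecting to a single
//   smov w0, v0.b[3]
// rather than umov plus a separate sxtb.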
// Extracting i8 or i16 elements will have the zero-extend transformed to
// an 'and' mask by type legalization since neither i8 nor i16 are legal types
// for AArch64. Match these patterns here since UMOV already zeroes out the high
// bits of the destination register.
def : Pat<(and (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx),
               (i32 0xff)),
          (i32 (UMOVvi8 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(and (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),
               (i32 0xffff)),
          (i32 (UMOVvi16 V128:$Rn, VectorIndexH:$idx))>;

def : Pat<(i64 (and (i64 (anyext (i32 (vector_extract (v16i8 V128:$Rn),
                                                       VectorIndexB:$idx)))), (i64 0xff))),
          (SUBREG_TO_REG (i64 0), (i32 (UMOVvi8 V128:$Rn, VectorIndexB:$idx)), sub_32)>;
def : Pat<(i64 (and (i64 (anyext (i32 (vector_extract (v8i16 V128:$Rn),
                                                       VectorIndexH:$idx)))), (i64 0xffff))),
          (SUBREG_TO_REG (i64 0), (i32 (UMOVvi16 V128:$Rn, VectorIndexH:$idx)), sub_32)>;

defm INS : SIMDIns;

def : Pat<(v16i8 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
def : Pat<(v8i8 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;

def : Pat<(v8i16 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
def : Pat<(v4i16 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;

def : Pat<(v4f16 (scalar_to_vector (f16 FPR16:$Rn))),
          (INSERT_SUBREG (v4f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
def : Pat<(v8f16 (scalar_to_vector (f16 FPR16:$Rn))),
          (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;

def : Pat<(v4bf16 (scalar_to_vector (bf16 FPR16:$Rn))),
          (INSERT_SUBREG (v4bf16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
def : Pat<(v8bf16 (scalar_to_vector (bf16 FPR16:$Rn))),
          (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;

def : Pat<(v2i32 (scalar_to_vector (i32 FPR32:$Rn))),
          (v2i32 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
                                (i32 FPR32:$Rn), ssub))>;
def : Pat<(v4i32 (scalar_to_vector (i32 FPR32:$Rn))),
          (v4i32 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
                                (i32 FPR32:$Rn), ssub))>;

def : Pat<(v2i64 (scalar_to_vector (i64 FPR64:$Rn))),
          (v2i64 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
                                (i64 FPR64:$Rn), dsub))>;

def : Pat<(v4f16 (scalar_to_vector (f16 FPR16:$Rn))),
          (INSERT_SUBREG (v4f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
def : Pat<(v8f16 (scalar_to_vector (f16 FPR16:$Rn))),
          (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;

def : Pat<(v4bf16 (scalar_to_vector (bf16 FPR16:$Rn))),
          (INSERT_SUBREG (v4bf16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
def : Pat<(v8bf16 (scalar_to_vector (bf16 FPR16:$Rn))),
          (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;

def : Pat<(v4f32 (scalar_to_vector (f32 FPR32:$Rn))),
          (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
def : Pat<(v2f32 (scalar_to_vector (f32 FPR32:$Rn))),
          (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;

def : Pat<(v2f64 (scalar_to_vector (f64 FPR64:$Rn))),
          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rn, dsub)>;
def : Pat<(v4f16 (vector_insert (v4f16 V64:$Rn),
                                (f16 FPR16:$Rm), (i64 VectorIndexS:$imm))),
          (EXTRACT_SUBREG
            (INSvi16lane
              (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), V64:$Rn, dsub)),
              VectorIndexS:$imm,
              (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
              (i64 0)),
            dsub)>;

def : Pat<(vector_insert (v8f16 v8f16:$Rn), (f16 fpimm0),
                         (i64 VectorIndexH:$imm)),
          (INSvi16gpr V128:$Rn, VectorIndexH:$imm, WZR)>;
def : Pat<(vector_insert v4f32:$Rn, (f32 fpimm0),
                         (i64 VectorIndexS:$imm)),
          (INSvi32gpr V128:$Rn, VectorIndexS:$imm, WZR)>;
def : Pat<(vector_insert v2f64:$Rn, (f64 fpimm0),
                         (i64 VectorIndexD:$imm)),
          (INSvi64gpr V128:$Rn, VectorIndexD:$imm, XZR)>;

def : Pat<(v8f16 (vector_insert (v8f16 V128:$Rn),
                                (f16 FPR16:$Rm), (i64 VectorIndexH:$imm))),
          (INSvi16lane
            V128:$Rn, VectorIndexH:$imm,
            (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
            (i64 0))>;

def : Pat<(v4bf16 (vector_insert (v4bf16 V64:$Rn),
                                 (bf16 FPR16:$Rm), (i64 VectorIndexS:$imm))),
          (EXTRACT_SUBREG
            (INSvi16lane
              (v8bf16 (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), V64:$Rn, dsub)),
              VectorIndexS:$imm,
              (v8bf16 (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
              (i64 0)),
            dsub)>;

def : Pat<(v8bf16 (vector_insert (v8bf16 V128:$Rn),
                                 (bf16 FPR16:$Rm), (i64 VectorIndexH:$imm))),
          (INSvi16lane
            V128:$Rn, VectorIndexH:$imm,
            (v8bf16 (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
            (i64 0))>;

def : Pat<(v2f32 (vector_insert (v2f32 V64:$Rn),
                                (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
          (EXTRACT_SUBREG
            (INSvi32lane
              (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), V64:$Rn, dsub)),
              VectorIndexS:$imm,
              (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
              (i64 0)),
            dsub)>;
def : Pat<(v4f32 (vector_insert (v4f32 V128:$Rn),
                                (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
          (INSvi32lane
            V128:$Rn, VectorIndexS:$imm,
            (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
            (i64 0))>;
def : Pat<(v2f64 (vector_insert (v2f64 V128:$Rn),
                                (f64 FPR64:$Rm), (i64 VectorIndexD:$imm))),
          (INSvi64lane
            V128:$Rn, VectorIndexD:$imm,
            (v2f64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rm, dsub)),
            (i64 0))>;

// Copy an element at a constant index in one vector into a constant indexed
// element of another.
// FIXME refactor to a shared class/dev parameterized on vector type, vector
// index type and INS extension
def : Pat<(v16i8 (int_aarch64_neon_vcopy_lane
                   (v16i8 V128:$Vd), VectorIndexB:$idx, (v16i8 V128:$Vs),
                   VectorIndexB:$idx2)),
          (v16i8 (INSvi8lane
                   V128:$Vd, VectorIndexB:$idx, V128:$Vs, VectorIndexB:$idx2)
          )>;
def : Pat<(v8i16 (int_aarch64_neon_vcopy_lane
                   (v8i16 V128:$Vd), VectorIndexH:$idx, (v8i16 V128:$Vs),
                   VectorIndexH:$idx2)),
          (v8i16 (INSvi16lane
                   V128:$Vd, VectorIndexH:$idx, V128:$Vs, VectorIndexH:$idx2)
          )>;
def : Pat<(v4i32 (int_aarch64_neon_vcopy_lane
                   (v4i32 V128:$Vd), VectorIndexS:$idx, (v4i32 V128:$Vs),
                   VectorIndexS:$idx2)),
          (v4i32 (INSvi32lane
                   V128:$Vd, VectorIndexS:$idx, V128:$Vs, VectorIndexS:$idx2)
          )>;
def : Pat<(v2i64 (int_aarch64_neon_vcopy_lane
                   (v2i64 V128:$Vd), VectorIndexD:$idx, (v2i64 V128:$Vs),
                   VectorIndexD:$idx2)),
          (v2i64 (INSvi64lane
                   V128:$Vd, VectorIndexD:$idx, V128:$Vs, VectorIndexD:$idx2)
          )>;

multiclass Neon_INS_elt_pattern<ValueType VT128, ValueType VT64,
                                ValueType VTScal, Instruction INS> {
  def : Pat<(VT128 (vector_insert V128:$src,
                     (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
                     imm:$Immd)),
            (INS V128:$src, imm:$Immd, V128:$Rn, imm:$Immn)>;

  def : Pat<(VT128 (vector_insert V128:$src,
                     (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
                     imm:$Immd)),
            (INS V128:$src, imm:$Immd,
                 (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn)>;

  def : Pat<(VT64 (vector_insert V64:$src,
                    (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
                    imm:$Immd)),
            (EXTRACT_SUBREG (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub),
                                 imm:$Immd, V128:$Rn, imm:$Immn),
                            dsub)>;

  def : Pat<(VT64 (vector_insert V64:$src,
                    (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
                    imm:$Immd)),
            (EXTRACT_SUBREG
              (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub), imm:$Immd,
                   (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn),
              dsub)>;
}

defm : Neon_INS_elt_pattern<v8f16, v4f16, f16, INSvi16lane>;
defm : Neon_INS_elt_pattern<v8bf16, v4bf16, bf16, INSvi16lane>;
defm : Neon_INS_elt_pattern<v4f32, v2f32, f32, INSvi32lane>;
defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, INSvi64lane>;


// Floating point vector extractions are codegen'd as either a sequence of
// subregister extractions, or a MOV (aka DUP here) if the lane number is
// anything other than zero.
def : Pat<(vector_extract (v2f64 V128:$Rn), 0),
          (f64 (EXTRACT_SUBREG V128:$Rn, dsub))>;
def : Pat<(vector_extract (v4f32 V128:$Rn), 0),
          (f32 (EXTRACT_SUBREG V128:$Rn, ssub))>;
def : Pat<(vector_extract (v8f16 V128:$Rn), 0),
          (f16 (EXTRACT_SUBREG V128:$Rn, hsub))>;
def : Pat<(vector_extract (v8bf16 V128:$Rn), 0),
          (bf16 (EXTRACT_SUBREG V128:$Rn, hsub))>;


def : Pat<(vector_extract (v2f64 V128:$Rn), VectorIndexD:$idx),
          (f64 (DUPi64 V128:$Rn, VectorIndexD:$idx))>;
def : Pat<(vector_extract (v4f32 V128:$Rn), VectorIndexS:$idx),
          (f32 (DUPi32 V128:$Rn, VectorIndexS:$idx))>;
def : Pat<(vector_extract (v8f16 V128:$Rn), VectorIndexH:$idx),
          (f16 (DUPi16 V128:$Rn, VectorIndexH:$idx))>;
def : Pat<(vector_extract (v8bf16 V128:$Rn), VectorIndexH:$idx),
          (bf16 (DUPi16 V128:$Rn, VectorIndexH:$idx))>;
// All concat_vectors operations are canonicalised to act on i64 vectors for
// AArch64. In the general case we need an instruction, which might just as
// well be INS.
class ConcatPat<ValueType DstTy, ValueType SrcTy>
  : Pat<(DstTy (concat_vectors (SrcTy V64:$Rd), V64:$Rn)),
        (INSvi64lane (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), 1,
                     (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub), 0)>;

def : ConcatPat<v2i64, v1i64>;
def : ConcatPat<v2f64, v1f64>;
def : ConcatPat<v4i32, v2i32>;
def : ConcatPat<v4f32, v2f32>;
def : ConcatPat<v8i16, v4i16>;
def : ConcatPat<v8f16, v4f16>;
def : ConcatPat<v8bf16, v4bf16>;
def : ConcatPat<v16i8, v8i8>;

// If the high lanes are undef, though, we can just ignore them:
class ConcatUndefPat<ValueType DstTy, ValueType SrcTy>
  : Pat<(DstTy (concat_vectors (SrcTy V64:$Rn), undef)),
        (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub)>;

def : ConcatUndefPat<v2i64, v1i64>;
def : ConcatUndefPat<v2f64, v1f64>;
def : ConcatUndefPat<v4i32, v2i32>;
def : ConcatUndefPat<v4f32, v2f32>;
def : ConcatUndefPat<v8i16, v4i16>;
def : ConcatUndefPat<v16i8, v8i8>;

//----------------------------------------------------------------------------
// AdvSIMD across lanes instructions
//----------------------------------------------------------------------------

defm ADDV    : SIMDAcrossLanesBHS<0, 0b11011, "addv">;
defm SMAXV   : SIMDAcrossLanesBHS<0, 0b01010, "smaxv">;
defm SMINV   : SIMDAcrossLanesBHS<0, 0b11010, "sminv">;
defm UMAXV   : SIMDAcrossLanesBHS<1, 0b01010, "umaxv">;
defm UMINV   : SIMDAcrossLanesBHS<1, 0b11010, "uminv">;
defm SADDLV  : SIMDAcrossLanesHSD<0, 0b00011, "saddlv">;
defm UADDLV  : SIMDAcrossLanesHSD<1, 0b00011, "uaddlv">;
defm FMAXNMV : SIMDFPAcrossLanes<0b01100, 0, "fmaxnmv", int_aarch64_neon_fmaxnmv>;
defm FMAXV   : SIMDFPAcrossLanes<0b01111, 0, "fmaxv", int_aarch64_neon_fmaxv>;
defm FMINNMV : SIMDFPAcrossLanes<0b01100, 1, "fminnmv", int_aarch64_neon_fminnmv>;
defm FMINV   : SIMDFPAcrossLanes<0b01111, 1, "fminv", int_aarch64_neon_fminv>;

// Patterns for uaddv(uaddlp(x)) ==> uaddlv
def : Pat<(i32 (vector_extract (v8i16 (insert_subvector undef,
            (v4i16 (AArch64uaddv (v4i16 (AArch64uaddlp (v8i8 V64:$op))))),
            (i64 0))), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
            (UADDLVv8i8v V64:$op), hsub), ssub)>;
def : Pat<(i32 (vector_extract (v8i16 (AArch64uaddv (v8i16 (AArch64uaddlp
            (v16i8 V128:$op))))), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
            (UADDLVv16i8v V128:$op), hsub), ssub)>;
def : Pat<(v4i32 (AArch64uaddv (v4i32 (AArch64uaddlp (v8i16 V128:$op))))),
          (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), (UADDLVv8i16v V128:$op), ssub)>;

// Patterns for addp(uaddlp(x)) ==> uaddlv
def : Pat<(v2i32 (AArch64uaddv (v2i32 (AArch64uaddlp (v4i16 V64:$op))))),
          (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)), (UADDLVv4i16v V64:$op), ssub)>;
def : Pat<(v2i64 (AArch64uaddv (v2i64 (AArch64uaddlp (v4i32 V128:$op))))),
          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (UADDLVv4i32v V128:$op), dsub)>;

// Patterns for across-vector intrinsics that have a node equivalent that
// returns a vector (with only the low lane defined) instead of a scalar.
// In effect, opNode is the same as (scalar_to_vector (IntNode)).
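// Editorial sketch (not from the original source): for example,
// (v16i8 (AArch64uaddv V128:$Rn)) leaves the reduction result in lane 0, just
// like (scalar_to_vector (int_aarch64_neon_uaddv ...)). The first group of
// patterns below matches the node form directly, e.g. to
//   addv b0, v0.16b
// while the later groups also fold away the vector_extract of lane 0 (and any
// sign/zero extension) that legalization may have wrapped around it.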
multiclass SIMDAcrossLanesIntrinsic<string baseOpc,
                                    SDPatternOperator opNode> {
// If a lane instruction caught the vector_extract around opNode, we can
// directly match the latter to the instruction.
def : Pat<(v8i8 (opNode V64:$Rn)),
          (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub)>;
def : Pat<(v16i8 (opNode V128:$Rn)),
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub)>;
def : Pat<(v4i16 (opNode V64:$Rn)),
          (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub)>;
def : Pat<(v8i16 (opNode V128:$Rn)),
          (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub)>;
def : Pat<(v4i32 (opNode V128:$Rn)),
          (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), ssub)>;


// If none did, fall back to the explicit patterns, consuming the
// vector_extract.
def : Pat<(i32 (vector_extract (insert_subvector undef, (v8i8 (opNode V64:$Rn)),
            (i64 0)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn),
            bsub), ssub)>;
def : Pat<(i32 (vector_extract (v16i8 (opNode V128:$Rn)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn),
            bsub), ssub)>;
def : Pat<(i32 (vector_extract (insert_subvector undef,
            (v4i16 (opNode V64:$Rn)), (i64 0)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn),
            hsub), ssub)>;
def : Pat<(i32 (vector_extract (v8i16 (opNode V128:$Rn)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn),
            hsub), ssub)>;
def : Pat<(i32 (vector_extract (v4i32 (opNode V128:$Rn)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn),
            ssub), ssub)>;

}

multiclass SIMDAcrossLanesSignedIntrinsic<string baseOpc,
                                          SDPatternOperator opNode>
    : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
// If there is a sign extension after this intrinsic, consume it, as SMOV
// already performed it.
def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
            (opNode (v8i8 V64:$Rn)), (i64 0)), (i64 0))), i8)),
          (i32 (SMOVvi8to32
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
            (i64 0)))>;
def : Pat<(i32 (sext_inreg (i32 (vector_extract
            (opNode (v16i8 V128:$Rn)), (i64 0))), i8)),
          (i32 (SMOVvi8to32
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
            (i64 0)))>;
def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
            (opNode (v4i16 V64:$Rn)), (i64 0)), (i64 0))), i16)),
          (i32 (SMOVvi16to32
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
            (i64 0)))>;
def : Pat<(i32 (sext_inreg (i32 (vector_extract
            (opNode (v8i16 V128:$Rn)), (i64 0))), i16)),
          (i32 (SMOVvi16to32
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
            (i64 0)))>;
}
multiclass SIMDAcrossLanesUnsignedIntrinsic<string baseOpc,
                                            SDPatternOperator opNode>
    : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
// If there is a masking operation keeping only what has been actually
// generated, consume it.
def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
            (opNode (v8i8 V64:$Rn)), (i64 0)), (i64 0))), maski8_or_more)),
          (i32 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
            ssub))>;
def : Pat<(i32 (and (i32 (vector_extract (opNode (v16i8 V128:$Rn)), (i64 0))),
            maski8_or_more)),
          (i32 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
            ssub))>;
def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
            (opNode (v4i16 V64:$Rn)), (i64 0)), (i64 0))), maski16_or_more)),
          (i32 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
            ssub))>;
def : Pat<(i32 (and (i32 (vector_extract (opNode (v8i16 V128:$Rn)), (i64 0))),
            maski16_or_more)),
          (i32 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
            ssub))>;
}

defm : SIMDAcrossLanesSignedIntrinsic<"ADDV", AArch64saddv>;
// vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
def : Pat<(v2i32 (AArch64saddv (v2i32 V64:$Rn))),
          (ADDPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesUnsignedIntrinsic<"ADDV", AArch64uaddv>;
// vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
def : Pat<(v2i32 (AArch64uaddv (v2i32 V64:$Rn))),
          (ADDPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesSignedIntrinsic<"SMAXV", AArch64smaxv>;
def : Pat<(v2i32 (AArch64smaxv (v2i32 V64:$Rn))),
          (SMAXPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesSignedIntrinsic<"SMINV", AArch64sminv>;
def : Pat<(v2i32 (AArch64sminv (v2i32 V64:$Rn))),
          (SMINPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesUnsignedIntrinsic<"UMAXV", AArch64umaxv>;
def : Pat<(v2i32 (AArch64umaxv (v2i32 V64:$Rn))),
          (UMAXPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesUnsignedIntrinsic<"UMINV", AArch64uminv>;
def : Pat<(v2i32 (AArch64uminv (v2i32 V64:$Rn))),
          (UMINPv2i32 V64:$Rn, V64:$Rn)>;

multiclass SIMDAcrossLanesSignedLongIntrinsic<string baseOpc, Intrinsic intOp> {
  def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
            (i32 (SMOVvi16to32
              (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
              (i64 0)))>;
  def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
            (i32 (SMOVvi16to32
              (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
              (i64 0)))>;

  def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
            (i32 (EXTRACT_SUBREG
              (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
              ssub))>;
  def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
            (i32 (EXTRACT_SUBREG
              (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
              ssub))>;

  def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
            (i64 (EXTRACT_SUBREG
              (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
              dsub))>;
}
multiclass SIMDAcrossLanesUnsignedLongIntrinsic<string baseOpc,
                                                Intrinsic intOp> {
  def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
            (i32 (EXTRACT_SUBREG
              (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
              ssub))>;
  def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
            (i32 (EXTRACT_SUBREG
              (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
              ssub))>;

  def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
            (i32 (EXTRACT_SUBREG
              (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
              ssub))>;
  def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
            (i32 (EXTRACT_SUBREG
              (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
              ssub))>;

  def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
            (i64 (EXTRACT_SUBREG
              (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
              dsub))>;
}

defm : SIMDAcrossLanesSignedLongIntrinsic<"SADDLV", int_aarch64_neon_saddlv>;
defm : SIMDAcrossLanesUnsignedLongIntrinsic<"UADDLV", int_aarch64_neon_uaddlv>;

// The vaddlv_s32 intrinsic gets mapped to SADDLP.
def : Pat<(i64 (int_aarch64_neon_saddlv (v2i32 V64:$Rn))),
          (i64 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (SADDLPv2i32_v1i64 V64:$Rn), dsub),
            dsub))>;
// The vaddlv_u32 intrinsic gets mapped to UADDLP.
def : Pat<(i64 (int_aarch64_neon_uaddlv (v2i32 V64:$Rn))),
          (i64 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (UADDLPv2i32_v1i64 V64:$Rn), dsub),
            dsub))>;

//------------------------------------------------------------------------------
// AdvSIMD modified immediate instructions
//------------------------------------------------------------------------------

// AdvSIMD BIC
defm BIC : SIMDModifiedImmVectorShiftTied<1, 0b11, 0b01, "bic", AArch64bici>;
// AdvSIMD ORR
defm ORR : SIMDModifiedImmVectorShiftTied<0, 0b11, 0b01, "orr", AArch64orri>;

def : InstAlias<"bic $Vd.4h, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic $Vd.8h, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"bic $Vd.2s, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic $Vd.4s, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;

def : InstAlias<"bic.4h $Vd, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic.8h $Vd, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"bic.2s $Vd, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic.4s $Vd, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;

def : InstAlias<"orr $Vd.4h, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr $Vd.8h, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"orr $Vd.2s, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr $Vd.4s, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;

def : InstAlias<"orr.4h $Vd, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr.8h $Vd, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"orr.2s $Vd, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr.4s $Vd, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;

// AdvSIMD FMOV
def FMOVv2f64_ns : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1111, V128, fpimm8,
                                                "fmov", ".2d",
                       [(set (v2f64 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
def FMOVv2f32_ns : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1111, V64, fpimm8,
                                                "fmov", ".2s",
                       [(set (v2f32 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
def FMOVv4f32_ns : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1111, V128, fpimm8,
                                                "fmov", ".4s",
                       [(set (v4f32 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
let Predicates = [HasNEON, HasFullFP16] in {
def FMOVv4f16_ns : SIMDModifiedImmVectorNoShift<0, 0, 1, 0b1111, V64, fpimm8,
                                                "fmov", ".4h",
                       [(set (v4f16 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
def FMOVv8f16_ns : SIMDModifiedImmVectorNoShift<1, 0, 1, 0b1111, V128, fpimm8,
                                                "fmov", ".8h",
                       [(set (v8f16 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
} // Predicates = [HasNEON, HasFullFP16]

// AdvSIMD MOVI

// EDIT byte mask: scalar
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOVID : SIMDModifiedImmScalarNoShift<0, 1, 0b1110, "movi",
                                         [(set FPR64:$Rd, simdimmtype10:$imm8)]>;
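// Worked example (editorial, not from the original source): the type-10
// "byte mask" immediate expands each bit of imm8 into a 0x00 or 0xff byte of
// the 64-bit result (bit 0 -> least significant byte), so imm8 = 0b01100001
// encodes
//   movi d0, #0x00ffff00000000ff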
// AdvSIMD MOVI

// EDIT byte mask: scalar
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOVID : SIMDModifiedImmScalarNoShift<0, 1, 0b1110, "movi",
                                         [(set FPR64:$Rd, simdimmtype10:$imm8)]>;
// The movi_edit node has the immediate value already encoded, so we use
// a plain imm0_255 here.
def : Pat<(f64 (AArch64movi_edit imm0_255:$shift)),
          (MOVID imm0_255:$shift)>;

// EDIT byte mask: 2d

// The movi_edit node has the immediate value already encoded, so we use
// a plain imm0_255 in the pattern
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOVIv2d_ns : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1110, V128,
                                              simdimmtype10,
                                              "movi", ".2d",
                   [(set (v2i64 V128:$Rd), (AArch64movi_edit imm0_255:$imm8))]>;

def : Pat<(v2i64 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v4i32 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v8i16 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v16i8 immAllZerosV), (MOVIv2d_ns (i32 0))>;

def : Pat<(v2i64 immAllOnesV), (MOVIv2d_ns (i32 255))>;
def : Pat<(v4i32 immAllOnesV), (MOVIv2d_ns (i32 255))>;
def : Pat<(v8i16 immAllOnesV), (MOVIv2d_ns (i32 255))>;
def : Pat<(v16i8 immAllOnesV), (MOVIv2d_ns (i32 255))>;

// Set 64-bit vectors to all 0/1 by extracting from a 128-bit register as the
// extract is free and this gives better MachineCSE results.
def : Pat<(v1i64 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
def : Pat<(v2i32 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
def : Pat<(v4i16 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
def : Pat<(v8i8  immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;

def : Pat<(v1i64 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
def : Pat<(v2i32 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
def : Pat<(v4i16 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
def : Pat<(v8i8  immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
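// (Illustrative: the byte-mask form replicates each bit of imm8 across a
// whole byte, so imm8 0 yields an all-zeros vector and imm8 255 an all-ones
// vector, which is why a single MOVIv2d_ns covers every integer type above.)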
// EDIT per word & halfword: 2s, 4h, 4s, & 8h
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
defm MOVI : SIMDModifiedImmVectorShift<0, 0b10, 0b00, "movi">;

def : InstAlias<"movi $Vd.4h, $imm", (MOVIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.8h, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.2s, $imm", (MOVIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.4s, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : InstAlias<"movi.4h $Vd, $imm", (MOVIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.8h $Vd, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.2s $Vd, $imm", (MOVIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.4s $Vd, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : Pat<(v2i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv2i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv4i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv4i16 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v8i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv8i16 imm0_255:$imm8, imm:$shift)>;

let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
// EDIT per word: 2s & 4s with MSL shifter
def MOVIv2s_msl : SIMDModifiedImmMoveMSL<0, 0, {1,1,0,?}, V64, "movi", ".2s",
                      [(set (v2i32 V64:$Rd),
                            (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
def MOVIv4s_msl : SIMDModifiedImmMoveMSL<1, 0, {1,1,0,?}, V128, "movi", ".4s",
                      [(set (v4i32 V128:$Rd),
                            (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;

// Per byte: 8b & 16b
def MOVIv8b_ns  : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1110, V64, imm0_255,
                                               "movi", ".8b",
                       [(set (v8i8 V64:$Rd), (AArch64movi imm0_255:$imm8))]>;

def MOVIv16b_ns : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1110, V128, imm0_255,
                                               "movi", ".16b",
                       [(set (v16i8 V128:$Rd), (AArch64movi imm0_255:$imm8))]>;
}
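// (Illustrative: the MSL shifter used by the _msl forms above shifts ones
// rather than zeros into the low bits, so e.g. "movi v0.2s, #0xff, msl #8"
// materializes 0x0000ffff in each lane.)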
// AdvSIMD MVNI

// EDIT per word & halfword: 2s, 4h, 4s, & 8h
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
defm MVNI : SIMDModifiedImmVectorShift<1, 0b10, 0b00, "mvni">;

def : InstAlias<"mvni $Vd.4h, $imm", (MVNIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.8h, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.2s, $imm", (MVNIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.4s, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : InstAlias<"mvni.4h $Vd, $imm", (MVNIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.8h $Vd, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.2s $Vd, $imm", (MVNIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.4s $Vd, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : Pat<(v2i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv2i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv4i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv4i16 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v8i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv8i16 imm0_255:$imm8, imm:$shift)>;

// EDIT per word: 2s & 4s with MSL shifter
let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def MVNIv2s_msl : SIMDModifiedImmMoveMSL<0, 1, {1,1,0,?}, V64, "mvni", ".2s",
                      [(set (v2i32 V64:$Rd),
                            (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
def MVNIv4s_msl : SIMDModifiedImmMoveMSL<1, 1, {1,1,0,?}, V128, "mvni", ".4s",
                      [(set (v4i32 V128:$Rd),
                            (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
}

//----------------------------------------------------------------------------
// AdvSIMD indexed element
//----------------------------------------------------------------------------

let hasSideEffects = 0 in {
  defm FMLA : SIMDFPIndexedTied<0, 0b0001, "fmla">;
  defm FMLS : SIMDFPIndexedTied<0, 0b0101, "fmls">;
}

// NOTE: Operands are reordered in the FMLA/FMLS PatFrags because the
// instruction expects the addend first, while the intrinsic expects it last.

// On the other hand, there are quite a few valid combinatorial options due to
// the commutativity of multiplication and the fact that (-x) * y = x * (-y).
defm : SIMDFPIndexedTiedPatterns<"FMLA",
           TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)>>;
defm : SIMDFPIndexedTiedPatterns<"FMLA",
           TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)>>;

defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma node:$RHS, (fneg node:$MHS), node:$LHS)> >;
defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma (fneg node:$RHS), node:$MHS, node:$LHS)> >;
defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma (fneg node:$MHS), node:$RHS, node:$LHS)> >;

multiclass FMLSIndexedAfterNegPatterns<SDPatternOperator OpNode> {
  // 3 variants for the .2s version: DUPLANE from 128-bit, DUPLANE from 64-bit
  // and DUP scalar.
  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
                           (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
                                             VectorIndexS:$idx))),
            (FMLSv2i32_indexed V64:$Rd, V64:$Rn, V128:$Rm, VectorIndexS:$idx)>;
  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
                           (v2f32 (AArch64duplane32
                                      (v4f32 (insert_subvector undef,
                                                 (v2f32 (fneg V64:$Rm)),
                                                 (i64 0))),
                                      VectorIndexS:$idx)))),
            (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
                               (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                               VectorIndexS:$idx)>;
  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
                           (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
            (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
                               (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;

  // 3 variants for the .4s version: DUPLANE from 128-bit, DUPLANE from 64-bit
  // and DUP scalar.
  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
                           (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
                                             VectorIndexS:$idx))),
            (FMLSv4i32_indexed V128:$Rd, V128:$Rn, V128:$Rm,
                               VectorIndexS:$idx)>;
  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
                           (v4f32 (AArch64duplane32
                                      (v4f32 (insert_subvector undef,
                                                 (v2f32 (fneg V64:$Rm)),
                                                 (i64 0))),
                                      VectorIndexS:$idx)))),
            (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
                               (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                               VectorIndexS:$idx)>;
  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
                           (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
            (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
                               (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;

  // 2 variants for the .2d version: DUPLANE from 128-bit, and DUP scalar
  // (DUPLANE from 64-bit would be trivial).
  def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
                           (AArch64duplane64 (v2f64 (fneg V128:$Rm)),
                                             VectorIndexD:$idx))),
            (FMLSv2i64_indexed
                V128:$Rd, V128:$Rn, V128:$Rm, VectorIndexS:$idx)>;
  def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
                           (AArch64dup (f64 (fneg FPR64Op:$Rm))))),
            (FMLSv2i64_indexed V128:$Rd, V128:$Rn,
                               (SUBREG_TO_REG (i32 0), FPR64Op:$Rm, dsub), (i64 0))>;

  // 2 variants for 32-bit scalar version: extract from .2s or from .4s
  def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
                         (vector_extract (v4f32 (fneg V128:$Rm)),
                                         VectorIndexS:$idx))),
            (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
                               V128:$Rm, VectorIndexS:$idx)>;
  def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
                         (vector_extract (v4f32 (insert_subvector undef,
                                                    (v2f32 (fneg V64:$Rm)),
                                                    (i64 0))),
                                         VectorIndexS:$idx))),
            (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
                               (SUBREG_TO_REG (i32 0), V64:$Rm, dsub), VectorIndexS:$idx)>;

  // 1 variant for 64-bit scalar version: extract from .1d or from .2d
  def : Pat<(f64 (OpNode (f64 FPR64:$Rd), (f64 FPR64:$Rn),
                         (vector_extract (v2f64 (fneg V128:$Rm)),
                                         VectorIndexS:$idx))),
            (FMLSv1i64_indexed FPR64:$Rd, FPR64:$Rn,
                               V128:$Rm, VectorIndexS:$idx)>;
}

defm : FMLSIndexedAfterNegPatterns<
           TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
defm : FMLSIndexedAfterNegPatterns<
           TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)> >;

defm FMULX : SIMDFPIndexed<1, 0b1001, "fmulx", int_aarch64_neon_fmulx>;
defm FMUL  : SIMDFPIndexed<0, 0b1001, "fmul", fmul>;

def : Pat<(v2f32 (fmul V64:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
          (FMULv2i32_indexed V64:$Rn,
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
            (i64 0))>;
def : Pat<(v4f32 (fmul V128:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
          (FMULv4i32_indexed V128:$Rn,
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
            (i64 0))>;
def : Pat<(v2f64 (fmul V128:$Rn, (AArch64dup (f64 FPR64:$Rm)))),
          (FMULv2i64_indexed V128:$Rn,
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rm, dsub),
            (i64 0))>;

defm SQDMULH  : SIMDIndexedHS<0, 0b1100, "sqdmulh", int_aarch64_neon_sqdmulh>;
defm SQRDMULH : SIMDIndexedHS<0, 0b1101, "sqrdmulh", int_aarch64_neon_sqrdmulh>;

defm SQDMULH  : SIMDIndexedHSPatterns<int_aarch64_neon_sqdmulh_lane,
                                      int_aarch64_neon_sqdmulh_laneq>;
defm SQRDMULH : SIMDIndexedHSPatterns<int_aarch64_neon_sqrdmulh_lane,
                                      int_aarch64_neon_sqrdmulh_laneq>;

// Generated by MachineCombine
defm MLA : SIMDVectorIndexedHSTied<1, 0b0000, "mla", null_frag>;
defm MLS : SIMDVectorIndexedHSTied<1, 0b0100, "mls", null_frag>;

defm MUL : SIMDVectorIndexedHS<0, 0b1000, "mul", mul>;
defm SMLAL : SIMDVectorIndexedLongSDTied<0, 0b0010, "smlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMLSL : SIMDVectorIndexedLongSDTied<0, 0b0110, "smlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMULL : SIMDVectorIndexedLongSD<0, 0b1010, "smull",
                                     int_aarch64_neon_smull>;
defm SQDMLAL : SIMDIndexedLongSQDMLXSDTied<0, 0b0011, "sqdmlal",
                                           int_aarch64_neon_sqadd>;
defm SQDMLSL : SIMDIndexedLongSQDMLXSDTied<0, 0b0111, "sqdmlsl",
                                           int_aarch64_neon_sqsub>;
defm SQRDMLAH : SIMDIndexedSQRDMLxHSDTied<1, 0b1101, "sqrdmlah",
                                          int_aarch64_neon_sqrdmlah>;
defm SQRDMLSH : SIMDIndexedSQRDMLxHSDTied<1, 0b1111, "sqrdmlsh",
                                          int_aarch64_neon_sqrdmlsh>;
defm SQDMULL : SIMDIndexedLongSD<0, 0b1011, "sqdmull", int_aarch64_neon_sqdmull>;
defm UMLAL : SIMDVectorIndexedLongSDTied<1, 0b0010, "umlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMLSL : SIMDVectorIndexedLongSDTied<1, 0b0110, "umlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMULL : SIMDVectorIndexedLongSD<1, 0b1010, "umull",
                                     int_aarch64_neon_umull>;

// A scalar sqdmull with the second operand being a vector lane can be
// handled directly with the indexed instruction encoding.
def : Pat<(int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                            (vector_extract (v4i32 V128:$Vm),
                                                            VectorIndexS:$idx)),
          (SQDMULLv1i64_indexed FPR32:$Rn, V128:$Vm, VectorIndexS:$idx)>;

// Match an 'add' node, and also treat an 'or' node as an 'add' if the or'ed
// operands have no common bits.
def add_and_or_is_add : PatFrags<(ops node:$lhs, node:$rhs),
         [(add node:$lhs, node:$rhs), (or node:$lhs, node:$rhs)],[{
   if (N->getOpcode() == ISD::ADD)
     return true;
   return CurDAG->haveNoCommonBitsSet(N->getOperand(0), N->getOperand(1));
}]> {
  let GISelPredicateCode = [{
     // Only handle G_ADD for now. FIXME: build capability to compute whether
     // operands of G_OR have common bits set or not.
     return MI.getOpcode() == TargetOpcode::G_ADD;
  }];
}
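// (Illustrative: in (or (shl x, #16), (and y, #0xffff)) the two operands
// have no bits in common, so the 'or' is equivalent to an 'add' and may be
// folded into the accumulating shift instructions below, e.g. SSRA/USRA.)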
//----------------------------------------------------------------------------
// AdvSIMD scalar shift instructions
//----------------------------------------------------------------------------
defm FCVTZS : SIMDFPScalarRShift<0, 0b11111, "fcvtzs">;
defm FCVTZU : SIMDFPScalarRShift<1, 0b11111, "fcvtzu">;
defm SCVTF  : SIMDFPScalarRShift<0, 0b11100, "scvtf">;
defm UCVTF  : SIMDFPScalarRShift<1, 0b11100, "ucvtf">;
// Codegen patterns for the above. We don't put these directly on the
// instructions because TableGen's type inference can't handle the truth.
// Having the same base pattern for fp <--> int totally freaks it out.
def : Pat<(int_aarch64_neon_vcvtfp2fxs FPR32:$Rn, vecshiftR32:$imm),
          (FCVTZSs FPR32:$Rn, vecshiftR32:$imm)>;
def : Pat<(int_aarch64_neon_vcvtfp2fxu FPR32:$Rn, vecshiftR32:$imm),
          (FCVTZUs FPR32:$Rn, vecshiftR32:$imm)>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f64 FPR64:$Rn), vecshiftR64:$imm)),
          (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f64 FPR64:$Rn), vecshiftR64:$imm)),
          (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxs (v1f64 FPR64:$Rn),
                                              vecshiftR64:$imm)),
          (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxu (v1f64 FPR64:$Rn),
                                              vecshiftR64:$imm)),
          (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR32:$imm),
          (UCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
def : Pat<(f64 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
          (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1f64 (int_aarch64_neon_vcvtfxs2fp (v1i64 FPR64:$Rn),
                                              vecshiftR64:$imm)),
          (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(f64 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
          (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1f64 (int_aarch64_neon_vcvtfxu2fp (v1i64 FPR64:$Rn),
                                              vecshiftR64:$imm)),
          (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(int_aarch64_neon_vcvtfxs2fp FPR32:$Rn, vecshiftR32:$imm),
          (SCVTFs FPR32:$Rn, vecshiftR32:$imm)>;

// Patterns for FP16 intrinsics - these require a reg copy to/from, as i16s
// are not supported.

def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 (sext_inreg FPR32:$Rn, i16)), vecshiftR16:$imm)),
          (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 FPR32:$Rn), vecshiftR16:$imm)),
          (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
          (SCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp
            (and FPR32:$Rn, (i32 65535)),
            vecshiftR16:$imm)),
          (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR16:$imm)),
          (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
          (UCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR32:$imm)),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FCVTZSh FPR16:$Rn, vecshiftR32:$imm),
            hsub))>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR64:$imm)),
          (i64 (INSERT_SUBREG
            (i64 (IMPLICIT_DEF)),
            (FCVTZSh FPR16:$Rn, vecshiftR64:$imm),
            hsub))>;
def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR32:$imm)),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FCVTZUh FPR16:$Rn, vecshiftR32:$imm),
            hsub))>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR64:$imm)),
          (i64 (INSERT_SUBREG
            (i64 (IMPLICIT_DEF)),
            (FCVTZUh FPR16:$Rn, vecshiftR64:$imm),
            hsub))>;
def : Pat<(i32 (int_aarch64_neon_facge (f16 FPR16:$Rn), (f16 FPR16:$Rm))),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FACGE16 FPR16:$Rn, FPR16:$Rm),
            hsub))>;
def : Pat<(i32 (int_aarch64_neon_facgt (f16 FPR16:$Rn), (f16 FPR16:$Rm))),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FACGT16 FPR16:$Rn, FPR16:$Rm),
            hsub))>;

defm SHL      : SIMDScalarLShiftD<   0, 0b01010, "shl", AArch64vshl>;
defm SLI      : SIMDScalarLShiftDTied<1, 0b01010, "sli">;
defm SQRSHRN  : SIMDScalarRShiftBHS< 0, 0b10011, "sqrshrn",
                                     int_aarch64_neon_sqrshrn>;
defm SQRSHRUN : SIMDScalarRShiftBHS< 1, 0b10001, "sqrshrun",
                                     int_aarch64_neon_sqrshrun>;
defm SQSHLU   : SIMDScalarLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
defm SQSHL    : SIMDScalarLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
defm SQSHRN   : SIMDScalarRShiftBHS< 0, 0b10010, "sqshrn",
                                     int_aarch64_neon_sqshrn>;
defm SQSHRUN  : SIMDScalarRShiftBHS< 1, 0b10000, "sqshrun",
                                     int_aarch64_neon_sqshrun>;
defm SRI      : SIMDScalarRShiftDTied<1, 0b01000, "sri">;
defm SRSHR    : SIMDScalarRShiftD<   0, 0b00100, "srshr", AArch64srshri>;
defm SRSRA    : SIMDScalarRShiftDTied<0, 0b00110, "srsra",
    TriOpFrag<(add node:$LHS,
                   (AArch64srshri node:$MHS, node:$RHS))>>;
defm SSHR     : SIMDScalarRShiftD<   0, 0b00000, "sshr", AArch64vashr>;
defm SSRA     : SIMDScalarRShiftDTied<0, 0b00010, "ssra",
    TriOpFrag<(add_and_or_is_add node:$LHS,
                   (AArch64vashr node:$MHS, node:$RHS))>>;
defm UQRSHRN  : SIMDScalarRShiftBHS< 1, 0b10011, "uqrshrn",
                                     int_aarch64_neon_uqrshrn>;
defm UQSHL    : SIMDScalarLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
defm UQSHRN   : SIMDScalarRShiftBHS< 1, 0b10010, "uqshrn",
                                     int_aarch64_neon_uqshrn>;
defm URSHR    : SIMDScalarRShiftD<   1, 0b00100, "urshr", AArch64urshri>;
defm URSRA    : SIMDScalarRShiftDTied<1, 0b00110, "ursra",
    TriOpFrag<(add node:$LHS,
                   (AArch64urshri node:$MHS, node:$RHS))>>;
defm USHR     : SIMDScalarRShiftD<   1, 0b00000, "ushr", AArch64vlshr>;
defm USRA     : SIMDScalarRShiftDTied<1, 0b00010, "usra",
    TriOpFrag<(add_and_or_is_add node:$LHS,
                   (AArch64vlshr node:$MHS, node:$RHS))>>;
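// (Illustrative: the accumulating forms compute Rd += (Rn >> imm); e.g.
// "ssra d0, d1, #3" performs d0 += (d1 >> 3) with an arithmetic shift, which
// is why their patterns are the add-of-shift TriOpFrags above.)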
//----------------------------------------------------------------------------
// AdvSIMD vector shift instructions
//----------------------------------------------------------------------------
defm FCVTZS : SIMDVectorRShiftSD<0, 0b11111, "fcvtzs", int_aarch64_neon_vcvtfp2fxs>;
defm FCVTZU : SIMDVectorRShiftSD<1, 0b11111, "fcvtzu", int_aarch64_neon_vcvtfp2fxu>;
defm SCVTF  : SIMDVectorRShiftToFP<0, 0b11100, "scvtf",
                                   int_aarch64_neon_vcvtfxs2fp>;
defm RSHRN  : SIMDVectorRShiftNarrowBHS<0, 0b10001, "rshrn",
                                        int_aarch64_neon_rshrn>;
defm SHL    : SIMDVectorLShiftBHSD<0, 0b01010, "shl", AArch64vshl>;
defm SHRN   : SIMDVectorRShiftNarrowBHS<0, 0b10000, "shrn",
                 BinOpFrag<(trunc (AArch64vashr node:$LHS, node:$RHS))>>;
defm SLI    : SIMDVectorLShiftBHSDTied<1, 0b01010, "sli", AArch64vsli>;
def : Pat<(v1i64 (AArch64vsli (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
                              (i32 vecshiftL64:$imm))),
          (SLId FPR64:$Rd, FPR64:$Rn, vecshiftL64:$imm)>;
defm SQRSHRN  : SIMDVectorRShiftNarrowBHS<0, 0b10011, "sqrshrn",
                                          int_aarch64_neon_sqrshrn>;
defm SQRSHRUN : SIMDVectorRShiftNarrowBHS<1, 0b10001, "sqrshrun",
                                          int_aarch64_neon_sqrshrun>;
defm SQSHLU   : SIMDVectorLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
defm SQSHL    : SIMDVectorLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
defm SQSHRN   : SIMDVectorRShiftNarrowBHS<0, 0b10010, "sqshrn",
                                          int_aarch64_neon_sqshrn>;
defm SQSHRUN  : SIMDVectorRShiftNarrowBHS<1, 0b10000, "sqshrun",
                                          int_aarch64_neon_sqshrun>;
defm SRI      : SIMDVectorRShiftBHSDTied<1, 0b01000, "sri", AArch64vsri>;
def : Pat<(v1i64 (AArch64vsri (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
                              (i32 vecshiftR64:$imm))),
          (SRId FPR64:$Rd, FPR64:$Rn, vecshiftR64:$imm)>;
defm SRSHR    : SIMDVectorRShiftBHSD<0, 0b00100, "srshr", AArch64srshri>;
defm SRSRA    : SIMDVectorRShiftBHSDTied<0, 0b00110, "srsra",
                  TriOpFrag<(add node:$LHS,
                                 (AArch64srshri node:$MHS, node:$RHS))> >;
defm SSHLL    : SIMDVectorLShiftLongBHSD<0, 0b10100, "sshll",
                  BinOpFrag<(AArch64vshl (sext node:$LHS), node:$RHS)>>;

defm SSHR     : SIMDVectorRShiftBHSD<0, 0b00000, "sshr", AArch64vashr>;
defm SSRA     : SIMDVectorRShiftBHSDTied<0, 0b00010, "ssra",
                  TriOpFrag<(add_and_or_is_add node:$LHS, (AArch64vashr node:$MHS, node:$RHS))>>;
defm UCVTF    : SIMDVectorRShiftToFP<1, 0b11100, "ucvtf",
                                     int_aarch64_neon_vcvtfxu2fp>;
defm UQRSHRN  : SIMDVectorRShiftNarrowBHS<1, 0b10011, "uqrshrn",
                                          int_aarch64_neon_uqrshrn>;
defm UQSHL    : SIMDVectorLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
defm UQSHRN   : SIMDVectorRShiftNarrowBHS<1, 0b10010, "uqshrn",
                                          int_aarch64_neon_uqshrn>;
defm URSHR    : SIMDVectorRShiftBHSD<1, 0b00100, "urshr", AArch64urshri>;
defm URSRA    : SIMDVectorRShiftBHSDTied<1, 0b00110, "ursra",
                  TriOpFrag<(add node:$LHS,
                                 (AArch64urshri node:$MHS, node:$RHS))> >;
defm USHLL    : SIMDVectorLShiftLongBHSD<1, 0b10100, "ushll",
                  BinOpFrag<(AArch64vshl (zext node:$LHS), node:$RHS)>>;
defm USHR     : SIMDVectorRShiftBHSD<1, 0b00000, "ushr", AArch64vlshr>;
defm USRA     : SIMDVectorRShiftBHSDTied<1, 0b00010, "usra",
                  TriOpFrag<(add_and_or_is_add node:$LHS, (AArch64vlshr node:$MHS, node:$RHS))> >;

// RADDHN patterns for when RSHRN shifts by half the size of the vector element
def : Pat<(v8i8 (int_aarch64_neon_rshrn (v8i16 V128:$Vn), (i32 8))),
          (RADDHNv8i16_v8i8 V128:$Vn, (v8i16 (MOVIv2d_ns (i32 0))))>;
def : Pat<(v4i16 (int_aarch64_neon_rshrn (v4i32 V128:$Vn), (i32 16))),
          (RADDHNv4i32_v4i16 V128:$Vn, (v4i32 (MOVIv2d_ns (i32 0))))>;
def : Pat<(v2i32 (int_aarch64_neon_rshrn (v2i64 V128:$Vn), (i32 32))),
          (RADDHNv2i64_v2i32 V128:$Vn, (v2i64 (MOVIv2d_ns (i32 0))))>;

// RADDHN2 patterns for when RSHRN shifts by half the size of the vector element
def : Pat<(v16i8 (concat_vectors
                 (v8i8 V64:$Vd),
                 (v8i8 (int_aarch64_neon_rshrn (v8i16 V128:$Vn), (i32 8))))),
          (RADDHNv8i16_v16i8
                 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn,
                 (v8i16 (MOVIv2d_ns (i32 0))))>;
def : Pat<(v8i16 (concat_vectors
                 (v4i16 V64:$Vd),
                 (v4i16 (int_aarch64_neon_rshrn (v4i32 V128:$Vn), (i32 16))))),
          (RADDHNv4i32_v8i16
                 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn,
                 (v4i32 (MOVIv2d_ns (i32 0))))>;
def : Pat<(v4i32 (concat_vectors
                 (v2i32 V64:$Vd),
                 (v2i32 (int_aarch64_neon_rshrn (v2i64 V128:$Vn), (i32 32))))),
          (RADDHNv2i64_v4i32
                 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn,
                 (v2i64 (MOVIv2d_ns (i32 0))))>;
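// (Illustrative: RADDHN returns the high half of (Vn + Vm) with rounding,
// i.e. (Vn + Vm + (1 << (esize/2 - 1))) >> (esize/2), so with Vm == 0 it
// behaves exactly like a rounding shift right by half the element width.)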
// SHRN patterns for when a logical right shift was used instead of arithmetic
// (the immediate guarantees no sign bits actually end up in the result so it
// doesn't matter).
def : Pat<(v8i8 (trunc (AArch64vlshr (v8i16 V128:$Rn), vecshiftR16Narrow:$imm))),
          (SHRNv8i8_shift V128:$Rn, vecshiftR16Narrow:$imm)>;
def : Pat<(v4i16 (trunc (AArch64vlshr (v4i32 V128:$Rn), vecshiftR32Narrow:$imm))),
          (SHRNv4i16_shift V128:$Rn, vecshiftR32Narrow:$imm)>;
def : Pat<(v2i32 (trunc (AArch64vlshr (v2i64 V128:$Rn), vecshiftR64Narrow:$imm))),
          (SHRNv2i32_shift V128:$Rn, vecshiftR64Narrow:$imm)>;

def : Pat<(v16i8 (concat_vectors (v8i8 V64:$Rd),
                                 (trunc (AArch64vlshr (v8i16 V128:$Rn),
                                                      vecshiftR16Narrow:$imm)))),
          (SHRNv16i8_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
                           V128:$Rn, vecshiftR16Narrow:$imm)>;
def : Pat<(v8i16 (concat_vectors (v4i16 V64:$Rd),
                                 (trunc (AArch64vlshr (v4i32 V128:$Rn),
                                                      vecshiftR32Narrow:$imm)))),
          (SHRNv8i16_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
                           V128:$Rn, vecshiftR32Narrow:$imm)>;
def : Pat<(v4i32 (concat_vectors (v2i32 V64:$Rd),
                                 (trunc (AArch64vlshr (v2i64 V128:$Rn),
                                                      vecshiftR64Narrow:$imm)))),
          (SHRNv4i32_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
                           V128:$Rn, vecshiftR64Narrow:$imm)>;
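// (Illustrative: for a v8i16 lane shifted right by k <= 8, the bits where a
// logical and an arithmetic shift differ all land in bit positions 8-15, and
// the truncation to i8 discards them, so both shifts select the same SHRN.)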
// Vector sign and zero extensions are implemented with SSHLL and USHLL.
// Anyexts are implemented as zexts.
def : Pat<(v8i16 (sext   (v8i8 V64:$Rn))),  (SSHLLv8i8_shift  V64:$Rn, (i32 0))>;
def : Pat<(v8i16 (zext   (v8i8 V64:$Rn))),  (USHLLv8i8_shift  V64:$Rn, (i32 0))>;
def : Pat<(v8i16 (anyext (v8i8 V64:$Rn))),  (USHLLv8i8_shift  V64:$Rn, (i32 0))>;
def : Pat<(v4i32 (sext   (v4i16 V64:$Rn))), (SSHLLv4i16_shift V64:$Rn, (i32 0))>;
def : Pat<(v4i32 (zext   (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
def : Pat<(v4i32 (anyext (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
def : Pat<(v2i64 (sext   (v2i32 V64:$Rn))), (SSHLLv2i32_shift V64:$Rn, (i32 0))>;
def : Pat<(v2i64 (zext   (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
def : Pat<(v2i64 (anyext (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
// Also match an extend from the upper half of a 128-bit source register.
def : Pat<(v8i16 (anyext (v8i8 (extract_subvector V128:$Rn, (i64 8))))),
          (USHLLv16i8_shift V128:$Rn, (i32 0))>;
def : Pat<(v8i16 (zext (v8i8 (extract_subvector V128:$Rn, (i64 8))))),
          (USHLLv16i8_shift V128:$Rn, (i32 0))>;
def : Pat<(v8i16 (sext (v8i8 (extract_subvector V128:$Rn, (i64 8))))),
          (SSHLLv16i8_shift V128:$Rn, (i32 0))>;
def : Pat<(v4i32 (anyext (v4i16 (extract_subvector V128:$Rn, (i64 4))))),
          (USHLLv8i16_shift V128:$Rn, (i32 0))>;
def : Pat<(v4i32 (zext (v4i16 (extract_subvector V128:$Rn, (i64 4))))),
          (USHLLv8i16_shift V128:$Rn, (i32 0))>;
def : Pat<(v4i32 (sext (v4i16 (extract_subvector V128:$Rn, (i64 4))))),
          (SSHLLv8i16_shift V128:$Rn, (i32 0))>;
def : Pat<(v2i64 (anyext (v2i32 (extract_subvector V128:$Rn, (i64 2))))),
          (USHLLv4i32_shift V128:$Rn, (i32 0))>;
def : Pat<(v2i64 (zext (v2i32 (extract_subvector V128:$Rn, (i64 2))))),
          (USHLLv4i32_shift V128:$Rn, (i32 0))>;
def : Pat<(v2i64 (sext (v2i32 (extract_subvector V128:$Rn, (i64 2))))),
          (SSHLLv4i32_shift V128:$Rn, (i32 0))>;

// Vector shift sxtl aliases
def : InstAlias<"sxtl.8h $dst, $src1",
                (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl $dst.8h, $src1.8b",
                (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl.4s $dst, $src1",
                (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl $dst.4s, $src1.4h",
                (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl.2d $dst, $src1",
                (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl $dst.2d, $src1.2s",
                (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;

// Vector shift sxtl2 aliases
def : InstAlias<"sxtl2.8h $dst, $src1",
                (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2 $dst.8h, $src1.16b",
                (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2.4s $dst, $src1",
                (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2 $dst.4s, $src1.8h",
                (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2.2d $dst, $src1",
                (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2 $dst.2d, $src1.4s",
                (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;

// Vector shift uxtl aliases
def : InstAlias<"uxtl.8h $dst, $src1",
                (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl $dst.8h, $src1.8b",
                (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl.4s $dst, $src1",
                (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl $dst.4s, $src1.4h",
                (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl.2d $dst, $src1",
                (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl $dst.2d, $src1.2s",
                (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;

// Vector shift uxtl2 aliases
def : InstAlias<"uxtl2.8h $dst, $src1",
                (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2 $dst.8h, $src1.16b",
                (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2.4s $dst, $src1",
                (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2 $dst.4s, $src1.8h",
                (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2.2d $dst, $src1",
                (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2 $dst.2d, $src1.4s",
                (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;

// If an integer is about to be converted to a floating point value,
// just load it on the floating point unit.
// These patterns are more complex because floating point loads do not
// support sign extension.
// The sign extension has to be explicitly added and is only supported for
// one step: byte-to-half, half-to-word, word-to-doubleword.
// SCVTF GPR -> FPR is 9 cycles.
// SCVTF FPR -> FPR is 4 cycles.
// (sign extension with lengthen) SXTL FPR -> FPR is 2 cycles.
// Therefore, we can do 2 sign extensions and one SCVTF FPR -> FPR
// and still be faster.
// However, this is not good for code size.
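// (Worked example of the numbers above: SXTL + SXTL + SCVTF FPR -> FPR costs
// about 2 + 2 + 4 = 8 cycles, one cycle less than the 9-cycle
// SCVTF GPR -> FPR, at the price of extra instructions - hence the
// NotForCodeSize requirement on the patterns below.)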
// 8-bits -> float. 2 sizes step-up.
class SExtLoadi8CVTf32Pat<dag addrmode, dag INST>
  : Pat<(f32 (sint_to_fp (i32 (sextloadi8 addrmode)))),
        (SCVTFv1i32 (f32 (EXTRACT_SUBREG
                            (SSHLLv4i16_shift
                              (f64
                                (EXTRACT_SUBREG
                                  (SSHLLv8i8_shift
                                    (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                        INST,
                                        bsub),
                                    0),
                                  dsub)),
                              0),
                            ssub)))>,
    Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;

def : SExtLoadi8CVTf32Pat<(ro8.Wpat GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext),
                          (LDRBroW GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext)>;
def : SExtLoadi8CVTf32Pat<(ro8.Xpat GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext),
                          (LDRBroX GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext)>;
def : SExtLoadi8CVTf32Pat<(am_indexed8 GPR64sp:$Rn, uimm12s1:$offset),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : SExtLoadi8CVTf32Pat<(am_unscaled8 GPR64sp:$Rn, simm9:$offset),
                          (LDURBi GPR64sp:$Rn, simm9:$offset)>;

// 16-bits -> float. 1 size step-up.
class SExtLoadi16CVTf32Pat<dag addrmode, dag INST>
  : Pat<(f32 (sint_to_fp (i32 (sextloadi16 addrmode)))),
        (SCVTFv1i32 (f32 (EXTRACT_SUBREG
                            (SSHLLv4i16_shift
                              (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                INST,
                                hsub),
                              0),
                            ssub)))>, Requires<[NotForCodeSize]>;

def : SExtLoadi16CVTf32Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
                           (LDRHroW GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
def : SExtLoadi16CVTf32Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
                           (LDRHroX GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
def : SExtLoadi16CVTf32Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
                           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : SExtLoadi16CVTf32Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
                           (LDURHi GPR64sp:$Rn, simm9:$offset)>;

// 32-bits to 32-bits are handled in target specific dag combine:
// performIntToFpCombine.
// 64-bits integer to 32-bits floating point, not possible with
// SCVTF on floating point registers (both source and destination
// must have the same size).

// Here are the patterns for 8, 16, 32, and 64-bits to double.
// 8-bits -> double. 3 size step-up: give up.
// 16-bits -> double. 2 size step.
class SExtLoadi16CVTf64Pat<dag addrmode, dag INST>
  : Pat <(f64 (sint_to_fp (i32 (sextloadi16 addrmode)))),
         (SCVTFv1i64 (f64 (EXTRACT_SUBREG
                             (SSHLLv2i32_shift
                               (f64
                                 (EXTRACT_SUBREG
                                   (SSHLLv4i16_shift
                                     (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                       INST,
                                       hsub),
                                     0),
                                   dsub)),
                               0),
                             dsub)))>,
    Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;

def : SExtLoadi16CVTf64Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
                           (LDRHroW GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
def : SExtLoadi16CVTf64Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
                           (LDRHroX GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
def : SExtLoadi16CVTf64Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
                           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : SExtLoadi16CVTf64Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
                           (LDURHi GPR64sp:$Rn, simm9:$offset)>;
// 32-bits -> double. 1 size step-up.
class SExtLoadi32CVTf64Pat<dag addrmode, dag INST>
  : Pat <(f64 (sint_to_fp (i32 (load addrmode)))),
         (SCVTFv1i64 (f64 (EXTRACT_SUBREG
                             (SSHLLv2i32_shift
                               (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                 INST,
                                 ssub),
                               0),
                             dsub)))>, Requires<[NotForCodeSize]>;

def : SExtLoadi32CVTf64Pat<(ro32.Wpat GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext),
                           (LDRSroW GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext)>;
def : SExtLoadi32CVTf64Pat<(ro32.Xpat GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext),
                           (LDRSroX GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext)>;
def : SExtLoadi32CVTf64Pat<(am_indexed32 GPR64sp:$Rn, uimm12s4:$offset),
                           (LDRSui GPR64sp:$Rn, uimm12s4:$offset)>;
def : SExtLoadi32CVTf64Pat<(am_unscaled32 GPR64sp:$Rn, simm9:$offset),
                           (LDURSi GPR64sp:$Rn, simm9:$offset)>;

// 64-bits -> double are handled in target specific dag combine:
// performIntToFpCombine.


//----------------------------------------------------------------------------
// AdvSIMD Load-Store Structure
//----------------------------------------------------------------------------
defm LD1 : SIMDLd1Multiple<"ld1">;
defm LD2 : SIMDLd2Multiple<"ld2">;
defm LD3 : SIMDLd3Multiple<"ld3">;
defm LD4 : SIMDLd4Multiple<"ld4">;

defm ST1 : SIMDSt1Multiple<"st1">;
defm ST2 : SIMDSt2Multiple<"st2">;
defm ST3 : SIMDSt3Multiple<"st3">;
defm ST4 : SIMDSt4Multiple<"st4">;

class Ld1Pat<ValueType ty, Instruction INST>
  : Pat<(ty (load GPR64sp:$Rn)), (INST GPR64sp:$Rn)>;

def : Ld1Pat<v16i8, LD1Onev16b>;
def : Ld1Pat<v8i16, LD1Onev8h>;
def : Ld1Pat<v4i32, LD1Onev4s>;
def : Ld1Pat<v2i64, LD1Onev2d>;
def : Ld1Pat<v8i8,  LD1Onev8b>;
def : Ld1Pat<v4i16, LD1Onev4h>;
def : Ld1Pat<v2i32, LD1Onev2s>;
def : Ld1Pat<v1i64, LD1Onev1d>;

class St1Pat<ValueType ty, Instruction INST>
  : Pat<(store ty:$Vt, GPR64sp:$Rn),
        (INST ty:$Vt, GPR64sp:$Rn)>;

def : St1Pat<v16i8, ST1Onev16b>;
def : St1Pat<v8i16, ST1Onev8h>;
def : St1Pat<v4i32, ST1Onev4s>;
def : St1Pat<v2i64, ST1Onev2d>;
def : St1Pat<v8i8,  ST1Onev8b>;
def : St1Pat<v4i16, ST1Onev4h>;
def : St1Pat<v2i32, ST1Onev2s>;
def : St1Pat<v1i64, ST1Onev1d>;

//---
// Single-element
//---

defm LD1R : SIMDLdR<0, 0b110, 0, "ld1r", "One", 1, 2, 4, 8>;
defm LD2R : SIMDLdR<1, 0b110, 0, "ld2r", "Two", 2, 4, 8, 16>;
defm LD3R : SIMDLdR<0, 0b111, 0, "ld3r", "Three", 3, 6, 12, 24>;
defm LD4R : SIMDLdR<1, 0b111, 0, "ld4r", "Four", 4, 8, 16, 32>;
let mayLoad = 1, hasSideEffects = 0 in {
defm LD1 : SIMDLdSingleBTied<0, 0b000,       "ld1", VecListOneb,   GPR64pi1>;
defm LD1 : SIMDLdSingleHTied<0, 0b010, 0,    "ld1", VecListOneh,   GPR64pi2>;
defm LD1 : SIMDLdSingleSTied<0, 0b100, 0b00, "ld1", VecListOnes,   GPR64pi4>;
defm LD1 : SIMDLdSingleDTied<0, 0b100, 0b01, "ld1", VecListOned,   GPR64pi8>;
defm LD2 : SIMDLdSingleBTied<1, 0b000,       "ld2", VecListTwob,   GPR64pi2>;
defm LD2 : SIMDLdSingleHTied<1, 0b010, 0,    "ld2", VecListTwoh,   GPR64pi4>;
defm LD2 : SIMDLdSingleSTied<1, 0b100, 0b00, "ld2", VecListTwos,   GPR64pi8>;
defm LD2 : SIMDLdSingleDTied<1, 0b100, 0b01, "ld2", VecListTwod,   GPR64pi16>;
defm LD3 : SIMDLdSingleBTied<0, 0b001,       "ld3", VecListThreeb, GPR64pi3>;
defm LD3 : SIMDLdSingleHTied<0, 0b011, 0,    "ld3", VecListThreeh, GPR64pi6>;
defm LD3 : SIMDLdSingleSTied<0, 0b101, 0b00, "ld3", VecListThrees, GPR64pi12>;
defm LD3 : SIMDLdSingleDTied<0, 0b101, 0b01, "ld3", VecListThreed, GPR64pi24>;
defm LD4 : SIMDLdSingleBTied<1, 0b001,       "ld4", VecListFourb,  GPR64pi4>;
defm LD4 : SIMDLdSingleHTied<1, 0b011, 0,    "ld4", VecListFourh,  GPR64pi8>;
defm LD4 : SIMDLdSingleSTied<1, 0b101, 0b00, "ld4", VecListFours,  GPR64pi16>;
defm LD4 : SIMDLdSingleDTied<1, 0b101, 0b01, "ld4", VecListFourd,  GPR64pi32>;
}

def : Pat<(v8i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
          (LD1Rv8b GPR64sp:$Rn)>;
def : Pat<(v16i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
          (LD1Rv16b GPR64sp:$Rn)>;
def : Pat<(v4i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
          (LD1Rv4h GPR64sp:$Rn)>;
def : Pat<(v8i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
          (LD1Rv8h GPR64sp:$Rn)>;
def : Pat<(v2i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
          (LD1Rv2s GPR64sp:$Rn)>;
def : Pat<(v4i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
          (LD1Rv4s GPR64sp:$Rn)>;
def : Pat<(v2i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
          (LD1Rv2d GPR64sp:$Rn)>;
def : Pat<(v1i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
          (LD1Rv1d GPR64sp:$Rn)>;
// Grab the floating point version too
def : Pat<(v2f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
          (LD1Rv2s GPR64sp:$Rn)>;
def : Pat<(v4f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
          (LD1Rv4s GPR64sp:$Rn)>;
def : Pat<(v2f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
          (LD1Rv2d GPR64sp:$Rn)>;
def : Pat<(v1f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
          (LD1Rv1d GPR64sp:$Rn)>;
def : Pat<(v4f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
          (LD1Rv4h GPR64sp:$Rn)>;
def : Pat<(v8f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
          (LD1Rv8h GPR64sp:$Rn)>;
def : Pat<(v4bf16 (AArch64dup (bf16 (load GPR64sp:$Rn)))),
          (LD1Rv4h GPR64sp:$Rn)>;
def : Pat<(v8bf16 (AArch64dup (bf16 (load GPR64sp:$Rn)))),
          (LD1Rv8h GPR64sp:$Rn)>;
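// (Illustrative: each pattern above folds a splatted scalar load such as
// (v4f32 (AArch64dup (f32 (load ...)))) into a single "ld1r {v0.4s}, [x0]"
// instead of a separate LDR and DUP.)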
class Ld1Lane128Pat<SDPatternOperator scalar_load, Operand VecIndex,
                    ValueType VTy, ValueType STy, Instruction LD1>
  : Pat<(vector_insert (VTy VecListOne128:$Rd),
                       (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
        (LD1 VecListOne128:$Rd, VecIndex:$idx, GPR64sp:$Rn)>;

def : Ld1Lane128Pat<extloadi8,  VectorIndexB, v16i8, i32, LD1i8>;
def : Ld1Lane128Pat<extloadi16, VectorIndexH, v8i16, i32, LD1i16>;
def : Ld1Lane128Pat<load,       VectorIndexS, v4i32, i32, LD1i32>;
def : Ld1Lane128Pat<load,       VectorIndexS, v4f32, f32, LD1i32>;
def : Ld1Lane128Pat<load,       VectorIndexD, v2i64, i64, LD1i64>;
def : Ld1Lane128Pat<load,       VectorIndexD, v2f64, f64, LD1i64>;
def : Ld1Lane128Pat<load,       VectorIndexH, v8f16, f16, LD1i16>;
def : Ld1Lane128Pat<load,       VectorIndexH, v8bf16, bf16, LD1i16>;

// Generate LD1 for extload if memory type does not match the
// destination type, for example:
//
//   (v4i32 (insert_vector_elt (load anyext from i8) idx))
//
// In this case, the index must be adjusted to match LD1 type.
//
class Ld1Lane128IdxOpPat<SDPatternOperator scalar_load, Operand
                         VecIndex, ValueType VTy, ValueType STy,
                         Instruction LD1, SDNodeXForm IdxOp>
  : Pat<(vector_insert (VTy VecListOne128:$Rd),
                       (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
        (LD1 VecListOne128:$Rd, (IdxOp VecIndex:$idx), GPR64sp:$Rn)>;

def VectorIndexStoH : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue() * 2, SDLoc(N), MVT::i64);
}]>;
def VectorIndexStoB : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue() * 4, SDLoc(N), MVT::i64);
}]>;
def VectorIndexHtoB : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue() * 2, SDLoc(N), MVT::i64);
}]>;

def : Ld1Lane128IdxOpPat<extloadi16, VectorIndexS, v4i32, i32, LD1i16, VectorIndexStoH>;
def : Ld1Lane128IdxOpPat<extloadi8, VectorIndexS, v4i32, i32, LD1i8, VectorIndexStoB>;
def : Ld1Lane128IdxOpPat<extloadi8, VectorIndexH, v8i16, i32, LD1i8, VectorIndexHtoB>;
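// (Illustrative: an extloadi8 inserted at lane 1 of a v4i32 selects LD1i8
// with byte index 1 * 4 == 4, since .s lane 1 starts at byte 4; the XForms
// above perform exactly that index scaling.)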
// Same as above, but the first element is populated using
// scalar_to_vector + insert_subvector instead of insert_vector_elt.
class Ld1Lane128FirstElm<ValueType ResultTy, ValueType VecTy,
                         SDPatternOperator ExtLoad, Instruction LD1>
  : Pat<(ResultTy (scalar_to_vector (i32 (ExtLoad GPR64sp:$Rn)))),
        (ResultTy (EXTRACT_SUBREG
                    (LD1 (VecTy (IMPLICIT_DEF)), 0, GPR64sp:$Rn), dsub))>;

def : Ld1Lane128FirstElm<v2i32, v8i16, extloadi16, LD1i16>;
def : Ld1Lane128FirstElm<v2i32, v16i8, extloadi8, LD1i8>;
def : Ld1Lane128FirstElm<v4i16, v16i8, extloadi8, LD1i8>;

class Ld1Lane64Pat<SDPatternOperator scalar_load, Operand VecIndex,
                   ValueType VTy, ValueType STy, Instruction LD1>
  : Pat<(vector_insert (VTy VecListOne64:$Rd),
                       (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
        (EXTRACT_SUBREG
            (LD1 (SUBREG_TO_REG (i32 0), VecListOne64:$Rd, dsub),
                 VecIndex:$idx, GPR64sp:$Rn),
            dsub)>;

def : Ld1Lane64Pat<extloadi8,  VectorIndexB, v8i8,  i32, LD1i8>;
def : Ld1Lane64Pat<extloadi16, VectorIndexH, v4i16, i32, LD1i16>;
def : Ld1Lane64Pat<load,       VectorIndexS, v2i32, i32, LD1i32>;
def : Ld1Lane64Pat<load,       VectorIndexS, v2f32, f32, LD1i32>;
def : Ld1Lane64Pat<load,       VectorIndexH, v4f16, f16, LD1i16>;
def : Ld1Lane64Pat<load,       VectorIndexH, v4bf16, bf16, LD1i16>;


defm LD1 : SIMDLdSt1SingleAliases<"ld1">;
defm LD2 : SIMDLdSt2SingleAliases<"ld2">;
defm LD3 : SIMDLdSt3SingleAliases<"ld3">;
defm LD4 : SIMDLdSt4SingleAliases<"ld4">;

// Stores
defm ST1 : SIMDStSingleB<0, 0b000,       "st1", VecListOneb, GPR64pi1>;
defm ST1 : SIMDStSingleH<0, 0b010, 0,    "st1", VecListOneh, GPR64pi2>;
defm ST1 : SIMDStSingleS<0, 0b100, 0b00, "st1", VecListOnes, GPR64pi4>;
defm ST1 : SIMDStSingleD<0, 0b100, 0b01, "st1", VecListOned, GPR64pi8>;

let AddedComplexity = 19 in
class St1Lane128Pat<SDPatternOperator scalar_store, Operand VecIndex,
                    ValueType VTy, ValueType STy, Instruction ST1>
  : Pat<(scalar_store
             (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
             GPR64sp:$Rn),
        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn)>;

def : St1Lane128Pat<truncstorei8,  VectorIndexB, v16i8, i32, ST1i8>;
def : St1Lane128Pat<truncstorei16, VectorIndexH, v8i16, i32, ST1i16>;
def : St1Lane128Pat<store,         VectorIndexS, v4i32, i32, ST1i32>;
def : St1Lane128Pat<store,         VectorIndexS, v4f32, f32, ST1i32>;
def : St1Lane128Pat<store,         VectorIndexD, v2i64, i64, ST1i64>;
def : St1Lane128Pat<store,         VectorIndexD, v2f64, f64, ST1i64>;
def : St1Lane128Pat<store,         VectorIndexH, v8f16, f16, ST1i16>;
def : St1Lane128Pat<store,         VectorIndexH, v8bf16, bf16, ST1i16>;

let AddedComplexity = 19 in
class St1Lane64Pat<SDPatternOperator scalar_store, Operand VecIndex,
                   ValueType VTy, ValueType STy, Instruction ST1>
  : Pat<(scalar_store
             (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
             GPR64sp:$Rn),
        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
             VecIndex:$idx, GPR64sp:$Rn)>;

def : St1Lane64Pat<truncstorei8,  VectorIndexB, v8i8, i32, ST1i8>;
def : St1Lane64Pat<truncstorei16, VectorIndexH, v4i16, i32, ST1i16>;
def : St1Lane64Pat<store,         VectorIndexS, v2i32, i32, ST1i32>;
def : St1Lane64Pat<store,         VectorIndexS, v2f32, f32, ST1i32>;
def : St1Lane64Pat<store,         VectorIndexH, v4f16, f16, ST1i16>;
def : St1Lane64Pat<store,         VectorIndexH, v4bf16, bf16, ST1i16>;

multiclass St1LanePost64Pat<SDPatternOperator scalar_store, Operand VecIndex,
                            ValueType VTy, ValueType STy, Instruction ST1,
                            int offset> {
  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, offset),
        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
             VecIndex:$idx, GPR64sp:$Rn, XZR)>;

  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, GPR64:$Rm),
        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
             VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
}

defm : St1LanePost64Pat<post_truncsti8, VectorIndexB, v8i8, i32, ST1i8_POST, 1>;
defm : St1LanePost64Pat<post_truncsti16, VectorIndexH, v4i16, i32, ST1i16_POST,
                        2>;
defm : St1LanePost64Pat<post_store, VectorIndexS, v2i32, i32, ST1i32_POST, 4>;
defm : St1LanePost64Pat<post_store, VectorIndexS, v2f32, f32, ST1i32_POST, 4>;
defm : St1LanePost64Pat<post_store, VectorIndexD, v1i64, i64, ST1i64_POST, 8>;
defm : St1LanePost64Pat<post_store, VectorIndexD, v1f64, f64, ST1i64_POST, 8>;
defm : St1LanePost64Pat<post_store, VectorIndexH, v4f16, f16, ST1i16_POST, 2>;
defm : St1LanePost64Pat<post_store, VectorIndexH, v4bf16, bf16, ST1i16_POST, 2>;

multiclass St1LanePost128Pat<SDPatternOperator scalar_store, Operand VecIndex,
                             ValueType VTy, ValueType STy, Instruction ST1,
                             int offset> {
  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, offset),
        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, XZR)>;

  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, GPR64:$Rm),
        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
}

defm : St1LanePost128Pat<post_truncsti8, VectorIndexB, v16i8, i32, ST1i8_POST,
                         1>;
defm : St1LanePost128Pat<post_truncsti16, VectorIndexH, v8i16, i32, ST1i16_POST,
                         2>;
defm : St1LanePost128Pat<post_store, VectorIndexS, v4i32, i32, ST1i32_POST, 4>;
defm : St1LanePost128Pat<post_store, VectorIndexS, v4f32, f32, ST1i32_POST, 4>;
defm : St1LanePost128Pat<post_store, VectorIndexD, v2i64, i64, ST1i64_POST, 8>;
defm : St1LanePost128Pat<post_store, VectorIndexD, v2f64, f64, ST1i64_POST, 8>;
defm : St1LanePost128Pat<post_store, VectorIndexH, v8f16, f16, ST1i16_POST, 2>;
defm : St1LanePost128Pat<post_store, VectorIndexH, v8bf16, bf16, ST1i16_POST, 2>;

let mayStore = 1, hasSideEffects = 0 in {
defm ST2 : SIMDStSingleB<1, 0b000,       "st2", VecListTwob,   GPR64pi2>;
defm ST2 : SIMDStSingleH<1, 0b010, 0,    "st2", VecListTwoh,   GPR64pi4>;
defm ST2 : SIMDStSingleS<1, 0b100, 0b00, "st2", VecListTwos,   GPR64pi8>;
defm ST2 : SIMDStSingleD<1, 0b100, 0b01, "st2", VecListTwod,   GPR64pi16>;
defm ST3 : SIMDStSingleB<0, 0b001,       "st3", VecListThreeb, GPR64pi3>;
defm ST3 : SIMDStSingleH<0, 0b011, 0,    "st3", VecListThreeh, GPR64pi6>;
defm ST3 : SIMDStSingleS<0, 0b101, 0b00, "st3", VecListThrees, GPR64pi12>;
defm ST3 : SIMDStSingleD<0, 0b101, 0b01, "st3", VecListThreed, GPR64pi24>;
defm ST4 : SIMDStSingleB<1, 0b001,       "st4", VecListFourb,  GPR64pi4>;
defm ST4 : SIMDStSingleH<1, 0b011, 0,    "st4", VecListFourh,  GPR64pi8>;
defm ST4 : SIMDStSingleS<1, 0b101, 0b00, "st4", VecListFours,  GPR64pi16>;
defm ST4 : SIMDStSingleD<1, 0b101, 0b01, "st4", VecListFourd,  GPR64pi32>;
}

defm ST1 : SIMDLdSt1SingleAliases<"st1">;
defm ST2 : SIMDLdSt2SingleAliases<"st2">;
defm ST3 : SIMDLdSt3SingleAliases<"st3">;
defm ST4 : SIMDLdSt4SingleAliases<"st4">;

//----------------------------------------------------------------------------
// Crypto extensions
//----------------------------------------------------------------------------

let Predicates = [HasAES] in {
def AESErr   : AESTiedInst<0b0100, "aese",   int_aarch64_crypto_aese>;
def AESDrr   : AESTiedInst<0b0101, "aesd",   int_aarch64_crypto_aesd>;
def AESMCrr  : AESInst<    0b0110, "aesmc",  int_aarch64_crypto_aesmc>;
def AESIMCrr : AESInst<    0b0111, "aesimc", int_aarch64_crypto_aesimc>;
}

// Pseudo instructions for AESMCrr/AESIMCrr with a register constraint required
// for AES fusion on some CPUs.
let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
def AESMCrrTied  : Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
                   Sched<[WriteVq]>;
def AESIMCrrTied : Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
                   Sched<[WriteVq]>;
}

// Only use constrained versions of AES(I)MC instructions if they are paired with
// AESE/AESD.
def : Pat<(v16i8 (int_aarch64_crypto_aesmc
            (v16i8 (int_aarch64_crypto_aese (v16i8 V128:$src1),
                                            (v16i8 V128:$src2))))),
          (v16i8 (AESMCrrTied (v16i8 (AESErr (v16i8 V128:$src1),
                                             (v16i8 V128:$src2)))))>,
          Requires<[HasFuseAES]>;

def : Pat<(v16i8 (int_aarch64_crypto_aesimc
            (v16i8 (int_aarch64_crypto_aesd (v16i8 V128:$src1),
                                            (v16i8 V128:$src2))))),
          (v16i8 (AESIMCrrTied (v16i8 (AESDrr (v16i8 V128:$src1),
                                              (v16i8 V128:$src2)))))>,
          Requires<[HasFuseAES]>;
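// (Illustrative: the tied pseudos keep e.g. "aese v0.16b, v1.16b" and
// "aesmc v0.16b, v0.16b" as a producer/consumer pair on a single register,
// the adjacent dependent shape that AES-fusing cores can execute as one
// macro-op.)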
let Predicates = [HasSHA2] in {
def SHA1Crrr     : SHATiedInstQSV<0b000, "sha1c", int_aarch64_crypto_sha1c>;
def SHA1Prrr     : SHATiedInstQSV<0b001, "sha1p", int_aarch64_crypto_sha1p>;
def SHA1Mrrr     : SHATiedInstQSV<0b010, "sha1m", int_aarch64_crypto_sha1m>;
def SHA1SU0rrr   : SHATiedInstVVV<0b011, "sha1su0", int_aarch64_crypto_sha1su0>;
def SHA256Hrrr   : SHATiedInstQQV<0b100, "sha256h", int_aarch64_crypto_sha256h>;
def SHA256H2rrr  : SHATiedInstQQV<0b101, "sha256h2", int_aarch64_crypto_sha256h2>;
def SHA256SU1rrr : SHATiedInstVVV<0b110, "sha256su1", int_aarch64_crypto_sha256su1>;

def SHA1Hrr     : SHAInstSS<    0b0000, "sha1h", int_aarch64_crypto_sha1h>;
def SHA1SU1rr   : SHATiedInstVV<0b0001, "sha1su1", int_aarch64_crypto_sha1su1>;
def SHA256SU0rr : SHATiedInstVV<0b0010, "sha256su0", int_aarch64_crypto_sha256su0>;
}

//----------------------------------------------------------------------------
// Compiler-pseudos
//----------------------------------------------------------------------------
// FIXME: Like for X86, these should go in their own separate .td file.

def def32 : PatLeaf<(i32 GPR32:$src), [{
  return isDef32(*N);
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)), (SUBREG_TO_REG (i64 0), GPR32:$src, sub_32)>;

// For an anyext, we don't care what the high bits are, so we can perform an
// INSERT_SUBREG into an IMPLICIT_DEF.
def : Pat<(i64 (anyext GPR32:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32)>;

// When we need to explicitly zero-extend, we use a 32-bit MOV instruction and
// then assert the extension has happened.
def : Pat<(i64 (zext GPR32:$src)),
          (SUBREG_TO_REG (i32 0), (ORRWrs WZR, GPR32:$src, 0), sub_32)>;
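// (Illustrative: ORRWrs WZR, Wn, 0 is the canonical "mov w0, w0"; any write
// to a W register architecturally zeroes the upper 32 bits of the X register,
// and the SUBREG_TO_REG records that the zero-extension has happened.)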
// To sign extend, we use a signed bitfield move instruction (SBFM) on the
// containing super-reg.
def : Pat<(i64 (sext GPR32:$src)),
   (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32), 0, 31)>;
def : Pat<(i64 (sext_inreg GPR64:$src, i32)), (SBFMXri GPR64:$src, 0, 31)>;
def : Pat<(i64 (sext_inreg GPR64:$src, i16)), (SBFMXri GPR64:$src, 0, 15)>;
def : Pat<(i64 (sext_inreg GPR64:$src, i8)),  (SBFMXri GPR64:$src, 0, 7)>;
def : Pat<(i64 (sext_inreg GPR64:$src, i1)),  (SBFMXri GPR64:$src, 0, 0)>;
def : Pat<(i32 (sext_inreg GPR32:$src, i16)), (SBFMWri GPR32:$src, 0, 15)>;
def : Pat<(i32 (sext_inreg GPR32:$src, i8)),  (SBFMWri GPR32:$src, 0, 7)>;
def : Pat<(i32 (sext_inreg GPR32:$src, i1)),  (SBFMWri GPR32:$src, 0, 0)>;

def : Pat<(shl (sext_inreg GPR32:$Rn, i8), (i64 imm0_31:$imm)),
          (SBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
                   (i64 (i32shift_sext_i8 imm0_31:$imm)))>;
def : Pat<(shl (sext_inreg GPR64:$Rn, i8), (i64 imm0_63:$imm)),
          (SBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
                   (i64 (i64shift_sext_i8 imm0_63:$imm)))>;

def : Pat<(shl (sext_inreg GPR32:$Rn, i16), (i64 imm0_31:$imm)),
          (SBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
                   (i64 (i32shift_sext_i16 imm0_31:$imm)))>;
def : Pat<(shl (sext_inreg GPR64:$Rn, i16), (i64 imm0_63:$imm)),
          (SBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
                   (i64 (i64shift_sext_i16 imm0_63:$imm)))>;

def : Pat<(shl (i64 (sext GPR32:$Rn)), (i64 imm0_63:$imm)),
          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
                   (i64 (i64shift_a imm0_63:$imm)),
                   (i64 (i64shift_sext_i32 imm0_63:$imm)))>;

// sra patterns have an AddedComplexity of 10, so make sure we have a higher
// AddedComplexity for the following patterns since we want to match sext + sra
// patterns before we attempt to match a single sra node.
let AddedComplexity = 20 in {
// We support all sext + sra combinations which preserve at least one bit of the
// original value which is to be sign extended. E.g. we support shifts up to
// bitwidth-1 bits.
def : Pat<(sra (sext_inreg GPR32:$Rn, i8), (i64 imm0_7:$imm)),
          (SBFMWri GPR32:$Rn, (i64 imm0_7:$imm), 7)>;
def : Pat<(sra (sext_inreg GPR64:$Rn, i8), (i64 imm0_7:$imm)),
          (SBFMXri GPR64:$Rn, (i64 imm0_7:$imm), 7)>;

def : Pat<(sra (sext_inreg GPR32:$Rn, i16), (i64 imm0_15:$imm)),
          (SBFMWri GPR32:$Rn, (i64 imm0_15:$imm), 15)>;
def : Pat<(sra (sext_inreg GPR64:$Rn, i16), (i64 imm0_15:$imm)),
          (SBFMXri GPR64:$Rn, (i64 imm0_15:$imm), 15)>;

def : Pat<(sra (i64 (sext GPR32:$Rn)), (i64 imm0_31:$imm)),
          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
                   (i64 imm0_31:$imm), 31)>;
} // AddedComplexity = 20

// To truncate, we can simply extract from a subregister.
def : Pat<(i32 (trunc GPR64sp:$src)),
          (i32 (EXTRACT_SUBREG GPR64sp:$src, sub_32))>;

// __builtin_trap() uses the BRK instruction on AArch64.
def : Pat<(trap), (BRK 1)>;
def : Pat<(debugtrap), (BRK 0xF000)>;

def ubsan_trap_xform : SDNodeXForm<timm, [{
  return CurDAG->getTargetConstant(N->getZExtValue() | ('U' << 8), SDLoc(N), MVT::i32);
}]>;

def ubsan_trap_imm : TImmLeaf<i32, [{
  return isUInt<8>(Imm);
}], ubsan_trap_xform>;

def : Pat<(ubsantrap ubsan_trap_imm:$kind), (BRK ubsan_trap_imm:$kind)>;

// Multiply high patterns which multiply the lower subvector using smull/umull
// and the upper subvector with smull2/umull2. Then shuffle the high part of
// both results together.
def : Pat<(v16i8 (mulhs V128:$Rn, V128:$Rm)),
          (UZP2v16i8
           (SMULLv8i8_v8i16 (EXTRACT_SUBREG V128:$Rn, dsub),
                            (EXTRACT_SUBREG V128:$Rm, dsub)),
           (SMULLv16i8_v8i16 V128:$Rn, V128:$Rm))>;
def : Pat<(v8i16 (mulhs V128:$Rn, V128:$Rm)),
          (UZP2v8i16
           (SMULLv4i16_v4i32 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (SMULLv8i16_v4i32 V128:$Rn, V128:$Rm))>;
def : Pat<(v4i32 (mulhs V128:$Rn, V128:$Rm)),
          (UZP2v4i32
           (SMULLv2i32_v2i64 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (SMULLv4i32_v2i64 V128:$Rn, V128:$Rm))>;

def : Pat<(v16i8 (mulhu V128:$Rn, V128:$Rm)),
          (UZP2v16i8
           (UMULLv8i8_v8i16 (EXTRACT_SUBREG V128:$Rn, dsub),
                            (EXTRACT_SUBREG V128:$Rm, dsub)),
           (UMULLv16i8_v8i16 V128:$Rn, V128:$Rm))>;
def : Pat<(v8i16 (mulhu V128:$Rn, V128:$Rm)),
          (UZP2v8i16
           (UMULLv4i16_v4i32 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (UMULLv8i16_v4i32 V128:$Rn, V128:$Rm))>;
def : Pat<(v4i32 (mulhu V128:$Rn, V128:$Rm)),
          (UZP2v4i32
           (UMULLv2i32_v2i64 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (UMULLv4i32_v2i64 V128:$Rn, V128:$Rm))>;
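// (Illustrative, for the v8i16 case: the 16x16 products live as 32-bit lanes
// in the SMULL/SMULL2 results; viewed as .8h, their odd-numbered halves are
// the product high halves, and UZP2 gathers exactly those odd elements. The
// other widths work the same way.)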
// Conversions within AdvSIMD types in the same register size are free.
// But because we need a consistent lane ordering, in big endian many
// conversions require one or more REV instructions.
//
// Consider a simple memory load followed by a bitconvert then a store.
// v0 = load v2i32
// v1 = BITCAST v2i32 v0 to v4i16
// store v4i16 v1
//
// In big endian mode every memory access has an implicit byte swap. LDR and
// STR do a 64-bit byte swap, whereas LD1/ST1 do a byte swap per lane - that
// is, they treat the vector as a sequence of elements to be byte-swapped.
// The two pairs of instructions are fundamentally incompatible. We've decided
// to use LD1/ST1 only to simplify compiler implementation.
//
// LD1/ST1 perform the equivalent of a sequence of LDR/STR + REV. This makes
// the original code sequence:
// v0 = load v2i32
// v1 = REV v2i32 (implicit)
// v2 = BITCAST v2i32 v1 to v4i16
// v3 = REV v4i16 v2 (implicit)
// store v4i16 v3
//
// But this is now broken - the value stored is different from the value
// loaded due to lane reordering. To fix this, on every BITCAST we must
// perform two other REVs:
// v0 = load v2i32
// v1 = REV v2i32 (implicit)
// v2 = REV v2i32 v1
// v3 = BITCAST v2i32 v2 to v4i16
// v4 = REV v4i16 v3
// v5 = REV v4i16 v4 (implicit)
// store v4i16 v5
//
// This means an extra two instructions, but actually in most cases the two
// REV instructions can be combined into one. For example:
//   (REV64_2s (REV64_4h X)) === (REV32_4h X)
//
// There is also no 128-bit REV instruction. This must be synthesized with an
// EXT instruction.
//
// Most bitconverts require some sort of conversion. The only exceptions are:
// a) Identity conversions -  vNfX <-> vNiX
// b) Single-lane-to-scalar - v1fX <-> fX or v1iX <-> iX
//

// Natural vector casts (64 bit)
def : Pat<(v8i8 (AArch64NvCast (v2i32 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4f16 (AArch64NvCast (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4bf16 (AArch64NvCast (v2i32 FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (v2i32 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2f32 (AArch64NvCast (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;

def : Pat<(v8i8 (AArch64NvCast (v4i16 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (v4i16 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4f16 (AArch64NvCast (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4bf16 (AArch64NvCast (v4i16 FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;

def : Pat<(v8i8 (AArch64NvCast (v8i8 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (v8i8 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4f16 (AArch64NvCast (v8i8 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4bf16 (AArch64NvCast (v8i8 FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (v8i8 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2f32 (AArch64NvCast (v8i8 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (v8i8 FPR64:$src))), (v1i64 FPR64:$src)>;

def : Pat<(v8i8 (AArch64NvCast (f64 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (f64 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4f16 (AArch64NvCast (f64 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4bf16 (AArch64NvCast (f64 FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (f64 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2f32 (AArch64NvCast (f64 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1f64 (AArch64NvCast (f64 FPR64:$src))), (v1f64 FPR64:$src)>;

def : Pat<(v8i8 (AArch64NvCast (v2f32 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2f32 (AArch64NvCast (v2f32 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1f64 (AArch64NvCast (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;
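// Note: a natural vector cast is a bitcast that is known to be free in-register,
// so the patterns here (and the 128-bit ones below) simply reuse the source
// register; no REV is needed even in big endian.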
// Natural vector casts (128 bit)
def : Pat<(v16i8 (AArch64NvCast (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8bf16 (AArch64NvCast (v4i32 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v4i32 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v8i16 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8bf16 (AArch64NvCast (v8i16 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v16i8 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8bf16 (AArch64NvCast (v16i8 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8bf16 (AArch64NvCast (v2i64 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v2i64 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v4f32 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8bf16 (AArch64NvCast (v4f32 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v2f64 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8bf16 (AArch64NvCast (v2f64 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v8i8 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v4i16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v2i32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v4f16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v4bf16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v2f32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;

def : Pat<(i64 (bitconvert (v8i8 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v4bf16 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i8 (bitconvert GPR64:$Xn)),
          (REV64v8i8 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v4i16 (bitconvert GPR64:$Xn)),
          (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v2i32 (bitconvert GPR64:$Xn)),
          (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v4f16 (bitconvert GPR64:$Xn)),
          (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v4bf16 (bitconvert GPR64:$Xn)),
          (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v2f32 (bitconvert GPR64:$Xn)),
          (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;

def : Pat<(i64 (bitconvert (v8i8 V64:$Vn))),
          (REV64v8i8 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
          (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v4bf16 V64:$Vn))),
          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
          (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
}
def : Pat<(v1i64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v1f64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(i64 (bitconvert (v1i64 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(v1i64 (scalar_to_vector GPR64:$Xn)),
          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v1f64 (scalar_to_vector GPR64:$Xn)),
          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Xn))), (v1f64 FPR64:$Xn)>;

def : Pat<(f32 (bitconvert (i32 GPR32:$Xn))),
          (COPY_TO_REGCLASS GPR32:$Xn, FPR32)>;
def : Pat<(i32 (bitconvert (f32 FPR32:$Xn))),
          (COPY_TO_REGCLASS FPR32:$Xn, GPR32)>;
def : Pat<(f64 (bitconvert (i64 GPR64:$Xn))),
          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(i64 (bitconvert (f64 FPR64:$Xn))),
          (COPY_TO_REGCLASS FPR64:$Xn, GPR64)>;
def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
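// Note: on little-endian targets the GPR<->FPR bitconverts above are plain
// register-class copies and typically materialize as a single FMOV, e.g.
// "fmov d0, x0" / "fmov x0, d0" (register names illustrative).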
let Predicates = [IsLE] in {
def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v8i8 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4bf16 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))),
          (v1i64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))),
          (v1i64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v8i8 FPR64:$src))),
          (v1i64 (REV64v8i8 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))),
          (v1i64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v4bf16 FPR64:$src))),
          (v1i64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))),
          (v1i64 (REV64v2i32 FPR64:$src))>;
}
def : Pat<(v1i64 (bitconvert (v1f64 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (f64 FPR64:$src))), (v1i64 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v8i8 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (f64 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4bf16 FPR64:$src))), (v2i32 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))),
          (v2i32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))),
          (v2i32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v8i8 FPR64:$src))),
          (v2i32 (REV32v8i8 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (f64 FPR64:$src))),
          (v2i32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))),
          (v2i32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))),
          (v2i32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v4bf16 FPR64:$src))),
          (v2i32 (REV32v4i16 FPR64:$src))>;
}
def : Pat<(v2i32 (bitconvert (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v8i8 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (f64 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))), (v4i16 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))),
          (v4i16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))),
          (v4i16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v8i8 FPR64:$src))),
          (v4i16 (REV16v8i8 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (f64 FPR64:$src))),
          (v4i16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))),
          (v4i16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))),
          (v4i16 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(v4i16 (bitconvert (v4f16 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v4bf16 FPR64:$src))), (v4i16 FPR64:$src)>;
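// Note: the REV flavour above is determined by the two lane sizes involved,
// e.g. v4i16 <- v8i8 needs REV16 (byte swap within each 16-bit lane) while
// v4i16 <- v1i64 needs a full REV64.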
let Predicates = [IsLE] in {
def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v8i8 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (f64 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))), (v4f16 FPR64:$src)>;

def : Pat<(v4bf16 (bitconvert (v1i64 FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (v2i32 FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (v8i8 FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (f64 FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (v2f32 FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (v1f64 FPR64:$src))), (v4bf16 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))),
          (v4f16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))),
          (v4f16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v8i8 FPR64:$src))),
          (v4f16 (REV16v8i8 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (f64 FPR64:$src))),
          (v4f16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))),
          (v4f16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))),
          (v4f16 (REV64v4i16 FPR64:$src))>;

def : Pat<(v4bf16 (bitconvert (v1i64 FPR64:$src))),
          (v4bf16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4bf16 (bitconvert (v2i32 FPR64:$src))),
          (v4bf16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4bf16 (bitconvert (v8i8 FPR64:$src))),
          (v4bf16 (REV16v8i8 FPR64:$src))>;
def : Pat<(v4bf16 (bitconvert (f64 FPR64:$src))),
          (v4bf16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4bf16 (bitconvert (v2f32 FPR64:$src))),
          (v4bf16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4bf16 (bitconvert (v1f64 FPR64:$src))),
          (v4bf16 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(v4f16 (bitconvert (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (v4i16 FPR64:$src))), (v4bf16 FPR64:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v8i8 (bitconvert (v1i64 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v8i8 (bitconvert (v2i32 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v8i8 (bitconvert (v4i16 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v8i8 (bitconvert (f64 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v8i8 (bitconvert (v2f32 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v8i8 (bitconvert (v1f64 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v8i8 (bitconvert (v4f16 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v8i8 (bitconvert (v4bf16 FPR64:$src))), (v8i8 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i8 (bitconvert (v1i64 FPR64:$src))),
          (v8i8 (REV64v8i8 FPR64:$src))>;
def : Pat<(v8i8 (bitconvert (v2i32 FPR64:$src))),
          (v8i8 (REV32v8i8 FPR64:$src))>;
def : Pat<(v8i8 (bitconvert (v4i16 FPR64:$src))),
          (v8i8 (REV16v8i8 FPR64:$src))>;
def : Pat<(v8i8 (bitconvert (f64 FPR64:$src))),
          (v8i8 (REV64v8i8 FPR64:$src))>;
def : Pat<(v8i8 (bitconvert (v2f32 FPR64:$src))),
          (v8i8 (REV32v8i8 FPR64:$src))>;
def : Pat<(v8i8 (bitconvert (v1f64 FPR64:$src))),
          (v8i8 (REV64v8i8 FPR64:$src))>;
def : Pat<(v8i8 (bitconvert (v4f16 FPR64:$src))),
          (v8i8 (REV16v8i8 FPR64:$src))>;
def : Pat<(v8i8 (bitconvert (v4bf16 FPR64:$src))),
          (v8i8 (REV16v8i8 FPR64:$src))>;
}

let Predicates = [IsLE] in {
def : Pat<(f64 (bitconvert (v2i32 FPR64:$src))), (f64 FPR64:$src)>;
def : Pat<(f64 (bitconvert (v4i16 FPR64:$src))), (f64 FPR64:$src)>;
def : Pat<(f64 (bitconvert (v2f32 FPR64:$src))), (f64 FPR64:$src)>;
def : Pat<(f64 (bitconvert (v8i8 FPR64:$src))), (f64 FPR64:$src)>;
def : Pat<(f64 (bitconvert (v4f16 FPR64:$src))), (f64 FPR64:$src)>;
def : Pat<(f64 (bitconvert (v4bf16 FPR64:$src))), (f64 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(f64 (bitconvert (v2i32 FPR64:$src))),
          (f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(f64 (bitconvert (v4i16 FPR64:$src))),
          (f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(f64 (bitconvert (v2f32 FPR64:$src))),
          (f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(f64 (bitconvert (v8i8 FPR64:$src))),
          (f64 (REV64v8i8 FPR64:$src))>;
def : Pat<(f64 (bitconvert (v4f16 FPR64:$src))),
          (f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(f64 (bitconvert (v4bf16 FPR64:$src))),
          (f64 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(f64 (bitconvert (v1i64 FPR64:$src))), (f64 FPR64:$src)>;
def : Pat<(f64 (bitconvert (v1f64 FPR64:$src))), (f64 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v8i8 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v4bf16 FPR64:$src))), (v1f64 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))),
          (v1f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))),
          (v1f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v8i8 FPR64:$src))),
          (v1f64 (REV64v8i8 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))),
          (v1f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))),
          (v1f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v4bf16 FPR64:$src))),
          (v1f64 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(v1f64 (bitconvert (v1i64 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (f64 FPR64:$src))), (v1f64 FPR64:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v8i8 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (f64 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v4bf16 FPR64:$src))), (v2f32 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))),
          (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))),
          (v2f32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v8i8 FPR64:$src))),
          (v2f32 (REV32v8i8 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))),
          (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (f64 FPR64:$src))),
          (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))),
          (v2f32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v4bf16 FPR64:$src))),
          (v2f32 (REV32v4i16 FPR64:$src))>;
}
def : Pat<(v2f32 (bitconvert (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v8bf16 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))), (f128 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))),
          (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))),
          (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
                          (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))),
          (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
                          (REV64v8i16 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))),
          (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
                          (REV64v8i16 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v8bf16 FPR128:$src))),
          (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
                          (REV64v8i16 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))),
          (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))),
          (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
                          (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))),
          (f128 (EXTv16i8 (REV64v16i8 FPR128:$src),
                          (REV64v16i8 FPR128:$src), (i32 8)))>;
}
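// Informally, the big-endian f128 <- v4i32 pattern above expands to
// (register names illustrative):
//   rev64 v0.4s, v0.4s
//   ext   v0.16b, v0.16b, v0.16b, #8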
let Predicates = [IsLE] in {
def : Pat<(v2f64 (bitconvert (f128 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8bf16 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2f64 (bitconvert (f128 FPR128:$src))),
          (v2f64 (EXTv16i8 FPR128:$src,
                           FPR128:$src, (i32 8)))>;
def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))),
          (v2f64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))),
          (v2f64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))),
          (v2f64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v8bf16 FPR128:$src))),
          (v2f64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))),
          (v2f64 (REV64v16i8 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))),
          (v2f64 (REV64v4i32 FPR128:$src))>;
}
def : Pat<(v2f64 (bitconvert (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4f32 (bitconvert (f128 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8bf16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4f32 (bitconvert (f128 FPR128:$src))),
          (v4f32 (EXTv16i8 (REV64v4i32 FPR128:$src),
                           (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))),
          (v4f32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))),
          (v4f32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v8bf16 FPR128:$src))),
          (v4f32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))),
          (v4f32 (REV32v16i8 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))),
          (v4f32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))),
          (v4f32 (REV64v4i32 FPR128:$src))>;
}
def : Pat<(v4f32 (bitconvert (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v2i64 (bitconvert (f128 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8bf16 FPR128:$src))), (v2i64 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2i64 (bitconvert (f128 FPR128:$src))),
          (v2i64 (EXTv16i8 FPR128:$src,
                           FPR128:$src, (i32 8)))>;
def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))),
          (v2i64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))),
          (v2i64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))),
          (v2i64 (REV64v16i8 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))),
          (v2i64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))),
          (v2i64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v8bf16 FPR128:$src))),
          (v2i64 (REV64v8i16 FPR128:$src))>;
}
def : Pat<(v2i64 (bitconvert (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4i32 (bitconvert (f128 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8bf16 FPR128:$src))), (v4i32 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4i32 (bitconvert (f128 FPR128:$src))),
          (v4i32 (EXTv16i8 (REV64v4i32 FPR128:$src),
                           (REV64v4i32 FPR128:$src),
                           (i32 8)))>;
def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))),
          (v4i32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))),
          (v4i32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))),
          (v4i32 (REV32v16i8 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))),
          (v4i32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))),
          (v4i32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v8bf16 FPR128:$src))),
          (v4i32 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v4i32 (bitconvert (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v8i16 (bitconvert (f128 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i16 (bitconvert (f128 FPR128:$src))),
          (v8i16 (EXTv16i8 (REV64v8i16 FPR128:$src),
                           (REV64v8i16 FPR128:$src),
                           (i32 8)))>;
def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))),
          (v8i16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))),
          (v8i16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))),
          (v8i16 (REV16v16i8 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))),
          (v8i16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))),
          (v8i16 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v8i16 (bitconvert (v8f16 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v8bf16 FPR128:$src))), (v8i16 FPR128:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v8f16 (bitconvert (f128 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;

def : Pat<(v8bf16 (bitconvert (f128 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v2i64 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v4i32 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v16i8 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v2f64 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v4f32 FPR128:$src))), (v8bf16 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8f16 (bitconvert (f128 FPR128:$src))),
          (v8f16 (EXTv16i8 (REV64v8i16 FPR128:$src),
                           (REV64v8i16 FPR128:$src),
                           (i32 8)))>;
def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))),
          (v8f16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))),
          (v8f16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))),
          (v8f16 (REV16v16i8 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))),
          (v8f16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))),
          (v8f16 (REV32v8i16 FPR128:$src))>;

def : Pat<(v8bf16 (bitconvert (f128 FPR128:$src))),
          (v8bf16 (EXTv16i8 (REV64v8i16 FPR128:$src),
                            (REV64v8i16 FPR128:$src),
                            (i32 8)))>;
def : Pat<(v8bf16 (bitconvert (v2i64 FPR128:$src))),
          (v8bf16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8bf16 (bitconvert (v4i32 FPR128:$src))),
          (v8bf16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8bf16 (bitconvert (v16i8 FPR128:$src))),
          (v8bf16 (REV16v16i8 FPR128:$src))>;
def : Pat<(v8bf16 (bitconvert (v2f64 FPR128:$src))),
          (v8bf16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8bf16 (bitconvert (v4f32 FPR128:$src))),
          (v8bf16 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v8f16 (bitconvert (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v8i16 FPR128:$src))), (v8bf16 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v16i8 (bitconvert (f128 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8bf16 FPR128:$src))), (v16i8 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v16i8 (bitconvert (f128 FPR128:$src))),
          (v16i8 (EXTv16i8 (REV64v16i8 FPR128:$src),
                           (REV64v16i8 FPR128:$src),
                           (i32 8)))>;
def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))),
          (v16i8 (REV64v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))),
          (v16i8 (REV32v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))),
          (v16i8 (REV16v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))),
          (v16i8 (REV64v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))),
          (v16i8 (REV32v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))),
          (v16i8 (REV16v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v8bf16 FPR128:$src))),
          (v16i8 (REV16v16i8 FPR128:$src))>;
}

def : Pat<(v4i16 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v8i8 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v2f32 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v4f16 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v4bf16 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v2i32 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v1i64 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v1f64 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
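// Extracting the high half of a 128-bit vector has no dedicated instruction;
// the patterns below broadcast lane 1 of the v2i64 view with DUP and then
// take the D subregister of the result.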
def : Pat<(v8i8 (extract_subvector (v16i8 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v4i16 (extract_subvector (v8i16 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v2i32 (extract_subvector (v4i32 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v1i64 (extract_subvector (v2i64 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;

// A 64-bit subvector insert to the first 128-bit vector position
// is a subregister copy that needs no instruction.
multiclass InsertSubvectorUndef<ValueType Ty> {
  def : Pat<(insert_subvector undef, (v1i64 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v1f64 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v2i32 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v2f32 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v4i16 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v4f16 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v4bf16 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v8i8 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
}

defm : InsertSubvectorUndef<i32>;
defm : InsertSubvectorUndef<i64>;

// Use pair-wise add instructions when summing up the lanes for v2f64, v2i64
// or v2f32.
def : Pat<(i64 (add (vector_extract (v2i64 FPR128:$Rn), (i64 0)),
                    (vector_extract (v2i64 FPR128:$Rn), (i64 1)))),
          (i64 (ADDPv2i64p (v2i64 FPR128:$Rn)))>;
def : Pat<(f64 (fadd (vector_extract (v2f64 FPR128:$Rn), (i64 0)),
                     (vector_extract (v2f64 FPR128:$Rn), (i64 1)))),
          (f64 (FADDPv2i64p (v2f64 FPR128:$Rn)))>;
// vector_extract on 64-bit vectors gets promoted to a 128-bit vector,
// so we match on v4f32 here, not v2f32. This will also catch adding
// the low two lanes of a true v4f32 vector.
def : Pat<(fadd (vector_extract (v4f32 FPR128:$Rn), (i64 0)),
                (vector_extract (v4f32 FPR128:$Rn), (i64 1))),
          (f32 (FADDPv2i32p (EXTRACT_SUBREG FPR128:$Rn, dsub)))>;
def : Pat<(fadd (vector_extract (v8f16 FPR128:$Rn), (i64 0)),
                (vector_extract (v8f16 FPR128:$Rn), (i64 1))),
          (f16 (FADDPv2i16p (EXTRACT_SUBREG FPR128:$Rn, dsub)))>;

// Scalar 64-bit shifts in FPR64 registers.
def : Pat<(i64 (int_aarch64_neon_sshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (SSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_ushl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (USHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_srshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (SRSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_urshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (URSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;

// Patterns for nontemporal/no-allocate stores.
// We have to resort to tricks to turn a single-input store into a store pair,
// because there is no single-input nontemporal store, only STNP.
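// Informally, a nontemporal store of a v2i64 in q0 to [x0] becomes
// (register names illustrative):
//   mov  d1, v0.d[1]
//   stnp d0, d1, [x0]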
let Predicates = [IsLE] in {
let AddedComplexity = 15 in {
class NTStore128Pat<ValueType VT> :
  Pat<(nontemporalstore (VT FPR128:$Rt),
        (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
      (STNPDi (EXTRACT_SUBREG FPR128:$Rt, dsub),
              (DUPi64 FPR128:$Rt, (i64 1)),
              GPR64sp:$Rn, simm7s8:$offset)>;

def : NTStore128Pat<v2i64>;
def : NTStore128Pat<v4i32>;
def : NTStore128Pat<v8i16>;
def : NTStore128Pat<v16i8>;

class NTStore64Pat<ValueType VT> :
  Pat<(nontemporalstore (VT FPR64:$Rt),
        (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
      (STNPSi (EXTRACT_SUBREG FPR64:$Rt, ssub),
              (DUPi32 (SUBREG_TO_REG (i64 0), FPR64:$Rt, dsub), (i64 1)),
              GPR64sp:$Rn, simm7s4:$offset)>;

// FIXME: Shouldn't v1f64 loads/stores be promoted to v1i64?
def : NTStore64Pat<v1f64>;
def : NTStore64Pat<v1i64>;
def : NTStore64Pat<v2i32>;
def : NTStore64Pat<v4i16>;
def : NTStore64Pat<v8i8>;

def : Pat<(nontemporalstore GPR64:$Rt,
            (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
          (STNPWi (EXTRACT_SUBREG GPR64:$Rt, sub_32),
                  (EXTRACT_SUBREG (UBFMXri GPR64:$Rt, 32, 63), sub_32),
                  GPR64sp:$Rn, simm7s4:$offset)>;
} // AddedComplexity = 15
} // Predicates = [IsLE]

// Tail call return handling. These are all compiler pseudo-instructions,
// so no encoding information or anything like that.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [SP] in {
  def TCRETURNdi : Pseudo<(outs), (ins i64imm:$dst, i32imm:$FPDiff), []>,
                   Sched<[WriteBrReg]>;
  def TCRETURNri : Pseudo<(outs), (ins tcGPR64:$dst, i32imm:$FPDiff), []>,
                   Sched<[WriteBrReg]>;
  // Indirect tail-call with any register allowed, used by MachineOutliner when
  // this is proven safe.
  // FIXME: If we have to add any more hacks like this, we should instead relax
  // some verifier checks for outlined functions.
  def TCRETURNriALL : Pseudo<(outs), (ins GPR64:$dst, i32imm:$FPDiff), []>,
                      Sched<[WriteBrReg]>;
  // Indirect tail-call limited to the registers (x16 and x17) that are
  // allowed to branch to a "BTI c" instruction.
  def TCRETURNriBTI : Pseudo<(outs), (ins rtcGPR64:$dst, i32imm:$FPDiff), []>,
                      Sched<[WriteBrReg]>;
}

def : Pat<(AArch64tcret tcGPR64:$dst, (i32 timm:$FPDiff)),
          (TCRETURNri tcGPR64:$dst, imm:$FPDiff)>,
      Requires<[NotUseBTI]>;
def : Pat<(AArch64tcret rtcGPR64:$dst, (i32 timm:$FPDiff)),
          (TCRETURNriBTI rtcGPR64:$dst, imm:$FPDiff)>,
      Requires<[UseBTI]>;
def : Pat<(AArch64tcret tglobaladdr:$dst, (i32 timm:$FPDiff)),
          (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;
def : Pat<(AArch64tcret texternalsym:$dst, (i32 timm:$FPDiff)),
          (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;

def MOVMCSym : Pseudo<(outs GPR64:$dst), (ins i64imm:$sym), []>, Sched<[]>;
def : Pat<(i64 (AArch64LocalRecover mcsym:$sym)), (MOVMCSym mcsym:$sym)>;

// Extracting lane zero is a special case where we can just use a plain
// EXTRACT_SUBREG instruction, which will become FMOV. This is easier for the
// rest of the compiler, especially the register allocator and copy
// propagation, to reason about, so is preferred when it's possible to use it.
let AddedComplexity = 10 in {
  def : Pat<(i64 (extractelt (v2i64 V128:$V), (i64 0))), (EXTRACT_SUBREG V128:$V, dsub)>;
  def : Pat<(i32 (extractelt (v4i32 V128:$V), (i64 0))), (EXTRACT_SUBREG V128:$V, ssub)>;
  def : Pat<(i32 (extractelt (v2i32 V64:$V), (i64 0))), (EXTRACT_SUBREG V64:$V, ssub)>;
}

// dot_v4i8
class mul_v4i8<SDPatternOperator ldop> :
  PatFrag<(ops node:$Rn, node:$Rm, node:$offset),
          (mul (ldop (add node:$Rn, node:$offset)),
               (ldop (add node:$Rm, node:$offset)))>;
class mulz_v4i8<SDPatternOperator ldop> :
  PatFrag<(ops node:$Rn, node:$Rm),
          (mul (ldop node:$Rn), (ldop node:$Rm))>;

def load_v4i8 :
  OutPatFrag<(ops node:$R),
             (INSERT_SUBREG
              (v2i32 (IMPLICIT_DEF)),
              (i32 (COPY_TO_REGCLASS (LDRWui node:$R, (i64 0)), FPR32)),
              ssub)>;

class dot_v4i8<Instruction DOT, SDPatternOperator ldop> :
  Pat<(i32 (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 3)),
           (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 2)),
           (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 1)),
                (mulz_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm))))),
      (EXTRACT_SUBREG (i64 (DOT (DUPv2i32gpr WZR),
                                (load_v4i8 GPR64sp:$Rn),
                                (load_v4i8 GPR64sp:$Rm))),
                      sub_32)>, Requires<[HasDotProd]>;

// dot_v8i8
class ee_v8i8<SDPatternOperator extend> :
  PatFrag<(ops node:$V, node:$K),
          (v4i16 (extract_subvector (v8i16 (extend node:$V)), node:$K))>;

class mul_v8i8<SDPatternOperator mulop, SDPatternOperator extend> :
  PatFrag<(ops node:$M, node:$N, node:$K),
          (mulop (v4i16 (ee_v8i8<extend> node:$M, node:$K)),
                 (v4i16 (ee_v8i8<extend> node:$N, node:$K)))>;

class idot_v8i8<SDPatternOperator mulop, SDPatternOperator extend> :
  PatFrag<(ops node:$M, node:$N),
          (i32 (extractelt
           (v4i32 (AArch64uaddv
            (add (mul_v8i8<mulop, extend> node:$M, node:$N, (i64 0)),
                 (mul_v8i8<mulop, extend> node:$M, node:$N, (i64 4))))),
           (i64 0)))>;

// vaddv_[su]32 is special; -> ADDP Vd.2S, Vn.2S, Vm.2S; return Vd.s[0]; Vn==Vm
def VADDV_32 : OutPatFrag<(ops node:$R), (ADDPv2i32 node:$R, node:$R)>;

class odot_v8i8<Instruction DOT> :
  OutPatFrag<(ops node:$Vm, node:$Vn),
             (EXTRACT_SUBREG
              (VADDV_32
               (i64 (DOT (DUPv2i32gpr WZR),
                         (v8i8 node:$Vm),
                         (v8i8 node:$Vn)))),
              sub_32)>;

class dot_v8i8<Instruction DOT, SDPatternOperator mulop,
               SDPatternOperator extend> :
  Pat<(idot_v8i8<mulop, extend> V64:$Vm, V64:$Vn),
      (odot_v8i8<DOT> V64:$Vm, V64:$Vn)>,
  Requires<[HasDotProd]>;

// dot_v16i8
class ee_v16i8<SDPatternOperator extend> :
  PatFrag<(ops node:$V, node:$K1, node:$K2),
          (v4i16 (extract_subvector
           (v8i16 (extend
            (v8i8 (extract_subvector node:$V, node:$K1)))), node:$K2))>;

class mul_v16i8<SDPatternOperator mulop, SDPatternOperator extend> :
  PatFrag<(ops node:$M, node:$N, node:$K1, node:$K2),
          (v4i32
           (mulop (v4i16 (ee_v16i8<extend> node:$M, node:$K1, node:$K2)),
                  (v4i16 (ee_v16i8<extend> node:$N, node:$K1, node:$K2))))>;

class idot_v16i8<SDPatternOperator m, SDPatternOperator x> :
  PatFrag<(ops node:$M, node:$N),
          (i32 (extractelt
           (v4i32 (AArch64uaddv
            (add
             (add (mul_v16i8<m, x> node:$M, node:$N, (i64 0), (i64 0)),
                  (mul_v16i8<m, x> node:$M, node:$N, (i64 8), (i64 0))),
             (add (mul_v16i8<m, x> node:$M, node:$N, (i64 0), (i64 4)),
                  (mul_v16i8<m, x> node:$M, node:$N, (i64 8), (i64 4)))))),
           (i64 0)))>;
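// Roughly, dot_v8i8<UDOTv8i8, ...> replaces the whole widened multiply/add
// tree above with (register names illustrative):
//   dup  v2.2s, wzr
//   udot v2.2s, v0.8b, v1.8b
//   addp v2.2s, v2.2s, v2.2s
//   fmov w0, s2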
class odot_v16i8<Instruction DOT> :
  OutPatFrag<(ops node:$Vm, node:$Vn),
             (i32 (ADDVv4i32v
              (DOT (DUPv4i32gpr WZR), node:$Vm, node:$Vn)))>;

class dot_v16i8<Instruction DOT, SDPatternOperator mulop,
                SDPatternOperator extend> :
  Pat<(idot_v16i8<mulop, extend> V128:$Vm, V128:$Vn),
      (odot_v16i8<DOT> V128:$Vm, V128:$Vn)>,
  Requires<[HasDotProd]>;

let AddedComplexity = 10 in {
  def : dot_v4i8<SDOTv8i8, sextloadi8>;
  def : dot_v4i8<UDOTv8i8, zextloadi8>;
  def : dot_v8i8<SDOTv8i8, AArch64smull, sext>;
  def : dot_v8i8<UDOTv8i8, AArch64umull, zext>;
  def : dot_v16i8<SDOTv16i8, AArch64smull, sext>;
  def : dot_v16i8<UDOTv16i8, AArch64umull, zext>;

  // FIXME: add patterns to generate vector by element dot product.
  // FIXME: add SVE dot-product patterns.
}

// Custom DAG nodes and isel rules to make a 64-byte block out of eight GPRs,
// so that it can be used as input to inline asm, and vice versa.
def LS64_BUILD : SDNode<"AArch64ISD::LS64_BUILD", SDTypeProfile<1, 8, []>>;
def LS64_EXTRACT : SDNode<"AArch64ISD::LS64_EXTRACT", SDTypeProfile<1, 2, []>>;
def : Pat<(i64x8 (LS64_BUILD GPR64:$x0, GPR64:$x1, GPR64:$x2, GPR64:$x3,
                             GPR64:$x4, GPR64:$x5, GPR64:$x6, GPR64:$x7)),
          (REG_SEQUENCE GPR64x8Class,
              $x0, x8sub_0, $x1, x8sub_1, $x2, x8sub_2, $x3, x8sub_3,
              $x4, x8sub_4, $x5, x8sub_5, $x6, x8sub_6, $x7, x8sub_7)>;
foreach i = 0-7 in {
  def : Pat<(i64 (LS64_EXTRACT (i64x8 GPR64x8:$val), (i32 i))),
            (EXTRACT_SUBREG $val, !cast<SubRegIndex>("x8sub_"#i))>;
}

let Predicates = [HasLS64] in {
  def LD64B: LoadStore64B<0b101, "ld64b", (ins GPR64sp:$Rn),
                                          (outs GPR64x8:$Rt)>;
  def ST64B: LoadStore64B<0b001, "st64b", (ins GPR64x8:$Rt, GPR64sp:$Rn),
                                          (outs)>;
  def ST64BV: Store64BV<0b011, "st64bv">;
  def ST64BV0: Store64BV<0b010, "st64bv0">;

  class ST64BPattern<Intrinsic intrinsic, Instruction instruction>
    : Pat<(intrinsic GPR64sp:$addr, GPR64:$x0, GPR64:$x1, GPR64:$x2, GPR64:$x3, GPR64:$x4, GPR64:$x5, GPR64:$x6, GPR64:$x7),
          (instruction (REG_SEQUENCE GPR64x8Class, $x0, x8sub_0, $x1, x8sub_1, $x2, x8sub_2, $x3, x8sub_3, $x4, x8sub_4, $x5, x8sub_5, $x6, x8sub_6, $x7, x8sub_7), $addr)>;

  def : ST64BPattern<int_aarch64_st64b, ST64B>;
  def : ST64BPattern<int_aarch64_st64bv, ST64BV>;
  def : ST64BPattern<int_aarch64_st64bv0, ST64BV0>;
}

let Predicates = [HasMOPS] in {
  defm CPYFP : MOPSMemoryCopyInsns<0b00, "cpyfp">;
  defm CPYFM : MOPSMemoryCopyInsns<0b01, "cpyfm">;
  defm CPYFE : MOPSMemoryCopyInsns<0b10, "cpyfe">;

  defm CPYP : MOPSMemoryMoveInsns<0b00, "cpyp">;
  defm CPYM : MOPSMemoryMoveInsns<0b01, "cpym">;
  defm CPYE : MOPSMemoryMoveInsns<0b10, "cpye">;

  defm SETP : MOPSMemorySetInsns<0b00, "setp">;
  defm SETM : MOPSMemorySetInsns<0b01, "setm">;
  defm SETE : MOPSMemorySetInsns<0b10, "sete">;
}
let Predicates = [HasMOPS, HasMTE] in {
  defm SETGP : MOPSMemorySetTaggingInsns<0b00, "setgp">;
  defm SETGM : MOPSMemorySetTaggingInsns<0b01, "setgm">;
  // Can't use SETGE because it's a reserved name in TargetSelectionDAG.td
  defm MOPSSETGE : MOPSMemorySetTaggingInsns<0b10, "setge">;
}

let Defs = [X16, X17], mayStore = 1, isCodeGenOnly = 1 in
def StoreSwiftAsyncContext
      : Pseudo<(outs), (ins GPR64:$ctx, GPR64sp:$base, simm9:$offset),
               []>, Sched<[]>;
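// Note: StoreSwiftAsyncContext is expanded after instruction selection; the
// Defs above reflect that its expansion may use x16/x17 as scratch registers.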
SDNode<"AArch64ISD::ASSERT_ZEXT_BOOL", SDT_assert>; 8358def : Pat<(AArch64AssertZExtBool GPR32:$op), 8359 (i32 GPR32:$op)>; 8360 8361include "AArch64InstrAtomics.td" 8362include "AArch64SVEInstrInfo.td" 8363include "AArch64SMEInstrInfo.td" 8364include "AArch64InstrGISel.td" 8365