1//=- AArch64InstrInfo.td - Describe the AArch64 Instructions -*- tablegen -*-=//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// AArch64 Instruction definitions.
10//
11//===----------------------------------------------------------------------===//
12
13//===----------------------------------------------------------------------===//
14// ARM Instruction Predicate Definitions.
15//
16def HasV8_1a         : Predicate<"Subtarget->hasV8_1aOps()">,
17                                 AssemblerPredicateWithAll<(all_of HasV8_1aOps), "armv8.1a">;
18def HasV8_2a         : Predicate<"Subtarget->hasV8_2aOps()">,
19                                 AssemblerPredicateWithAll<(all_of HasV8_2aOps), "armv8.2a">;
20def HasV8_3a         : Predicate<"Subtarget->hasV8_3aOps()">,
21                                 AssemblerPredicateWithAll<(all_of HasV8_3aOps), "armv8.3a">;
22def HasV8_4a         : Predicate<"Subtarget->hasV8_4aOps()">,
23                                 AssemblerPredicateWithAll<(all_of HasV8_4aOps), "armv8.4a">;
24def HasV8_5a         : Predicate<"Subtarget->hasV8_5aOps()">,
25                                 AssemblerPredicateWithAll<(all_of HasV8_5aOps), "armv8.5a">;
26def HasV8_6a         : Predicate<"Subtarget->hasV8_6aOps()">,
27                                 AssemblerPredicateWithAll<(all_of HasV8_6aOps), "armv8.6a">;
28def HasV8_7a         : Predicate<"Subtarget->hasV8_7aOps()">,
29                                 AssemblerPredicateWithAll<(all_of HasV8_7aOps), "armv8.7a">;
30def HasV9_0a         : Predicate<"Subtarget->hasV9_0aOps()">,
31                                 AssemblerPredicateWithAll<(all_of HasV9_0aOps), "armv9-a">;
32def HasV9_1a         : Predicate<"Subtarget->hasV9_1aOps()">,
33                                 AssemblerPredicateWithAll<(all_of HasV9_1aOps), "armv9.1a">;
34def HasV9_2a         : Predicate<"Subtarget->hasV9_2aOps()">,
35                                 AssemblerPredicateWithAll<(all_of HasV9_2aOps), "armv9.2a">;
36def HasV9_3a         : Predicate<"Subtarget->hasV9_3aOps()">,
37                                 AssemblerPredicateWithAll<(all_of HasV9_3aOps), "armv9.3a">;
38def HasV8_0r         : Predicate<"Subtarget->hasV8_0rOps()">,
39                                 AssemblerPredicateWithAll<(all_of HasV8_0rOps), "armv8-r">;
40
41def HasEL2VMSA       : Predicate<"Subtarget->hasEL2VMSA()">,
42                       AssemblerPredicateWithAll<(all_of FeatureEL2VMSA), "el2vmsa">;
43
44def HasEL3           : Predicate<"Subtarget->hasEL3()">,
45                       AssemblerPredicateWithAll<(all_of FeatureEL3), "el3">;
46
47def HasVH            : Predicate<"Subtarget->hasVH()">,
48                       AssemblerPredicateWithAll<(all_of FeatureVH), "vh">;
49
50def HasLOR           : Predicate<"Subtarget->hasLOR()">,
51                       AssemblerPredicateWithAll<(all_of FeatureLOR), "lor">;
52
53def HasPAuth         : Predicate<"Subtarget->hasPAuth()">,
54                       AssemblerPredicateWithAll<(all_of FeaturePAuth), "pauth">;
55
56def HasJS            : Predicate<"Subtarget->hasJS()">,
57                       AssemblerPredicateWithAll<(all_of FeatureJS), "jsconv">;
58
59def HasCCIDX         : Predicate<"Subtarget->hasCCIDX()">,
60                       AssemblerPredicateWithAll<(all_of FeatureCCIDX), "ccidx">;
61
62def HasComplxNum      : Predicate<"Subtarget->hasComplxNum()">,
63                       AssemblerPredicateWithAll<(all_of FeatureComplxNum), "complxnum">;
64
65def HasNV            : Predicate<"Subtarget->hasNV()">,
66                       AssemblerPredicateWithAll<(all_of FeatureNV), "nv">;
67
68def HasMPAM          : Predicate<"Subtarget->hasMPAM()">,
69                       AssemblerPredicateWithAll<(all_of FeatureMPAM), "mpam">;
70
71def HasDIT           : Predicate<"Subtarget->hasDIT()">,
72                       AssemblerPredicateWithAll<(all_of FeatureDIT), "dit">;
73
74def HasTRACEV8_4         : Predicate<"Subtarget->hasTRACEV8_4()">,
75                       AssemblerPredicateWithAll<(all_of FeatureTRACEV8_4), "tracev8.4">;
76
77def HasAM            : Predicate<"Subtarget->hasAM()">,
78                       AssemblerPredicateWithAll<(all_of FeatureAM), "am">;
79
80def HasSEL2          : Predicate<"Subtarget->hasSEL2()">,
81                       AssemblerPredicateWithAll<(all_of FeatureSEL2), "sel2">;
82
83def HasTLB_RMI          : Predicate<"Subtarget->hasTLB_RMI()">,
84                       AssemblerPredicateWithAll<(all_of FeatureTLB_RMI), "tlb-rmi">;
85
86def HasFlagM         : Predicate<"Subtarget->hasFlagM()">,
87                       AssemblerPredicateWithAll<(all_of FeatureFlagM), "flagm">;
88
89def HasRCPC_IMMO      : Predicate<"Subtarget->hasRCPCImm()">,
90                       AssemblerPredicateWithAll<(all_of FeatureRCPC_IMMO), "rcpc-immo">;
91
92def HasFPARMv8       : Predicate<"Subtarget->hasFPARMv8()">,
93                               AssemblerPredicateWithAll<(all_of FeatureFPARMv8), "fp-armv8">;
94def HasNEON          : Predicate<"Subtarget->hasNEON()">,
95                                 AssemblerPredicateWithAll<(all_of FeatureNEON), "neon">;
96def HasCrypto        : Predicate<"Subtarget->hasCrypto()">,
97                                 AssemblerPredicateWithAll<(all_of FeatureCrypto), "crypto">;
98def HasSM4           : Predicate<"Subtarget->hasSM4()">,
99                                 AssemblerPredicateWithAll<(all_of FeatureSM4), "sm4">;
100def HasSHA3          : Predicate<"Subtarget->hasSHA3()">,
101                                 AssemblerPredicateWithAll<(all_of FeatureSHA3), "sha3">;
102def HasSHA2          : Predicate<"Subtarget->hasSHA2()">,
103                                 AssemblerPredicateWithAll<(all_of FeatureSHA2), "sha2">;
104def HasAES           : Predicate<"Subtarget->hasAES()">,
105                                 AssemblerPredicateWithAll<(all_of FeatureAES), "aes">;
106def HasDotProd       : Predicate<"Subtarget->hasDotProd()">,
107                                 AssemblerPredicateWithAll<(all_of FeatureDotProd), "dotprod">;
108def HasCRC           : Predicate<"Subtarget->hasCRC()">,
109                                 AssemblerPredicateWithAll<(all_of FeatureCRC), "crc">;
110def HasLSE           : Predicate<"Subtarget->hasLSE()">,
111                                 AssemblerPredicateWithAll<(all_of FeatureLSE), "lse">;
112def HasNoLSE         : Predicate<"!Subtarget->hasLSE()">;
113def HasRAS           : Predicate<"Subtarget->hasRAS()">,
114                                 AssemblerPredicateWithAll<(all_of FeatureRAS), "ras">;
115def HasRDM           : Predicate<"Subtarget->hasRDM()">,
116                                 AssemblerPredicateWithAll<(all_of FeatureRDM), "rdm">;
117def HasPerfMon       : Predicate<"Subtarget->hasPerfMon()">;
118def HasFullFP16      : Predicate<"Subtarget->hasFullFP16()">,
119                                 AssemblerPredicateWithAll<(all_of FeatureFullFP16), "fullfp16">;
120def HasFP16FML       : Predicate<"Subtarget->hasFP16FML()">,
121                                 AssemblerPredicateWithAll<(all_of FeatureFP16FML), "fp16fml">;
122def HasSPE           : Predicate<"Subtarget->hasSPE()">,
123                                 AssemblerPredicateWithAll<(all_of FeatureSPE), "spe">;
124def HasFuseAES       : Predicate<"Subtarget->hasFuseAES()">,
125                                 AssemblerPredicateWithAll<(all_of FeatureFuseAES),
126                                 "fuse-aes">;
127def HasSVE           : Predicate<"Subtarget->hasSVE()">,
128                                 AssemblerPredicateWithAll<(all_of FeatureSVE), "sve">;
129def HasSVE2          : Predicate<"Subtarget->hasSVE2()">,
130                                 AssemblerPredicateWithAll<(all_of FeatureSVE2), "sve2">;
131def HasSVE2AES       : Predicate<"Subtarget->hasSVE2AES()">,
132                                 AssemblerPredicateWithAll<(all_of FeatureSVE2AES), "sve2-aes">;
133def HasSVE2SM4       : Predicate<"Subtarget->hasSVE2SM4()">,
134                                 AssemblerPredicateWithAll<(all_of FeatureSVE2SM4), "sve2-sm4">;
135def HasSVE2SHA3      : Predicate<"Subtarget->hasSVE2SHA3()">,
136                                 AssemblerPredicateWithAll<(all_of FeatureSVE2SHA3), "sve2-sha3">;
137def HasSVE2BitPerm   : Predicate<"Subtarget->hasSVE2BitPerm()">,
138                                 AssemblerPredicateWithAll<(all_of FeatureSVE2BitPerm), "sve2-bitperm">;
139def HasSME           : Predicate<"Subtarget->hasSME()">,
140                                 AssemblerPredicateWithAll<(all_of FeatureSME), "sme">;
141def HasSMEF64        : Predicate<"Subtarget->hasSMEF64()">,
142                                 AssemblerPredicateWithAll<(all_of FeatureSMEF64), "sme-f64">;
143def HasSMEI64        : Predicate<"Subtarget->hasSMEI64()">,
144                                 AssemblerPredicateWithAll<(all_of FeatureSMEI64), "sme-i64">;
145// A subset of SVE(2) instructions are legal in Streaming SVE execution mode,
146// they should be enabled if either has been specified.
147def HasSVEorSME
148    : Predicate<"Subtarget->hasSVE() || Subtarget->hasSME()">,
149                AssemblerPredicateWithAll<(any_of FeatureSVE, FeatureSME),
150                "sve or sme">;
151def HasSVE2orSME
152    : Predicate<"Subtarget->hasSVE2() || Subtarget->hasSME()">,
153                AssemblerPredicateWithAll<(any_of FeatureSVE2, FeatureSME),
154                "sve2 or sme">;
155// A subset of NEON instructions are legal in Streaming SVE execution mode,
156// they should be enabled if either has been specified.
157def HasNEONorSME
158    : Predicate<"Subtarget->hasNEON() || Subtarget->hasSME()">,
159                AssemblerPredicateWithAll<(any_of FeatureNEON, FeatureSME),
160                "neon or sme">;
161def HasRCPC          : Predicate<"Subtarget->hasRCPC()">,
162                                 AssemblerPredicateWithAll<(all_of FeatureRCPC), "rcpc">;
163def HasLDAPR         : Predicate<"Subtarget->hasLDAPR()">,
164                                 AssemblerPredicateWithAll<(all_of FeatureLDAPR), "ldapr">;
165def HasAltNZCV       : Predicate<"Subtarget->hasAlternativeNZCV()">,
166                       AssemblerPredicateWithAll<(all_of FeatureAltFPCmp), "altnzcv">;
167def HasFRInt3264     : Predicate<"Subtarget->hasFRInt3264()">,
168                       AssemblerPredicateWithAll<(all_of FeatureFRInt3264), "frint3264">;
169def HasSB            : Predicate<"Subtarget->hasSB()">,
170                       AssemblerPredicateWithAll<(all_of FeatureSB), "sb">;
171def HasPredRes      : Predicate<"Subtarget->hasPredRes()">,
172                       AssemblerPredicateWithAll<(all_of FeaturePredRes), "predres">;
173def HasCCDP          : Predicate<"Subtarget->hasCCDP()">,
174                       AssemblerPredicateWithAll<(all_of FeatureCacheDeepPersist), "ccdp">;
175def HasBTI           : Predicate<"Subtarget->hasBTI()">,
176                       AssemblerPredicateWithAll<(all_of FeatureBranchTargetId), "bti">;
177def HasMTE           : Predicate<"Subtarget->hasMTE()">,
178                       AssemblerPredicateWithAll<(all_of FeatureMTE), "mte">;
179def HasTME           : Predicate<"Subtarget->hasTME()">,
180                       AssemblerPredicateWithAll<(all_of FeatureTME), "tme">;
181def HasETE           : Predicate<"Subtarget->hasETE()">,
182                       AssemblerPredicateWithAll<(all_of FeatureETE), "ete">;
183def HasTRBE          : Predicate<"Subtarget->hasTRBE()">,
184                       AssemblerPredicateWithAll<(all_of FeatureTRBE), "trbe">;
185def HasBF16          : Predicate<"Subtarget->hasBF16()">,
186                       AssemblerPredicateWithAll<(all_of FeatureBF16), "bf16">;
187def HasMatMulInt8    : Predicate<"Subtarget->hasMatMulInt8()">,
188                       AssemblerPredicateWithAll<(all_of FeatureMatMulInt8), "i8mm">;
189def HasMatMulFP32    : Predicate<"Subtarget->hasMatMulFP32()">,
190                       AssemblerPredicateWithAll<(all_of FeatureMatMulFP32), "f32mm">;
191def HasMatMulFP64    : Predicate<"Subtarget->hasMatMulFP64()">,
192                       AssemblerPredicateWithAll<(all_of FeatureMatMulFP64), "f64mm">;
193def HasXS            : Predicate<"Subtarget->hasXS()">,
194                       AssemblerPredicateWithAll<(all_of FeatureXS), "xs">;
195def HasWFxT          : Predicate<"Subtarget->hasWFxT()">,
196                       AssemblerPredicateWithAll<(all_of FeatureWFxT), "wfxt">;
197def HasLS64          : Predicate<"Subtarget->hasLS64()">,
198                       AssemblerPredicateWithAll<(all_of FeatureLS64), "ls64">;
199def HasBRBE          : Predicate<"Subtarget->hasBRBE()">,
200                       AssemblerPredicateWithAll<(all_of FeatureBRBE), "brbe">;
201def HasSPE_EEF       : Predicate<"Subtarget->hasSPE_EEF()">,
202                       AssemblerPredicateWithAll<(all_of FeatureSPE_EEF), "spe-eef">;
203def HasHBC           : Predicate<"Subtarget->hasHBC()">,
204                       AssemblerPredicateWithAll<(all_of FeatureHBC), "hbc">;
205def HasMOPS          : Predicate<"Subtarget->hasMOPS()">,
206                       AssemblerPredicateWithAll<(all_of FeatureMOPS), "mops">;
207def IsLE             : Predicate<"Subtarget->isLittleEndian()">;
208def IsBE             : Predicate<"!Subtarget->isLittleEndian()">;
209def IsWindows        : Predicate<"Subtarget->isTargetWindows()">;
210def UseExperimentalZeroingPseudos
211    : Predicate<"Subtarget->useExperimentalZeroingPseudos()">;
212def UseAlternateSExtLoadCVTF32
213    : Predicate<"Subtarget->useAlternateSExtLoadCVTF32Pattern()">;
214
215def UseNegativeImmediates
216    : Predicate<"false">, AssemblerPredicate<(all_of (not FeatureNoNegativeImmediates)),
217                                             "NegativeImmediates">;
218
219def UseScalarIncVL : Predicate<"Subtarget->useScalarIncVL()">;
220
221def AArch64LocalRecover : SDNode<"ISD::LOCAL_RECOVER",
222                                  SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
223                                                       SDTCisInt<1>]>>;
224
225
226//===----------------------------------------------------------------------===//
227// AArch64-specific DAG Nodes.
228//
229
230// SDTBinaryArithWithFlagsOut - RES1, FLAGS = op LHS, RHS
231def SDTBinaryArithWithFlagsOut : SDTypeProfile<2, 2,
232                                              [SDTCisSameAs<0, 2>,
233                                               SDTCisSameAs<0, 3>,
234                                               SDTCisInt<0>, SDTCisVT<1, i32>]>;
235
236// SDTBinaryArithWithFlagsIn - RES1, FLAGS = op LHS, RHS, FLAGS
237def SDTBinaryArithWithFlagsIn : SDTypeProfile<1, 3,
238                                            [SDTCisSameAs<0, 1>,
239                                             SDTCisSameAs<0, 2>,
240                                             SDTCisInt<0>,
241                                             SDTCisVT<3, i32>]>;
242
243// SDTBinaryArithWithFlagsInOut - RES1, FLAGS = op LHS, RHS, FLAGS
244def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
245                                            [SDTCisSameAs<0, 2>,
246                                             SDTCisSameAs<0, 3>,
247                                             SDTCisInt<0>,
248                                             SDTCisVT<1, i32>,
249                                             SDTCisVT<4, i32>]>;
250
251def SDT_AArch64Brcond  : SDTypeProfile<0, 3,
252                                     [SDTCisVT<0, OtherVT>, SDTCisVT<1, i32>,
253                                      SDTCisVT<2, i32>]>;
254def SDT_AArch64cbz : SDTypeProfile<0, 2, [SDTCisInt<0>, SDTCisVT<1, OtherVT>]>;
255def SDT_AArch64tbz : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>,
256                                        SDTCisVT<2, OtherVT>]>;
257
258
259def SDT_AArch64CSel  : SDTypeProfile<1, 4,
260                                   [SDTCisSameAs<0, 1>,
261                                    SDTCisSameAs<0, 2>,
262                                    SDTCisInt<3>,
263                                    SDTCisVT<4, i32>]>;
264def SDT_AArch64CCMP : SDTypeProfile<1, 5,
265                                    [SDTCisVT<0, i32>,
266                                     SDTCisInt<1>,
267                                     SDTCisSameAs<1, 2>,
268                                     SDTCisInt<3>,
269                                     SDTCisInt<4>,
270                                     SDTCisVT<5, i32>]>;
271def SDT_AArch64FCCMP : SDTypeProfile<1, 5,
272                                     [SDTCisVT<0, i32>,
273                                      SDTCisFP<1>,
274                                      SDTCisSameAs<1, 2>,
275                                      SDTCisInt<3>,
276                                      SDTCisInt<4>,
277                                      SDTCisVT<5, i32>]>;
278def SDT_AArch64FCmp   : SDTypeProfile<0, 2,
279                                   [SDTCisFP<0>,
280                                    SDTCisSameAs<0, 1>]>;
281def SDT_AArch64Dup   : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
282def SDT_AArch64DupLane   : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisInt<2>]>;
283def SDT_AArch64Insr  : SDTypeProfile<1, 2, [SDTCisVec<0>]>;
284def SDT_AArch64Zip   : SDTypeProfile<1, 2, [SDTCisVec<0>,
285                                          SDTCisSameAs<0, 1>,
286                                          SDTCisSameAs<0, 2>]>;
287def SDT_AArch64MOVIedit : SDTypeProfile<1, 1, [SDTCisInt<1>]>;
288def SDT_AArch64MOVIshift : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
289def SDT_AArch64vecimm : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
290                                           SDTCisInt<2>, SDTCisInt<3>]>;
291def SDT_AArch64UnaryVec: SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
292def SDT_AArch64ExtVec: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
293                                          SDTCisSameAs<0,2>, SDTCisInt<3>]>;
294def SDT_AArch64vshift : SDTypeProfile<1, 2, [SDTCisSameAs<0,1>, SDTCisInt<2>]>;
295def SDT_AArch64Dot: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
296                                         SDTCisVec<2>, SDTCisSameAs<2,3>]>;
297
298def SDT_AArch64vshiftinsert : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<3>,
299                                                 SDTCisSameAs<0,1>,
300                                                 SDTCisSameAs<0,2>]>;
301
302def SDT_AArch64unvec : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
303def SDT_AArch64fcmpz : SDTypeProfile<1, 1, []>;
304def SDT_AArch64fcmp  : SDTypeProfile<1, 2, [SDTCisSameAs<1,2>]>;
305def SDT_AArch64binvec : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
306                                           SDTCisSameAs<0,2>]>;
307def SDT_AArch64trivec : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
308                                           SDTCisSameAs<0,2>,
309                                           SDTCisSameAs<0,3>]>;
310def SDT_AArch64TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>]>;
311def SDT_AArch64PREFETCH : SDTypeProfile<0, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<1>]>;
312
313def SDT_AArch64ITOF  : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisSameAs<0,1>]>;
314
315def SDT_AArch64TLSDescCall : SDTypeProfile<0, -2, [SDTCisPtrTy<0>,
316                                                 SDTCisPtrTy<1>]>;
317
318def SDT_AArch64uaddlp : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>]>;
319
320def SDT_AArch64ldp : SDTypeProfile<2, 1, [SDTCisVT<0, i64>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
321def SDT_AArch64stp : SDTypeProfile<0, 3, [SDTCisVT<0, i64>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
322def SDT_AArch64stnp : SDTypeProfile<0, 3, [SDTCisVT<0, v4i32>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
323
324// Generates the general dynamic sequences, i.e.
325//  adrp  x0, :tlsdesc:var
326//  ldr   x1, [x0, #:tlsdesc_lo12:var]
327//  add   x0, x0, #:tlsdesc_lo12:var
328//  .tlsdesccall var
329//  blr   x1
330
331// (the TPIDR_EL0 offset is put directly in X0, hence no "result" here)
332// number of operands (the variable)
333def SDT_AArch64TLSDescCallSeq : SDTypeProfile<0,1,
334                                          [SDTCisPtrTy<0>]>;
335
336def SDT_AArch64WrapperLarge : SDTypeProfile<1, 4,
337                                        [SDTCisVT<0, i64>, SDTCisVT<1, i32>,
338                                         SDTCisSameAs<1, 2>, SDTCisSameAs<1, 3>,
339                                         SDTCisSameAs<1, 4>]>;
340
341def SDT_AArch64TBL : SDTypeProfile<1, 2, [
342  SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisInt<2>
343]>;
344
345// non-extending masked load fragment.
346def nonext_masked_load :
347  PatFrag<(ops node:$ptr, node:$pred, node:$def),
348          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
349  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD &&
350         cast<MaskedLoadSDNode>(N)->isUnindexed() &&
351         !cast<MaskedLoadSDNode>(N)->isNonTemporal();
352}]>;
353// Any/Zero extending masked load fragments.
354def azext_masked_load :
355  PatFrag<(ops node:$ptr, node:$pred, node:$def),
356          (masked_ld node:$ptr, undef, node:$pred, node:$def),[{
357  return (cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD ||
358          cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD) &&
359         cast<MaskedLoadSDNode>(N)->isUnindexed();
360}]>;
361def azext_masked_load_i8 :
362  PatFrag<(ops node:$ptr, node:$pred, node:$def),
363          (azext_masked_load node:$ptr, node:$pred, node:$def), [{
364  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
365}]>;
366def azext_masked_load_i16 :
367  PatFrag<(ops node:$ptr, node:$pred, node:$def),
368          (azext_masked_load node:$ptr, node:$pred, node:$def), [{
369  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
370}]>;
371def azext_masked_load_i32 :
372  PatFrag<(ops node:$ptr, node:$pred, node:$def),
373          (azext_masked_load node:$ptr, node:$pred, node:$def), [{
374  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
375}]>;
376// Sign extending masked load fragments.
377def sext_masked_load :
378  PatFrag<(ops node:$ptr, node:$pred, node:$def),
379          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
380  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD &&
381         cast<MaskedLoadSDNode>(N)->isUnindexed();
382}]>;
383def sext_masked_load_i8 :
384  PatFrag<(ops node:$ptr, node:$pred, node:$def),
385          (sext_masked_load node:$ptr, node:$pred, node:$def), [{
386  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
387}]>;
388def sext_masked_load_i16 :
389  PatFrag<(ops node:$ptr, node:$pred, node:$def),
390          (sext_masked_load node:$ptr, node:$pred, node:$def), [{
391  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
392}]>;
393def sext_masked_load_i32 :
394  PatFrag<(ops node:$ptr, node:$pred, node:$def),
395          (sext_masked_load node:$ptr, node:$pred, node:$def), [{
396  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
397}]>;
398
399def non_temporal_load :
400   PatFrag<(ops node:$ptr, node:$pred, node:$def),
401           (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
402   return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD &&
403          cast<MaskedLoadSDNode>(N)->isUnindexed() &&
404          cast<MaskedLoadSDNode>(N)->isNonTemporal();
405}]>;
406
407// non-truncating masked store fragment.
408def nontrunc_masked_store :
409  PatFrag<(ops node:$val, node:$ptr, node:$pred),
410          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
411  return !cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
412         cast<MaskedStoreSDNode>(N)->isUnindexed() &&
413         !cast<MaskedStoreSDNode>(N)->isNonTemporal();
414}]>;
415// truncating masked store fragments.
416def trunc_masked_store :
417  PatFrag<(ops node:$val, node:$ptr, node:$pred),
418          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
419  return cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
420         cast<MaskedStoreSDNode>(N)->isUnindexed();
421}]>;
422def trunc_masked_store_i8 :
423  PatFrag<(ops node:$val, node:$ptr, node:$pred),
424          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
425  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
426}]>;
427def trunc_masked_store_i16 :
428  PatFrag<(ops node:$val, node:$ptr, node:$pred),
429          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
430  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
431}]>;
432def trunc_masked_store_i32 :
433  PatFrag<(ops node:$val, node:$ptr, node:$pred),
434          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
435  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
436}]>;
437
438def non_temporal_store :
439  PatFrag<(ops node:$val, node:$ptr, node:$pred),
440          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
441  return !cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
442         cast<MaskedStoreSDNode>(N)->isUnindexed() &&
443         cast<MaskedStoreSDNode>(N)->isNonTemporal();
444}]>;
445
446multiclass masked_gather_scatter<PatFrags GatherScatterOp> {
447  // offsets = (signed)Index << sizeof(elt)
448  def NAME#_signed_scaled :
449    PatFrag<(ops node:$val, node:$pred, node:$ptr, node:$idx),
450            (GatherScatterOp node:$val, node:$pred, node:$ptr, node:$idx),[{
451    auto MGS = cast<MaskedGatherScatterSDNode>(N);
452    bool Signed = MGS->isIndexSigned() ||
453        MGS->getIndex().getValueType().getVectorElementType() == MVT::i64;
454    return Signed && MGS->isIndexScaled();
455  }]>;
456  // offsets = (signed)Index
457  def NAME#_signed_unscaled :
458    PatFrag<(ops node:$val, node:$pred, node:$ptr, node:$idx),
459            (GatherScatterOp node:$val, node:$pred, node:$ptr, node:$idx),[{
460    auto MGS = cast<MaskedGatherScatterSDNode>(N);
461    bool Signed = MGS->isIndexSigned() ||
462        MGS->getIndex().getValueType().getVectorElementType() == MVT::i64;
463    return Signed && !MGS->isIndexScaled();
464  }]>;
465  // offsets = (unsigned)Index << sizeof(elt)
466  def NAME#_unsigned_scaled :
467    PatFrag<(ops node:$val, node:$pred, node:$ptr, node:$idx),
468            (GatherScatterOp node:$val, node:$pred, node:$ptr, node:$idx),[{
469    auto MGS = cast<MaskedGatherScatterSDNode>(N);
470    bool Signed = MGS->isIndexSigned() ||
471        MGS->getIndex().getValueType().getVectorElementType() == MVT::i64;
472    return !Signed && MGS->isIndexScaled();
473  }]>;
474  // offsets = (unsigned)Index
475  def NAME#_unsigned_unscaled :
476    PatFrag<(ops node:$val, node:$pred, node:$ptr, node:$idx),
477            (GatherScatterOp node:$val, node:$pred, node:$ptr, node:$idx),[{
478    auto MGS = cast<MaskedGatherScatterSDNode>(N);
479    bool Signed = MGS->isIndexSigned() ||
480        MGS->getIndex().getValueType().getVectorElementType() == MVT::i64;
481    return !Signed && !MGS->isIndexScaled();
482  }]>;
483}
484
485defm nonext_masked_gather    : masked_gather_scatter<nonext_masked_gather>;
486defm azext_masked_gather_i8  : masked_gather_scatter<azext_masked_gather_i8>;
487defm azext_masked_gather_i16 : masked_gather_scatter<azext_masked_gather_i16>;
488defm azext_masked_gather_i32 : masked_gather_scatter<azext_masked_gather_i32>;
489defm sext_masked_gather_i8   : masked_gather_scatter<sext_masked_gather_i8>;
490defm sext_masked_gather_i16  : masked_gather_scatter<sext_masked_gather_i16>;
491defm sext_masked_gather_i32  : masked_gather_scatter<sext_masked_gather_i32>;
492
493defm nontrunc_masked_scatter  : masked_gather_scatter<nontrunc_masked_scatter>;
494defm trunc_masked_scatter_i8  : masked_gather_scatter<trunc_masked_scatter_i8>;
495defm trunc_masked_scatter_i16 : masked_gather_scatter<trunc_masked_scatter_i16>;
496defm trunc_masked_scatter_i32 : masked_gather_scatter<trunc_masked_scatter_i32>;
497
498// top16Zero - answer true if the upper 16 bits of $src are 0, false otherwise
499def top16Zero: PatLeaf<(i32 GPR32:$src), [{
500  return SDValue(N,0)->getValueType(0) == MVT::i32 &&
501         CurDAG->MaskedValueIsZero(SDValue(N,0), APInt::getHighBitsSet(32, 16));
502  }]>;
503
504// top32Zero - answer true if the upper 32 bits of $src are 0, false otherwise
505def top32Zero: PatLeaf<(i64 GPR64:$src), [{
506  return SDValue(N,0)->getValueType(0) == MVT::i64 &&
507         CurDAG->MaskedValueIsZero(SDValue(N,0), APInt::getHighBitsSet(64, 32));
508  }]>;
509
510// Node definitions.
511def AArch64adrp          : SDNode<"AArch64ISD::ADRP", SDTIntUnaryOp, []>;
512def AArch64adr           : SDNode<"AArch64ISD::ADR", SDTIntUnaryOp, []>;
513def AArch64addlow        : SDNode<"AArch64ISD::ADDlow", SDTIntBinOp, []>;
514def AArch64LOADgot       : SDNode<"AArch64ISD::LOADgot", SDTIntUnaryOp>;
515def AArch64callseq_start : SDNode<"ISD::CALLSEQ_START",
516                                SDCallSeqStart<[ SDTCisVT<0, i32>,
517                                                 SDTCisVT<1, i32> ]>,
518                                [SDNPHasChain, SDNPOutGlue]>;
519def AArch64callseq_end   : SDNode<"ISD::CALLSEQ_END",
520                                SDCallSeqEnd<[ SDTCisVT<0, i32>,
521                                               SDTCisVT<1, i32> ]>,
522                                [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
523def AArch64call          : SDNode<"AArch64ISD::CALL",
524                                SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
525                                [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
526                                 SDNPVariadic]>;
527
528def AArch64call_bti      : SDNode<"AArch64ISD::CALL_BTI",
529                                SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
530                                [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
531                                 SDNPVariadic]>;
532
533def AArch64call_rvmarker: SDNode<"AArch64ISD::CALL_RVMARKER",
534                             SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
535                             [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
536                              SDNPVariadic]>;
537
538def AArch64brcond        : SDNode<"AArch64ISD::BRCOND", SDT_AArch64Brcond,
539                                [SDNPHasChain]>;
540def AArch64cbz           : SDNode<"AArch64ISD::CBZ", SDT_AArch64cbz,
541                                [SDNPHasChain]>;
542def AArch64cbnz           : SDNode<"AArch64ISD::CBNZ", SDT_AArch64cbz,
543                                [SDNPHasChain]>;
544def AArch64tbz           : SDNode<"AArch64ISD::TBZ", SDT_AArch64tbz,
545                                [SDNPHasChain]>;
546def AArch64tbnz           : SDNode<"AArch64ISD::TBNZ", SDT_AArch64tbz,
547                                [SDNPHasChain]>;
548
549
550def AArch64csel          : SDNode<"AArch64ISD::CSEL", SDT_AArch64CSel>;
551def AArch64csinv         : SDNode<"AArch64ISD::CSINV", SDT_AArch64CSel>;
552def AArch64csneg         : SDNode<"AArch64ISD::CSNEG", SDT_AArch64CSel>;
553def AArch64csinc         : SDNode<"AArch64ISD::CSINC", SDT_AArch64CSel>;
554def AArch64retflag       : SDNode<"AArch64ISD::RET_FLAG", SDTNone,
555                                [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
556def AArch64adc       : SDNode<"AArch64ISD::ADC",  SDTBinaryArithWithFlagsIn >;
557def AArch64sbc       : SDNode<"AArch64ISD::SBC",  SDTBinaryArithWithFlagsIn>;
558def AArch64add_flag  : SDNode<"AArch64ISD::ADDS",  SDTBinaryArithWithFlagsOut,
559                            [SDNPCommutative]>;
560def AArch64sub_flag  : SDNode<"AArch64ISD::SUBS",  SDTBinaryArithWithFlagsOut>;
561def AArch64and_flag  : SDNode<"AArch64ISD::ANDS",  SDTBinaryArithWithFlagsOut,
562                            [SDNPCommutative]>;
563def AArch64adc_flag  : SDNode<"AArch64ISD::ADCS",  SDTBinaryArithWithFlagsInOut>;
564def AArch64sbc_flag  : SDNode<"AArch64ISD::SBCS",  SDTBinaryArithWithFlagsInOut>;
565
566def AArch64ccmp      : SDNode<"AArch64ISD::CCMP",  SDT_AArch64CCMP>;
567def AArch64ccmn      : SDNode<"AArch64ISD::CCMN",  SDT_AArch64CCMP>;
568def AArch64fccmp     : SDNode<"AArch64ISD::FCCMP", SDT_AArch64FCCMP>;
569
570def AArch64threadpointer : SDNode<"AArch64ISD::THREAD_POINTER", SDTPtrLeaf>;
571
572def AArch64fcmp         : SDNode<"AArch64ISD::FCMP", SDT_AArch64FCmp>;
573def AArch64strict_fcmp  : SDNode<"AArch64ISD::STRICT_FCMP", SDT_AArch64FCmp,
574                                 [SDNPHasChain]>;
575def AArch64strict_fcmpe : SDNode<"AArch64ISD::STRICT_FCMPE", SDT_AArch64FCmp,
576                                 [SDNPHasChain]>;
577def AArch64any_fcmp     : PatFrags<(ops node:$lhs, node:$rhs),
578                                   [(AArch64strict_fcmp node:$lhs, node:$rhs),
579                                    (AArch64fcmp node:$lhs, node:$rhs)]>;
580
581def AArch64dup       : SDNode<"AArch64ISD::DUP", SDT_AArch64Dup>;
582def AArch64duplane8  : SDNode<"AArch64ISD::DUPLANE8", SDT_AArch64DupLane>;
583def AArch64duplane16 : SDNode<"AArch64ISD::DUPLANE16", SDT_AArch64DupLane>;
584def AArch64duplane32 : SDNode<"AArch64ISD::DUPLANE32", SDT_AArch64DupLane>;
585def AArch64duplane64 : SDNode<"AArch64ISD::DUPLANE64", SDT_AArch64DupLane>;
586def AArch64duplane128 : SDNode<"AArch64ISD::DUPLANE128", SDT_AArch64DupLane>;
587
588def AArch64insr      : SDNode<"AArch64ISD::INSR", SDT_AArch64Insr>;
589
590def AArch64zip1      : SDNode<"AArch64ISD::ZIP1", SDT_AArch64Zip>;
591def AArch64zip2      : SDNode<"AArch64ISD::ZIP2", SDT_AArch64Zip>;
592def AArch64uzp1      : SDNode<"AArch64ISD::UZP1", SDT_AArch64Zip>;
593def AArch64uzp2      : SDNode<"AArch64ISD::UZP2", SDT_AArch64Zip>;
594def AArch64trn1      : SDNode<"AArch64ISD::TRN1", SDT_AArch64Zip>;
595def AArch64trn2      : SDNode<"AArch64ISD::TRN2", SDT_AArch64Zip>;
596
597def AArch64movi_edit : SDNode<"AArch64ISD::MOVIedit", SDT_AArch64MOVIedit>;
598def AArch64movi_shift : SDNode<"AArch64ISD::MOVIshift", SDT_AArch64MOVIshift>;
599def AArch64movi_msl : SDNode<"AArch64ISD::MOVImsl", SDT_AArch64MOVIshift>;
600def AArch64mvni_shift : SDNode<"AArch64ISD::MVNIshift", SDT_AArch64MOVIshift>;
601def AArch64mvni_msl : SDNode<"AArch64ISD::MVNImsl", SDT_AArch64MOVIshift>;
602def AArch64movi : SDNode<"AArch64ISD::MOVI", SDT_AArch64MOVIedit>;
603def AArch64fmov : SDNode<"AArch64ISD::FMOV", SDT_AArch64MOVIedit>;
604
605def AArch64rev16 : SDNode<"AArch64ISD::REV16", SDT_AArch64UnaryVec>;
606def AArch64rev32 : SDNode<"AArch64ISD::REV32", SDT_AArch64UnaryVec>;
607def AArch64rev64 : SDNode<"AArch64ISD::REV64", SDT_AArch64UnaryVec>;
608def AArch64ext : SDNode<"AArch64ISD::EXT", SDT_AArch64ExtVec>;
609
610def AArch64vashr : SDNode<"AArch64ISD::VASHR", SDT_AArch64vshift>;
611def AArch64vlshr : SDNode<"AArch64ISD::VLSHR", SDT_AArch64vshift>;
612def AArch64vshl : SDNode<"AArch64ISD::VSHL", SDT_AArch64vshift>;
613def AArch64sqshli : SDNode<"AArch64ISD::SQSHL_I", SDT_AArch64vshift>;
614def AArch64uqshli : SDNode<"AArch64ISD::UQSHL_I", SDT_AArch64vshift>;
615def AArch64sqshlui : SDNode<"AArch64ISD::SQSHLU_I", SDT_AArch64vshift>;
616def AArch64srshri : SDNode<"AArch64ISD::SRSHR_I", SDT_AArch64vshift>;
617def AArch64urshri : SDNode<"AArch64ISD::URSHR_I", SDT_AArch64vshift>;
618def AArch64vsli : SDNode<"AArch64ISD::VSLI", SDT_AArch64vshiftinsert>;
619def AArch64vsri : SDNode<"AArch64ISD::VSRI", SDT_AArch64vshiftinsert>;
620
621def AArch64bit: SDNode<"AArch64ISD::BIT", SDT_AArch64trivec>;
622def AArch64bsp: SDNode<"AArch64ISD::BSP", SDT_AArch64trivec>;
623
624def AArch64cmeq: SDNode<"AArch64ISD::CMEQ", SDT_AArch64binvec>;
625def AArch64cmge: SDNode<"AArch64ISD::CMGE", SDT_AArch64binvec>;
626def AArch64cmgt: SDNode<"AArch64ISD::CMGT", SDT_AArch64binvec>;
627def AArch64cmhi: SDNode<"AArch64ISD::CMHI", SDT_AArch64binvec>;
628def AArch64cmhs: SDNode<"AArch64ISD::CMHS", SDT_AArch64binvec>;
629
630def AArch64fcmeq: SDNode<"AArch64ISD::FCMEQ", SDT_AArch64fcmp>;
631def AArch64fcmge: SDNode<"AArch64ISD::FCMGE", SDT_AArch64fcmp>;
632def AArch64fcmgt: SDNode<"AArch64ISD::FCMGT", SDT_AArch64fcmp>;
633
634def AArch64cmeqz: SDNode<"AArch64ISD::CMEQz", SDT_AArch64unvec>;
635def AArch64cmgez: SDNode<"AArch64ISD::CMGEz", SDT_AArch64unvec>;
636def AArch64cmgtz: SDNode<"AArch64ISD::CMGTz", SDT_AArch64unvec>;
637def AArch64cmlez: SDNode<"AArch64ISD::CMLEz", SDT_AArch64unvec>;
638def AArch64cmltz: SDNode<"AArch64ISD::CMLTz", SDT_AArch64unvec>;
639def AArch64cmtst : PatFrag<(ops node:$LHS, node:$RHS),
640                        (vnot (AArch64cmeqz (and node:$LHS, node:$RHS)))>;
641
642def AArch64fcmeqz: SDNode<"AArch64ISD::FCMEQz", SDT_AArch64fcmpz>;
643def AArch64fcmgez: SDNode<"AArch64ISD::FCMGEz", SDT_AArch64fcmpz>;
644def AArch64fcmgtz: SDNode<"AArch64ISD::FCMGTz", SDT_AArch64fcmpz>;
645def AArch64fcmlez: SDNode<"AArch64ISD::FCMLEz", SDT_AArch64fcmpz>;
646def AArch64fcmltz: SDNode<"AArch64ISD::FCMLTz", SDT_AArch64fcmpz>;
647
648def AArch64bici: SDNode<"AArch64ISD::BICi", SDT_AArch64vecimm>;
649def AArch64orri: SDNode<"AArch64ISD::ORRi", SDT_AArch64vecimm>;
650
651def AArch64tcret: SDNode<"AArch64ISD::TC_RETURN", SDT_AArch64TCRET,
652                  [SDNPHasChain,  SDNPOptInGlue, SDNPVariadic]>;
653
654def AArch64Prefetch        : SDNode<"AArch64ISD::PREFETCH", SDT_AArch64PREFETCH,
655                               [SDNPHasChain, SDNPSideEffect]>;
656
657def AArch64sitof: SDNode<"AArch64ISD::SITOF", SDT_AArch64ITOF>;
658def AArch64uitof: SDNode<"AArch64ISD::UITOF", SDT_AArch64ITOF>;
659
660def AArch64tlsdesc_callseq : SDNode<"AArch64ISD::TLSDESC_CALLSEQ",
661                                    SDT_AArch64TLSDescCallSeq,
662                                    [SDNPInGlue, SDNPOutGlue, SDNPHasChain,
663                                     SDNPVariadic]>;
664
665
666def AArch64WrapperLarge : SDNode<"AArch64ISD::WrapperLarge",
667                                 SDT_AArch64WrapperLarge>;
668
669def AArch64NvCast : SDNode<"AArch64ISD::NVCAST", SDTUnaryOp>;
670
671def SDT_AArch64mull : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
672                                    SDTCisSameAs<1, 2>]>;
673def AArch64smull    : SDNode<"AArch64ISD::SMULL", SDT_AArch64mull,
674                             [SDNPCommutative]>;
675def AArch64umull    : SDNode<"AArch64ISD::UMULL", SDT_AArch64mull,
676                             [SDNPCommutative]>;
677
678def AArch64frecpe   : SDNode<"AArch64ISD::FRECPE", SDTFPUnaryOp>;
679def AArch64frecps   : SDNode<"AArch64ISD::FRECPS", SDTFPBinOp>;
680def AArch64frsqrte  : SDNode<"AArch64ISD::FRSQRTE", SDTFPUnaryOp>;
681def AArch64frsqrts  : SDNode<"AArch64ISD::FRSQRTS", SDTFPBinOp>;
682
683def AArch64sdot     : SDNode<"AArch64ISD::SDOT", SDT_AArch64Dot>;
684def AArch64udot     : SDNode<"AArch64ISD::UDOT", SDT_AArch64Dot>;
685
686def AArch64saddv    : SDNode<"AArch64ISD::SADDV", SDT_AArch64UnaryVec>;
687def AArch64uaddv    : SDNode<"AArch64ISD::UADDV", SDT_AArch64UnaryVec>;
688def AArch64sminv    : SDNode<"AArch64ISD::SMINV", SDT_AArch64UnaryVec>;
689def AArch64uminv    : SDNode<"AArch64ISD::UMINV", SDT_AArch64UnaryVec>;
690def AArch64smaxv    : SDNode<"AArch64ISD::SMAXV", SDT_AArch64UnaryVec>;
691def AArch64umaxv    : SDNode<"AArch64ISD::UMAXV", SDT_AArch64UnaryVec>;
692
693def AArch64uabd     : PatFrags<(ops node:$lhs, node:$rhs),
694                               [(abdu node:$lhs, node:$rhs),
695                                (int_aarch64_neon_uabd node:$lhs, node:$rhs)]>;
696def AArch64sabd     : PatFrags<(ops node:$lhs, node:$rhs),
697                               [(abds node:$lhs, node:$rhs),
698                                (int_aarch64_neon_sabd node:$lhs, node:$rhs)]>;
699
700def AArch64addp_n   : SDNode<"AArch64ISD::ADDP", SDT_AArch64Zip>;
701def AArch64uaddlp_n : SDNode<"AArch64ISD::UADDLP", SDT_AArch64uaddlp>;
702def AArch64saddlp_n : SDNode<"AArch64ISD::SADDLP", SDT_AArch64uaddlp>;
703def AArch64addp     : PatFrags<(ops node:$Rn, node:$Rm),
704                               [(AArch64addp_n node:$Rn, node:$Rm),
705                                (int_aarch64_neon_addp node:$Rn, node:$Rm)]>;
706def AArch64uaddlp   : PatFrags<(ops node:$src),
707                               [(AArch64uaddlp_n node:$src),
708                                (int_aarch64_neon_uaddlp node:$src)]>;
709def AArch64saddlp   : PatFrags<(ops node:$src),
710                               [(AArch64saddlp_n node:$src),
711                                (int_aarch64_neon_saddlp node:$src)]>;
712def AArch64faddp     : PatFrags<(ops node:$Rn, node:$Rm),
713                                [(AArch64addp_n node:$Rn, node:$Rm),
714                                 (int_aarch64_neon_faddp node:$Rn, node:$Rm)]>;
715
716def SDT_AArch64SETTAG : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisPtrTy<1>]>;
717def AArch64stg : SDNode<"AArch64ISD::STG", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
718def AArch64stzg : SDNode<"AArch64ISD::STZG", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
719def AArch64st2g : SDNode<"AArch64ISD::ST2G", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
720def AArch64stz2g : SDNode<"AArch64ISD::STZ2G", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
721
722def SDT_AArch64unpk : SDTypeProfile<1, 1, [
723    SDTCisInt<0>, SDTCisInt<1>, SDTCisOpSmallerThanOp<1, 0>
724]>;
725def AArch64sunpkhi : SDNode<"AArch64ISD::SUNPKHI", SDT_AArch64unpk>;
726def AArch64sunpklo : SDNode<"AArch64ISD::SUNPKLO", SDT_AArch64unpk>;
727def AArch64uunpkhi : SDNode<"AArch64ISD::UUNPKHI", SDT_AArch64unpk>;
728def AArch64uunpklo : SDNode<"AArch64ISD::UUNPKLO", SDT_AArch64unpk>;
729
730def AArch64ldp : SDNode<"AArch64ISD::LDP", SDT_AArch64ldp, [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
731def AArch64stp : SDNode<"AArch64ISD::STP", SDT_AArch64stp, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
732def AArch64stnp : SDNode<"AArch64ISD::STNP", SDT_AArch64stnp, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
733
734def AArch64tbl : SDNode<"AArch64ISD::TBL", SDT_AArch64TBL>;
735def AArch64mrs : SDNode<"AArch64ISD::MRS",
736                        SDTypeProfile<1, 1, [SDTCisVT<0, i64>, SDTCisVT<1, i32>]>,
737                        [SDNPHasChain, SDNPOutGlue]>;
738
739// Match add node and also treat an 'or' node is as an 'add' if the or'ed operands
740// have no common bits.
741def add_and_or_is_add : PatFrags<(ops node:$lhs, node:$rhs),
742                         [(add node:$lhs, node:$rhs), (or node:$lhs, node:$rhs)],[{
743   if (N->getOpcode() == ISD::ADD)
744     return true;
745   return CurDAG->haveNoCommonBitsSet(N->getOperand(0), N->getOperand(1));
746}]> {
747  let GISelPredicateCode = [{
748     // Only handle G_ADD for now. FIXME. build capability to compute whether
749     // operands of G_OR have common bits set or not.
750     return MI.getOpcode() == TargetOpcode::G_ADD;
751  }];
752}
753
754//===----------------------------------------------------------------------===//
755
756//===----------------------------------------------------------------------===//
757
758// AArch64 Instruction Predicate Definitions.
759// We could compute these on a per-module basis but doing so requires accessing
760// the Function object through the <Target>Subtarget and objections were raised
761// to that (see post-commit review comments for r301750).
762let RecomputePerFunction = 1 in {
763  def ForCodeSize   : Predicate<"shouldOptForSize(MF)">;
764  def NotForCodeSize   : Predicate<"!shouldOptForSize(MF)">;
765  // Avoid generating STRQro if it is slow, unless we're optimizing for code size.
766  def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || shouldOptForSize(MF)">;
767
768  def UseBTI : Predicate<[{ MF->getInfo<AArch64FunctionInfo>()->branchTargetEnforcement() }]>;
769  def NotUseBTI : Predicate<[{ !MF->getInfo<AArch64FunctionInfo>()->branchTargetEnforcement() }]>;
770
771  def SLSBLRMitigation : Predicate<[{ MF->getSubtarget<AArch64Subtarget>().hardenSlsBlr() }]>;
772  def NoSLSBLRMitigation : Predicate<[{ !MF->getSubtarget<AArch64Subtarget>().hardenSlsBlr() }]>;
773  // Toggles patterns which aren't beneficial in GlobalISel when we aren't
774  // optimizing. This allows us to selectively use patterns without impacting
775  // SelectionDAG's behaviour.
776  // FIXME: One day there will probably be a nicer way to check for this, but
777  // today is not that day.
778  def OptimizedGISelOrOtherSelector : Predicate<"!MF->getFunction().hasOptNone() || MF->getProperties().hasProperty(MachineFunctionProperties::Property::FailedISel) || !MF->getProperties().hasProperty(MachineFunctionProperties::Property::Legalized)">;
779}
780
781include "AArch64InstrFormats.td"
782include "SVEInstrFormats.td"
783include "SMEInstrFormats.td"
784
785//===----------------------------------------------------------------------===//
786
787//===----------------------------------------------------------------------===//
788// Miscellaneous instructions.
789//===----------------------------------------------------------------------===//
790
791let Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1 in {
792// We set Sched to empty list because we expect these instructions to simply get
793// removed in most cases.
794def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
795                              [(AArch64callseq_start timm:$amt1, timm:$amt2)]>,
796                              Sched<[]>;
797def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
798                            [(AArch64callseq_end timm:$amt1, timm:$amt2)]>,
799                            Sched<[]>;
800} // Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1
801
802let isReMaterializable = 1, isCodeGenOnly = 1 in {
803// FIXME: The following pseudo instructions are only needed because remat
804// cannot handle multiple instructions.  When that changes, they can be
805// removed, along with the AArch64Wrapper node.
806
807let AddedComplexity = 10 in
808def LOADgot : Pseudo<(outs GPR64common:$dst), (ins i64imm:$addr),
809                     [(set GPR64common:$dst, (AArch64LOADgot tglobaladdr:$addr))]>,
810              Sched<[WriteLDAdr]>;
811
812// The MOVaddr instruction should match only when the add is not folded
813// into a load or store address.
814def MOVaddr
815    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
816             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tglobaladdr:$hi),
817                                            tglobaladdr:$low))]>,
818      Sched<[WriteAdrAdr]>;
819def MOVaddrJT
820    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
821             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tjumptable:$hi),
822                                             tjumptable:$low))]>,
823      Sched<[WriteAdrAdr]>;
824def MOVaddrCP
825    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
826             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tconstpool:$hi),
827                                             tconstpool:$low))]>,
828      Sched<[WriteAdrAdr]>;
829def MOVaddrBA
830    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
831             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tblockaddress:$hi),
832                                             tblockaddress:$low))]>,
833      Sched<[WriteAdrAdr]>;
834def MOVaddrTLS
835    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
836             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tglobaltlsaddr:$hi),
837                                            tglobaltlsaddr:$low))]>,
838      Sched<[WriteAdrAdr]>;
839def MOVaddrEXT
840    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
841             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp texternalsym:$hi),
842                                            texternalsym:$low))]>,
843      Sched<[WriteAdrAdr]>;
844// Normally AArch64addlow either gets folded into a following ldr/str,
845// or together with an adrp into MOVaddr above. For cases with TLS, it
846// might appear without either of them, so allow lowering it into a plain
847// add.
848def ADDlowTLS
849    : Pseudo<(outs GPR64sp:$dst), (ins GPR64sp:$src, i64imm:$low),
850             [(set GPR64sp:$dst, (AArch64addlow GPR64sp:$src,
851                                            tglobaltlsaddr:$low))]>,
852      Sched<[WriteAdr]>;
853
854} // isReMaterializable, isCodeGenOnly
855
856def : Pat<(AArch64LOADgot tglobaltlsaddr:$addr),
857          (LOADgot tglobaltlsaddr:$addr)>;
858
859def : Pat<(AArch64LOADgot texternalsym:$addr),
860          (LOADgot texternalsym:$addr)>;
861
862def : Pat<(AArch64LOADgot tconstpool:$addr),
863          (LOADgot tconstpool:$addr)>;
864
865// In general these get lowered into a sequence of three 4-byte instructions.
866// 32-bit jump table destination is actually only 2 instructions since we can
867// use the table itself as a PC-relative base. But optimization occurs after
868// branch relaxation so be pessimistic.
869let Size = 12, Constraints = "@earlyclobber $dst,@earlyclobber $scratch",
870    isNotDuplicable = 1 in {
871def JumpTableDest32 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
872                             (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
873                      Sched<[]>;
874def JumpTableDest16 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
875                             (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
876                      Sched<[]>;
877def JumpTableDest8 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
878                            (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
879                     Sched<[]>;
880}
881
882// Space-consuming pseudo to aid testing of placement and reachability
883// algorithms. Immediate operand is the number of bytes this "instruction"
884// occupies; register operands can be used to enforce dependency and constrain
885// the scheduler.
886let hasSideEffects = 1, mayLoad = 1, mayStore = 1 in
887def SPACE : Pseudo<(outs GPR64:$Rd), (ins i32imm:$size, GPR64:$Rn),
888                   [(set GPR64:$Rd, (int_aarch64_space imm:$size, GPR64:$Rn))]>,
889            Sched<[]>;
890
891let hasSideEffects = 1, isCodeGenOnly = 1 in {
892  def SpeculationSafeValueX
893      : Pseudo<(outs GPR64:$dst), (ins GPR64:$src), []>, Sched<[]>;
894  def SpeculationSafeValueW
895      : Pseudo<(outs GPR32:$dst), (ins GPR32:$src), []>, Sched<[]>;
896}
897
898// SpeculationBarrierEndBB must only be used after an unconditional control
899// flow, i.e. after a terminator for which isBarrier is True.
900let hasSideEffects = 1, isCodeGenOnly = 1, isTerminator = 1, isBarrier = 1 in {
901  // This gets lowered to a pair of 4-byte instructions.
902  let Size = 8 in
903  def SpeculationBarrierISBDSBEndBB
904      : Pseudo<(outs), (ins), []>, Sched<[]>;
905  // This gets lowered to a 4-byte instruction.
906  let Size = 4 in
907  def SpeculationBarrierSBEndBB
908      : Pseudo<(outs), (ins), []>, Sched<[]>;
909}
910
911//===----------------------------------------------------------------------===//
912// System instructions.
913//===----------------------------------------------------------------------===//
914
915def HINT : HintI<"hint">;
916def : InstAlias<"nop",  (HINT 0b000)>;
917def : InstAlias<"yield",(HINT 0b001)>;
918def : InstAlias<"wfe",  (HINT 0b010)>;
919def : InstAlias<"wfi",  (HINT 0b011)>;
920def : InstAlias<"sev",  (HINT 0b100)>;
921def : InstAlias<"sevl", (HINT 0b101)>;
922def : InstAlias<"dgh",  (HINT 0b110)>;
923def : InstAlias<"esb",  (HINT 0b10000)>, Requires<[HasRAS]>;
924def : InstAlias<"csdb", (HINT 20)>;
925// In order to be able to write readable assembly, LLVM should accept assembly
926// inputs that use Branch Target Indentification mnemonics, even with BTI disabled.
927// However, in order to be compatible with other assemblers (e.g. GAS), LLVM
928// should not emit these mnemonics unless BTI is enabled.
929def : InstAlias<"bti",  (HINT 32), 0>;
930def : InstAlias<"bti $op", (HINT btihint_op:$op), 0>;
931def : InstAlias<"bti",  (HINT 32)>, Requires<[HasBTI]>;
932def : InstAlias<"bti $op", (HINT btihint_op:$op)>, Requires<[HasBTI]>;
933
934// v8.2a Statistical Profiling extension
935def : InstAlias<"psb $op",  (HINT psbhint_op:$op)>, Requires<[HasSPE]>;
936
937// As far as LLVM is concerned this writes to the system's exclusive monitors.
938let mayLoad = 1, mayStore = 1 in
939def CLREX : CRmSystemI<imm0_15, 0b010, "clrex">;
940
941// NOTE: ideally, this would have mayStore = 0, mayLoad = 0, but we cannot
942// model patterns with sufficiently fine granularity.
943let mayLoad = ?, mayStore = ? in {
944def DMB   : CRmSystemI<barrier_op, 0b101, "dmb",
945                       [(int_aarch64_dmb (i32 imm32_0_15:$CRm))]>;
946
947def DSB   : CRmSystemI<barrier_op, 0b100, "dsb",
948                       [(int_aarch64_dsb (i32 imm32_0_15:$CRm))]>;
949
950def ISB   : CRmSystemI<barrier_op, 0b110, "isb",
951                       [(int_aarch64_isb (i32 imm32_0_15:$CRm))]>;
952
953def TSB   : CRmSystemI<barrier_op, 0b010, "tsb", []> {
954  let CRm        = 0b0010;
955  let Inst{12}   = 0;
956  let Predicates = [HasTRACEV8_4];
957}
958
959def DSBnXS  : CRmSystemI<barrier_nxs_op, 0b001, "dsb"> {
960  let CRm{1-0}   = 0b11;
961  let Inst{9-8}  = 0b10;
962  let Predicates = [HasXS];
963}
964
965let Predicates = [HasWFxT] in {
966def WFET : RegInputSystemI<0b0000, 0b000, "wfet">;
967def WFIT : RegInputSystemI<0b0000, 0b001, "wfit">;
968}
969
970// Branch Record Buffer two-word mnemonic instructions
971class BRBEI<bits<3> op2, string keyword>
972    : SimpleSystemI<0, (ins), "brb", keyword>, Sched<[WriteSys]> {
973  let Inst{31-8} = 0b110101010000100101110010;
974  let Inst{7-5} = op2;
975  let Predicates = [HasBRBE];
976}
977def BRB_IALL: BRBEI<0b100, "\tiall">;
978def BRB_INJ:  BRBEI<0b101, "\tinj">;
979
980}
981
982// Allow uppercase and lowercase keyword arguments for BRB IALL and BRB INJ
983def : TokenAlias<"INJ", "inj">;
984def : TokenAlias<"IALL", "iall">;
985
986// ARMv8.2-A Dot Product
987let Predicates = [HasDotProd] in {
988defm SDOT : SIMDThreeSameVectorDot<0, 0, "sdot", AArch64sdot>;
989defm UDOT : SIMDThreeSameVectorDot<1, 0, "udot", AArch64udot>;
990defm SDOTlane : SIMDThreeSameVectorDotIndex<0, 0, 0b10, "sdot", AArch64sdot>;
991defm UDOTlane : SIMDThreeSameVectorDotIndex<1, 0, 0b10, "udot", AArch64udot>;
992}
993
994// ARMv8.6-A BFloat
995let Predicates = [HasNEON, HasBF16] in {
996defm BFDOT       : SIMDThreeSameVectorBFDot<1, "bfdot">;
997defm BF16DOTlane : SIMDThreeSameVectorBF16DotI<0, "bfdot">;
998def BFMMLA       : SIMDThreeSameVectorBF16MatrixMul<"bfmmla">;
999def BFMLALB      : SIMDBF16MLAL<0, "bfmlalb", int_aarch64_neon_bfmlalb>;
1000def BFMLALT      : SIMDBF16MLAL<1, "bfmlalt", int_aarch64_neon_bfmlalt>;
1001def BFMLALBIdx   : SIMDBF16MLALIndex<0, "bfmlalb", int_aarch64_neon_bfmlalb>;
1002def BFMLALTIdx   : SIMDBF16MLALIndex<1, "bfmlalt", int_aarch64_neon_bfmlalt>;
1003def BFCVTN       : SIMD_BFCVTN;
1004def BFCVTN2      : SIMD_BFCVTN2;
1005
1006// Vector-scalar BFDOT:
1007// The second source operand of the 64-bit variant of BF16DOTlane is a 128-bit
1008// register (the instruction uses a single 32-bit lane from it), so the pattern
1009// is a bit tricky.
1010def : Pat<(v2f32 (int_aarch64_neon_bfdot
1011                    (v2f32 V64:$Rd), (v4bf16 V64:$Rn),
1012                    (v4bf16 (bitconvert
1013                      (v2i32 (AArch64duplane32
1014                        (v4i32 (bitconvert
1015                          (v8bf16 (insert_subvector undef,
1016                            (v4bf16 V64:$Rm),
1017                            (i64 0))))),
1018                        VectorIndexS:$idx)))))),
1019          (BF16DOTlanev4bf16 (v2f32 V64:$Rd), (v4bf16 V64:$Rn),
1020                             (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
1021                             VectorIndexS:$idx)>;
1022}
1023
1024let Predicates = [HasNEONorSME, HasBF16] in {
1025def BFCVT : BF16ToSinglePrecision<"bfcvt">;
1026}
1027
1028// ARMv8.6A AArch64 matrix multiplication
1029let Predicates = [HasMatMulInt8] in {
1030def  SMMLA : SIMDThreeSameVectorMatMul<0, 0, "smmla", int_aarch64_neon_smmla>;
1031def  UMMLA : SIMDThreeSameVectorMatMul<0, 1, "ummla", int_aarch64_neon_ummla>;
1032def USMMLA : SIMDThreeSameVectorMatMul<1, 0, "usmmla", int_aarch64_neon_usmmla>;
1033defm USDOT : SIMDThreeSameVectorDot<0, 1, "usdot", int_aarch64_neon_usdot>;
1034defm USDOTlane : SIMDThreeSameVectorDotIndex<0, 1, 0b10, "usdot", int_aarch64_neon_usdot>;
1035
1036// sudot lane has a pattern where usdot is expected (there is no sudot).
1037// The second operand is used in the dup operation to repeat the indexed
1038// element.
1039class BaseSIMDSUDOTIndex<bit Q, string dst_kind, string lhs_kind,
1040                         string rhs_kind, RegisterOperand RegType,
1041                         ValueType AccumType, ValueType InputType>
1042      : BaseSIMDThreeSameVectorDotIndex<Q, 0, 1, 0b00, "sudot", dst_kind,
1043                                        lhs_kind, rhs_kind, RegType, AccumType,
1044                                        InputType, null_frag> {
1045  let Pattern = [(set (AccumType RegType:$dst),
1046                      (AccumType (int_aarch64_neon_usdot (AccumType RegType:$Rd),
1047                                 (InputType (bitconvert (AccumType
1048                                    (AArch64duplane32 (v4i32 V128:$Rm),
1049                                        VectorIndexS:$idx)))),
1050                                 (InputType RegType:$Rn))))];
1051}
1052
1053multiclass SIMDSUDOTIndex {
1054  def v8i8  : BaseSIMDSUDOTIndex<0, ".2s", ".8b", ".4b", V64, v2i32, v8i8>;
1055  def v16i8 : BaseSIMDSUDOTIndex<1, ".4s", ".16b", ".4b", V128, v4i32, v16i8>;
1056}
1057
1058defm SUDOTlane : SIMDSUDOTIndex;
1059
1060}
1061
1062// ARMv8.2-A FP16 Fused Multiply-Add Long
1063let Predicates = [HasNEON, HasFP16FML] in {
1064defm FMLAL      : SIMDThreeSameVectorFML<0, 1, 0b001, "fmlal", int_aarch64_neon_fmlal>;
1065defm FMLSL      : SIMDThreeSameVectorFML<0, 1, 0b101, "fmlsl", int_aarch64_neon_fmlsl>;
1066defm FMLAL2     : SIMDThreeSameVectorFML<1, 0, 0b001, "fmlal2", int_aarch64_neon_fmlal2>;
1067defm FMLSL2     : SIMDThreeSameVectorFML<1, 0, 0b101, "fmlsl2", int_aarch64_neon_fmlsl2>;
1068defm FMLALlane  : SIMDThreeSameVectorFMLIndex<0, 0b0000, "fmlal", int_aarch64_neon_fmlal>;
1069defm FMLSLlane  : SIMDThreeSameVectorFMLIndex<0, 0b0100, "fmlsl", int_aarch64_neon_fmlsl>;
1070defm FMLAL2lane : SIMDThreeSameVectorFMLIndex<1, 0b1000, "fmlal2", int_aarch64_neon_fmlal2>;
1071defm FMLSL2lane : SIMDThreeSameVectorFMLIndex<1, 0b1100, "fmlsl2", int_aarch64_neon_fmlsl2>;
1072}
1073
1074// Armv8.2-A Crypto extensions
1075let Predicates = [HasSHA3] in {
1076def SHA512H   : CryptoRRRTied<0b0, 0b00, "sha512h">;
1077def SHA512H2  : CryptoRRRTied<0b0, 0b01, "sha512h2">;
1078def SHA512SU0 : CryptoRRTied_2D<0b0, 0b00, "sha512su0">;
1079def SHA512SU1 : CryptoRRRTied_2D<0b0, 0b10, "sha512su1">;
1080def RAX1      : CryptoRRR_2D<0b0,0b11, "rax1">;
1081def EOR3      : CryptoRRRR_16B<0b00, "eor3">;
1082def BCAX      : CryptoRRRR_16B<0b01, "bcax">;
1083def XAR       : CryptoRRRi6<"xar">;
1084
1085class SHA3_pattern<Instruction INST, Intrinsic OpNode, ValueType VecTy>
1086  : Pat<(VecTy (OpNode (VecTy V128:$Vd), (VecTy V128:$Vn), (VecTy V128:$Vm))),
1087        (INST (VecTy V128:$Vd), (VecTy V128:$Vn), (VecTy V128:$Vm))>;
1088
1089def : Pat<(v2i64 (int_aarch64_crypto_sha512su0 (v2i64 V128:$Vn), (v2i64 V128:$Vm))),
1090          (SHA512SU0 (v2i64 V128:$Vn), (v2i64 V128:$Vm))>;
1091
1092def : SHA3_pattern<SHA512H, int_aarch64_crypto_sha512h, v2i64>;
1093def : SHA3_pattern<SHA512H2, int_aarch64_crypto_sha512h2, v2i64>;
1094def : SHA3_pattern<SHA512SU1, int_aarch64_crypto_sha512su1, v2i64>;
1095
1096def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v16i8>;
1097def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v8i16>;
1098def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v4i32>;
1099def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v2i64>;
1100
1101class EOR3_pattern<ValueType VecTy>
1102  : Pat<(xor (xor (VecTy V128:$Vn), (VecTy V128:$Vm)), (VecTy V128:$Va)),
1103        (EOR3 (VecTy V128:$Vn), (VecTy V128:$Vm), (VecTy V128:$Va))>;
1104
1105def : EOR3_pattern<v16i8>;
1106def : EOR3_pattern<v8i16>;
1107def : EOR3_pattern<v4i32>;
1108def : EOR3_pattern<v2i64>;
1109
1110class BCAX_pattern<ValueType VecTy>
1111  : Pat<(xor (VecTy V128:$Vn), (and (VecTy V128:$Vm), (vnot (VecTy V128:$Va)))),
1112        (BCAX (VecTy V128:$Vn), (VecTy V128:$Vm), (VecTy V128:$Va))>;
1113
1114def : BCAX_pattern<v16i8>;
1115def : BCAX_pattern<v8i16>;
1116def : BCAX_pattern<v4i32>;
1117def : BCAX_pattern<v2i64>;
1118
1119def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v16i8>;
1120def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v8i16>;
1121def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v4i32>;
1122def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v2i64>;
1123
1124def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v16i8>;
1125def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v8i16>;
1126def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v4i32>;
1127def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v2i64>;
1128
1129def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v16i8>;
1130def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v8i16>;
1131def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v4i32>;
1132def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v2i64>;
1133
1134def : Pat<(v2i64 (int_aarch64_crypto_rax1 (v2i64 V128:$Vn), (v2i64 V128:$Vm))),
1135          (RAX1 (v2i64 V128:$Vn), (v2i64 V128:$Vm))>;
1136
1137def : Pat<(v2i64 (int_aarch64_crypto_xar (v2i64 V128:$Vn), (v2i64 V128:$Vm), (i64 timm0_63:$imm))),
1138          (XAR (v2i64 V128:$Vn), (v2i64 V128:$Vm), (timm0_63:$imm))>;
1139
1140
1141} // HasSHA3
1142
1143let Predicates = [HasSM4] in {
1144def SM3TT1A   : CryptoRRRi2Tied<0b0, 0b00, "sm3tt1a">;
1145def SM3TT1B   : CryptoRRRi2Tied<0b0, 0b01, "sm3tt1b">;
1146def SM3TT2A   : CryptoRRRi2Tied<0b0, 0b10, "sm3tt2a">;
1147def SM3TT2B   : CryptoRRRi2Tied<0b0, 0b11, "sm3tt2b">;
1148def SM3SS1    : CryptoRRRR_4S<0b10, "sm3ss1">;
1149def SM3PARTW1 : CryptoRRRTied_4S<0b1, 0b00, "sm3partw1">;
1150def SM3PARTW2 : CryptoRRRTied_4S<0b1, 0b01, "sm3partw2">;
1151def SM4ENCKEY : CryptoRRR_4S<0b1, 0b10, "sm4ekey">;
1152def SM4E      : CryptoRRTied_4S<0b0, 0b01, "sm4e">;
1153
1154def : Pat<(v4i32 (int_aarch64_crypto_sm3ss1 (v4i32 V128:$Vn), (v4i32 V128:$Vm), (v4i32 V128:$Va))),
1155          (SM3SS1 (v4i32 V128:$Vn), (v4i32 V128:$Vm), (v4i32 V128:$Va))>;
1156
1157class SM3PARTW_pattern<Instruction INST, Intrinsic OpNode>
1158  : Pat<(v4i32 (OpNode (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm))),
1159        (INST (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm))>;
1160
1161class SM3TT_pattern<Instruction INST, Intrinsic OpNode>
1162  : Pat<(v4i32 (OpNode (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm), (i64 VectorIndexS_timm:$imm) )),
1163        (INST (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm), (VectorIndexS_timm:$imm))>;
1164
1165class SM4_pattern<Instruction INST, Intrinsic OpNode>
1166  : Pat<(v4i32 (OpNode (v4i32 V128:$Vn), (v4i32 V128:$Vm))),
1167        (INST (v4i32 V128:$Vn), (v4i32 V128:$Vm))>;
1168
1169def : SM3PARTW_pattern<SM3PARTW1, int_aarch64_crypto_sm3partw1>;
1170def : SM3PARTW_pattern<SM3PARTW2, int_aarch64_crypto_sm3partw2>;
1171
1172def : SM3TT_pattern<SM3TT1A, int_aarch64_crypto_sm3tt1a>;
1173def : SM3TT_pattern<SM3TT1B, int_aarch64_crypto_sm3tt1b>;
1174def : SM3TT_pattern<SM3TT2A, int_aarch64_crypto_sm3tt2a>;
1175def : SM3TT_pattern<SM3TT2B, int_aarch64_crypto_sm3tt2b>;
1176
1177def : SM4_pattern<SM4ENCKEY, int_aarch64_crypto_sm4ekey>;
1178def : SM4_pattern<SM4E, int_aarch64_crypto_sm4e>;
1179} // HasSM4
1180
1181let Predicates = [HasRCPC] in {
1182  // v8.3 Release Consistent Processor Consistent support, optional in v8.2.
1183  def LDAPRB  : RCPCLoad<0b00, "ldaprb", GPR32>;
1184  def LDAPRH  : RCPCLoad<0b01, "ldaprh", GPR32>;
1185  def LDAPRW  : RCPCLoad<0b10, "ldapr", GPR32>;
1186  def LDAPRX  : RCPCLoad<0b11, "ldapr", GPR64>;
1187}
1188
1189// v8.3a complex add and multiply-accumulate. No predicate here, that is done
1190// inside the multiclass as the FP16 versions need different predicates.
1191defm FCMLA : SIMDThreeSameVectorTiedComplexHSD<1, 0b110, complexrotateop,
1192                                               "fcmla", null_frag>;
1193defm FCADD : SIMDThreeSameVectorComplexHSD<1, 0b111, complexrotateopodd,
1194                                           "fcadd", null_frag>;
1195defm FCMLA : SIMDIndexedTiedComplexHSD<0, 1, complexrotateop, "fcmla">;
1196
1197let Predicates = [HasComplxNum, HasNEON, HasFullFP16] in {
1198  def : Pat<(v4f16 (int_aarch64_neon_vcadd_rot90 (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
1199            (FCADDv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm), (i32 0))>;
1200  def : Pat<(v4f16 (int_aarch64_neon_vcadd_rot270 (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
1201            (FCADDv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm), (i32 1))>;
1202  def : Pat<(v8f16 (int_aarch64_neon_vcadd_rot90 (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
1203            (FCADDv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 0))>;
1204  def : Pat<(v8f16 (int_aarch64_neon_vcadd_rot270 (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
1205            (FCADDv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 1))>;
1206}
1207
1208let Predicates = [HasComplxNum, HasNEON] in {
1209  def : Pat<(v2f32 (int_aarch64_neon_vcadd_rot90 (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
1210            (FCADDv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 0))>;
1211  def : Pat<(v2f32 (int_aarch64_neon_vcadd_rot270 (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
1212            (FCADDv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 1))>;
1213  foreach Ty = [v4f32, v2f64] in {
1214    def : Pat<(Ty (int_aarch64_neon_vcadd_rot90 (Ty V128:$Rn), (Ty V128:$Rm))),
1215              (!cast<Instruction>("FCADD"#Ty) (Ty V128:$Rn), (Ty V128:$Rm), (i32 0))>;
1216    def : Pat<(Ty (int_aarch64_neon_vcadd_rot270 (Ty V128:$Rn), (Ty V128:$Rm))),
1217              (!cast<Instruction>("FCADD"#Ty) (Ty V128:$Rn), (Ty V128:$Rm), (i32 1))>;
1218  }
1219}
1220
1221multiclass FCMLA_PATS<ValueType ty, DAGOperand Reg> {
1222  def : Pat<(ty (int_aarch64_neon_vcmla_rot0 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
1223            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 0)>;
1224  def : Pat<(ty (int_aarch64_neon_vcmla_rot90 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
1225            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 1)>;
1226  def : Pat<(ty (int_aarch64_neon_vcmla_rot180 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
1227            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 2)>;
1228  def : Pat<(ty (int_aarch64_neon_vcmla_rot270 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
1229            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 3)>;
1230}
1231
1232multiclass FCMLA_LANE_PATS<ValueType ty, DAGOperand Reg, dag RHSDup> {
1233  def : Pat<(ty (int_aarch64_neon_vcmla_rot0 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
1234            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 0)>;
1235  def : Pat<(ty (int_aarch64_neon_vcmla_rot90 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
1236            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 1)>;
1237  def : Pat<(ty (int_aarch64_neon_vcmla_rot180 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
1238            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 2)>;
1239  def : Pat<(ty (int_aarch64_neon_vcmla_rot270 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
1240            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 3)>;
1241}
1242
1243
1244let Predicates = [HasComplxNum, HasNEON, HasFullFP16] in {
1245  defm : FCMLA_PATS<v4f16, V64>;
1246  defm : FCMLA_PATS<v8f16, V128>;
1247
1248  defm : FCMLA_LANE_PATS<v4f16, V64,
1249                         (v4f16 (bitconvert (v2i32 (AArch64duplane32 (v4i32 V128:$Rm), VectorIndexD:$idx))))>;
1250  defm : FCMLA_LANE_PATS<v8f16, V128,
1251                         (v8f16 (bitconvert (v4i32 (AArch64duplane32 (v4i32 V128:$Rm), VectorIndexS:$idx))))>;
1252}
1253let Predicates = [HasComplxNum, HasNEON] in {
1254  defm : FCMLA_PATS<v2f32, V64>;
1255  defm : FCMLA_PATS<v4f32, V128>;
1256  defm : FCMLA_PATS<v2f64, V128>;
1257
1258  defm : FCMLA_LANE_PATS<v4f32, V128,
1259                         (v4f32 (bitconvert (v2i64 (AArch64duplane64 (v2i64 V128:$Rm), VectorIndexD:$idx))))>;
1260}
1261
1262// v8.3a Pointer Authentication
1263// These instructions inhabit part of the hint space and so can be used for
1264// armv8 targets. Keeping the old HINT mnemonic when compiling without PA is
1265// important for compatibility with other assemblers (e.g. GAS) when building
1266// software compatible with both CPUs that do or don't implement PA.
1267let Uses = [LR], Defs = [LR] in {
1268  def PACIAZ   : SystemNoOperands<0b000, "hint\t#24">;
1269  def PACIBZ   : SystemNoOperands<0b010, "hint\t#26">;
1270  let isAuthenticated = 1 in {
1271    def AUTIAZ   : SystemNoOperands<0b100, "hint\t#28">;
1272    def AUTIBZ   : SystemNoOperands<0b110, "hint\t#30">;
1273  }
1274}
1275let Uses = [LR, SP], Defs = [LR] in {
1276  def PACIASP  : SystemNoOperands<0b001, "hint\t#25">;
1277  def PACIBSP  : SystemNoOperands<0b011, "hint\t#27">;
1278  let isAuthenticated = 1 in {
1279    def AUTIASP  : SystemNoOperands<0b101, "hint\t#29">;
1280    def AUTIBSP  : SystemNoOperands<0b111, "hint\t#31">;
1281  }
1282}
1283let Uses = [X16, X17], Defs = [X17], CRm = 0b0001 in {
1284  def PACIA1716  : SystemNoOperands<0b000, "hint\t#8">;
1285  def PACIB1716  : SystemNoOperands<0b010, "hint\t#10">;
1286  let isAuthenticated = 1 in {
1287    def AUTIA1716  : SystemNoOperands<0b100, "hint\t#12">;
1288    def AUTIB1716  : SystemNoOperands<0b110, "hint\t#14">;
1289  }
1290}
1291
1292let Uses = [LR], Defs = [LR], CRm = 0b0000 in {
1293  def XPACLRI   : SystemNoOperands<0b111, "hint\t#7">;
1294}
1295
1296// In order to be able to write readable assembly, LLVM should accept assembly
1297// inputs that use pointer authentication mnemonics, even with PA disabled.
1298// However, in order to be compatible with other assemblers (e.g. GAS), LLVM
1299// should not emit these mnemonics unless PA is enabled.
1300def : InstAlias<"paciaz", (PACIAZ), 0>;
1301def : InstAlias<"pacibz", (PACIBZ), 0>;
1302def : InstAlias<"autiaz", (AUTIAZ), 0>;
1303def : InstAlias<"autibz", (AUTIBZ), 0>;
1304def : InstAlias<"paciasp", (PACIASP), 0>;
1305def : InstAlias<"pacibsp", (PACIBSP), 0>;
1306def : InstAlias<"autiasp", (AUTIASP), 0>;
1307def : InstAlias<"autibsp", (AUTIBSP), 0>;
1308def : InstAlias<"pacia1716", (PACIA1716), 0>;
1309def : InstAlias<"pacib1716", (PACIB1716), 0>;
1310def : InstAlias<"autia1716", (AUTIA1716), 0>;
1311def : InstAlias<"autib1716", (AUTIB1716), 0>;
1312def : InstAlias<"xpaclri", (XPACLRI), 0>;
1313
1314// These pointer authentication instructions require armv8.3a
1315let Predicates = [HasPAuth] in {
1316
1317  // When PA is enabled, a better mnemonic should be emitted.
1318  def : InstAlias<"paciaz", (PACIAZ), 1>;
1319  def : InstAlias<"pacibz", (PACIBZ), 1>;
1320  def : InstAlias<"autiaz", (AUTIAZ), 1>;
1321  def : InstAlias<"autibz", (AUTIBZ), 1>;
1322  def : InstAlias<"paciasp", (PACIASP), 1>;
1323  def : InstAlias<"pacibsp", (PACIBSP), 1>;
1324  def : InstAlias<"autiasp", (AUTIASP), 1>;
1325  def : InstAlias<"autibsp", (AUTIBSP), 1>;
1326  def : InstAlias<"pacia1716", (PACIA1716), 1>;
1327  def : InstAlias<"pacib1716", (PACIB1716), 1>;
1328  def : InstAlias<"autia1716", (AUTIA1716), 1>;
1329  def : InstAlias<"autib1716", (AUTIB1716), 1>;
1330  def : InstAlias<"xpaclri", (XPACLRI), 1>;
1331
1332  multiclass SignAuth<bits<3> prefix, bits<3> prefix_z, string asm,
1333                      SDPatternOperator op> {
1334    def IA   : SignAuthOneData<prefix, 0b00, !strconcat(asm,  "ia"), op>;
1335    def IB   : SignAuthOneData<prefix, 0b01, !strconcat(asm,  "ib"), op>;
1336    def DA   : SignAuthOneData<prefix, 0b10, !strconcat(asm,  "da"), op>;
1337    def DB   : SignAuthOneData<prefix, 0b11, !strconcat(asm,  "db"), op>;
1338    def IZA  : SignAuthZero<prefix_z,  0b00, !strconcat(asm, "iza"), op>;
1339    def DZA  : SignAuthZero<prefix_z,  0b10, !strconcat(asm, "dza"), op>;
1340    def IZB  : SignAuthZero<prefix_z,  0b01, !strconcat(asm, "izb"), op>;
1341    def DZB  : SignAuthZero<prefix_z,  0b11, !strconcat(asm, "dzb"), op>;
1342  }
1343
1344  defm PAC : SignAuth<0b000, 0b010, "pac", int_ptrauth_sign>;
1345  defm AUT : SignAuth<0b001, 0b011, "aut", null_frag>;
1346
1347  def XPACI : ClearAuth<0, "xpaci">;
1348  def XPACD : ClearAuth<1, "xpacd">;
1349
1350  def PACGA : SignAuthTwoOperand<0b1100, "pacga", int_ptrauth_sign_generic>;
1351
1352  // Combined Instructions
1353  let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1  in {
1354    def BRAA    : AuthBranchTwoOperands<0, 0, "braa">;
1355    def BRAB    : AuthBranchTwoOperands<0, 1, "brab">;
1356  }
1357  let isCall = 1, Defs = [LR], Uses = [SP] in {
1358    def BLRAA   : AuthBranchTwoOperands<1, 0, "blraa">;
1359    def BLRAB   : AuthBranchTwoOperands<1, 1, "blrab">;
1360  }
1361
1362  let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1  in {
1363    def BRAAZ   : AuthOneOperand<0b000, 0, "braaz">;
1364    def BRABZ   : AuthOneOperand<0b000, 1, "brabz">;
1365  }
1366  let isCall = 1, Defs = [LR], Uses = [SP] in {
1367    def BLRAAZ  : AuthOneOperand<0b001, 0, "blraaz">;
1368    def BLRABZ  : AuthOneOperand<0b001, 1, "blrabz">;
1369  }
1370
1371  let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
1372    def RETAA   : AuthReturn<0b010, 0, "retaa">;
1373    def RETAB   : AuthReturn<0b010, 1, "retab">;
1374    def ERETAA  : AuthReturn<0b100, 0, "eretaa">;
1375    def ERETAB  : AuthReturn<0b100, 1, "eretab">;
1376  }
1377
1378  defm LDRAA  : AuthLoad<0, "ldraa", simm10Scaled>;
1379  defm LDRAB  : AuthLoad<1, "ldrab", simm10Scaled>;
1380
1381}
1382
1383// v8.3a floating point conversion for javascript
1384let Predicates = [HasJS, HasFPARMv8], Defs = [NZCV] in
1385def FJCVTZS  : BaseFPToIntegerUnscaled<0b01, 0b11, 0b110, FPR64, GPR32,
1386                                      "fjcvtzs",
1387                                      [(set GPR32:$Rd,
1388                                         (int_aarch64_fjcvtzs FPR64:$Rn))]> {
1389  let Inst{31} = 0;
1390} // HasJS, HasFPARMv8
1391
1392// v8.4 Flag manipulation instructions
1393let Predicates = [HasFlagM], Defs = [NZCV], Uses = [NZCV] in {
1394def CFINV : SimpleSystemI<0, (ins), "cfinv", "">, Sched<[WriteSys]> {
1395  let Inst{20-5} = 0b0000001000000000;
1396}
1397def SETF8  : BaseFlagManipulation<0, 0, (ins GPR32:$Rn), "setf8", "{\t$Rn}">;
1398def SETF16 : BaseFlagManipulation<0, 1, (ins GPR32:$Rn), "setf16", "{\t$Rn}">;
1399def RMIF   : FlagRotate<(ins GPR64:$Rn, uimm6:$imm, imm0_15:$mask), "rmif",
1400                        "{\t$Rn, $imm, $mask}">;
1401} // HasFlagM
1402
1403// v8.5 flag manipulation instructions
1404let Predicates = [HasAltNZCV], Uses = [NZCV], Defs = [NZCV] in {
1405
1406def XAFLAG : PstateWriteSimple<(ins), "xaflag", "">, Sched<[WriteSys]> {
1407  let Inst{18-16} = 0b000;
1408  let Inst{11-8} = 0b0000;
1409  let Unpredictable{11-8} = 0b1111;
1410  let Inst{7-5} = 0b001;
1411}
1412
1413def AXFLAG : PstateWriteSimple<(ins), "axflag", "">, Sched<[WriteSys]> {
1414  let Inst{18-16} = 0b000;
1415  let Inst{11-8} = 0b0000;
1416  let Unpredictable{11-8} = 0b1111;
1417  let Inst{7-5} = 0b010;
1418}
1419} // HasAltNZCV
1420
1421
1422// Armv8.5-A speculation barrier
1423def SB : SimpleSystemI<0, (ins), "sb", "">, Sched<[]> {
1424  let Inst{20-5} = 0b0001100110000111;
1425  let Unpredictable{11-8} = 0b1111;
1426  let Predicates = [HasSB];
1427  let hasSideEffects = 1;
1428}
1429
1430def : InstAlias<"clrex", (CLREX 0xf)>;
1431def : InstAlias<"isb", (ISB 0xf)>;
1432def : InstAlias<"ssbb", (DSB 0)>;
1433def : InstAlias<"pssbb", (DSB 4)>;
1434def : InstAlias<"dfb", (DSB 0b1100)>, Requires<[HasV8_0r]>;
1435
1436def MRS    : MRSI;
1437def MSR    : MSRI;
1438def MSRpstateImm1 : MSRpstateImm0_1;
1439def MSRpstateImm4 : MSRpstateImm0_15;
1440
1441def : Pat<(AArch64mrs imm:$id),
1442          (MRS imm:$id)>;
1443
1444// The thread pointer (on Linux, at least, where this has been implemented) is
1445// TPIDR_EL0.
1446def MOVbaseTLS : Pseudo<(outs GPR64:$dst), (ins),
1447                       [(set GPR64:$dst, AArch64threadpointer)]>, Sched<[WriteSys]>;
1448
1449let Uses = [ X9 ], Defs = [ X16, X17, LR, NZCV ] in {
1450def HWASAN_CHECK_MEMACCESS : Pseudo<
1451  (outs), (ins GPR64noip:$ptr, i32imm:$accessinfo),
1452  [(int_hwasan_check_memaccess X9, GPR64noip:$ptr, (i32 timm:$accessinfo))]>,
1453  Sched<[]>;
1454}
1455
1456let Uses = [ X20 ], Defs = [ X16, X17, LR, NZCV ] in {
1457def HWASAN_CHECK_MEMACCESS_SHORTGRANULES : Pseudo<
1458  (outs), (ins GPR64noip:$ptr, i32imm:$accessinfo),
1459  [(int_hwasan_check_memaccess_shortgranules X20, GPR64noip:$ptr, (i32 timm:$accessinfo))]>,
1460  Sched<[]>;
1461}
1462
1463// The cycle counter PMC register is PMCCNTR_EL0.
1464let Predicates = [HasPerfMon] in
1465def : Pat<(readcyclecounter), (MRS 0xdce8)>;
1466
1467// FPCR register
1468def : Pat<(i64 (int_aarch64_get_fpcr)), (MRS 0xda20)>;
1469def : Pat<(int_aarch64_set_fpcr i64:$val), (MSR 0xda20, GPR64:$val)>;
1470
1471// Generic system instructions
1472def SYSxt  : SystemXtI<0, "sys">;
1473def SYSLxt : SystemLXtI<1, "sysl">;
1474
1475def : InstAlias<"sys $op1, $Cn, $Cm, $op2",
1476                (SYSxt imm0_7:$op1, sys_cr_op:$Cn,
1477                 sys_cr_op:$Cm, imm0_7:$op2, XZR)>;
1478
1479
1480let Predicates = [HasTME] in {
1481
1482def TSTART : TMSystemI<0b0000, "tstart",
1483                      [(set GPR64:$Rt, (int_aarch64_tstart))]>;
1484
1485def TCOMMIT : TMSystemINoOperand<0b0000, "tcommit", [(int_aarch64_tcommit)]>;
1486
1487def TCANCEL : TMSystemException<0b011, "tcancel",
1488                                [(int_aarch64_tcancel timm64_0_65535:$imm)]>;
1489
1490def TTEST : TMSystemI<0b0001, "ttest", [(set GPR64:$Rt, (int_aarch64_ttest))]> {
1491  let mayLoad = 0;
1492  let mayStore = 0;
1493}
1494} // HasTME
1495
1496//===----------------------------------------------------------------------===//
1497// Move immediate instructions.
1498//===----------------------------------------------------------------------===//
1499
1500defm MOVK : InsertImmediate<0b11, "movk">;
1501defm MOVN : MoveImmediate<0b00, "movn">;
1502
1503let PostEncoderMethod = "fixMOVZ" in
1504defm MOVZ : MoveImmediate<0b10, "movz">;
1505
1506// First group of aliases covers an implicit "lsl #0".
1507def : InstAlias<"movk $dst, $imm", (MOVKWi GPR32:$dst, timm32_0_65535:$imm, 0), 0>;
1508def : InstAlias<"movk $dst, $imm", (MOVKXi GPR64:$dst, timm32_0_65535:$imm, 0), 0>;
1509def : InstAlias<"movn $dst, $imm", (MOVNWi GPR32:$dst, timm32_0_65535:$imm, 0)>;
1510def : InstAlias<"movn $dst, $imm", (MOVNXi GPR64:$dst, timm32_0_65535:$imm, 0)>;
1511def : InstAlias<"movz $dst, $imm", (MOVZWi GPR32:$dst, timm32_0_65535:$imm, 0)>;
1512def : InstAlias<"movz $dst, $imm", (MOVZXi GPR64:$dst, timm32_0_65535:$imm, 0)>;
1513
1514// Next, we have various ELF relocations with the ":XYZ_g0:sym" syntax.
1515def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g3:$sym, 48)>;
1516def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g2:$sym, 32)>;
1517def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g1:$sym, 16)>;
1518def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g0:$sym, 0)>;
1519
1520def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g3:$sym, 48)>;
1521def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g2:$sym, 32)>;
1522def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g1:$sym, 16)>;
1523def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g0:$sym, 0)>;
1524
1525def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g3:$sym, 48), 0>;
1526def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g2:$sym, 32), 0>;
1527def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g1:$sym, 16), 0>;
1528def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g0:$sym, 0), 0>;
1529
1530def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movw_symbol_g1:$sym, 16)>;
1531def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movw_symbol_g0:$sym, 0)>;
1532
1533def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movw_symbol_g1:$sym, 16)>;
1534def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movw_symbol_g0:$sym, 0)>;
1535
1536def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movw_symbol_g1:$sym, 16), 0>;
1537def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movw_symbol_g0:$sym, 0), 0>;
1538
1539// Final group of aliases covers true "mov $Rd, $imm" cases.
1540multiclass movw_mov_alias<string basename,Instruction INST, RegisterClass GPR,
1541                          int width, int shift> {
1542  def _asmoperand : AsmOperandClass {
1543    let Name = basename # width # "_lsl" # shift # "MovAlias";
1544    let PredicateMethod = "is" # basename # "MovAlias<" # width # ", "
1545                               # shift # ">";
1546    let RenderMethod = "add" # basename # "MovAliasOperands<" # shift # ">";
1547  }
1548
1549  def _movimm : Operand<i32> {
1550    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_asmoperand");
1551  }
1552
1553  def : InstAlias<"mov $Rd, $imm",
1554                  (INST GPR:$Rd, !cast<Operand>(NAME # "_movimm"):$imm, shift)>;
1555}
1556
1557defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 0>;
1558defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 16>;
1559
1560defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 0>;
1561defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 16>;
1562defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 32>;
1563defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 48>;
1564
1565defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 0>;
1566defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 16>;
1567
1568defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 0>;
1569defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 16>;
1570defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 32>;
1571defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 48>;
1572
1573let isReMaterializable = 1, isCodeGenOnly = 1, isMoveImm = 1,
1574    isAsCheapAsAMove = 1 in {
1575// FIXME: The following pseudo instructions are only needed because remat
1576// cannot handle multiple instructions.  When that changes, we can select
1577// directly to the real instructions and get rid of these pseudos.
1578
1579def MOVi32imm
1580    : Pseudo<(outs GPR32:$dst), (ins i32imm:$src),
1581             [(set GPR32:$dst, imm:$src)]>,
1582      Sched<[WriteImm]>;
1583def MOVi64imm
1584    : Pseudo<(outs GPR64:$dst), (ins i64imm:$src),
1585             [(set GPR64:$dst, imm:$src)]>,
1586      Sched<[WriteImm]>;
1587} // isReMaterializable, isCodeGenOnly
1588
1589// If possible, we want to use MOVi32imm even for 64-bit moves. This gives the
1590// eventual expansion code fewer bits to worry about getting right. Marshalling
1591// the types is a little tricky though:
1592def i64imm_32bit : ImmLeaf<i64, [{
1593  return (Imm & 0xffffffffULL) == static_cast<uint64_t>(Imm);
1594}]>;
1595
1596def s64imm_32bit : ImmLeaf<i64, [{
1597  int64_t Imm64 = static_cast<int64_t>(Imm);
1598  return Imm64 >= std::numeric_limits<int32_t>::min() &&
1599         Imm64 <= std::numeric_limits<int32_t>::max();
1600}]>;
1601
1602def trunc_imm : SDNodeXForm<imm, [{
1603  return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i32);
1604}]>;
1605
1606def gi_trunc_imm : GICustomOperandRenderer<"renderTruncImm">,
1607  GISDNodeXFormEquiv<trunc_imm>;
1608
1609let Predicates = [OptimizedGISelOrOtherSelector] in {
1610// The SUBREG_TO_REG isn't eliminated at -O0, which can result in pointless
1611// copies.
1612def : Pat<(i64 i64imm_32bit:$src),
1613          (SUBREG_TO_REG (i64 0), (MOVi32imm (trunc_imm imm:$src)), sub_32)>;
1614}
1615
1616// Materialize FP constants via MOVi32imm/MOVi64imm (MachO large code model).
1617def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{
1618return CurDAG->getTargetConstant(
1619  N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i32);
1620}]>;
1621
1622def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{
1623return CurDAG->getTargetConstant(
1624  N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i64);
1625}]>;
1626
1627
1628def : Pat<(f32 fpimm:$in),
1629  (COPY_TO_REGCLASS (MOVi32imm (bitcast_fpimm_to_i32 f32:$in)), FPR32)>;
1630def : Pat<(f64 fpimm:$in),
1631  (COPY_TO_REGCLASS (MOVi64imm (bitcast_fpimm_to_i64 f64:$in)), FPR64)>;
1632
1633
1634// Deal with the various forms of (ELF) large addressing with MOVZ/MOVK
1635// sequences.
1636def : Pat<(AArch64WrapperLarge tglobaladdr:$g3, tglobaladdr:$g2,
1637                             tglobaladdr:$g1, tglobaladdr:$g0),
1638          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tglobaladdr:$g0, 0),
1639                                  tglobaladdr:$g1, 16),
1640                          tglobaladdr:$g2, 32),
1641                  tglobaladdr:$g3, 48)>;
1642
1643def : Pat<(AArch64WrapperLarge tblockaddress:$g3, tblockaddress:$g2,
1644                             tblockaddress:$g1, tblockaddress:$g0),
1645          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tblockaddress:$g0, 0),
1646                                  tblockaddress:$g1, 16),
1647                          tblockaddress:$g2, 32),
1648                  tblockaddress:$g3, 48)>;
1649
1650def : Pat<(AArch64WrapperLarge tconstpool:$g3, tconstpool:$g2,
1651                             tconstpool:$g1, tconstpool:$g0),
1652          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tconstpool:$g0, 0),
1653                                  tconstpool:$g1, 16),
1654                          tconstpool:$g2, 32),
1655                  tconstpool:$g3, 48)>;
1656
1657def : Pat<(AArch64WrapperLarge tjumptable:$g3, tjumptable:$g2,
1658                             tjumptable:$g1, tjumptable:$g0),
1659          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tjumptable:$g0, 0),
1660                                  tjumptable:$g1, 16),
1661                          tjumptable:$g2, 32),
1662                  tjumptable:$g3, 48)>;
1663
1664
1665//===----------------------------------------------------------------------===//
1666// Arithmetic instructions.
1667//===----------------------------------------------------------------------===//
1668
1669// Add/subtract with carry.
1670defm ADC : AddSubCarry<0, "adc", "adcs", AArch64adc, AArch64adc_flag>;
1671defm SBC : AddSubCarry<1, "sbc", "sbcs", AArch64sbc, AArch64sbc_flag>;
1672
1673def : InstAlias<"ngc $dst, $src",  (SBCWr  GPR32:$dst, WZR, GPR32:$src)>;
1674def : InstAlias<"ngc $dst, $src",  (SBCXr  GPR64:$dst, XZR, GPR64:$src)>;
1675def : InstAlias<"ngcs $dst, $src", (SBCSWr GPR32:$dst, WZR, GPR32:$src)>;
1676def : InstAlias<"ngcs $dst, $src", (SBCSXr GPR64:$dst, XZR, GPR64:$src)>;
1677
1678// Add/subtract
1679defm ADD : AddSub<0, "add", "sub", add>;
1680defm SUB : AddSub<1, "sub", "add">;
1681
1682def : InstAlias<"mov $dst, $src",
1683                (ADDWri GPR32sponly:$dst, GPR32sp:$src, 0, 0)>;
1684def : InstAlias<"mov $dst, $src",
1685                (ADDWri GPR32sp:$dst, GPR32sponly:$src, 0, 0)>;
1686def : InstAlias<"mov $dst, $src",
1687                (ADDXri GPR64sponly:$dst, GPR64sp:$src, 0, 0)>;
1688def : InstAlias<"mov $dst, $src",
1689                (ADDXri GPR64sp:$dst, GPR64sponly:$src, 0, 0)>;
1690
1691defm ADDS : AddSubS<0, "adds", AArch64add_flag, "cmn", "subs", "cmp">;
1692defm SUBS : AddSubS<1, "subs", AArch64sub_flag, "cmp", "adds", "cmn">;
1693
1694// Use SUBS instead of SUB to enable CSE between SUBS and SUB.
1695def : Pat<(sub GPR32sp:$Rn, addsub_shifted_imm32:$imm),
1696          (SUBSWri GPR32sp:$Rn, addsub_shifted_imm32:$imm)>;
1697def : Pat<(sub GPR64sp:$Rn, addsub_shifted_imm64:$imm),
1698          (SUBSXri GPR64sp:$Rn, addsub_shifted_imm64:$imm)>;
1699def : Pat<(sub GPR32:$Rn, GPR32:$Rm),
1700          (SUBSWrr GPR32:$Rn, GPR32:$Rm)>;
1701def : Pat<(sub GPR64:$Rn, GPR64:$Rm),
1702          (SUBSXrr GPR64:$Rn, GPR64:$Rm)>;
1703def : Pat<(sub GPR32:$Rn, arith_shifted_reg32:$Rm),
1704          (SUBSWrs GPR32:$Rn, arith_shifted_reg32:$Rm)>;
1705def : Pat<(sub GPR64:$Rn, arith_shifted_reg64:$Rm),
1706          (SUBSXrs GPR64:$Rn, arith_shifted_reg64:$Rm)>;
1707let AddedComplexity = 1 in {
1708def : Pat<(sub GPR32sp:$R2, arith_extended_reg32_i32:$R3),
1709          (SUBSWrx GPR32sp:$R2, arith_extended_reg32_i32:$R3)>;
1710def : Pat<(sub GPR64sp:$R2, arith_extended_reg32to64_i64:$R3),
1711          (SUBSXrx GPR64sp:$R2, arith_extended_reg32to64_i64:$R3)>;
1712}
1713
1714// Because of the immediate format for add/sub-imm instructions, the
1715// expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1).
1716//  These patterns capture that transformation.
1717let AddedComplexity = 1 in {
1718def : Pat<(add GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1719          (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1720def : Pat<(add GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
1721          (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
1722def : Pat<(sub GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1723          (ADDWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1724def : Pat<(sub GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
1725          (ADDXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
1726}
1727
1728// Because of the immediate format for add/sub-imm instructions, the
1729// expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1).
1730//  These patterns capture that transformation.
1731let AddedComplexity = 1 in {
1732def : Pat<(AArch64add_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1733          (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1734def : Pat<(AArch64add_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
1735          (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
1736def : Pat<(AArch64sub_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1737          (ADDSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1738def : Pat<(AArch64sub_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
1739          (ADDSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
1740}
1741
1742def : InstAlias<"neg $dst, $src", (SUBWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
1743def : InstAlias<"neg $dst, $src", (SUBXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
1744def : InstAlias<"neg $dst, $src$shift",
1745                (SUBWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
1746def : InstAlias<"neg $dst, $src$shift",
1747                (SUBXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;
1748
1749def : InstAlias<"negs $dst, $src", (SUBSWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
1750def : InstAlias<"negs $dst, $src", (SUBSXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
1751def : InstAlias<"negs $dst, $src$shift",
1752                (SUBSWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
1753def : InstAlias<"negs $dst, $src$shift",
1754                (SUBSXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;
1755
1756
1757// Unsigned/Signed divide
1758defm UDIV : Div<0, "udiv", udiv>;
1759defm SDIV : Div<1, "sdiv", sdiv>;
1760
1761def : Pat<(int_aarch64_udiv GPR32:$Rn, GPR32:$Rm), (UDIVWr GPR32:$Rn, GPR32:$Rm)>;
1762def : Pat<(int_aarch64_udiv GPR64:$Rn, GPR64:$Rm), (UDIVXr GPR64:$Rn, GPR64:$Rm)>;
1763def : Pat<(int_aarch64_sdiv GPR32:$Rn, GPR32:$Rm), (SDIVWr GPR32:$Rn, GPR32:$Rm)>;
1764def : Pat<(int_aarch64_sdiv GPR64:$Rn, GPR64:$Rm), (SDIVXr GPR64:$Rn, GPR64:$Rm)>;
1765
1766// Variable shift
1767defm ASRV : Shift<0b10, "asr", sra>;
1768defm LSLV : Shift<0b00, "lsl", shl>;
1769defm LSRV : Shift<0b01, "lsr", srl>;
1770defm RORV : Shift<0b11, "ror", rotr>;
1771
1772def : ShiftAlias<"asrv", ASRVWr, GPR32>;
1773def : ShiftAlias<"asrv", ASRVXr, GPR64>;
1774def : ShiftAlias<"lslv", LSLVWr, GPR32>;
1775def : ShiftAlias<"lslv", LSLVXr, GPR64>;
1776def : ShiftAlias<"lsrv", LSRVWr, GPR32>;
1777def : ShiftAlias<"lsrv", LSRVXr, GPR64>;
1778def : ShiftAlias<"rorv", RORVWr, GPR32>;
1779def : ShiftAlias<"rorv", RORVXr, GPR64>;
1780
1781// Multiply-add
1782let AddedComplexity = 5 in {
1783defm MADD : MulAccum<0, "madd">;
1784defm MSUB : MulAccum<1, "msub">;
1785
1786def : Pat<(i32 (mul GPR32:$Rn, GPR32:$Rm)),
1787          (MADDWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
1788def : Pat<(i64 (mul GPR64:$Rn, GPR64:$Rm)),
1789          (MADDXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
1790
1791def : Pat<(i32 (ineg (mul GPR32:$Rn, GPR32:$Rm))),
1792          (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
1793def : Pat<(i64 (ineg (mul GPR64:$Rn, GPR64:$Rm))),
1794          (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
1795def : Pat<(i32 (mul (ineg GPR32:$Rn), GPR32:$Rm)),
1796          (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
1797def : Pat<(i64 (mul (ineg GPR64:$Rn), GPR64:$Rm)),
1798          (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
1799} // AddedComplexity = 5
1800
1801let AddedComplexity = 5 in {
1802def SMADDLrrr : WideMulAccum<0, 0b001, "smaddl", add, sext>;
1803def SMSUBLrrr : WideMulAccum<1, 0b001, "smsubl", sub, sext>;
1804def UMADDLrrr : WideMulAccum<0, 0b101, "umaddl", add, zext>;
1805def UMSUBLrrr : WideMulAccum<1, 0b101, "umsubl", sub, zext>;
1806
1807def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (sext_inreg GPR64:$Rm, i32))),
1808          (SMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), (EXTRACT_SUBREG $Rm, sub_32), XZR)>;
1809def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (sext GPR32:$Rm))),
1810          (SMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), $Rm, XZR)>;
1811def : Pat<(i64 (mul (sext GPR32:$Rn), (sext GPR32:$Rm))),
1812          (SMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1813def : Pat<(i64 (mul (and GPR64:$Rn, 0xFFFFFFFF), (and GPR64:$Rm, 0xFFFFFFFF))),
1814          (UMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), (EXTRACT_SUBREG $Rm, sub_32), XZR)>;
1815def : Pat<(i64 (mul (and GPR64:$Rn, 0xFFFFFFFF), (zext GPR32:$Rm))),
1816          (UMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), $Rm, XZR)>;
1817def : Pat<(i64 (mul (zext GPR32:$Rn), (zext GPR32:$Rm))),
1818          (UMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1819
1820def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (sext GPR32:$Rm)))),
1821          (SMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1822def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (zext GPR32:$Rm)))),
1823          (UMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1824
1825def : Pat<(i64 (mul (sext GPR32:$Rn), (s64imm_32bit:$C))),
1826          (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1827def : Pat<(i64 (mul (zext GPR32:$Rn), (i64imm_32bit:$C))),
1828          (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1829def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C))),
1830          (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1831                     (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1832
1833def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
1834          (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1835def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
1836          (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1837def : Pat<(i64 (ineg (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)))),
1838          (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1839                     (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1840
1841def : Pat<(i64 (add (mul (sext GPR32:$Rn), (s64imm_32bit:$C)), GPR64:$Ra)),
1842          (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1843def : Pat<(i64 (add (mul (zext GPR32:$Rn), (i64imm_32bit:$C)), GPR64:$Ra)),
1844          (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1845def : Pat<(i64 (add (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)),
1846                    GPR64:$Ra)),
1847          (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1848                     (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1849
1850def : Pat<(i64 (sub GPR64:$Ra, (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
1851          (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1852def : Pat<(i64 (sub GPR64:$Ra, (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
1853          (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1854def : Pat<(i64 (sub GPR64:$Ra, (mul (sext_inreg GPR64:$Rn, i32),
1855                                    (s64imm_32bit:$C)))),
1856          (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1857                     (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1858} // AddedComplexity = 5
1859
1860def : MulAccumWAlias<"mul", MADDWrrr>;
1861def : MulAccumXAlias<"mul", MADDXrrr>;
1862def : MulAccumWAlias<"mneg", MSUBWrrr>;
1863def : MulAccumXAlias<"mneg", MSUBXrrr>;
1864def : WideMulAccumAlias<"smull", SMADDLrrr>;
1865def : WideMulAccumAlias<"smnegl", SMSUBLrrr>;
1866def : WideMulAccumAlias<"umull", UMADDLrrr>;
1867def : WideMulAccumAlias<"umnegl", UMSUBLrrr>;
1868
1869// Multiply-high
1870def SMULHrr : MulHi<0b010, "smulh", mulhs>;
1871def UMULHrr : MulHi<0b110, "umulh", mulhu>;
1872
1873// CRC32
1874def CRC32Brr : BaseCRC32<0, 0b00, 0, GPR32, int_aarch64_crc32b, "crc32b">;
1875def CRC32Hrr : BaseCRC32<0, 0b01, 0, GPR32, int_aarch64_crc32h, "crc32h">;
1876def CRC32Wrr : BaseCRC32<0, 0b10, 0, GPR32, int_aarch64_crc32w, "crc32w">;
1877def CRC32Xrr : BaseCRC32<1, 0b11, 0, GPR64, int_aarch64_crc32x, "crc32x">;
1878
1879def CRC32CBrr : BaseCRC32<0, 0b00, 1, GPR32, int_aarch64_crc32cb, "crc32cb">;
1880def CRC32CHrr : BaseCRC32<0, 0b01, 1, GPR32, int_aarch64_crc32ch, "crc32ch">;
1881def CRC32CWrr : BaseCRC32<0, 0b10, 1, GPR32, int_aarch64_crc32cw, "crc32cw">;
1882def CRC32CXrr : BaseCRC32<1, 0b11, 1, GPR64, int_aarch64_crc32cx, "crc32cx">;
1883
1884// v8.1 atomic CAS
1885defm CAS   : CompareAndSwap<0, 0, "">;
1886defm CASA  : CompareAndSwap<1, 0, "a">;
1887defm CASL  : CompareAndSwap<0, 1, "l">;
1888defm CASAL : CompareAndSwap<1, 1, "al">;
1889
1890// v8.1 atomic CASP
1891defm CASP   : CompareAndSwapPair<0, 0, "">;
1892defm CASPA  : CompareAndSwapPair<1, 0, "a">;
1893defm CASPL  : CompareAndSwapPair<0, 1, "l">;
1894defm CASPAL : CompareAndSwapPair<1, 1, "al">;
1895
1896// v8.1 atomic SWP
1897defm SWP   : Swap<0, 0, "">;
1898defm SWPA  : Swap<1, 0, "a">;
1899defm SWPL  : Swap<0, 1, "l">;
1900defm SWPAL : Swap<1, 1, "al">;
1901
1902// v8.1 atomic LD<OP>(register). Performs load and then ST<OP>(register)
1903defm LDADD   : LDOPregister<0b000, "add", 0, 0, "">;
1904defm LDADDA  : LDOPregister<0b000, "add", 1, 0, "a">;
1905defm LDADDL  : LDOPregister<0b000, "add", 0, 1, "l">;
1906defm LDADDAL : LDOPregister<0b000, "add", 1, 1, "al">;
1907
1908defm LDCLR   : LDOPregister<0b001, "clr", 0, 0, "">;
1909defm LDCLRA  : LDOPregister<0b001, "clr", 1, 0, "a">;
1910defm LDCLRL  : LDOPregister<0b001, "clr", 0, 1, "l">;
1911defm LDCLRAL : LDOPregister<0b001, "clr", 1, 1, "al">;
1912
1913defm LDEOR   : LDOPregister<0b010, "eor", 0, 0, "">;
1914defm LDEORA  : LDOPregister<0b010, "eor", 1, 0, "a">;
1915defm LDEORL  : LDOPregister<0b010, "eor", 0, 1, "l">;
1916defm LDEORAL : LDOPregister<0b010, "eor", 1, 1, "al">;
1917
1918defm LDSET   : LDOPregister<0b011, "set", 0, 0, "">;
1919defm LDSETA  : LDOPregister<0b011, "set", 1, 0, "a">;
1920defm LDSETL  : LDOPregister<0b011, "set", 0, 1, "l">;
1921defm LDSETAL : LDOPregister<0b011, "set", 1, 1, "al">;
1922
1923defm LDSMAX   : LDOPregister<0b100, "smax", 0, 0, "">;
1924defm LDSMAXA  : LDOPregister<0b100, "smax", 1, 0, "a">;
1925defm LDSMAXL  : LDOPregister<0b100, "smax", 0, 1, "l">;
1926defm LDSMAXAL : LDOPregister<0b100, "smax", 1, 1, "al">;
1927
1928defm LDSMIN   : LDOPregister<0b101, "smin", 0, 0, "">;
1929defm LDSMINA  : LDOPregister<0b101, "smin", 1, 0, "a">;
1930defm LDSMINL  : LDOPregister<0b101, "smin", 0, 1, "l">;
1931defm LDSMINAL : LDOPregister<0b101, "smin", 1, 1, "al">;
1932
1933defm LDUMAX   : LDOPregister<0b110, "umax", 0, 0, "">;
1934defm LDUMAXA  : LDOPregister<0b110, "umax", 1, 0, "a">;
1935defm LDUMAXL  : LDOPregister<0b110, "umax", 0, 1, "l">;
1936defm LDUMAXAL : LDOPregister<0b110, "umax", 1, 1, "al">;
1937
1938defm LDUMIN   : LDOPregister<0b111, "umin", 0, 0, "">;
1939defm LDUMINA  : LDOPregister<0b111, "umin", 1, 0, "a">;
1940defm LDUMINL  : LDOPregister<0b111, "umin", 0, 1, "l">;
1941defm LDUMINAL : LDOPregister<0b111, "umin", 1, 1, "al">;
1942
1943// v8.1 atomic ST<OP>(register) as aliases to "LD<OP>(register) when Rt=xZR"
1944defm : STOPregister<"stadd","LDADD">; // STADDx
1945defm : STOPregister<"stclr","LDCLR">; // STCLRx
1946defm : STOPregister<"steor","LDEOR">; // STEORx
1947defm : STOPregister<"stset","LDSET">; // STSETx
1948defm : STOPregister<"stsmax","LDSMAX">;// STSMAXx
1949defm : STOPregister<"stsmin","LDSMIN">;// STSMINx
1950defm : STOPregister<"stumax","LDUMAX">;// STUMAXx
1951defm : STOPregister<"stumin","LDUMIN">;// STUMINx
1952
1953// v8.5 Memory Tagging Extension
1954let Predicates = [HasMTE] in {
1955
1956def IRG   : BaseTwoOperand<0b0100, GPR64sp, "irg", int_aarch64_irg, GPR64sp, GPR64>,
1957            Sched<[]>{
1958  let Inst{31} = 1;
1959}
1960def GMI   : BaseTwoOperand<0b0101, GPR64, "gmi", int_aarch64_gmi, GPR64sp>, Sched<[]>{
1961  let Inst{31} = 1;
1962  let isNotDuplicable = 1;
1963}
1964def ADDG  : AddSubG<0, "addg", null_frag>;
1965def SUBG  : AddSubG<1, "subg", null_frag>;
1966
1967def : InstAlias<"irg $dst, $src", (IRG GPR64sp:$dst, GPR64sp:$src, XZR), 1>;
1968
1969def SUBP : SUBP<0, "subp", int_aarch64_subp>, Sched<[]>;
1970def SUBPS : SUBP<1, "subps", null_frag>, Sched<[]>{
1971  let Defs = [NZCV];
1972}
1973
1974def : InstAlias<"cmpp $lhs, $rhs", (SUBPS XZR, GPR64sp:$lhs, GPR64sp:$rhs), 0>;
1975
1976def LDG : MemTagLoad<"ldg", "\t$Rt, [$Rn, $offset]">;
1977
1978def : Pat<(int_aarch64_addg (am_indexedu6s128 GPR64sp:$Rn, uimm6s16:$imm6), imm0_15:$imm4),
1979          (ADDG GPR64sp:$Rn, imm0_63:$imm6, imm0_15:$imm4)>;
1980def : Pat<(int_aarch64_ldg GPR64:$Rt, (am_indexeds9s128 GPR64sp:$Rn,  simm9s16:$offset)),
1981          (LDG GPR64:$Rt, GPR64sp:$Rn,  simm9s16:$offset)>;
1982
1983def : InstAlias<"ldg $Rt, [$Rn]", (LDG GPR64:$Rt, GPR64sp:$Rn, 0), 1>;
1984
1985def LDGM : MemTagVector<1, "ldgm", "\t$Rt, [$Rn]",
1986                   (outs GPR64:$Rt), (ins GPR64sp:$Rn)>;
1987def STGM : MemTagVector<0, "stgm", "\t$Rt, [$Rn]",
1988                   (outs), (ins GPR64:$Rt, GPR64sp:$Rn)>;
1989def STZGM : MemTagVector<0, "stzgm", "\t$Rt, [$Rn]",
1990                   (outs), (ins GPR64:$Rt, GPR64sp:$Rn)> {
1991  let Inst{23} = 0;
1992}
1993
1994defm STG   : MemTagStore<0b00, "stg">;
1995defm STZG  : MemTagStore<0b01, "stzg">;
1996defm ST2G  : MemTagStore<0b10, "st2g">;
1997defm STZ2G : MemTagStore<0b11, "stz2g">;
1998
1999def : Pat<(AArch64stg GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
2000          (STGOffset $Rn, $Rm, $imm)>;
2001def : Pat<(AArch64stzg GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
2002          (STZGOffset $Rn, $Rm, $imm)>;
2003def : Pat<(AArch64st2g GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
2004          (ST2GOffset $Rn, $Rm, $imm)>;
2005def : Pat<(AArch64stz2g GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
2006          (STZ2GOffset $Rn, $Rm, $imm)>;
2007
2008defm STGP     : StorePairOffset <0b01, 0, GPR64z, simm7s16, "stgp">;
2009def  STGPpre  : StorePairPreIdx <0b01, 0, GPR64z, simm7s16, "stgp">;
2010def  STGPpost : StorePairPostIdx<0b01, 0, GPR64z, simm7s16, "stgp">;
2011
2012def : Pat<(int_aarch64_stg GPR64:$Rt, (am_indexeds9s128 GPR64sp:$Rn, simm9s16:$offset)),
2013          (STGOffset GPR64:$Rt, GPR64sp:$Rn,  simm9s16:$offset)>;
2014
2015def : Pat<(int_aarch64_stgp (am_indexed7s128 GPR64sp:$Rn, simm7s16:$imm), GPR64:$Rt, GPR64:$Rt2),
2016          (STGPi $Rt, $Rt2, $Rn, $imm)>;
2017
2018def IRGstack
2019    : Pseudo<(outs GPR64sp:$Rd), (ins GPR64sp:$Rsp, GPR64:$Rm), []>,
2020      Sched<[]>;
2021def TAGPstack
2022    : Pseudo<(outs GPR64sp:$Rd), (ins GPR64sp:$Rn, uimm6s16:$imm6, GPR64sp:$Rm, imm0_15:$imm4), []>,
2023      Sched<[]>;
2024
2025// Explicit SP in the first operand prevents ShrinkWrap optimization
2026// from leaving this instruction out of the stack frame. When IRGstack
2027// is transformed into IRG, this operand is replaced with the actual
2028// register / expression for the tagged base pointer of the current function.
2029def : Pat<(int_aarch64_irg_sp i64:$Rm), (IRGstack SP, i64:$Rm)>;
2030
2031// Large STG to be expanded into a loop. $sz is the size, $Rn is start address.
2032// $Rn_wback is one past the end of the range. $Rm is the loop counter.
2033let isCodeGenOnly=1, mayStore=1 in {
2034def STGloop_wback
2035    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn_wback), (ins i64imm:$sz, GPR64sp:$Rn),
2036             [], "$Rn = $Rn_wback,@earlyclobber $Rn_wback,@earlyclobber $Rm" >,
2037      Sched<[WriteAdr, WriteST]>;
2038
2039def STZGloop_wback
2040    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn_wback), (ins i64imm:$sz, GPR64sp:$Rn),
2041             [], "$Rn = $Rn_wback,@earlyclobber $Rn_wback,@earlyclobber $Rm" >,
2042      Sched<[WriteAdr, WriteST]>;
2043
2044// A variant of the above where $Rn2 is an independent register not tied to the input register $Rn.
2045// Their purpose is to use a FrameIndex operand as $Rn (which of course can not be written back).
2046def STGloop
2047    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn2), (ins i64imm:$sz, GPR64sp:$Rn),
2048             [], "@earlyclobber $Rn2,@earlyclobber $Rm" >,
2049      Sched<[WriteAdr, WriteST]>;
2050
2051def STZGloop
2052    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn2), (ins i64imm:$sz, GPR64sp:$Rn),
2053             [], "@earlyclobber $Rn2,@earlyclobber $Rm" >,
2054      Sched<[WriteAdr, WriteST]>;
2055}
2056
2057} // Predicates = [HasMTE]
2058
2059//===----------------------------------------------------------------------===//
2060// Logical instructions.
2061//===----------------------------------------------------------------------===//
2062
2063// (immediate)
2064defm ANDS : LogicalImmS<0b11, "ands", AArch64and_flag, "bics">;
2065defm AND  : LogicalImm<0b00, "and", and, "bic">;
2066defm EOR  : LogicalImm<0b10, "eor", xor, "eon">;
2067defm ORR  : LogicalImm<0b01, "orr", or, "orn">;
2068
2069// FIXME: these aliases *are* canonical sometimes (when movz can't be
2070// used). Actually, it seems to be working right now, but putting logical_immXX
2071// here is a bit dodgy on the AsmParser side too.
2072def : InstAlias<"mov $dst, $imm", (ORRWri GPR32sp:$dst, WZR,
2073                                          logical_imm32:$imm), 0>;
2074def : InstAlias<"mov $dst, $imm", (ORRXri GPR64sp:$dst, XZR,
2075                                          logical_imm64:$imm), 0>;
2076
2077
2078// (register)
2079defm ANDS : LogicalRegS<0b11, 0, "ands", AArch64and_flag>;
2080defm BICS : LogicalRegS<0b11, 1, "bics",
2081                        BinOpFrag<(AArch64and_flag node:$LHS, (not node:$RHS))>>;
2082defm AND  : LogicalReg<0b00, 0, "and", and>;
2083defm BIC  : LogicalReg<0b00, 1, "bic",
2084                       BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
2085defm EON  : LogicalReg<0b10, 1, "eon",
2086                       BinOpFrag<(not (xor node:$LHS, node:$RHS))>>;
2087defm EOR  : LogicalReg<0b10, 0, "eor", xor>;
2088defm ORN  : LogicalReg<0b01, 1, "orn",
2089                       BinOpFrag<(or node:$LHS, (not node:$RHS))>>;
2090defm ORR  : LogicalReg<0b01, 0, "orr", or>;
2091
2092def : InstAlias<"mov $dst, $src", (ORRWrs GPR32:$dst, WZR, GPR32:$src, 0), 2>;
2093def : InstAlias<"mov $dst, $src", (ORRXrs GPR64:$dst, XZR, GPR64:$src, 0), 2>;
2094
2095def : InstAlias<"mvn $Wd, $Wm", (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, 0), 3>;
2096def : InstAlias<"mvn $Xd, $Xm", (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, 0), 3>;
2097
2098def : InstAlias<"mvn $Wd, $Wm$sh",
2099                (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, logical_shift32:$sh), 2>;
2100def : InstAlias<"mvn $Xd, $Xm$sh",
2101                (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, logical_shift64:$sh), 2>;
2102
2103def : InstAlias<"tst $src1, $src2",
2104                (ANDSWri WZR, GPR32:$src1, logical_imm32:$src2), 2>;
2105def : InstAlias<"tst $src1, $src2",
2106                (ANDSXri XZR, GPR64:$src1, logical_imm64:$src2), 2>;
2107
2108def : InstAlias<"tst $src1, $src2",
2109                        (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, 0), 3>;
2110def : InstAlias<"tst $src1, $src2",
2111                        (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, 0), 3>;
2112
2113def : InstAlias<"tst $src1, $src2$sh",
2114               (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, logical_shift32:$sh), 2>;
2115def : InstAlias<"tst $src1, $src2$sh",
2116               (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, logical_shift64:$sh), 2>;
2117
2118
2119def : Pat<(not GPR32:$Wm), (ORNWrr WZR, GPR32:$Wm)>;
2120def : Pat<(not GPR64:$Xm), (ORNXrr XZR, GPR64:$Xm)>;
2121
2122
2123//===----------------------------------------------------------------------===//
2124// One operand data processing instructions.
2125//===----------------------------------------------------------------------===//
2126
2127defm CLS    : OneOperandData<0b101, "cls">;
2128defm CLZ    : OneOperandData<0b100, "clz", ctlz>;
2129defm RBIT   : OneOperandData<0b000, "rbit", bitreverse>;
2130
2131def  REV16Wr : OneWRegData<0b001, "rev16",
2132                                  UnOpFrag<(rotr (bswap node:$LHS), (i64 16))>>;
2133def  REV16Xr : OneXRegData<0b001, "rev16", null_frag>;
2134
2135def : Pat<(cttz GPR32:$Rn),
2136          (CLZWr (RBITWr GPR32:$Rn))>;
2137def : Pat<(cttz GPR64:$Rn),
2138          (CLZXr (RBITXr GPR64:$Rn))>;
2139def : Pat<(ctlz (or (shl (xor (sra GPR32:$Rn, (i64 31)), GPR32:$Rn), (i64 1)),
2140                (i32 1))),
2141          (CLSWr GPR32:$Rn)>;
2142def : Pat<(ctlz (or (shl (xor (sra GPR64:$Rn, (i64 63)), GPR64:$Rn), (i64 1)),
2143                (i64 1))),
2144          (CLSXr GPR64:$Rn)>;
2145def : Pat<(int_aarch64_cls GPR32:$Rn), (CLSWr GPR32:$Rn)>;
2146def : Pat<(int_aarch64_cls64 GPR64:$Rm), (EXTRACT_SUBREG (CLSXr GPR64:$Rm), sub_32)>;
2147
2148// Unlike the other one operand instructions, the instructions with the "rev"
2149// mnemonic do *not* just different in the size bit, but actually use different
2150// opcode bits for the different sizes.
2151def REVWr   : OneWRegData<0b010, "rev", bswap>;
2152def REVXr   : OneXRegData<0b011, "rev", bswap>;
2153def REV32Xr : OneXRegData<0b010, "rev32",
2154                                 UnOpFrag<(rotr (bswap node:$LHS), (i64 32))>>;
2155
2156def : InstAlias<"rev64 $Rd, $Rn", (REVXr GPR64:$Rd, GPR64:$Rn), 0>;
2157
2158// The bswap commutes with the rotr so we want a pattern for both possible
2159// orders.
2160def : Pat<(bswap (rotr GPR32:$Rn, (i64 16))), (REV16Wr GPR32:$Rn)>;
2161def : Pat<(bswap (rotr GPR64:$Rn, (i64 32))), (REV32Xr GPR64:$Rn)>;
2162
2163// Match (srl (bswap x), C) -> revC if the upper bswap bits are known zero.
2164def : Pat<(srl (bswap top16Zero:$Rn), (i64 16)), (REV16Wr GPR32:$Rn)>;
2165def : Pat<(srl (bswap top32Zero:$Rn), (i64 32)), (REV32Xr GPR64:$Rn)>;
2166
2167def : Pat<(or (and (srl GPR64:$Rn, (i64 8)), (i64 0x00ff00ff00ff00ff)),
2168              (and (shl GPR64:$Rn, (i64 8)), (i64 0xff00ff00ff00ff00))),
2169          (REV16Xr GPR64:$Rn)>;
2170
2171//===----------------------------------------------------------------------===//
2172// Bitfield immediate extraction instruction.
2173//===----------------------------------------------------------------------===//
2174let hasSideEffects = 0 in
2175defm EXTR : ExtractImm<"extr">;
2176def : InstAlias<"ror $dst, $src, $shift",
2177            (EXTRWrri GPR32:$dst, GPR32:$src, GPR32:$src, imm0_31:$shift)>;
2178def : InstAlias<"ror $dst, $src, $shift",
2179            (EXTRXrri GPR64:$dst, GPR64:$src, GPR64:$src, imm0_63:$shift)>;
2180
2181def : Pat<(rotr GPR32:$Rn, (i64 imm0_31:$imm)),
2182          (EXTRWrri GPR32:$Rn, GPR32:$Rn, imm0_31:$imm)>;
2183def : Pat<(rotr GPR64:$Rn, (i64 imm0_63:$imm)),
2184          (EXTRXrri GPR64:$Rn, GPR64:$Rn, imm0_63:$imm)>;
2185
2186//===----------------------------------------------------------------------===//
2187// Other bitfield immediate instructions.
2188//===----------------------------------------------------------------------===//
2189let hasSideEffects = 0 in {
2190defm BFM  : BitfieldImmWith2RegArgs<0b01, "bfm">;
2191defm SBFM : BitfieldImm<0b00, "sbfm">;
2192defm UBFM : BitfieldImm<0b10, "ubfm">;
2193}
2194
2195def i32shift_a : Operand<i64>, SDNodeXForm<imm, [{
2196  uint64_t enc = (32 - N->getZExtValue()) & 0x1f;
2197  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2198}]>;
2199
2200def i32shift_b : Operand<i64>, SDNodeXForm<imm, [{
2201  uint64_t enc = 31 - N->getZExtValue();
2202  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2203}]>;
2204
2205// min(7, 31 - shift_amt)
2206def i32shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
2207  uint64_t enc = 31 - N->getZExtValue();
2208  enc = enc > 7 ? 7 : enc;
2209  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2210}]>;
2211
2212// min(15, 31 - shift_amt)
2213def i32shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
2214  uint64_t enc = 31 - N->getZExtValue();
2215  enc = enc > 15 ? 15 : enc;
2216  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2217}]>;
2218
2219def i64shift_a : Operand<i64>, SDNodeXForm<imm, [{
2220  uint64_t enc = (64 - N->getZExtValue()) & 0x3f;
2221  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2222}]>;
2223
2224def i64shift_b : Operand<i64>, SDNodeXForm<imm, [{
2225  uint64_t enc = 63 - N->getZExtValue();
2226  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2227}]>;
2228
2229// min(7, 63 - shift_amt)
2230def i64shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
2231  uint64_t enc = 63 - N->getZExtValue();
2232  enc = enc > 7 ? 7 : enc;
2233  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2234}]>;
2235
2236// min(15, 63 - shift_amt)
2237def i64shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
2238  uint64_t enc = 63 - N->getZExtValue();
2239  enc = enc > 15 ? 15 : enc;
2240  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2241}]>;
2242
2243// min(31, 63 - shift_amt)
2244def i64shift_sext_i32 : Operand<i64>, SDNodeXForm<imm, [{
2245  uint64_t enc = 63 - N->getZExtValue();
2246  enc = enc > 31 ? 31 : enc;
2247  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2248}]>;
2249
2250def : Pat<(shl GPR32:$Rn, (i64 imm0_31:$imm)),
2251          (UBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
2252                              (i64 (i32shift_b imm0_31:$imm)))>;
2253def : Pat<(shl GPR64:$Rn, (i64 imm0_63:$imm)),
2254          (UBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
2255                              (i64 (i64shift_b imm0_63:$imm)))>;
2256
2257let AddedComplexity = 10 in {
2258def : Pat<(sra GPR32:$Rn, (i64 imm0_31:$imm)),
2259          (SBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
2260def : Pat<(sra GPR64:$Rn, (i64 imm0_63:$imm)),
2261          (SBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
2262}
2263
2264def : InstAlias<"asr $dst, $src, $shift",
2265                (SBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
2266def : InstAlias<"asr $dst, $src, $shift",
2267                (SBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
2268def : InstAlias<"sxtb $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
2269def : InstAlias<"sxtb $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
2270def : InstAlias<"sxth $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
2271def : InstAlias<"sxth $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
2272def : InstAlias<"sxtw $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;
2273
2274def : Pat<(srl GPR32:$Rn, (i64 imm0_31:$imm)),
2275          (UBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
2276def : Pat<(srl GPR64:$Rn, (i64 imm0_63:$imm)),
2277          (UBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
2278
2279def : InstAlias<"lsr $dst, $src, $shift",
2280                (UBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
2281def : InstAlias<"lsr $dst, $src, $shift",
2282                (UBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
2283def : InstAlias<"uxtb $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
2284def : InstAlias<"uxtb $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
2285def : InstAlias<"uxth $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
2286def : InstAlias<"uxth $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
2287def : InstAlias<"uxtw $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;
2288
2289//===----------------------------------------------------------------------===//
2290// Conditional comparison instructions.
2291//===----------------------------------------------------------------------===//
2292defm CCMN : CondComparison<0, "ccmn", AArch64ccmn>;
2293defm CCMP : CondComparison<1, "ccmp", AArch64ccmp>;
2294
2295//===----------------------------------------------------------------------===//
2296// Conditional select instructions.
2297//===----------------------------------------------------------------------===//
2298defm CSEL  : CondSelect<0, 0b00, "csel">;
2299
2300def inc : PatFrag<(ops node:$in), (add node:$in, 1)>;
2301defm CSINC : CondSelectOp<0, 0b01, "csinc", inc>;
2302defm CSINV : CondSelectOp<1, 0b00, "csinv", not>;
2303defm CSNEG : CondSelectOp<1, 0b01, "csneg", ineg>;
2304
2305def : Pat<(AArch64csinv GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
2306          (CSINVWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
2307def : Pat<(AArch64csinv GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
2308          (CSINVXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
2309def : Pat<(AArch64csneg GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
2310          (CSNEGWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
2311def : Pat<(AArch64csneg GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
2312          (CSNEGXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
2313def : Pat<(AArch64csinc GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
2314          (CSINCWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
2315def : Pat<(AArch64csinc GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
2316          (CSINCXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
2317
2318def : Pat<(AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV),
2319          (CSINCWr WZR, WZR, (i32 imm:$cc))>;
2320def : Pat<(AArch64csel (i64 0), (i64 1), (i32 imm:$cc), NZCV),
2321          (CSINCXr XZR, XZR, (i32 imm:$cc))>;
2322def : Pat<(AArch64csel GPR32:$tval, (i32 1), (i32 imm:$cc), NZCV),
2323          (CSINCWr GPR32:$tval, WZR, (i32 imm:$cc))>;
2324def : Pat<(AArch64csel GPR64:$tval, (i64 1), (i32 imm:$cc), NZCV),
2325          (CSINCXr GPR64:$tval, XZR, (i32 imm:$cc))>;
2326def : Pat<(AArch64csel (i32 1), GPR32:$fval, (i32 imm:$cc), NZCV),
2327          (CSINCWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
2328def : Pat<(AArch64csel (i64 1), GPR64:$fval, (i32 imm:$cc), NZCV),
2329          (CSINCXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;
2330def : Pat<(AArch64csel (i32 0), (i32 -1), (i32 imm:$cc), NZCV),
2331          (CSINVWr WZR, WZR, (i32 imm:$cc))>;
2332def : Pat<(AArch64csel (i64 0), (i64 -1), (i32 imm:$cc), NZCV),
2333          (CSINVXr XZR, XZR, (i32 imm:$cc))>;
2334def : Pat<(AArch64csel GPR32:$tval, (i32 -1), (i32 imm:$cc), NZCV),
2335          (CSINVWr GPR32:$tval, WZR, (i32 imm:$cc))>;
2336def : Pat<(AArch64csel GPR64:$tval, (i64 -1), (i32 imm:$cc), NZCV),
2337          (CSINVXr GPR64:$tval, XZR, (i32 imm:$cc))>;
2338def : Pat<(AArch64csel (i32 -1), GPR32:$fval, (i32 imm:$cc), NZCV),
2339          (CSINVWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
2340def : Pat<(AArch64csel (i64 -1), GPR64:$fval, (i32 imm:$cc), NZCV),
2341          (CSINVXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;
2342
2343def : Pat<(add GPR32:$val, (AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV)),
2344          (CSINCWr GPR32:$val, GPR32:$val, (i32 imm:$cc))>;
2345def : Pat<(add GPR64:$val, (zext (AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV))),
2346          (CSINCXr GPR64:$val, GPR64:$val, (i32 imm:$cc))>;
2347
2348// The inverse of the condition code from the alias instruction is what is used
2349// in the aliased instruction. The parser all ready inverts the condition code
2350// for these aliases.
2351def : InstAlias<"cset $dst, $cc",
2352                (CSINCWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
2353def : InstAlias<"cset $dst, $cc",
2354                (CSINCXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;
2355
2356def : InstAlias<"csetm $dst, $cc",
2357                (CSINVWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
2358def : InstAlias<"csetm $dst, $cc",
2359                (CSINVXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;
2360
2361def : InstAlias<"cinc $dst, $src, $cc",
2362                (CSINCWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
2363def : InstAlias<"cinc $dst, $src, $cc",
2364                (CSINCXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
2365
2366def : InstAlias<"cinv $dst, $src, $cc",
2367                (CSINVWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
2368def : InstAlias<"cinv $dst, $src, $cc",
2369                (CSINVXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
2370
2371def : InstAlias<"cneg $dst, $src, $cc",
2372                (CSNEGWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
2373def : InstAlias<"cneg $dst, $src, $cc",
2374                (CSNEGXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
2375
2376//===----------------------------------------------------------------------===//
2377// PC-relative instructions.
2378//===----------------------------------------------------------------------===//
2379let isReMaterializable = 1 in {
2380let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
2381def ADR  : ADRI<0, "adr", adrlabel,
2382                [(set GPR64:$Xd, (AArch64adr tglobaladdr:$label))]>;
2383} // hasSideEffects = 0
2384
2385def ADRP : ADRI<1, "adrp", adrplabel,
2386                [(set GPR64:$Xd, (AArch64adrp tglobaladdr:$label))]>;
2387} // isReMaterializable = 1
2388
2389// page address of a constant pool entry, block address
2390def : Pat<(AArch64adr tconstpool:$cp), (ADR tconstpool:$cp)>;
2391def : Pat<(AArch64adr tblockaddress:$cp), (ADR tblockaddress:$cp)>;
2392def : Pat<(AArch64adr texternalsym:$sym), (ADR texternalsym:$sym)>;
2393def : Pat<(AArch64adr tjumptable:$sym), (ADR tjumptable:$sym)>;
2394def : Pat<(AArch64adrp tconstpool:$cp), (ADRP tconstpool:$cp)>;
2395def : Pat<(AArch64adrp tblockaddress:$cp), (ADRP tblockaddress:$cp)>;
2396def : Pat<(AArch64adrp texternalsym:$sym), (ADRP texternalsym:$sym)>;
2397
2398//===----------------------------------------------------------------------===//
2399// Unconditional branch (register) instructions.
2400//===----------------------------------------------------------------------===//
2401
2402let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
2403def RET  : BranchReg<0b0010, "ret", []>;
2404def DRPS : SpecialReturn<0b0101, "drps">;
2405def ERET : SpecialReturn<0b0100, "eret">;
2406} // isReturn = 1, isTerminator = 1, isBarrier = 1
2407
2408// Default to the LR register.
2409def : InstAlias<"ret", (RET LR)>;
2410
2411let isCall = 1, Defs = [LR], Uses = [SP] in {
2412  def BLR : BranchReg<0b0001, "blr", []>;
2413  def BLRNoIP : Pseudo<(outs), (ins GPR64noip:$Rn), []>,
2414                Sched<[WriteBrReg]>,
2415                PseudoInstExpansion<(BLR GPR64:$Rn)>;
2416  def BLR_RVMARKER : Pseudo<(outs), (ins variable_ops), []>,
2417                     Sched<[WriteBrReg]>;
2418  def BLR_BTI : Pseudo<(outs), (ins variable_ops), []>,
2419                Sched<[WriteBrReg]>;
2420} // isCall
2421
2422def : Pat<(AArch64call GPR64:$Rn),
2423          (BLR GPR64:$Rn)>,
2424      Requires<[NoSLSBLRMitigation]>;
2425def : Pat<(AArch64call GPR64noip:$Rn),
2426          (BLRNoIP GPR64noip:$Rn)>,
2427      Requires<[SLSBLRMitigation]>;
2428
2429def : Pat<(AArch64call_rvmarker (i64 tglobaladdr:$rvfunc), GPR64:$Rn),
2430          (BLR_RVMARKER tglobaladdr:$rvfunc, GPR64:$Rn)>,
2431      Requires<[NoSLSBLRMitigation]>;
2432
2433def : Pat<(AArch64call_bti GPR64:$Rn),
2434          (BLR_BTI GPR64:$Rn)>,
2435      Requires<[NoSLSBLRMitigation]>;
2436
2437let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
2438def BR  : BranchReg<0b0000, "br", [(brind GPR64:$Rn)]>;
2439} // isBranch, isTerminator, isBarrier, isIndirectBranch
2440
2441// Create a separate pseudo-instruction for codegen to use so that we don't
2442// flag lr as used in every function. It'll be restored before the RET by the
2443// epilogue if it's legitimately used.
2444def RET_ReallyLR : Pseudo<(outs), (ins), [(AArch64retflag)]>,
2445                   Sched<[WriteBrReg]> {
2446  let isTerminator = 1;
2447  let isBarrier = 1;
2448  let isReturn = 1;
2449}
2450
2451// This is a directive-like pseudo-instruction. The purpose is to insert an
2452// R_AARCH64_TLSDESC_CALL relocation at the offset of the following instruction
2453// (which in the usual case is a BLR).
2454let hasSideEffects = 1 in
2455def TLSDESCCALL : Pseudo<(outs), (ins i64imm:$sym), []>, Sched<[]> {
2456  let AsmString = ".tlsdesccall $sym";
2457}
2458
2459// Pseudo instruction to tell the streamer to emit a 'B' character into the
2460// augmentation string.
2461def EMITBKEY : Pseudo<(outs), (ins), []>, Sched<[]> {}
2462
2463// Pseudo instruction to tell the streamer to emit a 'G' character into the
2464// augmentation string.
2465def EMITMTETAGGED : Pseudo<(outs), (ins), []>, Sched<[]> {}
2466
2467// FIXME: maybe the scratch register used shouldn't be fixed to X1?
2468// FIXME: can "hasSideEffects be dropped?
2469// This gets lowered to an instruction sequence which takes 16 bytes
2470let isCall = 1, Defs = [LR, X0, X1], hasSideEffects = 1, Size = 16,
2471    isCodeGenOnly = 1 in
2472def TLSDESC_CALLSEQ
2473    : Pseudo<(outs), (ins i64imm:$sym),
2474             [(AArch64tlsdesc_callseq tglobaltlsaddr:$sym)]>,
2475      Sched<[WriteI, WriteLD, WriteI, WriteBrReg]>;
2476def : Pat<(AArch64tlsdesc_callseq texternalsym:$sym),
2477          (TLSDESC_CALLSEQ texternalsym:$sym)>;
2478
2479//===----------------------------------------------------------------------===//
2480// Conditional branch (immediate) instruction.
2481//===----------------------------------------------------------------------===//
2482def Bcc : BranchCond<0, "b">;
2483
2484// Armv8.8-A variant form which hints to the branch predictor that
2485// this branch is very likely to go the same way nearly all the time
2486// (even though it is not known at compile time _which_ way that is).
2487def BCcc : BranchCond<1, "bc">, Requires<[HasHBC]>;
2488
2489//===----------------------------------------------------------------------===//
2490// Compare-and-branch instructions.
2491//===----------------------------------------------------------------------===//
2492defm CBZ  : CmpBranch<0, "cbz", AArch64cbz>;
2493defm CBNZ : CmpBranch<1, "cbnz", AArch64cbnz>;
2494
2495//===----------------------------------------------------------------------===//
2496// Test-bit-and-branch instructions.
2497//===----------------------------------------------------------------------===//
2498defm TBZ  : TestBranch<0, "tbz", AArch64tbz>;
2499defm TBNZ : TestBranch<1, "tbnz", AArch64tbnz>;
2500
2501//===----------------------------------------------------------------------===//
2502// Unconditional branch (immediate) instructions.
2503//===----------------------------------------------------------------------===//
2504let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
2505def B  : BranchImm<0, "b", [(br bb:$addr)]>;
2506} // isBranch, isTerminator, isBarrier
2507
2508let isCall = 1, Defs = [LR], Uses = [SP] in {
2509def BL : CallImm<1, "bl", [(AArch64call tglobaladdr:$addr)]>;
2510} // isCall
2511def : Pat<(AArch64call texternalsym:$func), (BL texternalsym:$func)>;
2512
2513//===----------------------------------------------------------------------===//
2514// Exception generation instructions.
2515//===----------------------------------------------------------------------===//
2516let isTrap = 1 in {
2517def BRK   : ExceptionGeneration<0b001, 0b00, "brk",
2518                                [(int_aarch64_break timm32_0_65535:$imm)]>;
2519}
2520def DCPS1 : ExceptionGeneration<0b101, 0b01, "dcps1">;
2521def DCPS2 : ExceptionGeneration<0b101, 0b10, "dcps2">;
2522def DCPS3 : ExceptionGeneration<0b101, 0b11, "dcps3">, Requires<[HasEL3]>;
2523def HLT   : ExceptionGeneration<0b010, 0b00, "hlt">;
2524def HVC   : ExceptionGeneration<0b000, 0b10, "hvc">;
2525def SMC   : ExceptionGeneration<0b000, 0b11, "smc">, Requires<[HasEL3]>;
2526def SVC   : ExceptionGeneration<0b000, 0b01, "svc">;
2527
2528// DCPSn defaults to an immediate operand of zero if unspecified.
2529def : InstAlias<"dcps1", (DCPS1 0)>;
2530def : InstAlias<"dcps2", (DCPS2 0)>;
2531def : InstAlias<"dcps3", (DCPS3 0)>, Requires<[HasEL3]>;
2532
2533def UDF : UDFType<0, "udf">;
2534
2535//===----------------------------------------------------------------------===//
2536// Load instructions.
2537//===----------------------------------------------------------------------===//
2538
2539// Pair (indexed, offset)
2540defm LDPW : LoadPairOffset<0b00, 0, GPR32z, simm7s4, "ldp">;
2541defm LDPX : LoadPairOffset<0b10, 0, GPR64z, simm7s8, "ldp">;
2542defm LDPS : LoadPairOffset<0b00, 1, FPR32Op, simm7s4, "ldp">;
2543defm LDPD : LoadPairOffset<0b01, 1, FPR64Op, simm7s8, "ldp">;
2544defm LDPQ : LoadPairOffset<0b10, 1, FPR128Op, simm7s16, "ldp">;
2545
2546defm LDPSW : LoadPairOffset<0b01, 0, GPR64z, simm7s4, "ldpsw">;
2547
2548// Pair (pre-indexed)
2549def LDPWpre : LoadPairPreIdx<0b00, 0, GPR32z, simm7s4, "ldp">;
2550def LDPXpre : LoadPairPreIdx<0b10, 0, GPR64z, simm7s8, "ldp">;
2551def LDPSpre : LoadPairPreIdx<0b00, 1, FPR32Op, simm7s4, "ldp">;
2552def LDPDpre : LoadPairPreIdx<0b01, 1, FPR64Op, simm7s8, "ldp">;
2553def LDPQpre : LoadPairPreIdx<0b10, 1, FPR128Op, simm7s16, "ldp">;
2554
2555def LDPSWpre : LoadPairPreIdx<0b01, 0, GPR64z, simm7s4, "ldpsw">;
2556
2557// Pair (post-indexed)
2558def LDPWpost : LoadPairPostIdx<0b00, 0, GPR32z, simm7s4, "ldp">;
2559def LDPXpost : LoadPairPostIdx<0b10, 0, GPR64z, simm7s8, "ldp">;
2560def LDPSpost : LoadPairPostIdx<0b00, 1, FPR32Op, simm7s4, "ldp">;
2561def LDPDpost : LoadPairPostIdx<0b01, 1, FPR64Op, simm7s8, "ldp">;
2562def LDPQpost : LoadPairPostIdx<0b10, 1, FPR128Op, simm7s16, "ldp">;
2563
2564def LDPSWpost : LoadPairPostIdx<0b01, 0, GPR64z, simm7s4, "ldpsw">;
2565
2566
2567// Pair (no allocate)
2568defm LDNPW : LoadPairNoAlloc<0b00, 0, GPR32z, simm7s4, "ldnp">;
2569defm LDNPX : LoadPairNoAlloc<0b10, 0, GPR64z, simm7s8, "ldnp">;
2570defm LDNPS : LoadPairNoAlloc<0b00, 1, FPR32Op, simm7s4, "ldnp">;
2571defm LDNPD : LoadPairNoAlloc<0b01, 1, FPR64Op, simm7s8, "ldnp">;
2572defm LDNPQ : LoadPairNoAlloc<0b10, 1, FPR128Op, simm7s16, "ldnp">;
2573
2574def : Pat<(AArch64ldp (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
2575          (LDPXi GPR64sp:$Rn, simm7s8:$offset)>;
2576
2577//---
2578// (register offset)
2579//---
2580
2581// Integer
2582defm LDRBB : Load8RO<0b00,  0, 0b01, GPR32, "ldrb", i32, zextloadi8>;
2583defm LDRHH : Load16RO<0b01, 0, 0b01, GPR32, "ldrh", i32, zextloadi16>;
2584defm LDRW  : Load32RO<0b10, 0, 0b01, GPR32, "ldr", i32, load>;
2585defm LDRX  : Load64RO<0b11, 0, 0b01, GPR64, "ldr", i64, load>;
2586
2587// Floating-point
2588defm LDRB : Load8RO<0b00,   1, 0b01, FPR8Op,   "ldr", untyped, load>;
2589defm LDRH : Load16RO<0b01,  1, 0b01, FPR16Op,  "ldr", f16, load>;
2590defm LDRS : Load32RO<0b10,  1, 0b01, FPR32Op,  "ldr", f32, load>;
2591defm LDRD : Load64RO<0b11,  1, 0b01, FPR64Op,  "ldr", f64, load>;
2592defm LDRQ : Load128RO<0b00, 1, 0b11, FPR128Op, "ldr", f128, load>;
2593
2594// Load sign-extended half-word
2595defm LDRSHW : Load16RO<0b01, 0, 0b11, GPR32, "ldrsh", i32, sextloadi16>;
2596defm LDRSHX : Load16RO<0b01, 0, 0b10, GPR64, "ldrsh", i64, sextloadi16>;
2597
2598// Load sign-extended byte
2599defm LDRSBW : Load8RO<0b00, 0, 0b11, GPR32, "ldrsb", i32, sextloadi8>;
2600defm LDRSBX : Load8RO<0b00, 0, 0b10, GPR64, "ldrsb", i64, sextloadi8>;
2601
2602// Load sign-extended word
2603defm LDRSW  : Load32RO<0b10, 0, 0b10, GPR64, "ldrsw", i64, sextloadi32>;
2604
2605// Pre-fetch.
2606defm PRFM : PrefetchRO<0b11, 0, 0b10, "prfm">;
2607
2608// For regular load, we do not have any alignment requirement.
2609// Thus, it is safe to directly map the vector loads with interesting
2610// addressing modes.
2611// FIXME: We could do the same for bitconvert to floating point vectors.
2612multiclass ScalToVecROLoadPat<ROAddrMode ro, SDPatternOperator loadop,
2613                              ValueType ScalTy, ValueType VecTy,
2614                              Instruction LOADW, Instruction LOADX,
2615                              SubRegIndex sub> {
2616  def : Pat<(VecTy (scalar_to_vector (ScalTy
2617              (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset))))),
2618            (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
2619                           (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset),
2620                           sub)>;
2621
2622  def : Pat<(VecTy (scalar_to_vector (ScalTy
2623              (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset))))),
2624            (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
2625                           (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset),
2626                           sub)>;
2627}
2628
2629let AddedComplexity = 10 in {
2630defm : ScalToVecROLoadPat<ro8,  extloadi8,  i32, v8i8,  LDRBroW, LDRBroX, bsub>;
2631defm : ScalToVecROLoadPat<ro8,  extloadi8,  i32, v16i8, LDRBroW, LDRBroX, bsub>;
2632
2633defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v4i16, LDRHroW, LDRHroX, hsub>;
2634defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v8i16, LDRHroW, LDRHroX, hsub>;
2635
2636defm : ScalToVecROLoadPat<ro16, load,       i32, v4f16, LDRHroW, LDRHroX, hsub>;
2637defm : ScalToVecROLoadPat<ro16, load,       i32, v8f16, LDRHroW, LDRHroX, hsub>;
2638
2639defm : ScalToVecROLoadPat<ro32, load,       i32, v2i32, LDRSroW, LDRSroX, ssub>;
2640defm : ScalToVecROLoadPat<ro32, load,       i32, v4i32, LDRSroW, LDRSroX, ssub>;
2641
2642defm : ScalToVecROLoadPat<ro32, load,       f32, v2f32, LDRSroW, LDRSroX, ssub>;
2643defm : ScalToVecROLoadPat<ro32, load,       f32, v4f32, LDRSroW, LDRSroX, ssub>;
2644
2645defm : ScalToVecROLoadPat<ro64, load,       i64, v2i64, LDRDroW, LDRDroX, dsub>;
2646
2647defm : ScalToVecROLoadPat<ro64, load,       f64, v2f64, LDRDroW, LDRDroX, dsub>;
2648
2649
2650def : Pat <(v1i64 (scalar_to_vector (i64
2651                      (load (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
2652                                           ro_Wextend64:$extend))))),
2653           (LDRDroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;
2654
2655def : Pat <(v1i64 (scalar_to_vector (i64
2656                      (load (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
2657                                           ro_Xextend64:$extend))))),
2658           (LDRDroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
2659}
2660
2661// Match all load 64 bits width whose type is compatible with FPR64
2662multiclass VecROLoadPat<ROAddrMode ro, ValueType VecTy,
2663                        Instruction LOADW, Instruction LOADX> {
2664
2665  def : Pat<(VecTy (load (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
2666            (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
2667
2668  def : Pat<(VecTy (load (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
2669            (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
2670}
2671
2672let AddedComplexity = 10 in {
2673let Predicates = [IsLE] in {
2674  // We must do vector loads with LD1 in big-endian.
2675  defm : VecROLoadPat<ro64, v2i32, LDRDroW, LDRDroX>;
2676  defm : VecROLoadPat<ro64, v2f32, LDRDroW, LDRDroX>;
2677  defm : VecROLoadPat<ro64, v8i8,  LDRDroW, LDRDroX>;
2678  defm : VecROLoadPat<ro64, v4i16, LDRDroW, LDRDroX>;
2679  defm : VecROLoadPat<ro64, v4f16, LDRDroW, LDRDroX>;
2680  defm : VecROLoadPat<ro64, v4bf16, LDRDroW, LDRDroX>;
2681}
2682
2683defm : VecROLoadPat<ro64, v1i64,  LDRDroW, LDRDroX>;
2684defm : VecROLoadPat<ro64, v1f64,  LDRDroW, LDRDroX>;
2685
2686// Match all load 128 bits width whose type is compatible with FPR128
2687let Predicates = [IsLE] in {
2688  // We must do vector loads with LD1 in big-endian.
2689  defm : VecROLoadPat<ro128, v2i64,  LDRQroW, LDRQroX>;
2690  defm : VecROLoadPat<ro128, v2f64,  LDRQroW, LDRQroX>;
2691  defm : VecROLoadPat<ro128, v4i32,  LDRQroW, LDRQroX>;
2692  defm : VecROLoadPat<ro128, v4f32,  LDRQroW, LDRQroX>;
2693  defm : VecROLoadPat<ro128, v8i16,  LDRQroW, LDRQroX>;
2694  defm : VecROLoadPat<ro128, v8f16,  LDRQroW, LDRQroX>;
2695  defm : VecROLoadPat<ro128, v8bf16,  LDRQroW, LDRQroX>;
2696  defm : VecROLoadPat<ro128, v16i8,  LDRQroW, LDRQroX>;
2697}
2698} // AddedComplexity = 10
2699
2700// zextload -> i64
2701multiclass ExtLoadTo64ROPat<ROAddrMode ro, SDPatternOperator loadop,
2702                            Instruction INSTW, Instruction INSTX> {
2703  def : Pat<(i64 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
2704            (SUBREG_TO_REG (i64 0),
2705                           (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
2706                           sub_32)>;
2707
2708  def : Pat<(i64 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
2709            (SUBREG_TO_REG (i64 0),
2710                           (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
2711                           sub_32)>;
2712}
2713
2714let AddedComplexity = 10 in {
2715  defm : ExtLoadTo64ROPat<ro8,  zextloadi8,  LDRBBroW, LDRBBroX>;
2716  defm : ExtLoadTo64ROPat<ro16, zextloadi16, LDRHHroW, LDRHHroX>;
2717  defm : ExtLoadTo64ROPat<ro32, zextloadi32, LDRWroW,  LDRWroX>;
2718
2719  // zextloadi1 -> zextloadi8
2720  defm : ExtLoadTo64ROPat<ro8,  zextloadi1,  LDRBBroW, LDRBBroX>;
2721
2722  // extload -> zextload
2723  defm : ExtLoadTo64ROPat<ro8,  extloadi8,   LDRBBroW, LDRBBroX>;
2724  defm : ExtLoadTo64ROPat<ro16, extloadi16,  LDRHHroW, LDRHHroX>;
2725  defm : ExtLoadTo64ROPat<ro32, extloadi32,  LDRWroW,  LDRWroX>;
2726
2727  // extloadi1 -> zextloadi8
2728  defm : ExtLoadTo64ROPat<ro8,  extloadi1,   LDRBBroW, LDRBBroX>;
2729}
2730
2731
2732// zextload -> i64
2733multiclass ExtLoadTo32ROPat<ROAddrMode ro, SDPatternOperator loadop,
2734                            Instruction INSTW, Instruction INSTX> {
2735  def : Pat<(i32 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
2736            (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
2737
2738  def : Pat<(i32 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
2739            (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
2740
2741}
2742
2743let AddedComplexity = 10 in {
2744  // extload -> zextload
2745  defm : ExtLoadTo32ROPat<ro8,  extloadi8,   LDRBBroW, LDRBBroX>;
2746  defm : ExtLoadTo32ROPat<ro16, extloadi16,  LDRHHroW, LDRHHroX>;
2747  defm : ExtLoadTo32ROPat<ro32, extloadi32,  LDRWroW,  LDRWroX>;
2748
2749  // zextloadi1 -> zextloadi8
2750  defm : ExtLoadTo32ROPat<ro8, zextloadi1, LDRBBroW, LDRBBroX>;
2751}
2752
2753//---
2754// (unsigned immediate)
2755//---
2756defm LDRX : LoadUI<0b11, 0, 0b01, GPR64z, uimm12s8, "ldr",
2757                   [(set GPR64z:$Rt,
2758                         (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
2759defm LDRW : LoadUI<0b10, 0, 0b01, GPR32z, uimm12s4, "ldr",
2760                   [(set GPR32z:$Rt,
2761                         (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
2762defm LDRB : LoadUI<0b00, 1, 0b01, FPR8Op, uimm12s1, "ldr",
2763                   [(set FPR8Op:$Rt,
2764                         (load (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)))]>;
2765defm LDRH : LoadUI<0b01, 1, 0b01, FPR16Op, uimm12s2, "ldr",
2766                   [(set (f16 FPR16Op:$Rt),
2767                         (load (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)))]>;
2768defm LDRS : LoadUI<0b10, 1, 0b01, FPR32Op, uimm12s4, "ldr",
2769                   [(set (f32 FPR32Op:$Rt),
2770                         (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
2771defm LDRD : LoadUI<0b11, 1, 0b01, FPR64Op, uimm12s8, "ldr",
2772                   [(set (f64 FPR64Op:$Rt),
2773                         (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
2774defm LDRQ : LoadUI<0b00, 1, 0b11, FPR128Op, uimm12s16, "ldr",
2775                 [(set (f128 FPR128Op:$Rt),
2776                       (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)))]>;
2777
2778// bf16 load pattern
2779def : Pat <(bf16 (load (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
2780           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
2781
2782// For regular load, we do not have any alignment requirement.
2783// Thus, it is safe to directly map the vector loads with interesting
2784// addressing modes.
2785// FIXME: We could do the same for bitconvert to floating point vectors.
2786def : Pat <(v8i8 (scalar_to_vector (i32
2787               (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
2788           (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
2789                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
2790def : Pat <(v16i8 (scalar_to_vector (i32
2791               (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
2792           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
2793                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
2794def : Pat <(v4i16 (scalar_to_vector (i32
2795               (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
2796           (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
2797                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
2798def : Pat <(v8i16 (scalar_to_vector (i32
2799               (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
2800           (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
2801                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
2802def : Pat <(v2i32 (scalar_to_vector (i32
2803               (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
2804           (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
2805                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
2806def : Pat <(v4i32 (scalar_to_vector (i32
2807               (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
2808           (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
2809                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
2810def : Pat <(v1i64 (scalar_to_vector (i64
2811               (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
2812           (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2813def : Pat <(v2i64 (scalar_to_vector (i64
2814               (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
2815           (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
2816                          (LDRDui GPR64sp:$Rn, uimm12s8:$offset), dsub)>;
2817
2818// Match all load 64 bits width whose type is compatible with FPR64
2819let Predicates = [IsLE] in {
2820  // We must use LD1 to perform vector loads in big-endian.
2821  def : Pat<(v2f32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2822            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2823  def : Pat<(v8i8 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2824            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2825  def : Pat<(v4i16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2826            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2827  def : Pat<(v2i32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2828            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2829  def : Pat<(v4f16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2830            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2831  def : Pat<(v4bf16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2832            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2833}
2834def : Pat<(v1f64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2835          (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2836def : Pat<(v1i64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2837          (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2838
2839// Match all load 128 bits width whose type is compatible with FPR128
2840let Predicates = [IsLE] in {
2841  // We must use LD1 to perform vector loads in big-endian.
2842  def : Pat<(v4f32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2843            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2844  def : Pat<(v2f64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2845            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2846  def : Pat<(v16i8 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2847            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2848  def : Pat<(v8i16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2849            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2850  def : Pat<(v4i32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2851            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2852  def : Pat<(v2i64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2853            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2854  def : Pat<(v8f16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2855            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2856  def : Pat<(v8bf16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2857            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2858}
2859def : Pat<(f128  (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2860          (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2861
2862defm LDRHH : LoadUI<0b01, 0, 0b01, GPR32, uimm12s2, "ldrh",
2863                    [(set GPR32:$Rt,
2864                          (zextloadi16 (am_indexed16 GPR64sp:$Rn,
2865                                                     uimm12s2:$offset)))]>;
2866defm LDRBB : LoadUI<0b00, 0, 0b01, GPR32, uimm12s1, "ldrb",
2867                    [(set GPR32:$Rt,
2868                          (zextloadi8 (am_indexed8 GPR64sp:$Rn,
2869                                                   uimm12s1:$offset)))]>;
2870// zextload -> i64
2871def : Pat<(i64 (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2872    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
2873def : Pat<(i64 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
2874    (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;
2875
2876// zextloadi1 -> zextloadi8
2877def : Pat<(i32 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2878          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
2879def : Pat<(i64 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2880    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
2881
2882// extload -> zextload
2883def : Pat<(i32 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
2884          (LDRHHui GPR64sp:$Rn, uimm12s2:$offset)>;
2885def : Pat<(i32 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2886          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
2887def : Pat<(i32 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2888          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
2889def : Pat<(i64 (extloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
2890    (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
2891def : Pat<(i64 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
2892    (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;
2893def : Pat<(i64 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2894    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
2895def : Pat<(i64 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2896    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
2897
2898// load sign-extended half-word
2899defm LDRSHW : LoadUI<0b01, 0, 0b11, GPR32, uimm12s2, "ldrsh",
2900                     [(set GPR32:$Rt,
2901                           (sextloadi16 (am_indexed16 GPR64sp:$Rn,
2902                                                      uimm12s2:$offset)))]>;
2903defm LDRSHX : LoadUI<0b01, 0, 0b10, GPR64, uimm12s2, "ldrsh",
2904                     [(set GPR64:$Rt,
2905                           (sextloadi16 (am_indexed16 GPR64sp:$Rn,
2906                                                      uimm12s2:$offset)))]>;
2907
2908// load sign-extended byte
2909defm LDRSBW : LoadUI<0b00, 0, 0b11, GPR32, uimm12s1, "ldrsb",
2910                     [(set GPR32:$Rt,
2911                           (sextloadi8 (am_indexed8 GPR64sp:$Rn,
2912                                                    uimm12s1:$offset)))]>;
2913defm LDRSBX : LoadUI<0b00, 0, 0b10, GPR64, uimm12s1, "ldrsb",
2914                     [(set GPR64:$Rt,
2915                           (sextloadi8 (am_indexed8 GPR64sp:$Rn,
2916                                                    uimm12s1:$offset)))]>;
2917
2918// load sign-extended word
2919defm LDRSW  : LoadUI<0b10, 0, 0b10, GPR64, uimm12s4, "ldrsw",
2920                     [(set GPR64:$Rt,
2921                           (sextloadi32 (am_indexed32 GPR64sp:$Rn,
2922                                                      uimm12s4:$offset)))]>;
2923
2924// load zero-extended word
2925def : Pat<(i64 (zextloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
2926      (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
2927
2928// Pre-fetch.
2929def PRFMui : PrefetchUI<0b11, 0, 0b10, "prfm",
2930                        [(AArch64Prefetch imm:$Rt,
2931                                        (am_indexed64 GPR64sp:$Rn,
2932                                                      uimm12s8:$offset))]>;
2933
2934def : InstAlias<"prfm $Rt, [$Rn]", (PRFMui prfop:$Rt, GPR64sp:$Rn, 0)>;
2935
2936//---
2937// (literal)
2938
2939def alignedglobal : PatLeaf<(iPTR iPTR:$label), [{
2940  if (auto *G = dyn_cast<GlobalAddressSDNode>(N)) {
2941    const DataLayout &DL = MF->getDataLayout();
2942    Align Align = G->getGlobal()->getPointerAlignment(DL);
2943    return Align >= 4 && G->getOffset() % 4 == 0;
2944  }
2945  if (auto *C = dyn_cast<ConstantPoolSDNode>(N))
2946    return C->getAlign() >= 4 && C->getOffset() % 4 == 0;
2947  return false;
2948}]>;
2949
2950def LDRWl : LoadLiteral<0b00, 0, GPR32z, "ldr",
2951  [(set GPR32z:$Rt, (load (AArch64adr alignedglobal:$label)))]>;
2952def LDRXl : LoadLiteral<0b01, 0, GPR64z, "ldr",
2953  [(set GPR64z:$Rt, (load (AArch64adr alignedglobal:$label)))]>;
2954def LDRSl : LoadLiteral<0b00, 1, FPR32Op, "ldr",
2955  [(set (f32 FPR32Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
2956def LDRDl : LoadLiteral<0b01, 1, FPR64Op, "ldr",
2957  [(set (f64 FPR64Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
2958def LDRQl : LoadLiteral<0b10, 1, FPR128Op, "ldr",
2959  [(set (f128 FPR128Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
2960
2961// load sign-extended word
2962def LDRSWl : LoadLiteral<0b10, 0, GPR64z, "ldrsw",
2963  [(set GPR64z:$Rt, (sextloadi32 (AArch64adr alignedglobal:$label)))]>;
2964
2965let AddedComplexity = 20 in {
2966def : Pat<(i64 (zextloadi32 (AArch64adr alignedglobal:$label))),
2967        (SUBREG_TO_REG (i64 0), (LDRWl $label), sub_32)>;
2968}
2969
2970// prefetch
2971def PRFMl : PrefetchLiteral<0b11, 0, "prfm", []>;
2972//                   [(AArch64Prefetch imm:$Rt, tglobaladdr:$label)]>;
2973
2974//---
2975// (unscaled immediate)
2976defm LDURX : LoadUnscaled<0b11, 0, 0b01, GPR64z, "ldur",
2977                    [(set GPR64z:$Rt,
2978                          (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
2979defm LDURW : LoadUnscaled<0b10, 0, 0b01, GPR32z, "ldur",
2980                    [(set GPR32z:$Rt,
2981                          (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
2982defm LDURB : LoadUnscaled<0b00, 1, 0b01, FPR8Op, "ldur",
2983                    [(set FPR8Op:$Rt,
2984                          (load (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
2985defm LDURH : LoadUnscaled<0b01, 1, 0b01, FPR16Op, "ldur",
2986                    [(set (f16 FPR16Op:$Rt),
2987                          (load (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
2988defm LDURS : LoadUnscaled<0b10, 1, 0b01, FPR32Op, "ldur",
2989                    [(set (f32 FPR32Op:$Rt),
2990                          (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
2991defm LDURD : LoadUnscaled<0b11, 1, 0b01, FPR64Op, "ldur",
2992                    [(set (f64 FPR64Op:$Rt),
2993                          (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
2994defm LDURQ : LoadUnscaled<0b00, 1, 0b11, FPR128Op, "ldur",
2995                    [(set (f128 FPR128Op:$Rt),
2996                          (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset)))]>;
2997
2998defm LDURHH
2999    : LoadUnscaled<0b01, 0, 0b01, GPR32, "ldurh",
3000             [(set GPR32:$Rt,
3001                    (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
3002defm LDURBB
3003    : LoadUnscaled<0b00, 0, 0b01, GPR32, "ldurb",
3004             [(set GPR32:$Rt,
3005                    (zextloadi8 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
3006
3007// Match all load 64 bits width whose type is compatible with FPR64
3008let Predicates = [IsLE] in {
3009  def : Pat<(v2f32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
3010            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
3011  def : Pat<(v2i32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
3012            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
3013  def : Pat<(v4i16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
3014            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
3015  def : Pat<(v8i8 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
3016            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
3017  def : Pat<(v4f16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
3018            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
3019}
3020def : Pat<(v1f64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
3021          (LDURDi GPR64sp:$Rn, simm9:$offset)>;
3022def : Pat<(v1i64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
3023          (LDURDi GPR64sp:$Rn, simm9:$offset)>;
3024
3025// Match all load 128 bits width whose type is compatible with FPR128
3026let Predicates = [IsLE] in {
3027  def : Pat<(v2f64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
3028            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
3029  def : Pat<(v2i64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
3030            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
3031  def : Pat<(v4f32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
3032            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
3033  def : Pat<(v4i32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
3034            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
3035  def : Pat<(v8i16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
3036            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
3037  def : Pat<(v16i8 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
3038            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
3039  def : Pat<(v8f16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
3040            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
3041}
3042
3043//  anyext -> zext
3044def : Pat<(i32 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
3045          (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
3046def : Pat<(i32 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
3047          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
3048def : Pat<(i32 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
3049          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
3050def : Pat<(i64 (extloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
3051    (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
3052def : Pat<(i64 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
3053    (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
3054def : Pat<(i64 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
3055    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
3056def : Pat<(i64 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
3057    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
3058// unscaled zext
3059def : Pat<(i32 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
3060          (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
3061def : Pat<(i32 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
3062          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
3063def : Pat<(i32 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
3064          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
3065def : Pat<(i64 (zextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
3066    (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
3067def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
3068    (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
3069def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
3070    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
3071def : Pat<(i64 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
3072    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
3073
3074
3075//---
3076// LDR mnemonics fall back to LDUR for negative or unaligned offsets.
3077
3078// Define new assembler match classes as we want to only match these when
3079// the don't otherwise match the scaled addressing mode for LDR/STR. Don't
3080// associate a DiagnosticType either, as we want the diagnostic for the
3081// canonical form (the scaled operand) to take precedence.
3082class SImm9OffsetOperand<int Width> : AsmOperandClass {
3083  let Name = "SImm9OffsetFB" # Width;
3084  let PredicateMethod = "isSImm9OffsetFB<" # Width # ">";
3085  let RenderMethod = "addImmOperands";
3086}
3087
3088def SImm9OffsetFB8Operand : SImm9OffsetOperand<8>;
3089def SImm9OffsetFB16Operand : SImm9OffsetOperand<16>;
3090def SImm9OffsetFB32Operand : SImm9OffsetOperand<32>;
3091def SImm9OffsetFB64Operand : SImm9OffsetOperand<64>;
3092def SImm9OffsetFB128Operand : SImm9OffsetOperand<128>;
3093
3094def simm9_offset_fb8 : Operand<i64> {
3095  let ParserMatchClass = SImm9OffsetFB8Operand;
3096}
3097def simm9_offset_fb16 : Operand<i64> {
3098  let ParserMatchClass = SImm9OffsetFB16Operand;
3099}
3100def simm9_offset_fb32 : Operand<i64> {
3101  let ParserMatchClass = SImm9OffsetFB32Operand;
3102}
3103def simm9_offset_fb64 : Operand<i64> {
3104  let ParserMatchClass = SImm9OffsetFB64Operand;
3105}
3106def simm9_offset_fb128 : Operand<i64> {
3107  let ParserMatchClass = SImm9OffsetFB128Operand;
3108}
3109
3110def : InstAlias<"ldr $Rt, [$Rn, $offset]",
3111                (LDURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
3112def : InstAlias<"ldr $Rt, [$Rn, $offset]",
3113                (LDURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
3114def : InstAlias<"ldr $Rt, [$Rn, $offset]",
3115                (LDURBi FPR8Op:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
3116def : InstAlias<"ldr $Rt, [$Rn, $offset]",
3117                (LDURHi FPR16Op:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
3118def : InstAlias<"ldr $Rt, [$Rn, $offset]",
3119                (LDURSi FPR32Op:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
3120def : InstAlias<"ldr $Rt, [$Rn, $offset]",
3121                (LDURDi FPR64Op:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
3122def : InstAlias<"ldr $Rt, [$Rn, $offset]",
3123               (LDURQi FPR128Op:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;
3124
3125// zextload -> i64
3126def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
3127  (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
3128def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
3129  (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
3130
3131// load sign-extended half-word
3132defm LDURSHW
3133    : LoadUnscaled<0b01, 0, 0b11, GPR32, "ldursh",
3134               [(set GPR32:$Rt,
3135                    (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
3136defm LDURSHX
3137    : LoadUnscaled<0b01, 0, 0b10, GPR64, "ldursh",
3138              [(set GPR64:$Rt,
3139                    (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
3140
3141// load sign-extended byte
3142defm LDURSBW
3143    : LoadUnscaled<0b00, 0, 0b11, GPR32, "ldursb",
3144                [(set GPR32:$Rt,
3145                      (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
3146defm LDURSBX
3147    : LoadUnscaled<0b00, 0, 0b10, GPR64, "ldursb",
3148                [(set GPR64:$Rt,
3149                      (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
3150
3151// load sign-extended word
3152defm LDURSW
3153    : LoadUnscaled<0b10, 0, 0b10, GPR64, "ldursw",
3154              [(set GPR64:$Rt,
3155                    (sextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
3156
3157// zero and sign extending aliases from generic LDR* mnemonics to LDUR*.
3158def : InstAlias<"ldrb $Rt, [$Rn, $offset]",
3159                (LDURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
3160def : InstAlias<"ldrh $Rt, [$Rn, $offset]",
3161                (LDURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
3162def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
3163                (LDURSBWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
3164def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
3165                (LDURSBXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
3166def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
3167                (LDURSHWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
3168def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
3169                (LDURSHXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
3170def : InstAlias<"ldrsw $Rt, [$Rn, $offset]",
3171                (LDURSWi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
3172
3173// Pre-fetch.
3174defm PRFUM : PrefetchUnscaled<0b11, 0, 0b10, "prfum",
3175                  [(AArch64Prefetch imm:$Rt,
3176                                  (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
3177
3178//---
3179// (unscaled immediate, unprivileged)
3180defm LDTRX : LoadUnprivileged<0b11, 0, 0b01, GPR64, "ldtr">;
3181defm LDTRW : LoadUnprivileged<0b10, 0, 0b01, GPR32, "ldtr">;
3182
3183defm LDTRH : LoadUnprivileged<0b01, 0, 0b01, GPR32, "ldtrh">;
3184defm LDTRB : LoadUnprivileged<0b00, 0, 0b01, GPR32, "ldtrb">;
3185
3186// load sign-extended half-word
3187defm LDTRSHW : LoadUnprivileged<0b01, 0, 0b11, GPR32, "ldtrsh">;
3188defm LDTRSHX : LoadUnprivileged<0b01, 0, 0b10, GPR64, "ldtrsh">;
3189
3190// load sign-extended byte
3191defm LDTRSBW : LoadUnprivileged<0b00, 0, 0b11, GPR32, "ldtrsb">;
3192defm LDTRSBX : LoadUnprivileged<0b00, 0, 0b10, GPR64, "ldtrsb">;
3193
3194// load sign-extended word
3195defm LDTRSW  : LoadUnprivileged<0b10, 0, 0b10, GPR64, "ldtrsw">;
3196
3197//---
3198// (immediate pre-indexed)
3199def LDRWpre : LoadPreIdx<0b10, 0, 0b01, GPR32z, "ldr">;
3200def LDRXpre : LoadPreIdx<0b11, 0, 0b01, GPR64z, "ldr">;
3201def LDRBpre : LoadPreIdx<0b00, 1, 0b01, FPR8Op,  "ldr">;
3202def LDRHpre : LoadPreIdx<0b01, 1, 0b01, FPR16Op, "ldr">;
3203def LDRSpre : LoadPreIdx<0b10, 1, 0b01, FPR32Op, "ldr">;
3204def LDRDpre : LoadPreIdx<0b11, 1, 0b01, FPR64Op, "ldr">;
3205def LDRQpre : LoadPreIdx<0b00, 1, 0b11, FPR128Op, "ldr">;
3206
3207// load sign-extended half-word
3208def LDRSHWpre : LoadPreIdx<0b01, 0, 0b11, GPR32z, "ldrsh">;
3209def LDRSHXpre : LoadPreIdx<0b01, 0, 0b10, GPR64z, "ldrsh">;
3210
3211// load sign-extended byte
3212def LDRSBWpre : LoadPreIdx<0b00, 0, 0b11, GPR32z, "ldrsb">;
3213def LDRSBXpre : LoadPreIdx<0b00, 0, 0b10, GPR64z, "ldrsb">;
3214
3215// load zero-extended byte
3216def LDRBBpre : LoadPreIdx<0b00, 0, 0b01, GPR32z, "ldrb">;
3217def LDRHHpre : LoadPreIdx<0b01, 0, 0b01, GPR32z, "ldrh">;
3218
3219// load sign-extended word
3220def LDRSWpre : LoadPreIdx<0b10, 0, 0b10, GPR64z, "ldrsw">;
3221
3222//---
3223// (immediate post-indexed)
3224def LDRWpost : LoadPostIdx<0b10, 0, 0b01, GPR32z, "ldr">;
3225def LDRXpost : LoadPostIdx<0b11, 0, 0b01, GPR64z, "ldr">;
3226def LDRBpost : LoadPostIdx<0b00, 1, 0b01, FPR8Op,  "ldr">;
3227def LDRHpost : LoadPostIdx<0b01, 1, 0b01, FPR16Op, "ldr">;
3228def LDRSpost : LoadPostIdx<0b10, 1, 0b01, FPR32Op, "ldr">;
3229def LDRDpost : LoadPostIdx<0b11, 1, 0b01, FPR64Op, "ldr">;
3230def LDRQpost : LoadPostIdx<0b00, 1, 0b11, FPR128Op, "ldr">;
3231
3232// load sign-extended half-word
3233def LDRSHWpost : LoadPostIdx<0b01, 0, 0b11, GPR32z, "ldrsh">;
3234def LDRSHXpost : LoadPostIdx<0b01, 0, 0b10, GPR64z, "ldrsh">;
3235
3236// load sign-extended byte
3237def LDRSBWpost : LoadPostIdx<0b00, 0, 0b11, GPR32z, "ldrsb">;
3238def LDRSBXpost : LoadPostIdx<0b00, 0, 0b10, GPR64z, "ldrsb">;
3239
3240// load zero-extended byte
3241def LDRBBpost : LoadPostIdx<0b00, 0, 0b01, GPR32z, "ldrb">;
3242def LDRHHpost : LoadPostIdx<0b01, 0, 0b01, GPR32z, "ldrh">;
3243
3244// load sign-extended word
3245def LDRSWpost : LoadPostIdx<0b10, 0, 0b10, GPR64z, "ldrsw">;
3246
3247//===----------------------------------------------------------------------===//
3248// Store instructions.
3249//===----------------------------------------------------------------------===//
3250
3251// Pair (indexed, offset)
3252// FIXME: Use dedicated range-checked addressing mode operand here.
3253defm STPW : StorePairOffset<0b00, 0, GPR32z, simm7s4, "stp">;
3254defm STPX : StorePairOffset<0b10, 0, GPR64z, simm7s8, "stp">;
3255defm STPS : StorePairOffset<0b00, 1, FPR32Op, simm7s4, "stp">;
3256defm STPD : StorePairOffset<0b01, 1, FPR64Op, simm7s8, "stp">;
3257defm STPQ : StorePairOffset<0b10, 1, FPR128Op, simm7s16, "stp">;
3258
3259// Pair (pre-indexed)
3260def STPWpre : StorePairPreIdx<0b00, 0, GPR32z, simm7s4, "stp">;
3261def STPXpre : StorePairPreIdx<0b10, 0, GPR64z, simm7s8, "stp">;
3262def STPSpre : StorePairPreIdx<0b00, 1, FPR32Op, simm7s4, "stp">;
3263def STPDpre : StorePairPreIdx<0b01, 1, FPR64Op, simm7s8, "stp">;
3264def STPQpre : StorePairPreIdx<0b10, 1, FPR128Op, simm7s16, "stp">;
3265
3266// Pair (pre-indexed)
3267def STPWpost : StorePairPostIdx<0b00, 0, GPR32z, simm7s4, "stp">;
3268def STPXpost : StorePairPostIdx<0b10, 0, GPR64z, simm7s8, "stp">;
3269def STPSpost : StorePairPostIdx<0b00, 1, FPR32Op, simm7s4, "stp">;
3270def STPDpost : StorePairPostIdx<0b01, 1, FPR64Op, simm7s8, "stp">;
3271def STPQpost : StorePairPostIdx<0b10, 1, FPR128Op, simm7s16, "stp">;
3272
3273// Pair (no allocate)
3274defm STNPW : StorePairNoAlloc<0b00, 0, GPR32z, simm7s4, "stnp">;
3275defm STNPX : StorePairNoAlloc<0b10, 0, GPR64z, simm7s8, "stnp">;
3276defm STNPS : StorePairNoAlloc<0b00, 1, FPR32Op, simm7s4, "stnp">;
3277defm STNPD : StorePairNoAlloc<0b01, 1, FPR64Op, simm7s8, "stnp">;
3278defm STNPQ : StorePairNoAlloc<0b10, 1, FPR128Op, simm7s16, "stnp">;
3279
3280def : Pat<(AArch64stp GPR64z:$Rt, GPR64z:$Rt2, (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
3281          (STPXi GPR64z:$Rt, GPR64z:$Rt2, GPR64sp:$Rn, simm7s8:$offset)>;
3282
3283def : Pat<(AArch64stnp FPR128:$Rt, FPR128:$Rt2, (am_indexed7s128 GPR64sp:$Rn, simm7s16:$offset)),
3284          (STNPQi FPR128:$Rt, FPR128:$Rt2, GPR64sp:$Rn, simm7s16:$offset)>;
3285
3286
3287//---
3288// (Register offset)
3289
3290// Integer
3291defm STRBB : Store8RO< 0b00, 0, 0b00, GPR32, "strb", i32, truncstorei8>;
3292defm STRHH : Store16RO<0b01, 0, 0b00, GPR32, "strh", i32, truncstorei16>;
3293defm STRW  : Store32RO<0b10, 0, 0b00, GPR32, "str",  i32, store>;
3294defm STRX  : Store64RO<0b11, 0, 0b00, GPR64, "str",  i64, store>;
3295
3296
3297// Floating-point
3298defm STRB : Store8RO< 0b00,  1, 0b00, FPR8Op,   "str", untyped, store>;
3299defm STRH : Store16RO<0b01,  1, 0b00, FPR16Op,  "str", f16,     store>;
3300defm STRS : Store32RO<0b10,  1, 0b00, FPR32Op,  "str", f32,     store>;
3301defm STRD : Store64RO<0b11,  1, 0b00, FPR64Op,  "str", f64,     store>;
3302defm STRQ : Store128RO<0b00, 1, 0b10, FPR128Op, "str">;
3303
3304let Predicates = [UseSTRQro], AddedComplexity = 10 in {
3305  def : Pat<(store (f128 FPR128:$Rt),
3306                        (ro_Windexed128 GPR64sp:$Rn, GPR32:$Rm,
3307                                        ro_Wextend128:$extend)),
3308            (STRQroW FPR128:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend128:$extend)>;
3309  def : Pat<(store (f128 FPR128:$Rt),
3310                        (ro_Xindexed128 GPR64sp:$Rn, GPR64:$Rm,
3311                                        ro_Xextend128:$extend)),
3312            (STRQroX FPR128:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro_Wextend128:$extend)>;
3313}
3314
3315multiclass TruncStoreFrom64ROPat<ROAddrMode ro, SDPatternOperator storeop,
3316                                 Instruction STRW, Instruction STRX> {
3317
3318  def : Pat<(storeop GPR64:$Rt,
3319                     (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
3320            (STRW (EXTRACT_SUBREG GPR64:$Rt, sub_32),
3321                  GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
3322
3323  def : Pat<(storeop GPR64:$Rt,
3324                     (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
3325            (STRX (EXTRACT_SUBREG GPR64:$Rt, sub_32),
3326                  GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
3327}
3328
3329let AddedComplexity = 10 in {
3330  // truncstore i64
3331  defm : TruncStoreFrom64ROPat<ro8,  truncstorei8,  STRBBroW, STRBBroX>;
3332  defm : TruncStoreFrom64ROPat<ro16, truncstorei16, STRHHroW, STRHHroX>;
3333  defm : TruncStoreFrom64ROPat<ro32, truncstorei32, STRWroW,  STRWroX>;
3334}
3335
3336multiclass VecROStorePat<ROAddrMode ro, ValueType VecTy, RegisterClass FPR,
3337                         Instruction STRW, Instruction STRX> {
3338  def : Pat<(store (VecTy FPR:$Rt),
3339                   (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
3340            (STRW FPR:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
3341
3342  def : Pat<(store (VecTy FPR:$Rt),
3343                   (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
3344            (STRX FPR:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
3345}
3346
3347let AddedComplexity = 10 in {
3348// Match all store 64 bits width whose type is compatible with FPR64
3349let Predicates = [IsLE] in {
3350  // We must use ST1 to store vectors in big-endian.
3351  defm : VecROStorePat<ro64, v2i32, FPR64, STRDroW, STRDroX>;
3352  defm : VecROStorePat<ro64, v2f32, FPR64, STRDroW, STRDroX>;
3353  defm : VecROStorePat<ro64, v4i16, FPR64, STRDroW, STRDroX>;
3354  defm : VecROStorePat<ro64, v8i8, FPR64, STRDroW, STRDroX>;
3355  defm : VecROStorePat<ro64, v4f16, FPR64, STRDroW, STRDroX>;
3356  defm : VecROStorePat<ro64, v4bf16, FPR64, STRDroW, STRDroX>;
3357}
3358
3359defm : VecROStorePat<ro64, v1i64, FPR64, STRDroW, STRDroX>;
3360defm : VecROStorePat<ro64, v1f64, FPR64, STRDroW, STRDroX>;
3361
3362// Match all store 128 bits width whose type is compatible with FPR128
3363let Predicates = [IsLE, UseSTRQro] in {
3364  // We must use ST1 to store vectors in big-endian.
3365  defm : VecROStorePat<ro128, v2i64, FPR128, STRQroW, STRQroX>;
3366  defm : VecROStorePat<ro128, v2f64, FPR128, STRQroW, STRQroX>;
3367  defm : VecROStorePat<ro128, v4i32, FPR128, STRQroW, STRQroX>;
3368  defm : VecROStorePat<ro128, v4f32, FPR128, STRQroW, STRQroX>;
3369  defm : VecROStorePat<ro128, v8i16, FPR128, STRQroW, STRQroX>;
3370  defm : VecROStorePat<ro128, v16i8, FPR128, STRQroW, STRQroX>;
3371  defm : VecROStorePat<ro128, v8f16, FPR128, STRQroW, STRQroX>;
3372  defm : VecROStorePat<ro128, v8bf16, FPR128, STRQroW, STRQroX>;
3373}
3374} // AddedComplexity = 10
3375
3376// Match stores from lane 0 to the appropriate subreg's store.
3377multiclass VecROStoreLane0Pat<ROAddrMode ro, SDPatternOperator storeop,
3378                              ValueType VecTy, ValueType STy,
3379                              SubRegIndex SubRegIdx,
3380                              Instruction STRW, Instruction STRX> {
3381
3382  def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
3383                     (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
3384            (STRW (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
3385                  GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
3386
3387  def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
3388                     (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
3389            (STRX (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
3390                  GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
3391}
3392
3393let AddedComplexity = 19 in {
3394  defm : VecROStoreLane0Pat<ro16, truncstorei16, v8i16, i32, hsub, STRHroW, STRHroX>;
3395  defm : VecROStoreLane0Pat<ro16,         store, v8f16, f16, hsub, STRHroW, STRHroX>;
3396  defm : VecROStoreLane0Pat<ro32,         store, v4i32, i32, ssub, STRSroW, STRSroX>;
3397  defm : VecROStoreLane0Pat<ro32,         store, v4f32, f32, ssub, STRSroW, STRSroX>;
3398  defm : VecROStoreLane0Pat<ro64,         store, v2i64, i64, dsub, STRDroW, STRDroX>;
3399  defm : VecROStoreLane0Pat<ro64,         store, v2f64, f64, dsub, STRDroW, STRDroX>;
3400}
3401
3402//---
3403// (unsigned immediate)
3404defm STRX : StoreUIz<0b11, 0, 0b00, GPR64z, uimm12s8, "str",
3405                   [(store GPR64z:$Rt,
3406                            (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
3407defm STRW : StoreUIz<0b10, 0, 0b00, GPR32z, uimm12s4, "str",
3408                    [(store GPR32z:$Rt,
3409                            (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
3410defm STRB : StoreUI<0b00, 1, 0b00, FPR8Op, uimm12s1, "str",
3411                    [(store FPR8Op:$Rt,
3412                            (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))]>;
3413defm STRH : StoreUI<0b01, 1, 0b00, FPR16Op, uimm12s2, "str",
3414                    [(store (f16 FPR16Op:$Rt),
3415                            (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))]>;
3416defm STRS : StoreUI<0b10, 1, 0b00, FPR32Op, uimm12s4, "str",
3417                    [(store (f32 FPR32Op:$Rt),
3418                            (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
3419defm STRD : StoreUI<0b11, 1, 0b00, FPR64Op, uimm12s8, "str",
3420                    [(store (f64 FPR64Op:$Rt),
3421                            (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
3422defm STRQ : StoreUI<0b00, 1, 0b10, FPR128Op, uimm12s16, "str", []>;
3423
3424defm STRHH : StoreUIz<0b01, 0, 0b00, GPR32z, uimm12s2, "strh",
3425                     [(truncstorei16 GPR32z:$Rt,
3426                                     (am_indexed16 GPR64sp:$Rn,
3427                                                   uimm12s2:$offset))]>;
3428defm STRBB : StoreUIz<0b00, 0, 0b00, GPR32z, uimm12s1,  "strb",
3429                     [(truncstorei8 GPR32z:$Rt,
3430                                    (am_indexed8 GPR64sp:$Rn,
3431                                                 uimm12s1:$offset))]>;
3432
3433// bf16 store pattern
3434def : Pat<(store (bf16 FPR16Op:$Rt),
3435                 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)),
3436          (STRHui FPR16:$Rt, GPR64sp:$Rn, uimm12s2:$offset)>;
3437
3438let AddedComplexity = 10 in {
3439
3440// Match all store 64 bits width whose type is compatible with FPR64
3441def : Pat<(store (v1i64 FPR64:$Rt),
3442                 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
3443          (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
3444def : Pat<(store (v1f64 FPR64:$Rt),
3445                 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
3446          (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
3447
3448let Predicates = [IsLE] in {
3449  // We must use ST1 to store vectors in big-endian.
3450  def : Pat<(store (v2f32 FPR64:$Rt),
3451                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
3452            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
3453  def : Pat<(store (v8i8 FPR64:$Rt),
3454                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
3455            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
3456  def : Pat<(store (v4i16 FPR64:$Rt),
3457                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
3458            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
3459  def : Pat<(store (v2i32 FPR64:$Rt),
3460                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
3461            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
3462  def : Pat<(store (v4f16 FPR64:$Rt),
3463                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
3464            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
3465  def : Pat<(store (v4bf16 FPR64:$Rt),
3466                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
3467            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
3468}
3469
3470// Match all store 128 bits width whose type is compatible with FPR128
3471def : Pat<(store (f128  FPR128:$Rt),
3472                 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
3473          (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
3474
3475let Predicates = [IsLE] in {
3476  // We must use ST1 to store vectors in big-endian.
3477  def : Pat<(store (v4f32 FPR128:$Rt),
3478                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
3479            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
3480  def : Pat<(store (v2f64 FPR128:$Rt),
3481                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
3482            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
3483  def : Pat<(store (v16i8 FPR128:$Rt),
3484                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
3485            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
3486  def : Pat<(store (v8i16 FPR128:$Rt),
3487                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
3488            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
3489  def : Pat<(store (v4i32 FPR128:$Rt),
3490                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
3491            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
3492  def : Pat<(store (v2i64 FPR128:$Rt),
3493                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
3494            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
3495  def : Pat<(store (v8f16 FPR128:$Rt),
3496                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
3497            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
3498  def : Pat<(store (v8bf16 FPR128:$Rt),
3499                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
3500            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
3501}
3502
3503// truncstore i64
3504def : Pat<(truncstorei32 GPR64:$Rt,
3505                         (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)),
3506  (STRWui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s4:$offset)>;
3507def : Pat<(truncstorei16 GPR64:$Rt,
3508                         (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)),
3509  (STRHHui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s2:$offset)>;
3510def : Pat<(truncstorei8 GPR64:$Rt, (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)),
3511  (STRBBui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s1:$offset)>;
3512
3513} // AddedComplexity = 10
3514
3515// Match stores from lane 0 to the appropriate subreg's store.
3516multiclass VecStoreLane0Pat<ComplexPattern UIAddrMode, SDPatternOperator storeop,
3517                            ValueType VTy, ValueType STy,
3518                            SubRegIndex SubRegIdx, Operand IndexType,
3519                            Instruction STR> {
3520  def : Pat<(storeop (STy (vector_extract (VTy VecListOne128:$Vt), 0)),
3521                     (UIAddrMode GPR64sp:$Rn, IndexType:$offset)),
3522            (STR (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
3523                 GPR64sp:$Rn, IndexType:$offset)>;
3524}
3525
3526let AddedComplexity = 19 in {
3527  defm : VecStoreLane0Pat<am_indexed16, truncstorei16, v8i16, i32, hsub, uimm12s2, STRHui>;
3528  defm : VecStoreLane0Pat<am_indexed16,         store, v8f16, f16, hsub, uimm12s2, STRHui>;
3529  defm : VecStoreLane0Pat<am_indexed32,         store, v4i32, i32, ssub, uimm12s4, STRSui>;
3530  defm : VecStoreLane0Pat<am_indexed32,         store, v4f32, f32, ssub, uimm12s4, STRSui>;
3531  defm : VecStoreLane0Pat<am_indexed64,         store, v2i64, i64, dsub, uimm12s8, STRDui>;
3532  defm : VecStoreLane0Pat<am_indexed64,         store, v2f64, f64, dsub, uimm12s8, STRDui>;
3533}
3534
3535//---
3536// (unscaled immediate)
3537defm STURX : StoreUnscaled<0b11, 0, 0b00, GPR64z, "stur",
3538                         [(store GPR64z:$Rt,
3539                                 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
3540defm STURW : StoreUnscaled<0b10, 0, 0b00, GPR32z, "stur",
3541                         [(store GPR32z:$Rt,
3542                                 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
3543defm STURB : StoreUnscaled<0b00, 1, 0b00, FPR8Op, "stur",
3544                         [(store FPR8Op:$Rt,
3545                                 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
3546defm STURH : StoreUnscaled<0b01, 1, 0b00, FPR16Op, "stur",
3547                         [(store (f16 FPR16Op:$Rt),
3548                                 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
3549defm STURS : StoreUnscaled<0b10, 1, 0b00, FPR32Op, "stur",
3550                         [(store (f32 FPR32Op:$Rt),
3551                                 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
3552defm STURD : StoreUnscaled<0b11, 1, 0b00, FPR64Op, "stur",
3553                         [(store (f64 FPR64Op:$Rt),
3554                                 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
3555defm STURQ : StoreUnscaled<0b00, 1, 0b10, FPR128Op, "stur",
3556                         [(store (f128 FPR128Op:$Rt),
3557                                 (am_unscaled128 GPR64sp:$Rn, simm9:$offset))]>;
3558defm STURHH : StoreUnscaled<0b01, 0, 0b00, GPR32z, "sturh",
3559                         [(truncstorei16 GPR32z:$Rt,
3560                                 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
3561defm STURBB : StoreUnscaled<0b00, 0, 0b00, GPR32z, "sturb",
3562                         [(truncstorei8 GPR32z:$Rt,
3563                                  (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
3564
3565// Armv8.4 Weaker Release Consistency enhancements
3566//         LDAPR & STLR with Immediate Offset instructions
3567let Predicates = [HasRCPC_IMMO] in {
3568defm STLURB     : BaseStoreUnscaleV84<"stlurb",  0b00, 0b00, GPR32>;
3569defm STLURH     : BaseStoreUnscaleV84<"stlurh",  0b01, 0b00, GPR32>;
3570defm STLURW     : BaseStoreUnscaleV84<"stlur",   0b10, 0b00, GPR32>;
3571defm STLURX     : BaseStoreUnscaleV84<"stlur",   0b11, 0b00, GPR64>;
3572defm LDAPURB    : BaseLoadUnscaleV84<"ldapurb",  0b00, 0b01, GPR32>;
3573defm LDAPURSBW  : BaseLoadUnscaleV84<"ldapursb", 0b00, 0b11, GPR32>;
3574defm LDAPURSBX  : BaseLoadUnscaleV84<"ldapursb", 0b00, 0b10, GPR64>;
3575defm LDAPURH    : BaseLoadUnscaleV84<"ldapurh",  0b01, 0b01, GPR32>;
3576defm LDAPURSHW  : BaseLoadUnscaleV84<"ldapursh", 0b01, 0b11, GPR32>;
3577defm LDAPURSHX  : BaseLoadUnscaleV84<"ldapursh", 0b01, 0b10, GPR64>;
3578defm LDAPUR     : BaseLoadUnscaleV84<"ldapur",   0b10, 0b01, GPR32>;
3579defm LDAPURSW   : BaseLoadUnscaleV84<"ldapursw", 0b10, 0b10, GPR64>;
3580defm LDAPURX    : BaseLoadUnscaleV84<"ldapur",   0b11, 0b01, GPR64>;
3581}
3582
3583// Match all store 64 bits width whose type is compatible with FPR64
3584def : Pat<(store (v1f64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3585          (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3586def : Pat<(store (v1i64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3587          (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3588
3589let AddedComplexity = 10 in {
3590
3591let Predicates = [IsLE] in {
3592  // We must use ST1 to store vectors in big-endian.
3593  def : Pat<(store (v2f32 FPR64:$Rt),
3594                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3595            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3596  def : Pat<(store (v8i8 FPR64:$Rt),
3597                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3598            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3599  def : Pat<(store (v4i16 FPR64:$Rt),
3600                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3601            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3602  def : Pat<(store (v2i32 FPR64:$Rt),
3603                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3604            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3605  def : Pat<(store (v4f16 FPR64:$Rt),
3606                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3607            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3608  def : Pat<(store (v4bf16 FPR64:$Rt),
3609                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3610            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3611}
3612
3613// Match all store 128 bits width whose type is compatible with FPR128
3614def : Pat<(store (f128 FPR128:$Rt), (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3615          (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3616
3617let Predicates = [IsLE] in {
3618  // We must use ST1 to store vectors in big-endian.
3619  def : Pat<(store (v4f32 FPR128:$Rt),
3620                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3621            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3622  def : Pat<(store (v2f64 FPR128:$Rt),
3623                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3624            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3625  def : Pat<(store (v16i8 FPR128:$Rt),
3626                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3627            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3628  def : Pat<(store (v8i16 FPR128:$Rt),
3629                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3630            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3631  def : Pat<(store (v4i32 FPR128:$Rt),
3632                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3633            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3634  def : Pat<(store (v2i64 FPR128:$Rt),
3635                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3636            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3637  def : Pat<(store (v2f64 FPR128:$Rt),
3638                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3639            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3640  def : Pat<(store (v8f16 FPR128:$Rt),
3641                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3642            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3643  def : Pat<(store (v8bf16 FPR128:$Rt),
3644                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3645            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3646}
3647
3648} // AddedComplexity = 10
3649
3650// unscaled i64 truncating stores
3651def : Pat<(truncstorei32 GPR64:$Rt, (am_unscaled32 GPR64sp:$Rn, simm9:$offset)),
3652  (STURWi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
3653def : Pat<(truncstorei16 GPR64:$Rt, (am_unscaled16 GPR64sp:$Rn, simm9:$offset)),
3654  (STURHHi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
3655def : Pat<(truncstorei8 GPR64:$Rt, (am_unscaled8 GPR64sp:$Rn, simm9:$offset)),
3656  (STURBBi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
3657
3658// Match stores from lane 0 to the appropriate subreg's store.
3659multiclass VecStoreULane0Pat<SDPatternOperator StoreOp,
3660                             ValueType VTy, ValueType STy,
3661                             SubRegIndex SubRegIdx, Instruction STR> {
3662  defm : VecStoreLane0Pat<am_unscaled128, StoreOp, VTy, STy, SubRegIdx, simm9, STR>;
3663}
3664
3665let AddedComplexity = 19 in {
3666  defm : VecStoreULane0Pat<truncstorei16, v8i16, i32, hsub, STURHi>;
3667  defm : VecStoreULane0Pat<store,         v8f16, f16, hsub, STURHi>;
3668  defm : VecStoreULane0Pat<store,         v4i32, i32, ssub, STURSi>;
3669  defm : VecStoreULane0Pat<store,         v4f32, f32, ssub, STURSi>;
3670  defm : VecStoreULane0Pat<store,         v2i64, i64, dsub, STURDi>;
3671  defm : VecStoreULane0Pat<store,         v2f64, f64, dsub, STURDi>;
3672}
3673
3674//---
3675// STR mnemonics fall back to STUR for negative or unaligned offsets.
3676def : InstAlias<"str $Rt, [$Rn, $offset]",
3677                (STURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
3678def : InstAlias<"str $Rt, [$Rn, $offset]",
3679                (STURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
3680def : InstAlias<"str $Rt, [$Rn, $offset]",
3681                (STURBi FPR8Op:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
3682def : InstAlias<"str $Rt, [$Rn, $offset]",
3683                (STURHi FPR16Op:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
3684def : InstAlias<"str $Rt, [$Rn, $offset]",
3685                (STURSi FPR32Op:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
3686def : InstAlias<"str $Rt, [$Rn, $offset]",
3687                (STURDi FPR64Op:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
3688def : InstAlias<"str $Rt, [$Rn, $offset]",
3689                (STURQi FPR128Op:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;
3690
3691def : InstAlias<"strb $Rt, [$Rn, $offset]",
3692                (STURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
3693def : InstAlias<"strh $Rt, [$Rn, $offset]",
3694                (STURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
3695
3696//---
3697// (unscaled immediate, unprivileged)
3698defm STTRW : StoreUnprivileged<0b10, 0, 0b00, GPR32, "sttr">;
3699defm STTRX : StoreUnprivileged<0b11, 0, 0b00, GPR64, "sttr">;
3700
3701defm STTRH : StoreUnprivileged<0b01, 0, 0b00, GPR32, "sttrh">;
3702defm STTRB : StoreUnprivileged<0b00, 0, 0b00, GPR32, "sttrb">;
3703
3704//---
3705// (immediate pre-indexed)
3706def STRWpre : StorePreIdx<0b10, 0, 0b00, GPR32z, "str",  pre_store, i32>;
3707def STRXpre : StorePreIdx<0b11, 0, 0b00, GPR64z, "str",  pre_store, i64>;
3708def STRBpre : StorePreIdx<0b00, 1, 0b00, FPR8Op,  "str",  pre_store, untyped>;
3709def STRHpre : StorePreIdx<0b01, 1, 0b00, FPR16Op, "str",  pre_store, f16>;
3710def STRSpre : StorePreIdx<0b10, 1, 0b00, FPR32Op, "str",  pre_store, f32>;
3711def STRDpre : StorePreIdx<0b11, 1, 0b00, FPR64Op, "str",  pre_store, f64>;
3712def STRQpre : StorePreIdx<0b00, 1, 0b10, FPR128Op, "str", pre_store, f128>;
3713
3714def STRBBpre : StorePreIdx<0b00, 0, 0b00, GPR32z, "strb", pre_truncsti8,  i32>;
3715def STRHHpre : StorePreIdx<0b01, 0, 0b00, GPR32z, "strh", pre_truncsti16, i32>;
3716
3717// truncstore i64
3718def : Pat<(pre_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
3719  (STRWpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
3720           simm9:$off)>;
3721def : Pat<(pre_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
3722  (STRHHpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
3723            simm9:$off)>;
3724def : Pat<(pre_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
3725  (STRBBpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
3726            simm9:$off)>;
3727
3728def : Pat<(pre_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3729          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3730def : Pat<(pre_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3731          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3732def : Pat<(pre_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3733          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3734def : Pat<(pre_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3735          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3736def : Pat<(pre_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3737          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3738def : Pat<(pre_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3739          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3740def : Pat<(pre_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3741          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3742
3743def : Pat<(pre_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3744          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3745def : Pat<(pre_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3746          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3747def : Pat<(pre_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3748          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3749def : Pat<(pre_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3750          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3751def : Pat<(pre_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3752          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3753def : Pat<(pre_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3754          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3755def : Pat<(pre_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3756          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3757
3758//---
3759// (immediate post-indexed)
3760def STRWpost : StorePostIdx<0b10, 0, 0b00, GPR32z,  "str", post_store, i32>;
3761def STRXpost : StorePostIdx<0b11, 0, 0b00, GPR64z,  "str", post_store, i64>;
3762def STRBpost : StorePostIdx<0b00, 1, 0b00, FPR8Op,   "str", post_store, untyped>;
3763def STRHpost : StorePostIdx<0b01, 1, 0b00, FPR16Op,  "str", post_store, f16>;
3764def STRSpost : StorePostIdx<0b10, 1, 0b00, FPR32Op,  "str", post_store, f32>;
3765def STRDpost : StorePostIdx<0b11, 1, 0b00, FPR64Op,  "str", post_store, f64>;
3766def STRQpost : StorePostIdx<0b00, 1, 0b10, FPR128Op, "str", post_store, f128>;
3767
3768def STRBBpost : StorePostIdx<0b00, 0, 0b00, GPR32z, "strb", post_truncsti8, i32>;
3769def STRHHpost : StorePostIdx<0b01, 0, 0b00, GPR32z, "strh", post_truncsti16, i32>;
3770
3771// truncstore i64
3772def : Pat<(post_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
3773  (STRWpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
3774            simm9:$off)>;
3775def : Pat<(post_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
3776  (STRHHpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
3777             simm9:$off)>;
3778def : Pat<(post_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
3779  (STRBBpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
3780             simm9:$off)>;
3781
3782def : Pat<(post_store (bf16 FPR16:$Rt), GPR64sp:$addr, simm9:$off),
3783          (STRHpost FPR16:$Rt, GPR64sp:$addr, simm9:$off)>;
3784
3785def : Pat<(post_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3786          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3787def : Pat<(post_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3788          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3789def : Pat<(post_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3790          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3791def : Pat<(post_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3792          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3793def : Pat<(post_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3794          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3795def : Pat<(post_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3796          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3797def : Pat<(post_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3798          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3799def : Pat<(post_store (v4bf16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3800          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3801
3802def : Pat<(post_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3803          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3804def : Pat<(post_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3805          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3806def : Pat<(post_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3807          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3808def : Pat<(post_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3809          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3810def : Pat<(post_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3811          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3812def : Pat<(post_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3813          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3814def : Pat<(post_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3815          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3816def : Pat<(post_store (v8bf16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3817          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3818
3819//===----------------------------------------------------------------------===//
3820// Load/store exclusive instructions.
3821//===----------------------------------------------------------------------===//
3822
3823def LDARW  : LoadAcquire   <0b10, 1, 1, 0, 1, GPR32, "ldar">;
3824def LDARX  : LoadAcquire   <0b11, 1, 1, 0, 1, GPR64, "ldar">;
3825def LDARB  : LoadAcquire   <0b00, 1, 1, 0, 1, GPR32, "ldarb">;
3826def LDARH  : LoadAcquire   <0b01, 1, 1, 0, 1, GPR32, "ldarh">;
3827
3828def LDAXRW : LoadExclusive <0b10, 0, 1, 0, 1, GPR32, "ldaxr">;
3829def LDAXRX : LoadExclusive <0b11, 0, 1, 0, 1, GPR64, "ldaxr">;
3830def LDAXRB : LoadExclusive <0b00, 0, 1, 0, 1, GPR32, "ldaxrb">;
3831def LDAXRH : LoadExclusive <0b01, 0, 1, 0, 1, GPR32, "ldaxrh">;
3832
3833def LDXRW  : LoadExclusive <0b10, 0, 1, 0, 0, GPR32, "ldxr">;
3834def LDXRX  : LoadExclusive <0b11, 0, 1, 0, 0, GPR64, "ldxr">;
3835def LDXRB  : LoadExclusive <0b00, 0, 1, 0, 0, GPR32, "ldxrb">;
3836def LDXRH  : LoadExclusive <0b01, 0, 1, 0, 0, GPR32, "ldxrh">;
3837
3838def STLRW  : StoreRelease  <0b10, 1, 0, 0, 1, GPR32, "stlr">;
3839def STLRX  : StoreRelease  <0b11, 1, 0, 0, 1, GPR64, "stlr">;
3840def STLRB  : StoreRelease  <0b00, 1, 0, 0, 1, GPR32, "stlrb">;
3841def STLRH  : StoreRelease  <0b01, 1, 0, 0, 1, GPR32, "stlrh">;
3842
3843def STLXRW : StoreExclusive<0b10, 0, 0, 0, 1, GPR32, "stlxr">;
3844def STLXRX : StoreExclusive<0b11, 0, 0, 0, 1, GPR64, "stlxr">;
3845def STLXRB : StoreExclusive<0b00, 0, 0, 0, 1, GPR32, "stlxrb">;
3846def STLXRH : StoreExclusive<0b01, 0, 0, 0, 1, GPR32, "stlxrh">;
3847
3848def STXRW  : StoreExclusive<0b10, 0, 0, 0, 0, GPR32, "stxr">;
3849def STXRX  : StoreExclusive<0b11, 0, 0, 0, 0, GPR64, "stxr">;
3850def STXRB  : StoreExclusive<0b00, 0, 0, 0, 0, GPR32, "stxrb">;
3851def STXRH  : StoreExclusive<0b01, 0, 0, 0, 0, GPR32, "stxrh">;
3852
3853def LDAXPW : LoadExclusivePair<0b10, 0, 1, 1, 1, GPR32, "ldaxp">;
3854def LDAXPX : LoadExclusivePair<0b11, 0, 1, 1, 1, GPR64, "ldaxp">;
3855
3856def LDXPW  : LoadExclusivePair<0b10, 0, 1, 1, 0, GPR32, "ldxp">;
3857def LDXPX  : LoadExclusivePair<0b11, 0, 1, 1, 0, GPR64, "ldxp">;
3858
3859def STLXPW : StoreExclusivePair<0b10, 0, 0, 1, 1, GPR32, "stlxp">;
3860def STLXPX : StoreExclusivePair<0b11, 0, 0, 1, 1, GPR64, "stlxp">;
3861
3862def STXPW  : StoreExclusivePair<0b10, 0, 0, 1, 0, GPR32, "stxp">;
3863def STXPX  : StoreExclusivePair<0b11, 0, 0, 1, 0, GPR64, "stxp">;
3864
3865let Predicates = [HasLOR] in {
3866  // v8.1a "Limited Order Region" extension load-acquire instructions
3867  def LDLARW  : LoadAcquire   <0b10, 1, 1, 0, 0, GPR32, "ldlar">;
3868  def LDLARX  : LoadAcquire   <0b11, 1, 1, 0, 0, GPR64, "ldlar">;
3869  def LDLARB  : LoadAcquire   <0b00, 1, 1, 0, 0, GPR32, "ldlarb">;
3870  def LDLARH  : LoadAcquire   <0b01, 1, 1, 0, 0, GPR32, "ldlarh">;
3871
3872  // v8.1a "Limited Order Region" extension store-release instructions
3873  def STLLRW  : StoreRelease   <0b10, 1, 0, 0, 0, GPR32, "stllr">;
3874  def STLLRX  : StoreRelease   <0b11, 1, 0, 0, 0, GPR64, "stllr">;
3875  def STLLRB  : StoreRelease   <0b00, 1, 0, 0, 0, GPR32, "stllrb">;
3876  def STLLRH  : StoreRelease   <0b01, 1, 0, 0, 0, GPR32, "stllrh">;
3877}
3878
3879//===----------------------------------------------------------------------===//
3880// Scaled floating point to integer conversion instructions.
3881//===----------------------------------------------------------------------===//
3882
3883defm FCVTAS : FPToIntegerUnscaled<0b00, 0b100, "fcvtas", int_aarch64_neon_fcvtas>;
3884defm FCVTAU : FPToIntegerUnscaled<0b00, 0b101, "fcvtau", int_aarch64_neon_fcvtau>;
3885defm FCVTMS : FPToIntegerUnscaled<0b10, 0b000, "fcvtms", int_aarch64_neon_fcvtms>;
3886defm FCVTMU : FPToIntegerUnscaled<0b10, 0b001, "fcvtmu", int_aarch64_neon_fcvtmu>;
3887defm FCVTNS : FPToIntegerUnscaled<0b00, 0b000, "fcvtns", int_aarch64_neon_fcvtns>;
3888defm FCVTNU : FPToIntegerUnscaled<0b00, 0b001, "fcvtnu", int_aarch64_neon_fcvtnu>;
3889defm FCVTPS : FPToIntegerUnscaled<0b01, 0b000, "fcvtps", int_aarch64_neon_fcvtps>;
3890defm FCVTPU : FPToIntegerUnscaled<0b01, 0b001, "fcvtpu", int_aarch64_neon_fcvtpu>;
3891defm FCVTZS : FPToIntegerUnscaled<0b11, 0b000, "fcvtzs", any_fp_to_sint>;
3892defm FCVTZU : FPToIntegerUnscaled<0b11, 0b001, "fcvtzu", any_fp_to_uint>;
3893defm FCVTZS : FPToIntegerScaled<0b11, 0b000, "fcvtzs", any_fp_to_sint>;
3894defm FCVTZU : FPToIntegerScaled<0b11, 0b001, "fcvtzu", any_fp_to_uint>;
3895
3896// AArch64's FCVT instructions saturate when out of range.
3897multiclass FPToIntegerSatPats<SDNode to_int_sat, string INST> {
3898  let Predicates = [HasFullFP16] in {
3899  def : Pat<(i32 (to_int_sat f16:$Rn, i32)),
3900            (!cast<Instruction>(INST # UWHr) f16:$Rn)>;
3901  def : Pat<(i64 (to_int_sat f16:$Rn, i64)),
3902            (!cast<Instruction>(INST # UXHr) f16:$Rn)>;
3903  }
3904  def : Pat<(i32 (to_int_sat f32:$Rn, i32)),
3905            (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
3906  def : Pat<(i64 (to_int_sat f32:$Rn, i64)),
3907            (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
3908  def : Pat<(i32 (to_int_sat f64:$Rn, i32)),
3909            (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
3910  def : Pat<(i64 (to_int_sat f64:$Rn, i64)),
3911            (!cast<Instruction>(INST # UXDr) f64:$Rn)>;
3912
3913  let Predicates = [HasFullFP16] in {
3914  def : Pat<(i32 (to_int_sat (fmul f16:$Rn, fixedpoint_f16_i32:$scale), i32)),
3915            (!cast<Instruction>(INST # SWHri) $Rn, $scale)>;
3916  def : Pat<(i64 (to_int_sat (fmul f16:$Rn, fixedpoint_f16_i64:$scale), i64)),
3917            (!cast<Instruction>(INST # SXHri) $Rn, $scale)>;
3918  }
3919  def : Pat<(i32 (to_int_sat (fmul f32:$Rn, fixedpoint_f32_i32:$scale), i32)),
3920            (!cast<Instruction>(INST # SWSri) $Rn, $scale)>;
3921  def : Pat<(i64 (to_int_sat (fmul f32:$Rn, fixedpoint_f32_i64:$scale), i64)),
3922            (!cast<Instruction>(INST # SXSri) $Rn, $scale)>;
3923  def : Pat<(i32 (to_int_sat (fmul f64:$Rn, fixedpoint_f64_i32:$scale), i32)),
3924            (!cast<Instruction>(INST # SWDri) $Rn, $scale)>;
3925  def : Pat<(i64 (to_int_sat (fmul f64:$Rn, fixedpoint_f64_i64:$scale), i64)),
3926            (!cast<Instruction>(INST # SXDri) $Rn, $scale)>;
3927}
3928
3929defm : FPToIntegerSatPats<fp_to_sint_sat, "FCVTZS">;
3930defm : FPToIntegerSatPats<fp_to_uint_sat, "FCVTZU">;
3931
3932multiclass FPToIntegerIntPats<Intrinsic round, string INST> {
3933  let Predicates = [HasFullFP16] in {
3934  def : Pat<(i32 (round f16:$Rn)), (!cast<Instruction>(INST # UWHr) $Rn)>;
3935  def : Pat<(i64 (round f16:$Rn)), (!cast<Instruction>(INST # UXHr) $Rn)>;
3936  }
3937  def : Pat<(i32 (round f32:$Rn)), (!cast<Instruction>(INST # UWSr) $Rn)>;
3938  def : Pat<(i64 (round f32:$Rn)), (!cast<Instruction>(INST # UXSr) $Rn)>;
3939  def : Pat<(i32 (round f64:$Rn)), (!cast<Instruction>(INST # UWDr) $Rn)>;
3940  def : Pat<(i64 (round f64:$Rn)), (!cast<Instruction>(INST # UXDr) $Rn)>;
3941
3942  let Predicates = [HasFullFP16] in {
3943  def : Pat<(i32 (round (fmul f16:$Rn, fixedpoint_f16_i32:$scale))),
3944            (!cast<Instruction>(INST # SWHri) $Rn, $scale)>;
3945  def : Pat<(i64 (round (fmul f16:$Rn, fixedpoint_f16_i64:$scale))),
3946            (!cast<Instruction>(INST # SXHri) $Rn, $scale)>;
3947  }
3948  def : Pat<(i32 (round (fmul f32:$Rn, fixedpoint_f32_i32:$scale))),
3949            (!cast<Instruction>(INST # SWSri) $Rn, $scale)>;
3950  def : Pat<(i64 (round (fmul f32:$Rn, fixedpoint_f32_i64:$scale))),
3951            (!cast<Instruction>(INST # SXSri) $Rn, $scale)>;
3952  def : Pat<(i32 (round (fmul f64:$Rn, fixedpoint_f64_i32:$scale))),
3953            (!cast<Instruction>(INST # SWDri) $Rn, $scale)>;
3954  def : Pat<(i64 (round (fmul f64:$Rn, fixedpoint_f64_i64:$scale))),
3955            (!cast<Instruction>(INST # SXDri) $Rn, $scale)>;
3956}
3957
3958defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzs, "FCVTZS">;
3959defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzu, "FCVTZU">;
3960
3961multiclass FPToIntegerPats<SDNode to_int, SDNode to_int_sat, SDNode round, string INST> {
3962  def : Pat<(i32 (to_int (round f32:$Rn))),
3963            (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
3964  def : Pat<(i64 (to_int (round f32:$Rn))),
3965            (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
3966  def : Pat<(i32 (to_int (round f64:$Rn))),
3967            (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
3968  def : Pat<(i64 (to_int (round f64:$Rn))),
3969            (!cast<Instruction>(INST # UXDr) f64:$Rn)>;
3970
3971  // These instructions saturate like fp_to_[su]int_sat.
3972  let Predicates = [HasFullFP16] in {
3973  def : Pat<(i32 (to_int_sat (round f16:$Rn), i32)),
3974            (!cast<Instruction>(INST # UWHr) f16:$Rn)>;
3975  def : Pat<(i64 (to_int_sat (round f16:$Rn), i64)),
3976            (!cast<Instruction>(INST # UXHr) f16:$Rn)>;
3977  }
3978  def : Pat<(i32 (to_int_sat (round f32:$Rn), i32)),
3979            (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
3980  def : Pat<(i64 (to_int_sat (round f32:$Rn), i64)),
3981            (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
3982  def : Pat<(i32 (to_int_sat (round f64:$Rn), i32)),
3983            (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
3984  def : Pat<(i64 (to_int_sat (round f64:$Rn), i64)),
3985            (!cast<Instruction>(INST # UXDr) f64:$Rn)>;
3986}
3987
3988defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, fceil,  "FCVTPS">;
3989defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, fceil,  "FCVTPU">;
3990defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, ffloor, "FCVTMS">;
3991defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, ffloor, "FCVTMU">;
3992defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, ftrunc, "FCVTZS">;
3993defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, ftrunc, "FCVTZU">;
3994defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, fround, "FCVTAS">;
3995defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, fround, "FCVTAU">;
3996
3997
3998
3999let Predicates = [HasFullFP16] in {
4000  def : Pat<(i32 (any_lround f16:$Rn)),
4001            (!cast<Instruction>(FCVTASUWHr) f16:$Rn)>;
4002  def : Pat<(i64 (any_lround f16:$Rn)),
4003            (!cast<Instruction>(FCVTASUXHr) f16:$Rn)>;
4004  def : Pat<(i64 (any_llround f16:$Rn)),
4005            (!cast<Instruction>(FCVTASUXHr) f16:$Rn)>;
4006}
4007def : Pat<(i32 (any_lround f32:$Rn)),
4008          (!cast<Instruction>(FCVTASUWSr) f32:$Rn)>;
4009def : Pat<(i32 (any_lround f64:$Rn)),
4010          (!cast<Instruction>(FCVTASUWDr) f64:$Rn)>;
4011def : Pat<(i64 (any_lround f32:$Rn)),
4012          (!cast<Instruction>(FCVTASUXSr) f32:$Rn)>;
4013def : Pat<(i64 (any_lround f64:$Rn)),
4014          (!cast<Instruction>(FCVTASUXDr) f64:$Rn)>;
4015def : Pat<(i64 (any_llround f32:$Rn)),
4016          (!cast<Instruction>(FCVTASUXSr) f32:$Rn)>;
4017def : Pat<(i64 (any_llround f64:$Rn)),
4018          (!cast<Instruction>(FCVTASUXDr) f64:$Rn)>;
4019
4020//===----------------------------------------------------------------------===//
4021// Scaled integer to floating point conversion instructions.
4022//===----------------------------------------------------------------------===//
4023
4024defm SCVTF : IntegerToFP<0, "scvtf", any_sint_to_fp>;
4025defm UCVTF : IntegerToFP<1, "ucvtf", any_uint_to_fp>;
4026
4027//===----------------------------------------------------------------------===//
4028// Unscaled integer to floating point conversion instruction.
4029//===----------------------------------------------------------------------===//
4030
4031defm FMOV : UnscaledConversion<"fmov">;
4032
4033// Add pseudo ops for FMOV 0 so we can mark them as isReMaterializable
4034let isReMaterializable = 1, isCodeGenOnly = 1, isAsCheapAsAMove = 1 in {
4035def FMOVH0 : Pseudo<(outs FPR16:$Rd), (ins), [(set f16:$Rd, (fpimm0))]>,
4036    Sched<[WriteF]>, Requires<[HasFullFP16]>;
4037def FMOVS0 : Pseudo<(outs FPR32:$Rd), (ins), [(set f32:$Rd, (fpimm0))]>,
4038    Sched<[WriteF]>;
4039def FMOVD0 : Pseudo<(outs FPR64:$Rd), (ins), [(set f64:$Rd, (fpimm0))]>,
4040    Sched<[WriteF]>;
4041}
4042// Similarly add aliases
4043def : InstAlias<"fmov $Rd, #0.0", (FMOVWHr FPR16:$Rd, WZR), 0>,
4044    Requires<[HasFullFP16]>;
4045def : InstAlias<"fmov $Rd, #0.0", (FMOVWSr FPR32:$Rd, WZR), 0>;
4046def : InstAlias<"fmov $Rd, #0.0", (FMOVXDr FPR64:$Rd, XZR), 0>;
4047
4048//===----------------------------------------------------------------------===//
4049// Floating point conversion instruction.
4050//===----------------------------------------------------------------------===//
4051
4052defm FCVT : FPConversion<"fcvt">;
4053
4054//===----------------------------------------------------------------------===//
4055// Floating point single operand instructions.
4056//===----------------------------------------------------------------------===//
4057
4058defm FABS   : SingleOperandFPDataNoException<0b0001, "fabs", fabs>;
4059defm FMOV   : SingleOperandFPDataNoException<0b0000, "fmov">;
4060defm FNEG   : SingleOperandFPDataNoException<0b0010, "fneg", fneg>;
4061defm FRINTA : SingleOperandFPData<0b1100, "frinta", any_fround>;
4062defm FRINTI : SingleOperandFPData<0b1111, "frinti", any_fnearbyint>;
4063defm FRINTM : SingleOperandFPData<0b1010, "frintm", any_ffloor>;
4064defm FRINTN : SingleOperandFPData<0b1000, "frintn", any_froundeven>;
4065defm FRINTP : SingleOperandFPData<0b1001, "frintp", any_fceil>;
4066
4067defm FRINTX : SingleOperandFPData<0b1110, "frintx", any_frint>;
4068defm FRINTZ : SingleOperandFPData<0b1011, "frintz", any_ftrunc>;
4069
4070let SchedRW = [WriteFDiv] in {
4071defm FSQRT  : SingleOperandFPData<0b0011, "fsqrt", any_fsqrt>;
4072}
4073
4074let Predicates = [HasFRInt3264] in {
4075  defm FRINT32Z : FRIntNNT<0b00, "frint32z", int_aarch64_frint32z>;
4076  defm FRINT64Z : FRIntNNT<0b10, "frint64z", int_aarch64_frint64z>;
4077  defm FRINT32X : FRIntNNT<0b01, "frint32x", int_aarch64_frint32x>;
4078  defm FRINT64X : FRIntNNT<0b11, "frint64x", int_aarch64_frint64x>;
4079} // HasFRInt3264
4080
4081// Emitting strict_lrint as two instructions is valid as any exceptions that
4082// occur will happen in exactly one of the instructions (e.g. if the input is
4083// not an integer the inexact exception will happen in the FRINTX but not then
4084// in the FCVTZS as the output of FRINTX is an integer).
4085let Predicates = [HasFullFP16] in {
4086  def : Pat<(i32 (any_lrint f16:$Rn)),
4087            (FCVTZSUWHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
4088  def : Pat<(i64 (any_lrint f16:$Rn)),
4089            (FCVTZSUXHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
4090  def : Pat<(i64 (any_llrint f16:$Rn)),
4091            (FCVTZSUXHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
4092}
4093def : Pat<(i32 (any_lrint f32:$Rn)),
4094          (FCVTZSUWSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
4095def : Pat<(i32 (any_lrint f64:$Rn)),
4096          (FCVTZSUWDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
4097def : Pat<(i64 (any_lrint f32:$Rn)),
4098          (FCVTZSUXSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
4099def : Pat<(i64 (any_lrint f64:$Rn)),
4100          (FCVTZSUXDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
4101def : Pat<(i64 (any_llrint f32:$Rn)),
4102          (FCVTZSUXSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
4103def : Pat<(i64 (any_llrint f64:$Rn)),
4104          (FCVTZSUXDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
4105
4106//===----------------------------------------------------------------------===//
4107// Floating point two operand instructions.
4108//===----------------------------------------------------------------------===//
4109
4110defm FADD   : TwoOperandFPData<0b0010, "fadd", any_fadd>;
4111let SchedRW = [WriteFDiv] in {
4112defm FDIV   : TwoOperandFPData<0b0001, "fdiv", any_fdiv>;
4113}
4114defm FMAXNM : TwoOperandFPData<0b0110, "fmaxnm", any_fmaxnum>;
4115defm FMAX   : TwoOperandFPData<0b0100, "fmax", any_fmaximum>;
4116defm FMINNM : TwoOperandFPData<0b0111, "fminnm", any_fminnum>;
4117defm FMIN   : TwoOperandFPData<0b0101, "fmin", any_fminimum>;
4118let SchedRW = [WriteFMul] in {
4119defm FMUL   : TwoOperandFPData<0b0000, "fmul", any_fmul>;
4120defm FNMUL  : TwoOperandFPDataNeg<0b1000, "fnmul", any_fmul>;
4121}
4122defm FSUB   : TwoOperandFPData<0b0011, "fsub", any_fsub>;
4123
4124def : Pat<(v1f64 (fmaximum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
4125          (FMAXDrr FPR64:$Rn, FPR64:$Rm)>;
4126def : Pat<(v1f64 (fminimum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
4127          (FMINDrr FPR64:$Rn, FPR64:$Rm)>;
4128def : Pat<(v1f64 (fmaxnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
4129          (FMAXNMDrr FPR64:$Rn, FPR64:$Rm)>;
4130def : Pat<(v1f64 (fminnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
4131          (FMINNMDrr FPR64:$Rn, FPR64:$Rm)>;
4132
4133//===----------------------------------------------------------------------===//
4134// Floating point three operand instructions.
4135//===----------------------------------------------------------------------===//
4136
4137defm FMADD  : ThreeOperandFPData<0, 0, "fmadd", any_fma>;
4138defm FMSUB  : ThreeOperandFPData<0, 1, "fmsub",
4139     TriOpFrag<(any_fma node:$LHS, (fneg node:$MHS), node:$RHS)> >;
4140defm FNMADD : ThreeOperandFPData<1, 0, "fnmadd",
4141     TriOpFrag<(fneg (any_fma node:$LHS, node:$MHS, node:$RHS))> >;
4142defm FNMSUB : ThreeOperandFPData<1, 1, "fnmsub",
4143     TriOpFrag<(any_fma node:$LHS, node:$MHS, (fneg node:$RHS))> >;
4144
4145// The following def pats catch the case where the LHS of an FMA is negated.
4146// The TriOpFrag above catches the case where the middle operand is negated.
4147
4148// N.b. FMSUB etc have the accumulator at the *end* of (outs), unlike
4149// the NEON variant.
4150
4151// Here we handle first -(a + b*c) for FNMADD:
4152
4153let Predicates = [HasNEON, HasFullFP16] in
4154def : Pat<(f16 (fma (fneg FPR16:$Rn), FPR16:$Rm, FPR16:$Ra)),
4155          (FMSUBHrrr FPR16:$Rn, FPR16:$Rm, FPR16:$Ra)>;
4156
4157def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, FPR32:$Ra)),
4158          (FMSUBSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
4159
4160def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, FPR64:$Ra)),
4161          (FMSUBDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
4162
4163// Now it's time for "(-a) + (-b)*c"
4164
4165let Predicates = [HasNEON, HasFullFP16] in
4166def : Pat<(f16 (fma (fneg FPR16:$Rn), FPR16:$Rm, (fneg FPR16:$Ra))),
4167          (FNMADDHrrr FPR16:$Rn, FPR16:$Rm, FPR16:$Ra)>;
4168
4169def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, (fneg FPR32:$Ra))),
4170          (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
4171
4172def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, (fneg FPR64:$Ra))),
4173          (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
4174
4175//===----------------------------------------------------------------------===//
4176// Floating point comparison instructions.
4177//===----------------------------------------------------------------------===//
4178
4179defm FCMPE : FPComparison<1, "fcmpe", AArch64strict_fcmpe>;
4180defm FCMP  : FPComparison<0, "fcmp", AArch64any_fcmp>;
4181
4182//===----------------------------------------------------------------------===//
4183// Floating point conditional comparison instructions.
4184//===----------------------------------------------------------------------===//
4185
4186defm FCCMPE : FPCondComparison<1, "fccmpe">;
4187defm FCCMP  : FPCondComparison<0, "fccmp", AArch64fccmp>;
4188
4189//===----------------------------------------------------------------------===//
4190// Floating point conditional select instruction.
4191//===----------------------------------------------------------------------===//
4192
4193defm FCSEL : FPCondSelect<"fcsel">;
4194
4195// CSEL instructions providing f128 types need to be handled by a
4196// pseudo-instruction since the eventual code will need to introduce basic
4197// blocks and control flow.
4198def F128CSEL : Pseudo<(outs FPR128:$Rd),
4199                      (ins FPR128:$Rn, FPR128:$Rm, ccode:$cond),
4200                      [(set (f128 FPR128:$Rd),
4201                            (AArch64csel FPR128:$Rn, FPR128:$Rm,
4202                                       (i32 imm:$cond), NZCV))]> {
4203  let Uses = [NZCV];
4204  let usesCustomInserter = 1;
4205  let hasNoSchedulingInfo = 1;
4206}
4207
4208//===----------------------------------------------------------------------===//
4209// Instructions used for emitting unwind opcodes on ARM64 Windows.
4210//===----------------------------------------------------------------------===//
4211let isPseudo = 1 in {
4212  def SEH_StackAlloc : Pseudo<(outs), (ins i32imm:$size), []>, Sched<[]>;
4213  def SEH_SaveFPLR : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
4214  def SEH_SaveFPLR_X : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
4215  def SEH_SaveReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
4216  def SEH_SaveReg_X : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
4217  def SEH_SaveRegP : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
4218  def SEH_SaveRegP_X : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
4219  def SEH_SaveFReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
4220  def SEH_SaveFReg_X :  Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
4221  def SEH_SaveFRegP : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
4222  def SEH_SaveFRegP_X : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
4223  def SEH_SetFP : Pseudo<(outs), (ins), []>, Sched<[]>;
4224  def SEH_AddFP : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
4225  def SEH_Nop : Pseudo<(outs), (ins), []>, Sched<[]>;
4226  def SEH_PrologEnd : Pseudo<(outs), (ins), []>, Sched<[]>;
4227  def SEH_EpilogStart : Pseudo<(outs), (ins), []>, Sched<[]>;
4228  def SEH_EpilogEnd : Pseudo<(outs), (ins), []>, Sched<[]>;
4229}
4230
4231// Pseudo instructions for Windows EH
4232//===----------------------------------------------------------------------===//
4233let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
4234    isCodeGenOnly = 1, isReturn = 1, isEHScopeReturn = 1, isPseudo = 1 in {
4235   def CLEANUPRET : Pseudo<(outs), (ins), [(cleanupret)]>, Sched<[]>;
4236   let usesCustomInserter = 1 in
4237     def CATCHRET : Pseudo<(outs), (ins am_brcond:$dst, am_brcond:$src), [(catchret bb:$dst, bb:$src)]>,
4238                    Sched<[]>;
4239}
4240
4241// Pseudo instructions for homogeneous prolog/epilog
4242let isPseudo = 1 in {
4243  // Save CSRs in order, {FPOffset}
4244  def HOM_Prolog : Pseudo<(outs), (ins variable_ops), []>, Sched<[]>;
4245  // Restore CSRs in order
4246  def HOM_Epilog : Pseudo<(outs), (ins variable_ops), []>, Sched<[]>;
4247}
4248
4249//===----------------------------------------------------------------------===//
4250// Floating point immediate move.
4251//===----------------------------------------------------------------------===//
4252
4253let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
4254defm FMOV : FPMoveImmediate<"fmov">;
4255}
4256
4257//===----------------------------------------------------------------------===//
4258// Advanced SIMD two vector instructions.
4259//===----------------------------------------------------------------------===//
4260
4261defm UABDL   : SIMDLongThreeVectorBHSabdl<1, 0b0111, "uabdl",
4262                                          AArch64uabd>;
4263// Match UABDL in log2-shuffle patterns.
4264def : Pat<(abs (v8i16 (sub (zext (v8i8 V64:$opA)),
4265                           (zext (v8i8 V64:$opB))))),
4266          (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
4267def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
4268               (v8i16 (add (sub (zext (v8i8 V64:$opA)),
4269                                (zext (v8i8 V64:$opB))),
4270                           (AArch64vashr v8i16:$src, (i32 15))))),
4271          (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
4272def : Pat<(abs (v8i16 (sub (zext (extract_high_v16i8 (v16i8 V128:$opA))),
4273                           (zext (extract_high_v16i8 (v16i8 V128:$opB)))))),
4274          (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
4275def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
4276               (v8i16 (add (sub (zext (extract_high_v16i8 (v16i8 V128:$opA))),
4277                                (zext (extract_high_v16i8 (v16i8 V128:$opB)))),
4278                           (AArch64vashr v8i16:$src, (i32 15))))),
4279          (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
4280def : Pat<(abs (v4i32 (sub (zext (v4i16 V64:$opA)),
4281                           (zext (v4i16 V64:$opB))))),
4282          (UABDLv4i16_v4i32 V64:$opA, V64:$opB)>;
4283def : Pat<(abs (v4i32 (sub (zext (extract_high_v8i16 (v8i16 V128:$opA))),
4284                           (zext (extract_high_v8i16 (v8i16 V128:$opB)))))),
4285          (UABDLv8i16_v4i32 V128:$opA, V128:$opB)>;
4286def : Pat<(abs (v2i64 (sub (zext (v2i32 V64:$opA)),
4287                           (zext (v2i32 V64:$opB))))),
4288          (UABDLv2i32_v2i64 V64:$opA, V64:$opB)>;
4289def : Pat<(abs (v2i64 (sub (zext (extract_high_v4i32 (v4i32 V128:$opA))),
4290                           (zext (extract_high_v4i32 (v4i32 V128:$opB)))))),
4291          (UABDLv4i32_v2i64 V128:$opA, V128:$opB)>;
4292
4293defm ABS    : SIMDTwoVectorBHSD<0, 0b01011, "abs", abs>;
4294defm CLS    : SIMDTwoVectorBHS<0, 0b00100, "cls", int_aarch64_neon_cls>;
4295defm CLZ    : SIMDTwoVectorBHS<1, 0b00100, "clz", ctlz>;
4296defm CMEQ   : SIMDCmpTwoVector<0, 0b01001, "cmeq", AArch64cmeqz>;
4297defm CMGE   : SIMDCmpTwoVector<1, 0b01000, "cmge", AArch64cmgez>;
4298defm CMGT   : SIMDCmpTwoVector<0, 0b01000, "cmgt", AArch64cmgtz>;
4299defm CMLE   : SIMDCmpTwoVector<1, 0b01001, "cmle", AArch64cmlez>;
4300defm CMLT   : SIMDCmpTwoVector<0, 0b01010, "cmlt", AArch64cmltz>;
4301defm CNT    : SIMDTwoVectorB<0, 0b00, 0b00101, "cnt", ctpop>;
4302defm FABS   : SIMDTwoVectorFPNoException<0, 1, 0b01111, "fabs", fabs>;
4303
4304def : Pat<(v8i8 (AArch64vashr (v8i8 V64:$Rn), (i32 7))),
4305          (CMLTv8i8rz V64:$Rn)>;
4306def : Pat<(v4i16 (AArch64vashr (v4i16 V64:$Rn), (i32 15))),
4307          (CMLTv4i16rz V64:$Rn)>;
4308def : Pat<(v2i32 (AArch64vashr (v2i32 V64:$Rn), (i32 31))),
4309          (CMLTv2i32rz V64:$Rn)>;
4310def : Pat<(v16i8 (AArch64vashr (v16i8 V128:$Rn), (i32 7))),
4311          (CMLTv16i8rz V128:$Rn)>;
4312def : Pat<(v8i16 (AArch64vashr (v8i16 V128:$Rn), (i32 15))),
4313          (CMLTv8i16rz V128:$Rn)>;
4314def : Pat<(v4i32 (AArch64vashr (v4i32 V128:$Rn), (i32 31))),
4315          (CMLTv4i32rz V128:$Rn)>;
4316def : Pat<(v2i64 (AArch64vashr (v2i64 V128:$Rn), (i32 63))),
4317          (CMLTv2i64rz V128:$Rn)>;
4318
4319defm FCMEQ  : SIMDFPCmpTwoVector<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
4320defm FCMGE  : SIMDFPCmpTwoVector<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
4321defm FCMGT  : SIMDFPCmpTwoVector<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
4322defm FCMLE  : SIMDFPCmpTwoVector<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
4323defm FCMLT  : SIMDFPCmpTwoVector<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
4324defm FCVTAS : SIMDTwoVectorFPToInt<0,0,0b11100, "fcvtas",int_aarch64_neon_fcvtas>;
4325defm FCVTAU : SIMDTwoVectorFPToInt<1,0,0b11100, "fcvtau",int_aarch64_neon_fcvtau>;
4326defm FCVTL  : SIMDFPWidenTwoVector<0, 0, 0b10111, "fcvtl">;
4327def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (v4i16 V64:$Rn))),
4328          (FCVTLv4i16 V64:$Rn)>;
4329def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (extract_subvector (v8i16 V128:$Rn),
4330                                                              (i64 4)))),
4331          (FCVTLv8i16 V128:$Rn)>;
4332def : Pat<(v2f64 (any_fpextend (v2f32 V64:$Rn))), (FCVTLv2i32 V64:$Rn)>;
4333
4334def : Pat<(v4f32 (any_fpextend (v4f16 V64:$Rn))), (FCVTLv4i16 V64:$Rn)>;
4335
4336defm FCVTMS : SIMDTwoVectorFPToInt<0,0,0b11011, "fcvtms",int_aarch64_neon_fcvtms>;
4337defm FCVTMU : SIMDTwoVectorFPToInt<1,0,0b11011, "fcvtmu",int_aarch64_neon_fcvtmu>;
4338defm FCVTNS : SIMDTwoVectorFPToInt<0,0,0b11010, "fcvtns",int_aarch64_neon_fcvtns>;
4339defm FCVTNU : SIMDTwoVectorFPToInt<1,0,0b11010, "fcvtnu",int_aarch64_neon_fcvtnu>;
4340defm FCVTN  : SIMDFPNarrowTwoVector<0, 0, 0b10110, "fcvtn">;
4341def : Pat<(v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn))),
4342          (FCVTNv4i16 V128:$Rn)>;
4343def : Pat<(concat_vectors V64:$Rd,
4344                          (v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn)))),
4345          (FCVTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
4346def : Pat<(v2f32 (any_fpround (v2f64 V128:$Rn))), (FCVTNv2i32 V128:$Rn)>;
4347def : Pat<(v4f16 (any_fpround (v4f32 V128:$Rn))), (FCVTNv4i16 V128:$Rn)>;
4348def : Pat<(concat_vectors V64:$Rd, (v2f32 (any_fpround (v2f64 V128:$Rn)))),
4349          (FCVTNv4i32 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
4350defm FCVTPS : SIMDTwoVectorFPToInt<0,1,0b11010, "fcvtps",int_aarch64_neon_fcvtps>;
4351defm FCVTPU : SIMDTwoVectorFPToInt<1,1,0b11010, "fcvtpu",int_aarch64_neon_fcvtpu>;
4352defm FCVTXN : SIMDFPInexactCvtTwoVector<1, 0, 0b10110, "fcvtxn",
4353                                        int_aarch64_neon_fcvtxn>;
4354defm FCVTZS : SIMDTwoVectorFPToInt<0, 1, 0b11011, "fcvtzs", any_fp_to_sint>;
4355defm FCVTZU : SIMDTwoVectorFPToInt<1, 1, 0b11011, "fcvtzu", any_fp_to_uint>;
4356
4357// AArch64's FCVT instructions saturate when out of range.
4358multiclass SIMDTwoVectorFPToIntSatPats<SDNode to_int_sat, string INST> {
4359  def : Pat<(v4i16 (to_int_sat v4f16:$Rn, i16)),
4360            (!cast<Instruction>(INST # v4f16) v4f16:$Rn)>;
4361  def : Pat<(v8i16 (to_int_sat v8f16:$Rn, i16)),
4362            (!cast<Instruction>(INST # v8f16) v8f16:$Rn)>;
4363  def : Pat<(v2i32 (to_int_sat v2f32:$Rn, i32)),
4364            (!cast<Instruction>(INST # v2f32) v2f32:$Rn)>;
4365  def : Pat<(v4i32 (to_int_sat v4f32:$Rn, i32)),
4366            (!cast<Instruction>(INST # v4f32) v4f32:$Rn)>;
4367  def : Pat<(v2i64 (to_int_sat v2f64:$Rn, i64)),
4368            (!cast<Instruction>(INST # v2f64) v2f64:$Rn)>;
4369}
4370defm : SIMDTwoVectorFPToIntSatPats<fp_to_sint_sat, "FCVTZS">;
4371defm : SIMDTwoVectorFPToIntSatPats<fp_to_uint_sat, "FCVTZU">;
4372
4373def : Pat<(v4i16 (int_aarch64_neon_fcvtzs v4f16:$Rn)), (FCVTZSv4f16 $Rn)>;
4374def : Pat<(v8i16 (int_aarch64_neon_fcvtzs v8f16:$Rn)), (FCVTZSv8f16 $Rn)>;
4375def : Pat<(v2i32 (int_aarch64_neon_fcvtzs v2f32:$Rn)), (FCVTZSv2f32 $Rn)>;
4376def : Pat<(v4i32 (int_aarch64_neon_fcvtzs v4f32:$Rn)), (FCVTZSv4f32 $Rn)>;
4377def : Pat<(v2i64 (int_aarch64_neon_fcvtzs v2f64:$Rn)), (FCVTZSv2f64 $Rn)>;
4378
4379def : Pat<(v4i16 (int_aarch64_neon_fcvtzu v4f16:$Rn)), (FCVTZUv4f16 $Rn)>;
4380def : Pat<(v8i16 (int_aarch64_neon_fcvtzu v8f16:$Rn)), (FCVTZUv8f16 $Rn)>;
4381def : Pat<(v2i32 (int_aarch64_neon_fcvtzu v2f32:$Rn)), (FCVTZUv2f32 $Rn)>;
4382def : Pat<(v4i32 (int_aarch64_neon_fcvtzu v4f32:$Rn)), (FCVTZUv4f32 $Rn)>;
4383def : Pat<(v2i64 (int_aarch64_neon_fcvtzu v2f64:$Rn)), (FCVTZUv2f64 $Rn)>;
4384
4385defm FNEG   : SIMDTwoVectorFPNoException<1, 1, 0b01111, "fneg", fneg>;
4386defm FRECPE : SIMDTwoVectorFP<0, 1, 0b11101, "frecpe", int_aarch64_neon_frecpe>;
4387defm FRINTA : SIMDTwoVectorFP<1, 0, 0b11000, "frinta", any_fround>;
4388defm FRINTI : SIMDTwoVectorFP<1, 1, 0b11001, "frinti", any_fnearbyint>;
4389defm FRINTM : SIMDTwoVectorFP<0, 0, 0b11001, "frintm", any_ffloor>;
4390defm FRINTN : SIMDTwoVectorFP<0, 0, 0b11000, "frintn", any_froundeven>;
4391defm FRINTP : SIMDTwoVectorFP<0, 1, 0b11000, "frintp", any_fceil>;
4392defm FRINTX : SIMDTwoVectorFP<1, 0, 0b11001, "frintx", any_frint>;
4393defm FRINTZ : SIMDTwoVectorFP<0, 1, 0b11001, "frintz", any_ftrunc>;
4394
4395let Predicates = [HasFRInt3264] in {
4396  defm FRINT32Z : FRIntNNTVector<0, 0, "frint32z", int_aarch64_neon_frint32z>;
4397  defm FRINT64Z : FRIntNNTVector<0, 1, "frint64z", int_aarch64_neon_frint64z>;
4398  defm FRINT32X : FRIntNNTVector<1, 0, "frint32x", int_aarch64_neon_frint32x>;
4399  defm FRINT64X : FRIntNNTVector<1, 1, "frint64x", int_aarch64_neon_frint64x>;
4400} // HasFRInt3264
4401
4402defm FRSQRTE: SIMDTwoVectorFP<1, 1, 0b11101, "frsqrte", int_aarch64_neon_frsqrte>;
4403defm FSQRT  : SIMDTwoVectorFP<1, 1, 0b11111, "fsqrt", any_fsqrt>;
4404defm NEG    : SIMDTwoVectorBHSD<1, 0b01011, "neg",
4405                               UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
4406defm NOT    : SIMDTwoVectorB<1, 0b00, 0b00101, "not", vnot>;
4407// Aliases for MVN -> NOT.
4408def : InstAlias<"mvn{ $Vd.8b, $Vn.8b|.8b $Vd, $Vn}",
4409                (NOTv8i8 V64:$Vd, V64:$Vn)>;
4410def : InstAlias<"mvn{ $Vd.16b, $Vn.16b|.16b $Vd, $Vn}",
4411                (NOTv16i8 V128:$Vd, V128:$Vn)>;
4412
4413def : Pat<(vnot (v4i16 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
4414def : Pat<(vnot (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
4415def : Pat<(vnot (v2i32 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
4416def : Pat<(vnot (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
4417def : Pat<(vnot (v1i64 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
4418def : Pat<(vnot (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
4419
4420defm RBIT   : SIMDTwoVectorB<1, 0b01, 0b00101, "rbit", bitreverse>;
4421defm REV16  : SIMDTwoVectorB<0, 0b00, 0b00001, "rev16", AArch64rev16>;
4422defm REV32  : SIMDTwoVectorBH<1, 0b00000, "rev32", AArch64rev32>;
4423defm REV64  : SIMDTwoVectorBHS<0, 0b00000, "rev64", AArch64rev64>;
4424defm SADALP : SIMDLongTwoVectorTied<0, 0b00110, "sadalp",
4425       BinOpFrag<(add node:$LHS, (AArch64saddlp node:$RHS))> >;
4426defm SADDLP : SIMDLongTwoVector<0, 0b00010, "saddlp", AArch64saddlp>;
4427defm SCVTF  : SIMDTwoVectorIntToFP<0, 0, 0b11101, "scvtf", any_sint_to_fp>;
4428defm SHLL   : SIMDVectorLShiftLongBySizeBHS;
4429defm SQABS  : SIMDTwoVectorBHSD<0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
4430defm SQNEG  : SIMDTwoVectorBHSD<1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
4431defm SQXTN  : SIMDMixedTwoVector<0, 0b10100, "sqxtn", int_aarch64_neon_sqxtn>;
4432defm SQXTUN : SIMDMixedTwoVector<1, 0b10010, "sqxtun", int_aarch64_neon_sqxtun>;
4433defm SUQADD : SIMDTwoVectorBHSDTied<0, 0b00011, "suqadd",int_aarch64_neon_suqadd>;
4434defm UADALP : SIMDLongTwoVectorTied<1, 0b00110, "uadalp",
4435       BinOpFrag<(add node:$LHS, (AArch64uaddlp node:$RHS))> >;
4436defm UADDLP : SIMDLongTwoVector<1, 0b00010, "uaddlp", AArch64uaddlp>;
4437defm UCVTF  : SIMDTwoVectorIntToFP<1, 0, 0b11101, "ucvtf", any_uint_to_fp>;
4438defm UQXTN  : SIMDMixedTwoVector<1, 0b10100, "uqxtn", int_aarch64_neon_uqxtn>;
4439defm URECPE : SIMDTwoVectorS<0, 1, 0b11100, "urecpe", int_aarch64_neon_urecpe>;
4440defm URSQRTE: SIMDTwoVectorS<1, 1, 0b11100, "ursqrte", int_aarch64_neon_ursqrte>;
4441defm USQADD : SIMDTwoVectorBHSDTied<1, 0b00011, "usqadd",int_aarch64_neon_usqadd>;
4442defm XTN    : SIMDMixedTwoVector<0, 0b10010, "xtn", trunc>;
4443
4444def : Pat<(v4f16  (AArch64rev32 V64:$Rn)),  (REV32v4i16 V64:$Rn)>;
4445def : Pat<(v4f16  (AArch64rev64 V64:$Rn)),  (REV64v4i16 V64:$Rn)>;
4446def : Pat<(v4bf16 (AArch64rev32 V64:$Rn)),  (REV32v4i16 V64:$Rn)>;
4447def : Pat<(v4bf16 (AArch64rev64 V64:$Rn)),  (REV64v4i16 V64:$Rn)>;
4448def : Pat<(v8f16  (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>;
4449def : Pat<(v8f16  (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>;
4450def : Pat<(v8bf16 (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>;
4451def : Pat<(v8bf16 (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>;
4452def : Pat<(v2f32  (AArch64rev64 V64:$Rn)),  (REV64v2i32 V64:$Rn)>;
4453def : Pat<(v4f32  (AArch64rev64 V128:$Rn)), (REV64v4i32 V128:$Rn)>;
4454
4455// Patterns for vector long shift (by element width). These need to match all
4456// three of zext, sext and anyext so it's easier to pull the patterns out of the
4457// definition.
4458multiclass SIMDVectorLShiftLongBySizeBHSPats<SDPatternOperator ext> {
4459  def : Pat<(AArch64vshl (v8i16 (ext (v8i8 V64:$Rn))), (i32 8)),
4460            (SHLLv8i8 V64:$Rn)>;
4461  def : Pat<(AArch64vshl (v8i16 (ext (extract_high_v16i8 (v16i8 V128:$Rn)))), (i32 8)),
4462            (SHLLv16i8 V128:$Rn)>;
4463  def : Pat<(AArch64vshl (v4i32 (ext (v4i16 V64:$Rn))), (i32 16)),
4464            (SHLLv4i16 V64:$Rn)>;
4465  def : Pat<(AArch64vshl (v4i32 (ext (extract_high_v8i16 (v8i16 V128:$Rn)))), (i32 16)),
4466            (SHLLv8i16 V128:$Rn)>;
4467  def : Pat<(AArch64vshl (v2i64 (ext (v2i32 V64:$Rn))), (i32 32)),
4468            (SHLLv2i32 V64:$Rn)>;
4469  def : Pat<(AArch64vshl (v2i64 (ext (extract_high_v4i32 (v4i32 V128:$Rn)))), (i32 32)),
4470            (SHLLv4i32 V128:$Rn)>;
4471}
4472
4473defm : SIMDVectorLShiftLongBySizeBHSPats<anyext>;
4474defm : SIMDVectorLShiftLongBySizeBHSPats<zext>;
4475defm : SIMDVectorLShiftLongBySizeBHSPats<sext>;
4476
4477// Constant vector values, used in the S/UQXTN patterns below.
4478def VImmFF:   PatLeaf<(AArch64NvCast (v2i64 (AArch64movi_edit (i32 85))))>;
4479def VImmFFFF: PatLeaf<(AArch64NvCast (v2i64 (AArch64movi_edit (i32 51))))>;
4480def VImm7F:   PatLeaf<(AArch64movi_shift (i32 127), (i32 0))>;
4481def VImm80:   PatLeaf<(AArch64mvni_shift (i32 127), (i32 0))>;
4482def VImm7FFF: PatLeaf<(AArch64movi_msl (i32 127), (i32 264))>;
4483def VImm8000: PatLeaf<(AArch64mvni_msl (i32 127), (i32 264))>;
4484
4485// trunc(umin(X, 255)) -> UQXTRN v8i8
4486def : Pat<(v8i8 (trunc (umin (v8i16 V128:$Vn), (v8i16 VImmFF)))),
4487          (UQXTNv8i8 V128:$Vn)>;
4488// trunc(umin(X, 65535)) -> UQXTRN v4i16
4489def : Pat<(v4i16 (trunc (umin (v4i32 V128:$Vn), (v4i32 VImmFFFF)))),
4490          (UQXTNv4i16 V128:$Vn)>;
4491// trunc(smin(smax(X, -128), 128)) -> SQXTRN
4492//  with reversed min/max
4493def : Pat<(v8i8 (trunc (smin (smax (v8i16 V128:$Vn), (v8i16 VImm80)),
4494                             (v8i16 VImm7F)))),
4495          (SQXTNv8i8 V128:$Vn)>;
4496def : Pat<(v8i8 (trunc (smax (smin (v8i16 V128:$Vn), (v8i16 VImm7F)),
4497                             (v8i16 VImm80)))),
4498          (SQXTNv8i8 V128:$Vn)>;
4499// trunc(smin(smax(X, -32768), 32767)) -> SQXTRN
4500//  with reversed min/max
4501def : Pat<(v4i16 (trunc (smin (smax (v4i32 V128:$Vn), (v4i32 VImm8000)),
4502                              (v4i32 VImm7FFF)))),
4503          (SQXTNv4i16 V128:$Vn)>;
4504def : Pat<(v4i16 (trunc (smax (smin (v4i32 V128:$Vn), (v4i32 VImm7FFF)),
4505                              (v4i32 VImm8000)))),
4506          (SQXTNv4i16 V128:$Vn)>;
4507
4508// concat_vectors(Vd, trunc(smin(smax Vm, -128), 127) ~> SQXTN2(Vd, Vn)
4509// with reversed min/max
4510def : Pat<(v16i8 (concat_vectors
4511                 (v8i8 V64:$Vd),
4512                 (v8i8 (trunc (smin (smax (v8i16 V128:$Vn), (v8i16 VImm80)),
4513                                          (v8i16 VImm7F)))))),
4514          (SQXTNv16i8 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn)>;
4515def : Pat<(v16i8 (concat_vectors
4516                 (v8i8 V64:$Vd),
4517                 (v8i8 (trunc (smax (smin (v8i16 V128:$Vn), (v8i16 VImm7F)),
4518                                          (v8i16 VImm80)))))),
4519          (SQXTNv16i8 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn)>;
4520
4521// concat_vectors(Vd, trunc(smin(smax Vm, -32768), 32767) ~> SQXTN2(Vd, Vn)
4522// with reversed min/max
4523def : Pat<(v8i16 (concat_vectors
4524                 (v4i16 V64:$Vd),
4525                 (v4i16 (trunc (smin (smax (v4i32 V128:$Vn), (v4i32 VImm8000)),
4526                                           (v4i32 VImm7FFF)))))),
4527          (SQXTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn)>;
4528def : Pat<(v8i16 (concat_vectors
4529                 (v4i16 V64:$Vd),
4530                 (v4i16 (trunc (smax (smin (v4i32 V128:$Vn), (v4i32 VImm7FFF)),
4531                                           (v4i32 VImm8000)))))),
4532          (SQXTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn)>;
4533
4534//===----------------------------------------------------------------------===//
4535// Advanced SIMD three vector instructions.
4536//===----------------------------------------------------------------------===//
4537
4538defm ADD     : SIMDThreeSameVector<0, 0b10000, "add", add>;
4539defm ADDP    : SIMDThreeSameVector<0, 0b10111, "addp", AArch64addp>;
4540defm CMEQ    : SIMDThreeSameVector<1, 0b10001, "cmeq", AArch64cmeq>;
4541defm CMGE    : SIMDThreeSameVector<0, 0b00111, "cmge", AArch64cmge>;
4542defm CMGT    : SIMDThreeSameVector<0, 0b00110, "cmgt", AArch64cmgt>;
4543defm CMHI    : SIMDThreeSameVector<1, 0b00110, "cmhi", AArch64cmhi>;
4544defm CMHS    : SIMDThreeSameVector<1, 0b00111, "cmhs", AArch64cmhs>;
4545defm CMTST   : SIMDThreeSameVector<0, 0b10001, "cmtst", AArch64cmtst>;
4546foreach VT = [ v8i8, v16i8, v4i16, v8i16, v2i32, v4i32, v2i64 ] in {
4547def : Pat<(vnot (AArch64cmeqz VT:$Rn)), (!cast<Instruction>("CMTST"#VT) VT:$Rn, VT:$Rn)>;
4548}
4549defm FABD    : SIMDThreeSameVectorFP<1,1,0b010,"fabd", int_aarch64_neon_fabd>;
4550let Predicates = [HasNEON] in {
4551foreach VT = [ v2f32, v4f32, v2f64 ] in
4552def : Pat<(fabs (fsub VT:$Rn, VT:$Rm)), (!cast<Instruction>("FABD"#VT) VT:$Rn, VT:$Rm)>;
4553}
4554let Predicates = [HasNEON, HasFullFP16] in {
4555foreach VT = [ v4f16, v8f16 ] in
4556def : Pat<(fabs (fsub VT:$Rn, VT:$Rm)), (!cast<Instruction>("FABD"#VT) VT:$Rn, VT:$Rm)>;
4557}
4558defm FACGE   : SIMDThreeSameVectorFPCmp<1,0,0b101,"facge",int_aarch64_neon_facge>;
4559defm FACGT   : SIMDThreeSameVectorFPCmp<1,1,0b101,"facgt",int_aarch64_neon_facgt>;
4560defm FADDP   : SIMDThreeSameVectorFP<1,0,0b010,"faddp", AArch64faddp>;
4561defm FADD    : SIMDThreeSameVectorFP<0,0,0b010,"fadd", any_fadd>;
4562defm FCMEQ   : SIMDThreeSameVectorFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
4563defm FCMGE   : SIMDThreeSameVectorFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
4564defm FCMGT   : SIMDThreeSameVectorFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
4565defm FDIV    : SIMDThreeSameVectorFP<1,0,0b111,"fdiv", any_fdiv>;
4566defm FMAXNMP : SIMDThreeSameVectorFP<1,0,0b000,"fmaxnmp", int_aarch64_neon_fmaxnmp>;
4567defm FMAXNM  : SIMDThreeSameVectorFP<0,0,0b000,"fmaxnm", any_fmaxnum>;
4568defm FMAXP   : SIMDThreeSameVectorFP<1,0,0b110,"fmaxp", int_aarch64_neon_fmaxp>;
4569defm FMAX    : SIMDThreeSameVectorFP<0,0,0b110,"fmax", any_fmaximum>;
4570defm FMINNMP : SIMDThreeSameVectorFP<1,1,0b000,"fminnmp", int_aarch64_neon_fminnmp>;
4571defm FMINNM  : SIMDThreeSameVectorFP<0,1,0b000,"fminnm", any_fminnum>;
4572defm FMINP   : SIMDThreeSameVectorFP<1,1,0b110,"fminp", int_aarch64_neon_fminp>;
4573defm FMIN    : SIMDThreeSameVectorFP<0,1,0b110,"fmin", any_fminimum>;
4574
4575// NOTE: The operands of the PatFrag are reordered on FMLA/FMLS because the
4576// instruction expects the addend first, while the fma intrinsic puts it last.
4577defm FMLA     : SIMDThreeSameVectorFPTied<0, 0, 0b001, "fmla",
4578            TriOpFrag<(any_fma node:$RHS, node:$MHS, node:$LHS)> >;
4579defm FMLS     : SIMDThreeSameVectorFPTied<0, 1, 0b001, "fmls",
4580            TriOpFrag<(any_fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
4581
4582defm FMULX    : SIMDThreeSameVectorFP<0,0,0b011,"fmulx", int_aarch64_neon_fmulx>;
4583defm FMUL     : SIMDThreeSameVectorFP<1,0,0b011,"fmul", any_fmul>;
4584defm FRECPS   : SIMDThreeSameVectorFP<0,0,0b111,"frecps", int_aarch64_neon_frecps>;
4585defm FRSQRTS  : SIMDThreeSameVectorFP<0,1,0b111,"frsqrts", int_aarch64_neon_frsqrts>;
4586defm FSUB     : SIMDThreeSameVectorFP<0,1,0b010,"fsub", any_fsub>;
4587
4588// MLA and MLS are generated in MachineCombine
4589defm MLA      : SIMDThreeSameVectorBHSTied<0, 0b10010, "mla", null_frag>;
4590defm MLS      : SIMDThreeSameVectorBHSTied<1, 0b10010, "mls", null_frag>;
4591
4592defm MUL      : SIMDThreeSameVectorBHS<0, 0b10011, "mul", mul>;
4593defm PMUL     : SIMDThreeSameVectorB<1, 0b10011, "pmul", int_aarch64_neon_pmul>;
4594defm SABA     : SIMDThreeSameVectorBHSTied<0, 0b01111, "saba",
4595      TriOpFrag<(add node:$LHS, (AArch64sabd node:$MHS, node:$RHS))> >;
4596defm SABD     : SIMDThreeSameVectorBHS<0,0b01110,"sabd", AArch64sabd>;
4597defm SHADD    : SIMDThreeSameVectorBHS<0,0b00000,"shadd", avgfloors>;
4598defm SHSUB    : SIMDThreeSameVectorBHS<0,0b00100,"shsub", int_aarch64_neon_shsub>;
4599defm SMAXP    : SIMDThreeSameVectorBHS<0,0b10100,"smaxp", int_aarch64_neon_smaxp>;
4600defm SMAX     : SIMDThreeSameVectorBHS<0,0b01100,"smax", smax>;
4601defm SMINP    : SIMDThreeSameVectorBHS<0,0b10101,"sminp", int_aarch64_neon_sminp>;
4602defm SMIN     : SIMDThreeSameVectorBHS<0,0b01101,"smin", smin>;
4603defm SQADD    : SIMDThreeSameVector<0,0b00001,"sqadd", int_aarch64_neon_sqadd>;
4604defm SQDMULH  : SIMDThreeSameVectorHS<0,0b10110,"sqdmulh",int_aarch64_neon_sqdmulh>;
4605defm SQRDMULH : SIMDThreeSameVectorHS<1,0b10110,"sqrdmulh",int_aarch64_neon_sqrdmulh>;
4606defm SQRSHL   : SIMDThreeSameVector<0,0b01011,"sqrshl", int_aarch64_neon_sqrshl>;
4607defm SQSHL    : SIMDThreeSameVector<0,0b01001,"sqshl", int_aarch64_neon_sqshl>;
4608defm SQSUB    : SIMDThreeSameVector<0,0b00101,"sqsub", int_aarch64_neon_sqsub>;
4609defm SRHADD   : SIMDThreeSameVectorBHS<0,0b00010,"srhadd", avgceils>;
4610defm SRSHL    : SIMDThreeSameVector<0,0b01010,"srshl", int_aarch64_neon_srshl>;
4611defm SSHL     : SIMDThreeSameVector<0,0b01000,"sshl", int_aarch64_neon_sshl>;
4612defm SUB      : SIMDThreeSameVector<1,0b10000,"sub", sub>;
4613defm UABA     : SIMDThreeSameVectorBHSTied<1, 0b01111, "uaba",
4614      TriOpFrag<(add node:$LHS, (AArch64uabd node:$MHS, node:$RHS))> >;
4615defm UABD     : SIMDThreeSameVectorBHS<1,0b01110,"uabd", AArch64uabd>;
4616defm UHADD    : SIMDThreeSameVectorBHS<1,0b00000,"uhadd", avgflooru>;
4617defm UHSUB    : SIMDThreeSameVectorBHS<1,0b00100,"uhsub", int_aarch64_neon_uhsub>;
4618defm UMAXP    : SIMDThreeSameVectorBHS<1,0b10100,"umaxp", int_aarch64_neon_umaxp>;
4619defm UMAX     : SIMDThreeSameVectorBHS<1,0b01100,"umax", umax>;
4620defm UMINP    : SIMDThreeSameVectorBHS<1,0b10101,"uminp", int_aarch64_neon_uminp>;
4621defm UMIN     : SIMDThreeSameVectorBHS<1,0b01101,"umin", umin>;
4622defm UQADD    : SIMDThreeSameVector<1,0b00001,"uqadd", int_aarch64_neon_uqadd>;
4623defm UQRSHL   : SIMDThreeSameVector<1,0b01011,"uqrshl", int_aarch64_neon_uqrshl>;
4624defm UQSHL    : SIMDThreeSameVector<1,0b01001,"uqshl", int_aarch64_neon_uqshl>;
4625defm UQSUB    : SIMDThreeSameVector<1,0b00101,"uqsub", int_aarch64_neon_uqsub>;
4626defm URHADD   : SIMDThreeSameVectorBHS<1,0b00010,"urhadd", avgceilu>;
4627defm URSHL    : SIMDThreeSameVector<1,0b01010,"urshl", int_aarch64_neon_urshl>;
4628defm USHL     : SIMDThreeSameVector<1,0b01000,"ushl", int_aarch64_neon_ushl>;
4629defm SQRDMLAH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10000,"sqrdmlah",
4630                                                  int_aarch64_neon_sqrdmlah>;
4631defm SQRDMLSH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10001,"sqrdmlsh",
4632                                                    int_aarch64_neon_sqrdmlsh>;
4633
4634// Extra saturate patterns, other than the intrinsics matches above
4635defm : SIMDThreeSameVectorExtraPatterns<"SQADD", saddsat>;
4636defm : SIMDThreeSameVectorExtraPatterns<"UQADD", uaddsat>;
4637defm : SIMDThreeSameVectorExtraPatterns<"SQSUB", ssubsat>;
4638defm : SIMDThreeSameVectorExtraPatterns<"UQSUB", usubsat>;
4639
4640defm AND : SIMDLogicalThreeVector<0, 0b00, "and", and>;
4641defm BIC : SIMDLogicalThreeVector<0, 0b01, "bic",
4642                                  BinOpFrag<(and node:$LHS, (vnot node:$RHS))> >;
4643defm EOR : SIMDLogicalThreeVector<1, 0b00, "eor", xor>;
4644defm ORN : SIMDLogicalThreeVector<0, 0b11, "orn",
4645                                  BinOpFrag<(or node:$LHS, (vnot node:$RHS))> >;
4646defm ORR : SIMDLogicalThreeVector<0, 0b10, "orr", or>;
4647
4648// Pseudo bitwise select pattern BSP.
4649// It is expanded into BSL/BIT/BIF after register allocation.
4650defm BSP : SIMDLogicalThreeVectorPseudo<TriOpFrag<(or (and node:$LHS, node:$MHS),
4651                                                      (and (vnot node:$LHS), node:$RHS))>>;
4652defm BSL : SIMDLogicalThreeVectorTied<1, 0b01, "bsl">;
4653defm BIT : SIMDLogicalThreeVectorTied<1, 0b10, "bit", AArch64bit>;
4654defm BIF : SIMDLogicalThreeVectorTied<1, 0b11, "bif">;
4655
4656def : Pat<(AArch64bsp (v8i8 V64:$Rd), V64:$Rn, V64:$Rm),
4657          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
4658def : Pat<(AArch64bsp (v4i16 V64:$Rd), V64:$Rn, V64:$Rm),
4659          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
4660def : Pat<(AArch64bsp (v2i32 V64:$Rd), V64:$Rn, V64:$Rm),
4661          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
4662def : Pat<(AArch64bsp (v1i64 V64:$Rd), V64:$Rn, V64:$Rm),
4663          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
4664
4665def : Pat<(AArch64bsp (v16i8 V128:$Rd), V128:$Rn, V128:$Rm),
4666          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
4667def : Pat<(AArch64bsp (v8i16 V128:$Rd), V128:$Rn, V128:$Rm),
4668          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
4669def : Pat<(AArch64bsp (v4i32 V128:$Rd), V128:$Rn, V128:$Rm),
4670          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
4671def : Pat<(AArch64bsp (v2i64 V128:$Rd), V128:$Rn, V128:$Rm),
4672          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
4673
4674def : InstAlias<"mov{\t$dst.16b, $src.16b|.16b\t$dst, $src}",
4675                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 1>;
4676def : InstAlias<"mov{\t$dst.8h, $src.8h|.8h\t$dst, $src}",
4677                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
4678def : InstAlias<"mov{\t$dst.4s, $src.4s|.4s\t$dst, $src}",
4679                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
4680def : InstAlias<"mov{\t$dst.2d, $src.2d|.2d\t$dst, $src}",
4681                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
4682
4683def : InstAlias<"mov{\t$dst.8b, $src.8b|.8b\t$dst, $src}",
4684                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 1>;
4685def : InstAlias<"mov{\t$dst.4h, $src.4h|.4h\t$dst, $src}",
4686                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
4687def : InstAlias<"mov{\t$dst.2s, $src.2s|.2s\t$dst, $src}",
4688                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
4689def : InstAlias<"mov{\t$dst.1d, $src.1d|.1d\t$dst, $src}",
4690                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
4691
4692def : InstAlias<"{cmls\t$dst.8b, $src1.8b, $src2.8b" #
4693                "|cmls.8b\t$dst, $src1, $src2}",
4694                (CMHSv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
4695def : InstAlias<"{cmls\t$dst.16b, $src1.16b, $src2.16b" #
4696                "|cmls.16b\t$dst, $src1, $src2}",
4697                (CMHSv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
4698def : InstAlias<"{cmls\t$dst.4h, $src1.4h, $src2.4h" #
4699                "|cmls.4h\t$dst, $src1, $src2}",
4700                (CMHSv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
4701def : InstAlias<"{cmls\t$dst.8h, $src1.8h, $src2.8h" #
4702                "|cmls.8h\t$dst, $src1, $src2}",
4703                (CMHSv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
4704def : InstAlias<"{cmls\t$dst.2s, $src1.2s, $src2.2s" #
4705                "|cmls.2s\t$dst, $src1, $src2}",
4706                (CMHSv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
4707def : InstAlias<"{cmls\t$dst.4s, $src1.4s, $src2.4s" #
4708                "|cmls.4s\t$dst, $src1, $src2}",
4709                (CMHSv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
4710def : InstAlias<"{cmls\t$dst.2d, $src1.2d, $src2.2d" #
4711                "|cmls.2d\t$dst, $src1, $src2}",
4712                (CMHSv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
4713
4714def : InstAlias<"{cmlo\t$dst.8b, $src1.8b, $src2.8b" #
4715                "|cmlo.8b\t$dst, $src1, $src2}",
4716                (CMHIv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
4717def : InstAlias<"{cmlo\t$dst.16b, $src1.16b, $src2.16b" #
4718                "|cmlo.16b\t$dst, $src1, $src2}",
4719                (CMHIv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
4720def : InstAlias<"{cmlo\t$dst.4h, $src1.4h, $src2.4h" #
4721                "|cmlo.4h\t$dst, $src1, $src2}",
4722                (CMHIv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
4723def : InstAlias<"{cmlo\t$dst.8h, $src1.8h, $src2.8h" #
4724                "|cmlo.8h\t$dst, $src1, $src2}",
4725                (CMHIv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
4726def : InstAlias<"{cmlo\t$dst.2s, $src1.2s, $src2.2s" #
4727                "|cmlo.2s\t$dst, $src1, $src2}",
4728                (CMHIv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
4729def : InstAlias<"{cmlo\t$dst.4s, $src1.4s, $src2.4s" #
4730                "|cmlo.4s\t$dst, $src1, $src2}",
4731                (CMHIv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
4732def : InstAlias<"{cmlo\t$dst.2d, $src1.2d, $src2.2d" #
4733                "|cmlo.2d\t$dst, $src1, $src2}",
4734                (CMHIv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
4735
4736def : InstAlias<"{cmle\t$dst.8b, $src1.8b, $src2.8b" #
4737                "|cmle.8b\t$dst, $src1, $src2}",
4738                (CMGEv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
4739def : InstAlias<"{cmle\t$dst.16b, $src1.16b, $src2.16b" #
4740                "|cmle.16b\t$dst, $src1, $src2}",
4741                (CMGEv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
4742def : InstAlias<"{cmle\t$dst.4h, $src1.4h, $src2.4h" #
4743                "|cmle.4h\t$dst, $src1, $src2}",
4744                (CMGEv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
4745def : InstAlias<"{cmle\t$dst.8h, $src1.8h, $src2.8h" #
4746                "|cmle.8h\t$dst, $src1, $src2}",
4747                (CMGEv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
4748def : InstAlias<"{cmle\t$dst.2s, $src1.2s, $src2.2s" #
4749                "|cmle.2s\t$dst, $src1, $src2}",
4750                (CMGEv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
4751def : InstAlias<"{cmle\t$dst.4s, $src1.4s, $src2.4s" #
4752                "|cmle.4s\t$dst, $src1, $src2}",
4753                (CMGEv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
4754def : InstAlias<"{cmle\t$dst.2d, $src1.2d, $src2.2d" #
4755                "|cmle.2d\t$dst, $src1, $src2}",
4756                (CMGEv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
4757
4758def : InstAlias<"{cmlt\t$dst.8b, $src1.8b, $src2.8b" #
4759                "|cmlt.8b\t$dst, $src1, $src2}",
4760                (CMGTv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
4761def : InstAlias<"{cmlt\t$dst.16b, $src1.16b, $src2.16b" #
4762                "|cmlt.16b\t$dst, $src1, $src2}",
4763                (CMGTv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
4764def : InstAlias<"{cmlt\t$dst.4h, $src1.4h, $src2.4h" #
4765                "|cmlt.4h\t$dst, $src1, $src2}",
4766                (CMGTv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
4767def : InstAlias<"{cmlt\t$dst.8h, $src1.8h, $src2.8h" #
4768                "|cmlt.8h\t$dst, $src1, $src2}",
4769                (CMGTv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
4770def : InstAlias<"{cmlt\t$dst.2s, $src1.2s, $src2.2s" #
4771                "|cmlt.2s\t$dst, $src1, $src2}",
4772                (CMGTv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
4773def : InstAlias<"{cmlt\t$dst.4s, $src1.4s, $src2.4s" #
4774                "|cmlt.4s\t$dst, $src1, $src2}",
4775                (CMGTv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
4776def : InstAlias<"{cmlt\t$dst.2d, $src1.2d, $src2.2d" #
4777                "|cmlt.2d\t$dst, $src1, $src2}",
4778                (CMGTv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
4779
4780let Predicates = [HasNEON, HasFullFP16] in {
4781def : InstAlias<"{fcmle\t$dst.4h, $src1.4h, $src2.4h" #
4782                "|fcmle.4h\t$dst, $src1, $src2}",
4783                (FCMGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
4784def : InstAlias<"{fcmle\t$dst.8h, $src1.8h, $src2.8h" #
4785                "|fcmle.8h\t$dst, $src1, $src2}",
4786                (FCMGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
4787}
4788def : InstAlias<"{fcmle\t$dst.2s, $src1.2s, $src2.2s" #
4789                "|fcmle.2s\t$dst, $src1, $src2}",
4790                (FCMGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
4791def : InstAlias<"{fcmle\t$dst.4s, $src1.4s, $src2.4s" #
4792                "|fcmle.4s\t$dst, $src1, $src2}",
4793                (FCMGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
4794def : InstAlias<"{fcmle\t$dst.2d, $src1.2d, $src2.2d" #
4795                "|fcmle.2d\t$dst, $src1, $src2}",
4796                (FCMGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
4797
4798let Predicates = [HasNEON, HasFullFP16] in {
4799def : InstAlias<"{fcmlt\t$dst.4h, $src1.4h, $src2.4h" #
4800                "|fcmlt.4h\t$dst, $src1, $src2}",
4801                (FCMGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
4802def : InstAlias<"{fcmlt\t$dst.8h, $src1.8h, $src2.8h" #
4803                "|fcmlt.8h\t$dst, $src1, $src2}",
4804                (FCMGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
4805}
4806def : InstAlias<"{fcmlt\t$dst.2s, $src1.2s, $src2.2s" #
4807                "|fcmlt.2s\t$dst, $src1, $src2}",
4808                (FCMGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
4809def : InstAlias<"{fcmlt\t$dst.4s, $src1.4s, $src2.4s" #
4810                "|fcmlt.4s\t$dst, $src1, $src2}",
4811                (FCMGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
4812def : InstAlias<"{fcmlt\t$dst.2d, $src1.2d, $src2.2d" #
4813                "|fcmlt.2d\t$dst, $src1, $src2}",
4814                (FCMGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
4815
4816let Predicates = [HasNEON, HasFullFP16] in {
4817def : InstAlias<"{facle\t$dst.4h, $src1.4h, $src2.4h" #
4818                "|facle.4h\t$dst, $src1, $src2}",
4819                (FACGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
4820def : InstAlias<"{facle\t$dst.8h, $src1.8h, $src2.8h" #
4821                "|facle.8h\t$dst, $src1, $src2}",
4822                (FACGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
4823}
4824def : InstAlias<"{facle\t$dst.2s, $src1.2s, $src2.2s" #
4825                "|facle.2s\t$dst, $src1, $src2}",
4826                (FACGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
4827def : InstAlias<"{facle\t$dst.4s, $src1.4s, $src2.4s" #
4828                "|facle.4s\t$dst, $src1, $src2}",
4829                (FACGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
4830def : InstAlias<"{facle\t$dst.2d, $src1.2d, $src2.2d" #
4831                "|facle.2d\t$dst, $src1, $src2}",
4832                (FACGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
4833
4834let Predicates = [HasNEON, HasFullFP16] in {
4835def : InstAlias<"{faclt\t$dst.4h, $src1.4h, $src2.4h" #
4836                "|faclt.4h\t$dst, $src1, $src2}",
4837                (FACGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
4838def : InstAlias<"{faclt\t$dst.8h, $src1.8h, $src2.8h" #
4839                "|faclt.8h\t$dst, $src1, $src2}",
4840                (FACGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
4841}
4842def : InstAlias<"{faclt\t$dst.2s, $src1.2s, $src2.2s" #
4843                "|faclt.2s\t$dst, $src1, $src2}",
4844                (FACGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
4845def : InstAlias<"{faclt\t$dst.4s, $src1.4s, $src2.4s" #
4846                "|faclt.4s\t$dst, $src1, $src2}",
4847                (FACGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
4848def : InstAlias<"{faclt\t$dst.2d, $src1.2d, $src2.2d" #
4849                "|faclt.2d\t$dst, $src1, $src2}",
4850                (FACGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
4851
4852//===----------------------------------------------------------------------===//
4853// Advanced SIMD three scalar instructions.
4854//===----------------------------------------------------------------------===//
4855
4856defm ADD      : SIMDThreeScalarD<0, 0b10000, "add", add>;
4857defm CMEQ     : SIMDThreeScalarD<1, 0b10001, "cmeq", AArch64cmeq>;
4858defm CMGE     : SIMDThreeScalarD<0, 0b00111, "cmge", AArch64cmge>;
4859defm CMGT     : SIMDThreeScalarD<0, 0b00110, "cmgt", AArch64cmgt>;
4860defm CMHI     : SIMDThreeScalarD<1, 0b00110, "cmhi", AArch64cmhi>;
4861defm CMHS     : SIMDThreeScalarD<1, 0b00111, "cmhs", AArch64cmhs>;
4862defm CMTST    : SIMDThreeScalarD<0, 0b10001, "cmtst", AArch64cmtst>;
4863defm FABD     : SIMDFPThreeScalar<1, 1, 0b010, "fabd", int_aarch64_sisd_fabd>;
4864def : Pat<(v1f64 (int_aarch64_neon_fabd (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
4865          (FABD64 FPR64:$Rn, FPR64:$Rm)>;
4866let Predicates = [HasNEON, HasFullFP16] in {
4867def : Pat<(fabs (fsub f16:$Rn, f16:$Rm)), (FABD16 f16:$Rn, f16:$Rm)>;
4868}
4869let Predicates = [HasNEON] in {
4870def : Pat<(fabs (fsub f32:$Rn, f32:$Rm)), (FABD32 f32:$Rn, f32:$Rm)>;
4871def : Pat<(fabs (fsub f64:$Rn, f64:$Rm)), (FABD64 f64:$Rn, f64:$Rm)>;
4872}
4873defm FACGE    : SIMDThreeScalarFPCmp<1, 0, 0b101, "facge",
4874                                     int_aarch64_neon_facge>;
4875defm FACGT    : SIMDThreeScalarFPCmp<1, 1, 0b101, "facgt",
4876                                     int_aarch64_neon_facgt>;
4877defm FCMEQ    : SIMDThreeScalarFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
4878defm FCMGE    : SIMDThreeScalarFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
4879defm FCMGT    : SIMDThreeScalarFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
4880defm FMULX    : SIMDFPThreeScalar<0, 0, 0b011, "fmulx", int_aarch64_neon_fmulx, HasNEONorSME>;
4881defm FRECPS   : SIMDFPThreeScalar<0, 0, 0b111, "frecps", int_aarch64_neon_frecps, HasNEONorSME>;
4882defm FRSQRTS  : SIMDFPThreeScalar<0, 1, 0b111, "frsqrts", int_aarch64_neon_frsqrts, HasNEONorSME>;
4883defm SQADD    : SIMDThreeScalarBHSD<0, 0b00001, "sqadd", int_aarch64_neon_sqadd>;
4884defm SQDMULH  : SIMDThreeScalarHS<  0, 0b10110, "sqdmulh", int_aarch64_neon_sqdmulh>;
4885defm SQRDMULH : SIMDThreeScalarHS<  1, 0b10110, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
4886defm SQRSHL   : SIMDThreeScalarBHSD<0, 0b01011, "sqrshl",int_aarch64_neon_sqrshl>;
4887defm SQSHL    : SIMDThreeScalarBHSD<0, 0b01001, "sqshl", int_aarch64_neon_sqshl>;
4888defm SQSUB    : SIMDThreeScalarBHSD<0, 0b00101, "sqsub", int_aarch64_neon_sqsub>;
4889defm SRSHL    : SIMDThreeScalarD<   0, 0b01010, "srshl", int_aarch64_neon_srshl>;
4890defm SSHL     : SIMDThreeScalarD<   0, 0b01000, "sshl", int_aarch64_neon_sshl>;
4891defm SUB      : SIMDThreeScalarD<   1, 0b10000, "sub", sub>;
4892defm UQADD    : SIMDThreeScalarBHSD<1, 0b00001, "uqadd", int_aarch64_neon_uqadd>;
4893defm UQRSHL   : SIMDThreeScalarBHSD<1, 0b01011, "uqrshl",int_aarch64_neon_uqrshl>;
4894defm UQSHL    : SIMDThreeScalarBHSD<1, 0b01001, "uqshl", int_aarch64_neon_uqshl>;
4895defm UQSUB    : SIMDThreeScalarBHSD<1, 0b00101, "uqsub", int_aarch64_neon_uqsub>;
4896defm URSHL    : SIMDThreeScalarD<   1, 0b01010, "urshl", int_aarch64_neon_urshl>;
4897defm USHL     : SIMDThreeScalarD<   1, 0b01000, "ushl", int_aarch64_neon_ushl>;
4898let Predicates = [HasRDM] in {
4899  defm SQRDMLAH : SIMDThreeScalarHSTied<1, 0, 0b10000, "sqrdmlah">;
4900  defm SQRDMLSH : SIMDThreeScalarHSTied<1, 0, 0b10001, "sqrdmlsh">;
4901  def : Pat<(i32 (int_aarch64_neon_sqrdmlah (i32 FPR32:$Rd), (i32 FPR32:$Rn),
4902                                            (i32 FPR32:$Rm))),
4903            (SQRDMLAHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
4904  def : Pat<(i32 (int_aarch64_neon_sqrdmlsh (i32 FPR32:$Rd), (i32 FPR32:$Rn),
4905                                            (i32 FPR32:$Rm))),
4906            (SQRDMLSHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
4907}
4908
4909def : InstAlias<"cmls $dst, $src1, $src2",
4910                (CMHSv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
4911def : InstAlias<"cmle $dst, $src1, $src2",
4912                (CMGEv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
4913def : InstAlias<"cmlo $dst, $src1, $src2",
4914                (CMHIv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
4915def : InstAlias<"cmlt $dst, $src1, $src2",
4916                (CMGTv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
4917def : InstAlias<"fcmle $dst, $src1, $src2",
4918                (FCMGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
4919def : InstAlias<"fcmle $dst, $src1, $src2",
4920                (FCMGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
4921def : InstAlias<"fcmlt $dst, $src1, $src2",
4922                (FCMGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
4923def : InstAlias<"fcmlt $dst, $src1, $src2",
4924                (FCMGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
4925def : InstAlias<"facle $dst, $src1, $src2",
4926                (FACGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
4927def : InstAlias<"facle $dst, $src1, $src2",
4928                (FACGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
4929def : InstAlias<"faclt $dst, $src1, $src2",
4930                (FACGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
4931def : InstAlias<"faclt $dst, $src1, $src2",
4932                (FACGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
4933
4934//===----------------------------------------------------------------------===//
4935// Advanced SIMD three scalar instructions (mixed operands).
4936//===----------------------------------------------------------------------===//
4937defm SQDMULL  : SIMDThreeScalarMixedHS<0, 0b11010, "sqdmull",
4938                                       int_aarch64_neon_sqdmulls_scalar>;
4939defm SQDMLAL  : SIMDThreeScalarMixedTiedHS<0, 0b10010, "sqdmlal">;
4940defm SQDMLSL  : SIMDThreeScalarMixedTiedHS<0, 0b10110, "sqdmlsl">;
4941
4942def : Pat<(i64 (int_aarch64_neon_sqadd (i64 FPR64:$Rd),
4943                   (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
4944                                                        (i32 FPR32:$Rm))))),
4945          (SQDMLALi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
4946def : Pat<(i64 (int_aarch64_neon_sqsub (i64 FPR64:$Rd),
4947                   (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
4948                                                        (i32 FPR32:$Rm))))),
4949          (SQDMLSLi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
4950
4951//===----------------------------------------------------------------------===//
4952// Advanced SIMD two scalar instructions.
4953//===----------------------------------------------------------------------===//
4954
4955defm ABS    : SIMDTwoScalarD<    0, 0b01011, "abs", abs>;
4956defm CMEQ   : SIMDCmpTwoScalarD< 0, 0b01001, "cmeq", AArch64cmeqz>;
4957defm CMGE   : SIMDCmpTwoScalarD< 1, 0b01000, "cmge", AArch64cmgez>;
4958defm CMGT   : SIMDCmpTwoScalarD< 0, 0b01000, "cmgt", AArch64cmgtz>;
4959defm CMLE   : SIMDCmpTwoScalarD< 1, 0b01001, "cmle", AArch64cmlez>;
4960defm CMLT   : SIMDCmpTwoScalarD< 0, 0b01010, "cmlt", AArch64cmltz>;
4961defm FCMEQ  : SIMDFPCmpTwoScalar<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
4962defm FCMGE  : SIMDFPCmpTwoScalar<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
4963defm FCMGT  : SIMDFPCmpTwoScalar<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
4964defm FCMLE  : SIMDFPCmpTwoScalar<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
4965defm FCMLT  : SIMDFPCmpTwoScalar<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
4966defm FCVTAS : SIMDFPTwoScalar<   0, 0, 0b11100, "fcvtas">;
4967defm FCVTAU : SIMDFPTwoScalar<   1, 0, 0b11100, "fcvtau">;
4968defm FCVTMS : SIMDFPTwoScalar<   0, 0, 0b11011, "fcvtms">;
4969defm FCVTMU : SIMDFPTwoScalar<   1, 0, 0b11011, "fcvtmu">;
4970defm FCVTNS : SIMDFPTwoScalar<   0, 0, 0b11010, "fcvtns">;
4971defm FCVTNU : SIMDFPTwoScalar<   1, 0, 0b11010, "fcvtnu">;
4972defm FCVTPS : SIMDFPTwoScalar<   0, 1, 0b11010, "fcvtps">;
4973defm FCVTPU : SIMDFPTwoScalar<   1, 1, 0b11010, "fcvtpu">;
4974def  FCVTXNv1i64 : SIMDInexactCvtTwoScalar<0b10110, "fcvtxn">;
4975defm FCVTZS : SIMDFPTwoScalar<   0, 1, 0b11011, "fcvtzs">;
4976defm FCVTZU : SIMDFPTwoScalar<   1, 1, 0b11011, "fcvtzu">;
4977defm FRECPE : SIMDFPTwoScalar<   0, 1, 0b11101, "frecpe", HasNEONorSME>;
4978defm FRECPX : SIMDFPTwoScalar<   0, 1, 0b11111, "frecpx", HasNEONorSME>;
4979defm FRSQRTE : SIMDFPTwoScalar<  1, 1, 0b11101, "frsqrte", HasNEONorSME>;
4980defm NEG    : SIMDTwoScalarD<    1, 0b01011, "neg",
4981                                 UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
4982defm SCVTF  : SIMDFPTwoScalarCVT<   0, 0, 0b11101, "scvtf", AArch64sitof>;
4983defm SQABS  : SIMDTwoScalarBHSD< 0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
4984defm SQNEG  : SIMDTwoScalarBHSD< 1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
4985defm SQXTN  : SIMDTwoScalarMixedBHS< 0, 0b10100, "sqxtn", int_aarch64_neon_scalar_sqxtn>;
4986defm SQXTUN : SIMDTwoScalarMixedBHS< 1, 0b10010, "sqxtun", int_aarch64_neon_scalar_sqxtun>;
4987defm SUQADD : SIMDTwoScalarBHSDTied< 0, 0b00011, "suqadd",
4988                                     int_aarch64_neon_suqadd>;
4989defm UCVTF  : SIMDFPTwoScalarCVT<   1, 0, 0b11101, "ucvtf", AArch64uitof>;
4990defm UQXTN  : SIMDTwoScalarMixedBHS<1, 0b10100, "uqxtn", int_aarch64_neon_scalar_uqxtn>;
4991defm USQADD : SIMDTwoScalarBHSDTied< 1, 0b00011, "usqadd",
4992                                    int_aarch64_neon_usqadd>;
4993
4994def : Pat<(v1i64 (AArch64vashr (v1i64 V64:$Rn), (i32 63))),
4995          (CMLTv1i64rz V64:$Rn)>;
4996
4997def : Pat<(v1i64 (int_aarch64_neon_fcvtas (v1f64 FPR64:$Rn))),
4998          (FCVTASv1i64 FPR64:$Rn)>;
4999def : Pat<(v1i64 (int_aarch64_neon_fcvtau (v1f64 FPR64:$Rn))),
5000          (FCVTAUv1i64 FPR64:$Rn)>;
5001def : Pat<(v1i64 (int_aarch64_neon_fcvtms (v1f64 FPR64:$Rn))),
5002          (FCVTMSv1i64 FPR64:$Rn)>;
5003def : Pat<(v1i64 (int_aarch64_neon_fcvtmu (v1f64 FPR64:$Rn))),
5004          (FCVTMUv1i64 FPR64:$Rn)>;
5005def : Pat<(v1i64 (int_aarch64_neon_fcvtns (v1f64 FPR64:$Rn))),
5006          (FCVTNSv1i64 FPR64:$Rn)>;
5007def : Pat<(v1i64 (int_aarch64_neon_fcvtnu (v1f64 FPR64:$Rn))),
5008          (FCVTNUv1i64 FPR64:$Rn)>;
5009def : Pat<(v1i64 (int_aarch64_neon_fcvtps (v1f64 FPR64:$Rn))),
5010          (FCVTPSv1i64 FPR64:$Rn)>;
5011def : Pat<(v1i64 (int_aarch64_neon_fcvtpu (v1f64 FPR64:$Rn))),
5012          (FCVTPUv1i64 FPR64:$Rn)>;
5013def : Pat<(v1i64 (int_aarch64_neon_fcvtzs (v1f64 FPR64:$Rn))),
5014          (FCVTZSv1i64 FPR64:$Rn)>;
5015def : Pat<(v1i64 (int_aarch64_neon_fcvtzu (v1f64 FPR64:$Rn))),
5016          (FCVTZUv1i64 FPR64:$Rn)>;
5017
5018def : Pat<(f16 (int_aarch64_neon_frecpe (f16 FPR16:$Rn))),
5019          (FRECPEv1f16 FPR16:$Rn)>;
5020def : Pat<(f32 (int_aarch64_neon_frecpe (f32 FPR32:$Rn))),
5021          (FRECPEv1i32 FPR32:$Rn)>;
5022def : Pat<(f64 (int_aarch64_neon_frecpe (f64 FPR64:$Rn))),
5023          (FRECPEv1i64 FPR64:$Rn)>;
5024def : Pat<(v1f64 (int_aarch64_neon_frecpe (v1f64 FPR64:$Rn))),
5025          (FRECPEv1i64 FPR64:$Rn)>;
5026
5027def : Pat<(f32 (AArch64frecpe (f32 FPR32:$Rn))),
5028          (FRECPEv1i32 FPR32:$Rn)>;
5029def : Pat<(v2f32 (AArch64frecpe (v2f32 V64:$Rn))),
5030          (FRECPEv2f32 V64:$Rn)>;
5031def : Pat<(v4f32 (AArch64frecpe (v4f32 FPR128:$Rn))),
5032          (FRECPEv4f32 FPR128:$Rn)>;
5033def : Pat<(f64 (AArch64frecpe (f64 FPR64:$Rn))),
5034          (FRECPEv1i64 FPR64:$Rn)>;
5035def : Pat<(v1f64 (AArch64frecpe (v1f64 FPR64:$Rn))),
5036          (FRECPEv1i64 FPR64:$Rn)>;
5037def : Pat<(v2f64 (AArch64frecpe (v2f64 FPR128:$Rn))),
5038          (FRECPEv2f64 FPR128:$Rn)>;
5039
5040def : Pat<(f32 (AArch64frecps (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
5041          (FRECPS32 FPR32:$Rn, FPR32:$Rm)>;
5042def : Pat<(v2f32 (AArch64frecps (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
5043          (FRECPSv2f32 V64:$Rn, V64:$Rm)>;
5044def : Pat<(v4f32 (AArch64frecps (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
5045          (FRECPSv4f32 FPR128:$Rn, FPR128:$Rm)>;
5046def : Pat<(f64 (AArch64frecps (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
5047          (FRECPS64 FPR64:$Rn, FPR64:$Rm)>;
5048def : Pat<(v2f64 (AArch64frecps (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
5049          (FRECPSv2f64 FPR128:$Rn, FPR128:$Rm)>;
5050
5051def : Pat<(f16 (int_aarch64_neon_frecpx (f16 FPR16:$Rn))),
5052          (FRECPXv1f16 FPR16:$Rn)>;
5053def : Pat<(f32 (int_aarch64_neon_frecpx (f32 FPR32:$Rn))),
5054          (FRECPXv1i32 FPR32:$Rn)>;
5055def : Pat<(f64 (int_aarch64_neon_frecpx (f64 FPR64:$Rn))),
5056          (FRECPXv1i64 FPR64:$Rn)>;
5057
5058def : Pat<(f16 (int_aarch64_neon_frsqrte (f16 FPR16:$Rn))),
5059          (FRSQRTEv1f16 FPR16:$Rn)>;
5060def : Pat<(f32 (int_aarch64_neon_frsqrte (f32 FPR32:$Rn))),
5061          (FRSQRTEv1i32 FPR32:$Rn)>;
5062def : Pat<(f64 (int_aarch64_neon_frsqrte (f64 FPR64:$Rn))),
5063          (FRSQRTEv1i64 FPR64:$Rn)>;
5064def : Pat<(v1f64 (int_aarch64_neon_frsqrte (v1f64 FPR64:$Rn))),
5065          (FRSQRTEv1i64 FPR64:$Rn)>;
5066
5067def : Pat<(f32 (AArch64frsqrte (f32 FPR32:$Rn))),
5068          (FRSQRTEv1i32 FPR32:$Rn)>;
5069def : Pat<(v2f32 (AArch64frsqrte (v2f32 V64:$Rn))),
5070          (FRSQRTEv2f32 V64:$Rn)>;
5071def : Pat<(v4f32 (AArch64frsqrte (v4f32 FPR128:$Rn))),
5072          (FRSQRTEv4f32 FPR128:$Rn)>;
5073def : Pat<(f64 (AArch64frsqrte (f64 FPR64:$Rn))),
5074          (FRSQRTEv1i64 FPR64:$Rn)>;
5075def : Pat<(v1f64 (AArch64frsqrte (v1f64 FPR64:$Rn))),
5076          (FRSQRTEv1i64 FPR64:$Rn)>;
5077def : Pat<(v2f64 (AArch64frsqrte (v2f64 FPR128:$Rn))),
5078          (FRSQRTEv2f64 FPR128:$Rn)>;
5079
5080def : Pat<(f32 (AArch64frsqrts (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
5081          (FRSQRTS32 FPR32:$Rn, FPR32:$Rm)>;
5082def : Pat<(v2f32 (AArch64frsqrts (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
5083          (FRSQRTSv2f32 V64:$Rn, V64:$Rm)>;
5084def : Pat<(v4f32 (AArch64frsqrts (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
5085          (FRSQRTSv4f32 FPR128:$Rn, FPR128:$Rm)>;
5086def : Pat<(f64 (AArch64frsqrts (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
5087          (FRSQRTS64 FPR64:$Rn, FPR64:$Rm)>;
5088def : Pat<(v2f64 (AArch64frsqrts (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
5089          (FRSQRTSv2f64 FPR128:$Rn, FPR128:$Rm)>;
5090
5091// Some float -> int -> float conversion patterns for which we want to keep the
5092// int values in FP registers using the corresponding NEON instructions to
5093// avoid more costly int <-> fp register transfers.
5094let Predicates = [HasNEON] in {
5095def : Pat<(f64 (any_sint_to_fp (i64 (any_fp_to_sint f64:$Rn)))),
5096          (SCVTFv1i64 (i64 (FCVTZSv1i64 f64:$Rn)))>;
5097def : Pat<(f32 (any_sint_to_fp (i32 (any_fp_to_sint f32:$Rn)))),
5098          (SCVTFv1i32 (i32 (FCVTZSv1i32 f32:$Rn)))>;
5099def : Pat<(f64 (any_uint_to_fp (i64 (any_fp_to_uint f64:$Rn)))),
5100          (UCVTFv1i64 (i64 (FCVTZUv1i64 f64:$Rn)))>;
5101def : Pat<(f32 (any_uint_to_fp (i32 (any_fp_to_uint f32:$Rn)))),
5102          (UCVTFv1i32 (i32 (FCVTZUv1i32 f32:$Rn)))>;
5103
5104let Predicates = [HasFullFP16] in {
5105def : Pat<(f16 (any_sint_to_fp (i32 (any_fp_to_sint f16:$Rn)))),
5106          (SCVTFv1i16 (f16 (FCVTZSv1f16 f16:$Rn)))>;
5107def : Pat<(f16 (any_uint_to_fp (i32 (any_fp_to_uint f16:$Rn)))),
5108          (UCVTFv1i16 (f16 (FCVTZUv1f16 f16:$Rn)))>;
5109}
5110// If an integer is about to be converted to a floating point value,
5111// just load it on the floating point unit.
5112// Here are the patterns for 8 and 16-bits to float.
5113// 8-bits -> float.
5114multiclass UIntToFPROLoadPat<ValueType DstTy, ValueType SrcTy,
5115                             SDPatternOperator loadop, Instruction UCVTF,
5116                             ROAddrMode ro, Instruction LDRW, Instruction LDRX,
5117                             SubRegIndex sub> {
5118  def : Pat<(DstTy (uint_to_fp (SrcTy
5119                     (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm,
5120                                      ro.Wext:$extend))))),
5121           (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
5122                                 (LDRW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
5123                                 sub))>;
5124
5125  def : Pat<(DstTy (uint_to_fp (SrcTy
5126                     (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm,
5127                                      ro.Wext:$extend))))),
5128           (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
5129                                 (LDRX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
5130                                 sub))>;
5131}
5132
5133defm : UIntToFPROLoadPat<f32, i32, zextloadi8,
5134                         UCVTFv1i32, ro8, LDRBroW, LDRBroX, bsub>;
5135def : Pat <(f32 (uint_to_fp (i32
5136               (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
5137           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
5138                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
5139def : Pat <(f32 (uint_to_fp (i32
5140                     (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
5141           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
5142                          (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
5143// 16-bits -> float.
5144defm : UIntToFPROLoadPat<f32, i32, zextloadi16,
5145                         UCVTFv1i32, ro16, LDRHroW, LDRHroX, hsub>;
5146def : Pat <(f32 (uint_to_fp (i32
5147                  (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
5148           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
5149                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
5150def : Pat <(f32 (uint_to_fp (i32
5151                  (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
5152           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
5153                          (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
5154// 32-bits are handled in target specific dag combine:
5155// performIntToFpCombine.
5156// 64-bits integer to 32-bits floating point, not possible with
5157// UCVTF on floating point registers (both source and destination
5158// must have the same size).
5159
5160// Here are the patterns for 8, 16, 32, and 64-bits to double.
5161// 8-bits -> double.
5162defm : UIntToFPROLoadPat<f64, i32, zextloadi8,
5163                         UCVTFv1i64, ro8, LDRBroW, LDRBroX, bsub>;
5164def : Pat <(f64 (uint_to_fp (i32
5165                    (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
5166           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
5167                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
5168def : Pat <(f64 (uint_to_fp (i32
5169                  (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
5170           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
5171                          (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
5172// 16-bits -> double.
5173defm : UIntToFPROLoadPat<f64, i32, zextloadi16,
5174                         UCVTFv1i64, ro16, LDRHroW, LDRHroX, hsub>;
5175def : Pat <(f64 (uint_to_fp (i32
5176                  (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
5177           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
5178                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
5179def : Pat <(f64 (uint_to_fp (i32
5180                  (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
5181           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
5182                          (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
5183// 32-bits -> double.
5184defm : UIntToFPROLoadPat<f64, i32, load,
5185                         UCVTFv1i64, ro32, LDRSroW, LDRSroX, ssub>;
5186def : Pat <(f64 (uint_to_fp (i32
5187                  (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
5188           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
5189                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub))>;
5190def : Pat <(f64 (uint_to_fp (i32
5191                  (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset))))),
5192           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
5193                          (LDURSi GPR64sp:$Rn, simm9:$offset), ssub))>;
5194// 64-bits -> double are handled in target specific dag combine:
5195// performIntToFpCombine.
5196} // let Predicates = [HasNEON]
5197
5198//===----------------------------------------------------------------------===//
5199// Advanced SIMD three different-sized vector instructions.
5200//===----------------------------------------------------------------------===//
5201
5202defm ADDHN  : SIMDNarrowThreeVectorBHS<0,0b0100,"addhn", int_aarch64_neon_addhn>;
5203defm SUBHN  : SIMDNarrowThreeVectorBHS<0,0b0110,"subhn", int_aarch64_neon_subhn>;
5204defm RADDHN : SIMDNarrowThreeVectorBHS<1,0b0100,"raddhn",int_aarch64_neon_raddhn>;
5205defm RSUBHN : SIMDNarrowThreeVectorBHS<1,0b0110,"rsubhn",int_aarch64_neon_rsubhn>;
5206defm PMULL  : SIMDDifferentThreeVectorBD<0,0b1110,"pmull",int_aarch64_neon_pmull>;
5207defm SABAL  : SIMDLongThreeVectorTiedBHSabal<0,0b0101,"sabal",
5208                                             AArch64sabd>;
5209defm SABDL   : SIMDLongThreeVectorBHSabdl<0, 0b0111, "sabdl",
5210                                          AArch64sabd>;
5211defm SADDL   : SIMDLongThreeVectorBHS<   0, 0b0000, "saddl",
5212            BinOpFrag<(add (sext node:$LHS), (sext node:$RHS))>>;
5213defm SADDW   : SIMDWideThreeVectorBHS<   0, 0b0001, "saddw",
5214                 BinOpFrag<(add node:$LHS, (sext node:$RHS))>>;
5215defm SMLAL   : SIMDLongThreeVectorTiedBHS<0, 0b1000, "smlal",
5216    TriOpFrag<(add node:$LHS, (AArch64smull node:$MHS, node:$RHS))>>;
5217defm SMLSL   : SIMDLongThreeVectorTiedBHS<0, 0b1010, "smlsl",
5218    TriOpFrag<(sub node:$LHS, (AArch64smull node:$MHS, node:$RHS))>>;
5219defm SMULL   : SIMDLongThreeVectorBHS<0, 0b1100, "smull", AArch64smull>;
5220defm SQDMLAL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1001, "sqdmlal",
5221                                               int_aarch64_neon_sqadd>;
5222defm SQDMLSL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1011, "sqdmlsl",
5223                                               int_aarch64_neon_sqsub>;
5224defm SQDMULL : SIMDLongThreeVectorHS<0, 0b1101, "sqdmull",
5225                                     int_aarch64_neon_sqdmull>;
5226defm SSUBL   : SIMDLongThreeVectorBHS<0, 0b0010, "ssubl",
5227                 BinOpFrag<(sub (sext node:$LHS), (sext node:$RHS))>>;
5228defm SSUBW   : SIMDWideThreeVectorBHS<0, 0b0011, "ssubw",
5229                 BinOpFrag<(sub node:$LHS, (sext node:$RHS))>>;
5230defm UABAL   : SIMDLongThreeVectorTiedBHSabal<1, 0b0101, "uabal",
5231                                              AArch64uabd>;
5232defm UADDL   : SIMDLongThreeVectorBHS<1, 0b0000, "uaddl",
5233                 BinOpFrag<(add (zanyext node:$LHS), (zanyext node:$RHS))>>;
5234defm UADDW   : SIMDWideThreeVectorBHS<1, 0b0001, "uaddw",
5235                 BinOpFrag<(add node:$LHS, (zanyext node:$RHS))>>;
5236defm UMLAL   : SIMDLongThreeVectorTiedBHS<1, 0b1000, "umlal",
5237    TriOpFrag<(add node:$LHS, (AArch64umull node:$MHS, node:$RHS))>>;
5238defm UMLSL   : SIMDLongThreeVectorTiedBHS<1, 0b1010, "umlsl",
5239    TriOpFrag<(sub node:$LHS, (AArch64umull node:$MHS, node:$RHS))>>;
5240defm UMULL   : SIMDLongThreeVectorBHS<1, 0b1100, "umull", AArch64umull>;
5241defm USUBL   : SIMDLongThreeVectorBHS<1, 0b0010, "usubl",
5242                 BinOpFrag<(sub (zanyext node:$LHS), (zanyext node:$RHS))>>;
5243defm USUBW   : SIMDWideThreeVectorBHS<   1, 0b0011, "usubw",
5244                 BinOpFrag<(sub node:$LHS, (zanyext node:$RHS))>>;
5245
5246// Additional patterns for [SU]ML[AS]L
5247multiclass Neon_mul_acc_widen_patterns<SDPatternOperator opnode, SDPatternOperator vecopnode,
5248  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
5249  def : Pat<(v4i16 (opnode
5250                    V64:$Ra,
5251                    (v4i16 (extract_subvector
5252                            (vecopnode (v8i8 V64:$Rn),(v8i8 V64:$Rm)),
5253                            (i64 0))))),
5254             (EXTRACT_SUBREG (v8i16 (INST8B
5255                                     (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), V64:$Ra, dsub),
5256                                     V64:$Rn, V64:$Rm)), dsub)>;
5257  def : Pat<(v2i32 (opnode
5258                    V64:$Ra,
5259                    (v2i32 (extract_subvector
5260                            (vecopnode (v4i16 V64:$Rn),(v4i16 V64:$Rm)),
5261                            (i64 0))))),
5262             (EXTRACT_SUBREG (v4i32 (INST4H
5263                                     (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), V64:$Ra, dsub),
5264                                     V64:$Rn, V64:$Rm)), dsub)>;
5265  def : Pat<(v1i64 (opnode
5266                    V64:$Ra,
5267                    (v1i64 (extract_subvector
5268                            (vecopnode (v2i32 V64:$Rn),(v2i32 V64:$Rm)),
5269                            (i64 0))))),
5270             (EXTRACT_SUBREG (v2i64 (INST2S
5271                                     (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), V64:$Ra, dsub),
5272                                     V64:$Rn, V64:$Rm)), dsub)>;
5273}
5274
5275defm : Neon_mul_acc_widen_patterns<add, AArch64umull,
5276     UMLALv8i8_v8i16, UMLALv4i16_v4i32, UMLALv2i32_v2i64>;
5277defm : Neon_mul_acc_widen_patterns<add, AArch64smull,
5278     SMLALv8i8_v8i16, SMLALv4i16_v4i32, SMLALv2i32_v2i64>;
5279defm : Neon_mul_acc_widen_patterns<sub, AArch64umull,
5280     UMLSLv8i8_v8i16, UMLSLv4i16_v4i32, UMLSLv2i32_v2i64>;
5281defm : Neon_mul_acc_widen_patterns<sub, AArch64smull,
5282     SMLSLv8i8_v8i16, SMLSLv4i16_v4i32, SMLSLv2i32_v2i64>;
5283
5284// Patterns for 64-bit pmull
5285def : Pat<(int_aarch64_neon_pmull64 V64:$Rn, V64:$Rm),
5286          (PMULLv1i64 V64:$Rn, V64:$Rm)>;
5287def : Pat<(int_aarch64_neon_pmull64 (extractelt (v2i64 V128:$Rn), (i64 1)),
5288                                    (extractelt (v2i64 V128:$Rm), (i64 1))),
5289          (PMULLv2i64 V128:$Rn, V128:$Rm)>;
5290
5291// CodeGen patterns for addhn and subhn instructions, which can actually be
5292// written in LLVM IR without too much difficulty.
5293
5294// ADDHN
5295def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm), (i32 8))))),
5296          (ADDHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
5297def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
5298                                           (i32 16))))),
5299          (ADDHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
5300def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
5301                                           (i32 32))))),
5302          (ADDHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
5303def : Pat<(concat_vectors (v8i8 V64:$Rd),
5304                          (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm),
5305                                                    (i32 8))))),
5306          (ADDHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
5307                            V128:$Rn, V128:$Rm)>;
5308def : Pat<(concat_vectors (v4i16 V64:$Rd),
5309                          (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
5310                                                    (i32 16))))),
5311          (ADDHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
5312                            V128:$Rn, V128:$Rm)>;
5313def : Pat<(concat_vectors (v2i32 V64:$Rd),
5314                          (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
5315                                                    (i32 32))))),
5316          (ADDHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
5317                            V128:$Rn, V128:$Rm)>;
5318
5319// SUBHN
5320def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm), (i32 8))))),
5321          (SUBHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
5322def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
5323                                           (i32 16))))),
5324          (SUBHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
5325def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
5326                                           (i32 32))))),
5327          (SUBHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
5328def : Pat<(concat_vectors (v8i8 V64:$Rd),
5329                          (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
5330                                                    (i32 8))))),
5331          (SUBHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
5332                            V128:$Rn, V128:$Rm)>;
5333def : Pat<(concat_vectors (v4i16 V64:$Rd),
5334                          (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
5335                                                    (i32 16))))),
5336          (SUBHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
5337                            V128:$Rn, V128:$Rm)>;
5338def : Pat<(concat_vectors (v2i32 V64:$Rd),
5339                          (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
5340                                                    (i32 32))))),
5341          (SUBHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
5342                            V128:$Rn, V128:$Rm)>;
5343
5344//----------------------------------------------------------------------------
5345// AdvSIMD bitwise extract from vector instruction.
5346//----------------------------------------------------------------------------
5347
5348defm EXT : SIMDBitwiseExtract<"ext">;
5349
5350def AdjustExtImm : SDNodeXForm<imm, [{
5351  return CurDAG->getTargetConstant(8 + N->getZExtValue(), SDLoc(N), MVT::i32);
5352}]>;
5353multiclass ExtPat<ValueType VT64, ValueType VT128, int N> {
5354  def : Pat<(VT64 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
5355            (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
5356  def : Pat<(VT128 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
5357            (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
5358  // We use EXT to handle extract_subvector to copy the upper 64-bits of a
5359  // 128-bit vector.
5360  def : Pat<(VT64 (extract_subvector V128:$Rn, (i64 N))),
5361            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
5362  // A 64-bit EXT of two halves of the same 128-bit register can be done as a
5363  // single 128-bit EXT.
5364  def : Pat<(VT64 (AArch64ext (extract_subvector V128:$Rn, (i64 0)),
5365                              (extract_subvector V128:$Rn, (i64 N)),
5366                              (i32 imm:$imm))),
5367            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, imm:$imm), dsub)>;
5368  // A 64-bit EXT of the high half of a 128-bit register can be done using a
5369  // 128-bit EXT of the whole register with an adjustment to the immediate. The
5370  // top half of the other operand will be unset, but that doesn't matter as it
5371  // will not be used.
5372  def : Pat<(VT64 (AArch64ext (extract_subvector V128:$Rn, (i64 N)),
5373                              V64:$Rm,
5374                              (i32 imm:$imm))),
5375            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn,
5376                                      (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
5377                                      (AdjustExtImm imm:$imm)), dsub)>;
5378}
5379
5380defm : ExtPat<v8i8, v16i8, 8>;
5381defm : ExtPat<v4i16, v8i16, 4>;
5382defm : ExtPat<v4f16, v8f16, 4>;
5383defm : ExtPat<v4bf16, v8bf16, 4>;
5384defm : ExtPat<v2i32, v4i32, 2>;
5385defm : ExtPat<v2f32, v4f32, 2>;
5386defm : ExtPat<v1i64, v2i64, 1>;
5387defm : ExtPat<v1f64, v2f64, 1>;
5388
5389//----------------------------------------------------------------------------
5390// AdvSIMD zip vector
5391//----------------------------------------------------------------------------
5392
5393defm TRN1 : SIMDZipVector<0b010, "trn1", AArch64trn1>;
5394defm TRN2 : SIMDZipVector<0b110, "trn2", AArch64trn2>;
5395defm UZP1 : SIMDZipVector<0b001, "uzp1", AArch64uzp1>;
5396defm UZP2 : SIMDZipVector<0b101, "uzp2", AArch64uzp2>;
5397defm ZIP1 : SIMDZipVector<0b011, "zip1", AArch64zip1>;
5398defm ZIP2 : SIMDZipVector<0b111, "zip2", AArch64zip2>;
5399
5400def : Pat<(v16i8 (concat_vectors (v8i8 (trunc (v8i16 V128:$Vn))),
5401                                 (v8i8 (trunc (v8i16 V128:$Vm))))),
5402          (UZP1v16i8 V128:$Vn, V128:$Vm)>;
5403def : Pat<(v8i16 (concat_vectors (v4i16 (trunc (v4i32 V128:$Vn))),
5404                                 (v4i16 (trunc (v4i32 V128:$Vm))))),
5405          (UZP1v8i16 V128:$Vn, V128:$Vm)>;
5406def : Pat<(v4i32 (concat_vectors (v2i32 (trunc (v2i64 V128:$Vn))),
5407                                 (v2i32 (trunc (v2i64 V128:$Vm))))),
5408          (UZP1v4i32 V128:$Vn, V128:$Vm)>;
5409
5410//----------------------------------------------------------------------------
5411// AdvSIMD TBL/TBX instructions
5412//----------------------------------------------------------------------------
5413
5414defm TBL : SIMDTableLookup<    0, "tbl">;
5415defm TBX : SIMDTableLookupTied<1, "tbx">;
5416
5417def : Pat<(v8i8 (int_aarch64_neon_tbl1 (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
5418          (TBLv8i8One VecListOne128:$Rn, V64:$Ri)>;
5419def : Pat<(v16i8 (int_aarch64_neon_tbl1 (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
5420          (TBLv16i8One V128:$Ri, V128:$Rn)>;
5421
5422def : Pat<(v8i8 (int_aarch64_neon_tbx1 (v8i8 V64:$Rd),
5423                  (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
5424          (TBXv8i8One V64:$Rd, VecListOne128:$Rn, V64:$Ri)>;
5425def : Pat<(v16i8 (int_aarch64_neon_tbx1 (v16i8 V128:$Rd),
5426                   (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
5427          (TBXv16i8One V128:$Rd, V128:$Ri, V128:$Rn)>;
5428
5429
5430//----------------------------------------------------------------------------
5431// AdvSIMD scalar DUP instruction
5432//----------------------------------------------------------------------------
5433
5434defm DUP : SIMDScalarDUP<"mov">;
5435
5436//----------------------------------------------------------------------------
5437// AdvSIMD scalar pairwise instructions
5438//----------------------------------------------------------------------------
5439
5440defm ADDP    : SIMDPairwiseScalarD<0, 0b11011, "addp">;
5441defm FADDP   : SIMDFPPairwiseScalar<0, 0b01101, "faddp">;
5442defm FMAXNMP : SIMDFPPairwiseScalar<0, 0b01100, "fmaxnmp">;
5443defm FMAXP   : SIMDFPPairwiseScalar<0, 0b01111, "fmaxp">;
5444defm FMINNMP : SIMDFPPairwiseScalar<1, 0b01100, "fminnmp">;
5445defm FMINP   : SIMDFPPairwiseScalar<1, 0b01111, "fminp">;
5446
5447// Only the lower half of the result of the inner FADDP is used in the patterns
5448// below, so the second operand does not matter. Re-use the first input
5449// operand, so no additional dependencies need to be introduced.
5450let Predicates = [HasFullFP16] in {
5451def : Pat<(f16 (vecreduce_fadd (v8f16 V128:$Rn))),
5452            (FADDPv2i16p
5453              (EXTRACT_SUBREG
5454                 (FADDPv8f16 (FADDPv8f16 V128:$Rn, V128:$Rn), V128:$Rn),
5455               dsub))>;
5456def : Pat<(f16 (vecreduce_fadd (v4f16 V64:$Rn))),
5457          (FADDPv2i16p (FADDPv4f16 V64:$Rn, V64:$Rn))>;
5458}
5459def : Pat<(f32 (vecreduce_fadd (v4f32 V128:$Rn))),
5460          (FADDPv2i32p
5461            (EXTRACT_SUBREG
5462              (FADDPv4f32 V128:$Rn, V128:$Rn),
5463             dsub))>;
5464def : Pat<(f32 (vecreduce_fadd (v2f32 V64:$Rn))),
5465          (FADDPv2i32p V64:$Rn)>;
5466def : Pat<(f64 (vecreduce_fadd (v2f64 V128:$Rn))),
5467          (FADDPv2i64p V128:$Rn)>;
5468
5469def : Pat<(v2i64 (AArch64saddv V128:$Rn)),
5470          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
5471def : Pat<(v2i64 (AArch64uaddv V128:$Rn)),
5472          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
5473def : Pat<(f32 (int_aarch64_neon_faddv (v2f32 V64:$Rn))),
5474          (FADDPv2i32p V64:$Rn)>;
5475def : Pat<(f32 (int_aarch64_neon_faddv (v4f32 V128:$Rn))),
5476          (FADDPv2i32p (EXTRACT_SUBREG (FADDPv4f32 V128:$Rn, V128:$Rn), dsub))>;
5477def : Pat<(f64 (int_aarch64_neon_faddv (v2f64 V128:$Rn))),
5478          (FADDPv2i64p V128:$Rn)>;
5479def : Pat<(f32 (int_aarch64_neon_fmaxnmv (v2f32 V64:$Rn))),
5480          (FMAXNMPv2i32p V64:$Rn)>;
5481def : Pat<(f64 (int_aarch64_neon_fmaxnmv (v2f64 V128:$Rn))),
5482          (FMAXNMPv2i64p V128:$Rn)>;
5483def : Pat<(f32 (int_aarch64_neon_fmaxv (v2f32 V64:$Rn))),
5484          (FMAXPv2i32p V64:$Rn)>;
5485def : Pat<(f64 (int_aarch64_neon_fmaxv (v2f64 V128:$Rn))),
5486          (FMAXPv2i64p V128:$Rn)>;
5487def : Pat<(f32 (int_aarch64_neon_fminnmv (v2f32 V64:$Rn))),
5488          (FMINNMPv2i32p V64:$Rn)>;
5489def : Pat<(f64 (int_aarch64_neon_fminnmv (v2f64 V128:$Rn))),
5490          (FMINNMPv2i64p V128:$Rn)>;
5491def : Pat<(f32 (int_aarch64_neon_fminv (v2f32 V64:$Rn))),
5492          (FMINPv2i32p V64:$Rn)>;
5493def : Pat<(f64 (int_aarch64_neon_fminv (v2f64 V128:$Rn))),
5494          (FMINPv2i64p V128:$Rn)>;
5495
5496//----------------------------------------------------------------------------
5497// AdvSIMD INS/DUP instructions
5498//----------------------------------------------------------------------------
5499
5500def DUPv8i8gpr  : SIMDDupFromMain<0, {?,?,?,?,1}, ".8b", v8i8, V64, GPR32>;
5501def DUPv16i8gpr : SIMDDupFromMain<1, {?,?,?,?,1}, ".16b", v16i8, V128, GPR32>;
5502def DUPv4i16gpr : SIMDDupFromMain<0, {?,?,?,1,0}, ".4h", v4i16, V64, GPR32>;
5503def DUPv8i16gpr : SIMDDupFromMain<1, {?,?,?,1,0}, ".8h", v8i16, V128, GPR32>;
5504def DUPv2i32gpr : SIMDDupFromMain<0, {?,?,1,0,0}, ".2s", v2i32, V64, GPR32>;
5505def DUPv4i32gpr : SIMDDupFromMain<1, {?,?,1,0,0}, ".4s", v4i32, V128, GPR32>;
5506def DUPv2i64gpr : SIMDDupFromMain<1, {?,1,0,0,0}, ".2d", v2i64, V128, GPR64>;
5507
5508def DUPv2i64lane : SIMDDup64FromElement;
5509def DUPv2i32lane : SIMDDup32FromElement<0, ".2s", v2i32, V64>;
5510def DUPv4i32lane : SIMDDup32FromElement<1, ".4s", v4i32, V128>;
5511def DUPv4i16lane : SIMDDup16FromElement<0, ".4h", v4i16, V64>;
5512def DUPv8i16lane : SIMDDup16FromElement<1, ".8h", v8i16, V128>;
5513def DUPv8i8lane  : SIMDDup8FromElement <0, ".8b", v8i8, V64>;
5514def DUPv16i8lane : SIMDDup8FromElement <1, ".16b", v16i8, V128>;
5515
5516// DUP from a 64-bit register to a 64-bit register is just a copy
5517def : Pat<(v1i64 (AArch64dup (i64 GPR64:$Rn))),
5518          (COPY_TO_REGCLASS GPR64:$Rn, FPR64)>;
5519def : Pat<(v1f64 (AArch64dup (f64 FPR64:$Rn))),
5520          (COPY_TO_REGCLASS FPR64:$Rn, FPR64)>;
5521
5522def : Pat<(v2f32 (AArch64dup (f32 FPR32:$Rn))),
5523          (v2f32 (DUPv2i32lane
5524            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
5525            (i64 0)))>;
5526def : Pat<(v4f32 (AArch64dup (f32 FPR32:$Rn))),
5527          (v4f32 (DUPv4i32lane
5528            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
5529            (i64 0)))>;
5530def : Pat<(v2f64 (AArch64dup (f64 FPR64:$Rn))),
5531          (v2f64 (DUPv2i64lane
5532            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rn, dsub),
5533            (i64 0)))>;
5534def : Pat<(v4f16 (AArch64dup (f16 FPR16:$Rn))),
5535          (v4f16 (DUPv4i16lane
5536            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
5537            (i64 0)))>;
5538def : Pat<(v4bf16 (AArch64dup (bf16 FPR16:$Rn))),
5539          (v4bf16 (DUPv4i16lane
5540            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
5541            (i64 0)))>;
5542def : Pat<(v8f16 (AArch64dup (f16 FPR16:$Rn))),
5543          (v8f16 (DUPv8i16lane
5544            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
5545            (i64 0)))>;
5546def : Pat<(v8bf16 (AArch64dup (bf16 FPR16:$Rn))),
5547          (v8bf16 (DUPv8i16lane
5548            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
5549            (i64 0)))>;
5550
5551def : Pat<(v4f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
5552          (DUPv4i16lane V128:$Rn, VectorIndexH:$imm)>;
5553def : Pat<(v8f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
5554          (DUPv8i16lane V128:$Rn, VectorIndexH:$imm)>;
5555
5556def : Pat<(v4bf16 (AArch64duplane16 (v8bf16 V128:$Rn), VectorIndexH:$imm)),
5557          (DUPv4i16lane V128:$Rn, VectorIndexH:$imm)>;
5558def : Pat<(v8bf16 (AArch64duplane16 (v8bf16 V128:$Rn), VectorIndexH:$imm)),
5559          (DUPv8i16lane V128:$Rn, VectorIndexH:$imm)>;
5560
5561def : Pat<(v2f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
5562          (DUPv2i32lane V128:$Rn, VectorIndexS:$imm)>;
5563def : Pat<(v4f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
5564         (DUPv4i32lane V128:$Rn, VectorIndexS:$imm)>;
5565def : Pat<(v2f64 (AArch64duplane64 (v2f64 V128:$Rn), VectorIndexD:$imm)),
5566          (DUPv2i64lane V128:$Rn, VectorIndexD:$imm)>;
5567
5568// If there's an (AArch64dup (vector_extract ...) ...), we can use a duplane
5569// instruction even if the types don't match: we just have to remap the lane
5570// carefully. N.b. this trick only applies to truncations.
5571def VecIndex_x2 : SDNodeXForm<imm, [{
5572  return CurDAG->getTargetConstant(2 * N->getZExtValue(), SDLoc(N), MVT::i64);
5573}]>;
5574def VecIndex_x4 : SDNodeXForm<imm, [{
5575  return CurDAG->getTargetConstant(4 * N->getZExtValue(), SDLoc(N), MVT::i64);
5576}]>;
5577def VecIndex_x8 : SDNodeXForm<imm, [{
5578  return CurDAG->getTargetConstant(8 * N->getZExtValue(), SDLoc(N), MVT::i64);
5579}]>;
5580
5581multiclass DUPWithTruncPats<ValueType ResVT, ValueType Src64VT,
5582                            ValueType Src128VT, ValueType ScalVT,
5583                            Instruction DUP, SDNodeXForm IdxXFORM> {
5584  def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src128VT V128:$Rn),
5585                                                     imm:$idx)))),
5586            (DUP V128:$Rn, (IdxXFORM imm:$idx))>;
5587
5588  def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src64VT V64:$Rn),
5589                                                     imm:$idx)))),
5590            (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
5591}
5592
5593defm : DUPWithTruncPats<v8i8,   v4i16, v8i16, i32, DUPv8i8lane,  VecIndex_x2>;
5594defm : DUPWithTruncPats<v8i8,   v2i32, v4i32, i32, DUPv8i8lane,  VecIndex_x4>;
5595defm : DUPWithTruncPats<v4i16,  v2i32, v4i32, i32, DUPv4i16lane, VecIndex_x2>;
5596
5597defm : DUPWithTruncPats<v16i8,  v4i16, v8i16, i32, DUPv16i8lane, VecIndex_x2>;
5598defm : DUPWithTruncPats<v16i8,  v2i32, v4i32, i32, DUPv16i8lane, VecIndex_x4>;
5599defm : DUPWithTruncPats<v8i16,  v2i32, v4i32, i32, DUPv8i16lane, VecIndex_x2>;
5600
5601multiclass DUPWithTrunci64Pats<ValueType ResVT, Instruction DUP,
5602                               SDNodeXForm IdxXFORM> {
5603  def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v2i64 V128:$Rn),
5604                                                         imm:$idx))))),
5605            (DUP V128:$Rn, (IdxXFORM imm:$idx))>;
5606
5607  def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v1i64 V64:$Rn),
5608                                                       imm:$idx))))),
5609            (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
5610}
5611
5612defm : DUPWithTrunci64Pats<v8i8,  DUPv8i8lane,   VecIndex_x8>;
5613defm : DUPWithTrunci64Pats<v4i16, DUPv4i16lane,  VecIndex_x4>;
5614defm : DUPWithTrunci64Pats<v2i32, DUPv2i32lane,  VecIndex_x2>;
5615
5616defm : DUPWithTrunci64Pats<v16i8, DUPv16i8lane, VecIndex_x8>;
5617defm : DUPWithTrunci64Pats<v8i16, DUPv8i16lane, VecIndex_x4>;
5618defm : DUPWithTrunci64Pats<v4i32, DUPv4i32lane, VecIndex_x2>;
5619
5620// SMOV and UMOV definitions, with some extra patterns for convenience
5621defm SMOV : SMov;
5622defm UMOV : UMov;
5623
5624def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
5625          (i32 (SMOVvi8to32 V128:$Rn, VectorIndexB:$idx))>;
5626def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
5627          (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
5628def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
5629          (i32 (SMOVvi16to32 V128:$Rn, VectorIndexH:$idx))>;
5630def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
5631          (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;
5632def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
5633          (i32 (SMOVvi16to32 V128:$Rn, VectorIndexH:$idx))>;
5634def : Pat<(sext (i32 (vector_extract (v4i32 V128:$Rn), VectorIndexS:$idx))),
5635          (i64 (SMOVvi32to64 V128:$Rn, VectorIndexS:$idx))>;
5636
5637def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v16i8 V128:$Rn),
5638            VectorIndexB:$idx)))), i8),
5639          (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
5640def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v8i16 V128:$Rn),
5641            VectorIndexH:$idx)))), i16),
5642          (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;
5643
5644// Extracting i8 or i16 elements will have the zero-extend transformed to
5645// an 'and' mask by type legalization since neither i8 nor i16 are legal types
5646// for AArch64. Match these patterns here since UMOV already zeroes out the high
5647// bits of the destination register.
5648def : Pat<(and (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx),
5649               (i32 0xff)),
5650          (i32 (UMOVvi8 V128:$Rn, VectorIndexB:$idx))>;
5651def : Pat<(and (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),
5652               (i32 0xffff)),
5653          (i32 (UMOVvi16 V128:$Rn, VectorIndexH:$idx))>;
5654
5655def : Pat<(i64 (and (i64 (anyext (i32 (vector_extract (v16i8 V128:$Rn),
5656            VectorIndexB:$idx)))), (i64 0xff))),
5657          (SUBREG_TO_REG (i64 0), (i32 (UMOVvi8 V128:$Rn, VectorIndexB:$idx)), sub_32)>;
5658def : Pat<(i64 (and (i64 (anyext (i32 (vector_extract (v8i16 V128:$Rn),
5659            VectorIndexH:$idx)))), (i64 0xffff))),
5660          (SUBREG_TO_REG (i64 0), (i32 (UMOVvi16 V128:$Rn, VectorIndexH:$idx)), sub_32)>;
5661
5662defm INS : SIMDIns;
5663
5664def : Pat<(v16i8 (scalar_to_vector GPR32:$Rn)),
5665          (SUBREG_TO_REG (i32 0),
5666                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
5667def : Pat<(v8i8 (scalar_to_vector GPR32:$Rn)),
5668          (SUBREG_TO_REG (i32 0),
5669                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
5670
5671def : Pat<(v8i16 (scalar_to_vector GPR32:$Rn)),
5672          (SUBREG_TO_REG (i32 0),
5673                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
5674def : Pat<(v4i16 (scalar_to_vector GPR32:$Rn)),
5675          (SUBREG_TO_REG (i32 0),
5676                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
5677
5678def : Pat<(v4f16 (scalar_to_vector (f16 FPR16:$Rn))),
5679          (INSERT_SUBREG (v4f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
5680def : Pat<(v8f16 (scalar_to_vector (f16 FPR16:$Rn))),
5681          (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
5682
5683def : Pat<(v4bf16 (scalar_to_vector (bf16 FPR16:$Rn))),
5684          (INSERT_SUBREG (v4bf16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
5685def : Pat<(v8bf16 (scalar_to_vector (bf16 FPR16:$Rn))),
5686          (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
5687
5688def : Pat<(v2i32 (scalar_to_vector (i32 FPR32:$Rn))),
5689            (v2i32 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
5690                                  (i32 FPR32:$Rn), ssub))>;
5691def : Pat<(v4i32 (scalar_to_vector (i32 FPR32:$Rn))),
5692            (v4i32 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
5693                                  (i32 FPR32:$Rn), ssub))>;
5694
5695def : Pat<(v2i64 (scalar_to_vector (i64 FPR64:$Rn))),
5696            (v2i64 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
5697                                  (i64 FPR64:$Rn), dsub))>;
5698
5699def : Pat<(v4f16 (scalar_to_vector (f16 FPR16:$Rn))),
5700          (INSERT_SUBREG (v4f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
5701def : Pat<(v8f16 (scalar_to_vector (f16 FPR16:$Rn))),
5702          (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
5703
5704def : Pat<(v4bf16 (scalar_to_vector (bf16 FPR16:$Rn))),
5705          (INSERT_SUBREG (v4bf16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
5706def : Pat<(v8bf16 (scalar_to_vector (bf16 FPR16:$Rn))),
5707          (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
5708
5709def : Pat<(v4f32 (scalar_to_vector (f32 FPR32:$Rn))),
5710          (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
5711def : Pat<(v2f32 (scalar_to_vector (f32 FPR32:$Rn))),
5712          (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
5713
5714def : Pat<(v2f64 (scalar_to_vector (f64 FPR64:$Rn))),
5715          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rn, dsub)>;
5716
5717def : Pat<(v4f16 (vector_insert (v4f16 V64:$Rn),
5718            (f16 FPR16:$Rm), (i64 VectorIndexS:$imm))),
5719          (EXTRACT_SUBREG
5720            (INSvi16lane
5721              (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), V64:$Rn, dsub)),
5722              VectorIndexS:$imm,
5723              (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
5724              (i64 0)),
5725            dsub)>;
5726
5727def : Pat<(vector_insert (v8f16 v8f16:$Rn), (f16 fpimm0),
5728            (i64 VectorIndexH:$imm)),
5729          (INSvi16gpr V128:$Rn, VectorIndexH:$imm, WZR)>;
5730def : Pat<(vector_insert v4f32:$Rn, (f32 fpimm0),
5731            (i64 VectorIndexS:$imm)),
5732          (INSvi32gpr V128:$Rn, VectorIndexS:$imm, WZR)>;
5733def : Pat<(vector_insert v2f64:$Rn, (f64 fpimm0),
5734            (i64 VectorIndexD:$imm)),
5735          (INSvi64gpr V128:$Rn, VectorIndexS:$imm, XZR)>;
5736
5737def : Pat<(v8f16 (vector_insert (v8f16 V128:$Rn),
5738            (f16 FPR16:$Rm), (i64 VectorIndexH:$imm))),
5739          (INSvi16lane
5740            V128:$Rn, VectorIndexH:$imm,
5741            (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
5742            (i64 0))>;
5743
5744def : Pat<(v4bf16 (vector_insert (v4bf16 V64:$Rn),
5745            (bf16 FPR16:$Rm), (i64 VectorIndexS:$imm))),
5746          (EXTRACT_SUBREG
5747            (INSvi16lane
5748              (v8bf16 (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), V64:$Rn, dsub)),
5749              VectorIndexS:$imm,
5750              (v8bf16 (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
5751              (i64 0)),
5752            dsub)>;
5753
5754def : Pat<(v8bf16 (vector_insert (v8bf16 V128:$Rn),
5755            (bf16 FPR16:$Rm), (i64 VectorIndexH:$imm))),
5756          (INSvi16lane
5757            V128:$Rn, VectorIndexH:$imm,
5758            (v8bf16 (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
5759            (i64 0))>;
5760
5761def : Pat<(v2f32 (vector_insert (v2f32 V64:$Rn),
5762            (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
5763          (EXTRACT_SUBREG
5764            (INSvi32lane
5765              (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), V64:$Rn, dsub)),
5766              VectorIndexS:$imm,
5767              (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
5768              (i64 0)),
5769            dsub)>;
5770def : Pat<(v4f32 (vector_insert (v4f32 V128:$Rn),
5771            (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
5772          (INSvi32lane
5773            V128:$Rn, VectorIndexS:$imm,
5774            (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
5775            (i64 0))>;
5776def : Pat<(v2f64 (vector_insert (v2f64 V128:$Rn),
5777            (f64 FPR64:$Rm), (i64 VectorIndexD:$imm))),
5778          (INSvi64lane
5779            V128:$Rn, VectorIndexD:$imm,
5780            (v2f64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rm, dsub)),
5781            (i64 0))>;
5782
5783// Copy an element at a constant index in one vector into a constant indexed
5784// element of another.
5785// FIXME refactor to a shared class/dev parameterized on vector type, vector
5786// index type and INS extension
5787def : Pat<(v16i8 (int_aarch64_neon_vcopy_lane
5788                   (v16i8 V128:$Vd), VectorIndexB:$idx, (v16i8 V128:$Vs),
5789                   VectorIndexB:$idx2)),
5790          (v16i8 (INSvi8lane
5791                   V128:$Vd, VectorIndexB:$idx, V128:$Vs, VectorIndexB:$idx2)
5792          )>;
5793def : Pat<(v8i16 (int_aarch64_neon_vcopy_lane
5794                   (v8i16 V128:$Vd), VectorIndexH:$idx, (v8i16 V128:$Vs),
5795                   VectorIndexH:$idx2)),
5796          (v8i16 (INSvi16lane
5797                   V128:$Vd, VectorIndexH:$idx, V128:$Vs, VectorIndexH:$idx2)
5798          )>;
5799def : Pat<(v4i32 (int_aarch64_neon_vcopy_lane
5800                   (v4i32 V128:$Vd), VectorIndexS:$idx, (v4i32 V128:$Vs),
5801                   VectorIndexS:$idx2)),
5802          (v4i32 (INSvi32lane
5803                   V128:$Vd, VectorIndexS:$idx, V128:$Vs, VectorIndexS:$idx2)
5804          )>;
5805def : Pat<(v2i64 (int_aarch64_neon_vcopy_lane
5806                   (v2i64 V128:$Vd), VectorIndexD:$idx, (v2i64 V128:$Vs),
5807                   VectorIndexD:$idx2)),
5808          (v2i64 (INSvi64lane
5809                   V128:$Vd, VectorIndexD:$idx, V128:$Vs, VectorIndexD:$idx2)
5810          )>;
5811
5812multiclass Neon_INS_elt_pattern<ValueType VT128, ValueType VT64,
5813                                ValueType VTScal, Instruction INS> {
5814  def : Pat<(VT128 (vector_insert V128:$src,
5815                        (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
5816                        imm:$Immd)),
5817            (INS V128:$src, imm:$Immd, V128:$Rn, imm:$Immn)>;
5818
5819  def : Pat<(VT128 (vector_insert V128:$src,
5820                        (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
5821                        imm:$Immd)),
5822            (INS V128:$src, imm:$Immd,
5823                 (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn)>;
5824
5825  def : Pat<(VT64 (vector_insert V64:$src,
5826                        (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
5827                        imm:$Immd)),
5828            (EXTRACT_SUBREG (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub),
5829                                 imm:$Immd, V128:$Rn, imm:$Immn),
5830                            dsub)>;
5831
5832  def : Pat<(VT64 (vector_insert V64:$src,
5833                        (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
5834                        imm:$Immd)),
5835            (EXTRACT_SUBREG
5836                (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub), imm:$Immd,
5837                     (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn),
5838                dsub)>;
5839}
5840
5841defm : Neon_INS_elt_pattern<v8f16, v4f16, f16, INSvi16lane>;
5842defm : Neon_INS_elt_pattern<v8bf16, v4bf16, bf16, INSvi16lane>;
5843defm : Neon_INS_elt_pattern<v4f32, v2f32, f32, INSvi32lane>;
5844defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, INSvi64lane>;
5845
5846
5847// Floating point vector extractions are codegen'd as either a sequence of
5848// subregister extractions, or a MOV (aka DUP here) if
5849// the lane number is anything other than zero.
5850def : Pat<(vector_extract (v2f64 V128:$Rn), 0),
5851          (f64 (EXTRACT_SUBREG V128:$Rn, dsub))>;
5852def : Pat<(vector_extract (v4f32 V128:$Rn), 0),
5853          (f32 (EXTRACT_SUBREG V128:$Rn, ssub))>;
5854def : Pat<(vector_extract (v8f16 V128:$Rn), 0),
5855          (f16 (EXTRACT_SUBREG V128:$Rn, hsub))>;
5856def : Pat<(vector_extract (v8bf16 V128:$Rn), 0),
5857          (bf16 (EXTRACT_SUBREG V128:$Rn, hsub))>;
5858
5859
5860def : Pat<(vector_extract (v2f64 V128:$Rn), VectorIndexD:$idx),
5861          (f64 (DUPi64 V128:$Rn, VectorIndexD:$idx))>;
5862def : Pat<(vector_extract (v4f32 V128:$Rn), VectorIndexS:$idx),
5863          (f32 (DUPi32 V128:$Rn, VectorIndexS:$idx))>;
5864def : Pat<(vector_extract (v8f16 V128:$Rn), VectorIndexH:$idx),
5865          (f16 (DUPi16 V128:$Rn, VectorIndexH:$idx))>;
5866def : Pat<(vector_extract (v8bf16 V128:$Rn), VectorIndexH:$idx),
5867          (bf16 (DUPi16 V128:$Rn, VectorIndexH:$idx))>;
5868
5869// All concat_vectors operations are canonicalised to act on i64 vectors for
5870// AArch64. In the general case we need an instruction, which had just as well be
5871// INS.
5872class ConcatPat<ValueType DstTy, ValueType SrcTy>
5873  : Pat<(DstTy (concat_vectors (SrcTy V64:$Rd), V64:$Rn)),
5874        (INSvi64lane (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), 1,
5875                     (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub), 0)>;
5876
5877def : ConcatPat<v2i64, v1i64>;
5878def : ConcatPat<v2f64, v1f64>;
5879def : ConcatPat<v4i32, v2i32>;
5880def : ConcatPat<v4f32, v2f32>;
5881def : ConcatPat<v8i16, v4i16>;
5882def : ConcatPat<v8f16, v4f16>;
5883def : ConcatPat<v8bf16, v4bf16>;
5884def : ConcatPat<v16i8, v8i8>;
5885
5886// If the high lanes are undef, though, we can just ignore them:
5887class ConcatUndefPat<ValueType DstTy, ValueType SrcTy>
5888  : Pat<(DstTy (concat_vectors (SrcTy V64:$Rn), undef)),
5889        (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub)>;
5890
5891def : ConcatUndefPat<v2i64, v1i64>;
5892def : ConcatUndefPat<v2f64, v1f64>;
5893def : ConcatUndefPat<v4i32, v2i32>;
5894def : ConcatUndefPat<v4f32, v2f32>;
5895def : ConcatUndefPat<v8i16, v4i16>;
5896def : ConcatUndefPat<v16i8, v8i8>;
5897
5898//----------------------------------------------------------------------------
5899// AdvSIMD across lanes instructions
5900//----------------------------------------------------------------------------
5901
5902defm ADDV    : SIMDAcrossLanesBHS<0, 0b11011, "addv">;
5903defm SMAXV   : SIMDAcrossLanesBHS<0, 0b01010, "smaxv">;
5904defm SMINV   : SIMDAcrossLanesBHS<0, 0b11010, "sminv">;
5905defm UMAXV   : SIMDAcrossLanesBHS<1, 0b01010, "umaxv">;
5906defm UMINV   : SIMDAcrossLanesBHS<1, 0b11010, "uminv">;
5907defm SADDLV  : SIMDAcrossLanesHSD<0, 0b00011, "saddlv">;
5908defm UADDLV  : SIMDAcrossLanesHSD<1, 0b00011, "uaddlv">;
5909defm FMAXNMV : SIMDFPAcrossLanes<0b01100, 0, "fmaxnmv", int_aarch64_neon_fmaxnmv>;
5910defm FMAXV   : SIMDFPAcrossLanes<0b01111, 0, "fmaxv", int_aarch64_neon_fmaxv>;
5911defm FMINNMV : SIMDFPAcrossLanes<0b01100, 1, "fminnmv", int_aarch64_neon_fminnmv>;
5912defm FMINV   : SIMDFPAcrossLanes<0b01111, 1, "fminv", int_aarch64_neon_fminv>;
5913
5914multiclass SIMDAcrossLaneLongPairIntrinsic<string Opc, SDPatternOperator addlp> {
5915  // Patterns for addv(addlp(x)) ==> addlv
5916  def : Pat<(i32 (vector_extract (v8i16 (insert_subvector undef,
5917              (v4i16 (AArch64uaddv (v4i16 (addlp (v8i8 V64:$op))))),
5918              (i64 0))), (i64 0))),
5919            (EXTRACT_SUBREG (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
5920              (!cast<Instruction>(Opc#"v8i8v") V64:$op), hsub), ssub)>;
5921  def : Pat<(i32 (vector_extract (v8i16 (AArch64uaddv (v8i16 (addlp (v16i8 V128:$op))))), (i64 0))),
5922            (EXTRACT_SUBREG (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
5923              (!cast<Instruction>(Opc#"v16i8v") V128:$op), hsub), ssub)>;
5924  def : Pat<(v4i32 (AArch64uaddv (v4i32 (addlp (v8i16 V128:$op))))),
5925            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), (!cast<Instruction>(Opc#"v8i16v") V128:$op), ssub)>;
5926
5927  // Patterns for addp(addlp(x))) ==> addlv
5928  def : Pat<(v2i32 (AArch64uaddv (v2i32 (addlp (v4i16 V64:$op))))),
5929            (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)), (!cast<Instruction>(Opc#"v4i16v") V64:$op), ssub)>;
5930  def : Pat<(v2i64 (AArch64uaddv (v2i64 (addlp (v4i32 V128:$op))))),
5931            (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (!cast<Instruction>(Opc#"v4i32v") V128:$op), dsub)>;
5932}
5933
5934defm : SIMDAcrossLaneLongPairIntrinsic<"UADDLV", AArch64uaddlp>;
5935defm : SIMDAcrossLaneLongPairIntrinsic<"SADDLV", AArch64saddlp>;
5936
5937// Patterns for across-vector intrinsics, that have a node equivalent, that
5938// returns a vector (with only the low lane defined) instead of a scalar.
5939// In effect, opNode is the same as (scalar_to_vector (IntNode)).
5940multiclass SIMDAcrossLanesIntrinsic<string baseOpc,
5941                                    SDPatternOperator opNode> {
5942// If a lane instruction caught the vector_extract around opNode, we can
5943// directly match the latter to the instruction.
5944def : Pat<(v8i8 (opNode V64:$Rn)),
5945          (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
5946           (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub)>;
5947def : Pat<(v16i8 (opNode V128:$Rn)),
5948          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5949           (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub)>;
5950def : Pat<(v4i16 (opNode V64:$Rn)),
5951          (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
5952           (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub)>;
5953def : Pat<(v8i16 (opNode V128:$Rn)),
5954          (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
5955           (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub)>;
5956def : Pat<(v4i32 (opNode V128:$Rn)),
5957          (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
5958           (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), ssub)>;
5959
5960
5961// If none did, fallback to the explicit patterns, consuming the vector_extract.
5962def : Pat<(i32 (vector_extract (insert_subvector undef, (v8i8 (opNode V64:$Rn)),
5963            (i64 0)), (i64 0))),
5964          (EXTRACT_SUBREG (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
5965            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn),
5966            bsub), ssub)>;
5967def : Pat<(i32 (vector_extract (v16i8 (opNode V128:$Rn)), (i64 0))),
5968          (EXTRACT_SUBREG (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5969            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn),
5970            bsub), ssub)>;
5971def : Pat<(i32 (vector_extract (insert_subvector undef,
5972            (v4i16 (opNode V64:$Rn)), (i64 0)), (i64 0))),
5973          (EXTRACT_SUBREG (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
5974            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn),
5975            hsub), ssub)>;
5976def : Pat<(i32 (vector_extract (v8i16 (opNode V128:$Rn)), (i64 0))),
5977          (EXTRACT_SUBREG (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
5978            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn),
5979            hsub), ssub)>;
5980def : Pat<(i32 (vector_extract (v4i32 (opNode V128:$Rn)), (i64 0))),
5981          (EXTRACT_SUBREG (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
5982            (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn),
5983            ssub), ssub)>;
5984
5985}
5986
5987multiclass SIMDAcrossLanesSignedIntrinsic<string baseOpc,
5988                                          SDPatternOperator opNode>
5989    : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
5990// If there is a sign extension after this intrinsic, consume it as smov already
5991// performed it
5992def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
5993            (opNode (v8i8 V64:$Rn)), (i64 0)), (i64 0))), i8)),
5994          (i32 (SMOVvi8to32
5995            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5996              (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
5997            (i64 0)))>;
5998def : Pat<(i32 (sext_inreg (i32 (vector_extract
5999            (opNode (v16i8 V128:$Rn)), (i64 0))), i8)),
6000          (i32 (SMOVvi8to32
6001            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6002             (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
6003            (i64 0)))>;
6004def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
6005            (opNode (v4i16 V64:$Rn)), (i64 0)), (i64 0))), i16)),
6006          (i32 (SMOVvi16to32
6007           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6008            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
6009           (i64 0)))>;
6010def : Pat<(i32 (sext_inreg (i32 (vector_extract
6011            (opNode (v8i16 V128:$Rn)), (i64 0))), i16)),
6012          (i32 (SMOVvi16to32
6013            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6014             (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
6015            (i64 0)))>;
6016}
6017
6018multiclass SIMDAcrossLanesUnsignedIntrinsic<string baseOpc,
6019                                            SDPatternOperator opNode>
6020    : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
6021// If there is a masking operation keeping only what has been actually
6022// generated, consume it.
6023def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
6024            (opNode (v8i8 V64:$Rn)), (i64 0)), (i64 0))), maski8_or_more)),
6025      (i32 (EXTRACT_SUBREG
6026        (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6027          (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
6028        ssub))>;
6029def : Pat<(i32 (and (i32 (vector_extract (opNode (v16i8 V128:$Rn)), (i64 0))),
6030            maski8_or_more)),
6031        (i32 (EXTRACT_SUBREG
6032          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6033            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
6034          ssub))>;
6035def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
6036            (opNode (v4i16 V64:$Rn)), (i64 0)), (i64 0))), maski16_or_more)),
6037          (i32 (EXTRACT_SUBREG
6038            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6039              (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
6040            ssub))>;
6041def : Pat<(i32 (and (i32 (vector_extract (opNode (v8i16 V128:$Rn)), (i64 0))),
6042            maski16_or_more)),
6043        (i32 (EXTRACT_SUBREG
6044          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6045            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
6046          ssub))>;
6047}
6048
6049defm : SIMDAcrossLanesSignedIntrinsic<"ADDV",  AArch64saddv>;
6050// vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
6051def : Pat<(v2i32 (AArch64saddv (v2i32 V64:$Rn))),
6052          (ADDPv2i32 V64:$Rn, V64:$Rn)>;
6053
6054defm : SIMDAcrossLanesUnsignedIntrinsic<"ADDV", AArch64uaddv>;
6055// vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
6056def : Pat<(v2i32 (AArch64uaddv (v2i32 V64:$Rn))),
6057          (ADDPv2i32 V64:$Rn, V64:$Rn)>;
6058
6059defm : SIMDAcrossLanesSignedIntrinsic<"SMAXV", AArch64smaxv>;
6060def : Pat<(v2i32 (AArch64smaxv (v2i32 V64:$Rn))),
6061          (SMAXPv2i32 V64:$Rn, V64:$Rn)>;
6062
6063defm : SIMDAcrossLanesSignedIntrinsic<"SMINV", AArch64sminv>;
6064def : Pat<(v2i32 (AArch64sminv (v2i32 V64:$Rn))),
6065          (SMINPv2i32 V64:$Rn, V64:$Rn)>;
6066
6067defm : SIMDAcrossLanesUnsignedIntrinsic<"UMAXV", AArch64umaxv>;
6068def : Pat<(v2i32 (AArch64umaxv (v2i32 V64:$Rn))),
6069          (UMAXPv2i32 V64:$Rn, V64:$Rn)>;
6070
6071defm : SIMDAcrossLanesUnsignedIntrinsic<"UMINV", AArch64uminv>;
6072def : Pat<(v2i32 (AArch64uminv (v2i32 V64:$Rn))),
6073          (UMINPv2i32 V64:$Rn, V64:$Rn)>;
6074
6075multiclass SIMDAcrossLanesSignedLongIntrinsic<string baseOpc, Intrinsic intOp> {
6076  def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
6077        (i32 (SMOVvi16to32
6078          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6079            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
6080          (i64 0)))>;
6081def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
6082        (i32 (SMOVvi16to32
6083          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6084           (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
6085          (i64 0)))>;
6086
6087def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
6088          (i32 (EXTRACT_SUBREG
6089           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6090            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
6091           ssub))>;
6092def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
6093        (i32 (EXTRACT_SUBREG
6094          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6095           (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
6096          ssub))>;
6097
6098def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
6099        (i64 (EXTRACT_SUBREG
6100          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6101           (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
6102          dsub))>;
6103}
6104
6105multiclass SIMDAcrossLanesUnsignedLongIntrinsic<string baseOpc,
6106                                                Intrinsic intOp> {
6107  def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
6108        (i32 (EXTRACT_SUBREG
6109          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6110            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
6111          ssub))>;
6112def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
6113        (i32 (EXTRACT_SUBREG
6114          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6115            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
6116          ssub))>;
6117
6118def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
6119          (i32 (EXTRACT_SUBREG
6120            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6121              (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
6122            ssub))>;
6123def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
6124        (i32 (EXTRACT_SUBREG
6125          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6126            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
6127          ssub))>;
6128
6129def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
6130        (i64 (EXTRACT_SUBREG
6131          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6132            (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
6133          dsub))>;
6134}
6135
6136defm : SIMDAcrossLanesSignedLongIntrinsic<"SADDLV", int_aarch64_neon_saddlv>;
6137defm : SIMDAcrossLanesUnsignedLongIntrinsic<"UADDLV", int_aarch64_neon_uaddlv>;
6138
6139// The vaddlv_s32 intrinsic gets mapped to SADDLP.
6140def : Pat<(i64 (int_aarch64_neon_saddlv (v2i32 V64:$Rn))),
6141          (i64 (EXTRACT_SUBREG
6142            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6143              (SADDLPv2i32_v1i64 V64:$Rn), dsub),
6144            dsub))>;
6145// The vaddlv_u32 intrinsic gets mapped to UADDLP.
6146def : Pat<(i64 (int_aarch64_neon_uaddlv (v2i32 V64:$Rn))),
6147          (i64 (EXTRACT_SUBREG
6148            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6149              (UADDLPv2i32_v1i64 V64:$Rn), dsub),
6150            dsub))>;
6151
6152//------------------------------------------------------------------------------
6153// AdvSIMD modified immediate instructions
6154//------------------------------------------------------------------------------
6155
6156// AdvSIMD BIC
6157defm BIC : SIMDModifiedImmVectorShiftTied<1, 0b11, 0b01, "bic", AArch64bici>;
6158// AdvSIMD ORR
6159defm ORR : SIMDModifiedImmVectorShiftTied<0, 0b11, 0b01, "orr", AArch64orri>;
6160
6161def : InstAlias<"bic $Vd.4h, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
6162def : InstAlias<"bic $Vd.8h, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
6163def : InstAlias<"bic $Vd.2s, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
6164def : InstAlias<"bic $Vd.4s, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;
6165
6166def : InstAlias<"bic.4h $Vd, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
6167def : InstAlias<"bic.8h $Vd, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
6168def : InstAlias<"bic.2s $Vd, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
6169def : InstAlias<"bic.4s $Vd, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;
6170
6171def : InstAlias<"orr $Vd.4h, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
6172def : InstAlias<"orr $Vd.8h, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
6173def : InstAlias<"orr $Vd.2s, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
6174def : InstAlias<"orr $Vd.4s, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;
6175
6176def : InstAlias<"orr.4h $Vd, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
6177def : InstAlias<"orr.8h $Vd, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
6178def : InstAlias<"orr.2s $Vd, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
6179def : InstAlias<"orr.4s $Vd, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;
6180
6181// AdvSIMD FMOV
6182def FMOVv2f64_ns : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1111, V128, fpimm8,
6183                                              "fmov", ".2d",
6184                       [(set (v2f64 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
6185def FMOVv2f32_ns : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1111, V64,  fpimm8,
6186                                              "fmov", ".2s",
6187                       [(set (v2f32 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
6188def FMOVv4f32_ns : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1111, V128, fpimm8,
6189                                              "fmov", ".4s",
6190                       [(set (v4f32 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
6191let Predicates = [HasNEON, HasFullFP16] in {
6192def FMOVv4f16_ns : SIMDModifiedImmVectorNoShift<0, 0, 1, 0b1111, V64,  fpimm8,
6193                                              "fmov", ".4h",
6194                       [(set (v4f16 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
6195def FMOVv8f16_ns : SIMDModifiedImmVectorNoShift<1, 0, 1, 0b1111, V128, fpimm8,
6196                                              "fmov", ".8h",
6197                       [(set (v8f16 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
6198} // Predicates = [HasNEON, HasFullFP16]
6199
6200// AdvSIMD MOVI
6201
6202// EDIT byte mask: scalar
6203let isReMaterializable = 1, isAsCheapAsAMove = 1 in
6204def MOVID      : SIMDModifiedImmScalarNoShift<0, 1, 0b1110, "movi",
6205                    [(set FPR64:$Rd, simdimmtype10:$imm8)]>;
6206// The movi_edit node has the immediate value already encoded, so we use
6207// a plain imm0_255 here.
6208def : Pat<(f64 (AArch64movi_edit imm0_255:$shift)),
6209          (MOVID imm0_255:$shift)>;
6210
6211// EDIT byte mask: 2d
6212
6213// The movi_edit node has the immediate value already encoded, so we use
6214// a plain imm0_255 in the pattern
6215let isReMaterializable = 1, isAsCheapAsAMove = 1 in
6216def MOVIv2d_ns   : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1110, V128,
6217                                                simdimmtype10,
6218                                                "movi", ".2d",
6219                   [(set (v2i64 V128:$Rd), (AArch64movi_edit imm0_255:$imm8))]>;
6220
6221def : Pat<(v2i64 immAllZerosV), (MOVIv2d_ns (i32 0))>;
6222def : Pat<(v4i32 immAllZerosV), (MOVIv2d_ns (i32 0))>;
6223def : Pat<(v8i16 immAllZerosV), (MOVIv2d_ns (i32 0))>;
6224def : Pat<(v16i8 immAllZerosV), (MOVIv2d_ns (i32 0))>;
6225
6226def : Pat<(v2i64 immAllOnesV), (MOVIv2d_ns (i32 255))>;
6227def : Pat<(v4i32 immAllOnesV), (MOVIv2d_ns (i32 255))>;
6228def : Pat<(v8i16 immAllOnesV), (MOVIv2d_ns (i32 255))>;
6229def : Pat<(v16i8 immAllOnesV), (MOVIv2d_ns (i32 255))>;
6230
6231// Set 64-bit vectors to all 0/1 by extracting from a 128-bit register as the
6232// extract is free and this gives better MachineCSE results.
6233def : Pat<(v1i64 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
6234def : Pat<(v2i32 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
6235def : Pat<(v4i16 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
6236def : Pat<(v8i8  immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
6237
6238def : Pat<(v1i64 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
6239def : Pat<(v2i32 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
6240def : Pat<(v4i16 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
6241def : Pat<(v8i8  immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
6242
6243// EDIT per word & halfword: 2s, 4h, 4s, & 8h
6244let isReMaterializable = 1, isAsCheapAsAMove = 1 in
6245defm MOVI      : SIMDModifiedImmVectorShift<0, 0b10, 0b00, "movi">;
6246
6247let Predicates = [HasNEON] in {
6248  // Using the MOVI to materialize fp constants.
6249  def : Pat<(f32 fpimm32SIMDModImmType4:$in),
6250            (EXTRACT_SUBREG (MOVIv2i32 (fpimm32SIMDModImmType4XForm f32:$in),
6251                                       (i32 24)),
6252                            ssub)>;
6253}
6254
6255def : InstAlias<"movi $Vd.4h, $imm", (MOVIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
6256def : InstAlias<"movi $Vd.8h, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
6257def : InstAlias<"movi $Vd.2s, $imm", (MOVIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
6258def : InstAlias<"movi $Vd.4s, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
6259
6260def : InstAlias<"movi.4h $Vd, $imm", (MOVIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
6261def : InstAlias<"movi.8h $Vd, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
6262def : InstAlias<"movi.2s $Vd, $imm", (MOVIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
6263def : InstAlias<"movi.4s $Vd, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
6264
6265def : Pat<(v2i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
6266          (MOVIv2i32 imm0_255:$imm8, imm:$shift)>;
6267def : Pat<(v4i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
6268          (MOVIv4i32 imm0_255:$imm8, imm:$shift)>;
6269def : Pat<(v4i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
6270          (MOVIv4i16 imm0_255:$imm8, imm:$shift)>;
6271def : Pat<(v8i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
6272          (MOVIv8i16 imm0_255:$imm8, imm:$shift)>;
6273
6274let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
6275// EDIT per word: 2s & 4s with MSL shifter
6276def MOVIv2s_msl  : SIMDModifiedImmMoveMSL<0, 0, {1,1,0,?}, V64, "movi", ".2s",
6277                      [(set (v2i32 V64:$Rd),
6278                            (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
6279def MOVIv4s_msl  : SIMDModifiedImmMoveMSL<1, 0, {1,1,0,?}, V128, "movi", ".4s",
6280                      [(set (v4i32 V128:$Rd),
6281                            (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
6282
6283// Per byte: 8b & 16b
6284def MOVIv8b_ns   : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1110, V64,  imm0_255,
6285                                                 "movi", ".8b",
6286                       [(set (v8i8 V64:$Rd), (AArch64movi imm0_255:$imm8))]>;
6287
6288def MOVIv16b_ns  : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1110, V128, imm0_255,
6289                                                 "movi", ".16b",
6290                       [(set (v16i8 V128:$Rd), (AArch64movi imm0_255:$imm8))]>;
6291}
6292
6293// AdvSIMD MVNI
6294
6295// EDIT per word & halfword: 2s, 4h, 4s, & 8h
6296let isReMaterializable = 1, isAsCheapAsAMove = 1 in
6297defm MVNI      : SIMDModifiedImmVectorShift<1, 0b10, 0b00, "mvni">;
6298
6299def : InstAlias<"mvni $Vd.4h, $imm", (MVNIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
6300def : InstAlias<"mvni $Vd.8h, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
6301def : InstAlias<"mvni $Vd.2s, $imm", (MVNIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
6302def : InstAlias<"mvni $Vd.4s, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
6303
6304def : InstAlias<"mvni.4h $Vd, $imm", (MVNIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
6305def : InstAlias<"mvni.8h $Vd, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
6306def : InstAlias<"mvni.2s $Vd, $imm", (MVNIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
6307def : InstAlias<"mvni.4s $Vd, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
6308
6309def : Pat<(v2i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
6310          (MVNIv2i32 imm0_255:$imm8, imm:$shift)>;
6311def : Pat<(v4i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
6312          (MVNIv4i32 imm0_255:$imm8, imm:$shift)>;
6313def : Pat<(v4i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
6314          (MVNIv4i16 imm0_255:$imm8, imm:$shift)>;
6315def : Pat<(v8i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
6316          (MVNIv8i16 imm0_255:$imm8, imm:$shift)>;
6317
6318// EDIT per word: 2s & 4s with MSL shifter
6319let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
6320def MVNIv2s_msl   : SIMDModifiedImmMoveMSL<0, 1, {1,1,0,?}, V64, "mvni", ".2s",
6321                      [(set (v2i32 V64:$Rd),
6322                            (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
6323def MVNIv4s_msl   : SIMDModifiedImmMoveMSL<1, 1, {1,1,0,?}, V128, "mvni", ".4s",
6324                      [(set (v4i32 V128:$Rd),
6325                            (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
6326}
6327
6328//----------------------------------------------------------------------------
6329// AdvSIMD indexed element
6330//----------------------------------------------------------------------------
6331
6332let hasSideEffects = 0 in {
6333  defm FMLA  : SIMDFPIndexedTied<0, 0b0001, "fmla">;
6334  defm FMLS  : SIMDFPIndexedTied<0, 0b0101, "fmls">;
6335}
6336
6337// NOTE: Operands are reordered in the FMLA/FMLS PatFrags because the
6338// instruction expects the addend first, while the intrinsic expects it last.
6339
6340// On the other hand, there are quite a few valid combinatorial options due to
6341// the commutativity of multiplication and the fact that (-x) * y = x * (-y).
6342defm : SIMDFPIndexedTiedPatterns<"FMLA",
6343           TriOpFrag<(any_fma node:$RHS, node:$MHS, node:$LHS)>>;
6344defm : SIMDFPIndexedTiedPatterns<"FMLA",
6345           TriOpFrag<(any_fma node:$MHS, node:$RHS, node:$LHS)>>;
6346
6347defm : SIMDFPIndexedTiedPatterns<"FMLS",
6348           TriOpFrag<(any_fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
6349defm : SIMDFPIndexedTiedPatterns<"FMLS",
6350           TriOpFrag<(any_fma node:$RHS, (fneg node:$MHS), node:$LHS)> >;
6351defm : SIMDFPIndexedTiedPatterns<"FMLS",
6352           TriOpFrag<(any_fma (fneg node:$RHS), node:$MHS, node:$LHS)> >;
6353defm : SIMDFPIndexedTiedPatterns<"FMLS",
6354           TriOpFrag<(any_fma (fneg node:$MHS), node:$RHS, node:$LHS)> >;
6355
6356multiclass FMLSIndexedAfterNegPatterns<SDPatternOperator OpNode> {
6357  // 3 variants for the .2s version: DUPLANE from 128-bit, DUPLANE from 64-bit
6358  // and DUP scalar.
6359  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
6360                           (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
6361                                           VectorIndexS:$idx))),
6362            (FMLSv2i32_indexed V64:$Rd, V64:$Rn, V128:$Rm, VectorIndexS:$idx)>;
6363  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
6364                           (v2f32 (AArch64duplane32
6365                                      (v4f32 (insert_subvector undef,
6366                                                 (v2f32 (fneg V64:$Rm)),
6367                                                 (i64 0))),
6368                                      VectorIndexS:$idx)))),
6369            (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
6370                               (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
6371                               VectorIndexS:$idx)>;
6372  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
6373                           (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
6374            (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
6375                (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;
6376
6377  // 3 variants for the .4s version: DUPLANE from 128-bit, DUPLANE from 64-bit
6378  // and DUP scalar.
6379  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
6380                           (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
6381                                           VectorIndexS:$idx))),
6382            (FMLSv4i32_indexed V128:$Rd, V128:$Rn, V128:$Rm,
6383                               VectorIndexS:$idx)>;
6384  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
6385                           (v4f32 (AArch64duplane32
6386                                      (v4f32 (insert_subvector undef,
6387                                                 (v2f32 (fneg V64:$Rm)),
6388                                                 (i64 0))),
6389                                      VectorIndexS:$idx)))),
6390            (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
6391                               (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
6392                               VectorIndexS:$idx)>;
6393  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
6394                           (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
6395            (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
6396                (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;
6397
6398  // 2 variants for the .2d version: DUPLANE from 128-bit, and DUP scalar
6399  // (DUPLANE from 64-bit would be trivial).
6400  def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
6401                           (AArch64duplane64 (v2f64 (fneg V128:$Rm)),
6402                                           VectorIndexD:$idx))),
6403            (FMLSv2i64_indexed
6404                V128:$Rd, V128:$Rn, V128:$Rm, VectorIndexS:$idx)>;
6405  def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
6406                           (AArch64dup (f64 (fneg FPR64Op:$Rm))))),
6407            (FMLSv2i64_indexed V128:$Rd, V128:$Rn,
6408                (SUBREG_TO_REG (i32 0), FPR64Op:$Rm, dsub), (i64 0))>;
6409
6410  // 2 variants for 32-bit scalar version: extract from .2s or from .4s
6411  def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
6412                         (vector_extract (v4f32 (fneg V128:$Rm)),
6413                                         VectorIndexS:$idx))),
6414            (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
6415                V128:$Rm, VectorIndexS:$idx)>;
6416  def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
6417                         (vector_extract (v4f32 (insert_subvector undef,
6418                                                    (v2f32 (fneg V64:$Rm)),
6419                                                    (i64 0))),
6420                                         VectorIndexS:$idx))),
6421            (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
6422                (SUBREG_TO_REG (i32 0), V64:$Rm, dsub), VectorIndexS:$idx)>;
6423
6424  // 1 variant for 64-bit scalar version: extract from .1d or from .2d
6425  def : Pat<(f64 (OpNode (f64 FPR64:$Rd), (f64 FPR64:$Rn),
6426                         (vector_extract (v2f64 (fneg V128:$Rm)),
6427                                         VectorIndexS:$idx))),
6428            (FMLSv1i64_indexed FPR64:$Rd, FPR64:$Rn,
6429                V128:$Rm, VectorIndexS:$idx)>;
6430}
6431
6432defm : FMLSIndexedAfterNegPatterns<
6433           TriOpFrag<(any_fma node:$RHS, node:$MHS, node:$LHS)> >;
6434defm : FMLSIndexedAfterNegPatterns<
6435           TriOpFrag<(any_fma node:$MHS, node:$RHS, node:$LHS)> >;
6436
6437defm FMULX : SIMDFPIndexed<1, 0b1001, "fmulx", int_aarch64_neon_fmulx>;
6438defm FMUL  : SIMDFPIndexed<0, 0b1001, "fmul", any_fmul>;
6439
6440def : Pat<(v2f32 (any_fmul V64:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
6441          (FMULv2i32_indexed V64:$Rn,
6442            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
6443            (i64 0))>;
6444def : Pat<(v4f32 (any_fmul V128:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
6445          (FMULv4i32_indexed V128:$Rn,
6446            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
6447            (i64 0))>;
6448def : Pat<(v2f64 (any_fmul V128:$Rn, (AArch64dup (f64 FPR64:$Rm)))),
6449          (FMULv2i64_indexed V128:$Rn,
6450            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rm, dsub),
6451            (i64 0))>;
6452
6453defm SQDMULH : SIMDIndexedHS<0, 0b1100, "sqdmulh", int_aarch64_neon_sqdmulh>;
6454defm SQRDMULH : SIMDIndexedHS<0, 0b1101, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
6455
6456defm SQDMULH : SIMDIndexedHSPatterns<int_aarch64_neon_sqdmulh_lane,
6457                                     int_aarch64_neon_sqdmulh_laneq>;
6458defm SQRDMULH : SIMDIndexedHSPatterns<int_aarch64_neon_sqrdmulh_lane,
6459                                      int_aarch64_neon_sqrdmulh_laneq>;
6460
6461// Generated by MachineCombine
6462defm MLA   : SIMDVectorIndexedHSTied<1, 0b0000, "mla", null_frag>;
6463defm MLS   : SIMDVectorIndexedHSTied<1, 0b0100, "mls", null_frag>;
6464
6465defm MUL   : SIMDVectorIndexedHS<0, 0b1000, "mul", mul>;
6466defm SMLAL : SIMDVectorIndexedLongSDTied<0, 0b0010, "smlal",
6467    TriOpFrag<(add node:$LHS, (AArch64smull node:$MHS, node:$RHS))>>;
6468defm SMLSL : SIMDVectorIndexedLongSDTied<0, 0b0110, "smlsl",
6469    TriOpFrag<(sub node:$LHS, (AArch64smull node:$MHS, node:$RHS))>>;
6470defm SMULL : SIMDVectorIndexedLongSD<0, 0b1010, "smull", AArch64smull>;
6471defm SQDMLAL : SIMDIndexedLongSQDMLXSDTied<0, 0b0011, "sqdmlal",
6472                                           int_aarch64_neon_sqadd>;
6473defm SQDMLSL : SIMDIndexedLongSQDMLXSDTied<0, 0b0111, "sqdmlsl",
6474                                           int_aarch64_neon_sqsub>;
6475defm SQRDMLAH : SIMDIndexedSQRDMLxHSDTied<1, 0b1101, "sqrdmlah",
6476                                          int_aarch64_neon_sqrdmlah>;
6477defm SQRDMLSH : SIMDIndexedSQRDMLxHSDTied<1, 0b1111, "sqrdmlsh",
6478                                          int_aarch64_neon_sqrdmlsh>;
6479defm SQDMULL : SIMDIndexedLongSD<0, 0b1011, "sqdmull", int_aarch64_neon_sqdmull>;
6480defm UMLAL   : SIMDVectorIndexedLongSDTied<1, 0b0010, "umlal",
6481    TriOpFrag<(add node:$LHS, (AArch64umull node:$MHS, node:$RHS))>>;
6482defm UMLSL   : SIMDVectorIndexedLongSDTied<1, 0b0110, "umlsl",
6483    TriOpFrag<(sub node:$LHS, (AArch64umull node:$MHS, node:$RHS))>>;
6484defm UMULL   : SIMDVectorIndexedLongSD<1, 0b1010, "umull", AArch64umull>;
6485
6486// A scalar sqdmull with the second operand being a vector lane can be
6487// handled directly with the indexed instruction encoding.
6488def : Pat<(int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
6489                                          (vector_extract (v4i32 V128:$Vm),
6490                                                           VectorIndexS:$idx)),
6491          (SQDMULLv1i64_indexed FPR32:$Rn, V128:$Vm, VectorIndexS:$idx)>;
6492
6493//----------------------------------------------------------------------------
6494// AdvSIMD scalar shift instructions
6495//----------------------------------------------------------------------------
6496defm FCVTZS : SIMDFPScalarRShift<0, 0b11111, "fcvtzs">;
6497defm FCVTZU : SIMDFPScalarRShift<1, 0b11111, "fcvtzu">;
6498defm SCVTF  : SIMDFPScalarRShift<0, 0b11100, "scvtf">;
6499defm UCVTF  : SIMDFPScalarRShift<1, 0b11100, "ucvtf">;
6500// Codegen patterns for the above. We don't put these directly on the
6501// instructions because TableGen's type inference can't handle the truth.
6502// Having the same base pattern for fp <--> int totally freaks it out.
6503def : Pat<(int_aarch64_neon_vcvtfp2fxs FPR32:$Rn, vecshiftR32:$imm),
6504          (FCVTZSs FPR32:$Rn, vecshiftR32:$imm)>;
6505def : Pat<(int_aarch64_neon_vcvtfp2fxu FPR32:$Rn, vecshiftR32:$imm),
6506          (FCVTZUs FPR32:$Rn, vecshiftR32:$imm)>;
6507def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f64 FPR64:$Rn), vecshiftR64:$imm)),
6508          (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
6509def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f64 FPR64:$Rn), vecshiftR64:$imm)),
6510          (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
6511def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxs (v1f64 FPR64:$Rn),
6512                                            vecshiftR64:$imm)),
6513          (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
6514def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxu (v1f64 FPR64:$Rn),
6515                                            vecshiftR64:$imm)),
6516          (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
6517def : Pat<(int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR32:$imm),
6518          (UCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
6519def : Pat<(f64 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
6520          (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
6521def : Pat<(v1f64 (int_aarch64_neon_vcvtfxs2fp (v1i64 FPR64:$Rn),
6522                                            vecshiftR64:$imm)),
6523          (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
6524def : Pat<(f64 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
6525          (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
6526def : Pat<(v1f64 (int_aarch64_neon_vcvtfxu2fp (v1i64 FPR64:$Rn),
6527                                            vecshiftR64:$imm)),
6528          (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
6529def : Pat<(int_aarch64_neon_vcvtfxs2fp FPR32:$Rn, vecshiftR32:$imm),
6530          (SCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
6531
6532// Patterns for FP16 Intrinsics - requires reg copy to/from as i16s not supported.
6533
6534def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 (sext_inreg FPR32:$Rn, i16)), vecshiftR16:$imm)),
6535          (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
6536def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 FPR32:$Rn), vecshiftR16:$imm)),
6537          (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
6538def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
6539          (SCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
6540def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp
6541            (and FPR32:$Rn, (i32 65535)),
6542            vecshiftR16:$imm)),
6543          (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
6544def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR16:$imm)),
6545          (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
6546def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
6547          (UCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
6548def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR32:$imm)),
6549          (i32 (INSERT_SUBREG
6550            (i32 (IMPLICIT_DEF)),
6551            (FCVTZSh FPR16:$Rn, vecshiftR32:$imm),
6552            hsub))>;
6553def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR64:$imm)),
6554          (i64 (INSERT_SUBREG
6555            (i64 (IMPLICIT_DEF)),
6556            (FCVTZSh FPR16:$Rn, vecshiftR64:$imm),
6557            hsub))>;
6558def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR32:$imm)),
6559          (i32 (INSERT_SUBREG
6560            (i32 (IMPLICIT_DEF)),
6561            (FCVTZUh FPR16:$Rn, vecshiftR32:$imm),
6562            hsub))>;
6563def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR64:$imm)),
6564          (i64 (INSERT_SUBREG
6565            (i64 (IMPLICIT_DEF)),
6566            (FCVTZUh FPR16:$Rn, vecshiftR64:$imm),
6567            hsub))>;
6568def : Pat<(i32 (int_aarch64_neon_facge (f16 FPR16:$Rn), (f16 FPR16:$Rm))),
6569          (i32 (INSERT_SUBREG
6570            (i32 (IMPLICIT_DEF)),
6571            (FACGE16 FPR16:$Rn, FPR16:$Rm),
6572            hsub))>;
6573def : Pat<(i32 (int_aarch64_neon_facgt (f16 FPR16:$Rn), (f16 FPR16:$Rm))),
6574          (i32 (INSERT_SUBREG
6575            (i32 (IMPLICIT_DEF)),
6576            (FACGT16 FPR16:$Rn, FPR16:$Rm),
6577            hsub))>;
6578
6579defm SHL      : SIMDScalarLShiftD<   0, 0b01010, "shl", AArch64vshl>;
6580defm SLI      : SIMDScalarLShiftDTied<1, 0b01010, "sli">;
6581defm SQRSHRN  : SIMDScalarRShiftBHS< 0, 0b10011, "sqrshrn",
6582                                     int_aarch64_neon_sqrshrn>;
6583defm SQRSHRUN : SIMDScalarRShiftBHS< 1, 0b10001, "sqrshrun",
6584                                     int_aarch64_neon_sqrshrun>;
6585defm SQSHLU   : SIMDScalarLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
6586defm SQSHL    : SIMDScalarLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
6587defm SQSHRN   : SIMDScalarRShiftBHS< 0, 0b10010, "sqshrn",
6588                                     int_aarch64_neon_sqshrn>;
6589defm SQSHRUN  : SIMDScalarRShiftBHS< 1, 0b10000, "sqshrun",
6590                                     int_aarch64_neon_sqshrun>;
6591defm SRI      : SIMDScalarRShiftDTied<   1, 0b01000, "sri">;
6592defm SRSHR    : SIMDScalarRShiftD<   0, 0b00100, "srshr", AArch64srshri>;
6593defm SRSRA    : SIMDScalarRShiftDTied<   0, 0b00110, "srsra",
6594    TriOpFrag<(add node:$LHS,
6595                   (AArch64srshri node:$MHS, node:$RHS))>>;
6596defm SSHR     : SIMDScalarRShiftD<   0, 0b00000, "sshr", AArch64vashr>;
6597defm SSRA     : SIMDScalarRShiftDTied<   0, 0b00010, "ssra",
6598    TriOpFrag<(add_and_or_is_add node:$LHS,
6599                   (AArch64vashr node:$MHS, node:$RHS))>>;
6600defm UQRSHRN  : SIMDScalarRShiftBHS< 1, 0b10011, "uqrshrn",
6601                                     int_aarch64_neon_uqrshrn>;
6602defm UQSHL    : SIMDScalarLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
6603defm UQSHRN   : SIMDScalarRShiftBHS< 1, 0b10010, "uqshrn",
6604                                     int_aarch64_neon_uqshrn>;
6605defm URSHR    : SIMDScalarRShiftD<   1, 0b00100, "urshr", AArch64urshri>;
6606defm URSRA    : SIMDScalarRShiftDTied<   1, 0b00110, "ursra",
6607    TriOpFrag<(add node:$LHS,
6608                   (AArch64urshri node:$MHS, node:$RHS))>>;
6609defm USHR     : SIMDScalarRShiftD<   1, 0b00000, "ushr", AArch64vlshr>;
6610defm USRA     : SIMDScalarRShiftDTied<   1, 0b00010, "usra",
6611    TriOpFrag<(add_and_or_is_add node:$LHS,
6612                   (AArch64vlshr node:$MHS, node:$RHS))>>;
6613
6614//----------------------------------------------------------------------------
6615// AdvSIMD vector shift instructions
6616//----------------------------------------------------------------------------
6617defm FCVTZS:SIMDVectorRShiftSD<0, 0b11111, "fcvtzs", int_aarch64_neon_vcvtfp2fxs>;
6618defm FCVTZU:SIMDVectorRShiftSD<1, 0b11111, "fcvtzu", int_aarch64_neon_vcvtfp2fxu>;
6619defm SCVTF: SIMDVectorRShiftToFP<0, 0b11100, "scvtf",
6620                                   int_aarch64_neon_vcvtfxs2fp>;
6621defm RSHRN   : SIMDVectorRShiftNarrowBHS<0, 0b10001, "rshrn",
6622                                         int_aarch64_neon_rshrn>;
6623defm SHL     : SIMDVectorLShiftBHSD<0, 0b01010, "shl", AArch64vshl>;
6624defm SHRN    : SIMDVectorRShiftNarrowBHS<0, 0b10000, "shrn",
6625                          BinOpFrag<(trunc (AArch64vashr node:$LHS, node:$RHS))>>;
6626defm SLI     : SIMDVectorLShiftBHSDTied<1, 0b01010, "sli", AArch64vsli>;
6627def : Pat<(v1i64 (AArch64vsli (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
6628                                      (i32 vecshiftL64:$imm))),
6629          (SLId FPR64:$Rd, FPR64:$Rn, vecshiftL64:$imm)>;
6630defm SQRSHRN : SIMDVectorRShiftNarrowBHS<0, 0b10011, "sqrshrn",
6631                                         int_aarch64_neon_sqrshrn>;
6632defm SQRSHRUN: SIMDVectorRShiftNarrowBHS<1, 0b10001, "sqrshrun",
6633                                         int_aarch64_neon_sqrshrun>;
6634defm SQSHLU : SIMDVectorLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
6635defm SQSHL  : SIMDVectorLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
6636defm SQSHRN  : SIMDVectorRShiftNarrowBHS<0, 0b10010, "sqshrn",
6637                                         int_aarch64_neon_sqshrn>;
6638defm SQSHRUN : SIMDVectorRShiftNarrowBHS<1, 0b10000, "sqshrun",
6639                                         int_aarch64_neon_sqshrun>;
6640defm SRI     : SIMDVectorRShiftBHSDTied<1, 0b01000, "sri", AArch64vsri>;
6641def : Pat<(v1i64 (AArch64vsri (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
6642                                      (i32 vecshiftR64:$imm))),
6643          (SRId FPR64:$Rd, FPR64:$Rn, vecshiftR64:$imm)>;
6644defm SRSHR   : SIMDVectorRShiftBHSD<0, 0b00100, "srshr", AArch64srshri>;
6645defm SRSRA   : SIMDVectorRShiftBHSDTied<0, 0b00110, "srsra",
6646                 TriOpFrag<(add node:$LHS,
6647                                (AArch64srshri node:$MHS, node:$RHS))> >;
6648defm SSHLL   : SIMDVectorLShiftLongBHSD<0, 0b10100, "sshll",
6649                BinOpFrag<(AArch64vshl (sext node:$LHS), node:$RHS)>>;
6650
6651defm SSHR    : SIMDVectorRShiftBHSD<0, 0b00000, "sshr", AArch64vashr>;
6652defm SSRA    : SIMDVectorRShiftBHSDTied<0, 0b00010, "ssra",
6653                TriOpFrag<(add_and_or_is_add node:$LHS, (AArch64vashr node:$MHS, node:$RHS))>>;
6654defm UCVTF   : SIMDVectorRShiftToFP<1, 0b11100, "ucvtf",
6655                        int_aarch64_neon_vcvtfxu2fp>;
6656defm UQRSHRN : SIMDVectorRShiftNarrowBHS<1, 0b10011, "uqrshrn",
6657                                         int_aarch64_neon_uqrshrn>;
6658defm UQSHL   : SIMDVectorLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
6659defm UQSHRN  : SIMDVectorRShiftNarrowBHS<1, 0b10010, "uqshrn",
6660                                         int_aarch64_neon_uqshrn>;
6661defm URSHR   : SIMDVectorRShiftBHSD<1, 0b00100, "urshr", AArch64urshri>;
6662defm URSRA   : SIMDVectorRShiftBHSDTied<1, 0b00110, "ursra",
6663                TriOpFrag<(add node:$LHS,
6664                               (AArch64urshri node:$MHS, node:$RHS))> >;
6665defm USHLL   : SIMDVectorLShiftLongBHSD<1, 0b10100, "ushll",
6666                BinOpFrag<(AArch64vshl (zext node:$LHS), node:$RHS)>>;
6667defm USHR    : SIMDVectorRShiftBHSD<1, 0b00000, "ushr", AArch64vlshr>;
6668defm USRA    : SIMDVectorRShiftBHSDTied<1, 0b00010, "usra",
6669                TriOpFrag<(add_and_or_is_add node:$LHS, (AArch64vlshr node:$MHS, node:$RHS))> >;
6670
6671// RADDHN patterns for when RSHRN shifts by half the size of the vector element
6672def : Pat<(v8i8 (int_aarch64_neon_rshrn (v8i16 V128:$Vn), (i32 8))),
6673          (RADDHNv8i16_v8i8 V128:$Vn, (v8i16 (MOVIv2d_ns (i32 0))))>;
6674def : Pat<(v4i16 (int_aarch64_neon_rshrn (v4i32 V128:$Vn), (i32 16))),
6675          (RADDHNv4i32_v4i16 V128:$Vn, (v4i32 (MOVIv2d_ns (i32 0))))>;
6676def : Pat<(v2i32 (int_aarch64_neon_rshrn (v2i64 V128:$Vn), (i32 32))),
6677          (RADDHNv2i64_v2i32 V128:$Vn, (v2i64 (MOVIv2d_ns (i32 0))))>;
6678
6679// RADDHN2 patterns for when RSHRN shifts by half the size of the vector element
6680def : Pat<(v16i8 (concat_vectors
6681                 (v8i8 V64:$Vd),
6682                 (v8i8 (int_aarch64_neon_rshrn (v8i16 V128:$Vn), (i32 8))))),
6683          (RADDHNv8i16_v16i8
6684                 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn,
6685                 (v8i16 (MOVIv2d_ns (i32 0))))>;
6686def : Pat<(v8i16 (concat_vectors
6687                 (v4i16 V64:$Vd),
6688                 (v4i16 (int_aarch64_neon_rshrn (v4i32 V128:$Vn), (i32 16))))),
6689          (RADDHNv4i32_v8i16
6690                 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn,
6691                 (v4i32 (MOVIv2d_ns (i32 0))))>;
6692def : Pat<(v4i32 (concat_vectors
6693                 (v2i32 V64:$Vd),
6694                 (v2i32 (int_aarch64_neon_rshrn (v2i64 V128:$Vn), (i32 32))))),
6695          (RADDHNv2i64_v4i32
6696                 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn,
6697                 (v2i64 (MOVIv2d_ns (i32 0))))>;
6698
6699// SHRN patterns for when a logical right shift was used instead of arithmetic
6700// (the immediate guarantees no sign bits actually end up in the result so it
6701// doesn't matter).
6702def : Pat<(v8i8 (trunc (AArch64vlshr (v8i16 V128:$Rn), vecshiftR16Narrow:$imm))),
6703          (SHRNv8i8_shift V128:$Rn, vecshiftR16Narrow:$imm)>;
6704def : Pat<(v4i16 (trunc (AArch64vlshr (v4i32 V128:$Rn), vecshiftR32Narrow:$imm))),
6705          (SHRNv4i16_shift V128:$Rn, vecshiftR32Narrow:$imm)>;
6706def : Pat<(v2i32 (trunc (AArch64vlshr (v2i64 V128:$Rn), vecshiftR64Narrow:$imm))),
6707          (SHRNv2i32_shift V128:$Rn, vecshiftR64Narrow:$imm)>;
6708
6709def : Pat<(v16i8 (concat_vectors (v8i8 V64:$Rd),
6710                                 (trunc (AArch64vlshr (v8i16 V128:$Rn),
6711                                                    vecshiftR16Narrow:$imm)))),
6712          (SHRNv16i8_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
6713                           V128:$Rn, vecshiftR16Narrow:$imm)>;
6714def : Pat<(v8i16 (concat_vectors (v4i16 V64:$Rd),
6715                                 (trunc (AArch64vlshr (v4i32 V128:$Rn),
6716                                                    vecshiftR32Narrow:$imm)))),
6717          (SHRNv8i16_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
6718                           V128:$Rn, vecshiftR32Narrow:$imm)>;
6719def : Pat<(v4i32 (concat_vectors (v2i32 V64:$Rd),
6720                                 (trunc (AArch64vlshr (v2i64 V128:$Rn),
6721                                                    vecshiftR64Narrow:$imm)))),
6722          (SHRNv4i32_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
6723                           V128:$Rn, vecshiftR32Narrow:$imm)>;
6724
6725// Vector sign and zero extensions are implemented with SSHLL and USSHLL.
6726// Anyexts are implemented as zexts.
6727def : Pat<(v8i16 (sext   (v8i8 V64:$Rn))),  (SSHLLv8i8_shift  V64:$Rn, (i32 0))>;
6728def : Pat<(v8i16 (zext   (v8i8 V64:$Rn))),  (USHLLv8i8_shift  V64:$Rn, (i32 0))>;
6729def : Pat<(v8i16 (anyext (v8i8 V64:$Rn))),  (USHLLv8i8_shift  V64:$Rn, (i32 0))>;
6730def : Pat<(v4i32 (sext   (v4i16 V64:$Rn))), (SSHLLv4i16_shift V64:$Rn, (i32 0))>;
6731def : Pat<(v4i32 (zext   (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
6732def : Pat<(v4i32 (anyext (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
6733def : Pat<(v2i64 (sext   (v2i32 V64:$Rn))), (SSHLLv2i32_shift V64:$Rn, (i32 0))>;
6734def : Pat<(v2i64 (zext   (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
6735def : Pat<(v2i64 (anyext (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
6736// Also match an extend from the upper half of a 128 bit source register.
6737def : Pat<(v8i16 (anyext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
6738          (USHLLv16i8_shift V128:$Rn, (i32 0))>;
6739def : Pat<(v8i16 (zext   (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
6740          (USHLLv16i8_shift V128:$Rn, (i32 0))>;
6741def : Pat<(v8i16 (sext   (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
6742          (SSHLLv16i8_shift V128:$Rn, (i32 0))>;
6743def : Pat<(v4i32 (anyext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
6744          (USHLLv8i16_shift V128:$Rn, (i32 0))>;
6745def : Pat<(v4i32 (zext   (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
6746          (USHLLv8i16_shift V128:$Rn, (i32 0))>;
6747def : Pat<(v4i32 (sext   (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
6748          (SSHLLv8i16_shift V128:$Rn, (i32 0))>;
6749def : Pat<(v2i64 (anyext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
6750          (USHLLv4i32_shift V128:$Rn, (i32 0))>;
6751def : Pat<(v2i64 (zext   (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
6752          (USHLLv4i32_shift V128:$Rn, (i32 0))>;
6753def : Pat<(v2i64 (sext   (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
6754          (SSHLLv4i32_shift V128:$Rn, (i32 0))>;
6755
6756// Vector shift sxtl aliases
6757def : InstAlias<"sxtl.8h $dst, $src1",
6758                (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
6759def : InstAlias<"sxtl $dst.8h, $src1.8b",
6760                (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
6761def : InstAlias<"sxtl.4s $dst, $src1",
6762                (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
6763def : InstAlias<"sxtl $dst.4s, $src1.4h",
6764                (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
6765def : InstAlias<"sxtl.2d $dst, $src1",
6766                (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
6767def : InstAlias<"sxtl $dst.2d, $src1.2s",
6768                (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
6769
6770// Vector shift sxtl2 aliases
6771def : InstAlias<"sxtl2.8h $dst, $src1",
6772                (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
6773def : InstAlias<"sxtl2 $dst.8h, $src1.16b",
6774                (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
6775def : InstAlias<"sxtl2.4s $dst, $src1",
6776                (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
6777def : InstAlias<"sxtl2 $dst.4s, $src1.8h",
6778                (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
6779def : InstAlias<"sxtl2.2d $dst, $src1",
6780                (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
6781def : InstAlias<"sxtl2 $dst.2d, $src1.4s",
6782                (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
6783
6784// Vector shift uxtl aliases
6785def : InstAlias<"uxtl.8h $dst, $src1",
6786                (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
6787def : InstAlias<"uxtl $dst.8h, $src1.8b",
6788                (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
6789def : InstAlias<"uxtl.4s $dst, $src1",
6790                (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
6791def : InstAlias<"uxtl $dst.4s, $src1.4h",
6792                (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
6793def : InstAlias<"uxtl.2d $dst, $src1",
6794                (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
6795def : InstAlias<"uxtl $dst.2d, $src1.2s",
6796                (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
6797
6798// Vector shift uxtl2 aliases
6799def : InstAlias<"uxtl2.8h $dst, $src1",
6800                (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
6801def : InstAlias<"uxtl2 $dst.8h, $src1.16b",
6802                (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
6803def : InstAlias<"uxtl2.4s $dst, $src1",
6804                (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
6805def : InstAlias<"uxtl2 $dst.4s, $src1.8h",
6806                (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
6807def : InstAlias<"uxtl2.2d $dst, $src1",
6808                (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
6809def : InstAlias<"uxtl2 $dst.2d, $src1.4s",
6810                (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
6811
6812// If an integer is about to be converted to a floating point value,
6813// just load it on the floating point unit.
6814// These patterns are more complex because floating point loads do not
6815// support sign extension.
6816// The sign extension has to be explicitly added and is only supported for
6817// one step: byte-to-half, half-to-word, word-to-doubleword.
6818// SCVTF GPR -> FPR is 9 cycles.
6819// SCVTF FPR -> FPR is 4 cyclces.
6820// (sign extension with lengthen) SXTL FPR -> FPR is 2 cycles.
6821// Therefore, we can do 2 sign extensions and one SCVTF FPR -> FPR
6822// and still being faster.
6823// However, this is not good for code size.
6824// 8-bits -> float. 2 sizes step-up.
6825class SExtLoadi8CVTf32Pat<dag addrmode, dag INST>
6826  : Pat<(f32 (sint_to_fp (i32 (sextloadi8 addrmode)))),
6827        (SCVTFv1i32 (f32 (EXTRACT_SUBREG
6828                            (SSHLLv4i16_shift
6829                              (f64
6830                                (EXTRACT_SUBREG
6831                                  (SSHLLv8i8_shift
6832                                    (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
6833                                        INST,
6834                                        bsub),
6835                                    0),
6836                                  dsub)),
6837                               0),
6838                             ssub)))>,
6839    Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32, HasNEON]>;
6840
6841def : SExtLoadi8CVTf32Pat<(ro8.Wpat GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext),
6842                          (LDRBroW  GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext)>;
6843def : SExtLoadi8CVTf32Pat<(ro8.Xpat GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext),
6844                          (LDRBroX  GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext)>;
6845def : SExtLoadi8CVTf32Pat<(am_indexed8 GPR64sp:$Rn, uimm12s1:$offset),
6846                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset)>;
6847def : SExtLoadi8CVTf32Pat<(am_unscaled8 GPR64sp:$Rn, simm9:$offset),
6848                          (LDURBi GPR64sp:$Rn, simm9:$offset)>;
6849
6850// 16-bits -> float. 1 size step-up.
6851class SExtLoadi16CVTf32Pat<dag addrmode, dag INST>
6852  : Pat<(f32 (sint_to_fp (i32 (sextloadi16 addrmode)))),
6853        (SCVTFv1i32 (f32 (EXTRACT_SUBREG
6854                            (SSHLLv4i16_shift
6855                                (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
6856                                  INST,
6857                                  hsub),
6858                                0),
6859                            ssub)))>,
6860    Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32, HasNEON]>;
6861
6862def : SExtLoadi16CVTf32Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
6863                           (LDRHroW   GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
6864def : SExtLoadi16CVTf32Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
6865                           (LDRHroX   GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
6866def : SExtLoadi16CVTf32Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
6867                           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
6868def : SExtLoadi16CVTf32Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
6869                           (LDURHi GPR64sp:$Rn, simm9:$offset)>;
6870
6871// 32-bits to 32-bits are handled in target specific dag combine:
6872// performIntToFpCombine.
6873// 64-bits integer to 32-bits floating point, not possible with
6874// SCVTF on floating point registers (both source and destination
6875// must have the same size).
6876
6877// Here are the patterns for 8, 16, 32, and 64-bits to double.
6878// 8-bits -> double. 3 size step-up: give up.
6879// 16-bits -> double. 2 size step.
6880class SExtLoadi16CVTf64Pat<dag addrmode, dag INST>
6881  : Pat <(f64 (sint_to_fp (i32 (sextloadi16 addrmode)))),
6882           (SCVTFv1i64 (f64 (EXTRACT_SUBREG
6883                              (SSHLLv2i32_shift
6884                                 (f64
6885                                  (EXTRACT_SUBREG
6886                                    (SSHLLv4i16_shift
6887                                      (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
6888                                        INST,
6889                                        hsub),
6890                                     0),
6891                                   dsub)),
6892                               0),
6893                             dsub)))>,
6894    Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32, HasNEON]>;
6895
6896def : SExtLoadi16CVTf64Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
6897                           (LDRHroW GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
6898def : SExtLoadi16CVTf64Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
6899                           (LDRHroX GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
6900def : SExtLoadi16CVTf64Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
6901                           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
6902def : SExtLoadi16CVTf64Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
6903                           (LDURHi GPR64sp:$Rn, simm9:$offset)>;
6904// 32-bits -> double. 1 size step-up.
6905class SExtLoadi32CVTf64Pat<dag addrmode, dag INST>
6906  : Pat <(f64 (sint_to_fp (i32 (load addrmode)))),
6907           (SCVTFv1i64 (f64 (EXTRACT_SUBREG
6908                              (SSHLLv2i32_shift
6909                                (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
6910                                  INST,
6911                                  ssub),
6912                               0),
6913                             dsub)))>,
6914    Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32, HasNEON]>;
6915
6916def : SExtLoadi32CVTf64Pat<(ro32.Wpat GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext),
6917                           (LDRSroW GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext)>;
6918def : SExtLoadi32CVTf64Pat<(ro32.Xpat GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext),
6919                           (LDRSroX GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext)>;
6920def : SExtLoadi32CVTf64Pat<(am_indexed32 GPR64sp:$Rn, uimm12s4:$offset),
6921                           (LDRSui GPR64sp:$Rn, uimm12s4:$offset)>;
6922def : SExtLoadi32CVTf64Pat<(am_unscaled32 GPR64sp:$Rn, simm9:$offset),
6923                           (LDURSi GPR64sp:$Rn, simm9:$offset)>;
6924
6925// 64-bits -> double are handled in target specific dag combine:
6926// performIntToFpCombine.
6927
6928
6929//----------------------------------------------------------------------------
6930// AdvSIMD Load-Store Structure
6931//----------------------------------------------------------------------------
6932defm LD1 : SIMDLd1Multiple<"ld1">;
6933defm LD2 : SIMDLd2Multiple<"ld2">;
6934defm LD3 : SIMDLd3Multiple<"ld3">;
6935defm LD4 : SIMDLd4Multiple<"ld4">;
6936
6937defm ST1 : SIMDSt1Multiple<"st1">;
6938defm ST2 : SIMDSt2Multiple<"st2">;
6939defm ST3 : SIMDSt3Multiple<"st3">;
6940defm ST4 : SIMDSt4Multiple<"st4">;
6941
6942class Ld1Pat<ValueType ty, Instruction INST>
6943  : Pat<(ty (load GPR64sp:$Rn)), (INST GPR64sp:$Rn)>;
6944
6945def : Ld1Pat<v16i8, LD1Onev16b>;
6946def : Ld1Pat<v8i16, LD1Onev8h>;
6947def : Ld1Pat<v4i32, LD1Onev4s>;
6948def : Ld1Pat<v2i64, LD1Onev2d>;
6949def : Ld1Pat<v8i8,  LD1Onev8b>;
6950def : Ld1Pat<v4i16, LD1Onev4h>;
6951def : Ld1Pat<v2i32, LD1Onev2s>;
6952def : Ld1Pat<v1i64, LD1Onev1d>;
6953
6954class St1Pat<ValueType ty, Instruction INST>
6955  : Pat<(store ty:$Vt, GPR64sp:$Rn),
6956        (INST ty:$Vt, GPR64sp:$Rn)>;
6957
6958def : St1Pat<v16i8, ST1Onev16b>;
6959def : St1Pat<v8i16, ST1Onev8h>;
6960def : St1Pat<v4i32, ST1Onev4s>;
6961def : St1Pat<v2i64, ST1Onev2d>;
6962def : St1Pat<v8i8,  ST1Onev8b>;
6963def : St1Pat<v4i16, ST1Onev4h>;
6964def : St1Pat<v2i32, ST1Onev2s>;
6965def : St1Pat<v1i64, ST1Onev1d>;
6966
6967//---
6968// Single-element
6969//---
6970
6971defm LD1R          : SIMDLdR<0, 0b110, 0, "ld1r", "One", 1, 2, 4, 8>;
6972defm LD2R          : SIMDLdR<1, 0b110, 0, "ld2r", "Two", 2, 4, 8, 16>;
6973defm LD3R          : SIMDLdR<0, 0b111, 0, "ld3r", "Three", 3, 6, 12, 24>;
6974defm LD4R          : SIMDLdR<1, 0b111, 0, "ld4r", "Four", 4, 8, 16, 32>;
6975let mayLoad = 1, hasSideEffects = 0 in {
6976defm LD1 : SIMDLdSingleBTied<0, 0b000,       "ld1", VecListOneb,   GPR64pi1>;
6977defm LD1 : SIMDLdSingleHTied<0, 0b010, 0,    "ld1", VecListOneh,   GPR64pi2>;
6978defm LD1 : SIMDLdSingleSTied<0, 0b100, 0b00, "ld1", VecListOnes,   GPR64pi4>;
6979defm LD1 : SIMDLdSingleDTied<0, 0b100, 0b01, "ld1", VecListOned,   GPR64pi8>;
6980defm LD2 : SIMDLdSingleBTied<1, 0b000,       "ld2", VecListTwob,   GPR64pi2>;
6981defm LD2 : SIMDLdSingleHTied<1, 0b010, 0,    "ld2", VecListTwoh,   GPR64pi4>;
6982defm LD2 : SIMDLdSingleSTied<1, 0b100, 0b00, "ld2", VecListTwos,   GPR64pi8>;
6983defm LD2 : SIMDLdSingleDTied<1, 0b100, 0b01, "ld2", VecListTwod,   GPR64pi16>;
6984defm LD3 : SIMDLdSingleBTied<0, 0b001,       "ld3", VecListThreeb, GPR64pi3>;
6985defm LD3 : SIMDLdSingleHTied<0, 0b011, 0,    "ld3", VecListThreeh, GPR64pi6>;
6986defm LD3 : SIMDLdSingleSTied<0, 0b101, 0b00, "ld3", VecListThrees, GPR64pi12>;
6987defm LD3 : SIMDLdSingleDTied<0, 0b101, 0b01, "ld3", VecListThreed, GPR64pi24>;
6988defm LD4 : SIMDLdSingleBTied<1, 0b001,       "ld4", VecListFourb,  GPR64pi4>;
6989defm LD4 : SIMDLdSingleHTied<1, 0b011, 0,    "ld4", VecListFourh,  GPR64pi8>;
6990defm LD4 : SIMDLdSingleSTied<1, 0b101, 0b00, "ld4", VecListFours,  GPR64pi16>;
6991defm LD4 : SIMDLdSingleDTied<1, 0b101, 0b01, "ld4", VecListFourd,  GPR64pi32>;
6992}
6993
6994def : Pat<(v8i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
6995          (LD1Rv8b GPR64sp:$Rn)>;
6996def : Pat<(v16i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
6997          (LD1Rv16b GPR64sp:$Rn)>;
6998def : Pat<(v4i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
6999          (LD1Rv4h GPR64sp:$Rn)>;
7000def : Pat<(v8i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
7001          (LD1Rv8h GPR64sp:$Rn)>;
7002def : Pat<(v2i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
7003          (LD1Rv2s GPR64sp:$Rn)>;
7004def : Pat<(v4i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
7005          (LD1Rv4s GPR64sp:$Rn)>;
7006def : Pat<(v2i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
7007          (LD1Rv2d GPR64sp:$Rn)>;
7008def : Pat<(v1i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
7009          (LD1Rv1d GPR64sp:$Rn)>;
7010// Grab the floating point version too
7011def : Pat<(v2f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
7012          (LD1Rv2s GPR64sp:$Rn)>;
7013def : Pat<(v4f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
7014          (LD1Rv4s GPR64sp:$Rn)>;
7015def : Pat<(v2f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
7016          (LD1Rv2d GPR64sp:$Rn)>;
7017def : Pat<(v1f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
7018          (LD1Rv1d GPR64sp:$Rn)>;
7019def : Pat<(v4f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
7020          (LD1Rv4h GPR64sp:$Rn)>;
7021def : Pat<(v8f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
7022          (LD1Rv8h GPR64sp:$Rn)>;
7023def : Pat<(v4bf16 (AArch64dup (bf16 (load GPR64sp:$Rn)))),
7024          (LD1Rv4h GPR64sp:$Rn)>;
7025def : Pat<(v8bf16 (AArch64dup (bf16 (load GPR64sp:$Rn)))),
7026          (LD1Rv8h GPR64sp:$Rn)>;
7027
7028class Ld1Lane128Pat<SDPatternOperator scalar_load, Operand VecIndex,
7029                    ValueType VTy, ValueType STy, Instruction LD1>
7030  : Pat<(vector_insert (VTy VecListOne128:$Rd),
7031           (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
7032        (LD1 VecListOne128:$Rd, VecIndex:$idx, GPR64sp:$Rn)>;
7033
7034def : Ld1Lane128Pat<extloadi8,  VectorIndexB, v16i8, i32, LD1i8>;
7035def : Ld1Lane128Pat<extloadi16, VectorIndexH, v8i16, i32, LD1i16>;
7036def : Ld1Lane128Pat<load,       VectorIndexS, v4i32, i32, LD1i32>;
7037def : Ld1Lane128Pat<load,       VectorIndexS, v4f32, f32, LD1i32>;
7038def : Ld1Lane128Pat<load,       VectorIndexD, v2i64, i64, LD1i64>;
7039def : Ld1Lane128Pat<load,       VectorIndexD, v2f64, f64, LD1i64>;
7040def : Ld1Lane128Pat<load,       VectorIndexH, v8f16, f16, LD1i16>;
7041def : Ld1Lane128Pat<load,       VectorIndexH, v8bf16, bf16, LD1i16>;
7042
7043// Generate LD1 for extload if memory type does not match the
7044// destination type, for example:
7045//
7046//   (v4i32 (insert_vector_elt (load anyext from i8) idx))
7047//
7048// In this case, the index must be adjusted to match LD1 type.
7049//
7050class Ld1Lane128IdxOpPat<SDPatternOperator scalar_load, Operand
7051                    VecIndex, ValueType VTy, ValueType STy,
7052                    Instruction LD1, SDNodeXForm IdxOp>
7053  : Pat<(vector_insert (VTy VecListOne128:$Rd),
7054                       (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
7055        (LD1 VecListOne128:$Rd, (IdxOp VecIndex:$idx), GPR64sp:$Rn)>;
7056
7057def VectorIndexStoH : SDNodeXForm<imm, [{
7058  return CurDAG->getTargetConstant(N->getZExtValue() * 2, SDLoc(N), MVT::i64);
7059}]>;
7060def VectorIndexStoB : SDNodeXForm<imm, [{
7061  return CurDAG->getTargetConstant(N->getZExtValue() * 4, SDLoc(N), MVT::i64);
7062}]>;
7063def VectorIndexHtoB : SDNodeXForm<imm, [{
7064  return CurDAG->getTargetConstant(N->getZExtValue() * 2, SDLoc(N), MVT::i64);
7065}]>;
7066
7067def : Ld1Lane128IdxOpPat<extloadi16, VectorIndexS, v4i32, i32, LD1i16, VectorIndexStoH>;
7068def : Ld1Lane128IdxOpPat<extloadi8, VectorIndexS, v4i32, i32, LD1i8, VectorIndexStoB>;
7069def : Ld1Lane128IdxOpPat<extloadi8, VectorIndexH, v8i16, i32, LD1i8, VectorIndexHtoB>;
7070
7071// Same as above, but the first element is populated using
7072// scalar_to_vector + insert_subvector instead of insert_vector_elt.
7073class Ld1Lane128FirstElm<ValueType ResultTy, ValueType VecTy,
7074                        SDPatternOperator ExtLoad, Instruction LD1>
7075  : Pat<(ResultTy (scalar_to_vector (i32 (ExtLoad GPR64sp:$Rn)))),
7076          (ResultTy (EXTRACT_SUBREG
7077            (LD1 (VecTy (IMPLICIT_DEF)), 0, GPR64sp:$Rn), dsub))>;
7078
7079def : Ld1Lane128FirstElm<v2i32, v8i16, extloadi16, LD1i16>;
7080def : Ld1Lane128FirstElm<v2i32, v16i8, extloadi8, LD1i8>;
7081def : Ld1Lane128FirstElm<v4i16, v16i8, extloadi8, LD1i8>;
7082
7083class Ld1Lane64Pat<SDPatternOperator scalar_load, Operand VecIndex,
7084                   ValueType VTy, ValueType STy, Instruction LD1>
7085  : Pat<(vector_insert (VTy VecListOne64:$Rd),
7086           (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
7087        (EXTRACT_SUBREG
7088            (LD1 (SUBREG_TO_REG (i32 0), VecListOne64:$Rd, dsub),
7089                          VecIndex:$idx, GPR64sp:$Rn),
7090            dsub)>;
7091
7092def : Ld1Lane64Pat<extloadi8,  VectorIndexB, v8i8,  i32, LD1i8>;
7093def : Ld1Lane64Pat<extloadi16, VectorIndexH, v4i16, i32, LD1i16>;
7094def : Ld1Lane64Pat<load,       VectorIndexS, v2i32, i32, LD1i32>;
7095def : Ld1Lane64Pat<load,       VectorIndexS, v2f32, f32, LD1i32>;
7096def : Ld1Lane64Pat<load,       VectorIndexH, v4f16, f16, LD1i16>;
7097def : Ld1Lane64Pat<load,       VectorIndexH, v4bf16, bf16, LD1i16>;
7098
7099
7100defm LD1 : SIMDLdSt1SingleAliases<"ld1">;
7101defm LD2 : SIMDLdSt2SingleAliases<"ld2">;
7102defm LD3 : SIMDLdSt3SingleAliases<"ld3">;
7103defm LD4 : SIMDLdSt4SingleAliases<"ld4">;
7104
7105// Stores
7106defm ST1 : SIMDStSingleB<0, 0b000,       "st1", VecListOneb, GPR64pi1>;
7107defm ST1 : SIMDStSingleH<0, 0b010, 0,    "st1", VecListOneh, GPR64pi2>;
7108defm ST1 : SIMDStSingleS<0, 0b100, 0b00, "st1", VecListOnes, GPR64pi4>;
7109defm ST1 : SIMDStSingleD<0, 0b100, 0b01, "st1", VecListOned, GPR64pi8>;
7110
7111let AddedComplexity = 19 in
7112class St1Lane128Pat<SDPatternOperator scalar_store, Operand VecIndex,
7113                    ValueType VTy, ValueType STy, Instruction ST1>
7114  : Pat<(scalar_store
7115             (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
7116             GPR64sp:$Rn),
7117        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn)>;
7118
7119def : St1Lane128Pat<truncstorei8,  VectorIndexB, v16i8, i32, ST1i8>;
7120def : St1Lane128Pat<truncstorei16, VectorIndexH, v8i16, i32, ST1i16>;
7121def : St1Lane128Pat<store,         VectorIndexS, v4i32, i32, ST1i32>;
7122def : St1Lane128Pat<store,         VectorIndexS, v4f32, f32, ST1i32>;
7123def : St1Lane128Pat<store,         VectorIndexD, v2i64, i64, ST1i64>;
7124def : St1Lane128Pat<store,         VectorIndexD, v2f64, f64, ST1i64>;
7125def : St1Lane128Pat<store,         VectorIndexH, v8f16, f16, ST1i16>;
7126def : St1Lane128Pat<store,         VectorIndexH, v8bf16, bf16, ST1i16>;
7127
7128let AddedComplexity = 19 in
7129class St1Lane64Pat<SDPatternOperator scalar_store, Operand VecIndex,
7130                   ValueType VTy, ValueType STy, Instruction ST1>
7131  : Pat<(scalar_store
7132             (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
7133             GPR64sp:$Rn),
7134        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
7135             VecIndex:$idx, GPR64sp:$Rn)>;
7136
7137def : St1Lane64Pat<truncstorei8,  VectorIndexB, v8i8, i32, ST1i8>;
7138def : St1Lane64Pat<truncstorei16, VectorIndexH, v4i16, i32, ST1i16>;
7139def : St1Lane64Pat<store,         VectorIndexS, v2i32, i32, ST1i32>;
7140def : St1Lane64Pat<store,         VectorIndexS, v2f32, f32, ST1i32>;
7141def : St1Lane64Pat<store,         VectorIndexH, v4f16, f16, ST1i16>;
7142def : St1Lane64Pat<store,         VectorIndexH, v4bf16, bf16, ST1i16>;
7143
7144multiclass St1LanePost64Pat<SDPatternOperator scalar_store, Operand VecIndex,
7145                             ValueType VTy, ValueType STy, Instruction ST1,
7146                             int offset> {
7147  def : Pat<(scalar_store
7148              (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
7149              GPR64sp:$Rn, offset),
7150        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
7151             VecIndex:$idx, GPR64sp:$Rn, XZR)>;
7152
7153  def : Pat<(scalar_store
7154              (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
7155              GPR64sp:$Rn, GPR64:$Rm),
7156        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
7157             VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
7158}
7159
7160defm : St1LanePost64Pat<post_truncsti8, VectorIndexB, v8i8, i32, ST1i8_POST, 1>;
7161defm : St1LanePost64Pat<post_truncsti16, VectorIndexH, v4i16, i32, ST1i16_POST,
7162                        2>;
7163defm : St1LanePost64Pat<post_store, VectorIndexS, v2i32, i32, ST1i32_POST, 4>;
7164defm : St1LanePost64Pat<post_store, VectorIndexS, v2f32, f32, ST1i32_POST, 4>;
7165defm : St1LanePost64Pat<post_store, VectorIndexD, v1i64, i64, ST1i64_POST, 8>;
7166defm : St1LanePost64Pat<post_store, VectorIndexD, v1f64, f64, ST1i64_POST, 8>;
7167defm : St1LanePost64Pat<post_store, VectorIndexH, v4f16, f16, ST1i16_POST, 2>;
7168defm : St1LanePost64Pat<post_store, VectorIndexH, v4bf16, bf16, ST1i16_POST, 2>;
7169
7170multiclass St1LanePost128Pat<SDPatternOperator scalar_store, Operand VecIndex,
7171                             ValueType VTy, ValueType STy, Instruction ST1,
7172                             int offset> {
7173  def : Pat<(scalar_store
7174              (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
7175              GPR64sp:$Rn, offset),
7176        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, XZR)>;
7177
7178  def : Pat<(scalar_store
7179              (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
7180              GPR64sp:$Rn, GPR64:$Rm),
7181        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
7182}
7183
7184defm : St1LanePost128Pat<post_truncsti8, VectorIndexB, v16i8, i32, ST1i8_POST,
7185                         1>;
7186defm : St1LanePost128Pat<post_truncsti16, VectorIndexH, v8i16, i32, ST1i16_POST,
7187                         2>;
7188defm : St1LanePost128Pat<post_store, VectorIndexS, v4i32, i32, ST1i32_POST, 4>;
7189defm : St1LanePost128Pat<post_store, VectorIndexS, v4f32, f32, ST1i32_POST, 4>;
7190defm : St1LanePost128Pat<post_store, VectorIndexD, v2i64, i64, ST1i64_POST, 8>;
7191defm : St1LanePost128Pat<post_store, VectorIndexD, v2f64, f64, ST1i64_POST, 8>;
7192defm : St1LanePost128Pat<post_store, VectorIndexH, v8f16, f16, ST1i16_POST, 2>;
7193defm : St1LanePost128Pat<post_store, VectorIndexH, v8bf16, bf16, ST1i16_POST, 2>;
7194
7195let mayStore = 1, hasSideEffects = 0 in {
7196defm ST2 : SIMDStSingleB<1, 0b000,       "st2", VecListTwob,   GPR64pi2>;
7197defm ST2 : SIMDStSingleH<1, 0b010, 0,    "st2", VecListTwoh,   GPR64pi4>;
7198defm ST2 : SIMDStSingleS<1, 0b100, 0b00, "st2", VecListTwos,   GPR64pi8>;
7199defm ST2 : SIMDStSingleD<1, 0b100, 0b01, "st2", VecListTwod,   GPR64pi16>;
7200defm ST3 : SIMDStSingleB<0, 0b001,       "st3", VecListThreeb, GPR64pi3>;
7201defm ST3 : SIMDStSingleH<0, 0b011, 0,    "st3", VecListThreeh, GPR64pi6>;
7202defm ST3 : SIMDStSingleS<0, 0b101, 0b00, "st3", VecListThrees, GPR64pi12>;
7203defm ST3 : SIMDStSingleD<0, 0b101, 0b01, "st3", VecListThreed, GPR64pi24>;
7204defm ST4 : SIMDStSingleB<1, 0b001,       "st4", VecListFourb,  GPR64pi4>;
7205defm ST4 : SIMDStSingleH<1, 0b011, 0,    "st4", VecListFourh,  GPR64pi8>;
7206defm ST4 : SIMDStSingleS<1, 0b101, 0b00, "st4", VecListFours,  GPR64pi16>;
7207defm ST4 : SIMDStSingleD<1, 0b101, 0b01, "st4", VecListFourd,  GPR64pi32>;
7208}
7209
7210defm ST1 : SIMDLdSt1SingleAliases<"st1">;
7211defm ST2 : SIMDLdSt2SingleAliases<"st2">;
7212defm ST3 : SIMDLdSt3SingleAliases<"st3">;
7213defm ST4 : SIMDLdSt4SingleAliases<"st4">;
7214
7215//----------------------------------------------------------------------------
7216// Crypto extensions
7217//----------------------------------------------------------------------------
7218
7219let Predicates = [HasAES] in {
7220def AESErr   : AESTiedInst<0b0100, "aese",   int_aarch64_crypto_aese>;
7221def AESDrr   : AESTiedInst<0b0101, "aesd",   int_aarch64_crypto_aesd>;
7222def AESMCrr  : AESInst<    0b0110, "aesmc",  int_aarch64_crypto_aesmc>;
7223def AESIMCrr : AESInst<    0b0111, "aesimc", int_aarch64_crypto_aesimc>;
7224}
7225
7226// Pseudo instructions for AESMCrr/AESIMCrr with a register constraint required
7227// for AES fusion on some CPUs.
7228let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
7229def AESMCrrTied: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
7230                        Sched<[WriteVq]>;
7231def AESIMCrrTied: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
7232                         Sched<[WriteVq]>;
7233}
7234
7235// Only use constrained versions of AES(I)MC instructions if they are paired with
7236// AESE/AESD.
7237def : Pat<(v16i8 (int_aarch64_crypto_aesmc
7238            (v16i8 (int_aarch64_crypto_aese (v16i8 V128:$src1),
7239                                            (v16i8 V128:$src2))))),
7240          (v16i8 (AESMCrrTied (v16i8 (AESErr (v16i8 V128:$src1),
7241                                             (v16i8 V128:$src2)))))>,
7242          Requires<[HasFuseAES]>;
7243
7244def : Pat<(v16i8 (int_aarch64_crypto_aesimc
7245            (v16i8 (int_aarch64_crypto_aesd (v16i8 V128:$src1),
7246                                            (v16i8 V128:$src2))))),
7247          (v16i8 (AESIMCrrTied (v16i8 (AESDrr (v16i8 V128:$src1),
7248                                              (v16i8 V128:$src2)))))>,
7249          Requires<[HasFuseAES]>;
7250
7251let Predicates = [HasSHA2] in {
7252def SHA1Crrr     : SHATiedInstQSV<0b000, "sha1c",   int_aarch64_crypto_sha1c>;
7253def SHA1Prrr     : SHATiedInstQSV<0b001, "sha1p",   int_aarch64_crypto_sha1p>;
7254def SHA1Mrrr     : SHATiedInstQSV<0b010, "sha1m",   int_aarch64_crypto_sha1m>;
7255def SHA1SU0rrr   : SHATiedInstVVV<0b011, "sha1su0", int_aarch64_crypto_sha1su0>;
7256def SHA256Hrrr   : SHATiedInstQQV<0b100, "sha256h", int_aarch64_crypto_sha256h>;
7257def SHA256H2rrr  : SHATiedInstQQV<0b101, "sha256h2",int_aarch64_crypto_sha256h2>;
7258def SHA256SU1rrr :SHATiedInstVVV<0b110, "sha256su1",int_aarch64_crypto_sha256su1>;
7259
7260def SHA1Hrr     : SHAInstSS<    0b0000, "sha1h",    int_aarch64_crypto_sha1h>;
7261def SHA1SU1rr   : SHATiedInstVV<0b0001, "sha1su1",  int_aarch64_crypto_sha1su1>;
7262def SHA256SU0rr : SHATiedInstVV<0b0010, "sha256su0",int_aarch64_crypto_sha256su0>;
7263}
7264
7265//----------------------------------------------------------------------------
7266// Compiler-pseudos
7267//----------------------------------------------------------------------------
7268// FIXME: Like for X86, these should go in their own separate .td file.
7269
7270// For an anyext, we don't care what the high bits are, so we can perform an
7271// INSERT_SUBREF into an IMPLICIT_DEF.
7272def : Pat<(i64 (anyext GPR32:$src)),
7273          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32)>;
7274
7275// When we need to explicitly zero-extend, we use a 32-bit MOV instruction and
7276// then assert the extension has happened.
7277def : Pat<(i64 (zext GPR32:$src)),
7278          (SUBREG_TO_REG (i32 0), (ORRWrs WZR, GPR32:$src, 0), sub_32)>;
7279
7280// To sign extend, we use a signed bitfield move instruction (SBFM) on the
7281// containing super-reg.
7282def : Pat<(i64 (sext GPR32:$src)),
7283   (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32), 0, 31)>;
7284def : Pat<(i64 (sext_inreg GPR64:$src, i32)), (SBFMXri GPR64:$src, 0, 31)>;
7285def : Pat<(i64 (sext_inreg GPR64:$src, i16)), (SBFMXri GPR64:$src, 0, 15)>;
7286def : Pat<(i64 (sext_inreg GPR64:$src, i8)),  (SBFMXri GPR64:$src, 0, 7)>;
7287def : Pat<(i64 (sext_inreg GPR64:$src, i1)),  (SBFMXri GPR64:$src, 0, 0)>;
7288def : Pat<(i32 (sext_inreg GPR32:$src, i16)), (SBFMWri GPR32:$src, 0, 15)>;
7289def : Pat<(i32 (sext_inreg GPR32:$src, i8)),  (SBFMWri GPR32:$src, 0, 7)>;
7290def : Pat<(i32 (sext_inreg GPR32:$src, i1)),  (SBFMWri GPR32:$src, 0, 0)>;
7291
7292def : Pat<(shl (sext_inreg GPR32:$Rn, i8), (i64 imm0_31:$imm)),
7293          (SBFMWri GPR32:$Rn, (i64 (i32shift_a       imm0_31:$imm)),
7294                              (i64 (i32shift_sext_i8 imm0_31:$imm)))>;
7295def : Pat<(shl (sext_inreg GPR64:$Rn, i8), (i64 imm0_63:$imm)),
7296          (SBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
7297                              (i64 (i64shift_sext_i8 imm0_63:$imm)))>;
7298
7299def : Pat<(shl (sext_inreg GPR32:$Rn, i16), (i64 imm0_31:$imm)),
7300          (SBFMWri GPR32:$Rn, (i64 (i32shift_a        imm0_31:$imm)),
7301                              (i64 (i32shift_sext_i16 imm0_31:$imm)))>;
7302def : Pat<(shl (sext_inreg GPR64:$Rn, i16), (i64 imm0_63:$imm)),
7303          (SBFMXri GPR64:$Rn, (i64 (i64shift_a        imm0_63:$imm)),
7304                              (i64 (i64shift_sext_i16 imm0_63:$imm)))>;
7305
7306def : Pat<(shl (i64 (sext GPR32:$Rn)), (i64 imm0_63:$imm)),
7307          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
7308                   (i64 (i64shift_a        imm0_63:$imm)),
7309                   (i64 (i64shift_sext_i32 imm0_63:$imm)))>;
7310
7311// sra patterns have an AddedComplexity of 10, so make sure we have a higher
7312// AddedComplexity for the following patterns since we want to match sext + sra
7313// patterns before we attempt to match a single sra node.
7314let AddedComplexity = 20 in {
7315// We support all sext + sra combinations which preserve at least one bit of the
7316// original value which is to be sign extended. E.g. we support shifts up to
7317// bitwidth-1 bits.
7318def : Pat<(sra (sext_inreg GPR32:$Rn, i8), (i64 imm0_7:$imm)),
7319          (SBFMWri GPR32:$Rn, (i64 imm0_7:$imm), 7)>;
7320def : Pat<(sra (sext_inreg GPR64:$Rn, i8), (i64 imm0_7:$imm)),
7321          (SBFMXri GPR64:$Rn, (i64 imm0_7:$imm), 7)>;
7322
7323def : Pat<(sra (sext_inreg GPR32:$Rn, i16), (i64 imm0_15:$imm)),
7324          (SBFMWri GPR32:$Rn, (i64 imm0_15:$imm), 15)>;
7325def : Pat<(sra (sext_inreg GPR64:$Rn, i16), (i64 imm0_15:$imm)),
7326          (SBFMXri GPR64:$Rn, (i64 imm0_15:$imm), 15)>;
7327
7328def : Pat<(sra (i64 (sext GPR32:$Rn)), (i64 imm0_31:$imm)),
7329          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
7330                   (i64 imm0_31:$imm), 31)>;
7331} // AddedComplexity = 20
7332
7333// To truncate, we can simply extract from a subregister.
7334def : Pat<(i32 (trunc GPR64sp:$src)),
7335          (i32 (EXTRACT_SUBREG GPR64sp:$src, sub_32))>;
7336
7337// __builtin_trap() uses the BRK instruction on AArch64.
7338def : Pat<(trap), (BRK 1)>;
7339def : Pat<(debugtrap), (BRK 0xF000)>;
7340
7341def ubsan_trap_xform : SDNodeXForm<timm, [{
7342  return CurDAG->getTargetConstant(N->getZExtValue() | ('U' << 8), SDLoc(N), MVT::i32);
7343}]>;
7344
7345def ubsan_trap_imm : TImmLeaf<i32, [{
7346  return isUInt<8>(Imm);
7347}], ubsan_trap_xform>;
7348
7349def : Pat<(ubsantrap ubsan_trap_imm:$kind), (BRK ubsan_trap_imm:$kind)>;
7350
7351// Multiply high patterns which multiply the lower subvector using smull/umull
7352// and the upper subvector with smull2/umull2. Then shuffle the high the high
7353// part of both results together.
7354def : Pat<(v16i8 (mulhs V128:$Rn, V128:$Rm)),
7355          (UZP2v16i8
7356           (SMULLv8i8_v8i16 (EXTRACT_SUBREG V128:$Rn, dsub),
7357                            (EXTRACT_SUBREG V128:$Rm, dsub)),
7358           (SMULLv16i8_v8i16 V128:$Rn, V128:$Rm))>;
7359def : Pat<(v8i16 (mulhs V128:$Rn, V128:$Rm)),
7360          (UZP2v8i16
7361           (SMULLv4i16_v4i32 (EXTRACT_SUBREG V128:$Rn, dsub),
7362                             (EXTRACT_SUBREG V128:$Rm, dsub)),
7363           (SMULLv8i16_v4i32 V128:$Rn, V128:$Rm))>;
7364def : Pat<(v4i32 (mulhs V128:$Rn, V128:$Rm)),
7365          (UZP2v4i32
7366           (SMULLv2i32_v2i64 (EXTRACT_SUBREG V128:$Rn, dsub),
7367                             (EXTRACT_SUBREG V128:$Rm, dsub)),
7368           (SMULLv4i32_v2i64 V128:$Rn, V128:$Rm))>;
7369
7370def : Pat<(v16i8 (mulhu V128:$Rn, V128:$Rm)),
7371          (UZP2v16i8
7372           (UMULLv8i8_v8i16 (EXTRACT_SUBREG V128:$Rn, dsub),
7373                            (EXTRACT_SUBREG V128:$Rm, dsub)),
7374           (UMULLv16i8_v8i16 V128:$Rn, V128:$Rm))>;
7375def : Pat<(v8i16 (mulhu V128:$Rn, V128:$Rm)),
7376          (UZP2v8i16
7377           (UMULLv4i16_v4i32 (EXTRACT_SUBREG V128:$Rn, dsub),
7378                             (EXTRACT_SUBREG V128:$Rm, dsub)),
7379           (UMULLv8i16_v4i32 V128:$Rn, V128:$Rm))>;
7380def : Pat<(v4i32 (mulhu V128:$Rn, V128:$Rm)),
7381          (UZP2v4i32
7382           (UMULLv2i32_v2i64 (EXTRACT_SUBREG V128:$Rn, dsub),
7383                             (EXTRACT_SUBREG V128:$Rm, dsub)),
7384           (UMULLv4i32_v2i64 V128:$Rn, V128:$Rm))>;
7385
7386// Conversions within AdvSIMD types in the same register size are free.
7387// But because we need a consistent lane ordering, in big endian many
7388// conversions require one or more REV instructions.
7389//
7390// Consider a simple memory load followed by a bitconvert then a store.
7391//   v0 = load v2i32
7392//   v1 = BITCAST v2i32 v0 to v4i16
7393//        store v4i16 v2
7394//
7395// In big endian mode every memory access has an implicit byte swap. LDR and
7396// STR do a 64-bit byte swap, whereas LD1/ST1 do a byte swap per lane - that
7397// is, they treat the vector as a sequence of elements to be byte-swapped.
7398// The two pairs of instructions are fundamentally incompatible. We've decided
7399// to use LD1/ST1 only to simplify compiler implementation.
7400//
7401// LD1/ST1 perform the equivalent of a sequence of LDR/STR + REV. This makes
7402// the original code sequence:
7403//   v0 = load v2i32
7404//   v1 = REV v2i32                  (implicit)
7405//   v2 = BITCAST v2i32 v1 to v4i16
7406//   v3 = REV v4i16 v2               (implicit)
7407//        store v4i16 v3
7408//
7409// But this is now broken - the value stored is different to the value loaded
7410// due to lane reordering. To fix this, on every BITCAST we must perform two
7411// other REVs:
7412//   v0 = load v2i32
7413//   v1 = REV v2i32                  (implicit)
7414//   v2 = REV v2i32
7415//   v3 = BITCAST v2i32 v2 to v4i16
7416//   v4 = REV v4i16
7417//   v5 = REV v4i16 v4               (implicit)
7418//        store v4i16 v5
7419//
7420// This means an extra two instructions, but actually in most cases the two REV
7421// instructions can be combined into one. For example:
7422//   (REV64_2s (REV64_4h X)) === (REV32_4h X)
7423//
7424// There is also no 128-bit REV instruction. This must be synthesized with an
7425// EXT instruction.
7426//
7427// Most bitconverts require some sort of conversion. The only exceptions are:
7428//   a) Identity conversions -  vNfX <-> vNiX
7429//   b) Single-lane-to-scalar - v1fX <-> fX or v1iX <-> iX
7430//
7431
7432// Natural vector casts (64 bit)
7433foreach VT = [ v8i8, v4i16, v4f16, v4bf16, v2i32, v2f32, v1i64, v1f64, f64 ] in
7434  foreach VT2 = [ v8i8, v4i16, v4f16, v4bf16, v2i32, v2f32, v1i64, v1f64, f64 ] in
7435    def : Pat<(VT (AArch64NvCast (VT2 FPR64:$src))),
7436              (VT FPR64:$src)>;
7437
7438// Natural vector casts (128 bit)
7439foreach VT = [ v16i8, v8i16, v8f16, v8bf16, v4i32, v4f32, v2i64, v2f64 ] in
7440  foreach VT2 = [ v16i8, v8i16, v8f16, v8bf16, v4i32, v4f32, v2i64, v2f64 ] in
7441    def : Pat<(VT (AArch64NvCast (VT2 FPR128:$src))),
7442              (VT FPR128:$src)>;
7443
7444let Predicates = [IsLE] in {
7445def : Pat<(v8i8  (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7446def : Pat<(v4i16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7447def : Pat<(v2i32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7448def : Pat<(v4f16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7449def : Pat<(v4bf16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7450def : Pat<(v2f32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7451
7452def : Pat<(i64 (bitconvert (v8i8  V64:$Vn))),
7453          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7454def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
7455          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7456def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
7457          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7458def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
7459          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7460def : Pat<(i64 (bitconvert (v4bf16 V64:$Vn))),
7461          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7462def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
7463          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7464def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
7465          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7466}
7467let Predicates = [IsBE] in {
7468def : Pat<(v8i8  (bitconvert GPR64:$Xn)),
7469                 (REV64v8i8 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
7470def : Pat<(v4i16 (bitconvert GPR64:$Xn)),
7471                 (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
7472def : Pat<(v2i32 (bitconvert GPR64:$Xn)),
7473                 (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
7474def : Pat<(v4f16 (bitconvert GPR64:$Xn)),
7475                 (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
7476def : Pat<(v4bf16 (bitconvert GPR64:$Xn)),
7477                  (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
7478def : Pat<(v2f32 (bitconvert GPR64:$Xn)),
7479                 (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
7480
7481def : Pat<(i64 (bitconvert (v8i8  V64:$Vn))),
7482          (REV64v8i8 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
7483def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
7484          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
7485def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
7486          (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
7487def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
7488          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
7489def : Pat<(i64 (bitconvert (v4bf16 V64:$Vn))),
7490          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
7491def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
7492          (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
7493}
7494def : Pat<(v1i64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7495def : Pat<(v1f64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7496def : Pat<(i64 (bitconvert (v1i64 V64:$Vn))),
7497          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7498def : Pat<(v1i64 (scalar_to_vector GPR64:$Xn)),
7499          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7500def : Pat<(v1f64 (scalar_to_vector GPR64:$Xn)),
7501          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7502def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Xn))), (v1f64 FPR64:$Xn)>;
7503
7504def : Pat<(f32 (bitconvert (i32 GPR32:$Xn))),
7505          (COPY_TO_REGCLASS GPR32:$Xn, FPR32)>;
7506def : Pat<(i32 (bitconvert (f32 FPR32:$Xn))),
7507          (COPY_TO_REGCLASS FPR32:$Xn, GPR32)>;
7508def : Pat<(f64 (bitconvert (i64 GPR64:$Xn))),
7509          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7510def : Pat<(i64 (bitconvert (f64 FPR64:$Xn))),
7511          (COPY_TO_REGCLASS FPR64:$Xn, GPR64)>;
7512def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
7513          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7514
7515def : Pat<(f16 (bitconvert (bf16 FPR16:$src))), (f16 FPR16:$src)>;
7516def : Pat<(bf16 (bitconvert (f16 FPR16:$src))), (bf16 FPR16:$src)>;
7517
7518let Predicates = [IsLE] in {
7519def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;
7520def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;
7521def : Pat<(v1i64 (bitconvert (v8i8  FPR64:$src))), (v1i64 FPR64:$src)>;
7522def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))), (v1i64 FPR64:$src)>;
7523def : Pat<(v1i64 (bitconvert (v4bf16 FPR64:$src))), (v1i64 FPR64:$src)>;
7524def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
7525}
7526let Predicates = [IsBE] in {
7527def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))),
7528                             (v1i64 (REV64v2i32 FPR64:$src))>;
7529def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))),
7530                             (v1i64 (REV64v4i16 FPR64:$src))>;
7531def : Pat<(v1i64 (bitconvert (v8i8  FPR64:$src))),
7532                             (v1i64 (REV64v8i8 FPR64:$src))>;
7533def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))),
7534                             (v1i64 (REV64v4i16 FPR64:$src))>;
7535def : Pat<(v1i64 (bitconvert (v4bf16 FPR64:$src))),
7536                             (v1i64 (REV64v4i16 FPR64:$src))>;
7537def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))),
7538                             (v1i64 (REV64v2i32 FPR64:$src))>;
7539}
7540def : Pat<(v1i64 (bitconvert (v1f64 FPR64:$src))), (v1i64 FPR64:$src)>;
7541def : Pat<(v1i64 (bitconvert (f64   FPR64:$src))), (v1i64 FPR64:$src)>;
7542
7543let Predicates = [IsLE] in {
7544def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))), (v2i32 FPR64:$src)>;
7545def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
7546def : Pat<(v2i32 (bitconvert (v8i8  FPR64:$src))), (v2i32 FPR64:$src)>;
7547def : Pat<(v2i32 (bitconvert (f64   FPR64:$src))), (v2i32 FPR64:$src)>;
7548def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))), (v2i32 FPR64:$src)>;
7549def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))), (v2i32 FPR64:$src)>;
7550def : Pat<(v2i32 (bitconvert (v4bf16 FPR64:$src))), (v2i32 FPR64:$src)>;
7551}
7552let Predicates = [IsBE] in {
7553def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))),
7554                             (v2i32 (REV64v2i32 FPR64:$src))>;
7555def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))),
7556                             (v2i32 (REV32v4i16 FPR64:$src))>;
7557def : Pat<(v2i32 (bitconvert (v8i8  FPR64:$src))),
7558                             (v2i32 (REV32v8i8 FPR64:$src))>;
7559def : Pat<(v2i32 (bitconvert (f64   FPR64:$src))),
7560                             (v2i32 (REV64v2i32 FPR64:$src))>;
7561def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))),
7562                             (v2i32 (REV64v2i32 FPR64:$src))>;
7563def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))),
7564                             (v2i32 (REV32v4i16 FPR64:$src))>;
7565def : Pat<(v2i32 (bitconvert (v4bf16 FPR64:$src))),
7566                             (v2i32 (REV32v4i16 FPR64:$src))>;
7567}
7568def : Pat<(v2i32 (bitconvert (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;
7569
7570let Predicates = [IsLE] in {
7571def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))), (v4i16 FPR64:$src)>;
7572def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
7573def : Pat<(v4i16 (bitconvert (v8i8  FPR64:$src))), (v4i16 FPR64:$src)>;
7574def : Pat<(v4i16 (bitconvert (f64   FPR64:$src))), (v4i16 FPR64:$src)>;
7575def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
7576def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))), (v4i16 FPR64:$src)>;
7577}
7578let Predicates = [IsBE] in {
7579def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))),
7580                             (v4i16 (REV64v4i16 FPR64:$src))>;
7581def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))),
7582                             (v4i16 (REV32v4i16 FPR64:$src))>;
7583def : Pat<(v4i16 (bitconvert (v8i8  FPR64:$src))),
7584                             (v4i16 (REV16v8i8 FPR64:$src))>;
7585def : Pat<(v4i16 (bitconvert (f64   FPR64:$src))),
7586                             (v4i16 (REV64v4i16 FPR64:$src))>;
7587def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))),
7588                             (v4i16 (REV32v4i16 FPR64:$src))>;
7589def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))),
7590                             (v4i16 (REV64v4i16 FPR64:$src))>;
7591}
7592def : Pat<(v4i16 (bitconvert (v4f16 FPR64:$src))), (v4i16 FPR64:$src)>;
7593def : Pat<(v4i16 (bitconvert (v4bf16 FPR64:$src))), (v4i16 FPR64:$src)>;
7594
7595let Predicates = [IsLE] in {
7596def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))), (v4f16 FPR64:$src)>;
7597def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
7598def : Pat<(v4f16 (bitconvert (v8i8  FPR64:$src))), (v4f16 FPR64:$src)>;
7599def : Pat<(v4f16 (bitconvert (f64   FPR64:$src))), (v4f16 FPR64:$src)>;
7600def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))), (v4f16 FPR64:$src)>;
7601def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))), (v4f16 FPR64:$src)>;
7602
7603def : Pat<(v4bf16 (bitconvert (v1i64 FPR64:$src))), (v4bf16 FPR64:$src)>;
7604def : Pat<(v4bf16 (bitconvert (v2i32 FPR64:$src))), (v4bf16 FPR64:$src)>;
7605def : Pat<(v4bf16 (bitconvert (v8i8  FPR64:$src))), (v4bf16 FPR64:$src)>;
7606def : Pat<(v4bf16 (bitconvert (f64   FPR64:$src))), (v4bf16 FPR64:$src)>;
7607def : Pat<(v4bf16 (bitconvert (v2f32 FPR64:$src))), (v4bf16 FPR64:$src)>;
7608def : Pat<(v4bf16 (bitconvert (v1f64 FPR64:$src))), (v4bf16 FPR64:$src)>;
7609}
7610let Predicates = [IsBE] in {
7611def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))),
7612                             (v4f16 (REV64v4i16 FPR64:$src))>;
7613def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))),
7614                             (v4f16 (REV32v4i16 FPR64:$src))>;
7615def : Pat<(v4f16 (bitconvert (v8i8  FPR64:$src))),
7616                             (v4f16 (REV16v8i8 FPR64:$src))>;
7617def : Pat<(v4f16 (bitconvert (f64   FPR64:$src))),
7618                             (v4f16 (REV64v4i16 FPR64:$src))>;
7619def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))),
7620                             (v4f16 (REV32v4i16 FPR64:$src))>;
7621def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))),
7622                             (v4f16 (REV64v4i16 FPR64:$src))>;
7623
7624def : Pat<(v4bf16 (bitconvert (v1i64 FPR64:$src))),
7625                             (v4bf16 (REV64v4i16 FPR64:$src))>;
7626def : Pat<(v4bf16 (bitconvert (v2i32 FPR64:$src))),
7627                             (v4bf16 (REV32v4i16 FPR64:$src))>;
7628def : Pat<(v4bf16 (bitconvert (v8i8  FPR64:$src))),
7629                             (v4bf16 (REV16v8i8 FPR64:$src))>;
7630def : Pat<(v4bf16 (bitconvert (f64   FPR64:$src))),
7631                             (v4bf16 (REV64v4i16 FPR64:$src))>;
7632def : Pat<(v4bf16 (bitconvert (v2f32 FPR64:$src))),
7633                             (v4bf16 (REV32v4i16 FPR64:$src))>;
7634def : Pat<(v4bf16 (bitconvert (v1f64 FPR64:$src))),
7635                             (v4bf16 (REV64v4i16 FPR64:$src))>;
7636}
7637def : Pat<(v4f16 (bitconvert (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
7638def : Pat<(v4bf16 (bitconvert (v4i16 FPR64:$src))), (v4bf16 FPR64:$src)>;
7639
7640let Predicates = [IsLE] in {
7641def : Pat<(v8i8  (bitconvert (v1i64 FPR64:$src))), (v8i8  FPR64:$src)>;
7642def : Pat<(v8i8  (bitconvert (v2i32 FPR64:$src))), (v8i8  FPR64:$src)>;
7643def : Pat<(v8i8  (bitconvert (v4i16 FPR64:$src))), (v8i8  FPR64:$src)>;
7644def : Pat<(v8i8  (bitconvert (f64   FPR64:$src))), (v8i8  FPR64:$src)>;
7645def : Pat<(v8i8  (bitconvert (v2f32 FPR64:$src))), (v8i8  FPR64:$src)>;
7646def : Pat<(v8i8  (bitconvert (v1f64 FPR64:$src))), (v8i8  FPR64:$src)>;
7647def : Pat<(v8i8  (bitconvert (v4f16 FPR64:$src))), (v8i8  FPR64:$src)>;
7648def : Pat<(v8i8  (bitconvert (v4bf16 FPR64:$src))), (v8i8  FPR64:$src)>;
7649}
7650let Predicates = [IsBE] in {
7651def : Pat<(v8i8  (bitconvert (v1i64 FPR64:$src))),
7652                             (v8i8 (REV64v8i8 FPR64:$src))>;
7653def : Pat<(v8i8  (bitconvert (v2i32 FPR64:$src))),
7654                             (v8i8 (REV32v8i8 FPR64:$src))>;
7655def : Pat<(v8i8  (bitconvert (v4i16 FPR64:$src))),
7656                             (v8i8 (REV16v8i8 FPR64:$src))>;
7657def : Pat<(v8i8  (bitconvert (f64   FPR64:$src))),
7658                             (v8i8 (REV64v8i8 FPR64:$src))>;
7659def : Pat<(v8i8  (bitconvert (v2f32 FPR64:$src))),
7660                             (v8i8 (REV32v8i8 FPR64:$src))>;
7661def : Pat<(v8i8  (bitconvert (v1f64 FPR64:$src))),
7662                             (v8i8 (REV64v8i8 FPR64:$src))>;
7663def : Pat<(v8i8  (bitconvert (v4f16 FPR64:$src))),
7664                             (v8i8 (REV16v8i8 FPR64:$src))>;
7665def : Pat<(v8i8  (bitconvert (v4bf16 FPR64:$src))),
7666                             (v8i8 (REV16v8i8 FPR64:$src))>;
7667}
7668
7669let Predicates = [IsLE] in {
7670def : Pat<(f64   (bitconvert (v2i32 FPR64:$src))), (f64   FPR64:$src)>;
7671def : Pat<(f64   (bitconvert (v4i16 FPR64:$src))), (f64   FPR64:$src)>;
7672def : Pat<(f64   (bitconvert (v2f32 FPR64:$src))), (f64   FPR64:$src)>;
7673def : Pat<(f64   (bitconvert (v8i8  FPR64:$src))), (f64   FPR64:$src)>;
7674def : Pat<(f64   (bitconvert (v4f16 FPR64:$src))), (f64   FPR64:$src)>;
7675def : Pat<(f64   (bitconvert (v4bf16 FPR64:$src))), (f64   FPR64:$src)>;
7676}
7677let Predicates = [IsBE] in {
7678def : Pat<(f64   (bitconvert (v2i32 FPR64:$src))),
7679                             (f64 (REV64v2i32 FPR64:$src))>;
7680def : Pat<(f64   (bitconvert (v4i16 FPR64:$src))),
7681                             (f64 (REV64v4i16 FPR64:$src))>;
7682def : Pat<(f64   (bitconvert (v2f32 FPR64:$src))),
7683                             (f64 (REV64v2i32 FPR64:$src))>;
7684def : Pat<(f64   (bitconvert (v8i8  FPR64:$src))),
7685                             (f64 (REV64v8i8 FPR64:$src))>;
7686def : Pat<(f64   (bitconvert (v4f16 FPR64:$src))),
7687                             (f64 (REV64v4i16 FPR64:$src))>;
7688def : Pat<(f64   (bitconvert (v4bf16 FPR64:$src))),
7689                             (f64 (REV64v4i16 FPR64:$src))>;
7690}
7691def : Pat<(f64   (bitconvert (v1i64 FPR64:$src))), (f64   FPR64:$src)>;
7692def : Pat<(f64   (bitconvert (v1f64 FPR64:$src))), (f64   FPR64:$src)>;
7693
7694let Predicates = [IsLE] in {
7695def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))), (v1f64 FPR64:$src)>;
7696def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))), (v1f64 FPR64:$src)>;
7697def : Pat<(v1f64 (bitconvert (v8i8  FPR64:$src))), (v1f64 FPR64:$src)>;
7698def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;
7699def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))), (v1f64 FPR64:$src)>;
7700def : Pat<(v1f64 (bitconvert (v4bf16 FPR64:$src))), (v1f64 FPR64:$src)>;
7701}
7702let Predicates = [IsBE] in {
7703def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))),
7704                             (v1f64 (REV64v2i32 FPR64:$src))>;
7705def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))),
7706                             (v1f64 (REV64v4i16 FPR64:$src))>;
7707def : Pat<(v1f64 (bitconvert (v8i8  FPR64:$src))),
7708                             (v1f64 (REV64v8i8 FPR64:$src))>;
7709def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))),
7710                             (v1f64 (REV64v2i32 FPR64:$src))>;
7711def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))),
7712                             (v1f64 (REV64v4i16 FPR64:$src))>;
7713def : Pat<(v1f64 (bitconvert (v4bf16 FPR64:$src))),
7714                             (v1f64 (REV64v4i16 FPR64:$src))>;
7715}
7716def : Pat<(v1f64 (bitconvert (v1i64 FPR64:$src))), (v1f64 FPR64:$src)>;
7717def : Pat<(v1f64 (bitconvert (f64   FPR64:$src))), (v1f64 FPR64:$src)>;
7718
7719let Predicates = [IsLE] in {
7720def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))), (v2f32 FPR64:$src)>;
7721def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))), (v2f32 FPR64:$src)>;
7722def : Pat<(v2f32 (bitconvert (v8i8  FPR64:$src))), (v2f32 FPR64:$src)>;
7723def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))), (v2f32 FPR64:$src)>;
7724def : Pat<(v2f32 (bitconvert (f64   FPR64:$src))), (v2f32 FPR64:$src)>;
7725def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))), (v2f32 FPR64:$src)>;
7726def : Pat<(v2f32 (bitconvert (v4bf16 FPR64:$src))), (v2f32 FPR64:$src)>;
7727}
7728let Predicates = [IsBE] in {
7729def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))),
7730                             (v2f32 (REV64v2i32 FPR64:$src))>;
7731def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))),
7732                             (v2f32 (REV32v4i16 FPR64:$src))>;
7733def : Pat<(v2f32 (bitconvert (v8i8  FPR64:$src))),
7734                             (v2f32 (REV32v8i8 FPR64:$src))>;
7735def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))),
7736                             (v2f32 (REV64v2i32 FPR64:$src))>;
7737def : Pat<(v2f32 (bitconvert (f64   FPR64:$src))),
7738                             (v2f32 (REV64v2i32 FPR64:$src))>;
7739def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))),
7740                             (v2f32 (REV32v4i16 FPR64:$src))>;
7741def : Pat<(v2f32 (bitconvert (v4bf16 FPR64:$src))),
7742                             (v2f32 (REV32v4i16 FPR64:$src))>;
7743}
7744def : Pat<(v2f32 (bitconvert (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;
7745
7746let Predicates = [IsLE] in {
7747def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))), (f128 FPR128:$src)>;
7748def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))), (f128 FPR128:$src)>;
7749def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))), (f128 FPR128:$src)>;
7750def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))), (f128 FPR128:$src)>;
7751def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))), (f128 FPR128:$src)>;
7752def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))), (f128 FPR128:$src)>;
7753def : Pat<(f128 (bitconvert (v8bf16 FPR128:$src))), (f128 FPR128:$src)>;
7754def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))), (f128 FPR128:$src)>;
7755}
7756let Predicates = [IsBE] in {
7757def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))),
7758                            (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
7759def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))),
7760                            (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
7761                                            (REV64v4i32 FPR128:$src), (i32 8)))>;
7762def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))),
7763                            (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
7764                                            (REV64v8i16 FPR128:$src), (i32 8)))>;
7765def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))),
7766                            (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
7767                                            (REV64v8i16 FPR128:$src), (i32 8)))>;
7768def : Pat<(f128 (bitconvert (v8bf16 FPR128:$src))),
7769                            (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
7770                                            (REV64v8i16 FPR128:$src), (i32 8)))>;
7771def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))),
7772                            (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
7773def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))),
7774                            (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
7775                                            (REV64v4i32 FPR128:$src), (i32 8)))>;
7776def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))),
7777                            (f128 (EXTv16i8 (REV64v16i8 FPR128:$src),
7778                                            (REV64v16i8 FPR128:$src), (i32 8)))>;
7779}
7780
7781let Predicates = [IsLE] in {
7782def : Pat<(v2f64 (bitconvert (f128  FPR128:$src))), (v2f64 FPR128:$src)>;
7783def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;
7784def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;
7785def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))), (v2f64 FPR128:$src)>;
7786def : Pat<(v2f64 (bitconvert (v8bf16 FPR128:$src))), (v2f64 FPR128:$src)>;
7787def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;
7788def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;
7789}
7790let Predicates = [IsBE] in {
7791def : Pat<(v2f64 (bitconvert (f128  FPR128:$src))),
7792                             (v2f64 (EXTv16i8 FPR128:$src,
7793                                              FPR128:$src, (i32 8)))>;
7794def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))),
7795                             (v2f64 (REV64v4i32 FPR128:$src))>;
7796def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))),
7797                             (v2f64 (REV64v8i16 FPR128:$src))>;
7798def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))),
7799                             (v2f64 (REV64v8i16 FPR128:$src))>;
7800def : Pat<(v2f64 (bitconvert (v8bf16 FPR128:$src))),
7801                             (v2f64 (REV64v8i16 FPR128:$src))>;
7802def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))),
7803                             (v2f64 (REV64v16i8 FPR128:$src))>;
7804def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))),
7805                             (v2f64 (REV64v4i32 FPR128:$src))>;
7806}
7807def : Pat<(v2f64 (bitconvert (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;
7808
7809let Predicates = [IsLE] in {
7810def : Pat<(v4f32 (bitconvert (f128  FPR128:$src))), (v4f32 FPR128:$src)>;
7811def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
7812def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))), (v4f32 FPR128:$src)>;
7813def : Pat<(v4f32 (bitconvert (v8bf16 FPR128:$src))), (v4f32 FPR128:$src)>;
7814def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
7815def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
7816def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
7817}
7818let Predicates = [IsBE] in {
7819def : Pat<(v4f32 (bitconvert (f128  FPR128:$src))),
7820                             (v4f32 (EXTv16i8 (REV64v4i32 FPR128:$src),
7821                                    (REV64v4i32 FPR128:$src), (i32 8)))>;
7822def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))),
7823                             (v4f32 (REV32v8i16 FPR128:$src))>;
7824def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))),
7825                             (v4f32 (REV32v8i16 FPR128:$src))>;
7826def : Pat<(v4f32 (bitconvert (v8bf16 FPR128:$src))),
7827                             (v4f32 (REV32v8i16 FPR128:$src))>;
7828def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))),
7829                             (v4f32 (REV32v16i8 FPR128:$src))>;
7830def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))),
7831                             (v4f32 (REV64v4i32 FPR128:$src))>;
7832def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))),
7833                             (v4f32 (REV64v4i32 FPR128:$src))>;
7834}
7835def : Pat<(v4f32 (bitconvert (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;
7836
7837let Predicates = [IsLE] in {
7838def : Pat<(v2i64 (bitconvert (f128  FPR128:$src))), (v2i64 FPR128:$src)>;
7839def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
7840def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
7841def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
7842def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
7843def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))), (v2i64 FPR128:$src)>;
7844def : Pat<(v2i64 (bitconvert (v8bf16 FPR128:$src))), (v2i64 FPR128:$src)>;
7845}
7846let Predicates = [IsBE] in {
7847def : Pat<(v2i64 (bitconvert (f128  FPR128:$src))),
7848                             (v2i64 (EXTv16i8 FPR128:$src,
7849                                              FPR128:$src, (i32 8)))>;
7850def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))),
7851                             (v2i64 (REV64v4i32 FPR128:$src))>;
7852def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))),
7853                             (v2i64 (REV64v8i16 FPR128:$src))>;
7854def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))),
7855                             (v2i64 (REV64v16i8 FPR128:$src))>;
7856def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))),
7857                             (v2i64 (REV64v4i32 FPR128:$src))>;
7858def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))),
7859                             (v2i64 (REV64v8i16 FPR128:$src))>;
7860def : Pat<(v2i64 (bitconvert (v8bf16 FPR128:$src))),
7861                             (v2i64 (REV64v8i16 FPR128:$src))>;
7862}
7863def : Pat<(v2i64 (bitconvert (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;
7864
7865let Predicates = [IsLE] in {
7866def : Pat<(v4i32 (bitconvert (f128  FPR128:$src))), (v4i32 FPR128:$src)>;
7867def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
7868def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
7869def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
7870def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
7871def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))), (v4i32 FPR128:$src)>;
7872def : Pat<(v4i32 (bitconvert (v8bf16 FPR128:$src))), (v4i32 FPR128:$src)>;
7873}
7874let Predicates = [IsBE] in {
7875def : Pat<(v4i32 (bitconvert (f128  FPR128:$src))),
7876                             (v4i32 (EXTv16i8 (REV64v4i32 FPR128:$src),
7877                                              (REV64v4i32 FPR128:$src),
7878                                              (i32 8)))>;
7879def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))),
7880                             (v4i32 (REV64v4i32 FPR128:$src))>;
7881def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))),
7882                             (v4i32 (REV32v8i16 FPR128:$src))>;
7883def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))),
7884                             (v4i32 (REV32v16i8 FPR128:$src))>;
7885def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))),
7886                             (v4i32 (REV64v4i32 FPR128:$src))>;
7887def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))),
7888                             (v4i32 (REV32v8i16 FPR128:$src))>;
7889def : Pat<(v4i32 (bitconvert (v8bf16 FPR128:$src))),
7890                             (v4i32 (REV32v8i16 FPR128:$src))>;
7891}
7892def : Pat<(v4i32 (bitconvert (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;
7893
7894let Predicates = [IsLE] in {
7895def : Pat<(v8i16 (bitconvert (f128  FPR128:$src))), (v8i16 FPR128:$src)>;
7896def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
7897def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
7898def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
7899def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
7900def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
7901}
7902let Predicates = [IsBE] in {
7903def : Pat<(v8i16 (bitconvert (f128  FPR128:$src))),
7904                             (v8i16 (EXTv16i8 (REV64v8i16 FPR128:$src),
7905                                              (REV64v8i16 FPR128:$src),
7906                                              (i32 8)))>;
7907def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))),
7908                             (v8i16 (REV64v8i16 FPR128:$src))>;
7909def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))),
7910                             (v8i16 (REV32v8i16 FPR128:$src))>;
7911def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))),
7912                             (v8i16 (REV16v16i8 FPR128:$src))>;
7913def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))),
7914                             (v8i16 (REV64v8i16 FPR128:$src))>;
7915def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))),
7916                             (v8i16 (REV32v8i16 FPR128:$src))>;
7917}
7918def : Pat<(v8i16 (bitconvert (v8f16 FPR128:$src))), (v8i16 FPR128:$src)>;
7919def : Pat<(v8i16 (bitconvert (v8bf16 FPR128:$src))), (v8i16 FPR128:$src)>;
7920
7921let Predicates = [IsLE] in {
7922def : Pat<(v8f16 (bitconvert (f128  FPR128:$src))), (v8f16 FPR128:$src)>;
7923def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
7924def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
7925def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
7926def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
7927def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;
7928
7929def : Pat<(v8bf16 (bitconvert (f128  FPR128:$src))), (v8bf16 FPR128:$src)>;
7930def : Pat<(v8bf16 (bitconvert (v2i64 FPR128:$src))), (v8bf16 FPR128:$src)>;
7931def : Pat<(v8bf16 (bitconvert (v4i32 FPR128:$src))), (v8bf16 FPR128:$src)>;
7932def : Pat<(v8bf16 (bitconvert (v16i8 FPR128:$src))), (v8bf16 FPR128:$src)>;
7933def : Pat<(v8bf16 (bitconvert (v2f64 FPR128:$src))), (v8bf16 FPR128:$src)>;
7934def : Pat<(v8bf16 (bitconvert (v4f32 FPR128:$src))), (v8bf16 FPR128:$src)>;
7935}
7936let Predicates = [IsBE] in {
7937def : Pat<(v8f16 (bitconvert (f128  FPR128:$src))),
7938                             (v8f16 (EXTv16i8 (REV64v8i16 FPR128:$src),
7939                                              (REV64v8i16 FPR128:$src),
7940                                              (i32 8)))>;
7941def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))),
7942                             (v8f16 (REV64v8i16 FPR128:$src))>;
7943def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))),
7944                             (v8f16 (REV32v8i16 FPR128:$src))>;
7945def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))),
7946                             (v8f16 (REV16v16i8 FPR128:$src))>;
7947def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))),
7948                             (v8f16 (REV64v8i16 FPR128:$src))>;
7949def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))),
7950                             (v8f16 (REV32v8i16 FPR128:$src))>;
7951
7952def : Pat<(v8bf16 (bitconvert (f128  FPR128:$src))),
7953                             (v8bf16 (EXTv16i8 (REV64v8i16 FPR128:$src),
7954                                              (REV64v8i16 FPR128:$src),
7955                                              (i32 8)))>;
7956def : Pat<(v8bf16 (bitconvert (v2i64 FPR128:$src))),
7957                             (v8bf16 (REV64v8i16 FPR128:$src))>;
7958def : Pat<(v8bf16 (bitconvert (v4i32 FPR128:$src))),
7959                             (v8bf16 (REV32v8i16 FPR128:$src))>;
7960def : Pat<(v8bf16 (bitconvert (v16i8 FPR128:$src))),
7961                             (v8bf16 (REV16v16i8 FPR128:$src))>;
7962def : Pat<(v8bf16 (bitconvert (v2f64 FPR128:$src))),
7963                             (v8bf16 (REV64v8i16 FPR128:$src))>;
7964def : Pat<(v8bf16 (bitconvert (v4f32 FPR128:$src))),
7965                             (v8bf16 (REV32v8i16 FPR128:$src))>;
7966}
7967def : Pat<(v8f16 (bitconvert (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
7968def : Pat<(v8bf16 (bitconvert (v8i16 FPR128:$src))), (v8bf16 FPR128:$src)>;
7969
7970let Predicates = [IsLE] in {
7971def : Pat<(v16i8 (bitconvert (f128  FPR128:$src))), (v16i8 FPR128:$src)>;
7972def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
7973def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
7974def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
7975def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
7976def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
7977def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))), (v16i8 FPR128:$src)>;
7978def : Pat<(v16i8 (bitconvert (v8bf16 FPR128:$src))), (v16i8 FPR128:$src)>;
7979}
7980let Predicates = [IsBE] in {
7981def : Pat<(v16i8 (bitconvert (f128  FPR128:$src))),
7982                             (v16i8 (EXTv16i8 (REV64v16i8 FPR128:$src),
7983                                              (REV64v16i8 FPR128:$src),
7984                                              (i32 8)))>;
7985def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))),
7986                             (v16i8 (REV64v16i8 FPR128:$src))>;
7987def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))),
7988                             (v16i8 (REV32v16i8 FPR128:$src))>;
7989def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))),
7990                             (v16i8 (REV16v16i8 FPR128:$src))>;
7991def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))),
7992                             (v16i8 (REV64v16i8 FPR128:$src))>;
7993def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))),
7994                             (v16i8 (REV32v16i8 FPR128:$src))>;
7995def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))),
7996                             (v16i8 (REV16v16i8 FPR128:$src))>;
7997def : Pat<(v16i8 (bitconvert (v8bf16 FPR128:$src))),
7998                             (v16i8 (REV16v16i8 FPR128:$src))>;
7999}
8000
8001def : Pat<(v4i16 (extract_subvector V128:$Rn, (i64 0))),
8002           (EXTRACT_SUBREG V128:$Rn, dsub)>;
8003def : Pat<(v8i8 (extract_subvector V128:$Rn, (i64 0))),
8004           (EXTRACT_SUBREG V128:$Rn, dsub)>;
8005def : Pat<(v2f32 (extract_subvector V128:$Rn, (i64 0))),
8006           (EXTRACT_SUBREG V128:$Rn, dsub)>;
8007def : Pat<(v4f16 (extract_subvector V128:$Rn, (i64 0))),
8008           (EXTRACT_SUBREG V128:$Rn, dsub)>;
8009def : Pat<(v4bf16 (extract_subvector V128:$Rn, (i64 0))),
8010           (EXTRACT_SUBREG V128:$Rn, dsub)>;
8011def : Pat<(v2i32 (extract_subvector V128:$Rn, (i64 0))),
8012           (EXTRACT_SUBREG V128:$Rn, dsub)>;
8013def : Pat<(v1i64 (extract_subvector V128:$Rn, (i64 0))),
8014           (EXTRACT_SUBREG V128:$Rn, dsub)>;
8015def : Pat<(v1f64 (extract_subvector V128:$Rn, (i64 0))),
8016           (EXTRACT_SUBREG V128:$Rn, dsub)>;
8017
8018def : Pat<(v8i8 (extract_subvector (v16i8 FPR128:$Rn), (i64 1))),
8019          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
8020def : Pat<(v4i16 (extract_subvector (v8i16 FPR128:$Rn), (i64 1))),
8021          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
8022def : Pat<(v2i32 (extract_subvector (v4i32 FPR128:$Rn), (i64 1))),
8023          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
8024def : Pat<(v1i64 (extract_subvector (v2i64 FPR128:$Rn), (i64 1))),
8025          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
8026
8027// A 64-bit subvector insert to the first 128-bit vector position
8028// is a subregister copy that needs no instruction.
8029multiclass InsertSubvectorUndef<ValueType Ty> {
8030  def : Pat<(insert_subvector undef, (v1i64 FPR64:$src), (Ty 0)),
8031            (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
8032  def : Pat<(insert_subvector undef, (v1f64 FPR64:$src), (Ty 0)),
8033            (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
8034  def : Pat<(insert_subvector undef, (v2i32 FPR64:$src), (Ty 0)),
8035            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
8036  def : Pat<(insert_subvector undef, (v2f32 FPR64:$src), (Ty 0)),
8037            (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
8038  def : Pat<(insert_subvector undef, (v4i16 FPR64:$src), (Ty 0)),
8039            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
8040  def : Pat<(insert_subvector undef, (v4f16 FPR64:$src), (Ty 0)),
8041            (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
8042  def : Pat<(insert_subvector undef, (v4bf16 FPR64:$src), (Ty 0)),
8043            (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
8044  def : Pat<(insert_subvector undef, (v8i8 FPR64:$src), (Ty 0)),
8045            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
8046}
8047
8048defm : InsertSubvectorUndef<i32>;
8049defm : InsertSubvectorUndef<i64>;
8050
8051// Use pair-wise add instructions when summing up the lanes for v2f64, v2i64
8052// or v2f32.
8053def : Pat<(i64 (add (vector_extract (v2i64 FPR128:$Rn), (i64 0)),
8054                    (vector_extract (v2i64 FPR128:$Rn), (i64 1)))),
8055           (i64 (ADDPv2i64p (v2i64 FPR128:$Rn)))>;
8056def : Pat<(f64 (any_fadd (vector_extract (v2f64 FPR128:$Rn), (i64 0)),
8057                         (vector_extract (v2f64 FPR128:$Rn), (i64 1)))),
8058           (f64 (FADDPv2i64p (v2f64 FPR128:$Rn)))>;
8059    // vector_extract on 64-bit vectors gets promoted to a 128 bit vector,
8060    // so we match on v4f32 here, not v2f32. This will also catch adding
8061    // the low two lanes of a true v4f32 vector.
8062def : Pat<(any_fadd (vector_extract (v4f32 FPR128:$Rn), (i64 0)),
8063                    (vector_extract (v4f32 FPR128:$Rn), (i64 1))),
8064          (f32 (FADDPv2i32p (EXTRACT_SUBREG FPR128:$Rn, dsub)))>;
8065def : Pat<(any_fadd (vector_extract (v8f16 FPR128:$Rn), (i64 0)),
8066                    (vector_extract (v8f16 FPR128:$Rn), (i64 1))),
8067          (f16 (FADDPv2i16p (EXTRACT_SUBREG FPR128:$Rn, dsub)))>;
8068
8069// Scalar 64-bit shifts in FPR64 registers.
8070def : Pat<(i64 (int_aarch64_neon_sshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
8071          (SSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
8072def : Pat<(i64 (int_aarch64_neon_ushl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
8073          (USHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
8074def : Pat<(i64 (int_aarch64_neon_srshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
8075          (SRSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
8076def : Pat<(i64 (int_aarch64_neon_urshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
8077          (URSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
8078
8079// Patterns for nontemporal/no-allocate stores.
8080// We have to resort to tricks to turn a single-input store into a store pair,
8081// because there is no single-input nontemporal store, only STNP.
8082let Predicates = [IsLE] in {
8083let AddedComplexity = 15 in {
8084class NTStore128Pat<ValueType VT> :
8085  Pat<(nontemporalstore (VT FPR128:$Rt),
8086        (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
8087      (STNPDi (EXTRACT_SUBREG FPR128:$Rt, dsub),
8088              (DUPi64 FPR128:$Rt, (i64 1)),
8089              GPR64sp:$Rn, simm7s8:$offset)>;
8090
8091def : NTStore128Pat<v2i64>;
8092def : NTStore128Pat<v4i32>;
8093def : NTStore128Pat<v8i16>;
8094def : NTStore128Pat<v16i8>;
8095
8096class NTStore64Pat<ValueType VT> :
8097  Pat<(nontemporalstore (VT FPR64:$Rt),
8098        (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
8099      (STNPSi (EXTRACT_SUBREG FPR64:$Rt, ssub),
8100              (DUPi32 (SUBREG_TO_REG (i64 0), FPR64:$Rt, dsub), (i64 1)),
8101              GPR64sp:$Rn, simm7s4:$offset)>;
8102
8103// FIXME: Shouldn't v1f64 loads/stores be promoted to v1i64?
8104def : NTStore64Pat<v1f64>;
8105def : NTStore64Pat<v1i64>;
8106def : NTStore64Pat<v2i32>;
8107def : NTStore64Pat<v4i16>;
8108def : NTStore64Pat<v8i8>;
8109
8110def : Pat<(nontemporalstore GPR64:$Rt,
8111            (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
8112          (STNPWi (EXTRACT_SUBREG GPR64:$Rt, sub_32),
8113                  (EXTRACT_SUBREG (UBFMXri GPR64:$Rt, 32, 63), sub_32),
8114                  GPR64sp:$Rn, simm7s4:$offset)>;
8115} // AddedComplexity=10
8116} // Predicates = [IsLE]
8117
8118// Tail call return handling. These are all compiler pseudo-instructions,
8119// so no encoding information or anything like that.
8120let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [SP] in {
8121  def TCRETURNdi : Pseudo<(outs), (ins i64imm:$dst, i32imm:$FPDiff), []>,
8122                   Sched<[WriteBrReg]>;
8123  def TCRETURNri : Pseudo<(outs), (ins tcGPR64:$dst, i32imm:$FPDiff), []>,
8124                   Sched<[WriteBrReg]>;
8125  // Indirect tail-call with any register allowed, used by MachineOutliner when
8126  // this is proven safe.
8127  // FIXME: If we have to add any more hacks like this, we should instead relax
8128  // some verifier checks for outlined functions.
8129  def TCRETURNriALL : Pseudo<(outs), (ins GPR64:$dst, i32imm:$FPDiff), []>,
8130                      Sched<[WriteBrReg]>;
8131  // Indirect tail-call limited to only use registers (x16 and x17) which are
8132  // allowed to tail-call a "BTI c" instruction.
8133  def TCRETURNriBTI : Pseudo<(outs), (ins rtcGPR64:$dst, i32imm:$FPDiff), []>,
8134                      Sched<[WriteBrReg]>;
8135}
8136
8137def : Pat<(AArch64tcret tcGPR64:$dst, (i32 timm:$FPDiff)),
8138          (TCRETURNri tcGPR64:$dst, imm:$FPDiff)>,
8139      Requires<[NotUseBTI]>;
8140def : Pat<(AArch64tcret rtcGPR64:$dst, (i32 timm:$FPDiff)),
8141          (TCRETURNriBTI rtcGPR64:$dst, imm:$FPDiff)>,
8142      Requires<[UseBTI]>;
8143def : Pat<(AArch64tcret tglobaladdr:$dst, (i32 timm:$FPDiff)),
8144          (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;
8145def : Pat<(AArch64tcret texternalsym:$dst, (i32 timm:$FPDiff)),
8146          (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;
8147
8148def MOVMCSym : Pseudo<(outs GPR64:$dst), (ins i64imm:$sym), []>, Sched<[]>;
8149def : Pat<(i64 (AArch64LocalRecover mcsym:$sym)), (MOVMCSym mcsym:$sym)>;
8150
8151// Extracting lane zero is a special case where we can just use a plain
8152// EXTRACT_SUBREG instruction, which will become FMOV. This is easier for the
8153// rest of the compiler, especially the register allocator and copy propagation,
8154// to reason about, so is preferred when it's possible to use it.
8155let AddedComplexity = 10 in {
8156  def : Pat<(i64 (extractelt (v2i64 V128:$V), (i64 0))), (EXTRACT_SUBREG V128:$V, dsub)>;
8157  def : Pat<(i32 (extractelt (v4i32 V128:$V), (i64 0))), (EXTRACT_SUBREG V128:$V, ssub)>;
8158  def : Pat<(i32 (extractelt (v2i32 V64:$V), (i64 0))), (EXTRACT_SUBREG V64:$V, ssub)>;
8159}
8160
8161// dot_v4i8
8162class mul_v4i8<SDPatternOperator ldop> :
8163  PatFrag<(ops node:$Rn, node:$Rm, node:$offset),
8164          (mul (ldop (add node:$Rn, node:$offset)),
8165               (ldop (add node:$Rm, node:$offset)))>;
8166class mulz_v4i8<SDPatternOperator ldop> :
8167  PatFrag<(ops node:$Rn, node:$Rm),
8168          (mul (ldop node:$Rn), (ldop node:$Rm))>;
8169
8170def load_v4i8 :
8171  OutPatFrag<(ops node:$R),
8172             (INSERT_SUBREG
8173              (v2i32 (IMPLICIT_DEF)),
8174               (i32 (COPY_TO_REGCLASS (LDRWui node:$R, (i64 0)), FPR32)),
8175              ssub)>;
8176
8177class dot_v4i8<Instruction DOT, SDPatternOperator ldop> :
8178  Pat<(i32 (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 3)),
8179           (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 2)),
8180           (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 1)),
8181                (mulz_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm))))),
8182      (EXTRACT_SUBREG (i64 (DOT (DUPv2i32gpr WZR),
8183                                (load_v4i8 GPR64sp:$Rn),
8184                                (load_v4i8 GPR64sp:$Rm))),
8185                      sub_32)>, Requires<[HasDotProd]>;
8186
8187// dot_v8i8
8188class ee_v8i8<SDPatternOperator extend> :
8189  PatFrag<(ops node:$V, node:$K),
8190          (v4i16 (extract_subvector (v8i16 (extend node:$V)), node:$K))>;
8191
8192class mul_v8i8<SDPatternOperator mulop, SDPatternOperator extend> :
8193  PatFrag<(ops node:$M, node:$N, node:$K),
8194          (mulop (v4i16 (ee_v8i8<extend> node:$M, node:$K)),
8195                 (v4i16 (ee_v8i8<extend> node:$N, node:$K)))>;
8196
8197class idot_v8i8<SDPatternOperator mulop, SDPatternOperator extend> :
8198  PatFrag<(ops node:$M, node:$N),
8199          (i32 (extractelt
8200           (v4i32 (AArch64uaddv
8201            (add (mul_v8i8<mulop, extend> node:$M, node:$N, (i64 0)),
8202                 (mul_v8i8<mulop, extend> node:$M, node:$N, (i64 4))))),
8203           (i64 0)))>;
8204
8205// vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
8206def VADDV_32 : OutPatFrag<(ops node:$R), (ADDPv2i32 node:$R, node:$R)>;
8207
8208class odot_v8i8<Instruction DOT> :
8209  OutPatFrag<(ops node:$Vm, node:$Vn),
8210             (EXTRACT_SUBREG
8211              (VADDV_32
8212               (i64 (DOT (DUPv2i32gpr WZR),
8213                         (v8i8 node:$Vm),
8214                         (v8i8 node:$Vn)))),
8215              sub_32)>;
8216
8217class dot_v8i8<Instruction DOT, SDPatternOperator mulop,
8218                    SDPatternOperator extend> :
8219  Pat<(idot_v8i8<mulop, extend> V64:$Vm, V64:$Vn),
8220      (odot_v8i8<DOT> V64:$Vm, V64:$Vn)>,
8221  Requires<[HasDotProd]>;
8222
8223// dot_v16i8
8224class ee_v16i8<SDPatternOperator extend> :
8225  PatFrag<(ops node:$V, node:$K1, node:$K2),
8226          (v4i16 (extract_subvector
8227           (v8i16 (extend
8228            (v8i8 (extract_subvector node:$V, node:$K1)))), node:$K2))>;
8229
8230class mul_v16i8<SDPatternOperator mulop, SDPatternOperator extend> :
8231  PatFrag<(ops node:$M, node:$N, node:$K1, node:$K2),
8232          (v4i32
8233           (mulop (v4i16 (ee_v16i8<extend> node:$M, node:$K1, node:$K2)),
8234                  (v4i16 (ee_v16i8<extend> node:$N, node:$K1, node:$K2))))>;
8235
8236class idot_v16i8<SDPatternOperator m, SDPatternOperator x> :
8237  PatFrag<(ops node:$M, node:$N),
8238          (i32 (extractelt
8239           (v4i32 (AArch64uaddv
8240            (add
8241             (add (mul_v16i8<m, x> node:$M, node:$N, (i64 0), (i64 0)),
8242                  (mul_v16i8<m, x> node:$M, node:$N, (i64 8), (i64 0))),
8243             (add (mul_v16i8<m, x> node:$M, node:$N, (i64 0), (i64 4)),
8244                  (mul_v16i8<m, x> node:$M, node:$N, (i64 8), (i64 4)))))),
8245           (i64 0)))>;
8246
8247class odot_v16i8<Instruction DOT> :
8248  OutPatFrag<(ops node:$Vm, node:$Vn),
8249             (i32 (ADDVv4i32v
8250              (DOT (DUPv4i32gpr WZR), node:$Vm, node:$Vn)))>;
8251
8252class dot_v16i8<Instruction DOT, SDPatternOperator mulop,
8253                SDPatternOperator extend> :
8254  Pat<(idot_v16i8<mulop, extend> V128:$Vm, V128:$Vn),
8255      (odot_v16i8<DOT> V128:$Vm, V128:$Vn)>,
8256  Requires<[HasDotProd]>;
8257
8258let AddedComplexity = 10 in {
8259  def : dot_v4i8<SDOTv8i8, sextloadi8>;
8260  def : dot_v4i8<UDOTv8i8, zextloadi8>;
8261  def : dot_v8i8<SDOTv8i8, AArch64smull, sext>;
8262  def : dot_v8i8<UDOTv8i8, AArch64umull, zext>;
8263  def : dot_v16i8<SDOTv16i8, AArch64smull, sext>;
8264  def : dot_v16i8<UDOTv16i8, AArch64umull, zext>;
8265
8266  // FIXME: add patterns to generate vector by element dot product.
8267  // FIXME: add SVE dot-product patterns.
8268}
8269
8270// Custom DAG nodes and isel rules to make a 64-byte block out of eight GPRs,
8271// so that it can be used as input to inline asm, and vice versa.
8272def LS64_BUILD : SDNode<"AArch64ISD::LS64_BUILD", SDTypeProfile<1, 8, []>>;
8273def LS64_EXTRACT : SDNode<"AArch64ISD::LS64_EXTRACT", SDTypeProfile<1, 2, []>>;
8274def : Pat<(i64x8 (LS64_BUILD GPR64:$x0, GPR64:$x1, GPR64:$x2, GPR64:$x3,
8275                             GPR64:$x4, GPR64:$x5, GPR64:$x6, GPR64:$x7)),
8276          (REG_SEQUENCE GPR64x8Class,
8277              $x0, x8sub_0, $x1, x8sub_1, $x2, x8sub_2, $x3, x8sub_3,
8278              $x4, x8sub_4, $x5, x8sub_5, $x6, x8sub_6, $x7, x8sub_7)>;
8279foreach i = 0-7 in {
8280  def : Pat<(i64 (LS64_EXTRACT (i64x8 GPR64x8:$val), (i32 i))),
8281            (EXTRACT_SUBREG $val, !cast<SubRegIndex>("x8sub_"#i))>;
8282}
8283
8284let Predicates = [HasLS64] in {
8285  def LD64B: LoadStore64B<0b101, "ld64b", (ins GPR64sp:$Rn),
8286                                          (outs GPR64x8:$Rt)>;
8287  def ST64B: LoadStore64B<0b001, "st64b", (ins GPR64x8:$Rt, GPR64sp:$Rn),
8288                                          (outs)>;
8289  def ST64BV:   Store64BV<0b011, "st64bv">;
8290  def ST64BV0:  Store64BV<0b010, "st64bv0">;
8291
8292  class ST64BPattern<Intrinsic intrinsic, Instruction instruction>
8293    : Pat<(intrinsic GPR64sp:$addr, GPR64:$x0, GPR64:$x1, GPR64:$x2, GPR64:$x3, GPR64:$x4, GPR64:$x5, GPR64:$x6, GPR64:$x7),
8294          (instruction (REG_SEQUENCE GPR64x8Class, $x0, x8sub_0, $x1, x8sub_1, $x2, x8sub_2, $x3, x8sub_3, $x4, x8sub_4, $x5, x8sub_5, $x6, x8sub_6, $x7, x8sub_7), $addr)>;
8295
8296  def : ST64BPattern<int_aarch64_st64b, ST64B>;
8297  def : ST64BPattern<int_aarch64_st64bv, ST64BV>;
8298  def : ST64BPattern<int_aarch64_st64bv0, ST64BV0>;
8299}
8300
8301let Predicates = [HasMOPS] in {
8302  let Defs = [NZCV] in {
8303    defm CPYFP : MOPSMemoryCopyInsns<0b00, "cpyfp">;
8304
8305    defm CPYP : MOPSMemoryMoveInsns<0b00, "cpyp">;
8306
8307    defm SETP : MOPSMemorySetInsns<0b00, "setp">;
8308  }
8309  let Uses = [NZCV] in {
8310    defm CPYFM : MOPSMemoryCopyInsns<0b01, "cpyfm">;
8311    defm CPYFE : MOPSMemoryCopyInsns<0b10, "cpyfe">;
8312
8313    defm CPYM : MOPSMemoryMoveInsns<0b01, "cpym">;
8314    defm CPYE : MOPSMemoryMoveInsns<0b10, "cpye">;
8315
8316    defm SETM : MOPSMemorySetInsns<0b01, "setm">;
8317    defm SETE : MOPSMemorySetInsns<0b10, "sete">;
8318  }
8319}
8320let Predicates = [HasMOPS, HasMTE] in {
8321  let Defs = [NZCV] in {
8322    defm SETGP     : MOPSMemorySetTaggingInsns<0b00, "setgp">;
8323  }
8324  let Uses = [NZCV] in {
8325    defm SETGM     : MOPSMemorySetTaggingInsns<0b01, "setgm">;
8326    // Can't use SETGE because it's a reserved name in TargetSelectionDAG.td
8327    defm MOPSSETGE : MOPSMemorySetTaggingInsns<0b10, "setge">;
8328  }
8329}
8330
8331// MOPS Node operands: 0: Dst, 1: Src or Value, 2: Size, 3: Chain
8332// MOPS Node results: 0: Dst writeback, 1: Size writeback, 2: Chain
8333def SDT_AArch64mops : SDTypeProfile<2, 3, [ SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2> ]>;
8334def AArch64mops_memset : SDNode<"AArch64ISD::MOPS_MEMSET", SDT_AArch64mops>;
8335def AArch64mops_memset_tagging : SDNode<"AArch64ISD::MOPS_MEMSET_TAGGING", SDT_AArch64mops>;
8336def AArch64mops_memcopy : SDNode<"AArch64ISD::MOPS_MEMCOPY", SDT_AArch64mops>;
8337def AArch64mops_memmove : SDNode<"AArch64ISD::MOPS_MEMMOVE", SDT_AArch64mops>;
8338
8339// MOPS operations always contain three 4-byte instructions
8340let Predicates = [HasMOPS], Defs = [NZCV], Size = 12, mayStore = 1 in {
8341  let mayLoad = 1 in {
8342    def MOPSMemoryCopyPseudo : Pseudo<(outs GPR64common:$Rd_wb, GPR64common:$Rs_wb, GPR64:$Rn_wb),
8343                                      (ins GPR64common:$Rd, GPR64common:$Rs, GPR64:$Rn),
8344                                      [], "$Rd = $Rd_wb,$Rs = $Rs_wb,$Rn = $Rn_wb">, Sched<[]>;
8345    def MOPSMemoryMovePseudo : Pseudo<(outs GPR64common:$Rd_wb, GPR64common:$Rs_wb, GPR64:$Rn_wb),
8346                                      (ins GPR64common:$Rd, GPR64common:$Rs, GPR64:$Rn),
8347                                      [], "$Rd = $Rd_wb,$Rs = $Rs_wb,$Rn = $Rn_wb">, Sched<[]>;
8348  }
8349  let mayLoad = 0 in {
8350    def MOPSMemorySetPseudo  : Pseudo<(outs GPR64common:$Rd_wb, GPR64:$Rn_wb),
8351                                      (ins GPR64common:$Rd, GPR64:$Rn, GPR64:$Rm),
8352                                      [], "$Rd = $Rd_wb,$Rn = $Rn_wb">, Sched<[]>;
8353  }
8354}
8355let Predicates = [HasMOPS, HasMTE], Defs = [NZCV], Size = 12, mayLoad = 0, mayStore = 1 in {
8356  def MOPSMemorySetTaggingPseudo : Pseudo<(outs GPR64common:$Rd_wb, GPR64:$Rn_wb),
8357                                          (ins GPR64common:$Rd, GPR64:$Rn, GPR64:$Rm),
8358                                          [], "$Rd = $Rd_wb,$Rn = $Rn_wb">, Sched<[]>;
8359}
8360
8361// This gets lowered into an instruction sequence of 20 bytes
8362let Defs = [X16, X17], mayStore = 1, isCodeGenOnly = 1, Size = 20 in
8363def StoreSwiftAsyncContext
8364      : Pseudo<(outs), (ins GPR64:$ctx, GPR64sp:$base, simm9:$offset),
8365               []>, Sched<[]>;
8366
8367def AArch64AssertZExtBool : SDNode<"AArch64ISD::ASSERT_ZEXT_BOOL", SDT_assert>;
8368def : Pat<(AArch64AssertZExtBool GPR32:$op),
8369          (i32 GPR32:$op)>;
8370
8371include "AArch64InstrAtomics.td"
8372include "AArch64SVEInstrInfo.td"
8373include "AArch64SMEInstrInfo.td"
8374include "AArch64InstrGISel.td"
8375