//=- AArch64InstrInfo.td - Describe the AArch64 Instructions -*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// AArch64 Instruction definitions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// AArch64 Instruction Predicate Definitions.
//
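// Each predicate below pairs a C++ expression, evaluated during instruction
// selection, with an AssemblerPredicate that gates assembly matching and
// supplies the feature name used in "instruction requires:" diagnostics.
//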
def HasV8_1a         : Predicate<"Subtarget->hasV8_1aOps()">,
                                 AssemblerPredicate<(all_of HasV8_1aOps), "armv8.1a">;
def HasV8_2a         : Predicate<"Subtarget->hasV8_2aOps()">,
                                 AssemblerPredicate<(all_of HasV8_2aOps), "armv8.2a">;
def HasV8_3a         : Predicate<"Subtarget->hasV8_3aOps()">,
                                 AssemblerPredicate<(all_of HasV8_3aOps), "armv8.3a">;
def HasV8_4a         : Predicate<"Subtarget->hasV8_4aOps()">,
                                 AssemblerPredicate<(all_of HasV8_4aOps), "armv8.4a">;
def HasV8_5a         : Predicate<"Subtarget->hasV8_5aOps()">,
                                 AssemblerPredicate<(all_of HasV8_5aOps), "armv8.5a">;
def HasV8_6a         : Predicate<"Subtarget->hasV8_6aOps()">,
                                 AssemblerPredicate<(all_of HasV8_6aOps), "armv8.6a">;
def HasV8_7a         : Predicate<"Subtarget->hasV8_7aOps()">,
                                 AssemblerPredicate<(all_of HasV8_7aOps), "armv8.7a">;
def HasV9_0a         : Predicate<"Subtarget->hasV9_0aOps()">,
                                 AssemblerPredicate<(all_of HasV9_0aOps), "armv9-a">;
def HasV9_1a         : Predicate<"Subtarget->hasV9_1aOps()">,
                                 AssemblerPredicate<(all_of HasV9_1aOps), "armv9.1a">;
def HasV9_2a         : Predicate<"Subtarget->hasV9_2aOps()">,
                                 AssemblerPredicate<(all_of HasV9_2aOps), "armv9.2a">;
def HasV8_0r         : Predicate<"Subtarget->hasV8_0rOps()">,
                                 AssemblerPredicate<(all_of HasV8_0rOps), "armv8-r">;

def HasEL2VMSA       : Predicate<"Subtarget->hasEL2VMSA()">,
                       AssemblerPredicate<(all_of FeatureEL2VMSA), "el2vmsa">;

def HasEL3           : Predicate<"Subtarget->hasEL3()">,
                       AssemblerPredicate<(all_of FeatureEL3), "el3">;

def HasVH            : Predicate<"Subtarget->hasVH()">,
                       AssemblerPredicate<(all_of FeatureVH), "vh">;

def HasLOR           : Predicate<"Subtarget->hasLOR()">,
                       AssemblerPredicate<(all_of FeatureLOR), "lor">;

def HasPAuth         : Predicate<"Subtarget->hasPAuth()">,
                       AssemblerPredicate<(all_of FeaturePAuth), "pauth">;

def HasJS            : Predicate<"Subtarget->hasJS()">,
                       AssemblerPredicate<(all_of FeatureJS), "jsconv">;

def HasCCIDX         : Predicate<"Subtarget->hasCCIDX()">,
                       AssemblerPredicate<(all_of FeatureCCIDX), "ccidx">;

def HasComplxNum     : Predicate<"Subtarget->hasComplxNum()">,
                       AssemblerPredicate<(all_of FeatureComplxNum), "complxnum">;

def HasNV            : Predicate<"Subtarget->hasNV()">,
                       AssemblerPredicate<(all_of FeatureNV), "nv">;

def HasMPAM          : Predicate<"Subtarget->hasMPAM()">,
                       AssemblerPredicate<(all_of FeatureMPAM), "mpam">;

def HasDIT           : Predicate<"Subtarget->hasDIT()">,
                       AssemblerPredicate<(all_of FeatureDIT), "dit">;

def HasTRACEV8_4     : Predicate<"Subtarget->hasTRACEV8_4()">,
                       AssemblerPredicate<(all_of FeatureTRACEV8_4), "tracev8.4">;

def HasAM            : Predicate<"Subtarget->hasAM()">,
                       AssemblerPredicate<(all_of FeatureAM), "am">;

def HasSEL2          : Predicate<"Subtarget->hasSEL2()">,
                       AssemblerPredicate<(all_of FeatureSEL2), "sel2">;

def HasTLB_RMI       : Predicate<"Subtarget->hasTLB_RMI()">,
                       AssemblerPredicate<(all_of FeatureTLB_RMI), "tlb-rmi">;

def HasFlagM         : Predicate<"Subtarget->hasFlagM()">,
                       AssemblerPredicate<(all_of FeatureFlagM), "flagm">;

def HasRCPC_IMMO     : Predicate<"Subtarget->hasRCPCImm()">,
                       AssemblerPredicate<(all_of FeatureRCPC_IMMO), "rcpc-immo">;

def HasFPARMv8       : Predicate<"Subtarget->hasFPARMv8()">,
                       AssemblerPredicate<(all_of FeatureFPARMv8), "fp-armv8">;
def HasNEON          : Predicate<"Subtarget->hasNEON()">,
                                 AssemblerPredicate<(all_of FeatureNEON), "neon">;
def HasCrypto        : Predicate<"Subtarget->hasCrypto()">,
                                 AssemblerPredicate<(all_of FeatureCrypto), "crypto">;
def HasSM4           : Predicate<"Subtarget->hasSM4()">,
                                 AssemblerPredicate<(all_of FeatureSM4), "sm4">;
def HasSHA3          : Predicate<"Subtarget->hasSHA3()">,
                                 AssemblerPredicate<(all_of FeatureSHA3), "sha3">;
def HasSHA2          : Predicate<"Subtarget->hasSHA2()">,
                                 AssemblerPredicate<(all_of FeatureSHA2), "sha2">;
def HasAES           : Predicate<"Subtarget->hasAES()">,
                                 AssemblerPredicate<(all_of FeatureAES), "aes">;
def HasDotProd       : Predicate<"Subtarget->hasDotProd()">,
                                 AssemblerPredicate<(all_of FeatureDotProd), "dotprod">;
def HasCRC           : Predicate<"Subtarget->hasCRC()">,
                                 AssemblerPredicate<(all_of FeatureCRC), "crc">;
def HasLSE           : Predicate<"Subtarget->hasLSE()">,
                                 AssemblerPredicate<(all_of FeatureLSE), "lse">;
def HasNoLSE         : Predicate<"!Subtarget->hasLSE()">;
def HasRAS           : Predicate<"Subtarget->hasRAS()">,
                                 AssemblerPredicate<(all_of FeatureRAS), "ras">;
def HasRDM           : Predicate<"Subtarget->hasRDM()">,
                                 AssemblerPredicate<(all_of FeatureRDM), "rdm">;
def HasPerfMon       : Predicate<"Subtarget->hasPerfMon()">;
def HasFullFP16      : Predicate<"Subtarget->hasFullFP16()">,
                                 AssemblerPredicate<(all_of FeatureFullFP16), "fullfp16">;
def HasFP16FML       : Predicate<"Subtarget->hasFP16FML()">,
                                 AssemblerPredicate<(all_of FeatureFP16FML), "fp16fml">;
def HasSPE           : Predicate<"Subtarget->hasSPE()">,
                                 AssemblerPredicate<(all_of FeatureSPE), "spe">;
def HasFuseAES       : Predicate<"Subtarget->hasFuseAES()">,
                                 AssemblerPredicate<(all_of FeatureFuseAES),
                                 "fuse-aes">;
def HasSVE           : Predicate<"Subtarget->hasSVE()">,
                                 AssemblerPredicate<(all_of FeatureSVE), "sve">;
def HasSVE2          : Predicate<"Subtarget->hasSVE2()">,
                                 AssemblerPredicate<(all_of FeatureSVE2), "sve2">;
def HasSVE2AES       : Predicate<"Subtarget->hasSVE2AES()">,
                                 AssemblerPredicate<(all_of FeatureSVE2AES), "sve2-aes">;
def HasSVE2SM4       : Predicate<"Subtarget->hasSVE2SM4()">,
                                 AssemblerPredicate<(all_of FeatureSVE2SM4), "sve2-sm4">;
def HasSVE2SHA3      : Predicate<"Subtarget->hasSVE2SHA3()">,
                                 AssemblerPredicate<(all_of FeatureSVE2SHA3), "sve2-sha3">;
def HasSVE2BitPerm   : Predicate<"Subtarget->hasSVE2BitPerm()">,
                                 AssemblerPredicate<(all_of FeatureSVE2BitPerm), "sve2-bitperm">;
def HasSME           : Predicate<"Subtarget->hasSME()">,
                                 AssemblerPredicate<(all_of FeatureSME), "sme">;
def HasSMEF64        : Predicate<"Subtarget->hasSMEF64()">,
                                 AssemblerPredicate<(all_of FeatureSMEF64), "sme-f64">;
def HasSMEI64        : Predicate<"Subtarget->hasSMEI64()">,
                                 AssemblerPredicate<(all_of FeatureSMEI64), "sme-i64">;
def HasStreamingSVE  : Predicate<"Subtarget->hasStreamingSVE()">,
                                 AssemblerPredicate<(all_of FeatureStreamingSVE), "streaming-sve">;
// A subset of SVE(2) instructions are legal in Streaming SVE execution mode;
// they should be enabled if either feature has been specified.
def HasSVEorStreamingSVE
    : Predicate<"Subtarget->hasSVE() || Subtarget->hasStreamingSVE()">,
                AssemblerPredicate<(any_of FeatureSVE, FeatureStreamingSVE),
                "streaming-sve or sve">;
def HasSVE2orStreamingSVE
    : Predicate<"Subtarget->hasSVE2() || Subtarget->hasStreamingSVE()">,
                AssemblerPredicate<(any_of FeatureSVE2, FeatureStreamingSVE),
                "streaming-sve or sve2">;
// A subset of NEON instructions are legal in Streaming SVE execution mode;
// they should be enabled if either feature has been specified.
def HasNEONorStreamingSVE
    : Predicate<"Subtarget->hasNEON() || Subtarget->hasStreamingSVE()">,
                AssemblerPredicate<(any_of FeatureNEON, FeatureStreamingSVE),
                "streaming-sve or neon">;
def HasRCPC          : Predicate<"Subtarget->hasRCPC()">,
                                 AssemblerPredicate<(all_of FeatureRCPC), "rcpc">;
def HasAltNZCV       : Predicate<"Subtarget->hasAlternativeNZCV()">,
                       AssemblerPredicate<(all_of FeatureAltFPCmp), "altnzcv">;
def HasFRInt3264     : Predicate<"Subtarget->hasFRInt3264()">,
                       AssemblerPredicate<(all_of FeatureFRInt3264), "frint3264">;
def HasSB            : Predicate<"Subtarget->hasSB()">,
                       AssemblerPredicate<(all_of FeatureSB), "sb">;
def HasPredRes       : Predicate<"Subtarget->hasPredRes()">,
                       AssemblerPredicate<(all_of FeaturePredRes), "predres">;
def HasCCDP          : Predicate<"Subtarget->hasCCDP()">,
                       AssemblerPredicate<(all_of FeatureCacheDeepPersist), "ccdp">;
def HasBTI           : Predicate<"Subtarget->hasBTI()">,
                       AssemblerPredicate<(all_of FeatureBranchTargetId), "bti">;
def HasMTE           : Predicate<"Subtarget->hasMTE()">,
                       AssemblerPredicate<(all_of FeatureMTE), "mte">;
def HasTME           : Predicate<"Subtarget->hasTME()">,
                       AssemblerPredicate<(all_of FeatureTME), "tme">;
def HasETE           : Predicate<"Subtarget->hasETE()">,
                       AssemblerPredicate<(all_of FeatureETE), "ete">;
def HasTRBE          : Predicate<"Subtarget->hasTRBE()">,
                       AssemblerPredicate<(all_of FeatureTRBE), "trbe">;
def HasBF16          : Predicate<"Subtarget->hasBF16()">,
                       AssemblerPredicate<(all_of FeatureBF16), "bf16">;
def HasMatMulInt8    : Predicate<"Subtarget->hasMatMulInt8()">,
                       AssemblerPredicate<(all_of FeatureMatMulInt8), "i8mm">;
def HasMatMulFP32    : Predicate<"Subtarget->hasMatMulFP32()">,
                       AssemblerPredicate<(all_of FeatureMatMulFP32), "f32mm">;
def HasMatMulFP64    : Predicate<"Subtarget->hasMatMulFP64()">,
                       AssemblerPredicate<(all_of FeatureMatMulFP64), "f64mm">;
def HasXS            : Predicate<"Subtarget->hasXS()">,
                       AssemblerPredicate<(all_of FeatureXS), "xs">;
def HasWFxT          : Predicate<"Subtarget->hasWFxT()">,
                       AssemblerPredicate<(all_of FeatureWFxT), "wfxt">;
def HasLS64          : Predicate<"Subtarget->hasLS64()">,
                       AssemblerPredicate<(all_of FeatureLS64), "ls64">;
def HasBRBE          : Predicate<"Subtarget->hasBRBE()">,
                       AssemblerPredicate<(all_of FeatureBRBE), "brbe">;
def HasSPE_EEF       : Predicate<"Subtarget->hasSPE_EEF()">,
                       AssemblerPredicate<(all_of FeatureSPE_EEF), "spe-eef">;
def IsLE             : Predicate<"Subtarget->isLittleEndian()">;
def IsBE             : Predicate<"!Subtarget->isLittleEndian()">;
def IsWindows        : Predicate<"Subtarget->isTargetWindows()">;
def UseExperimentalZeroingPseudos
    : Predicate<"Subtarget->useExperimentalZeroingPseudos()">;
def UseAlternateSExtLoadCVTF32
    : Predicate<"Subtarget->useAlternateSExtLoadCVTF32Pattern()">;

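// When FeatureNoNegativeImmediates is not set, the assembler may rewrite an
// instruction with a negative immediate into its complementary form, e.g.
// accepting "add w0, w1, #-4" as "sub w0, w1, #4".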
def UseNegativeImmediates
    : Predicate<"false">, AssemblerPredicate<(all_of (not FeatureNoNegativeImmediates)),
                                             "NegativeImmediates">;

def UseScalarIncVL : Predicate<"Subtarget->useScalarIncVL()">;

def AArch64LocalRecover : SDNode<"ISD::LOCAL_RECOVER",
                                  SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
                                                       SDTCisInt<1>]>>;


//===----------------------------------------------------------------------===//
// AArch64-specific DAG Nodes.
//
// SDTBinaryArithWithFlagsOut - RES1, FLAGS = op LHS, RHS
def SDTBinaryArithWithFlagsOut : SDTypeProfile<2, 2,
                                              [SDTCisSameAs<0, 2>,
                                               SDTCisSameAs<0, 3>,
                                               SDTCisInt<0>, SDTCisVT<1, i32>]>;

// SDTBinaryArithWithFlagsIn - RES = op LHS, RHS, FLAGS
def SDTBinaryArithWithFlagsIn : SDTypeProfile<1, 3,
                                            [SDTCisSameAs<0, 1>,
                                             SDTCisSameAs<0, 2>,
                                             SDTCisInt<0>,
                                             SDTCisVT<3, i32>]>;

// SDTBinaryArithWithFlagsInOut - RES1, FLAGS = op LHS, RHS, FLAGS
def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
                                            [SDTCisSameAs<0, 2>,
                                             SDTCisSameAs<0, 3>,
                                             SDTCisInt<0>,
                                             SDTCisVT<1, i32>,
                                             SDTCisVT<4, i32>]>;

def SDT_AArch64Brcond  : SDTypeProfile<0, 3,
                                     [SDTCisVT<0, OtherVT>, SDTCisVT<1, i32>,
                                      SDTCisVT<2, i32>]>;
def SDT_AArch64cbz : SDTypeProfile<0, 2, [SDTCisInt<0>, SDTCisVT<1, OtherVT>]>;
def SDT_AArch64tbz : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>,
                                        SDTCisVT<2, OtherVT>]>;


def SDT_AArch64CSel  : SDTypeProfile<1, 4,
                                   [SDTCisSameAs<0, 1>,
                                    SDTCisSameAs<0, 2>,
                                    SDTCisInt<3>,
                                    SDTCisVT<4, i32>]>;
def SDT_AArch64CCMP : SDTypeProfile<1, 5,
                                    [SDTCisVT<0, i32>,
                                     SDTCisInt<1>,
                                     SDTCisSameAs<1, 2>,
                                     SDTCisInt<3>,
                                     SDTCisInt<4>,
                                     SDTCisVT<5, i32>]>;
def SDT_AArch64FCCMP : SDTypeProfile<1, 5,
                                     [SDTCisVT<0, i32>,
                                      SDTCisFP<1>,
                                      SDTCisSameAs<1, 2>,
                                      SDTCisInt<3>,
                                      SDTCisInt<4>,
                                      SDTCisVT<5, i32>]>;
def SDT_AArch64FCmp   : SDTypeProfile<0, 2,
                                   [SDTCisFP<0>,
                                    SDTCisSameAs<0, 1>]>;
def SDT_AArch64Dup   : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
def SDT_AArch64DupLane   : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisInt<2>]>;
def SDT_AArch64Insr  : SDTypeProfile<1, 2, [SDTCisVec<0>]>;
def SDT_AArch64Zip   : SDTypeProfile<1, 2, [SDTCisVec<0>,
                                          SDTCisSameAs<0, 1>,
                                          SDTCisSameAs<0, 2>]>;
def SDT_AArch64MOVIedit : SDTypeProfile<1, 1, [SDTCisInt<1>]>;
def SDT_AArch64MOVIshift : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
def SDT_AArch64vecimm : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                           SDTCisInt<2>, SDTCisInt<3>]>;
def SDT_AArch64UnaryVec: SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDT_AArch64ExtVec: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                          SDTCisSameAs<0,2>, SDTCisInt<3>]>;
def SDT_AArch64vshift : SDTypeProfile<1, 2, [SDTCisSameAs<0,1>, SDTCisInt<2>]>;
def SDT_AArch64Dot: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                         SDTCisVec<2>, SDTCisSameAs<2,3>]>;

def SDT_AArch64vshiftinsert : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<3>,
                                                 SDTCisSameAs<0,1>,
                                                 SDTCisSameAs<0,2>]>;

def SDT_AArch64unvec : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDT_AArch64fcmpz : SDTypeProfile<1, 1, []>;
def SDT_AArch64fcmp  : SDTypeProfile<1, 2, [SDTCisSameAs<1,2>]>;
def SDT_AArch64binvec : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                           SDTCisSameAs<0,2>]>;
def SDT_AArch64trivec : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                           SDTCisSameAs<0,2>,
                                           SDTCisSameAs<0,3>]>;
def SDT_AArch64TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>]>;
def SDT_AArch64PREFETCH : SDTypeProfile<0, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<1>]>;

def SDT_AArch64ITOF  : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisSameAs<0,1>]>;

def SDT_AArch64TLSDescCall : SDTypeProfile<0, -2, [SDTCisPtrTy<0>,
                                                 SDTCisPtrTy<1>]>;

def SDT_AArch64uaddlp : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>]>;

def SDT_AArch64ldp : SDTypeProfile<2, 1, [SDTCisVT<0, i64>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
def SDT_AArch64stp : SDTypeProfile<0, 3, [SDTCisVT<0, i64>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
def SDT_AArch64stnp : SDTypeProfile<0, 3, [SDTCisVT<0, v4i32>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;

// Generates the general dynamic sequences, i.e.
//  adrp  x0, :tlsdesc:var
//  ldr   x1, [x0, #:tlsdesc_lo12:var]
//  add   x0, x0, #:tlsdesc_lo12:var
//  .tlsdesccall var
//  blr   x1

// (the TPIDR_EL0 offset is put directly in X0, hence no "result" here).
// The single operand is the thread-local variable being accessed.
def SDT_AArch64TLSDescCallSeq : SDTypeProfile<0, 1,
                                          [SDTCisPtrTy<0>]>;

def SDT_AArch64WrapperLarge : SDTypeProfile<1, 4,
                                        [SDTCisVT<0, i64>, SDTCisVT<1, i32>,
                                         SDTCisSameAs<1, 2>, SDTCisSameAs<1, 3>,
                                         SDTCisSameAs<1, 4>]>;

def SDT_AArch64TBL : SDTypeProfile<1, 2, [
  SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisInt<2>
]>;

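// The masked load/store fragments below specialize the generic masked_ld and
// masked_st nodes, using C++ predicates to check the extension or truncation
// kind, the indexing mode, and the non-temporal hint of each memory node.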
// non-extending masked load fragment.
def nonext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD &&
         cast<MaskedLoadSDNode>(N)->isUnindexed() &&
         !cast<MaskedLoadSDNode>(N)->isNonTemporal();
}]>;
// sign extending masked load fragments.
def asext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def),[{
  return (cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD ||
          cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD) &&
         cast<MaskedLoadSDNode>(N)->isUnindexed();
}]>;
def asext_masked_load_i8 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (asext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def asext_masked_load_i16 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (asext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def asext_masked_load_i32 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (asext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;
// zero extending masked load fragments.
def zext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD &&
         cast<MaskedLoadSDNode>(N)->isUnindexed();
}]>;
def zext_masked_load_i8 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (zext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def zext_masked_load_i16 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (zext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def zext_masked_load_i32 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (zext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;

def non_temporal_load :
   PatFrag<(ops node:$ptr, node:$pred, node:$def),
           (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
   return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD &&
          cast<MaskedLoadSDNode>(N)->isUnindexed() &&
          cast<MaskedLoadSDNode>(N)->isNonTemporal();
}]>;

// non-truncating masked store fragment.
def nontrunc_masked_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return !cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed() &&
         !cast<MaskedStoreSDNode>(N)->isNonTemporal();
}]>;
// truncating masked store fragments.
def trunc_masked_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed();
}]>;
def trunc_masked_store_i8 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def trunc_masked_store_i16 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def trunc_masked_store_i32 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;

def non_temporal_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return !cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed() &&
         cast<MaskedStoreSDNode>(N)->isNonTemporal();
}]>;

// top16Zero - return true if the upper 16 bits of $src are known to be zero.
def top16Zero: PatLeaf<(i32 GPR32:$src), [{
  return SDValue(N,0)->getValueType(0) == MVT::i32 &&
         CurDAG->MaskedValueIsZero(SDValue(N,0), APInt::getHighBitsSet(32, 16));
  }]>;

// top32Zero - return true if the upper 32 bits of $src are known to be zero.
def top32Zero: PatLeaf<(i64 GPR64:$src), [{
  return SDValue(N,0)->getValueType(0) == MVT::i64 &&
         CurDAG->MaskedValueIsZero(SDValue(N,0), APInt::getHighBitsSet(64, 32));
  }]>;

// Node definitions.
def AArch64adrp          : SDNode<"AArch64ISD::ADRP", SDTIntUnaryOp, []>;
def AArch64adr           : SDNode<"AArch64ISD::ADR", SDTIntUnaryOp, []>;
def AArch64addlow        : SDNode<"AArch64ISD::ADDlow", SDTIntBinOp, []>;
def AArch64LOADgot       : SDNode<"AArch64ISD::LOADgot", SDTIntUnaryOp>;
def AArch64callseq_start : SDNode<"ISD::CALLSEQ_START",
                                SDCallSeqStart<[ SDTCisVT<0, i32>,
                                                 SDTCisVT<1, i32> ]>,
                                [SDNPHasChain, SDNPOutGlue]>;
def AArch64callseq_end   : SDNode<"ISD::CALLSEQ_END",
                                SDCallSeqEnd<[ SDTCisVT<0, i32>,
                                               SDTCisVT<1, i32> ]>,
                                [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def AArch64call          : SDNode<"AArch64ISD::CALL",
                                SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
                                [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                                 SDNPVariadic]>;

def AArch64call_rvmarker: SDNode<"AArch64ISD::CALL_RVMARKER",
                             SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
                             [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                              SDNPVariadic]>;

def AArch64brcond        : SDNode<"AArch64ISD::BRCOND", SDT_AArch64Brcond,
                                [SDNPHasChain]>;
def AArch64cbz           : SDNode<"AArch64ISD::CBZ", SDT_AArch64cbz,
                                [SDNPHasChain]>;
def AArch64cbnz          : SDNode<"AArch64ISD::CBNZ", SDT_AArch64cbz,
                                [SDNPHasChain]>;
def AArch64tbz           : SDNode<"AArch64ISD::TBZ", SDT_AArch64tbz,
                                [SDNPHasChain]>;
def AArch64tbnz          : SDNode<"AArch64ISD::TBNZ", SDT_AArch64tbz,
                                [SDNPHasChain]>;


def AArch64csel          : SDNode<"AArch64ISD::CSEL", SDT_AArch64CSel>;
def AArch64csinv         : SDNode<"AArch64ISD::CSINV", SDT_AArch64CSel>;
def AArch64csneg         : SDNode<"AArch64ISD::CSNEG", SDT_AArch64CSel>;
def AArch64csinc         : SDNode<"AArch64ISD::CSINC", SDT_AArch64CSel>;
def AArch64retflag       : SDNode<"AArch64ISD::RET_FLAG", SDTNone,
                                [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def AArch64adc       : SDNode<"AArch64ISD::ADC",  SDTBinaryArithWithFlagsIn>;
def AArch64sbc       : SDNode<"AArch64ISD::SBC",  SDTBinaryArithWithFlagsIn>;
def AArch64add_flag  : SDNode<"AArch64ISD::ADDS",  SDTBinaryArithWithFlagsOut,
                            [SDNPCommutative]>;
def AArch64sub_flag  : SDNode<"AArch64ISD::SUBS",  SDTBinaryArithWithFlagsOut>;
def AArch64and_flag  : SDNode<"AArch64ISD::ANDS",  SDTBinaryArithWithFlagsOut,
                            [SDNPCommutative]>;
def AArch64adc_flag  : SDNode<"AArch64ISD::ADCS",  SDTBinaryArithWithFlagsInOut>;
def AArch64sbc_flag  : SDNode<"AArch64ISD::SBCS",  SDTBinaryArithWithFlagsInOut>;

def AArch64ccmp      : SDNode<"AArch64ISD::CCMP",  SDT_AArch64CCMP>;
def AArch64ccmn      : SDNode<"AArch64ISD::CCMN",  SDT_AArch64CCMP>;
def AArch64fccmp     : SDNode<"AArch64ISD::FCCMP", SDT_AArch64FCCMP>;

def AArch64threadpointer : SDNode<"AArch64ISD::THREAD_POINTER", SDTPtrLeaf>;

def AArch64fcmp         : SDNode<"AArch64ISD::FCMP", SDT_AArch64FCmp>;
def AArch64strict_fcmp  : SDNode<"AArch64ISD::STRICT_FCMP", SDT_AArch64FCmp,
                                 [SDNPHasChain]>;
def AArch64strict_fcmpe : SDNode<"AArch64ISD::STRICT_FCMPE", SDT_AArch64FCmp,
                                 [SDNPHasChain]>;
def AArch64any_fcmp     : PatFrags<(ops node:$lhs, node:$rhs),
                                   [(AArch64strict_fcmp node:$lhs, node:$rhs),
                                    (AArch64fcmp node:$lhs, node:$rhs)]>;

def AArch64dup       : SDNode<"AArch64ISD::DUP", SDT_AArch64Dup>;
def AArch64duplane8  : SDNode<"AArch64ISD::DUPLANE8", SDT_AArch64DupLane>;
def AArch64duplane16 : SDNode<"AArch64ISD::DUPLANE16", SDT_AArch64DupLane>;
def AArch64duplane32 : SDNode<"AArch64ISD::DUPLANE32", SDT_AArch64DupLane>;
def AArch64duplane64 : SDNode<"AArch64ISD::DUPLANE64", SDT_AArch64DupLane>;

def AArch64insr      : SDNode<"AArch64ISD::INSR", SDT_AArch64Insr>;

def AArch64zip1      : SDNode<"AArch64ISD::ZIP1", SDT_AArch64Zip>;
def AArch64zip2      : SDNode<"AArch64ISD::ZIP2", SDT_AArch64Zip>;
def AArch64uzp1      : SDNode<"AArch64ISD::UZP1", SDT_AArch64Zip>;
def AArch64uzp2      : SDNode<"AArch64ISD::UZP2", SDT_AArch64Zip>;
def AArch64trn1      : SDNode<"AArch64ISD::TRN1", SDT_AArch64Zip>;
def AArch64trn2      : SDNode<"AArch64ISD::TRN2", SDT_AArch64Zip>;

def AArch64movi_edit : SDNode<"AArch64ISD::MOVIedit", SDT_AArch64MOVIedit>;
def AArch64movi_shift : SDNode<"AArch64ISD::MOVIshift", SDT_AArch64MOVIshift>;
def AArch64movi_msl : SDNode<"AArch64ISD::MOVImsl", SDT_AArch64MOVIshift>;
def AArch64mvni_shift : SDNode<"AArch64ISD::MVNIshift", SDT_AArch64MOVIshift>;
def AArch64mvni_msl : SDNode<"AArch64ISD::MVNImsl", SDT_AArch64MOVIshift>;
def AArch64movi : SDNode<"AArch64ISD::MOVI", SDT_AArch64MOVIedit>;
def AArch64fmov : SDNode<"AArch64ISD::FMOV", SDT_AArch64MOVIedit>;

def AArch64rev16 : SDNode<"AArch64ISD::REV16", SDT_AArch64UnaryVec>;
def AArch64rev32 : SDNode<"AArch64ISD::REV32", SDT_AArch64UnaryVec>;
def AArch64rev64 : SDNode<"AArch64ISD::REV64", SDT_AArch64UnaryVec>;
def AArch64ext : SDNode<"AArch64ISD::EXT", SDT_AArch64ExtVec>;

def AArch64vashr : SDNode<"AArch64ISD::VASHR", SDT_AArch64vshift>;
def AArch64vlshr : SDNode<"AArch64ISD::VLSHR", SDT_AArch64vshift>;
def AArch64vshl : SDNode<"AArch64ISD::VSHL", SDT_AArch64vshift>;
def AArch64sqshli : SDNode<"AArch64ISD::SQSHL_I", SDT_AArch64vshift>;
def AArch64uqshli : SDNode<"AArch64ISD::UQSHL_I", SDT_AArch64vshift>;
def AArch64sqshlui : SDNode<"AArch64ISD::SQSHLU_I", SDT_AArch64vshift>;
def AArch64srshri : SDNode<"AArch64ISD::SRSHR_I", SDT_AArch64vshift>;
def AArch64urshri : SDNode<"AArch64ISD::URSHR_I", SDT_AArch64vshift>;
def AArch64vsli : SDNode<"AArch64ISD::VSLI", SDT_AArch64vshiftinsert>;
def AArch64vsri : SDNode<"AArch64ISD::VSRI", SDT_AArch64vshiftinsert>;

def AArch64bit: SDNode<"AArch64ISD::BIT", SDT_AArch64trivec>;
def AArch64bsp: SDNode<"AArch64ISD::BSP", SDT_AArch64trivec>;

def AArch64cmeq: SDNode<"AArch64ISD::CMEQ", SDT_AArch64binvec>;
def AArch64cmge: SDNode<"AArch64ISD::CMGE", SDT_AArch64binvec>;
def AArch64cmgt: SDNode<"AArch64ISD::CMGT", SDT_AArch64binvec>;
def AArch64cmhi: SDNode<"AArch64ISD::CMHI", SDT_AArch64binvec>;
def AArch64cmhs: SDNode<"AArch64ISD::CMHS", SDT_AArch64binvec>;

def AArch64fcmeq: SDNode<"AArch64ISD::FCMEQ", SDT_AArch64fcmp>;
def AArch64fcmge: SDNode<"AArch64ISD::FCMGE", SDT_AArch64fcmp>;
def AArch64fcmgt: SDNode<"AArch64ISD::FCMGT", SDT_AArch64fcmp>;

def AArch64cmeqz: SDNode<"AArch64ISD::CMEQz", SDT_AArch64unvec>;
def AArch64cmgez: SDNode<"AArch64ISD::CMGEz", SDT_AArch64unvec>;
def AArch64cmgtz: SDNode<"AArch64ISD::CMGTz", SDT_AArch64unvec>;
def AArch64cmlez: SDNode<"AArch64ISD::CMLEz", SDT_AArch64unvec>;
def AArch64cmltz: SDNode<"AArch64ISD::CMLTz", SDT_AArch64unvec>;
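// There is no dedicated CMTST node; cmtst(x, y) is expressed below as
// vnot(cmeqz(and(x, y))), i.e. all-ones lanes wherever (x & y) is non-zero.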
def AArch64cmtst : PatFrag<(ops node:$LHS, node:$RHS),
                        (vnot (AArch64cmeqz (and node:$LHS, node:$RHS)))>;

def AArch64fcmeqz: SDNode<"AArch64ISD::FCMEQz", SDT_AArch64fcmpz>;
def AArch64fcmgez: SDNode<"AArch64ISD::FCMGEz", SDT_AArch64fcmpz>;
def AArch64fcmgtz: SDNode<"AArch64ISD::FCMGTz", SDT_AArch64fcmpz>;
def AArch64fcmlez: SDNode<"AArch64ISD::FCMLEz", SDT_AArch64fcmpz>;
def AArch64fcmltz: SDNode<"AArch64ISD::FCMLTz", SDT_AArch64fcmpz>;

def AArch64bici: SDNode<"AArch64ISD::BICi", SDT_AArch64vecimm>;
def AArch64orri: SDNode<"AArch64ISD::ORRi", SDT_AArch64vecimm>;

def AArch64tcret: SDNode<"AArch64ISD::TC_RETURN", SDT_AArch64TCRET,
                  [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;

def AArch64Prefetch        : SDNode<"AArch64ISD::PREFETCH", SDT_AArch64PREFETCH,
                               [SDNPHasChain, SDNPSideEffect]>;

def AArch64sitof: SDNode<"AArch64ISD::SITOF", SDT_AArch64ITOF>;
def AArch64uitof: SDNode<"AArch64ISD::UITOF", SDT_AArch64ITOF>;

def AArch64tlsdesc_callseq : SDNode<"AArch64ISD::TLSDESC_CALLSEQ",
                                    SDT_AArch64TLSDescCallSeq,
                                    [SDNPInGlue, SDNPOutGlue, SDNPHasChain,
                                     SDNPVariadic]>;


def AArch64WrapperLarge : SDNode<"AArch64ISD::WrapperLarge",
                                 SDT_AArch64WrapperLarge>;

def AArch64NvCast : SDNode<"AArch64ISD::NVCAST", SDTUnaryOp>;

def SDT_AArch64mull : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
                                    SDTCisSameAs<1, 2>]>;
def AArch64smull    : SDNode<"AArch64ISD::SMULL", SDT_AArch64mull>;
def AArch64umull    : SDNode<"AArch64ISD::UMULL", SDT_AArch64mull>;

def AArch64frecpe   : SDNode<"AArch64ISD::FRECPE", SDTFPUnaryOp>;
def AArch64frecps   : SDNode<"AArch64ISD::FRECPS", SDTFPBinOp>;
def AArch64frsqrte  : SDNode<"AArch64ISD::FRSQRTE", SDTFPUnaryOp>;
def AArch64frsqrts  : SDNode<"AArch64ISD::FRSQRTS", SDTFPBinOp>;

def AArch64sdot     : SDNode<"AArch64ISD::SDOT", SDT_AArch64Dot>;
def AArch64udot     : SDNode<"AArch64ISD::UDOT", SDT_AArch64Dot>;

def AArch64saddv    : SDNode<"AArch64ISD::SADDV", SDT_AArch64UnaryVec>;
def AArch64uaddv    : SDNode<"AArch64ISD::UADDV", SDT_AArch64UnaryVec>;
def AArch64sminv    : SDNode<"AArch64ISD::SMINV", SDT_AArch64UnaryVec>;
def AArch64uminv    : SDNode<"AArch64ISD::UMINV", SDT_AArch64UnaryVec>;
def AArch64smaxv    : SDNode<"AArch64ISD::SMAXV", SDT_AArch64UnaryVec>;
def AArch64umaxv    : SDNode<"AArch64ISD::UMAXV", SDT_AArch64UnaryVec>;

def AArch64srhadd   : SDNode<"AArch64ISD::SRHADD", SDT_AArch64binvec>;
def AArch64urhadd   : SDNode<"AArch64ISD::URHADD", SDT_AArch64binvec>;
def AArch64shadd    : SDNode<"AArch64ISD::SHADD", SDT_AArch64binvec>;
def AArch64uhadd    : SDNode<"AArch64ISD::UHADD", SDT_AArch64binvec>;

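// Match both the generic absolute-difference nodes and the corresponding
// NEON intrinsics, so one set of instruction patterns covers both forms.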
def AArch64uabd     : PatFrags<(ops node:$lhs, node:$rhs),
                               [(abdu node:$lhs, node:$rhs),
                                (int_aarch64_neon_uabd node:$lhs, node:$rhs)]>;
def AArch64sabd     : PatFrags<(ops node:$lhs, node:$rhs),
                               [(abds node:$lhs, node:$rhs),
                                (int_aarch64_neon_sabd node:$lhs, node:$rhs)]>;

def AArch64uaddlp_n : SDNode<"AArch64ISD::UADDLP", SDT_AArch64uaddlp>;
def AArch64uaddlp   : PatFrags<(ops node:$src),
                               [(AArch64uaddlp_n node:$src),
                                (int_aarch64_neon_uaddlp node:$src)]>;

def SDT_AArch64SETTAG : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisPtrTy<1>]>;
def AArch64stg : SDNode<"AArch64ISD::STG", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stzg : SDNode<"AArch64ISD::STZG", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64st2g : SDNode<"AArch64ISD::ST2G", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stz2g : SDNode<"AArch64ISD::STZ2G", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;

def SDT_AArch64unpk : SDTypeProfile<1, 1, [
    SDTCisInt<0>, SDTCisInt<1>, SDTCisOpSmallerThanOp<1, 0>
]>;
def AArch64sunpkhi : SDNode<"AArch64ISD::SUNPKHI", SDT_AArch64unpk>;
def AArch64sunpklo : SDNode<"AArch64ISD::SUNPKLO", SDT_AArch64unpk>;
def AArch64uunpkhi : SDNode<"AArch64ISD::UUNPKHI", SDT_AArch64unpk>;
def AArch64uunpklo : SDNode<"AArch64ISD::UUNPKLO", SDT_AArch64unpk>;

def AArch64ldp : SDNode<"AArch64ISD::LDP", SDT_AArch64ldp, [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def AArch64stp : SDNode<"AArch64ISD::STP", SDT_AArch64stp, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stnp : SDNode<"AArch64ISD::STNP", SDT_AArch64stnp, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;

def AArch64tbl : SDNode<"AArch64ISD::TBL", SDT_AArch64TBL>;
def AArch64mrs : SDNode<"AArch64ISD::MRS",
                        SDTypeProfile<1, 1, [SDTCisVT<0, i64>, SDTCisVT<1, i32>]>,
                        [SDNPHasChain, SDNPOutGlue]>;
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//

// AArch64 Instruction Predicate Definitions.
// We could compute these on a per-module basis but doing so requires accessing
// the Function object through the <Target>Subtarget and objections were raised
// to that (see post-commit review comments for r301750).
let RecomputePerFunction = 1 in {
  def ForCodeSize   : Predicate<"shouldOptForSize(MF)">;
  def NotForCodeSize   : Predicate<"!shouldOptForSize(MF)">;
  // Avoid generating STRQro if it is slow, unless we're optimizing for code size.
  def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || shouldOptForSize(MF)">;

  def UseBTI : Predicate<[{ MF->getInfo<AArch64FunctionInfo>()->branchTargetEnforcement() }]>;
  def NotUseBTI : Predicate<[{ !MF->getInfo<AArch64FunctionInfo>()->branchTargetEnforcement() }]>;

  def SLSBLRMitigation : Predicate<[{ MF->getSubtarget<AArch64Subtarget>().hardenSlsBlr() }]>;
  def NoSLSBLRMitigation : Predicate<[{ !MF->getSubtarget<AArch64Subtarget>().hardenSlsBlr() }]>;
  // Toggles patterns which aren't beneficial in GlobalISel when we aren't
  // optimizing. This allows us to selectively use patterns without impacting
  // SelectionDAG's behaviour.
  // FIXME: One day there will probably be a nicer way to check for this, but
  // today is not that day.
  def OptimizedGISelOrOtherSelector : Predicate<"!MF->getFunction().hasOptNone() || MF->getProperties().hasProperty(MachineFunctionProperties::Property::FailedISel) || !MF->getProperties().hasProperty(MachineFunctionProperties::Property::Legalized)">;
}

include "AArch64InstrFormats.td"
include "SVEInstrFormats.td"
include "SMEInstrFormats.td"

//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Miscellaneous instructions.
//===----------------------------------------------------------------------===//

let Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1 in {
// We set Sched to the empty list because we expect these instructions to
// simply be removed in most cases.
def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                              [(AArch64callseq_start timm:$amt1, timm:$amt2)]>,
                              Sched<[]>;
def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                            [(AArch64callseq_end timm:$amt1, timm:$amt2)]>,
                            Sched<[]>;
} // Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1

let isReMaterializable = 1, isCodeGenOnly = 1 in {
// FIXME: The following pseudo instructions are only needed because remat
// cannot handle multiple instructions.  When that changes, they can be
// removed, along with the AArch64Wrapper node.

let AddedComplexity = 10 in
def LOADgot : Pseudo<(outs GPR64common:$dst), (ins i64imm:$addr),
                     [(set GPR64common:$dst, (AArch64LOADgot tglobaladdr:$addr))]>,
              Sched<[WriteLDAdr]>;

// The MOVaddr instruction should match only when the add is not folded
// into a load or store address.
def MOVaddr
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tglobaladdr:$hi),
                                            tglobaladdr:$low))]>,
      Sched<[WriteAdrAdr]>;
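// MOVaddr and the variants below are later expanded into an adrp/add pair,
// for example:
//   adrp x0, symbol
//   add  x0, x0, :lo12:symbol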
def MOVaddrJT
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tjumptable:$hi),
                                             tjumptable:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrCP
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tconstpool:$hi),
                                             tconstpool:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrBA
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tblockaddress:$hi),
                                             tblockaddress:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrTLS
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tglobaltlsaddr:$hi),
                                            tglobaltlsaddr:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrEXT
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp texternalsym:$hi),
                                            texternalsym:$low))]>,
      Sched<[WriteAdrAdr]>;
// Normally AArch64addlow either gets folded into a following ldr/str,
// or together with an adrp into MOVaddr above. For cases with TLS, it
// might appear without either of them, so allow lowering it into a plain
// add.
def ADDlowTLS
    : Pseudo<(outs GPR64sp:$dst), (ins GPR64sp:$src, i64imm:$low),
             [(set GPR64sp:$dst, (AArch64addlow GPR64sp:$src,
                                            tglobaltlsaddr:$low))]>,
      Sched<[WriteAdr]>;

} // isReMaterializable, isCodeGenOnly

def : Pat<(AArch64LOADgot tglobaltlsaddr:$addr),
          (LOADgot tglobaltlsaddr:$addr)>;

def : Pat<(AArch64LOADgot texternalsym:$addr),
          (LOADgot texternalsym:$addr)>;

def : Pat<(AArch64LOADgot tconstpool:$addr),
          (LOADgot tconstpool:$addr)>;

// A 32-bit jump table destination actually needs only 2 instructions, since
// we can use the table itself as a PC-relative base. But optimization occurs
// after branch relaxation, so be pessimistic about the size.
let Size = 12, Constraints = "@earlyclobber $dst,@earlyclobber $scratch",
    isNotDuplicable = 1 in {
def JumpTableDest32 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                             (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                      Sched<[]>;
def JumpTableDest16 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                             (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                      Sched<[]>;
def JumpTableDest8 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                            (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                     Sched<[]>;
}

// Space-consuming pseudo to aid testing of placement and reachability
// algorithms. Immediate operand is the number of bytes this "instruction"
// occupies; register operands can be used to enforce dependency and constrain
// the scheduler.
let hasSideEffects = 1, mayLoad = 1, mayStore = 1 in
def SPACE : Pseudo<(outs GPR64:$Rd), (ins i32imm:$size, GPR64:$Rn),
                   [(set GPR64:$Rd, (int_aarch64_space imm:$size, GPR64:$Rn))]>,
            Sched<[]>;

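// The SpeculationSafeValue pseudos below are expanded later by the
// speculation hardening pass, typically into either a plain copy or a
// value-masking sequence, depending on whether the mitigation is enabled.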
let hasSideEffects = 1, isCodeGenOnly = 1 in {
  def SpeculationSafeValueX
      : Pseudo<(outs GPR64:$dst), (ins GPR64:$src), []>, Sched<[]>;
  def SpeculationSafeValueW
      : Pseudo<(outs GPR32:$dst), (ins GPR32:$src), []>, Sched<[]>;
}

// SpeculationBarrierEndBB must only be used after an unconditional control
// flow, i.e. after a terminator for which isBarrier is True.
let hasSideEffects = 1, isCodeGenOnly = 1, isTerminator = 1, isBarrier = 1 in {
  def SpeculationBarrierISBDSBEndBB
      : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SpeculationBarrierSBEndBB
      : Pseudo<(outs), (ins), []>, Sched<[]>;
}

//===----------------------------------------------------------------------===//
// System instructions.
//===----------------------------------------------------------------------===//

def HINT : HintI<"hint">;
def : InstAlias<"nop",  (HINT 0b000)>;
def : InstAlias<"yield",(HINT 0b001)>;
def : InstAlias<"wfe",  (HINT 0b010)>;
def : InstAlias<"wfi",  (HINT 0b011)>;
def : InstAlias<"sev",  (HINT 0b100)>;
def : InstAlias<"sevl", (HINT 0b101)>;
def : InstAlias<"dgh",  (HINT 0b110)>;
def : InstAlias<"esb",  (HINT 0b10000)>, Requires<[HasRAS]>;
def : InstAlias<"csdb", (HINT 20)>;
// In order to be able to write readable assembly, LLVM should accept assembly
// inputs that use Branch Target Identification mnemonics, even with BTI disabled.
// However, in order to be compatible with other assemblers (e.g. GAS), LLVM
// should not emit these mnemonics unless BTI is enabled.
def : InstAlias<"bti",  (HINT 32), 0>;
def : InstAlias<"bti $op", (HINT btihint_op:$op), 0>;
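// The trailing "0" above marks those two aliases as parse-only: the assembler
// accepts them, but the printer keeps emitting the plain HINT form unless BTI
// is available (see the aliases just below).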
def : InstAlias<"bti",  (HINT 32)>, Requires<[HasBTI]>;
def : InstAlias<"bti $op", (HINT btihint_op:$op)>, Requires<[HasBTI]>;

// v8.2a Statistical Profiling extension
def : InstAlias<"psb $op",  (HINT psbhint_op:$op)>, Requires<[HasSPE]>;

// As far as LLVM is concerned this writes to the system's exclusive monitors.
let mayLoad = 1, mayStore = 1 in
def CLREX : CRmSystemI<imm0_15, 0b010, "clrex">;

// NOTE: ideally, this would have mayStore = 0, mayLoad = 0, but we cannot
// model patterns with sufficiently fine granularity.
let mayLoad = ?, mayStore = ? in {
def DMB   : CRmSystemI<barrier_op, 0b101, "dmb",
                       [(int_aarch64_dmb (i32 imm32_0_15:$CRm))]>;

def DSB   : CRmSystemI<barrier_op, 0b100, "dsb",
                       [(int_aarch64_dsb (i32 imm32_0_15:$CRm))]>;

def ISB   : CRmSystemI<barrier_op, 0b110, "isb",
                       [(int_aarch64_isb (i32 imm32_0_15:$CRm))]>;

def TSB   : CRmSystemI<barrier_op, 0b010, "tsb", []> {
  let CRm        = 0b0010;
  let Inst{12}   = 0;
  let Predicates = [HasTRACEV8_4];
}

def DSBnXS  : CRmSystemI<barrier_nxs_op, 0b001, "dsb"> {
  let CRm{1-0}   = 0b11;
  let Inst{9-8}  = 0b10;
  let Predicates = [HasXS];
}

let Predicates = [HasWFxT] in {
def WFET : RegInputSystemI<0b0000, 0b000, "wfet">;
def WFIT : RegInputSystemI<0b0000, 0b001, "wfit">;
}

// Branch Record Buffer two-word mnemonic instructions
class BRBEI<bits<3> op2, string keyword>
    : SimpleSystemI<0, (ins), "brb", keyword>, Sched<[WriteSys]> {
  let Inst{31-8} = 0b110101010000100101110010;
  let Inst{7-5} = op2;
  let Predicates = [HasBRBE];
}
def BRB_IALL: BRBEI<0b100, "\tiall">;
def BRB_INJ:  BRBEI<0b101, "\tinj">;

}

// Allow uppercase and lowercase keyword arguments for BRB IALL and BRB INJ
def : TokenAlias<"INJ", "inj">;
def : TokenAlias<"IALL", "iall">;

// ARMv8.2-A Dot Product
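// Each dot product instruction accumulates four 8-bit by 8-bit products into
// every 32-bit lane of the accumulator, e.g. "sdot v0.4s, v1.16b, v2.16b".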
let Predicates = [HasDotProd] in {
defm SDOT : SIMDThreeSameVectorDot<0, 0, "sdot", AArch64sdot>;
defm UDOT : SIMDThreeSameVectorDot<1, 0, "udot", AArch64udot>;
defm SDOTlane : SIMDThreeSameVectorDotIndex<0, 0, 0b10, "sdot", AArch64sdot>;
defm UDOTlane : SIMDThreeSameVectorDotIndex<1, 0, 0b10, "udot", AArch64udot>;
}

// ARMv8.6-A BFloat
let Predicates = [HasNEON, HasBF16] in {
defm BFDOT       : SIMDThreeSameVectorBFDot<1, "bfdot">;
defm BF16DOTlane : SIMDThreeSameVectorBF16DotI<0, "bfdot">;
def BFMMLA       : SIMDThreeSameVectorBF16MatrixMul<"bfmmla">;
def BFMLALB      : SIMDBF16MLAL<0, "bfmlalb", int_aarch64_neon_bfmlalb>;
def BFMLALT      : SIMDBF16MLAL<1, "bfmlalt", int_aarch64_neon_bfmlalt>;
def BFMLALBIdx   : SIMDBF16MLALIndex<0, "bfmlalb", int_aarch64_neon_bfmlalb>;
def BFMLALTIdx   : SIMDBF16MLALIndex<1, "bfmlalt", int_aarch64_neon_bfmlalt>;
def BFCVTN       : SIMD_BFCVTN;
def BFCVTN2      : SIMD_BFCVTN2;

// Vector-scalar BFDOT:
// The second source operand of the 64-bit variant of BF16DOTlane is a 128-bit
// register (the instruction uses a single 32-bit lane from it), so the pattern
// is a bit tricky.
def : Pat<(v2f32 (int_aarch64_neon_bfdot
                    (v2f32 V64:$Rd), (v4bf16 V64:$Rn),
                    (v4bf16 (bitconvert
                      (v2i32 (AArch64duplane32
                        (v4i32 (bitconvert
                          (v8bf16 (insert_subvector undef,
                            (v4bf16 V64:$Rm),
                            (i64 0))))),
                        VectorIndexS:$idx)))))),
          (BF16DOTlanev4bf16 (v2f32 V64:$Rd), (v4bf16 V64:$Rn),
                             (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                             VectorIndexS:$idx)>;
}

let Predicates = [HasNEONorStreamingSVE, HasBF16] in {
def BFCVT : BF16ToSinglePrecision<"bfcvt">;
}

// ARMv8.6A AArch64 matrix multiplication
let Predicates = [HasMatMulInt8] in {
def  SMMLA : SIMDThreeSameVectorMatMul<0, 0, "smmla", int_aarch64_neon_smmla>;
def  UMMLA : SIMDThreeSameVectorMatMul<0, 1, "ummla", int_aarch64_neon_ummla>;
def USMMLA : SIMDThreeSameVectorMatMul<1, 0, "usmmla", int_aarch64_neon_usmmla>;
defm USDOT : SIMDThreeSameVectorDot<0, 1, "usdot", int_aarch64_neon_usdot>;
defm USDOTlane : SIMDThreeSameVectorDotIndex<0, 1, 0b10, "usdot", int_aarch64_neon_usdot>;

// There is no separate SUDOT node or intrinsic, so the lane form is matched
// from a usdot pattern. The second operand is used in the dup operation to
// repeat the indexed element.
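// In effect, "sudot vd, vn, vm[i]" is selected from
// usdot(vd, dup(vm[i]), vn), with the signed and unsigned inputs swapped.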
946class BaseSIMDSUDOTIndex<bit Q, string dst_kind, string lhs_kind,
947                         string rhs_kind, RegisterOperand RegType,
948                         ValueType AccumType, ValueType InputType>
949      : BaseSIMDThreeSameVectorDotIndex<Q, 0, 1, 0b00, "sudot", dst_kind,
950                                        lhs_kind, rhs_kind, RegType, AccumType,
951                                        InputType, null_frag> {
952  let Pattern = [(set (AccumType RegType:$dst),
953                      (AccumType (int_aarch64_neon_usdot (AccumType RegType:$Rd),
954                                 (InputType (bitconvert (AccumType
955                                    (AArch64duplane32 (v4i32 V128:$Rm),
956                                        VectorIndexS:$idx)))),
957                                 (InputType RegType:$Rn))))];
958}
959
960multiclass SIMDSUDOTIndex {
961  def v8i8  : BaseSIMDSUDOTIndex<0, ".2s", ".8b", ".4b", V64, v2i32, v8i8>;
962  def v16i8 : BaseSIMDSUDOTIndex<1, ".4s", ".16b", ".4b", V128, v4i32, v16i8>;
963}
964
965defm SUDOTlane : SIMDSUDOTIndex;
966
967}
968
969// ARMv8.2-A FP16 Fused Multiply-Add Long
970let Predicates = [HasNEON, HasFP16FML] in {
971defm FMLAL      : SIMDThreeSameVectorFML<0, 1, 0b001, "fmlal", int_aarch64_neon_fmlal>;
972defm FMLSL      : SIMDThreeSameVectorFML<0, 1, 0b101, "fmlsl", int_aarch64_neon_fmlsl>;
973defm FMLAL2     : SIMDThreeSameVectorFML<1, 0, 0b001, "fmlal2", int_aarch64_neon_fmlal2>;
974defm FMLSL2     : SIMDThreeSameVectorFML<1, 0, 0b101, "fmlsl2", int_aarch64_neon_fmlsl2>;
defm FMLALlane  : SIMDThreeSameVectorFMLIndex<0, 0b0000, "fmlal", int_aarch64_neon_fmlal>;
defm FMLSLlane  : SIMDThreeSameVectorFMLIndex<0, 0b0100, "fmlsl", int_aarch64_neon_fmlsl>;
defm FMLAL2lane : SIMDThreeSameVectorFMLIndex<1, 0b1000, "fmlal2", int_aarch64_neon_fmlal2>;
defm FMLSL2lane : SIMDThreeSameVectorFMLIndex<1, 0b1100, "fmlsl2", int_aarch64_neon_fmlsl2>;
}

// Armv8.2-A Crypto extensions
let Predicates = [HasSHA3] in {
def SHA512H   : CryptoRRRTied<0b0, 0b00, "sha512h">;
def SHA512H2  : CryptoRRRTied<0b0, 0b01, "sha512h2">;
def SHA512SU0 : CryptoRRTied_2D<0b0, 0b00, "sha512su0">;
def SHA512SU1 : CryptoRRRTied_2D<0b0, 0b10, "sha512su1">;
def RAX1      : CryptoRRR_2D<0b0,0b11, "rax1">;
def EOR3      : CryptoRRRR_16B<0b00, "eor3">;
def BCAX      : CryptoRRRR_16B<0b01, "bcax">;
def XAR       : CryptoRRRi6<"xar">;

class SHA3_pattern<Instruction INST, Intrinsic OpNode, ValueType VecTy>
  : Pat<(VecTy (OpNode (VecTy V128:$Vd), (VecTy V128:$Vn), (VecTy V128:$Vm))),
        (INST (VecTy V128:$Vd), (VecTy V128:$Vn), (VecTy V128:$Vm))>;

def : Pat<(v2i64 (int_aarch64_crypto_sha512su0 (v2i64 V128:$Vn), (v2i64 V128:$Vm))),
          (SHA512SU0 (v2i64 V128:$Vn), (v2i64 V128:$Vm))>;

def : SHA3_pattern<SHA512H, int_aarch64_crypto_sha512h, v2i64>;
def : SHA3_pattern<SHA512H2, int_aarch64_crypto_sha512h2, v2i64>;
def : SHA3_pattern<SHA512SU1, int_aarch64_crypto_sha512su1, v2i64>;

def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v16i8>;
def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v8i16>;
def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v4i32>;
def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v2i64>;

class EOR3_pattern<ValueType VecTy>
  : Pat<(xor (xor (VecTy V128:$Vn), (VecTy V128:$Vm)), (VecTy V128:$Va)),
        (EOR3 (VecTy V128:$Vn), (VecTy V128:$Vm), (VecTy V128:$Va))>;

def : EOR3_pattern<v16i8>;
def : EOR3_pattern<v8i16>;
def : EOR3_pattern<v4i32>;
def : EOR3_pattern<v2i64>;

def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v16i8>;
def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v8i16>;
def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v4i32>;
def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v2i64>;

def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v16i8>;
def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v8i16>;
def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v4i32>;
def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v2i64>;

def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v16i8>;
def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v8i16>;
def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v4i32>;
def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v2i64>;

def : Pat<(v2i64 (int_aarch64_crypto_rax1 (v2i64 V128:$Vn), (v2i64 V128:$Vm))),
          (RAX1 (v2i64 V128:$Vn), (v2i64 V128:$Vm))>;

def : Pat<(v2i64 (int_aarch64_crypto_xar (v2i64 V128:$Vn), (v2i64 V128:$Vm), (i64 timm0_63:$imm))),
          (XAR (v2i64 V128:$Vn), (v2i64 V128:$Vm), (timm0_63:$imm))>;


} // HasSHA3

let Predicates = [HasSM4] in {
def SM3TT1A   : CryptoRRRi2Tied<0b0, 0b00, "sm3tt1a">;
def SM3TT1B   : CryptoRRRi2Tied<0b0, 0b01, "sm3tt1b">;
def SM3TT2A   : CryptoRRRi2Tied<0b0, 0b10, "sm3tt2a">;
def SM3TT2B   : CryptoRRRi2Tied<0b0, 0b11, "sm3tt2b">;
def SM3SS1    : CryptoRRRR_4S<0b10, "sm3ss1">;
def SM3PARTW1 : CryptoRRRTied_4S<0b1, 0b00, "sm3partw1">;
def SM3PARTW2 : CryptoRRRTied_4S<0b1, 0b01, "sm3partw2">;
def SM4ENCKEY : CryptoRRR_4S<0b1, 0b10, "sm4ekey">;
def SM4E      : CryptoRRTied_4S<0b0, 0b01, "sm4e">;

def : Pat<(v4i32 (int_aarch64_crypto_sm3ss1 (v4i32 V128:$Vn), (v4i32 V128:$Vm), (v4i32 V128:$Va))),
          (SM3SS1 (v4i32 V128:$Vn), (v4i32 V128:$Vm), (v4i32 V128:$Va))>;

class SM3PARTW_pattern<Instruction INST, Intrinsic OpNode>
  : Pat<(v4i32 (OpNode (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm))),
        (INST (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm))>;

class SM3TT_pattern<Instruction INST, Intrinsic OpNode>
  : Pat<(v4i32 (OpNode (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm), (i64 VectorIndexS_timm:$imm) )),
        (INST (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm), (VectorIndexS_timm:$imm))>;

class SM4_pattern<Instruction INST, Intrinsic OpNode>
  : Pat<(v4i32 (OpNode (v4i32 V128:$Vn), (v4i32 V128:$Vm))),
        (INST (v4i32 V128:$Vn), (v4i32 V128:$Vm))>;

def : SM3PARTW_pattern<SM3PARTW1, int_aarch64_crypto_sm3partw1>;
def : SM3PARTW_pattern<SM3PARTW2, int_aarch64_crypto_sm3partw2>;

def : SM3TT_pattern<SM3TT1A, int_aarch64_crypto_sm3tt1a>;
def : SM3TT_pattern<SM3TT1B, int_aarch64_crypto_sm3tt1b>;
def : SM3TT_pattern<SM3TT2A, int_aarch64_crypto_sm3tt2a>;
def : SM3TT_pattern<SM3TT2B, int_aarch64_crypto_sm3tt2b>;

def : SM4_pattern<SM4ENCKEY, int_aarch64_crypto_sm4ekey>;
def : SM4_pattern<SM4E, int_aarch64_crypto_sm4e>;
} // HasSM4

let Predicates = [HasRCPC] in {
  // v8.3 Release Consistent processor consistent (RCpc) support, optional in v8.2.
  def LDAPRB  : RCPCLoad<0b00, "ldaprb", GPR32>;
  def LDAPRH  : RCPCLoad<0b01, "ldaprh", GPR32>;
  def LDAPRW  : RCPCLoad<0b10, "ldapr", GPR32>;
  def LDAPRX  : RCPCLoad<0b11, "ldapr", GPR64>;
}

// v8.3a complex add and multiply-accumulate. No predicate here; that is done
// inside the multiclass, as the FP16 versions need different predicates.
defm FCMLA : SIMDThreeSameVectorTiedComplexHSD<1, 0b110, complexrotateop,
                                               "fcmla", null_frag>;
defm FCADD : SIMDThreeSameVectorComplexHSD<1, 0b111, complexrotateopodd,
                                           "fcadd", null_frag>;
defm FCMLA : SIMDIndexedTiedComplexHSD<0, 1, complexrotateop, "fcmla">;

let Predicates = [HasComplxNum, HasNEON, HasFullFP16] in {
  def : Pat<(v4f16 (int_aarch64_neon_vcadd_rot90 (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
            (FCADDv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm), (i32 0))>;
  def : Pat<(v4f16 (int_aarch64_neon_vcadd_rot270 (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
            (FCADDv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm), (i32 1))>;
  def : Pat<(v8f16 (int_aarch64_neon_vcadd_rot90 (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
            (FCADDv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 0))>;
  def : Pat<(v8f16 (int_aarch64_neon_vcadd_rot270 (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
            (FCADDv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 1))>;
}

let Predicates = [HasComplxNum, HasNEON] in {
  def : Pat<(v2f32 (int_aarch64_neon_vcadd_rot90 (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
            (FCADDv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 0))>;
  def : Pat<(v2f32 (int_aarch64_neon_vcadd_rot270 (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
            (FCADDv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 1))>;
  foreach Ty = [v4f32, v2f64] in {
    def : Pat<(Ty (int_aarch64_neon_vcadd_rot90 (Ty V128:$Rn), (Ty V128:$Rm))),
              (!cast<Instruction>("FCADD"#Ty) (Ty V128:$Rn), (Ty V128:$Rm), (i32 0))>;
    def : Pat<(Ty (int_aarch64_neon_vcadd_rot270 (Ty V128:$Rn), (Ty V128:$Rm))),
              (!cast<Instruction>("FCADD"#Ty) (Ty V128:$Rn), (Ty V128:$Rm), (i32 1))>;
  }
}

multiclass FCMLA_PATS<ValueType ty, DAGOperand Reg> {
  def : Pat<(ty (int_aarch64_neon_vcmla_rot0 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 0)>;
  def : Pat<(ty (int_aarch64_neon_vcmla_rot90 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 1)>;
  def : Pat<(ty (int_aarch64_neon_vcmla_rot180 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 2)>;
  def : Pat<(ty (int_aarch64_neon_vcmla_rot270 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 3)>;
}

multiclass FCMLA_LANE_PATS<ValueType ty, DAGOperand Reg, dag RHSDup> {
  def : Pat<(ty (int_aarch64_neon_vcmla_rot0 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 0)>;
  def : Pat<(ty (int_aarch64_neon_vcmla_rot90 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 1)>;
  def : Pat<(ty (int_aarch64_neon_vcmla_rot180 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 2)>;
  def : Pat<(ty (int_aarch64_neon_vcmla_rot270 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 3)>;
}


let Predicates = [HasComplxNum, HasNEON, HasFullFP16] in {
  defm : FCMLA_PATS<v4f16, V64>;
  defm : FCMLA_PATS<v8f16, V128>;

  defm : FCMLA_LANE_PATS<v4f16, V64,
                         (v4f16 (bitconvert (v2i32 (AArch64duplane32 (v4i32 V128:$Rm), VectorIndexD:$idx))))>;
  defm : FCMLA_LANE_PATS<v8f16, V128,
                         (v8f16 (bitconvert (v4i32 (AArch64duplane32 (v4i32 V128:$Rm), VectorIndexS:$idx))))>;
}
let Predicates = [HasComplxNum, HasNEON] in {
  defm : FCMLA_PATS<v2f32, V64>;
  defm : FCMLA_PATS<v4f32, V128>;
  defm : FCMLA_PATS<v2f64, V128>;

  defm : FCMLA_LANE_PATS<v4f32, V128,
                         (v4f32 (bitconvert (v2i64 (AArch64duplane64 (v2i64 V128:$Rm), VectorIndexD:$idx))))>;
}

// v8.3a Pointer Authentication
// These instructions inhabit part of the hint space and so can be used on any
// armv8 target. Keeping the old HINT mnemonic when compiling without PA is
// important for compatibility with other assemblers (e.g. GAS) when building
// software that must run both on CPUs that implement PA and on CPUs that
// don't.
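// For example, on a core without PA, PACIASP below executes as "hint #25"
// (a NOP), while on a PA-capable core the same encoding signs LR using SP as
// the modifier.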
let Uses = [LR], Defs = [LR] in {
  def PACIAZ   : SystemNoOperands<0b000, "hint\t#24">;
  def PACIBZ   : SystemNoOperands<0b010, "hint\t#26">;
  let isAuthenticated = 1 in {
    def AUTIAZ   : SystemNoOperands<0b100, "hint\t#28">;
    def AUTIBZ   : SystemNoOperands<0b110, "hint\t#30">;
  }
}
let Uses = [LR, SP], Defs = [LR] in {
  def PACIASP  : SystemNoOperands<0b001, "hint\t#25">;
  def PACIBSP  : SystemNoOperands<0b011, "hint\t#27">;
  let isAuthenticated = 1 in {
    def AUTIASP  : SystemNoOperands<0b101, "hint\t#29">;
    def AUTIBSP  : SystemNoOperands<0b111, "hint\t#31">;
  }
}
let Uses = [X16, X17], Defs = [X17], CRm = 0b0001 in {
  def PACIA1716  : SystemNoOperands<0b000, "hint\t#8">;
  def PACIB1716  : SystemNoOperands<0b010, "hint\t#10">;
  let isAuthenticated = 1 in {
    def AUTIA1716  : SystemNoOperands<0b100, "hint\t#12">;
    def AUTIB1716  : SystemNoOperands<0b110, "hint\t#14">;
  }
}

let Uses = [LR], Defs = [LR], CRm = 0b0000 in {
  def XPACLRI   : SystemNoOperands<0b111, "hint\t#7">;
}

// To allow readable assembly to be written, LLVM should accept assembly
// inputs that use pointer authentication mnemonics even with PA disabled.
// However, to stay compatible with other assemblers (e.g. GAS), LLVM should
// not emit these mnemonics unless PA is enabled.
1198def : InstAlias<"paciaz", (PACIAZ), 0>;
1199def : InstAlias<"pacibz", (PACIBZ), 0>;
1200def : InstAlias<"autiaz", (AUTIAZ), 0>;
1201def : InstAlias<"autibz", (AUTIBZ), 0>;
1202def : InstAlias<"paciasp", (PACIASP), 0>;
1203def : InstAlias<"pacibsp", (PACIBSP), 0>;
1204def : InstAlias<"autiasp", (AUTIASP), 0>;
1205def : InstAlias<"autibsp", (AUTIBSP), 0>;
1206def : InstAlias<"pacia1716", (PACIA1716), 0>;
1207def : InstAlias<"pacib1716", (PACIB1716), 0>;
1208def : InstAlias<"autia1716", (AUTIA1716), 0>;
1209def : InstAlias<"autib1716", (AUTIB1716), 0>;
1210def : InstAlias<"xpaclri", (XPACLRI), 0>;

// These pointer authentication instructions require armv8.3a
let Predicates = [HasPAuth] in {

  // When PA is enabled, a better mnemonic should be emitted.
  def : InstAlias<"paciaz", (PACIAZ), 1>;
  def : InstAlias<"pacibz", (PACIBZ), 1>;
  def : InstAlias<"autiaz", (AUTIAZ), 1>;
  def : InstAlias<"autibz", (AUTIBZ), 1>;
  def : InstAlias<"paciasp", (PACIASP), 1>;
  def : InstAlias<"pacibsp", (PACIBSP), 1>;
  def : InstAlias<"autiasp", (AUTIASP), 1>;
  def : InstAlias<"autibsp", (AUTIBSP), 1>;
  def : InstAlias<"pacia1716", (PACIA1716), 1>;
  def : InstAlias<"pacib1716", (PACIB1716), 1>;
  def : InstAlias<"autia1716", (AUTIA1716), 1>;
  def : InstAlias<"autib1716", (AUTIB1716), 1>;
  def : InstAlias<"xpaclri", (XPACLRI), 1>;

  multiclass SignAuth<bits<3> prefix, bits<3> prefix_z, string asm,
                      SDPatternOperator op> {
    def IA   : SignAuthOneData<prefix, 0b00, !strconcat(asm,  "ia"), op>;
    def IB   : SignAuthOneData<prefix, 0b01, !strconcat(asm,  "ib"), op>;
    def DA   : SignAuthOneData<prefix, 0b10, !strconcat(asm,  "da"), op>;
    def DB   : SignAuthOneData<prefix, 0b11, !strconcat(asm,  "db"), op>;
    def IZA  : SignAuthZero<prefix_z,  0b00, !strconcat(asm, "iza"), op>;
    def DZA  : SignAuthZero<prefix_z,  0b10, !strconcat(asm, "dza"), op>;
    def IZB  : SignAuthZero<prefix_z,  0b01, !strconcat(asm, "izb"), op>;
    def DZB  : SignAuthZero<prefix_z,  0b11, !strconcat(asm, "dzb"), op>;
  }

  defm PAC : SignAuth<0b000, 0b010, "pac", int_ptrauth_sign>;
  defm AUT : SignAuth<0b001, 0b011, "aut", null_frag>;

  def XPACI : ClearAuth<0, "xpaci">;
  def XPACD : ClearAuth<1, "xpacd">;

  def PACGA : SignAuthTwoOperand<0b1100, "pacga", int_ptrauth_sign_generic>;

  // Combined Instructions
  let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1  in {
    def BRAA    : AuthBranchTwoOperands<0, 0, "braa">;
    def BRAB    : AuthBranchTwoOperands<0, 1, "brab">;
  }
  let isCall = 1, Defs = [LR], Uses = [SP] in {
    def BLRAA   : AuthBranchTwoOperands<1, 0, "blraa">;
    def BLRAB   : AuthBranchTwoOperands<1, 1, "blrab">;
  }

  let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1  in {
    def BRAAZ   : AuthOneOperand<0b000, 0, "braaz">;
    def BRABZ   : AuthOneOperand<0b000, 1, "brabz">;
  }
  let isCall = 1, Defs = [LR], Uses = [SP] in {
    def BLRAAZ  : AuthOneOperand<0b001, 0, "blraaz">;
    def BLRABZ  : AuthOneOperand<0b001, 1, "blrabz">;
  }

  let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
    def RETAA   : AuthReturn<0b010, 0, "retaa">;
    def RETAB   : AuthReturn<0b010, 1, "retab">;
    def ERETAA  : AuthReturn<0b100, 0, "eretaa">;
    def ERETAB  : AuthReturn<0b100, 1, "eretab">;
  }

  defm LDRAA  : AuthLoad<0, "ldraa", simm10Scaled>;
  defm LDRAB  : AuthLoad<1, "ldrab", simm10Scaled>;

}

// v8.3a floating point conversion for javascript
let Predicates = [HasJS, HasFPARMv8], Defs = [NZCV] in
def FJCVTZS  : BaseFPToIntegerUnscaled<0b01, 0b11, 0b110, FPR64, GPR32,
                                      "fjcvtzs",
                                      [(set GPR32:$Rd,
                                         (int_aarch64_fjcvtzs FPR64:$Rn))]> {
  let Inst{31} = 0;
} // HasJS, HasFPARMv8

// v8.4 Flag manipulation instructions
let Predicates = [HasFlagM], Defs = [NZCV], Uses = [NZCV] in {
def CFINV : SimpleSystemI<0, (ins), "cfinv", "">, Sched<[WriteSys]> {
  let Inst{20-5} = 0b0000001000000000;
}
def SETF8  : BaseFlagManipulation<0, 0, (ins GPR32:$Rn), "setf8", "{\t$Rn}">;
def SETF16 : BaseFlagManipulation<0, 1, (ins GPR32:$Rn), "setf16", "{\t$Rn}">;
def RMIF   : FlagRotate<(ins GPR64:$Rn, uimm6:$imm, imm0_15:$mask), "rmif",
                        "{\t$Rn, $imm, $mask}">;
} // HasFlagM

// v8.5 Flag manipulation instructions
let Predicates = [HasAltNZCV], Uses = [NZCV], Defs = [NZCV] in {

def XAFLAG : PstateWriteSimple<(ins), "xaflag", "">, Sched<[WriteSys]> {
  let Inst{18-16} = 0b000;
  let Inst{11-8} = 0b0000;
  let Unpredictable{11-8} = 0b1111;
  let Inst{7-5} = 0b001;
}

def AXFLAG : PstateWriteSimple<(ins), "axflag", "">, Sched<[WriteSys]> {
  let Inst{18-16} = 0b000;
  let Inst{11-8} = 0b0000;
  let Unpredictable{11-8} = 0b1111;
  let Inst{7-5} = 0b010;
}
} // HasAltNZCV


// Armv8.5-A speculation barrier
def SB : SimpleSystemI<0, (ins), "sb", "">, Sched<[]> {
  let Inst{20-5} = 0b0001100110000111;
  let Unpredictable{11-8} = 0b1111;
  let Predicates = [HasSB];
  let hasSideEffects = 1;
}

def : InstAlias<"clrex", (CLREX 0xf)>;
def : InstAlias<"isb", (ISB 0xf)>;
def : InstAlias<"ssbb", (DSB 0)>;
def : InstAlias<"pssbb", (DSB 4)>;
def : InstAlias<"dfb", (DSB 0b1100)>, Requires<[HasV8_0r]>;

def MRS    : MRSI;
def MSR    : MSRI;
def MSRpstateImm1 : MSRpstateImm0_1;
def MSRpstateImm4 : MSRpstateImm0_15;

def : Pat<(AArch64mrs imm:$id),
          (MRS imm:$id)>;

// The thread pointer (on Linux, at least, where this has been implemented) is
// TPIDR_EL0.
def MOVbaseTLS : Pseudo<(outs GPR64:$dst), (ins),
                       [(set GPR64:$dst, AArch64threadpointer)]>, Sched<[WriteSys]>;
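// This pseudo is later expanded to a plain MRS of the thread register
// (typically "mrs $dst, TPIDR_EL0" on Linux, per the comment above).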

let Uses = [ X9 ], Defs = [ X16, X17, LR, NZCV ] in {
def HWASAN_CHECK_MEMACCESS : Pseudo<
  (outs), (ins GPR64noip:$ptr, i32imm:$accessinfo),
  [(int_hwasan_check_memaccess X9, GPR64noip:$ptr, (i32 timm:$accessinfo))]>,
  Sched<[]>;
}

let Uses = [ X20 ], Defs = [ X16, X17, LR, NZCV ] in {
def HWASAN_CHECK_MEMACCESS_SHORTGRANULES : Pseudo<
  (outs), (ins GPR64noip:$ptr, i32imm:$accessinfo),
  [(int_hwasan_check_memaccess_shortgranules X20, GPR64noip:$ptr, (i32 timm:$accessinfo))]>,
  Sched<[]>;
}

// The cycle counter PMC register is PMCCNTR_EL0.
let Predicates = [HasPerfMon] in
def : Pat<(readcyclecounter), (MRS 0xdce8)>;

// FPCR register
def : Pat<(i64 (int_aarch64_get_fpcr)), (MRS 0xda20)>;
def : Pat<(int_aarch64_set_fpcr i64:$val), (MSR 0xda20, GPR64:$val)>;

// Generic system instructions
def SYSxt  : SystemXtI<0, "sys">;
def SYSLxt : SystemLXtI<1, "sysl">;

def : InstAlias<"sys $op1, $Cn, $Cm, $op2",
                (SYSxt imm0_7:$op1, sys_cr_op:$Cn,
                 sys_cr_op:$Cm, imm0_7:$op2, XZR)>;


let Predicates = [HasTME] in {

def TSTART : TMSystemI<0b0000, "tstart",
                      [(set GPR64:$Rt, (int_aarch64_tstart))]>;

def TCOMMIT : TMSystemINoOperand<0b0000, "tcommit", [(int_aarch64_tcommit)]>;

def TCANCEL : TMSystemException<0b011, "tcancel",
                                [(int_aarch64_tcancel timm64_0_65535:$imm)]>;

def TTEST : TMSystemI<0b0001, "ttest", [(set GPR64:$Rt, (int_aarch64_ttest))]> {
  let mayLoad = 0;
  let mayStore = 0;
}
} // HasTME

//===----------------------------------------------------------------------===//
// Move immediate instructions.
//===----------------------------------------------------------------------===//

defm MOVK : InsertImmediate<0b11, "movk">;
defm MOVN : MoveImmediate<0b00, "movn">;

let PostEncoderMethod = "fixMOVZ" in
defm MOVZ : MoveImmediate<0b10, "movz">;

// First group of aliases covers an implicit "lsl #0".
def : InstAlias<"movk $dst, $imm", (MOVKWi GPR32:$dst, timm32_0_65535:$imm, 0), 0>;
def : InstAlias<"movk $dst, $imm", (MOVKXi GPR64:$dst, timm32_0_65535:$imm, 0), 0>;
def : InstAlias<"movn $dst, $imm", (MOVNWi GPR32:$dst, timm32_0_65535:$imm, 0)>;
def : InstAlias<"movn $dst, $imm", (MOVNXi GPR64:$dst, timm32_0_65535:$imm, 0)>;
def : InstAlias<"movz $dst, $imm", (MOVZWi GPR32:$dst, timm32_0_65535:$imm, 0)>;
def : InstAlias<"movz $dst, $imm", (MOVZXi GPR64:$dst, timm32_0_65535:$imm, 0)>;

// Next, we have various ELF relocations with the ":XYZ_g0:sym" syntax.
def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g3:$sym, 48)>;
def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g2:$sym, 32)>;
def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g1:$sym, 16)>;
def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g0:$sym, 0)>;

def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g3:$sym, 48)>;
def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g2:$sym, 32)>;
def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g1:$sym, 16)>;
def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g0:$sym, 0)>;

def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g3:$sym, 48), 0>;
def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g2:$sym, 32), 0>;
def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g1:$sym, 16), 0>;
def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g0:$sym, 0), 0>;

def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movw_symbol_g1:$sym, 16)>;
def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movw_symbol_g0:$sym, 0)>;

def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movw_symbol_g1:$sym, 16)>;
def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movw_symbol_g0:$sym, 0)>;

def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movw_symbol_g1:$sym, 16), 0>;
def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movw_symbol_g0:$sym, 0), 0>;

// Final group of aliases covers true "mov $Rd, $imm" cases.
multiclass movw_mov_alias<string basename, Instruction INST, RegisterClass GPR,
                          int width, int shift> {
  def _asmoperand : AsmOperandClass {
    let Name = basename # width # "_lsl" # shift # "MovAlias";
    let PredicateMethod = "is" # basename # "MovAlias<" # width # ", "
                               # shift # ">";
    let RenderMethod = "add" # basename # "MovAliasOperands<" # shift # ">";
  }

  def _movimm : Operand<i32> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_asmoperand");
  }

  def : InstAlias<"mov $Rd, $imm",
                  (INST GPR:$Rd, !cast<Operand>(NAME # "_movimm"):$imm, shift)>;
}
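// E.g. "mov w0, #0x120000" matches the MOVZ/lsl #16 alias instantiated below
// and is encoded as "movz w0, #0x12, lsl #16".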

defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 0>;
defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 16>;

defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 0>;
defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 16>;
defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 32>;
defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 48>;

defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 0>;
defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 16>;

defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 0>;
defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 16>;
defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 32>;
defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 48>;

let isReMaterializable = 1, isCodeGenOnly = 1, isMoveImm = 1,
    isAsCheapAsAMove = 1 in {
// FIXME: The following pseudo instructions are only needed because remat
// cannot handle multiple instructions.  When that changes, we can select
// directly to the real instructions and get rid of these pseudos.

def MOVi32imm
    : Pseudo<(outs GPR32:$dst), (ins i32imm:$src),
             [(set GPR32:$dst, imm:$src)]>,
      Sched<[WriteImm]>;
def MOVi64imm
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$src),
             [(set GPR64:$dst, imm:$src)]>,
      Sched<[WriteImm]>;
} // isReMaterializable, isCodeGenOnly

// If possible, we want to use MOVi32imm even for 64-bit moves. This gives the
// eventual expansion code fewer bits to worry about getting right. Marshalling
// the types is a little tricky though:
def i64imm_32bit : ImmLeaf<i64, [{
  return (Imm & 0xffffffffULL) == static_cast<uint64_t>(Imm);
}]>;

def s64imm_32bit : ImmLeaf<i64, [{
  int64_t Imm64 = static_cast<int64_t>(Imm);
  return Imm64 >= std::numeric_limits<int32_t>::min() &&
         Imm64 <= std::numeric_limits<int32_t>::max();
}]>;
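// E.g. 0xffffffff satisfies i64imm_32bit (it zero-extends from 32 bits) but
// not s64imm_32bit, while -1 satisfies s64imm_32bit but not i64imm_32bit.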

def trunc_imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i32);
}]>;

def gi_trunc_imm : GICustomOperandRenderer<"renderTruncImm">,
  GISDNodeXFormEquiv<trunc_imm>;

let Predicates = [OptimizedGISelOrOtherSelector] in {
// The SUBREG_TO_REG isn't eliminated at -O0, which can result in pointless
// copies.
def : Pat<(i64 i64imm_32bit:$src),
          (SUBREG_TO_REG (i64 0), (MOVi32imm (trunc_imm imm:$src)), sub_32)>;
}
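// This works because a write to a W register implicitly zeroes the upper 32
// bits of the corresponding X register, so the expanded MOVi32imm already
// produces the full 64-bit value; SUBREG_TO_REG just models that fact.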

// Materialize FP constants via MOVi32imm/MOVi64imm (MachO large code model).
def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{
return CurDAG->getTargetConstant(
  N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i32);
}]>;

def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{
return CurDAG->getTargetConstant(
  N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i64);
}]>;


def : Pat<(f32 fpimm:$in),
  (COPY_TO_REGCLASS (MOVi32imm (bitcast_fpimm_to_i32 f32:$in)), FPR32)>;
def : Pat<(f64 fpimm:$in),
  (COPY_TO_REGCLASS (MOVi64imm (bitcast_fpimm_to_i64 f64:$in)), FPR64)>;


// Deal with the various forms of (ELF) large addressing with MOVZ/MOVK
// sequences.
def : Pat<(AArch64WrapperLarge tglobaladdr:$g3, tglobaladdr:$g2,
                             tglobaladdr:$g1, tglobaladdr:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tglobaladdr:$g0, 0),
                                  tglobaladdr:$g1, 16),
                          tglobaladdr:$g2, 32),
                  tglobaladdr:$g3, 48)>;

def : Pat<(AArch64WrapperLarge tblockaddress:$g3, tblockaddress:$g2,
                             tblockaddress:$g1, tblockaddress:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tblockaddress:$g0, 0),
                                  tblockaddress:$g1, 16),
                          tblockaddress:$g2, 32),
                  tblockaddress:$g3, 48)>;

def : Pat<(AArch64WrapperLarge tconstpool:$g3, tconstpool:$g2,
                             tconstpool:$g1, tconstpool:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tconstpool:$g0, 0),
                                  tconstpool:$g1, 16),
                          tconstpool:$g2, 32),
                  tconstpool:$g3, 48)>;

def : Pat<(AArch64WrapperLarge tjumptable:$g3, tjumptable:$g2,
                             tjumptable:$g1, tjumptable:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tjumptable:$g0, 0),
                                  tjumptable:$g1, 16),
                          tjumptable:$g2, 32),
                  tjumptable:$g3, 48)>;


//===----------------------------------------------------------------------===//
// Arithmetic instructions.
//===----------------------------------------------------------------------===//

// Add/subtract with carry.
defm ADC : AddSubCarry<0, "adc", "adcs", AArch64adc, AArch64adc_flag>;
defm SBC : AddSubCarry<1, "sbc", "sbcs", AArch64sbc, AArch64sbc_flag>;

def : InstAlias<"ngc $dst, $src",  (SBCWr  GPR32:$dst, WZR, GPR32:$src)>;
def : InstAlias<"ngc $dst, $src",  (SBCXr  GPR64:$dst, XZR, GPR64:$src)>;
def : InstAlias<"ngcs $dst, $src", (SBCSWr GPR32:$dst, WZR, GPR32:$src)>;
def : InstAlias<"ngcs $dst, $src", (SBCSXr GPR64:$dst, XZR, GPR64:$src)>;

// Add/subtract
defm ADD : AddSub<0, "add", "sub", add>;
defm SUB : AddSub<1, "sub", "add">;

def : InstAlias<"mov $dst, $src",
                (ADDWri GPR32sponly:$dst, GPR32sp:$src, 0, 0)>;
def : InstAlias<"mov $dst, $src",
                (ADDWri GPR32sp:$dst, GPR32sponly:$src, 0, 0)>;
def : InstAlias<"mov $dst, $src",
                (ADDXri GPR64sponly:$dst, GPR64sp:$src, 0, 0)>;
def : InstAlias<"mov $dst, $src",
                (ADDXri GPR64sp:$dst, GPR64sponly:$src, 0, 0)>;

defm ADDS : AddSubS<0, "adds", AArch64add_flag, "cmn", "subs", "cmp">;
defm SUBS : AddSubS<1, "subs", AArch64sub_flag, "cmp", "adds", "cmn">;

// Use SUBS instead of SUB to enable CSE between SUBS and SUB.
def : Pat<(sub GPR32sp:$Rn, addsub_shifted_imm32:$imm),
          (SUBSWri GPR32sp:$Rn, addsub_shifted_imm32:$imm)>;
def : Pat<(sub GPR64sp:$Rn, addsub_shifted_imm64:$imm),
          (SUBSXri GPR64sp:$Rn, addsub_shifted_imm64:$imm)>;
def : Pat<(sub GPR32:$Rn, GPR32:$Rm),
          (SUBSWrr GPR32:$Rn, GPR32:$Rm)>;
def : Pat<(sub GPR64:$Rn, GPR64:$Rm),
          (SUBSXrr GPR64:$Rn, GPR64:$Rm)>;
def : Pat<(sub GPR32:$Rn, arith_shifted_reg32:$Rm),
          (SUBSWrs GPR32:$Rn, arith_shifted_reg32:$Rm)>;
def : Pat<(sub GPR64:$Rn, arith_shifted_reg64:$Rm),
          (SUBSXrs GPR64:$Rn, arith_shifted_reg64:$Rm)>;
let AddedComplexity = 1 in {
def : Pat<(sub GPR32sp:$R2, arith_extended_reg32_i32:$R3),
          (SUBSWrx GPR32sp:$R2, arith_extended_reg32_i32:$R3)>;
def : Pat<(sub GPR64sp:$R2, arith_extended_reg32to64_i64:$R3),
          (SUBSXrx GPR64sp:$R2, arith_extended_reg32to64_i64:$R3)>;
}
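// E.g. if a function computes both "a - b" and a comparison of a with b, the
// flag-setting SUBS selected here and the SUBS produced for the compare can
// be CSE'd into a single instruction.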

// Because of the immediate format for add/sub-imm instructions, the
// expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1).
// These patterns capture that transformation.
let AddedComplexity = 1 in {
def : Pat<(add GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(add GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
def : Pat<(sub GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (ADDWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(sub GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (ADDXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
}
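// E.g. (add w0, -8) has no ADDWri encoding (the immediate field is an
// unsigned 12-bit value, optionally shifted by 12), so it is selected as
// "sub w0, w0, #8".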

// The same immediate-format restriction applies to the flag-setting add/sub
// nodes; these patterns capture the equivalent transformation for them.
let AddedComplexity = 1 in {
def : Pat<(AArch64add_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(AArch64add_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
def : Pat<(AArch64sub_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (ADDSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(AArch64sub_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (ADDSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
}

def : InstAlias<"neg $dst, $src", (SUBWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
def : InstAlias<"neg $dst, $src", (SUBXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
def : InstAlias<"neg $dst, $src$shift",
                (SUBWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
def : InstAlias<"neg $dst, $src$shift",
                (SUBXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;

def : InstAlias<"negs $dst, $src", (SUBSWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
def : InstAlias<"negs $dst, $src", (SUBSXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
def : InstAlias<"negs $dst, $src$shift",
                (SUBSWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
def : InstAlias<"negs $dst, $src$shift",
                (SUBSXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;


// Unsigned/Signed divide
defm UDIV : Div<0, "udiv", udiv>;
defm SDIV : Div<1, "sdiv", sdiv>;

def : Pat<(int_aarch64_udiv GPR32:$Rn, GPR32:$Rm), (UDIVWr GPR32:$Rn, GPR32:$Rm)>;
def : Pat<(int_aarch64_udiv GPR64:$Rn, GPR64:$Rm), (UDIVXr GPR64:$Rn, GPR64:$Rm)>;
def : Pat<(int_aarch64_sdiv GPR32:$Rn, GPR32:$Rm), (SDIVWr GPR32:$Rn, GPR32:$Rm)>;
def : Pat<(int_aarch64_sdiv GPR64:$Rn, GPR64:$Rm), (SDIVXr GPR64:$Rn, GPR64:$Rm)>;

// Variable shift
defm ASRV : Shift<0b10, "asr", sra>;
defm LSLV : Shift<0b00, "lsl", shl>;
defm LSRV : Shift<0b01, "lsr", srl>;
defm RORV : Shift<0b11, "ror", rotr>;

def : ShiftAlias<"asrv", ASRVWr, GPR32>;
def : ShiftAlias<"asrv", ASRVXr, GPR64>;
def : ShiftAlias<"lslv", LSLVWr, GPR32>;
def : ShiftAlias<"lslv", LSLVXr, GPR64>;
def : ShiftAlias<"lsrv", LSRVWr, GPR32>;
def : ShiftAlias<"lsrv", LSRVXr, GPR64>;
def : ShiftAlias<"rorv", RORVWr, GPR32>;
def : ShiftAlias<"rorv", RORVXr, GPR64>;

// Multiply-add
let AddedComplexity = 5 in {
defm MADD : MulAccum<0, "madd">;
defm MSUB : MulAccum<1, "msub">;

def : Pat<(i32 (mul GPR32:$Rn, GPR32:$Rm)),
          (MADDWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
def : Pat<(i64 (mul GPR64:$Rn, GPR64:$Rm)),
          (MADDXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;

def : Pat<(i32 (ineg (mul GPR32:$Rn, GPR32:$Rm))),
          (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
def : Pat<(i64 (ineg (mul GPR64:$Rn, GPR64:$Rm))),
          (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
def : Pat<(i32 (mul (ineg GPR32:$Rn), GPR32:$Rm)),
          (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
def : Pat<(i64 (mul (ineg GPR64:$Rn), GPR64:$Rm)),
          (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
} // AddedComplexity = 5

let AddedComplexity = 5 in {
def SMADDLrrr : WideMulAccum<0, 0b001, "smaddl", add, sext>;
def SMSUBLrrr : WideMulAccum<1, 0b001, "smsubl", sub, sext>;
def UMADDLrrr : WideMulAccum<0, 0b101, "umaddl", add, zext>;
def UMSUBLrrr : WideMulAccum<1, 0b101, "umsubl", sub, zext>;

def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (sext_inreg GPR64:$Rm, i32))),
          (SMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), (EXTRACT_SUBREG $Rm, sub_32), XZR)>;
def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (sext GPR32:$Rm))),
          (SMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), $Rm, XZR)>;
def : Pat<(i64 (mul (sext GPR32:$Rn), (sext GPR32:$Rm))),
          (SMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
def : Pat<(i64 (mul (and GPR64:$Rn, 0xFFFFFFFF), (and GPR64:$Rm, 0xFFFFFFFF))),
          (UMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), (EXTRACT_SUBREG $Rm, sub_32), XZR)>;
def : Pat<(i64 (mul (and GPR64:$Rn, 0xFFFFFFFF), (zext GPR32:$Rm))),
          (UMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), $Rm, XZR)>;
def : Pat<(i64 (mul (zext GPR32:$Rn), (zext GPR32:$Rm))),
          (UMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;

def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (sext GPR32:$Rm)))),
          (SMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (zext GPR32:$Rm)))),
          (UMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;

def : Pat<(i64 (mul (sext GPR32:$Rn), (s64imm_32bit:$C))),
          (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
def : Pat<(i64 (mul (zext GPR32:$Rn), (i64imm_32bit:$C))),
          (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C))),
          (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
                     (MOVi32imm (trunc_imm imm:$C)), XZR)>;

def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
          (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
          (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
def : Pat<(i64 (ineg (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)))),
          (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
                     (MOVi32imm (trunc_imm imm:$C)), XZR)>;

def : Pat<(i64 (add (mul (sext GPR32:$Rn), (s64imm_32bit:$C)), GPR64:$Ra)),
          (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
def : Pat<(i64 (add (mul (zext GPR32:$Rn), (i64imm_32bit:$C)), GPR64:$Ra)),
          (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
def : Pat<(i64 (add (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)),
                    GPR64:$Ra)),
          (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
                     (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;

def : Pat<(i64 (sub GPR64:$Ra, (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
          (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
def : Pat<(i64 (sub GPR64:$Ra, (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
          (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
def : Pat<(i64 (sub GPR64:$Ra, (mul (sext_inreg GPR64:$Rn, i32),
                                    (s64imm_32bit:$C)))),
          (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
                     (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
} // AddedComplexity = 5

def : MulAccumWAlias<"mul", MADDWrrr>;
def : MulAccumXAlias<"mul", MADDXrrr>;
def : MulAccumWAlias<"mneg", MSUBWrrr>;
def : MulAccumXAlias<"mneg", MSUBXrrr>;
def : WideMulAccumAlias<"smull", SMADDLrrr>;
def : WideMulAccumAlias<"smnegl", SMSUBLrrr>;
def : WideMulAccumAlias<"umull", UMADDLrrr>;
def : WideMulAccumAlias<"umnegl", UMSUBLrrr>;

// Multiply-high
def SMULHrr : MulHi<0b010, "smulh", mulhs>;
def UMULHrr : MulHi<0b110, "umulh", mulhu>;

// CRC32
def CRC32Brr : BaseCRC32<0, 0b00, 0, GPR32, int_aarch64_crc32b, "crc32b">;
def CRC32Hrr : BaseCRC32<0, 0b01, 0, GPR32, int_aarch64_crc32h, "crc32h">;
def CRC32Wrr : BaseCRC32<0, 0b10, 0, GPR32, int_aarch64_crc32w, "crc32w">;
def CRC32Xrr : BaseCRC32<1, 0b11, 0, GPR64, int_aarch64_crc32x, "crc32x">;

def CRC32CBrr : BaseCRC32<0, 0b00, 1, GPR32, int_aarch64_crc32cb, "crc32cb">;
def CRC32CHrr : BaseCRC32<0, 0b01, 1, GPR32, int_aarch64_crc32ch, "crc32ch">;
def CRC32CWrr : BaseCRC32<0, 0b10, 1, GPR32, int_aarch64_crc32cw, "crc32cw">;
def CRC32CXrr : BaseCRC32<1, 0b11, 1, GPR64, int_aarch64_crc32cx, "crc32cx">;

// v8.1 atomic CAS
defm CAS   : CompareAndSwap<0, 0, "">;
defm CASA  : CompareAndSwap<1, 0, "a">;
defm CASL  : CompareAndSwap<0, 1, "l">;
defm CASAL : CompareAndSwap<1, 1, "al">;

// v8.1 atomic CASP
defm CASP   : CompareAndSwapPair<0, 0, "">;
defm CASPA  : CompareAndSwapPair<1, 0, "a">;
defm CASPL  : CompareAndSwapPair<0, 1, "l">;
defm CASPAL : CompareAndSwapPair<1, 1, "al">;

// v8.1 atomic SWP
defm SWP   : Swap<0, 0, "">;
defm SWPA  : Swap<1, 0, "a">;
defm SWPL  : Swap<0, 1, "l">;
defm SWPAL : Swap<1, 1, "al">;

// v8.1 atomic LD<OP>(register): atomically loads the old value and stores the
// result of applying <OP> to it.
defm LDADD   : LDOPregister<0b000, "add", 0, 0, "">;
defm LDADDA  : LDOPregister<0b000, "add", 1, 0, "a">;
defm LDADDL  : LDOPregister<0b000, "add", 0, 1, "l">;
defm LDADDAL : LDOPregister<0b000, "add", 1, 1, "al">;
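// E.g. "ldadd w0, w1, [x2]" atomically loads the word at [x2] into w1 and
// stores the sum of the loaded value and w0 back to [x2].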

defm LDCLR   : LDOPregister<0b001, "clr", 0, 0, "">;
defm LDCLRA  : LDOPregister<0b001, "clr", 1, 0, "a">;
defm LDCLRL  : LDOPregister<0b001, "clr", 0, 1, "l">;
defm LDCLRAL : LDOPregister<0b001, "clr", 1, 1, "al">;

defm LDEOR   : LDOPregister<0b010, "eor", 0, 0, "">;
defm LDEORA  : LDOPregister<0b010, "eor", 1, 0, "a">;
defm LDEORL  : LDOPregister<0b010, "eor", 0, 1, "l">;
defm LDEORAL : LDOPregister<0b010, "eor", 1, 1, "al">;

defm LDSET   : LDOPregister<0b011, "set", 0, 0, "">;
defm LDSETA  : LDOPregister<0b011, "set", 1, 0, "a">;
defm LDSETL  : LDOPregister<0b011, "set", 0, 1, "l">;
defm LDSETAL : LDOPregister<0b011, "set", 1, 1, "al">;

defm LDSMAX   : LDOPregister<0b100, "smax", 0, 0, "">;
defm LDSMAXA  : LDOPregister<0b100, "smax", 1, 0, "a">;
defm LDSMAXL  : LDOPregister<0b100, "smax", 0, 1, "l">;
defm LDSMAXAL : LDOPregister<0b100, "smax", 1, 1, "al">;

defm LDSMIN   : LDOPregister<0b101, "smin", 0, 0, "">;
defm LDSMINA  : LDOPregister<0b101, "smin", 1, 0, "a">;
defm LDSMINL  : LDOPregister<0b101, "smin", 0, 1, "l">;
defm LDSMINAL : LDOPregister<0b101, "smin", 1, 1, "al">;

defm LDUMAX   : LDOPregister<0b110, "umax", 0, 0, "">;
defm LDUMAXA  : LDOPregister<0b110, "umax", 1, 0, "a">;
defm LDUMAXL  : LDOPregister<0b110, "umax", 0, 1, "l">;
defm LDUMAXAL : LDOPregister<0b110, "umax", 1, 1, "al">;

defm LDUMIN   : LDOPregister<0b111, "umin", 0, 0, "">;
defm LDUMINA  : LDOPregister<0b111, "umin", 1, 0, "a">;
defm LDUMINL  : LDOPregister<0b111, "umin", 0, 1, "l">;
defm LDUMINAL : LDOPregister<0b111, "umin", 1, 1, "al">;

// v8.1 atomic ST<OP>(register), as aliases for LD<OP>(register) with Rt=XZR.
defm : STOPregister<"stadd","LDADD">;  // STADDx
defm : STOPregister<"stclr","LDCLR">;  // STCLRx
defm : STOPregister<"steor","LDEOR">;  // STEORx
defm : STOPregister<"stset","LDSET">;  // STSETx
defm : STOPregister<"stsmax","LDSMAX">;// STSMAXx
defm : STOPregister<"stsmin","LDSMIN">;// STSMINx
defm : STOPregister<"stumax","LDUMAX">;// STUMAXx
defm : STOPregister<"stumin","LDUMIN">;// STUMINx

// v8.5 Memory Tagging Extension
let Predicates = [HasMTE] in {

def IRG   : BaseTwoOperand<0b0100, GPR64sp, "irg", int_aarch64_irg, GPR64sp, GPR64>,
            Sched<[]>{
  let Inst{31} = 1;
}
def GMI   : BaseTwoOperand<0b0101, GPR64, "gmi", int_aarch64_gmi, GPR64sp>, Sched<[]>{
  let Inst{31} = 1;
  let isNotDuplicable = 1;
}
def ADDG  : AddSubG<0, "addg", null_frag>;
def SUBG  : AddSubG<1, "subg", null_frag>;

def : InstAlias<"irg $dst, $src", (IRG GPR64sp:$dst, GPR64sp:$src, XZR), 1>;

def SUBP : SUBP<0, "subp", int_aarch64_subp>, Sched<[]>;
def SUBPS : SUBP<1, "subps", null_frag>, Sched<[]>{
  let Defs = [NZCV];
}

def : InstAlias<"cmpp $lhs, $rhs", (SUBPS XZR, GPR64sp:$lhs, GPR64sp:$rhs), 0>;

def LDG : MemTagLoad<"ldg", "\t$Rt, [$Rn, $offset]">;

def : Pat<(int_aarch64_addg (am_indexedu6s128 GPR64sp:$Rn, uimm6s16:$imm6), imm0_15:$imm4),
          (ADDG GPR64sp:$Rn, imm0_63:$imm6, imm0_15:$imm4)>;
def : Pat<(int_aarch64_ldg GPR64:$Rt, (am_indexeds9s128 GPR64sp:$Rn,  simm9s16:$offset)),
          (LDG GPR64:$Rt, GPR64sp:$Rn,  simm9s16:$offset)>;

def : InstAlias<"ldg $Rt, [$Rn]", (LDG GPR64:$Rt, GPR64sp:$Rn, 0), 1>;

def LDGM : MemTagVector<1, "ldgm", "\t$Rt, [$Rn]",
                   (outs GPR64:$Rt), (ins GPR64sp:$Rn)>;
def STGM : MemTagVector<0, "stgm", "\t$Rt, [$Rn]",
                   (outs), (ins GPR64:$Rt, GPR64sp:$Rn)>;
def STZGM : MemTagVector<0, "stzgm", "\t$Rt, [$Rn]",
                   (outs), (ins GPR64:$Rt, GPR64sp:$Rn)> {
  let Inst{23} = 0;
}

defm STG   : MemTagStore<0b00, "stg">;
defm STZG  : MemTagStore<0b01, "stzg">;
defm ST2G  : MemTagStore<0b10, "st2g">;
defm STZ2G : MemTagStore<0b11, "stz2g">;

def : Pat<(AArch64stg GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
          (STGOffset $Rn, $Rm, $imm)>;
def : Pat<(AArch64stzg GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
          (STZGOffset $Rn, $Rm, $imm)>;
def : Pat<(AArch64st2g GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
          (ST2GOffset $Rn, $Rm, $imm)>;
def : Pat<(AArch64stz2g GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
          (STZ2GOffset $Rn, $Rm, $imm)>;

defm STGP     : StorePairOffset <0b01, 0, GPR64z, simm7s16, "stgp">;
def  STGPpre  : StorePairPreIdx <0b01, 0, GPR64z, simm7s16, "stgp">;
def  STGPpost : StorePairPostIdx<0b01, 0, GPR64z, simm7s16, "stgp">;

def : Pat<(int_aarch64_stg GPR64:$Rt, (am_indexeds9s128 GPR64sp:$Rn, simm9s16:$offset)),
          (STGOffset GPR64:$Rt, GPR64sp:$Rn,  simm9s16:$offset)>;

def : Pat<(int_aarch64_stgp (am_indexed7s128 GPR64sp:$Rn, simm7s16:$imm), GPR64:$Rt, GPR64:$Rt2),
          (STGPi $Rt, $Rt2, $Rn, $imm)>;

def IRGstack
    : Pseudo<(outs GPR64sp:$Rd), (ins GPR64sp:$Rsp, GPR64:$Rm), []>,
      Sched<[]>;
def TAGPstack
    : Pseudo<(outs GPR64sp:$Rd), (ins GPR64sp:$Rn, uimm6s16:$imm6, GPR64sp:$Rm, imm0_15:$imm4), []>,
      Sched<[]>;

// Explicit SP in the first operand prevents ShrinkWrap optimization
// from leaving this instruction out of the stack frame. When IRGstack
// is transformed into IRG, this operand is replaced with the actual
// register / expression for the tagged base pointer of the current function.
def : Pat<(int_aarch64_irg_sp i64:$Rm), (IRGstack SP, i64:$Rm)>;

// Large STG to be expanded into a loop. $sz is the size, $Rn is the start
// address. $Rn_wback is one past the end of the range. $Rm is the loop counter.
let isCodeGenOnly=1, mayStore=1 in {
def STGloop_wback
    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn_wback), (ins i64imm:$sz, GPR64sp:$Rn),
             [], "$Rn = $Rn_wback,@earlyclobber $Rn_wback,@earlyclobber $Rm" >,
      Sched<[WriteAdr, WriteST]>;

def STZGloop_wback
    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn_wback), (ins i64imm:$sz, GPR64sp:$Rn),
             [], "$Rn = $Rn_wback,@earlyclobber $Rn_wback,@earlyclobber $Rm" >,
      Sched<[WriteAdr, WriteST]>;

// Variants of the above where $Rn2 is an independent register not tied to the
// input register $Rn. Their purpose is to allow a FrameIndex operand as $Rn
// (which of course cannot be written back).
def STGloop
    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn2), (ins i64imm:$sz, GPR64sp:$Rn),
             [], "@earlyclobber $Rn2,@earlyclobber $Rm" >,
      Sched<[WriteAdr, WriteST]>;

def STZGloop
    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn2), (ins i64imm:$sz, GPR64sp:$Rn),
             [], "@earlyclobber $Rn2,@earlyclobber $Rm" >,
      Sched<[WriteAdr, WriteST]>;
}

} // Predicates = [HasMTE]

//===----------------------------------------------------------------------===//
// Logical instructions.
//===----------------------------------------------------------------------===//

// (immediate)
defm ANDS : LogicalImmS<0b11, "ands", AArch64and_flag, "bics">;
defm AND  : LogicalImm<0b00, "and", and, "bic">;
defm EOR  : LogicalImm<0b10, "eor", xor, "eon">;
defm ORR  : LogicalImm<0b01, "orr", or, "orn">;

// FIXME: these aliases *are* canonical sometimes (when movz can't be used).
// It seems to be working right now, but putting logical_immXX here is a bit
// dodgy on the AsmParser side too.
1970def : InstAlias<"mov $dst, $imm", (ORRWri GPR32sp:$dst, WZR,
1971                                          logical_imm32:$imm), 0>;
1972def : InstAlias<"mov $dst, $imm", (ORRXri GPR64sp:$dst, XZR,
1973                                          logical_imm64:$imm), 0>;
1974
1975
1976// (register)
1977defm ANDS : LogicalRegS<0b11, 0, "ands", AArch64and_flag>;
1978defm BICS : LogicalRegS<0b11, 1, "bics",
1979                        BinOpFrag<(AArch64and_flag node:$LHS, (not node:$RHS))>>;
1980defm AND  : LogicalReg<0b00, 0, "and", and>;
1981defm BIC  : LogicalReg<0b00, 1, "bic",
1982                       BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
1983defm EON  : LogicalReg<0b10, 1, "eon",
1984                       BinOpFrag<(not (xor node:$LHS, node:$RHS))>>;
1985defm EOR  : LogicalReg<0b10, 0, "eor", xor>;
1986defm ORN  : LogicalReg<0b01, 1, "orn",
1987                       BinOpFrag<(or node:$LHS, (not node:$RHS))>>;
1988defm ORR  : LogicalReg<0b01, 0, "orr", or>;
1989
1990def : InstAlias<"mov $dst, $src", (ORRWrs GPR32:$dst, WZR, GPR32:$src, 0), 2>;
1991def : InstAlias<"mov $dst, $src", (ORRXrs GPR64:$dst, XZR, GPR64:$src, 0), 2>;
1992
1993def : InstAlias<"mvn $Wd, $Wm", (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, 0), 3>;
1994def : InstAlias<"mvn $Xd, $Xm", (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, 0), 3>;
1995
1996def : InstAlias<"mvn $Wd, $Wm$sh",
1997                (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, logical_shift32:$sh), 2>;
1998def : InstAlias<"mvn $Xd, $Xm$sh",
1999                (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, logical_shift64:$sh), 2>;
2000
2001def : InstAlias<"tst $src1, $src2",
2002                (ANDSWri WZR, GPR32:$src1, logical_imm32:$src2), 2>;
2003def : InstAlias<"tst $src1, $src2",
2004                (ANDSXri XZR, GPR64:$src1, logical_imm64:$src2), 2>;
2005
2006def : InstAlias<"tst $src1, $src2",
2007                        (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, 0), 3>;
2008def : InstAlias<"tst $src1, $src2",
2009                        (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, 0), 3>;
2010
2011def : InstAlias<"tst $src1, $src2$sh",
2012               (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, logical_shift32:$sh), 2>;
2013def : InstAlias<"tst $src1, $src2$sh",
2014               (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, logical_shift64:$sh), 2>;
2015
2016
2017def : Pat<(not GPR32:$Wm), (ORNWrr WZR, GPR32:$Wm)>;
2018def : Pat<(not GPR64:$Xm), (ORNXrr XZR, GPR64:$Xm)>;
2019
2020
2021//===----------------------------------------------------------------------===//
2022// One operand data processing instructions.
2023//===----------------------------------------------------------------------===//
2024
2025defm CLS    : OneOperandData<0b101, "cls">;
2026defm CLZ    : OneOperandData<0b100, "clz", ctlz>;
2027defm RBIT   : OneOperandData<0b000, "rbit", bitreverse>;
2028
2029def  REV16Wr : OneWRegData<0b001, "rev16",
2030                                  UnOpFrag<(rotr (bswap node:$LHS), (i64 16))>>;
2031def  REV16Xr : OneXRegData<0b001, "rev16", null_frag>;
2032
2033def : Pat<(cttz GPR32:$Rn),
2034          (CLZWr (RBITWr GPR32:$Rn))>;
2035def : Pat<(cttz GPR64:$Rn),
2036          (CLZXr (RBITXr GPR64:$Rn))>;
2037def : Pat<(ctlz (or (shl (xor (sra GPR32:$Rn, (i64 31)), GPR32:$Rn), (i64 1)),
2038                (i32 1))),
2039          (CLSWr GPR32:$Rn)>;
2040def : Pat<(ctlz (or (shl (xor (sra GPR64:$Rn, (i64 63)), GPR64:$Rn), (i64 1)),
2041                (i64 1))),
2042          (CLSXr GPR64:$Rn)>;
2043def : Pat<(int_aarch64_cls GPR32:$Rn), (CLSWr GPR32:$Rn)>;
2044def : Pat<(int_aarch64_cls64 GPR64:$Rm), (EXTRACT_SUBREG (CLSXr GPR64:$Rm), sub_32)>;
2045
// Unlike the other one operand instructions, the instructions with the "rev"
// mnemonic do *not* just differ in the size bit; they actually use different
// opcode bits for the different sizes.
def REVWr   : OneWRegData<0b010, "rev", bswap>;
def REVXr   : OneXRegData<0b011, "rev", bswap>;
def REV32Xr : OneXRegData<0b010, "rev32",
                                 UnOpFrag<(rotr (bswap node:$LHS), (i64 32))>>;

def : InstAlias<"rev64 $Rd, $Rn", (REVXr GPR64:$Rd, GPR64:$Rn), 0>;

// The bswap commutes with the rotr, so we want a pattern for both possible
// orders.
def : Pat<(bswap (rotr GPR32:$Rn, (i64 16))), (REV16Wr GPR32:$Rn)>;
def : Pat<(bswap (rotr GPR64:$Rn, (i64 32))), (REV32Xr GPR64:$Rn)>;
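// E.g. for 32-bit bytes [b3 b2 b1 b0], both orders yield [b2 b3 b0 b1], i.e.
// a byte swap within each 16-bit halfword, which is exactly rev16.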

// Match (srl (bswap x), C) -> revC if the upper bswap bits are known zero.
def : Pat<(srl (bswap top16Zero:$Rn), (i64 16)), (REV16Wr GPR32:$Rn)>;
def : Pat<(srl (bswap top32Zero:$Rn), (i64 32)), (REV32Xr GPR64:$Rn)>;

//===----------------------------------------------------------------------===//
// Bitfield immediate extraction instruction.
//===----------------------------------------------------------------------===//
let hasSideEffects = 0 in
defm EXTR : ExtractImm<"extr">;
def : InstAlias<"ror $dst, $src, $shift",
            (EXTRWrri GPR32:$dst, GPR32:$src, GPR32:$src, imm0_31:$shift)>;
def : InstAlias<"ror $dst, $src, $shift",
            (EXTRXrri GPR64:$dst, GPR64:$src, GPR64:$src, imm0_63:$shift)>;

def : Pat<(rotr GPR32:$Rn, (i64 imm0_31:$imm)),
          (EXTRWrri GPR32:$Rn, GPR32:$Rn, imm0_31:$imm)>;
def : Pat<(rotr GPR64:$Rn, (i64 imm0_63:$imm)),
          (EXTRXrri GPR64:$Rn, GPR64:$Rn, imm0_63:$imm)>;

//===----------------------------------------------------------------------===//
// Other bitfield immediate instructions.
//===----------------------------------------------------------------------===//
let hasSideEffects = 0 in {
defm BFM  : BitfieldImmWith2RegArgs<0b01, "bfm">;
defm SBFM : BitfieldImm<0b00, "sbfm">;
defm UBFM : BitfieldImm<0b10, "ubfm">;
}

def i32shift_a : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = (32 - N->getZExtValue()) & 0x1f;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

def i32shift_b : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 31 - N->getZExtValue();
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(7, 31 - shift_amt)
def i32shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 31 - N->getZExtValue();
  enc = enc > 7 ? 7 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(15, 31 - shift_amt)
def i32shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 31 - N->getZExtValue();
  enc = enc > 15 ? 15 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

def i64shift_a : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = (64 - N->getZExtValue()) & 0x3f;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

def i64shift_b : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(7, 63 - shift_amt)
def i64shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  enc = enc > 7 ? 7 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(15, 63 - shift_amt)
def i64shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  enc = enc > 15 ? 15 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(31, 63 - shift_amt)
def i64shift_sext_i32 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  enc = enc > 31 ? 31 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

def : Pat<(shl GPR32:$Rn, (i64 imm0_31:$imm)),
          (UBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
                              (i64 (i32shift_b imm0_31:$imm)))>;
def : Pat<(shl GPR64:$Rn, (i64 imm0_63:$imm)),
          (UBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
                              (i64 (i64shift_b imm0_63:$imm)))>;
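// E.g. "shl" of a W register by 4 becomes "UBFMWri $Rn, 28, 27"
// (immr = (32 - 4) & 0x1f = 28, imms = 31 - 4 = 27), which the printer shows
// as "lsl $Rd, $Rn, #4".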

let AddedComplexity = 10 in {
def : Pat<(sra GPR32:$Rn, (i64 imm0_31:$imm)),
          (SBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
def : Pat<(sra GPR64:$Rn, (i64 imm0_63:$imm)),
          (SBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
}

def : InstAlias<"asr $dst, $src, $shift",
                (SBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
def : InstAlias<"asr $dst, $src, $shift",
                (SBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
def : InstAlias<"sxtb $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
def : InstAlias<"sxtb $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
def : InstAlias<"sxth $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
def : InstAlias<"sxth $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
def : InstAlias<"sxtw $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;

def : Pat<(srl GPR32:$Rn, (i64 imm0_31:$imm)),
          (UBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
def : Pat<(srl GPR64:$Rn, (i64 imm0_63:$imm)),
          (UBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;

def : InstAlias<"lsr $dst, $src, $shift",
                (UBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
def : InstAlias<"lsr $dst, $src, $shift",
                (UBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
def : InstAlias<"uxtb $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
def : InstAlias<"uxtb $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
def : InstAlias<"uxth $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
def : InstAlias<"uxth $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
def : InstAlias<"uxtw $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;

//===----------------------------------------------------------------------===//
// Conditional comparison instructions.
//===----------------------------------------------------------------------===//
defm CCMN : CondComparison<0, "ccmn", AArch64ccmn>;
defm CCMP : CondComparison<1, "ccmp", AArch64ccmp>;

//===----------------------------------------------------------------------===//
// Conditional select instructions.
//===----------------------------------------------------------------------===//
defm CSEL  : CondSelect<0, 0b00, "csel">;

def inc : PatFrag<(ops node:$in), (add node:$in, 1)>;
defm CSINC : CondSelectOp<0, 0b01, "csinc", inc>;
defm CSINV : CondSelectOp<1, 0b00, "csinv", not>;
defm CSNEG : CondSelectOp<1, 0b01, "csneg", ineg>;

def : Pat<(AArch64csinv GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINVWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csinv GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINVXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csneg GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSNEGWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csneg GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSNEGXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csinc GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINCWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csinc GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINCXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;

def : Pat<(AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV),
          (CSINCWr WZR, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i64 0), (i64 1), (i32 imm:$cc), NZCV),
          (CSINCXr XZR, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR32:$tval, (i32 1), (i32 imm:$cc), NZCV),
          (CSINCWr GPR32:$tval, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR64:$tval, (i64 1), (i32 imm:$cc), NZCV),
          (CSINCXr GPR64:$tval, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i32 1), GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINCWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
def : Pat<(AArch64csel (i64 1), GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINCXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;
def : Pat<(AArch64csel (i32 0), (i32 -1), (i32 imm:$cc), NZCV),
          (CSINVWr WZR, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i64 0), (i64 -1), (i32 imm:$cc), NZCV),
          (CSINVXr XZR, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR32:$tval, (i32 -1), (i32 imm:$cc), NZCV),
          (CSINVWr GPR32:$tval, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR64:$tval, (i64 -1), (i32 imm:$cc), NZCV),
          (CSINVXr GPR64:$tval, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i32 -1), GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINVWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
def : Pat<(AArch64csel (i64 -1), GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINVXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;

def : Pat<(add GPR32:$val, (AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV)),
          (CSINCWr GPR32:$val, GPR32:$val, (i32 imm:$cc))>;
def : Pat<(add GPR64:$val, (zext (AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV))),
          (CSINCXr GPR64:$val, GPR64:$val, (i32 imm:$cc))>;

// The inverse of the condition code from the alias instruction is what is used
// in the aliased instruction. The parser already inverts the condition code
// for these aliases.
def : InstAlias<"cset $dst, $cc",
                (CSINCWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
def : InstAlias<"cset $dst, $cc",
                (CSINCXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;

def : InstAlias<"csetm $dst, $cc",
                (CSINVWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
def : InstAlias<"csetm $dst, $cc",
                (CSINVXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;

def : InstAlias<"cinc $dst, $src, $cc",
                (CSINCWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
def : InstAlias<"cinc $dst, $src, $cc",
                (CSINCXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;

def : InstAlias<"cinv $dst, $src, $cc",
                (CSINVWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
def : InstAlias<"cinv $dst, $src, $cc",
                (CSINVXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;

def : InstAlias<"cneg $dst, $src, $cc",
                (CSNEGWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
def : InstAlias<"cneg $dst, $src, $cc",
                (CSNEGXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
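// For example, "cset w0, eq" assembles to "csinc w0, wzr, wzr, ne", and
// "cneg x0, x1, lt" assembles to "csneg x0, x1, x1, ge".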

//===----------------------------------------------------------------------===//
// PC-relative instructions.
//===----------------------------------------------------------------------===//
let isReMaterializable = 1 in {
let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
def ADR  : ADRI<0, "adr", adrlabel,
                [(set GPR64:$Xd, (AArch64adr tglobaladdr:$label))]>;
} // hasSideEffects = 0

def ADRP : ADRI<1, "adrp", adrplabel,
                [(set GPR64:$Xd, (AArch64adrp tglobaladdr:$label))]>;
} // isReMaterializable = 1

// page address of a constant pool entry, block address
def : Pat<(AArch64adr tconstpool:$cp), (ADR tconstpool:$cp)>;
def : Pat<(AArch64adr tblockaddress:$cp), (ADR tblockaddress:$cp)>;
def : Pat<(AArch64adr texternalsym:$sym), (ADR texternalsym:$sym)>;
def : Pat<(AArch64adr tjumptable:$sym), (ADR tjumptable:$sym)>;
def : Pat<(AArch64adrp tconstpool:$cp), (ADRP tconstpool:$cp)>;
def : Pat<(AArch64adrp tblockaddress:$cp), (ADRP tblockaddress:$cp)>;
def : Pat<(AArch64adrp texternalsym:$sym), (ADRP texternalsym:$sym)>;
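// ADRP yields the 4KiB page address of the symbol; the low 12 bits are
// typically added separately, e.g.:
//   adrp x0, sym
//   add  x0, x0, :lo12:sym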

//===----------------------------------------------------------------------===//
// Unconditional branch (register) instructions.
//===----------------------------------------------------------------------===//

let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
def RET  : BranchReg<0b0010, "ret", []>;
def DRPS : SpecialReturn<0b0101, "drps">;
def ERET : SpecialReturn<0b0100, "eret">;
} // isReturn = 1, isTerminator = 1, isBarrier = 1

// Default to the LR register.
def : InstAlias<"ret", (RET LR)>;

let isCall = 1, Defs = [LR], Uses = [SP] in {
  def BLR : BranchReg<0b0001, "blr", []>;
  def BLRNoIP : Pseudo<(outs), (ins GPR64noip:$Rn), []>,
                Sched<[WriteBrReg]>,
                PseudoInstExpansion<(BLR GPR64:$Rn)>;
  def BLR_RVMARKER : Pseudo<(outs), (ins variable_ops), []>,
                     Sched<[WriteBrReg]>;
} // isCall

def : Pat<(AArch64call GPR64:$Rn),
          (BLR GPR64:$Rn)>,
      Requires<[NoSLSBLRMitigation]>;
def : Pat<(AArch64call GPR64noip:$Rn),
          (BLRNoIP GPR64noip:$Rn)>,
      Requires<[SLSBLRMitigation]>;

def : Pat<(AArch64call_rvmarker GPR64:$Rn),
          (BLR_RVMARKER GPR64:$Rn)>,
      Requires<[NoSLSBLRMitigation]>;

let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
def BR  : BranchReg<0b0000, "br", [(brind GPR64:$Rn)]>;
} // isBranch, isTerminator, isBarrier, isIndirectBranch

// Create a separate pseudo-instruction for codegen to use so that we don't
// flag lr as used in every function. It'll be restored before the RET by the
// epilogue if it's legitimately used.
def RET_ReallyLR : Pseudo<(outs), (ins), [(AArch64retflag)]>,
                   Sched<[WriteBrReg]> {
  let isTerminator = 1;
  let isBarrier = 1;
  let isReturn = 1;
}

// This is a directive-like pseudo-instruction. The purpose is to insert an
// R_AARCH64_TLSDESC_CALL relocation at the offset of the following instruction
// (which in the usual case is a BLR).
let hasSideEffects = 1 in
def TLSDESCCALL : Pseudo<(outs), (ins i64imm:$sym), []>, Sched<[]> {
  let AsmString = ".tlsdesccall $sym";
}

// Pseudo instruction to tell the streamer to emit a 'B' character into the
// augmentation string.
def EMITBKEY : Pseudo<(outs), (ins), []>, Sched<[]> {}

// FIXME: maybe the scratch register used shouldn't be fixed to X1?
// FIXME: can hasSideEffects be dropped?
let isCall = 1, Defs = [LR, X0, X1], hasSideEffects = 1,
    isCodeGenOnly = 1 in
def TLSDESC_CALLSEQ
    : Pseudo<(outs), (ins i64imm:$sym),
             [(AArch64tlsdesc_callseq tglobaltlsaddr:$sym)]>,
      Sched<[WriteI, WriteLD, WriteI, WriteBrReg]>;
def : Pat<(AArch64tlsdesc_callseq texternalsym:$sym),
          (TLSDESC_CALLSEQ texternalsym:$sym)>;
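// At the assembly level the modelled sequence is roughly:
//   adrp x0, :tlsdesc:var
//   ldr  x1, [x0, :tlsdesc_lo12:var]
//   add  x0, x0, :tlsdesc_lo12:var
//   .tlsdesccall var
//   blr  x1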

//===----------------------------------------------------------------------===//
// Conditional branch (immediate) instruction.
//===----------------------------------------------------------------------===//
def Bcc : BranchCond;

//===----------------------------------------------------------------------===//
// Compare-and-branch instructions.
//===----------------------------------------------------------------------===//
defm CBZ  : CmpBranch<0, "cbz", AArch64cbz>;
defm CBNZ : CmpBranch<1, "cbnz", AArch64cbnz>;

//===----------------------------------------------------------------------===//
// Test-bit-and-branch instructions.
//===----------------------------------------------------------------------===//
defm TBZ  : TestBranch<0, "tbz", AArch64tbz>;
defm TBNZ : TestBranch<1, "tbnz", AArch64tbnz>;

//===----------------------------------------------------------------------===//
// Unconditional branch (immediate) instructions.
//===----------------------------------------------------------------------===//
let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
def B  : BranchImm<0, "b", [(br bb:$addr)]>;
} // isBranch, isTerminator, isBarrier

let isCall = 1, Defs = [LR], Uses = [SP] in {
def BL : CallImm<1, "bl", [(AArch64call tglobaladdr:$addr)]>;
} // isCall
def : Pat<(AArch64call texternalsym:$func), (BL texternalsym:$func)>;

//===----------------------------------------------------------------------===//
// Exception generation instructions.
//===----------------------------------------------------------------------===//
let isTrap = 1 in {
def BRK   : ExceptionGeneration<0b001, 0b00, "brk">;
}
def DCPS1 : ExceptionGeneration<0b101, 0b01, "dcps1">;
def DCPS2 : ExceptionGeneration<0b101, 0b10, "dcps2">;
def DCPS3 : ExceptionGeneration<0b101, 0b11, "dcps3">, Requires<[HasEL3]>;
def HLT   : ExceptionGeneration<0b010, 0b00, "hlt">;
def HVC   : ExceptionGeneration<0b000, 0b10, "hvc">;
def SMC   : ExceptionGeneration<0b000, 0b11, "smc">, Requires<[HasEL3]>;
def SVC   : ExceptionGeneration<0b000, 0b01, "svc">;

// DCPSn defaults to an immediate operand of zero if unspecified.
def : InstAlias<"dcps1", (DCPS1 0)>;
def : InstAlias<"dcps2", (DCPS2 0)>;
def : InstAlias<"dcps3", (DCPS3 0)>, Requires<[HasEL3]>;

def UDF : UDFType<0, "udf">;

//===----------------------------------------------------------------------===//
// Load instructions.
//===----------------------------------------------------------------------===//

// Pair (indexed, offset)
defm LDPW : LoadPairOffset<0b00, 0, GPR32z, simm7s4, "ldp">;
defm LDPX : LoadPairOffset<0b10, 0, GPR64z, simm7s8, "ldp">;
defm LDPS : LoadPairOffset<0b00, 1, FPR32Op, simm7s4, "ldp">;
defm LDPD : LoadPairOffset<0b01, 1, FPR64Op, simm7s8, "ldp">;
defm LDPQ : LoadPairOffset<0b10, 1, FPR128Op, simm7s16, "ldp">;

defm LDPSW : LoadPairOffset<0b01, 0, GPR64z, simm7s4, "ldpsw">;

// Pair (pre-indexed)
def LDPWpre : LoadPairPreIdx<0b00, 0, GPR32z, simm7s4, "ldp">;
def LDPXpre : LoadPairPreIdx<0b10, 0, GPR64z, simm7s8, "ldp">;
def LDPSpre : LoadPairPreIdx<0b00, 1, FPR32Op, simm7s4, "ldp">;
def LDPDpre : LoadPairPreIdx<0b01, 1, FPR64Op, simm7s8, "ldp">;
def LDPQpre : LoadPairPreIdx<0b10, 1, FPR128Op, simm7s16, "ldp">;

def LDPSWpre : LoadPairPreIdx<0b01, 0, GPR64z, simm7s4, "ldpsw">;

// Pair (post-indexed)
def LDPWpost : LoadPairPostIdx<0b00, 0, GPR32z, simm7s4, "ldp">;
def LDPXpost : LoadPairPostIdx<0b10, 0, GPR64z, simm7s8, "ldp">;
def LDPSpost : LoadPairPostIdx<0b00, 1, FPR32Op, simm7s4, "ldp">;
def LDPDpost : LoadPairPostIdx<0b01, 1, FPR64Op, simm7s8, "ldp">;
def LDPQpost : LoadPairPostIdx<0b10, 1, FPR128Op, simm7s16, "ldp">;

def LDPSWpost : LoadPairPostIdx<0b01, 0, GPR64z, simm7s4, "ldpsw">;
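// Post-indexed forms write the updated address back to the base register
// after the access, e.g. "ldp x0, x1, [sp], #16".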


// Pair (no allocate)
defm LDNPW : LoadPairNoAlloc<0b00, 0, GPR32z, simm7s4, "ldnp">;
defm LDNPX : LoadPairNoAlloc<0b10, 0, GPR64z, simm7s8, "ldnp">;
defm LDNPS : LoadPairNoAlloc<0b00, 1, FPR32Op, simm7s4, "ldnp">;
defm LDNPD : LoadPairNoAlloc<0b01, 1, FPR64Op, simm7s8, "ldnp">;
defm LDNPQ : LoadPairNoAlloc<0b10, 1, FPR128Op, simm7s16, "ldnp">;

def : Pat<(AArch64ldp (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
          (LDPXi GPR64sp:$Rn, simm7s8:$offset)>;

//---
// (register offset)
//---

// Integer
defm LDRBB : Load8RO<0b00,  0, 0b01, GPR32, "ldrb", i32, zextloadi8>;
defm LDRHH : Load16RO<0b01, 0, 0b01, GPR32, "ldrh", i32, zextloadi16>;
defm LDRW  : Load32RO<0b10, 0, 0b01, GPR32, "ldr", i32, load>;
defm LDRX  : Load64RO<0b11, 0, 0b01, GPR64, "ldr", i64, load>;

// Floating-point
defm LDRB : Load8RO<0b00,   1, 0b01, FPR8Op,   "ldr", untyped, load>;
defm LDRH : Load16RO<0b01,  1, 0b01, FPR16Op,  "ldr", f16, load>;
defm LDRS : Load32RO<0b10,  1, 0b01, FPR32Op,  "ldr", f32, load>;
defm LDRD : Load64RO<0b11,  1, 0b01, FPR64Op,  "ldr", f64, load>;
defm LDRQ : Load128RO<0b00, 1, 0b11, FPR128Op, "ldr", f128, load>;

// Load sign-extended half-word
defm LDRSHW : Load16RO<0b01, 0, 0b11, GPR32, "ldrsh", i32, sextloadi16>;
defm LDRSHX : Load16RO<0b01, 0, 0b10, GPR64, "ldrsh", i64, sextloadi16>;

// Load sign-extended byte
defm LDRSBW : Load8RO<0b00, 0, 0b11, GPR32, "ldrsb", i32, sextloadi8>;
defm LDRSBX : Load8RO<0b00, 0, 0b10, GPR64, "ldrsb", i64, sextloadi8>;

// Load sign-extended word
defm LDRSW  : Load32RO<0b10, 0, 0b10, GPR64, "ldrsw", i64, sextloadi32>;

// Pre-fetch.
defm PRFM : PrefetchRO<0b11, 0, 0b10, "prfm">;

// For regular loads, we do not have any alignment requirement.
// Thus, it is safe to directly map the vector loads with interesting
// addressing modes.
// FIXME: We could do the same for bitconvert to floating point vectors.
multiclass ScalToVecROLoadPat<ROAddrMode ro, SDPatternOperator loadop,
                              ValueType ScalTy, ValueType VecTy,
                              Instruction LOADW, Instruction LOADX,
                              SubRegIndex sub> {
  def : Pat<(VecTy (scalar_to_vector (ScalTy
              (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset))))),
            (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
                           (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset),
                           sub)>;

  def : Pat<(VecTy (scalar_to_vector (ScalTy
              (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset))))),
            (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
                           (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset),
                           sub)>;
}
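// These patterns load the scalar straight into lane 0 of an IMPLICIT_DEF
// vector via INSERT_SUBREG, so the value never round-trips through a GPR.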

let AddedComplexity = 10 in {
defm : ScalToVecROLoadPat<ro8,  extloadi8,  i32, v8i8,  LDRBroW, LDRBroX, bsub>;
defm : ScalToVecROLoadPat<ro8,  extloadi8,  i32, v16i8, LDRBroW, LDRBroX, bsub>;

defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v4i16, LDRHroW, LDRHroX, hsub>;
defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v8i16, LDRHroW, LDRHroX, hsub>;

defm : ScalToVecROLoadPat<ro16, load,       i32, v4f16, LDRHroW, LDRHroX, hsub>;
defm : ScalToVecROLoadPat<ro16, load,       i32, v8f16, LDRHroW, LDRHroX, hsub>;

defm : ScalToVecROLoadPat<ro32, load,       i32, v2i32, LDRSroW, LDRSroX, ssub>;
defm : ScalToVecROLoadPat<ro32, load,       i32, v4i32, LDRSroW, LDRSroX, ssub>;

defm : ScalToVecROLoadPat<ro32, load,       f32, v2f32, LDRSroW, LDRSroX, ssub>;
defm : ScalToVecROLoadPat<ro32, load,       f32, v4f32, LDRSroW, LDRSroX, ssub>;

defm : ScalToVecROLoadPat<ro64, load,       i64, v2i64, LDRDroW, LDRDroX, dsub>;

defm : ScalToVecROLoadPat<ro64, load,       f64, v2f64, LDRDroW, LDRDroX, dsub>;


def : Pat <(v1i64 (scalar_to_vector (i64
                      (load (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
                                           ro_Wextend64:$extend))))),
           (LDRDroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;

def : Pat <(v1i64 (scalar_to_vector (i64
                      (load (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
                                           ro_Xextend64:$extend))))),
           (LDRDroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
}

// Match all 64-bit-wide loads whose type is compatible with FPR64
multiclass VecROLoadPat<ROAddrMode ro, ValueType VecTy,
                        Instruction LOADW, Instruction LOADX> {

  def : Pat<(VecTy (load (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
            (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(VecTy (load (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 10 in {
let Predicates = [IsLE] in {
  // We must use LD1 to perform vector loads in big-endian.
  defm : VecROLoadPat<ro64, v2i32, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v2f32, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v8i8,  LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v4i16, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v4f16, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v4bf16, LDRDroW, LDRDroX>;
}

defm : VecROLoadPat<ro64, v1i64,  LDRDroW, LDRDroX>;
defm : VecROLoadPat<ro64, v1f64,  LDRDroW, LDRDroX>;

// Match all 128-bit-wide loads whose type is compatible with FPR128
let Predicates = [IsLE] in {
  // We must use LD1 to perform vector loads in big-endian.
  defm : VecROLoadPat<ro128, v2i64,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v2f64,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v4i32,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v4f32,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v8i16,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v8f16,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v8bf16,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v16i8,  LDRQroW, LDRQroX>;
}
} // AddedComplexity = 10

// zextload -> i64
multiclass ExtLoadTo64ROPat<ROAddrMode ro, SDPatternOperator loadop,
                            Instruction INSTW, Instruction INSTX> {
  def : Pat<(i64 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
            (SUBREG_TO_REG (i64 0),
                           (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
                           sub_32)>;

  def : Pat<(i64 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (SUBREG_TO_REG (i64 0),
                           (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
                           sub_32)>;
}
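// The zero-extension comes for free here: a load into a W register implicitly
// clears bits [63:32] of the X register, so SUBREG_TO_REG with a zero
// immediate is all that is needed to produce the i64 result.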

let AddedComplexity = 10 in {
  defm : ExtLoadTo64ROPat<ro8,  zextloadi8,  LDRBBroW, LDRBBroX>;
  defm : ExtLoadTo64ROPat<ro16, zextloadi16, LDRHHroW, LDRHHroX>;
  defm : ExtLoadTo64ROPat<ro32, zextloadi32, LDRWroW,  LDRWroX>;

  // zextloadi1 -> zextloadi8
  defm : ExtLoadTo64ROPat<ro8,  zextloadi1,  LDRBBroW, LDRBBroX>;

  // extload -> zextload
  defm : ExtLoadTo64ROPat<ro8,  extloadi8,   LDRBBroW, LDRBBroX>;
  defm : ExtLoadTo64ROPat<ro16, extloadi16,  LDRHHroW, LDRHHroX>;
  defm : ExtLoadTo64ROPat<ro32, extloadi32,  LDRWroW,  LDRWroX>;

  // extloadi1 -> zextloadi8
  defm : ExtLoadTo64ROPat<ro8,  extloadi1,   LDRBBroW, LDRBBroX>;
}


// extload/zextload -> i32
multiclass ExtLoadTo32ROPat<ROAddrMode ro, SDPatternOperator loadop,
                            Instruction INSTW, Instruction INSTX> {
  def : Pat<(i32 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
            (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(i32 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;

}

let AddedComplexity = 10 in {
  // extload -> zextload
  defm : ExtLoadTo32ROPat<ro8,  extloadi8,   LDRBBroW, LDRBBroX>;
  defm : ExtLoadTo32ROPat<ro16, extloadi16,  LDRHHroW, LDRHHroX>;
  defm : ExtLoadTo32ROPat<ro32, extloadi32,  LDRWroW,  LDRWroX>;

  // zextloadi1 -> zextloadi8
  defm : ExtLoadTo32ROPat<ro8, zextloadi1, LDRBBroW, LDRBBroX>;
}

//---
// (unsigned immediate)
//---
defm LDRX : LoadUI<0b11, 0, 0b01, GPR64z, uimm12s8, "ldr",
                   [(set GPR64z:$Rt,
                         (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
defm LDRW : LoadUI<0b10, 0, 0b01, GPR32z, uimm12s4, "ldr",
                   [(set GPR32z:$Rt,
                         (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
defm LDRB : LoadUI<0b00, 1, 0b01, FPR8Op, uimm12s1, "ldr",
                   [(set FPR8Op:$Rt,
                         (load (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)))]>;
defm LDRH : LoadUI<0b01, 1, 0b01, FPR16Op, uimm12s2, "ldr",
                   [(set (f16 FPR16Op:$Rt),
                         (load (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)))]>;
defm LDRS : LoadUI<0b10, 1, 0b01, FPR32Op, uimm12s4, "ldr",
                   [(set (f32 FPR32Op:$Rt),
                         (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
defm LDRD : LoadUI<0b11, 1, 0b01, FPR64Op, uimm12s8, "ldr",
                   [(set (f64 FPR64Op:$Rt),
                         (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
defm LDRQ : LoadUI<0b00, 1, 0b11, FPR128Op, uimm12s16, "ldr",
                 [(set (f128 FPR128Op:$Rt),
                       (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)))]>;

// bf16 load pattern
def : Pat <(bf16 (load (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;

// For regular loads, we do not have any alignment requirement.
// Thus, it is safe to directly map the vector loads with interesting
// addressing modes.
// FIXME: We could do the same for bitconvert to floating point vectors.
def : Pat <(v8i8 (scalar_to_vector (i32
               (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
def : Pat <(v16i8 (scalar_to_vector (i32
               (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
def : Pat <(v4i16 (scalar_to_vector (i32
               (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
def : Pat <(v8i16 (scalar_to_vector (i32
               (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
def : Pat <(v2i32 (scalar_to_vector (i32
               (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
           (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
def : Pat <(v4i32 (scalar_to_vector (i32
               (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
           (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
def : Pat <(v1i64 (scalar_to_vector (i64
               (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
           (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat <(v2i64 (scalar_to_vector (i64
               (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
           (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
                          (LDRDui GPR64sp:$Rn, uimm12s8:$offset), dsub)>;

// Match all 64-bit-wide loads whose type is compatible with FPR64
let Predicates = [IsLE] in {
  // We must use LD1 to perform vector loads in big-endian.
  def : Pat<(v2f32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v8i8 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v4i16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v2i32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v4f16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v4bf16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
}
def : Pat<(v1f64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
          (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat<(v1i64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
          (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;

// Match all 128-bit-wide loads whose type is compatible with FPR128
let Predicates = [IsLE] in {
  // We must use LD1 to perform vector loads in big-endian.
  def : Pat<(v4f32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v2f64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v16i8 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v8i16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v4i32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v2i64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v8f16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v8bf16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
}
def : Pat<(f128  (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
          (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;

defm LDRHH : LoadUI<0b01, 0, 0b01, GPR32, uimm12s2, "ldrh",
                    [(set GPR32:$Rt,
                          (zextloadi16 (am_indexed16 GPR64sp:$Rn,
                                                     uimm12s2:$offset)))]>;
defm LDRBB : LoadUI<0b00, 0, 0b01, GPR32, uimm12s1, "ldrb",
                    [(set GPR32:$Rt,
                          (zextloadi8 (am_indexed8 GPR64sp:$Rn,
                                                   uimm12s1:$offset)))]>;
// zextload -> i64
def : Pat<(i64 (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
def : Pat<(i64 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;

// zextloadi1 -> zextloadi8
def : Pat<(i32 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(i64 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;

// extload -> zextload
def : Pat<(i32 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
          (LDRHHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : Pat<(i32 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(i32 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(i64 (extloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
def : Pat<(i64 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;
def : Pat<(i64 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
def : Pat<(i64 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;

// load sign-extended half-word
defm LDRSHW : LoadUI<0b01, 0, 0b11, GPR32, uimm12s2, "ldrsh",
                     [(set GPR32:$Rt,
                           (sextloadi16 (am_indexed16 GPR64sp:$Rn,
                                                      uimm12s2:$offset)))]>;
defm LDRSHX : LoadUI<0b01, 0, 0b10, GPR64, uimm12s2, "ldrsh",
                     [(set GPR64:$Rt,
                           (sextloadi16 (am_indexed16 GPR64sp:$Rn,
                                                      uimm12s2:$offset)))]>;

// load sign-extended byte
defm LDRSBW : LoadUI<0b00, 0, 0b11, GPR32, uimm12s1, "ldrsb",
                     [(set GPR32:$Rt,
                           (sextloadi8 (am_indexed8 GPR64sp:$Rn,
                                                    uimm12s1:$offset)))]>;
defm LDRSBX : LoadUI<0b00, 0, 0b10, GPR64, uimm12s1, "ldrsb",
                     [(set GPR64:$Rt,
                           (sextloadi8 (am_indexed8 GPR64sp:$Rn,
                                                    uimm12s1:$offset)))]>;

// load sign-extended word
defm LDRSW  : LoadUI<0b10, 0, 0b10, GPR64, uimm12s4, "ldrsw",
                     [(set GPR64:$Rt,
                           (sextloadi32 (am_indexed32 GPR64sp:$Rn,
                                                      uimm12s4:$offset)))]>;

// load zero-extended word
def : Pat<(i64 (zextloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
      (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;

// Pre-fetch.
def PRFMui : PrefetchUI<0b11, 0, 0b10, "prfm",
                        [(AArch64Prefetch imm:$Rt,
                                        (am_indexed64 GPR64sp:$Rn,
                                                      uimm12s8:$offset))]>;

def : InstAlias<"prfm $Rt, [$Rn]", (PRFMui prfop:$Rt, GPR64sp:$Rn, 0)>;

//---
// (literal)

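// A PC-relative LDR (literal) encodes a 19-bit signed word offset, so it can
// only address 4-byte-aligned targets within +/-1MiB of the instruction;
// hence the alignment check below.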
def alignedglobal : PatLeaf<(iPTR iPTR:$label), [{
  if (auto *G = dyn_cast<GlobalAddressSDNode>(N)) {
    const DataLayout &DL = MF->getDataLayout();
    Align A = G->getGlobal()->getPointerAlignment(DL);
    return A >= 4 && G->getOffset() % 4 == 0;
  }
  if (auto *C = dyn_cast<ConstantPoolSDNode>(N))
    return C->getAlign() >= 4 && C->getOffset() % 4 == 0;
  return false;
}]>;

def LDRWl : LoadLiteral<0b00, 0, GPR32z, "ldr",
  [(set GPR32z:$Rt, (load (AArch64adr alignedglobal:$label)))]>;
def LDRXl : LoadLiteral<0b01, 0, GPR64z, "ldr",
  [(set GPR64z:$Rt, (load (AArch64adr alignedglobal:$label)))]>;
def LDRSl : LoadLiteral<0b00, 1, FPR32Op, "ldr",
  [(set (f32 FPR32Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
def LDRDl : LoadLiteral<0b01, 1, FPR64Op, "ldr",
  [(set (f64 FPR64Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
def LDRQl : LoadLiteral<0b10, 1, FPR128Op, "ldr",
  [(set (f128 FPR128Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;

// load sign-extended word
def LDRSWl : LoadLiteral<0b10, 0, GPR64z, "ldrsw",
  [(set GPR64z:$Rt, (sextloadi32 (AArch64adr alignedglobal:$label)))]>;

let AddedComplexity = 20 in {
def : Pat<(i64 (zextloadi32 (AArch64adr alignedglobal:$label))),
        (SUBREG_TO_REG (i64 0), (LDRWl $label), sub_32)>;
}

// prefetch
def PRFMl : PrefetchLiteral<0b11, 0, "prfm", []>;
//                   [(AArch64Prefetch imm:$Rt, tglobaladdr:$label)]>;

//---
// (unscaled immediate)
defm LDURX : LoadUnscaled<0b11, 0, 0b01, GPR64z, "ldur",
                    [(set GPR64z:$Rt,
                          (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURW : LoadUnscaled<0b10, 0, 0b01, GPR32z, "ldur",
                    [(set GPR32z:$Rt,
                          (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURB : LoadUnscaled<0b00, 1, 0b01, FPR8Op, "ldur",
                    [(set FPR8Op:$Rt,
                          (load (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURH : LoadUnscaled<0b01, 1, 0b01, FPR16Op, "ldur",
                    [(set (f16 FPR16Op:$Rt),
                          (load (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURS : LoadUnscaled<0b10, 1, 0b01, FPR32Op, "ldur",
                    [(set (f32 FPR32Op:$Rt),
                          (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURD : LoadUnscaled<0b11, 1, 0b01, FPR64Op, "ldur",
                    [(set (f64 FPR64Op:$Rt),
                          (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURQ : LoadUnscaled<0b00, 1, 0b11, FPR128Op, "ldur",
                    [(set (f128 FPR128Op:$Rt),
                          (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset)))]>;

defm LDURHH
    : LoadUnscaled<0b01, 0, 0b01, GPR32, "ldurh",
             [(set GPR32:$Rt,
                    (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURBB
    : LoadUnscaled<0b00, 0, 0b01, GPR32, "ldurb",
             [(set GPR32:$Rt,
                    (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;

// Match all 64-bit-wide loads whose type is compatible with FPR64
let Predicates = [IsLE] in {
  def : Pat<(v2f32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v2i32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4i16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v8i8 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4f16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
}
def : Pat<(v1f64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
          (LDURDi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(v1i64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
          (LDURDi GPR64sp:$Rn, simm9:$offset)>;

// Match all 128-bit-wide loads whose type is compatible with FPR128
let Predicates = [IsLE] in {
  def : Pat<(v2f64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v2i64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4f32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4i32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v8i16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v16i8 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v8f16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
}

// anyext -> zext
def : Pat<(i32 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
          (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i64 (extloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
// unscaled zext
def : Pat<(i32 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
          (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i64 (zextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;


//---
// LDR mnemonics fall back to LDUR for negative or unaligned offsets.

// Define new assembler match classes, as we want to match these only when
// they don't otherwise match the scaled addressing mode for LDR/STR. Don't
// associate a DiagnosticType either, as we want the diagnostic for the
// canonical form (the scaled operand) to take precedence.
class SImm9OffsetOperand<int Width> : AsmOperandClass {
  let Name = "SImm9OffsetFB" # Width;
  let PredicateMethod = "isSImm9OffsetFB<" # Width # ">";
  let RenderMethod = "addImmOperands";
}

def SImm9OffsetFB8Operand : SImm9OffsetOperand<8>;
def SImm9OffsetFB16Operand : SImm9OffsetOperand<16>;
def SImm9OffsetFB32Operand : SImm9OffsetOperand<32>;
def SImm9OffsetFB64Operand : SImm9OffsetOperand<64>;
def SImm9OffsetFB128Operand : SImm9OffsetOperand<128>;

def simm9_offset_fb8 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB8Operand;
}
def simm9_offset_fb16 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB16Operand;
}
def simm9_offset_fb32 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB32Operand;
}
def simm9_offset_fb64 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB64Operand;
}
def simm9_offset_fb128 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB128Operand;
}

def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURBi FPR8Op:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURHi FPR16Op:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURSi FPR32Op:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURDi FPR64Op:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
               (LDURQi FPR128Op:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;
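// For example, "ldr x0, [x1, #-8]" has no scaled LDR encoding (the offset is
// negative), so it matches the fall-back operand and assembles to
// "ldur x0, [x1, #-8]".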

// zextload -> i64
def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
  (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
  (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;

// load sign-extended half-word
defm LDURSHW
    : LoadUnscaled<0b01, 0, 0b11, GPR32, "ldursh",
               [(set GPR32:$Rt,
                    (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURSHX
    : LoadUnscaled<0b01, 0, 0b10, GPR64, "ldursh",
              [(set GPR64:$Rt,
                    (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;

// load sign-extended byte
defm LDURSBW
    : LoadUnscaled<0b00, 0, 0b11, GPR32, "ldursb",
                [(set GPR32:$Rt,
                      (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURSBX
    : LoadUnscaled<0b00, 0, 0b10, GPR64, "ldursb",
                [(set GPR64:$Rt,
                      (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;

// load sign-extended word
defm LDURSW
    : LoadUnscaled<0b10, 0, 0b10, GPR64, "ldursw",
              [(set GPR64:$Rt,
                    (sextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;

// Zero- and sign-extending aliases from generic LDR* mnemonics to LDUR*.
def : InstAlias<"ldrb $Rt, [$Rn, $offset]",
                (LDURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldrh $Rt, [$Rn, $offset]",
                (LDURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
                (LDURSBWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
                (LDURSBXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
                (LDURSHWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
                (LDURSHXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldrsw $Rt, [$Rn, $offset]",
                (LDURSWi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;

// Pre-fetch.
defm PRFUM : PrefetchUnscaled<0b11, 0, 0b10, "prfum",
                  [(AArch64Prefetch imm:$Rt,
                                  (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;

//---
// (unscaled immediate, unprivileged)
defm LDTRX : LoadUnprivileged<0b11, 0, 0b01, GPR64, "ldtr">;
defm LDTRW : LoadUnprivileged<0b10, 0, 0b01, GPR32, "ldtr">;

defm LDTRH : LoadUnprivileged<0b01, 0, 0b01, GPR32, "ldtrh">;
defm LDTRB : LoadUnprivileged<0b00, 0, 0b01, GPR32, "ldtrb">;

// load sign-extended half-word
defm LDTRSHW : LoadUnprivileged<0b01, 0, 0b11, GPR32, "ldtrsh">;
defm LDTRSHX : LoadUnprivileged<0b01, 0, 0b10, GPR64, "ldtrsh">;

// load sign-extended byte
defm LDTRSBW : LoadUnprivileged<0b00, 0, 0b11, GPR32, "ldtrsb">;
defm LDTRSBX : LoadUnprivileged<0b00, 0, 0b10, GPR64, "ldtrsb">;

// load sign-extended word
defm LDTRSW  : LoadUnprivileged<0b10, 0, 0b10, GPR64, "ldtrsw">;

//---
// (immediate pre-indexed)
def LDRWpre : LoadPreIdx<0b10, 0, 0b01, GPR32z, "ldr">;
def LDRXpre : LoadPreIdx<0b11, 0, 0b01, GPR64z, "ldr">;
def LDRBpre : LoadPreIdx<0b00, 1, 0b01, FPR8Op,  "ldr">;
def LDRHpre : LoadPreIdx<0b01, 1, 0b01, FPR16Op, "ldr">;
def LDRSpre : LoadPreIdx<0b10, 1, 0b01, FPR32Op, "ldr">;
def LDRDpre : LoadPreIdx<0b11, 1, 0b01, FPR64Op, "ldr">;
def LDRQpre : LoadPreIdx<0b00, 1, 0b11, FPR128Op, "ldr">;

// load sign-extended half-word
def LDRSHWpre : LoadPreIdx<0b01, 0, 0b11, GPR32z, "ldrsh">;
def LDRSHXpre : LoadPreIdx<0b01, 0, 0b10, GPR64z, "ldrsh">;

// load sign-extended byte
def LDRSBWpre : LoadPreIdx<0b00, 0, 0b11, GPR32z, "ldrsb">;
def LDRSBXpre : LoadPreIdx<0b00, 0, 0b10, GPR64z, "ldrsb">;

// load zero-extended byte
def LDRBBpre : LoadPreIdx<0b00, 0, 0b01, GPR32z, "ldrb">;
def LDRHHpre : LoadPreIdx<0b01, 0, 0b01, GPR32z, "ldrh">;

// load sign-extended word
def LDRSWpre : LoadPreIdx<0b10, 0, 0b10, GPR64z, "ldrsw">;
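// Pre-indexed forms update the base register before the access, e.g.
// "ldr x0, [sp, #16]!".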

//---
// (immediate post-indexed)
def LDRWpost : LoadPostIdx<0b10, 0, 0b01, GPR32z, "ldr">;
def LDRXpost : LoadPostIdx<0b11, 0, 0b01, GPR64z, "ldr">;
def LDRBpost : LoadPostIdx<0b00, 1, 0b01, FPR8Op,  "ldr">;
def LDRHpost : LoadPostIdx<0b01, 1, 0b01, FPR16Op, "ldr">;
def LDRSpost : LoadPostIdx<0b10, 1, 0b01, FPR32Op, "ldr">;
def LDRDpost : LoadPostIdx<0b11, 1, 0b01, FPR64Op, "ldr">;
def LDRQpost : LoadPostIdx<0b00, 1, 0b11, FPR128Op, "ldr">;

// load sign-extended half-word
def LDRSHWpost : LoadPostIdx<0b01, 0, 0b11, GPR32z, "ldrsh">;
def LDRSHXpost : LoadPostIdx<0b01, 0, 0b10, GPR64z, "ldrsh">;

// load sign-extended byte
def LDRSBWpost : LoadPostIdx<0b00, 0, 0b11, GPR32z, "ldrsb">;
def LDRSBXpost : LoadPostIdx<0b00, 0, 0b10, GPR64z, "ldrsb">;

// load zero-extended byte
def LDRBBpost : LoadPostIdx<0b00, 0, 0b01, GPR32z, "ldrb">;
def LDRHHpost : LoadPostIdx<0b01, 0, 0b01, GPR32z, "ldrh">;

// load sign-extended word
def LDRSWpost : LoadPostIdx<0b10, 0, 0b10, GPR64z, "ldrsw">;

//===----------------------------------------------------------------------===//
// Store instructions.
//===----------------------------------------------------------------------===//

// Pair (indexed, offset)
// FIXME: Use dedicated range-checked addressing mode operand here.
defm STPW : StorePairOffset<0b00, 0, GPR32z, simm7s4, "stp">;
defm STPX : StorePairOffset<0b10, 0, GPR64z, simm7s8, "stp">;
defm STPS : StorePairOffset<0b00, 1, FPR32Op, simm7s4, "stp">;
defm STPD : StorePairOffset<0b01, 1, FPR64Op, simm7s8, "stp">;
defm STPQ : StorePairOffset<0b10, 1, FPR128Op, simm7s16, "stp">;

// Pair (pre-indexed)
def STPWpre : StorePairPreIdx<0b00, 0, GPR32z, simm7s4, "stp">;
def STPXpre : StorePairPreIdx<0b10, 0, GPR64z, simm7s8, "stp">;
def STPSpre : StorePairPreIdx<0b00, 1, FPR32Op, simm7s4, "stp">;
def STPDpre : StorePairPreIdx<0b01, 1, FPR64Op, simm7s8, "stp">;
def STPQpre : StorePairPreIdx<0b10, 1, FPR128Op, simm7s16, "stp">;

// Pair (post-indexed)
def STPWpost : StorePairPostIdx<0b00, 0, GPR32z, simm7s4, "stp">;
def STPXpost : StorePairPostIdx<0b10, 0, GPR64z, simm7s8, "stp">;
def STPSpost : StorePairPostIdx<0b00, 1, FPR32Op, simm7s4, "stp">;
def STPDpost : StorePairPostIdx<0b01, 1, FPR64Op, simm7s8, "stp">;
def STPQpost : StorePairPostIdx<0b10, 1, FPR128Op, simm7s16, "stp">;

// Pair (no allocate)
defm STNPW : StorePairNoAlloc<0b00, 0, GPR32z, simm7s4, "stnp">;
defm STNPX : StorePairNoAlloc<0b10, 0, GPR64z, simm7s8, "stnp">;
defm STNPS : StorePairNoAlloc<0b00, 1, FPR32Op, simm7s4, "stnp">;
defm STNPD : StorePairNoAlloc<0b01, 1, FPR64Op, simm7s8, "stnp">;
defm STNPQ : StorePairNoAlloc<0b10, 1, FPR128Op, simm7s16, "stnp">;

def : Pat<(AArch64stp GPR64z:$Rt, GPR64z:$Rt2, (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
          (STPXi GPR64z:$Rt, GPR64z:$Rt2, GPR64sp:$Rn, simm7s8:$offset)>;

def : Pat<(AArch64stnp FPR128:$Rt, FPR128:$Rt2, (am_indexed7s128 GPR64sp:$Rn, simm7s16:$offset)),
          (STNPQi FPR128:$Rt, FPR128:$Rt2, GPR64sp:$Rn, simm7s16:$offset)>;


//---
// (Register offset)

// Integer
defm STRBB : Store8RO< 0b00, 0, 0b00, GPR32, "strb", i32, truncstorei8>;
defm STRHH : Store16RO<0b01, 0, 0b00, GPR32, "strh", i32, truncstorei16>;
defm STRW  : Store32RO<0b10, 0, 0b00, GPR32, "str",  i32, store>;
defm STRX  : Store64RO<0b11, 0, 0b00, GPR64, "str",  i64, store>;


// Floating-point
defm STRB : Store8RO< 0b00,  1, 0b00, FPR8Op,   "str", untyped, store>;
defm STRH : Store16RO<0b01,  1, 0b00, FPR16Op,  "str", f16,     store>;
defm STRS : Store32RO<0b10,  1, 0b00, FPR32Op,  "str", f32,     store>;
defm STRD : Store64RO<0b11,  1, 0b00, FPR64Op,  "str", f64,     store>;
defm STRQ : Store128RO<0b00, 1, 0b10, FPR128Op, "str">;

let Predicates = [UseSTRQro], AddedComplexity = 10 in {
  def : Pat<(store (f128 FPR128:$Rt),
                        (ro_Windexed128 GPR64sp:$Rn, GPR32:$Rm,
                                        ro_Wextend128:$extend)),
            (STRQroW FPR128:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend128:$extend)>;
  def : Pat<(store (f128 FPR128:$Rt),
                        (ro_Xindexed128 GPR64sp:$Rn, GPR64:$Rm,
                                        ro_Xextend128:$extend)),
            (STRQroX FPR128:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend128:$extend)>;
}

multiclass TruncStoreFrom64ROPat<ROAddrMode ro, SDPatternOperator storeop,
                                 Instruction STRW, Instruction STRX> {

  def : Pat<(storeop GPR64:$Rt,
                     (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
            (STRW (EXTRACT_SUBREG GPR64:$Rt, sub_32),
                  GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(storeop GPR64:$Rt,
                     (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
            (STRX (EXTRACT_SUBREG GPR64:$Rt, sub_32),
                  GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 10 in {
  // truncstore i64
  defm : TruncStoreFrom64ROPat<ro8,  truncstorei8,  STRBBroW, STRBBroX>;
  defm : TruncStoreFrom64ROPat<ro16, truncstorei16, STRHHroW, STRHHroX>;
  defm : TruncStoreFrom64ROPat<ro32, truncstorei32, STRWroW,  STRWroX>;
}
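// The truncation is free: EXTRACT_SUBREG sub_32 simply selects the W
// sub-register holding the low 32 bits, so no separate narrowing instruction
// is emitted for the truncating store.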

multiclass VecROStorePat<ROAddrMode ro, ValueType VecTy, RegisterClass FPR,
                         Instruction STRW, Instruction STRX> {
  def : Pat<(store (VecTy FPR:$Rt),
                   (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
            (STRW FPR:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(store (VecTy FPR:$Rt),
                   (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
            (STRX FPR:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 10 in {
// Match all 64-bit-wide stores whose type is compatible with FPR64
let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian.
  defm : VecROStorePat<ro64, v2i32, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v2f32, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v4i16, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v8i8, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v4f16, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v4bf16, FPR64, STRDroW, STRDroX>;
}

defm : VecROStorePat<ro64, v1i64, FPR64, STRDroW, STRDroX>;
defm : VecROStorePat<ro64, v1f64, FPR64, STRDroW, STRDroX>;

// Match all 128-bit-wide stores whose type is compatible with FPR128
let Predicates = [IsLE, UseSTRQro] in {
  // We must use ST1 to store vectors in big-endian.
  defm : VecROStorePat<ro128, v2i64, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v2f64, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v4i32, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v4f32, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v8i16, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v16i8, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v8f16, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v8bf16, FPR128, STRQroW, STRQroX>;
}
} // AddedComplexity = 10

// Match stores from lane 0 to the appropriate subreg's store.
multiclass VecROStoreLane0Pat<ROAddrMode ro, SDPatternOperator storeop,
                              ValueType VecTy, ValueType STy,
                              SubRegIndex SubRegIdx,
                              Instruction STRW, Instruction STRX> {

  def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
                     (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
            (STRW (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
                  GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
                     (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
            (STRX (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
                  GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 19 in {
  defm : VecROStoreLane0Pat<ro16, truncstorei16, v8i16, i32, hsub, STRHroW, STRHroX>;
  defm : VecROStoreLane0Pat<ro16,         store, v8f16, f16, hsub, STRHroW, STRHroX>;
  defm : VecROStoreLane0Pat<ro32,         store, v4i32, i32, ssub, STRSroW, STRSroX>;
  defm : VecROStoreLane0Pat<ro32,         store, v4f32, f32, ssub, STRSroW, STRSroX>;
  defm : VecROStoreLane0Pat<ro64,         store, v2i64, i64, dsub, STRDroW, STRDroX>;
  defm : VecROStoreLane0Pat<ro64,         store, v2f64, f64, dsub, STRDroW, STRDroX>;
}
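// Storing lane 0 through the hsub/ssub/dsub sub-register avoids a separate
// element-extract instruction: element 0 occupies the low bits of the vector
// register, which is exactly what the scalar FP store reads.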

//---
// (unsigned immediate)
defm STRX : StoreUIz<0b11, 0, 0b00, GPR64z, uimm12s8, "str",
                   [(store GPR64z:$Rt,
                            (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
defm STRW : StoreUIz<0b10, 0, 0b00, GPR32z, uimm12s4, "str",
                    [(store GPR32z:$Rt,
                            (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
defm STRB : StoreUI<0b00, 1, 0b00, FPR8Op, uimm12s1, "str",
                    [(store FPR8Op:$Rt,
                            (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))]>;
defm STRH : StoreUI<0b01, 1, 0b00, FPR16Op, uimm12s2, "str",
                    [(store (f16 FPR16Op:$Rt),
                            (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))]>;
defm STRS : StoreUI<0b10, 1, 0b00, FPR32Op, uimm12s4, "str",
                    [(store (f32 FPR32Op:$Rt),
                            (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
defm STRD : StoreUI<0b11, 1, 0b00, FPR64Op, uimm12s8, "str",
                    [(store (f64 FPR64Op:$Rt),
                            (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
defm STRQ : StoreUI<0b00, 1, 0b10, FPR128Op, uimm12s16, "str", []>;

defm STRHH : StoreUIz<0b01, 0, 0b00, GPR32z, uimm12s2, "strh",
                     [(truncstorei16 GPR32z:$Rt,
                                     (am_indexed16 GPR64sp:$Rn,
                                                   uimm12s2:$offset))]>;
defm STRBB : StoreUIz<0b00, 0, 0b00, GPR32z, uimm12s1,  "strb",
                     [(truncstorei8 GPR32z:$Rt,
                                    (am_indexed8 GPR64sp:$Rn,
                                                 uimm12s1:$offset))]>;

// bf16 store pattern
def : Pat<(store (bf16 FPR16Op:$Rt),
                 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)),
          (STRHui FPR16:$Rt, GPR64sp:$Rn, uimm12s2:$offset)>;

let AddedComplexity = 10 in {

// Match all 64-bit-wide stores whose type is compatible with FPR64
3318def : Pat<(store (v1i64 FPR64:$Rt),
3319                 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
3320          (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
3321def : Pat<(store (v1f64 FPR64:$Rt),
3322                 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
3323          (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
3324
3325let Predicates = [IsLE] in {
3326  // We must use ST1 to store vectors in big-endian.
3327  def : Pat<(store (v2f32 FPR64:$Rt),
3328                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
3329            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
3330  def : Pat<(store (v8i8 FPR64:$Rt),
3331                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
3332            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
3333  def : Pat<(store (v4i16 FPR64:$Rt),
3334                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
3335            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
3336  def : Pat<(store (v2i32 FPR64:$Rt),
3337                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
3338            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
3339  def : Pat<(store (v4f16 FPR64:$Rt),
3340                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
3341            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
3342  def : Pat<(store (v4bf16 FPR64:$Rt),
3343                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
3344            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
3345}

// Match all 128-bit-wide stores whose type is compatible with FPR128.
def : Pat<(store (f128 FPR128:$Rt),
                 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
          (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;

let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian, so these STR patterns
  // are little-endian only.
  def : Pat<(store (v4f32 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v2f64 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v16i8 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v8i16 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v4i32 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v2i64 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v8f16 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v8bf16 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
}

// truncstore i64
def : Pat<(truncstorei32 GPR64:$Rt,
                         (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)),
  (STRWui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s4:$offset)>;
def : Pat<(truncstorei16 GPR64:$Rt,
                         (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)),
  (STRHHui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s2:$offset)>;
def : Pat<(truncstorei8 GPR64:$Rt, (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)),
  (STRBBui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s1:$offset)>;

} // AddedComplexity = 10

// Match stores from lane 0 to the appropriate subreg's store.
multiclass VecStoreLane0Pat<ComplexPattern UIAddrMode, SDPatternOperator storeop,
                            ValueType VTy, ValueType STy,
                            SubRegIndex SubRegIdx, Operand IndexType,
                            Instruction STR> {
  def : Pat<(storeop (STy (vector_extract (VTy VecListOne128:$Vt), 0)),
                     (UIAddrMode GPR64sp:$Rn, IndexType:$offset)),
            (STR (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
                 GPR64sp:$Rn, IndexType:$offset)>;
}
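// For example, storing lane 0 of a v4f32 becomes a plain STRSui of the ssub
// sub-register of the source vector, so no separate lane-extract instruction
// is needed.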

let AddedComplexity = 19 in {
  defm : VecStoreLane0Pat<am_indexed16, truncstorei16, v8i16, i32, hsub, uimm12s2, STRHui>;
  defm : VecStoreLane0Pat<am_indexed16,         store, v8f16, f16, hsub, uimm12s2, STRHui>;
  defm : VecStoreLane0Pat<am_indexed32,         store, v4i32, i32, ssub, uimm12s4, STRSui>;
  defm : VecStoreLane0Pat<am_indexed32,         store, v4f32, f32, ssub, uimm12s4, STRSui>;
  defm : VecStoreLane0Pat<am_indexed64,         store, v2i64, i64, dsub, uimm12s8, STRDui>;
  defm : VecStoreLane0Pat<am_indexed64,         store, v2f64, f64, dsub, uimm12s8, STRDui>;
}

//---
// (unscaled immediate)
defm STURX : StoreUnscaled<0b11, 0, 0b00, GPR64z, "stur",
                         [(store GPR64z:$Rt,
                                 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
defm STURW : StoreUnscaled<0b10, 0, 0b00, GPR32z, "stur",
                         [(store GPR32z:$Rt,
                                 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
defm STURB : StoreUnscaled<0b00, 1, 0b00, FPR8Op, "stur",
                         [(store FPR8Op:$Rt,
                                 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
defm STURH : StoreUnscaled<0b01, 1, 0b00, FPR16Op, "stur",
                         [(store (f16 FPR16Op:$Rt),
                                 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
defm STURS : StoreUnscaled<0b10, 1, 0b00, FPR32Op, "stur",
                         [(store (f32 FPR32Op:$Rt),
                                 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
defm STURD : StoreUnscaled<0b11, 1, 0b00, FPR64Op, "stur",
                         [(store (f64 FPR64Op:$Rt),
                                 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
defm STURQ : StoreUnscaled<0b00, 1, 0b10, FPR128Op, "stur",
                         [(store (f128 FPR128Op:$Rt),
                                 (am_unscaled128 GPR64sp:$Rn, simm9:$offset))]>;
defm STURHH : StoreUnscaled<0b01, 0, 0b00, GPR32z, "sturh",
                         [(truncstorei16 GPR32z:$Rt,
                                 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
defm STURBB : StoreUnscaled<0b00, 0, 0b00, GPR32z, "sturb",
                         [(truncstorei8 GPR32z:$Rt,
                                  (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;

// Armv8.4 Weaker Release Consistency enhancements:
//         LDAPR & STLR with Immediate Offset instructions
let Predicates = [HasRCPC_IMMO] in {
defm STLURB     : BaseStoreUnscaleV84<"stlurb",  0b00, 0b00, GPR32>;
defm STLURH     : BaseStoreUnscaleV84<"stlurh",  0b01, 0b00, GPR32>;
defm STLURW     : BaseStoreUnscaleV84<"stlur",   0b10, 0b00, GPR32>;
defm STLURX     : BaseStoreUnscaleV84<"stlur",   0b11, 0b00, GPR64>;
defm LDAPURB    : BaseLoadUnscaleV84<"ldapurb",  0b00, 0b01, GPR32>;
defm LDAPURSBW  : BaseLoadUnscaleV84<"ldapursb", 0b00, 0b11, GPR32>;
defm LDAPURSBX  : BaseLoadUnscaleV84<"ldapursb", 0b00, 0b10, GPR64>;
defm LDAPURH    : BaseLoadUnscaleV84<"ldapurh",  0b01, 0b01, GPR32>;
defm LDAPURSHW  : BaseLoadUnscaleV84<"ldapursh", 0b01, 0b11, GPR32>;
defm LDAPURSHX  : BaseLoadUnscaleV84<"ldapursh", 0b01, 0b10, GPR64>;
defm LDAPUR     : BaseLoadUnscaleV84<"ldapur",   0b10, 0b01, GPR32>;
defm LDAPURSW   : BaseLoadUnscaleV84<"ldapursw", 0b10, 0b10, GPR64>;
defm LDAPURX    : BaseLoadUnscaleV84<"ldapur",   0b11, 0b01, GPR64>;
}
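// Unlike the base LDAPR/STLR forms, these variants take a signed 9-bit byte
// offset, e.g. "ldapur w0, [x1, #-8]" or "stlur w2, [x1, #16]".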

// Match all 64-bit-wide stores whose type is compatible with FPR64.
def : Pat<(store (v1f64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
          (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(store (v1i64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
          (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;

let AddedComplexity = 10 in {

let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian, so these STUR patterns
  // are little-endian only.
  def : Pat<(store (v2f32 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8i8 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4i16 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v2i32 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4f16 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4bf16 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
}

// Match all 128-bit-wide stores whose type is compatible with FPR128.
def : Pat<(store (f128 FPR128:$Rt), (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
          (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;

let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian, so these STUR patterns
  // are little-endian only.
  def : Pat<(store (v4f32 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v2f64 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v16i8 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8i16 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4i32 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v2i64 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8f16 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8bf16 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
}

} // AddedComplexity = 10

// unscaled i64 truncating stores
def : Pat<(truncstorei32 GPR64:$Rt, (am_unscaled32 GPR64sp:$Rn, simm9:$offset)),
  (STURWi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(truncstorei16 GPR64:$Rt, (am_unscaled16 GPR64sp:$Rn, simm9:$offset)),
  (STURHHi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(truncstorei8 GPR64:$Rt, (am_unscaled8 GPR64sp:$Rn, simm9:$offset)),
  (STURBBi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;

// Match stores from lane 0 to the appropriate subreg's store.
multiclass VecStoreULane0Pat<SDPatternOperator StoreOp,
                             ValueType VTy, ValueType STy,
                             SubRegIndex SubRegIdx, Instruction STR> {
  defm : VecStoreLane0Pat<am_unscaled128, StoreOp, VTy, STy, SubRegIdx, simm9, STR>;
}

let AddedComplexity = 19 in {
  defm : VecStoreULane0Pat<truncstorei16, v8i16, i32, hsub, STURHi>;
  defm : VecStoreULane0Pat<store,         v8f16, f16, hsub, STURHi>;
  defm : VecStoreULane0Pat<store,         v4i32, i32, ssub, STURSi>;
  defm : VecStoreULane0Pat<store,         v4f32, f32, ssub, STURSi>;
  defm : VecStoreULane0Pat<store,         v2i64, i64, dsub, STURDi>;
  defm : VecStoreULane0Pat<store,         v2f64, f64, dsub, STURDi>;
}

//---
// STR mnemonics fall back to STUR for negative or unaligned offsets.
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURBi FPR8Op:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURHi FPR16Op:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURSi FPR32Op:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURDi FPR64Op:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURQi FPR128Op:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;

def : InstAlias<"strb $Rt, [$Rn, $offset]",
                (STURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"strh $Rt, [$Rn, $offset]",
                (STURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
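// For example, "str x0, [x8, #12]" cannot be encoded as STRXui (12 is not a
// multiple of 8), so it assembles to the unscaled STURXi form instead.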

//---
// (unscaled immediate, unprivileged)
defm STTRW : StoreUnprivileged<0b10, 0, 0b00, GPR32, "sttr">;
defm STTRX : StoreUnprivileged<0b11, 0, 0b00, GPR64, "sttr">;

defm STTRH : StoreUnprivileged<0b01, 0, 0b00, GPR32, "sttrh">;
defm STTRB : StoreUnprivileged<0b00, 0, 0b00, GPR32, "sttrb">;

//---
// (immediate pre-indexed)
def STRWpre : StorePreIdx<0b10, 0, 0b00, GPR32z, "str",  pre_store, i32>;
def STRXpre : StorePreIdx<0b11, 0, 0b00, GPR64z, "str",  pre_store, i64>;
def STRBpre : StorePreIdx<0b00, 1, 0b00, FPR8Op,  "str",  pre_store, untyped>;
def STRHpre : StorePreIdx<0b01, 1, 0b00, FPR16Op, "str",  pre_store, f16>;
def STRSpre : StorePreIdx<0b10, 1, 0b00, FPR32Op, "str",  pre_store, f32>;
def STRDpre : StorePreIdx<0b11, 1, 0b00, FPR64Op, "str",  pre_store, f64>;
def STRQpre : StorePreIdx<0b00, 1, 0b10, FPR128Op, "str", pre_store, f128>;

def STRBBpre : StorePreIdx<0b00, 0, 0b00, GPR32z, "strb", pre_truncsti8,  i32>;
def STRHHpre : StorePreIdx<0b01, 0, 0b00, GPR32z, "strh", pre_truncsti16, i32>;

// truncstore i64
def : Pat<(pre_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
  (STRWpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
           simm9:$off)>;
def : Pat<(pre_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
  (STRHHpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
            simm9:$off)>;
def : Pat<(pre_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
  (STRBBpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
            simm9:$off)>;

def : Pat<(pre_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;

def : Pat<(pre_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;

//---
// (immediate post-indexed)
def STRWpost : StorePostIdx<0b10, 0, 0b00, GPR32z,  "str", post_store, i32>;
def STRXpost : StorePostIdx<0b11, 0, 0b00, GPR64z,  "str", post_store, i64>;
def STRBpost : StorePostIdx<0b00, 1, 0b00, FPR8Op,   "str", post_store, untyped>;
def STRHpost : StorePostIdx<0b01, 1, 0b00, FPR16Op,  "str", post_store, f16>;
def STRSpost : StorePostIdx<0b10, 1, 0b00, FPR32Op,  "str", post_store, f32>;
def STRDpost : StorePostIdx<0b11, 1, 0b00, FPR64Op,  "str", post_store, f64>;
def STRQpost : StorePostIdx<0b00, 1, 0b10, FPR128Op, "str", post_store, f128>;

def STRBBpost : StorePostIdx<0b00, 0, 0b00, GPR32z, "strb", post_truncsti8, i32>;
def STRHHpost : StorePostIdx<0b01, 0, 0b00, GPR32z, "strh", post_truncsti16, i32>;

// truncstore i64
def : Pat<(post_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
  (STRWpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
            simm9:$off)>;
def : Pat<(post_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
  (STRHHpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
             simm9:$off)>;
def : Pat<(post_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
  (STRBBpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
             simm9:$off)>;

def : Pat<(post_store (bf16 FPR16:$Rt), GPR64sp:$addr, simm9:$off),
          (STRHpost FPR16:$Rt, GPR64sp:$addr, simm9:$off)>;

def : Pat<(post_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4bf16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;

def : Pat<(post_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v8bf16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;


//===----------------------------------------------------------------------===//
// Load/store exclusive instructions.
//===----------------------------------------------------------------------===//

def LDARW  : LoadAcquire   <0b10, 1, 1, 0, 1, GPR32, "ldar">;
def LDARX  : LoadAcquire   <0b11, 1, 1, 0, 1, GPR64, "ldar">;
def LDARB  : LoadAcquire   <0b00, 1, 1, 0, 1, GPR32, "ldarb">;
def LDARH  : LoadAcquire   <0b01, 1, 1, 0, 1, GPR32, "ldarh">;

def LDAXRW : LoadExclusive <0b10, 0, 1, 0, 1, GPR32, "ldaxr">;
def LDAXRX : LoadExclusive <0b11, 0, 1, 0, 1, GPR64, "ldaxr">;
def LDAXRB : LoadExclusive <0b00, 0, 1, 0, 1, GPR32, "ldaxrb">;
def LDAXRH : LoadExclusive <0b01, 0, 1, 0, 1, GPR32, "ldaxrh">;

def LDXRW  : LoadExclusive <0b10, 0, 1, 0, 0, GPR32, "ldxr">;
def LDXRX  : LoadExclusive <0b11, 0, 1, 0, 0, GPR64, "ldxr">;
def LDXRB  : LoadExclusive <0b00, 0, 1, 0, 0, GPR32, "ldxrb">;
def LDXRH  : LoadExclusive <0b01, 0, 1, 0, 0, GPR32, "ldxrh">;

def STLRW  : StoreRelease  <0b10, 1, 0, 0, 1, GPR32, "stlr">;
def STLRX  : StoreRelease  <0b11, 1, 0, 0, 1, GPR64, "stlr">;
def STLRB  : StoreRelease  <0b00, 1, 0, 0, 1, GPR32, "stlrb">;
def STLRH  : StoreRelease  <0b01, 1, 0, 0, 1, GPR32, "stlrh">;

def STLXRW : StoreExclusive<0b10, 0, 0, 0, 1, GPR32, "stlxr">;
def STLXRX : StoreExclusive<0b11, 0, 0, 0, 1, GPR64, "stlxr">;
def STLXRB : StoreExclusive<0b00, 0, 0, 0, 1, GPR32, "stlxrb">;
def STLXRH : StoreExclusive<0b01, 0, 0, 0, 1, GPR32, "stlxrh">;

def STXRW  : StoreExclusive<0b10, 0, 0, 0, 0, GPR32, "stxr">;
def STXRX  : StoreExclusive<0b11, 0, 0, 0, 0, GPR64, "stxr">;
def STXRB  : StoreExclusive<0b00, 0, 0, 0, 0, GPR32, "stxrb">;
def STXRH  : StoreExclusive<0b01, 0, 0, 0, 0, GPR32, "stxrh">;

def LDAXPW : LoadExclusivePair<0b10, 0, 1, 1, 1, GPR32, "ldaxp">;
def LDAXPX : LoadExclusivePair<0b11, 0, 1, 1, 1, GPR64, "ldaxp">;

def LDXPW  : LoadExclusivePair<0b10, 0, 1, 1, 0, GPR32, "ldxp">;
def LDXPX  : LoadExclusivePair<0b11, 0, 1, 1, 0, GPR64, "ldxp">;

def STLXPW : StoreExclusivePair<0b10, 0, 0, 1, 1, GPR32, "stlxp">;
def STLXPX : StoreExclusivePair<0b11, 0, 0, 1, 1, GPR64, "stlxp">;

def STXPW  : StoreExclusivePair<0b10, 0, 0, 1, 0, GPR32, "stxp">;
def STXPX  : StoreExclusivePair<0b11, 0, 0, 1, 0, GPR64, "stxp">;

let Predicates = [HasLOR] in {
  // v8.1a "Limited Order Region" extension load-acquire instructions
  def LDLARW  : LoadAcquire   <0b10, 1, 1, 0, 0, GPR32, "ldlar">;
  def LDLARX  : LoadAcquire   <0b11, 1, 1, 0, 0, GPR64, "ldlar">;
  def LDLARB  : LoadAcquire   <0b00, 1, 1, 0, 0, GPR32, "ldlarb">;
  def LDLARH  : LoadAcquire   <0b01, 1, 1, 0, 0, GPR32, "ldlarh">;

  // v8.1a "Limited Order Region" extension store-release instructions
  def STLLRW  : StoreRelease   <0b10, 1, 0, 0, 0, GPR32, "stllr">;
  def STLLRX  : StoreRelease   <0b11, 1, 0, 0, 0, GPR64, "stllr">;
  def STLLRB  : StoreRelease   <0b00, 1, 0, 0, 0, GPR32, "stllrb">;
  def STLLRH  : StoreRelease   <0b01, 1, 0, 0, 0, GPR32, "stllrh">;
}

//===----------------------------------------------------------------------===//
// Scaled floating point to integer conversion instructions.
//===----------------------------------------------------------------------===//

defm FCVTAS : FPToIntegerUnscaled<0b00, 0b100, "fcvtas", int_aarch64_neon_fcvtas>;
defm FCVTAU : FPToIntegerUnscaled<0b00, 0b101, "fcvtau", int_aarch64_neon_fcvtau>;
defm FCVTMS : FPToIntegerUnscaled<0b10, 0b000, "fcvtms", int_aarch64_neon_fcvtms>;
defm FCVTMU : FPToIntegerUnscaled<0b10, 0b001, "fcvtmu", int_aarch64_neon_fcvtmu>;
defm FCVTNS : FPToIntegerUnscaled<0b00, 0b000, "fcvtns", int_aarch64_neon_fcvtns>;
defm FCVTNU : FPToIntegerUnscaled<0b00, 0b001, "fcvtnu", int_aarch64_neon_fcvtnu>;
defm FCVTPS : FPToIntegerUnscaled<0b01, 0b000, "fcvtps", int_aarch64_neon_fcvtps>;
defm FCVTPU : FPToIntegerUnscaled<0b01, 0b001, "fcvtpu", int_aarch64_neon_fcvtpu>;
defm FCVTZS : FPToIntegerUnscaled<0b11, 0b000, "fcvtzs", any_fp_to_sint>;
defm FCVTZU : FPToIntegerUnscaled<0b11, 0b001, "fcvtzu", any_fp_to_uint>;
defm FCVTZS : FPToIntegerScaled<0b11, 0b000, "fcvtzs", any_fp_to_sint>;
defm FCVTZU : FPToIntegerScaled<0b11, 0b001, "fcvtzu", any_fp_to_uint>;

// AArch64's FCVT instructions saturate when out of range.
multiclass FPToIntegerSatPats<SDNode to_int_sat, string INST> {
  let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (to_int_sat f16:$Rn, i32)),
            (!cast<Instruction>(INST # UWHr) f16:$Rn)>;
  def : Pat<(i64 (to_int_sat f16:$Rn, i64)),
            (!cast<Instruction>(INST # UXHr) f16:$Rn)>;
  }
  def : Pat<(i32 (to_int_sat f32:$Rn, i32)),
            (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
  def : Pat<(i64 (to_int_sat f32:$Rn, i64)),
            (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
  def : Pat<(i32 (to_int_sat f64:$Rn, i32)),
            (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
  def : Pat<(i64 (to_int_sat f64:$Rn, i64)),
            (!cast<Instruction>(INST # UXDr) f64:$Rn)>;

  let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (to_int_sat (fmul f16:$Rn, fixedpoint_f16_i32:$scale), i32)),
            (!cast<Instruction>(INST # SWHri) $Rn, $scale)>;
  def : Pat<(i64 (to_int_sat (fmul f16:$Rn, fixedpoint_f16_i64:$scale), i64)),
            (!cast<Instruction>(INST # SXHri) $Rn, $scale)>;
  }
  def : Pat<(i32 (to_int_sat (fmul f32:$Rn, fixedpoint_f32_i32:$scale), i32)),
            (!cast<Instruction>(INST # SWSri) $Rn, $scale)>;
  def : Pat<(i64 (to_int_sat (fmul f32:$Rn, fixedpoint_f32_i64:$scale), i64)),
            (!cast<Instruction>(INST # SXSri) $Rn, $scale)>;
  def : Pat<(i32 (to_int_sat (fmul f64:$Rn, fixedpoint_f64_i32:$scale), i32)),
            (!cast<Instruction>(INST # SWDri) $Rn, $scale)>;
  def : Pat<(i64 (to_int_sat (fmul f64:$Rn, fixedpoint_f64_i64:$scale), i64)),
            (!cast<Instruction>(INST # SXDri) $Rn, $scale)>;
}

defm : FPToIntegerSatPats<fp_to_sint_sat, "FCVTZS">;
defm : FPToIntegerSatPats<fp_to_uint_sat, "FCVTZU">;
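// For example, fptosi.sat of 1.0e10 (f32) to i32 selects FCVTZSUWSr, whose
// hardware saturation to 0x7fffffff already matches the intrinsic's
// semantics, so no extra clamping code is needed.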

multiclass FPToIntegerIntPats<Intrinsic round, string INST> {
  let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (round f16:$Rn)), (!cast<Instruction>(INST # UWHr) $Rn)>;
  def : Pat<(i64 (round f16:$Rn)), (!cast<Instruction>(INST # UXHr) $Rn)>;
  }
  def : Pat<(i32 (round f32:$Rn)), (!cast<Instruction>(INST # UWSr) $Rn)>;
  def : Pat<(i64 (round f32:$Rn)), (!cast<Instruction>(INST # UXSr) $Rn)>;
  def : Pat<(i32 (round f64:$Rn)), (!cast<Instruction>(INST # UWDr) $Rn)>;
  def : Pat<(i64 (round f64:$Rn)), (!cast<Instruction>(INST # UXDr) $Rn)>;

  let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (round (fmul f16:$Rn, fixedpoint_f16_i32:$scale))),
            (!cast<Instruction>(INST # SWHri) $Rn, $scale)>;
  def : Pat<(i64 (round (fmul f16:$Rn, fixedpoint_f16_i64:$scale))),
            (!cast<Instruction>(INST # SXHri) $Rn, $scale)>;
  }
  def : Pat<(i32 (round (fmul f32:$Rn, fixedpoint_f32_i32:$scale))),
            (!cast<Instruction>(INST # SWSri) $Rn, $scale)>;
  def : Pat<(i64 (round (fmul f32:$Rn, fixedpoint_f32_i64:$scale))),
            (!cast<Instruction>(INST # SXSri) $Rn, $scale)>;
  def : Pat<(i32 (round (fmul f64:$Rn, fixedpoint_f64_i32:$scale))),
            (!cast<Instruction>(INST # SWDri) $Rn, $scale)>;
  def : Pat<(i64 (round (fmul f64:$Rn, fixedpoint_f64_i64:$scale))),
            (!cast<Instruction>(INST # SXDri) $Rn, $scale)>;
}

defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzs, "FCVTZS">;
defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzu, "FCVTZU">;

multiclass FPToIntegerPats<SDNode to_int, SDNode to_int_sat, SDNode round, string INST> {
  def : Pat<(i32 (to_int (round f32:$Rn))),
            (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
  def : Pat<(i64 (to_int (round f32:$Rn))),
            (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
  def : Pat<(i32 (to_int (round f64:$Rn))),
            (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
  def : Pat<(i64 (to_int (round f64:$Rn))),
            (!cast<Instruction>(INST # UXDr) f64:$Rn)>;

  // These instructions saturate like fp_to_[su]int_sat.
  let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (to_int_sat (round f16:$Rn), i32)),
            (!cast<Instruction>(INST # UWHr) f16:$Rn)>;
  def : Pat<(i64 (to_int_sat (round f16:$Rn), i64)),
            (!cast<Instruction>(INST # UXHr) f16:$Rn)>;
  }
  def : Pat<(i32 (to_int_sat (round f32:$Rn), i32)),
            (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
  def : Pat<(i64 (to_int_sat (round f32:$Rn), i64)),
            (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
  def : Pat<(i32 (to_int_sat (round f64:$Rn), i32)),
            (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
  def : Pat<(i64 (to_int_sat (round f64:$Rn), i64)),
            (!cast<Instruction>(INST # UXDr) f64:$Rn)>;
}

defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, fceil,  "FCVTPS">;
defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, fceil,  "FCVTPU">;
defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, ffloor, "FCVTMS">;
defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, ffloor, "FCVTMU">;
defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, ftrunc, "FCVTZS">;
defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, ftrunc, "FCVTZU">;
defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, fround, "FCVTAS">;
defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, fround, "FCVTAU">;
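// E.g. (fp_to_sint (fceil f32:$Rn)) folds to a single FCVTPS (convert with
// round toward +Inf) instead of an FRINTP followed by an FCVTZS.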

let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (lround f16:$Rn)),
            (FCVTASUWHr f16:$Rn)>;
  def : Pat<(i64 (lround f16:$Rn)),
            (FCVTASUXHr f16:$Rn)>;
  def : Pat<(i64 (llround f16:$Rn)),
            (FCVTASUXHr f16:$Rn)>;
}
def : Pat<(i32 (lround f32:$Rn)),
          (FCVTASUWSr f32:$Rn)>;
def : Pat<(i32 (lround f64:$Rn)),
          (FCVTASUWDr f64:$Rn)>;
def : Pat<(i64 (lround f32:$Rn)),
          (FCVTASUXSr f32:$Rn)>;
def : Pat<(i64 (lround f64:$Rn)),
          (FCVTASUXDr f64:$Rn)>;
def : Pat<(i64 (llround f32:$Rn)),
          (FCVTASUXSr f32:$Rn)>;
def : Pat<(i64 (llround f64:$Rn)),
          (FCVTASUXDr f64:$Rn)>;

//===----------------------------------------------------------------------===//
// Scaled integer to floating point conversion instructions.
//===----------------------------------------------------------------------===//

defm SCVTF : IntegerToFP<0, "scvtf", any_sint_to_fp>;
defm UCVTF : IntegerToFP<1, "ucvtf", any_uint_to_fp>;

//===----------------------------------------------------------------------===//
// Unscaled integer to floating point conversion instruction.
//===----------------------------------------------------------------------===//

defm FMOV : UnscaledConversion<"fmov">;

// Add pseudo ops for FMOV 0 so we can mark them as isReMaterializable.
let isReMaterializable = 1, isCodeGenOnly = 1, isAsCheapAsAMove = 1 in {
def FMOVH0 : Pseudo<(outs FPR16:$Rd), (ins), [(set f16:$Rd, (fpimm0))]>,
    Sched<[WriteF]>, Requires<[HasFullFP16]>;
def FMOVS0 : Pseudo<(outs FPR32:$Rd), (ins), [(set f32:$Rd, (fpimm0))]>,
    Sched<[WriteF]>;
def FMOVD0 : Pseudo<(outs FPR64:$Rd), (ins), [(set f64:$Rd, (fpimm0))]>,
    Sched<[WriteF]>;
}
// Similarly, add assembly aliases so "fmov <reg>, #0.0" is accepted and
// encoded as a move from the zero register.
def : InstAlias<"fmov $Rd, #0.0", (FMOVWHr FPR16:$Rd, WZR), 0>,
    Requires<[HasFullFP16]>;
def : InstAlias<"fmov $Rd, #0.0", (FMOVWSr FPR32:$Rd, WZR), 0>;
def : InstAlias<"fmov $Rd, #0.0", (FMOVXDr FPR64:$Rd, XZR), 0>;

//===----------------------------------------------------------------------===//
// Floating point conversion instruction.
//===----------------------------------------------------------------------===//

defm FCVT : FPConversion<"fcvt">;

//===----------------------------------------------------------------------===//
// Floating point single operand instructions.
//===----------------------------------------------------------------------===//

defm FABS   : SingleOperandFPData<0b0001, "fabs", fabs>;
defm FMOV   : SingleOperandFPData<0b0000, "fmov">;
defm FNEG   : SingleOperandFPData<0b0010, "fneg", fneg>;
defm FRINTA : SingleOperandFPData<0b1100, "frinta", fround>;
defm FRINTI : SingleOperandFPData<0b1111, "frinti", fnearbyint>;
defm FRINTM : SingleOperandFPData<0b1010, "frintm", ffloor>;
defm FRINTN : SingleOperandFPData<0b1000, "frintn", froundeven>;
defm FRINTP : SingleOperandFPData<0b1001, "frintp", fceil>;

defm FRINTX : SingleOperandFPData<0b1110, "frintx", frint>;
defm FRINTZ : SingleOperandFPData<0b1011, "frintz", ftrunc>;

let SchedRW = [WriteFDiv] in {
defm FSQRT  : SingleOperandFPData<0b0011, "fsqrt", fsqrt>;
}

let Predicates = [HasFRInt3264] in {
  defm FRINT32Z : FRIntNNT<0b00, "frint32z", int_aarch64_frint32z>;
  defm FRINT64Z : FRIntNNT<0b10, "frint64z", int_aarch64_frint64z>;
  defm FRINT32X : FRIntNNT<0b01, "frint32x", int_aarch64_frint32x>;
  defm FRINT64X : FRIntNNT<0b11, "frint64x", int_aarch64_frint64x>;
} // HasFRInt3264

let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (lrint f16:$Rn)),
            (FCVTZSUWHr (FRINTXHr f16:$Rn))>;
  def : Pat<(i64 (lrint f16:$Rn)),
            (FCVTZSUXHr (FRINTXHr f16:$Rn))>;
  def : Pat<(i64 (llrint f16:$Rn)),
            (FCVTZSUXHr (FRINTXHr f16:$Rn))>;
}
def : Pat<(i32 (lrint f32:$Rn)),
          (FCVTZSUWSr (FRINTXSr f32:$Rn))>;
def : Pat<(i32 (lrint f64:$Rn)),
          (FCVTZSUWDr (FRINTXDr f64:$Rn))>;
def : Pat<(i64 (lrint f32:$Rn)),
          (FCVTZSUXSr (FRINTXSr f32:$Rn))>;
def : Pat<(i64 (lrint f64:$Rn)),
          (FCVTZSUXDr (FRINTXDr f64:$Rn))>;
def : Pat<(i64 (llrint f32:$Rn)),
          (FCVTZSUXSr (FRINTXSr f32:$Rn))>;
def : Pat<(i64 (llrint f64:$Rn)),
          (FCVTZSUXDr (FRINTXDr f64:$Rn))>;

//===----------------------------------------------------------------------===//
// Floating point two operand instructions.
//===----------------------------------------------------------------------===//

defm FADD   : TwoOperandFPData<0b0010, "fadd", fadd>;
let SchedRW = [WriteFDiv] in {
defm FDIV   : TwoOperandFPData<0b0001, "fdiv", fdiv>;
}
defm FMAXNM : TwoOperandFPData<0b0110, "fmaxnm", fmaxnum>;
defm FMAX   : TwoOperandFPData<0b0100, "fmax", fmaximum>;
defm FMINNM : TwoOperandFPData<0b0111, "fminnm", fminnum>;
defm FMIN   : TwoOperandFPData<0b0101, "fmin", fminimum>;
let SchedRW = [WriteFMul] in {
defm FMUL   : TwoOperandFPData<0b0000, "fmul", fmul>;
defm FNMUL  : TwoOperandFPDataNeg<0b1000, "fnmul", fmul>;
}
defm FSUB   : TwoOperandFPData<0b0011, "fsub", fsub>;

def : Pat<(v1f64 (fmaximum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMAXDrr FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v1f64 (fminimum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMINDrr FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v1f64 (fmaxnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMAXNMDrr FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v1f64 (fminnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMINNMDrr FPR64:$Rn, FPR64:$Rm)>;

//===----------------------------------------------------------------------===//
// Floating point three operand instructions.
//===----------------------------------------------------------------------===//

defm FMADD  : ThreeOperandFPData<0, 0, "fmadd", fma>;
defm FMSUB  : ThreeOperandFPData<0, 1, "fmsub",
     TriOpFrag<(fma node:$LHS, (fneg node:$MHS), node:$RHS)> >;
defm FNMADD : ThreeOperandFPData<1, 0, "fnmadd",
     TriOpFrag<(fneg (fma node:$LHS, node:$MHS, node:$RHS))> >;
defm FNMSUB : ThreeOperandFPData<1, 1, "fnmsub",
     TriOpFrag<(fma node:$LHS, node:$MHS, (fneg node:$RHS))> >;

// The following def pats catch the case where the LHS of an FMA is negated.
// The TriOpFrag above catches the case where the middle operand is negated.

// N.b. FMSUB etc have the accumulator at the *end* of (ins), unlike
// the NEON variant.

// First, handle c + (-a)*b, which maps to FMSUB:

let Predicates = [HasNEON, HasFullFP16] in
def : Pat<(f16 (fma (fneg FPR16:$Rn), FPR16:$Rm, FPR16:$Ra)),
          (FMSUBHrrr FPR16:$Rn, FPR16:$Rm, FPR16:$Ra)>;

def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, FPR32:$Ra)),
          (FMSUBSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;

def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, FPR64:$Ra)),
          (FMSUBDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;

// Now handle (-c) + (-a)*b, i.e. -(c + a*b), which maps to FNMADD:

let Predicates = [HasNEON, HasFullFP16] in
def : Pat<(f16 (fma (fneg FPR16:$Rn), FPR16:$Rm, (fneg FPR16:$Ra))),
          (FNMADDHrrr FPR16:$Rn, FPR16:$Rm, FPR16:$Ra)>;

def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, (fneg FPR32:$Ra))),
          (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;

def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, (fneg FPR64:$Ra))),
          (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
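// E.g. with a=2, b=3, c=4: fma(-2, 3, -4) = -10 = -(2*3 + 4), exactly
// FNMADD's -(Rn*Rm + Ra).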

//===----------------------------------------------------------------------===//
// Floating point comparison instructions.
//===----------------------------------------------------------------------===//

defm FCMPE : FPComparison<1, "fcmpe", AArch64strict_fcmpe>;
defm FCMP  : FPComparison<0, "fcmp", AArch64any_fcmp>;

//===----------------------------------------------------------------------===//
// Floating point conditional comparison instructions.
//===----------------------------------------------------------------------===//

defm FCCMPE : FPCondComparison<1, "fccmpe">;
defm FCCMP  : FPCondComparison<0, "fccmp", AArch64fccmp>;

//===----------------------------------------------------------------------===//
// Floating point conditional select instruction.
//===----------------------------------------------------------------------===//

defm FCSEL : FPCondSelect<"fcsel">;

// CSEL instructions providing f128 types need to be handled by a
// pseudo-instruction since the eventual code will need to introduce basic
// blocks and control flow.
def F128CSEL : Pseudo<(outs FPR128:$Rd),
                      (ins FPR128:$Rn, FPR128:$Rm, ccode:$cond),
                      [(set (f128 FPR128:$Rd),
                            (AArch64csel FPR128:$Rn, FPR128:$Rm,
                                         (i32 imm:$cond), NZCV))]> {
  let Uses = [NZCV];
  let usesCustomInserter = 1;
  let hasNoSchedulingInfo = 1;
}
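// (The custom inserter later expands F128CSEL into a conditional branch over
// two blocks plus a PHI, since there is no f128 register-to-register
// conditional select instruction.)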

//===----------------------------------------------------------------------===//
// Instructions used for emitting unwind opcodes on ARM64 Windows.
//===----------------------------------------------------------------------===//
let isPseudo = 1 in {
  def SEH_StackAlloc : Pseudo<(outs), (ins i32imm:$size), []>, Sched<[]>;
  def SEH_SaveFPLR : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFPLR_X : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveReg_X : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveRegP : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveRegP_X : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFReg_X : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFRegP : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFRegP_X : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SetFP : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_AddFP : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
  def SEH_Nop : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_PrologEnd : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_EpilogStart : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_EpilogEnd : Pseudo<(outs), (ins), []>, Sched<[]>;
}

//===----------------------------------------------------------------------===//
// Pseudo instructions for Windows EH
//===----------------------------------------------------------------------===//
let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
    isCodeGenOnly = 1, isReturn = 1, isEHScopeReturn = 1, isPseudo = 1 in {
   def CLEANUPRET : Pseudo<(outs), (ins), [(cleanupret)]>, Sched<[]>;
   let usesCustomInserter = 1 in
     def CATCHRET : Pseudo<(outs), (ins am_brcond:$dst, am_brcond:$src), [(catchret bb:$dst, bb:$src)]>,
                    Sched<[]>;
}

// Pseudo instructions for homogeneous prolog/epilog
let isPseudo = 1 in {
  // Save CSRs in order, {FPOffset}
  def HOM_Prolog : Pseudo<(outs), (ins variable_ops), []>, Sched<[]>;
  // Restore CSRs in order
  def HOM_Epilog : Pseudo<(outs), (ins variable_ops), []>, Sched<[]>;
}

//===----------------------------------------------------------------------===//
// Floating point immediate move.
//===----------------------------------------------------------------------===//

let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
defm FMOV : FPMoveImmediate<"fmov">;
}

//===----------------------------------------------------------------------===//
// Advanced SIMD two vector instructions.
//===----------------------------------------------------------------------===//

defm UABDL   : SIMDLongThreeVectorBHSabdl<1, 0b0111, "uabdl",
                                          AArch64uabd>;
// Match UABDL in log2-shuffle patterns.
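// The xor/add forms below are the expanded abs idiom,
// abs(x) == (x + (x >>s 15)) ^ (x >>s 15) for i16 lanes, which DAG combines
// can produce in place of a plain (abs ...) node, so match them as well.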
def : Pat<(abs (v8i16 (sub (zext (v8i8 V64:$opA)),
                           (zext (v8i8 V64:$opB))))),
          (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
               (v8i16 (add (sub (zext (v8i8 V64:$opA)),
                                (zext (v8i8 V64:$opB))),
                           (AArch64vashr v8i16:$src, (i32 15))))),
          (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
def : Pat<(abs (v8i16 (sub (zext (extract_high_v16i8 V128:$opA)),
                           (zext (extract_high_v16i8 V128:$opB))))),
          (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
               (v8i16 (add (sub (zext (extract_high_v16i8 V128:$opA)),
                                (zext (extract_high_v16i8 V128:$opB))),
                           (AArch64vashr v8i16:$src, (i32 15))))),
          (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
def : Pat<(abs (v4i32 (sub (zext (v4i16 V64:$opA)),
                           (zext (v4i16 V64:$opB))))),
          (UABDLv4i16_v4i32 V64:$opA, V64:$opB)>;
def : Pat<(abs (v4i32 (sub (zext (extract_high_v8i16 V128:$opA)),
                           (zext (extract_high_v8i16 V128:$opB))))),
          (UABDLv8i16_v4i32 V128:$opA, V128:$opB)>;
def : Pat<(abs (v2i64 (sub (zext (v2i32 V64:$opA)),
                           (zext (v2i32 V64:$opB))))),
          (UABDLv2i32_v2i64 V64:$opA, V64:$opB)>;
def : Pat<(abs (v2i64 (sub (zext (extract_high_v4i32 V128:$opA)),
                           (zext (extract_high_v4i32 V128:$opB))))),
          (UABDLv4i32_v2i64 V128:$opA, V128:$opB)>;

defm ABS    : SIMDTwoVectorBHSD<0, 0b01011, "abs", abs>;
defm CLS    : SIMDTwoVectorBHS<0, 0b00100, "cls", int_aarch64_neon_cls>;
defm CLZ    : SIMDTwoVectorBHS<1, 0b00100, "clz", ctlz>;
defm CMEQ   : SIMDCmpTwoVector<0, 0b01001, "cmeq", AArch64cmeqz>;
defm CMGE   : SIMDCmpTwoVector<1, 0b01000, "cmge", AArch64cmgez>;
defm CMGT   : SIMDCmpTwoVector<0, 0b01000, "cmgt", AArch64cmgtz>;
defm CMLE   : SIMDCmpTwoVector<1, 0b01001, "cmle", AArch64cmlez>;
defm CMLT   : SIMDCmpTwoVector<0, 0b01010, "cmlt", AArch64cmltz>;
defm CNT    : SIMDTwoVectorB<0, 0b00, 0b00101, "cnt", ctpop>;
defm FABS   : SIMDTwoVectorFP<0, 1, 0b01111, "fabs", fabs>;

defm FCMEQ  : SIMDFPCmpTwoVector<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
defm FCMGE  : SIMDFPCmpTwoVector<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
defm FCMGT  : SIMDFPCmpTwoVector<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
defm FCMLE  : SIMDFPCmpTwoVector<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
defm FCMLT  : SIMDFPCmpTwoVector<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
defm FCVTAS : SIMDTwoVectorFPToInt<0,0,0b11100, "fcvtas",int_aarch64_neon_fcvtas>;
defm FCVTAU : SIMDTwoVectorFPToInt<1,0,0b11100, "fcvtau",int_aarch64_neon_fcvtau>;
defm FCVTL  : SIMDFPWidenTwoVector<0, 0, 0b10111, "fcvtl">;
def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (v4i16 V64:$Rn))),
          (FCVTLv4i16 V64:$Rn)>;
def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (extract_subvector (v8i16 V128:$Rn),
                                                                (i64 4)))),
          (FCVTLv8i16 V128:$Rn)>;
def : Pat<(v2f64 (fpextend (v2f32 V64:$Rn))), (FCVTLv2i32 V64:$Rn)>;

def : Pat<(v4f32 (fpextend (v4f16 V64:$Rn))), (FCVTLv4i16 V64:$Rn)>;

defm FCVTMS : SIMDTwoVectorFPToInt<0,0,0b11011, "fcvtms",int_aarch64_neon_fcvtms>;
defm FCVTMU : SIMDTwoVectorFPToInt<1,0,0b11011, "fcvtmu",int_aarch64_neon_fcvtmu>;
defm FCVTNS : SIMDTwoVectorFPToInt<0,0,0b11010, "fcvtns",int_aarch64_neon_fcvtns>;
defm FCVTNU : SIMDTwoVectorFPToInt<1,0,0b11010, "fcvtnu",int_aarch64_neon_fcvtnu>;
defm FCVTN  : SIMDFPNarrowTwoVector<0, 0, 0b10110, "fcvtn">;
def : Pat<(v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn))),
          (FCVTNv4i16 V128:$Rn)>;
def : Pat<(concat_vectors V64:$Rd,
                          (v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn)))),
          (FCVTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
def : Pat<(v2f32 (fpround (v2f64 V128:$Rn))), (FCVTNv2i32 V128:$Rn)>;
def : Pat<(v4f16 (fpround (v4f32 V128:$Rn))), (FCVTNv4i16 V128:$Rn)>;
def : Pat<(concat_vectors V64:$Rd, (v2f32 (fpround (v2f64 V128:$Rn)))),
          (FCVTNv4i32 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
defm FCVTPS : SIMDTwoVectorFPToInt<0,1,0b11010, "fcvtps",int_aarch64_neon_fcvtps>;
defm FCVTPU : SIMDTwoVectorFPToInt<1,1,0b11010, "fcvtpu",int_aarch64_neon_fcvtpu>;
defm FCVTXN : SIMDFPInexactCvtTwoVector<1, 0, 0b10110, "fcvtxn",
                                        int_aarch64_neon_fcvtxn>;
defm FCVTZS : SIMDTwoVectorFPToInt<0, 1, 0b11011, "fcvtzs", fp_to_sint>;
defm FCVTZU : SIMDTwoVectorFPToInt<1, 1, 0b11011, "fcvtzu", fp_to_uint>;

// AArch64's FCVT instructions saturate when out of range.
multiclass SIMDTwoVectorFPToIntSatPats<SDNode to_int_sat, string INST> {
  def : Pat<(v4i16 (to_int_sat v4f16:$Rn, i16)),
            (!cast<Instruction>(INST # v4f16) v4f16:$Rn)>;
  def : Pat<(v8i16 (to_int_sat v8f16:$Rn, i16)),
            (!cast<Instruction>(INST # v8f16) v8f16:$Rn)>;
  def : Pat<(v2i32 (to_int_sat v2f32:$Rn, i32)),
            (!cast<Instruction>(INST # v2f32) v2f32:$Rn)>;
  def : Pat<(v4i32 (to_int_sat v4f32:$Rn, i32)),
            (!cast<Instruction>(INST # v4f32) v4f32:$Rn)>;
  def : Pat<(v2i64 (to_int_sat v2f64:$Rn, i64)),
            (!cast<Instruction>(INST # v2f64) v2f64:$Rn)>;
}
defm : SIMDTwoVectorFPToIntSatPats<fp_to_sint_sat, "FCVTZS">;
defm : SIMDTwoVectorFPToIntSatPats<fp_to_uint_sat, "FCVTZU">;

def : Pat<(v4i16 (int_aarch64_neon_fcvtzs v4f16:$Rn)), (FCVTZSv4f16 $Rn)>;
def : Pat<(v8i16 (int_aarch64_neon_fcvtzs v8f16:$Rn)), (FCVTZSv8f16 $Rn)>;
def : Pat<(v2i32 (int_aarch64_neon_fcvtzs v2f32:$Rn)), (FCVTZSv2f32 $Rn)>;
def : Pat<(v4i32 (int_aarch64_neon_fcvtzs v4f32:$Rn)), (FCVTZSv4f32 $Rn)>;
def : Pat<(v2i64 (int_aarch64_neon_fcvtzs v2f64:$Rn)), (FCVTZSv2f64 $Rn)>;

def : Pat<(v4i16 (int_aarch64_neon_fcvtzu v4f16:$Rn)), (FCVTZUv4f16 $Rn)>;
def : Pat<(v8i16 (int_aarch64_neon_fcvtzu v8f16:$Rn)), (FCVTZUv8f16 $Rn)>;
def : Pat<(v2i32 (int_aarch64_neon_fcvtzu v2f32:$Rn)), (FCVTZUv2f32 $Rn)>;
def : Pat<(v4i32 (int_aarch64_neon_fcvtzu v4f32:$Rn)), (FCVTZUv4f32 $Rn)>;
def : Pat<(v2i64 (int_aarch64_neon_fcvtzu v2f64:$Rn)), (FCVTZUv2f64 $Rn)>;

defm FNEG   : SIMDTwoVectorFP<1, 1, 0b01111, "fneg", fneg>;
defm FRECPE : SIMDTwoVectorFP<0, 1, 0b11101, "frecpe", int_aarch64_neon_frecpe>;
defm FRINTA : SIMDTwoVectorFP<1, 0, 0b11000, "frinta", fround>;
defm FRINTI : SIMDTwoVectorFP<1, 1, 0b11001, "frinti", fnearbyint>;
defm FRINTM : SIMDTwoVectorFP<0, 0, 0b11001, "frintm", ffloor>;
defm FRINTN : SIMDTwoVectorFP<0, 0, 0b11000, "frintn", froundeven>;
defm FRINTP : SIMDTwoVectorFP<0, 1, 0b11000, "frintp", fceil>;
defm FRINTX : SIMDTwoVectorFP<1, 0, 0b11001, "frintx", frint>;
defm FRINTZ : SIMDTwoVectorFP<0, 1, 0b11001, "frintz", ftrunc>;

let Predicates = [HasFRInt3264] in {
  defm FRINT32Z : FRIntNNTVector<0, 0, "frint32z", int_aarch64_neon_frint32z>;
  defm FRINT64Z : FRIntNNTVector<0, 1, "frint64z", int_aarch64_neon_frint64z>;
  defm FRINT32X : FRIntNNTVector<1, 0, "frint32x", int_aarch64_neon_frint32x>;
  defm FRINT64X : FRIntNNTVector<1, 1, "frint64x", int_aarch64_neon_frint64x>;
} // HasFRInt3264

defm FRSQRTE: SIMDTwoVectorFP<1, 1, 0b11101, "frsqrte", int_aarch64_neon_frsqrte>;
defm FSQRT  : SIMDTwoVectorFP<1, 1, 0b11111, "fsqrt", fsqrt>;
defm NEG    : SIMDTwoVectorBHSD<1, 0b01011, "neg",
                                UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
defm NOT    : SIMDTwoVectorB<1, 0b00, 0b00101, "not", vnot>;
// Aliases for MVN -> NOT.
def : InstAlias<"mvn{ $Vd.8b, $Vn.8b|.8b $Vd, $Vn}",
                (NOTv8i8 V64:$Vd, V64:$Vn)>;
def : InstAlias<"mvn{ $Vd.16b, $Vn.16b|.16b $Vd, $Vn}",
                (NOTv16i8 V128:$Vd, V128:$Vn)>;

def : Pat<(vnot (v4i16 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
def : Pat<(vnot (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
def : Pat<(vnot (v2i32 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
def : Pat<(vnot (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
def : Pat<(vnot (v1i64 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
def : Pat<(vnot (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>;

defm RBIT   : SIMDTwoVectorB<1, 0b01, 0b00101, "rbit", bitreverse>;
defm REV16  : SIMDTwoVectorB<0, 0b00, 0b00001, "rev16", AArch64rev16>;
defm REV32  : SIMDTwoVectorBH<1, 0b00000, "rev32", AArch64rev32>;
defm REV64  : SIMDTwoVectorBHS<0, 0b00000, "rev64", AArch64rev64>;
defm SADALP : SIMDLongTwoVectorTied<0, 0b00110, "sadalp",
       BinOpFrag<(add node:$LHS, (int_aarch64_neon_saddlp node:$RHS))> >;
defm SADDLP : SIMDLongTwoVector<0, 0b00010, "saddlp", int_aarch64_neon_saddlp>;
defm SCVTF  : SIMDTwoVectorIntToFP<0, 0, 0b11101, "scvtf", sint_to_fp>;
defm SHLL   : SIMDVectorLShiftLongBySizeBHS;
defm SQABS  : SIMDTwoVectorBHSD<0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
defm SQNEG  : SIMDTwoVectorBHSD<1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
defm SQXTN  : SIMDMixedTwoVector<0, 0b10100, "sqxtn", int_aarch64_neon_sqxtn>;
defm SQXTUN : SIMDMixedTwoVector<1, 0b10010, "sqxtun", int_aarch64_neon_sqxtun>;
defm SUQADD : SIMDTwoVectorBHSDTied<0, 0b00011, "suqadd",int_aarch64_neon_suqadd>;
defm UADALP : SIMDLongTwoVectorTied<1, 0b00110, "uadalp",
       BinOpFrag<(add node:$LHS, (AArch64uaddlp node:$RHS))> >;
defm UADDLP : SIMDLongTwoVector<1, 0b00010, "uaddlp", AArch64uaddlp>;
defm UCVTF  : SIMDTwoVectorIntToFP<1, 0, 0b11101, "ucvtf", uint_to_fp>;
defm UQXTN  : SIMDMixedTwoVector<1, 0b10100, "uqxtn", int_aarch64_neon_uqxtn>;
defm URECPE : SIMDTwoVectorS<0, 1, 0b11100, "urecpe", int_aarch64_neon_urecpe>;
defm URSQRTE: SIMDTwoVectorS<1, 1, 0b11100, "ursqrte", int_aarch64_neon_ursqrte>;
defm USQADD : SIMDTwoVectorBHSDTied<1, 0b00011, "usqadd",int_aarch64_neon_usqadd>;
defm XTN    : SIMDMixedTwoVector<0, 0b10010, "xtn", trunc>;

def : Pat<(v4f16  (AArch64rev32 V64:$Rn)),  (REV32v4i16 V64:$Rn)>;
def : Pat<(v4f16  (AArch64rev64 V64:$Rn)),  (REV64v4i16 V64:$Rn)>;
def : Pat<(v4bf16 (AArch64rev32 V64:$Rn)),  (REV32v4i16 V64:$Rn)>;
def : Pat<(v4bf16 (AArch64rev64 V64:$Rn)),  (REV64v4i16 V64:$Rn)>;
def : Pat<(v8f16  (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>;
def : Pat<(v8f16  (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>;
def : Pat<(v8bf16 (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>;
def : Pat<(v8bf16 (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>;
def : Pat<(v2f32  (AArch64rev64 V64:$Rn)),  (REV64v2i32 V64:$Rn)>;
def : Pat<(v4f32  (AArch64rev64 V128:$Rn)), (REV64v4i32 V128:$Rn)>;

// Patterns for vector long shift (by element width). These need to match all
// three of zext, sext and anyext so it's easier to pull the patterns out of
// the definition.
multiclass SIMDVectorLShiftLongBySizeBHSPats<SDPatternOperator ext> {
  def : Pat<(AArch64vshl (v8i16 (ext (v8i8 V64:$Rn))), (i32 8)),
            (SHLLv8i8 V64:$Rn)>;
  def : Pat<(AArch64vshl (v8i16 (ext (extract_high_v16i8 V128:$Rn))), (i32 8)),
            (SHLLv16i8 V128:$Rn)>;
  def : Pat<(AArch64vshl (v4i32 (ext (v4i16 V64:$Rn))), (i32 16)),
            (SHLLv4i16 V64:$Rn)>;
  def : Pat<(AArch64vshl (v4i32 (ext (extract_high_v8i16 V128:$Rn))), (i32 16)),
            (SHLLv8i16 V128:$Rn)>;
  def : Pat<(AArch64vshl (v2i64 (ext (v2i32 V64:$Rn))), (i32 32)),
            (SHLLv2i32 V64:$Rn)>;
  def : Pat<(AArch64vshl (v2i64 (ext (extract_high_v4i32 V128:$Rn))), (i32 32)),
            (SHLLv4i32 V128:$Rn)>;
}

defm : SIMDVectorLShiftLongBySizeBHSPats<anyext>;
defm : SIMDVectorLShiftLongBySizeBHSPats<zext>;
defm : SIMDVectorLShiftLongBySizeBHSPats<sext>;

// Constant vector values, used in the S/UQXTN patterns below.
def VImmFF:   PatLeaf<(AArch64NvCast (v2i64 (AArch64movi_edit (i32 85))))>;
def VImmFFFF: PatLeaf<(AArch64NvCast (v2i64 (AArch64movi_edit (i32 51))))>;
def VImm7F:   PatLeaf<(AArch64movi_shift (i32 127), (i32 0))>;
def VImm80:   PatLeaf<(AArch64mvni_shift (i32 127), (i32 0))>;
def VImm7FFF: PatLeaf<(AArch64movi_msl (i32 127), (i32 264))>;
def VImm8000: PatLeaf<(AArch64mvni_msl (i32 127), (i32 264))>;
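// Roughly, these are the splatted saturation bounds: VImmFF/VImmFFFF give
// 255 per i16 lane and 65535 per i32 lane, VImm7F/VImm80 give 127/-128 per
// i16 lane, and VImm7FFF/VImm8000 give 32767/-32768 per i32 lane.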

// trunc(umin(X, 255)) -> UQXTN v8i8
def : Pat<(v8i8 (trunc (umin (v8i16 V128:$Vn), (v8i16 VImmFF)))),
          (UQXTNv8i8 V128:$Vn)>;
// trunc(umin(X, 65535)) -> UQXTN v4i16
def : Pat<(v4i16 (trunc (umin (v4i32 V128:$Vn), (v4i32 VImmFFFF)))),
          (UQXTNv4i16 V128:$Vn)>;
// trunc(smin(smax(X, -128), 127)) -> SQXTN
//  with reversed min/max
def : Pat<(v8i8 (trunc (smin (smax (v8i16 V128:$Vn), (v8i16 VImm80)),
                             (v8i16 VImm7F)))),
          (SQXTNv8i8 V128:$Vn)>;
def : Pat<(v8i8 (trunc (smax (smin (v8i16 V128:$Vn), (v8i16 VImm7F)),
                             (v8i16 VImm80)))),
          (SQXTNv8i8 V128:$Vn)>;
// trunc(smin(smax(X, -32768), 32767)) -> SQXTN
//  with reversed min/max
def : Pat<(v4i16 (trunc (smin (smax (v4i32 V128:$Vn), (v4i32 VImm8000)),
                              (v4i32 VImm7FFF)))),
          (SQXTNv4i16 V128:$Vn)>;
def : Pat<(v4i16 (trunc (smax (smin (v4i32 V128:$Vn), (v4i32 VImm7FFF)),
                              (v4i32 VImm8000)))),
          (SQXTNv4i16 V128:$Vn)>;
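
// For example (a sketch; %x is a hypothetical <8 x i16> value), the IR
//   %min = call <8 x i16> @llvm.umin.v8i16(<8 x i16> %x,
//                                          <8 x i16> <i16 255, ..., i16 255>)
//   %res = trunc <8 x i16> %min to <8 x i8>
// is selected by the first pattern above to "uqxtn v0.8b, v1.8h".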

//===----------------------------------------------------------------------===//
// Advanced SIMD three vector instructions.
//===----------------------------------------------------------------------===//

defm ADD     : SIMDThreeSameVector<0, 0b10000, "add", add>;
defm ADDP    : SIMDThreeSameVector<0, 0b10111, "addp", int_aarch64_neon_addp>;
defm CMEQ    : SIMDThreeSameVector<1, 0b10001, "cmeq", AArch64cmeq>;
defm CMGE    : SIMDThreeSameVector<0, 0b00111, "cmge", AArch64cmge>;
defm CMGT    : SIMDThreeSameVector<0, 0b00110, "cmgt", AArch64cmgt>;
defm CMHI    : SIMDThreeSameVector<1, 0b00110, "cmhi", AArch64cmhi>;
defm CMHS    : SIMDThreeSameVector<1, 0b00111, "cmhs", AArch64cmhs>;
defm CMTST   : SIMDThreeSameVector<0, 0b10001, "cmtst", AArch64cmtst>;
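// CMTST of a register with itself sets each lane to all-ones iff that lane is
// nonzero, which is exactly not(cmeqz):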
foreach VT = [ v8i8, v16i8, v4i16, v8i16, v2i32, v4i32, v2i64 ] in {
def : Pat<(vnot (AArch64cmeqz VT:$Rn)), (!cast<Instruction>("CMTST"#VT) VT:$Rn, VT:$Rn)>;
}
defm FABD    : SIMDThreeSameVectorFP<1,1,0b010,"fabd", int_aarch64_neon_fabd>;
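// FABD computes the absolute difference directly, so fold fabs(fsub a, b):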
let Predicates = [HasNEON] in {
foreach VT = [ v2f32, v4f32, v2f64 ] in
def : Pat<(fabs (fsub VT:$Rn, VT:$Rm)), (!cast<Instruction>("FABD"#VT) VT:$Rn, VT:$Rm)>;
}
let Predicates = [HasNEON, HasFullFP16] in {
foreach VT = [ v4f16, v8f16 ] in
def : Pat<(fabs (fsub VT:$Rn, VT:$Rm)), (!cast<Instruction>("FABD"#VT) VT:$Rn, VT:$Rm)>;
}
defm FACGE   : SIMDThreeSameVectorFPCmp<1,0,0b101,"facge",int_aarch64_neon_facge>;
defm FACGT   : SIMDThreeSameVectorFPCmp<1,1,0b101,"facgt",int_aarch64_neon_facgt>;
defm FADDP   : SIMDThreeSameVectorFP<1,0,0b010,"faddp",int_aarch64_neon_faddp>;
defm FADD    : SIMDThreeSameVectorFP<0,0,0b010,"fadd", fadd>;
defm FCMEQ   : SIMDThreeSameVectorFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
defm FCMGE   : SIMDThreeSameVectorFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
defm FCMGT   : SIMDThreeSameVectorFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
defm FDIV    : SIMDThreeSameVectorFP<1,0,0b111,"fdiv", fdiv>;
defm FMAXNMP : SIMDThreeSameVectorFP<1,0,0b000,"fmaxnmp", int_aarch64_neon_fmaxnmp>;
defm FMAXNM  : SIMDThreeSameVectorFP<0,0,0b000,"fmaxnm", fmaxnum>;
defm FMAXP   : SIMDThreeSameVectorFP<1,0,0b110,"fmaxp", int_aarch64_neon_fmaxp>;
defm FMAX    : SIMDThreeSameVectorFP<0,0,0b110,"fmax", fmaximum>;
defm FMINNMP : SIMDThreeSameVectorFP<1,1,0b000,"fminnmp", int_aarch64_neon_fminnmp>;
defm FMINNM  : SIMDThreeSameVectorFP<0,1,0b000,"fminnm", fminnum>;
defm FMINP   : SIMDThreeSameVectorFP<1,1,0b110,"fminp", int_aarch64_neon_fminp>;
defm FMIN    : SIMDThreeSameVectorFP<0,1,0b110,"fmin", fminimum>;

// NOTE: The operands of the PatFrag are reordered on FMLA/FMLS because the
// instruction expects the addend first, while the fma intrinsic puts it last.
defm FMLA     : SIMDThreeSameVectorFPTied<0, 0, 0b001, "fmla",
            TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
defm FMLS     : SIMDThreeSameVectorFPTied<0, 1, 0b001, "fmls",
            TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
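
// Illustration (a sketch): for vectors a, b and an accumulator c, the IR node
// "fma(a, b, c)" has the addend c last, but it is selected as "fmla c, a, b",
// where the tied destination register carries the addend.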

defm FMULX    : SIMDThreeSameVectorFP<0,0,0b011,"fmulx", int_aarch64_neon_fmulx>;
defm FMUL     : SIMDThreeSameVectorFP<1,0,0b011,"fmul", fmul>;
defm FRECPS   : SIMDThreeSameVectorFP<0,0,0b111,"frecps", int_aarch64_neon_frecps>;
defm FRSQRTS  : SIMDThreeSameVectorFP<0,1,0b111,"frsqrts", int_aarch64_neon_frsqrts>;
defm FSUB     : SIMDThreeSameVectorFP<0,1,0b010,"fsub", fsub>;

// MLA and MLS are generated by the MachineCombiner.
defm MLA      : SIMDThreeSameVectorBHSTied<0, 0b10010, "mla", null_frag>;
defm MLS      : SIMDThreeSameVectorBHSTied<1, 0b10010, "mls", null_frag>;

defm MUL      : SIMDThreeSameVectorBHS<0, 0b10011, "mul", mul>;
defm PMUL     : SIMDThreeSameVectorB<1, 0b10011, "pmul", int_aarch64_neon_pmul>;
defm SABA     : SIMDThreeSameVectorBHSTied<0, 0b01111, "saba",
      TriOpFrag<(add node:$LHS, (AArch64sabd node:$MHS, node:$RHS))> >;
defm SABD     : SIMDThreeSameVectorBHS<0,0b01110,"sabd", AArch64sabd>;
defm SHADD    : SIMDThreeSameVectorBHS<0,0b00000,"shadd", AArch64shadd>;
defm SHSUB    : SIMDThreeSameVectorBHS<0,0b00100,"shsub", int_aarch64_neon_shsub>;
defm SMAXP    : SIMDThreeSameVectorBHS<0,0b10100,"smaxp", int_aarch64_neon_smaxp>;
defm SMAX     : SIMDThreeSameVectorBHS<0,0b01100,"smax", smax>;
defm SMINP    : SIMDThreeSameVectorBHS<0,0b10101,"sminp", int_aarch64_neon_sminp>;
defm SMIN     : SIMDThreeSameVectorBHS<0,0b01101,"smin", smin>;
defm SQADD    : SIMDThreeSameVector<0,0b00001,"sqadd", int_aarch64_neon_sqadd>;
defm SQDMULH  : SIMDThreeSameVectorHS<0,0b10110,"sqdmulh",int_aarch64_neon_sqdmulh>;
defm SQRDMULH : SIMDThreeSameVectorHS<1,0b10110,"sqrdmulh",int_aarch64_neon_sqrdmulh>;
defm SQRSHL   : SIMDThreeSameVector<0,0b01011,"sqrshl", int_aarch64_neon_sqrshl>;
defm SQSHL    : SIMDThreeSameVector<0,0b01001,"sqshl", int_aarch64_neon_sqshl>;
defm SQSUB    : SIMDThreeSameVector<0,0b00101,"sqsub", int_aarch64_neon_sqsub>;
defm SRHADD   : SIMDThreeSameVectorBHS<0,0b00010,"srhadd", AArch64srhadd>;
defm SRSHL    : SIMDThreeSameVector<0,0b01010,"srshl", int_aarch64_neon_srshl>;
defm SSHL     : SIMDThreeSameVector<0,0b01000,"sshl", int_aarch64_neon_sshl>;
defm SUB      : SIMDThreeSameVector<1,0b10000,"sub", sub>;
defm UABA     : SIMDThreeSameVectorBHSTied<1, 0b01111, "uaba",
      TriOpFrag<(add node:$LHS, (AArch64uabd node:$MHS, node:$RHS))> >;
defm UABD     : SIMDThreeSameVectorBHS<1,0b01110,"uabd", AArch64uabd>;
defm UHADD    : SIMDThreeSameVectorBHS<1,0b00000,"uhadd", AArch64uhadd>;
defm UHSUB    : SIMDThreeSameVectorBHS<1,0b00100,"uhsub", int_aarch64_neon_uhsub>;
defm UMAXP    : SIMDThreeSameVectorBHS<1,0b10100,"umaxp", int_aarch64_neon_umaxp>;
defm UMAX     : SIMDThreeSameVectorBHS<1,0b01100,"umax", umax>;
defm UMINP    : SIMDThreeSameVectorBHS<1,0b10101,"uminp", int_aarch64_neon_uminp>;
defm UMIN     : SIMDThreeSameVectorBHS<1,0b01101,"umin", umin>;
defm UQADD    : SIMDThreeSameVector<1,0b00001,"uqadd", int_aarch64_neon_uqadd>;
defm UQRSHL   : SIMDThreeSameVector<1,0b01011,"uqrshl", int_aarch64_neon_uqrshl>;
defm UQSHL    : SIMDThreeSameVector<1,0b01001,"uqshl", int_aarch64_neon_uqshl>;
defm UQSUB    : SIMDThreeSameVector<1,0b00101,"uqsub", int_aarch64_neon_uqsub>;
defm URHADD   : SIMDThreeSameVectorBHS<1,0b00010,"urhadd", AArch64urhadd>;
defm URSHL    : SIMDThreeSameVector<1,0b01010,"urshl", int_aarch64_neon_urshl>;
defm USHL     : SIMDThreeSameVector<1,0b01000,"ushl", int_aarch64_neon_ushl>;
defm SQRDMLAH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10000,"sqrdmlah",
                                                  int_aarch64_neon_sqadd>;
defm SQRDMLSH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10001,"sqrdmlsh",
                                                    int_aarch64_neon_sqsub>;

// Extra saturation patterns, beyond the intrinsic matches above.
defm : SIMDThreeSameVectorExtraPatterns<"SQADD", saddsat>;
defm : SIMDThreeSameVectorExtraPatterns<"UQADD", uaddsat>;
defm : SIMDThreeSameVectorExtraPatterns<"SQSUB", ssubsat>;
defm : SIMDThreeSameVectorExtraPatterns<"UQSUB", usubsat>;
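
// For example (a sketch), "call <8 x i16> @llvm.sadd.sat.v8i16(%a, %b)" in IR
// becomes a saddsat node and is selected to "sqadd v0.8h, v1.8h, v2.8h".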

defm AND : SIMDLogicalThreeVector<0, 0b00, "and", and>;
defm BIC : SIMDLogicalThreeVector<0, 0b01, "bic",
                                  BinOpFrag<(and node:$LHS, (vnot node:$RHS))> >;
defm EOR : SIMDLogicalThreeVector<1, 0b00, "eor", xor>;
defm ORN : SIMDLogicalThreeVector<0, 0b11, "orn",
                                  BinOpFrag<(or node:$LHS, (vnot node:$RHS))> >;
defm ORR : SIMDLogicalThreeVector<0, 0b10, "orr", or>;

// Pseudo bitwise select pattern BSP.
// It is expanded into BSL/BIT/BIF after register allocation.
defm BSP : SIMDLogicalThreeVectorPseudo<TriOpFrag<(or (and node:$LHS, node:$MHS),
                                                      (and (vnot node:$LHS), node:$RHS))>>;
defm BSL : SIMDLogicalThreeVectorTied<1, 0b01, "bsl">;
defm BIT : SIMDLogicalThreeVectorTied<1, 0b10, "bit", AArch64bit>;
defm BIF : SIMDLogicalThreeVectorTied<1, 0b11, "bif">;
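
// The three tied forms differ only in which register holds the select mask:
//   bsl Vd, Vn, Vm : Vd = (Vd & Vn) | (~Vd & Vm)   (mask in Vd)
//   bit Vd, Vn, Vm : Vd = (Vm & Vn) | (~Vm & Vd)   (insert if true)
//   bif Vd, Vn, Vm : Vd = (~Vm & Vn) | (Vm & Vd)   (insert if false)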

def : Pat<(AArch64bsp (v8i8 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsp (v4i16 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsp (v2i32 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsp (v1i64 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;

def : Pat<(AArch64bsp (v16i8 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsp (v8i16 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsp (v4i32 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsp (v2i64 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;

def : InstAlias<"mov{\t$dst.16b, $src.16b|.16b\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 1>;
def : InstAlias<"mov{\t$dst.8h, $src.8h|.8h\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
def : InstAlias<"mov{\t$dst.4s, $src.4s|.4s\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
def : InstAlias<"mov{\t$dst.2d, $src.2d|.2d\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;

def : InstAlias<"mov{\t$dst.8b, $src.8b|.8b\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 1>;
def : InstAlias<"mov{\t$dst.4h, $src.4h|.4h\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
def : InstAlias<"mov{\t$dst.2s, $src.2s|.2s\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
def : InstAlias<"mov{\t$dst.1d, $src.1d|.1d\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;

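// There are no distinct "lower" comparison encodings; the cmls/cmlo/cmle/cmlt
// aliases below are implemented by swapping the source operands of
// cmhs/cmhi/cmge/cmgt respectively.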
def : InstAlias<"{cmls\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmls.8b\t$dst, $src1, $src2}",
                (CMHSv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmls\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmls.16b\t$dst, $src1, $src2}",
                (CMHSv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmls\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmls.4h\t$dst, $src1, $src2}",
                (CMHSv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmls\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmls.8h\t$dst, $src1, $src2}",
                (CMHSv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmls\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmls.2s\t$dst, $src1, $src2}",
                (CMHSv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmls\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmls.4s\t$dst, $src1, $src2}",
                (CMHSv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmls\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmls.2d\t$dst, $src1, $src2}",
                (CMHSv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

def : InstAlias<"{cmlo\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmlo.8b\t$dst, $src1, $src2}",
                (CMHIv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmlo.16b\t$dst, $src1, $src2}",
                (CMHIv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmlo.4h\t$dst, $src1, $src2}",
                (CMHIv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmlo.8h\t$dst, $src1, $src2}",
                (CMHIv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmlo.2s\t$dst, $src1, $src2}",
                (CMHIv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmlo.4s\t$dst, $src1, $src2}",
                (CMHIv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmlo.2d\t$dst, $src1, $src2}",
                (CMHIv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

def : InstAlias<"{cmle\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmle.8b\t$dst, $src1, $src2}",
                (CMGEv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmle\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmle.16b\t$dst, $src1, $src2}",
                (CMGEv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmle.4h\t$dst, $src1, $src2}",
                (CMGEv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmle\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmle.8h\t$dst, $src1, $src2}",
                (CMGEv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmle.2s\t$dst, $src1, $src2}",
                (CMGEv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmle\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmle.4s\t$dst, $src1, $src2}",
                (CMGEv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmle.2d\t$dst, $src1, $src2}",
                (CMGEv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

def : InstAlias<"{cmlt\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmlt.8b\t$dst, $src1, $src2}",
                (CMGTv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmlt.16b\t$dst, $src1, $src2}",
                (CMGTv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmlt.4h\t$dst, $src1, $src2}",
                (CMGTv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmlt.8h\t$dst, $src1, $src2}",
                (CMGTv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmlt.2s\t$dst, $src1, $src2}",
                (CMGTv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmlt.4s\t$dst, $src1, $src2}",
                (CMGTv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmlt.2d\t$dst, $src1, $src2}",
                (CMGTv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{fcmle\t$dst.4h, $src1.4h, $src2.4h" #
                "|fcmle.4h\t$dst, $src1, $src2}",
                (FCMGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.8h, $src1.8h, $src2.8h" #
                "|fcmle.8h\t$dst, $src1, $src2}",
                (FCMGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{fcmle\t$dst.2s, $src1.2s, $src2.2s" #
                "|fcmle.2s\t$dst, $src1, $src2}",
                (FCMGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.4s, $src1.4s, $src2.4s" #
                "|fcmle.4s\t$dst, $src1, $src2}",
                (FCMGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.2d, $src1.2d, $src2.2d" #
                "|fcmle.2d\t$dst, $src1, $src2}",
                (FCMGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{fcmlt\t$dst.4h, $src1.4h, $src2.4h" #
                "|fcmlt.4h\t$dst, $src1, $src2}",
                (FCMGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.8h, $src1.8h, $src2.8h" #
                "|fcmlt.8h\t$dst, $src1, $src2}",
                (FCMGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{fcmlt\t$dst.2s, $src1.2s, $src2.2s" #
                "|fcmlt.2s\t$dst, $src1, $src2}",
                (FCMGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.4s, $src1.4s, $src2.4s" #
                "|fcmlt.4s\t$dst, $src1, $src2}",
                (FCMGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.2d, $src1.2d, $src2.2d" #
                "|fcmlt.2d\t$dst, $src1, $src2}",
                (FCMGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{facle\t$dst.4h, $src1.4h, $src2.4h" #
                "|facle.4h\t$dst, $src1, $src2}",
                (FACGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{facle\t$dst.8h, $src1.8h, $src2.8h" #
                "|facle.8h\t$dst, $src1, $src2}",
                (FACGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{facle\t$dst.2s, $src1.2s, $src2.2s" #
                "|facle.2s\t$dst, $src1, $src2}",
                (FACGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{facle\t$dst.4s, $src1.4s, $src2.4s" #
                "|facle.4s\t$dst, $src1, $src2}",
                (FACGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{facle\t$dst.2d, $src1.2d, $src2.2d" #
                "|facle.2d\t$dst, $src1, $src2}",
                (FACGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{faclt\t$dst.4h, $src1.4h, $src2.4h" #
                "|faclt.4h\t$dst, $src1, $src2}",
                (FACGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{faclt\t$dst.8h, $src1.8h, $src2.8h" #
                "|faclt.8h\t$dst, $src1, $src2}",
                (FACGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{faclt\t$dst.2s, $src1.2s, $src2.2s" #
                "|faclt.2s\t$dst, $src1, $src2}",
                (FACGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{faclt\t$dst.4s, $src1.4s, $src2.4s" #
                "|faclt.4s\t$dst, $src1, $src2}",
                (FACGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{faclt\t$dst.2d, $src1.2d, $src2.2d" #
                "|faclt.2d\t$dst, $src1, $src2}",
                (FACGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

//===----------------------------------------------------------------------===//
// Advanced SIMD three scalar instructions.
//===----------------------------------------------------------------------===//

defm ADD      : SIMDThreeScalarD<0, 0b10000, "add", add>;
defm CMEQ     : SIMDThreeScalarD<1, 0b10001, "cmeq", AArch64cmeq>;
defm CMGE     : SIMDThreeScalarD<0, 0b00111, "cmge", AArch64cmge>;
defm CMGT     : SIMDThreeScalarD<0, 0b00110, "cmgt", AArch64cmgt>;
defm CMHI     : SIMDThreeScalarD<1, 0b00110, "cmhi", AArch64cmhi>;
defm CMHS     : SIMDThreeScalarD<1, 0b00111, "cmhs", AArch64cmhs>;
defm CMTST    : SIMDThreeScalarD<0, 0b10001, "cmtst", AArch64cmtst>;
defm FABD     : SIMDFPThreeScalar<1, 1, 0b010, "fabd", int_aarch64_sisd_fabd>;
def : Pat<(v1f64 (int_aarch64_neon_fabd (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FABD64 FPR64:$Rn, FPR64:$Rm)>;
let Predicates = [HasFullFP16] in {
def : Pat<(fabs (fsub f16:$Rn, f16:$Rm)), (FABD16 f16:$Rn, f16:$Rm)>;
}
def : Pat<(fabs (fsub f32:$Rn, f32:$Rm)), (FABD32 f32:$Rn, f32:$Rm)>;
def : Pat<(fabs (fsub f64:$Rn, f64:$Rm)), (FABD64 f64:$Rn, f64:$Rm)>;
defm FACGE    : SIMDThreeScalarFPCmp<1, 0, 0b101, "facge",
                                     int_aarch64_neon_facge>;
defm FACGT    : SIMDThreeScalarFPCmp<1, 1, 0b101, "facgt",
                                     int_aarch64_neon_facgt>;
defm FCMEQ    : SIMDThreeScalarFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
defm FCMGE    : SIMDThreeScalarFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
defm FCMGT    : SIMDThreeScalarFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
defm FMULX    : SIMDFPThreeScalar<0, 0, 0b011, "fmulx", int_aarch64_neon_fmulx, HasNEONorStreamingSVE>;
defm FRECPS   : SIMDFPThreeScalar<0, 0, 0b111, "frecps", int_aarch64_neon_frecps, HasNEONorStreamingSVE>;
defm FRSQRTS  : SIMDFPThreeScalar<0, 1, 0b111, "frsqrts", int_aarch64_neon_frsqrts, HasNEONorStreamingSVE>;
defm SQADD    : SIMDThreeScalarBHSD<0, 0b00001, "sqadd", int_aarch64_neon_sqadd>;
defm SQDMULH  : SIMDThreeScalarHS<  0, 0b10110, "sqdmulh", int_aarch64_neon_sqdmulh>;
defm SQRDMULH : SIMDThreeScalarHS<  1, 0b10110, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
defm SQRSHL   : SIMDThreeScalarBHSD<0, 0b01011, "sqrshl",int_aarch64_neon_sqrshl>;
defm SQSHL    : SIMDThreeScalarBHSD<0, 0b01001, "sqshl", int_aarch64_neon_sqshl>;
defm SQSUB    : SIMDThreeScalarBHSD<0, 0b00101, "sqsub", int_aarch64_neon_sqsub>;
defm SRSHL    : SIMDThreeScalarD<   0, 0b01010, "srshl", int_aarch64_neon_srshl>;
defm SSHL     : SIMDThreeScalarD<   0, 0b01000, "sshl", int_aarch64_neon_sshl>;
defm SUB      : SIMDThreeScalarD<   1, 0b10000, "sub", sub>;
defm UQADD    : SIMDThreeScalarBHSD<1, 0b00001, "uqadd", int_aarch64_neon_uqadd>;
defm UQRSHL   : SIMDThreeScalarBHSD<1, 0b01011, "uqrshl",int_aarch64_neon_uqrshl>;
defm UQSHL    : SIMDThreeScalarBHSD<1, 0b01001, "uqshl", int_aarch64_neon_uqshl>;
defm UQSUB    : SIMDThreeScalarBHSD<1, 0b00101, "uqsub", int_aarch64_neon_uqsub>;
defm URSHL    : SIMDThreeScalarD<   1, 0b01010, "urshl", int_aarch64_neon_urshl>;
defm USHL     : SIMDThreeScalarD<   1, 0b01000, "ushl", int_aarch64_neon_ushl>;
let Predicates = [HasRDM] in {
  defm SQRDMLAH : SIMDThreeScalarHSTied<1, 0, 0b10000, "sqrdmlah">;
  defm SQRDMLSH : SIMDThreeScalarHSTied<1, 0, 0b10001, "sqrdmlsh">;
  def : Pat<(i32 (int_aarch64_neon_sqadd
                   (i32 FPR32:$Rd),
                   (i32 (int_aarch64_neon_sqrdmulh (i32 FPR32:$Rn),
                                                   (i32 FPR32:$Rm))))),
            (SQRDMLAHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
  def : Pat<(i32 (int_aarch64_neon_sqsub
                   (i32 FPR32:$Rd),
                   (i32 (int_aarch64_neon_sqrdmulh (i32 FPR32:$Rn),
                                                   (i32 FPR32:$Rm))))),
            (SQRDMLSHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
}

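// As above, the scalar "lower" comparisons are aliases that swap the source
// operands of the corresponding "greater" instructions.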
def : InstAlias<"cmls $dst, $src1, $src2",
                (CMHSv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmle $dst, $src1, $src2",
                (CMGEv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmlo $dst, $src1, $src2",
                (CMHIv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmlt $dst, $src1, $src2",
                (CMGTv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"fcmle $dst, $src1, $src2",
                (FCMGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"fcmle $dst, $src1, $src2",
                (FCMGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"fcmlt $dst, $src1, $src2",
                (FCMGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"fcmlt $dst, $src1, $src2",
                (FCMGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"facle $dst, $src1, $src2",
                (FACGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"facle $dst, $src1, $src2",
                (FACGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"faclt $dst, $src1, $src2",
                (FACGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"faclt $dst, $src1, $src2",
                (FACGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;

//===----------------------------------------------------------------------===//
// Advanced SIMD three scalar instructions (mixed operands).
//===----------------------------------------------------------------------===//
defm SQDMULL  : SIMDThreeScalarMixedHS<0, 0b11010, "sqdmull",
                                       int_aarch64_neon_sqdmulls_scalar>;
defm SQDMLAL  : SIMDThreeScalarMixedTiedHS<0, 0b10010, "sqdmlal">;
defm SQDMLSL  : SIMDThreeScalarMixedTiedHS<0, 0b10110, "sqdmlsl">;

def : Pat<(i64 (int_aarch64_neon_sqadd (i64 FPR64:$Rd),
                   (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                                        (i32 FPR32:$Rm))))),
          (SQDMLALi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_sqsub (i64 FPR64:$Rd),
                   (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                                        (i32 FPR32:$Rm))))),
          (SQDMLSLi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;

//===----------------------------------------------------------------------===//
// Advanced SIMD two scalar instructions.
//===----------------------------------------------------------------------===//

defm ABS    : SIMDTwoScalarD<    0, 0b01011, "abs", abs>;
defm CMEQ   : SIMDCmpTwoScalarD< 0, 0b01001, "cmeq", AArch64cmeqz>;
defm CMGE   : SIMDCmpTwoScalarD< 1, 0b01000, "cmge", AArch64cmgez>;
defm CMGT   : SIMDCmpTwoScalarD< 0, 0b01000, "cmgt", AArch64cmgtz>;
defm CMLE   : SIMDCmpTwoScalarD< 1, 0b01001, "cmle", AArch64cmlez>;
defm CMLT   : SIMDCmpTwoScalarD< 0, 0b01010, "cmlt", AArch64cmltz>;
defm FCMEQ  : SIMDFPCmpTwoScalar<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
defm FCMGE  : SIMDFPCmpTwoScalar<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
defm FCMGT  : SIMDFPCmpTwoScalar<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
defm FCMLE  : SIMDFPCmpTwoScalar<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
defm FCMLT  : SIMDFPCmpTwoScalar<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
defm FCVTAS : SIMDFPTwoScalar<   0, 0, 0b11100, "fcvtas">;
defm FCVTAU : SIMDFPTwoScalar<   1, 0, 0b11100, "fcvtau">;
defm FCVTMS : SIMDFPTwoScalar<   0, 0, 0b11011, "fcvtms">;
defm FCVTMU : SIMDFPTwoScalar<   1, 0, 0b11011, "fcvtmu">;
defm FCVTNS : SIMDFPTwoScalar<   0, 0, 0b11010, "fcvtns">;
defm FCVTNU : SIMDFPTwoScalar<   1, 0, 0b11010, "fcvtnu">;
defm FCVTPS : SIMDFPTwoScalar<   0, 1, 0b11010, "fcvtps">;
defm FCVTPU : SIMDFPTwoScalar<   1, 1, 0b11010, "fcvtpu">;
def  FCVTXNv1i64 : SIMDInexactCvtTwoScalar<0b10110, "fcvtxn">;
defm FCVTZS : SIMDFPTwoScalar<   0, 1, 0b11011, "fcvtzs">;
defm FCVTZU : SIMDFPTwoScalar<   1, 1, 0b11011, "fcvtzu">;
defm FRECPE : SIMDFPTwoScalar<   0, 1, 0b11101, "frecpe", HasNEONorStreamingSVE>;
defm FRECPX : SIMDFPTwoScalar<   0, 1, 0b11111, "frecpx", HasNEONorStreamingSVE>;
defm FRSQRTE : SIMDFPTwoScalar<  1, 1, 0b11101, "frsqrte", HasNEONorStreamingSVE>;
defm NEG    : SIMDTwoScalarD<    1, 0b01011, "neg",
                                 UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
defm SCVTF  : SIMDFPTwoScalarCVT<   0, 0, 0b11101, "scvtf", AArch64sitof>;
defm SQABS  : SIMDTwoScalarBHSD< 0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
defm SQNEG  : SIMDTwoScalarBHSD< 1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
defm SQXTN  : SIMDTwoScalarMixedBHS< 0, 0b10100, "sqxtn", int_aarch64_neon_scalar_sqxtn>;
defm SQXTUN : SIMDTwoScalarMixedBHS< 1, 0b10010, "sqxtun", int_aarch64_neon_scalar_sqxtun>;
defm SUQADD : SIMDTwoScalarBHSDTied< 0, 0b00011, "suqadd",
                                     int_aarch64_neon_suqadd>;
defm UCVTF  : SIMDFPTwoScalarCVT<   1, 0, 0b11101, "ucvtf", AArch64uitof>;
defm UQXTN  : SIMDTwoScalarMixedBHS<1, 0b10100, "uqxtn", int_aarch64_neon_scalar_uqxtn>;
defm USQADD : SIMDTwoScalarBHSDTied< 1, 0b00011, "usqadd",
                                    int_aarch64_neon_usqadd>;

def : Pat<(v1i64 (int_aarch64_neon_fcvtas (v1f64 FPR64:$Rn))),
          (FCVTASv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtau (v1f64 FPR64:$Rn))),
          (FCVTAUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtms (v1f64 FPR64:$Rn))),
          (FCVTMSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtmu (v1f64 FPR64:$Rn))),
          (FCVTMUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtns (v1f64 FPR64:$Rn))),
          (FCVTNSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtnu (v1f64 FPR64:$Rn))),
          (FCVTNUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtps (v1f64 FPR64:$Rn))),
          (FCVTPSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtpu (v1f64 FPR64:$Rn))),
          (FCVTPUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtzs (v1f64 FPR64:$Rn))),
          (FCVTZSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtzu (v1f64 FPR64:$Rn))),
          (FCVTZUv1i64 FPR64:$Rn)>;

def : Pat<(f16 (int_aarch64_neon_frecpe (f16 FPR16:$Rn))),
          (FRECPEv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frecpe (f32 FPR32:$Rn))),
          (FRECPEv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frecpe (f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (int_aarch64_neon_frecpe (v1f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;

def : Pat<(f32 (AArch64frecpe (f32 FPR32:$Rn))),
          (FRECPEv1i32 FPR32:$Rn)>;
def : Pat<(v2f32 (AArch64frecpe (v2f32 V64:$Rn))),
          (FRECPEv2f32 V64:$Rn)>;
def : Pat<(v4f32 (AArch64frecpe (v4f32 FPR128:$Rn))),
          (FRECPEv4f32 FPR128:$Rn)>;
def : Pat<(f64 (AArch64frecpe (f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (AArch64frecpe (v1f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v2f64 (AArch64frecpe (v2f64 FPR128:$Rn))),
          (FRECPEv2f64 FPR128:$Rn)>;

def : Pat<(f32 (AArch64frecps (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
          (FRECPS32 FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(v2f32 (AArch64frecps (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
          (FRECPSv2f32 V64:$Rn, V64:$Rm)>;
def : Pat<(v4f32 (AArch64frecps (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
          (FRECPSv4f32 FPR128:$Rn, FPR128:$Rm)>;
def : Pat<(f64 (AArch64frecps (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
          (FRECPS64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v2f64 (AArch64frecps (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
          (FRECPSv2f64 FPR128:$Rn, FPR128:$Rm)>;

def : Pat<(f16 (int_aarch64_neon_frecpx (f16 FPR16:$Rn))),
          (FRECPXv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frecpx (f32 FPR32:$Rn))),
          (FRECPXv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frecpx (f64 FPR64:$Rn))),
          (FRECPXv1i64 FPR64:$Rn)>;

def : Pat<(f16 (int_aarch64_neon_frsqrte (f16 FPR16:$Rn))),
          (FRSQRTEv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frsqrte (f32 FPR32:$Rn))),
          (FRSQRTEv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frsqrte (f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (int_aarch64_neon_frsqrte (v1f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;

def : Pat<(f32 (AArch64frsqrte (f32 FPR32:$Rn))),
          (FRSQRTEv1i32 FPR32:$Rn)>;
def : Pat<(v2f32 (AArch64frsqrte (v2f32 V64:$Rn))),
          (FRSQRTEv2f32 V64:$Rn)>;
def : Pat<(v4f32 (AArch64frsqrte (v4f32 FPR128:$Rn))),
          (FRSQRTEv4f32 FPR128:$Rn)>;
def : Pat<(f64 (AArch64frsqrte (f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (AArch64frsqrte (v1f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v2f64 (AArch64frsqrte (v2f64 FPR128:$Rn))),
          (FRSQRTEv2f64 FPR128:$Rn)>;

def : Pat<(f32 (AArch64frsqrts (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
          (FRSQRTS32 FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(v2f32 (AArch64frsqrts (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
          (FRSQRTSv2f32 V64:$Rn, V64:$Rm)>;
def : Pat<(v4f32 (AArch64frsqrts (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
          (FRSQRTSv4f32 FPR128:$Rn, FPR128:$Rm)>;
def : Pat<(f64 (AArch64frsqrts (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
          (FRSQRTS64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v2f64 (AArch64frsqrts (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
          (FRSQRTSv2f64 FPR128:$Rn, FPR128:$Rm)>;

// Some float -> int -> float conversion patterns where we want to keep the
// intermediate integer value in an FP register, using the corresponding NEON
// instructions, to avoid the more costly int <-> FP register transfers.
let Predicates = [HasNEON] in {
def : Pat<(f64 (sint_to_fp (i64 (fp_to_sint f64:$Rn)))),
          (SCVTFv1i64 (i64 (FCVTZSv1i64 f64:$Rn)))>;
def : Pat<(f32 (sint_to_fp (i32 (fp_to_sint f32:$Rn)))),
          (SCVTFv1i32 (i32 (FCVTZSv1i32 f32:$Rn)))>;
def : Pat<(f64 (uint_to_fp (i64 (fp_to_uint f64:$Rn)))),
          (UCVTFv1i64 (i64 (FCVTZUv1i64 f64:$Rn)))>;
def : Pat<(f32 (uint_to_fp (i32 (fp_to_uint f32:$Rn)))),
          (UCVTFv1i32 (i32 (FCVTZUv1i32 f32:$Rn)))>;

let Predicates = [HasFullFP16] in {
def : Pat<(f16 (sint_to_fp (i32 (fp_to_sint f16:$Rn)))),
          (SCVTFv1i16 (f16 (FCVTZSv1f16 f16:$Rn)))>;
def : Pat<(f16 (uint_to_fp (i32 (fp_to_uint f16:$Rn)))),
          (UCVTFv1i16 (f16 (FCVTZUv1f16 f16:$Rn)))>;
}
}
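
// For example (a sketch), "(double)(int64_t)x" with x in d0 is selected to
//   fcvtzs d0, d0
//   scvtf  d0, d0
// with no round trip through a general-purpose register.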

// If an integer is about to be converted to a floating-point value,
// just load it on the floating-point unit.
// Here are the patterns for 8-bit and 16-bit integers to float.
// 8-bit -> float.
multiclass UIntToFPROLoadPat<ValueType DstTy, ValueType SrcTy,
                             SDPatternOperator loadop, Instruction UCVTF,
                             ROAddrMode ro, Instruction LDRW, Instruction LDRX,
                             SubRegIndex sub> {
  def : Pat<(DstTy (uint_to_fp (SrcTy
                     (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm,
                                      ro.Wext:$extend))))),
           (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
                                 (LDRW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
                                 sub))>;

  def : Pat<(DstTy (uint_to_fp (SrcTy
                     (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm,
                                      ro.Xext:$extend))))),
           (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
                                 (LDRX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
                                 sub))>;
}

defm : UIntToFPROLoadPat<f32, i32, zextloadi8,
                         UCVTFv1i32, ro8, LDRBroW, LDRBroX, bsub>;
def : Pat <(f32 (uint_to_fp (i32
               (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
def : Pat <(f32 (uint_to_fp (i32
                     (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
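// For example (a sketch), "(float)(uint8_t)mem[i]" can be selected to
//   ldr   b0, [x0, x1]
//   ucvtf s0, s0
// since FP loads zero the rest of the vector register, so s0 already holds
// the zero-extended byte.
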
// 16-bit -> float.
defm : UIntToFPROLoadPat<f32, i32, zextloadi16,
                         UCVTFv1i32, ro16, LDRHroW, LDRHroX, hsub>;
def : Pat <(f32 (uint_to_fp (i32
                  (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
def : Pat <(f32 (uint_to_fp (i32
                  (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
// 32-bit loads are handled in the target-specific DAG combine
// performIntToFpCombine.
// 64-bit integer to 32-bit floating point is not possible with UCVTF on
// floating-point registers (source and destination must have the same size).

// Here are the patterns for 8-, 16-, 32-, and 64-bit integers to double.
// 8-bit -> double.
defm : UIntToFPROLoadPat<f64, i32, zextloadi8,
                         UCVTFv1i64, ro8, LDRBroW, LDRBroX, bsub>;
def : Pat <(f64 (uint_to_fp (i32
                    (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
def : Pat <(f64 (uint_to_fp (i32
                  (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
// 16-bit -> double.
defm : UIntToFPROLoadPat<f64, i32, zextloadi16,
                         UCVTFv1i64, ro16, LDRHroW, LDRHroX, hsub>;
def : Pat <(f64 (uint_to_fp (i32
                  (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
def : Pat <(f64 (uint_to_fp (i32
                  (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
// 32-bit -> double.
defm : UIntToFPROLoadPat<f64, i32, load,
                         UCVTFv1i64, ro32, LDRSroW, LDRSroX, ssub>;
def : Pat <(f64 (uint_to_fp (i32
                  (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub))>;
def : Pat <(f64 (uint_to_fp (i32
                  (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURSi GPR64sp:$Rn, simm9:$offset), ssub))>;
// 64-bit -> double is handled in the target-specific DAG combine
// performIntToFpCombine.

//===----------------------------------------------------------------------===//
// Advanced SIMD three different-sized vector instructions.
//===----------------------------------------------------------------------===//

defm ADDHN  : SIMDNarrowThreeVectorBHS<0,0b0100,"addhn", int_aarch64_neon_addhn>;
defm SUBHN  : SIMDNarrowThreeVectorBHS<0,0b0110,"subhn", int_aarch64_neon_subhn>;
defm RADDHN : SIMDNarrowThreeVectorBHS<1,0b0100,"raddhn",int_aarch64_neon_raddhn>;
defm RSUBHN : SIMDNarrowThreeVectorBHS<1,0b0110,"rsubhn",int_aarch64_neon_rsubhn>;
defm PMULL  : SIMDDifferentThreeVectorBD<0,0b1110,"pmull",int_aarch64_neon_pmull>;
defm SABAL  : SIMDLongThreeVectorTiedBHSabal<0,0b0101,"sabal",
                                             AArch64sabd>;
defm SABDL   : SIMDLongThreeVectorBHSabdl<0, 0b0111, "sabdl",
                                          AArch64sabd>;
defm SADDL   : SIMDLongThreeVectorBHS<   0, 0b0000, "saddl",
            BinOpFrag<(add (sext node:$LHS), (sext node:$RHS))>>;
defm SADDW   : SIMDWideThreeVectorBHS<   0, 0b0001, "saddw",
                 BinOpFrag<(add node:$LHS, (sext node:$RHS))>>;
defm SMLAL   : SIMDLongThreeVectorTiedBHS<0, 0b1000, "smlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMLSL   : SIMDLongThreeVectorTiedBHS<0, 0b1010, "smlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMULL   : SIMDLongThreeVectorBHS<0, 0b1100, "smull", int_aarch64_neon_smull>;
defm SQDMLAL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1001, "sqdmlal",
                                               int_aarch64_neon_sqadd>;
defm SQDMLSL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1011, "sqdmlsl",
                                               int_aarch64_neon_sqsub>;
defm SQDMULL : SIMDLongThreeVectorHS<0, 0b1101, "sqdmull",
                                     int_aarch64_neon_sqdmull>;
defm SSUBL   : SIMDLongThreeVectorBHS<0, 0b0010, "ssubl",
                 BinOpFrag<(sub (sext node:$LHS), (sext node:$RHS))>>;
defm SSUBW   : SIMDWideThreeVectorBHS<0, 0b0011, "ssubw",
                 BinOpFrag<(sub node:$LHS, (sext node:$RHS))>>;
defm UABAL   : SIMDLongThreeVectorTiedBHSabal<1, 0b0101, "uabal",
                                              AArch64uabd>;
defm UADDL   : SIMDLongThreeVectorBHS<1, 0b0000, "uaddl",
                 BinOpFrag<(add (zanyext node:$LHS), (zanyext node:$RHS))>>;
defm UADDW   : SIMDWideThreeVectorBHS<1, 0b0001, "uaddw",
                 BinOpFrag<(add node:$LHS, (zanyext node:$RHS))>>;
defm UMLAL   : SIMDLongThreeVectorTiedBHS<1, 0b1000, "umlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMLSL   : SIMDLongThreeVectorTiedBHS<1, 0b1010, "umlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMULL   : SIMDLongThreeVectorBHS<1, 0b1100, "umull", int_aarch64_neon_umull>;
defm USUBL   : SIMDLongThreeVectorBHS<1, 0b0010, "usubl",
                 BinOpFrag<(sub (zanyext node:$LHS), (zanyext node:$RHS))>>;
defm USUBW   : SIMDWideThreeVectorBHS<   1, 0b0011, "usubw",
                 BinOpFrag<(sub node:$LHS, (zanyext node:$RHS))>>;

// Additional patterns for [SU]ML[AS]L
multiclass Neon_mul_acc_widen_patterns<SDPatternOperator opnode, SDPatternOperator vecopnode,
  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v4i16 (opnode
                    V64:$Ra,
                    (v4i16 (extract_subvector
                            (vecopnode (v8i8 V64:$Rn),(v8i8 V64:$Rm)),
                            (i64 0))))),
             (EXTRACT_SUBREG (v8i16 (INST8B
                                     (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), V64:$Ra, dsub),
                                     V64:$Rn, V64:$Rm)), dsub)>;
  def : Pat<(v2i32 (opnode
                    V64:$Ra,
                    (v2i32 (extract_subvector
                            (vecopnode (v4i16 V64:$Rn),(v4i16 V64:$Rm)),
                            (i64 0))))),
             (EXTRACT_SUBREG (v4i32 (INST4H
                                     (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), V64:$Ra, dsub),
                                     V64:$Rn, V64:$Rm)), dsub)>;
  def : Pat<(v1i64 (opnode
                    V64:$Ra,
                    (v1i64 (extract_subvector
                            (vecopnode (v2i32 V64:$Rn),(v2i32 V64:$Rm)),
                            (i64 0))))),
             (EXTRACT_SUBREG (v2i64 (INST2S
                                     (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), V64:$Ra, dsub),
                                     V64:$Rn, V64:$Rm)), dsub)>;
}

defm : Neon_mul_acc_widen_patterns<add, int_aarch64_neon_umull,
     UMLALv8i8_v8i16, UMLALv4i16_v4i32, UMLALv2i32_v2i64>;
defm : Neon_mul_acc_widen_patterns<add, int_aarch64_neon_smull,
     SMLALv8i8_v8i16, SMLALv4i16_v4i32, SMLALv2i32_v2i64>;
defm : Neon_mul_acc_widen_patterns<sub, int_aarch64_neon_umull,
     UMLSLv8i8_v8i16, UMLSLv4i16_v4i32, UMLSLv2i32_v2i64>;
defm : Neon_mul_acc_widen_patterns<sub, int_aarch64_neon_smull,
     SMLSLv8i8_v8i16, SMLSLv4i16_v4i32, SMLSLv2i32_v2i64>;

// Additional patterns for SMULL and UMULL
multiclass Neon_mul_widen_patterns<SDPatternOperator opnode,
  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v8i16 (opnode (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
            (INST8B V64:$Rn, V64:$Rm)>;
  def : Pat<(v4i32 (opnode (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
            (INST4H V64:$Rn, V64:$Rm)>;
  def : Pat<(v2i64 (opnode (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
            (INST2S V64:$Rn, V64:$Rm)>;
}

defm : Neon_mul_widen_patterns<AArch64smull, SMULLv8i8_v8i16,
  SMULLv4i16_v4i32, SMULLv2i32_v2i64>;
defm : Neon_mul_widen_patterns<AArch64umull, UMULLv8i8_v8i16,
  UMULLv4i16_v4i32, UMULLv2i32_v2i64>;
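
// For example (a sketch), "mul (sext <8 x i8> %a to <8 x i16>),
// (sext <8 x i8> %b to <8 x i16>)" is combined into an AArch64smull node and
// selected to "smull v0.8h, v1.8b, v2.8b".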

// Patterns for smull2/umull2.
multiclass Neon_mul_high_patterns<SDPatternOperator opnode,
  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v8i16 (opnode (extract_high_v16i8 V128:$Rn),
                           (extract_high_v16i8 V128:$Rm))),
             (INST8B V128:$Rn, V128:$Rm)>;
  def : Pat<(v4i32 (opnode (extract_high_v8i16 V128:$Rn),
                           (extract_high_v8i16 V128:$Rm))),
             (INST4H V128:$Rn, V128:$Rm)>;
  def : Pat<(v2i64 (opnode (extract_high_v4i32 V128:$Rn),
                           (extract_high_v4i32 V128:$Rm))),
             (INST2S V128:$Rn, V128:$Rm)>;
}

defm : Neon_mul_high_patterns<AArch64smull, SMULLv16i8_v8i16,
  SMULLv8i16_v4i32, SMULLv4i32_v2i64>;
defm : Neon_mul_high_patterns<AArch64umull, UMULLv16i8_v8i16,
  UMULLv8i16_v4i32, UMULLv4i32_v2i64>;

// Additional patterns for SMLAL/SMLSL and UMLAL/UMLSL
multiclass Neon_mulacc_widen_patterns<SDPatternOperator opnode,
  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v8i16 (opnode (v8i16 V128:$Rd), (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
            (INST8B V128:$Rd, V64:$Rn, V64:$Rm)>;
  def : Pat<(v4i32 (opnode (v4i32 V128:$Rd), (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
            (INST4H V128:$Rd, V64:$Rn, V64:$Rm)>;
  def : Pat<(v2i64 (opnode (v2i64 V128:$Rd), (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
            (INST2S  V128:$Rd, V64:$Rn, V64:$Rm)>;
}

defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(add node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
  SMLALv8i8_v8i16, SMLALv4i16_v4i32, SMLALv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(add node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
  UMLALv8i8_v8i16, UMLALv4i16_v4i32, UMLALv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(sub node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
  SMLSLv8i8_v8i16, SMLSLv4i16_v4i32, SMLSLv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(sub node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
  UMLSLv8i8_v8i16, UMLSLv4i16_v4i32, UMLSLv2i32_v2i64>;

// Patterns for 64-bit pmull
def : Pat<(int_aarch64_neon_pmull64 V64:$Rn, V64:$Rm),
          (PMULLv1i64 V64:$Rn, V64:$Rm)>;
def : Pat<(int_aarch64_neon_pmull64 (extractelt (v2i64 V128:$Rn), (i64 1)),
                                    (extractelt (v2i64 V128:$Rm), (i64 1))),
          (PMULLv2i64 V128:$Rn, V128:$Rm)>;
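
// In the second pattern both operands come from lane 1 of their inputs, which
// is exactly what "pmull2 v0.1q, v1.2d, v2.2d" computes.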

// CodeGen patterns for addhn and subhn instructions, which can actually be
// written in LLVM IR without too much difficulty.

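// For example (a sketch; %a, %b are hypothetical <8 x i16> values), the IR
//   %sum   = add <8 x i16> %a, %b
//   %shift = lshr <8 x i16> %sum, <i16 8, ..., i16 8>
//   %res   = trunc <8 x i16> %shift to <8 x i8>
// is selected by the first ADDHN pattern below to "addhn v0.8b, v1.8h, v2.8h".
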
// ADDHN
def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm), (i32 8))))),
          (ADDHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                           (i32 16))))),
          (ADDHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                           (i32 32))))),
          (ADDHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v8i8 V64:$Rd),
                          (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                    (i32 8))))),
          (ADDHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v4i16 V64:$Rd),
                          (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                    (i32 16))))),
          (ADDHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v2i32 V64:$Rd),
                          (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                    (i32 32))))),
          (ADDHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;

// SUBHN
def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm), (i32 8))))),
          (SUBHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                           (i32 16))))),
          (SUBHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                           (i32 32))))),
          (SUBHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v8i8 V64:$Rd),
                          (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                    (i32 8))))),
          (SUBHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v4i16 V64:$Rd),
                          (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                    (i32 16))))),
          (SUBHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v2i32 V64:$Rd),
                          (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                    (i32 32))))),
          (SUBHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;

//----------------------------------------------------------------------------
// AdvSIMD bitwise extract from vector instruction.
//----------------------------------------------------------------------------

defm EXT : SIMDBitwiseExtract<"ext">;

def AdjustExtImm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(8 + N->getZExtValue(), SDLoc(N), MVT::i32);
}]>;
multiclass ExtPat<ValueType VT64, ValueType VT128, int N> {
  def : Pat<(VT64 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
            (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
  def : Pat<(VT128 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
            (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
  // We use EXT to handle extract_subvector to copy the upper 64 bits of a
  // 128-bit vector.
  def : Pat<(VT64 (extract_subvector V128:$Rn, (i64 N))),
            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
  // A 64-bit EXT of two halves of the same 128-bit register can be done as a
  // single 128-bit EXT.
  def : Pat<(VT64 (AArch64ext (extract_subvector V128:$Rn, (i64 0)),
                              (extract_subvector V128:$Rn, (i64 N)),
                              (i32 imm:$imm))),
            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, imm:$imm), dsub)>;
  // A 64-bit EXT of the high half of a 128-bit register can be done using a
  // 128-bit EXT of the whole register with an adjustment to the immediate. The
  // top half of the other operand will be unset, but that doesn't matter as it
  // will not be used.
  def : Pat<(VT64 (AArch64ext (extract_subvector V128:$Rn, (i64 N)),
                              V64:$Rm,
                              (i32 imm:$imm))),
            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn,
                                      (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                                      (AdjustExtImm imm:$imm)), dsub)>;
}
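
// For example (a sketch): a 64-bit "ext v0.8b, vHi.8b, v2.8b, #3", where vHi
// is the high half of v1, is instead selected as
// "ext v0.16b, v1.16b, v2.16b, #11" (AdjustExtImm adds 8 to the immediate),
// followed by taking the low 64 bits of the result.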

defm : ExtPat<v8i8, v16i8, 8>;
defm : ExtPat<v4i16, v8i16, 4>;
defm : ExtPat<v4f16, v8f16, 4>;
defm : ExtPat<v4bf16, v8bf16, 4>;
defm : ExtPat<v2i32, v4i32, 2>;
defm : ExtPat<v2f32, v4f32, 2>;
defm : ExtPat<v1i64, v2i64, 1>;
defm : ExtPat<v1f64, v2f64, 1>;

//----------------------------------------------------------------------------
// AdvSIMD zip vector
//----------------------------------------------------------------------------

defm TRN1 : SIMDZipVector<0b010, "trn1", AArch64trn1>;
defm TRN2 : SIMDZipVector<0b110, "trn2", AArch64trn2>;
defm UZP1 : SIMDZipVector<0b001, "uzp1", AArch64uzp1>;
defm UZP2 : SIMDZipVector<0b101, "uzp2", AArch64uzp2>;
defm ZIP1 : SIMDZipVector<0b011, "zip1", AArch64zip1>;
defm ZIP2 : SIMDZipVector<0b111, "zip2", AArch64zip2>;

//----------------------------------------------------------------------------
// AdvSIMD TBL/TBX instructions
//----------------------------------------------------------------------------

defm TBL : SIMDTableLookup<    0, "tbl">;
defm TBX : SIMDTableLookupTied<1, "tbx">;

def : Pat<(v8i8 (int_aarch64_neon_tbl1 (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
          (TBLv8i8One VecListOne128:$Rn, V64:$Ri)>;
def : Pat<(v16i8 (int_aarch64_neon_tbl1 (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
          (TBLv16i8One V128:$Ri, V128:$Rn)>;

def : Pat<(v8i8 (int_aarch64_neon_tbx1 (v8i8 V64:$Rd),
                  (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
          (TBXv8i8One V64:$Rd, VecListOne128:$Rn, V64:$Ri)>;
def : Pat<(v16i8 (int_aarch64_neon_tbx1 (v16i8 V128:$Rd),
                   (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
          (TBXv16i8One V128:$Rd, V128:$Ri, V128:$Rn)>;

5310
5311//----------------------------------------------------------------------------
5312// AdvSIMD scalar CPY instruction
5313//----------------------------------------------------------------------------
5314
5315defm CPY : SIMDScalarCPY<"mov">;
5316
5317//----------------------------------------------------------------------------
5318// AdvSIMD scalar pairwise instructions
5319//----------------------------------------------------------------------------
5320
5321defm ADDP    : SIMDPairwiseScalarD<0, 0b11011, "addp">;
5322defm FADDP   : SIMDFPPairwiseScalar<0, 0b01101, "faddp">;
5323defm FMAXNMP : SIMDFPPairwiseScalar<0, 0b01100, "fmaxnmp">;
5324defm FMAXP   : SIMDFPPairwiseScalar<0, 0b01111, "fmaxp">;
5325defm FMINNMP : SIMDFPPairwiseScalar<1, 0b01100, "fminnmp">;
5326defm FMINP   : SIMDFPPairwiseScalar<1, 0b01111, "fminp">;
5327
5328let Predicates = [HasFullFP16] in {
5329def : Pat<(f16 (vecreduce_fadd (v8f16 V128:$Rn))),
5330            (FADDPv2i16p
5331              (EXTRACT_SUBREG
5332                 (FADDPv8f16 (FADDPv8f16 V128:$Rn, (v8f16 (IMPLICIT_DEF))), (v8f16 (IMPLICIT_DEF))),
5333               dsub))>;
5334def : Pat<(f16 (vecreduce_fadd (v4f16 V64:$Rn))),
5335          (FADDPv2i16p (FADDPv4f16 V64:$Rn, (v4f16 (IMPLICIT_DEF))))>;
5336}
5337def : Pat<(f32 (vecreduce_fadd (v4f32 V128:$Rn))),
5338          (FADDPv2i32p
5339            (EXTRACT_SUBREG
5340              (FADDPv4f32 V128:$Rn, (v4f32 (IMPLICIT_DEF))),
5341             dsub))>;
5342def : Pat<(f32 (vecreduce_fadd (v2f32 V64:$Rn))),
5343          (FADDPv2i32p V64:$Rn)>;
5344def : Pat<(f64 (vecreduce_fadd (v2f64 V128:$Rn))),
5345          (FADDPv2i64p V128:$Rn)>;
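
// Worked example of the v4f32 reduction above (illustrative): for lanes
// [a,b,c,d], FADDPv4f32 with an undef second operand yields
// [a+b, c+d, undef, undef]; the dsub extract keeps the low pair, and the
// scalar FADDPv2i32p adds them, producing (a+b)+(c+d).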

def : Pat<(v2i64 (AArch64saddv V128:$Rn)),
          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
def : Pat<(v2i64 (AArch64uaddv V128:$Rn)),
          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
def : Pat<(f32 (int_aarch64_neon_faddv (v2f32 V64:$Rn))),
          (FADDPv2i32p V64:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_faddv (v4f32 V128:$Rn))),
          (FADDPv2i32p (EXTRACT_SUBREG (FADDPv4f32 V128:$Rn, V128:$Rn), dsub))>;
def : Pat<(f64 (int_aarch64_neon_faddv (v2f64 V128:$Rn))),
          (FADDPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fmaxnmv (v2f32 V64:$Rn))),
          (FMAXNMPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fmaxnmv (v2f64 V128:$Rn))),
          (FMAXNMPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fmaxv (v2f32 V64:$Rn))),
          (FMAXPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fmaxv (v2f64 V128:$Rn))),
          (FMAXPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fminnmv (v2f32 V64:$Rn))),
          (FMINNMPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fminnmv (v2f64 V128:$Rn))),
          (FMINNMPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fminv (v2f32 V64:$Rn))),
          (FMINPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fminv (v2f64 V128:$Rn))),
          (FMINPv2i64p V128:$Rn)>;

//----------------------------------------------------------------------------
// AdvSIMD INS/DUP instructions
//----------------------------------------------------------------------------

def DUPv8i8gpr  : SIMDDupFromMain<0, {?,?,?,?,1}, ".8b", v8i8, V64, GPR32>;
def DUPv16i8gpr : SIMDDupFromMain<1, {?,?,?,?,1}, ".16b", v16i8, V128, GPR32>;
def DUPv4i16gpr : SIMDDupFromMain<0, {?,?,?,1,0}, ".4h", v4i16, V64, GPR32>;
def DUPv8i16gpr : SIMDDupFromMain<1, {?,?,?,1,0}, ".8h", v8i16, V128, GPR32>;
def DUPv2i32gpr : SIMDDupFromMain<0, {?,?,1,0,0}, ".2s", v2i32, V64, GPR32>;
def DUPv4i32gpr : SIMDDupFromMain<1, {?,?,1,0,0}, ".4s", v4i32, V128, GPR32>;
def DUPv2i64gpr : SIMDDupFromMain<1, {?,1,0,0,0}, ".2d", v2i64, V128, GPR64>;

def DUPv2i64lane : SIMDDup64FromElement;
def DUPv2i32lane : SIMDDup32FromElement<0, ".2s", v2i32, V64>;
def DUPv4i32lane : SIMDDup32FromElement<1, ".4s", v4i32, V128>;
def DUPv4i16lane : SIMDDup16FromElement<0, ".4h", v4i16, V64>;
def DUPv8i16lane : SIMDDup16FromElement<1, ".8h", v8i16, V128>;
def DUPv8i8lane  : SIMDDup8FromElement <0, ".8b", v8i8, V64>;
def DUPv16i8lane : SIMDDup8FromElement <1, ".16b", v16i8, V128>;

// DUP from a 64-bit register to a 64-bit register is just a copy
def : Pat<(v1i64 (AArch64dup (i64 GPR64:$Rn))),
          (COPY_TO_REGCLASS GPR64:$Rn, FPR64)>;
def : Pat<(v1f64 (AArch64dup (f64 FPR64:$Rn))),
          (COPY_TO_REGCLASS FPR64:$Rn, FPR64)>;

def : Pat<(v2f32 (AArch64dup (f32 FPR32:$Rn))),
          (v2f32 (DUPv2i32lane
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
            (i64 0)))>;
def : Pat<(v4f32 (AArch64dup (f32 FPR32:$Rn))),
          (v4f32 (DUPv4i32lane
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
            (i64 0)))>;
def : Pat<(v2f64 (AArch64dup (f64 FPR64:$Rn))),
          (v2f64 (DUPv2i64lane
            (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rn, dsub),
            (i64 0)))>;
def : Pat<(v4f16 (AArch64dup (f16 FPR16:$Rn))),
          (v4f16 (DUPv4i16lane
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
            (i64 0)))>;
def : Pat<(v4bf16 (AArch64dup (bf16 FPR16:$Rn))),
          (v4bf16 (DUPv4i16lane
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
            (i64 0)))>;
def : Pat<(v8f16 (AArch64dup (f16 FPR16:$Rn))),
          (v8f16 (DUPv8i16lane
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
            (i64 0)))>;
def : Pat<(v8bf16 (AArch64dup (bf16 FPR16:$Rn))),
          (v8bf16 (DUPv8i16lane
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
            (i64 0)))>;

def : Pat<(v4f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
          (DUPv4i16lane V128:$Rn, VectorIndexH:$imm)>;
def : Pat<(v8f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
          (DUPv8i16lane V128:$Rn, VectorIndexH:$imm)>;

def : Pat<(v4bf16 (AArch64duplane16 (v8bf16 V128:$Rn), VectorIndexH:$imm)),
          (DUPv4i16lane V128:$Rn, VectorIndexH:$imm)>;
def : Pat<(v8bf16 (AArch64duplane16 (v8bf16 V128:$Rn), VectorIndexH:$imm)),
          (DUPv8i16lane V128:$Rn, VectorIndexH:$imm)>;

def : Pat<(v2f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
          (DUPv2i32lane V128:$Rn, VectorIndexS:$imm)>;
def : Pat<(v4f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
          (DUPv4i32lane V128:$Rn, VectorIndexS:$imm)>;
def : Pat<(v2f64 (AArch64duplane64 (v2f64 V128:$Rn), VectorIndexD:$imm)),
          (DUPv2i64lane V128:$Rn, VectorIndexD:$imm)>;

// If there's an (AArch64dup (vector_extract ...) ...), we can use a duplane
// instruction even if the types don't match: we just have to remap the lane
// carefully. N.b. this trick only applies to truncations.
def VecIndex_x2 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(2 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;
def VecIndex_x4 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(4 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;
def VecIndex_x8 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(8 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;

multiclass DUPWithTruncPats<ValueType ResVT, ValueType Src64VT,
                            ValueType Src128VT, ValueType ScalVT,
                            Instruction DUP, SDNodeXForm IdxXFORM> {
  def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src128VT V128:$Rn),
                                                     imm:$idx)))),
            (DUP V128:$Rn, (IdxXFORM imm:$idx))>;

  def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src64VT V64:$Rn),
                                                     imm:$idx)))),
            (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
}

defm : DUPWithTruncPats<v8i8,   v4i16, v8i16, i32, DUPv8i8lane,  VecIndex_x2>;
defm : DUPWithTruncPats<v8i8,   v2i32, v4i32, i32, DUPv8i8lane,  VecIndex_x4>;
defm : DUPWithTruncPats<v4i16,  v2i32, v4i32, i32, DUPv4i16lane, VecIndex_x2>;

defm : DUPWithTruncPats<v16i8,  v4i16, v8i16, i32, DUPv16i8lane, VecIndex_x2>;
defm : DUPWithTruncPats<v16i8,  v2i32, v4i32, i32, DUPv16i8lane, VecIndex_x4>;
defm : DUPWithTruncPats<v8i16,  v2i32, v4i32, i32, DUPv8i16lane, VecIndex_x2>;
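
// Worked example of the lane remap (illustrative): duplicating the i8
// truncation of lane 1 of a v4i32 into a v16i8 uses byte lane 4*1 = 4,
// i.e. dup v0.16b, v1.b[4], since byte lane 4 of the register (bits [39:32])
// is exactly the low byte of 32-bit lane 1.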

multiclass DUPWithTrunci64Pats<ValueType ResVT, Instruction DUP,
                               SDNodeXForm IdxXFORM> {
  def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v2i64 V128:$Rn),
                                                         imm:$idx))))),
            (DUP V128:$Rn, (IdxXFORM imm:$idx))>;

  def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v1i64 V64:$Rn),
                                                       imm:$idx))))),
            (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
}

defm : DUPWithTrunci64Pats<v8i8,  DUPv8i8lane,   VecIndex_x8>;
defm : DUPWithTrunci64Pats<v4i16, DUPv4i16lane,  VecIndex_x4>;
defm : DUPWithTrunci64Pats<v2i32, DUPv2i32lane,  VecIndex_x2>;

defm : DUPWithTrunci64Pats<v16i8, DUPv16i8lane, VecIndex_x8>;
defm : DUPWithTrunci64Pats<v8i16, DUPv8i16lane, VecIndex_x4>;
defm : DUPWithTrunci64Pats<v4i32, DUPv4i32lane, VecIndex_x2>;

// SMOV and UMOV definitions, with some extra patterns for convenience
defm SMOV : SMov;
defm UMOV : UMov;

def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
          (i32 (SMOVvi8to32 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
          (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
          (i32 (SMOVvi16to32 V128:$Rn, VectorIndexH:$idx))>;
def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
          (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;
def : Pat<(sext (i32 (vector_extract (v4i32 V128:$Rn), VectorIndexS:$idx))),
          (i64 (SMOVvi32to64 V128:$Rn, VectorIndexS:$idx))>;

def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v16i8 V128:$Rn),
            VectorIndexB:$idx)))), i8),
          (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v8i16 V128:$Rn),
            VectorIndexH:$idx)))), i16),
          (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;

// Extracting i8 or i16 elements will have the zero-extend transformed to
// an 'and' mask by type legalization since neither i8 nor i16 are legal types
// for AArch64. Match these patterns here since UMOV already zeroes out the high
// bits of the destination register.
def : Pat<(and (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx),
               (i32 0xff)),
          (i32 (UMOVvi8 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(and (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),
               (i32 0xffff)),
          (i32 (UMOVvi16 V128:$Rn, VectorIndexH:$idx))>;

def : Pat<(i64 (and (i64 (anyext (i32 (vector_extract (v16i8 V128:$Rn),
            VectorIndexB:$idx)))), (i64 0xff))),
          (SUBREG_TO_REG (i64 0), (i32 (UMOVvi8 V128:$Rn, VectorIndexB:$idx)), sub_32)>;
def : Pat<(i64 (and (i64 (anyext (i32 (vector_extract (v8i16 V128:$Rn),
            VectorIndexH:$idx)))), (i64 0xffff))),
          (SUBREG_TO_REG (i64 0), (i32 (UMOVvi16 V128:$Rn, VectorIndexH:$idx)), sub_32)>;
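
// Illustrative example of the masking patterns above: for
//   %b = extractelement <16 x i8> %v, i64 3
//   %z = zext i8 %b to i32
// legalization produces (and (vector_extract %v, 3), 0xff), and the single
// instruction umov w0, v0.b[3] already yields the zero-extended value.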

defm INS : SIMDIns;

def : Pat<(v16i8 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
def : Pat<(v8i8 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;

def : Pat<(v8i16 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
def : Pat<(v4i16 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;

def : Pat<(v4f16 (scalar_to_vector (f16 FPR16:$Rn))),
          (INSERT_SUBREG (v4f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
def : Pat<(v8f16 (scalar_to_vector (f16 FPR16:$Rn))),
          (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;

def : Pat<(v4bf16 (scalar_to_vector (bf16 FPR16:$Rn))),
          (INSERT_SUBREG (v4bf16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
def : Pat<(v8bf16 (scalar_to_vector (bf16 FPR16:$Rn))),
          (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;

def : Pat<(v2i32 (scalar_to_vector (i32 FPR32:$Rn))),
            (v2i32 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
                                  (i32 FPR32:$Rn), ssub))>;
def : Pat<(v4i32 (scalar_to_vector (i32 FPR32:$Rn))),
            (v4i32 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
                                  (i32 FPR32:$Rn), ssub))>;

def : Pat<(v2i64 (scalar_to_vector (i64 FPR64:$Rn))),
            (v2i64 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
                                  (i64 FPR64:$Rn), dsub))>;

def : Pat<(v4f32 (scalar_to_vector (f32 FPR32:$Rn))),
          (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
def : Pat<(v2f32 (scalar_to_vector (f32 FPR32:$Rn))),
          (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;

def : Pat<(v2f64 (scalar_to_vector (f64 FPR64:$Rn))),
          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rn, dsub)>;

def : Pat<(v4f16 (vector_insert (v4f16 V64:$Rn),
            (f16 FPR16:$Rm), (i64 VectorIndexS:$imm))),
          (EXTRACT_SUBREG
            (INSvi16lane
              (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), V64:$Rn, dsub)),
              VectorIndexS:$imm,
              (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
              (i64 0)),
            dsub)>;

def : Pat<(vector_insert (v8f16 v8f16:$Rn), (f16 fpimm0),
            (i64 VectorIndexH:$imm)),
          (INSvi16gpr V128:$Rn, VectorIndexH:$imm, WZR)>;
def : Pat<(vector_insert v4f32:$Rn, (f32 fpimm0),
            (i64 VectorIndexS:$imm)),
          (INSvi32gpr V128:$Rn, VectorIndexS:$imm, WZR)>;
def : Pat<(vector_insert v2f64:$Rn, (f64 fpimm0),
            (i64 VectorIndexD:$imm)),
          (INSvi64gpr V128:$Rn, VectorIndexD:$imm, XZR)>;
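
// The three fpimm0 patterns above exploit the fact that +0.0 has an all-zero
// bit pattern: the element is inserted straight from the zero register,
// e.g. (illustrative) inserting 0.0 into lane 1 of a v4f32 becomes
// mov v0.s[1], wzr, with no FP constant materialization needed.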

def : Pat<(v8f16 (vector_insert (v8f16 V128:$Rn),
            (f16 FPR16:$Rm), (i64 VectorIndexH:$imm))),
          (INSvi16lane
            V128:$Rn, VectorIndexH:$imm,
            (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
            (i64 0))>;

def : Pat<(v4bf16 (vector_insert (v4bf16 V64:$Rn),
            (bf16 FPR16:$Rm), (i64 VectorIndexS:$imm))),
          (EXTRACT_SUBREG
            (INSvi16lane
              (v8bf16 (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), V64:$Rn, dsub)),
              VectorIndexS:$imm,
              (v8bf16 (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
              (i64 0)),
            dsub)>;

def : Pat<(v8bf16 (vector_insert (v8bf16 V128:$Rn),
            (bf16 FPR16:$Rm), (i64 VectorIndexH:$imm))),
          (INSvi16lane
            V128:$Rn, VectorIndexH:$imm,
            (v8bf16 (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
            (i64 0))>;

def : Pat<(v2f32 (vector_insert (v2f32 V64:$Rn),
            (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
          (EXTRACT_SUBREG
            (INSvi32lane
              (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), V64:$Rn, dsub)),
              VectorIndexS:$imm,
              (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
              (i64 0)),
            dsub)>;
def : Pat<(v4f32 (vector_insert (v4f32 V128:$Rn),
            (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
          (INSvi32lane
            V128:$Rn, VectorIndexS:$imm,
            (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
            (i64 0))>;
def : Pat<(v2f64 (vector_insert (v2f64 V128:$Rn),
            (f64 FPR64:$Rm), (i64 VectorIndexD:$imm))),
          (INSvi64lane
            V128:$Rn, VectorIndexD:$imm,
            (v2f64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rm, dsub)),
            (i64 0))>;

// Copy an element at a constant index in one vector into a constant indexed
// element of another.
// FIXME: refactor to a shared class/def parameterized on vector type, vector
// index type and INS extension
def : Pat<(v16i8 (int_aarch64_neon_vcopy_lane
                   (v16i8 V128:$Vd), VectorIndexB:$idx, (v16i8 V128:$Vs),
                   VectorIndexB:$idx2)),
          (v16i8 (INSvi8lane
                   V128:$Vd, VectorIndexB:$idx, V128:$Vs, VectorIndexB:$idx2)
          )>;
def : Pat<(v8i16 (int_aarch64_neon_vcopy_lane
                   (v8i16 V128:$Vd), VectorIndexH:$idx, (v8i16 V128:$Vs),
                   VectorIndexH:$idx2)),
          (v8i16 (INSvi16lane
                   V128:$Vd, VectorIndexH:$idx, V128:$Vs, VectorIndexH:$idx2)
          )>;
def : Pat<(v4i32 (int_aarch64_neon_vcopy_lane
                   (v4i32 V128:$Vd), VectorIndexS:$idx, (v4i32 V128:$Vs),
                   VectorIndexS:$idx2)),
          (v4i32 (INSvi32lane
                   V128:$Vd, VectorIndexS:$idx, V128:$Vs, VectorIndexS:$idx2)
          )>;
def : Pat<(v2i64 (int_aarch64_neon_vcopy_lane
                   (v2i64 V128:$Vd), VectorIndexD:$idx, (v2i64 V128:$Vs),
                   VectorIndexD:$idx2)),
          (v2i64 (INSvi64lane
                   V128:$Vd, VectorIndexD:$idx, V128:$Vs, VectorIndexD:$idx2)
          )>;

multiclass Neon_INS_elt_pattern<ValueType VT128, ValueType VT64,
                                ValueType VTScal, Instruction INS> {
  def : Pat<(VT128 (vector_insert V128:$src,
                        (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (INS V128:$src, imm:$Immd, V128:$Rn, imm:$Immn)>;

  def : Pat<(VT128 (vector_insert V128:$src,
                        (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (INS V128:$src, imm:$Immd,
                 (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn)>;

  def : Pat<(VT64 (vector_insert V64:$src,
                        (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (EXTRACT_SUBREG (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub),
                                 imm:$Immd, V128:$Rn, imm:$Immn),
                            dsub)>;

  def : Pat<(VT64 (vector_insert V64:$src,
                        (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (EXTRACT_SUBREG
                (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub), imm:$Immd,
                     (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn),
                dsub)>;
}

defm : Neon_INS_elt_pattern<v8f16, v4f16, f16, INSvi16lane>;
defm : Neon_INS_elt_pattern<v8bf16, v4bf16, bf16, INSvi16lane>;
defm : Neon_INS_elt_pattern<v4f32, v2f32, f32, INSvi32lane>;
defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, INSvi64lane>;
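
// Illustrative example of the multiclass above: a lane-to-lane copy such as
//   %e = extractelement <4 x float> %a, i64 3
//   %r = insertelement <4 x float> %b, float %e, i64 1
// matches the first pattern and selects the single instruction
// mov v0.s[1], v1.s[3], instead of bouncing the element through a scalar
// register.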


// Floating point vector extractions are codegen'd as either a sequence of
// subregister extractions, or a MOV (aka CPY here, alias for DUP) if
// the lane number is anything other than zero.
def : Pat<(vector_extract (v2f64 V128:$Rn), 0),
          (f64 (EXTRACT_SUBREG V128:$Rn, dsub))>;
def : Pat<(vector_extract (v4f32 V128:$Rn), 0),
          (f32 (EXTRACT_SUBREG V128:$Rn, ssub))>;
def : Pat<(vector_extract (v8f16 V128:$Rn), 0),
          (f16 (EXTRACT_SUBREG V128:$Rn, hsub))>;
def : Pat<(vector_extract (v8bf16 V128:$Rn), 0),
          (bf16 (EXTRACT_SUBREG V128:$Rn, hsub))>;


def : Pat<(vector_extract (v2f64 V128:$Rn), VectorIndexD:$idx),
          (f64 (CPYi64 V128:$Rn, VectorIndexD:$idx))>;
def : Pat<(vector_extract (v4f32 V128:$Rn), VectorIndexS:$idx),
          (f32 (CPYi32 V128:$Rn, VectorIndexS:$idx))>;
def : Pat<(vector_extract (v8f16 V128:$Rn), VectorIndexH:$idx),
          (f16 (CPYi16 V128:$Rn, VectorIndexH:$idx))>;
def : Pat<(vector_extract (v8bf16 V128:$Rn), VectorIndexH:$idx),
          (bf16 (CPYi16 V128:$Rn, VectorIndexH:$idx))>;

// All concat_vectors operations are canonicalised to act on i64 vectors for
// AArch64. In the general case we need an instruction, which might as well
// be INS.
class ConcatPat<ValueType DstTy, ValueType SrcTy>
  : Pat<(DstTy (concat_vectors (SrcTy V64:$Rd), V64:$Rn)),
        (INSvi64lane (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), 1,
                     (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub), 0)>;

def : ConcatPat<v2i64, v1i64>;
def : ConcatPat<v2f64, v1f64>;
def : ConcatPat<v4i32, v2i32>;
def : ConcatPat<v4f32, v2f32>;
def : ConcatPat<v8i16, v4i16>;
def : ConcatPat<v8f16, v4f16>;
def : ConcatPat<v8bf16, v4bf16>;
def : ConcatPat<v16i8, v8i8>;
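
// Illustrative expansion of ConcatPat: the first operand lands in the low
// 64 bits via the dsub INSERT_SUBREG, and the second operand is copied into
// the high half with a 64-bit lane move, roughly mov v0.d[1], v1.d[0].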

// If the high lanes are undef, though, we can just ignore them:
class ConcatUndefPat<ValueType DstTy, ValueType SrcTy>
  : Pat<(DstTy (concat_vectors (SrcTy V64:$Rn), undef)),
        (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub)>;

def : ConcatUndefPat<v2i64, v1i64>;
def : ConcatUndefPat<v2f64, v1f64>;
def : ConcatUndefPat<v4i32, v2i32>;
def : ConcatUndefPat<v4f32, v2f32>;
def : ConcatUndefPat<v8i16, v4i16>;
def : ConcatUndefPat<v16i8, v8i8>;

//----------------------------------------------------------------------------
// AdvSIMD across lanes instructions
//----------------------------------------------------------------------------

defm ADDV    : SIMDAcrossLanesBHS<0, 0b11011, "addv">;
defm SMAXV   : SIMDAcrossLanesBHS<0, 0b01010, "smaxv">;
defm SMINV   : SIMDAcrossLanesBHS<0, 0b11010, "sminv">;
defm UMAXV   : SIMDAcrossLanesBHS<1, 0b01010, "umaxv">;
defm UMINV   : SIMDAcrossLanesBHS<1, 0b11010, "uminv">;
defm SADDLV  : SIMDAcrossLanesHSD<0, 0b00011, "saddlv">;
defm UADDLV  : SIMDAcrossLanesHSD<1, 0b00011, "uaddlv">;
defm FMAXNMV : SIMDFPAcrossLanes<0b01100, 0, "fmaxnmv", int_aarch64_neon_fmaxnmv>;
defm FMAXV   : SIMDFPAcrossLanes<0b01111, 0, "fmaxv", int_aarch64_neon_fmaxv>;
defm FMINNMV : SIMDFPAcrossLanes<0b01100, 1, "fminnmv", int_aarch64_neon_fminnmv>;
defm FMINV   : SIMDFPAcrossLanes<0b01111, 1, "fminv", int_aarch64_neon_fminv>;

// Patterns for uaddv(uaddlp(x)) ==> uaddlv
def : Pat<(i32 (vector_extract (v8i16 (insert_subvector undef,
            (v4i16 (AArch64uaddv (v4i16 (AArch64uaddlp (v8i8 V64:$op))))),
            (i64 0))), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
           (UADDLVv8i8v V64:$op), hsub), ssub)>;
def : Pat<(i32 (vector_extract (v8i16 (AArch64uaddv (v8i16 (AArch64uaddlp
           (v16i8 V128:$op))))), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
           (UADDLVv16i8v V128:$op), hsub), ssub)>;
def : Pat<(v4i32 (AArch64uaddv (v4i32 (AArch64uaddlp (v8i16 V128:$op))))),
          (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), (UADDLVv8i16v V128:$op), ssub)>;
// Patterns for addp(uaddlp(x)) ==> uaddlv
def : Pat<(v2i32 (AArch64uaddv (v2i32 (AArch64uaddlp (v4i16 V64:$op))))),
          (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)), (UADDLVv4i16v V64:$op), ssub)>;
def : Pat<(v2i64 (AArch64uaddv (v2i64 (AArch64uaddlp (v4i32 V128:$op))))),
          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (UADDLVv4i32v V128:$op), dsub)>;
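
// Illustrative example of the fusion above: a widening pairwise add followed
// by an across-lanes add on a v8i8 collapses into the single instruction
// uaddlv h0, v0.8b, which already produces the widened sum of all eight
// bytes.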

// Patterns for across-vector intrinsics that have a node equivalent returning
// a vector (with only the low lane defined) instead of a scalar.
// In effect, opNode is the same as (scalar_to_vector (IntNode)).
multiclass SIMDAcrossLanesIntrinsic<string baseOpc,
                                    SDPatternOperator opNode> {
// If a lane instruction caught the vector_extract around opNode, we can
// directly match the latter to the instruction.
def : Pat<(v8i8 (opNode V64:$Rn)),
          (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub)>;
def : Pat<(v16i8 (opNode V128:$Rn)),
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub)>;
def : Pat<(v4i16 (opNode V64:$Rn)),
          (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub)>;
def : Pat<(v8i16 (opNode V128:$Rn)),
          (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub)>;
def : Pat<(v4i32 (opNode V128:$Rn)),
          (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), ssub)>;


// If none did, fall back to the explicit patterns, consuming the vector_extract.
def : Pat<(i32 (vector_extract (insert_subvector undef, (v8i8 (opNode V64:$Rn)),
            (i64 0)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn),
            bsub), ssub)>;
def : Pat<(i32 (vector_extract (v16i8 (opNode V128:$Rn)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn),
            bsub), ssub)>;
def : Pat<(i32 (vector_extract (insert_subvector undef,
            (v4i16 (opNode V64:$Rn)), (i64 0)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn),
            hsub), ssub)>;
def : Pat<(i32 (vector_extract (v8i16 (opNode V128:$Rn)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn),
            hsub), ssub)>;
def : Pat<(i32 (vector_extract (v4i32 (opNode V128:$Rn)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn),
            ssub), ssub)>;

}

multiclass SIMDAcrossLanesSignedIntrinsic<string baseOpc,
                                          SDPatternOperator opNode>
    : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
// If there is a sign extension after this intrinsic, consume it, as SMOV
// already performed it.
def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
            (opNode (v8i8 V64:$Rn)), (i64 0)), (i64 0))), i8)),
          (i32 (SMOVvi8to32
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
            (i64 0)))>;
def : Pat<(i32 (sext_inreg (i32 (vector_extract
            (opNode (v16i8 V128:$Rn)), (i64 0))), i8)),
          (i32 (SMOVvi8to32
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
             (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
            (i64 0)))>;
def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
            (opNode (v4i16 V64:$Rn)), (i64 0)), (i64 0))), i16)),
          (i32 (SMOVvi16to32
           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
           (i64 0)))>;
def : Pat<(i32 (sext_inreg (i32 (vector_extract
            (opNode (v8i16 V128:$Rn)), (i64 0))), i16)),
          (i32 (SMOVvi16to32
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
             (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
            (i64 0)))>;
}

multiclass SIMDAcrossLanesUnsignedIntrinsic<string baseOpc,
                                            SDPatternOperator opNode>
    : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
// If there is a masking operation keeping only what has been actually
// generated, consume it.
def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
            (opNode (v8i8 V64:$Rn)), (i64 0)), (i64 0))), maski8_or_more)),
      (i32 (EXTRACT_SUBREG
        (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
          (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
        ssub))>;
def : Pat<(i32 (and (i32 (vector_extract (opNode (v16i8 V128:$Rn)), (i64 0))),
            maski8_or_more)),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
          ssub))>;
def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
            (opNode (v4i16 V64:$Rn)), (i64 0)), (i64 0))), maski16_or_more)),
          (i32 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
            ssub))>;
def : Pat<(i32 (and (i32 (vector_extract (opNode (v8i16 V128:$Rn)), (i64 0))),
            maski16_or_more)),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
          ssub))>;
}

defm : SIMDAcrossLanesSignedIntrinsic<"ADDV",  AArch64saddv>;
// vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
def : Pat<(v2i32 (AArch64saddv (v2i32 V64:$Rn))),
          (ADDPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesUnsignedIntrinsic<"ADDV", AArch64uaddv>;
// vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
def : Pat<(v2i32 (AArch64uaddv (v2i32 V64:$Rn))),
          (ADDPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesSignedIntrinsic<"SMAXV", AArch64smaxv>;
def : Pat<(v2i32 (AArch64smaxv (v2i32 V64:$Rn))),
          (SMAXPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesSignedIntrinsic<"SMINV", AArch64sminv>;
def : Pat<(v2i32 (AArch64sminv (v2i32 V64:$Rn))),
          (SMINPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesUnsignedIntrinsic<"UMAXV", AArch64umaxv>;
def : Pat<(v2i32 (AArch64umaxv (v2i32 V64:$Rn))),
          (UMAXPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesUnsignedIntrinsic<"UMINV", AArch64uminv>;
def : Pat<(v2i32 (AArch64uminv (v2i32 V64:$Rn))),
          (UMINPv2i32 V64:$Rn, V64:$Rn)>;

multiclass SIMDAcrossLanesSignedLongIntrinsic<string baseOpc, Intrinsic intOp> {
def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
        (i32 (SMOVvi16to32
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
          (i64 0)))>;
def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
        (i32 (SMOVvi16to32
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
          (i64 0)))>;

def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
          (i32 (EXTRACT_SUBREG
           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
           ssub))>;
def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
          ssub))>;

def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
        (i64 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
          dsub))>;
}

multiclass SIMDAcrossLanesUnsignedLongIntrinsic<string baseOpc,
                                                Intrinsic intOp> {
def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
          ssub))>;
def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
          ssub))>;

def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
          (i32 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
            ssub))>;
def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
          ssub))>;

def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
        (i64 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
          dsub))>;
}

defm : SIMDAcrossLanesSignedLongIntrinsic<"SADDLV", int_aarch64_neon_saddlv>;
defm : SIMDAcrossLanesUnsignedLongIntrinsic<"UADDLV", int_aarch64_neon_uaddlv>;

// The vaddlv_s32 intrinsic gets mapped to SADDLP.
def : Pat<(i64 (int_aarch64_neon_saddlv (v2i32 V64:$Rn))),
          (i64 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (SADDLPv2i32_v1i64 V64:$Rn), dsub),
            dsub))>;
// The vaddlv_u32 intrinsic gets mapped to UADDLP.
def : Pat<(i64 (int_aarch64_neon_uaddlv (v2i32 V64:$Rn))),
          (i64 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (UADDLPv2i32_v1i64 V64:$Rn), dsub),
            dsub))>;
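
// Illustrative note on the two patterns above: with only two source lanes,
// an across-lanes reduction has nothing to do beyond a single pairwise step,
// so vaddlv_{s,u}32 becomes one saddlp/uaddlp producing the 64-bit sum
// directly, e.g. uaddlp v0.1d, v1.2s.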

//----------------------------------------------------------------------------
// AdvSIMD modified immediate instructions
//----------------------------------------------------------------------------

// AdvSIMD BIC
defm BIC : SIMDModifiedImmVectorShiftTied<1, 0b11, 0b01, "bic", AArch64bici>;
// AdvSIMD ORR
defm ORR : SIMDModifiedImmVectorShiftTied<0, 0b11, 0b01, "orr", AArch64orri>;

def : InstAlias<"bic $Vd.4h, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic $Vd.8h, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"bic $Vd.2s, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic $Vd.4s, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;

def : InstAlias<"bic.4h $Vd, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic.8h $Vd, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"bic.2s $Vd, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic.4s $Vd, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;

def : InstAlias<"orr $Vd.4h, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr $Vd.8h, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"orr $Vd.2s, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr $Vd.4s, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;

def : InstAlias<"orr.4h $Vd, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr.8h $Vd, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"orr.2s $Vd, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr.4s $Vd, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;

// AdvSIMD FMOV
def FMOVv2f64_ns : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1111, V128, fpimm8,
                                              "fmov", ".2d",
                       [(set (v2f64 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
def FMOVv2f32_ns : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1111, V64,  fpimm8,
                                              "fmov", ".2s",
                       [(set (v2f32 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
def FMOVv4f32_ns : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1111, V128, fpimm8,
                                              "fmov", ".4s",
                       [(set (v4f32 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
let Predicates = [HasNEON, HasFullFP16] in {
def FMOVv4f16_ns : SIMDModifiedImmVectorNoShift<0, 0, 1, 0b1111, V64,  fpimm8,
                                              "fmov", ".4h",
                       [(set (v4f16 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
def FMOVv8f16_ns : SIMDModifiedImmVectorNoShift<1, 0, 1, 0b1111, V128, fpimm8,
                                              "fmov", ".8h",
                       [(set (v8f16 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
} // Predicates = [HasNEON, HasFullFP16]

// AdvSIMD MOVI

// EDIT byte mask: scalar
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOVID      : SIMDModifiedImmScalarNoShift<0, 1, 0b1110, "movi",
                    [(set FPR64:$Rd, simdimmtype10:$imm8)]>;
// The movi_edit node has the immediate value already encoded, so we use
// a plain imm0_255 here.
def : Pat<(f64 (AArch64movi_edit imm0_255:$shift)),
          (MOVID imm0_255:$shift)>;

// EDIT byte mask: 2d

// The movi_edit node has the immediate value already encoded, so we use
// a plain imm0_255 in the pattern
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOVIv2d_ns   : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1110, V128,
                                                simdimmtype10,
                                                "movi", ".2d",
                   [(set (v2i64 V128:$Rd), (AArch64movi_edit imm0_255:$imm8))]>;

def : Pat<(v2i64 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v4i32 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v8i16 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v16i8 immAllZerosV), (MOVIv2d_ns (i32 0))>;

def : Pat<(v2i64 immAllOnesV), (MOVIv2d_ns (i32 255))>;
def : Pat<(v4i32 immAllOnesV), (MOVIv2d_ns (i32 255))>;
def : Pat<(v8i16 immAllOnesV), (MOVIv2d_ns (i32 255))>;
def : Pat<(v16i8 immAllOnesV), (MOVIv2d_ns (i32 255))>;

// Set 64-bit vectors to all 0/1 by extracting from a 128-bit register as the
// extract is free and this gives better MachineCSE results.
def : Pat<(v1i64 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
def : Pat<(v2i32 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
def : Pat<(v4i16 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
def : Pat<(v8i8  immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;

def : Pat<(v1i64 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
def : Pat<(v2i32 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
def : Pat<(v4i16 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
def : Pat<(v8i8  immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
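
// Worked example of the byte-mask encoding used above: in the 2d variant,
// each bit of imm8 expands to one full byte of the 64-bit pattern, so
// imm8 = 255 yields 0xFFFFFFFFFFFFFFFF per lane. That is why movi v0.2d, #0
// and the imm8 = 255 form materialize all-zeros and all-ones for every
// vector type.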

// EDIT per word & halfword: 2s, 4h, 4s, & 8h
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
defm MOVI      : SIMDModifiedImmVectorShift<0, 0b10, 0b00, "movi">;

def : InstAlias<"movi $Vd.4h, $imm", (MOVIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.8h, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.2s, $imm", (MOVIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.4s, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : InstAlias<"movi.4h $Vd, $imm", (MOVIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.8h $Vd, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.2s $Vd, $imm", (MOVIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.4s $Vd, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : Pat<(v2i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv2i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv4i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv4i16 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v8i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv8i16 imm0_255:$imm8, imm:$shift)>;

let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
// EDIT per word: 2s & 4s with MSL shifter
def MOVIv2s_msl  : SIMDModifiedImmMoveMSL<0, 0, {1,1,0,?}, V64, "movi", ".2s",
                      [(set (v2i32 V64:$Rd),
                            (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
def MOVIv4s_msl  : SIMDModifiedImmMoveMSL<1, 0, {1,1,0,?}, V128, "movi", ".4s",
                      [(set (v4i32 V128:$Rd),
                            (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;

// Per byte: 8b & 16b
def MOVIv8b_ns   : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1110, V64,  imm0_255,
                                                 "movi", ".8b",
                       [(set (v8i8 V64:$Rd), (AArch64movi imm0_255:$imm8))]>;

def MOVIv16b_ns  : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1110, V128, imm0_255,
                                                 "movi", ".16b",
                       [(set (v16i8 V128:$Rd), (AArch64movi imm0_255:$imm8))]>;
}

// AdvSIMD MVNI

// EDIT per word & halfword: 2s, 4h, 4s, & 8h
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
defm MVNI      : SIMDModifiedImmVectorShift<1, 0b10, 0b00, "mvni">;

def : InstAlias<"mvni $Vd.4h, $imm", (MVNIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.8h, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.2s, $imm", (MVNIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.4s, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : InstAlias<"mvni.4h $Vd, $imm", (MVNIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.8h $Vd, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.2s $Vd, $imm", (MVNIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.4s $Vd, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : Pat<(v2i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv2i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv4i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv4i16 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v8i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv8i16 imm0_255:$imm8, imm:$shift)>;

// EDIT per word: 2s & 4s with MSL shifter
let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def MVNIv2s_msl   : SIMDModifiedImmMoveMSL<0, 1, {1,1,0,?}, V64, "mvni", ".2s",
                      [(set (v2i32 V64:$Rd),
                            (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
def MVNIv4s_msl   : SIMDModifiedImmMoveMSL<1, 1, {1,1,0,?}, V128, "mvni", ".4s",
                      [(set (v4i32 V128:$Rd),
                            (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
}

//----------------------------------------------------------------------------
// AdvSIMD indexed element
//----------------------------------------------------------------------------

let hasSideEffects = 0 in {
  defm FMLA  : SIMDFPIndexedTied<0, 0b0001, "fmla">;
  defm FMLS  : SIMDFPIndexedTied<0, 0b0101, "fmls">;
}

// NOTE: Operands are reordered in the FMLA/FMLS PatFrags because the
// instruction expects the addend first, while the intrinsic expects it last.

// On the other hand, there are quite a few valid combinatorial options due to
// the commutativity of multiplication and the fact that (-x) * y = x * (-y).
defm : SIMDFPIndexedTiedPatterns<"FMLA",
           TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)>>;
defm : SIMDFPIndexedTiedPatterns<"FMLA",
           TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)>>;

defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma node:$RHS, (fneg node:$MHS), node:$LHS)> >;
defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma (fneg node:$RHS), node:$MHS, node:$LHS)> >;
defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma (fneg node:$MHS), node:$RHS, node:$LHS)> >;
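
// Illustrative reasoning for the four FMLS variants above: an fma-based fmls
// needs one multiplicand negated, and since the fneg can sit on either
// multiplicand and fma's first two operands commute, all four TriOpFrag
// orderings denote the same a - b*c computation and must each be matched.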

multiclass FMLSIndexedAfterNegPatterns<SDPatternOperator OpNode> {
  // 3 variants for the .2s version: DUPLANE from 128-bit, DUPLANE from 64-bit
  // and DUP scalar.
  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
                           (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
                                           VectorIndexS:$idx))),
            (FMLSv2i32_indexed V64:$Rd, V64:$Rn, V128:$Rm, VectorIndexS:$idx)>;
  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
                           (v2f32 (AArch64duplane32
                                      (v4f32 (insert_subvector undef,
                                                 (v2f32 (fneg V64:$Rm)),
                                                 (i64 0))),
                                      VectorIndexS:$idx)))),
            (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
                               (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                               VectorIndexS:$idx)>;
  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
                           (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
            (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
                (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;

  // 3 variants for the .4s version: DUPLANE from 128-bit, DUPLANE from 64-bit
  // and DUP scalar.
  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
                           (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
                                           VectorIndexS:$idx))),
            (FMLSv4i32_indexed V128:$Rd, V128:$Rn, V128:$Rm,
                               VectorIndexS:$idx)>;
  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
                           (v4f32 (AArch64duplane32
                                      (v4f32 (insert_subvector undef,
                                                 (v2f32 (fneg V64:$Rm)),
                                                 (i64 0))),
                                      VectorIndexS:$idx)))),
            (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
                               (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                               VectorIndexS:$idx)>;
  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
                           (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
            (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
                (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;

  // 2 variants for the .2d version: DUPLANE from 128-bit, and DUP scalar
  // (DUPLANE from 64-bit would be trivial).
  def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
                           (AArch64duplane64 (v2f64 (fneg V128:$Rm)),
                                           VectorIndexD:$idx))),
            (FMLSv2i64_indexed
                V128:$Rd, V128:$Rn, V128:$Rm, VectorIndexS:$idx)>;
  def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
                           (AArch64dup (f64 (fneg FPR64Op:$Rm))))),
            (FMLSv2i64_indexed V128:$Rd, V128:$Rn,
                (SUBREG_TO_REG (i32 0), FPR64Op:$Rm, dsub), (i64 0))>;

  // 2 variants for 32-bit scalar version: extract from .2s or from .4s
  def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
                         (vector_extract (v4f32 (fneg V128:$Rm)),
                                         VectorIndexS:$idx))),
            (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
                V128:$Rm, VectorIndexS:$idx)>;
  def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
                         (vector_extract (v4f32 (insert_subvector undef,
                                                    (v2f32 (fneg V64:$Rm)),
                                                    (i64 0))),
                                         VectorIndexS:$idx))),
            (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
                (SUBREG_TO_REG (i32 0), V64:$Rm, dsub), VectorIndexS:$idx)>;

  // 1 variant for 64-bit scalar version: extract from .1d or from .2d
  def : Pat<(f64 (OpNode (f64 FPR64:$Rd), (f64 FPR64:$Rn),
                         (vector_extract (v2f64 (fneg V128:$Rm)),
                                         VectorIndexS:$idx))),
            (FMLSv1i64_indexed FPR64:$Rd, FPR64:$Rn,
                V128:$Rm, VectorIndexS:$idx)>;
}

defm : FMLSIndexedAfterNegPatterns<
           TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
defm : FMLSIndexedAfterNegPatterns<
           TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)> >;

defm FMULX : SIMDFPIndexed<1, 0b1001, "fmulx", int_aarch64_neon_fmulx>;
defm FMUL  : SIMDFPIndexed<0, 0b1001, "fmul", fmul>;

def : Pat<(v2f32 (fmul V64:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
          (FMULv2i32_indexed V64:$Rn,
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
            (i64 0))>;
def : Pat<(v4f32 (fmul V128:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
          (FMULv4i32_indexed V128:$Rn,
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
            (i64 0))>;
def : Pat<(v2f64 (fmul V128:$Rn, (AArch64dup (f64 FPR64:$Rm)))),
          (FMULv2i64_indexed V128:$Rn,
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rm, dsub),
            (i64 0))>;
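
// Illustrative example for the fmul-by-broadcast patterns above: multiplying
// a vector by a splatted FP scalar (AArch64dup of an FPR) selects the
// by-element form, e.g. fmul v0.4s, v1.4s, v2.s[0], avoiding a separate dup
// instruction.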

defm SQDMULH : SIMDIndexedHS<0, 0b1100, "sqdmulh", int_aarch64_neon_sqdmulh>;
defm SQRDMULH : SIMDIndexedHS<0, 0b1101, "sqrdmulh", int_aarch64_neon_sqrdmulh>;

defm SQDMULH : SIMDIndexedHSPatterns<int_aarch64_neon_sqdmulh_lane,
                                     int_aarch64_neon_sqdmulh_laneq>;
defm SQRDMULH : SIMDIndexedHSPatterns<int_aarch64_neon_sqrdmulh_lane,
                                      int_aarch64_neon_sqrdmulh_laneq>;

// Generated by MachineCombine
defm MLA   : SIMDVectorIndexedHSTied<1, 0b0000, "mla", null_frag>;
defm MLS   : SIMDVectorIndexedHSTied<1, 0b0100, "mls", null_frag>;

defm MUL   : SIMDVectorIndexedHS<0, 0b1000, "mul", mul>;
defm SMLAL : SIMDVectorIndexedLongSDTied<0, 0b0010, "smlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMLSL : SIMDVectorIndexedLongSDTied<0, 0b0110, "smlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMULL : SIMDVectorIndexedLongSD<0, 0b1010, "smull",
                int_aarch64_neon_smull>;
defm SQDMLAL : SIMDIndexedLongSQDMLXSDTied<0, 0b0011, "sqdmlal",
                                           int_aarch64_neon_sqadd>;
defm SQDMLSL : SIMDIndexedLongSQDMLXSDTied<0, 0b0111, "sqdmlsl",
                                           int_aarch64_neon_sqsub>;
defm SQRDMLAH : SIMDIndexedSQRDMLxHSDTied<1, 0b1101, "sqrdmlah",
                                          int_aarch64_neon_sqadd>;
defm SQRDMLSH : SIMDIndexedSQRDMLxHSDTied<1, 0b1111, "sqrdmlsh",
                                          int_aarch64_neon_sqsub>;
defm SQDMULL : SIMDIndexedLongSD<0, 0b1011, "sqdmull", int_aarch64_neon_sqdmull>;
defm UMLAL   : SIMDVectorIndexedLongSDTied<1, 0b0010, "umlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMLSL   : SIMDVectorIndexedLongSDTied<1, 0b0110, "umlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMULL   : SIMDVectorIndexedLongSD<1, 0b1010, "umull",
                int_aarch64_neon_umull>;

// A scalar sqdmull with the second operand being a vector lane can be
// handled directly with the indexed instruction encoding.
def : Pat<(int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                          (vector_extract (v4i32 V128:$Vm),
                                                           VectorIndexS:$idx)),
          (SQDMULLv1i64_indexed FPR32:$Rn, V128:$Vm, VectorIndexS:$idx)>;

//----------------------------------------------------------------------------
// AdvSIMD scalar shift instructions
//----------------------------------------------------------------------------
defm FCVTZS : SIMDFPScalarRShift<0, 0b11111, "fcvtzs">;
defm FCVTZU : SIMDFPScalarRShift<1, 0b11111, "fcvtzu">;
defm SCVTF  : SIMDFPScalarRShift<0, 0b11100, "scvtf">;
defm UCVTF  : SIMDFPScalarRShift<1, 0b11100, "ucvtf">;
// Codegen patterns for the above. We don't put these directly on the
// instructions because TableGen's type inference can't handle the truth.
// Having the same base pattern for fp <--> int totally freaks it out.
def : Pat<(int_aarch64_neon_vcvtfp2fxs FPR32:$Rn, vecshiftR32:$imm),
          (FCVTZSs FPR32:$Rn, vecshiftR32:$imm)>;
def : Pat<(int_aarch64_neon_vcvtfp2fxu FPR32:$Rn, vecshiftR32:$imm),
          (FCVTZUs FPR32:$Rn, vecshiftR32:$imm)>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f64 FPR64:$Rn), vecshiftR64:$imm)),
          (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f64 FPR64:$Rn), vecshiftR64:$imm)),
          (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxs (v1f64 FPR64:$Rn),
                                            vecshiftR64:$imm)),
          (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxu (v1f64 FPR64:$Rn),
                                            vecshiftR64:$imm)),
          (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR32:$imm),
          (UCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
def : Pat<(f64 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
          (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1f64 (int_aarch64_neon_vcvtfxs2fp (v1i64 FPR64:$Rn),
                                            vecshiftR64:$imm)),
          (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(f64 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
          (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1f64 (int_aarch64_neon_vcvtfxu2fp (v1i64 FPR64:$Rn),
                                            vecshiftR64:$imm)),
          (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(int_aarch64_neon_vcvtfxs2fp FPR32:$Rn, vecshiftR32:$imm),
          (SCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
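
// Illustrative example for the fixed-point conversions above: converting an
// f32 to a signed Q16.16 fixed-point value, i.e. int_aarch64_neon_vcvtfp2fxs
// with 16 fractional bits, selects fcvtzs s0, s0, #16, which scales by 2^16
// before truncating towards zero.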

// Patterns for FP16 intrinsics - these require a reg copy to/from FPR16, as
// i16 is not a legal type.

def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 (sext_inreg FPR32:$Rn, i16)), vecshiftR16:$imm)),
          (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 FPR32:$Rn), vecshiftR16:$imm)),
          (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
          (SCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp
            (and FPR32:$Rn, (i32 65535)),
            vecshiftR16:$imm)),
          (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR16:$imm)),
          (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
          (UCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR32:$imm)),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FCVTZSh FPR16:$Rn, vecshiftR32:$imm),
            hsub))>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR64:$imm)),
          (i64 (INSERT_SUBREG
            (i64 (IMPLICIT_DEF)),
            (FCVTZSh FPR16:$Rn, vecshiftR64:$imm),
            hsub))>;
def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR32:$imm)),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FCVTZUh FPR16:$Rn, vecshiftR32:$imm),
            hsub))>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR64:$imm)),
          (i64 (INSERT_SUBREG
            (i64 (IMPLICIT_DEF)),
            (FCVTZUh FPR16:$Rn, vecshiftR64:$imm),
            hsub))>;
def : Pat<(i32 (int_aarch64_neon_facge (f16 FPR16:$Rn), (f16 FPR16:$Rm))),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FACGE16 FPR16:$Rn, FPR16:$Rm),
            hsub))>;
def : Pat<(i32 (int_aarch64_neon_facgt (f16 FPR16:$Rn), (f16 FPR16:$Rm))),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FACGT16 FPR16:$Rn, FPR16:$Rm),
            hsub))>;
6446
6447defm SHL      : SIMDScalarLShiftD<   0, 0b01010, "shl", AArch64vshl>;
6448defm SLI      : SIMDScalarLShiftDTied<1, 0b01010, "sli">;
6449defm SQRSHRN  : SIMDScalarRShiftBHS< 0, 0b10011, "sqrshrn",
6450                                     int_aarch64_neon_sqrshrn>;
6451defm SQRSHRUN : SIMDScalarRShiftBHS< 1, 0b10001, "sqrshrun",
6452                                     int_aarch64_neon_sqrshrun>;
6453defm SQSHLU   : SIMDScalarLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
6454defm SQSHL    : SIMDScalarLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
6455defm SQSHRN   : SIMDScalarRShiftBHS< 0, 0b10010, "sqshrn",
6456                                     int_aarch64_neon_sqshrn>;
6457defm SQSHRUN  : SIMDScalarRShiftBHS< 1, 0b10000, "sqshrun",
6458                                     int_aarch64_neon_sqshrun>;
6459defm SRI      : SIMDScalarRShiftDTied<   1, 0b01000, "sri">;
6460defm SRSHR    : SIMDScalarRShiftD<   0, 0b00100, "srshr", AArch64srshri>;
6461defm SRSRA    : SIMDScalarRShiftDTied<   0, 0b00110, "srsra",
6462    TriOpFrag<(add node:$LHS,
6463                   (AArch64srshri node:$MHS, node:$RHS))>>;
6464defm SSHR     : SIMDScalarRShiftD<   0, 0b00000, "sshr", AArch64vashr>;
6465defm SSRA     : SIMDScalarRShiftDTied<   0, 0b00010, "ssra",
6466    TriOpFrag<(add node:$LHS,
6467                   (AArch64vashr node:$MHS, node:$RHS))>>;
6468defm UQRSHRN  : SIMDScalarRShiftBHS< 1, 0b10011, "uqrshrn",
6469                                     int_aarch64_neon_uqrshrn>;
6470defm UQSHL    : SIMDScalarLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
6471defm UQSHRN   : SIMDScalarRShiftBHS< 1, 0b10010, "uqshrn",
6472                                     int_aarch64_neon_uqshrn>;
6473defm URSHR    : SIMDScalarRShiftD<   1, 0b00100, "urshr", AArch64urshri>;
6474defm URSRA    : SIMDScalarRShiftDTied<   1, 0b00110, "ursra",
6475    TriOpFrag<(add node:$LHS,
6476                   (AArch64urshri node:$MHS, node:$RHS))>>;
6477defm USHR     : SIMDScalarRShiftD<   1, 0b00000, "ushr", AArch64vlshr>;
6478defm USRA     : SIMDScalarRShiftDTied<   1, 0b00010, "usra",
6479    TriOpFrag<(add node:$LHS,
6480                   (AArch64vlshr node:$MHS, node:$RHS))>>;
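// Illustrative example of the tied accumulate forms: on d-registers,
// (add %acc, (AArch64vashr %x, 3)) folds to
//   ssra d0, d1, #3
// instead of a separate sshr + add pair.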

//----------------------------------------------------------------------------
// AdvSIMD vector shift instructions
//----------------------------------------------------------------------------
defm FCVTZS:SIMDVectorRShiftSD<0, 0b11111, "fcvtzs", int_aarch64_neon_vcvtfp2fxs>;
defm FCVTZU:SIMDVectorRShiftSD<1, 0b11111, "fcvtzu", int_aarch64_neon_vcvtfp2fxu>;
defm SCVTF: SIMDVectorRShiftToFP<0, 0b11100, "scvtf",
                                   int_aarch64_neon_vcvtfxs2fp>;
defm RSHRN   : SIMDVectorRShiftNarrowBHS<0, 0b10001, "rshrn",
                                         int_aarch64_neon_rshrn>;
defm SHL     : SIMDVectorLShiftBHSD<0, 0b01010, "shl", AArch64vshl>;
defm SHRN    : SIMDVectorRShiftNarrowBHS<0, 0b10000, "shrn",
                          BinOpFrag<(trunc (AArch64vashr node:$LHS, node:$RHS))>>;
defm SLI     : SIMDVectorLShiftBHSDTied<1, 0b01010, "sli", AArch64vsli>;
def : Pat<(v1i64 (AArch64vsli (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
                                      (i32 vecshiftL64:$imm))),
          (SLId FPR64:$Rd, FPR64:$Rn, vecshiftL64:$imm)>;
defm SQRSHRN : SIMDVectorRShiftNarrowBHS<0, 0b10011, "sqrshrn",
                                         int_aarch64_neon_sqrshrn>;
defm SQRSHRUN: SIMDVectorRShiftNarrowBHS<1, 0b10001, "sqrshrun",
                                         int_aarch64_neon_sqrshrun>;
defm SQSHLU : SIMDVectorLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
defm SQSHL  : SIMDVectorLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
defm SQSHRN  : SIMDVectorRShiftNarrowBHS<0, 0b10010, "sqshrn",
                                         int_aarch64_neon_sqshrn>;
defm SQSHRUN : SIMDVectorRShiftNarrowBHS<1, 0b10000, "sqshrun",
                                         int_aarch64_neon_sqshrun>;
defm SRI     : SIMDVectorRShiftBHSDTied<1, 0b01000, "sri", AArch64vsri>;
def : Pat<(v1i64 (AArch64vsri (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
                                      (i32 vecshiftR64:$imm))),
          (SRId FPR64:$Rd, FPR64:$Rn, vecshiftR64:$imm)>;
defm SRSHR   : SIMDVectorRShiftBHSD<0, 0b00100, "srshr", AArch64srshri>;
defm SRSRA   : SIMDVectorRShiftBHSDTied<0, 0b00110, "srsra",
                 TriOpFrag<(add node:$LHS,
                                (AArch64srshri node:$MHS, node:$RHS))> >;
defm SSHLL   : SIMDVectorLShiftLongBHSD<0, 0b10100, "sshll",
                BinOpFrag<(AArch64vshl (sext node:$LHS), node:$RHS)>>;

defm SSHR    : SIMDVectorRShiftBHSD<0, 0b00000, "sshr", AArch64vashr>;
defm SSRA    : SIMDVectorRShiftBHSDTied<0, 0b00010, "ssra",
                TriOpFrag<(add node:$LHS, (AArch64vashr node:$MHS, node:$RHS))>>;
defm UCVTF   : SIMDVectorRShiftToFP<1, 0b11100, "ucvtf",
                        int_aarch64_neon_vcvtfxu2fp>;
defm UQRSHRN : SIMDVectorRShiftNarrowBHS<1, 0b10011, "uqrshrn",
                                         int_aarch64_neon_uqrshrn>;
defm UQSHL   : SIMDVectorLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
defm UQSHRN  : SIMDVectorRShiftNarrowBHS<1, 0b10010, "uqshrn",
                                         int_aarch64_neon_uqshrn>;
defm URSHR   : SIMDVectorRShiftBHSD<1, 0b00100, "urshr", AArch64urshri>;
defm URSRA   : SIMDVectorRShiftBHSDTied<1, 0b00110, "ursra",
                TriOpFrag<(add node:$LHS,
                               (AArch64urshri node:$MHS, node:$RHS))> >;
defm USHLL   : SIMDVectorLShiftLongBHSD<1, 0b10100, "ushll",
                BinOpFrag<(AArch64vshl (zext node:$LHS), node:$RHS)>>;
defm USHR    : SIMDVectorRShiftBHSD<1, 0b00000, "ushr", AArch64vlshr>;
defm USRA    : SIMDVectorRShiftBHSDTied<1, 0b00010, "usra",
                TriOpFrag<(add node:$LHS, (AArch64vlshr node:$MHS, node:$RHS))> >;

// SHRN patterns for when a logical right shift was used instead of arithmetic
// (the immediate guarantees no sign bits actually end up in the result so it
// doesn't matter).
def : Pat<(v8i8 (trunc (AArch64vlshr (v8i16 V128:$Rn), vecshiftR16Narrow:$imm))),
          (SHRNv8i8_shift V128:$Rn, vecshiftR16Narrow:$imm)>;
def : Pat<(v4i16 (trunc (AArch64vlshr (v4i32 V128:$Rn), vecshiftR32Narrow:$imm))),
          (SHRNv4i16_shift V128:$Rn, vecshiftR32Narrow:$imm)>;
def : Pat<(v2i32 (trunc (AArch64vlshr (v2i64 V128:$Rn), vecshiftR64Narrow:$imm))),
          (SHRNv2i32_shift V128:$Rn, vecshiftR64Narrow:$imm)>;
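// For example, (v8i8 (trunc (AArch64vlshr (v8i16 %v), 8))) selects to the
// single narrowing shift below even though the DAG used a logical rather
// than an arithmetic shift:
//   shrn v0.8b, v1.8h, #8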

def : Pat<(v16i8 (concat_vectors (v8i8 V64:$Rd),
                                 (trunc (AArch64vlshr (v8i16 V128:$Rn),
                                                    vecshiftR16Narrow:$imm)))),
          (SHRNv16i8_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
                           V128:$Rn, vecshiftR16Narrow:$imm)>;
def : Pat<(v8i16 (concat_vectors (v4i16 V64:$Rd),
                                 (trunc (AArch64vlshr (v4i32 V128:$Rn),
                                                    vecshiftR32Narrow:$imm)))),
          (SHRNv8i16_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
                           V128:$Rn, vecshiftR32Narrow:$imm)>;
def : Pat<(v4i32 (concat_vectors (v2i32 V64:$Rd),
                                 (trunc (AArch64vlshr (v2i64 V128:$Rn),
                                                    vecshiftR64Narrow:$imm)))),
          (SHRNv4i32_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
                           V128:$Rn, vecshiftR64Narrow:$imm)>;
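// Illustrative: when the narrowed half is concatenated onto an existing
// 64-bit value, the high-half form is used instead, e.g.
//   shrn2 v0.16b, v1.8h, #8
// which writes the upper eight lanes of v0 and preserves the lower ones.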

// Vector sign and zero extensions are implemented with SSHLL and USHLL.
// Anyexts are implemented as zexts.
def : Pat<(v8i16 (sext   (v8i8 V64:$Rn))),  (SSHLLv8i8_shift  V64:$Rn, (i32 0))>;
def : Pat<(v8i16 (zext   (v8i8 V64:$Rn))),  (USHLLv8i8_shift  V64:$Rn, (i32 0))>;
def : Pat<(v8i16 (anyext (v8i8 V64:$Rn))),  (USHLLv8i8_shift  V64:$Rn, (i32 0))>;
def : Pat<(v4i32 (sext   (v4i16 V64:$Rn))), (SSHLLv4i16_shift V64:$Rn, (i32 0))>;
def : Pat<(v4i32 (zext   (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
def : Pat<(v4i32 (anyext (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
def : Pat<(v2i64 (sext   (v2i32 V64:$Rn))), (SSHLLv2i32_shift V64:$Rn, (i32 0))>;
def : Pat<(v2i64 (zext   (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
def : Pat<(v2i64 (anyext (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
// Also match an extend from the upper half of a 128 bit source register.
def : Pat<(v8i16 (anyext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
          (USHLLv16i8_shift V128:$Rn, (i32 0))>;
def : Pat<(v8i16 (zext   (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
          (USHLLv16i8_shift V128:$Rn, (i32 0))>;
def : Pat<(v8i16 (sext   (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
          (SSHLLv16i8_shift V128:$Rn, (i32 0))>;
def : Pat<(v4i32 (anyext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
          (USHLLv8i16_shift V128:$Rn, (i32 0))>;
def : Pat<(v4i32 (zext   (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
          (USHLLv8i16_shift V128:$Rn, (i32 0))>;
def : Pat<(v4i32 (sext   (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
          (SSHLLv8i16_shift V128:$Rn, (i32 0))>;
def : Pat<(v2i64 (anyext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
          (USHLLv4i32_shift V128:$Rn, (i32 0))>;
def : Pat<(v2i64 (zext   (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
          (USHLLv4i32_shift V128:$Rn, (i32 0))>;
def : Pat<(v2i64 (sext   (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
          (SSHLLv4i32_shift V128:$Rn, (i32 0))>;
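// A sketch of the resulting codegen (register numbers are illustrative):
//   v8i16 (sext v8i8)                =>  sshll  v0.8h, v1.8b,  #0
//   v8i16 (zext v8i8)                =>  ushll  v0.8h, v1.8b,  #0
//   v8i16 (sext, high half of v16i8) =>  sshll2 v0.8h, v1.16b, #0
// so the upper-half forms need no separate lane extraction.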

// Vector shift sxtl aliases
def : InstAlias<"sxtl.8h $dst, $src1",
                (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl $dst.8h, $src1.8b",
                (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl.4s $dst, $src1",
                (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl $dst.4s, $src1.4h",
                (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl.2d $dst, $src1",
                (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl $dst.2d, $src1.2s",
                (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;

// Vector shift sxtl2 aliases
def : InstAlias<"sxtl2.8h $dst, $src1",
                (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2 $dst.8h, $src1.16b",
                (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2.4s $dst, $src1",
                (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2 $dst.4s, $src1.8h",
                (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2.2d $dst, $src1",
                (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2 $dst.2d, $src1.4s",
                (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;

// Vector shift uxtl aliases
def : InstAlias<"uxtl.8h $dst, $src1",
                (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl $dst.8h, $src1.8b",
                (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl.4s $dst, $src1",
                (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl $dst.4s, $src1.4h",
                (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl.2d $dst, $src1",
                (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl $dst.2d, $src1.2s",
                (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;

// Vector shift uxtl2 aliases
def : InstAlias<"uxtl2.8h $dst, $src1",
                (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2 $dst.8h, $src1.16b",
                (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2.4s $dst, $src1",
                (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2 $dst.4s, $src1.8h",
                (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2.2d $dst, $src1",
                (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2 $dst.2d, $src1.4s",
                (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
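// These aliases affect only the assembler and disassembler; for example
//   uxtl v0.8h, v1.8b
// accepts and produces exactly the same encoding as
//   ushll v0.8h, v1.8b, #0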

// If an integer is about to be converted to a floating point value,
// just load it on the floating point unit.
// These patterns are more complex because floating point loads do not
// support sign extension.
// The sign extension has to be explicitly added and is only supported for
// one step: byte-to-half, half-to-word, word-to-doubleword.
// SCVTF GPR -> FPR is 9 cycles.
// SCVTF FPR -> FPR is 4 cycles.
// (sign extension with lengthen) SXTL FPR -> FPR is 2 cycles.
// Therefore, we can do 2 sign extensions and one SCVTF FPR -> FPR
// and still be faster.
// However, this is not good for code size.
// 8-bits -> float. 2 size steps up.
class SExtLoadi8CVTf32Pat<dag addrmode, dag INST>
  : Pat<(f32 (sint_to_fp (i32 (sextloadi8 addrmode)))),
        (SCVTFv1i32 (f32 (EXTRACT_SUBREG
                            (SSHLLv4i16_shift
                              (f64
                                (EXTRACT_SUBREG
                                  (SSHLLv8i8_shift
                                    (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                        INST,
                                        bsub),
                                    0),
                                  dsub)),
                               0),
                             ssub)))>,
    Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;

def : SExtLoadi8CVTf32Pat<(ro8.Wpat GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext),
                          (LDRBroW  GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext)>;
def : SExtLoadi8CVTf32Pat<(ro8.Xpat GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext),
                          (LDRBroX  GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext)>;
def : SExtLoadi8CVTf32Pat<(am_indexed8 GPR64sp:$Rn, uimm12s1:$offset),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : SExtLoadi8CVTf32Pat<(am_unscaled8 GPR64sp:$Rn, simm9:$offset),
                          (LDURBi GPR64sp:$Rn, simm9:$offset)>;
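// A hedged sketch of the expansion: (f32 (sint_to_fp (sextloadi8 [x0])))
// becomes an all-FPR sequence such as
//   ldr   b0, [x0]
//   sshll v0.8h, v0.8b, #0
//   sshll v0.4s, v0.4h, #0
//   scvtf s0, s0
// avoiding the slower GPR -> FPR scvtf form described above.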

// 16-bits -> float. 1 size step-up.
class SExtLoadi16CVTf32Pat<dag addrmode, dag INST>
  : Pat<(f32 (sint_to_fp (i32 (sextloadi16 addrmode)))),
        (SCVTFv1i32 (f32 (EXTRACT_SUBREG
                            (SSHLLv4i16_shift
                                (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                  INST,
                                  hsub),
                                0),
                            ssub)))>, Requires<[NotForCodeSize]>;

def : SExtLoadi16CVTf32Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
                           (LDRHroW   GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
def : SExtLoadi16CVTf32Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
                           (LDRHroX   GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
def : SExtLoadi16CVTf32Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
                           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : SExtLoadi16CVTf32Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
                           (LDURHi GPR64sp:$Rn, simm9:$offset)>;

// 32-bit to 32-bit conversions are handled in the target-specific dag
// combine: performIntToFpCombine.
// 64-bit integer to 32-bit floating point is not possible with SCVTF
// on floating point registers (both source and destination must have
// the same size).

// Here are the patterns for 8, 16, 32, and 64 bits to double.
// 8-bits -> double. 3 size steps up: give up.
// 16-bits -> double. 2 size steps up.
class SExtLoadi16CVTf64Pat<dag addrmode, dag INST>
  : Pat <(f64 (sint_to_fp (i32 (sextloadi16 addrmode)))),
           (SCVTFv1i64 (f64 (EXTRACT_SUBREG
                              (SSHLLv2i32_shift
                                 (f64
                                  (EXTRACT_SUBREG
                                    (SSHLLv4i16_shift
                                      (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                        INST,
                                        hsub),
                                     0),
                                   dsub)),
                               0),
                             dsub)))>,
    Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;

def : SExtLoadi16CVTf64Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
                           (LDRHroW GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
def : SExtLoadi16CVTf64Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
                           (LDRHroX GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
def : SExtLoadi16CVTf64Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
                           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : SExtLoadi16CVTf64Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
                           (LDURHi GPR64sp:$Rn, simm9:$offset)>;
// 32-bits -> double. 1 size step-up.
class SExtLoadi32CVTf64Pat<dag addrmode, dag INST>
  : Pat <(f64 (sint_to_fp (i32 (load addrmode)))),
           (SCVTFv1i64 (f64 (EXTRACT_SUBREG
                              (SSHLLv2i32_shift
                                (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                  INST,
                                  ssub),
                               0),
                             dsub)))>, Requires<[NotForCodeSize]>;

def : SExtLoadi32CVTf64Pat<(ro32.Wpat GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext),
                           (LDRSroW GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext)>;
def : SExtLoadi32CVTf64Pat<(ro32.Xpat GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext),
                           (LDRSroX GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext)>;
def : SExtLoadi32CVTf64Pat<(am_indexed32 GPR64sp:$Rn, uimm12s4:$offset),
                           (LDRSui GPR64sp:$Rn, uimm12s4:$offset)>;
def : SExtLoadi32CVTf64Pat<(am_unscaled32 GPR64sp:$Rn, simm9:$offset),
                           (LDURSi GPR64sp:$Rn, simm9:$offset)>;
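// Illustrative expansion of the 32-bit-to-double case (assuming x0 holds
// the address):
//   ldr   s0, [x0]
//   sshll v0.2d, v0.2s, #0
//   scvtf d0, d0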

// 64-bits -> double are handled in target specific dag combine:
// performIntToFpCombine.


//----------------------------------------------------------------------------
// AdvSIMD Load-Store Structure
//----------------------------------------------------------------------------
defm LD1 : SIMDLd1Multiple<"ld1">;
defm LD2 : SIMDLd2Multiple<"ld2">;
defm LD3 : SIMDLd3Multiple<"ld3">;
defm LD4 : SIMDLd4Multiple<"ld4">;

defm ST1 : SIMDSt1Multiple<"st1">;
defm ST2 : SIMDSt2Multiple<"st2">;
defm ST3 : SIMDSt3Multiple<"st3">;
defm ST4 : SIMDSt4Multiple<"st4">;

class Ld1Pat<ValueType ty, Instruction INST>
  : Pat<(ty (load GPR64sp:$Rn)), (INST GPR64sp:$Rn)>;

def : Ld1Pat<v16i8, LD1Onev16b>;
def : Ld1Pat<v8i16, LD1Onev8h>;
def : Ld1Pat<v4i32, LD1Onev4s>;
def : Ld1Pat<v2i64, LD1Onev2d>;
def : Ld1Pat<v8i8,  LD1Onev8b>;
def : Ld1Pat<v4i16, LD1Onev4h>;
def : Ld1Pat<v2i32, LD1Onev2s>;
def : Ld1Pat<v1i64, LD1Onev1d>;

class St1Pat<ValueType ty, Instruction INST>
  : Pat<(store ty:$Vt, GPR64sp:$Rn),
        (INST ty:$Vt, GPR64sp:$Rn)>;

def : St1Pat<v16i8, ST1Onev16b>;
def : St1Pat<v8i16, ST1Onev8h>;
def : St1Pat<v4i32, ST1Onev4s>;
def : St1Pat<v2i64, ST1Onev2d>;
def : St1Pat<v8i8,  ST1Onev8b>;
def : St1Pat<v4i16, ST1Onev4h>;
def : St1Pat<v2i32, ST1Onev2s>;
def : St1Pat<v1i64, ST1Onev1d>;

//---
// Single-element
//---

defm LD1R          : SIMDLdR<0, 0b110, 0, "ld1r", "One", 1, 2, 4, 8>;
defm LD2R          : SIMDLdR<1, 0b110, 0, "ld2r", "Two", 2, 4, 8, 16>;
defm LD3R          : SIMDLdR<0, 0b111, 0, "ld3r", "Three", 3, 6, 12, 24>;
defm LD4R          : SIMDLdR<1, 0b111, 0, "ld4r", "Four", 4, 8, 16, 32>;
let mayLoad = 1, hasSideEffects = 0 in {
defm LD1 : SIMDLdSingleBTied<0, 0b000,       "ld1", VecListOneb,   GPR64pi1>;
defm LD1 : SIMDLdSingleHTied<0, 0b010, 0,    "ld1", VecListOneh,   GPR64pi2>;
defm LD1 : SIMDLdSingleSTied<0, 0b100, 0b00, "ld1", VecListOnes,   GPR64pi4>;
defm LD1 : SIMDLdSingleDTied<0, 0b100, 0b01, "ld1", VecListOned,   GPR64pi8>;
defm LD2 : SIMDLdSingleBTied<1, 0b000,       "ld2", VecListTwob,   GPR64pi2>;
defm LD2 : SIMDLdSingleHTied<1, 0b010, 0,    "ld2", VecListTwoh,   GPR64pi4>;
defm LD2 : SIMDLdSingleSTied<1, 0b100, 0b00, "ld2", VecListTwos,   GPR64pi8>;
defm LD2 : SIMDLdSingleDTied<1, 0b100, 0b01, "ld2", VecListTwod,   GPR64pi16>;
defm LD3 : SIMDLdSingleBTied<0, 0b001,       "ld3", VecListThreeb, GPR64pi3>;
defm LD3 : SIMDLdSingleHTied<0, 0b011, 0,    "ld3", VecListThreeh, GPR64pi6>;
defm LD3 : SIMDLdSingleSTied<0, 0b101, 0b00, "ld3", VecListThrees, GPR64pi12>;
defm LD3 : SIMDLdSingleDTied<0, 0b101, 0b01, "ld3", VecListThreed, GPR64pi24>;
defm LD4 : SIMDLdSingleBTied<1, 0b001,       "ld4", VecListFourb,  GPR64pi4>;
defm LD4 : SIMDLdSingleHTied<1, 0b011, 0,    "ld4", VecListFourh,  GPR64pi8>;
defm LD4 : SIMDLdSingleSTied<1, 0b101, 0b00, "ld4", VecListFours,  GPR64pi16>;
defm LD4 : SIMDLdSingleDTied<1, 0b101, 0b01, "ld4", VecListFourd,  GPR64pi32>;
}

def : Pat<(v8i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
          (LD1Rv8b GPR64sp:$Rn)>;
def : Pat<(v16i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
          (LD1Rv16b GPR64sp:$Rn)>;
def : Pat<(v4i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
          (LD1Rv4h GPR64sp:$Rn)>;
def : Pat<(v8i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
          (LD1Rv8h GPR64sp:$Rn)>;
def : Pat<(v2i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
          (LD1Rv2s GPR64sp:$Rn)>;
def : Pat<(v4i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
          (LD1Rv4s GPR64sp:$Rn)>;
def : Pat<(v2i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
          (LD1Rv2d GPR64sp:$Rn)>;
def : Pat<(v1i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
          (LD1Rv1d GPR64sp:$Rn)>;
// Grab the floating point version too
def : Pat<(v2f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
          (LD1Rv2s GPR64sp:$Rn)>;
def : Pat<(v4f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
          (LD1Rv4s GPR64sp:$Rn)>;
def : Pat<(v2f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
          (LD1Rv2d GPR64sp:$Rn)>;
def : Pat<(v1f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
          (LD1Rv1d GPR64sp:$Rn)>;
def : Pat<(v4f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
          (LD1Rv4h GPR64sp:$Rn)>;
def : Pat<(v8f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
          (LD1Rv8h GPR64sp:$Rn)>;
def : Pat<(v4bf16 (AArch64dup (bf16 (load GPR64sp:$Rn)))),
          (LD1Rv4h GPR64sp:$Rn)>;
def : Pat<(v8bf16 (AArch64dup (bf16 (load GPR64sp:$Rn)))),
          (LD1Rv8h GPR64sp:$Rn)>;
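// For instance, (v4i32 (AArch64dup (load [x0]))) becomes a single
// load-and-replicate rather than a load followed by a dup:
//   ld1r { v0.4s }, [x0]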

class Ld1Lane128Pat<SDPatternOperator scalar_load, Operand VecIndex,
                    ValueType VTy, ValueType STy, Instruction LD1>
  : Pat<(vector_insert (VTy VecListOne128:$Rd),
           (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
        (LD1 VecListOne128:$Rd, VecIndex:$idx, GPR64sp:$Rn)>;

def : Ld1Lane128Pat<extloadi8,  VectorIndexB, v16i8, i32, LD1i8>;
def : Ld1Lane128Pat<extloadi16, VectorIndexH, v8i16, i32, LD1i16>;
def : Ld1Lane128Pat<load,       VectorIndexS, v4i32, i32, LD1i32>;
def : Ld1Lane128Pat<load,       VectorIndexS, v4f32, f32, LD1i32>;
def : Ld1Lane128Pat<load,       VectorIndexD, v2i64, i64, LD1i64>;
def : Ld1Lane128Pat<load,       VectorIndexD, v2f64, f64, LD1i64>;
def : Ld1Lane128Pat<load,       VectorIndexH, v8f16, f16, LD1i16>;
def : Ld1Lane128Pat<load,       VectorIndexH, v8bf16, bf16, LD1i16>;

// Generate LD1 for extload if memory type does not match the
// destination type, for example:
//
//   (v4i32 (insert_vector_elt (load anyext from i8) idx))
//
// In this case, the index must be adjusted to match LD1 type.
//
class Ld1Lane128IdxOpPat<SDPatternOperator scalar_load, Operand
                    VecIndex, ValueType VTy, ValueType STy,
                    Instruction LD1, SDNodeXForm IdxOp>
  : Pat<(vector_insert (VTy VecListOne128:$Rd),
                       (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
        (LD1 VecListOne128:$Rd, (IdxOp VecIndex:$idx), GPR64sp:$Rn)>;

def VectorIndexStoH : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue() * 2, SDLoc(N), MVT::i64);
}]>;
def VectorIndexStoB : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue() * 4, SDLoc(N), MVT::i64);
}]>;
def VectorIndexHtoB : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue() * 2, SDLoc(N), MVT::i64);
}]>;

def : Ld1Lane128IdxOpPat<extloadi16, VectorIndexS, v4i32, i32, LD1i16, VectorIndexStoH>;
def : Ld1Lane128IdxOpPat<extloadi8, VectorIndexS, v4i32, i32, LD1i8, VectorIndexStoB>;
def : Ld1Lane128IdxOpPat<extloadi8, VectorIndexH, v8i16, i32, LD1i8, VectorIndexHtoB>;
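// Worked example (illustrative, little-endian): inserting an i8 extload into
// lane 1 of a v4i32 uses LD1i8 on the byte view of the register, so the lane
// index is rescaled by 4 (VectorIndexStoB) and the match emits
//   ld1 { v0.b }[4], [x0]
// which fills exactly the low byte of 32-bit lane 1.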

// Same as above, but the first element is populated using
// scalar_to_vector + insert_subvector instead of insert_vector_elt.
class Ld1Lane128FirstElm<ValueType ResultTy, ValueType VecTy,
                        SDPatternOperator ExtLoad, Instruction LD1>
  : Pat<(ResultTy (scalar_to_vector (i32 (ExtLoad GPR64sp:$Rn)))),
          (ResultTy (EXTRACT_SUBREG
            (LD1 (VecTy (IMPLICIT_DEF)), 0, GPR64sp:$Rn), dsub))>;

def : Ld1Lane128FirstElm<v2i32, v8i16, extloadi16, LD1i16>;
def : Ld1Lane128FirstElm<v2i32, v16i8, extloadi8, LD1i8>;
def : Ld1Lane128FirstElm<v4i16, v16i8, extloadi8, LD1i8>;

class Ld1Lane64Pat<SDPatternOperator scalar_load, Operand VecIndex,
                   ValueType VTy, ValueType STy, Instruction LD1>
  : Pat<(vector_insert (VTy VecListOne64:$Rd),
           (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
        (EXTRACT_SUBREG
            (LD1 (SUBREG_TO_REG (i32 0), VecListOne64:$Rd, dsub),
                          VecIndex:$idx, GPR64sp:$Rn),
            dsub)>;

def : Ld1Lane64Pat<extloadi8,  VectorIndexB, v8i8,  i32, LD1i8>;
def : Ld1Lane64Pat<extloadi16, VectorIndexH, v4i16, i32, LD1i16>;
def : Ld1Lane64Pat<load,       VectorIndexS, v2i32, i32, LD1i32>;
def : Ld1Lane64Pat<load,       VectorIndexS, v2f32, f32, LD1i32>;
def : Ld1Lane64Pat<load,       VectorIndexH, v4f16, f16, LD1i16>;
def : Ld1Lane64Pat<load,       VectorIndexH, v4bf16, bf16, LD1i16>;


defm LD1 : SIMDLdSt1SingleAliases<"ld1">;
defm LD2 : SIMDLdSt2SingleAliases<"ld2">;
defm LD3 : SIMDLdSt3SingleAliases<"ld3">;
defm LD4 : SIMDLdSt4SingleAliases<"ld4">;

// Stores
defm ST1 : SIMDStSingleB<0, 0b000,       "st1", VecListOneb, GPR64pi1>;
defm ST1 : SIMDStSingleH<0, 0b010, 0,    "st1", VecListOneh, GPR64pi2>;
defm ST1 : SIMDStSingleS<0, 0b100, 0b00, "st1", VecListOnes, GPR64pi4>;
defm ST1 : SIMDStSingleD<0, 0b100, 0b01, "st1", VecListOned, GPR64pi8>;

let AddedComplexity = 19 in
class St1Lane128Pat<SDPatternOperator scalar_store, Operand VecIndex,
                    ValueType VTy, ValueType STy, Instruction ST1>
  : Pat<(scalar_store
             (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
             GPR64sp:$Rn),
        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn)>;

def : St1Lane128Pat<truncstorei8,  VectorIndexB, v16i8, i32, ST1i8>;
def : St1Lane128Pat<truncstorei16, VectorIndexH, v8i16, i32, ST1i16>;
def : St1Lane128Pat<store,         VectorIndexS, v4i32, i32, ST1i32>;
def : St1Lane128Pat<store,         VectorIndexS, v4f32, f32, ST1i32>;
def : St1Lane128Pat<store,         VectorIndexD, v2i64, i64, ST1i64>;
def : St1Lane128Pat<store,         VectorIndexD, v2f64, f64, ST1i64>;
def : St1Lane128Pat<store,         VectorIndexH, v8f16, f16, ST1i16>;
def : St1Lane128Pat<store,         VectorIndexH, v8bf16, bf16, ST1i16>;

let AddedComplexity = 19 in
class St1Lane64Pat<SDPatternOperator scalar_store, Operand VecIndex,
                   ValueType VTy, ValueType STy, Instruction ST1>
  : Pat<(scalar_store
             (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
             GPR64sp:$Rn),
        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
             VecIndex:$idx, GPR64sp:$Rn)>;

def : St1Lane64Pat<truncstorei8,  VectorIndexB, v8i8, i32, ST1i8>;
def : St1Lane64Pat<truncstorei16, VectorIndexH, v4i16, i32, ST1i16>;
def : St1Lane64Pat<store,         VectorIndexS, v2i32, i32, ST1i32>;
def : St1Lane64Pat<store,         VectorIndexS, v2f32, f32, ST1i32>;
def : St1Lane64Pat<store,         VectorIndexH, v4f16, f16, ST1i16>;
def : St1Lane64Pat<store,         VectorIndexH, v4bf16, bf16, ST1i16>;

multiclass St1LanePost64Pat<SDPatternOperator scalar_store, Operand VecIndex,
                             ValueType VTy, ValueType STy, Instruction ST1,
                             int offset> {
  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, offset),
        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
             VecIndex:$idx, GPR64sp:$Rn, XZR)>;

  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, GPR64:$Rm),
        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
             VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
}

defm : St1LanePost64Pat<post_truncsti8, VectorIndexB, v8i8, i32, ST1i8_POST, 1>;
defm : St1LanePost64Pat<post_truncsti16, VectorIndexH, v4i16, i32, ST1i16_POST,
                        2>;
defm : St1LanePost64Pat<post_store, VectorIndexS, v2i32, i32, ST1i32_POST, 4>;
defm : St1LanePost64Pat<post_store, VectorIndexS, v2f32, f32, ST1i32_POST, 4>;
defm : St1LanePost64Pat<post_store, VectorIndexD, v1i64, i64, ST1i64_POST, 8>;
defm : St1LanePost64Pat<post_store, VectorIndexD, v1f64, f64, ST1i64_POST, 8>;
defm : St1LanePost64Pat<post_store, VectorIndexH, v4f16, f16, ST1i16_POST, 2>;
defm : St1LanePost64Pat<post_store, VectorIndexH, v4bf16, bf16, ST1i16_POST, 2>;
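// Example of the post-indexed form (a sketch; XZR selects the immediate
// variant when the increment equals the element size):
//   st1 { v0.h }[2], [x0], #2
// stores lane 2 and advances x0 by two bytes in one instruction.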

multiclass St1LanePost128Pat<SDPatternOperator scalar_store, Operand VecIndex,
                             ValueType VTy, ValueType STy, Instruction ST1,
                             int offset> {
  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, offset),
        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, XZR)>;

  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, GPR64:$Rm),
        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
}

defm : St1LanePost128Pat<post_truncsti8, VectorIndexB, v16i8, i32, ST1i8_POST,
                         1>;
defm : St1LanePost128Pat<post_truncsti16, VectorIndexH, v8i16, i32, ST1i16_POST,
                         2>;
defm : St1LanePost128Pat<post_store, VectorIndexS, v4i32, i32, ST1i32_POST, 4>;
defm : St1LanePost128Pat<post_store, VectorIndexS, v4f32, f32, ST1i32_POST, 4>;
defm : St1LanePost128Pat<post_store, VectorIndexD, v2i64, i64, ST1i64_POST, 8>;
defm : St1LanePost128Pat<post_store, VectorIndexD, v2f64, f64, ST1i64_POST, 8>;
defm : St1LanePost128Pat<post_store, VectorIndexH, v8f16, f16, ST1i16_POST, 2>;
defm : St1LanePost128Pat<post_store, VectorIndexH, v8bf16, bf16, ST1i16_POST, 2>;

let mayStore = 1, hasSideEffects = 0 in {
defm ST2 : SIMDStSingleB<1, 0b000,       "st2", VecListTwob,   GPR64pi2>;
defm ST2 : SIMDStSingleH<1, 0b010, 0,    "st2", VecListTwoh,   GPR64pi4>;
defm ST2 : SIMDStSingleS<1, 0b100, 0b00, "st2", VecListTwos,   GPR64pi8>;
defm ST2 : SIMDStSingleD<1, 0b100, 0b01, "st2", VecListTwod,   GPR64pi16>;
defm ST3 : SIMDStSingleB<0, 0b001,       "st3", VecListThreeb, GPR64pi3>;
defm ST3 : SIMDStSingleH<0, 0b011, 0,    "st3", VecListThreeh, GPR64pi6>;
defm ST3 : SIMDStSingleS<0, 0b101, 0b00, "st3", VecListThrees, GPR64pi12>;
defm ST3 : SIMDStSingleD<0, 0b101, 0b01, "st3", VecListThreed, GPR64pi24>;
defm ST4 : SIMDStSingleB<1, 0b001,       "st4", VecListFourb,  GPR64pi4>;
defm ST4 : SIMDStSingleH<1, 0b011, 0,    "st4", VecListFourh,  GPR64pi8>;
defm ST4 : SIMDStSingleS<1, 0b101, 0b00, "st4", VecListFours,  GPR64pi16>;
defm ST4 : SIMDStSingleD<1, 0b101, 0b01, "st4", VecListFourd,  GPR64pi32>;
}

defm ST1 : SIMDLdSt1SingleAliases<"st1">;
defm ST2 : SIMDLdSt2SingleAliases<"st2">;
defm ST3 : SIMDLdSt3SingleAliases<"st3">;
defm ST4 : SIMDLdSt4SingleAliases<"st4">;

//----------------------------------------------------------------------------
// Crypto extensions
//----------------------------------------------------------------------------

let Predicates = [HasAES] in {
def AESErr   : AESTiedInst<0b0100, "aese",   int_aarch64_crypto_aese>;
def AESDrr   : AESTiedInst<0b0101, "aesd",   int_aarch64_crypto_aesd>;
def AESMCrr  : AESInst<    0b0110, "aesmc",  int_aarch64_crypto_aesmc>;
def AESIMCrr : AESInst<    0b0111, "aesimc", int_aarch64_crypto_aesimc>;
}

// Pseudo instructions for AESMCrr/AESIMCrr with a register constraint required
// for AES fusion on some CPUs.
let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
def AESMCrrTied: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
                        Sched<[WriteVq]>;
def AESIMCrrTied: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
                         Sched<[WriteVq]>;
}

// Only use constrained versions of AES(I)MC instructions if they are paired with
// AESE/AESD.
def : Pat<(v16i8 (int_aarch64_crypto_aesmc
            (v16i8 (int_aarch64_crypto_aese (v16i8 V128:$src1),
                                            (v16i8 V128:$src2))))),
          (v16i8 (AESMCrrTied (v16i8 (AESErr (v16i8 V128:$src1),
                                             (v16i8 V128:$src2)))))>,
          Requires<[HasFuseAES]>;

def : Pat<(v16i8 (int_aarch64_crypto_aesimc
            (v16i8 (int_aarch64_crypto_aesd (v16i8 V128:$src1),
                                            (v16i8 V128:$src2))))),
          (v16i8 (AESIMCrrTied (v16i8 (AESDrr (v16i8 V128:$src1),
                                              (v16i8 V128:$src2)))))>,
          Requires<[HasFuseAES]>;
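// The effect of the tied pseudos, illustratively, is that a fused pair stays
// adjacent and shares a destination register:
//   aese  v0.16b, v1.16b
//   aesmc v0.16b, v0.16b
// which is the pairing that HasFuseAES cores can execute back to back.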

let Predicates = [HasSHA2] in {
def SHA1Crrr     : SHATiedInstQSV<0b000, "sha1c",   int_aarch64_crypto_sha1c>;
def SHA1Prrr     : SHATiedInstQSV<0b001, "sha1p",   int_aarch64_crypto_sha1p>;
def SHA1Mrrr     : SHATiedInstQSV<0b010, "sha1m",   int_aarch64_crypto_sha1m>;
def SHA1SU0rrr   : SHATiedInstVVV<0b011, "sha1su0", int_aarch64_crypto_sha1su0>;
def SHA256Hrrr   : SHATiedInstQQV<0b100, "sha256h", int_aarch64_crypto_sha256h>;
def SHA256H2rrr  : SHATiedInstQQV<0b101, "sha256h2",int_aarch64_crypto_sha256h2>;
def SHA256SU1rrr :SHATiedInstVVV<0b110, "sha256su1",int_aarch64_crypto_sha256su1>;

def SHA1Hrr     : SHAInstSS<    0b0000, "sha1h",    int_aarch64_crypto_sha1h>;
def SHA1SU1rr   : SHATiedInstVV<0b0001, "sha1su1",  int_aarch64_crypto_sha1su1>;
def SHA256SU0rr : SHATiedInstVV<0b0010, "sha256su0",int_aarch64_crypto_sha256su0>;
}

//----------------------------------------------------------------------------
// Compiler-pseudos
//----------------------------------------------------------------------------
// FIXME: Like for X86, these should go in their own separate .td file.

def def32 : PatLeaf<(i32 GPR32:$src), [{
  return isDef32(*N);
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)), (SUBREG_TO_REG (i64 0), GPR32:$src, sub_32)>;

// For an anyext, we don't care what the high bits are, so we can perform an
// INSERT_SUBREG into an IMPLICIT_DEF.
def : Pat<(i64 (anyext GPR32:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32)>;

// When we need to explicitly zero-extend, we use a 32-bit MOV instruction and
// then assert the extension has happened.
def : Pat<(i64 (zext GPR32:$src)),
          (SUBREG_TO_REG (i32 0), (ORRWrs WZR, GPR32:$src, 0), sub_32)>;
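// Illustrative result: an explicit (i64 (zext GPR32)) becomes a 32-bit move
// (the ORRWrs above), which zeroes the upper half by architectural
// definition:
//   mov w0, w0
// while def32 values need no instruction at all.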

// To sign extend, we use a signed bitfield move instruction (SBFM) on the
// containing super-reg.
def : Pat<(i64 (sext GPR32:$src)),
   (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32), 0, 31)>;
def : Pat<(i64 (sext_inreg GPR64:$src, i32)), (SBFMXri GPR64:$src, 0, 31)>;
def : Pat<(i64 (sext_inreg GPR64:$src, i16)), (SBFMXri GPR64:$src, 0, 15)>;
def : Pat<(i64 (sext_inreg GPR64:$src, i8)),  (SBFMXri GPR64:$src, 0, 7)>;
def : Pat<(i64 (sext_inreg GPR64:$src, i1)),  (SBFMXri GPR64:$src, 0, 0)>;
def : Pat<(i32 (sext_inreg GPR32:$src, i16)), (SBFMWri GPR32:$src, 0, 15)>;
def : Pat<(i32 (sext_inreg GPR32:$src, i8)),  (SBFMWri GPR32:$src, 0, 7)>;
def : Pat<(i32 (sext_inreg GPR32:$src, i1)),  (SBFMWri GPR32:$src, 0, 0)>;
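// These SBFM immediates correspond to the familiar aliases, e.g. (an
// illustrative mapping):
//   sext_inreg i8    ->  sxtb (sbfm Rd, Rn, #0, #7)
//   sext_inreg i16   ->  sxth (sbfm Rd, Rn, #0, #15)
//   sext i32 -> i64  ->  sxtw (sbfm Xd, Xn, #0, #31)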

def : Pat<(shl (sext_inreg GPR32:$Rn, i8), (i64 imm0_31:$imm)),
          (SBFMWri GPR32:$Rn, (i64 (i32shift_a       imm0_31:$imm)),
                              (i64 (i32shift_sext_i8 imm0_31:$imm)))>;
def : Pat<(shl (sext_inreg GPR64:$Rn, i8), (i64 imm0_63:$imm)),
          (SBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
                              (i64 (i64shift_sext_i8 imm0_63:$imm)))>;

def : Pat<(shl (sext_inreg GPR32:$Rn, i16), (i64 imm0_31:$imm)),
          (SBFMWri GPR32:$Rn, (i64 (i32shift_a        imm0_31:$imm)),
                              (i64 (i32shift_sext_i16 imm0_31:$imm)))>;
def : Pat<(shl (sext_inreg GPR64:$Rn, i16), (i64 imm0_63:$imm)),
          (SBFMXri GPR64:$Rn, (i64 (i64shift_a        imm0_63:$imm)),
                              (i64 (i64shift_sext_i16 imm0_63:$imm)))>;

def : Pat<(shl (i64 (sext GPR32:$Rn)), (i64 imm0_63:$imm)),
          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
                   (i64 (i64shift_a        imm0_63:$imm)),
                   (i64 (i64shift_sext_i32 imm0_63:$imm)))>;
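// For example (hedged; the immediates follow the i32shift_* helpers used
// above), (shl (sext_inreg w0, i8), 3) folds into one bitfield insert:
//   sbfiz w0, w0, #3, #8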

// sra patterns have an AddedComplexity of 10, so make sure we have a higher
// AddedComplexity for the following patterns since we want to match sext + sra
// patterns before we attempt to match a single sra node.
let AddedComplexity = 20 in {
// We support all sext + sra combinations which preserve at least one bit of the
// original value which is to be sign extended. E.g. we support shifts up to
// bitwidth-1 bits.
def : Pat<(sra (sext_inreg GPR32:$Rn, i8), (i64 imm0_7:$imm)),
          (SBFMWri GPR32:$Rn, (i64 imm0_7:$imm), 7)>;
def : Pat<(sra (sext_inreg GPR64:$Rn, i8), (i64 imm0_7:$imm)),
          (SBFMXri GPR64:$Rn, (i64 imm0_7:$imm), 7)>;

def : Pat<(sra (sext_inreg GPR32:$Rn, i16), (i64 imm0_15:$imm)),
          (SBFMWri GPR32:$Rn, (i64 imm0_15:$imm), 15)>;
def : Pat<(sra (sext_inreg GPR64:$Rn, i16), (i64 imm0_15:$imm)),
          (SBFMXri GPR64:$Rn, (i64 imm0_15:$imm), 15)>;

def : Pat<(sra (i64 (sext GPR32:$Rn)), (i64 imm0_31:$imm)),
          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
                   (i64 imm0_31:$imm), 31)>;
} // AddedComplexity = 20
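// Illustrative mapping for the combined sext + sra forms:
//   (sra (sext_inreg w0, i8), 2)  =>  sbfx w0, w0, #2, #6
// i.e. a single signed bitfield extract of the six surviving bits.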

// To truncate, we can simply extract from a subregister.
def : Pat<(i32 (trunc GPR64sp:$src)),
          (i32 (EXTRACT_SUBREG GPR64sp:$src, sub_32))>;

// __builtin_trap() uses the BRK instruction on AArch64.
def : Pat<(trap), (BRK 1)>;
def : Pat<(debugtrap), (BRK 0xF000)>;

def ubsan_trap_xform : SDNodeXForm<timm, [{
  return CurDAG->getTargetConstant(N->getZExtValue() | ('U' << 8), SDLoc(N), MVT::i32);
}]>;

def ubsan_trap_imm : TImmLeaf<i32, [{
  return isUInt<8>(Imm);
}], ubsan_trap_xform>;

def : Pat<(ubsantrap ubsan_trap_imm:$kind), (BRK ubsan_trap_imm:$kind)>;
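// A worked example of the encoding transform (the 'U' tag is 0x55):
//   __builtin_ubsantrap(1)  =>  brk #0x5501
// while plain __builtin_trap() and __builtin_debugtrap() use brk #0x1 and
// brk #0xf000 from the patterns above.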

// Multiply high patterns which multiply the lower subvector using smull/umull
// and the upper subvector with smull2/umull2. Then shuffle the high
// part of both results together.
def : Pat<(v16i8 (mulhs V128:$Rn, V128:$Rm)),
          (UZP2v16i8
           (SMULLv8i8_v8i16 (EXTRACT_SUBREG V128:$Rn, dsub),
                            (EXTRACT_SUBREG V128:$Rm, dsub)),
           (SMULLv16i8_v8i16 V128:$Rn, V128:$Rm))>;
def : Pat<(v8i16 (mulhs V128:$Rn, V128:$Rm)),
          (UZP2v8i16
           (SMULLv4i16_v4i32 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (SMULLv8i16_v4i32 V128:$Rn, V128:$Rm))>;
def : Pat<(v4i32 (mulhs V128:$Rn, V128:$Rm)),
          (UZP2v4i32
           (SMULLv2i32_v2i64 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (SMULLv4i32_v2i64 V128:$Rn, V128:$Rm))>;

def : Pat<(v16i8 (mulhu V128:$Rn, V128:$Rm)),
          (UZP2v16i8
           (UMULLv8i8_v8i16 (EXTRACT_SUBREG V128:$Rn, dsub),
                            (EXTRACT_SUBREG V128:$Rm, dsub)),
           (UMULLv16i8_v8i16 V128:$Rn, V128:$Rm))>;
def : Pat<(v8i16 (mulhu V128:$Rn, V128:$Rm)),
          (UZP2v8i16
           (UMULLv4i16_v4i32 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (UMULLv8i16_v4i32 V128:$Rn, V128:$Rm))>;
def : Pat<(v4i32 (mulhu V128:$Rn, V128:$Rm)),
          (UZP2v4i32
           (UMULLv2i32_v2i64 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (UMULLv4i32_v2i64 V128:$Rn, V128:$Rm))>;
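// Sketch of the emitted sequence for v16i8 mulhs (registers illustrative):
//   smull  v2.8h,  v0.8b,  v1.8b
//   smull2 v3.8h,  v0.16b, v1.16b
//   uzp2   v0.16b, v2.16b, v3.16b
// uzp2 keeps the odd-indexed bytes, i.e. the high half of each 16-bit
// product.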

// Conversions within AdvSIMD types in the same register size are free.
// But because we need a consistent lane ordering, in big endian many
// conversions require one or more REV instructions.
//
// Consider a simple memory load followed by a bitconvert then a store.
//   v0 = load v2i32
//   v1 = BITCAST v2i32 v0 to v4i16
//        store v4i16 v1
7240//
7241// In big endian mode every memory access has an implicit byte swap. LDR and
7242// STR do a 64-bit byte swap, whereas LD1/ST1 do a byte swap per lane - that
7243// is, they treat the vector as a sequence of elements to be byte-swapped.
7244// The two pairs of instructions are fundamentally incompatible. We've decided
7245// to use LD1/ST1 only to simplify compiler implementation.
7246//
7247// LD1/ST1 perform the equivalent of a sequence of LDR/STR + REV. This makes
7248// the original code sequence:
7249//   v0 = load v2i32
7250//   v1 = REV v2i32                  (implicit)
7251//   v2 = BITCAST v2i32 v1 to v4i16
7252//   v3 = REV v4i16 v2               (implicit)
7253//        store v4i16 v3
7254//
7255// But this is now broken - the value stored is different to the value loaded
7256// due to lane reordering. To fix this, on every BITCAST we must perform two
7257// other REVs:
7258//   v0 = load v2i32
7259//   v1 = REV v2i32                  (implicit)
7260//   v2 = REV v2i32
7261//   v3 = BITCAST v2i32 v2 to v4i16
7262//   v4 = REV v4i16
7263//   v5 = REV v4i16 v4               (implicit)
7264//        store v4i16 v5
7265//
7266// This means an extra two instructions, but actually in most cases the two REV
7267// instructions can be combined into one. For example:
7268//   (REV64_2s (REV64_4h X)) === (REV32_4h X)
7269//
7270// There is also no 128-bit REV instruction. This must be synthesized with an
7271// EXT instruction.
7272//
7273// Most bitconverts require some sort of conversion. The only exceptions are:
7274//   a) Identity conversions -  vNfX <-> vNiX
7275//   b) Single-lane-to-scalar - v1fX <-> fX or v1iX <-> iX
7276//
7277
7278// Natural vector casts (64 bit)
7279def : Pat<(v8i8 (AArch64NvCast (v2i32 FPR64:$src))), (v8i8 FPR64:$src)>;
7280def : Pat<(v4i16 (AArch64NvCast (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
7281def : Pat<(v4f16 (AArch64NvCast (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
7282def : Pat<(v4bf16 (AArch64NvCast (v2i32 FPR64:$src))), (v4bf16 FPR64:$src)>;
7283def : Pat<(v2i32 (AArch64NvCast (v2i32 FPR64:$src))), (v2i32 FPR64:$src)>;
7284def : Pat<(v2f32 (AArch64NvCast (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;
7285def : Pat<(v1i64 (AArch64NvCast (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;
7286
7287def : Pat<(v8i8 (AArch64NvCast (v4i16 FPR64:$src))), (v8i8 FPR64:$src)>;
7288def : Pat<(v4i16 (AArch64NvCast (v4i16 FPR64:$src))), (v4i16 FPR64:$src)>;
7289def : Pat<(v4f16 (AArch64NvCast (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
7290def : Pat<(v4bf16 (AArch64NvCast (v4i16 FPR64:$src))), (v4bf16 FPR64:$src)>;
7291def : Pat<(v2i32 (AArch64NvCast (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
7292def : Pat<(v1i64 (AArch64NvCast (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;
7293
7294def : Pat<(v8i8 (AArch64NvCast (v8i8 FPR64:$src))), (v8i8 FPR64:$src)>;
7295def : Pat<(v4i16 (AArch64NvCast (v8i8 FPR64:$src))), (v4i16 FPR64:$src)>;
7296def : Pat<(v4f16 (AArch64NvCast (v8i8 FPR64:$src))), (v4f16 FPR64:$src)>;
7297def : Pat<(v4bf16 (AArch64NvCast (v8i8 FPR64:$src))), (v4bf16 FPR64:$src)>;
7298def : Pat<(v2i32 (AArch64NvCast (v8i8 FPR64:$src))), (v2i32 FPR64:$src)>;
7299def : Pat<(v2f32 (AArch64NvCast (v8i8 FPR64:$src))), (v2f32 FPR64:$src)>;
7300def : Pat<(v1i64 (AArch64NvCast (v8i8 FPR64:$src))), (v1i64 FPR64:$src)>;
7301
7302def : Pat<(v8i8 (AArch64NvCast (f64 FPR64:$src))), (v8i8 FPR64:$src)>;
7303def : Pat<(v4i16 (AArch64NvCast (f64 FPR64:$src))), (v4i16 FPR64:$src)>;
7304def : Pat<(v4f16 (AArch64NvCast (f64 FPR64:$src))), (v4f16 FPR64:$src)>;
7305def : Pat<(v4bf16 (AArch64NvCast (f64 FPR64:$src))), (v4bf16 FPR64:$src)>;
7306def : Pat<(v2i32 (AArch64NvCast (f64 FPR64:$src))), (v2i32 FPR64:$src)>;
7307def : Pat<(v2f32 (AArch64NvCast (f64 FPR64:$src))), (v2f32 FPR64:$src)>;
7308def : Pat<(v1i64 (AArch64NvCast (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
7309def : Pat<(v1f64 (AArch64NvCast (f64 FPR64:$src))), (v1f64 FPR64:$src)>;
7310
7311def : Pat<(v8i8 (AArch64NvCast (v2f32 FPR64:$src))), (v8i8 FPR64:$src)>;
7312def : Pat<(v4i16 (AArch64NvCast (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
7313def : Pat<(v2i32 (AArch64NvCast (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;
7314def : Pat<(v2f32 (AArch64NvCast (v2f32 FPR64:$src))), (v2f32 FPR64:$src)>;
7315def : Pat<(v1i64 (AArch64NvCast (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
7316def : Pat<(v1f64 (AArch64NvCast (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;
7317
7318// Natural vector casts (128 bit)
7319def : Pat<(v16i8 (AArch64NvCast (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
7320def : Pat<(v8i16 (AArch64NvCast (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
7321def : Pat<(v8f16 (AArch64NvCast (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
7322def : Pat<(v8bf16 (AArch64NvCast (v4i32 FPR128:$src))), (v8bf16 FPR128:$src)>;
7323def : Pat<(v4i32 (AArch64NvCast (v4i32 FPR128:$src))), (v4i32 FPR128:$src)>;
7324def : Pat<(v4f32 (AArch64NvCast (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;
7325def : Pat<(v2i64 (AArch64NvCast (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
7326def : Pat<(v2f64 (AArch64NvCast (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;
7327
7328def : Pat<(v16i8 (AArch64NvCast (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
7329def : Pat<(v8i16 (AArch64NvCast (v8i16 FPR128:$src))), (v8i16 FPR128:$src)>;
7330def : Pat<(v8f16 (AArch64NvCast (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
7331def : Pat<(v8bf16 (AArch64NvCast (v8i16 FPR128:$src))), (v8bf16 FPR128:$src)>;
7332def : Pat<(v4i32 (AArch64NvCast (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
7333def : Pat<(v2i64 (AArch64NvCast (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
7334def : Pat<(v4f32 (AArch64NvCast (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
7335def : Pat<(v2f64 (AArch64NvCast (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;
7336
7337def : Pat<(v16i8 (AArch64NvCast (v16i8 FPR128:$src))), (v16i8 FPR128:$src)>;
7338def : Pat<(v8i16 (AArch64NvCast (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
7339def : Pat<(v8f16 (AArch64NvCast (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
7340def : Pat<(v8bf16 (AArch64NvCast (v16i8 FPR128:$src))), (v8bf16 FPR128:$src)>;
7341def : Pat<(v4i32 (AArch64NvCast (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
7342def : Pat<(v2i64 (AArch64NvCast (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
7343def : Pat<(v4f32 (AArch64NvCast (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
7344def : Pat<(v2f64 (AArch64NvCast (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;
7345
7346def : Pat<(v16i8 (AArch64NvCast (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
7347def : Pat<(v8i16 (AArch64NvCast (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
7348def : Pat<(v8f16 (AArch64NvCast (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
7349def : Pat<(v8bf16 (AArch64NvCast (v2i64 FPR128:$src))), (v8bf16 FPR128:$src)>;
7350def : Pat<(v4i32 (AArch64NvCast (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
7351def : Pat<(v2i64 (AArch64NvCast (v2i64 FPR128:$src))), (v2i64 FPR128:$src)>;
7352def : Pat<(v4f32 (AArch64NvCast (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
7353def : Pat<(v2f64 (AArch64NvCast (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;
7354
7355def : Pat<(v16i8 (AArch64NvCast (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
7356def : Pat<(v8i16 (AArch64NvCast (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
7357def : Pat<(v4i32 (AArch64NvCast (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;
7358def : Pat<(v4f32 (AArch64NvCast (v4f32 FPR128:$src))), (v4f32 FPR128:$src)>;
7359def : Pat<(v2i64 (AArch64NvCast (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
7360def : Pat<(v8f16 (AArch64NvCast (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;
7361def : Pat<(v8bf16 (AArch64NvCast (v4f32 FPR128:$src))), (v8bf16 FPR128:$src)>;
7362def : Pat<(v2f64 (AArch64NvCast (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;
7363
7364def : Pat<(v16i8 (AArch64NvCast (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
7365def : Pat<(v8i16 (AArch64NvCast (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
7366def : Pat<(v4i32 (AArch64NvCast (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
7367def : Pat<(v2i64 (AArch64NvCast (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;
7368def : Pat<(v2f64 (AArch64NvCast (v2f64 FPR128:$src))), (v2f64 FPR128:$src)>;
7369def : Pat<(v8f16 (AArch64NvCast (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
7370def : Pat<(v8bf16 (AArch64NvCast (v2f64 FPR128:$src))), (v8bf16 FPR128:$src)>;
7371def : Pat<(v4f32 (AArch64NvCast (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
7372
let Predicates = [IsLE] in {
def : Pat<(v8i8  (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v4i16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v2i32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v4f16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v4bf16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v2f32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;

def : Pat<(i64 (bitconvert (v8i8  V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v4bf16 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i8  (bitconvert GPR64:$Xn)),
                 (REV64v8i8 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v4i16 (bitconvert GPR64:$Xn)),
                 (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v2i32 (bitconvert GPR64:$Xn)),
                 (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v4f16 (bitconvert GPR64:$Xn)),
                 (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v4bf16 (bitconvert GPR64:$Xn)),
                  (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v2f32 (bitconvert GPR64:$Xn)),
                 (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;

def : Pat<(i64 (bitconvert (v8i8  V64:$Vn))),
          (REV64v8i8 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
          (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v4bf16 V64:$Vn))),
          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
          (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
}
def : Pat<(v1i64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v1f64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(i64 (bitconvert (v1i64 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(v1i64 (scalar_to_vector GPR64:$Xn)),
          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v1f64 (scalar_to_vector GPR64:$Xn)),
          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Xn))), (v1f64 FPR64:$Xn)>;

def : Pat<(f32 (bitconvert (i32 GPR32:$Xn))),
          (COPY_TO_REGCLASS GPR32:$Xn, FPR32)>;
def : Pat<(i32 (bitconvert (f32 FPR32:$Xn))),
          (COPY_TO_REGCLASS FPR32:$Xn, GPR32)>;
def : Pat<(f64 (bitconvert (i64 GPR64:$Xn))),
          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(i64 (bitconvert (f64 FPR64:$Xn))),
          (COPY_TO_REGCLASS FPR64:$Xn, GPR64)>;
def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;

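// Bitconverts between the 64-bit vector types stay within a single FPR64
// register: on little-endian targets they are free, while big-endian targets
// reorder the elements with a REV whose width matches the larger of the two
// element sizes (REV64/REV32/REV16).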
let Predicates = [IsLE] in {
def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v8i8  FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4bf16 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))),
                             (v1i64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))),
                             (v1i64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v8i8  FPR64:$src))),
                             (v1i64 (REV64v8i8 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))),
                             (v1i64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v4bf16 FPR64:$src))),
                             (v1i64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))),
                             (v1i64 (REV64v2i32 FPR64:$src))>;
}
def : Pat<(v1i64 (bitconvert (v1f64 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (f64   FPR64:$src))), (v1i64 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v8i8  FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (f64   FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4bf16 FPR64:$src))), (v2i32 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))),
                             (v2i32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))),
                             (v2i32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v8i8  FPR64:$src))),
                             (v2i32 (REV32v8i8 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (f64   FPR64:$src))),
                             (v2i32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))),
                             (v2i32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))),
                             (v2i32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v4bf16 FPR64:$src))),
                             (v2i32 (REV32v4i16 FPR64:$src))>;
}
def : Pat<(v2i32 (bitconvert (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v8i8  FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (f64   FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))), (v4i16 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))),
                             (v4i16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))),
                             (v4i16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v8i8  FPR64:$src))),
                             (v4i16 (REV16v8i8 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (f64   FPR64:$src))),
                             (v4i16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))),
                             (v4i16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))),
                             (v4i16 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(v4i16 (bitconvert (v4f16 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v4bf16 FPR64:$src))), (v4i16 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v8i8  FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (f64   FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))), (v4f16 FPR64:$src)>;

def : Pat<(v4bf16 (bitconvert (v1i64 FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (v2i32 FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (v8i8  FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (f64   FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (v2f32 FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (v1f64 FPR64:$src))), (v4bf16 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))),
                             (v4f16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))),
                             (v4f16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v8i8  FPR64:$src))),
                             (v4f16 (REV16v8i8 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (f64   FPR64:$src))),
                             (v4f16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))),
                             (v4f16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))),
                             (v4f16 (REV64v4i16 FPR64:$src))>;

def : Pat<(v4bf16 (bitconvert (v1i64 FPR64:$src))),
                             (v4bf16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4bf16 (bitconvert (v2i32 FPR64:$src))),
                             (v4bf16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4bf16 (bitconvert (v8i8  FPR64:$src))),
                             (v4bf16 (REV16v8i8 FPR64:$src))>;
def : Pat<(v4bf16 (bitconvert (f64   FPR64:$src))),
                             (v4bf16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4bf16 (bitconvert (v2f32 FPR64:$src))),
                             (v4bf16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4bf16 (bitconvert (v1f64 FPR64:$src))),
                             (v4bf16 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(v4f16 (bitconvert (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (v4i16 FPR64:$src))), (v4bf16 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v8i8  (bitconvert (v1i64 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v2i32 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v4i16 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (f64   FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v2f32 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v1f64 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v4f16 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v4bf16 FPR64:$src))), (v8i8  FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i8  (bitconvert (v1i64 FPR64:$src))),
                             (v8i8 (REV64v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v2i32 FPR64:$src))),
                             (v8i8 (REV32v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v4i16 FPR64:$src))),
                             (v8i8 (REV16v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (f64   FPR64:$src))),
                             (v8i8 (REV64v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v2f32 FPR64:$src))),
                             (v8i8 (REV32v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v1f64 FPR64:$src))),
                             (v8i8 (REV64v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v4f16 FPR64:$src))),
                             (v8i8 (REV16v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v4bf16 FPR64:$src))),
                             (v8i8 (REV16v8i8 FPR64:$src))>;
}

let Predicates = [IsLE] in {
def : Pat<(f64   (bitconvert (v2i32 FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v4i16 FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v2f32 FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v8i8  FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v4f16 FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v4bf16 FPR64:$src))), (f64   FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(f64   (bitconvert (v2i32 FPR64:$src))),
                             (f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(f64   (bitconvert (v4i16 FPR64:$src))),
                             (f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(f64   (bitconvert (v2f32 FPR64:$src))),
                             (f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(f64   (bitconvert (v8i8  FPR64:$src))),
                             (f64 (REV64v8i8 FPR64:$src))>;
def : Pat<(f64   (bitconvert (v4f16 FPR64:$src))),
                             (f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(f64   (bitconvert (v4bf16 FPR64:$src))),
                             (f64 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(f64   (bitconvert (v1i64 FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v1f64 FPR64:$src))), (f64   FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v8i8  FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v4bf16 FPR64:$src))), (v1f64 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))),
                             (v1f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))),
                             (v1f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v8i8  FPR64:$src))),
                             (v1f64 (REV64v8i8 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))),
                             (v1f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))),
                             (v1f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v4bf16 FPR64:$src))),
                             (v1f64 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(v1f64 (bitconvert (v1i64 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (f64   FPR64:$src))), (v1f64 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v8i8  FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (f64   FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v4bf16 FPR64:$src))), (v2f32 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))),
                             (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))),
                             (v2f32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v8i8  FPR64:$src))),
                             (v2f32 (REV32v8i8 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))),
                             (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (f64   FPR64:$src))),
                             (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))),
                             (v2f32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v4bf16 FPR64:$src))),
                             (v2f32 (REV32v4i16 FPR64:$src))>;
}
def : Pat<(v2f32 (bitconvert (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;

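// The 128-bit types follow the same scheme. f128 is the interesting case on
// big-endian targets: in addition to reversing the elements within each
// 64-bit half, the two halves themselves must be swapped, which is done with
// an EXT of the value with itself at byte offset 8.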
let Predicates = [IsLE] in {
def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v8bf16 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))), (f128 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))),
                            (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
                                            (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                            (REV64v8i16 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                            (REV64v8i16 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v8bf16 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                            (REV64v8i16 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))),
                            (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
                                            (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v16i8 FPR128:$src),
                                            (REV64v16i8 FPR128:$src), (i32 8)))>;
}

let Predicates = [IsLE] in {
def : Pat<(v2f64 (bitconvert (f128  FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8bf16 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2f64 (bitconvert (f128  FPR128:$src))),
                             (v2f64 (EXTv16i8 FPR128:$src,
                                              FPR128:$src, (i32 8)))>;
def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))),
                             (v2f64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))),
                             (v2f64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))),
                             (v2f64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v8bf16 FPR128:$src))),
                             (v2f64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))),
                             (v2f64 (REV64v16i8 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))),
                             (v2f64 (REV64v4i32 FPR128:$src))>;
}
def : Pat<(v2f64 (bitconvert (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4f32 (bitconvert (f128  FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8bf16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4f32 (bitconvert (f128  FPR128:$src))),
                             (v4f32 (EXTv16i8 (REV64v4i32 FPR128:$src),
                                    (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))),
                             (v4f32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))),
                             (v4f32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v8bf16 FPR128:$src))),
                             (v4f32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))),
                             (v4f32 (REV32v16i8 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))),
                             (v4f32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))),
                             (v4f32 (REV64v4i32 FPR128:$src))>;
}
def : Pat<(v4f32 (bitconvert (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v2i64 (bitconvert (f128  FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8bf16 FPR128:$src))), (v2i64 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2i64 (bitconvert (f128  FPR128:$src))),
                             (v2i64 (EXTv16i8 FPR128:$src,
                                              FPR128:$src, (i32 8)))>;
def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))),
                             (v2i64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))),
                             (v2i64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))),
                             (v2i64 (REV64v16i8 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))),
                             (v2i64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))),
                             (v2i64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v8bf16 FPR128:$src))),
                             (v2i64 (REV64v8i16 FPR128:$src))>;
}
def : Pat<(v2i64 (bitconvert (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4i32 (bitconvert (f128  FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8bf16 FPR128:$src))), (v4i32 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4i32 (bitconvert (f128  FPR128:$src))),
                             (v4i32 (EXTv16i8 (REV64v4i32 FPR128:$src),
                                              (REV64v4i32 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))),
                             (v4i32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))),
                             (v4i32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))),
                             (v4i32 (REV32v16i8 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))),
                             (v4i32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))),
                             (v4i32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v8bf16 FPR128:$src))),
                             (v4i32 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v4i32 (bitconvert (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v8i16 (bitconvert (f128  FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i16 (bitconvert (f128  FPR128:$src))),
                             (v8i16 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                              (REV64v8i16 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))),
                             (v8i16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))),
                             (v8i16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))),
                             (v8i16 (REV16v16i8 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))),
                             (v8i16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))),
                             (v8i16 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v8i16 (bitconvert (v8f16 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v8bf16 FPR128:$src))), (v8i16 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v8f16 (bitconvert (f128  FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;

def : Pat<(v8bf16 (bitconvert (f128  FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v2i64 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v4i32 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v16i8 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v2f64 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v4f32 FPR128:$src))), (v8bf16 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8f16 (bitconvert (f128  FPR128:$src))),
                             (v8f16 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                              (REV64v8i16 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))),
                             (v8f16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))),
                             (v8f16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))),
                             (v8f16 (REV16v16i8 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))),
                             (v8f16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))),
                             (v8f16 (REV32v8i16 FPR128:$src))>;

def : Pat<(v8bf16 (bitconvert (f128  FPR128:$src))),
                             (v8bf16 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                              (REV64v8i16 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v8bf16 (bitconvert (v2i64 FPR128:$src))),
                             (v8bf16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8bf16 (bitconvert (v4i32 FPR128:$src))),
                             (v8bf16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8bf16 (bitconvert (v16i8 FPR128:$src))),
                             (v8bf16 (REV16v16i8 FPR128:$src))>;
def : Pat<(v8bf16 (bitconvert (v2f64 FPR128:$src))),
                             (v8bf16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8bf16 (bitconvert (v4f32 FPR128:$src))),
                             (v8bf16 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v8f16 (bitconvert (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v8i16 FPR128:$src))), (v8bf16 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v16i8 (bitconvert (f128  FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8bf16 FPR128:$src))), (v16i8 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v16i8 (bitconvert (f128  FPR128:$src))),
                             (v16i8 (EXTv16i8 (REV64v16i8 FPR128:$src),
                                              (REV64v16i8 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))),
                             (v16i8 (REV64v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))),
                             (v16i8 (REV32v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))),
                             (v16i8 (REV16v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))),
                             (v16i8 (REV64v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))),
                             (v16i8 (REV32v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))),
                             (v16i8 (REV16v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v8bf16 FPR128:$src))),
                             (v16i8 (REV16v16i8 FPR128:$src))>;
}

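// Extracting the low 64-bit half of a 128-bit vector is a dsub subregister
// extract and needs no instruction.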
def : Pat<(v4i16 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v8i8 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v2f32 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v4f16 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v4bf16 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v2i32 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v1i64 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v1f64 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;

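// Extracting the high half instead broadcasts .d[1] across the register with
// DUP before reading the result through dsub.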
def : Pat<(v8i8 (extract_subvector (v16i8 FPR128:$Rn), (i64 8))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v4i16 (extract_subvector (v8i16 FPR128:$Rn), (i64 4))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v2i32 (extract_subvector (v4i32 FPR128:$Rn), (i64 2))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v1i64 (extract_subvector (v2i64 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;

// A 64-bit subvector insert to the first 128-bit vector position
// is a subregister copy that needs no instruction.
multiclass InsertSubvectorUndef<ValueType Ty> {
  def : Pat<(insert_subvector undef, (v1i64 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v1f64 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v2i32 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v2f32 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v4i16 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v4f16 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v4bf16 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v8i8 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
}

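// Instantiated for both i32 and i64 so the patterns match whichever immediate
// type the insert_subvector index is presented with.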
defm : InsertSubvectorUndef<i32>;
defm : InsertSubvectorUndef<i64>;

// Use pair-wise add instructions when summing up the lanes for v2f64, v2i64
// or v2f32.
def : Pat<(i64 (add (vector_extract (v2i64 FPR128:$Rn), (i64 0)),
                    (vector_extract (v2i64 FPR128:$Rn), (i64 1)))),
           (i64 (ADDPv2i64p (v2i64 FPR128:$Rn)))>;
def : Pat<(f64 (fadd (vector_extract (v2f64 FPR128:$Rn), (i64 0)),
                     (vector_extract (v2f64 FPR128:$Rn), (i64 1)))),
           (f64 (FADDPv2i64p (v2f64 FPR128:$Rn)))>;
// vector_extract on 64-bit vectors gets promoted to a 128-bit vector,
// so we match on v4f32 here, not v2f32. This will also catch adding
// the low two lanes of a true v4f32 vector.
def : Pat<(fadd (vector_extract (v4f32 FPR128:$Rn), (i64 0)),
                (vector_extract (v4f32 FPR128:$Rn), (i64 1))),
          (f32 (FADDPv2i32p (EXTRACT_SUBREG FPR128:$Rn, dsub)))>;
def : Pat<(fadd (vector_extract (v8f16 FPR128:$Rn), (i64 0)),
                (vector_extract (v8f16 FPR128:$Rn), (i64 1))),
          (f16 (FADDPv2i16p (EXTRACT_SUBREG FPR128:$Rn, dsub)))>;

// Scalar 64-bit shifts in FPR64 registers.
def : Pat<(i64 (int_aarch64_neon_sshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (SSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_ushl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (USHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_srshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (SRSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_urshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (URSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;

// Patterns for nontemporal/no-allocate stores.
// We have to resort to tricks to turn a single-input store into a store pair,
// because there is no single-input nontemporal store, only STNP.
let Predicates = [IsLE] in {
let AddedComplexity = 15 in {
class NTStore128Pat<ValueType VT> :
  Pat<(nontemporalstore (VT FPR128:$Rt),
        (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
      (STNPDi (EXTRACT_SUBREG FPR128:$Rt, dsub),
              (CPYi64 FPR128:$Rt, (i64 1)),
              GPR64sp:$Rn, simm7s8:$offset)>;

def : NTStore128Pat<v2i64>;
def : NTStore128Pat<v4i32>;
def : NTStore128Pat<v8i16>;
def : NTStore128Pat<v16i8>;

class NTStore64Pat<ValueType VT> :
  Pat<(nontemporalstore (VT FPR64:$Rt),
        (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
      (STNPSi (EXTRACT_SUBREG FPR64:$Rt, ssub),
              (CPYi32 (SUBREG_TO_REG (i64 0), FPR64:$Rt, dsub), (i64 1)),
              GPR64sp:$Rn, simm7s4:$offset)>;

// FIXME: Shouldn't v1f64 loads/stores be promoted to v1i64?
def : NTStore64Pat<v1f64>;
def : NTStore64Pat<v1i64>;
def : NTStore64Pat<v2i32>;
def : NTStore64Pat<v4i16>;
def : NTStore64Pat<v8i8>;

def : Pat<(nontemporalstore GPR64:$Rt,
            (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
          (STNPWi (EXTRACT_SUBREG GPR64:$Rt, sub_32),
                  (EXTRACT_SUBREG (UBFMXri GPR64:$Rt, 32, 63), sub_32),
                  GPR64sp:$Rn, simm7s4:$offset)>;
} // AddedComplexity=15
} // Predicates = [IsLE]

// Tail call return handling. These are all compiler pseudo-instructions,
// so no encoding information or anything like that.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [SP] in {
  def TCRETURNdi : Pseudo<(outs), (ins i64imm:$dst, i32imm:$FPDiff), []>,
                   Sched<[WriteBrReg]>;
  def TCRETURNri : Pseudo<(outs), (ins tcGPR64:$dst, i32imm:$FPDiff), []>,
                   Sched<[WriteBrReg]>;
  // Indirect tail-call with any register allowed, used by MachineOutliner when
  // this is proven safe.
  // FIXME: If we have to add any more hacks like this, we should instead relax
  // some verifier checks for outlined functions.
  def TCRETURNriALL : Pseudo<(outs), (ins GPR64:$dst, i32imm:$FPDiff), []>,
                      Sched<[WriteBrReg]>;
  // Indirect tail-call restricted to x16 and x17: these are the only
  // registers through which an indirect branch may target a "BTI c"
  // instruction.
  def TCRETURNriBTI : Pseudo<(outs), (ins rtcGPR64:$dst, i32imm:$FPDiff), []>,
                      Sched<[WriteBrReg]>;
}

def : Pat<(AArch64tcret tcGPR64:$dst, (i32 timm:$FPDiff)),
          (TCRETURNri tcGPR64:$dst, imm:$FPDiff)>,
      Requires<[NotUseBTI]>;
def : Pat<(AArch64tcret rtcGPR64:$dst, (i32 timm:$FPDiff)),
          (TCRETURNriBTI rtcGPR64:$dst, imm:$FPDiff)>,
      Requires<[UseBTI]>;
def : Pat<(AArch64tcret tglobaladdr:$dst, (i32 timm:$FPDiff)),
          (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;
def : Pat<(AArch64tcret texternalsym:$dst, (i32 timm:$FPDiff)),
          (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;

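// MOVMCSym materializes the address of an MC symbol in a GPR; it is used here
// to select AArch64LocalRecover, the node produced for llvm.localrecover.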
def MOVMCSym : Pseudo<(outs GPR64:$dst), (ins i64imm:$sym), []>, Sched<[]>;
def : Pat<(i64 (AArch64LocalRecover mcsym:$sym)), (MOVMCSym mcsym:$sym)>;

// Extracting lane zero is a special case where we can just use a plain
// EXTRACT_SUBREG instruction, which will become FMOV. This is easier for the
// rest of the compiler, especially the register allocator and copy
// propagation, to reason about, so it is preferred whenever possible.
let AddedComplexity = 10 in {
  def : Pat<(i64 (extractelt (v2i64 V128:$V), (i64 0))), (EXTRACT_SUBREG V128:$V, dsub)>;
  def : Pat<(i32 (extractelt (v4i32 V128:$V), (i64 0))), (EXTRACT_SUBREG V128:$V, ssub)>;
  def : Pat<(i32 (extractelt (v2i32 V64:$V), (i64 0))), (EXTRACT_SUBREG V64:$V, ssub)>;
}

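// The classes below recognize i32 reductions of widened i8 products and
// select the [US]DOT instructions instead: each idot_* fragment describes the
// extend/multiply/add DAG shape for 4, 8 or 16 bytes, and the matching odot_*
// fragment emits DOT against a zeroed accumulator, reading the scalar result
// back out of lane 0.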
// dot_v4i8
class mul_v4i8<SDPatternOperator ldop> :
  PatFrag<(ops node:$Rn, node:$Rm, node:$offset),
          (mul (ldop (add node:$Rn, node:$offset)),
               (ldop (add node:$Rm, node:$offset)))>;
class mulz_v4i8<SDPatternOperator ldop> :
  PatFrag<(ops node:$Rn, node:$Rm),
          (mul (ldop node:$Rn), (ldop node:$Rm))>;

def load_v4i8 :
  OutPatFrag<(ops node:$R),
             (INSERT_SUBREG
              (v2i32 (IMPLICIT_DEF)),
               (i32 (COPY_TO_REGCLASS (LDRWui node:$R, (i64 0)), FPR32)),
              ssub)>;

class dot_v4i8<Instruction DOT, SDPatternOperator ldop> :
  Pat<(i32 (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 3)),
           (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 2)),
           (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 1)),
                (mulz_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm))))),
      (EXTRACT_SUBREG (i64 (DOT (DUPv2i32gpr WZR),
                                (load_v4i8 GPR64sp:$Rn),
                                (load_v4i8 GPR64sp:$Rm))),
                      sub_32)>, Requires<[HasDotProd]>;

// dot_v8i8
class ee_v8i8<SDPatternOperator extend> :
  PatFrag<(ops node:$V, node:$K),
          (v4i16 (extract_subvector (v8i16 (extend node:$V)), node:$K))>;

class mul_v8i8<SDPatternOperator mulop, SDPatternOperator extend> :
  PatFrag<(ops node:$M, node:$N, node:$K),
          (mulop (v4i16 (ee_v8i8<extend> node:$M, node:$K)),
                 (v4i16 (ee_v8i8<extend> node:$N, node:$K)))>;

class idot_v8i8<SDPatternOperator mulop, SDPatternOperator extend> :
  PatFrag<(ops node:$M, node:$N),
          (i32 (extractelt
           (v4i32 (AArch64uaddv
            (add (mul_v8i8<mulop, extend> node:$M, node:$N, (i64 0)),
                 (mul_v8i8<mulop, extend> node:$M, node:$N, (i64 4))))),
           (i64 0)))>;

// vaddv_[su]32 is special: it becomes ADDP Vd.2S, Vn.2S, Vm.2S with Vn == Vm,
// and the result is read from Vd.s[0].
def VADDV_32 : OutPatFrag<(ops node:$R), (ADDPv2i32 node:$R, node:$R)>;

class odot_v8i8<Instruction DOT> :
  OutPatFrag<(ops node:$Vm, node:$Vn),
             (EXTRACT_SUBREG
              (VADDV_32
               (i64 (DOT (DUPv2i32gpr WZR),
                         (v8i8 node:$Vm),
                         (v8i8 node:$Vn)))),
              sub_32)>;

class dot_v8i8<Instruction DOT, SDPatternOperator mulop,
                    SDPatternOperator extend> :
  Pat<(idot_v8i8<mulop, extend> V64:$Vm, V64:$Vn),
      (odot_v8i8<DOT> V64:$Vm, V64:$Vn)>,
  Requires<[HasDotProd]>;

// dot_v16i8
class ee_v16i8<SDPatternOperator extend> :
  PatFrag<(ops node:$V, node:$K1, node:$K2),
          (v4i16 (extract_subvector
           (v8i16 (extend
            (v8i8 (extract_subvector node:$V, node:$K1)))), node:$K2))>;

class mul_v16i8<SDPatternOperator mulop, SDPatternOperator extend> :
  PatFrag<(ops node:$M, node:$N, node:$K1, node:$K2),
          (v4i32
           (mulop (v4i16 (ee_v16i8<extend> node:$M, node:$K1, node:$K2)),
                  (v4i16 (ee_v16i8<extend> node:$N, node:$K1, node:$K2))))>;

class idot_v16i8<SDPatternOperator m, SDPatternOperator x> :
  PatFrag<(ops node:$M, node:$N),
          (i32 (extractelt
           (v4i32 (AArch64uaddv
            (add
             (add (mul_v16i8<m, x> node:$M, node:$N, (i64 0), (i64 0)),
                  (mul_v16i8<m, x> node:$M, node:$N, (i64 8), (i64 0))),
             (add (mul_v16i8<m, x> node:$M, node:$N, (i64 0), (i64 4)),
                  (mul_v16i8<m, x> node:$M, node:$N, (i64 8), (i64 4)))))),
           (i64 0)))>;

class odot_v16i8<Instruction DOT> :
  OutPatFrag<(ops node:$Vm, node:$Vn),
             (i32 (ADDVv4i32v
              (DOT (DUPv4i32gpr WZR), node:$Vm, node:$Vn)))>;

class dot_v16i8<Instruction DOT, SDPatternOperator mulop,
                SDPatternOperator extend> :
  Pat<(idot_v16i8<mulop, extend> V128:$Vm, V128:$Vn),
      (odot_v16i8<DOT> V128:$Vm, V128:$Vn)>,
  Requires<[HasDotProd]>;

let AddedComplexity = 10 in {
  def : dot_v4i8<SDOTv8i8, sextloadi8>;
  def : dot_v4i8<UDOTv8i8, zextloadi8>;
  def : dot_v8i8<SDOTv8i8, AArch64smull, sext>;
  def : dot_v8i8<UDOTv8i8, AArch64umull, zext>;
  def : dot_v16i8<SDOTv16i8, AArch64smull, sext>;
  def : dot_v16i8<UDOTv16i8, AArch64umull, zext>;

  // FIXME: add patterns to generate vector by element dot product.
  // FIXME: add SVE dot-product patterns.
}

// Custom DAG nodes and isel rules to make a 64-byte block out of eight GPRs,
// so that it can be used as input to inline asm, and vice versa.
def LS64_BUILD : SDNode<"AArch64ISD::LS64_BUILD", SDTypeProfile<1, 8, []>>;
def LS64_EXTRACT : SDNode<"AArch64ISD::LS64_EXTRACT", SDTypeProfile<1, 2, []>>;
def : Pat<(i64x8 (LS64_BUILD GPR64:$x0, GPR64:$x1, GPR64:$x2, GPR64:$x3,
                             GPR64:$x4, GPR64:$x5, GPR64:$x6, GPR64:$x7)),
          (REG_SEQUENCE GPR64x8Class,
              $x0, x8sub_0, $x1, x8sub_1, $x2, x8sub_2, $x3, x8sub_3,
              $x4, x8sub_4, $x5, x8sub_5, $x6, x8sub_6, $x7, x8sub_7)>;
foreach i = 0-7 in {
  def : Pat<(i64 (LS64_EXTRACT (i64x8 GPR64x8:$val), (i32 i))),
            (EXTRACT_SUBREG $val, !cast<SubRegIndex>("x8sub_"#i))>;
}

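// Armv8.7 FEAT_LS64: single-copy atomic 64-byte loads and stores that
// transfer a block of eight consecutive GPRs, modelled here with the GPR64x8
// tuple class.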
let Predicates = [HasLS64] in {
  def LD64B: LoadStore64B<0b101, "ld64b", (ins GPR64sp:$Rn),
                                          (outs GPR64x8:$Rt)>;
  def ST64B: LoadStore64B<0b001, "st64b", (ins GPR64x8:$Rt, GPR64sp:$Rn),
                                          (outs)>;
  def ST64BV:   Store64BV<0b011, "st64bv">;
  def ST64BV0:  Store64BV<0b010, "st64bv0">;

  class ST64BPattern<Intrinsic intrinsic, Instruction instruction>
    : Pat<(intrinsic GPR64sp:$addr, GPR64:$x0, GPR64:$x1, GPR64:$x2, GPR64:$x3, GPR64:$x4, GPR64:$x5, GPR64:$x6, GPR64:$x7),
          (instruction (REG_SEQUENCE GPR64x8Class, $x0, x8sub_0, $x1, x8sub_1, $x2, x8sub_2, $x3, x8sub_3, $x4, x8sub_4, $x5, x8sub_5, $x6, x8sub_6, $x7, x8sub_7), $addr)>;

  def : ST64BPattern<int_aarch64_st64b, ST64B>;
  def : ST64BPattern<int_aarch64_st64bv, ST64BV>;
  def : ST64BPattern<int_aarch64_st64bv0, ST64BV0>;
}

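// StoreSwiftAsyncContext is expanded after register allocation into the store
// of the Swift async context $ctx to [$base, $offset]; X16 and X17 are listed
// as Defs because the expansion may use them as scratch registers.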
let Defs = [X16, X17], mayStore = 1, isCodeGenOnly = 1 in
def StoreSwiftAsyncContext
      : Pseudo<(outs), (ins GPR64:$ctx, GPR64sp:$base, simm9:$offset),
               []>, Sched<[]>;

def AArch64AssertZExtBool : SDNode<"AArch64ISD::ASSERT_ZEXT_BOOL", SDT_assert>;
def : Pat<(AArch64AssertZExtBool GPR32:$op),
          (i32 GPR32:$op)>;
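// ASSERT_ZEXT_BOOL only asserts that the operand is an i1 zero-extended to
// i32; it generates no code, so selection simply forwards the operand.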

include "AArch64InstrAtomics.td"
include "AArch64SVEInstrInfo.td"
include "AArch64SMEInstrInfo.td"
include "AArch64InstrGISel.td"
