//=== AArch64PostLegalizerLowering.cpp --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Post-legalization lowering for instructions.
///
/// This is used to offload pattern matching from the selector.
///
/// For example, this combiner will notice that a G_SHUFFLE_VECTOR is actually
/// a G_ZIP, G_UZP, etc.
///
/// General optimization combines should be handled by either the
/// AArch64PostLegalizerCombiner or the AArch64PreLegalizerCombiner.
///
//===----------------------------------------------------------------------===//

#include "AArch64GlobalISelUtils.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "GISel/AArch64LegalizerInfo.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "TargetInfo/AArch64TargetInfo.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/CodeGen/GlobalISel/Combiner.h"
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"

#define DEBUG_TYPE "aarch64-postlegalizer-lowering"

using namespace llvm;
using namespace MIPatternMatch;
using namespace AArch64GISelUtils;

/// Represents a pseudo instruction which replaces a G_SHUFFLE_VECTOR.
///
/// Used for matching target-supported shuffles before codegen.
struct ShuffleVectorPseudo {
  unsigned Opc;                 ///< Opcode for the instruction. (E.g. G_ZIP1)
  Register Dst;                 ///< Destination register.
  SmallVector<SrcOp, 2> SrcOps; ///< Source registers.
  ShuffleVectorPseudo(unsigned Opc, Register Dst,
                      std::initializer_list<SrcOp> SrcOps)
      : Opc(Opc), Dst(Dst), SrcOps(SrcOps) {}
  ShuffleVectorPseudo() {}
};

/// Check if a vector shuffle corresponds to a REV instruction with the
/// specified blocksize.
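///
/// For example, with 8-bit elements and a 64-bit block size, a REV64 mask for
/// 8 elements reverses each 8-element block: <7, 6, 5, 4, 3, 2, 1, 0>.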
static bool isREVMask(ArrayRef<int> M, unsigned EltSize, unsigned NumElts,
                      unsigned BlockSize) {
  assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
         "Only possible block sizes for REV are: 16, 32, 64");
  assert(EltSize != 64 && "EltSize cannot be 64 for REV mask.");

  unsigned BlockElts = M[0] + 1;

  // If the first shuffle index is UNDEF, be optimistic.
  if (M[0] < 0)
    BlockElts = BlockSize / EltSize;

  if (BlockSize <= EltSize || BlockSize != BlockElts * EltSize)
    return false;

  for (unsigned i = 0; i < NumElts; ++i) {
    // Ignore undef indices.
    if (M[i] < 0)
      continue;
    if (static_cast<unsigned>(M[i]) !=
        (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
      return false;
  }

  return true;
}

/// Determines if \p M is a shuffle vector mask for a TRN of \p NumElts.
/// Whether or not G_TRN1 or G_TRN2 should be used is stored in \p WhichResult.
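///
/// For example, with 4 elements, TRN1 transposes the even-numbered lanes of
/// both inputs (mask <0, 4, 2, 6>) and TRN2 the odd-numbered lanes
/// (mask <1, 5, 3, 7>).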
static bool isTRNMask(ArrayRef<int> M, unsigned NumElts,
                      unsigned &WhichResult) {
  if (NumElts % 2 != 0)
    return false;
  WhichResult = (M[0] == 0 ? 0 : 1);
  for (unsigned i = 0; i < NumElts; i += 2) {
    if ((M[i] >= 0 && static_cast<unsigned>(M[i]) != i + WhichResult) ||
        (M[i + 1] >= 0 &&
         static_cast<unsigned>(M[i + 1]) != i + NumElts + WhichResult))
      return false;
  }
  return true;
}

/// Check if a G_EXT instruction can handle a shuffle mask \p M when the vector
/// sources of the shuffle are different.
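///
/// For example, the mask <1, 2, 3, 4> on two <4 x s32> sources selects four
/// consecutive elements of concat(V1, V2) starting at index 1, yielding
/// (ReverseExt = false, Imm = 1); matchEXT later scales Imm to a byte offset.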
static Optional<std::pair<bool, uint64_t>> getExtMask(ArrayRef<int> M,
                                                      unsigned NumElts) {
  // Look for the first non-undef element.
  auto FirstRealElt = find_if(M, [](int Elt) { return Elt >= 0; });
  if (FirstRealElt == M.end())
    return None;

  // Use APInt to handle overflow when calculating expected element.
  unsigned MaskBits = APInt(32, NumElts * 2).logBase2();
  APInt ExpectedElt = APInt(MaskBits, *FirstRealElt + 1);

  // The following shuffle indices must be the successive elements after the
  // first real element.
  if (any_of(
          make_range(std::next(FirstRealElt), M.end()),
          [&ExpectedElt](int Elt) { return Elt != ExpectedElt++ && Elt >= 0; }))
    return None;

  // The index of an EXT is the first element if it is not UNDEF.
  // Watch out for the beginning UNDEFs. The EXT index should be the expected
  // value of the first element. E.g.
  // <-1, -1, 3, ...> is treated as <1, 2, 3, ...>.
  // <-1, -1, 0, 1, ...> is treated as <2*NumElts-2, 2*NumElts-1, 0, 1, ...>.
  // ExpectedElt is the last mask index plus 1.
  uint64_t Imm = ExpectedElt.getZExtValue();
  bool ReverseExt = false;

  // There are two different cases that require reversing the input vectors.
  // For example, for the vector <4 x i32> we have the following cases:
  // Case 1: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, -1, 0>)
  // Case 2: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, 7, 0>)
  // For both cases, we finally use mask <5, 6, 7, 0>, which requires
  // reversing the two input vectors.
  if (Imm < NumElts)
    ReverseExt = true;
  else
    Imm -= NumElts;
  return std::make_pair(ReverseExt, Imm);
}

/// Determines if \p M is a shuffle vector mask for a UZP of \p NumElts.
/// Whether or not G_UZP1 or G_UZP2 should be used is stored in \p WhichResult.
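///
/// For example, with 4 elements, UZP1 keeps the even-indexed elements of
/// concat(V1, V2) (mask <0, 2, 4, 6>) and UZP2 the odd-indexed elements
/// (mask <1, 3, 5, 7>).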
static bool isUZPMask(ArrayRef<int> M, unsigned NumElts,
                      unsigned &WhichResult) {
  WhichResult = (M[0] == 0 ? 0 : 1);
  for (unsigned i = 0; i != NumElts; ++i) {
    // Skip undef indices.
    if (M[i] < 0)
      continue;
    if (static_cast<unsigned>(M[i]) != 2 * i + WhichResult)
      return false;
  }
  return true;
}

/// \return true if \p M is a zip mask for a shuffle vector of \p NumElts.
/// Whether or not G_ZIP1 or G_ZIP2 should be used is stored in \p WhichResult.
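///
/// For example, with 4 elements, ZIP1 interleaves the low halves of both
/// inputs (mask <0, 4, 1, 5>) and ZIP2 the high halves (mask <2, 6, 3, 7>).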
static bool isZipMask(ArrayRef<int> M, unsigned NumElts,
                      unsigned &WhichResult) {
  if (NumElts % 2 != 0)
    return false;

  // 0 means use ZIP1, 1 means use ZIP2.
  WhichResult = (M[0] == 0 ? 0 : 1);
  unsigned Idx = WhichResult * NumElts / 2;
  for (unsigned i = 0; i != NumElts; i += 2) {
    if ((M[i] >= 0 && static_cast<unsigned>(M[i]) != Idx) ||
        (M[i + 1] >= 0 && static_cast<unsigned>(M[i + 1]) != Idx + NumElts))
      return false;
    Idx += 1;
  }
  return true;
}

/// Helper function for matchINS.
///
/// \returns a value when \p M is an ins mask for \p NumInputElements.
///
/// First element of the returned pair is true when the produced
/// G_INSERT_VECTOR_ELT destination should be the LHS of the G_SHUFFLE_VECTOR.
///
/// Second element is the destination lane for the G_INSERT_VECTOR_ELT.
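///
/// For example, with 4 elements, the mask <0, 1, 6, 3> matches the LHS
/// everywhere except lane 2, so this returns (true, 2): insert into lane 2 of
/// the LHS. The value to insert (here lane 2 of the RHS, since 6 - 4 == 2) is
/// recovered by matchINS.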
static Optional<std::pair<bool, int>> isINSMask(ArrayRef<int> M,
                                                int NumInputElements) {
  if (M.size() != static_cast<size_t>(NumInputElements))
    return None;
  int NumLHSMatch = 0, NumRHSMatch = 0;
  int LastLHSMismatch = -1, LastRHSMismatch = -1;
  for (int Idx = 0; Idx < NumInputElements; ++Idx) {
    if (M[Idx] == -1) {
      ++NumLHSMatch;
      ++NumRHSMatch;
      continue;
    }
    M[Idx] == Idx ? ++NumLHSMatch : LastLHSMismatch = Idx;
    M[Idx] == Idx + NumInputElements ? ++NumRHSMatch : LastRHSMismatch = Idx;
  }
  const int NumNeededToMatch = NumInputElements - 1;
  if (NumLHSMatch == NumNeededToMatch)
    return std::make_pair(true, LastLHSMismatch);
  if (NumRHSMatch == NumNeededToMatch)
    return std::make_pair(false, LastRHSMismatch);
  return None;
}

/// \return true if a G_SHUFFLE_VECTOR instruction \p MI can be replaced with a
/// G_REV instruction. Stores the appropriate G_REV pseudo in \p MatchInfo.
static bool matchREV(MachineInstr &MI, MachineRegisterInfo &MRI,
                     ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();
  LLT Ty = MRI.getType(Dst);
  unsigned EltSize = Ty.getScalarSizeInBits();

  // Element size for a rev cannot be 64.
  if (EltSize == 64)
    return false;

  unsigned NumElts = Ty.getNumElements();

  // Try to produce G_REV64.
  if (isREVMask(ShuffleMask, EltSize, NumElts, 64)) {
    MatchInfo = ShuffleVectorPseudo(AArch64::G_REV64, Dst, {Src});
    return true;
  }

  // TODO: Produce G_REV32 and G_REV16 once we have proper legalization
  // support. This should be identical to above, but with a constant 32 and
  // constant 16.
  return false;
}

/// \return true if a G_SHUFFLE_VECTOR instruction \p MI can be replaced with
/// a G_TRN1 or G_TRN2 instruction.
static bool matchTRN(MachineInstr &MI, MachineRegisterInfo &MRI,
                     ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  unsigned WhichResult;
  ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
  Register Dst = MI.getOperand(0).getReg();
  unsigned NumElts = MRI.getType(Dst).getNumElements();
  if (!isTRNMask(ShuffleMask, NumElts, WhichResult))
    return false;
  unsigned Opc = (WhichResult == 0) ? AArch64::G_TRN1 : AArch64::G_TRN2;
  Register V1 = MI.getOperand(1).getReg();
  Register V2 = MI.getOperand(2).getReg();
  MatchInfo = ShuffleVectorPseudo(Opc, Dst, {V1, V2});
  return true;
}

/// \return true if a G_SHUFFLE_VECTOR instruction \p MI can be replaced with
/// a G_UZP1 or G_UZP2 instruction.
///
/// \param [in] MI - The shuffle vector instruction.
/// \param [out] MatchInfo - Either G_UZP1 or G_UZP2 on success.
static bool matchUZP(MachineInstr &MI, MachineRegisterInfo &MRI,
                     ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  unsigned WhichResult;
  ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
  Register Dst = MI.getOperand(0).getReg();
  unsigned NumElts = MRI.getType(Dst).getNumElements();
  if (!isUZPMask(ShuffleMask, NumElts, WhichResult))
    return false;
  unsigned Opc = (WhichResult == 0) ? AArch64::G_UZP1 : AArch64::G_UZP2;
  Register V1 = MI.getOperand(1).getReg();
  Register V2 = MI.getOperand(2).getReg();
  MatchInfo = ShuffleVectorPseudo(Opc, Dst, {V1, V2});
  return true;
}

static bool matchZip(MachineInstr &MI, MachineRegisterInfo &MRI,
                     ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  unsigned WhichResult;
  ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
  Register Dst = MI.getOperand(0).getReg();
  unsigned NumElts = MRI.getType(Dst).getNumElements();
  if (!isZipMask(ShuffleMask, NumElts, WhichResult))
    return false;
  unsigned Opc = (WhichResult == 0) ? AArch64::G_ZIP1 : AArch64::G_ZIP2;
  Register V1 = MI.getOperand(1).getReg();
  Register V2 = MI.getOperand(2).getReg();
  MatchInfo = ShuffleVectorPseudo(Opc, Dst, {V1, V2});
  return true;
}

/// Helper function for matchDup.
static bool matchDupFromInsertVectorElt(int Lane, MachineInstr &MI,
                                        MachineRegisterInfo &MRI,
                                        ShuffleVectorPseudo &MatchInfo) {
  if (Lane != 0)
    return false;

  // Try to match a vector splat operation into a dup instruction.
  // We're looking for this pattern:
  //
  // %scalar:gpr(s64) = COPY $x0
  // %undef:fpr(<2 x s64>) = G_IMPLICIT_DEF
  // %cst0:gpr(s32) = G_CONSTANT i32 0
  // %zerovec:fpr(<2 x s32>) = G_BUILD_VECTOR %cst0(s32), %cst0(s32)
  // %ins:fpr(<2 x s64>) = G_INSERT_VECTOR_ELT %undef, %scalar(s64), %cst0(s32)
  // %splat:fpr(<2 x s64>) = G_SHUFFLE_VECTOR %ins(<2 x s64>), %undef, %zerovec(<2 x s32>)
  //
  // ...into:
  // %splat = G_DUP %scalar

  // Begin matching the insert.
  auto *InsMI = getOpcodeDef(TargetOpcode::G_INSERT_VECTOR_ELT,
                             MI.getOperand(1).getReg(), MRI);
  if (!InsMI)
    return false;
  // Match the undef vector operand.
  if (!getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, InsMI->getOperand(1).getReg(),
                    MRI))
    return false;

  // Match the index constant 0.
  if (!mi_match(InsMI->getOperand(3).getReg(), MRI, m_ZeroInt()))
    return false;

  MatchInfo = ShuffleVectorPseudo(AArch64::G_DUP, MI.getOperand(0).getReg(),
                                  {InsMI->getOperand(2).getReg()});
  return true;
}

/// Helper function for matchDup.
static bool matchDupFromBuildVector(int Lane, MachineInstr &MI,
                                    MachineRegisterInfo &MRI,
                                    ShuffleVectorPseudo &MatchInfo) {
  assert(Lane >= 0 && "Expected non-negative lane?");
  // Test if the LHS is a BUILD_VECTOR. If it is, then we can just reference
  // the lane's definition directly.
  auto *BuildVecMI = getOpcodeDef(TargetOpcode::G_BUILD_VECTOR,
                                  MI.getOperand(1).getReg(), MRI);
  if (!BuildVecMI)
    return false;
  Register Reg = BuildVecMI->getOperand(Lane + 1).getReg();
  MatchInfo =
      ShuffleVectorPseudo(AArch64::G_DUP, MI.getOperand(0).getReg(), {Reg});
  return true;
}

static bool matchDup(MachineInstr &MI, MachineRegisterInfo &MRI,
                     ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  auto MaybeLane = getSplatIndex(MI);
  if (!MaybeLane)
    return false;
  int Lane = *MaybeLane;
  // If the splat lane is undef, treat it as a splat of lane 0 so it can still
  // be lowered to a dup, if possible.
  if (Lane < 0)
    Lane = 0;
  if (matchDupFromInsertVectorElt(Lane, MI, MRI, MatchInfo))
    return true;
  if (matchDupFromBuildVector(Lane, MI, MRI, MatchInfo))
    return true;
  return false;
}

static bool matchEXT(MachineInstr &MI, MachineRegisterInfo &MRI,
                     ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  Register Dst = MI.getOperand(0).getReg();
  auto ExtInfo = getExtMask(MI.getOperand(3).getShuffleMask(),
                            MRI.getType(Dst).getNumElements());
  if (!ExtInfo)
    return false;
  bool ReverseExt;
  uint64_t Imm;
  std::tie(ReverseExt, Imm) = *ExtInfo;
  Register V1 = MI.getOperand(1).getReg();
  Register V2 = MI.getOperand(2).getReg();
  if (ReverseExt)
    std::swap(V1, V2);
  uint64_t ExtFactor = MRI.getType(V1).getScalarSizeInBits() / 8;
  Imm *= ExtFactor;
  MatchInfo = ShuffleVectorPseudo(AArch64::G_EXT, Dst, {V1, V2, Imm});
  return true;
}

/// Replace a G_SHUFFLE_VECTOR instruction \p MI with the pseudo described by
/// \p MatchInfo.
static bool applyShuffleVectorPseudo(MachineInstr &MI,
                                     ShuffleVectorPseudo &MatchInfo) {
  MachineIRBuilder MIRBuilder(MI);
  MIRBuilder.buildInstr(MatchInfo.Opc, {MatchInfo.Dst}, MatchInfo.SrcOps);
  MI.eraseFromParent();
  return true;
}

/// Replace a G_SHUFFLE_VECTOR instruction with G_EXT.
/// Special-cased because the constant operand must be emitted as a G_CONSTANT
/// for the imported tablegen patterns to work.
static bool applyEXT(MachineInstr &MI, ShuffleVectorPseudo &MatchInfo) {
  MachineIRBuilder MIRBuilder(MI);
  // Tablegen patterns expect an i32 G_CONSTANT as the final op.
  auto Cst =
      MIRBuilder.buildConstant(LLT::scalar(32), MatchInfo.SrcOps[2].getImm());
  MIRBuilder.buildInstr(MatchInfo.Opc, {MatchInfo.Dst},
                        {MatchInfo.SrcOps[0], MatchInfo.SrcOps[1], Cst});
  MI.eraseFromParent();
  return true;
}

/// Match a G_SHUFFLE_VECTOR with a mask which corresponds to a
/// G_INSERT_VECTOR_ELT and G_EXTRACT_VECTOR_ELT pair.
///
/// e.g.
/// %shuf = G_SHUFFLE_VECTOR %left, %right, shufflemask(0, 0)
///
/// Can be represented as
///
/// %extract = G_EXTRACT_VECTOR_ELT %left, 0
/// %ins = G_INSERT_VECTOR_ELT %left, %extract, 1
///
static bool matchINS(MachineInstr &MI, MachineRegisterInfo &MRI,
                     std::tuple<Register, int, Register, int> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
  Register Dst = MI.getOperand(0).getReg();
  int NumElts = MRI.getType(Dst).getNumElements();
  auto DstIsLeftAndDstLane = isINSMask(ShuffleMask, NumElts);
  if (!DstIsLeftAndDstLane)
    return false;
  bool DstIsLeft;
  int DstLane;
  std::tie(DstIsLeft, DstLane) = *DstIsLeftAndDstLane;
  Register Left = MI.getOperand(1).getReg();
  Register Right = MI.getOperand(2).getReg();
  Register DstVec = DstIsLeft ? Left : Right;
  Register SrcVec = Left;

  int SrcLane = ShuffleMask[DstLane];
  if (SrcLane >= NumElts) {
    SrcVec = Right;
    SrcLane -= NumElts;
  }

  MatchInfo = std::make_tuple(DstVec, DstLane, SrcVec, SrcLane);
  return true;
}

static bool applyINS(MachineInstr &MI, MachineRegisterInfo &MRI,
                     MachineIRBuilder &Builder,
                     std::tuple<Register, int, Register, int> &MatchInfo) {
  Builder.setInstrAndDebugLoc(MI);
  Register Dst = MI.getOperand(0).getReg();
  auto ScalarTy = MRI.getType(Dst).getElementType();
  Register DstVec, SrcVec;
  int DstLane, SrcLane;
  std::tie(DstVec, DstLane, SrcVec, SrcLane) = MatchInfo;
  auto SrcCst = Builder.buildConstant(LLT::scalar(64), SrcLane);
  auto Extract = Builder.buildExtractVectorElement(ScalarTy, SrcVec, SrcCst);
  auto DstCst = Builder.buildConstant(LLT::scalar(64), DstLane);
  Builder.buildInsertVectorElement(Dst, DstVec, Extract, DstCst);
  MI.eraseFromParent();
  return true;
}

/// isVShiftRImm - Check if this is a valid vector splat for the immediate
/// operand of a vector shift right operation. The value must be in the range:
/// 1 <= Value <= ElementBits for a right shift.
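///
/// For example, for a <4 x s32> shift the splatted shift amount must lie in
/// the range [1, 32].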
static bool isVShiftRImm(Register Reg, MachineRegisterInfo &MRI, LLT Ty,
                         int64_t &Cnt) {
  assert(Ty.isVector() && "vector shift count is not a vector type");
  MachineInstr *MI = MRI.getVRegDef(Reg);
  auto Cst = getAArch64VectorSplatScalar(*MI, MRI);
  if (!Cst)
    return false;
  Cnt = *Cst;
  int64_t ElementBits = Ty.getScalarSizeInBits();
  return Cnt >= 1 && Cnt <= ElementBits;
}

/// Match a vector G_ASHR or G_LSHR with a valid immediate shift.
static bool matchVAshrLshrImm(MachineInstr &MI, MachineRegisterInfo &MRI,
                              int64_t &Imm) {
  assert(MI.getOpcode() == TargetOpcode::G_ASHR ||
         MI.getOpcode() == TargetOpcode::G_LSHR);
  LLT Ty = MRI.getType(MI.getOperand(1).getReg());
  if (!Ty.isVector())
    return false;
  return isVShiftRImm(MI.getOperand(2).getReg(), MRI, Ty, Imm);
}

static bool applyVAshrLshrImm(MachineInstr &MI, MachineRegisterInfo &MRI,
                              int64_t &Imm) {
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_ASHR || Opc == TargetOpcode::G_LSHR);
  unsigned NewOpc =
      Opc == TargetOpcode::G_ASHR ? AArch64::G_VASHR : AArch64::G_VLSHR;
  MachineIRBuilder MIB(MI);
  auto ImmDef = MIB.buildConstant(LLT::scalar(32), Imm);
  MIB.buildInstr(NewOpc, {MI.getOperand(0)}, {MI.getOperand(1), ImmDef});
  MI.eraseFromParent();
  return true;
}

/// Determine if it is possible to modify the \p RHS and predicate \p P of a
/// G_ICMP instruction such that the right-hand side is an arithmetic immediate.
///
/// \returns A pair containing the updated immediate and predicate which may
/// be used to optimize the instruction.
///
/// \note This assumes that the comparison has been legalized.
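///
/// For example (illustrative MIR; 4097 is not a legal arithmetic immediate,
/// but 4096 is):
///
///   %c:_(s32) = G_CONSTANT i32 4097
///   %cmp:_(s32) = G_ICMP intpred(slt), %x(s32), %c
///
/// can be adjusted to the equivalent
///
///   %c:_(s32) = G_CONSTANT i32 4096
///   %cmp:_(s32) = G_ICMP intpred(sle), %x(s32), %c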
Optional<std::pair<uint64_t, CmpInst::Predicate>>
tryAdjustICmpImmAndPred(Register RHS, CmpInst::Predicate P,
                        const MachineRegisterInfo &MRI) {
  const auto &Ty = MRI.getType(RHS);
  if (Ty.isVector())
    return None;
  unsigned Size = Ty.getSizeInBits();
  assert((Size == 32 || Size == 64) && "Expected 32 or 64 bit compare only?");

  // If the RHS is not a constant, or the RHS is already a valid arithmetic
  // immediate, then there is nothing to change.
  auto ValAndVReg = getConstantVRegValWithLookThrough(RHS, MRI);
  if (!ValAndVReg)
    return None;
  uint64_t C = ValAndVReg->Value.getZExtValue();
  if (isLegalArithImmed(C))
    return None;

  // We have a non-arithmetic immediate. Check if adjusting the immediate and
  // adjusting the predicate will result in a legal arithmetic immediate.
  switch (P) {
  default:
    return None;
  case CmpInst::ICMP_SLT:
  case CmpInst::ICMP_SGE:
    // Check for
    //
    // x slt c => x sle c - 1
    // x sge c => x sgt c - 1
    //
    // When c is not the smallest possible negative number.
    if ((Size == 64 && static_cast<int64_t>(C) == INT64_MIN) ||
        (Size == 32 && static_cast<int32_t>(C) == INT32_MIN))
      return None;
    P = (P == CmpInst::ICMP_SLT) ? CmpInst::ICMP_SLE : CmpInst::ICMP_SGT;
    C -= 1;
    break;
  case CmpInst::ICMP_ULT:
  case CmpInst::ICMP_UGE:
    // Check for
    //
    // x ult c => x ule c - 1
    // x uge c => x ugt c - 1
    //
    // When c is not zero.
    if (C == 0)
      return None;
    P = (P == CmpInst::ICMP_ULT) ? CmpInst::ICMP_ULE : CmpInst::ICMP_UGT;
    C -= 1;
    break;
  case CmpInst::ICMP_SLE:
  case CmpInst::ICMP_SGT:
    // Check for
    //
    // x sle c => x slt c + 1
    // x sgt c => x sge c + 1
    //
    // When c is not the largest possible signed integer.
    if ((Size == 32 && static_cast<int32_t>(C) == INT32_MAX) ||
        (Size == 64 && static_cast<int64_t>(C) == INT64_MAX))
      return None;
    P = (P == CmpInst::ICMP_SLE) ? CmpInst::ICMP_SLT : CmpInst::ICMP_SGE;
    C += 1;
    break;
  case CmpInst::ICMP_ULE:
  case CmpInst::ICMP_UGT:
    // Check for
    //
    // x ule c => x ult c + 1
    // x ugt c => x uge c + 1
    //
    // When c is not the largest possible unsigned integer.
    if ((Size == 32 && static_cast<uint32_t>(C) == UINT32_MAX) ||
        (Size == 64 && C == UINT64_MAX))
      return None;
    P = (P == CmpInst::ICMP_ULE) ? CmpInst::ICMP_ULT : CmpInst::ICMP_UGE;
    C += 1;
    break;
  }

  // Check if the new constant is valid, and return the updated constant and
  // predicate if it is.
  if (Size == 32)
    C = static_cast<uint32_t>(C);
  if (!isLegalArithImmed(C))
    return None;
  return {{C, P}};
}

/// Determine whether or not it is possible to update the RHS and predicate of
/// a G_ICMP instruction such that the RHS will be selected as an arithmetic
/// immediate.
///
/// \p MI - The G_ICMP instruction
/// \p MatchInfo - The new RHS immediate and predicate on success
///
/// See tryAdjustICmpImmAndPred for valid transformations.
bool matchAdjustICmpImmAndPred(
    MachineInstr &MI, const MachineRegisterInfo &MRI,
    std::pair<uint64_t, CmpInst::Predicate> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);
  Register RHS = MI.getOperand(3).getReg();
  auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
  if (auto MaybeNewImmAndPred = tryAdjustICmpImmAndPred(RHS, Pred, MRI)) {
    MatchInfo = *MaybeNewImmAndPred;
    return true;
  }
  return false;
}

bool applyAdjustICmpImmAndPred(
    MachineInstr &MI, std::pair<uint64_t, CmpInst::Predicate> &MatchInfo,
    MachineIRBuilder &MIB, GISelChangeObserver &Observer) {
  MIB.setInstrAndDebugLoc(MI);
  MachineOperand &RHS = MI.getOperand(3);
  MachineRegisterInfo &MRI = *MIB.getMRI();
  auto Cst = MIB.buildConstant(MRI.cloneVirtualRegister(RHS.getReg()),
                               MatchInfo.first);
  Observer.changingInstr(MI);
  RHS.setReg(Cst->getOperand(0).getReg());
  MI.getOperand(1).setPredicate(MatchInfo.second);
  Observer.changedInstr(MI);
  return true;
}

bool matchDupLane(MachineInstr &MI, MachineRegisterInfo &MRI,
                  std::pair<unsigned, int> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  Register Src1Reg = MI.getOperand(1).getReg();
  const LLT SrcTy = MRI.getType(Src1Reg);
  const LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  auto LaneIdx = getSplatIndex(MI);
  if (!LaneIdx)
    return false;

  // The lane idx should be within the first source vector.
  if (*LaneIdx >= SrcTy.getNumElements())
    return false;

  if (DstTy != SrcTy)
    return false;

  LLT ScalarTy = SrcTy.getElementType();
  unsigned ScalarSize = ScalarTy.getSizeInBits();

  unsigned Opc = 0;
  switch (SrcTy.getNumElements()) {
  case 2:
    if (ScalarSize == 64)
      Opc = AArch64::G_DUPLANE64;
    else if (ScalarSize == 32)
      Opc = AArch64::G_DUPLANE32;
    break;
  case 4:
    if (ScalarSize == 32)
      Opc = AArch64::G_DUPLANE32;
    break;
  case 8:
    if (ScalarSize == 16)
      Opc = AArch64::G_DUPLANE16;
    break;
  case 16:
    if (ScalarSize == 8)
      Opc = AArch64::G_DUPLANE8;
    break;
  default:
    break;
  }
  if (!Opc)
    return false;

  MatchInfo.first = Opc;
  MatchInfo.second = *LaneIdx;
  return true;
}

bool applyDupLane(MachineInstr &MI, MachineRegisterInfo &MRI,
                  MachineIRBuilder &B, std::pair<unsigned, int> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  Register Src1Reg = MI.getOperand(1).getReg();
  const LLT SrcTy = MRI.getType(Src1Reg);

  B.setInstrAndDebugLoc(MI);
  auto Lane = B.buildConstant(LLT::scalar(64), MatchInfo.second);

  Register DupSrc = MI.getOperand(1).getReg();
  // For types like <2 x s32>, we can use G_DUPLANE32, with a <4 x s32> source.
  // To do this, we can use a G_CONCAT_VECTORS to do the widening.
  if (SrcTy == LLT::fixed_vector(2, LLT::scalar(32))) {
    assert(MRI.getType(MI.getOperand(0).getReg()).getNumElements() == 2 &&
           "Unexpected dest elements");
    auto Undef = B.buildUndef(SrcTy);
    DupSrc = B.buildConcatVectors(
                  SrcTy.changeElementCount(ElementCount::getFixed(4)),
                  {Src1Reg, Undef.getReg(0)})
                 .getReg(0);
  }
  B.buildInstr(MatchInfo.first, {MI.getOperand(0).getReg()}, {DupSrc, Lane});
  MI.eraseFromParent();
  return true;
}

static bool matchBuildVectorToDup(MachineInstr &MI, MachineRegisterInfo &MRI) {
  assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
  auto Splat = getAArch64VectorSplat(MI, MRI);
  if (!Splat)
    return false;
  if (Splat->isReg())
    return true;
  // Later, during selection, we'll try to match imported patterns using
  // immAllOnesV and immAllZerosV. These require G_BUILD_VECTOR. Don't lower
  // G_BUILD_VECTORs which could match those patterns.
  int64_t Cst = Splat->getCst();
  return (Cst != 0 && Cst != -1);
}

static bool applyBuildVectorToDup(MachineInstr &MI, MachineRegisterInfo &MRI,
                                  MachineIRBuilder &B) {
  B.setInstrAndDebugLoc(MI);
  B.buildInstr(AArch64::G_DUP, {MI.getOperand(0).getReg()},
               {MI.getOperand(1).getReg()});
  MI.eraseFromParent();
  return true;
}

/// \returns how many instructions would be saved by folding a G_ICMP's shift
/// and/or extension operations.
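///
/// For example, a compare operand defined by a supported extend that feeds a
/// small shift (amount <= 4) can fold both operations into the compare,
/// saving two instructions; a lone extend or an in-range shift saves one.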
static unsigned getCmpOperandFoldingProfit(Register CmpOp,
                                           const MachineRegisterInfo &MRI) {
  // No instructions to save if there's more than one use or no uses.
  if (!MRI.hasOneNonDBGUse(CmpOp))
    return 0;

  // FIXME: This is duplicated with the selector. (See: selectShiftedRegister)
  auto IsSupportedExtend = [&](const MachineInstr &MI) {
    if (MI.getOpcode() == TargetOpcode::G_SEXT_INREG)
      return true;
    if (MI.getOpcode() != TargetOpcode::G_AND)
      return false;
    auto ValAndVReg =
        getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
    if (!ValAndVReg)
      return false;
    uint64_t Mask = ValAndVReg->Value.getZExtValue();
    return (Mask == 0xFF || Mask == 0xFFFF || Mask == 0xFFFFFFFF);
  };

  MachineInstr *Def = getDefIgnoringCopies(CmpOp, MRI);
  if (IsSupportedExtend(*Def))
    return 1;

  unsigned Opc = Def->getOpcode();
  if (Opc != TargetOpcode::G_SHL && Opc != TargetOpcode::G_ASHR &&
      Opc != TargetOpcode::G_LSHR)
    return 0;

  auto MaybeShiftAmt =
      getConstantVRegValWithLookThrough(Def->getOperand(2).getReg(), MRI);
  if (!MaybeShiftAmt)
    return 0;
  uint64_t ShiftAmt = MaybeShiftAmt->Value.getZExtValue();
  MachineInstr *ShiftLHS =
      getDefIgnoringCopies(Def->getOperand(1).getReg(), MRI);

  // Check if we can fold an extend and a shift.
  // FIXME: This is duplicated with the selector. (See:
  // selectArithExtendedRegister)
  if (IsSupportedExtend(*ShiftLHS))
    return (ShiftAmt <= 4) ? 2 : 1;

  LLT Ty = MRI.getType(Def->getOperand(0).getReg());
  if (Ty.isVector())
    return 0;
  unsigned ShiftSize = Ty.getSizeInBits();
  if ((ShiftSize == 32 && ShiftAmt <= 31) ||
      (ShiftSize == 64 && ShiftAmt <= 63))
    return 1;
  return 0;
}

/// \returns true if it would be profitable to swap the LHS and RHS of a G_ICMP
/// instruction \p MI.
static bool trySwapICmpOperands(MachineInstr &MI,
                                const MachineRegisterInfo &MRI) {
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);
  // Swap the operands if it would introduce a profitable folding opportunity.
  // (e.g. a shift + extend).
  //
  // For example:
  // lsl w13, w11, #1
  // cmp w13, w12
  // can be turned into:
  // cmp w12, w11, lsl #1

  // Don't swap if there's a constant on the RHS, because we know we can fold
  // that.
  Register RHS = MI.getOperand(3).getReg();
  auto RHSCst = getConstantVRegValWithLookThrough(RHS, MRI);
  if (RHSCst && isLegalArithImmed(RHSCst->Value.getSExtValue()))
    return false;

  Register LHS = MI.getOperand(2).getReg();
  auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
  auto GetRegForProfit = [&](Register Reg) {
    MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
    return isCMN(Def, Pred, MRI) ? Def->getOperand(2).getReg() : Reg;
  };

  // Don't have a constant on the RHS. If we swap the LHS and RHS of the
  // compare, would we be able to fold more instructions?
  Register TheLHS = GetRegForProfit(LHS);
  Register TheRHS = GetRegForProfit(RHS);

  // If the LHS is more likely to give us a folding opportunity, then swap the
  // LHS and RHS.
  return (getCmpOperandFoldingProfit(TheLHS, MRI) >
          getCmpOperandFoldingProfit(TheRHS, MRI));
}

static bool applySwapICmpOperands(MachineInstr &MI,
                                  GISelChangeObserver &Observer) {
  auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
  Register LHS = MI.getOperand(2).getReg();
  Register RHS = MI.getOperand(3).getReg();
  Observer.changingInstr(MI);
  MI.getOperand(1).setPredicate(CmpInst::getSwappedPredicate(Pred));
  MI.getOperand(2).setReg(RHS);
  MI.getOperand(3).setReg(LHS);
  Observer.changedInstr(MI);
  return true;
}

/// \returns a function which builds a vector floating point compare instruction
/// for a condition code \p CC.
/// \param [in] IsZero - True if the comparison is against 0.
/// \param [in] NoNans - True if the target has NoNansFPMath.
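///
/// Note that there are no native vector compares for the LS and MI condition
/// codes; those are built from FCMGE/FCMGT with the operands swapped, or from
/// FCMLEZ/FCMLTZ when comparing against zero.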
static std::function<Register(MachineIRBuilder &)>
getVectorFCMP(AArch64CC::CondCode CC, Register LHS, Register RHS, bool IsZero,
              bool NoNans, MachineRegisterInfo &MRI) {
  LLT DstTy = MRI.getType(LHS);
  assert(DstTy.isVector() && "Expected vector types only?");
  assert(DstTy == MRI.getType(RHS) && "Src and Dst types must match!");
  switch (CC) {
  default:
    llvm_unreachable("Unexpected condition code!");
  case AArch64CC::NE:
    return [LHS, RHS, IsZero, DstTy](MachineIRBuilder &MIB) {
      auto FCmp = IsZero
                      ? MIB.buildInstr(AArch64::G_FCMEQZ, {DstTy}, {LHS})
                      : MIB.buildInstr(AArch64::G_FCMEQ, {DstTy}, {LHS, RHS});
      return MIB.buildNot(DstTy, FCmp).getReg(0);
    };
  case AArch64CC::EQ:
    return [LHS, RHS, IsZero, DstTy](MachineIRBuilder &MIB) {
      return IsZero
                 ? MIB.buildInstr(AArch64::G_FCMEQZ, {DstTy}, {LHS}).getReg(0)
                 : MIB.buildInstr(AArch64::G_FCMEQ, {DstTy}, {LHS, RHS})
                       .getReg(0);
    };
  case AArch64CC::GE:
    return [LHS, RHS, IsZero, DstTy](MachineIRBuilder &MIB) {
      return IsZero
                 ? MIB.buildInstr(AArch64::G_FCMGEZ, {DstTy}, {LHS}).getReg(0)
                 : MIB.buildInstr(AArch64::G_FCMGE, {DstTy}, {LHS, RHS})
                       .getReg(0);
    };
  case AArch64CC::GT:
    return [LHS, RHS, IsZero, DstTy](MachineIRBuilder &MIB) {
      return IsZero
                 ? MIB.buildInstr(AArch64::G_FCMGTZ, {DstTy}, {LHS}).getReg(0)
                 : MIB.buildInstr(AArch64::G_FCMGT, {DstTy}, {LHS, RHS})
                       .getReg(0);
    };
  case AArch64CC::LS:
    return [LHS, RHS, IsZero, DstTy](MachineIRBuilder &MIB) {
      return IsZero
                 ? MIB.buildInstr(AArch64::G_FCMLEZ, {DstTy}, {LHS}).getReg(0)
                 : MIB.buildInstr(AArch64::G_FCMGE, {DstTy}, {RHS, LHS})
                       .getReg(0);
    };
  case AArch64CC::MI:
    return [LHS, RHS, IsZero, DstTy](MachineIRBuilder &MIB) {
      return IsZero
                 ? MIB.buildInstr(AArch64::G_FCMLTZ, {DstTy}, {LHS}).getReg(0)
                 : MIB.buildInstr(AArch64::G_FCMGT, {DstTy}, {RHS, LHS})
                       .getReg(0);
    };
  }
}

/// Try to lower a vector G_FCMP \p MI into an AArch64-specific pseudo.
static bool lowerVectorFCMP(MachineInstr &MI, MachineRegisterInfo &MRI,
                            MachineIRBuilder &MIB) {
  assert(MI.getOpcode() == TargetOpcode::G_FCMP);
  const auto &ST = MI.getMF()->getSubtarget<AArch64Subtarget>();
  Register Dst = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(Dst);
  if (!DstTy.isVector() || !ST.hasNEON())
    return false;
  const auto Pred =
      static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
  Register LHS = MI.getOperand(2).getReg();
  // TODO: Handle v4s16 case.
  unsigned EltSize = MRI.getType(LHS).getScalarSizeInBits();
  if (EltSize != 32 && EltSize != 64)
    return false;
  Register RHS = MI.getOperand(3).getReg();
  auto Splat = getAArch64VectorSplat(*MRI.getVRegDef(RHS), MRI);

  // Compares against 0 have special target-specific pseudos.
  bool IsZero = Splat && Splat->isCst() && Splat->getCst() == 0;
  bool Invert;
  AArch64CC::CondCode CC, CC2;
  changeVectorFCMPPredToAArch64CC(Pred, CC, CC2, Invert);
  bool NoNans = ST.getTargetLowering()->getTargetMachine().Options.NoNaNsFPMath;

  // Instead of having an apply function, just build here to simplify things.
  MIB.setInstrAndDebugLoc(MI);
  auto Cmp = getVectorFCMP(CC, LHS, RHS, IsZero, NoNans, MRI);
  Register CmpRes;
  if (CC2 == AArch64CC::AL)
    CmpRes = Cmp(MIB);
  else {
    auto Cmp2 = getVectorFCMP(CC2, LHS, RHS, IsZero, NoNans, MRI);
    auto Cmp2Dst = Cmp2(MIB);
    auto Cmp1Dst = Cmp(MIB);
    CmpRes = MIB.buildOr(DstTy, Cmp1Dst, Cmp2Dst).getReg(0);
  }
  if (Invert)
    CmpRes = MIB.buildNot(DstTy, CmpRes).getReg(0);
  MRI.replaceRegWith(Dst, CmpRes);
  MI.eraseFromParent();
  return true;
}

static bool matchFormTruncstore(MachineInstr &MI, MachineRegisterInfo &MRI,
                                Register &SrcReg) {
  assert(MI.getOpcode() == TargetOpcode::G_STORE);
  Register DstReg = MI.getOperand(0).getReg();
  if (MRI.getType(DstReg).isVector())
    return false;
  // Match a store of a truncate.
  if (!mi_match(DstReg, MRI, m_GTrunc(m_Reg(SrcReg))))
    return false;
  // Only form truncstores for value types of max 64b.
  return MRI.getType(SrcReg).getSizeInBits() <= 64;
}

static bool applyFormTruncstore(MachineInstr &MI, MachineRegisterInfo &MRI,
                                MachineIRBuilder &B,
                                GISelChangeObserver &Observer,
                                Register &SrcReg) {
  assert(MI.getOpcode() == TargetOpcode::G_STORE);
  Observer.changingInstr(MI);
  MI.getOperand(0).setReg(SrcReg);
  Observer.changedInstr(MI);
  return true;
}

#define AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_DEPS
#include "AArch64GenPostLegalizeGILowering.inc"
#undef AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_DEPS

namespace {
#define AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_H
#include "AArch64GenPostLegalizeGILowering.inc"
#undef AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_H

class AArch64PostLegalizerLoweringInfo : public CombinerInfo {
public:
  AArch64GenPostLegalizerLoweringHelperRuleConfig GeneratedRuleCfg;

  AArch64PostLegalizerLoweringInfo(bool OptSize, bool MinSize)
      : CombinerInfo(/*AllowIllegalOps*/ true, /*ShouldLegalizeIllegal*/ false,
                     /*LegalizerInfo*/ nullptr, /*OptEnabled = */ true, OptSize,
                     MinSize) {
    if (!GeneratedRuleCfg.parseCommandLineOption())
      report_fatal_error("Invalid rule identifier");
  }

  virtual bool combine(GISelChangeObserver &Observer, MachineInstr &MI,
                       MachineIRBuilder &B) const override;
};

bool AArch64PostLegalizerLoweringInfo::combine(GISelChangeObserver &Observer,
                                               MachineInstr &MI,
                                               MachineIRBuilder &B) const {
  CombinerHelper Helper(Observer, B);
  AArch64GenPostLegalizerLoweringHelper Generated(GeneratedRuleCfg);
  return Generated.tryCombineAll(Observer, MI, B, Helper);
}

#define AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_CPP
#include "AArch64GenPostLegalizeGILowering.inc"
#undef AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_CPP

class AArch64PostLegalizerLowering : public MachineFunctionPass {
public:
  static char ID;

  AArch64PostLegalizerLowering();

  StringRef getPassName() const override {
    return "AArch64PostLegalizerLowering";
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
};
} // end anonymous namespace

void AArch64PostLegalizerLowering::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  AU.setPreservesCFG();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}

AArch64PostLegalizerLowering::AArch64PostLegalizerLowering()
    : MachineFunctionPass(ID) {
  initializeAArch64PostLegalizerLoweringPass(*PassRegistry::getPassRegistry());
}

bool AArch64PostLegalizerLowering::runOnMachineFunction(MachineFunction &MF) {
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::FailedISel))
    return false;
  assert(MF.getProperties().hasProperty(
             MachineFunctionProperties::Property::Legalized) &&
         "Expected a legalized function?");
  auto *TPC = &getAnalysis<TargetPassConfig>();
  const Function &F = MF.getFunction();
  AArch64PostLegalizerLoweringInfo PCInfo(F.hasOptSize(), F.hasMinSize());
  Combiner C(PCInfo, TPC);
  return C.combineMachineInstrs(MF, /*CSEInfo*/ nullptr);
}

char AArch64PostLegalizerLowering::ID = 0;
INITIALIZE_PASS_BEGIN(AArch64PostLegalizerLowering, DEBUG_TYPE,
                      "Lower AArch64 MachineInstrs after legalization", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(AArch64PostLegalizerLowering, DEBUG_TYPE,
                    "Lower AArch64 MachineInstrs after legalization", false,
                    false)

namespace llvm {
FunctionPass *createAArch64PostLegalizerLowering() {
  return new AArch64PostLegalizerLowering();
}
} // end namespace llvm