1 //===-- lib/CodeGen/GlobalISel/GICombinerHelper.cpp -----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 #include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
9 #include "llvm/ADT/SetVector.h"
10 #include "llvm/ADT/SmallBitVector.h"
11 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
12 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
13 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
14 #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
15 #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
16 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
17 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
18 #include "llvm/CodeGen/GlobalISel/Utils.h"
19 #include "llvm/CodeGen/LowLevelType.h"
20 #include "llvm/CodeGen/MachineBasicBlock.h"
21 #include "llvm/CodeGen/MachineDominators.h"
22 #include "llvm/CodeGen/MachineInstr.h"
23 #include "llvm/CodeGen/MachineMemOperand.h"
24 #include "llvm/CodeGen/MachineRegisterInfo.h"
25 #include "llvm/CodeGen/RegisterBankInfo.h"
26 #include "llvm/CodeGen/TargetInstrInfo.h"
27 #include "llvm/CodeGen/TargetLowering.h"
28 #include "llvm/CodeGen/TargetOpcodes.h"
29 #include "llvm/IR/DataLayout.h"
30 #include "llvm/Support/Casting.h"
31 #include "llvm/Support/DivisionByConstantInfo.h"
32 #include "llvm/Support/MathExtras.h"
33 #include "llvm/Target/TargetMachine.h"
34 #include <tuple>
35 
36 #define DEBUG_TYPE "gi-combiner"
37 
38 using namespace llvm;
39 using namespace MIPatternMatch;
40 
41 // Option to allow testing of the combiner while no targets know about indexed
42 // addressing.
43 static cl::opt<bool>
44     ForceLegalIndexing("force-legal-indexing", cl::Hidden, cl::init(false),
45                        cl::desc("Force all indexed operations to be "
46                                 "legal for the GlobalISel combiner"));
47 
48 CombinerHelper::CombinerHelper(GISelChangeObserver &Observer,
49                                MachineIRBuilder &B, GISelKnownBits *KB,
50                                MachineDominatorTree *MDT,
51                                const LegalizerInfo *LI)
52     : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer), KB(KB),
53       MDT(MDT), LI(LI), RBI(Builder.getMF().getSubtarget().getRegBankInfo()),
54       TRI(Builder.getMF().getSubtarget().getRegisterInfo()) {
55   (void)this->KB;
56 }
57 
58 const TargetLowering &CombinerHelper::getTargetLowering() const {
59   return *Builder.getMF().getSubtarget().getTargetLowering();
60 }
61 
62 /// \returns The little endian in-memory byte position of byte \p I in a
63 /// \p ByteWidth bytes wide type.
64 ///
65 /// E.g. Given a 4-byte type x, x[0] -> byte 0
66 static unsigned littleEndianByteAt(const unsigned ByteWidth, const unsigned I) {
67   assert(I < ByteWidth && "I must be in [0, ByteWidth)");
68   return I;
69 }
70 
71 /// Determines the LogBase2 value for a non-null input value using the
72 /// transform: LogBase2(V) = (EltBits - 1) - ctlz(V).
73 static Register buildLogBase2(Register V, MachineIRBuilder &MIB) {
74   auto &MRI = *MIB.getMRI();
75   LLT Ty = MRI.getType(V);
76   auto Ctlz = MIB.buildCTLZ(Ty, V);
77   auto Base = MIB.buildConstant(Ty, Ty.getScalarSizeInBits() - 1);
78   return MIB.buildSub(Ty, Base, Ctlz).getReg(0);
79 }
80 
81 /// \returns The big endian in-memory byte position of byte \p I in a
82 /// \p ByteWidth bytes wide type.
83 ///
84 /// E.g. Given a 4-byte type x, x[0] -> byte 3
85 static unsigned bigEndianByteAt(const unsigned ByteWidth, const unsigned I) {
86   assert(I < ByteWidth && "I must be in [0, ByteWidth)");
87   return ByteWidth - I - 1;
88 }
89 
90 /// Given a map from byte offsets in memory to indices in a load/store,
91 /// determine if that map corresponds to a little or big endian byte pattern.
92 ///
93 /// \param MemOffset2Idx maps memory offsets to address offsets.
94 /// \param LowestIdx is the lowest index in \p MemOffset2Idx.
95 ///
96 /// \returns true if the map corresponds to a big endian byte pattern, false
97 /// if it corresponds to a little endian byte pattern, and None otherwise.
98 ///
99 /// E.g. given a 32-bit type x, and x[AddrOffset], the in-memory byte patterns
100 /// are as follows:
101 ///
102 /// AddrOffset   Little endian    Big endian
103 /// 0            0                3
104 /// 1            1                2
105 /// 2            2                1
106 /// 3            3                0
107 static Optional<bool>
108 isBigEndian(const SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
109             int64_t LowestIdx) {
110   // Need at least two byte positions to decide on endianness.
111   unsigned Width = MemOffset2Idx.size();
112   if (Width < 2)
113     return None;
114   bool BigEndian = true, LittleEndian = true;
115   for (unsigned MemOffset = 0; MemOffset < Width; ++ MemOffset) {
116     auto MemOffsetAndIdx = MemOffset2Idx.find(MemOffset);
117     if (MemOffsetAndIdx == MemOffset2Idx.end())
118       return None;
119     const int64_t Idx = MemOffsetAndIdx->second - LowestIdx;
120     assert(Idx >= 0 && "Expected non-negative byte offset?");
121     LittleEndian &= Idx == littleEndianByteAt(Width, MemOffset);
122     BigEndian &= Idx == bigEndianByteAt(Width, MemOffset);
123     if (!BigEndian && !LittleEndian)
124       return None;
125   }
126 
127   assert((BigEndian != LittleEndian) &&
128          "Pattern cannot be both big and little endian!");
129   return BigEndian;
130 }
131 
132 bool CombinerHelper::isPreLegalize() const { return !LI; }
133 
134 bool CombinerHelper::isLegal(const LegalityQuery &Query) const {
135   assert(LI && "Must have LegalizerInfo to query isLegal!");
136   return LI->getAction(Query).Action == LegalizeActions::Legal;
137 }
138 
139 bool CombinerHelper::isLegalOrBeforeLegalizer(
140     const LegalityQuery &Query) const {
141   return isPreLegalize() || isLegal(Query);
142 }
143 
144 bool CombinerHelper::isConstantLegalOrBeforeLegalizer(const LLT Ty) const {
145   if (!Ty.isVector())
146     return isLegalOrBeforeLegalizer({TargetOpcode::G_CONSTANT, {Ty}});
147   // Vector constants are represented as a G_BUILD_VECTOR of scalar G_CONSTANTs.
148   if (isPreLegalize())
149     return true;
150   LLT EltTy = Ty.getElementType();
151   return isLegal({TargetOpcode::G_BUILD_VECTOR, {Ty, EltTy}}) &&
152          isLegal({TargetOpcode::G_CONSTANT, {EltTy}});
153 }
154 
155 void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, Register FromReg,
156                                     Register ToReg) const {
157   Observer.changingAllUsesOfReg(MRI, FromReg);
158 
159   if (MRI.constrainRegAttrs(ToReg, FromReg))
160     MRI.replaceRegWith(FromReg, ToReg);
161   else
162     Builder.buildCopy(ToReg, FromReg);
163 
164   Observer.finishedChangingAllUsesOfReg();
165 }
166 
167 void CombinerHelper::replaceRegOpWith(MachineRegisterInfo &MRI,
168                                       MachineOperand &FromRegOp,
169                                       Register ToReg) const {
170   assert(FromRegOp.getParent() && "Expected an operand in an MI");
171   Observer.changingInstr(*FromRegOp.getParent());
172 
173   FromRegOp.setReg(ToReg);
174 
175   Observer.changedInstr(*FromRegOp.getParent());
176 }
177 
178 void CombinerHelper::replaceOpcodeWith(MachineInstr &FromMI,
179                                        unsigned ToOpcode) const {
180   Observer.changingInstr(FromMI);
181 
182   FromMI.setDesc(Builder.getTII().get(ToOpcode));
183 
184   Observer.changedInstr(FromMI);
185 }
186 
187 const RegisterBank *CombinerHelper::getRegBank(Register Reg) const {
188   return RBI->getRegBank(Reg, MRI, *TRI);
189 }
190 
191 void CombinerHelper::setRegBank(Register Reg, const RegisterBank *RegBank) {
192   if (RegBank)
193     MRI.setRegBank(Reg, *RegBank);
194 }
195 
196 bool CombinerHelper::tryCombineCopy(MachineInstr &MI) {
197   if (matchCombineCopy(MI)) {
198     applyCombineCopy(MI);
199     return true;
200   }
201   return false;
202 }
203 bool CombinerHelper::matchCombineCopy(MachineInstr &MI) {
204   if (MI.getOpcode() != TargetOpcode::COPY)
205     return false;
206   Register DstReg = MI.getOperand(0).getReg();
207   Register SrcReg = MI.getOperand(1).getReg();
208   return canReplaceReg(DstReg, SrcReg, MRI);
209 }
210 void CombinerHelper::applyCombineCopy(MachineInstr &MI) {
211   Register DstReg = MI.getOperand(0).getReg();
212   Register SrcReg = MI.getOperand(1).getReg();
213   MI.eraseFromParent();
214   replaceRegWith(MRI, DstReg, SrcReg);
215 }
216 
217 bool CombinerHelper::tryCombineConcatVectors(MachineInstr &MI) {
218   bool IsUndef = false;
219   SmallVector<Register, 4> Ops;
220   if (matchCombineConcatVectors(MI, IsUndef, Ops)) {
221     applyCombineConcatVectors(MI, IsUndef, Ops);
222     return true;
223   }
224   return false;
225 }
226 
227 bool CombinerHelper::matchCombineConcatVectors(MachineInstr &MI, bool &IsUndef,
228                                                SmallVectorImpl<Register> &Ops) {
229   assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
230          "Invalid instruction");
231   IsUndef = true;
232   MachineInstr *Undef = nullptr;
233 
234   // Walk over all the operands of concat vectors and check if they are
235   // build_vector themselves or undef.
236   // Then collect their operands in Ops.
237   for (const MachineOperand &MO : MI.uses()) {
238     Register Reg = MO.getReg();
239     MachineInstr *Def = MRI.getVRegDef(Reg);
240     assert(Def && "Operand not defined");
241     switch (Def->getOpcode()) {
242     case TargetOpcode::G_BUILD_VECTOR:
243       IsUndef = false;
244       // Remember the operands of the build_vector to fold
245       // them into the yet-to-build flattened concat vectors.
246       for (const MachineOperand &BuildVecMO : Def->uses())
247         Ops.push_back(BuildVecMO.getReg());
248       break;
249     case TargetOpcode::G_IMPLICIT_DEF: {
250       LLT OpType = MRI.getType(Reg);
251       // Keep one undef value for all the undef operands.
252       if (!Undef) {
253         Builder.setInsertPt(*MI.getParent(), MI);
254         Undef = Builder.buildUndef(OpType.getScalarType());
255       }
256       assert(MRI.getType(Undef->getOperand(0).getReg()) ==
257                  OpType.getScalarType() &&
258              "All undefs should have the same type");
259       // Break the undef vector in as many scalar elements as needed
260       // for the flattening.
261       for (unsigned EltIdx = 0, EltEnd = OpType.getNumElements();
262            EltIdx != EltEnd; ++EltIdx)
263         Ops.push_back(Undef->getOperand(0).getReg());
264       break;
265     }
266     default:
267       return false;
268     }
269   }
270   return true;
271 }
272 void CombinerHelper::applyCombineConcatVectors(
273     MachineInstr &MI, bool IsUndef, const ArrayRef<Register> Ops) {
274   // We determined that the concat_vectors can be flatten.
275   // Generate the flattened build_vector.
276   Register DstReg = MI.getOperand(0).getReg();
277   Builder.setInsertPt(*MI.getParent(), MI);
278   Register NewDstReg = MRI.cloneVirtualRegister(DstReg);
279 
280   // Note: IsUndef is sort of redundant. We could have determine it by
281   // checking that at all Ops are undef.  Alternatively, we could have
282   // generate a build_vector of undefs and rely on another combine to
283   // clean that up.  For now, given we already gather this information
284   // in tryCombineConcatVectors, just save compile time and issue the
285   // right thing.
286   if (IsUndef)
287     Builder.buildUndef(NewDstReg);
288   else
289     Builder.buildBuildVector(NewDstReg, Ops);
290   MI.eraseFromParent();
291   replaceRegWith(MRI, DstReg, NewDstReg);
292 }
293 
294 bool CombinerHelper::tryCombineShuffleVector(MachineInstr &MI) {
295   SmallVector<Register, 4> Ops;
296   if (matchCombineShuffleVector(MI, Ops)) {
297     applyCombineShuffleVector(MI, Ops);
298     return true;
299   }
300   return false;
301 }
302 
303 bool CombinerHelper::matchCombineShuffleVector(MachineInstr &MI,
304                                                SmallVectorImpl<Register> &Ops) {
305   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
306          "Invalid instruction kind");
307   LLT DstType = MRI.getType(MI.getOperand(0).getReg());
308   Register Src1 = MI.getOperand(1).getReg();
309   LLT SrcType = MRI.getType(Src1);
310   // As bizarre as it may look, shuffle vector can actually produce
311   // scalar! This is because at the IR level a <1 x ty> shuffle
312   // vector is perfectly valid.
313   unsigned DstNumElts = DstType.isVector() ? DstType.getNumElements() : 1;
314   unsigned SrcNumElts = SrcType.isVector() ? SrcType.getNumElements() : 1;
315 
316   // If the resulting vector is smaller than the size of the source
317   // vectors being concatenated, we won't be able to replace the
318   // shuffle vector into a concat_vectors.
319   //
320   // Note: We may still be able to produce a concat_vectors fed by
321   //       extract_vector_elt and so on. It is less clear that would
322   //       be better though, so don't bother for now.
323   //
324   // If the destination is a scalar, the size of the sources doesn't
325   // matter. we will lower the shuffle to a plain copy. This will
326   // work only if the source and destination have the same size. But
327   // that's covered by the next condition.
328   //
329   // TODO: If the size between the source and destination don't match
330   //       we could still emit an extract vector element in that case.
331   if (DstNumElts < 2 * SrcNumElts && DstNumElts != 1)
332     return false;
333 
334   // Check that the shuffle mask can be broken evenly between the
335   // different sources.
336   if (DstNumElts % SrcNumElts != 0)
337     return false;
338 
339   // Mask length is a multiple of the source vector length.
340   // Check if the shuffle is some kind of concatenation of the input
341   // vectors.
342   unsigned NumConcat = DstNumElts / SrcNumElts;
343   SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
344   ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
345   for (unsigned i = 0; i != DstNumElts; ++i) {
346     int Idx = Mask[i];
347     // Undef value.
348     if (Idx < 0)
349       continue;
350     // Ensure the indices in each SrcType sized piece are sequential and that
351     // the same source is used for the whole piece.
352     if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
353         (ConcatSrcs[i / SrcNumElts] >= 0 &&
354          ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts)))
355       return false;
356     // Remember which source this index came from.
357     ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
358   }
359 
360   // The shuffle is concatenating multiple vectors together.
361   // Collect the different operands for that.
362   Register UndefReg;
363   Register Src2 = MI.getOperand(2).getReg();
364   for (auto Src : ConcatSrcs) {
365     if (Src < 0) {
366       if (!UndefReg) {
367         Builder.setInsertPt(*MI.getParent(), MI);
368         UndefReg = Builder.buildUndef(SrcType).getReg(0);
369       }
370       Ops.push_back(UndefReg);
371     } else if (Src == 0)
372       Ops.push_back(Src1);
373     else
374       Ops.push_back(Src2);
375   }
376   return true;
377 }
378 
379 void CombinerHelper::applyCombineShuffleVector(MachineInstr &MI,
380                                                const ArrayRef<Register> Ops) {
381   Register DstReg = MI.getOperand(0).getReg();
382   Builder.setInsertPt(*MI.getParent(), MI);
383   Register NewDstReg = MRI.cloneVirtualRegister(DstReg);
384 
385   if (Ops.size() == 1)
386     Builder.buildCopy(NewDstReg, Ops[0]);
387   else
388     Builder.buildMerge(NewDstReg, Ops);
389 
390   MI.eraseFromParent();
391   replaceRegWith(MRI, DstReg, NewDstReg);
392 }
393 
394 namespace {
395 
396 /// Select a preference between two uses. CurrentUse is the current preference
397 /// while *ForCandidate is attributes of the candidate under consideration.
398 PreferredTuple ChoosePreferredUse(PreferredTuple &CurrentUse,
399                                   const LLT TyForCandidate,
400                                   unsigned OpcodeForCandidate,
401                                   MachineInstr *MIForCandidate) {
402   if (!CurrentUse.Ty.isValid()) {
403     if (CurrentUse.ExtendOpcode == OpcodeForCandidate ||
404         CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT)
405       return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
406     return CurrentUse;
407   }
408 
409   // We permit the extend to hoist through basic blocks but this is only
410   // sensible if the target has extending loads. If you end up lowering back
411   // into a load and extend during the legalizer then the end result is
412   // hoisting the extend up to the load.
413 
414   // Prefer defined extensions to undefined extensions as these are more
415   // likely to reduce the number of instructions.
416   if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&
417       CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT)
418     return CurrentUse;
419   else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
420            OpcodeForCandidate != TargetOpcode::G_ANYEXT)
421     return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
422 
423   // Prefer sign extensions to zero extensions as sign-extensions tend to be
424   // more expensive.
425   if (CurrentUse.Ty == TyForCandidate) {
426     if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT &&
427         OpcodeForCandidate == TargetOpcode::G_ZEXT)
428       return CurrentUse;
429     else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
430              OpcodeForCandidate == TargetOpcode::G_SEXT)
431       return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
432   }
433 
434   // This is potentially target specific. We've chosen the largest type
435   // because G_TRUNC is usually free. One potential catch with this is that
436   // some targets have a reduced number of larger registers than smaller
437   // registers and this choice potentially increases the live-range for the
438   // larger value.
439   if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) {
440     return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
441   }
442   return CurrentUse;
443 }
444 
445 /// Find a suitable place to insert some instructions and insert them. This
446 /// function accounts for special cases like inserting before a PHI node.
447 /// The current strategy for inserting before PHI's is to duplicate the
448 /// instructions for each predecessor. However, while that's ok for G_TRUNC
449 /// on most targets since it generally requires no code, other targets/cases may
450 /// want to try harder to find a dominating block.
451 static void InsertInsnsWithoutSideEffectsBeforeUse(
452     MachineIRBuilder &Builder, MachineInstr &DefMI, MachineOperand &UseMO,
453     std::function<void(MachineBasicBlock *, MachineBasicBlock::iterator,
454                        MachineOperand &UseMO)>
455         Inserter) {
456   MachineInstr &UseMI = *UseMO.getParent();
457 
458   MachineBasicBlock *InsertBB = UseMI.getParent();
459 
460   // If the use is a PHI then we want the predecessor block instead.
461   if (UseMI.isPHI()) {
462     MachineOperand *PredBB = std::next(&UseMO);
463     InsertBB = PredBB->getMBB();
464   }
465 
466   // If the block is the same block as the def then we want to insert just after
467   // the def instead of at the start of the block.
468   if (InsertBB == DefMI.getParent()) {
469     MachineBasicBlock::iterator InsertPt = &DefMI;
470     Inserter(InsertBB, std::next(InsertPt), UseMO);
471     return;
472   }
473 
474   // Otherwise we want the start of the BB
475   Inserter(InsertBB, InsertBB->getFirstNonPHI(), UseMO);
476 }
477 } // end anonymous namespace
478 
479 bool CombinerHelper::tryCombineExtendingLoads(MachineInstr &MI) {
480   PreferredTuple Preferred;
481   if (matchCombineExtendingLoads(MI, Preferred)) {
482     applyCombineExtendingLoads(MI, Preferred);
483     return true;
484   }
485   return false;
486 }
487 
488 bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI,
489                                                 PreferredTuple &Preferred) {
490   // We match the loads and follow the uses to the extend instead of matching
491   // the extends and following the def to the load. This is because the load
492   // must remain in the same position for correctness (unless we also add code
493   // to find a safe place to sink it) whereas the extend is freely movable.
494   // It also prevents us from duplicating the load for the volatile case or just
495   // for performance.
496   GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(&MI);
497   if (!LoadMI)
498     return false;
499 
500   Register LoadReg = LoadMI->getDstReg();
501 
502   LLT LoadValueTy = MRI.getType(LoadReg);
503   if (!LoadValueTy.isScalar())
504     return false;
505 
506   // Most architectures are going to legalize <s8 loads into at least a 1 byte
507   // load, and the MMOs can only describe memory accesses in multiples of bytes.
508   // If we try to perform extload combining on those, we can end up with
509   // %a(s8) = extload %ptr (load 1 byte from %ptr)
510   // ... which is an illegal extload instruction.
511   if (LoadValueTy.getSizeInBits() < 8)
512     return false;
513 
514   // For non power-of-2 types, they will very likely be legalized into multiple
515   // loads. Don't bother trying to match them into extending loads.
516   if (!isPowerOf2_32(LoadValueTy.getSizeInBits()))
517     return false;
518 
519   // Find the preferred type aside from the any-extends (unless it's the only
520   // one) and non-extending ops. We'll emit an extending load to that type and
521   // and emit a variant of (extend (trunc X)) for the others according to the
522   // relative type sizes. At the same time, pick an extend to use based on the
523   // extend involved in the chosen type.
524   unsigned PreferredOpcode =
525       isa<GLoad>(&MI)
526           ? TargetOpcode::G_ANYEXT
527           : isa<GSExtLoad>(&MI) ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT;
528   Preferred = {LLT(), PreferredOpcode, nullptr};
529   for (auto &UseMI : MRI.use_nodbg_instructions(LoadReg)) {
530     if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
531         UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
532         (UseMI.getOpcode() == TargetOpcode::G_ANYEXT)) {
533       const auto &MMO = LoadMI->getMMO();
534       // For atomics, only form anyextending loads.
535       if (MMO.isAtomic() && UseMI.getOpcode() != TargetOpcode::G_ANYEXT)
536         continue;
537       // Check for legality.
538       if (LI) {
539         LegalityQuery::MemDesc MMDesc(MMO);
540         LLT UseTy = MRI.getType(UseMI.getOperand(0).getReg());
541         LLT SrcTy = MRI.getType(LoadMI->getPointerReg());
542         if (LI->getAction({LoadMI->getOpcode(), {UseTy, SrcTy}, {MMDesc}})
543                 .Action != LegalizeActions::Legal)
544           continue;
545       }
546       Preferred = ChoosePreferredUse(Preferred,
547                                      MRI.getType(UseMI.getOperand(0).getReg()),
548                                      UseMI.getOpcode(), &UseMI);
549     }
550   }
551 
552   // There were no extends
553   if (!Preferred.MI)
554     return false;
555   // It should be impossible to chose an extend without selecting a different
556   // type since by definition the result of an extend is larger.
557   assert(Preferred.Ty != LoadValueTy && "Extending to same type?");
558 
559   LLVM_DEBUG(dbgs() << "Preferred use is: " << *Preferred.MI);
560   return true;
561 }
562 
563 void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI,
564                                                 PreferredTuple &Preferred) {
565   // Rewrite the load to the chosen extending load.
566   Register ChosenDstReg = Preferred.MI->getOperand(0).getReg();
567 
568   // Inserter to insert a truncate back to the original type at a given point
569   // with some basic CSE to limit truncate duplication to one per BB.
570   DenseMap<MachineBasicBlock *, MachineInstr *> EmittedInsns;
571   auto InsertTruncAt = [&](MachineBasicBlock *InsertIntoBB,
572                            MachineBasicBlock::iterator InsertBefore,
573                            MachineOperand &UseMO) {
574     MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB);
575     if (PreviouslyEmitted) {
576       Observer.changingInstr(*UseMO.getParent());
577       UseMO.setReg(PreviouslyEmitted->getOperand(0).getReg());
578       Observer.changedInstr(*UseMO.getParent());
579       return;
580     }
581 
582     Builder.setInsertPt(*InsertIntoBB, InsertBefore);
583     Register NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
584     MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg);
585     EmittedInsns[InsertIntoBB] = NewMI;
586     replaceRegOpWith(MRI, UseMO, NewDstReg);
587   };
588 
589   Observer.changingInstr(MI);
590   MI.setDesc(
591       Builder.getTII().get(Preferred.ExtendOpcode == TargetOpcode::G_SEXT
592                                ? TargetOpcode::G_SEXTLOAD
593                                : Preferred.ExtendOpcode == TargetOpcode::G_ZEXT
594                                      ? TargetOpcode::G_ZEXTLOAD
595                                      : TargetOpcode::G_LOAD));
596 
597   // Rewrite all the uses to fix up the types.
598   auto &LoadValue = MI.getOperand(0);
599   SmallVector<MachineOperand *, 4> Uses;
600   for (auto &UseMO : MRI.use_operands(LoadValue.getReg()))
601     Uses.push_back(&UseMO);
602 
603   for (auto *UseMO : Uses) {
604     MachineInstr *UseMI = UseMO->getParent();
605 
606     // If the extend is compatible with the preferred extend then we should fix
607     // up the type and extend so that it uses the preferred use.
608     if (UseMI->getOpcode() == Preferred.ExtendOpcode ||
609         UseMI->getOpcode() == TargetOpcode::G_ANYEXT) {
610       Register UseDstReg = UseMI->getOperand(0).getReg();
611       MachineOperand &UseSrcMO = UseMI->getOperand(1);
612       const LLT UseDstTy = MRI.getType(UseDstReg);
613       if (UseDstReg != ChosenDstReg) {
614         if (Preferred.Ty == UseDstTy) {
615           // If the use has the same type as the preferred use, then merge
616           // the vregs and erase the extend. For example:
617           //    %1:_(s8) = G_LOAD ...
618           //    %2:_(s32) = G_SEXT %1(s8)
619           //    %3:_(s32) = G_ANYEXT %1(s8)
620           //    ... = ... %3(s32)
621           // rewrites to:
622           //    %2:_(s32) = G_SEXTLOAD ...
623           //    ... = ... %2(s32)
624           replaceRegWith(MRI, UseDstReg, ChosenDstReg);
625           Observer.erasingInstr(*UseMO->getParent());
626           UseMO->getParent()->eraseFromParent();
627         } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) {
628           // If the preferred size is smaller, then keep the extend but extend
629           // from the result of the extending load. For example:
630           //    %1:_(s8) = G_LOAD ...
631           //    %2:_(s32) = G_SEXT %1(s8)
632           //    %3:_(s64) = G_ANYEXT %1(s8)
633           //    ... = ... %3(s64)
634           /// rewrites to:
635           //    %2:_(s32) = G_SEXTLOAD ...
636           //    %3:_(s64) = G_ANYEXT %2:_(s32)
637           //    ... = ... %3(s64)
638           replaceRegOpWith(MRI, UseSrcMO, ChosenDstReg);
639         } else {
640           // If the preferred size is large, then insert a truncate. For
641           // example:
642           //    %1:_(s8) = G_LOAD ...
643           //    %2:_(s64) = G_SEXT %1(s8)
644           //    %3:_(s32) = G_ZEXT %1(s8)
645           //    ... = ... %3(s32)
646           /// rewrites to:
647           //    %2:_(s64) = G_SEXTLOAD ...
648           //    %4:_(s8) = G_TRUNC %2:_(s32)
649           //    %3:_(s64) = G_ZEXT %2:_(s8)
650           //    ... = ... %3(s64)
651           InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO,
652                                                  InsertTruncAt);
653         }
654         continue;
655       }
656       // The use is (one of) the uses of the preferred use we chose earlier.
657       // We're going to update the load to def this value later so just erase
658       // the old extend.
659       Observer.erasingInstr(*UseMO->getParent());
660       UseMO->getParent()->eraseFromParent();
661       continue;
662     }
663 
664     // The use isn't an extend. Truncate back to the type we originally loaded.
665     // This is free on many targets.
666     InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, InsertTruncAt);
667   }
668 
669   MI.getOperand(0).setReg(ChosenDstReg);
670   Observer.changedInstr(MI);
671 }
672 
673 bool CombinerHelper::matchCombineLoadWithAndMask(MachineInstr &MI,
674                                                  BuildFnTy &MatchInfo) {
675   assert(MI.getOpcode() == TargetOpcode::G_AND);
676 
677   // If we have the following code:
678   //  %mask = G_CONSTANT 255
679   //  %ld   = G_LOAD %ptr, (load s16)
680   //  %and  = G_AND %ld, %mask
681   //
682   // Try to fold it into
683   //   %ld = G_ZEXTLOAD %ptr, (load s8)
684 
685   Register Dst = MI.getOperand(0).getReg();
686   if (MRI.getType(Dst).isVector())
687     return false;
688 
689   auto MaybeMask =
690       getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
691   if (!MaybeMask)
692     return false;
693 
694   APInt MaskVal = MaybeMask->Value;
695 
696   if (!MaskVal.isMask())
697     return false;
698 
699   Register SrcReg = MI.getOperand(1).getReg();
700   // Don't use getOpcodeDef() here since intermediate instructions may have
701   // multiple users.
702   GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(MRI.getVRegDef(SrcReg));
703   if (!LoadMI || !MRI.hasOneNonDBGUse(LoadMI->getDstReg()))
704     return false;
705 
706   Register LoadReg = LoadMI->getDstReg();
707   LLT RegTy = MRI.getType(LoadReg);
708   Register PtrReg = LoadMI->getPointerReg();
709   unsigned RegSize = RegTy.getSizeInBits();
710   uint64_t LoadSizeBits = LoadMI->getMemSizeInBits();
711   unsigned MaskSizeBits = MaskVal.countTrailingOnes();
712 
713   // The mask may not be larger than the in-memory type, as it might cover sign
714   // extended bits
715   if (MaskSizeBits > LoadSizeBits)
716     return false;
717 
718   // If the mask covers the whole destination register, there's nothing to
719   // extend
720   if (MaskSizeBits >= RegSize)
721     return false;
722 
723   // Most targets cannot deal with loads of size < 8 and need to re-legalize to
724   // at least byte loads. Avoid creating such loads here
725   if (MaskSizeBits < 8 || !isPowerOf2_32(MaskSizeBits))
726     return false;
727 
728   const MachineMemOperand &MMO = LoadMI->getMMO();
729   LegalityQuery::MemDesc MemDesc(MMO);
730 
731   // Don't modify the memory access size if this is atomic/volatile, but we can
732   // still adjust the opcode to indicate the high bit behavior.
733   if (LoadMI->isSimple())
734     MemDesc.MemoryTy = LLT::scalar(MaskSizeBits);
735   else if (LoadSizeBits > MaskSizeBits || LoadSizeBits == RegSize)
736     return false;
737 
738   // TODO: Could check if it's legal with the reduced or original memory size.
739   if (!isLegalOrBeforeLegalizer(
740           {TargetOpcode::G_ZEXTLOAD, {RegTy, MRI.getType(PtrReg)}, {MemDesc}}))
741     return false;
742 
743   MatchInfo = [=](MachineIRBuilder &B) {
744     B.setInstrAndDebugLoc(*LoadMI);
745     auto &MF = B.getMF();
746     auto PtrInfo = MMO.getPointerInfo();
747     auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, MemDesc.MemoryTy);
748     B.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, Dst, PtrReg, *NewMMO);
749     LoadMI->eraseFromParent();
750   };
751   return true;
752 }
753 
754 bool CombinerHelper::isPredecessor(const MachineInstr &DefMI,
755                                    const MachineInstr &UseMI) {
756   assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
757          "shouldn't consider debug uses");
758   assert(DefMI.getParent() == UseMI.getParent());
759   if (&DefMI == &UseMI)
760     return true;
761   const MachineBasicBlock &MBB = *DefMI.getParent();
762   auto DefOrUse = find_if(MBB, [&DefMI, &UseMI](const MachineInstr &MI) {
763     return &MI == &DefMI || &MI == &UseMI;
764   });
765   if (DefOrUse == MBB.end())
766     llvm_unreachable("Block must contain both DefMI and UseMI!");
767   return &*DefOrUse == &DefMI;
768 }
769 
770 bool CombinerHelper::dominates(const MachineInstr &DefMI,
771                                const MachineInstr &UseMI) {
772   assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
773          "shouldn't consider debug uses");
774   if (MDT)
775     return MDT->dominates(&DefMI, &UseMI);
776   else if (DefMI.getParent() != UseMI.getParent())
777     return false;
778 
779   return isPredecessor(DefMI, UseMI);
780 }
781 
782 bool CombinerHelper::matchSextTruncSextLoad(MachineInstr &MI) {
783   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
784   Register SrcReg = MI.getOperand(1).getReg();
785   Register LoadUser = SrcReg;
786 
787   if (MRI.getType(SrcReg).isVector())
788     return false;
789 
790   Register TruncSrc;
791   if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))))
792     LoadUser = TruncSrc;
793 
794   uint64_t SizeInBits = MI.getOperand(2).getImm();
795   // If the source is a G_SEXTLOAD from the same bit width, then we don't
796   // need any extend at all, just a truncate.
797   if (auto *LoadMI = getOpcodeDef<GSExtLoad>(LoadUser, MRI)) {
798     // If truncating more than the original extended value, abort.
799     auto LoadSizeBits = LoadMI->getMemSizeInBits();
800     if (TruncSrc && MRI.getType(TruncSrc).getSizeInBits() < LoadSizeBits)
801       return false;
802     if (LoadSizeBits == SizeInBits)
803       return true;
804   }
805   return false;
806 }
807 
808 void CombinerHelper::applySextTruncSextLoad(MachineInstr &MI) {
809   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
810   Builder.setInstrAndDebugLoc(MI);
811   Builder.buildCopy(MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
812   MI.eraseFromParent();
813 }
814 
815 bool CombinerHelper::matchSextInRegOfLoad(
816     MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
817   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
818 
819   Register DstReg = MI.getOperand(0).getReg();
820   LLT RegTy = MRI.getType(DstReg);
821 
822   // Only supports scalars for now.
823   if (RegTy.isVector())
824     return false;
825 
826   Register SrcReg = MI.getOperand(1).getReg();
827   auto *LoadDef = getOpcodeDef<GLoad>(SrcReg, MRI);
828   if (!LoadDef || !MRI.hasOneNonDBGUse(DstReg))
829     return false;
830 
831   uint64_t MemBits = LoadDef->getMemSizeInBits();
832 
833   // If the sign extend extends from a narrower width than the load's width,
834   // then we can narrow the load width when we combine to a G_SEXTLOAD.
835   // Avoid widening the load at all.
836   unsigned NewSizeBits = std::min((uint64_t)MI.getOperand(2).getImm(), MemBits);
837 
838   // Don't generate G_SEXTLOADs with a < 1 byte width.
839   if (NewSizeBits < 8)
840     return false;
841   // Don't bother creating a non-power-2 sextload, it will likely be broken up
842   // anyway for most targets.
843   if (!isPowerOf2_32(NewSizeBits))
844     return false;
845 
846   const MachineMemOperand &MMO = LoadDef->getMMO();
847   LegalityQuery::MemDesc MMDesc(MMO);
848 
849   // Don't modify the memory access size if this is atomic/volatile, but we can
850   // still adjust the opcode to indicate the high bit behavior.
851   if (LoadDef->isSimple())
852     MMDesc.MemoryTy = LLT::scalar(NewSizeBits);
853   else if (MemBits > NewSizeBits || MemBits == RegTy.getSizeInBits())
854     return false;
855 
856   // TODO: Could check if it's legal with the reduced or original memory size.
857   if (!isLegalOrBeforeLegalizer({TargetOpcode::G_SEXTLOAD,
858                                  {MRI.getType(LoadDef->getDstReg()),
859                                   MRI.getType(LoadDef->getPointerReg())},
860                                  {MMDesc}}))
861     return false;
862 
863   MatchInfo = std::make_tuple(LoadDef->getDstReg(), NewSizeBits);
864   return true;
865 }
866 
867 void CombinerHelper::applySextInRegOfLoad(
868     MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
869   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
870   Register LoadReg;
871   unsigned ScalarSizeBits;
872   std::tie(LoadReg, ScalarSizeBits) = MatchInfo;
873   GLoad *LoadDef = cast<GLoad>(MRI.getVRegDef(LoadReg));
874 
875   // If we have the following:
876   // %ld = G_LOAD %ptr, (load 2)
877   // %ext = G_SEXT_INREG %ld, 8
878   //    ==>
879   // %ld = G_SEXTLOAD %ptr (load 1)
880 
881   auto &MMO = LoadDef->getMMO();
882   Builder.setInstrAndDebugLoc(*LoadDef);
883   auto &MF = Builder.getMF();
884   auto PtrInfo = MMO.getPointerInfo();
885   auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, ScalarSizeBits / 8);
886   Builder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, MI.getOperand(0).getReg(),
887                          LoadDef->getPointerReg(), *NewMMO);
888   MI.eraseFromParent();
889 }
890 
891 bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr,
892                                             Register &Base, Register &Offset) {
893   auto &MF = *MI.getParent()->getParent();
894   const auto &TLI = *MF.getSubtarget().getTargetLowering();
895 
896 #ifndef NDEBUG
897   unsigned Opcode = MI.getOpcode();
898   assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
899          Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
900 #endif
901 
902   Base = MI.getOperand(1).getReg();
903   MachineInstr *BaseDef = MRI.getUniqueVRegDef(Base);
904   if (BaseDef && BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
905     return false;
906 
907   LLVM_DEBUG(dbgs() << "Searching for post-indexing opportunity for: " << MI);
908   // FIXME: The following use traversal needs a bail out for patholigical cases.
909   for (auto &Use : MRI.use_nodbg_instructions(Base)) {
910     if (Use.getOpcode() != TargetOpcode::G_PTR_ADD)
911       continue;
912 
913     Offset = Use.getOperand(2).getReg();
914     if (!ForceLegalIndexing &&
915         !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ false, MRI)) {
916       LLVM_DEBUG(dbgs() << "    Ignoring candidate with illegal addrmode: "
917                         << Use);
918       continue;
919     }
920 
921     // Make sure the offset calculation is before the potentially indexed op.
922     // FIXME: we really care about dependency here. The offset calculation might
923     // be movable.
924     MachineInstr *OffsetDef = MRI.getUniqueVRegDef(Offset);
925     if (!OffsetDef || !dominates(*OffsetDef, MI)) {
926       LLVM_DEBUG(dbgs() << "    Ignoring candidate with offset after mem-op: "
927                         << Use);
928       continue;
929     }
930 
931     // FIXME: check whether all uses of Base are load/store with foldable
932     // addressing modes. If so, using the normal addr-modes is better than
933     // forming an indexed one.
934 
935     bool MemOpDominatesAddrUses = true;
936     for (auto &PtrAddUse :
937          MRI.use_nodbg_instructions(Use.getOperand(0).getReg())) {
938       if (!dominates(MI, PtrAddUse)) {
939         MemOpDominatesAddrUses = false;
940         break;
941       }
942     }
943 
944     if (!MemOpDominatesAddrUses) {
945       LLVM_DEBUG(
946           dbgs() << "    Ignoring candidate as memop does not dominate uses: "
947                  << Use);
948       continue;
949     }
950 
951     LLVM_DEBUG(dbgs() << "    Found match: " << Use);
952     Addr = Use.getOperand(0).getReg();
953     return true;
954   }
955 
956   return false;
957 }
958 
959 bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr,
960                                            Register &Base, Register &Offset) {
961   auto &MF = *MI.getParent()->getParent();
962   const auto &TLI = *MF.getSubtarget().getTargetLowering();
963 
964 #ifndef NDEBUG
965   unsigned Opcode = MI.getOpcode();
966   assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
967          Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
968 #endif
969 
970   Addr = MI.getOperand(1).getReg();
971   MachineInstr *AddrDef = getOpcodeDef(TargetOpcode::G_PTR_ADD, Addr, MRI);
972   if (!AddrDef || MRI.hasOneNonDBGUse(Addr))
973     return false;
974 
975   Base = AddrDef->getOperand(1).getReg();
976   Offset = AddrDef->getOperand(2).getReg();
977 
978   LLVM_DEBUG(dbgs() << "Found potential pre-indexed load_store: " << MI);
979 
980   if (!ForceLegalIndexing &&
981       !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ true, MRI)) {
982     LLVM_DEBUG(dbgs() << "    Skipping, not legal for target");
983     return false;
984   }
985 
986   MachineInstr *BaseDef = getDefIgnoringCopies(Base, MRI);
987   if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
988     LLVM_DEBUG(dbgs() << "    Skipping, frame index would need copy anyway.");
989     return false;
990   }
991 
992   if (MI.getOpcode() == TargetOpcode::G_STORE) {
993     // Would require a copy.
994     if (Base == MI.getOperand(0).getReg()) {
995       LLVM_DEBUG(dbgs() << "    Skipping, storing base so need copy anyway.");
996       return false;
997     }
998 
999     // We're expecting one use of Addr in MI, but it could also be the
1000     // value stored, which isn't actually dominated by the instruction.
1001     if (MI.getOperand(0).getReg() == Addr) {
1002       LLVM_DEBUG(dbgs() << "    Skipping, does not dominate all addr uses");
1003       return false;
1004     }
1005   }
1006 
1007   // FIXME: check whether all uses of the base pointer are constant PtrAdds.
1008   // That might allow us to end base's liveness here by adjusting the constant.
1009 
1010   for (auto &UseMI : MRI.use_nodbg_instructions(Addr)) {
1011     if (!dominates(MI, UseMI)) {
1012       LLVM_DEBUG(dbgs() << "    Skipping, does not dominate all addr uses.");
1013       return false;
1014     }
1015   }
1016 
1017   return true;
1018 }
1019 
1020 bool CombinerHelper::tryCombineIndexedLoadStore(MachineInstr &MI) {
1021   IndexedLoadStoreMatchInfo MatchInfo;
1022   if (matchCombineIndexedLoadStore(MI, MatchInfo)) {
1023     applyCombineIndexedLoadStore(MI, MatchInfo);
1024     return true;
1025   }
1026   return false;
1027 }
1028 
1029 bool CombinerHelper::matchCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) {
1030   unsigned Opcode = MI.getOpcode();
1031   if (Opcode != TargetOpcode::G_LOAD && Opcode != TargetOpcode::G_SEXTLOAD &&
1032       Opcode != TargetOpcode::G_ZEXTLOAD && Opcode != TargetOpcode::G_STORE)
1033     return false;
1034 
1035   // For now, no targets actually support these opcodes so don't waste time
1036   // running these unless we're forced to for testing.
1037   if (!ForceLegalIndexing)
1038     return false;
1039 
1040   MatchInfo.IsPre = findPreIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
1041                                           MatchInfo.Offset);
1042   if (!MatchInfo.IsPre &&
1043       !findPostIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
1044                               MatchInfo.Offset))
1045     return false;
1046 
1047   return true;
1048 }
1049 
1050 void CombinerHelper::applyCombineIndexedLoadStore(
1051     MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) {
1052   MachineInstr &AddrDef = *MRI.getUniqueVRegDef(MatchInfo.Addr);
1053   MachineIRBuilder MIRBuilder(MI);
1054   unsigned Opcode = MI.getOpcode();
1055   bool IsStore = Opcode == TargetOpcode::G_STORE;
1056   unsigned NewOpcode;
1057   switch (Opcode) {
1058   case TargetOpcode::G_LOAD:
1059     NewOpcode = TargetOpcode::G_INDEXED_LOAD;
1060     break;
1061   case TargetOpcode::G_SEXTLOAD:
1062     NewOpcode = TargetOpcode::G_INDEXED_SEXTLOAD;
1063     break;
1064   case TargetOpcode::G_ZEXTLOAD:
1065     NewOpcode = TargetOpcode::G_INDEXED_ZEXTLOAD;
1066     break;
1067   case TargetOpcode::G_STORE:
1068     NewOpcode = TargetOpcode::G_INDEXED_STORE;
1069     break;
1070   default:
1071     llvm_unreachable("Unknown load/store opcode");
1072   }
1073 
1074   auto MIB = MIRBuilder.buildInstr(NewOpcode);
1075   if (IsStore) {
1076     MIB.addDef(MatchInfo.Addr);
1077     MIB.addUse(MI.getOperand(0).getReg());
1078   } else {
1079     MIB.addDef(MI.getOperand(0).getReg());
1080     MIB.addDef(MatchInfo.Addr);
1081   }
1082 
1083   MIB.addUse(MatchInfo.Base);
1084   MIB.addUse(MatchInfo.Offset);
1085   MIB.addImm(MatchInfo.IsPre);
1086   MI.eraseFromParent();
1087   AddrDef.eraseFromParent();
1088 
1089   LLVM_DEBUG(dbgs() << "    Combinined to indexed operation");
1090 }
1091 
1092 bool CombinerHelper::matchCombineDivRem(MachineInstr &MI,
1093                                         MachineInstr *&OtherMI) {
1094   unsigned Opcode = MI.getOpcode();
1095   bool IsDiv, IsSigned;
1096 
1097   switch (Opcode) {
1098   default:
1099     llvm_unreachable("Unexpected opcode!");
1100   case TargetOpcode::G_SDIV:
1101   case TargetOpcode::G_UDIV: {
1102     IsDiv = true;
1103     IsSigned = Opcode == TargetOpcode::G_SDIV;
1104     break;
1105   }
1106   case TargetOpcode::G_SREM:
1107   case TargetOpcode::G_UREM: {
1108     IsDiv = false;
1109     IsSigned = Opcode == TargetOpcode::G_SREM;
1110     break;
1111   }
1112   }
1113 
1114   Register Src1 = MI.getOperand(1).getReg();
1115   unsigned DivOpcode, RemOpcode, DivremOpcode;
1116   if (IsSigned) {
1117     DivOpcode = TargetOpcode::G_SDIV;
1118     RemOpcode = TargetOpcode::G_SREM;
1119     DivremOpcode = TargetOpcode::G_SDIVREM;
1120   } else {
1121     DivOpcode = TargetOpcode::G_UDIV;
1122     RemOpcode = TargetOpcode::G_UREM;
1123     DivremOpcode = TargetOpcode::G_UDIVREM;
1124   }
1125 
1126   if (!isLegalOrBeforeLegalizer({DivremOpcode, {MRI.getType(Src1)}}))
1127     return false;
1128 
1129   // Combine:
1130   //   %div:_ = G_[SU]DIV %src1:_, %src2:_
1131   //   %rem:_ = G_[SU]REM %src1:_, %src2:_
1132   // into:
1133   //  %div:_, %rem:_ = G_[SU]DIVREM %src1:_, %src2:_
1134 
1135   // Combine:
1136   //   %rem:_ = G_[SU]REM %src1:_, %src2:_
1137   //   %div:_ = G_[SU]DIV %src1:_, %src2:_
1138   // into:
1139   //  %div:_, %rem:_ = G_[SU]DIVREM %src1:_, %src2:_
1140 
1141   for (auto &UseMI : MRI.use_nodbg_instructions(Src1)) {
1142     if (MI.getParent() == UseMI.getParent() &&
1143         ((IsDiv && UseMI.getOpcode() == RemOpcode) ||
1144          (!IsDiv && UseMI.getOpcode() == DivOpcode)) &&
1145         matchEqualDefs(MI.getOperand(2), UseMI.getOperand(2)) &&
1146         matchEqualDefs(MI.getOperand(1), UseMI.getOperand(1))) {
1147       OtherMI = &UseMI;
1148       return true;
1149     }
1150   }
1151 
1152   return false;
1153 }
1154 
1155 void CombinerHelper::applyCombineDivRem(MachineInstr &MI,
1156                                         MachineInstr *&OtherMI) {
1157   unsigned Opcode = MI.getOpcode();
1158   assert(OtherMI && "OtherMI shouldn't be empty.");
1159 
1160   Register DestDivReg, DestRemReg;
1161   if (Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_UDIV) {
1162     DestDivReg = MI.getOperand(0).getReg();
1163     DestRemReg = OtherMI->getOperand(0).getReg();
1164   } else {
1165     DestDivReg = OtherMI->getOperand(0).getReg();
1166     DestRemReg = MI.getOperand(0).getReg();
1167   }
1168 
1169   bool IsSigned =
1170       Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_SREM;
1171 
1172   // Check which instruction is first in the block so we don't break def-use
1173   // deps by "moving" the instruction incorrectly.
1174   if (dominates(MI, *OtherMI))
1175     Builder.setInstrAndDebugLoc(MI);
1176   else
1177     Builder.setInstrAndDebugLoc(*OtherMI);
1178 
1179   Builder.buildInstr(IsSigned ? TargetOpcode::G_SDIVREM
1180                               : TargetOpcode::G_UDIVREM,
1181                      {DestDivReg, DestRemReg},
1182                      {MI.getOperand(1).getReg(), MI.getOperand(2).getReg()});
1183   MI.eraseFromParent();
1184   OtherMI->eraseFromParent();
1185 }
1186 
1187 bool CombinerHelper::matchOptBrCondByInvertingCond(MachineInstr &MI,
1188                                                    MachineInstr *&BrCond) {
1189   assert(MI.getOpcode() == TargetOpcode::G_BR);
1190 
1191   // Try to match the following:
1192   // bb1:
1193   //   G_BRCOND %c1, %bb2
1194   //   G_BR %bb3
1195   // bb2:
1196   // ...
1197   // bb3:
1198 
1199   // The above pattern does not have a fall through to the successor bb2, always
1200   // resulting in a branch no matter which path is taken. Here we try to find
1201   // and replace that pattern with conditional branch to bb3 and otherwise
1202   // fallthrough to bb2. This is generally better for branch predictors.
1203 
1204   MachineBasicBlock *MBB = MI.getParent();
1205   MachineBasicBlock::iterator BrIt(MI);
1206   if (BrIt == MBB->begin())
1207     return false;
1208   assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator");
1209 
1210   BrCond = &*std::prev(BrIt);
1211   if (BrCond->getOpcode() != TargetOpcode::G_BRCOND)
1212     return false;
1213 
1214   // Check that the next block is the conditional branch target. Also make sure
1215   // that it isn't the same as the G_BR's target (otherwise, this will loop.)
1216   MachineBasicBlock *BrCondTarget = BrCond->getOperand(1).getMBB();
1217   return BrCondTarget != MI.getOperand(0).getMBB() &&
1218          MBB->isLayoutSuccessor(BrCondTarget);
1219 }
1220 
1221 void CombinerHelper::applyOptBrCondByInvertingCond(MachineInstr &MI,
1222                                                    MachineInstr *&BrCond) {
1223   MachineBasicBlock *BrTarget = MI.getOperand(0).getMBB();
1224   Builder.setInstrAndDebugLoc(*BrCond);
1225   LLT Ty = MRI.getType(BrCond->getOperand(0).getReg());
1226   // FIXME: Does int/fp matter for this? If so, we might need to restrict
1227   // this to i1 only since we might not know for sure what kind of
1228   // compare generated the condition value.
1229   auto True = Builder.buildConstant(
1230       Ty, getICmpTrueVal(getTargetLowering(), false, false));
1231   auto Xor = Builder.buildXor(Ty, BrCond->getOperand(0), True);
1232 
1233   auto *FallthroughBB = BrCond->getOperand(1).getMBB();
1234   Observer.changingInstr(MI);
1235   MI.getOperand(0).setMBB(FallthroughBB);
1236   Observer.changedInstr(MI);
1237 
1238   // Change the conditional branch to use the inverted condition and
1239   // new target block.
1240   Observer.changingInstr(*BrCond);
1241   BrCond->getOperand(0).setReg(Xor.getReg(0));
1242   BrCond->getOperand(1).setMBB(BrTarget);
1243   Observer.changedInstr(*BrCond);
1244 }
1245 
1246 static Type *getTypeForLLT(LLT Ty, LLVMContext &C) {
1247   if (Ty.isVector())
1248     return FixedVectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
1249                                 Ty.getNumElements());
1250   return IntegerType::get(C, Ty.getSizeInBits());
1251 }
1252 
1253 bool CombinerHelper::tryEmitMemcpyInline(MachineInstr &MI) {
1254   MachineIRBuilder HelperBuilder(MI);
1255   GISelObserverWrapper DummyObserver;
1256   LegalizerHelper Helper(HelperBuilder.getMF(), DummyObserver, HelperBuilder);
1257   return Helper.lowerMemcpyInline(MI) ==
1258          LegalizerHelper::LegalizeResult::Legalized;
1259 }
1260 
1261 bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
1262   MachineIRBuilder HelperBuilder(MI);
1263   GISelObserverWrapper DummyObserver;
1264   LegalizerHelper Helper(HelperBuilder.getMF(), DummyObserver, HelperBuilder);
1265   return Helper.lowerMemCpyFamily(MI, MaxLen) ==
1266          LegalizerHelper::LegalizeResult::Legalized;
1267 }
1268 
1269 static Optional<APFloat> constantFoldFpUnary(unsigned Opcode, LLT DstTy,
1270                                              const Register Op,
1271                                              const MachineRegisterInfo &MRI) {
1272   const ConstantFP *MaybeCst = getConstantFPVRegVal(Op, MRI);
1273   if (!MaybeCst)
1274     return None;
1275 
1276   APFloat V = MaybeCst->getValueAPF();
1277   switch (Opcode) {
1278   default:
1279     llvm_unreachable("Unexpected opcode!");
1280   case TargetOpcode::G_FNEG: {
1281     V.changeSign();
1282     return V;
1283   }
1284   case TargetOpcode::G_FABS: {
1285     V.clearSign();
1286     return V;
1287   }
1288   case TargetOpcode::G_FPTRUNC:
1289     break;
1290   case TargetOpcode::G_FSQRT: {
1291     bool Unused;
1292     V.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &Unused);
1293     V = APFloat(sqrt(V.convertToDouble()));
1294     break;
1295   }
1296   case TargetOpcode::G_FLOG2: {
1297     bool Unused;
1298     V.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &Unused);
1299     V = APFloat(log2(V.convertToDouble()));
1300     break;
1301   }
1302   }
1303   // Convert `APFloat` to appropriate IEEE type depending on `DstTy`. Otherwise,
1304   // `buildFConstant` will assert on size mismatch. Only `G_FPTRUNC`, `G_FSQRT`,
1305   // and `G_FLOG2` reach here.
1306   bool Unused;
1307   V.convert(getFltSemanticForLLT(DstTy), APFloat::rmNearestTiesToEven, &Unused);
1308   return V;
1309 }
1310 
1311 bool CombinerHelper::matchCombineConstantFoldFpUnary(MachineInstr &MI,
1312                                                      Optional<APFloat> &Cst) {
1313   Register DstReg = MI.getOperand(0).getReg();
1314   Register SrcReg = MI.getOperand(1).getReg();
1315   LLT DstTy = MRI.getType(DstReg);
1316   Cst = constantFoldFpUnary(MI.getOpcode(), DstTy, SrcReg, MRI);
1317   return Cst.has_value();
1318 }
1319 
1320 void CombinerHelper::applyCombineConstantFoldFpUnary(MachineInstr &MI,
1321                                                      Optional<APFloat> &Cst) {
1322   assert(Cst && "Optional is unexpectedly empty!");
1323   Builder.setInstrAndDebugLoc(MI);
1324   MachineFunction &MF = Builder.getMF();
1325   auto *FPVal = ConstantFP::get(MF.getFunction().getContext(), *Cst);
1326   Register DstReg = MI.getOperand(0).getReg();
1327   Builder.buildFConstant(DstReg, *FPVal);
1328   MI.eraseFromParent();
1329 }
1330 
1331 bool CombinerHelper::matchPtrAddImmedChain(MachineInstr &MI,
1332                                            PtrAddChain &MatchInfo) {
1333   // We're trying to match the following pattern:
1334   //   %t1 = G_PTR_ADD %base, G_CONSTANT imm1
1335   //   %root = G_PTR_ADD %t1, G_CONSTANT imm2
1336   // -->
1337   //   %root = G_PTR_ADD %base, G_CONSTANT (imm1 + imm2)
1338 
1339   if (MI.getOpcode() != TargetOpcode::G_PTR_ADD)
1340     return false;
1341 
1342   Register Add2 = MI.getOperand(1).getReg();
1343   Register Imm1 = MI.getOperand(2).getReg();
1344   auto MaybeImmVal = getIConstantVRegValWithLookThrough(Imm1, MRI);
1345   if (!MaybeImmVal)
1346     return false;
1347 
1348   MachineInstr *Add2Def = MRI.getVRegDef(Add2);
1349   if (!Add2Def || Add2Def->getOpcode() != TargetOpcode::G_PTR_ADD)
1350     return false;
1351 
1352   Register Base = Add2Def->getOperand(1).getReg();
1353   Register Imm2 = Add2Def->getOperand(2).getReg();
1354   auto MaybeImm2Val = getIConstantVRegValWithLookThrough(Imm2, MRI);
1355   if (!MaybeImm2Val)
1356     return false;
1357 
1358   // Check if the new combined immediate forms an illegal addressing mode.
1359   // Do not combine if it was legal before but would get illegal.
1360   // To do so, we need to find a load/store user of the pointer to get
1361   // the access type.
1362   Type *AccessTy = nullptr;
1363   auto &MF = *MI.getMF();
1364   for (auto &UseMI : MRI.use_nodbg_instructions(MI.getOperand(0).getReg())) {
1365     if (auto *LdSt = dyn_cast<GLoadStore>(&UseMI)) {
1366       AccessTy = getTypeForLLT(MRI.getType(LdSt->getReg(0)),
1367                                MF.getFunction().getContext());
1368       break;
1369     }
1370   }
1371   TargetLoweringBase::AddrMode AMNew;
1372   APInt CombinedImm = MaybeImmVal->Value + MaybeImm2Val->Value;
1373   AMNew.BaseOffs = CombinedImm.getSExtValue();
1374   if (AccessTy) {
1375     AMNew.HasBaseReg = true;
1376     TargetLoweringBase::AddrMode AMOld;
1377     AMOld.BaseOffs = MaybeImm2Val->Value.getSExtValue();
1378     AMOld.HasBaseReg = true;
1379     unsigned AS = MRI.getType(Add2).getAddressSpace();
1380     const auto &TLI = *MF.getSubtarget().getTargetLowering();
1381     if (TLI.isLegalAddressingMode(MF.getDataLayout(), AMOld, AccessTy, AS) &&
1382         !TLI.isLegalAddressingMode(MF.getDataLayout(), AMNew, AccessTy, AS))
1383       return false;
1384   }
1385 
1386   // Pass the combined immediate to the apply function.
1387   MatchInfo.Imm = AMNew.BaseOffs;
1388   MatchInfo.Base = Base;
1389   MatchInfo.Bank = getRegBank(Imm2);
1390   return true;
1391 }
1392 
1393 void CombinerHelper::applyPtrAddImmedChain(MachineInstr &MI,
1394                                            PtrAddChain &MatchInfo) {
1395   assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected G_PTR_ADD");
1396   MachineIRBuilder MIB(MI);
1397   LLT OffsetTy = MRI.getType(MI.getOperand(2).getReg());
1398   auto NewOffset = MIB.buildConstant(OffsetTy, MatchInfo.Imm);
1399   setRegBank(NewOffset.getReg(0), MatchInfo.Bank);
1400   Observer.changingInstr(MI);
1401   MI.getOperand(1).setReg(MatchInfo.Base);
1402   MI.getOperand(2).setReg(NewOffset.getReg(0));
1403   Observer.changedInstr(MI);
1404 }
1405 
1406 bool CombinerHelper::matchShiftImmedChain(MachineInstr &MI,
1407                                           RegisterImmPair &MatchInfo) {
1408   // We're trying to match the following pattern with any of
1409   // G_SHL/G_ASHR/G_LSHR/G_SSHLSAT/G_USHLSAT shift instructions:
1410   //   %t1 = SHIFT %base, G_CONSTANT imm1
1411   //   %root = SHIFT %t1, G_CONSTANT imm2
1412   // -->
1413   //   %root = SHIFT %base, G_CONSTANT (imm1 + imm2)
1414 
1415   unsigned Opcode = MI.getOpcode();
1416   assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1417           Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
1418           Opcode == TargetOpcode::G_USHLSAT) &&
1419          "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");
1420 
1421   Register Shl2 = MI.getOperand(1).getReg();
1422   Register Imm1 = MI.getOperand(2).getReg();
1423   auto MaybeImmVal = getIConstantVRegValWithLookThrough(Imm1, MRI);
1424   if (!MaybeImmVal)
1425     return false;
1426 
1427   MachineInstr *Shl2Def = MRI.getUniqueVRegDef(Shl2);
1428   if (Shl2Def->getOpcode() != Opcode)
1429     return false;
1430 
1431   Register Base = Shl2Def->getOperand(1).getReg();
1432   Register Imm2 = Shl2Def->getOperand(2).getReg();
1433   auto MaybeImm2Val = getIConstantVRegValWithLookThrough(Imm2, MRI);
1434   if (!MaybeImm2Val)
1435     return false;
1436 
1437   // Pass the combined immediate to the apply function.
1438   MatchInfo.Imm =
1439       (MaybeImmVal->Value.getSExtValue() + MaybeImm2Val->Value).getSExtValue();
1440   MatchInfo.Reg = Base;
1441 
1442   // There is no simple replacement for a saturating unsigned left shift that
1443   // exceeds the scalar size.
1444   if (Opcode == TargetOpcode::G_USHLSAT &&
1445       MatchInfo.Imm >= MRI.getType(Shl2).getScalarSizeInBits())
1446     return false;
1447 
1448   return true;
1449 }
1450 
1451 void CombinerHelper::applyShiftImmedChain(MachineInstr &MI,
1452                                           RegisterImmPair &MatchInfo) {
1453   unsigned Opcode = MI.getOpcode();
1454   assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1455           Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
1456           Opcode == TargetOpcode::G_USHLSAT) &&
1457          "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");
1458 
1459   Builder.setInstrAndDebugLoc(MI);
1460   LLT Ty = MRI.getType(MI.getOperand(1).getReg());
1461   unsigned const ScalarSizeInBits = Ty.getScalarSizeInBits();
1462   auto Imm = MatchInfo.Imm;
1463 
1464   if (Imm >= ScalarSizeInBits) {
1465     // Any logical shift that exceeds scalar size will produce zero.
1466     if (Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_LSHR) {
1467       Builder.buildConstant(MI.getOperand(0), 0);
1468       MI.eraseFromParent();
1469       return;
1470     }
1471     // Arithmetic shift and saturating signed left shift have no effect beyond
1472     // scalar size.
1473     Imm = ScalarSizeInBits - 1;
1474   }
1475 
1476   LLT ImmTy = MRI.getType(MI.getOperand(2).getReg());
1477   Register NewImm = Builder.buildConstant(ImmTy, Imm).getReg(0);
1478   Observer.changingInstr(MI);
1479   MI.getOperand(1).setReg(MatchInfo.Reg);
1480   MI.getOperand(2).setReg(NewImm);
1481   Observer.changedInstr(MI);
1482 }
1483 
1484 bool CombinerHelper::matchShiftOfShiftedLogic(MachineInstr &MI,
1485                                               ShiftOfShiftedLogic &MatchInfo) {
1486   // We're trying to match the following pattern with any of
1487   // G_SHL/G_ASHR/G_LSHR/G_USHLSAT/G_SSHLSAT shift instructions in combination
1488   // with any of G_AND/G_OR/G_XOR logic instructions.
1489   //   %t1 = SHIFT %X, G_CONSTANT C0
1490   //   %t2 = LOGIC %t1, %Y
1491   //   %root = SHIFT %t2, G_CONSTANT C1
1492   // -->
1493   //   %t3 = SHIFT %X, G_CONSTANT (C0+C1)
1494   //   %t4 = SHIFT %Y, G_CONSTANT C1
1495   //   %root = LOGIC %t3, %t4
1496   unsigned ShiftOpcode = MI.getOpcode();
1497   assert((ShiftOpcode == TargetOpcode::G_SHL ||
1498           ShiftOpcode == TargetOpcode::G_ASHR ||
1499           ShiftOpcode == TargetOpcode::G_LSHR ||
1500           ShiftOpcode == TargetOpcode::G_USHLSAT ||
1501           ShiftOpcode == TargetOpcode::G_SSHLSAT) &&
1502          "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");
1503 
1504   // Match a one-use bitwise logic op.
1505   Register LogicDest = MI.getOperand(1).getReg();
1506   if (!MRI.hasOneNonDBGUse(LogicDest))
1507     return false;
1508 
1509   MachineInstr *LogicMI = MRI.getUniqueVRegDef(LogicDest);
1510   unsigned LogicOpcode = LogicMI->getOpcode();
1511   if (LogicOpcode != TargetOpcode::G_AND && LogicOpcode != TargetOpcode::G_OR &&
1512       LogicOpcode != TargetOpcode::G_XOR)
1513     return false;
1514 
1515   // Find a matching one-use shift by constant.
1516   const Register C1 = MI.getOperand(2).getReg();
1517   auto MaybeImmVal = getIConstantVRegValWithLookThrough(C1, MRI);
1518   if (!MaybeImmVal)
1519     return false;
1520 
1521   const uint64_t C1Val = MaybeImmVal->Value.getZExtValue();
1522 
1523   auto matchFirstShift = [&](const MachineInstr *MI, uint64_t &ShiftVal) {
1524     // Shift should match previous one and should be a one-use.
1525     if (MI->getOpcode() != ShiftOpcode ||
1526         !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
1527       return false;
1528 
1529     // Must be a constant.
1530     auto MaybeImmVal =
1531         getIConstantVRegValWithLookThrough(MI->getOperand(2).getReg(), MRI);
1532     if (!MaybeImmVal)
1533       return false;
1534 
1535     ShiftVal = MaybeImmVal->Value.getSExtValue();
1536     return true;
1537   };
1538 
1539   // Logic ops are commutative, so check each operand for a match.
1540   Register LogicMIReg1 = LogicMI->getOperand(1).getReg();
1541   MachineInstr *LogicMIOp1 = MRI.getUniqueVRegDef(LogicMIReg1);
1542   Register LogicMIReg2 = LogicMI->getOperand(2).getReg();
1543   MachineInstr *LogicMIOp2 = MRI.getUniqueVRegDef(LogicMIReg2);
1544   uint64_t C0Val;
1545 
1546   if (matchFirstShift(LogicMIOp1, C0Val)) {
1547     MatchInfo.LogicNonShiftReg = LogicMIReg2;
1548     MatchInfo.Shift2 = LogicMIOp1;
1549   } else if (matchFirstShift(LogicMIOp2, C0Val)) {
1550     MatchInfo.LogicNonShiftReg = LogicMIReg1;
1551     MatchInfo.Shift2 = LogicMIOp2;
1552   } else
1553     return false;
1554 
1555   MatchInfo.ValSum = C0Val + C1Val;
1556 
1557   // The fold is not valid if the sum of the shift values exceeds bitwidth.
1558   if (MatchInfo.ValSum >= MRI.getType(LogicDest).getScalarSizeInBits())
1559     return false;
1560 
1561   MatchInfo.Logic = LogicMI;
1562   return true;
1563 }
1564 
1565 void CombinerHelper::applyShiftOfShiftedLogic(MachineInstr &MI,
1566                                               ShiftOfShiftedLogic &MatchInfo) {
1567   unsigned Opcode = MI.getOpcode();
1568   assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1569           Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_USHLSAT ||
1570           Opcode == TargetOpcode::G_SSHLSAT) &&
1571          "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");
1572 
1573   LLT ShlType = MRI.getType(MI.getOperand(2).getReg());
1574   LLT DestType = MRI.getType(MI.getOperand(0).getReg());
1575   Builder.setInstrAndDebugLoc(MI);
1576 
1577   Register Const = Builder.buildConstant(ShlType, MatchInfo.ValSum).getReg(0);
1578 
1579   Register Shift1Base = MatchInfo.Shift2->getOperand(1).getReg();
1580   Register Shift1 =
1581       Builder.buildInstr(Opcode, {DestType}, {Shift1Base, Const}).getReg(0);
1582 
1583   Register Shift2Const = MI.getOperand(2).getReg();
1584   Register Shift2 = Builder
1585                         .buildInstr(Opcode, {DestType},
1586                                     {MatchInfo.LogicNonShiftReg, Shift2Const})
1587                         .getReg(0);
1588 
1589   Register Dest = MI.getOperand(0).getReg();
1590   Builder.buildInstr(MatchInfo.Logic->getOpcode(), {Dest}, {Shift1, Shift2});
1591 
1592   // These were one use so it's safe to remove them.
1593   MatchInfo.Shift2->eraseFromParent();
1594   MatchInfo.Logic->eraseFromParent();
1595 
1596   MI.eraseFromParent();
1597 }
1598 
1599 bool CombinerHelper::matchCombineMulToShl(MachineInstr &MI,
1600                                           unsigned &ShiftVal) {
1601   assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
1602   auto MaybeImmVal =
1603       getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
1604   if (!MaybeImmVal)
1605     return false;
1606 
1607   ShiftVal = MaybeImmVal->Value.exactLogBase2();
1608   return (static_cast<int32_t>(ShiftVal) != -1);
1609 }
1610 
1611 void CombinerHelper::applyCombineMulToShl(MachineInstr &MI,
1612                                           unsigned &ShiftVal) {
1613   assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
1614   MachineIRBuilder MIB(MI);
1615   LLT ShiftTy = MRI.getType(MI.getOperand(0).getReg());
1616   auto ShiftCst = MIB.buildConstant(ShiftTy, ShiftVal);
1617   Observer.changingInstr(MI);
1618   MI.setDesc(MIB.getTII().get(TargetOpcode::G_SHL));
1619   MI.getOperand(2).setReg(ShiftCst.getReg(0));
1620   Observer.changedInstr(MI);
1621 }
1622 
1623 // shl ([sza]ext x), y => zext (shl x, y), if shift does not overflow source
1624 bool CombinerHelper::matchCombineShlOfExtend(MachineInstr &MI,
1625                                              RegisterImmPair &MatchData) {
1626   assert(MI.getOpcode() == TargetOpcode::G_SHL && KB);
1627 
1628   Register LHS = MI.getOperand(1).getReg();
1629 
1630   Register ExtSrc;
1631   if (!mi_match(LHS, MRI, m_GAnyExt(m_Reg(ExtSrc))) &&
1632       !mi_match(LHS, MRI, m_GZExt(m_Reg(ExtSrc))) &&
1633       !mi_match(LHS, MRI, m_GSExt(m_Reg(ExtSrc))))
1634     return false;
1635 
1636   // TODO: Should handle vector splat.
1637   Register RHS = MI.getOperand(2).getReg();
1638   auto MaybeShiftAmtVal = getIConstantVRegValWithLookThrough(RHS, MRI);
1639   if (!MaybeShiftAmtVal)
1640     return false;
1641 
1642   if (LI) {
1643     LLT SrcTy = MRI.getType(ExtSrc);
1644 
1645     // We only really care about the legality with the shifted value. We can
1646     // pick any type the constant shift amount, so ask the target what to
1647     // use. Otherwise we would have to guess and hope it is reported as legal.
1648     LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(SrcTy);
1649     if (!isLegalOrBeforeLegalizer({TargetOpcode::G_SHL, {SrcTy, ShiftAmtTy}}))
1650       return false;
1651   }
1652 
1653   int64_t ShiftAmt = MaybeShiftAmtVal->Value.getSExtValue();
1654   MatchData.Reg = ExtSrc;
1655   MatchData.Imm = ShiftAmt;
1656 
1657   unsigned MinLeadingZeros = KB->getKnownZeroes(ExtSrc).countLeadingOnes();
1658   return MinLeadingZeros >= ShiftAmt;
1659 }
1660 
1661 void CombinerHelper::applyCombineShlOfExtend(MachineInstr &MI,
1662                                              const RegisterImmPair &MatchData) {
1663   Register ExtSrcReg = MatchData.Reg;
1664   int64_t ShiftAmtVal = MatchData.Imm;
1665 
1666   LLT ExtSrcTy = MRI.getType(ExtSrcReg);
1667   Builder.setInstrAndDebugLoc(MI);
1668   auto ShiftAmt = Builder.buildConstant(ExtSrcTy, ShiftAmtVal);
1669   auto NarrowShift =
1670       Builder.buildShl(ExtSrcTy, ExtSrcReg, ShiftAmt, MI.getFlags());
1671   Builder.buildZExt(MI.getOperand(0), NarrowShift);
1672   MI.eraseFromParent();
1673 }
1674 
1675 bool CombinerHelper::matchCombineMergeUnmerge(MachineInstr &MI,
1676                                               Register &MatchInfo) {
1677   GMerge &Merge = cast<GMerge>(MI);
1678   SmallVector<Register, 16> MergedValues;
1679   for (unsigned I = 0; I < Merge.getNumSources(); ++I)
1680     MergedValues.emplace_back(Merge.getSourceReg(I));
1681 
1682   auto *Unmerge = getOpcodeDef<GUnmerge>(MergedValues[0], MRI);
1683   if (!Unmerge || Unmerge->getNumDefs() != Merge.getNumSources())
1684     return false;
1685 
1686   for (unsigned I = 0; I < MergedValues.size(); ++I)
1687     if (MergedValues[I] != Unmerge->getReg(I))
1688       return false;
1689 
1690   MatchInfo = Unmerge->getSourceReg();
1691   return true;
1692 }
1693 
1694 static Register peekThroughBitcast(Register Reg,
1695                                    const MachineRegisterInfo &MRI) {
1696   while (mi_match(Reg, MRI, m_GBitcast(m_Reg(Reg))))
1697     ;
1698 
1699   return Reg;
1700 }
1701 
1702 bool CombinerHelper::matchCombineUnmergeMergeToPlainValues(
1703     MachineInstr &MI, SmallVectorImpl<Register> &Operands) {
1704   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1705          "Expected an unmerge");
1706   auto &Unmerge = cast<GUnmerge>(MI);
1707   Register SrcReg = peekThroughBitcast(Unmerge.getSourceReg(), MRI);
1708 
1709   auto *SrcInstr = getOpcodeDef<GMergeLikeOp>(SrcReg, MRI);
1710   if (!SrcInstr)
1711     return false;
1712 
1713   // Check the source type of the merge.
1714   LLT SrcMergeTy = MRI.getType(SrcInstr->getSourceReg(0));
1715   LLT Dst0Ty = MRI.getType(Unmerge.getReg(0));
1716   bool SameSize = Dst0Ty.getSizeInBits() == SrcMergeTy.getSizeInBits();
1717   if (SrcMergeTy != Dst0Ty && !SameSize)
1718     return false;
1719   // They are the same now (modulo a bitcast).
1720   // We can collect all the src registers.
1721   for (unsigned Idx = 0; Idx < SrcInstr->getNumSources(); ++Idx)
1722     Operands.push_back(SrcInstr->getSourceReg(Idx));
1723   return true;
1724 }
1725 
1726 void CombinerHelper::applyCombineUnmergeMergeToPlainValues(
1727     MachineInstr &MI, SmallVectorImpl<Register> &Operands) {
1728   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1729          "Expected an unmerge");
1730   assert((MI.getNumOperands() - 1 == Operands.size()) &&
1731          "Not enough operands to replace all defs");
1732   unsigned NumElems = MI.getNumOperands() - 1;
1733 
1734   LLT SrcTy = MRI.getType(Operands[0]);
1735   LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
1736   bool CanReuseInputDirectly = DstTy == SrcTy;
1737   Builder.setInstrAndDebugLoc(MI);
1738   for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
1739     Register DstReg = MI.getOperand(Idx).getReg();
1740     Register SrcReg = Operands[Idx];
1741     if (CanReuseInputDirectly)
1742       replaceRegWith(MRI, DstReg, SrcReg);
1743     else
1744       Builder.buildCast(DstReg, SrcReg);
1745   }
1746   MI.eraseFromParent();
1747 }
1748 
1749 bool CombinerHelper::matchCombineUnmergeConstant(MachineInstr &MI,
1750                                                  SmallVectorImpl<APInt> &Csts) {
1751   unsigned SrcIdx = MI.getNumOperands() - 1;
1752   Register SrcReg = MI.getOperand(SrcIdx).getReg();
1753   MachineInstr *SrcInstr = MRI.getVRegDef(SrcReg);
1754   if (SrcInstr->getOpcode() != TargetOpcode::G_CONSTANT &&
1755       SrcInstr->getOpcode() != TargetOpcode::G_FCONSTANT)
1756     return false;
1757   // Break down the big constant in smaller ones.
1758   const MachineOperand &CstVal = SrcInstr->getOperand(1);
1759   APInt Val = SrcInstr->getOpcode() == TargetOpcode::G_CONSTANT
1760                   ? CstVal.getCImm()->getValue()
1761                   : CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
1762 
1763   LLT Dst0Ty = MRI.getType(MI.getOperand(0).getReg());
1764   unsigned ShiftAmt = Dst0Ty.getSizeInBits();
1765   // Unmerge a constant.
1766   for (unsigned Idx = 0; Idx != SrcIdx; ++Idx) {
1767     Csts.emplace_back(Val.trunc(ShiftAmt));
1768     Val = Val.lshr(ShiftAmt);
1769   }
1770 
1771   return true;
1772 }
1773 
1774 void CombinerHelper::applyCombineUnmergeConstant(MachineInstr &MI,
1775                                                  SmallVectorImpl<APInt> &Csts) {
1776   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1777          "Expected an unmerge");
1778   assert((MI.getNumOperands() - 1 == Csts.size()) &&
1779          "Not enough operands to replace all defs");
1780   unsigned NumElems = MI.getNumOperands() - 1;
1781   Builder.setInstrAndDebugLoc(MI);
1782   for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
1783     Register DstReg = MI.getOperand(Idx).getReg();
1784     Builder.buildConstant(DstReg, Csts[Idx]);
1785   }
1786 
1787   MI.eraseFromParent();
1788 }
1789 
1790 bool CombinerHelper::matchCombineUnmergeUndef(
1791     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
1792   unsigned SrcIdx = MI.getNumOperands() - 1;
1793   Register SrcReg = MI.getOperand(SrcIdx).getReg();
1794   MatchInfo = [&MI](MachineIRBuilder &B) {
1795     unsigned NumElems = MI.getNumOperands() - 1;
1796     for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
1797       Register DstReg = MI.getOperand(Idx).getReg();
1798       B.buildUndef(DstReg);
1799     }
1800   };
1801   return isa<GImplicitDef>(MRI.getVRegDef(SrcReg));
1802 }
1803 
1804 bool CombinerHelper::matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) {
1805   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1806          "Expected an unmerge");
1807   // Check that all the lanes are dead except the first one.
1808   for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
1809     if (!MRI.use_nodbg_empty(MI.getOperand(Idx).getReg()))
1810       return false;
1811   }
1812   return true;
1813 }
1814 
1815 void CombinerHelper::applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) {
1816   Builder.setInstrAndDebugLoc(MI);
1817   Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
1818   // Truncating a vector is going to truncate every single lane,
1819   // whereas we want the full lowbits.
1820   // Do the operation on a scalar instead.
1821   LLT SrcTy = MRI.getType(SrcReg);
1822   if (SrcTy.isVector())
1823     SrcReg =
1824         Builder.buildCast(LLT::scalar(SrcTy.getSizeInBits()), SrcReg).getReg(0);
1825 
1826   Register Dst0Reg = MI.getOperand(0).getReg();
1827   LLT Dst0Ty = MRI.getType(Dst0Reg);
1828   if (Dst0Ty.isVector()) {
1829     auto MIB = Builder.buildTrunc(LLT::scalar(Dst0Ty.getSizeInBits()), SrcReg);
1830     Builder.buildCast(Dst0Reg, MIB);
1831   } else
1832     Builder.buildTrunc(Dst0Reg, SrcReg);
1833   MI.eraseFromParent();
1834 }
1835 
1836 bool CombinerHelper::matchCombineUnmergeZExtToZExt(MachineInstr &MI) {
1837   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1838          "Expected an unmerge");
1839   Register Dst0Reg = MI.getOperand(0).getReg();
1840   LLT Dst0Ty = MRI.getType(Dst0Reg);
1841   // G_ZEXT on vector applies to each lane, so it will
1842   // affect all destinations. Therefore we won't be able
1843   // to simplify the unmerge to just the first definition.
1844   if (Dst0Ty.isVector())
1845     return false;
1846   Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
1847   LLT SrcTy = MRI.getType(SrcReg);
1848   if (SrcTy.isVector())
1849     return false;
1850 
1851   Register ZExtSrcReg;
1852   if (!mi_match(SrcReg, MRI, m_GZExt(m_Reg(ZExtSrcReg))))
1853     return false;
1854 
1855   // Finally we can replace the first definition with
1856   // a zext of the source if the definition is big enough to hold
1857   // all of ZExtSrc bits.
1858   LLT ZExtSrcTy = MRI.getType(ZExtSrcReg);
1859   return ZExtSrcTy.getSizeInBits() <= Dst0Ty.getSizeInBits();
1860 }
1861 
1862 void CombinerHelper::applyCombineUnmergeZExtToZExt(MachineInstr &MI) {
1863   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1864          "Expected an unmerge");
1865 
1866   Register Dst0Reg = MI.getOperand(0).getReg();
1867 
1868   MachineInstr *ZExtInstr =
1869       MRI.getVRegDef(MI.getOperand(MI.getNumDefs()).getReg());
1870   assert(ZExtInstr && ZExtInstr->getOpcode() == TargetOpcode::G_ZEXT &&
1871          "Expecting a G_ZEXT");
1872 
1873   Register ZExtSrcReg = ZExtInstr->getOperand(1).getReg();
1874   LLT Dst0Ty = MRI.getType(Dst0Reg);
1875   LLT ZExtSrcTy = MRI.getType(ZExtSrcReg);
1876 
1877   Builder.setInstrAndDebugLoc(MI);
1878 
1879   if (Dst0Ty.getSizeInBits() > ZExtSrcTy.getSizeInBits()) {
1880     Builder.buildZExt(Dst0Reg, ZExtSrcReg);
1881   } else {
1882     assert(Dst0Ty.getSizeInBits() == ZExtSrcTy.getSizeInBits() &&
1883            "ZExt src doesn't fit in destination");
1884     replaceRegWith(MRI, Dst0Reg, ZExtSrcReg);
1885   }
1886 
1887   Register ZeroReg;
1888   for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
1889     if (!ZeroReg)
1890       ZeroReg = Builder.buildConstant(Dst0Ty, 0).getReg(0);
1891     replaceRegWith(MRI, MI.getOperand(Idx).getReg(), ZeroReg);
1892   }
1893   MI.eraseFromParent();
1894 }
1895 
1896 bool CombinerHelper::matchCombineShiftToUnmerge(MachineInstr &MI,
1897                                                 unsigned TargetShiftSize,
1898                                                 unsigned &ShiftVal) {
1899   assert((MI.getOpcode() == TargetOpcode::G_SHL ||
1900           MI.getOpcode() == TargetOpcode::G_LSHR ||
1901           MI.getOpcode() == TargetOpcode::G_ASHR) && "Expected a shift");
1902 
1903   LLT Ty = MRI.getType(MI.getOperand(0).getReg());
1904   if (Ty.isVector()) // TODO:
1905     return false;
1906 
1907   // Don't narrow further than the requested size.
1908   unsigned Size = Ty.getSizeInBits();
1909   if (Size <= TargetShiftSize)
1910     return false;
1911 
1912   auto MaybeImmVal =
1913       getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
1914   if (!MaybeImmVal)
1915     return false;
1916 
1917   ShiftVal = MaybeImmVal->Value.getSExtValue();
1918   return ShiftVal >= Size / 2 && ShiftVal < Size;
1919 }
1920 
1921 void CombinerHelper::applyCombineShiftToUnmerge(MachineInstr &MI,
1922                                                 const unsigned &ShiftVal) {
1923   Register DstReg = MI.getOperand(0).getReg();
1924   Register SrcReg = MI.getOperand(1).getReg();
1925   LLT Ty = MRI.getType(SrcReg);
1926   unsigned Size = Ty.getSizeInBits();
1927   unsigned HalfSize = Size / 2;
1928   assert(ShiftVal >= HalfSize);
1929 
1930   LLT HalfTy = LLT::scalar(HalfSize);
1931 
1932   Builder.setInstr(MI);
1933   auto Unmerge = Builder.buildUnmerge(HalfTy, SrcReg);
1934   unsigned NarrowShiftAmt = ShiftVal - HalfSize;
1935 
1936   if (MI.getOpcode() == TargetOpcode::G_LSHR) {
1937     Register Narrowed = Unmerge.getReg(1);
1938 
1939     //  dst = G_LSHR s64:x, C for C >= 32
1940     // =>
1941     //   lo, hi = G_UNMERGE_VALUES x
1942     //   dst = G_MERGE_VALUES (G_LSHR hi, C - 32), 0
1943 
1944     if (NarrowShiftAmt != 0) {
1945       Narrowed = Builder.buildLShr(HalfTy, Narrowed,
1946         Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
1947     }
1948 
1949     auto Zero = Builder.buildConstant(HalfTy, 0);
1950     Builder.buildMerge(DstReg, { Narrowed, Zero });
1951   } else if (MI.getOpcode() == TargetOpcode::G_SHL) {
1952     Register Narrowed = Unmerge.getReg(0);
1953     //  dst = G_SHL s64:x, C for C >= 32
1954     // =>
1955     //   lo, hi = G_UNMERGE_VALUES x
1956     //   dst = G_MERGE_VALUES 0, (G_SHL hi, C - 32)
1957     if (NarrowShiftAmt != 0) {
1958       Narrowed = Builder.buildShl(HalfTy, Narrowed,
1959         Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
1960     }
1961 
1962     auto Zero = Builder.buildConstant(HalfTy, 0);
1963     Builder.buildMerge(DstReg, { Zero, Narrowed });
1964   } else {
1965     assert(MI.getOpcode() == TargetOpcode::G_ASHR);
1966     auto Hi = Builder.buildAShr(
1967       HalfTy, Unmerge.getReg(1),
1968       Builder.buildConstant(HalfTy, HalfSize - 1));
1969 
1970     if (ShiftVal == HalfSize) {
1971       // (G_ASHR i64:x, 32) ->
1972       //   G_MERGE_VALUES hi_32(x), (G_ASHR hi_32(x), 31)
1973       Builder.buildMerge(DstReg, { Unmerge.getReg(1), Hi });
1974     } else if (ShiftVal == Size - 1) {
1975       // Don't need a second shift.
1976       // (G_ASHR i64:x, 63) ->
1977       //   %narrowed = (G_ASHR hi_32(x), 31)
1978       //   G_MERGE_VALUES %narrowed, %narrowed
1979       Builder.buildMerge(DstReg, { Hi, Hi });
1980     } else {
1981       auto Lo = Builder.buildAShr(
1982         HalfTy, Unmerge.getReg(1),
1983         Builder.buildConstant(HalfTy, ShiftVal - HalfSize));
1984 
1985       // (G_ASHR i64:x, C) ->, for C >= 32
1986       //   G_MERGE_VALUES (G_ASHR hi_32(x), C - 32), (G_ASHR hi_32(x), 31)
1987       Builder.buildMerge(DstReg, { Lo, Hi });
1988     }
1989   }
1990 
1991   MI.eraseFromParent();
1992 }
1993 
1994 bool CombinerHelper::tryCombineShiftToUnmerge(MachineInstr &MI,
1995                                               unsigned TargetShiftAmount) {
1996   unsigned ShiftAmt;
1997   if (matchCombineShiftToUnmerge(MI, TargetShiftAmount, ShiftAmt)) {
1998     applyCombineShiftToUnmerge(MI, ShiftAmt);
1999     return true;
2000   }
2001 
2002   return false;
2003 }
2004 
2005 bool CombinerHelper::matchCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
2006   assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
2007   Register DstReg = MI.getOperand(0).getReg();
2008   LLT DstTy = MRI.getType(DstReg);
2009   Register SrcReg = MI.getOperand(1).getReg();
2010   return mi_match(SrcReg, MRI,
2011                   m_GPtrToInt(m_all_of(m_SpecificType(DstTy), m_Reg(Reg))));
2012 }
2013 
2014 void CombinerHelper::applyCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
2015   assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
2016   Register DstReg = MI.getOperand(0).getReg();
2017   Builder.setInstr(MI);
2018   Builder.buildCopy(DstReg, Reg);
2019   MI.eraseFromParent();
2020 }
2021 
2022 bool CombinerHelper::matchCombineP2IToI2P(MachineInstr &MI, Register &Reg) {
2023   assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");
2024   Register SrcReg = MI.getOperand(1).getReg();
2025   return mi_match(SrcReg, MRI, m_GIntToPtr(m_Reg(Reg)));
2026 }
2027 
2028 void CombinerHelper::applyCombineP2IToI2P(MachineInstr &MI, Register &Reg) {
2029   assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");
2030   Register DstReg = MI.getOperand(0).getReg();
2031   Builder.setInstr(MI);
2032   Builder.buildZExtOrTrunc(DstReg, Reg);
2033   MI.eraseFromParent();
2034 }
2035 
2036 bool CombinerHelper::matchCombineAddP2IToPtrAdd(
2037     MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
2038   assert(MI.getOpcode() == TargetOpcode::G_ADD);
2039   Register LHS = MI.getOperand(1).getReg();
2040   Register RHS = MI.getOperand(2).getReg();
2041   LLT IntTy = MRI.getType(LHS);
2042 
2043   // G_PTR_ADD always has the pointer in the LHS, so we may need to commute the
2044   // instruction.
2045   PtrReg.second = false;
2046   for (Register SrcReg : {LHS, RHS}) {
2047     if (mi_match(SrcReg, MRI, m_GPtrToInt(m_Reg(PtrReg.first)))) {
2048       // Don't handle cases where the integer is implicitly converted to the
2049       // pointer width.
2050       LLT PtrTy = MRI.getType(PtrReg.first);
2051       if (PtrTy.getScalarSizeInBits() == IntTy.getScalarSizeInBits())
2052         return true;
2053     }
2054 
2055     PtrReg.second = true;
2056   }
2057 
2058   return false;
2059 }
2060 
2061 void CombinerHelper::applyCombineAddP2IToPtrAdd(
2062     MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
2063   Register Dst = MI.getOperand(0).getReg();
2064   Register LHS = MI.getOperand(1).getReg();
2065   Register RHS = MI.getOperand(2).getReg();
2066 
2067   const bool DoCommute = PtrReg.second;
2068   if (DoCommute)
2069     std::swap(LHS, RHS);
2070   LHS = PtrReg.first;
2071 
2072   LLT PtrTy = MRI.getType(LHS);
2073 
2074   Builder.setInstrAndDebugLoc(MI);
2075   auto PtrAdd = Builder.buildPtrAdd(PtrTy, LHS, RHS);
2076   Builder.buildPtrToInt(Dst, PtrAdd);
2077   MI.eraseFromParent();
2078 }
2079 
2080 bool CombinerHelper::matchCombineConstPtrAddToI2P(MachineInstr &MI,
2081                                                   APInt &NewCst) {
2082   auto &PtrAdd = cast<GPtrAdd>(MI);
2083   Register LHS = PtrAdd.getBaseReg();
2084   Register RHS = PtrAdd.getOffsetReg();
2085   MachineRegisterInfo &MRI = Builder.getMF().getRegInfo();
2086 
2087   if (auto RHSCst = getIConstantVRegVal(RHS, MRI)) {
2088     APInt Cst;
2089     if (mi_match(LHS, MRI, m_GIntToPtr(m_ICst(Cst)))) {
2090       auto DstTy = MRI.getType(PtrAdd.getReg(0));
2091       // G_INTTOPTR uses zero-extension
2092       NewCst = Cst.zextOrTrunc(DstTy.getSizeInBits());
2093       NewCst += RHSCst->sextOrTrunc(DstTy.getSizeInBits());
2094       return true;
2095     }
2096   }
2097 
2098   return false;
2099 }
2100 
2101 void CombinerHelper::applyCombineConstPtrAddToI2P(MachineInstr &MI,
2102                                                   APInt &NewCst) {
2103   auto &PtrAdd = cast<GPtrAdd>(MI);
2104   Register Dst = PtrAdd.getReg(0);
2105 
2106   Builder.setInstrAndDebugLoc(MI);
2107   Builder.buildConstant(Dst, NewCst);
2108   PtrAdd.eraseFromParent();
2109 }
2110 
2111 bool CombinerHelper::matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg) {
2112   assert(MI.getOpcode() == TargetOpcode::G_ANYEXT && "Expected a G_ANYEXT");
2113   Register DstReg = MI.getOperand(0).getReg();
2114   Register SrcReg = MI.getOperand(1).getReg();
2115   LLT DstTy = MRI.getType(DstReg);
2116   return mi_match(SrcReg, MRI,
2117                   m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))));
2118 }
2119 
2120 bool CombinerHelper::matchCombineZextTrunc(MachineInstr &MI, Register &Reg) {
2121   assert(MI.getOpcode() == TargetOpcode::G_ZEXT && "Expected a G_ZEXT");
2122   Register DstReg = MI.getOperand(0).getReg();
2123   Register SrcReg = MI.getOperand(1).getReg();
2124   LLT DstTy = MRI.getType(DstReg);
2125   if (mi_match(SrcReg, MRI,
2126                m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))))) {
2127     unsigned DstSize = DstTy.getScalarSizeInBits();
2128     unsigned SrcSize = MRI.getType(SrcReg).getScalarSizeInBits();
2129     return KB->getKnownBits(Reg).countMinLeadingZeros() >= DstSize - SrcSize;
2130   }
2131   return false;
2132 }
2133 
2134 bool CombinerHelper::matchCombineExtOfExt(
2135     MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
2136   assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2137           MI.getOpcode() == TargetOpcode::G_SEXT ||
2138           MI.getOpcode() == TargetOpcode::G_ZEXT) &&
2139          "Expected a G_[ASZ]EXT");
2140   Register SrcReg = MI.getOperand(1).getReg();
2141   MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
2142   // Match exts with the same opcode, anyext([sz]ext) and sext(zext).
2143   unsigned Opc = MI.getOpcode();
2144   unsigned SrcOpc = SrcMI->getOpcode();
2145   if (Opc == SrcOpc ||
2146       (Opc == TargetOpcode::G_ANYEXT &&
2147        (SrcOpc == TargetOpcode::G_SEXT || SrcOpc == TargetOpcode::G_ZEXT)) ||
2148       (Opc == TargetOpcode::G_SEXT && SrcOpc == TargetOpcode::G_ZEXT)) {
2149     MatchInfo = std::make_tuple(SrcMI->getOperand(1).getReg(), SrcOpc);
2150     return true;
2151   }
2152   return false;
2153 }
2154 
2155 void CombinerHelper::applyCombineExtOfExt(
2156     MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
2157   assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2158           MI.getOpcode() == TargetOpcode::G_SEXT ||
2159           MI.getOpcode() == TargetOpcode::G_ZEXT) &&
2160          "Expected a G_[ASZ]EXT");
2161 
2162   Register Reg = std::get<0>(MatchInfo);
2163   unsigned SrcExtOp = std::get<1>(MatchInfo);
2164 
2165   // Combine exts with the same opcode.
2166   if (MI.getOpcode() == SrcExtOp) {
2167     Observer.changingInstr(MI);
2168     MI.getOperand(1).setReg(Reg);
2169     Observer.changedInstr(MI);
2170     return;
2171   }
2172 
2173   // Combine:
2174   // - anyext([sz]ext x) to [sz]ext x
2175   // - sext(zext x) to zext x
2176   if (MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2177       (MI.getOpcode() == TargetOpcode::G_SEXT &&
2178        SrcExtOp == TargetOpcode::G_ZEXT)) {
2179     Register DstReg = MI.getOperand(0).getReg();
2180     Builder.setInstrAndDebugLoc(MI);
2181     Builder.buildInstr(SrcExtOp, {DstReg}, {Reg});
2182     MI.eraseFromParent();
2183   }
2184 }
2185 
2186 void CombinerHelper::applyCombineMulByNegativeOne(MachineInstr &MI) {
2187   assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
2188   Register DstReg = MI.getOperand(0).getReg();
2189   Register SrcReg = MI.getOperand(1).getReg();
2190   LLT DstTy = MRI.getType(DstReg);
2191 
2192   Builder.setInstrAndDebugLoc(MI);
2193   Builder.buildSub(DstReg, Builder.buildConstant(DstTy, 0), SrcReg,
2194                    MI.getFlags());
2195   MI.eraseFromParent();
2196 }
2197 
2198 bool CombinerHelper::matchCombineFNegOfFNeg(MachineInstr &MI, Register &Reg) {
2199   assert(MI.getOpcode() == TargetOpcode::G_FNEG && "Expected a G_FNEG");
2200   Register SrcReg = MI.getOperand(1).getReg();
2201   return mi_match(SrcReg, MRI, m_GFNeg(m_Reg(Reg)));
2202 }
2203 
2204 bool CombinerHelper::matchCombineFAbsOfFAbs(MachineInstr &MI, Register &Src) {
2205   assert(MI.getOpcode() == TargetOpcode::G_FABS && "Expected a G_FABS");
2206   Src = MI.getOperand(1).getReg();
2207   Register AbsSrc;
2208   return mi_match(Src, MRI, m_GFabs(m_Reg(AbsSrc)));
2209 }
2210 
2211 bool CombinerHelper::matchCombineFAbsOfFNeg(MachineInstr &MI,
2212                                             BuildFnTy &MatchInfo) {
2213   assert(MI.getOpcode() == TargetOpcode::G_FABS && "Expected a G_FABS");
2214   Register Src = MI.getOperand(1).getReg();
2215   Register NegSrc;
2216 
2217   if (!mi_match(Src, MRI, m_GFNeg(m_Reg(NegSrc))))
2218     return false;
2219 
2220   MatchInfo = [=, &MI](MachineIRBuilder &B) {
2221     Observer.changingInstr(MI);
2222     MI.getOperand(1).setReg(NegSrc);
2223     Observer.changedInstr(MI);
2224   };
2225   return true;
2226 }
2227 
2228 bool CombinerHelper::matchCombineTruncOfExt(
2229     MachineInstr &MI, std::pair<Register, unsigned> &MatchInfo) {
2230   assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2231   Register SrcReg = MI.getOperand(1).getReg();
2232   MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
2233   unsigned SrcOpc = SrcMI->getOpcode();
2234   if (SrcOpc == TargetOpcode::G_ANYEXT || SrcOpc == TargetOpcode::G_SEXT ||
2235       SrcOpc == TargetOpcode::G_ZEXT) {
2236     MatchInfo = std::make_pair(SrcMI->getOperand(1).getReg(), SrcOpc);
2237     return true;
2238   }
2239   return false;
2240 }
2241 
2242 void CombinerHelper::applyCombineTruncOfExt(
2243     MachineInstr &MI, std::pair<Register, unsigned> &MatchInfo) {
2244   assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2245   Register SrcReg = MatchInfo.first;
2246   unsigned SrcExtOp = MatchInfo.second;
2247   Register DstReg = MI.getOperand(0).getReg();
2248   LLT SrcTy = MRI.getType(SrcReg);
2249   LLT DstTy = MRI.getType(DstReg);
2250   if (SrcTy == DstTy) {
2251     MI.eraseFromParent();
2252     replaceRegWith(MRI, DstReg, SrcReg);
2253     return;
2254   }
2255   Builder.setInstrAndDebugLoc(MI);
2256   if (SrcTy.getSizeInBits() < DstTy.getSizeInBits())
2257     Builder.buildInstr(SrcExtOp, {DstReg}, {SrcReg});
2258   else
2259     Builder.buildTrunc(DstReg, SrcReg);
2260   MI.eraseFromParent();
2261 }
2262 
2263 bool CombinerHelper::matchCombineTruncOfShl(
2264     MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
2265   assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2266   Register DstReg = MI.getOperand(0).getReg();
2267   Register SrcReg = MI.getOperand(1).getReg();
2268   LLT DstTy = MRI.getType(DstReg);
2269   Register ShiftSrc;
2270   Register ShiftAmt;
2271 
2272   if (MRI.hasOneNonDBGUse(SrcReg) &&
2273       mi_match(SrcReg, MRI, m_GShl(m_Reg(ShiftSrc), m_Reg(ShiftAmt))) &&
2274       isLegalOrBeforeLegalizer(
2275           {TargetOpcode::G_SHL,
2276            {DstTy, getTargetLowering().getPreferredShiftAmountTy(DstTy)}})) {
2277     KnownBits Known = KB->getKnownBits(ShiftAmt);
2278     unsigned Size = DstTy.getSizeInBits();
2279     if (Known.countMaxActiveBits() <= Log2_32(Size)) {
2280       MatchInfo = std::make_pair(ShiftSrc, ShiftAmt);
2281       return true;
2282     }
2283   }
2284   return false;
2285 }
2286 
2287 void CombinerHelper::applyCombineTruncOfShl(
2288     MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
2289   assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2290   Register DstReg = MI.getOperand(0).getReg();
2291   Register SrcReg = MI.getOperand(1).getReg();
2292   LLT DstTy = MRI.getType(DstReg);
2293   MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
2294 
2295   Register ShiftSrc = MatchInfo.first;
2296   Register ShiftAmt = MatchInfo.second;
2297   Builder.setInstrAndDebugLoc(MI);
2298   auto TruncShiftSrc = Builder.buildTrunc(DstTy, ShiftSrc);
2299   Builder.buildShl(DstReg, TruncShiftSrc, ShiftAmt, SrcMI->getFlags());
2300   MI.eraseFromParent();
2301 }
2302 
2303 bool CombinerHelper::matchAnyExplicitUseIsUndef(MachineInstr &MI) {
2304   return any_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
2305     return MO.isReg() &&
2306            getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2307   });
2308 }
2309 
2310 bool CombinerHelper::matchAllExplicitUsesAreUndef(MachineInstr &MI) {
2311   return all_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
2312     return !MO.isReg() ||
2313            getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2314   });
2315 }
2316 
2317 bool CombinerHelper::matchUndefShuffleVectorMask(MachineInstr &MI) {
2318   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
2319   ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
2320   return all_of(Mask, [](int Elt) { return Elt < 0; });
2321 }
2322 
2323 bool CombinerHelper::matchUndefStore(MachineInstr &MI) {
2324   assert(MI.getOpcode() == TargetOpcode::G_STORE);
2325   return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(0).getReg(),
2326                       MRI);
2327 }
2328 
2329 bool CombinerHelper::matchUndefSelectCmp(MachineInstr &MI) {
2330   assert(MI.getOpcode() == TargetOpcode::G_SELECT);
2331   return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(1).getReg(),
2332                       MRI);
2333 }
2334 
2335 bool CombinerHelper::matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) {
2336   GSelect &SelMI = cast<GSelect>(MI);
2337   auto Cst =
2338       isConstantOrConstantSplatVector(*MRI.getVRegDef(SelMI.getCondReg()), MRI);
2339   if (!Cst)
2340     return false;
2341   OpIdx = Cst->isZero() ? 3 : 2;
2342   return true;
2343 }
2344 
2345 bool CombinerHelper::eraseInst(MachineInstr &MI) {
2346   MI.eraseFromParent();
2347   return true;
2348 }
2349 
2350 bool CombinerHelper::matchEqualDefs(const MachineOperand &MOP1,
2351                                     const MachineOperand &MOP2) {
2352   if (!MOP1.isReg() || !MOP2.isReg())
2353     return false;
2354   auto InstAndDef1 = getDefSrcRegIgnoringCopies(MOP1.getReg(), MRI);
2355   if (!InstAndDef1)
2356     return false;
2357   auto InstAndDef2 = getDefSrcRegIgnoringCopies(MOP2.getReg(), MRI);
2358   if (!InstAndDef2)
2359     return false;
2360   MachineInstr *I1 = InstAndDef1->MI;
2361   MachineInstr *I2 = InstAndDef2->MI;
2362 
2363   // Handle a case like this:
2364   //
2365   // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<2 x s64>)
2366   //
2367   // Even though %0 and %1 are produced by the same instruction they are not
2368   // the same values.
2369   if (I1 == I2)
2370     return MOP1.getReg() == MOP2.getReg();
2371 
2372   // If we have an instruction which loads or stores, we can't guarantee that
2373   // it is identical.
2374   //
2375   // For example, we may have
2376   //
2377   // %x1 = G_LOAD %addr (load N from @somewhere)
2378   // ...
2379   // call @foo
2380   // ...
2381   // %x2 = G_LOAD %addr (load N from @somewhere)
2382   // ...
2383   // %or = G_OR %x1, %x2
2384   //
2385   // It's possible that @foo will modify whatever lives at the address we're
2386   // loading from. To be safe, let's just assume that all loads and stores
2387   // are different (unless we have something which is guaranteed to not
2388   // change.)
2389   if (I1->mayLoadOrStore() && !I1->isDereferenceableInvariantLoad())
2390     return false;
2391 
2392   // If both instructions are loads or stores, they are equal only if both
2393   // are dereferenceable invariant loads with the same number of bits.
2394   if (I1->mayLoadOrStore() && I2->mayLoadOrStore()) {
2395     GLoadStore *LS1 = dyn_cast<GLoadStore>(I1);
2396     GLoadStore *LS2 = dyn_cast<GLoadStore>(I2);
2397     if (!LS1 || !LS2)
2398       return false;
2399 
2400     if (!I2->isDereferenceableInvariantLoad() ||
2401         (LS1->getMemSizeInBits() != LS2->getMemSizeInBits()))
2402       return false;
2403   }
2404 
2405   // Check for physical registers on the instructions first to avoid cases
2406   // like this:
2407   //
2408   // %a = COPY $physreg
2409   // ...
2410   // SOMETHING implicit-def $physreg
2411   // ...
2412   // %b = COPY $physreg
2413   //
2414   // These copies are not equivalent.
2415   if (any_of(I1->uses(), [](const MachineOperand &MO) {
2416         return MO.isReg() && MO.getReg().isPhysical();
2417       })) {
2418     // Check if we have a case like this:
2419     //
2420     // %a = COPY $physreg
2421     // %b = COPY %a
2422     //
2423     // In this case, I1 and I2 will both be equal to %a = COPY $physreg.
2424     // From that, we know that they must have the same value, since they must
2425     // have come from the same COPY.
2426     return I1->isIdenticalTo(*I2);
2427   }
2428 
2429   // We don't have any physical registers, so we don't necessarily need the
2430   // same vreg defs.
2431   //
2432   // On the off-chance that there's some target instruction feeding into the
2433   // instruction, let's use produceSameValue instead of isIdenticalTo.
2434   if (Builder.getTII().produceSameValue(*I1, *I2, &MRI)) {
2435     // Handle instructions with multiple defs that produce same values. Values
2436     // are same for operands with same index.
2437     // %0:_(s8), %1:_(s8), %2:_(s8), %3:_(s8) = G_UNMERGE_VALUES %4:_(<4 x s8>)
2438     // %5:_(s8), %6:_(s8), %7:_(s8), %8:_(s8) = G_UNMERGE_VALUES %4:_(<4 x s8>)
2439     // I1 and I2 are different instructions but produce same values,
2440     // %1 and %6 are same, %1 and %7 are not the same value.
2441     return I1->findRegisterDefOperandIdx(InstAndDef1->Reg) ==
2442            I2->findRegisterDefOperandIdx(InstAndDef2->Reg);
2443   }
2444   return false;
2445 }
2446 
2447 bool CombinerHelper::matchConstantOp(const MachineOperand &MOP, int64_t C) {
2448   if (!MOP.isReg())
2449     return false;
2450   auto *MI = MRI.getVRegDef(MOP.getReg());
2451   auto MaybeCst = isConstantOrConstantSplatVector(*MI, MRI);
2452   return MaybeCst && MaybeCst->getBitWidth() <= 64 &&
2453          MaybeCst->getSExtValue() == C;
2454 }
2455 
2456 bool CombinerHelper::replaceSingleDefInstWithOperand(MachineInstr &MI,
2457                                                      unsigned OpIdx) {
2458   assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
2459   Register OldReg = MI.getOperand(0).getReg();
2460   Register Replacement = MI.getOperand(OpIdx).getReg();
2461   assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
2462   MI.eraseFromParent();
2463   replaceRegWith(MRI, OldReg, Replacement);
2464   return true;
2465 }
2466 
2467 bool CombinerHelper::replaceSingleDefInstWithReg(MachineInstr &MI,
2468                                                  Register Replacement) {
2469   assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
2470   Register OldReg = MI.getOperand(0).getReg();
2471   assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
2472   MI.eraseFromParent();
2473   replaceRegWith(MRI, OldReg, Replacement);
2474   return true;
2475 }
2476 
2477 bool CombinerHelper::matchSelectSameVal(MachineInstr &MI) {
2478   assert(MI.getOpcode() == TargetOpcode::G_SELECT);
2479   // Match (cond ? x : x)
2480   return matchEqualDefs(MI.getOperand(2), MI.getOperand(3)) &&
2481          canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(2).getReg(),
2482                        MRI);
2483 }
2484 
2485 bool CombinerHelper::matchBinOpSameVal(MachineInstr &MI) {
2486   return matchEqualDefs(MI.getOperand(1), MI.getOperand(2)) &&
2487          canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
2488                        MRI);
2489 }
2490 
2491 bool CombinerHelper::matchOperandIsZero(MachineInstr &MI, unsigned OpIdx) {
2492   return matchConstantOp(MI.getOperand(OpIdx), 0) &&
2493          canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(OpIdx).getReg(),
2494                        MRI);
2495 }
2496 
2497 bool CombinerHelper::matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx) {
2498   MachineOperand &MO = MI.getOperand(OpIdx);
2499   return MO.isReg() &&
2500          getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2501 }
2502 
2503 bool CombinerHelper::matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI,
2504                                                         unsigned OpIdx) {
2505   MachineOperand &MO = MI.getOperand(OpIdx);
2506   return isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB);
2507 }
2508 
2509 bool CombinerHelper::replaceInstWithFConstant(MachineInstr &MI, double C) {
2510   assert(MI.getNumDefs() == 1 && "Expected only one def?");
2511   Builder.setInstr(MI);
2512   Builder.buildFConstant(MI.getOperand(0), C);
2513   MI.eraseFromParent();
2514   return true;
2515 }
2516 
2517 bool CombinerHelper::replaceInstWithConstant(MachineInstr &MI, int64_t C) {
2518   assert(MI.getNumDefs() == 1 && "Expected only one def?");
2519   Builder.setInstr(MI);
2520   Builder.buildConstant(MI.getOperand(0), C);
2521   MI.eraseFromParent();
2522   return true;
2523 }
2524 
2525 bool CombinerHelper::replaceInstWithConstant(MachineInstr &MI, APInt C) {
2526   assert(MI.getNumDefs() == 1 && "Expected only one def?");
2527   Builder.setInstr(MI);
2528   Builder.buildConstant(MI.getOperand(0), C);
2529   MI.eraseFromParent();
2530   return true;
2531 }
2532 
2533 bool CombinerHelper::replaceInstWithUndef(MachineInstr &MI) {
2534   assert(MI.getNumDefs() == 1 && "Expected only one def?");
2535   Builder.setInstr(MI);
2536   Builder.buildUndef(MI.getOperand(0));
2537   MI.eraseFromParent();
2538   return true;
2539 }
2540 
2541 bool CombinerHelper::matchSimplifyAddToSub(
2542     MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
2543   Register LHS = MI.getOperand(1).getReg();
2544   Register RHS = MI.getOperand(2).getReg();
2545   Register &NewLHS = std::get<0>(MatchInfo);
2546   Register &NewRHS = std::get<1>(MatchInfo);
2547 
2548   // Helper lambda to check for opportunities for
2549   // ((0-A) + B) -> B - A
2550   // (A + (0-B)) -> A - B
2551   auto CheckFold = [&](Register &MaybeSub, Register &MaybeNewLHS) {
2552     if (!mi_match(MaybeSub, MRI, m_Neg(m_Reg(NewRHS))))
2553       return false;
2554     NewLHS = MaybeNewLHS;
2555     return true;
2556   };
2557 
2558   return CheckFold(LHS, RHS) || CheckFold(RHS, LHS);
2559 }
2560 
2561 bool CombinerHelper::matchCombineInsertVecElts(
2562     MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) {
2563   assert(MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT &&
2564          "Invalid opcode");
2565   Register DstReg = MI.getOperand(0).getReg();
2566   LLT DstTy = MRI.getType(DstReg);
2567   assert(DstTy.isVector() && "Invalid G_INSERT_VECTOR_ELT?");
2568   unsigned NumElts = DstTy.getNumElements();
2569   // If this MI is part of a sequence of insert_vec_elts, then
2570   // don't do the combine in the middle of the sequence.
2571   if (MRI.hasOneUse(DstReg) && MRI.use_instr_begin(DstReg)->getOpcode() ==
2572                                    TargetOpcode::G_INSERT_VECTOR_ELT)
2573     return false;
2574   MachineInstr *CurrInst = &MI;
2575   MachineInstr *TmpInst;
2576   int64_t IntImm;
2577   Register TmpReg;
2578   MatchInfo.resize(NumElts);
2579   while (mi_match(
2580       CurrInst->getOperand(0).getReg(), MRI,
2581       m_GInsertVecElt(m_MInstr(TmpInst), m_Reg(TmpReg), m_ICst(IntImm)))) {
2582     if (IntImm >= NumElts)
2583       return false;
2584     if (!MatchInfo[IntImm])
2585       MatchInfo[IntImm] = TmpReg;
2586     CurrInst = TmpInst;
2587   }
2588   // Variable index.
2589   if (CurrInst->getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT)
2590     return false;
2591   if (TmpInst->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
2592     for (unsigned I = 1; I < TmpInst->getNumOperands(); ++I) {
2593       if (!MatchInfo[I - 1].isValid())
2594         MatchInfo[I - 1] = TmpInst->getOperand(I).getReg();
2595     }
2596     return true;
2597   }
2598   // If we didn't end in a G_IMPLICIT_DEF, bail out.
2599   return TmpInst->getOpcode() == TargetOpcode::G_IMPLICIT_DEF;
2600 }
2601 
2602 void CombinerHelper::applyCombineInsertVecElts(
2603     MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) {
2604   Builder.setInstr(MI);
2605   Register UndefReg;
2606   auto GetUndef = [&]() {
2607     if (UndefReg)
2608       return UndefReg;
2609     LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
2610     UndefReg = Builder.buildUndef(DstTy.getScalarType()).getReg(0);
2611     return UndefReg;
2612   };
2613   for (unsigned I = 0; I < MatchInfo.size(); ++I) {
2614     if (!MatchInfo[I])
2615       MatchInfo[I] = GetUndef();
2616   }
2617   Builder.buildBuildVector(MI.getOperand(0).getReg(), MatchInfo);
2618   MI.eraseFromParent();
2619 }
2620 
2621 void CombinerHelper::applySimplifyAddToSub(
2622     MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
2623   Builder.setInstr(MI);
2624   Register SubLHS, SubRHS;
2625   std::tie(SubLHS, SubRHS) = MatchInfo;
2626   Builder.buildSub(MI.getOperand(0).getReg(), SubLHS, SubRHS);
2627   MI.eraseFromParent();
2628 }
2629 
2630 bool CombinerHelper::matchHoistLogicOpWithSameOpcodeHands(
2631     MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) {
2632   // Matches: logic (hand x, ...), (hand y, ...) -> hand (logic x, y), ...
2633   //
2634   // Creates the new hand + logic instruction (but does not insert them.)
2635   //
2636   // On success, MatchInfo is populated with the new instructions. These are
2637   // inserted in applyHoistLogicOpWithSameOpcodeHands.
2638   unsigned LogicOpcode = MI.getOpcode();
2639   assert(LogicOpcode == TargetOpcode::G_AND ||
2640          LogicOpcode == TargetOpcode::G_OR ||
2641          LogicOpcode == TargetOpcode::G_XOR);
2642   MachineIRBuilder MIB(MI);
2643   Register Dst = MI.getOperand(0).getReg();
2644   Register LHSReg = MI.getOperand(1).getReg();
2645   Register RHSReg = MI.getOperand(2).getReg();
2646 
2647   // Don't recompute anything.
2648   if (!MRI.hasOneNonDBGUse(LHSReg) || !MRI.hasOneNonDBGUse(RHSReg))
2649     return false;
2650 
2651   // Make sure we have (hand x, ...), (hand y, ...)
2652   MachineInstr *LeftHandInst = getDefIgnoringCopies(LHSReg, MRI);
2653   MachineInstr *RightHandInst = getDefIgnoringCopies(RHSReg, MRI);
2654   if (!LeftHandInst || !RightHandInst)
2655     return false;
2656   unsigned HandOpcode = LeftHandInst->getOpcode();
2657   if (HandOpcode != RightHandInst->getOpcode())
2658     return false;
2659   if (!LeftHandInst->getOperand(1).isReg() ||
2660       !RightHandInst->getOperand(1).isReg())
2661     return false;
2662 
2663   // Make sure the types match up, and if we're doing this post-legalization,
2664   // we end up with legal types.
2665   Register X = LeftHandInst->getOperand(1).getReg();
2666   Register Y = RightHandInst->getOperand(1).getReg();
2667   LLT XTy = MRI.getType(X);
2668   LLT YTy = MRI.getType(Y);
2669   if (XTy != YTy)
2670     return false;
2671   if (!isLegalOrBeforeLegalizer({LogicOpcode, {XTy, YTy}}))
2672     return false;
2673 
2674   // Optional extra source register.
2675   Register ExtraHandOpSrcReg;
2676   switch (HandOpcode) {
2677   default:
2678     return false;
2679   case TargetOpcode::G_ANYEXT:
2680   case TargetOpcode::G_SEXT:
2681   case TargetOpcode::G_ZEXT: {
2682     // Match: logic (ext X), (ext Y) --> ext (logic X, Y)
2683     break;
2684   }
2685   case TargetOpcode::G_AND:
2686   case TargetOpcode::G_ASHR:
2687   case TargetOpcode::G_LSHR:
2688   case TargetOpcode::G_SHL: {
2689     // Match: logic (binop x, z), (binop y, z) -> binop (logic x, y), z
2690     MachineOperand &ZOp = LeftHandInst->getOperand(2);
2691     if (!matchEqualDefs(ZOp, RightHandInst->getOperand(2)))
2692       return false;
2693     ExtraHandOpSrcReg = ZOp.getReg();
2694     break;
2695   }
2696   }
2697 
2698   // Record the steps to build the new instructions.
2699   //
2700   // Steps to build (logic x, y)
2701   auto NewLogicDst = MRI.createGenericVirtualRegister(XTy);
2702   OperandBuildSteps LogicBuildSteps = {
2703       [=](MachineInstrBuilder &MIB) { MIB.addDef(NewLogicDst); },
2704       [=](MachineInstrBuilder &MIB) { MIB.addReg(X); },
2705       [=](MachineInstrBuilder &MIB) { MIB.addReg(Y); }};
2706   InstructionBuildSteps LogicSteps(LogicOpcode, LogicBuildSteps);
2707 
2708   // Steps to build hand (logic x, y), ...z
2709   OperandBuildSteps HandBuildSteps = {
2710       [=](MachineInstrBuilder &MIB) { MIB.addDef(Dst); },
2711       [=](MachineInstrBuilder &MIB) { MIB.addReg(NewLogicDst); }};
2712   if (ExtraHandOpSrcReg.isValid())
2713     HandBuildSteps.push_back(
2714         [=](MachineInstrBuilder &MIB) { MIB.addReg(ExtraHandOpSrcReg); });
2715   InstructionBuildSteps HandSteps(HandOpcode, HandBuildSteps);
2716 
2717   MatchInfo = InstructionStepsMatchInfo({LogicSteps, HandSteps});
2718   return true;
2719 }
2720 
2721 void CombinerHelper::applyBuildInstructionSteps(
2722     MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) {
2723   assert(MatchInfo.InstrsToBuild.size() &&
2724          "Expected at least one instr to build?");
2725   Builder.setInstr(MI);
2726   for (auto &InstrToBuild : MatchInfo.InstrsToBuild) {
2727     assert(InstrToBuild.Opcode && "Expected a valid opcode?");
2728     assert(InstrToBuild.OperandFns.size() && "Expected at least one operand?");
2729     MachineInstrBuilder Instr = Builder.buildInstr(InstrToBuild.Opcode);
2730     for (auto &OperandFn : InstrToBuild.OperandFns)
2731       OperandFn(Instr);
2732   }
2733   MI.eraseFromParent();
2734 }
2735 
2736 bool CombinerHelper::matchAshrShlToSextInreg(
2737     MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
2738   assert(MI.getOpcode() == TargetOpcode::G_ASHR);
2739   int64_t ShlCst, AshrCst;
2740   Register Src;
2741   // FIXME: detect splat constant vectors.
2742   if (!mi_match(MI.getOperand(0).getReg(), MRI,
2743                 m_GAShr(m_GShl(m_Reg(Src), m_ICst(ShlCst)), m_ICst(AshrCst))))
2744     return false;
2745   if (ShlCst != AshrCst)
2746     return false;
2747   if (!isLegalOrBeforeLegalizer(
2748           {TargetOpcode::G_SEXT_INREG, {MRI.getType(Src)}}))
2749     return false;
2750   MatchInfo = std::make_tuple(Src, ShlCst);
2751   return true;
2752 }
2753 
2754 void CombinerHelper::applyAshShlToSextInreg(
2755     MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
2756   assert(MI.getOpcode() == TargetOpcode::G_ASHR);
2757   Register Src;
2758   int64_t ShiftAmt;
2759   std::tie(Src, ShiftAmt) = MatchInfo;
2760   unsigned Size = MRI.getType(Src).getScalarSizeInBits();
2761   Builder.setInstrAndDebugLoc(MI);
2762   Builder.buildSExtInReg(MI.getOperand(0).getReg(), Src, Size - ShiftAmt);
2763   MI.eraseFromParent();
2764 }
2765 
2766 /// and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
2767 bool CombinerHelper::matchOverlappingAnd(
2768     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
2769   assert(MI.getOpcode() == TargetOpcode::G_AND);
2770 
2771   Register Dst = MI.getOperand(0).getReg();
2772   LLT Ty = MRI.getType(Dst);
2773 
2774   Register R;
2775   int64_t C1;
2776   int64_t C2;
2777   if (!mi_match(
2778           Dst, MRI,
2779           m_GAnd(m_GAnd(m_Reg(R), m_ICst(C1)), m_ICst(C2))))
2780     return false;
2781 
2782   MatchInfo = [=](MachineIRBuilder &B) {
2783     if (C1 & C2) {
2784       B.buildAnd(Dst, R, B.buildConstant(Ty, C1 & C2));
2785       return;
2786     }
2787     auto Zero = B.buildConstant(Ty, 0);
2788     replaceRegWith(MRI, Dst, Zero->getOperand(0).getReg());
2789   };
2790   return true;
2791 }
2792 
2793 bool CombinerHelper::matchRedundantAnd(MachineInstr &MI,
2794                                        Register &Replacement) {
2795   // Given
2796   //
2797   // %y:_(sN) = G_SOMETHING
2798   // %x:_(sN) = G_SOMETHING
2799   // %res:_(sN) = G_AND %x, %y
2800   //
2801   // Eliminate the G_AND when it is known that x & y == x or x & y == y.
2802   //
2803   // Patterns like this can appear as a result of legalization. E.g.
2804   //
2805   // %cmp:_(s32) = G_ICMP intpred(pred), %x(s32), %y
2806   // %one:_(s32) = G_CONSTANT i32 1
2807   // %and:_(s32) = G_AND %cmp, %one
2808   //
2809   // In this case, G_ICMP only produces a single bit, so x & 1 == x.
2810   assert(MI.getOpcode() == TargetOpcode::G_AND);
2811   if (!KB)
2812     return false;
2813 
2814   Register AndDst = MI.getOperand(0).getReg();
2815   LLT DstTy = MRI.getType(AndDst);
2816 
2817   // FIXME: This should be removed once GISelKnownBits supports vectors.
2818   if (DstTy.isVector())
2819     return false;
2820 
2821   Register LHS = MI.getOperand(1).getReg();
2822   Register RHS = MI.getOperand(2).getReg();
2823   KnownBits LHSBits = KB->getKnownBits(LHS);
2824   KnownBits RHSBits = KB->getKnownBits(RHS);
2825 
2826   // Check that x & Mask == x.
2827   // x & 1 == x, always
2828   // x & 0 == x, only if x is also 0
2829   // Meaning Mask has no effect if every bit is either one in Mask or zero in x.
2830   //
2831   // Check if we can replace AndDst with the LHS of the G_AND
2832   if (canReplaceReg(AndDst, LHS, MRI) &&
2833       (LHSBits.Zero | RHSBits.One).isAllOnes()) {
2834     Replacement = LHS;
2835     return true;
2836   }
2837 
2838   // Check if we can replace AndDst with the RHS of the G_AND
2839   if (canReplaceReg(AndDst, RHS, MRI) &&
2840       (LHSBits.One | RHSBits.Zero).isAllOnes()) {
2841     Replacement = RHS;
2842     return true;
2843   }
2844 
2845   return false;
2846 }
2847 
2848 bool CombinerHelper::matchRedundantOr(MachineInstr &MI, Register &Replacement) {
2849   // Given
2850   //
2851   // %y:_(sN) = G_SOMETHING
2852   // %x:_(sN) = G_SOMETHING
2853   // %res:_(sN) = G_OR %x, %y
2854   //
2855   // Eliminate the G_OR when it is known that x | y == x or x | y == y.
2856   assert(MI.getOpcode() == TargetOpcode::G_OR);
2857   if (!KB)
2858     return false;
2859 
2860   Register OrDst = MI.getOperand(0).getReg();
2861   LLT DstTy = MRI.getType(OrDst);
2862 
2863   // FIXME: This should be removed once GISelKnownBits supports vectors.
2864   if (DstTy.isVector())
2865     return false;
2866 
2867   Register LHS = MI.getOperand(1).getReg();
2868   Register RHS = MI.getOperand(2).getReg();
2869   KnownBits LHSBits = KB->getKnownBits(LHS);
2870   KnownBits RHSBits = KB->getKnownBits(RHS);
2871 
2872   // Check that x | Mask == x.
2873   // x | 0 == x, always
2874   // x | 1 == x, only if x is also 1
2875   // Meaning Mask has no effect if every bit is either zero in Mask or one in x.
2876   //
2877   // Check if we can replace OrDst with the LHS of the G_OR
2878   if (canReplaceReg(OrDst, LHS, MRI) &&
2879       (LHSBits.One | RHSBits.Zero).isAllOnes()) {
2880     Replacement = LHS;
2881     return true;
2882   }
2883 
2884   // Check if we can replace OrDst with the RHS of the G_OR
2885   if (canReplaceReg(OrDst, RHS, MRI) &&
2886       (LHSBits.Zero | RHSBits.One).isAllOnes()) {
2887     Replacement = RHS;
2888     return true;
2889   }
2890 
2891   return false;
2892 }
2893 
2894 bool CombinerHelper::matchRedundantSExtInReg(MachineInstr &MI) {
2895   // If the input is already sign extended, just drop the extension.
2896   Register Src = MI.getOperand(1).getReg();
2897   unsigned ExtBits = MI.getOperand(2).getImm();
2898   unsigned TypeSize = MRI.getType(Src).getScalarSizeInBits();
2899   return KB->computeNumSignBits(Src) >= (TypeSize - ExtBits + 1);
2900 }
2901 
2902 static bool isConstValidTrue(const TargetLowering &TLI, unsigned ScalarSizeBits,
2903                              int64_t Cst, bool IsVector, bool IsFP) {
2904   // For i1, Cst will always be -1 regardless of boolean contents.
2905   return (ScalarSizeBits == 1 && Cst == -1) ||
2906          isConstTrueVal(TLI, Cst, IsVector, IsFP);
2907 }
2908 
2909 bool CombinerHelper::matchNotCmp(MachineInstr &MI,
2910                                  SmallVectorImpl<Register> &RegsToNegate) {
2911   assert(MI.getOpcode() == TargetOpcode::G_XOR);
2912   LLT Ty = MRI.getType(MI.getOperand(0).getReg());
2913   const auto &TLI = *Builder.getMF().getSubtarget().getTargetLowering();
2914   Register XorSrc;
2915   Register CstReg;
2916   // We match xor(src, true) here.
2917   if (!mi_match(MI.getOperand(0).getReg(), MRI,
2918                 m_GXor(m_Reg(XorSrc), m_Reg(CstReg))))
2919     return false;
2920 
2921   if (!MRI.hasOneNonDBGUse(XorSrc))
2922     return false;
2923 
2924   // Check that XorSrc is the root of a tree of comparisons combined with ANDs
2925   // and ORs. The suffix of RegsToNegate starting from index I is used a work
2926   // list of tree nodes to visit.
2927   RegsToNegate.push_back(XorSrc);
2928   // Remember whether the comparisons are all integer or all floating point.
2929   bool IsInt = false;
2930   bool IsFP = false;
2931   for (unsigned I = 0; I < RegsToNegate.size(); ++I) {
2932     Register Reg = RegsToNegate[I];
2933     if (!MRI.hasOneNonDBGUse(Reg))
2934       return false;
2935     MachineInstr *Def = MRI.getVRegDef(Reg);
2936     switch (Def->getOpcode()) {
2937     default:
2938       // Don't match if the tree contains anything other than ANDs, ORs and
2939       // comparisons.
2940       return false;
2941     case TargetOpcode::G_ICMP:
2942       if (IsFP)
2943         return false;
2944       IsInt = true;
2945       // When we apply the combine we will invert the predicate.
2946       break;
2947     case TargetOpcode::G_FCMP:
2948       if (IsInt)
2949         return false;
2950       IsFP = true;
2951       // When we apply the combine we will invert the predicate.
2952       break;
2953     case TargetOpcode::G_AND:
2954     case TargetOpcode::G_OR:
2955       // Implement De Morgan's laws:
2956       // ~(x & y) -> ~x | ~y
2957       // ~(x | y) -> ~x & ~y
2958       // When we apply the combine we will change the opcode and recursively
2959       // negate the operands.
2960       RegsToNegate.push_back(Def->getOperand(1).getReg());
2961       RegsToNegate.push_back(Def->getOperand(2).getReg());
2962       break;
2963     }
2964   }
2965 
2966   // Now we know whether the comparisons are integer or floating point, check
2967   // the constant in the xor.
2968   int64_t Cst;
2969   if (Ty.isVector()) {
2970     MachineInstr *CstDef = MRI.getVRegDef(CstReg);
2971     auto MaybeCst = getIConstantSplatSExtVal(*CstDef, MRI);
2972     if (!MaybeCst)
2973       return false;
2974     if (!isConstValidTrue(TLI, Ty.getScalarSizeInBits(), *MaybeCst, true, IsFP))
2975       return false;
2976   } else {
2977     if (!mi_match(CstReg, MRI, m_ICst(Cst)))
2978       return false;
2979     if (!isConstValidTrue(TLI, Ty.getSizeInBits(), Cst, false, IsFP))
2980       return false;
2981   }
2982 
2983   return true;
2984 }
2985 
2986 void CombinerHelper::applyNotCmp(MachineInstr &MI,
2987                                  SmallVectorImpl<Register> &RegsToNegate) {
2988   for (Register Reg : RegsToNegate) {
2989     MachineInstr *Def = MRI.getVRegDef(Reg);
2990     Observer.changingInstr(*Def);
2991     // For each comparison, invert the opcode. For each AND and OR, change the
2992     // opcode.
2993     switch (Def->getOpcode()) {
2994     default:
2995       llvm_unreachable("Unexpected opcode");
2996     case TargetOpcode::G_ICMP:
2997     case TargetOpcode::G_FCMP: {
2998       MachineOperand &PredOp = Def->getOperand(1);
2999       CmpInst::Predicate NewP = CmpInst::getInversePredicate(
3000           (CmpInst::Predicate)PredOp.getPredicate());
3001       PredOp.setPredicate(NewP);
3002       break;
3003     }
3004     case TargetOpcode::G_AND:
3005       Def->setDesc(Builder.getTII().get(TargetOpcode::G_OR));
3006       break;
3007     case TargetOpcode::G_OR:
3008       Def->setDesc(Builder.getTII().get(TargetOpcode::G_AND));
3009       break;
3010     }
3011     Observer.changedInstr(*Def);
3012   }
3013 
3014   replaceRegWith(MRI, MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
3015   MI.eraseFromParent();
3016 }
3017 
3018 bool CombinerHelper::matchXorOfAndWithSameReg(
3019     MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
3020   // Match (xor (and x, y), y) (or any of its commuted cases)
3021   assert(MI.getOpcode() == TargetOpcode::G_XOR);
3022   Register &X = MatchInfo.first;
3023   Register &Y = MatchInfo.second;
3024   Register AndReg = MI.getOperand(1).getReg();
3025   Register SharedReg = MI.getOperand(2).getReg();
3026 
3027   // Find a G_AND on either side of the G_XOR.
3028   // Look for one of
3029   //
3030   // (xor (and x, y), SharedReg)
3031   // (xor SharedReg, (and x, y))
3032   if (!mi_match(AndReg, MRI, m_GAnd(m_Reg(X), m_Reg(Y)))) {
3033     std::swap(AndReg, SharedReg);
3034     if (!mi_match(AndReg, MRI, m_GAnd(m_Reg(X), m_Reg(Y))))
3035       return false;
3036   }
3037 
3038   // Only do this if we'll eliminate the G_AND.
3039   if (!MRI.hasOneNonDBGUse(AndReg))
3040     return false;
3041 
3042   // We can combine if SharedReg is the same as either the LHS or RHS of the
3043   // G_AND.
3044   if (Y != SharedReg)
3045     std::swap(X, Y);
3046   return Y == SharedReg;
3047 }
3048 
3049 void CombinerHelper::applyXorOfAndWithSameReg(
3050     MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
3051   // Fold (xor (and x, y), y) -> (and (not x), y)
3052   Builder.setInstrAndDebugLoc(MI);
3053   Register X, Y;
3054   std::tie(X, Y) = MatchInfo;
3055   auto Not = Builder.buildNot(MRI.getType(X), X);
3056   Observer.changingInstr(MI);
3057   MI.setDesc(Builder.getTII().get(TargetOpcode::G_AND));
3058   MI.getOperand(1).setReg(Not->getOperand(0).getReg());
3059   MI.getOperand(2).setReg(Y);
3060   Observer.changedInstr(MI);
3061 }
3062 
3063 bool CombinerHelper::matchPtrAddZero(MachineInstr &MI) {
3064   auto &PtrAdd = cast<GPtrAdd>(MI);
3065   Register DstReg = PtrAdd.getReg(0);
3066   LLT Ty = MRI.getType(DstReg);
3067   const DataLayout &DL = Builder.getMF().getDataLayout();
3068 
3069   if (DL.isNonIntegralAddressSpace(Ty.getScalarType().getAddressSpace()))
3070     return false;
3071 
3072   if (Ty.isPointer()) {
3073     auto ConstVal = getIConstantVRegVal(PtrAdd.getBaseReg(), MRI);
3074     return ConstVal && *ConstVal == 0;
3075   }
3076 
3077   assert(Ty.isVector() && "Expecting a vector type");
3078   const MachineInstr *VecMI = MRI.getVRegDef(PtrAdd.getBaseReg());
3079   return isBuildVectorAllZeros(*VecMI, MRI);
3080 }
3081 
3082 void CombinerHelper::applyPtrAddZero(MachineInstr &MI) {
3083   auto &PtrAdd = cast<GPtrAdd>(MI);
3084   Builder.setInstrAndDebugLoc(PtrAdd);
3085   Builder.buildIntToPtr(PtrAdd.getReg(0), PtrAdd.getOffsetReg());
3086   PtrAdd.eraseFromParent();
3087 }
3088 
3089 /// The second source operand is known to be a power of 2.
3090 void CombinerHelper::applySimplifyURemByPow2(MachineInstr &MI) {
3091   Register DstReg = MI.getOperand(0).getReg();
3092   Register Src0 = MI.getOperand(1).getReg();
3093   Register Pow2Src1 = MI.getOperand(2).getReg();
3094   LLT Ty = MRI.getType(DstReg);
3095   Builder.setInstrAndDebugLoc(MI);
3096 
3097   // Fold (urem x, pow2) -> (and x, pow2-1)
3098   auto NegOne = Builder.buildConstant(Ty, -1);
3099   auto Add = Builder.buildAdd(Ty, Pow2Src1, NegOne);
3100   Builder.buildAnd(DstReg, Src0, Add);
3101   MI.eraseFromParent();
3102 }
3103 
3104 bool CombinerHelper::matchFoldBinOpIntoSelect(MachineInstr &MI,
3105                                               unsigned &SelectOpNo) {
3106   Register LHS = MI.getOperand(1).getReg();
3107   Register RHS = MI.getOperand(2).getReg();
3108 
3109   Register OtherOperandReg = RHS;
3110   SelectOpNo = 1;
3111   MachineInstr *Select = MRI.getVRegDef(LHS);
3112 
3113   // Don't do this unless the old select is going away. We want to eliminate the
3114   // binary operator, not replace a binop with a select.
3115   if (Select->getOpcode() != TargetOpcode::G_SELECT ||
3116       !MRI.hasOneNonDBGUse(LHS)) {
3117     OtherOperandReg = LHS;
3118     SelectOpNo = 2;
3119     Select = MRI.getVRegDef(RHS);
3120     if (Select->getOpcode() != TargetOpcode::G_SELECT ||
3121         !MRI.hasOneNonDBGUse(RHS))
3122       return false;
3123   }
3124 
3125   MachineInstr *SelectLHS = MRI.getVRegDef(Select->getOperand(2).getReg());
3126   MachineInstr *SelectRHS = MRI.getVRegDef(Select->getOperand(3).getReg());
3127 
3128   if (!isConstantOrConstantVector(*SelectLHS, MRI,
3129                                   /*AllowFP*/ true,
3130                                   /*AllowOpaqueConstants*/ false))
3131     return false;
3132   if (!isConstantOrConstantVector(*SelectRHS, MRI,
3133                                   /*AllowFP*/ true,
3134                                   /*AllowOpaqueConstants*/ false))
3135     return false;
3136 
3137   unsigned BinOpcode = MI.getOpcode();
3138 
3139   // We know know one of the operands is a select of constants. Now verify that
3140   // the other binary operator operand is either a constant, or we can handle a
3141   // variable.
3142   bool CanFoldNonConst =
3143       (BinOpcode == TargetOpcode::G_AND || BinOpcode == TargetOpcode::G_OR) &&
3144       (isNullOrNullSplat(*SelectLHS, MRI) ||
3145        isAllOnesOrAllOnesSplat(*SelectLHS, MRI)) &&
3146       (isNullOrNullSplat(*SelectRHS, MRI) ||
3147        isAllOnesOrAllOnesSplat(*SelectRHS, MRI));
3148   if (CanFoldNonConst)
3149     return true;
3150 
3151   return isConstantOrConstantVector(*MRI.getVRegDef(OtherOperandReg), MRI,
3152                                     /*AllowFP*/ true,
3153                                     /*AllowOpaqueConstants*/ false);
3154 }
3155 
3156 /// \p SelectOperand is the operand in binary operator \p MI that is the select
3157 /// to fold.
3158 bool CombinerHelper::applyFoldBinOpIntoSelect(MachineInstr &MI,
3159                                               const unsigned &SelectOperand) {
3160   Builder.setInstrAndDebugLoc(MI);
3161 
3162   Register Dst = MI.getOperand(0).getReg();
3163   Register LHS = MI.getOperand(1).getReg();
3164   Register RHS = MI.getOperand(2).getReg();
3165   MachineInstr *Select = MRI.getVRegDef(MI.getOperand(SelectOperand).getReg());
3166 
3167   Register SelectCond = Select->getOperand(1).getReg();
3168   Register SelectTrue = Select->getOperand(2).getReg();
3169   Register SelectFalse = Select->getOperand(3).getReg();
3170 
3171   LLT Ty = MRI.getType(Dst);
3172   unsigned BinOpcode = MI.getOpcode();
3173 
3174   Register FoldTrue, FoldFalse;
3175 
3176   // We have a select-of-constants followed by a binary operator with a
3177   // constant. Eliminate the binop by pulling the constant math into the select.
3178   // Example: add (select Cond, CT, CF), CBO --> select Cond, CT + CBO, CF + CBO
3179   if (SelectOperand == 1) {
3180     // TODO: SelectionDAG verifies this actually constant folds before
3181     // committing to the combine.
3182 
3183     FoldTrue = Builder.buildInstr(BinOpcode, {Ty}, {SelectTrue, RHS}).getReg(0);
3184     FoldFalse =
3185         Builder.buildInstr(BinOpcode, {Ty}, {SelectFalse, RHS}).getReg(0);
3186   } else {
3187     FoldTrue = Builder.buildInstr(BinOpcode, {Ty}, {LHS, SelectTrue}).getReg(0);
3188     FoldFalse =
3189         Builder.buildInstr(BinOpcode, {Ty}, {LHS, SelectFalse}).getReg(0);
3190   }
3191 
3192   Builder.buildSelect(Dst, SelectCond, FoldTrue, FoldFalse, MI.getFlags());
3193   Observer.erasingInstr(*Select);
3194   Select->eraseFromParent();
3195   MI.eraseFromParent();
3196 
3197   return true;
3198 }
3199 
3200 Optional<SmallVector<Register, 8>>
3201 CombinerHelper::findCandidatesForLoadOrCombine(const MachineInstr *Root) const {
3202   assert(Root->getOpcode() == TargetOpcode::G_OR && "Expected G_OR only!");
3203   // We want to detect if Root is part of a tree which represents a bunch
3204   // of loads being merged into a larger load. We'll try to recognize patterns
3205   // like, for example:
3206   //
3207   //  Reg   Reg
3208   //   \    /
3209   //    OR_1   Reg
3210   //     \    /
3211   //      OR_2
3212   //        \     Reg
3213   //         .. /
3214   //        Root
3215   //
3216   //  Reg   Reg   Reg   Reg
3217   //     \ /       \   /
3218   //     OR_1      OR_2
3219   //       \       /
3220   //        \    /
3221   //         ...
3222   //         Root
3223   //
3224   // Each "Reg" may have been produced by a load + some arithmetic. This
3225   // function will save each of them.
3226   SmallVector<Register, 8> RegsToVisit;
3227   SmallVector<const MachineInstr *, 7> Ors = {Root};
3228 
3229   // In the "worst" case, we're dealing with a load for each byte. So, there
3230   // are at most #bytes - 1 ORs.
3231   const unsigned MaxIter =
3232       MRI.getType(Root->getOperand(0).getReg()).getSizeInBytes() - 1;
3233   for (unsigned Iter = 0; Iter < MaxIter; ++Iter) {
3234     if (Ors.empty())
3235       break;
3236     const MachineInstr *Curr = Ors.pop_back_val();
3237     Register OrLHS = Curr->getOperand(1).getReg();
3238     Register OrRHS = Curr->getOperand(2).getReg();
3239 
3240     // In the combine, we want to elimate the entire tree.
3241     if (!MRI.hasOneNonDBGUse(OrLHS) || !MRI.hasOneNonDBGUse(OrRHS))
3242       return None;
3243 
3244     // If it's a G_OR, save it and continue to walk. If it's not, then it's
3245     // something that may be a load + arithmetic.
3246     if (const MachineInstr *Or = getOpcodeDef(TargetOpcode::G_OR, OrLHS, MRI))
3247       Ors.push_back(Or);
3248     else
3249       RegsToVisit.push_back(OrLHS);
3250     if (const MachineInstr *Or = getOpcodeDef(TargetOpcode::G_OR, OrRHS, MRI))
3251       Ors.push_back(Or);
3252     else
3253       RegsToVisit.push_back(OrRHS);
3254   }
3255 
3256   // We're going to try and merge each register into a wider power-of-2 type,
3257   // so we ought to have an even number of registers.
3258   if (RegsToVisit.empty() || RegsToVisit.size() % 2 != 0)
3259     return None;
3260   return RegsToVisit;
3261 }
3262 
3263 /// Helper function for findLoadOffsetsForLoadOrCombine.
3264 ///
3265 /// Check if \p Reg is the result of loading a \p MemSizeInBits wide value,
3266 /// and then moving that value into a specific byte offset.
3267 ///
3268 /// e.g. x[i] << 24
3269 ///
3270 /// \returns The load instruction and the byte offset it is moved into.
3271 static Optional<std::pair<GZExtLoad *, int64_t>>
3272 matchLoadAndBytePosition(Register Reg, unsigned MemSizeInBits,
3273                          const MachineRegisterInfo &MRI) {
3274   assert(MRI.hasOneNonDBGUse(Reg) &&
3275          "Expected Reg to only have one non-debug use?");
3276   Register MaybeLoad;
3277   int64_t Shift;
3278   if (!mi_match(Reg, MRI,
3279                 m_OneNonDBGUse(m_GShl(m_Reg(MaybeLoad), m_ICst(Shift))))) {
3280     Shift = 0;
3281     MaybeLoad = Reg;
3282   }
3283 
3284   if (Shift % MemSizeInBits != 0)
3285     return None;
3286 
3287   // TODO: Handle other types of loads.
3288   auto *Load = getOpcodeDef<GZExtLoad>(MaybeLoad, MRI);
3289   if (!Load)
3290     return None;
3291 
3292   if (!Load->isUnordered() || Load->getMemSizeInBits() != MemSizeInBits)
3293     return None;
3294 
3295   return std::make_pair(Load, Shift / MemSizeInBits);
3296 }
3297 
3298 Optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
3299 CombinerHelper::findLoadOffsetsForLoadOrCombine(
3300     SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
3301     const SmallVector<Register, 8> &RegsToVisit, const unsigned MemSizeInBits) {
3302 
3303   // Each load found for the pattern. There should be one for each RegsToVisit.
3304   SmallSetVector<const MachineInstr *, 8> Loads;
3305 
3306   // The lowest index used in any load. (The lowest "i" for each x[i].)
3307   int64_t LowestIdx = INT64_MAX;
3308 
3309   // The load which uses the lowest index.
3310   GZExtLoad *LowestIdxLoad = nullptr;
3311 
3312   // Keeps track of the load indices we see. We shouldn't see any indices twice.
3313   SmallSet<int64_t, 8> SeenIdx;
3314 
3315   // Ensure each load is in the same MBB.
3316   // TODO: Support multiple MachineBasicBlocks.
3317   MachineBasicBlock *MBB = nullptr;
3318   const MachineMemOperand *MMO = nullptr;
3319 
3320   // Earliest instruction-order load in the pattern.
3321   GZExtLoad *EarliestLoad = nullptr;
3322 
3323   // Latest instruction-order load in the pattern.
3324   GZExtLoad *LatestLoad = nullptr;
3325 
3326   // Base pointer which every load should share.
3327   Register BasePtr;
3328 
3329   // We want to find a load for each register. Each load should have some
3330   // appropriate bit twiddling arithmetic. During this loop, we will also keep
3331   // track of the load which uses the lowest index. Later, we will check if we
3332   // can use its pointer in the final, combined load.
3333   for (auto Reg : RegsToVisit) {
3334     // Find the load, and find the position that it will end up in (e.g. a
3335     // shifted) value.
3336     auto LoadAndPos = matchLoadAndBytePosition(Reg, MemSizeInBits, MRI);
3337     if (!LoadAndPos)
3338       return None;
3339     GZExtLoad *Load;
3340     int64_t DstPos;
3341     std::tie(Load, DstPos) = *LoadAndPos;
3342 
3343     // TODO: Handle multiple MachineBasicBlocks. Currently not handled because
3344     // it is difficult to check for stores/calls/etc between loads.
3345     MachineBasicBlock *LoadMBB = Load->getParent();
3346     if (!MBB)
3347       MBB = LoadMBB;
3348     if (LoadMBB != MBB)
3349       return None;
3350 
3351     // Make sure that the MachineMemOperands of every seen load are compatible.
3352     auto &LoadMMO = Load->getMMO();
3353     if (!MMO)
3354       MMO = &LoadMMO;
3355     if (MMO->getAddrSpace() != LoadMMO.getAddrSpace())
3356       return None;
3357 
3358     // Find out what the base pointer and index for the load is.
3359     Register LoadPtr;
3360     int64_t Idx;
3361     if (!mi_match(Load->getOperand(1).getReg(), MRI,
3362                   m_GPtrAdd(m_Reg(LoadPtr), m_ICst(Idx)))) {
3363       LoadPtr = Load->getOperand(1).getReg();
3364       Idx = 0;
3365     }
3366 
3367     // Don't combine things like a[i], a[i] -> a bigger load.
3368     if (!SeenIdx.insert(Idx).second)
3369       return None;
3370 
3371     // Every load must share the same base pointer; don't combine things like:
3372     //
3373     // a[i], b[i + 1] -> a bigger load.
3374     if (!BasePtr.isValid())
3375       BasePtr = LoadPtr;
3376     if (BasePtr != LoadPtr)
3377       return None;
3378 
3379     if (Idx < LowestIdx) {
3380       LowestIdx = Idx;
3381       LowestIdxLoad = Load;
3382     }
3383 
3384     // Keep track of the byte offset that this load ends up at. If we have seen
3385     // the byte offset, then stop here. We do not want to combine:
3386     //
3387     // a[i] << 16, a[i + k] << 16 -> a bigger load.
3388     if (!MemOffset2Idx.try_emplace(DstPos, Idx).second)
3389       return None;
3390     Loads.insert(Load);
3391 
3392     // Keep track of the position of the earliest/latest loads in the pattern.
3393     // We will check that there are no load fold barriers between them later
3394     // on.
3395     //
3396     // FIXME: Is there a better way to check for load fold barriers?
3397     if (!EarliestLoad || dominates(*Load, *EarliestLoad))
3398       EarliestLoad = Load;
3399     if (!LatestLoad || dominates(*LatestLoad, *Load))
3400       LatestLoad = Load;
3401   }
3402 
3403   // We found a load for each register. Let's check if each load satisfies the
3404   // pattern.
3405   assert(Loads.size() == RegsToVisit.size() &&
3406          "Expected to find a load for each register?");
3407   assert(EarliestLoad != LatestLoad && EarliestLoad &&
3408          LatestLoad && "Expected at least two loads?");
3409 
3410   // Check if there are any stores, calls, etc. between any of the loads. If
3411   // there are, then we can't safely perform the combine.
3412   //
3413   // MaxIter is chosen based off the (worst case) number of iterations it
3414   // typically takes to succeed in the LLVM test suite plus some padding.
3415   //
3416   // FIXME: Is there a better way to check for load fold barriers?
3417   const unsigned MaxIter = 20;
3418   unsigned Iter = 0;
3419   for (const auto &MI : instructionsWithoutDebug(EarliestLoad->getIterator(),
3420                                                  LatestLoad->getIterator())) {
3421     if (Loads.count(&MI))
3422       continue;
3423     if (MI.isLoadFoldBarrier())
3424       return None;
3425     if (Iter++ == MaxIter)
3426       return None;
3427   }
3428 
3429   return std::make_tuple(LowestIdxLoad, LowestIdx, LatestLoad);
3430 }
3431 
3432 bool CombinerHelper::matchLoadOrCombine(
3433     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
3434   assert(MI.getOpcode() == TargetOpcode::G_OR);
3435   MachineFunction &MF = *MI.getMF();
3436   // Assuming a little-endian target, transform:
3437   //  s8 *a = ...
3438   //  s32 val = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24)
3439   // =>
3440   //  s32 val = *((i32)a)
3441   //
3442   //  s8 *a = ...
3443   //  s32 val = (a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3]
3444   // =>
3445   //  s32 val = BSWAP(*((s32)a))
3446   Register Dst = MI.getOperand(0).getReg();
3447   LLT Ty = MRI.getType(Dst);
3448   if (Ty.isVector())
3449     return false;
3450 
3451   // We need to combine at least two loads into this type. Since the smallest
3452   // possible load is into a byte, we need at least a 16-bit wide type.
3453   const unsigned WideMemSizeInBits = Ty.getSizeInBits();
3454   if (WideMemSizeInBits < 16 || WideMemSizeInBits % 8 != 0)
3455     return false;
3456 
3457   // Match a collection of non-OR instructions in the pattern.
3458   auto RegsToVisit = findCandidatesForLoadOrCombine(&MI);
3459   if (!RegsToVisit)
3460     return false;
3461 
3462   // We have a collection of non-OR instructions. Figure out how wide each of
3463   // the small loads should be based off of the number of potential loads we
3464   // found.
3465   const unsigned NarrowMemSizeInBits = WideMemSizeInBits / RegsToVisit->size();
3466   if (NarrowMemSizeInBits % 8 != 0)
3467     return false;
3468 
3469   // Check if each register feeding into each OR is a load from the same
3470   // base pointer + some arithmetic.
3471   //
3472   // e.g. a[0], a[1] << 8, a[2] << 16, etc.
3473   //
3474   // Also verify that each of these ends up putting a[i] into the same memory
3475   // offset as a load into a wide type would.
3476   SmallDenseMap<int64_t, int64_t, 8> MemOffset2Idx;
3477   GZExtLoad *LowestIdxLoad, *LatestLoad;
3478   int64_t LowestIdx;
3479   auto MaybeLoadInfo = findLoadOffsetsForLoadOrCombine(
3480       MemOffset2Idx, *RegsToVisit, NarrowMemSizeInBits);
3481   if (!MaybeLoadInfo)
3482     return false;
3483   std::tie(LowestIdxLoad, LowestIdx, LatestLoad) = *MaybeLoadInfo;
3484 
3485   // We have a bunch of loads being OR'd together. Using the addresses + offsets
3486   // we found before, check if this corresponds to a big or little endian byte
3487   // pattern. If it does, then we can represent it using a load + possibly a
3488   // BSWAP.
3489   bool IsBigEndianTarget = MF.getDataLayout().isBigEndian();
3490   Optional<bool> IsBigEndian = isBigEndian(MemOffset2Idx, LowestIdx);
3491   if (!IsBigEndian)
3492     return false;
3493   bool NeedsBSwap = IsBigEndianTarget != *IsBigEndian;
3494   if (NeedsBSwap && !isLegalOrBeforeLegalizer({TargetOpcode::G_BSWAP, {Ty}}))
3495     return false;
3496 
3497   // Make sure that the load from the lowest index produces offset 0 in the
3498   // final value.
3499   //
3500   // This ensures that we won't combine something like this:
3501   //
3502   // load x[i] -> byte 2
3503   // load x[i+1] -> byte 0 ---> wide_load x[i]
3504   // load x[i+2] -> byte 1
3505   const unsigned NumLoadsInTy = WideMemSizeInBits / NarrowMemSizeInBits;
3506   const unsigned ZeroByteOffset =
3507       *IsBigEndian
3508           ? bigEndianByteAt(NumLoadsInTy, 0)
3509           : littleEndianByteAt(NumLoadsInTy, 0);
3510   auto ZeroOffsetIdx = MemOffset2Idx.find(ZeroByteOffset);
3511   if (ZeroOffsetIdx == MemOffset2Idx.end() ||
3512       ZeroOffsetIdx->second != LowestIdx)
3513     return false;
3514 
3515   // We wil reuse the pointer from the load which ends up at byte offset 0. It
3516   // may not use index 0.
3517   Register Ptr = LowestIdxLoad->getPointerReg();
3518   const MachineMemOperand &MMO = LowestIdxLoad->getMMO();
3519   LegalityQuery::MemDesc MMDesc(MMO);
3520   MMDesc.MemoryTy = Ty;
3521   if (!isLegalOrBeforeLegalizer(
3522           {TargetOpcode::G_LOAD, {Ty, MRI.getType(Ptr)}, {MMDesc}}))
3523     return false;
3524   auto PtrInfo = MMO.getPointerInfo();
3525   auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, WideMemSizeInBits / 8);
3526 
3527   // Load must be allowed and fast on the target.
3528   LLVMContext &C = MF.getFunction().getContext();
3529   auto &DL = MF.getDataLayout();
3530   bool Fast = false;
3531   if (!getTargetLowering().allowsMemoryAccess(C, DL, Ty, *NewMMO, &Fast) ||
3532       !Fast)
3533     return false;
3534 
3535   MatchInfo = [=](MachineIRBuilder &MIB) {
3536     MIB.setInstrAndDebugLoc(*LatestLoad);
3537     Register LoadDst = NeedsBSwap ? MRI.cloneVirtualRegister(Dst) : Dst;
3538     MIB.buildLoad(LoadDst, Ptr, *NewMMO);
3539     if (NeedsBSwap)
3540       MIB.buildBSwap(Dst, LoadDst);
3541   };
3542   return true;
3543 }
3544 
3545 /// Check if the store \p Store is a truncstore that can be merged. That is,
3546 /// it's a store of a shifted value of \p SrcVal. If \p SrcVal is an empty
3547 /// Register then it does not need to match and SrcVal is set to the source
3548 /// value found.
3549 /// On match, returns the start byte offset of the \p SrcVal that is being
3550 /// stored.
3551 static Optional<int64_t> getTruncStoreByteOffset(GStore &Store, Register &SrcVal,
3552                                                  MachineRegisterInfo &MRI) {
3553   Register TruncVal;
3554   if (!mi_match(Store.getValueReg(), MRI, m_GTrunc(m_Reg(TruncVal))))
3555     return None;
3556 
3557   // The shift amount must be a constant multiple of the narrow type.
3558   // It is translated to the offset address in the wide source value "y".
3559   //
3560   // x = G_LSHR y, ShiftAmtC
3561   // s8 z = G_TRUNC x
3562   // store z, ...
3563   Register FoundSrcVal;
3564   int64_t ShiftAmt;
3565   if (!mi_match(TruncVal, MRI,
3566                 m_any_of(m_GLShr(m_Reg(FoundSrcVal), m_ICst(ShiftAmt)),
3567                          m_GAShr(m_Reg(FoundSrcVal), m_ICst(ShiftAmt))))) {
3568     if (!SrcVal.isValid() || TruncVal == SrcVal) {
3569       if (!SrcVal.isValid())
3570         SrcVal = TruncVal;
3571       return 0; // If it's the lowest index store.
3572     }
3573     return None;
3574   }
3575 
3576   unsigned NarrowBits = Store.getMMO().getMemoryType().getScalarSizeInBits();
3577   if (ShiftAmt % NarrowBits!= 0)
3578     return None;
3579   const unsigned Offset = ShiftAmt / NarrowBits;
3580 
3581   if (SrcVal.isValid() && FoundSrcVal != SrcVal)
3582     return None;
3583 
3584   if (!SrcVal.isValid())
3585     SrcVal = FoundSrcVal;
3586   else if (MRI.getType(SrcVal) != MRI.getType(FoundSrcVal))
3587     return None;
3588   return Offset;
3589 }
3590 
3591 /// Match a pattern where a wide type scalar value is stored by several narrow
3592 /// stores. Fold it into a single store or a BSWAP and a store if the targets
3593 /// supports it.
3594 ///
3595 /// Assuming little endian target:
3596 ///  i8 *p = ...
3597 ///  i32 val = ...
3598 ///  p[0] = (val >> 0) & 0xFF;
3599 ///  p[1] = (val >> 8) & 0xFF;
3600 ///  p[2] = (val >> 16) & 0xFF;
3601 ///  p[3] = (val >> 24) & 0xFF;
3602 /// =>
3603 ///  *((i32)p) = val;
3604 ///
3605 ///  i8 *p = ...
3606 ///  i32 val = ...
3607 ///  p[0] = (val >> 24) & 0xFF;
3608 ///  p[1] = (val >> 16) & 0xFF;
3609 ///  p[2] = (val >> 8) & 0xFF;
3610 ///  p[3] = (val >> 0) & 0xFF;
3611 /// =>
3612 ///  *((i32)p) = BSWAP(val);
3613 bool CombinerHelper::matchTruncStoreMerge(MachineInstr &MI,
3614                                           MergeTruncStoresInfo &MatchInfo) {
3615   auto &StoreMI = cast<GStore>(MI);
3616   LLT MemTy = StoreMI.getMMO().getMemoryType();
3617 
3618   // We only handle merging simple stores of 1-4 bytes.
3619   if (!MemTy.isScalar())
3620     return false;
3621   switch (MemTy.getSizeInBits()) {
3622   case 8:
3623   case 16:
3624   case 32:
3625     break;
3626   default:
3627     return false;
3628   }
3629   if (!StoreMI.isSimple())
3630     return false;
3631 
3632   // We do a simple search for mergeable stores prior to this one.
3633   // Any potential alias hazard along the way terminates the search.
3634   SmallVector<GStore *> FoundStores;
3635 
3636   // We're looking for:
3637   // 1) a (store(trunc(...)))
3638   // 2) of an LSHR/ASHR of a single wide value, by the appropriate shift to get
3639   //    the partial value stored.
3640   // 3) where the offsets form either a little or big-endian sequence.
3641 
3642   auto &LastStore = StoreMI;
3643 
3644   // The single base pointer that all stores must use.
3645   Register BaseReg;
3646   int64_t LastOffset;
3647   if (!mi_match(LastStore.getPointerReg(), MRI,
3648                 m_GPtrAdd(m_Reg(BaseReg), m_ICst(LastOffset)))) {
3649     BaseReg = LastStore.getPointerReg();
3650     LastOffset = 0;
3651   }
3652 
3653   GStore *LowestIdxStore = &LastStore;
3654   int64_t LowestIdxOffset = LastOffset;
3655 
3656   Register WideSrcVal;
3657   auto LowestShiftAmt = getTruncStoreByteOffset(LastStore, WideSrcVal, MRI);
3658   if (!LowestShiftAmt)
3659     return false; // Didn't match a trunc.
3660   assert(WideSrcVal.isValid());
3661 
3662   LLT WideStoreTy = MRI.getType(WideSrcVal);
3663   // The wide type might not be a multiple of the memory type, e.g. s48 and s32.
3664   if (WideStoreTy.getSizeInBits() % MemTy.getSizeInBits() != 0)
3665     return false;
3666   const unsigned NumStoresRequired =
3667       WideStoreTy.getSizeInBits() / MemTy.getSizeInBits();
3668 
3669   SmallVector<int64_t, 8> OffsetMap(NumStoresRequired, INT64_MAX);
3670   OffsetMap[*LowestShiftAmt] = LastOffset;
3671   FoundStores.emplace_back(&LastStore);
3672 
3673   // Search the block up for more stores.
3674   // We use a search threshold of 10 instructions here because the combiner
3675   // works top-down within a block, and we don't want to search an unbounded
3676   // number of predecessor instructions trying to find matching stores.
3677   // If we moved this optimization into a separate pass then we could probably
3678   // use a more efficient search without having a hard-coded threshold.
3679   const int MaxInstsToCheck = 10;
3680   int NumInstsChecked = 0;
3681   for (auto II = ++LastStore.getReverseIterator();
3682        II != LastStore.getParent()->rend() && NumInstsChecked < MaxInstsToCheck;
3683        ++II) {
3684     NumInstsChecked++;
3685     GStore *NewStore;
3686     if ((NewStore = dyn_cast<GStore>(&*II))) {
3687       if (NewStore->getMMO().getMemoryType() != MemTy || !NewStore->isSimple())
3688         break;
3689     } else if (II->isLoadFoldBarrier() || II->mayLoad()) {
3690       break;
3691     } else {
3692       continue; // This is a safe instruction we can look past.
3693     }
3694 
3695     Register NewBaseReg;
3696     int64_t MemOffset;
3697     // Check we're storing to the same base + some offset.
3698     if (!mi_match(NewStore->getPointerReg(), MRI,
3699                   m_GPtrAdd(m_Reg(NewBaseReg), m_ICst(MemOffset)))) {
3700       NewBaseReg = NewStore->getPointerReg();
3701       MemOffset = 0;
3702     }
3703     if (BaseReg != NewBaseReg)
3704       break;
3705 
3706     auto ShiftByteOffset = getTruncStoreByteOffset(*NewStore, WideSrcVal, MRI);
3707     if (!ShiftByteOffset)
3708       break;
3709     if (MemOffset < LowestIdxOffset) {
3710       LowestIdxOffset = MemOffset;
3711       LowestIdxStore = NewStore;
3712     }
3713 
3714     // Map the offset in the store and the offset in the combined value, and
3715     // early return if it has been set before.
3716     if (*ShiftByteOffset < 0 || *ShiftByteOffset >= NumStoresRequired ||
3717         OffsetMap[*ShiftByteOffset] != INT64_MAX)
3718       break;
3719     OffsetMap[*ShiftByteOffset] = MemOffset;
3720 
3721     FoundStores.emplace_back(NewStore);
3722     // Reset counter since we've found a matching inst.
3723     NumInstsChecked = 0;
3724     if (FoundStores.size() == NumStoresRequired)
3725       break;
3726   }
3727 
3728   if (FoundStores.size() != NumStoresRequired) {
3729     return false;
3730   }
3731 
3732   const auto &DL = LastStore.getMF()->getDataLayout();
3733   auto &C = LastStore.getMF()->getFunction().getContext();
3734   // Check that a store of the wide type is both allowed and fast on the target
3735   bool Fast = false;
3736   bool Allowed = getTargetLowering().allowsMemoryAccess(
3737       C, DL, WideStoreTy, LowestIdxStore->getMMO(), &Fast);
3738   if (!Allowed || !Fast)
3739     return false;
3740 
3741   // Check if the pieces of the value are going to the expected places in memory
3742   // to merge the stores.
3743   unsigned NarrowBits = MemTy.getScalarSizeInBits();
3744   auto checkOffsets = [&](bool MatchLittleEndian) {
3745     if (MatchLittleEndian) {
3746       for (unsigned i = 0; i != NumStoresRequired; ++i)
3747         if (OffsetMap[i] != i * (NarrowBits / 8) + LowestIdxOffset)
3748           return false;
3749     } else { // MatchBigEndian by reversing loop counter.
3750       for (unsigned i = 0, j = NumStoresRequired - 1; i != NumStoresRequired;
3751            ++i, --j)
3752         if (OffsetMap[j] != i * (NarrowBits / 8) + LowestIdxOffset)
3753           return false;
3754     }
3755     return true;
3756   };
3757 
3758   // Check if the offsets line up for the native data layout of this target.
3759   bool NeedBswap = false;
3760   bool NeedRotate = false;
3761   if (!checkOffsets(DL.isLittleEndian())) {
3762     // Special-case: check if byte offsets line up for the opposite endian.
3763     if (NarrowBits == 8 && checkOffsets(DL.isBigEndian()))
3764       NeedBswap = true;
3765     else if (NumStoresRequired == 2 && checkOffsets(DL.isBigEndian()))
3766       NeedRotate = true;
3767     else
3768       return false;
3769   }
3770 
3771   if (NeedBswap &&
3772       !isLegalOrBeforeLegalizer({TargetOpcode::G_BSWAP, {WideStoreTy}}))
3773     return false;
3774   if (NeedRotate &&
3775       !isLegalOrBeforeLegalizer({TargetOpcode::G_ROTR, {WideStoreTy}}))
3776     return false;
3777 
3778   MatchInfo.NeedBSwap = NeedBswap;
3779   MatchInfo.NeedRotate = NeedRotate;
3780   MatchInfo.LowestIdxStore = LowestIdxStore;
3781   MatchInfo.WideSrcVal = WideSrcVal;
3782   MatchInfo.FoundStores = std::move(FoundStores);
3783   return true;
3784 }
3785 
3786 void CombinerHelper::applyTruncStoreMerge(MachineInstr &MI,
3787                                           MergeTruncStoresInfo &MatchInfo) {
3788 
3789   Builder.setInstrAndDebugLoc(MI);
3790   Register WideSrcVal = MatchInfo.WideSrcVal;
3791   LLT WideStoreTy = MRI.getType(WideSrcVal);
3792 
3793   if (MatchInfo.NeedBSwap) {
3794     WideSrcVal = Builder.buildBSwap(WideStoreTy, WideSrcVal).getReg(0);
3795   } else if (MatchInfo.NeedRotate) {
3796     assert(WideStoreTy.getSizeInBits() % 2 == 0 &&
3797            "Unexpected type for rotate");
3798     auto RotAmt =
3799         Builder.buildConstant(WideStoreTy, WideStoreTy.getSizeInBits() / 2);
3800     WideSrcVal =
3801         Builder.buildRotateRight(WideStoreTy, WideSrcVal, RotAmt).getReg(0);
3802   }
3803 
3804   Builder.buildStore(WideSrcVal, MatchInfo.LowestIdxStore->getPointerReg(),
3805                      MatchInfo.LowestIdxStore->getMMO().getPointerInfo(),
3806                      MatchInfo.LowestIdxStore->getMMO().getAlign());
3807 
3808   // Erase the old stores.
3809   for (auto *ST : MatchInfo.FoundStores)
3810     ST->eraseFromParent();
3811 }
3812 
3813 bool CombinerHelper::matchExtendThroughPhis(MachineInstr &MI,
3814                                             MachineInstr *&ExtMI) {
3815   assert(MI.getOpcode() == TargetOpcode::G_PHI);
3816 
3817   Register DstReg = MI.getOperand(0).getReg();
3818 
3819   // TODO: Extending a vector may be expensive, don't do this until heuristics
3820   // are better.
3821   if (MRI.getType(DstReg).isVector())
3822     return false;
3823 
3824   // Try to match a phi, whose only use is an extend.
3825   if (!MRI.hasOneNonDBGUse(DstReg))
3826     return false;
3827   ExtMI = &*MRI.use_instr_nodbg_begin(DstReg);
3828   switch (ExtMI->getOpcode()) {
3829   case TargetOpcode::G_ANYEXT:
3830     return true; // G_ANYEXT is usually free.
3831   case TargetOpcode::G_ZEXT:
3832   case TargetOpcode::G_SEXT:
3833     break;
3834   default:
3835     return false;
3836   }
3837 
3838   // If the target is likely to fold this extend away, don't propagate.
3839   if (Builder.getTII().isExtendLikelyToBeFolded(*ExtMI, MRI))
3840     return false;
3841 
3842   // We don't want to propagate the extends unless there's a good chance that
3843   // they'll be optimized in some way.
3844   // Collect the unique incoming values.
3845   SmallPtrSet<MachineInstr *, 4> InSrcs;
3846   for (unsigned Idx = 1; Idx < MI.getNumOperands(); Idx += 2) {
3847     auto *DefMI = getDefIgnoringCopies(MI.getOperand(Idx).getReg(), MRI);
3848     switch (DefMI->getOpcode()) {
3849     case TargetOpcode::G_LOAD:
3850     case TargetOpcode::G_TRUNC:
3851     case TargetOpcode::G_SEXT:
3852     case TargetOpcode::G_ZEXT:
3853     case TargetOpcode::G_ANYEXT:
3854     case TargetOpcode::G_CONSTANT:
3855       InSrcs.insert(getDefIgnoringCopies(MI.getOperand(Idx).getReg(), MRI));
3856       // Don't try to propagate if there are too many places to create new
3857       // extends, chances are it'll increase code size.
3858       if (InSrcs.size() > 2)
3859         return false;
3860       break;
3861     default:
3862       return false;
3863     }
3864   }
3865   return true;
3866 }
3867 
3868 void CombinerHelper::applyExtendThroughPhis(MachineInstr &MI,
3869                                             MachineInstr *&ExtMI) {
3870   assert(MI.getOpcode() == TargetOpcode::G_PHI);
3871   Register DstReg = ExtMI->getOperand(0).getReg();
3872   LLT ExtTy = MRI.getType(DstReg);
3873 
3874   // Propagate the extension into the block of each incoming reg's block.
3875   // Use a SetVector here because PHIs can have duplicate edges, and we want
3876   // deterministic iteration order.
3877   SmallSetVector<MachineInstr *, 8> SrcMIs;
3878   SmallDenseMap<MachineInstr *, MachineInstr *, 8> OldToNewSrcMap;
3879   for (unsigned SrcIdx = 1; SrcIdx < MI.getNumOperands(); SrcIdx += 2) {
3880     auto *SrcMI = MRI.getVRegDef(MI.getOperand(SrcIdx).getReg());
3881     if (!SrcMIs.insert(SrcMI))
3882       continue;
3883 
3884     // Build an extend after each src inst.
3885     auto *MBB = SrcMI->getParent();
3886     MachineBasicBlock::iterator InsertPt = ++SrcMI->getIterator();
3887     if (InsertPt != MBB->end() && InsertPt->isPHI())
3888       InsertPt = MBB->getFirstNonPHI();
3889 
3890     Builder.setInsertPt(*SrcMI->getParent(), InsertPt);
3891     Builder.setDebugLoc(MI.getDebugLoc());
3892     auto NewExt = Builder.buildExtOrTrunc(ExtMI->getOpcode(), ExtTy,
3893                                           SrcMI->getOperand(0).getReg());
3894     OldToNewSrcMap[SrcMI] = NewExt;
3895   }
3896 
3897   // Create a new phi with the extended inputs.
3898   Builder.setInstrAndDebugLoc(MI);
3899   auto NewPhi = Builder.buildInstrNoInsert(TargetOpcode::G_PHI);
3900   NewPhi.addDef(DstReg);
3901   for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
3902     if (!MO.isReg()) {
3903       NewPhi.addMBB(MO.getMBB());
3904       continue;
3905     }
3906     auto *NewSrc = OldToNewSrcMap[MRI.getVRegDef(MO.getReg())];
3907     NewPhi.addUse(NewSrc->getOperand(0).getReg());
3908   }
3909   Builder.insertInstr(NewPhi);
3910   ExtMI->eraseFromParent();
3911 }
3912 
3913 bool CombinerHelper::matchExtractVecEltBuildVec(MachineInstr &MI,
3914                                                 Register &Reg) {
3915   assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT);
3916   // If we have a constant index, look for a G_BUILD_VECTOR source
3917   // and find the source register that the index maps to.
3918   Register SrcVec = MI.getOperand(1).getReg();
3919   LLT SrcTy = MRI.getType(SrcVec);
3920   if (!isLegalOrBeforeLegalizer(
3921           {TargetOpcode::G_BUILD_VECTOR, {SrcTy, SrcTy.getElementType()}}))
3922     return false;
3923 
3924   auto Cst = getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
3925   if (!Cst || Cst->Value.getZExtValue() >= SrcTy.getNumElements())
3926     return false;
3927 
3928   unsigned VecIdx = Cst->Value.getZExtValue();
3929   MachineInstr *BuildVecMI =
3930       getOpcodeDef(TargetOpcode::G_BUILD_VECTOR, SrcVec, MRI);
3931   if (!BuildVecMI) {
3932     BuildVecMI = getOpcodeDef(TargetOpcode::G_BUILD_VECTOR_TRUNC, SrcVec, MRI);
3933     if (!BuildVecMI)
3934       return false;
3935     LLT ScalarTy = MRI.getType(BuildVecMI->getOperand(1).getReg());
3936     if (!isLegalOrBeforeLegalizer(
3937             {TargetOpcode::G_BUILD_VECTOR_TRUNC, {SrcTy, ScalarTy}}))
3938       return false;
3939   }
3940 
3941   EVT Ty(getMVTForLLT(SrcTy));
3942   if (!MRI.hasOneNonDBGUse(SrcVec) &&
3943       !getTargetLowering().aggressivelyPreferBuildVectorSources(Ty))
3944     return false;
3945 
3946   Reg = BuildVecMI->getOperand(VecIdx + 1).getReg();
3947   return true;
3948 }
3949 
3950 void CombinerHelper::applyExtractVecEltBuildVec(MachineInstr &MI,
3951                                                 Register &Reg) {
3952   // Check the type of the register, since it may have come from a
3953   // G_BUILD_VECTOR_TRUNC.
3954   LLT ScalarTy = MRI.getType(Reg);
3955   Register DstReg = MI.getOperand(0).getReg();
3956   LLT DstTy = MRI.getType(DstReg);
3957 
3958   Builder.setInstrAndDebugLoc(MI);
3959   if (ScalarTy != DstTy) {
3960     assert(ScalarTy.getSizeInBits() > DstTy.getSizeInBits());
3961     Builder.buildTrunc(DstReg, Reg);
3962     MI.eraseFromParent();
3963     return;
3964   }
3965   replaceSingleDefInstWithReg(MI, Reg);
3966 }
3967 
3968 bool CombinerHelper::matchExtractAllEltsFromBuildVector(
3969     MachineInstr &MI,
3970     SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) {
3971   assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
3972   // This combine tries to find build_vector's which have every source element
3973   // extracted using G_EXTRACT_VECTOR_ELT. This can happen when transforms like
3974   // the masked load scalarization is run late in the pipeline. There's already
3975   // a combine for a similar pattern starting from the extract, but that
3976   // doesn't attempt to do it if there are multiple uses of the build_vector,
3977   // which in this case is true. Starting the combine from the build_vector
3978   // feels more natural than trying to find sibling nodes of extracts.
3979   // E.g.
3980   //  %vec(<4 x s32>) = G_BUILD_VECTOR %s1(s32), %s2, %s3, %s4
3981   //  %ext1 = G_EXTRACT_VECTOR_ELT %vec, 0
3982   //  %ext2 = G_EXTRACT_VECTOR_ELT %vec, 1
3983   //  %ext3 = G_EXTRACT_VECTOR_ELT %vec, 2
3984   //  %ext4 = G_EXTRACT_VECTOR_ELT %vec, 3
3985   // ==>
3986   // replace ext{1,2,3,4} with %s{1,2,3,4}
3987 
3988   Register DstReg = MI.getOperand(0).getReg();
3989   LLT DstTy = MRI.getType(DstReg);
3990   unsigned NumElts = DstTy.getNumElements();
3991 
3992   SmallBitVector ExtractedElts(NumElts);
3993   for (MachineInstr &II : MRI.use_nodbg_instructions(DstReg)) {
3994     if (II.getOpcode() != TargetOpcode::G_EXTRACT_VECTOR_ELT)
3995       return false;
3996     auto Cst = getIConstantVRegVal(II.getOperand(2).getReg(), MRI);
3997     if (!Cst)
3998       return false;
3999     unsigned Idx = Cst->getZExtValue();
4000     if (Idx >= NumElts)
4001       return false; // Out of range.
4002     ExtractedElts.set(Idx);
4003     SrcDstPairs.emplace_back(
4004         std::make_pair(MI.getOperand(Idx + 1).getReg(), &II));
4005   }
4006   // Match if every element was extracted.
4007   return ExtractedElts.all();
4008 }
4009 
4010 void CombinerHelper::applyExtractAllEltsFromBuildVector(
4011     MachineInstr &MI,
4012     SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) {
4013   assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
4014   for (auto &Pair : SrcDstPairs) {
4015     auto *ExtMI = Pair.second;
4016     replaceRegWith(MRI, ExtMI->getOperand(0).getReg(), Pair.first);
4017     ExtMI->eraseFromParent();
4018   }
4019   MI.eraseFromParent();
4020 }
4021 
4022 void CombinerHelper::applyBuildFn(
4023     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4024   Builder.setInstrAndDebugLoc(MI);
4025   MatchInfo(Builder);
4026   MI.eraseFromParent();
4027 }
4028 
4029 void CombinerHelper::applyBuildFnNoErase(
4030     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4031   Builder.setInstrAndDebugLoc(MI);
4032   MatchInfo(Builder);
4033 }
4034 
4035 bool CombinerHelper::matchOrShiftToFunnelShift(MachineInstr &MI,
4036                                                BuildFnTy &MatchInfo) {
4037   assert(MI.getOpcode() == TargetOpcode::G_OR);
4038 
4039   Register Dst = MI.getOperand(0).getReg();
4040   LLT Ty = MRI.getType(Dst);
4041   unsigned BitWidth = Ty.getScalarSizeInBits();
4042 
4043   Register ShlSrc, ShlAmt, LShrSrc, LShrAmt, Amt;
4044   unsigned FshOpc = 0;
4045 
4046   // Match (or (shl ...), (lshr ...)).
4047   if (!mi_match(Dst, MRI,
4048                 // m_GOr() handles the commuted version as well.
4049                 m_GOr(m_GShl(m_Reg(ShlSrc), m_Reg(ShlAmt)),
4050                       m_GLShr(m_Reg(LShrSrc), m_Reg(LShrAmt)))))
4051     return false;
4052 
4053   // Given constants C0 and C1 such that C0 + C1 is bit-width:
4054   // (or (shl x, C0), (lshr y, C1)) -> (fshl x, y, C0) or (fshr x, y, C1)
4055   int64_t CstShlAmt, CstLShrAmt;
4056   if (mi_match(ShlAmt, MRI, m_ICstOrSplat(CstShlAmt)) &&
4057       mi_match(LShrAmt, MRI, m_ICstOrSplat(CstLShrAmt)) &&
4058       CstShlAmt + CstLShrAmt == BitWidth) {
4059     FshOpc = TargetOpcode::G_FSHR;
4060     Amt = LShrAmt;
4061 
4062   } else if (mi_match(LShrAmt, MRI,
4063                       m_GSub(m_SpecificICstOrSplat(BitWidth), m_Reg(Amt))) &&
4064              ShlAmt == Amt) {
4065     // (or (shl x, amt), (lshr y, (sub bw, amt))) -> (fshl x, y, amt)
4066     FshOpc = TargetOpcode::G_FSHL;
4067 
4068   } else if (mi_match(ShlAmt, MRI,
4069                       m_GSub(m_SpecificICstOrSplat(BitWidth), m_Reg(Amt))) &&
4070              LShrAmt == Amt) {
4071     // (or (shl x, (sub bw, amt)), (lshr y, amt)) -> (fshr x, y, amt)
4072     FshOpc = TargetOpcode::G_FSHR;
4073 
4074   } else {
4075     return false;
4076   }
4077 
4078   LLT AmtTy = MRI.getType(Amt);
4079   if (!isLegalOrBeforeLegalizer({FshOpc, {Ty, AmtTy}}))
4080     return false;
4081 
4082   MatchInfo = [=](MachineIRBuilder &B) {
4083     B.buildInstr(FshOpc, {Dst}, {ShlSrc, LShrSrc, Amt});
4084   };
4085   return true;
4086 }
4087 
4088 /// Match an FSHL or FSHR that can be combined to a ROTR or ROTL rotate.
4089 bool CombinerHelper::matchFunnelShiftToRotate(MachineInstr &MI) {
4090   unsigned Opc = MI.getOpcode();
4091   assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
4092   Register X = MI.getOperand(1).getReg();
4093   Register Y = MI.getOperand(2).getReg();
4094   if (X != Y)
4095     return false;
4096   unsigned RotateOpc =
4097       Opc == TargetOpcode::G_FSHL ? TargetOpcode::G_ROTL : TargetOpcode::G_ROTR;
4098   return isLegalOrBeforeLegalizer({RotateOpc, {MRI.getType(X), MRI.getType(Y)}});
4099 }
4100 
4101 void CombinerHelper::applyFunnelShiftToRotate(MachineInstr &MI) {
4102   unsigned Opc = MI.getOpcode();
4103   assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
4104   bool IsFSHL = Opc == TargetOpcode::G_FSHL;
4105   Observer.changingInstr(MI);
4106   MI.setDesc(Builder.getTII().get(IsFSHL ? TargetOpcode::G_ROTL
4107                                          : TargetOpcode::G_ROTR));
4108   MI.removeOperand(2);
4109   Observer.changedInstr(MI);
4110 }
4111 
4112 // Fold (rot x, c) -> (rot x, c % BitSize)
4113 bool CombinerHelper::matchRotateOutOfRange(MachineInstr &MI) {
4114   assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
4115          MI.getOpcode() == TargetOpcode::G_ROTR);
4116   unsigned Bitsize =
4117       MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits();
4118   Register AmtReg = MI.getOperand(2).getReg();
4119   bool OutOfRange = false;
4120   auto MatchOutOfRange = [Bitsize, &OutOfRange](const Constant *C) {
4121     if (auto *CI = dyn_cast<ConstantInt>(C))
4122       OutOfRange |= CI->getValue().uge(Bitsize);
4123     return true;
4124   };
4125   return matchUnaryPredicate(MRI, AmtReg, MatchOutOfRange) && OutOfRange;
4126 }
4127 
4128 void CombinerHelper::applyRotateOutOfRange(MachineInstr &MI) {
4129   assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
4130          MI.getOpcode() == TargetOpcode::G_ROTR);
4131   unsigned Bitsize =
4132       MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits();
4133   Builder.setInstrAndDebugLoc(MI);
4134   Register Amt = MI.getOperand(2).getReg();
4135   LLT AmtTy = MRI.getType(Amt);
4136   auto Bits = Builder.buildConstant(AmtTy, Bitsize);
4137   Amt = Builder.buildURem(AmtTy, MI.getOperand(2).getReg(), Bits).getReg(0);
4138   Observer.changingInstr(MI);
4139   MI.getOperand(2).setReg(Amt);
4140   Observer.changedInstr(MI);
4141 }
4142 
4143 bool CombinerHelper::matchICmpToTrueFalseKnownBits(MachineInstr &MI,
4144                                                    int64_t &MatchInfo) {
4145   assert(MI.getOpcode() == TargetOpcode::G_ICMP);
4146   auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
4147   auto KnownLHS = KB->getKnownBits(MI.getOperand(2).getReg());
4148   auto KnownRHS = KB->getKnownBits(MI.getOperand(3).getReg());
4149   Optional<bool> KnownVal;
4150   switch (Pred) {
4151   default:
4152     llvm_unreachable("Unexpected G_ICMP predicate?");
4153   case CmpInst::ICMP_EQ:
4154     KnownVal = KnownBits::eq(KnownLHS, KnownRHS);
4155     break;
4156   case CmpInst::ICMP_NE:
4157     KnownVal = KnownBits::ne(KnownLHS, KnownRHS);
4158     break;
4159   case CmpInst::ICMP_SGE:
4160     KnownVal = KnownBits::sge(KnownLHS, KnownRHS);
4161     break;
4162   case CmpInst::ICMP_SGT:
4163     KnownVal = KnownBits::sgt(KnownLHS, KnownRHS);
4164     break;
4165   case CmpInst::ICMP_SLE:
4166     KnownVal = KnownBits::sle(KnownLHS, KnownRHS);
4167     break;
4168   case CmpInst::ICMP_SLT:
4169     KnownVal = KnownBits::slt(KnownLHS, KnownRHS);
4170     break;
4171   case CmpInst::ICMP_UGE:
4172     KnownVal = KnownBits::uge(KnownLHS, KnownRHS);
4173     break;
4174   case CmpInst::ICMP_UGT:
4175     KnownVal = KnownBits::ugt(KnownLHS, KnownRHS);
4176     break;
4177   case CmpInst::ICMP_ULE:
4178     KnownVal = KnownBits::ule(KnownLHS, KnownRHS);
4179     break;
4180   case CmpInst::ICMP_ULT:
4181     KnownVal = KnownBits::ult(KnownLHS, KnownRHS);
4182     break;
4183   }
4184   if (!KnownVal)
4185     return false;
4186   MatchInfo =
4187       *KnownVal
4188           ? getICmpTrueVal(getTargetLowering(),
4189                            /*IsVector = */
4190                            MRI.getType(MI.getOperand(0).getReg()).isVector(),
4191                            /* IsFP = */ false)
4192           : 0;
4193   return true;
4194 }
4195 
4196 bool CombinerHelper::matchICmpToLHSKnownBits(
4197     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4198   assert(MI.getOpcode() == TargetOpcode::G_ICMP);
4199   // Given:
4200   //
4201   // %x = G_WHATEVER (... x is known to be 0 or 1 ...)
4202   // %cmp = G_ICMP ne %x, 0
4203   //
4204   // Or:
4205   //
4206   // %x = G_WHATEVER (... x is known to be 0 or 1 ...)
4207   // %cmp = G_ICMP eq %x, 1
4208   //
4209   // We can replace %cmp with %x assuming true is 1 on the target.
4210   auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
4211   if (!CmpInst::isEquality(Pred))
4212     return false;
4213   Register Dst = MI.getOperand(0).getReg();
4214   LLT DstTy = MRI.getType(Dst);
4215   if (getICmpTrueVal(getTargetLowering(), DstTy.isVector(),
4216                      /* IsFP = */ false) != 1)
4217     return false;
4218   int64_t OneOrZero = Pred == CmpInst::ICMP_EQ;
4219   if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICst(OneOrZero)))
4220     return false;
4221   Register LHS = MI.getOperand(2).getReg();
4222   auto KnownLHS = KB->getKnownBits(LHS);
4223   if (KnownLHS.getMinValue() != 0 || KnownLHS.getMaxValue() != 1)
4224     return false;
4225   // Make sure replacing Dst with the LHS is a legal operation.
4226   LLT LHSTy = MRI.getType(LHS);
4227   unsigned LHSSize = LHSTy.getSizeInBits();
4228   unsigned DstSize = DstTy.getSizeInBits();
4229   unsigned Op = TargetOpcode::COPY;
4230   if (DstSize != LHSSize)
4231     Op = DstSize < LHSSize ? TargetOpcode::G_TRUNC : TargetOpcode::G_ZEXT;
4232   if (!isLegalOrBeforeLegalizer({Op, {DstTy, LHSTy}}))
4233     return false;
4234   MatchInfo = [=](MachineIRBuilder &B) { B.buildInstr(Op, {Dst}, {LHS}); };
4235   return true;
4236 }
4237 
4238 // Replace (and (or x, c1), c2) with (and x, c2) iff c1 & c2 == 0
4239 bool CombinerHelper::matchAndOrDisjointMask(
4240     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4241   assert(MI.getOpcode() == TargetOpcode::G_AND);
4242 
4243   // Ignore vector types to simplify matching the two constants.
4244   // TODO: do this for vectors and scalars via a demanded bits analysis.
4245   LLT Ty = MRI.getType(MI.getOperand(0).getReg());
4246   if (Ty.isVector())
4247     return false;
4248 
4249   Register Src;
4250   Register AndMaskReg;
4251   int64_t AndMaskBits;
4252   int64_t OrMaskBits;
4253   if (!mi_match(MI, MRI,
4254                 m_GAnd(m_GOr(m_Reg(Src), m_ICst(OrMaskBits)),
4255                        m_all_of(m_ICst(AndMaskBits), m_Reg(AndMaskReg)))))
4256     return false;
4257 
4258   // Check if OrMask could turn on any bits in Src.
4259   if (AndMaskBits & OrMaskBits)
4260     return false;
4261 
4262   MatchInfo = [=, &MI](MachineIRBuilder &B) {
4263     Observer.changingInstr(MI);
4264     // Canonicalize the result to have the constant on the RHS.
4265     if (MI.getOperand(1).getReg() == AndMaskReg)
4266       MI.getOperand(2).setReg(AndMaskReg);
4267     MI.getOperand(1).setReg(Src);
4268     Observer.changedInstr(MI);
4269   };
4270   return true;
4271 }
4272 
4273 /// Form a G_SBFX from a G_SEXT_INREG fed by a right shift.
4274 bool CombinerHelper::matchBitfieldExtractFromSExtInReg(
4275     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4276   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
4277   Register Dst = MI.getOperand(0).getReg();
4278   Register Src = MI.getOperand(1).getReg();
4279   LLT Ty = MRI.getType(Src);
4280   LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4281   if (!LI || !LI->isLegalOrCustom({TargetOpcode::G_SBFX, {Ty, ExtractTy}}))
4282     return false;
4283   int64_t Width = MI.getOperand(2).getImm();
4284   Register ShiftSrc;
4285   int64_t ShiftImm;
4286   if (!mi_match(
4287           Src, MRI,
4288           m_OneNonDBGUse(m_any_of(m_GAShr(m_Reg(ShiftSrc), m_ICst(ShiftImm)),
4289                                   m_GLShr(m_Reg(ShiftSrc), m_ICst(ShiftImm))))))
4290     return false;
4291   if (ShiftImm < 0 || ShiftImm + Width > Ty.getScalarSizeInBits())
4292     return false;
4293 
4294   MatchInfo = [=](MachineIRBuilder &B) {
4295     auto Cst1 = B.buildConstant(ExtractTy, ShiftImm);
4296     auto Cst2 = B.buildConstant(ExtractTy, Width);
4297     B.buildSbfx(Dst, ShiftSrc, Cst1, Cst2);
4298   };
4299   return true;
4300 }
4301 
4302 /// Form a G_UBFX from "(a srl b) & mask", where b and mask are constants.
4303 bool CombinerHelper::matchBitfieldExtractFromAnd(
4304     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4305   assert(MI.getOpcode() == TargetOpcode::G_AND);
4306   Register Dst = MI.getOperand(0).getReg();
4307   LLT Ty = MRI.getType(Dst);
4308   LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4309   if (!getTargetLowering().isConstantUnsignedBitfieldExtractLegal(
4310           TargetOpcode::G_UBFX, Ty, ExtractTy))
4311     return false;
4312 
4313   int64_t AndImm, LSBImm;
4314   Register ShiftSrc;
4315   const unsigned Size = Ty.getScalarSizeInBits();
4316   if (!mi_match(MI.getOperand(0).getReg(), MRI,
4317                 m_GAnd(m_OneNonDBGUse(m_GLShr(m_Reg(ShiftSrc), m_ICst(LSBImm))),
4318                        m_ICst(AndImm))))
4319     return false;
4320 
4321   // The mask is a mask of the low bits iff imm & (imm+1) == 0.
4322   auto MaybeMask = static_cast<uint64_t>(AndImm);
4323   if (MaybeMask & (MaybeMask + 1))
4324     return false;
4325 
4326   // LSB must fit within the register.
4327   if (static_cast<uint64_t>(LSBImm) >= Size)
4328     return false;
4329 
4330   uint64_t Width = APInt(Size, AndImm).countTrailingOnes();
4331   MatchInfo = [=](MachineIRBuilder &B) {
4332     auto WidthCst = B.buildConstant(ExtractTy, Width);
4333     auto LSBCst = B.buildConstant(ExtractTy, LSBImm);
4334     B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {ShiftSrc, LSBCst, WidthCst});
4335   };
4336   return true;
4337 }
4338 
4339 bool CombinerHelper::matchBitfieldExtractFromShr(
4340     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4341   const unsigned Opcode = MI.getOpcode();
4342   assert(Opcode == TargetOpcode::G_ASHR || Opcode == TargetOpcode::G_LSHR);
4343 
4344   const Register Dst = MI.getOperand(0).getReg();
4345 
4346   const unsigned ExtrOpcode = Opcode == TargetOpcode::G_ASHR
4347                                   ? TargetOpcode::G_SBFX
4348                                   : TargetOpcode::G_UBFX;
4349 
4350   // Check if the type we would use for the extract is legal
4351   LLT Ty = MRI.getType(Dst);
4352   LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4353   if (!LI || !LI->isLegalOrCustom({ExtrOpcode, {Ty, ExtractTy}}))
4354     return false;
4355 
4356   Register ShlSrc;
4357   int64_t ShrAmt;
4358   int64_t ShlAmt;
4359   const unsigned Size = Ty.getScalarSizeInBits();
4360 
4361   // Try to match shr (shl x, c1), c2
4362   if (!mi_match(Dst, MRI,
4363                 m_BinOp(Opcode,
4364                         m_OneNonDBGUse(m_GShl(m_Reg(ShlSrc), m_ICst(ShlAmt))),
4365                         m_ICst(ShrAmt))))
4366     return false;
4367 
4368   // Make sure that the shift sizes can fit a bitfield extract
4369   if (ShlAmt < 0 || ShlAmt > ShrAmt || ShrAmt >= Size)
4370     return false;
4371 
4372   // Skip this combine if the G_SEXT_INREG combine could handle it
4373   if (Opcode == TargetOpcode::G_ASHR && ShlAmt == ShrAmt)
4374     return false;
4375 
4376   // Calculate start position and width of the extract
4377   const int64_t Pos = ShrAmt - ShlAmt;
4378   const int64_t Width = Size - ShrAmt;
4379 
4380   MatchInfo = [=](MachineIRBuilder &B) {
4381     auto WidthCst = B.buildConstant(ExtractTy, Width);
4382     auto PosCst = B.buildConstant(ExtractTy, Pos);
4383     B.buildInstr(ExtrOpcode, {Dst}, {ShlSrc, PosCst, WidthCst});
4384   };
4385   return true;
4386 }
4387 
4388 bool CombinerHelper::matchBitfieldExtractFromShrAnd(
4389     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4390   const unsigned Opcode = MI.getOpcode();
4391   assert(Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_ASHR);
4392 
4393   const Register Dst = MI.getOperand(0).getReg();
4394   LLT Ty = MRI.getType(Dst);
4395   LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4396   if (!getTargetLowering().isConstantUnsignedBitfieldExtractLegal(
4397           TargetOpcode::G_UBFX, Ty, ExtractTy))
4398     return false;
4399 
4400   // Try to match shr (and x, c1), c2
4401   Register AndSrc;
4402   int64_t ShrAmt;
4403   int64_t SMask;
4404   if (!mi_match(Dst, MRI,
4405                 m_BinOp(Opcode,
4406                         m_OneNonDBGUse(m_GAnd(m_Reg(AndSrc), m_ICst(SMask))),
4407                         m_ICst(ShrAmt))))
4408     return false;
4409 
4410   const unsigned Size = Ty.getScalarSizeInBits();
4411   if (ShrAmt < 0 || ShrAmt >= Size)
4412     return false;
4413 
4414   // If the shift subsumes the mask, emit the 0 directly.
4415   if (0 == (SMask >> ShrAmt)) {
4416     MatchInfo = [=](MachineIRBuilder &B) {
4417       B.buildConstant(Dst, 0);
4418     };
4419     return true;
4420   }
4421 
4422   // Check that ubfx can do the extraction, with no holes in the mask.
4423   uint64_t UMask = SMask;
4424   UMask |= maskTrailingOnes<uint64_t>(ShrAmt);
4425   UMask &= maskTrailingOnes<uint64_t>(Size);
4426   if (!isMask_64(UMask))
4427     return false;
4428 
4429   // Calculate start position and width of the extract.
4430   const int64_t Pos = ShrAmt;
4431   const int64_t Width = countTrailingOnes(UMask) - ShrAmt;
4432 
4433   // It's preferable to keep the shift, rather than form G_SBFX.
4434   // TODO: remove the G_AND via demanded bits analysis.
4435   if (Opcode == TargetOpcode::G_ASHR && Width + ShrAmt == Size)
4436     return false;
4437 
4438   MatchInfo = [=](MachineIRBuilder &B) {
4439     auto WidthCst = B.buildConstant(ExtractTy, Width);
4440     auto PosCst = B.buildConstant(ExtractTy, Pos);
4441     B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {AndSrc, PosCst, WidthCst});
4442   };
4443   return true;
4444 }
4445 
4446 bool CombinerHelper::reassociationCanBreakAddressingModePattern(
4447     MachineInstr &PtrAdd) {
4448   assert(PtrAdd.getOpcode() == TargetOpcode::G_PTR_ADD);
4449 
4450   Register Src1Reg = PtrAdd.getOperand(1).getReg();
4451   MachineInstr *Src1Def = getOpcodeDef(TargetOpcode::G_PTR_ADD, Src1Reg, MRI);
4452   if (!Src1Def)
4453     return false;
4454 
4455   Register Src2Reg = PtrAdd.getOperand(2).getReg();
4456 
4457   if (MRI.hasOneNonDBGUse(Src1Reg))
4458     return false;
4459 
4460   auto C1 = getIConstantVRegVal(Src1Def->getOperand(2).getReg(), MRI);
4461   if (!C1)
4462     return false;
4463   auto C2 = getIConstantVRegVal(Src2Reg, MRI);
4464   if (!C2)
4465     return false;
4466 
4467   const APInt &C1APIntVal = *C1;
4468   const APInt &C2APIntVal = *C2;
4469   const int64_t CombinedValue = (C1APIntVal + C2APIntVal).getSExtValue();
4470 
4471   for (auto &UseMI : MRI.use_nodbg_instructions(Src1Reg)) {
4472     // This combine may end up running before ptrtoint/inttoptr combines
4473     // manage to eliminate redundant conversions, so try to look through them.
4474     MachineInstr *ConvUseMI = &UseMI;
4475     unsigned ConvUseOpc = ConvUseMI->getOpcode();
4476     while (ConvUseOpc == TargetOpcode::G_INTTOPTR ||
4477            ConvUseOpc == TargetOpcode::G_PTRTOINT) {
4478       Register DefReg = ConvUseMI->getOperand(0).getReg();
4479       if (!MRI.hasOneNonDBGUse(DefReg))
4480         break;
4481       ConvUseMI = &*MRI.use_instr_nodbg_begin(DefReg);
4482       ConvUseOpc = ConvUseMI->getOpcode();
4483     }
4484     auto LoadStore = ConvUseOpc == TargetOpcode::G_LOAD ||
4485                      ConvUseOpc == TargetOpcode::G_STORE;
4486     if (!LoadStore)
4487       continue;
4488     // Is x[offset2] already not a legal addressing mode? If so then
4489     // reassociating the constants breaks nothing (we test offset2 because
4490     // that's the one we hope to fold into the load or store).
4491     TargetLoweringBase::AddrMode AM;
4492     AM.HasBaseReg = true;
4493     AM.BaseOffs = C2APIntVal.getSExtValue();
4494     unsigned AS =
4495         MRI.getType(ConvUseMI->getOperand(1).getReg()).getAddressSpace();
4496     Type *AccessTy =
4497         getTypeForLLT(MRI.getType(ConvUseMI->getOperand(0).getReg()),
4498                       PtrAdd.getMF()->getFunction().getContext());
4499     const auto &TLI = *PtrAdd.getMF()->getSubtarget().getTargetLowering();
4500     if (!TLI.isLegalAddressingMode(PtrAdd.getMF()->getDataLayout(), AM,
4501                                    AccessTy, AS))
4502       continue;
4503 
4504     // Would x[offset1+offset2] still be a legal addressing mode?
4505     AM.BaseOffs = CombinedValue;
4506     if (!TLI.isLegalAddressingMode(PtrAdd.getMF()->getDataLayout(), AM,
4507                                    AccessTy, AS))
4508       return true;
4509   }
4510 
4511   return false;
4512 }
4513 
4514 bool CombinerHelper::matchReassocConstantInnerRHS(GPtrAdd &MI,
4515                                                   MachineInstr *RHS,
4516                                                   BuildFnTy &MatchInfo) {
4517   // G_PTR_ADD(BASE, G_ADD(X, C)) -> G_PTR_ADD(G_PTR_ADD(BASE, X), C)
4518   Register Src1Reg = MI.getOperand(1).getReg();
4519   if (RHS->getOpcode() != TargetOpcode::G_ADD)
4520     return false;
4521   auto C2 = getIConstantVRegVal(RHS->getOperand(2).getReg(), MRI);
4522   if (!C2)
4523     return false;
4524 
4525   MatchInfo = [=, &MI](MachineIRBuilder &B) {
4526     LLT PtrTy = MRI.getType(MI.getOperand(0).getReg());
4527 
4528     auto NewBase =
4529         Builder.buildPtrAdd(PtrTy, Src1Reg, RHS->getOperand(1).getReg());
4530     Observer.changingInstr(MI);
4531     MI.getOperand(1).setReg(NewBase.getReg(0));
4532     MI.getOperand(2).setReg(RHS->getOperand(2).getReg());
4533     Observer.changedInstr(MI);
4534   };
4535   return !reassociationCanBreakAddressingModePattern(MI);
4536 }
4537 
4538 bool CombinerHelper::matchReassocConstantInnerLHS(GPtrAdd &MI,
4539                                                   MachineInstr *LHS,
4540                                                   MachineInstr *RHS,
4541                                                   BuildFnTy &MatchInfo) {
4542   // G_PTR_ADD (G_PTR_ADD X, C), Y) -> (G_PTR_ADD (G_PTR_ADD(X, Y), C)
4543   // if and only if (G_PTR_ADD X, C) has one use.
4544   Register LHSBase;
4545   Optional<ValueAndVReg> LHSCstOff;
4546   if (!mi_match(MI.getBaseReg(), MRI,
4547                 m_OneNonDBGUse(m_GPtrAdd(m_Reg(LHSBase), m_GCst(LHSCstOff)))))
4548     return false;
4549 
4550   auto *LHSPtrAdd = cast<GPtrAdd>(LHS);
4551   MatchInfo = [=, &MI](MachineIRBuilder &B) {
4552     // When we change LHSPtrAdd's offset register we might cause it to use a reg
4553     // before its def. Sink the instruction so the outer PTR_ADD to ensure this
4554     // doesn't happen.
4555     LHSPtrAdd->moveBefore(&MI);
4556     Register RHSReg = MI.getOffsetReg();
4557     Observer.changingInstr(MI);
4558     MI.getOperand(2).setReg(LHSCstOff->VReg);
4559     Observer.changedInstr(MI);
4560     Observer.changingInstr(*LHSPtrAdd);
4561     LHSPtrAdd->getOperand(2).setReg(RHSReg);
4562     Observer.changedInstr(*LHSPtrAdd);
4563   };
4564   return !reassociationCanBreakAddressingModePattern(MI);
4565 }
4566 
4567 bool CombinerHelper::matchReassocFoldConstantsInSubTree(GPtrAdd &MI,
4568                                                         MachineInstr *LHS,
4569                                                         MachineInstr *RHS,
4570                                                         BuildFnTy &MatchInfo) {
4571   // G_PTR_ADD(G_PTR_ADD(BASE, C1), C2) -> G_PTR_ADD(BASE, C1+C2)
4572   auto *LHSPtrAdd = dyn_cast<GPtrAdd>(LHS);
4573   if (!LHSPtrAdd)
4574     return false;
4575 
4576   Register Src2Reg = MI.getOperand(2).getReg();
4577   Register LHSSrc1 = LHSPtrAdd->getBaseReg();
4578   Register LHSSrc2 = LHSPtrAdd->getOffsetReg();
4579   auto C1 = getIConstantVRegVal(LHSSrc2, MRI);
4580   if (!C1)
4581     return false;
4582   auto C2 = getIConstantVRegVal(Src2Reg, MRI);
4583   if (!C2)
4584     return false;
4585 
4586   MatchInfo = [=, &MI](MachineIRBuilder &B) {
4587     auto NewCst = B.buildConstant(MRI.getType(Src2Reg), *C1 + *C2);
4588     Observer.changingInstr(MI);
4589     MI.getOperand(1).setReg(LHSSrc1);
4590     MI.getOperand(2).setReg(NewCst.getReg(0));
4591     Observer.changedInstr(MI);
4592   };
4593   return !reassociationCanBreakAddressingModePattern(MI);
4594 }
4595 
4596 bool CombinerHelper::matchReassocPtrAdd(MachineInstr &MI,
4597                                         BuildFnTy &MatchInfo) {
4598   auto &PtrAdd = cast<GPtrAdd>(MI);
4599   // We're trying to match a few pointer computation patterns here for
4600   // re-association opportunities.
4601   // 1) Isolating a constant operand to be on the RHS, e.g.:
4602   // G_PTR_ADD(BASE, G_ADD(X, C)) -> G_PTR_ADD(G_PTR_ADD(BASE, X), C)
4603   //
4604   // 2) Folding two constants in each sub-tree as long as such folding
4605   // doesn't break a legal addressing mode.
4606   // G_PTR_ADD(G_PTR_ADD(BASE, C1), C2) -> G_PTR_ADD(BASE, C1+C2)
4607   //
4608   // 3) Move a constant from the LHS of an inner op to the RHS of the outer.
4609   // G_PTR_ADD (G_PTR_ADD X, C), Y) -> G_PTR_ADD (G_PTR_ADD(X, Y), C)
4610   // iif (G_PTR_ADD X, C) has one use.
4611   MachineInstr *LHS = MRI.getVRegDef(PtrAdd.getBaseReg());
4612   MachineInstr *RHS = MRI.getVRegDef(PtrAdd.getOffsetReg());
4613 
4614   // Try to match example 2.
4615   if (matchReassocFoldConstantsInSubTree(PtrAdd, LHS, RHS, MatchInfo))
4616     return true;
4617 
4618   // Try to match example 3.
4619   if (matchReassocConstantInnerLHS(PtrAdd, LHS, RHS, MatchInfo))
4620     return true;
4621 
4622   // Try to match example 1.
4623   if (matchReassocConstantInnerRHS(PtrAdd, RHS, MatchInfo))
4624     return true;
4625 
4626   return false;
4627 }
4628 
4629 bool CombinerHelper::matchConstantFold(MachineInstr &MI, APInt &MatchInfo) {
4630   Register Op1 = MI.getOperand(1).getReg();
4631   Register Op2 = MI.getOperand(2).getReg();
4632   auto MaybeCst = ConstantFoldBinOp(MI.getOpcode(), Op1, Op2, MRI);
4633   if (!MaybeCst)
4634     return false;
4635   MatchInfo = *MaybeCst;
4636   return true;
4637 }
4638 
4639 bool CombinerHelper::matchNarrowBinopFeedingAnd(
4640     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4641   // Look for a binop feeding into an AND with a mask:
4642   //
4643   // %add = G_ADD %lhs, %rhs
4644   // %and = G_AND %add, 000...11111111
4645   //
4646   // Check if it's possible to perform the binop at a narrower width and zext
4647   // back to the original width like so:
4648   //
4649   // %narrow_lhs = G_TRUNC %lhs
4650   // %narrow_rhs = G_TRUNC %rhs
4651   // %narrow_add = G_ADD %narrow_lhs, %narrow_rhs
4652   // %new_add = G_ZEXT %narrow_add
4653   // %and = G_AND %new_add, 000...11111111
4654   //
4655   // This can allow later combines to eliminate the G_AND if it turns out
4656   // that the mask is irrelevant.
4657   assert(MI.getOpcode() == TargetOpcode::G_AND);
4658   Register Dst = MI.getOperand(0).getReg();
4659   Register AndLHS = MI.getOperand(1).getReg();
4660   Register AndRHS = MI.getOperand(2).getReg();
4661   LLT WideTy = MRI.getType(Dst);
4662 
4663   // If the potential binop has more than one use, then it's possible that one
4664   // of those uses will need its full width.
4665   if (!WideTy.isScalar() || !MRI.hasOneNonDBGUse(AndLHS))
4666     return false;
4667 
4668   // Check if the LHS feeding the AND is impacted by the high bits that we're
4669   // masking out.
4670   //
4671   // e.g. for 64-bit x, y:
4672   //
4673   // add_64(x, y) & 65535 == zext(add_16(trunc(x), trunc(y))) & 65535
4674   MachineInstr *LHSInst = getDefIgnoringCopies(AndLHS, MRI);
4675   if (!LHSInst)
4676     return false;
4677   unsigned LHSOpc = LHSInst->getOpcode();
4678   switch (LHSOpc) {
4679   default:
4680     return false;
4681   case TargetOpcode::G_ADD:
4682   case TargetOpcode::G_SUB:
4683   case TargetOpcode::G_MUL:
4684   case TargetOpcode::G_AND:
4685   case TargetOpcode::G_OR:
4686   case TargetOpcode::G_XOR:
4687     break;
4688   }
4689 
4690   // Find the mask on the RHS.
4691   auto Cst = getIConstantVRegValWithLookThrough(AndRHS, MRI);
4692   if (!Cst)
4693     return false;
4694   auto Mask = Cst->Value;
4695   if (!Mask.isMask())
4696     return false;
4697 
4698   // No point in combining if there's nothing to truncate.
4699   unsigned NarrowWidth = Mask.countTrailingOnes();
4700   if (NarrowWidth == WideTy.getSizeInBits())
4701     return false;
4702   LLT NarrowTy = LLT::scalar(NarrowWidth);
4703 
4704   // Check if adding the zext + truncates could be harmful.
4705   auto &MF = *MI.getMF();
4706   const auto &TLI = getTargetLowering();
4707   LLVMContext &Ctx = MF.getFunction().getContext();
4708   auto &DL = MF.getDataLayout();
4709   if (!TLI.isTruncateFree(WideTy, NarrowTy, DL, Ctx) ||
4710       !TLI.isZExtFree(NarrowTy, WideTy, DL, Ctx))
4711     return false;
4712   if (!isLegalOrBeforeLegalizer({TargetOpcode::G_TRUNC, {NarrowTy, WideTy}}) ||
4713       !isLegalOrBeforeLegalizer({TargetOpcode::G_ZEXT, {WideTy, NarrowTy}}))
4714     return false;
4715   Register BinOpLHS = LHSInst->getOperand(1).getReg();
4716   Register BinOpRHS = LHSInst->getOperand(2).getReg();
4717   MatchInfo = [=, &MI](MachineIRBuilder &B) {
4718     auto NarrowLHS = Builder.buildTrunc(NarrowTy, BinOpLHS);
4719     auto NarrowRHS = Builder.buildTrunc(NarrowTy, BinOpRHS);
4720     auto NarrowBinOp =
4721         Builder.buildInstr(LHSOpc, {NarrowTy}, {NarrowLHS, NarrowRHS});
4722     auto Ext = Builder.buildZExt(WideTy, NarrowBinOp);
4723     Observer.changingInstr(MI);
4724     MI.getOperand(1).setReg(Ext.getReg(0));
4725     Observer.changedInstr(MI);
4726   };
4727   return true;
4728 }
4729 
4730 bool CombinerHelper::matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo) {
4731   unsigned Opc = MI.getOpcode();
4732   assert(Opc == TargetOpcode::G_UMULO || Opc == TargetOpcode::G_SMULO);
4733 
4734   if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICstOrSplat(2)))
4735     return false;
4736 
4737   MatchInfo = [=, &MI](MachineIRBuilder &B) {
4738     Observer.changingInstr(MI);
4739     unsigned NewOpc = Opc == TargetOpcode::G_UMULO ? TargetOpcode::G_UADDO
4740                                                    : TargetOpcode::G_SADDO;
4741     MI.setDesc(Builder.getTII().get(NewOpc));
4742     MI.getOperand(3).setReg(MI.getOperand(2).getReg());
4743     Observer.changedInstr(MI);
4744   };
4745   return true;
4746 }
4747 
4748 bool CombinerHelper::matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) {
4749   // (G_*MULO x, 0) -> 0 + no carry out
4750   assert(MI.getOpcode() == TargetOpcode::G_UMULO ||
4751          MI.getOpcode() == TargetOpcode::G_SMULO);
4752   if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICstOrSplat(0)))
4753     return false;
4754   Register Dst = MI.getOperand(0).getReg();
4755   Register Carry = MI.getOperand(1).getReg();
4756   if (!isConstantLegalOrBeforeLegalizer(MRI.getType(Dst)) ||
4757       !isConstantLegalOrBeforeLegalizer(MRI.getType(Carry)))
4758     return false;
4759   MatchInfo = [=](MachineIRBuilder &B) {
4760     B.buildConstant(Dst, 0);
4761     B.buildConstant(Carry, 0);
4762   };
4763   return true;
4764 }
4765 
4766 bool CombinerHelper::matchAddOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) {
4767   // (G_*ADDO x, 0) -> x + no carry out
4768   assert(MI.getOpcode() == TargetOpcode::G_UADDO ||
4769          MI.getOpcode() == TargetOpcode::G_SADDO);
4770   if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICstOrSplat(0)))
4771     return false;
4772   Register Carry = MI.getOperand(1).getReg();
4773   if (!isConstantLegalOrBeforeLegalizer(MRI.getType(Carry)))
4774     return false;
4775   Register Dst = MI.getOperand(0).getReg();
4776   Register LHS = MI.getOperand(2).getReg();
4777   MatchInfo = [=](MachineIRBuilder &B) {
4778     B.buildCopy(Dst, LHS);
4779     B.buildConstant(Carry, 0);
4780   };
4781   return true;
4782 }
4783 
4784 MachineInstr *CombinerHelper::buildUDivUsingMul(MachineInstr &MI) {
4785   assert(MI.getOpcode() == TargetOpcode::G_UDIV);
4786   auto &UDiv = cast<GenericMachineInstr>(MI);
4787   Register Dst = UDiv.getReg(0);
4788   Register LHS = UDiv.getReg(1);
4789   Register RHS = UDiv.getReg(2);
4790   LLT Ty = MRI.getType(Dst);
4791   LLT ScalarTy = Ty.getScalarType();
4792   const unsigned EltBits = ScalarTy.getScalarSizeInBits();
4793   LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4794   LLT ScalarShiftAmtTy = ShiftAmtTy.getScalarType();
4795   auto &MIB = Builder;
4796   MIB.setInstrAndDebugLoc(MI);
4797 
4798   bool UseNPQ = false;
4799   SmallVector<Register, 16> PreShifts, PostShifts, MagicFactors, NPQFactors;
4800 
4801   auto BuildUDIVPattern = [&](const Constant *C) {
4802     auto *CI = cast<ConstantInt>(C);
4803     const APInt &Divisor = CI->getValue();
4804     UnsignedDivisionByConstantInfo magics =
4805         UnsignedDivisionByConstantInfo::get(Divisor);
4806     unsigned PreShift = 0, PostShift = 0;
4807 
4808     // If the divisor is even, we can avoid using the expensive fixup by
4809     // shifting the divided value upfront.
4810     if (magics.IsAdd && !Divisor[0]) {
4811       PreShift = Divisor.countTrailingZeros();
4812       // Get magic number for the shifted divisor.
4813       magics =
4814           UnsignedDivisionByConstantInfo::get(Divisor.lshr(PreShift), PreShift);
4815       assert(!magics.IsAdd && "Should use cheap fixup now");
4816     }
4817 
4818     unsigned SelNPQ;
4819     if (!magics.IsAdd || Divisor.isOneValue()) {
4820       assert(magics.ShiftAmount < Divisor.getBitWidth() &&
4821              "We shouldn't generate an undefined shift!");
4822       PostShift = magics.ShiftAmount;
4823       SelNPQ = false;
4824     } else {
4825       PostShift = magics.ShiftAmount - 1;
4826       SelNPQ = true;
4827     }
4828 
4829     PreShifts.push_back(
4830         MIB.buildConstant(ScalarShiftAmtTy, PreShift).getReg(0));
4831     MagicFactors.push_back(MIB.buildConstant(ScalarTy, magics.Magic).getReg(0));
4832     NPQFactors.push_back(
4833         MIB.buildConstant(ScalarTy,
4834                           SelNPQ ? APInt::getOneBitSet(EltBits, EltBits - 1)
4835                                  : APInt::getZero(EltBits))
4836             .getReg(0));
4837     PostShifts.push_back(
4838         MIB.buildConstant(ScalarShiftAmtTy, PostShift).getReg(0));
4839     UseNPQ |= SelNPQ;
4840     return true;
4841   };
4842 
4843   // Collect the shifts/magic values from each element.
4844   bool Matched = matchUnaryPredicate(MRI, RHS, BuildUDIVPattern);
4845   (void)Matched;
4846   assert(Matched && "Expected unary predicate match to succeed");
4847 
4848   Register PreShift, PostShift, MagicFactor, NPQFactor;
4849   auto *RHSDef = getOpcodeDef<GBuildVector>(RHS, MRI);
4850   if (RHSDef) {
4851     PreShift = MIB.buildBuildVector(ShiftAmtTy, PreShifts).getReg(0);
4852     MagicFactor = MIB.buildBuildVector(Ty, MagicFactors).getReg(0);
4853     NPQFactor = MIB.buildBuildVector(Ty, NPQFactors).getReg(0);
4854     PostShift = MIB.buildBuildVector(ShiftAmtTy, PostShifts).getReg(0);
4855   } else {
4856     assert(MRI.getType(RHS).isScalar() &&
4857            "Non-build_vector operation should have been a scalar");
4858     PreShift = PreShifts[0];
4859     MagicFactor = MagicFactors[0];
4860     PostShift = PostShifts[0];
4861   }
4862 
4863   Register Q = LHS;
4864   Q = MIB.buildLShr(Ty, Q, PreShift).getReg(0);
4865 
4866   // Multiply the numerator (operand 0) by the magic value.
4867   Q = MIB.buildUMulH(Ty, Q, MagicFactor).getReg(0);
4868 
4869   if (UseNPQ) {
4870     Register NPQ = MIB.buildSub(Ty, LHS, Q).getReg(0);
4871 
4872     // For vectors we might have a mix of non-NPQ/NPQ paths, so use
4873     // G_UMULH to act as a SRL-by-1 for NPQ, else multiply by zero.
4874     if (Ty.isVector())
4875       NPQ = MIB.buildUMulH(Ty, NPQ, NPQFactor).getReg(0);
4876     else
4877       NPQ = MIB.buildLShr(Ty, NPQ, MIB.buildConstant(ShiftAmtTy, 1)).getReg(0);
4878 
4879     Q = MIB.buildAdd(Ty, NPQ, Q).getReg(0);
4880   }
4881 
4882   Q = MIB.buildLShr(Ty, Q, PostShift).getReg(0);
4883   auto One = MIB.buildConstant(Ty, 1);
4884   auto IsOne = MIB.buildICmp(
4885       CmpInst::Predicate::ICMP_EQ,
4886       Ty.isScalar() ? LLT::scalar(1) : Ty.changeElementSize(1), RHS, One);
4887   return MIB.buildSelect(Ty, IsOne, LHS, Q);
4888 }
4889 
4890 bool CombinerHelper::matchUDivByConst(MachineInstr &MI) {
4891   assert(MI.getOpcode() == TargetOpcode::G_UDIV);
4892   Register Dst = MI.getOperand(0).getReg();
4893   Register RHS = MI.getOperand(2).getReg();
4894   LLT DstTy = MRI.getType(Dst);
4895   auto *RHSDef = MRI.getVRegDef(RHS);
4896   if (!isConstantOrConstantVector(*RHSDef, MRI))
4897     return false;
4898 
4899   auto &MF = *MI.getMF();
4900   AttributeList Attr = MF.getFunction().getAttributes();
4901   const auto &TLI = getTargetLowering();
4902   LLVMContext &Ctx = MF.getFunction().getContext();
4903   auto &DL = MF.getDataLayout();
4904   if (TLI.isIntDivCheap(getApproximateEVTForLLT(DstTy, DL, Ctx), Attr))
4905     return false;
4906 
4907   // Don't do this for minsize because the instruction sequence is usually
4908   // larger.
4909   if (MF.getFunction().hasMinSize())
4910     return false;
4911 
4912   // Don't do this if the types are not going to be legal.
4913   if (LI) {
4914     if (!isLegalOrBeforeLegalizer({TargetOpcode::G_MUL, {DstTy, DstTy}}))
4915       return false;
4916     if (!isLegalOrBeforeLegalizer({TargetOpcode::G_UMULH, {DstTy}}))
4917       return false;
4918     if (!isLegalOrBeforeLegalizer(
4919             {TargetOpcode::G_ICMP,
4920              {DstTy.isVector() ? DstTy.changeElementSize(1) : LLT::scalar(1),
4921               DstTy}}))
4922       return false;
4923   }
4924 
4925   auto CheckEltValue = [&](const Constant *C) {
4926     if (auto *CI = dyn_cast_or_null<ConstantInt>(C))
4927       return !CI->isZero();
4928     return false;
4929   };
4930   return matchUnaryPredicate(MRI, RHS, CheckEltValue);
4931 }
4932 
4933 void CombinerHelper::applyUDivByConst(MachineInstr &MI) {
4934   auto *NewMI = buildUDivUsingMul(MI);
4935   replaceSingleDefInstWithReg(MI, NewMI->getOperand(0).getReg());
4936 }
4937 
4938 bool CombinerHelper::matchUMulHToLShr(MachineInstr &MI) {
4939   assert(MI.getOpcode() == TargetOpcode::G_UMULH);
4940   Register RHS = MI.getOperand(2).getReg();
4941   Register Dst = MI.getOperand(0).getReg();
4942   LLT Ty = MRI.getType(Dst);
4943   LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4944   auto MatchPow2ExceptOne = [&](const Constant *C) {
4945     if (auto *CI = dyn_cast<ConstantInt>(C))
4946       return CI->getValue().isPowerOf2() && !CI->getValue().isOne();
4947     return false;
4948   };
4949   if (!matchUnaryPredicate(MRI, RHS, MatchPow2ExceptOne, false))
4950     return false;
4951   return isLegalOrBeforeLegalizer({TargetOpcode::G_LSHR, {Ty, ShiftAmtTy}});
4952 }
4953 
4954 void CombinerHelper::applyUMulHToLShr(MachineInstr &MI) {
4955   Register LHS = MI.getOperand(1).getReg();
4956   Register RHS = MI.getOperand(2).getReg();
4957   Register Dst = MI.getOperand(0).getReg();
4958   LLT Ty = MRI.getType(Dst);
4959   LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4960   unsigned NumEltBits = Ty.getScalarSizeInBits();
4961 
4962   Builder.setInstrAndDebugLoc(MI);
4963   auto LogBase2 = buildLogBase2(RHS, Builder);
4964   auto ShiftAmt =
4965       Builder.buildSub(Ty, Builder.buildConstant(Ty, NumEltBits), LogBase2);
4966   auto Trunc = Builder.buildZExtOrTrunc(ShiftAmtTy, ShiftAmt);
4967   Builder.buildLShr(Dst, LHS, Trunc);
4968   MI.eraseFromParent();
4969 }
4970 
4971 bool CombinerHelper::matchRedundantNegOperands(MachineInstr &MI,
4972                                                BuildFnTy &MatchInfo) {
4973   unsigned Opc = MI.getOpcode();
4974   assert(Opc == TargetOpcode::G_FADD || Opc == TargetOpcode::G_FSUB ||
4975          Opc == TargetOpcode::G_FMUL || Opc == TargetOpcode::G_FDIV ||
4976          Opc == TargetOpcode::G_FMAD || Opc == TargetOpcode::G_FMA);
4977 
4978   Register Dst = MI.getOperand(0).getReg();
4979   Register X = MI.getOperand(1).getReg();
4980   Register Y = MI.getOperand(2).getReg();
4981   LLT Type = MRI.getType(Dst);
4982 
4983   // fold (fadd x, fneg(y)) -> (fsub x, y)
4984   // fold (fadd fneg(y), x) -> (fsub x, y)
4985   // G_ADD is commutative so both cases are checked by m_GFAdd
4986   if (mi_match(Dst, MRI, m_GFAdd(m_Reg(X), m_GFNeg(m_Reg(Y)))) &&
4987       isLegalOrBeforeLegalizer({TargetOpcode::G_FSUB, {Type}})) {
4988     Opc = TargetOpcode::G_FSUB;
4989   }
4990   /// fold (fsub x, fneg(y)) -> (fadd x, y)
4991   else if (mi_match(Dst, MRI, m_GFSub(m_Reg(X), m_GFNeg(m_Reg(Y)))) &&
4992            isLegalOrBeforeLegalizer({TargetOpcode::G_FADD, {Type}})) {
4993     Opc = TargetOpcode::G_FADD;
4994   }
4995   // fold (fmul fneg(x), fneg(y)) -> (fmul x, y)
4996   // fold (fdiv fneg(x), fneg(y)) -> (fdiv x, y)
4997   // fold (fmad fneg(x), fneg(y), z) -> (fmad x, y, z)
4998   // fold (fma fneg(x), fneg(y), z) -> (fma x, y, z)
4999   else if ((Opc == TargetOpcode::G_FMUL || Opc == TargetOpcode::G_FDIV ||
5000             Opc == TargetOpcode::G_FMAD || Opc == TargetOpcode::G_FMA) &&
5001            mi_match(X, MRI, m_GFNeg(m_Reg(X))) &&
5002            mi_match(Y, MRI, m_GFNeg(m_Reg(Y)))) {
5003     // no opcode change
5004   } else
5005     return false;
5006 
5007   MatchInfo = [=, &MI](MachineIRBuilder &B) {
5008     Observer.changingInstr(MI);
5009     MI.setDesc(B.getTII().get(Opc));
5010     MI.getOperand(1).setReg(X);
5011     MI.getOperand(2).setReg(Y);
5012     Observer.changedInstr(MI);
5013   };
5014   return true;
5015 }
5016 
5017 /// Checks if \p MI is TargetOpcode::G_FMUL and contractable either
5018 /// due to global flags or MachineInstr flags.
5019 static bool isContractableFMul(MachineInstr &MI, bool AllowFusionGlobally) {
5020   if (MI.getOpcode() != TargetOpcode::G_FMUL)
5021     return false;
5022   return AllowFusionGlobally || MI.getFlag(MachineInstr::MIFlag::FmContract);
5023 }
5024 
5025 static bool hasMoreUses(const MachineInstr &MI0, const MachineInstr &MI1,
5026                         const MachineRegisterInfo &MRI) {
5027   return std::distance(MRI.use_instr_nodbg_begin(MI0.getOperand(0).getReg()),
5028                        MRI.use_instr_nodbg_end()) >
5029          std::distance(MRI.use_instr_nodbg_begin(MI1.getOperand(0).getReg()),
5030                        MRI.use_instr_nodbg_end());
5031 }
5032 
5033 bool CombinerHelper::canCombineFMadOrFMA(MachineInstr &MI,
5034                                          bool &AllowFusionGlobally,
5035                                          bool &HasFMAD, bool &Aggressive,
5036                                          bool CanReassociate) {
5037 
5038   auto *MF = MI.getMF();
5039   const auto &TLI = *MF->getSubtarget().getTargetLowering();
5040   const TargetOptions &Options = MF->getTarget().Options;
5041   LLT DstType = MRI.getType(MI.getOperand(0).getReg());
5042 
5043   if (CanReassociate &&
5044       !(Options.UnsafeFPMath || MI.getFlag(MachineInstr::MIFlag::FmReassoc)))
5045     return false;
5046 
5047   // Floating-point multiply-add with intermediate rounding.
5048   HasFMAD = (LI && TLI.isFMADLegal(MI, DstType));
5049   // Floating-point multiply-add without intermediate rounding.
5050   bool HasFMA = TLI.isFMAFasterThanFMulAndFAdd(*MF, DstType) &&
5051                 isLegalOrBeforeLegalizer({TargetOpcode::G_FMA, {DstType}});
5052   // No valid opcode, do not combine.
5053   if (!HasFMAD && !HasFMA)
5054     return false;
5055 
5056   AllowFusionGlobally = Options.AllowFPOpFusion == FPOpFusion::Fast ||
5057                         Options.UnsafeFPMath || HasFMAD;
5058   // If the addition is not contractable, do not combine.
5059   if (!AllowFusionGlobally && !MI.getFlag(MachineInstr::MIFlag::FmContract))
5060     return false;
5061 
5062   Aggressive = TLI.enableAggressiveFMAFusion(DstType);
5063   return true;
5064 }
5065 
5066 bool CombinerHelper::matchCombineFAddFMulToFMadOrFMA(
5067     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
5068   assert(MI.getOpcode() == TargetOpcode::G_FADD);
5069 
5070   bool AllowFusionGlobally, HasFMAD, Aggressive;
5071   if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
5072     return false;
5073 
5074   Register Op1 = MI.getOperand(1).getReg();
5075   Register Op2 = MI.getOperand(2).getReg();
5076   DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
5077   DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
5078   unsigned PreferredFusedOpcode =
5079       HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5080 
5081   // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
5082   // prefer to fold the multiply with fewer uses.
5083   if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
5084       isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
5085     if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
5086       std::swap(LHS, RHS);
5087   }
5088 
5089   // fold (fadd (fmul x, y), z) -> (fma x, y, z)
5090   if (isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
5091       (Aggressive || MRI.hasOneNonDBGUse(LHS.Reg))) {
5092     MatchInfo = [=, &MI](MachineIRBuilder &B) {
5093       B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5094                    {LHS.MI->getOperand(1).getReg(),
5095                     LHS.MI->getOperand(2).getReg(), RHS.Reg});
5096     };
5097     return true;
5098   }
5099 
5100   // fold (fadd x, (fmul y, z)) -> (fma y, z, x)
5101   if (isContractableFMul(*RHS.MI, AllowFusionGlobally) &&
5102       (Aggressive || MRI.hasOneNonDBGUse(RHS.Reg))) {
5103     MatchInfo = [=, &MI](MachineIRBuilder &B) {
5104       B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5105                    {RHS.MI->getOperand(1).getReg(),
5106                     RHS.MI->getOperand(2).getReg(), LHS.Reg});
5107     };
5108     return true;
5109   }
5110 
5111   return false;
5112 }
5113 
5114 bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMA(
5115     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
5116   assert(MI.getOpcode() == TargetOpcode::G_FADD);
5117 
5118   bool AllowFusionGlobally, HasFMAD, Aggressive;
5119   if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
5120     return false;
5121 
5122   const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering();
5123   Register Op1 = MI.getOperand(1).getReg();
5124   Register Op2 = MI.getOperand(2).getReg();
5125   DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
5126   DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
5127   LLT DstType = MRI.getType(MI.getOperand(0).getReg());
5128 
5129   unsigned PreferredFusedOpcode =
5130       HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5131 
5132   // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
5133   // prefer to fold the multiply with fewer uses.
5134   if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
5135       isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
5136     if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
5137       std::swap(LHS, RHS);
5138   }
5139 
5140   // fold (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
5141   MachineInstr *FpExtSrc;
5142   if (mi_match(LHS.Reg, MRI, m_GFPExt(m_MInstr(FpExtSrc))) &&
5143       isContractableFMul(*FpExtSrc, AllowFusionGlobally) &&
5144       TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
5145                           MRI.getType(FpExtSrc->getOperand(1).getReg()))) {
5146     MatchInfo = [=, &MI](MachineIRBuilder &B) {
5147       auto FpExtX = B.buildFPExt(DstType, FpExtSrc->getOperand(1).getReg());
5148       auto FpExtY = B.buildFPExt(DstType, FpExtSrc->getOperand(2).getReg());
5149       B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5150                    {FpExtX.getReg(0), FpExtY.getReg(0), RHS.Reg});
5151     };
5152     return true;
5153   }
5154 
5155   // fold (fadd z, (fpext (fmul x, y))) -> (fma (fpext x), (fpext y), z)
5156   // Note: Commutes FADD operands.
5157   if (mi_match(RHS.Reg, MRI, m_GFPExt(m_MInstr(FpExtSrc))) &&
5158       isContractableFMul(*FpExtSrc, AllowFusionGlobally) &&
5159       TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
5160                           MRI.getType(FpExtSrc->getOperand(1).getReg()))) {
5161     MatchInfo = [=, &MI](MachineIRBuilder &B) {
5162       auto FpExtX = B.buildFPExt(DstType, FpExtSrc->getOperand(1).getReg());
5163       auto FpExtY = B.buildFPExt(DstType, FpExtSrc->getOperand(2).getReg());
5164       B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5165                    {FpExtX.getReg(0), FpExtY.getReg(0), LHS.Reg});
5166     };
5167     return true;
5168   }
5169 
5170   return false;
5171 }
5172 
5173 bool CombinerHelper::matchCombineFAddFMAFMulToFMadOrFMA(
5174     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
5175   assert(MI.getOpcode() == TargetOpcode::G_FADD);
5176 
5177   bool AllowFusionGlobally, HasFMAD, Aggressive;
5178   if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive, true))
5179     return false;
5180 
5181   Register Op1 = MI.getOperand(1).getReg();
5182   Register Op2 = MI.getOperand(2).getReg();
5183   DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
5184   DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
5185   LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
5186 
5187   unsigned PreferredFusedOpcode =
5188       HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5189 
5190   // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
5191   // prefer to fold the multiply with fewer uses.
5192   if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
5193       isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
5194     if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
5195       std::swap(LHS, RHS);
5196   }
5197 
5198   MachineInstr *FMA = nullptr;
5199   Register Z;
5200   // fold (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z))
5201   if (LHS.MI->getOpcode() == PreferredFusedOpcode &&
5202       (MRI.getVRegDef(LHS.MI->getOperand(3).getReg())->getOpcode() ==
5203        TargetOpcode::G_FMUL) &&
5204       MRI.hasOneNonDBGUse(LHS.MI->getOperand(0).getReg()) &&
5205       MRI.hasOneNonDBGUse(LHS.MI->getOperand(3).getReg())) {
5206     FMA = LHS.MI;
5207     Z = RHS.Reg;
5208   }
5209   // fold (fadd z, (fma x, y, (fmul u, v))) -> (fma x, y, (fma u, v, z))
5210   else if (RHS.MI->getOpcode() == PreferredFusedOpcode &&
5211            (MRI.getVRegDef(RHS.MI->getOperand(3).getReg())->getOpcode() ==
5212             TargetOpcode::G_FMUL) &&
5213            MRI.hasOneNonDBGUse(RHS.MI->getOperand(0).getReg()) &&
5214            MRI.hasOneNonDBGUse(RHS.MI->getOperand(3).getReg())) {
5215     Z = LHS.Reg;
5216     FMA = RHS.MI;
5217   }
5218 
5219   if (FMA) {
5220     MachineInstr *FMulMI = MRI.getVRegDef(FMA->getOperand(3).getReg());
5221     Register X = FMA->getOperand(1).getReg();
5222     Register Y = FMA->getOperand(2).getReg();
5223     Register U = FMulMI->getOperand(1).getReg();
5224     Register V = FMulMI->getOperand(2).getReg();
5225 
5226     MatchInfo = [=, &MI](MachineIRBuilder &B) {
5227       Register InnerFMA = MRI.createGenericVirtualRegister(DstTy);
5228       B.buildInstr(PreferredFusedOpcode, {InnerFMA}, {U, V, Z});
5229       B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5230                    {X, Y, InnerFMA});
5231     };
5232     return true;
5233   }
5234 
5235   return false;
5236 }
5237 
5238 bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
5239     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
5240   assert(MI.getOpcode() == TargetOpcode::G_FADD);
5241 
5242   bool AllowFusionGlobally, HasFMAD, Aggressive;
5243   if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
5244     return false;
5245 
5246   if (!Aggressive)
5247     return false;
5248 
5249   const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering();
5250   LLT DstType = MRI.getType(MI.getOperand(0).getReg());
5251   Register Op1 = MI.getOperand(1).getReg();
5252   Register Op2 = MI.getOperand(2).getReg();
5253   DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
5254   DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
5255 
5256   unsigned PreferredFusedOpcode =
5257       HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5258 
5259   // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
5260   // prefer to fold the multiply with fewer uses.
5261   if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
5262       isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
5263     if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
5264       std::swap(LHS, RHS);
5265   }
5266 
5267   // Builds: (fma x, y, (fma (fpext u), (fpext v), z))
5268   auto buildMatchInfo = [=, &MI](Register U, Register V, Register Z, Register X,
5269                                  Register Y, MachineIRBuilder &B) {
5270     Register FpExtU = B.buildFPExt(DstType, U).getReg(0);
5271     Register FpExtV = B.buildFPExt(DstType, V).getReg(0);
5272     Register InnerFMA =
5273         B.buildInstr(PreferredFusedOpcode, {DstType}, {FpExtU, FpExtV, Z})
5274             .getReg(0);
5275     B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5276                  {X, Y, InnerFMA});
5277   };
5278 
5279   MachineInstr *FMulMI, *FMAMI;
5280   // fold (fadd (fma x, y, (fpext (fmul u, v))), z)
5281   //   -> (fma x, y, (fma (fpext u), (fpext v), z))
5282   if (LHS.MI->getOpcode() == PreferredFusedOpcode &&
5283       mi_match(LHS.MI->getOperand(3).getReg(), MRI,
5284                m_GFPExt(m_MInstr(FMulMI))) &&
5285       isContractableFMul(*FMulMI, AllowFusionGlobally) &&
5286       TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
5287                           MRI.getType(FMulMI->getOperand(0).getReg()))) {
5288     MatchInfo = [=](MachineIRBuilder &B) {
5289       buildMatchInfo(FMulMI->getOperand(1).getReg(),
5290                      FMulMI->getOperand(2).getReg(), RHS.Reg,
5291                      LHS.MI->getOperand(1).getReg(),
5292                      LHS.MI->getOperand(2).getReg(), B);
5293     };
5294     return true;
5295   }
5296 
5297   // fold (fadd (fpext (fma x, y, (fmul u, v))), z)
5298   //   -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
5299   // FIXME: This turns two single-precision and one double-precision
5300   // operation into two double-precision operations, which might not be
5301   // interesting for all targets, especially GPUs.
5302   if (mi_match(LHS.Reg, MRI, m_GFPExt(m_MInstr(FMAMI))) &&
5303       FMAMI->getOpcode() == PreferredFusedOpcode) {
5304     MachineInstr *FMulMI = MRI.getVRegDef(FMAMI->getOperand(3).getReg());
5305     if (isContractableFMul(*FMulMI, AllowFusionGlobally) &&
5306         TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
5307                             MRI.getType(FMAMI->getOperand(0).getReg()))) {
5308       MatchInfo = [=](MachineIRBuilder &B) {
5309         Register X = FMAMI->getOperand(1).getReg();
5310         Register Y = FMAMI->getOperand(2).getReg();
5311         X = B.buildFPExt(DstType, X).getReg(0);
5312         Y = B.buildFPExt(DstType, Y).getReg(0);
5313         buildMatchInfo(FMulMI->getOperand(1).getReg(),
5314                        FMulMI->getOperand(2).getReg(), RHS.Reg, X, Y, B);
5315       };
5316 
5317       return true;
5318     }
5319   }
5320 
5321   // fold (fadd z, (fma x, y, (fpext (fmul u, v)))
5322   //   -> (fma x, y, (fma (fpext u), (fpext v), z))
5323   if (RHS.MI->getOpcode() == PreferredFusedOpcode &&
5324       mi_match(RHS.MI->getOperand(3).getReg(), MRI,
5325                m_GFPExt(m_MInstr(FMulMI))) &&
5326       isContractableFMul(*FMulMI, AllowFusionGlobally) &&
5327       TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
5328                           MRI.getType(FMulMI->getOperand(0).getReg()))) {
5329     MatchInfo = [=](MachineIRBuilder &B) {
5330       buildMatchInfo(FMulMI->getOperand(1).getReg(),
5331                      FMulMI->getOperand(2).getReg(), LHS.Reg,
5332                      RHS.MI->getOperand(1).getReg(),
5333                      RHS.MI->getOperand(2).getReg(), B);
5334     };
5335     return true;
5336   }
5337 
5338   // fold (fadd z, (fpext (fma x, y, (fmul u, v)))
5339   //   -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
5340   // FIXME: This turns two single-precision and one double-precision
5341   // operation into two double-precision operations, which might not be
5342   // interesting for all targets, especially GPUs.
5343   if (mi_match(RHS.Reg, MRI, m_GFPExt(m_MInstr(FMAMI))) &&
5344       FMAMI->getOpcode() == PreferredFusedOpcode) {
5345     MachineInstr *FMulMI = MRI.getVRegDef(FMAMI->getOperand(3).getReg());
5346     if (isContractableFMul(*FMulMI, AllowFusionGlobally) &&
5347         TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
5348                             MRI.getType(FMAMI->getOperand(0).getReg()))) {
5349       MatchInfo = [=](MachineIRBuilder &B) {
5350         Register X = FMAMI->getOperand(1).getReg();
5351         Register Y = FMAMI->getOperand(2).getReg();
5352         X = B.buildFPExt(DstType, X).getReg(0);
5353         Y = B.buildFPExt(DstType, Y).getReg(0);
5354         buildMatchInfo(FMulMI->getOperand(1).getReg(),
5355                        FMulMI->getOperand(2).getReg(), LHS.Reg, X, Y, B);
5356       };
5357       return true;
5358     }
5359   }
5360 
5361   return false;
5362 }
5363 
5364 bool CombinerHelper::matchCombineFSubFMulToFMadOrFMA(
5365     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
5366   assert(MI.getOpcode() == TargetOpcode::G_FSUB);
5367 
5368   bool AllowFusionGlobally, HasFMAD, Aggressive;
5369   if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
5370     return false;
5371 
5372   Register Op1 = MI.getOperand(1).getReg();
5373   Register Op2 = MI.getOperand(2).getReg();
5374   DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
5375   DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
5376   LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
5377 
5378   // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
5379   // prefer to fold the multiply with fewer uses.
5380   int FirstMulHasFewerUses = true;
5381   if (isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
5382       isContractableFMul(*RHS.MI, AllowFusionGlobally) &&
5383       hasMoreUses(*LHS.MI, *RHS.MI, MRI))
5384     FirstMulHasFewerUses = false;
5385 
5386   unsigned PreferredFusedOpcode =
5387       HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5388 
5389   // fold (fsub (fmul x, y), z) -> (fma x, y, -z)
5390   if (FirstMulHasFewerUses &&
5391       (isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
5392        (Aggressive || MRI.hasOneNonDBGUse(LHS.Reg)))) {
5393     MatchInfo = [=, &MI](MachineIRBuilder &B) {
5394       Register NegZ = B.buildFNeg(DstTy, RHS.Reg).getReg(0);
5395       B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5396                    {LHS.MI->getOperand(1).getReg(),
5397                     LHS.MI->getOperand(2).getReg(), NegZ});
5398     };
5399     return true;
5400   }
5401   // fold (fsub x, (fmul y, z)) -> (fma -y, z, x)
5402   else if ((isContractableFMul(*RHS.MI, AllowFusionGlobally) &&
5403             (Aggressive || MRI.hasOneNonDBGUse(RHS.Reg)))) {
5404     MatchInfo = [=, &MI](MachineIRBuilder &B) {
5405       Register NegY =
5406           B.buildFNeg(DstTy, RHS.MI->getOperand(1).getReg()).getReg(0);
5407       B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5408                    {NegY, RHS.MI->getOperand(2).getReg(), LHS.Reg});
5409     };
5410     return true;
5411   }
5412 
5413   return false;
5414 }
5415 
5416 bool CombinerHelper::matchCombineFSubFNegFMulToFMadOrFMA(
5417     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
5418   assert(MI.getOpcode() == TargetOpcode::G_FSUB);
5419 
5420   bool AllowFusionGlobally, HasFMAD, Aggressive;
5421   if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
5422     return false;
5423 
5424   Register LHSReg = MI.getOperand(1).getReg();
5425   Register RHSReg = MI.getOperand(2).getReg();
5426   LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
5427 
5428   unsigned PreferredFusedOpcode =
5429       HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5430 
5431   MachineInstr *FMulMI;
5432   // fold (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
5433   if (mi_match(LHSReg, MRI, m_GFNeg(m_MInstr(FMulMI))) &&
5434       (Aggressive || (MRI.hasOneNonDBGUse(LHSReg) &&
5435                       MRI.hasOneNonDBGUse(FMulMI->getOperand(0).getReg()))) &&
5436       isContractableFMul(*FMulMI, AllowFusionGlobally)) {
5437     MatchInfo = [=, &MI](MachineIRBuilder &B) {
5438       Register NegX =
5439           B.buildFNeg(DstTy, FMulMI->getOperand(1).getReg()).getReg(0);
5440       Register NegZ = B.buildFNeg(DstTy, RHSReg).getReg(0);
5441       B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5442                    {NegX, FMulMI->getOperand(2).getReg(), NegZ});
5443     };
5444     return true;
5445   }
5446 
5447   // fold (fsub x, (fneg (fmul, y, z))) -> (fma y, z, x)
5448   if (mi_match(RHSReg, MRI, m_GFNeg(m_MInstr(FMulMI))) &&
5449       (Aggressive || (MRI.hasOneNonDBGUse(RHSReg) &&
5450                       MRI.hasOneNonDBGUse(FMulMI->getOperand(0).getReg()))) &&
5451       isContractableFMul(*FMulMI, AllowFusionGlobally)) {
5452     MatchInfo = [=, &MI](MachineIRBuilder &B) {
5453       B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5454                    {FMulMI->getOperand(1).getReg(),
5455                     FMulMI->getOperand(2).getReg(), LHSReg});
5456     };
5457     return true;
5458   }
5459 
5460   return false;
5461 }
5462 
5463 bool CombinerHelper::matchCombineFSubFpExtFMulToFMadOrFMA(
5464     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
5465   assert(MI.getOpcode() == TargetOpcode::G_FSUB);
5466 
5467   bool AllowFusionGlobally, HasFMAD, Aggressive;
5468   if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
5469     return false;
5470 
5471   Register LHSReg = MI.getOperand(1).getReg();
5472   Register RHSReg = MI.getOperand(2).getReg();
5473   LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
5474 
5475   unsigned PreferredFusedOpcode =
5476       HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5477 
5478   MachineInstr *FMulMI;
5479   // fold (fsub (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), (fneg z))
5480   if (mi_match(LHSReg, MRI, m_GFPExt(m_MInstr(FMulMI))) &&
5481       isContractableFMul(*FMulMI, AllowFusionGlobally) &&
5482       (Aggressive || MRI.hasOneNonDBGUse(LHSReg))) {
5483     MatchInfo = [=, &MI](MachineIRBuilder &B) {
5484       Register FpExtX =
5485           B.buildFPExt(DstTy, FMulMI->getOperand(1).getReg()).getReg(0);
5486       Register FpExtY =
5487           B.buildFPExt(DstTy, FMulMI->getOperand(2).getReg()).getReg(0);
5488       Register NegZ = B.buildFNeg(DstTy, RHSReg).getReg(0);
5489       B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5490                    {FpExtX, FpExtY, NegZ});
5491     };
5492     return true;
5493   }
5494 
5495   // fold (fsub x, (fpext (fmul y, z))) -> (fma (fneg (fpext y)), (fpext z), x)
5496   if (mi_match(RHSReg, MRI, m_GFPExt(m_MInstr(FMulMI))) &&
5497       isContractableFMul(*FMulMI, AllowFusionGlobally) &&
5498       (Aggressive || MRI.hasOneNonDBGUse(RHSReg))) {
5499     MatchInfo = [=, &MI](MachineIRBuilder &B) {
5500       Register FpExtY =
5501           B.buildFPExt(DstTy, FMulMI->getOperand(1).getReg()).getReg(0);
5502       Register NegY = B.buildFNeg(DstTy, FpExtY).getReg(0);
5503       Register FpExtZ =
5504           B.buildFPExt(DstTy, FMulMI->getOperand(2).getReg()).getReg(0);
5505       B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5506                    {NegY, FpExtZ, LHSReg});
5507     };
5508     return true;
5509   }
5510 
5511   return false;
5512 }
5513 
5514 bool CombinerHelper::matchCombineFSubFpExtFNegFMulToFMadOrFMA(
5515     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
5516   assert(MI.getOpcode() == TargetOpcode::G_FSUB);
5517 
5518   bool AllowFusionGlobally, HasFMAD, Aggressive;
5519   if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
5520     return false;
5521 
5522   const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering();
5523   LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
5524   Register LHSReg = MI.getOperand(1).getReg();
5525   Register RHSReg = MI.getOperand(2).getReg();
5526 
5527   unsigned PreferredFusedOpcode =
5528       HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5529 
5530   auto buildMatchInfo = [=](Register Dst, Register X, Register Y, Register Z,
5531                             MachineIRBuilder &B) {
5532     Register FpExtX = B.buildFPExt(DstTy, X).getReg(0);
5533     Register FpExtY = B.buildFPExt(DstTy, Y).getReg(0);
5534     B.buildInstr(PreferredFusedOpcode, {Dst}, {FpExtX, FpExtY, Z});
5535   };
5536 
5537   MachineInstr *FMulMI;
5538   // fold (fsub (fpext (fneg (fmul x, y))), z) ->
5539   //      (fneg (fma (fpext x), (fpext y), z))
5540   // fold (fsub (fneg (fpext (fmul x, y))), z) ->
5541   //      (fneg (fma (fpext x), (fpext y), z))
5542   if ((mi_match(LHSReg, MRI, m_GFPExt(m_GFNeg(m_MInstr(FMulMI)))) ||
5543        mi_match(LHSReg, MRI, m_GFNeg(m_GFPExt(m_MInstr(FMulMI))))) &&
5544       isContractableFMul(*FMulMI, AllowFusionGlobally) &&
5545       TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstTy,
5546                           MRI.getType(FMulMI->getOperand(0).getReg()))) {
5547     MatchInfo = [=, &MI](MachineIRBuilder &B) {
5548       Register FMAReg = MRI.createGenericVirtualRegister(DstTy);
5549       buildMatchInfo(FMAReg, FMulMI->getOperand(1).getReg(),
5550                      FMulMI->getOperand(2).getReg(), RHSReg, B);
5551       B.buildFNeg(MI.getOperand(0).getReg(), FMAReg);
5552     };
5553     return true;
5554   }
5555 
5556   // fold (fsub x, (fpext (fneg (fmul y, z)))) -> (fma (fpext y), (fpext z), x)
5557   // fold (fsub x, (fneg (fpext (fmul y, z)))) -> (fma (fpext y), (fpext z), x)
5558   if ((mi_match(RHSReg, MRI, m_GFPExt(m_GFNeg(m_MInstr(FMulMI)))) ||
5559        mi_match(RHSReg, MRI, m_GFNeg(m_GFPExt(m_MInstr(FMulMI))))) &&
5560       isContractableFMul(*FMulMI, AllowFusionGlobally) &&
5561       TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstTy,
5562                           MRI.getType(FMulMI->getOperand(0).getReg()))) {
5563     MatchInfo = [=, &MI](MachineIRBuilder &B) {
5564       buildMatchInfo(MI.getOperand(0).getReg(), FMulMI->getOperand(1).getReg(),
5565                      FMulMI->getOperand(2).getReg(), LHSReg, B);
5566     };
5567     return true;
5568   }
5569 
5570   return false;
5571 }
5572 
5573 bool CombinerHelper::matchSelectToLogical(MachineInstr &MI,
5574                                           BuildFnTy &MatchInfo) {
5575   GSelect &Sel = cast<GSelect>(MI);
5576   Register DstReg = Sel.getReg(0);
5577   Register Cond = Sel.getCondReg();
5578   Register TrueReg = Sel.getTrueReg();
5579   Register FalseReg = Sel.getFalseReg();
5580 
5581   auto *TrueDef = getDefIgnoringCopies(TrueReg, MRI);
5582   auto *FalseDef = getDefIgnoringCopies(FalseReg, MRI);
5583 
5584   const LLT CondTy = MRI.getType(Cond);
5585   const LLT OpTy = MRI.getType(TrueReg);
5586   if (CondTy != OpTy || OpTy.getScalarSizeInBits() != 1)
5587     return false;
5588 
5589   // We have a boolean select.
5590 
5591   // select Cond, Cond, F --> or Cond, F
5592   // select Cond, 1, F    --> or Cond, F
5593   auto MaybeCstTrue = isConstantOrConstantSplatVector(*TrueDef, MRI);
5594   if (Cond == TrueReg || (MaybeCstTrue && MaybeCstTrue->isOne())) {
5595     MatchInfo = [=](MachineIRBuilder &MIB) {
5596       MIB.buildOr(DstReg, Cond, FalseReg);
5597     };
5598     return true;
5599   }
5600 
5601   // select Cond, T, Cond --> and Cond, T
5602   // select Cond, T, 0    --> and Cond, T
5603   auto MaybeCstFalse = isConstantOrConstantSplatVector(*FalseDef, MRI);
5604   if (Cond == FalseReg || (MaybeCstFalse && MaybeCstFalse->isZero())) {
5605     MatchInfo = [=](MachineIRBuilder &MIB) {
5606       MIB.buildAnd(DstReg, Cond, TrueReg);
5607     };
5608     return true;
5609   }
5610 
5611  // select Cond, T, 1 --> or (not Cond), T
5612   if (MaybeCstFalse && MaybeCstFalse->isOne()) {
5613     MatchInfo = [=](MachineIRBuilder &MIB) {
5614       MIB.buildOr(DstReg, MIB.buildNot(OpTy, Cond), TrueReg);
5615     };
5616     return true;
5617   }
5618 
5619   // select Cond, 0, F --> and (not Cond), F
5620   if (MaybeCstTrue && MaybeCstTrue->isZero()) {
5621     MatchInfo = [=](MachineIRBuilder &MIB) {
5622       MIB.buildAnd(DstReg, MIB.buildNot(OpTy, Cond), FalseReg);
5623     };
5624     return true;
5625   }
5626   return false;
5627 }
5628 
5629 bool CombinerHelper::matchCombineFMinMaxNaN(MachineInstr &MI,
5630                                             unsigned &IdxToPropagate) {
5631   bool PropagateNaN;
5632   switch (MI.getOpcode()) {
5633   default:
5634     return false;
5635   case TargetOpcode::G_FMINNUM:
5636   case TargetOpcode::G_FMAXNUM:
5637     PropagateNaN = false;
5638     break;
5639   case TargetOpcode::G_FMINIMUM:
5640   case TargetOpcode::G_FMAXIMUM:
5641     PropagateNaN = true;
5642     break;
5643   }
5644 
5645   auto MatchNaN = [&](unsigned Idx) {
5646     Register MaybeNaNReg = MI.getOperand(Idx).getReg();
5647     const ConstantFP *MaybeCst = getConstantFPVRegVal(MaybeNaNReg, MRI);
5648     if (!MaybeCst || !MaybeCst->getValueAPF().isNaN())
5649       return false;
5650     IdxToPropagate = PropagateNaN ? Idx : (Idx == 1 ? 2 : 1);
5651     return true;
5652   };
5653 
5654   return MatchNaN(1) || MatchNaN(2);
5655 }
5656 
5657 bool CombinerHelper::matchAddSubSameReg(MachineInstr &MI, Register &Src) {
5658   assert(MI.getOpcode() == TargetOpcode::G_ADD && "Expected a G_ADD");
5659   Register LHS = MI.getOperand(1).getReg();
5660   Register RHS = MI.getOperand(2).getReg();
5661 
5662   // Helper lambda to check for opportunities for
5663   // A + (B - A) -> B
5664   // (B - A) + A -> B
5665   auto CheckFold = [&](Register MaybeSub, Register MaybeSameReg) {
5666     Register Reg;
5667     return mi_match(MaybeSub, MRI, m_GSub(m_Reg(Src), m_Reg(Reg))) &&
5668            Reg == MaybeSameReg;
5669   };
5670   return CheckFold(LHS, RHS) || CheckFold(RHS, LHS);
5671 }
5672 
5673 bool CombinerHelper::tryCombine(MachineInstr &MI) {
5674   if (tryCombineCopy(MI))
5675     return true;
5676   if (tryCombineExtendingLoads(MI))
5677     return true;
5678   if (tryCombineIndexedLoadStore(MI))
5679     return true;
5680   return false;
5681 }
5682