//===-- lib/CodeGen/GlobalISel/CombinerHelper.cpp -------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 #include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
9 #include "llvm/ADT/SetVector.h"
10 #include "llvm/ADT/SmallBitVector.h"
11 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
12 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
13 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
14 #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
15 #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
16 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
17 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
18 #include "llvm/CodeGen/GlobalISel/Utils.h"
19 #include "llvm/CodeGen/LowLevelTypeUtils.h"
20 #include "llvm/CodeGen/MachineBasicBlock.h"
21 #include "llvm/CodeGen/MachineDominators.h"
22 #include "llvm/CodeGen/MachineInstr.h"
23 #include "llvm/CodeGen/MachineMemOperand.h"
24 #include "llvm/CodeGen/MachineRegisterInfo.h"
25 #include "llvm/CodeGen/RegisterBankInfo.h"
26 #include "llvm/CodeGen/TargetInstrInfo.h"
27 #include "llvm/CodeGen/TargetLowering.h"
28 #include "llvm/CodeGen/TargetOpcodes.h"
29 #include "llvm/IR/DataLayout.h"
30 #include "llvm/IR/InstrTypes.h"
31 #include "llvm/Support/Casting.h"
32 #include "llvm/Support/DivisionByConstantInfo.h"
33 #include "llvm/Support/MathExtras.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include <cmath>
36 #include <optional>
37 #include <tuple>
38 
39 #define DEBUG_TYPE "gi-combiner"
40 
41 using namespace llvm;
42 using namespace MIPatternMatch;
43 
44 // Option to allow testing of the combiner while no targets know about indexed
45 // addressing.
46 static cl::opt<bool>
47     ForceLegalIndexing("force-legal-indexing", cl::Hidden, cl::init(false),
48                        cl::desc("Force all indexed operations to be "
49                                 "legal for the GlobalISel combiner"));
50 
51 CombinerHelper::CombinerHelper(GISelChangeObserver &Observer,
52                                MachineIRBuilder &B, bool IsPreLegalize,
53                                GISelKnownBits *KB, MachineDominatorTree *MDT,
54                                const LegalizerInfo *LI)
55     : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer), KB(KB),
56       MDT(MDT), IsPreLegalize(IsPreLegalize), LI(LI),
57       RBI(Builder.getMF().getSubtarget().getRegBankInfo()),
58       TRI(Builder.getMF().getSubtarget().getRegisterInfo()) {
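  // KB is referenced here only so it is not flagged as an unused field in
  // builds where nothing queries known bits.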
59   (void)this->KB;
60 }
61 
62 const TargetLowering &CombinerHelper::getTargetLowering() const {
63   return *Builder.getMF().getSubtarget().getTargetLowering();
64 }
65 
66 /// \returns The little endian in-memory byte position of byte \p I in a
67 /// \p ByteWidth bytes wide type.
68 ///
69 /// E.g. Given a 4-byte type x, x[0] -> byte 0
70 static unsigned littleEndianByteAt(const unsigned ByteWidth, const unsigned I) {
71   assert(I < ByteWidth && "I must be in [0, ByteWidth)");
72   return I;
73 }
74 
75 /// Determines the LogBase2 value for a non-null input value using the
76 /// transform: LogBase2(V) = (EltBits - 1) - ctlz(V).
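///
/// E.g. for a 32-bit scalar V = 8: ctlz(8) = 28, so LogBase2 = 31 - 28 = 3.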
77 static Register buildLogBase2(Register V, MachineIRBuilder &MIB) {
78   auto &MRI = *MIB.getMRI();
79   LLT Ty = MRI.getType(V);
80   auto Ctlz = MIB.buildCTLZ(Ty, V);
81   auto Base = MIB.buildConstant(Ty, Ty.getScalarSizeInBits() - 1);
82   return MIB.buildSub(Ty, Base, Ctlz).getReg(0);
83 }
84 
85 /// \returns The big endian in-memory byte position of byte \p I in a
86 /// \p ByteWidth bytes wide type.
87 ///
88 /// E.g. Given a 4-byte type x, x[0] -> byte 3
89 static unsigned bigEndianByteAt(const unsigned ByteWidth, const unsigned I) {
90   assert(I < ByteWidth && "I must be in [0, ByteWidth)");
91   return ByteWidth - I - 1;
92 }
93 
94 /// Given a map from byte offsets in memory to indices in a load/store,
95 /// determine if that map corresponds to a little or big endian byte pattern.
96 ///
97 /// \param MemOffset2Idx maps memory offsets to address offsets.
98 /// \param LowestIdx is the lowest index in \p MemOffset2Idx.
99 ///
100 /// \returns true if the map corresponds to a big endian byte pattern, false if
101 /// it corresponds to a little endian byte pattern, and std::nullopt otherwise.
102 ///
103 /// E.g. given a 32-bit type x, and x[AddrOffset], the in-memory byte patterns
104 /// are as follows:
105 ///
106 /// AddrOffset   Little endian    Big endian
107 /// 0            0                3
108 /// 1            1                2
109 /// 2            2                1
110 /// 3            3                0
111 static std::optional<bool>
112 isBigEndian(const SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
113             int64_t LowestIdx) {
114   // Need at least two byte positions to decide on endianness.
115   unsigned Width = MemOffset2Idx.size();
116   if (Width < 2)
117     return std::nullopt;
118   bool BigEndian = true, LittleEndian = true;
  for (unsigned MemOffset = 0; MemOffset < Width; ++MemOffset) {
120     auto MemOffsetAndIdx = MemOffset2Idx.find(MemOffset);
121     if (MemOffsetAndIdx == MemOffset2Idx.end())
122       return std::nullopt;
123     const int64_t Idx = MemOffsetAndIdx->second - LowestIdx;
124     assert(Idx >= 0 && "Expected non-negative byte offset?");
125     LittleEndian &= Idx == littleEndianByteAt(Width, MemOffset);
126     BigEndian &= Idx == bigEndianByteAt(Width, MemOffset);
127     if (!BigEndian && !LittleEndian)
128       return std::nullopt;
129   }
130 
131   assert((BigEndian != LittleEndian) &&
132          "Pattern cannot be both big and little endian!");
133   return BigEndian;
134 }
135 
136 bool CombinerHelper::isPreLegalize() const { return IsPreLegalize; }
137 
138 bool CombinerHelper::isLegal(const LegalityQuery &Query) const {
139   assert(LI && "Must have LegalizerInfo to query isLegal!");
140   return LI->getAction(Query).Action == LegalizeActions::Legal;
141 }
142 
143 bool CombinerHelper::isLegalOrBeforeLegalizer(
144     const LegalityQuery &Query) const {
145   return isPreLegalize() || isLegal(Query);
146 }
147 
148 bool CombinerHelper::isConstantLegalOrBeforeLegalizer(const LLT Ty) const {
149   if (!Ty.isVector())
150     return isLegalOrBeforeLegalizer({TargetOpcode::G_CONSTANT, {Ty}});
151   // Vector constants are represented as a G_BUILD_VECTOR of scalar G_CONSTANTs.
152   if (isPreLegalize())
153     return true;
154   LLT EltTy = Ty.getElementType();
155   return isLegal({TargetOpcode::G_BUILD_VECTOR, {Ty, EltTy}}) &&
156          isLegal({TargetOpcode::G_CONSTANT, {EltTy}});
157 }
158 
159 void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, Register FromReg,
160                                     Register ToReg) const {
161   Observer.changingAllUsesOfReg(MRI, FromReg);
162 
163   if (MRI.constrainRegAttrs(ToReg, FromReg))
164     MRI.replaceRegWith(FromReg, ToReg);
165   else
166     Builder.buildCopy(ToReg, FromReg);
167 
168   Observer.finishedChangingAllUsesOfReg();
169 }
170 
171 void CombinerHelper::replaceRegOpWith(MachineRegisterInfo &MRI,
172                                       MachineOperand &FromRegOp,
173                                       Register ToReg) const {
174   assert(FromRegOp.getParent() && "Expected an operand in an MI");
175   Observer.changingInstr(*FromRegOp.getParent());
176 
177   FromRegOp.setReg(ToReg);
178 
179   Observer.changedInstr(*FromRegOp.getParent());
180 }
181 
182 void CombinerHelper::replaceOpcodeWith(MachineInstr &FromMI,
183                                        unsigned ToOpcode) const {
184   Observer.changingInstr(FromMI);
185 
186   FromMI.setDesc(Builder.getTII().get(ToOpcode));
187 
188   Observer.changedInstr(FromMI);
189 }
190 
191 const RegisterBank *CombinerHelper::getRegBank(Register Reg) const {
192   return RBI->getRegBank(Reg, MRI, *TRI);
193 }
194 
195 void CombinerHelper::setRegBank(Register Reg, const RegisterBank *RegBank) {
196   if (RegBank)
197     MRI.setRegBank(Reg, *RegBank);
198 }
199 
200 bool CombinerHelper::tryCombineCopy(MachineInstr &MI) {
201   if (matchCombineCopy(MI)) {
202     applyCombineCopy(MI);
203     return true;
204   }
205   return false;
206 }
207 bool CombinerHelper::matchCombineCopy(MachineInstr &MI) {
208   if (MI.getOpcode() != TargetOpcode::COPY)
209     return false;
210   Register DstReg = MI.getOperand(0).getReg();
211   Register SrcReg = MI.getOperand(1).getReg();
212   return canReplaceReg(DstReg, SrcReg, MRI);
213 }
214 void CombinerHelper::applyCombineCopy(MachineInstr &MI) {
215   Register DstReg = MI.getOperand(0).getReg();
216   Register SrcReg = MI.getOperand(1).getReg();
217   MI.eraseFromParent();
218   replaceRegWith(MRI, DstReg, SrcReg);
219 }
220 
221 bool CombinerHelper::tryCombineConcatVectors(MachineInstr &MI) {
222   bool IsUndef = false;
223   SmallVector<Register, 4> Ops;
224   if (matchCombineConcatVectors(MI, IsUndef, Ops)) {
225     applyCombineConcatVectors(MI, IsUndef, Ops);
226     return true;
227   }
228   return false;
229 }
230 
231 bool CombinerHelper::matchCombineConcatVectors(MachineInstr &MI, bool &IsUndef,
232                                                SmallVectorImpl<Register> &Ops) {
233   assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
234          "Invalid instruction");
235   IsUndef = true;
236   MachineInstr *Undef = nullptr;
237 
238   // Walk over all the operands of concat vectors and check if they are
239   // build_vector themselves or undef.
240   // Then collect their operands in Ops.
241   for (const MachineOperand &MO : MI.uses()) {
242     Register Reg = MO.getReg();
243     MachineInstr *Def = MRI.getVRegDef(Reg);
244     assert(Def && "Operand not defined");
245     switch (Def->getOpcode()) {
246     case TargetOpcode::G_BUILD_VECTOR:
247       IsUndef = false;
248       // Remember the operands of the build_vector to fold
249       // them into the yet-to-build flattened concat vectors.
250       for (const MachineOperand &BuildVecMO : Def->uses())
251         Ops.push_back(BuildVecMO.getReg());
252       break;
253     case TargetOpcode::G_IMPLICIT_DEF: {
254       LLT OpType = MRI.getType(Reg);
255       // Keep one undef value for all the undef operands.
256       if (!Undef) {
257         Builder.setInsertPt(*MI.getParent(), MI);
258         Undef = Builder.buildUndef(OpType.getScalarType());
259       }
260       assert(MRI.getType(Undef->getOperand(0).getReg()) ==
261                  OpType.getScalarType() &&
262              "All undefs should have the same type");
263       // Break the undef vector in as many scalar elements as needed
264       // for the flattening.
265       for (unsigned EltIdx = 0, EltEnd = OpType.getNumElements();
266            EltIdx != EltEnd; ++EltIdx)
267         Ops.push_back(Undef->getOperand(0).getReg());
268       break;
269     }
270     default:
271       return false;
272     }
273   }
274   return true;
275 }
276 void CombinerHelper::applyCombineConcatVectors(
277     MachineInstr &MI, bool IsUndef, const ArrayRef<Register> Ops) {
  // We determined that the concat_vectors can be flattened.
279   // Generate the flattened build_vector.
280   Register DstReg = MI.getOperand(0).getReg();
281   Builder.setInsertPt(*MI.getParent(), MI);
282   Register NewDstReg = MRI.cloneVirtualRegister(DstReg);
283 
  // Note: IsUndef is sort of redundant. We could have determined it by
  // checking that all Ops are undef.  Alternatively, we could have
  // generated a build_vector of undefs and relied on another combine to
  // clean that up.  For now, given we already gather this information
  // in tryCombineConcatVectors, just save compile time and issue the
  // right thing.
290   if (IsUndef)
291     Builder.buildUndef(NewDstReg);
292   else
293     Builder.buildBuildVector(NewDstReg, Ops);
294   MI.eraseFromParent();
295   replaceRegWith(MRI, DstReg, NewDstReg);
296 }
297 
298 bool CombinerHelper::tryCombineShuffleVector(MachineInstr &MI) {
299   SmallVector<Register, 4> Ops;
300   if (matchCombineShuffleVector(MI, Ops)) {
301     applyCombineShuffleVector(MI, Ops);
302     return true;
303   }
304   return false;
305 }
306 
307 bool CombinerHelper::matchCombineShuffleVector(MachineInstr &MI,
308                                                SmallVectorImpl<Register> &Ops) {
309   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
310          "Invalid instruction kind");
311   LLT DstType = MRI.getType(MI.getOperand(0).getReg());
312   Register Src1 = MI.getOperand(1).getReg();
313   LLT SrcType = MRI.getType(Src1);
  // As bizarre as it may look, shuffle vector can actually produce a
  // scalar! This is because at the IR level a <1 x ty> shuffle
  // vector is perfectly valid.
317   unsigned DstNumElts = DstType.isVector() ? DstType.getNumElements() : 1;
318   unsigned SrcNumElts = SrcType.isVector() ? SrcType.getNumElements() : 1;
319 
  // If the resulting vector is smaller than the size of the source
  // vectors being concatenated, we won't be able to turn the
  // shuffle vector into a concat_vectors.
323   //
324   // Note: We may still be able to produce a concat_vectors fed by
325   //       extract_vector_elt and so on. It is less clear that would
326   //       be better though, so don't bother for now.
327   //
  // If the destination is a scalar, the size of the sources doesn't
  // matter: we will lower the shuffle to a plain copy. This works
  // only if the source and destination have the same size, but
  // that's covered by the next condition.
332   //
  // TODO: If the sizes of the source and destination don't match,
  //       we could still emit an extract vector element in that case.
335   if (DstNumElts < 2 * SrcNumElts && DstNumElts != 1)
336     return false;
337 
338   // Check that the shuffle mask can be broken evenly between the
339   // different sources.
340   if (DstNumElts % SrcNumElts != 0)
341     return false;
342 
343   // Mask length is a multiple of the source vector length.
344   // Check if the shuffle is some kind of concatenation of the input
345   // vectors.
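  // E.g. with two <2 x s32> sources, the mask <0, 1, 2, 3> concatenates Src1
  // and Src2, and <2, 3, 2, 3> concatenates Src2 with itself; element indices
  // >= SrcNumElts refer to the second source.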
346   unsigned NumConcat = DstNumElts / SrcNumElts;
347   SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
348   ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
349   for (unsigned i = 0; i != DstNumElts; ++i) {
350     int Idx = Mask[i];
351     // Undef value.
352     if (Idx < 0)
353       continue;
354     // Ensure the indices in each SrcType sized piece are sequential and that
355     // the same source is used for the whole piece.
356     if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
357         (ConcatSrcs[i / SrcNumElts] >= 0 &&
358          ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts)))
359       return false;
360     // Remember which source this index came from.
361     ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
362   }
363 
364   // The shuffle is concatenating multiple vectors together.
365   // Collect the different operands for that.
366   Register UndefReg;
367   Register Src2 = MI.getOperand(2).getReg();
368   for (auto Src : ConcatSrcs) {
369     if (Src < 0) {
370       if (!UndefReg) {
371         Builder.setInsertPt(*MI.getParent(), MI);
372         UndefReg = Builder.buildUndef(SrcType).getReg(0);
373       }
374       Ops.push_back(UndefReg);
375     } else if (Src == 0)
376       Ops.push_back(Src1);
377     else
378       Ops.push_back(Src2);
379   }
380   return true;
381 }
382 
383 void CombinerHelper::applyCombineShuffleVector(MachineInstr &MI,
384                                                const ArrayRef<Register> Ops) {
385   Register DstReg = MI.getOperand(0).getReg();
386   Builder.setInsertPt(*MI.getParent(), MI);
387   Register NewDstReg = MRI.cloneVirtualRegister(DstReg);
388 
389   if (Ops.size() == 1)
390     Builder.buildCopy(NewDstReg, Ops[0]);
391   else
392     Builder.buildMergeLikeInstr(NewDstReg, Ops);
393 
394   MI.eraseFromParent();
395   replaceRegWith(MRI, DstReg, NewDstReg);
396 }
397 
398 namespace {
399 
/// Select a preference between two uses. CurrentUse is the current preference
/// while the *ForCandidate arguments describe the candidate under
/// consideration.
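/// E.g. a G_SEXT to s32 beats a G_ANYEXT to s64 (defined extensions are
/// preferred), while a G_SEXT to s64 beats a G_SEXT to s32 (larger result
/// types are preferred).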
402 PreferredTuple ChoosePreferredUse(MachineInstr &LoadMI,
403                                   PreferredTuple &CurrentUse,
404                                   const LLT TyForCandidate,
405                                   unsigned OpcodeForCandidate,
406                                   MachineInstr *MIForCandidate) {
407   if (!CurrentUse.Ty.isValid()) {
408     if (CurrentUse.ExtendOpcode == OpcodeForCandidate ||
409         CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT)
410       return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
411     return CurrentUse;
412   }
413 
414   // We permit the extend to hoist through basic blocks but this is only
415   // sensible if the target has extending loads. If you end up lowering back
416   // into a load and extend during the legalizer then the end result is
417   // hoisting the extend up to the load.
418 
419   // Prefer defined extensions to undefined extensions as these are more
420   // likely to reduce the number of instructions.
421   if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&
422       CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT)
423     return CurrentUse;
424   else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
425            OpcodeForCandidate != TargetOpcode::G_ANYEXT)
426     return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
427 
428   // Prefer sign extensions to zero extensions as sign-extensions tend to be
429   // more expensive. Don't do this if the load is already a zero-extend load
430   // though, otherwise we'll rewrite a zero-extend load into a sign-extend
431   // later.
432   if (!isa<GZExtLoad>(LoadMI) && CurrentUse.Ty == TyForCandidate) {
433     if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT &&
434         OpcodeForCandidate == TargetOpcode::G_ZEXT)
435       return CurrentUse;
436     else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
437              OpcodeForCandidate == TargetOpcode::G_SEXT)
438       return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
439   }
440 
441   // This is potentially target specific. We've chosen the largest type
442   // because G_TRUNC is usually free. One potential catch with this is that
443   // some targets have a reduced number of larger registers than smaller
444   // registers and this choice potentially increases the live-range for the
445   // larger value.
446   if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) {
447     return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
448   }
449   return CurrentUse;
450 }
451 
452 /// Find a suitable place to insert some instructions and insert them. This
453 /// function accounts for special cases like inserting before a PHI node.
454 /// The current strategy for inserting before PHI's is to duplicate the
455 /// instructions for each predecessor. However, while that's ok for G_TRUNC
456 /// on most targets since it generally requires no code, other targets/cases may
457 /// want to try harder to find a dominating block.
458 static void InsertInsnsWithoutSideEffectsBeforeUse(
459     MachineIRBuilder &Builder, MachineInstr &DefMI, MachineOperand &UseMO,
460     std::function<void(MachineBasicBlock *, MachineBasicBlock::iterator,
461                        MachineOperand &UseMO)>
462         Inserter) {
463   MachineInstr &UseMI = *UseMO.getParent();
464 
465   MachineBasicBlock *InsertBB = UseMI.getParent();
466 
467   // If the use is a PHI then we want the predecessor block instead.
468   if (UseMI.isPHI()) {
469     MachineOperand *PredBB = std::next(&UseMO);
470     InsertBB = PredBB->getMBB();
471   }
472 
473   // If the block is the same block as the def then we want to insert just after
474   // the def instead of at the start of the block.
475   if (InsertBB == DefMI.getParent()) {
476     MachineBasicBlock::iterator InsertPt = &DefMI;
477     Inserter(InsertBB, std::next(InsertPt), UseMO);
478     return;
479   }
480 
481   // Otherwise we want the start of the BB
482   Inserter(InsertBB, InsertBB->getFirstNonPHI(), UseMO);
483 }
484 } // end anonymous namespace
485 
486 bool CombinerHelper::tryCombineExtendingLoads(MachineInstr &MI) {
487   PreferredTuple Preferred;
488   if (matchCombineExtendingLoads(MI, Preferred)) {
489     applyCombineExtendingLoads(MI, Preferred);
490     return true;
491   }
492   return false;
493 }
494 
495 static unsigned getExtLoadOpcForExtend(unsigned ExtOpc) {
496   unsigned CandidateLoadOpc;
497   switch (ExtOpc) {
498   case TargetOpcode::G_ANYEXT:
499     CandidateLoadOpc = TargetOpcode::G_LOAD;
500     break;
501   case TargetOpcode::G_SEXT:
502     CandidateLoadOpc = TargetOpcode::G_SEXTLOAD;
503     break;
504   case TargetOpcode::G_ZEXT:
505     CandidateLoadOpc = TargetOpcode::G_ZEXTLOAD;
506     break;
507   default:
508     llvm_unreachable("Unexpected extend opc");
509   }
510   return CandidateLoadOpc;
511 }
512 
513 bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI,
514                                                 PreferredTuple &Preferred) {
515   // We match the loads and follow the uses to the extend instead of matching
516   // the extends and following the def to the load. This is because the load
517   // must remain in the same position for correctness (unless we also add code
518   // to find a safe place to sink it) whereas the extend is freely movable.
519   // It also prevents us from duplicating the load for the volatile case or just
520   // for performance.
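  //
  // The overall combine turns e.g.:
  //   %v:_(s16) = G_LOAD %ptr (load 2)
  //   %e:_(s32) = G_SEXT %v(s16)
  // into:
  //   %e:_(s32) = G_SEXTLOAD %ptr (load 2)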
521   GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(&MI);
522   if (!LoadMI)
523     return false;
524 
525   Register LoadReg = LoadMI->getDstReg();
526 
527   LLT LoadValueTy = MRI.getType(LoadReg);
528   if (!LoadValueTy.isScalar())
529     return false;
530 
531   // Most architectures are going to legalize <s8 loads into at least a 1 byte
532   // load, and the MMOs can only describe memory accesses in multiples of bytes.
533   // If we try to perform extload combining on those, we can end up with
534   // %a(s8) = extload %ptr (load 1 byte from %ptr)
535   // ... which is an illegal extload instruction.
536   if (LoadValueTy.getSizeInBits() < 8)
537     return false;
538 
  // Non-power-of-2 types will very likely be legalized into multiple loads.
  // Don't bother trying to match them into extending loads.
541   if (!llvm::has_single_bit<uint32_t>(LoadValueTy.getSizeInBits()))
542     return false;
543 
  // Find the preferred type aside from the any-extends (unless it's the only
  // one) and non-extending ops. We'll emit an extending load to that type and
  // emit a variant of (extend (trunc X)) for the others according to the
  // relative type sizes. At the same time, pick an extend to use based on the
  // extend involved in the chosen type.
549   unsigned PreferredOpcode =
550       isa<GLoad>(&MI)
551           ? TargetOpcode::G_ANYEXT
552           : isa<GSExtLoad>(&MI) ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT;
553   Preferred = {LLT(), PreferredOpcode, nullptr};
554   for (auto &UseMI : MRI.use_nodbg_instructions(LoadReg)) {
555     if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
556         UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
557         (UseMI.getOpcode() == TargetOpcode::G_ANYEXT)) {
558       const auto &MMO = LoadMI->getMMO();
559       // For atomics, only form anyextending loads.
560       if (MMO.isAtomic() && UseMI.getOpcode() != TargetOpcode::G_ANYEXT)
561         continue;
562       // Check for legality.
563       if (!isPreLegalize()) {
564         LegalityQuery::MemDesc MMDesc(MMO);
565         unsigned CandidateLoadOpc = getExtLoadOpcForExtend(UseMI.getOpcode());
566         LLT UseTy = MRI.getType(UseMI.getOperand(0).getReg());
567         LLT SrcTy = MRI.getType(LoadMI->getPointerReg());
568         if (LI->getAction({CandidateLoadOpc, {UseTy, SrcTy}, {MMDesc}})
569                 .Action != LegalizeActions::Legal)
570           continue;
571       }
572       Preferred = ChoosePreferredUse(MI, Preferred,
573                                      MRI.getType(UseMI.getOperand(0).getReg()),
574                                      UseMI.getOpcode(), &UseMI);
575     }
576   }
577 
578   // There were no extends
579   if (!Preferred.MI)
580     return false;
  // It should be impossible to choose an extend without selecting a different
  // type since by definition the result of an extend is larger.
583   assert(Preferred.Ty != LoadValueTy && "Extending to same type?");
584 
585   LLVM_DEBUG(dbgs() << "Preferred use is: " << *Preferred.MI);
586   return true;
587 }
588 
589 void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI,
590                                                 PreferredTuple &Preferred) {
591   // Rewrite the load to the chosen extending load.
592   Register ChosenDstReg = Preferred.MI->getOperand(0).getReg();
593 
594   // Inserter to insert a truncate back to the original type at a given point
595   // with some basic CSE to limit truncate duplication to one per BB.
596   DenseMap<MachineBasicBlock *, MachineInstr *> EmittedInsns;
597   auto InsertTruncAt = [&](MachineBasicBlock *InsertIntoBB,
598                            MachineBasicBlock::iterator InsertBefore,
599                            MachineOperand &UseMO) {
600     MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB);
601     if (PreviouslyEmitted) {
602       Observer.changingInstr(*UseMO.getParent());
603       UseMO.setReg(PreviouslyEmitted->getOperand(0).getReg());
604       Observer.changedInstr(*UseMO.getParent());
605       return;
606     }
607 
608     Builder.setInsertPt(*InsertIntoBB, InsertBefore);
609     Register NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
610     MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg);
611     EmittedInsns[InsertIntoBB] = NewMI;
612     replaceRegOpWith(MRI, UseMO, NewDstReg);
613   };
614 
615   Observer.changingInstr(MI);
616   unsigned LoadOpc = getExtLoadOpcForExtend(Preferred.ExtendOpcode);
617   MI.setDesc(Builder.getTII().get(LoadOpc));
618 
619   // Rewrite all the uses to fix up the types.
620   auto &LoadValue = MI.getOperand(0);
621   SmallVector<MachineOperand *, 4> Uses;
622   for (auto &UseMO : MRI.use_operands(LoadValue.getReg()))
623     Uses.push_back(&UseMO);
624 
625   for (auto *UseMO : Uses) {
626     MachineInstr *UseMI = UseMO->getParent();
627 
628     // If the extend is compatible with the preferred extend then we should fix
629     // up the type and extend so that it uses the preferred use.
630     if (UseMI->getOpcode() == Preferred.ExtendOpcode ||
631         UseMI->getOpcode() == TargetOpcode::G_ANYEXT) {
632       Register UseDstReg = UseMI->getOperand(0).getReg();
633       MachineOperand &UseSrcMO = UseMI->getOperand(1);
634       const LLT UseDstTy = MRI.getType(UseDstReg);
635       if (UseDstReg != ChosenDstReg) {
636         if (Preferred.Ty == UseDstTy) {
637           // If the use has the same type as the preferred use, then merge
638           // the vregs and erase the extend. For example:
639           //    %1:_(s8) = G_LOAD ...
640           //    %2:_(s32) = G_SEXT %1(s8)
641           //    %3:_(s32) = G_ANYEXT %1(s8)
642           //    ... = ... %3(s32)
643           // rewrites to:
644           //    %2:_(s32) = G_SEXTLOAD ...
645           //    ... = ... %2(s32)
646           replaceRegWith(MRI, UseDstReg, ChosenDstReg);
647           Observer.erasingInstr(*UseMO->getParent());
648           UseMO->getParent()->eraseFromParent();
649         } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) {
650           // If the preferred size is smaller, then keep the extend but extend
651           // from the result of the extending load. For example:
652           //    %1:_(s8) = G_LOAD ...
653           //    %2:_(s32) = G_SEXT %1(s8)
654           //    %3:_(s64) = G_ANYEXT %1(s8)
655           //    ... = ... %3(s64)
          // rewrites to:
657           //    %2:_(s32) = G_SEXTLOAD ...
658           //    %3:_(s64) = G_ANYEXT %2:_(s32)
659           //    ... = ... %3(s64)
660           replaceRegOpWith(MRI, UseSrcMO, ChosenDstReg);
661         } else {
          // If the preferred size is larger, then insert a truncate. For
          // example:
          //    %1:_(s8) = G_LOAD ...
          //    %2:_(s64) = G_SEXT %1(s8)
          //    %3:_(s32) = G_ZEXT %1(s8)
          //    ... = ... %3(s32)
          // rewrites to:
          //    %2:_(s64) = G_SEXTLOAD ...
          //    %4:_(s8) = G_TRUNC %2:_(s64)
          //    %3:_(s32) = G_ZEXT %4:_(s8)
          //    ... = ... %3(s32)
673           InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO,
674                                                  InsertTruncAt);
675         }
676         continue;
677       }
678       // The use is (one of) the uses of the preferred use we chose earlier.
679       // We're going to update the load to def this value later so just erase
680       // the old extend.
681       Observer.erasingInstr(*UseMO->getParent());
682       UseMO->getParent()->eraseFromParent();
683       continue;
684     }
685 
686     // The use isn't an extend. Truncate back to the type we originally loaded.
687     // This is free on many targets.
688     InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, InsertTruncAt);
689   }
690 
691   MI.getOperand(0).setReg(ChosenDstReg);
692   Observer.changedInstr(MI);
693 }
694 
695 bool CombinerHelper::matchCombineLoadWithAndMask(MachineInstr &MI,
696                                                  BuildFnTy &MatchInfo) {
697   assert(MI.getOpcode() == TargetOpcode::G_AND);
698 
699   // If we have the following code:
700   //  %mask = G_CONSTANT 255
701   //  %ld   = G_LOAD %ptr, (load s16)
702   //  %and  = G_AND %ld, %mask
703   //
704   // Try to fold it into
705   //   %ld = G_ZEXTLOAD %ptr, (load s8)
706 
707   Register Dst = MI.getOperand(0).getReg();
708   if (MRI.getType(Dst).isVector())
709     return false;
710 
711   auto MaybeMask =
712       getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
713   if (!MaybeMask)
714     return false;
715 
716   APInt MaskVal = MaybeMask->Value;
717 
718   if (!MaskVal.isMask())
719     return false;
720 
721   Register SrcReg = MI.getOperand(1).getReg();
722   // Don't use getOpcodeDef() here since intermediate instructions may have
723   // multiple users.
724   GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(MRI.getVRegDef(SrcReg));
725   if (!LoadMI || !MRI.hasOneNonDBGUse(LoadMI->getDstReg()))
726     return false;
727 
728   Register LoadReg = LoadMI->getDstReg();
729   LLT RegTy = MRI.getType(LoadReg);
730   Register PtrReg = LoadMI->getPointerReg();
731   unsigned RegSize = RegTy.getSizeInBits();
732   uint64_t LoadSizeBits = LoadMI->getMemSizeInBits();
733   unsigned MaskSizeBits = MaskVal.countr_one();
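  // E.g. MaskVal == 0xFF has 8 trailing ones, so the candidate zero-extending
  // load would be s8.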
734 
  // The mask may not be larger than the in-memory type, as it might cover
  // sign-extended bits.
737   if (MaskSizeBits > LoadSizeBits)
738     return false;
739 
740   // If the mask covers the whole destination register, there's nothing to
741   // extend
742   if (MaskSizeBits >= RegSize)
743     return false;
744 
745   // Most targets cannot deal with loads of size < 8 and need to re-legalize to
746   // at least byte loads. Avoid creating such loads here
747   if (MaskSizeBits < 8 || !isPowerOf2_32(MaskSizeBits))
748     return false;
749 
750   const MachineMemOperand &MMO = LoadMI->getMMO();
751   LegalityQuery::MemDesc MemDesc(MMO);
752 
753   // Don't modify the memory access size if this is atomic/volatile, but we can
754   // still adjust the opcode to indicate the high bit behavior.
755   if (LoadMI->isSimple())
756     MemDesc.MemoryTy = LLT::scalar(MaskSizeBits);
757   else if (LoadSizeBits > MaskSizeBits || LoadSizeBits == RegSize)
758     return false;
759 
760   // TODO: Could check if it's legal with the reduced or original memory size.
761   if (!isLegalOrBeforeLegalizer(
762           {TargetOpcode::G_ZEXTLOAD, {RegTy, MRI.getType(PtrReg)}, {MemDesc}}))
763     return false;
764 
765   MatchInfo = [=](MachineIRBuilder &B) {
766     B.setInstrAndDebugLoc(*LoadMI);
767     auto &MF = B.getMF();
768     auto PtrInfo = MMO.getPointerInfo();
769     auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, MemDesc.MemoryTy);
770     B.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, Dst, PtrReg, *NewMMO);
771     LoadMI->eraseFromParent();
772   };
773   return true;
774 }
775 
776 bool CombinerHelper::isPredecessor(const MachineInstr &DefMI,
777                                    const MachineInstr &UseMI) {
778   assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
779          "shouldn't consider debug uses");
780   assert(DefMI.getParent() == UseMI.getParent());
781   if (&DefMI == &UseMI)
782     return true;
783   const MachineBasicBlock &MBB = *DefMI.getParent();
784   auto DefOrUse = find_if(MBB, [&DefMI, &UseMI](const MachineInstr &MI) {
785     return &MI == &DefMI || &MI == &UseMI;
786   });
787   if (DefOrUse == MBB.end())
788     llvm_unreachable("Block must contain both DefMI and UseMI!");
789   return &*DefOrUse == &DefMI;
790 }
791 
792 bool CombinerHelper::dominates(const MachineInstr &DefMI,
793                                const MachineInstr &UseMI) {
794   assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
795          "shouldn't consider debug uses");
796   if (MDT)
797     return MDT->dominates(&DefMI, &UseMI);
798   else if (DefMI.getParent() != UseMI.getParent())
799     return false;
800 
801   return isPredecessor(DefMI, UseMI);
802 }
803 
804 bool CombinerHelper::matchSextTruncSextLoad(MachineInstr &MI) {
805   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
806   Register SrcReg = MI.getOperand(1).getReg();
807   Register LoadUser = SrcReg;
808 
809   if (MRI.getType(SrcReg).isVector())
810     return false;
811 
812   Register TruncSrc;
813   if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))))
814     LoadUser = TruncSrc;
815 
816   uint64_t SizeInBits = MI.getOperand(2).getImm();
817   // If the source is a G_SEXTLOAD from the same bit width, then we don't
818   // need any extend at all, just a truncate.
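  // E.g.:
  //   %ld:_(s32) = G_SEXTLOAD %ptr (load 1)
  //   %t:_(s16) = G_TRUNC %ld(s32)
  //   %ext:_(s16) = G_SEXT_INREG %t, 8
  // Here the G_SEXT_INREG is redundant: the load already sign-extended from
  // bit 8.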
819   if (auto *LoadMI = getOpcodeDef<GSExtLoad>(LoadUser, MRI)) {
820     // If truncating more than the original extended value, abort.
821     auto LoadSizeBits = LoadMI->getMemSizeInBits();
822     if (TruncSrc && MRI.getType(TruncSrc).getSizeInBits() < LoadSizeBits)
823       return false;
824     if (LoadSizeBits == SizeInBits)
825       return true;
826   }
827   return false;
828 }
829 
830 void CombinerHelper::applySextTruncSextLoad(MachineInstr &MI) {
831   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
832   Builder.setInstrAndDebugLoc(MI);
833   Builder.buildCopy(MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
834   MI.eraseFromParent();
835 }
836 
837 bool CombinerHelper::matchSextInRegOfLoad(
838     MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
839   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
840 
841   Register DstReg = MI.getOperand(0).getReg();
842   LLT RegTy = MRI.getType(DstReg);
843 
844   // Only supports scalars for now.
845   if (RegTy.isVector())
846     return false;
847 
848   Register SrcReg = MI.getOperand(1).getReg();
849   auto *LoadDef = getOpcodeDef<GLoad>(SrcReg, MRI);
850   if (!LoadDef || !MRI.hasOneNonDBGUse(DstReg))
851     return false;
852 
853   uint64_t MemBits = LoadDef->getMemSizeInBits();
854 
855   // If the sign extend extends from a narrower width than the load's width,
856   // then we can narrow the load width when we combine to a G_SEXTLOAD.
857   // Avoid widening the load at all.
858   unsigned NewSizeBits = std::min((uint64_t)MI.getOperand(2).getImm(), MemBits);
859 
860   // Don't generate G_SEXTLOADs with a < 1 byte width.
861   if (NewSizeBits < 8)
862     return false;
  // Don't bother creating a non-power-of-2 sextload; it will likely be broken
  // up anyway for most targets.
865   if (!isPowerOf2_32(NewSizeBits))
866     return false;
867 
868   const MachineMemOperand &MMO = LoadDef->getMMO();
869   LegalityQuery::MemDesc MMDesc(MMO);
870 
871   // Don't modify the memory access size if this is atomic/volatile, but we can
872   // still adjust the opcode to indicate the high bit behavior.
873   if (LoadDef->isSimple())
874     MMDesc.MemoryTy = LLT::scalar(NewSizeBits);
875   else if (MemBits > NewSizeBits || MemBits == RegTy.getSizeInBits())
876     return false;
877 
878   // TODO: Could check if it's legal with the reduced or original memory size.
879   if (!isLegalOrBeforeLegalizer({TargetOpcode::G_SEXTLOAD,
880                                  {MRI.getType(LoadDef->getDstReg()),
881                                   MRI.getType(LoadDef->getPointerReg())},
882                                  {MMDesc}}))
883     return false;
884 
885   MatchInfo = std::make_tuple(LoadDef->getDstReg(), NewSizeBits);
886   return true;
887 }
888 
889 void CombinerHelper::applySextInRegOfLoad(
890     MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
891   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
892   Register LoadReg;
893   unsigned ScalarSizeBits;
894   std::tie(LoadReg, ScalarSizeBits) = MatchInfo;
895   GLoad *LoadDef = cast<GLoad>(MRI.getVRegDef(LoadReg));
896 
897   // If we have the following:
898   // %ld = G_LOAD %ptr, (load 2)
899   // %ext = G_SEXT_INREG %ld, 8
900   //    ==>
901   // %ld = G_SEXTLOAD %ptr (load 1)
902 
903   auto &MMO = LoadDef->getMMO();
904   Builder.setInstrAndDebugLoc(*LoadDef);
905   auto &MF = Builder.getMF();
906   auto PtrInfo = MMO.getPointerInfo();
907   auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, ScalarSizeBits / 8);
908   Builder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, MI.getOperand(0).getReg(),
909                          LoadDef->getPointerReg(), *NewMMO);
910   MI.eraseFromParent();
911 }
912 
913 bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr,
914                                             Register &Base, Register &Offset) {
915   auto &MF = *MI.getParent()->getParent();
916   const auto &TLI = *MF.getSubtarget().getTargetLowering();
917 
918 #ifndef NDEBUG
919   unsigned Opcode = MI.getOpcode();
920   assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
921          Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
922 #endif
923 
924   Base = MI.getOperand(1).getReg();
925   MachineInstr *BaseDef = MRI.getUniqueVRegDef(Base);
926   if (BaseDef && BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
927     return false;
928 
929   LLVM_DEBUG(dbgs() << "Searching for post-indexing opportunity for: " << MI);
  // FIXME: The following use traversal needs a bail out for pathological cases.
931   for (auto &Use : MRI.use_nodbg_instructions(Base)) {
932     if (Use.getOpcode() != TargetOpcode::G_PTR_ADD)
933       continue;
934 
935     Offset = Use.getOperand(2).getReg();
936     if (!ForceLegalIndexing &&
937         !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ false, MRI)) {
938       LLVM_DEBUG(dbgs() << "    Ignoring candidate with illegal addrmode: "
939                         << Use);
940       continue;
941     }
942 
943     // Make sure the offset calculation is before the potentially indexed op.
944     // FIXME: we really care about dependency here. The offset calculation might
945     // be movable.
946     MachineInstr *OffsetDef = MRI.getUniqueVRegDef(Offset);
947     if (!OffsetDef || !dominates(*OffsetDef, MI)) {
948       LLVM_DEBUG(dbgs() << "    Ignoring candidate with offset after mem-op: "
949                         << Use);
950       continue;
951     }
952 
953     // FIXME: check whether all uses of Base are load/store with foldable
954     // addressing modes. If so, using the normal addr-modes is better than
955     // forming an indexed one.
956 
957     bool MemOpDominatesAddrUses = true;
958     for (auto &PtrAddUse :
959          MRI.use_nodbg_instructions(Use.getOperand(0).getReg())) {
960       if (!dominates(MI, PtrAddUse)) {
961         MemOpDominatesAddrUses = false;
962         break;
963       }
964     }
965 
966     if (!MemOpDominatesAddrUses) {
967       LLVM_DEBUG(
968           dbgs() << "    Ignoring candidate as memop does not dominate uses: "
969                  << Use);
970       continue;
971     }
972 
973     LLVM_DEBUG(dbgs() << "    Found match: " << Use);
974     Addr = Use.getOperand(0).getReg();
975     return true;
976   }
977 
978   return false;
979 }
980 
981 bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr,
982                                            Register &Base, Register &Offset) {
983   auto &MF = *MI.getParent()->getParent();
984   const auto &TLI = *MF.getSubtarget().getTargetLowering();
985 
986 #ifndef NDEBUG
987   unsigned Opcode = MI.getOpcode();
988   assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
989          Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
990 #endif
991 
992   Addr = MI.getOperand(1).getReg();
993   MachineInstr *AddrDef = getOpcodeDef(TargetOpcode::G_PTR_ADD, Addr, MRI);
994   if (!AddrDef || MRI.hasOneNonDBGUse(Addr))
995     return false;
996 
997   Base = AddrDef->getOperand(1).getReg();
998   Offset = AddrDef->getOperand(2).getReg();
999 
1000   LLVM_DEBUG(dbgs() << "Found potential pre-indexed load_store: " << MI);
1001 
1002   if (!ForceLegalIndexing &&
1003       !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ true, MRI)) {
1004     LLVM_DEBUG(dbgs() << "    Skipping, not legal for target");
1005     return false;
1006   }
1007 
1008   MachineInstr *BaseDef = getDefIgnoringCopies(Base, MRI);
1009   if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
1010     LLVM_DEBUG(dbgs() << "    Skipping, frame index would need copy anyway.");
1011     return false;
1012   }
1013 
1014   if (MI.getOpcode() == TargetOpcode::G_STORE) {
1015     // Would require a copy.
1016     if (Base == MI.getOperand(0).getReg()) {
1017       LLVM_DEBUG(dbgs() << "    Skipping, storing base so need copy anyway.");
1018       return false;
1019     }
1020 
1021     // We're expecting one use of Addr in MI, but it could also be the
1022     // value stored, which isn't actually dominated by the instruction.
1023     if (MI.getOperand(0).getReg() == Addr) {
1024       LLVM_DEBUG(dbgs() << "    Skipping, does not dominate all addr uses");
1025       return false;
1026     }
1027   }
1028 
1029   // FIXME: check whether all uses of the base pointer are constant PtrAdds.
1030   // That might allow us to end base's liveness here by adjusting the constant.
1031 
1032   for (auto &UseMI : MRI.use_nodbg_instructions(Addr)) {
1033     if (!dominates(MI, UseMI)) {
1034       LLVM_DEBUG(dbgs() << "    Skipping, does not dominate all addr uses.");
1035       return false;
1036     }
1037   }
1038 
1039   return true;
1040 }
1041 
1042 bool CombinerHelper::tryCombineIndexedLoadStore(MachineInstr &MI) {
1043   IndexedLoadStoreMatchInfo MatchInfo;
1044   if (matchCombineIndexedLoadStore(MI, MatchInfo)) {
1045     applyCombineIndexedLoadStore(MI, MatchInfo);
1046     return true;
1047   }
1048   return false;
1049 }
1050 
bool CombinerHelper::matchCombineIndexedLoadStore(
    MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) {
1052   unsigned Opcode = MI.getOpcode();
1053   if (Opcode != TargetOpcode::G_LOAD && Opcode != TargetOpcode::G_SEXTLOAD &&
1054       Opcode != TargetOpcode::G_ZEXTLOAD && Opcode != TargetOpcode::G_STORE)
1055     return false;
1056 
1057   // For now, no targets actually support these opcodes so don't waste time
1058   // running these unless we're forced to for testing.
1059   if (!ForceLegalIndexing)
1060     return false;
1061 
1062   MatchInfo.IsPre = findPreIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
1063                                           MatchInfo.Offset);
1064   if (!MatchInfo.IsPre &&
1065       !findPostIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
1066                               MatchInfo.Offset))
1067     return false;
1068 
1069   return true;
1070 }
1071 
1072 void CombinerHelper::applyCombineIndexedLoadStore(
1073     MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) {
1074   MachineInstr &AddrDef = *MRI.getUniqueVRegDef(MatchInfo.Addr);
1075   MachineIRBuilder MIRBuilder(MI);
1076   unsigned Opcode = MI.getOpcode();
1077   bool IsStore = Opcode == TargetOpcode::G_STORE;
1078   unsigned NewOpcode;
1079   switch (Opcode) {
1080   case TargetOpcode::G_LOAD:
1081     NewOpcode = TargetOpcode::G_INDEXED_LOAD;
1082     break;
1083   case TargetOpcode::G_SEXTLOAD:
1084     NewOpcode = TargetOpcode::G_INDEXED_SEXTLOAD;
1085     break;
1086   case TargetOpcode::G_ZEXTLOAD:
1087     NewOpcode = TargetOpcode::G_INDEXED_ZEXTLOAD;
1088     break;
1089   case TargetOpcode::G_STORE:
1090     NewOpcode = TargetOpcode::G_INDEXED_STORE;
1091     break;
1092   default:
1093     llvm_unreachable("Unknown load/store opcode");
1094   }
1095 
1096   auto MIB = MIRBuilder.buildInstr(NewOpcode);
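  // Indexed memops also define the written-back address. E.g. a pre-indexed
  // load becomes:
  //   %dst:_(s32), %writeback:_(p0) = G_INDEXED_LOAD %base, %offset, 1
  // while for a store the writeback address is the only def.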
1097   if (IsStore) {
1098     MIB.addDef(MatchInfo.Addr);
1099     MIB.addUse(MI.getOperand(0).getReg());
1100   } else {
1101     MIB.addDef(MI.getOperand(0).getReg());
1102     MIB.addDef(MatchInfo.Addr);
1103   }
1104 
1105   MIB.addUse(MatchInfo.Base);
1106   MIB.addUse(MatchInfo.Offset);
1107   MIB.addImm(MatchInfo.IsPre);
1108   MI.eraseFromParent();
1109   AddrDef.eraseFromParent();
1110 
1111   LLVM_DEBUG(dbgs() << "    Combinined to indexed operation");
1112 }
1113 
1114 bool CombinerHelper::matchCombineDivRem(MachineInstr &MI,
1115                                         MachineInstr *&OtherMI) {
1116   unsigned Opcode = MI.getOpcode();
1117   bool IsDiv, IsSigned;
1118 
1119   switch (Opcode) {
1120   default:
1121     llvm_unreachable("Unexpected opcode!");
1122   case TargetOpcode::G_SDIV:
1123   case TargetOpcode::G_UDIV: {
1124     IsDiv = true;
1125     IsSigned = Opcode == TargetOpcode::G_SDIV;
1126     break;
1127   }
1128   case TargetOpcode::G_SREM:
1129   case TargetOpcode::G_UREM: {
1130     IsDiv = false;
1131     IsSigned = Opcode == TargetOpcode::G_SREM;
1132     break;
1133   }
1134   }
1135 
1136   Register Src1 = MI.getOperand(1).getReg();
1137   unsigned DivOpcode, RemOpcode, DivremOpcode;
1138   if (IsSigned) {
1139     DivOpcode = TargetOpcode::G_SDIV;
1140     RemOpcode = TargetOpcode::G_SREM;
1141     DivremOpcode = TargetOpcode::G_SDIVREM;
1142   } else {
1143     DivOpcode = TargetOpcode::G_UDIV;
1144     RemOpcode = TargetOpcode::G_UREM;
1145     DivremOpcode = TargetOpcode::G_UDIVREM;
1146   }
1147 
1148   if (!isLegalOrBeforeLegalizer({DivremOpcode, {MRI.getType(Src1)}}))
1149     return false;
1150 
1151   // Combine:
1152   //   %div:_ = G_[SU]DIV %src1:_, %src2:_
1153   //   %rem:_ = G_[SU]REM %src1:_, %src2:_
1154   // into:
1155   //  %div:_, %rem:_ = G_[SU]DIVREM %src1:_, %src2:_
1156 
1157   // Combine:
1158   //   %rem:_ = G_[SU]REM %src1:_, %src2:_
1159   //   %div:_ = G_[SU]DIV %src1:_, %src2:_
1160   // into:
1161   //  %div:_, %rem:_ = G_[SU]DIVREM %src1:_, %src2:_
1162 
1163   for (auto &UseMI : MRI.use_nodbg_instructions(Src1)) {
1164     if (MI.getParent() == UseMI.getParent() &&
1165         ((IsDiv && UseMI.getOpcode() == RemOpcode) ||
1166          (!IsDiv && UseMI.getOpcode() == DivOpcode)) &&
1167         matchEqualDefs(MI.getOperand(2), UseMI.getOperand(2)) &&
1168         matchEqualDefs(MI.getOperand(1), UseMI.getOperand(1))) {
1169       OtherMI = &UseMI;
1170       return true;
1171     }
1172   }
1173 
1174   return false;
1175 }
1176 
1177 void CombinerHelper::applyCombineDivRem(MachineInstr &MI,
1178                                         MachineInstr *&OtherMI) {
1179   unsigned Opcode = MI.getOpcode();
1180   assert(OtherMI && "OtherMI shouldn't be empty.");
1181 
1182   Register DestDivReg, DestRemReg;
1183   if (Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_UDIV) {
1184     DestDivReg = MI.getOperand(0).getReg();
1185     DestRemReg = OtherMI->getOperand(0).getReg();
1186   } else {
1187     DestDivReg = OtherMI->getOperand(0).getReg();
1188     DestRemReg = MI.getOperand(0).getReg();
1189   }
1190 
1191   bool IsSigned =
1192       Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_SREM;
1193 
1194   // Check which instruction is first in the block so we don't break def-use
1195   // deps by "moving" the instruction incorrectly. Also keep track of which
  // instruction is first so we pick its operands, avoiding use-before-def
1197   // bugs.
1198   MachineInstr *FirstInst;
1199   if (dominates(MI, *OtherMI)) {
1200     Builder.setInstrAndDebugLoc(MI);
1201     FirstInst = &MI;
1202   } else {
1203     Builder.setInstrAndDebugLoc(*OtherMI);
1204     FirstInst = OtherMI;
1205   }
1206 
1207   Builder.buildInstr(IsSigned ? TargetOpcode::G_SDIVREM
1208                               : TargetOpcode::G_UDIVREM,
1209                      {DestDivReg, DestRemReg},
                     {FirstInst->getOperand(1), FirstInst->getOperand(2)});
1211   MI.eraseFromParent();
1212   OtherMI->eraseFromParent();
1213 }
1214 
1215 bool CombinerHelper::matchOptBrCondByInvertingCond(MachineInstr &MI,
1216                                                    MachineInstr *&BrCond) {
1217   assert(MI.getOpcode() == TargetOpcode::G_BR);
1218 
1219   // Try to match the following:
1220   // bb1:
1221   //   G_BRCOND %c1, %bb2
1222   //   G_BR %bb3
1223   // bb2:
1224   // ...
1225   // bb3:
1226 
  // The above pattern does not have a fallthrough to the successor bb2, always
  // resulting in a branch no matter which path is taken. Here we try to find
  // and replace that pattern with a conditional branch to bb3 and otherwise a
  // fallthrough to bb2. This is generally better for branch predictors.
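  //
  // i.e. we rewrite the above into:
  // bb1:
  //   G_BRCOND %c1_inverted, %bb3
  //   G_BR %bb2
  // bb2:
  // ...
  // where the remaining unconditional branch targets the layout successor and
  // can be cleaned up later.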
1231 
1232   MachineBasicBlock *MBB = MI.getParent();
1233   MachineBasicBlock::iterator BrIt(MI);
1234   if (BrIt == MBB->begin())
1235     return false;
1236   assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator");
1237 
1238   BrCond = &*std::prev(BrIt);
1239   if (BrCond->getOpcode() != TargetOpcode::G_BRCOND)
1240     return false;
1241 
1242   // Check that the next block is the conditional branch target. Also make sure
1243   // that it isn't the same as the G_BR's target (otherwise, this will loop.)
1244   MachineBasicBlock *BrCondTarget = BrCond->getOperand(1).getMBB();
1245   return BrCondTarget != MI.getOperand(0).getMBB() &&
1246          MBB->isLayoutSuccessor(BrCondTarget);
1247 }
1248 
1249 void CombinerHelper::applyOptBrCondByInvertingCond(MachineInstr &MI,
1250                                                    MachineInstr *&BrCond) {
1251   MachineBasicBlock *BrTarget = MI.getOperand(0).getMBB();
1252   Builder.setInstrAndDebugLoc(*BrCond);
1253   LLT Ty = MRI.getType(BrCond->getOperand(0).getReg());
1254   // FIXME: Does int/fp matter for this? If so, we might need to restrict
1255   // this to i1 only since we might not know for sure what kind of
1256   // compare generated the condition value.
  auto True = Builder.buildConstant(
      Ty, getICmpTrueVal(getTargetLowering(), /*IsVector=*/false,
                         /*IsFP=*/false));
1259   auto Xor = Builder.buildXor(Ty, BrCond->getOperand(0), True);
1260 
1261   auto *FallthroughBB = BrCond->getOperand(1).getMBB();
1262   Observer.changingInstr(MI);
1263   MI.getOperand(0).setMBB(FallthroughBB);
1264   Observer.changedInstr(MI);
1265 
1266   // Change the conditional branch to use the inverted condition and
1267   // new target block.
1268   Observer.changingInstr(*BrCond);
1269   BrCond->getOperand(0).setReg(Xor.getReg(0));
1270   BrCond->getOperand(1).setMBB(BrTarget);
1271   Observer.changedInstr(*BrCond);
1272 }
1273 
1274 static Type *getTypeForLLT(LLT Ty, LLVMContext &C) {
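  // E.g. s64 -> i64 and <4 x s16> -> <4 x i16>.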
1275   if (Ty.isVector())
1276     return FixedVectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
1277                                 Ty.getNumElements());
1278   return IntegerType::get(C, Ty.getSizeInBits());
1279 }
1280 
1281 bool CombinerHelper::tryEmitMemcpyInline(MachineInstr &MI) {
1282   MachineIRBuilder HelperBuilder(MI);
1283   GISelObserverWrapper DummyObserver;
1284   LegalizerHelper Helper(HelperBuilder.getMF(), DummyObserver, HelperBuilder);
1285   return Helper.lowerMemcpyInline(MI) ==
1286          LegalizerHelper::LegalizeResult::Legalized;
1287 }
1288 
1289 bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
1290   MachineIRBuilder HelperBuilder(MI);
1291   GISelObserverWrapper DummyObserver;
1292   LegalizerHelper Helper(HelperBuilder.getMF(), DummyObserver, HelperBuilder);
1293   return Helper.lowerMemCpyFamily(MI, MaxLen) ==
1294          LegalizerHelper::LegalizeResult::Legalized;
1295 }
1296 
1297 static APFloat constantFoldFpUnary(const MachineInstr &MI,
1298                                    const MachineRegisterInfo &MRI,
1299                                    const APFloat &Val) {
1300   APFloat Result(Val);
1301   switch (MI.getOpcode()) {
1302   default:
1303     llvm_unreachable("Unexpected opcode!");
1304   case TargetOpcode::G_FNEG: {
1305     Result.changeSign();
1306     return Result;
1307   }
1308   case TargetOpcode::G_FABS: {
1309     Result.clearSign();
1310     return Result;
1311   }
1312   case TargetOpcode::G_FPTRUNC: {
1313     bool Unused;
1314     LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
1315     Result.convert(getFltSemanticForLLT(DstTy), APFloat::rmNearestTiesToEven,
1316                    &Unused);
1317     return Result;
1318   }
1319   case TargetOpcode::G_FSQRT: {
1320     bool Unused;
1321     Result.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
1322                    &Unused);
1323     Result = APFloat(sqrt(Result.convertToDouble()));
1324     break;
1325   }
1326   case TargetOpcode::G_FLOG2: {
1327     bool Unused;
1328     Result.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
1329                    &Unused);
1330     Result = APFloat(log2(Result.convertToDouble()));
1331     break;
1332   }
1333   }
  // Convert the folded APFloat back to the semantics of the original value.
  // Otherwise, `buildFConstant` will assert on a size mismatch. Only `G_FSQRT`
  // and `G_FLOG2` reach here.
1337   bool Unused;
1338   Result.convert(Val.getSemantics(), APFloat::rmNearestTiesToEven, &Unused);
1339   return Result;
1340 }
1341 
1342 void CombinerHelper::applyCombineConstantFoldFpUnary(MachineInstr &MI,
1343                                                      const ConstantFP *Cst) {
1344   Builder.setInstrAndDebugLoc(MI);
1345   APFloat Folded = constantFoldFpUnary(MI, MRI, Cst->getValue());
1346   const ConstantFP *NewCst = ConstantFP::get(Builder.getContext(), Folded);
1347   Builder.buildFConstant(MI.getOperand(0), *NewCst);
1348   MI.eraseFromParent();
1349 }
1350 
1351 bool CombinerHelper::matchPtrAddImmedChain(MachineInstr &MI,
1352                                            PtrAddChain &MatchInfo) {
1353   // We're trying to match the following pattern:
1354   //   %t1 = G_PTR_ADD %base, G_CONSTANT imm1
1355   //   %root = G_PTR_ADD %t1, G_CONSTANT imm2
1356   // -->
1357   //   %root = G_PTR_ADD %base, G_CONSTANT (imm1 + imm2)
1358 
1359   if (MI.getOpcode() != TargetOpcode::G_PTR_ADD)
1360     return false;
1361 
1362   Register Add2 = MI.getOperand(1).getReg();
1363   Register Imm1 = MI.getOperand(2).getReg();
1364   auto MaybeImmVal = getIConstantVRegValWithLookThrough(Imm1, MRI);
1365   if (!MaybeImmVal)
1366     return false;
1367 
1368   MachineInstr *Add2Def = MRI.getVRegDef(Add2);
1369   if (!Add2Def || Add2Def->getOpcode() != TargetOpcode::G_PTR_ADD)
1370     return false;
1371 
1372   Register Base = Add2Def->getOperand(1).getReg();
1373   Register Imm2 = Add2Def->getOperand(2).getReg();
1374   auto MaybeImm2Val = getIConstantVRegValWithLookThrough(Imm2, MRI);
1375   if (!MaybeImm2Val)
1376     return false;
1377 
1378   // Check if the new combined immediate forms an illegal addressing mode.
1379   // Do not combine if it was legal before but would get illegal.
1380   // To do so, we need to find a load/store user of the pointer to get
1381   // the access type.
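  // E.g. on a target whose load/store addressing mode only accepts immediates
  // up to 255 (a hypothetical limit for illustration), folding two offsets of
  // 200 each into a single offset of 400 would turn a legal addressing mode
  // into an illegal one.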
1382   Type *AccessTy = nullptr;
1383   auto &MF = *MI.getMF();
1384   for (auto &UseMI : MRI.use_nodbg_instructions(MI.getOperand(0).getReg())) {
1385     if (auto *LdSt = dyn_cast<GLoadStore>(&UseMI)) {
1386       AccessTy = getTypeForLLT(MRI.getType(LdSt->getReg(0)),
1387                                MF.getFunction().getContext());
1388       break;
1389     }
1390   }
1391   TargetLoweringBase::AddrMode AMNew;
1392   APInt CombinedImm = MaybeImmVal->Value + MaybeImm2Val->Value;
1393   AMNew.BaseOffs = CombinedImm.getSExtValue();
1394   if (AccessTy) {
1395     AMNew.HasBaseReg = true;
1396     TargetLoweringBase::AddrMode AMOld;
1397     AMOld.BaseOffs = MaybeImm2Val->Value.getSExtValue();
1398     AMOld.HasBaseReg = true;
1399     unsigned AS = MRI.getType(Add2).getAddressSpace();
1400     const auto &TLI = *MF.getSubtarget().getTargetLowering();
1401     if (TLI.isLegalAddressingMode(MF.getDataLayout(), AMOld, AccessTy, AS) &&
1402         !TLI.isLegalAddressingMode(MF.getDataLayout(), AMNew, AccessTy, AS))
1403       return false;
1404   }
1405 
1406   // Pass the combined immediate to the apply function.
1407   MatchInfo.Imm = AMNew.BaseOffs;
1408   MatchInfo.Base = Base;
1409   MatchInfo.Bank = getRegBank(Imm2);
1410   return true;
1411 }
1412 
1413 void CombinerHelper::applyPtrAddImmedChain(MachineInstr &MI,
1414                                            PtrAddChain &MatchInfo) {
1415   assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected G_PTR_ADD");
1416   MachineIRBuilder MIB(MI);
1417   LLT OffsetTy = MRI.getType(MI.getOperand(2).getReg());
1418   auto NewOffset = MIB.buildConstant(OffsetTy, MatchInfo.Imm);
1419   setRegBank(NewOffset.getReg(0), MatchInfo.Bank);
1420   Observer.changingInstr(MI);
1421   MI.getOperand(1).setReg(MatchInfo.Base);
1422   MI.getOperand(2).setReg(NewOffset.getReg(0));
1423   Observer.changedInstr(MI);
1424 }
1425 
1426 bool CombinerHelper::matchShiftImmedChain(MachineInstr &MI,
1427                                           RegisterImmPair &MatchInfo) {
1428   // We're trying to match the following pattern with any of
1429   // G_SHL/G_ASHR/G_LSHR/G_SSHLSAT/G_USHLSAT shift instructions:
1430   //   %t1 = SHIFT %base, G_CONSTANT imm1
1431   //   %root = SHIFT %t1, G_CONSTANT imm2
1432   // -->
1433   //   %root = SHIFT %base, G_CONSTANT (imm1 + imm2)
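  //
  // For example, with G_SHL (constants are illustrative only):
  //   %t1 = G_SHL %base, 2
  //   %root = G_SHL %t1, 3
  // becomes
  //   %root = G_SHL %base, 5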
1434 
1435   unsigned Opcode = MI.getOpcode();
1436   assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1437           Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
1438           Opcode == TargetOpcode::G_USHLSAT) &&
1439          "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");
1440 
1441   Register Shl2 = MI.getOperand(1).getReg();
1442   Register Imm1 = MI.getOperand(2).getReg();
1443   auto MaybeImmVal = getIConstantVRegValWithLookThrough(Imm1, MRI);
1444   if (!MaybeImmVal)
1445     return false;
1446 
1447   MachineInstr *Shl2Def = MRI.getUniqueVRegDef(Shl2);
1448   if (Shl2Def->getOpcode() != Opcode)
1449     return false;
1450 
1451   Register Base = Shl2Def->getOperand(1).getReg();
1452   Register Imm2 = Shl2Def->getOperand(2).getReg();
1453   auto MaybeImm2Val = getIConstantVRegValWithLookThrough(Imm2, MRI);
1454   if (!MaybeImm2Val)
1455     return false;
1456 
1457   // Pass the combined immediate to the apply function.
1458   MatchInfo.Imm =
1459       (MaybeImmVal->Value.getSExtValue() + MaybeImm2Val->Value).getSExtValue();
1460   MatchInfo.Reg = Base;
1461 
1462   // There is no simple replacement for a saturating unsigned left shift that
1463   // exceeds the scalar size.
1464   if (Opcode == TargetOpcode::G_USHLSAT &&
1465       MatchInfo.Imm >= MRI.getType(Shl2).getScalarSizeInBits())
1466     return false;
1467 
1468   return true;
1469 }
1470 
1471 void CombinerHelper::applyShiftImmedChain(MachineInstr &MI,
1472                                           RegisterImmPair &MatchInfo) {
1473   unsigned Opcode = MI.getOpcode();
1474   assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1475           Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
1476           Opcode == TargetOpcode::G_USHLSAT) &&
1477          "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");
1478 
1479   Builder.setInstrAndDebugLoc(MI);
1480   LLT Ty = MRI.getType(MI.getOperand(1).getReg());
1481   unsigned const ScalarSizeInBits = Ty.getScalarSizeInBits();
1482   auto Imm = MatchInfo.Imm;
1483 
1484   if (Imm >= ScalarSizeInBits) {
    // Any logical shift of at least the scalar size produces zero.
1486     if (Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_LSHR) {
1487       Builder.buildConstant(MI.getOperand(0), 0);
1488       MI.eraseFromParent();
1489       return;
1490     }
1491     // Arithmetic shift and saturating signed left shift have no effect beyond
1492     // scalar size.
1493     Imm = ScalarSizeInBits - 1;
1494   }
1495 
1496   LLT ImmTy = MRI.getType(MI.getOperand(2).getReg());
1497   Register NewImm = Builder.buildConstant(ImmTy, Imm).getReg(0);
1498   Observer.changingInstr(MI);
1499   MI.getOperand(1).setReg(MatchInfo.Reg);
1500   MI.getOperand(2).setReg(NewImm);
1501   Observer.changedInstr(MI);
1502 }
1503 
1504 bool CombinerHelper::matchShiftOfShiftedLogic(MachineInstr &MI,
1505                                               ShiftOfShiftedLogic &MatchInfo) {
1506   // We're trying to match the following pattern with any of
1507   // G_SHL/G_ASHR/G_LSHR/G_USHLSAT/G_SSHLSAT shift instructions in combination
1508   // with any of G_AND/G_OR/G_XOR logic instructions.
1509   //   %t1 = SHIFT %X, G_CONSTANT C0
1510   //   %t2 = LOGIC %t1, %Y
1511   //   %root = SHIFT %t2, G_CONSTANT C1
1512   // -->
1513   //   %t3 = SHIFT %X, G_CONSTANT (C0+C1)
1514   //   %t4 = SHIFT %Y, G_CONSTANT C1
1515   //   %root = LOGIC %t3, %t4
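  //
  // For example, with G_SHL and G_AND (constants are illustrative only):
  //   %t1 = G_SHL %X, 2
  //   %t2 = G_AND %t1, %Y
  //   %root = G_SHL %t2, 3
  // becomes
  //   %t3 = G_SHL %X, 5
  //   %t4 = G_SHL %Y, 3
  //   %root = G_AND %t3, %t4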
1516   unsigned ShiftOpcode = MI.getOpcode();
1517   assert((ShiftOpcode == TargetOpcode::G_SHL ||
1518           ShiftOpcode == TargetOpcode::G_ASHR ||
1519           ShiftOpcode == TargetOpcode::G_LSHR ||
1520           ShiftOpcode == TargetOpcode::G_USHLSAT ||
1521           ShiftOpcode == TargetOpcode::G_SSHLSAT) &&
1522          "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");
1523 
1524   // Match a one-use bitwise logic op.
1525   Register LogicDest = MI.getOperand(1).getReg();
1526   if (!MRI.hasOneNonDBGUse(LogicDest))
1527     return false;
1528 
1529   MachineInstr *LogicMI = MRI.getUniqueVRegDef(LogicDest);
1530   unsigned LogicOpcode = LogicMI->getOpcode();
1531   if (LogicOpcode != TargetOpcode::G_AND && LogicOpcode != TargetOpcode::G_OR &&
1532       LogicOpcode != TargetOpcode::G_XOR)
1533     return false;
1534 
1535   // Find a matching one-use shift by constant.
1536   const Register C1 = MI.getOperand(2).getReg();
1537   auto MaybeImmVal = getIConstantVRegValWithLookThrough(C1, MRI);
1538   if (!MaybeImmVal)
1539     return false;
1540 
1541   const uint64_t C1Val = MaybeImmVal->Value.getZExtValue();
1542 
1543   auto matchFirstShift = [&](const MachineInstr *MI, uint64_t &ShiftVal) {
    // The shift should match the previous one and have only one use.
1545     if (MI->getOpcode() != ShiftOpcode ||
1546         !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
1547       return false;
1548 
1549     // Must be a constant.
1550     auto MaybeImmVal =
1551         getIConstantVRegValWithLookThrough(MI->getOperand(2).getReg(), MRI);
1552     if (!MaybeImmVal)
1553       return false;
1554 
1555     ShiftVal = MaybeImmVal->Value.getSExtValue();
1556     return true;
1557   };
1558 
1559   // Logic ops are commutative, so check each operand for a match.
1560   Register LogicMIReg1 = LogicMI->getOperand(1).getReg();
1561   MachineInstr *LogicMIOp1 = MRI.getUniqueVRegDef(LogicMIReg1);
1562   Register LogicMIReg2 = LogicMI->getOperand(2).getReg();
1563   MachineInstr *LogicMIOp2 = MRI.getUniqueVRegDef(LogicMIReg2);
1564   uint64_t C0Val;
1565 
1566   if (matchFirstShift(LogicMIOp1, C0Val)) {
1567     MatchInfo.LogicNonShiftReg = LogicMIReg2;
1568     MatchInfo.Shift2 = LogicMIOp1;
1569   } else if (matchFirstShift(LogicMIOp2, C0Val)) {
1570     MatchInfo.LogicNonShiftReg = LogicMIReg1;
1571     MatchInfo.Shift2 = LogicMIOp2;
1572   } else
1573     return false;
1574 
1575   MatchInfo.ValSum = C0Val + C1Val;
1576 
  // The fold is not valid if the sum of the shift values reaches or exceeds
  // the bitwidth.
1578   if (MatchInfo.ValSum >= MRI.getType(LogicDest).getScalarSizeInBits())
1579     return false;
1580 
1581   MatchInfo.Logic = LogicMI;
1582   return true;
1583 }
1584 
1585 void CombinerHelper::applyShiftOfShiftedLogic(MachineInstr &MI,
1586                                               ShiftOfShiftedLogic &MatchInfo) {
1587   unsigned Opcode = MI.getOpcode();
1588   assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1589           Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_USHLSAT ||
1590           Opcode == TargetOpcode::G_SSHLSAT) &&
1591          "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");
1592 
1593   LLT ShlType = MRI.getType(MI.getOperand(2).getReg());
1594   LLT DestType = MRI.getType(MI.getOperand(0).getReg());
1595   Builder.setInstrAndDebugLoc(MI);
1596 
1597   Register Const = Builder.buildConstant(ShlType, MatchInfo.ValSum).getReg(0);
1598 
1599   Register Shift1Base = MatchInfo.Shift2->getOperand(1).getReg();
1600   Register Shift1 =
1601       Builder.buildInstr(Opcode, {DestType}, {Shift1Base, Const}).getReg(0);
1602 
  // If LogicNonShiftReg is the same as Shift1Base and the shift1 constant
  // equals the MatchInfo.Shift2 constant, CSEMIRBuilder will reuse the old
  // shift1 when building shift2. In that case, erasing MatchInfo.Shift2 at the
  // end would actually erase the old shift1 and cause a crash later. Erase it
  // earlier to avoid the crash.
1608   MatchInfo.Shift2->eraseFromParent();
1609 
1610   Register Shift2Const = MI.getOperand(2).getReg();
1611   Register Shift2 = Builder
1612                         .buildInstr(Opcode, {DestType},
1613                                     {MatchInfo.LogicNonShiftReg, Shift2Const})
1614                         .getReg(0);
1615 
1616   Register Dest = MI.getOperand(0).getReg();
1617   Builder.buildInstr(MatchInfo.Logic->getOpcode(), {Dest}, {Shift1, Shift2});
1618 
1619   // This was one use so it's safe to remove it.
1620   MatchInfo.Logic->eraseFromParent();
1621 
1622   MI.eraseFromParent();
1623 }
1624 
1625 bool CombinerHelper::matchCommuteShift(MachineInstr &MI, BuildFnTy &MatchInfo) {
1626   assert(MI.getOpcode() == TargetOpcode::G_SHL && "Expected G_SHL");
1627   // Combine (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
1628   // Combine (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
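  // For example, (shl (add x, 4), 2) becomes (add (shl x, 2), (shl 4, 2)),
  // where the shifted constant folds to 16.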
1629   auto &Shl = cast<GenericMachineInstr>(MI);
1630   Register DstReg = Shl.getReg(0);
1631   Register SrcReg = Shl.getReg(1);
1632   Register ShiftReg = Shl.getReg(2);
1633   Register X, C1;
1634 
1635   if (!getTargetLowering().isDesirableToCommuteWithShift(MI, !isPreLegalize()))
1636     return false;
1637 
1638   if (!mi_match(SrcReg, MRI,
1639                 m_OneNonDBGUse(m_any_of(m_GAdd(m_Reg(X), m_Reg(C1)),
1640                                         m_GOr(m_Reg(X), m_Reg(C1))))))
1641     return false;
1642 
1643   APInt C1Val, C2Val;
1644   if (!mi_match(C1, MRI, m_ICstOrSplat(C1Val)) ||
1645       !mi_match(ShiftReg, MRI, m_ICstOrSplat(C2Val)))
1646     return false;
1647 
1648   auto *SrcDef = MRI.getVRegDef(SrcReg);
1649   assert((SrcDef->getOpcode() == TargetOpcode::G_ADD ||
1650           SrcDef->getOpcode() == TargetOpcode::G_OR) && "Unexpected op");
1651   LLT SrcTy = MRI.getType(SrcReg);
1652   MatchInfo = [=](MachineIRBuilder &B) {
1653     auto S1 = B.buildShl(SrcTy, X, ShiftReg);
1654     auto S2 = B.buildShl(SrcTy, C1, ShiftReg);
1655     B.buildInstr(SrcDef->getOpcode(), {DstReg}, {S1, S2});
1656   };
1657   return true;
1658 }
1659 
1660 bool CombinerHelper::matchCombineMulToShl(MachineInstr &MI,
1661                                           unsigned &ShiftVal) {
1662   assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
1663   auto MaybeImmVal =
1664       getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
1665   if (!MaybeImmVal)
1666     return false;
1667 
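  // APInt::exactLogBase2 returns -1 when the value is not a power of two, so
  // e.g. a multiply by 8 is rewritten as a left shift by 3, while a multiply
  // by 6 is rejected.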
1668   ShiftVal = MaybeImmVal->Value.exactLogBase2();
1669   return (static_cast<int32_t>(ShiftVal) != -1);
1670 }
1671 
1672 void CombinerHelper::applyCombineMulToShl(MachineInstr &MI,
1673                                           unsigned &ShiftVal) {
1674   assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
1675   MachineIRBuilder MIB(MI);
1676   LLT ShiftTy = MRI.getType(MI.getOperand(0).getReg());
1677   auto ShiftCst = MIB.buildConstant(ShiftTy, ShiftVal);
1678   Observer.changingInstr(MI);
1679   MI.setDesc(MIB.getTII().get(TargetOpcode::G_SHL));
1680   MI.getOperand(2).setReg(ShiftCst.getReg(0));
1681   Observer.changedInstr(MI);
1682 }
1683 
1684 // shl ([sza]ext x), y => zext (shl x, y), if shift does not overflow source
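// For example, if x:s8 is known to have at least three leading zeros, then
// (shl (zext x to s32), 2) can be narrowed to (zext (shl x, 2) to s32), since
// the shift cannot overflow the 8-bit source.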
1685 bool CombinerHelper::matchCombineShlOfExtend(MachineInstr &MI,
1686                                              RegisterImmPair &MatchData) {
1687   assert(MI.getOpcode() == TargetOpcode::G_SHL && KB);
1688 
1689   Register LHS = MI.getOperand(1).getReg();
1690 
1691   Register ExtSrc;
1692   if (!mi_match(LHS, MRI, m_GAnyExt(m_Reg(ExtSrc))) &&
1693       !mi_match(LHS, MRI, m_GZExt(m_Reg(ExtSrc))) &&
1694       !mi_match(LHS, MRI, m_GSExt(m_Reg(ExtSrc))))
1695     return false;
1696 
1697   Register RHS = MI.getOperand(2).getReg();
1698   MachineInstr *MIShiftAmt = MRI.getVRegDef(RHS);
1699   auto MaybeShiftAmtVal = isConstantOrConstantSplatVector(*MIShiftAmt, MRI);
1700   if (!MaybeShiftAmtVal)
1701     return false;
1702 
1703   if (LI) {
1704     LLT SrcTy = MRI.getType(ExtSrc);
1705 
    // We only really care about the legality of the shifted value. We can
    // pick any type for the constant shift amount, so ask the target what to
    // use. Otherwise we would have to guess and hope it is reported as legal.
1709     LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(SrcTy);
1710     if (!isLegalOrBeforeLegalizer({TargetOpcode::G_SHL, {SrcTy, ShiftAmtTy}}))
1711       return false;
1712   }
1713 
1714   int64_t ShiftAmt = MaybeShiftAmtVal->getSExtValue();
1715   MatchData.Reg = ExtSrc;
1716   MatchData.Imm = ShiftAmt;
1717 
1718   unsigned MinLeadingZeros = KB->getKnownZeroes(ExtSrc).countl_one();
1719   unsigned SrcTySize = MRI.getType(ExtSrc).getScalarSizeInBits();
1720   return MinLeadingZeros >= ShiftAmt && ShiftAmt < SrcTySize;
1721 }
1722 
1723 void CombinerHelper::applyCombineShlOfExtend(MachineInstr &MI,
1724                                              const RegisterImmPair &MatchData) {
1725   Register ExtSrcReg = MatchData.Reg;
1726   int64_t ShiftAmtVal = MatchData.Imm;
1727 
1728   LLT ExtSrcTy = MRI.getType(ExtSrcReg);
1729   Builder.setInstrAndDebugLoc(MI);
1730   auto ShiftAmt = Builder.buildConstant(ExtSrcTy, ShiftAmtVal);
1731   auto NarrowShift =
1732       Builder.buildShl(ExtSrcTy, ExtSrcReg, ShiftAmt, MI.getFlags());
1733   Builder.buildZExt(MI.getOperand(0), NarrowShift);
1734   MI.eraseFromParent();
1735 }
1736 
1737 bool CombinerHelper::matchCombineMergeUnmerge(MachineInstr &MI,
1738                                               Register &MatchInfo) {
1739   GMerge &Merge = cast<GMerge>(MI);
1740   SmallVector<Register, 16> MergedValues;
1741   for (unsigned I = 0; I < Merge.getNumSources(); ++I)
1742     MergedValues.emplace_back(Merge.getSourceReg(I));
1743 
1744   auto *Unmerge = getOpcodeDef<GUnmerge>(MergedValues[0], MRI);
1745   if (!Unmerge || Unmerge->getNumDefs() != Merge.getNumSources())
1746     return false;
1747 
1748   for (unsigned I = 0; I < MergedValues.size(); ++I)
1749     if (MergedValues[I] != Unmerge->getReg(I))
1750       return false;
1751 
1752   MatchInfo = Unmerge->getSourceReg();
1753   return true;
1754 }
1755 
1756 static Register peekThroughBitcast(Register Reg,
1757                                    const MachineRegisterInfo &MRI) {
1758   while (mi_match(Reg, MRI, m_GBitcast(m_Reg(Reg))))
1759     ;
1760 
1761   return Reg;
1762 }
1763 
1764 bool CombinerHelper::matchCombineUnmergeMergeToPlainValues(
1765     MachineInstr &MI, SmallVectorImpl<Register> &Operands) {
1766   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1767          "Expected an unmerge");
1768   auto &Unmerge = cast<GUnmerge>(MI);
1769   Register SrcReg = peekThroughBitcast(Unmerge.getSourceReg(), MRI);
1770 
1771   auto *SrcInstr = getOpcodeDef<GMergeLikeInstr>(SrcReg, MRI);
1772   if (!SrcInstr)
1773     return false;
1774 
1775   // Check the source type of the merge.
1776   LLT SrcMergeTy = MRI.getType(SrcInstr->getSourceReg(0));
1777   LLT Dst0Ty = MRI.getType(Unmerge.getReg(0));
1778   bool SameSize = Dst0Ty.getSizeInBits() == SrcMergeTy.getSizeInBits();
1779   if (SrcMergeTy != Dst0Ty && !SameSize)
1780     return false;
1781   // They are the same now (modulo a bitcast).
1782   // We can collect all the src registers.
1783   for (unsigned Idx = 0; Idx < SrcInstr->getNumSources(); ++Idx)
1784     Operands.push_back(SrcInstr->getSourceReg(Idx));
1785   return true;
1786 }
1787 
1788 void CombinerHelper::applyCombineUnmergeMergeToPlainValues(
1789     MachineInstr &MI, SmallVectorImpl<Register> &Operands) {
1790   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1791          "Expected an unmerge");
1792   assert((MI.getNumOperands() - 1 == Operands.size()) &&
1793          "Not enough operands to replace all defs");
1794   unsigned NumElems = MI.getNumOperands() - 1;
1795 
1796   LLT SrcTy = MRI.getType(Operands[0]);
1797   LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
1798   bool CanReuseInputDirectly = DstTy == SrcTy;
1799   Builder.setInstrAndDebugLoc(MI);
1800   for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
1801     Register DstReg = MI.getOperand(Idx).getReg();
1802     Register SrcReg = Operands[Idx];
1803 
1804     // This combine may run after RegBankSelect, so we need to be aware of
1805     // register banks.
1806     const auto &DstCB = MRI.getRegClassOrRegBank(DstReg);
1807     if (!DstCB.isNull() && DstCB != MRI.getRegClassOrRegBank(SrcReg)) {
1808       SrcReg = Builder.buildCopy(MRI.getType(SrcReg), SrcReg).getReg(0);
1809       MRI.setRegClassOrRegBank(SrcReg, DstCB);
1810     }
1811 
1812     if (CanReuseInputDirectly)
1813       replaceRegWith(MRI, DstReg, SrcReg);
1814     else
1815       Builder.buildCast(DstReg, SrcReg);
1816   }
1817   MI.eraseFromParent();
1818 }
1819 
1820 bool CombinerHelper::matchCombineUnmergeConstant(MachineInstr &MI,
1821                                                  SmallVectorImpl<APInt> &Csts) {
1822   unsigned SrcIdx = MI.getNumOperands() - 1;
1823   Register SrcReg = MI.getOperand(SrcIdx).getReg();
1824   MachineInstr *SrcInstr = MRI.getVRegDef(SrcReg);
1825   if (SrcInstr->getOpcode() != TargetOpcode::G_CONSTANT &&
1826       SrcInstr->getOpcode() != TargetOpcode::G_FCONSTANT)
1827     return false;
  // Break the big constant down into smaller ones.
1829   const MachineOperand &CstVal = SrcInstr->getOperand(1);
1830   APInt Val = SrcInstr->getOpcode() == TargetOpcode::G_CONSTANT
1831                   ? CstVal.getCImm()->getValue()
1832                   : CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
1833 
1834   LLT Dst0Ty = MRI.getType(MI.getOperand(0).getReg());
1835   unsigned ShiftAmt = Dst0Ty.getSizeInBits();
1836   // Unmerge a constant.
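  // For example, unmerging the s64 constant 0x0123456789ABCDEF into two s32
  // pieces yields 0x89ABCDEF for def 0 and 0x01234567 for def 1 (def 0 takes
  // the low bits).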
1837   for (unsigned Idx = 0; Idx != SrcIdx; ++Idx) {
1838     Csts.emplace_back(Val.trunc(ShiftAmt));
1839     Val = Val.lshr(ShiftAmt);
1840   }
1841 
1842   return true;
1843 }
1844 
1845 void CombinerHelper::applyCombineUnmergeConstant(MachineInstr &MI,
1846                                                  SmallVectorImpl<APInt> &Csts) {
1847   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1848          "Expected an unmerge");
1849   assert((MI.getNumOperands() - 1 == Csts.size()) &&
1850          "Not enough operands to replace all defs");
1851   unsigned NumElems = MI.getNumOperands() - 1;
1852   Builder.setInstrAndDebugLoc(MI);
1853   for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
1854     Register DstReg = MI.getOperand(Idx).getReg();
1855     Builder.buildConstant(DstReg, Csts[Idx]);
1856   }
1857 
1858   MI.eraseFromParent();
1859 }
1860 
1861 bool CombinerHelper::matchCombineUnmergeUndef(
1862     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
1863   unsigned SrcIdx = MI.getNumOperands() - 1;
1864   Register SrcReg = MI.getOperand(SrcIdx).getReg();
1865   MatchInfo = [&MI](MachineIRBuilder &B) {
1866     unsigned NumElems = MI.getNumOperands() - 1;
1867     for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
1868       Register DstReg = MI.getOperand(Idx).getReg();
1869       B.buildUndef(DstReg);
1870     }
1871   };
1872   return isa<GImplicitDef>(MRI.getVRegDef(SrcReg));
1873 }
1874 
1875 bool CombinerHelper::matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) {
1876   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1877          "Expected an unmerge");
1878   // Check that all the lanes are dead except the first one.
1879   for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
1880     if (!MRI.use_nodbg_empty(MI.getOperand(Idx).getReg()))
1881       return false;
1882   }
1883   return true;
1884 }
1885 
1886 void CombinerHelper::applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) {
1887   Builder.setInstrAndDebugLoc(MI);
1888   Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
  // Truncating a vector is going to truncate every single lane,
  // whereas we want the full low bits.
  // Do the operation on a scalar instead.
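  // For example, for G_UNMERGE_VALUES of <2 x s32> where only def 0 is live,
  // cast the source to s64 and then G_TRUNC it to s32.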
1892   LLT SrcTy = MRI.getType(SrcReg);
1893   if (SrcTy.isVector())
1894     SrcReg =
1895         Builder.buildCast(LLT::scalar(SrcTy.getSizeInBits()), SrcReg).getReg(0);
1896 
1897   Register Dst0Reg = MI.getOperand(0).getReg();
1898   LLT Dst0Ty = MRI.getType(Dst0Reg);
1899   if (Dst0Ty.isVector()) {
1900     auto MIB = Builder.buildTrunc(LLT::scalar(Dst0Ty.getSizeInBits()), SrcReg);
1901     Builder.buildCast(Dst0Reg, MIB);
1902   } else
1903     Builder.buildTrunc(Dst0Reg, SrcReg);
1904   MI.eraseFromParent();
1905 }
1906 
1907 bool CombinerHelper::matchCombineUnmergeZExtToZExt(MachineInstr &MI) {
1908   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1909          "Expected an unmerge");
1910   Register Dst0Reg = MI.getOperand(0).getReg();
1911   LLT Dst0Ty = MRI.getType(Dst0Reg);
  // G_ZEXT on a vector applies to each lane, so it will
  // affect all destinations. Therefore we won't be able
  // to simplify the unmerge to just the first definition.
1915   if (Dst0Ty.isVector())
1916     return false;
1917   Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
1918   LLT SrcTy = MRI.getType(SrcReg);
1919   if (SrcTy.isVector())
1920     return false;
1921 
1922   Register ZExtSrcReg;
1923   if (!mi_match(SrcReg, MRI, m_GZExt(m_Reg(ZExtSrcReg))))
1924     return false;
1925 
1926   // Finally we can replace the first definition with
1927   // a zext of the source if the definition is big enough to hold
1928   // all of ZExtSrc bits.
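  //
  // For example, %d0:_(s32), %d1:_(s32) = G_UNMERGE_VALUES (G_ZEXT %x:_(s16))
  // can be rewritten as %d0 = G_ZEXT %x and %d1 = G_CONSTANT 0.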
1929   LLT ZExtSrcTy = MRI.getType(ZExtSrcReg);
1930   return ZExtSrcTy.getSizeInBits() <= Dst0Ty.getSizeInBits();
1931 }
1932 
1933 void CombinerHelper::applyCombineUnmergeZExtToZExt(MachineInstr &MI) {
1934   assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1935          "Expected an unmerge");
1936 
1937   Register Dst0Reg = MI.getOperand(0).getReg();
1938 
1939   MachineInstr *ZExtInstr =
1940       MRI.getVRegDef(MI.getOperand(MI.getNumDefs()).getReg());
1941   assert(ZExtInstr && ZExtInstr->getOpcode() == TargetOpcode::G_ZEXT &&
1942          "Expecting a G_ZEXT");
1943 
1944   Register ZExtSrcReg = ZExtInstr->getOperand(1).getReg();
1945   LLT Dst0Ty = MRI.getType(Dst0Reg);
1946   LLT ZExtSrcTy = MRI.getType(ZExtSrcReg);
1947 
1948   Builder.setInstrAndDebugLoc(MI);
1949 
1950   if (Dst0Ty.getSizeInBits() > ZExtSrcTy.getSizeInBits()) {
1951     Builder.buildZExt(Dst0Reg, ZExtSrcReg);
1952   } else {
1953     assert(Dst0Ty.getSizeInBits() == ZExtSrcTy.getSizeInBits() &&
1954            "ZExt src doesn't fit in destination");
1955     replaceRegWith(MRI, Dst0Reg, ZExtSrcReg);
1956   }
1957 
1958   Register ZeroReg;
1959   for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
1960     if (!ZeroReg)
1961       ZeroReg = Builder.buildConstant(Dst0Ty, 0).getReg(0);
1962     replaceRegWith(MRI, MI.getOperand(Idx).getReg(), ZeroReg);
1963   }
1964   MI.eraseFromParent();
1965 }
1966 
1967 bool CombinerHelper::matchCombineShiftToUnmerge(MachineInstr &MI,
1968                                                 unsigned TargetShiftSize,
1969                                                 unsigned &ShiftVal) {
1970   assert((MI.getOpcode() == TargetOpcode::G_SHL ||
1971           MI.getOpcode() == TargetOpcode::G_LSHR ||
1972           MI.getOpcode() == TargetOpcode::G_ASHR) && "Expected a shift");
1973 
1974   LLT Ty = MRI.getType(MI.getOperand(0).getReg());
1975   if (Ty.isVector()) // TODO:
1976     return false;
1977 
1978   // Don't narrow further than the requested size.
1979   unsigned Size = Ty.getSizeInBits();
1980   if (Size <= TargetShiftSize)
1981     return false;
1982 
1983   auto MaybeImmVal =
1984       getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
1985   if (!MaybeImmVal)
1986     return false;
1987 
1988   ShiftVal = MaybeImmVal->Value.getSExtValue();
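  // Only shifts whose amount reaches into the upper half are profitable here;
  // e.g. an s64 shift by 40 can be rewritten as an unmerge plus an s32 shift
  // by 8.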
1989   return ShiftVal >= Size / 2 && ShiftVal < Size;
1990 }
1991 
1992 void CombinerHelper::applyCombineShiftToUnmerge(MachineInstr &MI,
1993                                                 const unsigned &ShiftVal) {
1994   Register DstReg = MI.getOperand(0).getReg();
1995   Register SrcReg = MI.getOperand(1).getReg();
1996   LLT Ty = MRI.getType(SrcReg);
1997   unsigned Size = Ty.getSizeInBits();
1998   unsigned HalfSize = Size / 2;
1999   assert(ShiftVal >= HalfSize);
2000 
2001   LLT HalfTy = LLT::scalar(HalfSize);
2002 
2003   Builder.setInstr(MI);
2004   auto Unmerge = Builder.buildUnmerge(HalfTy, SrcReg);
2005   unsigned NarrowShiftAmt = ShiftVal - HalfSize;
2006 
2007   if (MI.getOpcode() == TargetOpcode::G_LSHR) {
2008     Register Narrowed = Unmerge.getReg(1);
2009 
2010     //  dst = G_LSHR s64:x, C for C >= 32
2011     // =>
2012     //   lo, hi = G_UNMERGE_VALUES x
2013     //   dst = G_MERGE_VALUES (G_LSHR hi, C - 32), 0
2014 
2015     if (NarrowShiftAmt != 0) {
2016       Narrowed = Builder.buildLShr(HalfTy, Narrowed,
2017         Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
2018     }
2019 
2020     auto Zero = Builder.buildConstant(HalfTy, 0);
2021     Builder.buildMergeLikeInstr(DstReg, {Narrowed, Zero});
2022   } else if (MI.getOpcode() == TargetOpcode::G_SHL) {
2023     Register Narrowed = Unmerge.getReg(0);
2024     //  dst = G_SHL s64:x, C for C >= 32
2025     // =>
2026     //   lo, hi = G_UNMERGE_VALUES x
2027     //   dst = G_MERGE_VALUES 0, (G_SHL hi, C - 32)
2028     if (NarrowShiftAmt != 0) {
2029       Narrowed = Builder.buildShl(HalfTy, Narrowed,
2030         Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
2031     }
2032 
2033     auto Zero = Builder.buildConstant(HalfTy, 0);
2034     Builder.buildMergeLikeInstr(DstReg, {Zero, Narrowed});
2035   } else {
2036     assert(MI.getOpcode() == TargetOpcode::G_ASHR);
2037     auto Hi = Builder.buildAShr(
2038       HalfTy, Unmerge.getReg(1),
2039       Builder.buildConstant(HalfTy, HalfSize - 1));
2040 
2041     if (ShiftVal == HalfSize) {
2042       // (G_ASHR i64:x, 32) ->
2043       //   G_MERGE_VALUES hi_32(x), (G_ASHR hi_32(x), 31)
2044       Builder.buildMergeLikeInstr(DstReg, {Unmerge.getReg(1), Hi});
2045     } else if (ShiftVal == Size - 1) {
2046       // Don't need a second shift.
2047       // (G_ASHR i64:x, 63) ->
2048       //   %narrowed = (G_ASHR hi_32(x), 31)
2049       //   G_MERGE_VALUES %narrowed, %narrowed
2050       Builder.buildMergeLikeInstr(DstReg, {Hi, Hi});
2051     } else {
2052       auto Lo = Builder.buildAShr(
2053         HalfTy, Unmerge.getReg(1),
2054         Builder.buildConstant(HalfTy, ShiftVal - HalfSize));
2055 
2056       // (G_ASHR i64:x, C) ->, for C >= 32
2057       //   G_MERGE_VALUES (G_ASHR hi_32(x), C - 32), (G_ASHR hi_32(x), 31)
2058       Builder.buildMergeLikeInstr(DstReg, {Lo, Hi});
2059     }
2060   }
2061 
2062   MI.eraseFromParent();
2063 }
2064 
2065 bool CombinerHelper::tryCombineShiftToUnmerge(MachineInstr &MI,
2066                                               unsigned TargetShiftAmount) {
2067   unsigned ShiftAmt;
2068   if (matchCombineShiftToUnmerge(MI, TargetShiftAmount, ShiftAmt)) {
2069     applyCombineShiftToUnmerge(MI, ShiftAmt);
2070     return true;
2071   }
2072 
2073   return false;
2074 }
2075 
2076 bool CombinerHelper::matchCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
2077   assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
2078   Register DstReg = MI.getOperand(0).getReg();
2079   LLT DstTy = MRI.getType(DstReg);
2080   Register SrcReg = MI.getOperand(1).getReg();
2081   return mi_match(SrcReg, MRI,
2082                   m_GPtrToInt(m_all_of(m_SpecificType(DstTy), m_Reg(Reg))));
2083 }
2084 
2085 void CombinerHelper::applyCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
2086   assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
2087   Register DstReg = MI.getOperand(0).getReg();
2088   Builder.setInstr(MI);
2089   Builder.buildCopy(DstReg, Reg);
2090   MI.eraseFromParent();
2091 }
2092 
2093 void CombinerHelper::applyCombineP2IToI2P(MachineInstr &MI, Register &Reg) {
2094   assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");
2095   Register DstReg = MI.getOperand(0).getReg();
2096   Builder.setInstr(MI);
2097   Builder.buildZExtOrTrunc(DstReg, Reg);
2098   MI.eraseFromParent();
2099 }
2100 
2101 bool CombinerHelper::matchCombineAddP2IToPtrAdd(
2102     MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
2103   assert(MI.getOpcode() == TargetOpcode::G_ADD);
2104   Register LHS = MI.getOperand(1).getReg();
2105   Register RHS = MI.getOperand(2).getReg();
2106   LLT IntTy = MRI.getType(LHS);
2107 
2108   // G_PTR_ADD always has the pointer in the LHS, so we may need to commute the
2109   // instruction.
2110   PtrReg.second = false;
2111   for (Register SrcReg : {LHS, RHS}) {
2112     if (mi_match(SrcReg, MRI, m_GPtrToInt(m_Reg(PtrReg.first)))) {
2113       // Don't handle cases where the integer is implicitly converted to the
2114       // pointer width.
2115       LLT PtrTy = MRI.getType(PtrReg.first);
2116       if (PtrTy.getScalarSizeInBits() == IntTy.getScalarSizeInBits())
2117         return true;
2118     }
2119 
2120     PtrReg.second = true;
2121   }
2122 
2123   return false;
2124 }
2125 
2126 void CombinerHelper::applyCombineAddP2IToPtrAdd(
2127     MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
2128   Register Dst = MI.getOperand(0).getReg();
2129   Register LHS = MI.getOperand(1).getReg();
2130   Register RHS = MI.getOperand(2).getReg();
2131 
2132   const bool DoCommute = PtrReg.second;
2133   if (DoCommute)
2134     std::swap(LHS, RHS);
2135   LHS = PtrReg.first;
2136 
2137   LLT PtrTy = MRI.getType(LHS);
2138 
2139   Builder.setInstrAndDebugLoc(MI);
2140   auto PtrAdd = Builder.buildPtrAdd(PtrTy, LHS, RHS);
2141   Builder.buildPtrToInt(Dst, PtrAdd);
2142   MI.eraseFromParent();
2143 }
2144 
2145 bool CombinerHelper::matchCombineConstPtrAddToI2P(MachineInstr &MI,
2146                                                   APInt &NewCst) {
2147   auto &PtrAdd = cast<GPtrAdd>(MI);
2148   Register LHS = PtrAdd.getBaseReg();
2149   Register RHS = PtrAdd.getOffsetReg();
2150   MachineRegisterInfo &MRI = Builder.getMF().getRegInfo();
2151 
2152   if (auto RHSCst = getIConstantVRegVal(RHS, MRI)) {
2153     APInt Cst;
2154     if (mi_match(LHS, MRI, m_GIntToPtr(m_ICst(Cst)))) {
2155       auto DstTy = MRI.getType(PtrAdd.getReg(0));
2156       // G_INTTOPTR uses zero-extension
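      // For example, (G_PTR_ADD (G_INTTOPTR 0x100), 8) folds to the
      // constant 0x108.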
2157       NewCst = Cst.zextOrTrunc(DstTy.getSizeInBits());
2158       NewCst += RHSCst->sextOrTrunc(DstTy.getSizeInBits());
2159       return true;
2160     }
2161   }
2162 
2163   return false;
2164 }
2165 
2166 void CombinerHelper::applyCombineConstPtrAddToI2P(MachineInstr &MI,
2167                                                   APInt &NewCst) {
2168   auto &PtrAdd = cast<GPtrAdd>(MI);
2169   Register Dst = PtrAdd.getReg(0);
2170 
2171   Builder.setInstrAndDebugLoc(MI);
2172   Builder.buildConstant(Dst, NewCst);
2173   PtrAdd.eraseFromParent();
2174 }
2175 
2176 bool CombinerHelper::matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg) {
2177   assert(MI.getOpcode() == TargetOpcode::G_ANYEXT && "Expected a G_ANYEXT");
2178   Register DstReg = MI.getOperand(0).getReg();
2179   Register SrcReg = MI.getOperand(1).getReg();
2180   LLT DstTy = MRI.getType(DstReg);
2181   return mi_match(SrcReg, MRI,
2182                   m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))));
2183 }
2184 
2185 bool CombinerHelper::matchCombineZextTrunc(MachineInstr &MI, Register &Reg) {
2186   assert(MI.getOpcode() == TargetOpcode::G_ZEXT && "Expected a G_ZEXT");
2187   Register DstReg = MI.getOperand(0).getReg();
2188   Register SrcReg = MI.getOperand(1).getReg();
2189   LLT DstTy = MRI.getType(DstReg);
2190   if (mi_match(SrcReg, MRI,
2191                m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))))) {
2192     unsigned DstSize = DstTy.getScalarSizeInBits();
2193     unsigned SrcSize = MRI.getType(SrcReg).getScalarSizeInBits();
2194     return KB->getKnownBits(Reg).countMinLeadingZeros() >= DstSize - SrcSize;
2195   }
2196   return false;
2197 }
2198 
2199 bool CombinerHelper::matchCombineExtOfExt(
2200     MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
2201   assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2202           MI.getOpcode() == TargetOpcode::G_SEXT ||
2203           MI.getOpcode() == TargetOpcode::G_ZEXT) &&
2204          "Expected a G_[ASZ]EXT");
2205   Register SrcReg = MI.getOperand(1).getReg();
2206   MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
2207   // Match exts with the same opcode, anyext([sz]ext) and sext(zext).
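  // For example, anyext(zext x), zext(zext x), and sext(zext x) all fold to
  // zext x, while zext(sext x) does not fold.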
2208   unsigned Opc = MI.getOpcode();
2209   unsigned SrcOpc = SrcMI->getOpcode();
2210   if (Opc == SrcOpc ||
2211       (Opc == TargetOpcode::G_ANYEXT &&
2212        (SrcOpc == TargetOpcode::G_SEXT || SrcOpc == TargetOpcode::G_ZEXT)) ||
2213       (Opc == TargetOpcode::G_SEXT && SrcOpc == TargetOpcode::G_ZEXT)) {
2214     MatchInfo = std::make_tuple(SrcMI->getOperand(1).getReg(), SrcOpc);
2215     return true;
2216   }
2217   return false;
2218 }
2219 
2220 void CombinerHelper::applyCombineExtOfExt(
2221     MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
2222   assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2223           MI.getOpcode() == TargetOpcode::G_SEXT ||
2224           MI.getOpcode() == TargetOpcode::G_ZEXT) &&
2225          "Expected a G_[ASZ]EXT");
2226 
2227   Register Reg = std::get<0>(MatchInfo);
2228   unsigned SrcExtOp = std::get<1>(MatchInfo);
2229 
2230   // Combine exts with the same opcode.
2231   if (MI.getOpcode() == SrcExtOp) {
2232     Observer.changingInstr(MI);
2233     MI.getOperand(1).setReg(Reg);
2234     Observer.changedInstr(MI);
2235     return;
2236   }
2237 
2238   // Combine:
2239   // - anyext([sz]ext x) to [sz]ext x
2240   // - sext(zext x) to zext x
2241   if (MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2242       (MI.getOpcode() == TargetOpcode::G_SEXT &&
2243        SrcExtOp == TargetOpcode::G_ZEXT)) {
2244     Register DstReg = MI.getOperand(0).getReg();
2245     Builder.setInstrAndDebugLoc(MI);
2246     Builder.buildInstr(SrcExtOp, {DstReg}, {Reg});
2247     MI.eraseFromParent();
2248   }
2249 }
2250 
2251 void CombinerHelper::applyCombineMulByNegativeOne(MachineInstr &MI) {
2252   assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
2253   Register DstReg = MI.getOperand(0).getReg();
2254   Register SrcReg = MI.getOperand(1).getReg();
2255   LLT DstTy = MRI.getType(DstReg);
2256 
2257   Builder.setInstrAndDebugLoc(MI);
2258   Builder.buildSub(DstReg, Builder.buildConstant(DstTy, 0), SrcReg,
2259                    MI.getFlags());
2260   MI.eraseFromParent();
2261 }
2262 
2263 bool CombinerHelper::matchCombineFAbsOfFNeg(MachineInstr &MI,
2264                                             BuildFnTy &MatchInfo) {
2265   assert(MI.getOpcode() == TargetOpcode::G_FABS && "Expected a G_FABS");
2266   Register Src = MI.getOperand(1).getReg();
2267   Register NegSrc;
2268 
2269   if (!mi_match(Src, MRI, m_GFNeg(m_Reg(NegSrc))))
2270     return false;
2271 
2272   MatchInfo = [=, &MI](MachineIRBuilder &B) {
2273     Observer.changingInstr(MI);
2274     MI.getOperand(1).setReg(NegSrc);
2275     Observer.changedInstr(MI);
2276   };
2277   return true;
2278 }
2279 
2280 bool CombinerHelper::matchCombineTruncOfExt(
2281     MachineInstr &MI, std::pair<Register, unsigned> &MatchInfo) {
2282   assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2283   Register SrcReg = MI.getOperand(1).getReg();
2284   MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
2285   unsigned SrcOpc = SrcMI->getOpcode();
2286   if (SrcOpc == TargetOpcode::G_ANYEXT || SrcOpc == TargetOpcode::G_SEXT ||
2287       SrcOpc == TargetOpcode::G_ZEXT) {
2288     MatchInfo = std::make_pair(SrcMI->getOperand(1).getReg(), SrcOpc);
2289     return true;
2290   }
2291   return false;
2292 }
2293 
2294 void CombinerHelper::applyCombineTruncOfExt(
2295     MachineInstr &MI, std::pair<Register, unsigned> &MatchInfo) {
2296   assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2297   Register SrcReg = MatchInfo.first;
2298   unsigned SrcExtOp = MatchInfo.second;
2299   Register DstReg = MI.getOperand(0).getReg();
2300   LLT SrcTy = MRI.getType(SrcReg);
2301   LLT DstTy = MRI.getType(DstReg);
2302   if (SrcTy == DstTy) {
2303     MI.eraseFromParent();
2304     replaceRegWith(MRI, DstReg, SrcReg);
2305     return;
2306   }
2307   Builder.setInstrAndDebugLoc(MI);
2308   if (SrcTy.getSizeInBits() < DstTy.getSizeInBits())
2309     Builder.buildInstr(SrcExtOp, {DstReg}, {SrcReg});
2310   else
2311     Builder.buildTrunc(DstReg, SrcReg);
2312   MI.eraseFromParent();
2313 }
2314 
2315 static LLT getMidVTForTruncRightShiftCombine(LLT ShiftTy, LLT TruncTy) {
2316   const unsigned ShiftSize = ShiftTy.getScalarSizeInBits();
2317   const unsigned TruncSize = TruncTy.getScalarSizeInBits();
2318 
2319   // ShiftTy > 32 > TruncTy -> 32
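  // E.g. an s64 shift whose result is truncated to s16 is narrowed to s32.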
2320   if (ShiftSize > 32 && TruncSize < 32)
2321     return ShiftTy.changeElementSize(32);
2322 
2323   // TODO: We could also reduce to 16 bits, but that's more target-dependent.
2324   //  Some targets like it, some don't, some only like it under certain
2325   //  conditions/processor versions, etc.
2326   //  A TL hook might be needed for this.
2327 
2328   // Don't combine
2329   return ShiftTy;
2330 }
2331 
2332 bool CombinerHelper::matchCombineTruncOfShift(
2333     MachineInstr &MI, std::pair<MachineInstr *, LLT> &MatchInfo) {
2334   assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2335   Register DstReg = MI.getOperand(0).getReg();
2336   Register SrcReg = MI.getOperand(1).getReg();
2337 
2338   if (!MRI.hasOneNonDBGUse(SrcReg))
2339     return false;
2340 
2341   LLT SrcTy = MRI.getType(SrcReg);
2342   LLT DstTy = MRI.getType(DstReg);
2343 
2344   MachineInstr *SrcMI = getDefIgnoringCopies(SrcReg, MRI);
2345   const auto &TL = getTargetLowering();
2346 
2347   LLT NewShiftTy;
2348   switch (SrcMI->getOpcode()) {
2349   default:
2350     return false;
2351   case TargetOpcode::G_SHL: {
2352     NewShiftTy = DstTy;
2353 
2354     // Make sure new shift amount is legal.
2355     KnownBits Known = KB->getKnownBits(SrcMI->getOperand(2).getReg());
2356     if (Known.getMaxValue().uge(NewShiftTy.getScalarSizeInBits()))
2357       return false;
2358     break;
2359   }
2360   case TargetOpcode::G_LSHR:
2361   case TargetOpcode::G_ASHR: {
2362     // For right shifts, we conservatively do not do the transform if the TRUNC
2363     // has any STORE users. The reason is that if we change the type of the
2364     // shift, we may break the truncstore combine.
2365     //
2366     // TODO: Fix truncstore combine to handle (trunc(lshr (trunc x), k)).
2367     for (auto &User : MRI.use_instructions(DstReg))
2368       if (User.getOpcode() == TargetOpcode::G_STORE)
2369         return false;
2370 
2371     NewShiftTy = getMidVTForTruncRightShiftCombine(SrcTy, DstTy);
2372     if (NewShiftTy == SrcTy)
2373       return false;
2374 
2375     // Make sure we won't lose information by truncating the high bits.
2376     KnownBits Known = KB->getKnownBits(SrcMI->getOperand(2).getReg());
2377     if (Known.getMaxValue().ugt(NewShiftTy.getScalarSizeInBits() -
2378                                 DstTy.getScalarSizeInBits()))
2379       return false;
2380     break;
2381   }
2382   }
2383 
2384   if (!isLegalOrBeforeLegalizer(
2385           {SrcMI->getOpcode(),
2386            {NewShiftTy, TL.getPreferredShiftAmountTy(NewShiftTy)}}))
2387     return false;
2388 
2389   MatchInfo = std::make_pair(SrcMI, NewShiftTy);
2390   return true;
2391 }
2392 
2393 void CombinerHelper::applyCombineTruncOfShift(
2394     MachineInstr &MI, std::pair<MachineInstr *, LLT> &MatchInfo) {
2395   Builder.setInstrAndDebugLoc(MI);
2396 
2397   MachineInstr *ShiftMI = MatchInfo.first;
2398   LLT NewShiftTy = MatchInfo.second;
2399 
2400   Register Dst = MI.getOperand(0).getReg();
2401   LLT DstTy = MRI.getType(Dst);
2402 
2403   Register ShiftAmt = ShiftMI->getOperand(2).getReg();
2404   Register ShiftSrc = ShiftMI->getOperand(1).getReg();
2405   ShiftSrc = Builder.buildTrunc(NewShiftTy, ShiftSrc).getReg(0);
2406 
2407   Register NewShift =
2408       Builder
2409           .buildInstr(ShiftMI->getOpcode(), {NewShiftTy}, {ShiftSrc, ShiftAmt})
2410           .getReg(0);
2411 
2412   if (NewShiftTy == DstTy)
2413     replaceRegWith(MRI, Dst, NewShift);
2414   else
2415     Builder.buildTrunc(Dst, NewShift);
2416 
2417   eraseInst(MI);
2418 }
2419 
2420 bool CombinerHelper::matchAnyExplicitUseIsUndef(MachineInstr &MI) {
2421   return any_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
2422     return MO.isReg() &&
2423            getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2424   });
2425 }
2426 
2427 bool CombinerHelper::matchAllExplicitUsesAreUndef(MachineInstr &MI) {
2428   return all_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
2429     return !MO.isReg() ||
2430            getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2431   });
2432 }
2433 
2434 bool CombinerHelper::matchUndefShuffleVectorMask(MachineInstr &MI) {
2435   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
2436   ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
2437   return all_of(Mask, [](int Elt) { return Elt < 0; });
2438 }
2439 
2440 bool CombinerHelper::matchUndefStore(MachineInstr &MI) {
2441   assert(MI.getOpcode() == TargetOpcode::G_STORE);
2442   return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(0).getReg(),
2443                       MRI);
2444 }
2445 
2446 bool CombinerHelper::matchUndefSelectCmp(MachineInstr &MI) {
2447   assert(MI.getOpcode() == TargetOpcode::G_SELECT);
2448   return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(1).getReg(),
2449                       MRI);
2450 }
2451 
2452 bool CombinerHelper::matchInsertExtractVecEltOutOfBounds(MachineInstr &MI) {
2453   assert((MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT ||
2454           MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT) &&
2455          "Expected an insert/extract element op");
2456   LLT VecTy = MRI.getType(MI.getOperand(1).getReg());
2457   unsigned IdxIdx =
2458       MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT ? 2 : 3;
2459   auto Idx = getIConstantVRegVal(MI.getOperand(IdxIdx).getReg(), MRI);
2460   if (!Idx)
2461     return false;
2462   return Idx->getZExtValue() >= VecTy.getNumElements();
2463 }
2464 
2465 bool CombinerHelper::matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) {
2466   GSelect &SelMI = cast<GSelect>(MI);
2467   auto Cst =
2468       isConstantOrConstantSplatVector(*MRI.getVRegDef(SelMI.getCondReg()), MRI);
2469   if (!Cst)
2470     return false;
2471   OpIdx = Cst->isZero() ? 3 : 2;
2472   return true;
2473 }
2474 
2475 void CombinerHelper::eraseInst(MachineInstr &MI) { MI.eraseFromParent(); }
2476 
2477 bool CombinerHelper::matchEqualDefs(const MachineOperand &MOP1,
2478                                     const MachineOperand &MOP2) {
2479   if (!MOP1.isReg() || !MOP2.isReg())
2480     return false;
2481   auto InstAndDef1 = getDefSrcRegIgnoringCopies(MOP1.getReg(), MRI);
2482   if (!InstAndDef1)
2483     return false;
2484   auto InstAndDef2 = getDefSrcRegIgnoringCopies(MOP2.getReg(), MRI);
2485   if (!InstAndDef2)
2486     return false;
2487   MachineInstr *I1 = InstAndDef1->MI;
2488   MachineInstr *I2 = InstAndDef2->MI;
2489 
2490   // Handle a case like this:
2491   //
2492   // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<2 x s64>)
2493   //
2494   // Even though %0 and %1 are produced by the same instruction they are not
2495   // the same values.
2496   if (I1 == I2)
2497     return MOP1.getReg() == MOP2.getReg();
2498 
2499   // If we have an instruction which loads or stores, we can't guarantee that
2500   // it is identical.
2501   //
2502   // For example, we may have
2503   //
2504   // %x1 = G_LOAD %addr (load N from @somewhere)
2505   // ...
2506   // call @foo
2507   // ...
2508   // %x2 = G_LOAD %addr (load N from @somewhere)
2509   // ...
2510   // %or = G_OR %x1, %x2
2511   //
2512   // It's possible that @foo will modify whatever lives at the address we're
2513   // loading from. To be safe, let's just assume that all loads and stores
  // are different (unless we have something which is guaranteed not to
  // change).
2516   if (I1->mayLoadOrStore() && !I1->isDereferenceableInvariantLoad())
2517     return false;
2518 
2519   // If both instructions are loads or stores, they are equal only if both
2520   // are dereferenceable invariant loads with the same number of bits.
2521   if (I1->mayLoadOrStore() && I2->mayLoadOrStore()) {
2522     GLoadStore *LS1 = dyn_cast<GLoadStore>(I1);
2523     GLoadStore *LS2 = dyn_cast<GLoadStore>(I2);
2524     if (!LS1 || !LS2)
2525       return false;
2526 
2527     if (!I2->isDereferenceableInvariantLoad() ||
2528         (LS1->getMemSizeInBits() != LS2->getMemSizeInBits()))
2529       return false;
2530   }
2531 
2532   // Check for physical registers on the instructions first to avoid cases
2533   // like this:
2534   //
2535   // %a = COPY $physreg
2536   // ...
2537   // SOMETHING implicit-def $physreg
2538   // ...
2539   // %b = COPY $physreg
2540   //
2541   // These copies are not equivalent.
2542   if (any_of(I1->uses(), [](const MachineOperand &MO) {
2543         return MO.isReg() && MO.getReg().isPhysical();
2544       })) {
2545     // Check if we have a case like this:
2546     //
2547     // %a = COPY $physreg
2548     // %b = COPY %a
2549     //
2550     // In this case, I1 and I2 will both be equal to %a = COPY $physreg.
2551     // From that, we know that they must have the same value, since they must
2552     // have come from the same COPY.
2553     return I1->isIdenticalTo(*I2);
2554   }
2555 
2556   // We don't have any physical registers, so we don't necessarily need the
2557   // same vreg defs.
2558   //
2559   // On the off-chance that there's some target instruction feeding into the
2560   // instruction, let's use produceSameValue instead of isIdenticalTo.
2561   if (Builder.getTII().produceSameValue(*I1, *I2, &MRI)) {
    // Handle instructions with multiple defs that produce the same values.
    // Values are the same for operands with the same index.
    // %0:_(s8), %1:_(s8), %2:_(s8), %3:_(s8) = G_UNMERGE_VALUES %4:_(<4 x s8>)
    // %5:_(s8), %6:_(s8), %7:_(s8), %8:_(s8) = G_UNMERGE_VALUES %4:_(<4 x s8>)
    // I1 and I2 are different instructions but produce the same values;
    // %1 and %6 are the same value, while %1 and %7 are not.
2568     return I1->findRegisterDefOperandIdx(InstAndDef1->Reg) ==
2569            I2->findRegisterDefOperandIdx(InstAndDef2->Reg);
2570   }
2571   return false;
2572 }
2573 
2574 bool CombinerHelper::matchConstantOp(const MachineOperand &MOP, int64_t C) {
2575   if (!MOP.isReg())
2576     return false;
2577   auto *MI = MRI.getVRegDef(MOP.getReg());
2578   auto MaybeCst = isConstantOrConstantSplatVector(*MI, MRI);
2579   return MaybeCst && MaybeCst->getBitWidth() <= 64 &&
2580          MaybeCst->getSExtValue() == C;
2581 }
2582 
2583 void CombinerHelper::replaceSingleDefInstWithOperand(MachineInstr &MI,
2584                                                      unsigned OpIdx) {
2585   assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
2586   Register OldReg = MI.getOperand(0).getReg();
2587   Register Replacement = MI.getOperand(OpIdx).getReg();
2588   assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
2589   MI.eraseFromParent();
2590   replaceRegWith(MRI, OldReg, Replacement);
2591 }
2592 
2593 void CombinerHelper::replaceSingleDefInstWithReg(MachineInstr &MI,
2594                                                  Register Replacement) {
2595   assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
2596   Register OldReg = MI.getOperand(0).getReg();
2597   assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
2598   MI.eraseFromParent();
2599   replaceRegWith(MRI, OldReg, Replacement);
2600 }
2601 
2602 bool CombinerHelper::matchSelectSameVal(MachineInstr &MI) {
2603   assert(MI.getOpcode() == TargetOpcode::G_SELECT);
2604   // Match (cond ? x : x)
2605   return matchEqualDefs(MI.getOperand(2), MI.getOperand(3)) &&
2606          canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(2).getReg(),
2607                        MRI);
2608 }
2609 
2610 bool CombinerHelper::matchBinOpSameVal(MachineInstr &MI) {
2611   return matchEqualDefs(MI.getOperand(1), MI.getOperand(2)) &&
2612          canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
2613                        MRI);
2614 }
2615 
2616 bool CombinerHelper::matchOperandIsZero(MachineInstr &MI, unsigned OpIdx) {
2617   return matchConstantOp(MI.getOperand(OpIdx), 0) &&
2618          canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(OpIdx).getReg(),
2619                        MRI);
2620 }
2621 
2622 bool CombinerHelper::matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx) {
2623   MachineOperand &MO = MI.getOperand(OpIdx);
2624   return MO.isReg() &&
2625          getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2626 }
2627 
2628 bool CombinerHelper::matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI,
2629                                                         unsigned OpIdx) {
2630   MachineOperand &MO = MI.getOperand(OpIdx);
2631   return isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB);
2632 }
2633 
2634 void CombinerHelper::replaceInstWithFConstant(MachineInstr &MI, double C) {
2635   assert(MI.getNumDefs() == 1 && "Expected only one def?");
2636   Builder.setInstr(MI);
2637   Builder.buildFConstant(MI.getOperand(0), C);
2638   MI.eraseFromParent();
2639 }
2640 
2641 void CombinerHelper::replaceInstWithConstant(MachineInstr &MI, int64_t C) {
2642   assert(MI.getNumDefs() == 1 && "Expected only one def?");
2643   Builder.setInstr(MI);
2644   Builder.buildConstant(MI.getOperand(0), C);
2645   MI.eraseFromParent();
2646 }
2647 
2648 void CombinerHelper::replaceInstWithConstant(MachineInstr &MI, APInt C) {
2649   assert(MI.getNumDefs() == 1 && "Expected only one def?");
2650   Builder.setInstr(MI);
2651   Builder.buildConstant(MI.getOperand(0), C);
2652   MI.eraseFromParent();
2653 }
2654 
2655 void CombinerHelper::replaceInstWithUndef(MachineInstr &MI) {
2656   assert(MI.getNumDefs() == 1 && "Expected only one def?");
2657   Builder.setInstr(MI);
2658   Builder.buildUndef(MI.getOperand(0));
2659   MI.eraseFromParent();
2660 }
2661 
2662 bool CombinerHelper::matchSimplifyAddToSub(
2663     MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
2664   Register LHS = MI.getOperand(1).getReg();
2665   Register RHS = MI.getOperand(2).getReg();
2666   Register &NewLHS = std::get<0>(MatchInfo);
2667   Register &NewRHS = std::get<1>(MatchInfo);
2668 
2669   // Helper lambda to check for opportunities for
2670   // ((0-A) + B) -> B - A
2671   // (A + (0-B)) -> A - B
2672   auto CheckFold = [&](Register &MaybeSub, Register &MaybeNewLHS) {
2673     if (!mi_match(MaybeSub, MRI, m_Neg(m_Reg(NewRHS))))
2674       return false;
2675     NewLHS = MaybeNewLHS;
2676     return true;
2677   };
2678 
2679   return CheckFold(LHS, RHS) || CheckFold(RHS, LHS);
2680 }
2681 
2682 bool CombinerHelper::matchCombineInsertVecElts(
2683     MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) {
2684   assert(MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT &&
2685          "Invalid opcode");
2686   Register DstReg = MI.getOperand(0).getReg();
2687   LLT DstTy = MRI.getType(DstReg);
2688   assert(DstTy.isVector() && "Invalid G_INSERT_VECTOR_ELT?");
2689   unsigned NumElts = DstTy.getNumElements();
2690   // If this MI is part of a sequence of insert_vec_elts, then
2691   // don't do the combine in the middle of the sequence.
2692   if (MRI.hasOneUse(DstReg) && MRI.use_instr_begin(DstReg)->getOpcode() ==
2693                                    TargetOpcode::G_INSERT_VECTOR_ELT)
2694     return false;
2695   MachineInstr *CurrInst = &MI;
2696   MachineInstr *TmpInst;
2697   int64_t IntImm;
2698   Register TmpReg;
2699   MatchInfo.resize(NumElts);
2700   while (mi_match(
2701       CurrInst->getOperand(0).getReg(), MRI,
2702       m_GInsertVecElt(m_MInstr(TmpInst), m_Reg(TmpReg), m_ICst(IntImm)))) {
2703     if (IntImm >= NumElts || IntImm < 0)
2704       return false;
2705     if (!MatchInfo[IntImm])
2706       MatchInfo[IntImm] = TmpReg;
2707     CurrInst = TmpInst;
2708   }
2709   // Variable index.
2710   if (CurrInst->getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT)
2711     return false;
2712   if (TmpInst->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
2713     for (unsigned I = 1; I < TmpInst->getNumOperands(); ++I) {
2714       if (!MatchInfo[I - 1].isValid())
2715         MatchInfo[I - 1] = TmpInst->getOperand(I).getReg();
2716     }
2717     return true;
2718   }
2719   // If we didn't end in a G_IMPLICIT_DEF, bail out.
2720   return TmpInst->getOpcode() == TargetOpcode::G_IMPLICIT_DEF;
2721 }
2722 
2723 void CombinerHelper::applyCombineInsertVecElts(
2724     MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) {
2725   Builder.setInstr(MI);
2726   Register UndefReg;
2727   auto GetUndef = [&]() {
2728     if (UndefReg)
2729       return UndefReg;
2730     LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
2731     UndefReg = Builder.buildUndef(DstTy.getScalarType()).getReg(0);
2732     return UndefReg;
2733   };
2734   for (unsigned I = 0; I < MatchInfo.size(); ++I) {
2735     if (!MatchInfo[I])
2736       MatchInfo[I] = GetUndef();
2737   }
2738   Builder.buildBuildVector(MI.getOperand(0).getReg(), MatchInfo);
2739   MI.eraseFromParent();
2740 }
2741 
2742 void CombinerHelper::applySimplifyAddToSub(
2743     MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
2744   Builder.setInstr(MI);
2745   Register SubLHS, SubRHS;
2746   std::tie(SubLHS, SubRHS) = MatchInfo;
2747   Builder.buildSub(MI.getOperand(0).getReg(), SubLHS, SubRHS);
2748   MI.eraseFromParent();
2749 }
2750 
2751 bool CombinerHelper::matchHoistLogicOpWithSameOpcodeHands(
2752     MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) {
  // Matches: logic (hand x, ...), (hand y, ...) -> hand (logic x, y), ...
  //
  // Creates the new hand + logic instructions (but does not insert them).
2756   //
2757   // On success, MatchInfo is populated with the new instructions. These are
2758   // inserted in applyHoistLogicOpWithSameOpcodeHands.
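  //
  // For example, (G_AND (G_LSHR x, z), (G_LSHR y, z)) becomes
  // (G_LSHR (G_AND x, y), z).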
2759   unsigned LogicOpcode = MI.getOpcode();
2760   assert(LogicOpcode == TargetOpcode::G_AND ||
2761          LogicOpcode == TargetOpcode::G_OR ||
2762          LogicOpcode == TargetOpcode::G_XOR);
2763   MachineIRBuilder MIB(MI);
2764   Register Dst = MI.getOperand(0).getReg();
2765   Register LHSReg = MI.getOperand(1).getReg();
2766   Register RHSReg = MI.getOperand(2).getReg();
2767 
2768   // Don't recompute anything.
2769   if (!MRI.hasOneNonDBGUse(LHSReg) || !MRI.hasOneNonDBGUse(RHSReg))
2770     return false;
2771 
2772   // Make sure we have (hand x, ...), (hand y, ...)
2773   MachineInstr *LeftHandInst = getDefIgnoringCopies(LHSReg, MRI);
2774   MachineInstr *RightHandInst = getDefIgnoringCopies(RHSReg, MRI);
2775   if (!LeftHandInst || !RightHandInst)
2776     return false;
2777   unsigned HandOpcode = LeftHandInst->getOpcode();
2778   if (HandOpcode != RightHandInst->getOpcode())
2779     return false;
2780   if (!LeftHandInst->getOperand(1).isReg() ||
2781       !RightHandInst->getOperand(1).isReg())
2782     return false;
2783 
2784   // Make sure the types match up, and if we're doing this post-legalization,
2785   // we end up with legal types.
2786   Register X = LeftHandInst->getOperand(1).getReg();
2787   Register Y = RightHandInst->getOperand(1).getReg();
2788   LLT XTy = MRI.getType(X);
2789   LLT YTy = MRI.getType(Y);
2790   if (!XTy.isValid() || XTy != YTy)
2791     return false;
2792 
2793   // Optional extra source register.
2794   Register ExtraHandOpSrcReg;
2795   switch (HandOpcode) {
2796   default:
2797     return false;
2798   case TargetOpcode::G_ANYEXT:
2799   case TargetOpcode::G_SEXT:
2800   case TargetOpcode::G_ZEXT: {
2801     // Match: logic (ext X), (ext Y) --> ext (logic X, Y)
2802     break;
2803   }
2804   case TargetOpcode::G_AND:
2805   case TargetOpcode::G_ASHR:
2806   case TargetOpcode::G_LSHR:
2807   case TargetOpcode::G_SHL: {
2808     // Match: logic (binop x, z), (binop y, z) -> binop (logic x, y), z
2809     MachineOperand &ZOp = LeftHandInst->getOperand(2);
2810     if (!matchEqualDefs(ZOp, RightHandInst->getOperand(2)))
2811       return false;
2812     ExtraHandOpSrcReg = ZOp.getReg();
2813     break;
2814   }
2815   }
2816 
2817   if (!isLegalOrBeforeLegalizer({LogicOpcode, {XTy, YTy}}))
2818     return false;
2819 
2820   // Record the steps to build the new instructions.
2821   //
2822   // Steps to build (logic x, y)
2823   auto NewLogicDst = MRI.createGenericVirtualRegister(XTy);
2824   OperandBuildSteps LogicBuildSteps = {
2825       [=](MachineInstrBuilder &MIB) { MIB.addDef(NewLogicDst); },
2826       [=](MachineInstrBuilder &MIB) { MIB.addReg(X); },
2827       [=](MachineInstrBuilder &MIB) { MIB.addReg(Y); }};
2828   InstructionBuildSteps LogicSteps(LogicOpcode, LogicBuildSteps);
2829 
2830   // Steps to build hand (logic x, y), ...z
2831   OperandBuildSteps HandBuildSteps = {
2832       [=](MachineInstrBuilder &MIB) { MIB.addDef(Dst); },
2833       [=](MachineInstrBuilder &MIB) { MIB.addReg(NewLogicDst); }};
2834   if (ExtraHandOpSrcReg.isValid())
2835     HandBuildSteps.push_back(
2836         [=](MachineInstrBuilder &MIB) { MIB.addReg(ExtraHandOpSrcReg); });
2837   InstructionBuildSteps HandSteps(HandOpcode, HandBuildSteps);
2838 
2839   MatchInfo = InstructionStepsMatchInfo({LogicSteps, HandSteps});
2840   return true;
2841 }
2842 
2843 void CombinerHelper::applyBuildInstructionSteps(
2844     MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) {
2845   assert(MatchInfo.InstrsToBuild.size() &&
2846          "Expected at least one instr to build?");
2847   Builder.setInstr(MI);
2848   for (auto &InstrToBuild : MatchInfo.InstrsToBuild) {
2849     assert(InstrToBuild.Opcode && "Expected a valid opcode?");
2850     assert(InstrToBuild.OperandFns.size() && "Expected at least one operand?");
2851     MachineInstrBuilder Instr = Builder.buildInstr(InstrToBuild.Opcode);
2852     for (auto &OperandFn : InstrToBuild.OperandFns)
2853       OperandFn(Instr);
2854   }
2855   MI.eraseFromParent();
2856 }
2857 
2858 bool CombinerHelper::matchAshrShlToSextInreg(
2859     MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
2860   assert(MI.getOpcode() == TargetOpcode::G_ASHR);
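  // Fold (ashr (shl x, C), C) -> (sext_inreg x, Width - C). E.g. for s32:
  //   %shl:_(s32) = G_SHL %x, 24
  //   %ashr:_(s32) = G_ASHR %shl, 24
  // becomes:
  //   %ashr:_(s32) = G_SEXT_INREG %x, 8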
2861   int64_t ShlCst, AshrCst;
2862   Register Src;
2863   if (!mi_match(MI.getOperand(0).getReg(), MRI,
2864                 m_GAShr(m_GShl(m_Reg(Src), m_ICstOrSplat(ShlCst)),
2865                         m_ICstOrSplat(AshrCst))))
2866     return false;
2867   if (ShlCst != AshrCst)
2868     return false;
2869   if (!isLegalOrBeforeLegalizer(
2870           {TargetOpcode::G_SEXT_INREG, {MRI.getType(Src)}}))
2871     return false;
2872   MatchInfo = std::make_tuple(Src, ShlCst);
2873   return true;
2874 }
2875 
2876 void CombinerHelper::applyAshShlToSextInreg(
2877     MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
2878   assert(MI.getOpcode() == TargetOpcode::G_ASHR);
2879   Register Src;
2880   int64_t ShiftAmt;
2881   std::tie(Src, ShiftAmt) = MatchInfo;
2882   unsigned Size = MRI.getType(Src).getScalarSizeInBits();
2883   Builder.setInstrAndDebugLoc(MI);
2884   Builder.buildSExtInReg(MI.getOperand(0).getReg(), Src, Size - ShiftAmt);
2885   MI.eraseFromParent();
2886 }
2887 
2888 /// and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
2889 bool CombinerHelper::matchOverlappingAnd(
2890     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
2891   assert(MI.getOpcode() == TargetOpcode::G_AND);
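  // E.g. (illustrative s32 constants):
  //   and (and x, 0xFF00), 0x0FF0 -> and x, 0x0F00
  //   and (and x, 0xFF00), 0x00FF -> 0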
2892 
2893   Register Dst = MI.getOperand(0).getReg();
2894   LLT Ty = MRI.getType(Dst);
2895 
2896   Register R;
2897   int64_t C1;
2898   int64_t C2;
2899   if (!mi_match(
2900           Dst, MRI,
2901           m_GAnd(m_GAnd(m_Reg(R), m_ICst(C1)), m_ICst(C2))))
2902     return false;
2903 
2904   MatchInfo = [=](MachineIRBuilder &B) {
2905     if (C1 & C2) {
2906       B.buildAnd(Dst, R, B.buildConstant(Ty, C1 & C2));
2907       return;
2908     }
2909     auto Zero = B.buildConstant(Ty, 0);
2910     replaceRegWith(MRI, Dst, Zero->getOperand(0).getReg());
2911   };
2912   return true;
2913 }
2914 
2915 bool CombinerHelper::matchRedundantAnd(MachineInstr &MI,
2916                                        Register &Replacement) {
2917   // Given
2918   //
2919   // %y:_(sN) = G_SOMETHING
2920   // %x:_(sN) = G_SOMETHING
2921   // %res:_(sN) = G_AND %x, %y
2922   //
2923   // Eliminate the G_AND when it is known that x & y == x or x & y == y.
2924   //
2925   // Patterns like this can appear as a result of legalization. E.g.
2926   //
2927   // %cmp:_(s32) = G_ICMP intpred(pred), %x(s32), %y
2928   // %one:_(s32) = G_CONSTANT i32 1
2929   // %and:_(s32) = G_AND %cmp, %one
2930   //
2931   // In this case, G_ICMP only produces a single bit, so x & 1 == x.
2932   assert(MI.getOpcode() == TargetOpcode::G_AND);
2933   if (!KB)
2934     return false;
2935 
2936   Register AndDst = MI.getOperand(0).getReg();
2937   Register LHS = MI.getOperand(1).getReg();
2938   Register RHS = MI.getOperand(2).getReg();
2939   KnownBits LHSBits = KB->getKnownBits(LHS);
2940   KnownBits RHSBits = KB->getKnownBits(RHS);
2941 
2942   // Check that x & Mask == x.
2943   // x & 1 == x, always
2944   // x & 0 == x, only if x is also 0
2945   // Meaning Mask has no effect if every bit is either one in Mask or zero in x.
2946   //
2947   // Check if we can replace AndDst with the LHS of the G_AND
2948   if (canReplaceReg(AndDst, LHS, MRI) &&
2949       (LHSBits.Zero | RHSBits.One).isAllOnes()) {
2950     Replacement = LHS;
2951     return true;
2952   }
2953 
2954   // Check if we can replace AndDst with the RHS of the G_AND
2955   if (canReplaceReg(AndDst, RHS, MRI) &&
2956       (LHSBits.One | RHSBits.Zero).isAllOnes()) {
2957     Replacement = RHS;
2958     return true;
2959   }
2960 
2961   return false;
2962 }
2963 
2964 bool CombinerHelper::matchRedundantOr(MachineInstr &MI, Register &Replacement) {
2965   // Given
2966   //
2967   // %y:_(sN) = G_SOMETHING
2968   // %x:_(sN) = G_SOMETHING
2969   // %res:_(sN) = G_OR %x, %y
2970   //
2971   // Eliminate the G_OR when it is known that x | y == x or x | y == y.
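  //
  // E.g. if %y is known to be all zeros, then %res can simply be replaced
  // with %x.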
2972   assert(MI.getOpcode() == TargetOpcode::G_OR);
2973   if (!KB)
2974     return false;
2975 
2976   Register OrDst = MI.getOperand(0).getReg();
2977   Register LHS = MI.getOperand(1).getReg();
2978   Register RHS = MI.getOperand(2).getReg();
2979   KnownBits LHSBits = KB->getKnownBits(LHS);
2980   KnownBits RHSBits = KB->getKnownBits(RHS);
2981 
2982   // Check that x | Mask == x.
2983   // x | 0 == x, always
2984   // x | 1 == x, only if x is also 1
2985   // Meaning Mask has no effect if every bit is either zero in Mask or one in x.
2986   //
2987   // Check if we can replace OrDst with the LHS of the G_OR
2988   if (canReplaceReg(OrDst, LHS, MRI) &&
2989       (LHSBits.One | RHSBits.Zero).isAllOnes()) {
2990     Replacement = LHS;
2991     return true;
2992   }
2993 
2994   // Check if we can replace OrDst with the RHS of the G_OR
2995   if (canReplaceReg(OrDst, RHS, MRI) &&
2996       (LHSBits.Zero | RHSBits.One).isAllOnes()) {
2997     Replacement = RHS;
2998     return true;
2999   }
3000 
3001   return false;
3002 }
3003 
3004 bool CombinerHelper::matchRedundantSExtInReg(MachineInstr &MI) {
3005   // If the input is already sign extended, just drop the extension.
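  // E.g. for s32 with ExtBits == 16, the combine applies if the source has at
  // least 32 - 16 + 1 == 17 sign bits, as a G_SEXT from s8 (25 sign bits)
  // would.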
3006   Register Src = MI.getOperand(1).getReg();
3007   unsigned ExtBits = MI.getOperand(2).getImm();
3008   unsigned TypeSize = MRI.getType(Src).getScalarSizeInBits();
3009   return KB->computeNumSignBits(Src) >= (TypeSize - ExtBits + 1);
3010 }
3011 
3012 static bool isConstValidTrue(const TargetLowering &TLI, unsigned ScalarSizeBits,
3013                              int64_t Cst, bool IsVector, bool IsFP) {
3014   // For i1, Cst will always be -1 regardless of boolean contents.
3015   return (ScalarSizeBits == 1 && Cst == -1) ||
3016          isConstTrueVal(TLI, Cst, IsVector, IsFP);
3017 }
3018 
3019 bool CombinerHelper::matchNotCmp(MachineInstr &MI,
3020                                  SmallVectorImpl<Register> &RegsToNegate) {
3021   assert(MI.getOpcode() == TargetOpcode::G_XOR);
3022   LLT Ty = MRI.getType(MI.getOperand(0).getReg());
3023   const auto &TLI = *Builder.getMF().getSubtarget().getTargetLowering();
3024   Register XorSrc;
3025   Register CstReg;
3026   // We match xor(src, true) here.
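  // E.g. (illustrative, for s1, where true is represented as -1):
  //   %cmp:_(s1) = G_ICMP intpred(eq), %a, %b
  //   %true:_(s1) = G_CONSTANT i1 true
  //   %not:_(s1) = G_XOR %cmp, %true
  // can be rewritten as:
  //   %not:_(s1) = G_ICMP intpred(ne), %a, %b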
3027   if (!mi_match(MI.getOperand(0).getReg(), MRI,
3028                 m_GXor(m_Reg(XorSrc), m_Reg(CstReg))))
3029     return false;
3030 
3031   if (!MRI.hasOneNonDBGUse(XorSrc))
3032     return false;
3033 
3034   // Check that XorSrc is the root of a tree of comparisons combined with ANDs
3035   // and ORs. The suffix of RegsToNegate starting from index I is used as a
3036   // work list of tree nodes to visit.
3037   RegsToNegate.push_back(XorSrc);
3038   // Remember whether the comparisons are all integer or all floating point.
3039   bool IsInt = false;
3040   bool IsFP = false;
3041   for (unsigned I = 0; I < RegsToNegate.size(); ++I) {
3042     Register Reg = RegsToNegate[I];
3043     if (!MRI.hasOneNonDBGUse(Reg))
3044       return false;
3045     MachineInstr *Def = MRI.getVRegDef(Reg);
3046     switch (Def->getOpcode()) {
3047     default:
3048       // Don't match if the tree contains anything other than ANDs, ORs and
3049       // comparisons.
3050       return false;
3051     case TargetOpcode::G_ICMP:
3052       if (IsFP)
3053         return false;
3054       IsInt = true;
3055       // When we apply the combine we will invert the predicate.
3056       break;
3057     case TargetOpcode::G_FCMP:
3058       if (IsInt)
3059         return false;
3060       IsFP = true;
3061       // When we apply the combine we will invert the predicate.
3062       break;
3063     case TargetOpcode::G_AND:
3064     case TargetOpcode::G_OR:
3065       // Implement De Morgan's laws:
3066       // ~(x & y) -> ~x | ~y
3067       // ~(x | y) -> ~x & ~y
3068       // When we apply the combine we will change the opcode and recursively
3069       // negate the operands.
3070       RegsToNegate.push_back(Def->getOperand(1).getReg());
3071       RegsToNegate.push_back(Def->getOperand(2).getReg());
3072       break;
3073     }
3074   }
3075 
3076   // Now we know whether the comparisons are integer or floating point, check
3077   // the constant in the xor.
3078   int64_t Cst;
3079   if (Ty.isVector()) {
3080     MachineInstr *CstDef = MRI.getVRegDef(CstReg);
3081     auto MaybeCst = getIConstantSplatSExtVal(*CstDef, MRI);
3082     if (!MaybeCst)
3083       return false;
3084     if (!isConstValidTrue(TLI, Ty.getScalarSizeInBits(), *MaybeCst, true, IsFP))
3085       return false;
3086   } else {
3087     if (!mi_match(CstReg, MRI, m_ICst(Cst)))
3088       return false;
3089     if (!isConstValidTrue(TLI, Ty.getSizeInBits(), Cst, false, IsFP))
3090       return false;
3091   }
3092 
3093   return true;
3094 }
3095 
3096 void CombinerHelper::applyNotCmp(MachineInstr &MI,
3097                                  SmallVectorImpl<Register> &RegsToNegate) {
3098   for (Register Reg : RegsToNegate) {
3099     MachineInstr *Def = MRI.getVRegDef(Reg);
3100     Observer.changingInstr(*Def);
3101     // For each comparison, invert the opcode. For each AND and OR, change the
3102     // opcode.
3103     switch (Def->getOpcode()) {
3104     default:
3105       llvm_unreachable("Unexpected opcode");
3106     case TargetOpcode::G_ICMP:
3107     case TargetOpcode::G_FCMP: {
3108       MachineOperand &PredOp = Def->getOperand(1);
3109       CmpInst::Predicate NewP = CmpInst::getInversePredicate(
3110           (CmpInst::Predicate)PredOp.getPredicate());
3111       PredOp.setPredicate(NewP);
3112       break;
3113     }
3114     case TargetOpcode::G_AND:
3115       Def->setDesc(Builder.getTII().get(TargetOpcode::G_OR));
3116       break;
3117     case TargetOpcode::G_OR:
3118       Def->setDesc(Builder.getTII().get(TargetOpcode::G_AND));
3119       break;
3120     }
3121     Observer.changedInstr(*Def);
3122   }
3123 
3124   replaceRegWith(MRI, MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
3125   MI.eraseFromParent();
3126 }
3127 
3128 bool CombinerHelper::matchXorOfAndWithSameReg(
3129     MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
3130   // Match (xor (and x, y), y) (or any of its commuted cases)
3131   assert(MI.getOpcode() == TargetOpcode::G_XOR);
3132   Register &X = MatchInfo.first;
3133   Register &Y = MatchInfo.second;
3134   Register AndReg = MI.getOperand(1).getReg();
3135   Register SharedReg = MI.getOperand(2).getReg();
3136 
3137   // Find a G_AND on either side of the G_XOR.
3138   // Look for one of
3139   //
3140   // (xor (and x, y), SharedReg)
3141   // (xor SharedReg, (and x, y))
3142   if (!mi_match(AndReg, MRI, m_GAnd(m_Reg(X), m_Reg(Y)))) {
3143     std::swap(AndReg, SharedReg);
3144     if (!mi_match(AndReg, MRI, m_GAnd(m_Reg(X), m_Reg(Y))))
3145       return false;
3146   }
3147 
3148   // Only do this if we'll eliminate the G_AND.
3149   if (!MRI.hasOneNonDBGUse(AndReg))
3150     return false;
3151 
3152   // We can combine if SharedReg is the same as either the LHS or RHS of the
3153   // G_AND.
3154   if (Y != SharedReg)
3155     std::swap(X, Y);
3156   return Y == SharedReg;
3157 }
3158 
3159 void CombinerHelper::applyXorOfAndWithSameReg(
3160     MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
3161   // Fold (xor (and x, y), y) -> (and (not x), y)
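  // This holds because (x & y) ^ y is 0 wherever y is 0, and is x ^ 1 == ~x
  // wherever y is 1; i.e. exactly (~x) & y.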
3162   Builder.setInstrAndDebugLoc(MI);
3163   Register X, Y;
3164   std::tie(X, Y) = MatchInfo;
3165   auto Not = Builder.buildNot(MRI.getType(X), X);
3166   Observer.changingInstr(MI);
3167   MI.setDesc(Builder.getTII().get(TargetOpcode::G_AND));
3168   MI.getOperand(1).setReg(Not->getOperand(0).getReg());
3169   MI.getOperand(2).setReg(Y);
3170   Observer.changedInstr(MI);
3171 }
3172 
3173 bool CombinerHelper::matchPtrAddZero(MachineInstr &MI) {
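  // Fold (ptr_add null, x) -> (inttoptr x): adding an offset to a null base
  // pointer just reinterprets the offset as a pointer.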
3174   auto &PtrAdd = cast<GPtrAdd>(MI);
3175   Register DstReg = PtrAdd.getReg(0);
3176   LLT Ty = MRI.getType(DstReg);
3177   const DataLayout &DL = Builder.getMF().getDataLayout();
3178 
3179   if (DL.isNonIntegralAddressSpace(Ty.getScalarType().getAddressSpace()))
3180     return false;
3181 
3182   if (Ty.isPointer()) {
3183     auto ConstVal = getIConstantVRegVal(PtrAdd.getBaseReg(), MRI);
3184     return ConstVal && *ConstVal == 0;
3185   }
3186 
3187   assert(Ty.isVector() && "Expecting a vector type");
3188   const MachineInstr *VecMI = MRI.getVRegDef(PtrAdd.getBaseReg());
3189   return isBuildVectorAllZeros(*VecMI, MRI);
3190 }
3191 
3192 void CombinerHelper::applyPtrAddZero(MachineInstr &MI) {
3193   auto &PtrAdd = cast<GPtrAdd>(MI);
3194   Builder.setInstrAndDebugLoc(PtrAdd);
3195   Builder.buildIntToPtr(PtrAdd.getReg(0), PtrAdd.getOffsetReg());
3196   PtrAdd.eraseFromParent();
3197 }
3198 
3199 /// The second source operand is known to be a power of 2.
3200 void CombinerHelper::applySimplifyURemByPow2(MachineInstr &MI) {
3201   Register DstReg = MI.getOperand(0).getReg();
3202   Register Src0 = MI.getOperand(1).getReg();
3203   Register Pow2Src1 = MI.getOperand(2).getReg();
3204   LLT Ty = MRI.getType(DstReg);
3205   Builder.setInstrAndDebugLoc(MI);
3206 
3207   // Fold (urem x, pow2) -> (and x, pow2-1)
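  // E.g. (urem x, 8) -> (and x, 7).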
3208   auto NegOne = Builder.buildConstant(Ty, -1);
3209   auto Add = Builder.buildAdd(Ty, Pow2Src1, NegOne);
3210   Builder.buildAnd(DstReg, Src0, Add);
3211   MI.eraseFromParent();
3212 }
3213 
3214 bool CombinerHelper::matchFoldBinOpIntoSelect(MachineInstr &MI,
3215                                               unsigned &SelectOpNo) {
3216   Register LHS = MI.getOperand(1).getReg();
3217   Register RHS = MI.getOperand(2).getReg();
3218 
3219   Register OtherOperandReg = RHS;
3220   SelectOpNo = 1;
3221   MachineInstr *Select = MRI.getVRegDef(LHS);
3222 
3223   // Don't do this unless the old select is going away. We want to eliminate the
3224   // binary operator, not replace a binop with a select.
3225   if (Select->getOpcode() != TargetOpcode::G_SELECT ||
3226       !MRI.hasOneNonDBGUse(LHS)) {
3227     OtherOperandReg = LHS;
3228     SelectOpNo = 2;
3229     Select = MRI.getVRegDef(RHS);
3230     if (Select->getOpcode() != TargetOpcode::G_SELECT ||
3231         !MRI.hasOneNonDBGUse(RHS))
3232       return false;
3233   }
3234 
3235   MachineInstr *SelectLHS = MRI.getVRegDef(Select->getOperand(2).getReg());
3236   MachineInstr *SelectRHS = MRI.getVRegDef(Select->getOperand(3).getReg());
3237 
3238   if (!isConstantOrConstantVector(*SelectLHS, MRI,
3239                                   /*AllowFP*/ true,
3240                                   /*AllowOpaqueConstants*/ false))
3241     return false;
3242   if (!isConstantOrConstantVector(*SelectRHS, MRI,
3243                                   /*AllowFP*/ true,
3244                                   /*AllowOpaqueConstants*/ false))
3245     return false;
3246 
3247   unsigned BinOpcode = MI.getOpcode();
3248 
3249   // We now know one of the operands is a select of constants. Now verify that
3250   // the other binary operator operand is either a constant, or we can handle a
3251   // variable.
3252   bool CanFoldNonConst =
3253       (BinOpcode == TargetOpcode::G_AND || BinOpcode == TargetOpcode::G_OR) &&
3254       (isNullOrNullSplat(*SelectLHS, MRI) ||
3255        isAllOnesOrAllOnesSplat(*SelectLHS, MRI)) &&
3256       (isNullOrNullSplat(*SelectRHS, MRI) ||
3257        isAllOnesOrAllOnesSplat(*SelectRHS, MRI));
3258   if (CanFoldNonConst)
3259     return true;
3260 
3261   return isConstantOrConstantVector(*MRI.getVRegDef(OtherOperandReg), MRI,
3262                                     /*AllowFP*/ true,
3263                                     /*AllowOpaqueConstants*/ false);
3264 }
3265 
3266 /// \p SelectOperand is the operand in binary operator \p MI that is the select
3267 /// to fold.
3268 void CombinerHelper::applyFoldBinOpIntoSelect(MachineInstr &MI,
3269                                               const unsigned &SelectOperand) {
3270   Builder.setInstrAndDebugLoc(MI);
3271 
3272   Register Dst = MI.getOperand(0).getReg();
3273   Register LHS = MI.getOperand(1).getReg();
3274   Register RHS = MI.getOperand(2).getReg();
3275   MachineInstr *Select = MRI.getVRegDef(MI.getOperand(SelectOperand).getReg());
3276 
3277   Register SelectCond = Select->getOperand(1).getReg();
3278   Register SelectTrue = Select->getOperand(2).getReg();
3279   Register SelectFalse = Select->getOperand(3).getReg();
3280 
3281   LLT Ty = MRI.getType(Dst);
3282   unsigned BinOpcode = MI.getOpcode();
3283 
3284   Register FoldTrue, FoldFalse;
3285 
3286   // We have a select-of-constants followed by a binary operator with a
3287   // constant. Eliminate the binop by pulling the constant math into the select.
3288   // Example: add (select Cond, CT, CF), CBO --> select Cond, CT + CBO, CF + CBO
3289   if (SelectOperand == 1) {
3290     // TODO: SelectionDAG verifies this actually constant folds before
3291     // committing to the combine.
3292 
3293     FoldTrue = Builder.buildInstr(BinOpcode, {Ty}, {SelectTrue, RHS}).getReg(0);
3294     FoldFalse =
3295         Builder.buildInstr(BinOpcode, {Ty}, {SelectFalse, RHS}).getReg(0);
3296   } else {
3297     FoldTrue = Builder.buildInstr(BinOpcode, {Ty}, {LHS, SelectTrue}).getReg(0);
3298     FoldFalse =
3299         Builder.buildInstr(BinOpcode, {Ty}, {LHS, SelectFalse}).getReg(0);
3300   }
3301 
3302   Builder.buildSelect(Dst, SelectCond, FoldTrue, FoldFalse, MI.getFlags());
3303   MI.eraseFromParent();
3304 }
3305 
3306 std::optional<SmallVector<Register, 8>>
3307 CombinerHelper::findCandidatesForLoadOrCombine(const MachineInstr *Root) const {
3308   assert(Root->getOpcode() == TargetOpcode::G_OR && "Expected G_OR only!");
3309   // We want to detect if Root is part of a tree which represents a bunch
3310   // of loads being merged into a larger load. We'll try to recognize patterns
3311   // like, for example:
3312   //
3313   //  Reg   Reg
3314   //   \    /
3315   //    OR_1   Reg
3316   //     \    /
3317   //      OR_2
3318   //        \     Reg
3319   //         .. /
3320   //        Root
3321   //
3322   //  Reg   Reg   Reg   Reg
3323   //     \ /       \   /
3324   //     OR_1      OR_2
3325   //       \       /
3326   //        \    /
3327   //         ...
3328   //         Root
3329   //
3330   // Each "Reg" may have been produced by a load + some arithmetic. This
3331   // function will save each of them.
3332   SmallVector<Register, 8> RegsToVisit;
3333   SmallVector<const MachineInstr *, 7> Ors = {Root};
3334 
3335   // In the "worst" case, we're dealing with a load for each byte. So, there
3336   // are at most #bytes - 1 ORs.
3337   const unsigned MaxIter =
3338       MRI.getType(Root->getOperand(0).getReg()).getSizeInBytes() - 1;
3339   for (unsigned Iter = 0; Iter < MaxIter; ++Iter) {
3340     if (Ors.empty())
3341       break;
3342     const MachineInstr *Curr = Ors.pop_back_val();
3343     Register OrLHS = Curr->getOperand(1).getReg();
3344     Register OrRHS = Curr->getOperand(2).getReg();
3345 
3346     // In the combine, we want to eliminate the entire tree.
3347     if (!MRI.hasOneNonDBGUse(OrLHS) || !MRI.hasOneNonDBGUse(OrRHS))
3348       return std::nullopt;
3349 
3350     // If it's a G_OR, save it and continue to walk. If it's not, then it's
3351     // something that may be a load + arithmetic.
3352     if (const MachineInstr *Or = getOpcodeDef(TargetOpcode::G_OR, OrLHS, MRI))
3353       Ors.push_back(Or);
3354     else
3355       RegsToVisit.push_back(OrLHS);
3356     if (const MachineInstr *Or = getOpcodeDef(TargetOpcode::G_OR, OrRHS, MRI))
3357       Ors.push_back(Or);
3358     else
3359       RegsToVisit.push_back(OrRHS);
3360   }
3361 
3362   // We're going to try and merge each register into a wider power-of-2 type,
3363   // so we ought to have an even number of registers.
3364   if (RegsToVisit.empty() || RegsToVisit.size() % 2 != 0)
3365     return std::nullopt;
3366   return RegsToVisit;
3367 }
3368 
3369 /// Helper function for findLoadOffsetsForLoadOrCombine.
3370 ///
3371 /// Check if \p Reg is the result of loading a \p MemSizeInBits wide value,
3372 /// and then moving that value into a specific byte offset.
3373 ///
3374 /// e.g. x[i] << 24
3375 ///
3376 /// \returns The load instruction and the byte offset it is moved into.
3377 static std::optional<std::pair<GZExtLoad *, int64_t>>
3378 matchLoadAndBytePosition(Register Reg, unsigned MemSizeInBits,
3379                          const MachineRegisterInfo &MRI) {
3380   assert(MRI.hasOneNonDBGUse(Reg) &&
3381          "Expected Reg to only have one non-debug use?");
3382   Register MaybeLoad;
3383   int64_t Shift;
3384   if (!mi_match(Reg, MRI,
3385                 m_OneNonDBGUse(m_GShl(m_Reg(MaybeLoad), m_ICst(Shift))))) {
3386     Shift = 0;
3387     MaybeLoad = Reg;
3388   }
3389 
3390   if (Shift % MemSizeInBits != 0)
3391     return std::nullopt;
3392 
3393   // TODO: Handle other types of loads.
3394   auto *Load = getOpcodeDef<GZExtLoad>(MaybeLoad, MRI);
3395   if (!Load)
3396     return std::nullopt;
3397 
3398   if (!Load->isUnordered() || Load->getMemSizeInBits() != MemSizeInBits)
3399     return std::nullopt;
3400 
3401   return std::make_pair(Load, Shift / MemSizeInBits);
3402 }
3403 
3404 std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
3405 CombinerHelper::findLoadOffsetsForLoadOrCombine(
3406     SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
3407     const SmallVector<Register, 8> &RegsToVisit, const unsigned MemSizeInBits) {
3408 
3409   // Each load found for the pattern. There should be one for each RegsToVisit.
3410   SmallSetVector<const MachineInstr *, 8> Loads;
3411 
3412   // The lowest index used in any load. (The lowest "i" for each x[i].)
3413   int64_t LowestIdx = INT64_MAX;
3414 
3415   // The load which uses the lowest index.
3416   GZExtLoad *LowestIdxLoad = nullptr;
3417 
3418   // Keeps track of the load indices we see. We shouldn't see any indices twice.
3419   SmallSet<int64_t, 8> SeenIdx;
3420 
3421   // Ensure each load is in the same MBB.
3422   // TODO: Support multiple MachineBasicBlocks.
3423   MachineBasicBlock *MBB = nullptr;
3424   const MachineMemOperand *MMO = nullptr;
3425 
3426   // Earliest instruction-order load in the pattern.
3427   GZExtLoad *EarliestLoad = nullptr;
3428 
3429   // Latest instruction-order load in the pattern.
3430   GZExtLoad *LatestLoad = nullptr;
3431 
3432   // Base pointer which every load should share.
3433   Register BasePtr;
3434 
3435   // We want to find a load for each register. Each load should have some
3436   // appropriate bit twiddling arithmetic. During this loop, we will also keep
3437   // track of the load which uses the lowest index. Later, we will check if we
3438   // can use its pointer in the final, combined load.
3439   for (auto Reg : RegsToVisit) {
3440     // Find the load, and find the position that it will end up at in the
3441     // (possibly shifted) final value.
3442     auto LoadAndPos = matchLoadAndBytePosition(Reg, MemSizeInBits, MRI);
3443     if (!LoadAndPos)
3444       return std::nullopt;
3445     GZExtLoad *Load;
3446     int64_t DstPos;
3447     std::tie(Load, DstPos) = *LoadAndPos;
3448 
3449     // TODO: Handle multiple MachineBasicBlocks. Currently not handled because
3450     // it is difficult to check for stores/calls/etc between loads.
3451     MachineBasicBlock *LoadMBB = Load->getParent();
3452     if (!MBB)
3453       MBB = LoadMBB;
3454     if (LoadMBB != MBB)
3455       return std::nullopt;
3456 
3457     // Make sure that the MachineMemOperands of every seen load are compatible.
3458     auto &LoadMMO = Load->getMMO();
3459     if (!MMO)
3460       MMO = &LoadMMO;
3461     if (MMO->getAddrSpace() != LoadMMO.getAddrSpace())
3462       return std::nullopt;
3463 
3464     // Find out what the base pointer and index for the load is.
3465     Register LoadPtr;
3466     int64_t Idx;
3467     if (!mi_match(Load->getOperand(1).getReg(), MRI,
3468                   m_GPtrAdd(m_Reg(LoadPtr), m_ICst(Idx)))) {
3469       LoadPtr = Load->getOperand(1).getReg();
3470       Idx = 0;
3471     }
3472 
3473     // Don't combine things like a[i], a[i] -> a bigger load.
3474     if (!SeenIdx.insert(Idx).second)
3475       return std::nullopt;
3476 
3477     // Every load must share the same base pointer; don't combine things like:
3478     //
3479     // a[i], b[i + 1] -> a bigger load.
3480     if (!BasePtr.isValid())
3481       BasePtr = LoadPtr;
3482     if (BasePtr != LoadPtr)
3483       return std::nullopt;
3484 
3485     if (Idx < LowestIdx) {
3486       LowestIdx = Idx;
3487       LowestIdxLoad = Load;
3488     }
3489 
3490     // Keep track of the byte offset that this load ends up at. If we have seen
3491     // the byte offset, then stop here. We do not want to combine:
3492     //
3493     // a[i] << 16, a[i + k] << 16 -> a bigger load.
3494     if (!MemOffset2Idx.try_emplace(DstPos, Idx).second)
3495       return std::nullopt;
3496     Loads.insert(Load);
3497 
3498     // Keep track of the position of the earliest/latest loads in the pattern.
3499     // We will check that there are no load fold barriers between them later
3500     // on.
3501     //
3502     // FIXME: Is there a better way to check for load fold barriers?
3503     if (!EarliestLoad || dominates(*Load, *EarliestLoad))
3504       EarliestLoad = Load;
3505     if (!LatestLoad || dominates(*LatestLoad, *Load))
3506       LatestLoad = Load;
3507   }
3508 
3509   // We found a load for each register. Let's check if each load satisfies the
3510   // pattern.
3511   assert(Loads.size() == RegsToVisit.size() &&
3512          "Expected to find a load for each register?");
3513   assert(EarliestLoad != LatestLoad && EarliestLoad &&
3514          LatestLoad && "Expected at least two loads?");
3515 
3516   // Check if there are any stores, calls, etc. between any of the loads. If
3517   // there are, then we can't safely perform the combine.
3518   //
3519   // MaxIter is chosen based off the (worst case) number of iterations it
3520   // typically takes to succeed in the LLVM test suite plus some padding.
3521   //
3522   // FIXME: Is there a better way to check for load fold barriers?
3523   const unsigned MaxIter = 20;
3524   unsigned Iter = 0;
3525   for (const auto &MI : instructionsWithoutDebug(EarliestLoad->getIterator(),
3526                                                  LatestLoad->getIterator())) {
3527     if (Loads.count(&MI))
3528       continue;
3529     if (MI.isLoadFoldBarrier())
3530       return std::nullopt;
3531     if (Iter++ == MaxIter)
3532       return std::nullopt;
3533   }
3534 
3535   return std::make_tuple(LowestIdxLoad, LowestIdx, LatestLoad);
3536 }
3537 
3538 bool CombinerHelper::matchLoadOrCombine(
3539     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
3540   assert(MI.getOpcode() == TargetOpcode::G_OR);
3541   MachineFunction &MF = *MI.getMF();
3542   // Assuming a little-endian target, transform:
3543   //  s8 *a = ...
3544   //  s32 val = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24)
3545   // =>
3546   //  s32 val = *((i32)a)
3547   //
3548   //  s8 *a = ...
3549   //  s32 val = (a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3]
3550   // =>
3551   //  s32 val = BSWAP(*((s32)a))
3552   Register Dst = MI.getOperand(0).getReg();
3553   LLT Ty = MRI.getType(Dst);
3554   if (Ty.isVector())
3555     return false;
3556 
3557   // We need to combine at least two loads into this type. Since the smallest
3558   // possible load is into a byte, we need at least a 16-bit wide type.
3559   const unsigned WideMemSizeInBits = Ty.getSizeInBits();
3560   if (WideMemSizeInBits < 16 || WideMemSizeInBits % 8 != 0)
3561     return false;
3562 
3563   // Match a collection of non-OR instructions in the pattern.
3564   auto RegsToVisit = findCandidatesForLoadOrCombine(&MI);
3565   if (!RegsToVisit)
3566     return false;
3567 
3568   // We have a collection of non-OR instructions. Figure out how wide each of
3569   // the small loads should be based off of the number of potential loads we
3570   // found.
3571   const unsigned NarrowMemSizeInBits = WideMemSizeInBits / RegsToVisit->size();
3572   if (NarrowMemSizeInBits % 8 != 0)
3573     return false;
3574 
3575   // Check if each register feeding into each OR is a load from the same
3576   // base pointer + some arithmetic.
3577   //
3578   // e.g. a[0], a[1] << 8, a[2] << 16, etc.
3579   //
3580   // Also verify that each of these ends up putting a[i] into the same memory
3581   // offset as a load into a wide type would.
3582   SmallDenseMap<int64_t, int64_t, 8> MemOffset2Idx;
3583   GZExtLoad *LowestIdxLoad, *LatestLoad;
3584   int64_t LowestIdx;
3585   auto MaybeLoadInfo = findLoadOffsetsForLoadOrCombine(
3586       MemOffset2Idx, *RegsToVisit, NarrowMemSizeInBits);
3587   if (!MaybeLoadInfo)
3588     return false;
3589   std::tie(LowestIdxLoad, LowestIdx, LatestLoad) = *MaybeLoadInfo;
3590 
3591   // We have a bunch of loads being OR'd together. Using the addresses + offsets
3592   // we found before, check if this corresponds to a big or little endian byte
3593   // pattern. If it does, then we can represent it using a load + possibly a
3594   // BSWAP.
3595   bool IsBigEndianTarget = MF.getDataLayout().isBigEndian();
3596   std::optional<bool> IsBigEndian = isBigEndian(MemOffset2Idx, LowestIdx);
3597   if (!IsBigEndian)
3598     return false;
3599   bool NeedsBSwap = IsBigEndianTarget != *IsBigEndian;
3600   if (NeedsBSwap && !isLegalOrBeforeLegalizer({TargetOpcode::G_BSWAP, {Ty}}))
3601     return false;
3602 
3603   // Make sure that the load from the lowest index produces offset 0 in the
3604   // final value.
3605   //
3606   // This ensures that we won't combine something like this:
3607   //
3608   // load x[i] -> byte 2
3609   // load x[i+1] -> byte 0 ---> wide_load x[i]
3610   // load x[i+2] -> byte 1
3611   const unsigned NumLoadsInTy = WideMemSizeInBits / NarrowMemSizeInBits;
3612   const unsigned ZeroByteOffset =
3613       *IsBigEndian
3614           ? bigEndianByteAt(NumLoadsInTy, 0)
3615           : littleEndianByteAt(NumLoadsInTy, 0);
3616   auto ZeroOffsetIdx = MemOffset2Idx.find(ZeroByteOffset);
3617   if (ZeroOffsetIdx == MemOffset2Idx.end() ||
3618       ZeroOffsetIdx->second != LowestIdx)
3619     return false;
3620 
3621   // We will reuse the pointer from the load which ends up at byte offset 0. It
3622   // may not use index 0.
3623   Register Ptr = LowestIdxLoad->getPointerReg();
3624   const MachineMemOperand &MMO = LowestIdxLoad->getMMO();
3625   LegalityQuery::MemDesc MMDesc(MMO);
3626   MMDesc.MemoryTy = Ty;
3627   if (!isLegalOrBeforeLegalizer(
3628           {TargetOpcode::G_LOAD, {Ty, MRI.getType(Ptr)}, {MMDesc}}))
3629     return false;
3630   auto PtrInfo = MMO.getPointerInfo();
3631   auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, WideMemSizeInBits / 8);
3632 
3633   // Load must be allowed and fast on the target.
3634   LLVMContext &C = MF.getFunction().getContext();
3635   auto &DL = MF.getDataLayout();
3636   unsigned Fast = 0;
3637   if (!getTargetLowering().allowsMemoryAccess(C, DL, Ty, *NewMMO, &Fast) ||
3638       !Fast)
3639     return false;
3640 
3641   MatchInfo = [=](MachineIRBuilder &MIB) {
3642     MIB.setInstrAndDebugLoc(*LatestLoad);
3643     Register LoadDst = NeedsBSwap ? MRI.cloneVirtualRegister(Dst) : Dst;
3644     MIB.buildLoad(LoadDst, Ptr, *NewMMO);
3645     if (NeedsBSwap)
3646       MIB.buildBSwap(Dst, LoadDst);
3647   };
3648   return true;
3649 }
3650 
3651 bool CombinerHelper::matchExtendThroughPhis(MachineInstr &MI,
3652                                             MachineInstr *&ExtMI) {
3653   assert(MI.getOpcode() == TargetOpcode::G_PHI);
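  // Look to sink an extend whose only user is this phi into the incoming
  // blocks, e.g. (illustrative):
  //   %phi:_(s8) = G_PHI %a(s8), %bb.1, %b(s8), %bb.2
  //   %ext:_(s32) = G_SEXT %phi(s8)
  // becomes a phi of already-extended values:
  //   %phi:_(s32) = G_PHI %a_ext(s32), %bb.1, %b_ext(s32), %bb.2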
3654 
3655   Register DstReg = MI.getOperand(0).getReg();
3656 
3657   // TODO: Extending a vector may be expensive, don't do this until heuristics
3658   // are better.
3659   if (MRI.getType(DstReg).isVector())
3660     return false;
3661 
3662   // Try to match a phi, whose only use is an extend.
3663   if (!MRI.hasOneNonDBGUse(DstReg))
3664     return false;
3665   ExtMI = &*MRI.use_instr_nodbg_begin(DstReg);
3666   switch (ExtMI->getOpcode()) {
3667   case TargetOpcode::G_ANYEXT:
3668     return true; // G_ANYEXT is usually free.
3669   case TargetOpcode::G_ZEXT:
3670   case TargetOpcode::G_SEXT:
3671     break;
3672   default:
3673     return false;
3674   }
3675 
3676   // If the target is likely to fold this extend away, don't propagate.
3677   if (Builder.getTII().isExtendLikelyToBeFolded(*ExtMI, MRI))
3678     return false;
3679 
3680   // We don't want to propagate the extends unless there's a good chance that
3681   // they'll be optimized in some way.
3682   // Collect the unique incoming values.
3683   SmallPtrSet<MachineInstr *, 4> InSrcs;
3684   for (unsigned Idx = 1; Idx < MI.getNumOperands(); Idx += 2) {
3685     auto *DefMI = getDefIgnoringCopies(MI.getOperand(Idx).getReg(), MRI);
3686     switch (DefMI->getOpcode()) {
3687     case TargetOpcode::G_LOAD:
3688     case TargetOpcode::G_TRUNC:
3689     case TargetOpcode::G_SEXT:
3690     case TargetOpcode::G_ZEXT:
3691     case TargetOpcode::G_ANYEXT:
3692     case TargetOpcode::G_CONSTANT:
3693       InSrcs.insert(getDefIgnoringCopies(MI.getOperand(Idx).getReg(), MRI));
3694       // Don't try to propagate if there are too many places to create new
3695       // extends, chances are it'll increase code size.
3696       if (InSrcs.size() > 2)
3697         return false;
3698       break;
3699     default:
3700       return false;
3701     }
3702   }
3703   return true;
3704 }
3705 
3706 void CombinerHelper::applyExtendThroughPhis(MachineInstr &MI,
3707                                             MachineInstr *&ExtMI) {
3708   assert(MI.getOpcode() == TargetOpcode::G_PHI);
3709   Register DstReg = ExtMI->getOperand(0).getReg();
3710   LLT ExtTy = MRI.getType(DstReg);
3711 
3712   // Propagate the extension into each incoming reg's defining block.
3713   // Use a SetVector here because PHIs can have duplicate edges, and we want
3714   // deterministic iteration order.
3715   SmallSetVector<MachineInstr *, 8> SrcMIs;
3716   SmallDenseMap<MachineInstr *, MachineInstr *, 8> OldToNewSrcMap;
3717   for (unsigned SrcIdx = 1; SrcIdx < MI.getNumOperands(); SrcIdx += 2) {
3718     auto *SrcMI = MRI.getVRegDef(MI.getOperand(SrcIdx).getReg());
3719     if (!SrcMIs.insert(SrcMI))
3720       continue;
3721 
3722     // Build an extend after each src inst.
3723     auto *MBB = SrcMI->getParent();
3724     MachineBasicBlock::iterator InsertPt = ++SrcMI->getIterator();
3725     if (InsertPt != MBB->end() && InsertPt->isPHI())
3726       InsertPt = MBB->getFirstNonPHI();
3727 
3728     Builder.setInsertPt(*SrcMI->getParent(), InsertPt);
3729     Builder.setDebugLoc(MI.getDebugLoc());
3730     auto NewExt = Builder.buildExtOrTrunc(ExtMI->getOpcode(), ExtTy,
3731                                           SrcMI->getOperand(0).getReg());
3732     OldToNewSrcMap[SrcMI] = NewExt;
3733   }
3734 
3735   // Create a new phi with the extended inputs.
3736   Builder.setInstrAndDebugLoc(MI);
3737   auto NewPhi = Builder.buildInstrNoInsert(TargetOpcode::G_PHI);
3738   NewPhi.addDef(DstReg);
3739   for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
3740     if (!MO.isReg()) {
3741       NewPhi.addMBB(MO.getMBB());
3742       continue;
3743     }
3744     auto *NewSrc = OldToNewSrcMap[MRI.getVRegDef(MO.getReg())];
3745     NewPhi.addUse(NewSrc->getOperand(0).getReg());
3746   }
3747   Builder.insertInstr(NewPhi);
3748   ExtMI->eraseFromParent();
3749 }
3750 
3751 bool CombinerHelper::matchExtractVecEltBuildVec(MachineInstr &MI,
3752                                                 Register &Reg) {
3753   assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT);
3754   // If we have a constant index, look for a G_BUILD_VECTOR source
3755   // and find the source register that the index maps to.
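  // E.g.:
  //   %vec:_(<2 x s32>) = G_BUILD_VECTOR %a(s32), %b(s32)
  //   %elt:_(s32) = G_EXTRACT_VECTOR_ELT %vec, 1
  // can be replaced with %b directly.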
3756   Register SrcVec = MI.getOperand(1).getReg();
3757   LLT SrcTy = MRI.getType(SrcVec);
3758 
3759   auto Cst = getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
3760   if (!Cst || Cst->Value.getZExtValue() >= SrcTy.getNumElements())
3761     return false;
3762 
3763   unsigned VecIdx = Cst->Value.getZExtValue();
3764 
3765   // Check if we have a build_vector or build_vector_trunc with an optional
3766   // trunc in front.
3767   MachineInstr *SrcVecMI = MRI.getVRegDef(SrcVec);
3768   if (SrcVecMI->getOpcode() == TargetOpcode::G_TRUNC) {
3769     SrcVecMI = MRI.getVRegDef(SrcVecMI->getOperand(1).getReg());
3770   }
3771 
3772   if (SrcVecMI->getOpcode() != TargetOpcode::G_BUILD_VECTOR &&
3773       SrcVecMI->getOpcode() != TargetOpcode::G_BUILD_VECTOR_TRUNC)
3774     return false;
3775 
3776   EVT Ty(getMVTForLLT(SrcTy));
3777   if (!MRI.hasOneNonDBGUse(SrcVec) &&
3778       !getTargetLowering().aggressivelyPreferBuildVectorSources(Ty))
3779     return false;
3780 
3781   Reg = SrcVecMI->getOperand(VecIdx + 1).getReg();
3782   return true;
3783 }
3784 
3785 void CombinerHelper::applyExtractVecEltBuildVec(MachineInstr &MI,
3786                                                 Register &Reg) {
3787   // Check the type of the register, since it may have come from a
3788   // G_BUILD_VECTOR_TRUNC.
3789   LLT ScalarTy = MRI.getType(Reg);
3790   Register DstReg = MI.getOperand(0).getReg();
3791   LLT DstTy = MRI.getType(DstReg);
3792 
3793   Builder.setInstrAndDebugLoc(MI);
3794   if (ScalarTy != DstTy) {
3795     assert(ScalarTy.getSizeInBits() > DstTy.getSizeInBits());
3796     Builder.buildTrunc(DstReg, Reg);
3797     MI.eraseFromParent();
3798     return;
3799   }
3800   replaceSingleDefInstWithReg(MI, Reg);
3801 }
3802 
3803 bool CombinerHelper::matchExtractAllEltsFromBuildVector(
3804     MachineInstr &MI,
3805     SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) {
3806   assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
3807   // This combine tries to find build_vector's which have every source element
3808   // extracted using G_EXTRACT_VECTOR_ELT. This can happen when transforms like
3809   // the masked load scalarization is run late in the pipeline. There's already
3810   // a combine for a similar pattern starting from the extract, but that
3811   // doesn't attempt to do it if there are multiple uses of the build_vector,
3812   // which in this case is true. Starting the combine from the build_vector
3813   // feels more natural than trying to find sibling nodes of extracts.
3814   // E.g.
3815   //  %vec(<4 x s32>) = G_BUILD_VECTOR %s1(s32), %s2, %s3, %s4
3816   //  %ext1 = G_EXTRACT_VECTOR_ELT %vec, 0
3817   //  %ext2 = G_EXTRACT_VECTOR_ELT %vec, 1
3818   //  %ext3 = G_EXTRACT_VECTOR_ELT %vec, 2
3819   //  %ext4 = G_EXTRACT_VECTOR_ELT %vec, 3
3820   // ==>
3821   // replace ext{1,2,3,4} with %s{1,2,3,4}
3822 
3823   Register DstReg = MI.getOperand(0).getReg();
3824   LLT DstTy = MRI.getType(DstReg);
3825   unsigned NumElts = DstTy.getNumElements();
3826 
3827   SmallBitVector ExtractedElts(NumElts);
3828   for (MachineInstr &II : MRI.use_nodbg_instructions(DstReg)) {
3829     if (II.getOpcode() != TargetOpcode::G_EXTRACT_VECTOR_ELT)
3830       return false;
3831     auto Cst = getIConstantVRegVal(II.getOperand(2).getReg(), MRI);
3832     if (!Cst)
3833       return false;
3834     unsigned Idx = Cst->getZExtValue();
3835     if (Idx >= NumElts)
3836       return false; // Out of range.
3837     ExtractedElts.set(Idx);
3838     SrcDstPairs.emplace_back(
3839         std::make_pair(MI.getOperand(Idx + 1).getReg(), &II));
3840   }
3841   // Match if every element was extracted.
3842   return ExtractedElts.all();
3843 }
3844 
3845 void CombinerHelper::applyExtractAllEltsFromBuildVector(
3846     MachineInstr &MI,
3847     SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) {
3848   assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
3849   for (auto &Pair : SrcDstPairs) {
3850     auto *ExtMI = Pair.second;
3851     replaceRegWith(MRI, ExtMI->getOperand(0).getReg(), Pair.first);
3852     ExtMI->eraseFromParent();
3853   }
3854   MI.eraseFromParent();
3855 }
3856 
3857 void CombinerHelper::applyBuildFn(
3858     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
3859   Builder.setInstrAndDebugLoc(MI);
3860   MatchInfo(Builder);
3861   MI.eraseFromParent();
3862 }
3863 
3864 void CombinerHelper::applyBuildFnNoErase(
3865     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
3866   Builder.setInstrAndDebugLoc(MI);
3867   MatchInfo(Builder);
3868 }
3869 
3870 bool CombinerHelper::matchOrShiftToFunnelShift(MachineInstr &MI,
3871                                                BuildFnTy &MatchInfo) {
3872   assert(MI.getOpcode() == TargetOpcode::G_OR);
3873 
3874   Register Dst = MI.getOperand(0).getReg();
3875   LLT Ty = MRI.getType(Dst);
3876   unsigned BitWidth = Ty.getScalarSizeInBits();
3877 
3878   Register ShlSrc, ShlAmt, LShrSrc, LShrAmt, Amt;
3879   unsigned FshOpc = 0;
3880 
3881   // Match (or (shl ...), (lshr ...)).
3882   if (!mi_match(Dst, MRI,
3883                 // m_GOr() handles the commuted version as well.
3884                 m_GOr(m_GShl(m_Reg(ShlSrc), m_Reg(ShlAmt)),
3885                       m_GLShr(m_Reg(LShrSrc), m_Reg(LShrAmt)))))
3886     return false;
3887 
3888   // Given constants C0 and C1 such that C0 + C1 is bit-width:
3889   // (or (shl x, C0), (lshr y, C1)) -> (fshl x, y, C0) or (fshr x, y, C1)
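  // E.g. for s32: (or (shl x, 8), (lshr y, 24)) -> (fshr x, y, 24).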
3890   int64_t CstShlAmt, CstLShrAmt;
3891   if (mi_match(ShlAmt, MRI, m_ICstOrSplat(CstShlAmt)) &&
3892       mi_match(LShrAmt, MRI, m_ICstOrSplat(CstLShrAmt)) &&
3893       CstShlAmt + CstLShrAmt == BitWidth) {
3894     FshOpc = TargetOpcode::G_FSHR;
3895     Amt = LShrAmt;
3896 
3897   } else if (mi_match(LShrAmt, MRI,
3898                       m_GSub(m_SpecificICstOrSplat(BitWidth), m_Reg(Amt))) &&
3899              ShlAmt == Amt) {
3900     // (or (shl x, amt), (lshr y, (sub bw, amt))) -> (fshl x, y, amt)
3901     FshOpc = TargetOpcode::G_FSHL;
3902 
3903   } else if (mi_match(ShlAmt, MRI,
3904                       m_GSub(m_SpecificICstOrSplat(BitWidth), m_Reg(Amt))) &&
3905              LShrAmt == Amt) {
3906     // (or (shl x, (sub bw, amt)), (lshr y, amt)) -> (fshr x, y, amt)
3907     FshOpc = TargetOpcode::G_FSHR;
3908 
3909   } else {
3910     return false;
3911   }
3912 
3913   LLT AmtTy = MRI.getType(Amt);
3914   if (!isLegalOrBeforeLegalizer({FshOpc, {Ty, AmtTy}}))
3915     return false;
3916 
3917   MatchInfo = [=](MachineIRBuilder &B) {
3918     B.buildInstr(FshOpc, {Dst}, {ShlSrc, LShrSrc, Amt});
3919   };
3920   return true;
3921 }
3922 
3923 /// Match an FSHL or FSHR that can be combined to a ROTR or ROTL rotate.
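/// E.g. (fshl x, x, amt) -> (rotl x, amt) when both sources are the same.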
3924 bool CombinerHelper::matchFunnelShiftToRotate(MachineInstr &MI) {
3925   unsigned Opc = MI.getOpcode();
3926   assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
3927   Register X = MI.getOperand(1).getReg();
3928   Register Y = MI.getOperand(2).getReg();
3929   if (X != Y)
3930     return false;
3931   unsigned RotateOpc =
3932       Opc == TargetOpcode::G_FSHL ? TargetOpcode::G_ROTL : TargetOpcode::G_ROTR;
3933   return isLegalOrBeforeLegalizer({RotateOpc, {MRI.getType(X), MRI.getType(Y)}});
3934 }
3935 
3936 void CombinerHelper::applyFunnelShiftToRotate(MachineInstr &MI) {
3937   unsigned Opc = MI.getOpcode();
3938   assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
3939   bool IsFSHL = Opc == TargetOpcode::G_FSHL;
3940   Observer.changingInstr(MI);
3941   MI.setDesc(Builder.getTII().get(IsFSHL ? TargetOpcode::G_ROTL
3942                                          : TargetOpcode::G_ROTR));
3943   MI.removeOperand(2);
3944   Observer.changedInstr(MI);
3945 }
3946 
3947 // Fold (rot x, c) -> (rot x, c % BitSize)
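// E.g. for s32: (rotl x, 40) -> (rotl x, 8).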
3948 bool CombinerHelper::matchRotateOutOfRange(MachineInstr &MI) {
3949   assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
3950          MI.getOpcode() == TargetOpcode::G_ROTR);
3951   unsigned Bitsize =
3952       MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits();
3953   Register AmtReg = MI.getOperand(2).getReg();
3954   bool OutOfRange = false;
3955   auto MatchOutOfRange = [Bitsize, &OutOfRange](const Constant *C) {
3956     if (auto *CI = dyn_cast<ConstantInt>(C))
3957       OutOfRange |= CI->getValue().uge(Bitsize);
3958     return true;
3959   };
3960   return matchUnaryPredicate(MRI, AmtReg, MatchOutOfRange) && OutOfRange;
3961 }
3962 
3963 void CombinerHelper::applyRotateOutOfRange(MachineInstr &MI) {
3964   assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
3965          MI.getOpcode() == TargetOpcode::G_ROTR);
3966   unsigned Bitsize =
3967       MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits();
3968   Builder.setInstrAndDebugLoc(MI);
3969   Register Amt = MI.getOperand(2).getReg();
3970   LLT AmtTy = MRI.getType(Amt);
3971   auto Bits = Builder.buildConstant(AmtTy, Bitsize);
3972   Amt = Builder.buildURem(AmtTy, MI.getOperand(2).getReg(), Bits).getReg(0);
3973   Observer.changingInstr(MI);
3974   MI.getOperand(2).setReg(Amt);
3975   Observer.changedInstr(MI);
3976 }
3977 
3978 bool CombinerHelper::matchICmpToTrueFalseKnownBits(MachineInstr &MI,
3979                                                    int64_t &MatchInfo) {
3980   assert(MI.getOpcode() == TargetOpcode::G_ICMP);
3981   auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
3982   auto KnownLHS = KB->getKnownBits(MI.getOperand(2).getReg());
3983   auto KnownRHS = KB->getKnownBits(MI.getOperand(3).getReg());
3984   std::optional<bool> KnownVal;
3985   switch (Pred) {
3986   default:
3987     llvm_unreachable("Unexpected G_ICMP predicate?");
3988   case CmpInst::ICMP_EQ:
3989     KnownVal = KnownBits::eq(KnownLHS, KnownRHS);
3990     break;
3991   case CmpInst::ICMP_NE:
3992     KnownVal = KnownBits::ne(KnownLHS, KnownRHS);
3993     break;
3994   case CmpInst::ICMP_SGE:
3995     KnownVal = KnownBits::sge(KnownLHS, KnownRHS);
3996     break;
3997   case CmpInst::ICMP_SGT:
3998     KnownVal = KnownBits::sgt(KnownLHS, KnownRHS);
3999     break;
4000   case CmpInst::ICMP_SLE:
4001     KnownVal = KnownBits::sle(KnownLHS, KnownRHS);
4002     break;
4003   case CmpInst::ICMP_SLT:
4004     KnownVal = KnownBits::slt(KnownLHS, KnownRHS);
4005     break;
4006   case CmpInst::ICMP_UGE:
4007     KnownVal = KnownBits::uge(KnownLHS, KnownRHS);
4008     break;
4009   case CmpInst::ICMP_UGT:
4010     KnownVal = KnownBits::ugt(KnownLHS, KnownRHS);
4011     break;
4012   case CmpInst::ICMP_ULE:
4013     KnownVal = KnownBits::ule(KnownLHS, KnownRHS);
4014     break;
4015   case CmpInst::ICMP_ULT:
4016     KnownVal = KnownBits::ult(KnownLHS, KnownRHS);
4017     break;
4018   }
4019   if (!KnownVal)
4020     return false;
4021   MatchInfo =
4022       *KnownVal
4023           ? getICmpTrueVal(getTargetLowering(),
4024                            /*IsVector=*/
4025                            MRI.getType(MI.getOperand(0).getReg()).isVector(),
4026                            /*IsFP=*/false)
4027           : 0;
4028   return true;
4029 }
4030 
4031 bool CombinerHelper::matchICmpToLHSKnownBits(
4032     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4033   assert(MI.getOpcode() == TargetOpcode::G_ICMP);
4034   // Given:
4035   //
4036   // %x = G_WHATEVER (... x is known to be 0 or 1 ...)
4037   // %cmp = G_ICMP ne %x, 0
4038   //
4039   // Or:
4040   //
4041   // %x = G_WHATEVER (... x is known to be 0 or 1 ...)
4042   // %cmp = G_ICMP eq %x, 1
4043   //
4044   // We can replace %cmp with %x assuming true is 1 on the target.
4045   auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
4046   if (!CmpInst::isEquality(Pred))
4047     return false;
4048   Register Dst = MI.getOperand(0).getReg();
4049   LLT DstTy = MRI.getType(Dst);
4050   if (getICmpTrueVal(getTargetLowering(), DstTy.isVector(),
4051                      /* IsFP = */ false) != 1)
4052     return false;
4053   int64_t OneOrZero = Pred == CmpInst::ICMP_EQ;
4054   if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICst(OneOrZero)))
4055     return false;
4056   Register LHS = MI.getOperand(2).getReg();
4057   auto KnownLHS = KB->getKnownBits(LHS);
4058   if (KnownLHS.getMinValue() != 0 || KnownLHS.getMaxValue() != 1)
4059     return false;
4060   // Make sure replacing Dst with the LHS is a legal operation.
4061   LLT LHSTy = MRI.getType(LHS);
4062   unsigned LHSSize = LHSTy.getSizeInBits();
4063   unsigned DstSize = DstTy.getSizeInBits();
4064   unsigned Op = TargetOpcode::COPY;
4065   if (DstSize != LHSSize)
4066     Op = DstSize < LHSSize ? TargetOpcode::G_TRUNC : TargetOpcode::G_ZEXT;
4067   if (!isLegalOrBeforeLegalizer({Op, {DstTy, LHSTy}}))
4068     return false;
4069   MatchInfo = [=](MachineIRBuilder &B) { B.buildInstr(Op, {Dst}, {LHS}); };
4070   return true;
4071 }
4072 
4073 // Replace (and (or x, c1), c2) with (and x, c2) iff c1 & c2 == 0
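// E.g. (and (or x, 0xF0), 0x0F) -> (and x, 0x0F), since 0xF0 & 0x0F == 0.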
4074 bool CombinerHelper::matchAndOrDisjointMask(
4075     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4076   assert(MI.getOpcode() == TargetOpcode::G_AND);
4077 
4078   // Ignore vector types to simplify matching the two constants.
4079   // TODO: do this for vectors and scalars via a demanded bits analysis.
4080   LLT Ty = MRI.getType(MI.getOperand(0).getReg());
4081   if (Ty.isVector())
4082     return false;
4083 
4084   Register Src;
4085   Register AndMaskReg;
4086   int64_t AndMaskBits;
4087   int64_t OrMaskBits;
4088   if (!mi_match(MI, MRI,
4089                 m_GAnd(m_GOr(m_Reg(Src), m_ICst(OrMaskBits)),
4090                        m_all_of(m_ICst(AndMaskBits), m_Reg(AndMaskReg)))))
4091     return false;
4092 
4093   // Check if OrMask could turn on any bits in Src.
4094   if (AndMaskBits & OrMaskBits)
4095     return false;
4096 
4097   MatchInfo = [=, &MI](MachineIRBuilder &B) {
4098     Observer.changingInstr(MI);
4099     // Canonicalize the result to have the constant on the RHS.
4100     if (MI.getOperand(1).getReg() == AndMaskReg)
4101       MI.getOperand(2).setReg(AndMaskReg);
4102     MI.getOperand(1).setReg(Src);
4103     Observer.changedInstr(MI);
4104   };
4105   return true;
4106 }
4107 
4108 /// Form a G_SBFX from a G_SEXT_INREG fed by a right shift.
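/// E.g. for s32:
///   %sh:_(s32) = G_LSHR %x, 4
///   %ext:_(s32) = G_SEXT_INREG %sh, 8
/// becomes (sbfx x, pos=4, width=8).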
4109 bool CombinerHelper::matchBitfieldExtractFromSExtInReg(
4110     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4111   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
4112   Register Dst = MI.getOperand(0).getReg();
4113   Register Src = MI.getOperand(1).getReg();
4114   LLT Ty = MRI.getType(Src);
4115   LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4116   if (!LI || !LI->isLegalOrCustom({TargetOpcode::G_SBFX, {Ty, ExtractTy}}))
4117     return false;
4118   int64_t Width = MI.getOperand(2).getImm();
4119   Register ShiftSrc;
4120   int64_t ShiftImm;
4121   if (!mi_match(
4122           Src, MRI,
4123           m_OneNonDBGUse(m_any_of(m_GAShr(m_Reg(ShiftSrc), m_ICst(ShiftImm)),
4124                                   m_GLShr(m_Reg(ShiftSrc), m_ICst(ShiftImm))))))
4125     return false;
4126   if (ShiftImm < 0 || ShiftImm + Width > Ty.getScalarSizeInBits())
4127     return false;
4128 
4129   MatchInfo = [=](MachineIRBuilder &B) {
4130     auto Cst1 = B.buildConstant(ExtractTy, ShiftImm);
4131     auto Cst2 = B.buildConstant(ExtractTy, Width);
4132     B.buildSbfx(Dst, ShiftSrc, Cst1, Cst2);
4133   };
4134   return true;
4135 }
4136 
4137 /// Form a G_UBFX from "(a srl b) & mask", where b and mask are constants.
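/// E.g. for s32: (and (lshr x, 4), 0xFF) -> (ubfx x, pos=4, width=8).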
4138 bool CombinerHelper::matchBitfieldExtractFromAnd(
4139     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4140   assert(MI.getOpcode() == TargetOpcode::G_AND);
4141   Register Dst = MI.getOperand(0).getReg();
4142   LLT Ty = MRI.getType(Dst);
4143   LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4144   if (!getTargetLowering().isConstantUnsignedBitfieldExtractLegal(
4145           TargetOpcode::G_UBFX, Ty, ExtractTy))
4146     return false;
4147 
4148   int64_t AndImm, LSBImm;
4149   Register ShiftSrc;
4150   const unsigned Size = Ty.getScalarSizeInBits();
4151   if (!mi_match(MI.getOperand(0).getReg(), MRI,
4152                 m_GAnd(m_OneNonDBGUse(m_GLShr(m_Reg(ShiftSrc), m_ICst(LSBImm))),
4153                        m_ICst(AndImm))))
4154     return false;
4155 
4156   // The mask is a mask of the low bits iff imm & (imm+1) == 0.
4157   auto MaybeMask = static_cast<uint64_t>(AndImm);
4158   if (MaybeMask & (MaybeMask + 1))
4159     return false;
4160 
4161   // LSB must fit within the register.
4162   if (static_cast<uint64_t>(LSBImm) >= Size)
4163     return false;
4164 
4165   uint64_t Width = APInt(Size, AndImm).countr_one();
4166   MatchInfo = [=](MachineIRBuilder &B) {
4167     auto WidthCst = B.buildConstant(ExtractTy, Width);
4168     auto LSBCst = B.buildConstant(ExtractTy, LSBImm);
4169     B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {ShiftSrc, LSBCst, WidthCst});
4170   };
4171   return true;
4172 }
4173 
4174 bool CombinerHelper::matchBitfieldExtractFromShr(
4175     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4176   const unsigned Opcode = MI.getOpcode();
4177   assert(Opcode == TargetOpcode::G_ASHR || Opcode == TargetOpcode::G_LSHR);
4178 
4179   const Register Dst = MI.getOperand(0).getReg();
4180 
4181   const unsigned ExtrOpcode = Opcode == TargetOpcode::G_ASHR
4182                                   ? TargetOpcode::G_SBFX
4183                                   : TargetOpcode::G_UBFX;
4184 
4185   // Check if the type we would use for the extract is legal
4186   LLT Ty = MRI.getType(Dst);
4187   LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4188   if (!LI || !LI->isLegalOrCustom({ExtrOpcode, {Ty, ExtractTy}}))
4189     return false;
4190 
4191   Register ShlSrc;
4192   int64_t ShrAmt;
4193   int64_t ShlAmt;
4194   const unsigned Size = Ty.getScalarSizeInBits();
4195 
4196   // Try to match shr (shl x, c1), c2
4197   if (!mi_match(Dst, MRI,
4198                 m_BinOp(Opcode,
4199                         m_OneNonDBGUse(m_GShl(m_Reg(ShlSrc), m_ICst(ShlAmt))),
4200                         m_ICst(ShrAmt))))
4201     return false;
4202 
4203   // Make sure that the shift sizes can fit a bitfield extract
4204   if (ShlAmt < 0 || ShlAmt > ShrAmt || ShrAmt >= Size)
4205     return false;
4206 
4207   // Skip this combine if the G_SEXT_INREG combine could handle it
4208   if (Opcode == TargetOpcode::G_ASHR && ShlAmt == ShrAmt)
4209     return false;
4210 
4211   // Calculate start position and width of the extract
4212   const int64_t Pos = ShrAmt - ShlAmt;
4213   const int64_t Width = Size - ShrAmt;
4214 
4215   MatchInfo = [=](MachineIRBuilder &B) {
4216     auto WidthCst = B.buildConstant(ExtractTy, Width);
4217     auto PosCst = B.buildConstant(ExtractTy, Pos);
4218     B.buildInstr(ExtrOpcode, {Dst}, {ShlSrc, PosCst, WidthCst});
4219   };
4220   return true;
4221 }
4222 
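/// Form a G_UBFX from "shr (and x, mask), c", where the mask together with the
/// shifted-out low bits has no holes. E.g. on s32 (illustrative):
///   (lshr (and x, 0xff0), 4) -> (ubfx x, 4, 8)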
4223 bool CombinerHelper::matchBitfieldExtractFromShrAnd(
4224     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4225   const unsigned Opcode = MI.getOpcode();
4226   assert(Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_ASHR);
4227 
4228   const Register Dst = MI.getOperand(0).getReg();
4229   LLT Ty = MRI.getType(Dst);
4230   LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4231   if (!getTargetLowering().isConstantUnsignedBitfieldExtractLegal(
4232           TargetOpcode::G_UBFX, Ty, ExtractTy))
4233     return false;
4234 
4235   // Try to match shr (and x, c1), c2
4236   Register AndSrc;
4237   int64_t ShrAmt;
4238   int64_t SMask;
4239   if (!mi_match(Dst, MRI,
4240                 m_BinOp(Opcode,
4241                         m_OneNonDBGUse(m_GAnd(m_Reg(AndSrc), m_ICst(SMask))),
4242                         m_ICst(ShrAmt))))
4243     return false;
4244 
4245   const unsigned Size = Ty.getScalarSizeInBits();
4246   if (ShrAmt < 0 || ShrAmt >= Size)
4247     return false;
4248 
4249   // If the shift subsumes the mask, emit the 0 directly.
4250   if (0 == (SMask >> ShrAmt)) {
4251     MatchInfo = [=](MachineIRBuilder &B) {
4252       B.buildConstant(Dst, 0);
4253     };
4254     return true;
4255   }
4256 
4257   // Check that ubfx can do the extraction, with no holes in the mask.
4258   uint64_t UMask = SMask;
4259   UMask |= maskTrailingOnes<uint64_t>(ShrAmt);
4260   UMask &= maskTrailingOnes<uint64_t>(Size);
4261   if (!isMask_64(UMask))
4262     return false;
4263 
4264   // Calculate start position and width of the extract.
4265   const int64_t Pos = ShrAmt;
4266   const int64_t Width = llvm::countr_one(UMask) - ShrAmt;
4267 
  // If the mask reaches the sign bit of an ashr, the extract would have to be
  // signed; it's preferable to keep the shift rather than form a G_SBFX.
  // TODO: remove the G_AND via demanded bits analysis.
4270   if (Opcode == TargetOpcode::G_ASHR && Width + ShrAmt == Size)
4271     return false;
4272 
4273   MatchInfo = [=](MachineIRBuilder &B) {
4274     auto WidthCst = B.buildConstant(ExtractTy, Width);
4275     auto PosCst = B.buildConstant(ExtractTy, Pos);
4276     B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {AndSrc, PosCst, WidthCst});
4277   };
4278   return true;
4279 }
4280 
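/// Return true if reassociating the constant offsets feeding \p PtrAdd could
/// move a combined offset out of a legal addressing mode for a dependent load
/// or store. E.g. (illustrative) on a target where only [reg + imm] with
/// imm < 4096 is legal, merging offsets 4095 and 4095 into a single 8190
/// offset would break the fold into the memory operation.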
4281 bool CombinerHelper::reassociationCanBreakAddressingModePattern(
4282     MachineInstr &PtrAdd) {
4283   assert(PtrAdd.getOpcode() == TargetOpcode::G_PTR_ADD);
4284 
4285   Register Src1Reg = PtrAdd.getOperand(1).getReg();
4286   MachineInstr *Src1Def = getOpcodeDef(TargetOpcode::G_PTR_ADD, Src1Reg, MRI);
4287   if (!Src1Def)
4288     return false;
4289 
4290   Register Src2Reg = PtrAdd.getOperand(2).getReg();
4291 
4292   if (MRI.hasOneNonDBGUse(Src1Reg))
4293     return false;
4294 
4295   auto C1 = getIConstantVRegVal(Src1Def->getOperand(2).getReg(), MRI);
4296   if (!C1)
4297     return false;
4298   auto C2 = getIConstantVRegVal(Src2Reg, MRI);
4299   if (!C2)
4300     return false;
4301 
4302   const APInt &C1APIntVal = *C1;
4303   const APInt &C2APIntVal = *C2;
4304   const int64_t CombinedValue = (C1APIntVal + C2APIntVal).getSExtValue();
4305 
4306   for (auto &UseMI : MRI.use_nodbg_instructions(Src1Reg)) {
4307     // This combine may end up running before ptrtoint/inttoptr combines
4308     // manage to eliminate redundant conversions, so try to look through them.
4309     MachineInstr *ConvUseMI = &UseMI;
4310     unsigned ConvUseOpc = ConvUseMI->getOpcode();
4311     while (ConvUseOpc == TargetOpcode::G_INTTOPTR ||
4312            ConvUseOpc == TargetOpcode::G_PTRTOINT) {
4313       Register DefReg = ConvUseMI->getOperand(0).getReg();
4314       if (!MRI.hasOneNonDBGUse(DefReg))
4315         break;
4316       ConvUseMI = &*MRI.use_instr_nodbg_begin(DefReg);
4317       ConvUseOpc = ConvUseMI->getOpcode();
4318     }
    bool LoadStore = ConvUseOpc == TargetOpcode::G_LOAD ||
                     ConvUseOpc == TargetOpcode::G_STORE;
    if (!LoadStore)
4322       continue;
4323     // Is x[offset2] already not a legal addressing mode? If so then
4324     // reassociating the constants breaks nothing (we test offset2 because
4325     // that's the one we hope to fold into the load or store).
4326     TargetLoweringBase::AddrMode AM;
4327     AM.HasBaseReg = true;
4328     AM.BaseOffs = C2APIntVal.getSExtValue();
4329     unsigned AS =
4330         MRI.getType(ConvUseMI->getOperand(1).getReg()).getAddressSpace();
4331     Type *AccessTy =
4332         getTypeForLLT(MRI.getType(ConvUseMI->getOperand(0).getReg()),
4333                       PtrAdd.getMF()->getFunction().getContext());
4334     const auto &TLI = *PtrAdd.getMF()->getSubtarget().getTargetLowering();
4335     if (!TLI.isLegalAddressingMode(PtrAdd.getMF()->getDataLayout(), AM,
4336                                    AccessTy, AS))
4337       continue;
4338 
4339     // Would x[offset1+offset2] still be a legal addressing mode?
4340     AM.BaseOffs = CombinedValue;
4341     if (!TLI.isLegalAddressingMode(PtrAdd.getMF()->getDataLayout(), AM,
4342                                    AccessTy, AS))
4343       return true;
4344   }
4345 
4346   return false;
4347 }
4348 
4349 bool CombinerHelper::matchReassocConstantInnerRHS(GPtrAdd &MI,
4350                                                   MachineInstr *RHS,
4351                                                   BuildFnTy &MatchInfo) {
4352   // G_PTR_ADD(BASE, G_ADD(X, C)) -> G_PTR_ADD(G_PTR_ADD(BASE, X), C)
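  // This exposes the constant C so that a dependent load/store can fold it as
  // an immediate offset, e.g. (p + (i + 16)) -> ((p + i) + 16) (illustrative).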
4353   Register Src1Reg = MI.getOperand(1).getReg();
4354   if (RHS->getOpcode() != TargetOpcode::G_ADD)
4355     return false;
4356   auto C2 = getIConstantVRegVal(RHS->getOperand(2).getReg(), MRI);
4357   if (!C2)
4358     return false;
4359 
4360   MatchInfo = [=, &MI](MachineIRBuilder &B) {
4361     LLT PtrTy = MRI.getType(MI.getOperand(0).getReg());
4362 
    auto NewBase = B.buildPtrAdd(PtrTy, Src1Reg, RHS->getOperand(1).getReg());
4365     Observer.changingInstr(MI);
4366     MI.getOperand(1).setReg(NewBase.getReg(0));
4367     MI.getOperand(2).setReg(RHS->getOperand(2).getReg());
4368     Observer.changedInstr(MI);
4369   };
4370   return !reassociationCanBreakAddressingModePattern(MI);
4371 }
4372 
4373 bool CombinerHelper::matchReassocConstantInnerLHS(GPtrAdd &MI,
4374                                                   MachineInstr *LHS,
4375                                                   MachineInstr *RHS,
4376                                                   BuildFnTy &MatchInfo) {
  // G_PTR_ADD(G_PTR_ADD(X, C), Y) -> G_PTR_ADD(G_PTR_ADD(X, Y), C)
  // if and only if (G_PTR_ADD X, C) has one use.
4379   Register LHSBase;
4380   std::optional<ValueAndVReg> LHSCstOff;
4381   if (!mi_match(MI.getBaseReg(), MRI,
4382                 m_OneNonDBGUse(m_GPtrAdd(m_Reg(LHSBase), m_GCst(LHSCstOff)))))
4383     return false;
4384 
4385   auto *LHSPtrAdd = cast<GPtrAdd>(LHS);
4386   MatchInfo = [=, &MI](MachineIRBuilder &B) {
    // When we change LHSPtrAdd's offset register we might cause it to use a
    // reg before its def. Sink LHSPtrAdd to just before the outer G_PTR_ADD to
    // ensure this doesn't happen.
4390     LHSPtrAdd->moveBefore(&MI);
4391     Register RHSReg = MI.getOffsetReg();
    // Reusing the matched offset register directly could cause a type mismatch
    // if it came through an extend/trunc, so materialize a fresh constant of
    // the RHS offset's type instead.
4393     auto NewCst = B.buildConstant(MRI.getType(RHSReg), LHSCstOff->Value);
4394     Observer.changingInstr(MI);
4395     MI.getOperand(2).setReg(NewCst.getReg(0));
4396     Observer.changedInstr(MI);
4397     Observer.changingInstr(*LHSPtrAdd);
4398     LHSPtrAdd->getOperand(2).setReg(RHSReg);
4399     Observer.changedInstr(*LHSPtrAdd);
4400   };
4401   return !reassociationCanBreakAddressingModePattern(MI);
4402 }
4403 
4404 bool CombinerHelper::matchReassocFoldConstantsInSubTree(GPtrAdd &MI,
4405                                                         MachineInstr *LHS,
4406                                                         MachineInstr *RHS,
4407                                                         BuildFnTy &MatchInfo) {
4408   // G_PTR_ADD(G_PTR_ADD(BASE, C1), C2) -> G_PTR_ADD(BASE, C1+C2)
4409   auto *LHSPtrAdd = dyn_cast<GPtrAdd>(LHS);
4410   if (!LHSPtrAdd)
4411     return false;
4412 
4413   Register Src2Reg = MI.getOperand(2).getReg();
4414   Register LHSSrc1 = LHSPtrAdd->getBaseReg();
4415   Register LHSSrc2 = LHSPtrAdd->getOffsetReg();
4416   auto C1 = getIConstantVRegVal(LHSSrc2, MRI);
4417   if (!C1)
4418     return false;
4419   auto C2 = getIConstantVRegVal(Src2Reg, MRI);
4420   if (!C2)
4421     return false;
4422 
4423   MatchInfo = [=, &MI](MachineIRBuilder &B) {
4424     auto NewCst = B.buildConstant(MRI.getType(Src2Reg), *C1 + *C2);
4425     Observer.changingInstr(MI);
4426     MI.getOperand(1).setReg(LHSSrc1);
4427     MI.getOperand(2).setReg(NewCst.getReg(0));
4428     Observer.changedInstr(MI);
4429   };
4430   return !reassociationCanBreakAddressingModePattern(MI);
4431 }
4432 
4433 bool CombinerHelper::matchReassocPtrAdd(MachineInstr &MI,
4434                                         BuildFnTy &MatchInfo) {
4435   auto &PtrAdd = cast<GPtrAdd>(MI);
4436   // We're trying to match a few pointer computation patterns here for
4437   // re-association opportunities.
4438   // 1) Isolating a constant operand to be on the RHS, e.g.:
4439   // G_PTR_ADD(BASE, G_ADD(X, C)) -> G_PTR_ADD(G_PTR_ADD(BASE, X), C)
4440   //
4441   // 2) Folding two constants in each sub-tree as long as such folding
4442   // doesn't break a legal addressing mode.
4443   // G_PTR_ADD(G_PTR_ADD(BASE, C1), C2) -> G_PTR_ADD(BASE, C1+C2)
4444   //
4445   // 3) Move a constant from the LHS of an inner op to the RHS of the outer.
  // G_PTR_ADD(G_PTR_ADD(X, C), Y) -> G_PTR_ADD(G_PTR_ADD(X, Y), C)
  // iff (G_PTR_ADD X, C) has one use.
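  // E.g. for pattern 2 (illustrative): (p + 4) + 8 -> p + 12, provided the
  // combined offset still fits any dependent memory op's addressing mode.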
4448   MachineInstr *LHS = MRI.getVRegDef(PtrAdd.getBaseReg());
4449   MachineInstr *RHS = MRI.getVRegDef(PtrAdd.getOffsetReg());
4450 
4451   // Try to match example 2.
4452   if (matchReassocFoldConstantsInSubTree(PtrAdd, LHS, RHS, MatchInfo))
4453     return true;
4454 
4455   // Try to match example 3.
4456   if (matchReassocConstantInnerLHS(PtrAdd, LHS, RHS, MatchInfo))
4457     return true;
4458 
4459   // Try to match example 1.
4460   if (matchReassocConstantInnerRHS(PtrAdd, RHS, MatchInfo))
4461     return true;
4462 
4463   return false;
4464 }

bool CombinerHelper::tryReassocBinOp(unsigned Opc, Register DstReg,
4466                                      Register OpLHS, Register OpRHS,
4467                                      BuildFnTy &MatchInfo) {
4468   LLT OpRHSTy = MRI.getType(OpRHS);
4469   MachineInstr *OpLHSDef = MRI.getVRegDef(OpLHS);
4470 
4471   if (OpLHSDef->getOpcode() != Opc)
4472     return false;
4473 
4474   MachineInstr *OpRHSDef = MRI.getVRegDef(OpRHS);
4475   Register OpLHSLHS = OpLHSDef->getOperand(1).getReg();
4476   Register OpLHSRHS = OpLHSDef->getOperand(2).getReg();
4477 
4478   // If the inner op is (X op C), pull the constant out so it can be folded with
4479   // other constants in the expression tree. Folding is not guaranteed so we
4480   // might have (C1 op C2). In that case do not pull a constant out because it
4481   // won't help and can lead to infinite loops.
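  // E.g. (illustrative): (add (add x, 5), 7) -> (add x, (add 5, 7)), which a
  // later constant fold turns into (add x, 12).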
4482   if (isConstantOrConstantSplatVector(*MRI.getVRegDef(OpLHSRHS), MRI) &&
4483       !isConstantOrConstantSplatVector(*MRI.getVRegDef(OpLHSLHS), MRI)) {
4484     if (isConstantOrConstantSplatVector(*OpRHSDef, MRI)) {
4485       // (Opc (Opc X, C1), C2) -> (Opc X, (Opc C1, C2))
4486       MatchInfo = [=](MachineIRBuilder &B) {
4487         auto NewCst = B.buildInstr(Opc, {OpRHSTy}, {OpLHSRHS, OpRHS});
4488         B.buildInstr(Opc, {DstReg}, {OpLHSLHS, NewCst});
4489       };
4490       return true;
4491     }
4492     if (getTargetLowering().isReassocProfitable(MRI, OpLHS, OpRHS)) {
4493       // Reassociate: (op (op x, c1), y) -> (op (op x, y), c1)
4494       //              iff (op x, c1) has one use
4495       MatchInfo = [=](MachineIRBuilder &B) {
4496         auto NewLHSLHS = B.buildInstr(Opc, {OpRHSTy}, {OpLHSLHS, OpRHS});
4497         B.buildInstr(Opc, {DstReg}, {NewLHSLHS, OpLHSRHS});
4498       };
4499       return true;
4500     }
4501   }
4502 
4503   return false;
4504 }
4505 
4506 bool CombinerHelper::matchReassocCommBinOp(MachineInstr &MI,
4507                                            BuildFnTy &MatchInfo) {
4508   // We don't check if the reassociation will break a legal addressing mode
4509   // here since pointer arithmetic is handled by G_PTR_ADD.
4510   unsigned Opc = MI.getOpcode();
4511   Register DstReg = MI.getOperand(0).getReg();
4512   Register LHSReg = MI.getOperand(1).getReg();
4513   Register RHSReg = MI.getOperand(2).getReg();
4514 
4515   if (tryReassocBinOp(Opc, DstReg, LHSReg, RHSReg, MatchInfo))
4516     return true;
4517   if (tryReassocBinOp(Opc, DstReg, RHSReg, LHSReg, MatchInfo))
4518     return true;
4519   return false;
4520 }
4521 
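/// Constant-fold a binary op whose operands are both constants, e.g.
/// (G_ADD 3, 4) -> 7 (illustrative); the folded value is returned in
/// \p MatchInfo.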
4522 bool CombinerHelper::matchConstantFold(MachineInstr &MI, APInt &MatchInfo) {
4523   Register Op1 = MI.getOperand(1).getReg();
4524   Register Op2 = MI.getOperand(2).getReg();
4525   auto MaybeCst = ConstantFoldBinOp(MI.getOpcode(), Op1, Op2, MRI);
4526   if (!MaybeCst)
4527     return false;
4528   MatchInfo = *MaybeCst;
4529   return true;
4530 }
4531 
4532 bool CombinerHelper::matchNarrowBinopFeedingAnd(
4533     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4534   // Look for a binop feeding into an AND with a mask:
4535   //
4536   // %add = G_ADD %lhs, %rhs
4537   // %and = G_AND %add, 000...11111111
4538   //
4539   // Check if it's possible to perform the binop at a narrower width and zext
4540   // back to the original width like so:
4541   //
4542   // %narrow_lhs = G_TRUNC %lhs
4543   // %narrow_rhs = G_TRUNC %rhs
4544   // %narrow_add = G_ADD %narrow_lhs, %narrow_rhs
4545   // %new_add = G_ZEXT %narrow_add
4546   // %and = G_AND %new_add, 000...11111111
4547   //
4548   // This can allow later combines to eliminate the G_AND if it turns out
4549   // that the mask is irrelevant.
4550   assert(MI.getOpcode() == TargetOpcode::G_AND);
4551   Register Dst = MI.getOperand(0).getReg();
4552   Register AndLHS = MI.getOperand(1).getReg();
4553   Register AndRHS = MI.getOperand(2).getReg();
4554   LLT WideTy = MRI.getType(Dst);
4555 
4556   // If the potential binop has more than one use, then it's possible that one
4557   // of those uses will need its full width.
4558   if (!WideTy.isScalar() || !MRI.hasOneNonDBGUse(AndLHS))
4559     return false;
4560 
4561   // Check if the LHS feeding the AND is impacted by the high bits that we're
4562   // masking out.
4563   //
4564   // e.g. for 64-bit x, y:
4565   //
4566   // add_64(x, y) & 65535 == zext(add_16(trunc(x), trunc(y))) & 65535
4567   MachineInstr *LHSInst = getDefIgnoringCopies(AndLHS, MRI);
4568   if (!LHSInst)
4569     return false;
4570   unsigned LHSOpc = LHSInst->getOpcode();
4571   switch (LHSOpc) {
4572   default:
4573     return false;
4574   case TargetOpcode::G_ADD:
4575   case TargetOpcode::G_SUB:
4576   case TargetOpcode::G_MUL:
4577   case TargetOpcode::G_AND:
4578   case TargetOpcode::G_OR:
4579   case TargetOpcode::G_XOR:
4580     break;
4581   }
4582 
4583   // Find the mask on the RHS.
4584   auto Cst = getIConstantVRegValWithLookThrough(AndRHS, MRI);
4585   if (!Cst)
4586     return false;
4587   auto Mask = Cst->Value;
4588   if (!Mask.isMask())
4589     return false;
4590 
4591   // No point in combining if there's nothing to truncate.
4592   unsigned NarrowWidth = Mask.countr_one();
4593   if (NarrowWidth == WideTy.getSizeInBits())
4594     return false;
4595   LLT NarrowTy = LLT::scalar(NarrowWidth);
4596 
4597   // Check if adding the zext + truncates could be harmful.
4598   auto &MF = *MI.getMF();
4599   const auto &TLI = getTargetLowering();
4600   LLVMContext &Ctx = MF.getFunction().getContext();
4601   auto &DL = MF.getDataLayout();
4602   if (!TLI.isTruncateFree(WideTy, NarrowTy, DL, Ctx) ||
4603       !TLI.isZExtFree(NarrowTy, WideTy, DL, Ctx))
4604     return false;
4605   if (!isLegalOrBeforeLegalizer({TargetOpcode::G_TRUNC, {NarrowTy, WideTy}}) ||
4606       !isLegalOrBeforeLegalizer({TargetOpcode::G_ZEXT, {WideTy, NarrowTy}}))
4607     return false;
4608   Register BinOpLHS = LHSInst->getOperand(1).getReg();
4609   Register BinOpRHS = LHSInst->getOperand(2).getReg();
  MatchInfo = [=, &MI](MachineIRBuilder &B) {
    auto NarrowLHS = B.buildTrunc(NarrowTy, BinOpLHS);
    auto NarrowRHS = B.buildTrunc(NarrowTy, BinOpRHS);
    auto NarrowBinOp = B.buildInstr(LHSOpc, {NarrowTy}, {NarrowLHS, NarrowRHS});
    auto Ext = B.buildZExt(WideTy, NarrowBinOp);
4616     Observer.changingInstr(MI);
4617     MI.getOperand(1).setReg(Ext.getReg(0));
4618     Observer.changedInstr(MI);
4619   };
4620   return true;
4621 }
4622 
4623 bool CombinerHelper::matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo) {
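  // (G_*MULO x, 2) -> (G_*ADDO x, x), since x * 2 overflows iff x + x does.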
4624   unsigned Opc = MI.getOpcode();
4625   assert(Opc == TargetOpcode::G_UMULO || Opc == TargetOpcode::G_SMULO);
4626 
4627   if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICstOrSplat(2)))
4628     return false;
4629 
4630   MatchInfo = [=, &MI](MachineIRBuilder &B) {
4631     Observer.changingInstr(MI);
4632     unsigned NewOpc = Opc == TargetOpcode::G_UMULO ? TargetOpcode::G_UADDO
4633                                                    : TargetOpcode::G_SADDO;
4634     MI.setDesc(Builder.getTII().get(NewOpc));
4635     MI.getOperand(3).setReg(MI.getOperand(2).getReg());
4636     Observer.changedInstr(MI);
4637   };
4638   return true;
4639 }
4640 
4641 bool CombinerHelper::matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) {
4642   // (G_*MULO x, 0) -> 0 + no carry out
4643   assert(MI.getOpcode() == TargetOpcode::G_UMULO ||
4644          MI.getOpcode() == TargetOpcode::G_SMULO);
4645   if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICstOrSplat(0)))
4646     return false;
4647   Register Dst = MI.getOperand(0).getReg();
4648   Register Carry = MI.getOperand(1).getReg();
4649   if (!isConstantLegalOrBeforeLegalizer(MRI.getType(Dst)) ||
4650       !isConstantLegalOrBeforeLegalizer(MRI.getType(Carry)))
4651     return false;
4652   MatchInfo = [=](MachineIRBuilder &B) {
4653     B.buildConstant(Dst, 0);
4654     B.buildConstant(Carry, 0);
4655   };
4656   return true;
4657 }
4658 
4659 bool CombinerHelper::matchAddOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) {
4660   // (G_*ADDO x, 0) -> x + no carry out
4661   assert(MI.getOpcode() == TargetOpcode::G_UADDO ||
4662          MI.getOpcode() == TargetOpcode::G_SADDO);
4663   if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICstOrSplat(0)))
4664     return false;
4665   Register Carry = MI.getOperand(1).getReg();
4666   if (!isConstantLegalOrBeforeLegalizer(MRI.getType(Carry)))
4667     return false;
4668   Register Dst = MI.getOperand(0).getReg();
4669   Register LHS = MI.getOperand(2).getReg();
4670   MatchInfo = [=](MachineIRBuilder &B) {
4671     B.buildCopy(Dst, LHS);
4672     B.buildConstant(Carry, 0);
4673   };
4674   return true;
4675 }
4676 
4677 bool CombinerHelper::matchAddEToAddO(MachineInstr &MI, BuildFnTy &MatchInfo) {
4678   // (G_*ADDE x, y, 0) -> (G_*ADDO x, y)
4679   // (G_*SUBE x, y, 0) -> (G_*SUBO x, y)
4680   assert(MI.getOpcode() == TargetOpcode::G_UADDE ||
4681          MI.getOpcode() == TargetOpcode::G_SADDE ||
4682          MI.getOpcode() == TargetOpcode::G_USUBE ||
4683          MI.getOpcode() == TargetOpcode::G_SSUBE);
4684   if (!mi_match(MI.getOperand(4).getReg(), MRI, m_SpecificICstOrSplat(0)))
4685     return false;
4686   MatchInfo = [&](MachineIRBuilder &B) {
4687     unsigned NewOpcode;
4688     switch (MI.getOpcode()) {
4689     case TargetOpcode::G_UADDE:
4690       NewOpcode = TargetOpcode::G_UADDO;
4691       break;
4692     case TargetOpcode::G_SADDE:
4693       NewOpcode = TargetOpcode::G_SADDO;
4694       break;
4695     case TargetOpcode::G_USUBE:
4696       NewOpcode = TargetOpcode::G_USUBO;
4697       break;
4698     case TargetOpcode::G_SSUBE:
4699       NewOpcode = TargetOpcode::G_SSUBO;
4700       break;
4701     }
4702     Observer.changingInstr(MI);
4703     MI.setDesc(B.getTII().get(NewOpcode));
4704     MI.removeOperand(4);
4705     Observer.changedInstr(MI);
4706   };
4707   return true;
4708 }
4709 
4710 bool CombinerHelper::matchSubAddSameReg(MachineInstr &MI,
4711                                         BuildFnTy &MatchInfo) {
4712   assert(MI.getOpcode() == TargetOpcode::G_SUB);
4713   Register Dst = MI.getOperand(0).getReg();
4714   // (x + y) - z -> x (if y == z)
4715   // (x + y) - z -> y (if x == z)
4716   Register X, Y, Z;
4717   if (mi_match(Dst, MRI, m_GSub(m_GAdd(m_Reg(X), m_Reg(Y)), m_Reg(Z)))) {
4718     Register ReplaceReg;
4719     int64_t CstX, CstY;
4720     if (Y == Z || (mi_match(Y, MRI, m_ICstOrSplat(CstY)) &&
4721                    mi_match(Z, MRI, m_SpecificICstOrSplat(CstY))))
4722       ReplaceReg = X;
4723     else if (X == Z || (mi_match(X, MRI, m_ICstOrSplat(CstX)) &&
4724                         mi_match(Z, MRI, m_SpecificICstOrSplat(CstX))))
4725       ReplaceReg = Y;
4726     if (ReplaceReg) {
4727       MatchInfo = [=](MachineIRBuilder &B) { B.buildCopy(Dst, ReplaceReg); };
4728       return true;
4729     }
4730   }
4731 
4732   // x - (y + z) -> 0 - y (if x == z)
4733   // x - (y + z) -> 0 - z (if x == y)
4734   if (mi_match(Dst, MRI, m_GSub(m_Reg(X), m_GAdd(m_Reg(Y), m_Reg(Z))))) {
4735     Register ReplaceReg;
4736     int64_t CstX;
4737     if (X == Z || (mi_match(X, MRI, m_ICstOrSplat(CstX)) &&
4738                    mi_match(Z, MRI, m_SpecificICstOrSplat(CstX))))
4739       ReplaceReg = Y;
4740     else if (X == Y || (mi_match(X, MRI, m_ICstOrSplat(CstX)) &&
4741                         mi_match(Y, MRI, m_SpecificICstOrSplat(CstX))))
4742       ReplaceReg = Z;
4743     if (ReplaceReg) {
4744       MatchInfo = [=](MachineIRBuilder &B) {
4745         auto Zero = B.buildConstant(MRI.getType(Dst), 0);
4746         B.buildSub(Dst, Zero, ReplaceReg);
4747       };
4748       return true;
4749     }
4750   }
4751   return false;
4752 }
4753 
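/// Expand an unsigned divide by constant into a multiply by a magic number
/// (Granlund/Montgomery). E.g. for a 32-bit udiv by 7 this builds, roughly
/// (illustrative):
///   %q = G_UMULH %n, 0x24924925
///   %t = ((%n - %q) >> 1) + %q     (the NPQ fixup path)
///   %d = %t >> 2
/// plus a final select so a divisor of 1 returns the numerator unchanged.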
4754 MachineInstr *CombinerHelper::buildUDivUsingMul(MachineInstr &MI) {
4755   assert(MI.getOpcode() == TargetOpcode::G_UDIV);
4756   auto &UDiv = cast<GenericMachineInstr>(MI);
4757   Register Dst = UDiv.getReg(0);
4758   Register LHS = UDiv.getReg(1);
4759   Register RHS = UDiv.getReg(2);
4760   LLT Ty = MRI.getType(Dst);
4761   LLT ScalarTy = Ty.getScalarType();
4762   const unsigned EltBits = ScalarTy.getScalarSizeInBits();
4763   LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4764   LLT ScalarShiftAmtTy = ShiftAmtTy.getScalarType();
4765   auto &MIB = Builder;
4766   MIB.setInstrAndDebugLoc(MI);
4767 
4768   bool UseNPQ = false;
4769   SmallVector<Register, 16> PreShifts, PostShifts, MagicFactors, NPQFactors;
4770 
4771   auto BuildUDIVPattern = [&](const Constant *C) {
4772     auto *CI = cast<ConstantInt>(C);
4773     const APInt &Divisor = CI->getValue();
4774 
4775     bool SelNPQ = false;
4776     APInt Magic(Divisor.getBitWidth(), 0);
4777     unsigned PreShift = 0, PostShift = 0;
4778 
4779     // Magic algorithm doesn't work for division by 1. We need to emit a select
4780     // at the end.
4781     // TODO: Use undef values for divisor of 1.
4782     if (!Divisor.isOne()) {
4783       UnsignedDivisionByConstantInfo magics =
4784           UnsignedDivisionByConstantInfo::get(Divisor);
4785 
4786       Magic = std::move(magics.Magic);
4787 
4788       assert(magics.PreShift < Divisor.getBitWidth() &&
4789              "We shouldn't generate an undefined shift!");
4790       assert(magics.PostShift < Divisor.getBitWidth() &&
4791              "We shouldn't generate an undefined shift!");
4792       assert((!magics.IsAdd || magics.PreShift == 0) && "Unexpected pre-shift");
4793       PreShift = magics.PreShift;
4794       PostShift = magics.PostShift;
4795       SelNPQ = magics.IsAdd;
4796     }
4797 
4798     PreShifts.push_back(
4799         MIB.buildConstant(ScalarShiftAmtTy, PreShift).getReg(0));
4800     MagicFactors.push_back(MIB.buildConstant(ScalarTy, Magic).getReg(0));
4801     NPQFactors.push_back(
4802         MIB.buildConstant(ScalarTy,
4803                           SelNPQ ? APInt::getOneBitSet(EltBits, EltBits - 1)
4804                                  : APInt::getZero(EltBits))
4805             .getReg(0));
4806     PostShifts.push_back(
4807         MIB.buildConstant(ScalarShiftAmtTy, PostShift).getReg(0));
4808     UseNPQ |= SelNPQ;
4809     return true;
4810   };
4811 
4812   // Collect the shifts/magic values from each element.
4813   bool Matched = matchUnaryPredicate(MRI, RHS, BuildUDIVPattern);
4814   (void)Matched;
4815   assert(Matched && "Expected unary predicate match to succeed");
4816 
4817   Register PreShift, PostShift, MagicFactor, NPQFactor;
4818   auto *RHSDef = getOpcodeDef<GBuildVector>(RHS, MRI);
4819   if (RHSDef) {
4820     PreShift = MIB.buildBuildVector(ShiftAmtTy, PreShifts).getReg(0);
4821     MagicFactor = MIB.buildBuildVector(Ty, MagicFactors).getReg(0);
4822     NPQFactor = MIB.buildBuildVector(Ty, NPQFactors).getReg(0);
4823     PostShift = MIB.buildBuildVector(ShiftAmtTy, PostShifts).getReg(0);
4824   } else {
4825     assert(MRI.getType(RHS).isScalar() &&
4826            "Non-build_vector operation should have been a scalar");
4827     PreShift = PreShifts[0];
4828     MagicFactor = MagicFactors[0];
4829     PostShift = PostShifts[0];
4830   }
4831 
4832   Register Q = LHS;
4833   Q = MIB.buildLShr(Ty, Q, PreShift).getReg(0);
4834 
4835   // Multiply the numerator (operand 0) by the magic value.
4836   Q = MIB.buildUMulH(Ty, Q, MagicFactor).getReg(0);
4837 
4838   if (UseNPQ) {
4839     Register NPQ = MIB.buildSub(Ty, LHS, Q).getReg(0);
4840 
4841     // For vectors we might have a mix of non-NPQ/NPQ paths, so use
4842     // G_UMULH to act as a SRL-by-1 for NPQ, else multiply by zero.
4843     if (Ty.isVector())
4844       NPQ = MIB.buildUMulH(Ty, NPQ, NPQFactor).getReg(0);
4845     else
4846       NPQ = MIB.buildLShr(Ty, NPQ, MIB.buildConstant(ShiftAmtTy, 1)).getReg(0);
4847 
4848     Q = MIB.buildAdd(Ty, NPQ, Q).getReg(0);
4849   }
4850 
4851   Q = MIB.buildLShr(Ty, Q, PostShift).getReg(0);
4852   auto One = MIB.buildConstant(Ty, 1);
4853   auto IsOne = MIB.buildICmp(
4854       CmpInst::Predicate::ICMP_EQ,
4855       Ty.isScalar() ? LLT::scalar(1) : Ty.changeElementSize(1), RHS, One);
4856   return MIB.buildSelect(Ty, IsOne, LHS, Q);
4857 }
4858 
4859 bool CombinerHelper::matchUDivByConst(MachineInstr &MI) {
4860   assert(MI.getOpcode() == TargetOpcode::G_UDIV);
4861   Register Dst = MI.getOperand(0).getReg();
4862   Register RHS = MI.getOperand(2).getReg();
4863   LLT DstTy = MRI.getType(Dst);
4864   auto *RHSDef = MRI.getVRegDef(RHS);
4865   if (!isConstantOrConstantVector(*RHSDef, MRI))
4866     return false;
4867 
4868   auto &MF = *MI.getMF();
4869   AttributeList Attr = MF.getFunction().getAttributes();
4870   const auto &TLI = getTargetLowering();
4871   LLVMContext &Ctx = MF.getFunction().getContext();
4872   auto &DL = MF.getDataLayout();
4873   if (TLI.isIntDivCheap(getApproximateEVTForLLT(DstTy, DL, Ctx), Attr))
4874     return false;
4875 
4876   // Don't do this for minsize because the instruction sequence is usually
4877   // larger.
4878   if (MF.getFunction().hasMinSize())
4879     return false;
4880 
4881   // Don't do this if the types are not going to be legal.
4882   if (LI) {
4883     if (!isLegalOrBeforeLegalizer({TargetOpcode::G_MUL, {DstTy, DstTy}}))
4884       return false;
4885     if (!isLegalOrBeforeLegalizer({TargetOpcode::G_UMULH, {DstTy}}))
4886       return false;
4887     if (!isLegalOrBeforeLegalizer(
4888             {TargetOpcode::G_ICMP,
4889              {DstTy.isVector() ? DstTy.changeElementSize(1) : LLT::scalar(1),
4890               DstTy}}))
4891       return false;
4892   }
4893 
4894   auto CheckEltValue = [&](const Constant *C) {
4895     if (auto *CI = dyn_cast_or_null<ConstantInt>(C))
4896       return !CI->isZero();
4897     return false;
4898   };
4899   return matchUnaryPredicate(MRI, RHS, CheckEltValue);
4900 }
4901 
4902 void CombinerHelper::applyUDivByConst(MachineInstr &MI) {
4903   auto *NewMI = buildUDivUsingMul(MI);
4904   replaceSingleDefInstWithReg(MI, NewMI->getOperand(0).getReg());
4905 }
4906 
4907 bool CombinerHelper::matchSDivByConst(MachineInstr &MI) {
4908   assert(MI.getOpcode() == TargetOpcode::G_SDIV && "Expected SDIV");
4909   Register Dst = MI.getOperand(0).getReg();
4910   Register RHS = MI.getOperand(2).getReg();
4911   LLT DstTy = MRI.getType(Dst);
4912 
4913   auto &MF = *MI.getMF();
4914   AttributeList Attr = MF.getFunction().getAttributes();
4915   const auto &TLI = getTargetLowering();
4916   LLVMContext &Ctx = MF.getFunction().getContext();
4917   auto &DL = MF.getDataLayout();
4918   if (TLI.isIntDivCheap(getApproximateEVTForLLT(DstTy, DL, Ctx), Attr))
4919     return false;
4920 
4921   // Don't do this for minsize because the instruction sequence is usually
4922   // larger.
4923   if (MF.getFunction().hasMinSize())
4924     return false;
4925 
4926   // If the sdiv has an 'exact' flag we can use a simpler lowering.
4927   if (MI.getFlag(MachineInstr::MIFlag::IsExact)) {
4928     return matchUnaryPredicate(
4929         MRI, RHS, [](const Constant *C) { return C && !C->isZeroValue(); });
4930   }
4931 
4932   // Don't support the general case for now.
4933   return false;
4934 }
4935 
4936 void CombinerHelper::applySDivByConst(MachineInstr &MI) {
4937   auto *NewMI = buildSDivUsingMul(MI);
4938   replaceSingleDefInstWithReg(MI, NewMI->getOperand(0).getReg());
4939 }
4940 
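/// Expand an exact signed divide by constant: shift out the divisor's trailing
/// zeros, then multiply by its multiplicative inverse modulo 2^W. E.g. on s32
/// (illustrative): (sdiv exact x, 6) -> (mul (ashr exact x, 1), 0xaaaaaaab),
/// where 0xaaaaaaab * 3 == 1 (mod 2^32).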
4941 MachineInstr *CombinerHelper::buildSDivUsingMul(MachineInstr &MI) {
4942   assert(MI.getOpcode() == TargetOpcode::G_SDIV && "Expected SDIV");
4943   auto &SDiv = cast<GenericMachineInstr>(MI);
4944   Register Dst = SDiv.getReg(0);
4945   Register LHS = SDiv.getReg(1);
4946   Register RHS = SDiv.getReg(2);
4947   LLT Ty = MRI.getType(Dst);
4948   LLT ScalarTy = Ty.getScalarType();
4949   LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4950   LLT ScalarShiftAmtTy = ShiftAmtTy.getScalarType();
4951   auto &MIB = Builder;
4952   MIB.setInstrAndDebugLoc(MI);
4953 
4954   bool UseSRA = false;
4955   SmallVector<Register, 16> Shifts, Factors;
4956 
4957   auto *RHSDef = cast<GenericMachineInstr>(getDefIgnoringCopies(RHS, MRI));
4958   bool IsSplat = getIConstantSplatVal(*RHSDef, MRI).has_value();
4959 
4960   auto BuildSDIVPattern = [&](const Constant *C) {
4961     // Don't recompute inverses for each splat element.
4962     if (IsSplat && !Factors.empty()) {
4963       Shifts.push_back(Shifts[0]);
4964       Factors.push_back(Factors[0]);
4965       return true;
4966     }
4967 
4968     auto *CI = cast<ConstantInt>(C);
4969     APInt Divisor = CI->getValue();
4970     unsigned Shift = Divisor.countr_zero();
4971     if (Shift) {
4972       Divisor.ashrInPlace(Shift);
4973       UseSRA = true;
4974     }
4975 
4976     // Calculate the multiplicative inverse modulo BW.
4977     // 2^W requires W + 1 bits, so we have to extend and then truncate.
4978     unsigned W = Divisor.getBitWidth();
4979     APInt Factor = Divisor.zext(W + 1)
4980                        .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
4981                        .trunc(W);
4982     Shifts.push_back(MIB.buildConstant(ScalarShiftAmtTy, Shift).getReg(0));
4983     Factors.push_back(MIB.buildConstant(ScalarTy, Factor).getReg(0));
4984     return true;
4985   };
4986 
4987   // Collect all magic values from the build vector.
4988   bool Matched = matchUnaryPredicate(MRI, RHS, BuildSDIVPattern);
4989   (void)Matched;
4990   assert(Matched && "Expected unary predicate match to succeed");
4991 
4992   Register Shift, Factor;
4993   if (Ty.isVector()) {
4994     Shift = MIB.buildBuildVector(ShiftAmtTy, Shifts).getReg(0);
4995     Factor = MIB.buildBuildVector(Ty, Factors).getReg(0);
4996   } else {
4997     Shift = Shifts[0];
4998     Factor = Factors[0];
4999   }
5000 
5001   Register Res = LHS;
5002 
5003   if (UseSRA)
5004     Res = MIB.buildAShr(Ty, Res, Shift, MachineInstr::IsExact).getReg(0);
5005 
5006   return MIB.buildMul(Ty, Res, Factor);
5007 }
5008 
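/// Match (G_UMULH x, 2^k) with k > 0 so it can be rewritten as a right shift,
/// e.g. on s32 (illustrative): umulh(x, 8) == (x * 8) >> 32 == x >> 29.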
5009 bool CombinerHelper::matchUMulHToLShr(MachineInstr &MI) {
5010   assert(MI.getOpcode() == TargetOpcode::G_UMULH);
5011   Register RHS = MI.getOperand(2).getReg();
5012   Register Dst = MI.getOperand(0).getReg();
5013   LLT Ty = MRI.getType(Dst);
5014   LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
5015   auto MatchPow2ExceptOne = [&](const Constant *C) {
5016     if (auto *CI = dyn_cast<ConstantInt>(C))
5017       return CI->getValue().isPowerOf2() && !CI->getValue().isOne();
5018     return false;
5019   };
5020   if (!matchUnaryPredicate(MRI, RHS, MatchPow2ExceptOne, false))
5021     return false;
5022   return isLegalOrBeforeLegalizer({TargetOpcode::G_LSHR, {Ty, ShiftAmtTy}});
5023 }
5024 
5025 void CombinerHelper::applyUMulHToLShr(MachineInstr &MI) {
5026   Register LHS = MI.getOperand(1).getReg();
5027   Register RHS = MI.getOperand(2).getReg();
5028   Register Dst = MI.getOperand(0).getReg();
5029   LLT Ty = MRI.getType(Dst);
5030   LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
5031   unsigned NumEltBits = Ty.getScalarSizeInBits();
5032 
5033   Builder.setInstrAndDebugLoc(MI);
5034   auto LogBase2 = buildLogBase2(RHS, Builder);
5035   auto ShiftAmt =
5036       Builder.buildSub(Ty, Builder.buildConstant(Ty, NumEltBits), LogBase2);
5037   auto Trunc = Builder.buildZExtOrTrunc(ShiftAmtTy, ShiftAmt);
5038   Builder.buildLShr(Dst, LHS, Trunc);
5039   MI.eraseFromParent();
5040 }
5041 
5042 bool CombinerHelper::matchRedundantNegOperands(MachineInstr &MI,
5043                                                BuildFnTy &MatchInfo) {
5044   unsigned Opc = MI.getOpcode();
5045   assert(Opc == TargetOpcode::G_FADD || Opc == TargetOpcode::G_FSUB ||
5046          Opc == TargetOpcode::G_FMUL || Opc == TargetOpcode::G_FDIV ||
5047          Opc == TargetOpcode::G_FMAD || Opc == TargetOpcode::G_FMA);
5048 
5049   Register Dst = MI.getOperand(0).getReg();
5050   Register X = MI.getOperand(1).getReg();
5051   Register Y = MI.getOperand(2).getReg();
5052   LLT Type = MRI.getType(Dst);
5053 
5054   // fold (fadd x, fneg(y)) -> (fsub x, y)
5055   // fold (fadd fneg(y), x) -> (fsub x, y)
  // G_FADD is commutative so both cases are checked by m_GFAdd.
5057   if (mi_match(Dst, MRI, m_GFAdd(m_Reg(X), m_GFNeg(m_Reg(Y)))) &&
5058       isLegalOrBeforeLegalizer({TargetOpcode::G_FSUB, {Type}})) {
5059     Opc = TargetOpcode::G_FSUB;
5060   }
  // fold (fsub x, fneg(y)) -> (fadd x, y)
5062   else if (mi_match(Dst, MRI, m_GFSub(m_Reg(X), m_GFNeg(m_Reg(Y)))) &&
5063            isLegalOrBeforeLegalizer({TargetOpcode::G_FADD, {Type}})) {
5064     Opc = TargetOpcode::G_FADD;
5065   }
5066   // fold (fmul fneg(x), fneg(y)) -> (fmul x, y)
5067   // fold (fdiv fneg(x), fneg(y)) -> (fdiv x, y)
5068   // fold (fmad fneg(x), fneg(y), z) -> (fmad x, y, z)
5069   // fold (fma fneg(x), fneg(y), z) -> (fma x, y, z)
5070   else if ((Opc == TargetOpcode::G_FMUL || Opc == TargetOpcode::G_FDIV ||
5071             Opc == TargetOpcode::G_FMAD || Opc == TargetOpcode::G_FMA) &&
5072            mi_match(X, MRI, m_GFNeg(m_Reg(X))) &&
5073            mi_match(Y, MRI, m_GFNeg(m_Reg(Y)))) {
5074     // no opcode change
5075   } else
5076     return false;
5077 
5078   MatchInfo = [=, &MI](MachineIRBuilder &B) {
5079     Observer.changingInstr(MI);
5080     MI.setDesc(B.getTII().get(Opc));
5081     MI.getOperand(1).setReg(X);
5082     MI.getOperand(2).setReg(Y);
5083     Observer.changedInstr(MI);
5084   };
5085   return true;
5086 }
5087 
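/// Match (fsub +-0.0, x) so it can be replaced by (fneg (fcanonicalize x)).
/// -0.0 is always allowed; +0.0 only with the nsz flag, since
/// fsub(+0.0, +0.0) is +0.0 while fneg(+0.0) is -0.0.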
5088 bool CombinerHelper::matchFsubToFneg(MachineInstr &MI, Register &MatchInfo) {
5089   assert(MI.getOpcode() == TargetOpcode::G_FSUB);
5090 
5091   Register LHS = MI.getOperand(1).getReg();
5092   MatchInfo = MI.getOperand(2).getReg();
5093   LLT Ty = MRI.getType(MI.getOperand(0).getReg());
5094 
5095   const auto LHSCst = Ty.isVector()
5096                           ? getFConstantSplat(LHS, MRI, /* allowUndef */ true)
5097                           : getFConstantVRegValWithLookThrough(LHS, MRI);
5098   if (!LHSCst)
5099     return false;
5100 
5101   // -0.0 is always allowed
5102   if (LHSCst->Value.isNegZero())
5103     return true;
5104 
5105   // +0.0 is only allowed if nsz is set.
5106   if (LHSCst->Value.isPosZero())
5107     return MI.getFlag(MachineInstr::FmNsz);
5108 
5109   return false;
5110 }
5111 
5112 void CombinerHelper::applyFsubToFneg(MachineInstr &MI, Register &MatchInfo) {
5113   Builder.setInstrAndDebugLoc(MI);
5114   Register Dst = MI.getOperand(0).getReg();
5115   Builder.buildFNeg(
5116       Dst, Builder.buildFCanonicalize(MRI.getType(Dst), MatchInfo).getReg(0));
5117   eraseInst(MI);
5118 }
5119 
5120 /// Checks if \p MI is TargetOpcode::G_FMUL and contractable either
5121 /// due to global flags or MachineInstr flags.
5122 static bool isContractableFMul(MachineInstr &MI, bool AllowFusionGlobally) {
5123   if (MI.getOpcode() != TargetOpcode::G_FMUL)
5124     return false;
5125   return AllowFusionGlobally || MI.getFlag(MachineInstr::MIFlag::FmContract);
5126 }
5127 
5128 static bool hasMoreUses(const MachineInstr &MI0, const MachineInstr &MI1,
5129                         const MachineRegisterInfo &MRI) {
5130   return std::distance(MRI.use_instr_nodbg_begin(MI0.getOperand(0).getReg()),
5131                        MRI.use_instr_nodbg_end()) >
5132          std::distance(MRI.use_instr_nodbg_begin(MI1.getOperand(0).getReg()),
5133                        MRI.use_instr_nodbg_end());
5134 }
5135 
5136 bool CombinerHelper::canCombineFMadOrFMA(MachineInstr &MI,
5137                                          bool &AllowFusionGlobally,
5138                                          bool &HasFMAD, bool &Aggressive,
                                         bool CanReassociate) {
  auto *MF = MI.getMF();
5142   const auto &TLI = *MF->getSubtarget().getTargetLowering();
5143   const TargetOptions &Options = MF->getTarget().Options;
5144   LLT DstType = MRI.getType(MI.getOperand(0).getReg());
5145 
5146   if (CanReassociate &&
5147       !(Options.UnsafeFPMath || MI.getFlag(MachineInstr::MIFlag::FmReassoc)))
5148     return false;
5149 
5150   // Floating-point multiply-add with intermediate rounding.
5151   HasFMAD = (!isPreLegalize() && TLI.isFMADLegal(MI, DstType));
5152   // Floating-point multiply-add without intermediate rounding.
5153   bool HasFMA = TLI.isFMAFasterThanFMulAndFAdd(*MF, DstType) &&
5154                 isLegalOrBeforeLegalizer({TargetOpcode::G_FMA, {DstType}});
5155   // No valid opcode, do not combine.
5156   if (!HasFMAD && !HasFMA)
5157     return false;
5158 
5159   AllowFusionGlobally = Options.AllowFPOpFusion == FPOpFusion::Fast ||
5160                         Options.UnsafeFPMath || HasFMAD;
5161   // If the addition is not contractable, do not combine.
5162   if (!AllowFusionGlobally && !MI.getFlag(MachineInstr::MIFlag::FmContract))
5163     return false;
5164 
5165   Aggressive = TLI.enableAggressiveFMAFusion(DstType);
5166   return true;
5167 }
5168 
5169 bool CombinerHelper::matchCombineFAddFMulToFMadOrFMA(
5170     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
5171   assert(MI.getOpcode() == TargetOpcode::G_FADD);
5172 
5173   bool AllowFusionGlobally, HasFMAD, Aggressive;
5174   if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
5175     return false;
5176 
5177   Register Op1 = MI.getOperand(1).getReg();
5178   Register Op2 = MI.getOperand(2).getReg();
5179   DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
5180   DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
5181   unsigned PreferredFusedOpcode =
5182       HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5183 
5184   // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
5185   // prefer to fold the multiply with fewer uses.
5186   if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
5187       isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
5188     if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
5189       std::swap(LHS, RHS);
5190   }
5191 
5192   // fold (fadd (fmul x, y), z) -> (fma x, y, z)
5193   if (isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
5194       (Aggressive || MRI.hasOneNonDBGUse(LHS.Reg))) {
5195     MatchInfo = [=, &MI](MachineIRBuilder &B) {
5196       B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5197                    {LHS.MI->getOperand(1).getReg(),
5198                     LHS.MI->getOperand(2).getReg(), RHS.Reg});
5199     };
5200     return true;
5201   }
5202 
5203   // fold (fadd x, (fmul y, z)) -> (fma y, z, x)
5204   if (isContractableFMul(*RHS.MI, AllowFusionGlobally) &&
5205       (Aggressive || MRI.hasOneNonDBGUse(RHS.Reg))) {
5206     MatchInfo = [=, &MI](MachineIRBuilder &B) {
5207       B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5208                    {RHS.MI->getOperand(1).getReg(),
5209                     RHS.MI->getOperand(2).getReg(), LHS.Reg});
5210     };
5211     return true;
5212   }
5213 
5214   return false;
5215 }
5216 
5217 bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMA(
5218     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
5219   assert(MI.getOpcode() == TargetOpcode::G_FADD);
5220 
5221   bool AllowFusionGlobally, HasFMAD, Aggressive;
5222   if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
5223     return false;
5224 
5225   const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering();
5226   Register Op1 = MI.getOperand(1).getReg();
5227   Register Op2 = MI.getOperand(2).getReg();
5228   DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
5229   DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
5230   LLT DstType = MRI.getType(MI.getOperand(0).getReg());
5231 
5232   unsigned PreferredFusedOpcode =
5233       HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5234 
5235   // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
5236   // prefer to fold the multiply with fewer uses.
5237   if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
5238       isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
5239     if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
5240       std::swap(LHS, RHS);
5241   }
5242 
5243   // fold (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
5244   MachineInstr *FpExtSrc;
5245   if (mi_match(LHS.Reg, MRI, m_GFPExt(m_MInstr(FpExtSrc))) &&
5246       isContractableFMul(*FpExtSrc, AllowFusionGlobally) &&
5247       TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
5248                           MRI.getType(FpExtSrc->getOperand(1).getReg()))) {
5249     MatchInfo = [=, &MI](MachineIRBuilder &B) {
5250       auto FpExtX = B.buildFPExt(DstType, FpExtSrc->getOperand(1).getReg());
5251       auto FpExtY = B.buildFPExt(DstType, FpExtSrc->getOperand(2).getReg());
5252       B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5253                    {FpExtX.getReg(0), FpExtY.getReg(0), RHS.Reg});
5254     };
5255     return true;
5256   }
5257 
5258   // fold (fadd z, (fpext (fmul x, y))) -> (fma (fpext x), (fpext y), z)
5259   // Note: Commutes FADD operands.
5260   if (mi_match(RHS.Reg, MRI, m_GFPExt(m_MInstr(FpExtSrc))) &&
5261       isContractableFMul(*FpExtSrc, AllowFusionGlobally) &&
5262       TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
5263                           MRI.getType(FpExtSrc->getOperand(1).getReg()))) {
5264     MatchInfo = [=, &MI](MachineIRBuilder &B) {
5265       auto FpExtX = B.buildFPExt(DstType, FpExtSrc->getOperand(1).getReg());
5266       auto FpExtY = B.buildFPExt(DstType, FpExtSrc->getOperand(2).getReg());
5267       B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5268                    {FpExtX.getReg(0), FpExtY.getReg(0), LHS.Reg});
5269     };
5270     return true;
5271   }
5272 
5273   return false;
5274 }
5275 
5276 bool CombinerHelper::matchCombineFAddFMAFMulToFMadOrFMA(
5277     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
5278   assert(MI.getOpcode() == TargetOpcode::G_FADD);
5279 
5280   bool AllowFusionGlobally, HasFMAD, Aggressive;
5281   if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive, true))
5282     return false;
5283 
5284   Register Op1 = MI.getOperand(1).getReg();
5285   Register Op2 = MI.getOperand(2).getReg();
5286   DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
5287   DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
5288   LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
5289 
5290   unsigned PreferredFusedOpcode =
5291       HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5292 
5293   // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
5294   // prefer to fold the multiply with fewer uses.
5295   if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
5296       isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
5297     if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
5298       std::swap(LHS, RHS);
5299   }
5300 
5301   MachineInstr *FMA = nullptr;
5302   Register Z;
5303   // fold (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z))
5304   if (LHS.MI->getOpcode() == PreferredFusedOpcode &&
5305       (MRI.getVRegDef(LHS.MI->getOperand(3).getReg())->getOpcode() ==
5306        TargetOpcode::G_FMUL) &&
5307       MRI.hasOneNonDBGUse(LHS.MI->getOperand(0).getReg()) &&
5308       MRI.hasOneNonDBGUse(LHS.MI->getOperand(3).getReg())) {
5309     FMA = LHS.MI;
5310     Z = RHS.Reg;
5311   }
5312   // fold (fadd z, (fma x, y, (fmul u, v))) -> (fma x, y, (fma u, v, z))
5313   else if (RHS.MI->getOpcode() == PreferredFusedOpcode &&
5314            (MRI.getVRegDef(RHS.MI->getOperand(3).getReg())->getOpcode() ==
5315             TargetOpcode::G_FMUL) &&
5316            MRI.hasOneNonDBGUse(RHS.MI->getOperand(0).getReg()) &&
5317            MRI.hasOneNonDBGUse(RHS.MI->getOperand(3).getReg())) {
5318     Z = LHS.Reg;
5319     FMA = RHS.MI;
5320   }
5321 
5322   if (FMA) {
5323     MachineInstr *FMulMI = MRI.getVRegDef(FMA->getOperand(3).getReg());
5324     Register X = FMA->getOperand(1).getReg();
5325     Register Y = FMA->getOperand(2).getReg();
5326     Register U = FMulMI->getOperand(1).getReg();
5327     Register V = FMulMI->getOperand(2).getReg();
5328 
5329     MatchInfo = [=, &MI](MachineIRBuilder &B) {
5330       Register InnerFMA = MRI.createGenericVirtualRegister(DstTy);
5331       B.buildInstr(PreferredFusedOpcode, {InnerFMA}, {U, V, Z});
5332       B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5333                    {X, Y, InnerFMA});
5334     };
5335     return true;
5336   }
5337 
5338   return false;
5339 }
5340 
5341 bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
5342     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
5343   assert(MI.getOpcode() == TargetOpcode::G_FADD);
5344 
5345   bool AllowFusionGlobally, HasFMAD, Aggressive;
5346   if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
5347     return false;
5348 
5349   if (!Aggressive)
5350     return false;
5351 
5352   const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering();
5353   LLT DstType = MRI.getType(MI.getOperand(0).getReg());
5354   Register Op1 = MI.getOperand(1).getReg();
5355   Register Op2 = MI.getOperand(2).getReg();
5356   DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
5357   DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
5358 
5359   unsigned PreferredFusedOpcode =
5360       HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5361 
5362   // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
5363   // prefer to fold the multiply with fewer uses.
5364   if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
5365       isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
5366     if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
5367       std::swap(LHS, RHS);
5368   }
5369 
5370   // Builds: (fma x, y, (fma (fpext u), (fpext v), z))
5371   auto buildMatchInfo = [=, &MI](Register U, Register V, Register Z, Register X,
5372                                  Register Y, MachineIRBuilder &B) {
5373     Register FpExtU = B.buildFPExt(DstType, U).getReg(0);
5374     Register FpExtV = B.buildFPExt(DstType, V).getReg(0);
5375     Register InnerFMA =
5376         B.buildInstr(PreferredFusedOpcode, {DstType}, {FpExtU, FpExtV, Z})
5377             .getReg(0);
5378     B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5379                  {X, Y, InnerFMA});
5380   };
5381 
5382   MachineInstr *FMulMI, *FMAMI;
5383   // fold (fadd (fma x, y, (fpext (fmul u, v))), z)
5384   //   -> (fma x, y, (fma (fpext u), (fpext v), z))
5385   if (LHS.MI->getOpcode() == PreferredFusedOpcode &&
5386       mi_match(LHS.MI->getOperand(3).getReg(), MRI,
5387                m_GFPExt(m_MInstr(FMulMI))) &&
5388       isContractableFMul(*FMulMI, AllowFusionGlobally) &&
5389       TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
5390                           MRI.getType(FMulMI->getOperand(0).getReg()))) {
5391     MatchInfo = [=](MachineIRBuilder &B) {
5392       buildMatchInfo(FMulMI->getOperand(1).getReg(),
5393                      FMulMI->getOperand(2).getReg(), RHS.Reg,
5394                      LHS.MI->getOperand(1).getReg(),
5395                      LHS.MI->getOperand(2).getReg(), B);
5396     };
5397     return true;
5398   }
5399 
5400   // fold (fadd (fpext (fma x, y, (fmul u, v))), z)
5401   //   -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
5402   // FIXME: This turns two single-precision and one double-precision
5403   // operation into two double-precision operations, which might not be
5404   // interesting for all targets, especially GPUs.
5405   if (mi_match(LHS.Reg, MRI, m_GFPExt(m_MInstr(FMAMI))) &&
5406       FMAMI->getOpcode() == PreferredFusedOpcode) {
5407     MachineInstr *FMulMI = MRI.getVRegDef(FMAMI->getOperand(3).getReg());
5408     if (isContractableFMul(*FMulMI, AllowFusionGlobally) &&
5409         TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
5410                             MRI.getType(FMAMI->getOperand(0).getReg()))) {
5411       MatchInfo = [=](MachineIRBuilder &B) {
5412         Register X = FMAMI->getOperand(1).getReg();
5413         Register Y = FMAMI->getOperand(2).getReg();
5414         X = B.buildFPExt(DstType, X).getReg(0);
5415         Y = B.buildFPExt(DstType, Y).getReg(0);
5416         buildMatchInfo(FMulMI->getOperand(1).getReg(),
5417                        FMulMI->getOperand(2).getReg(), RHS.Reg, X, Y, B);
5418       };
5419 
5420       return true;
5421     }
5422   }
5423 
  // fold (fadd z, (fma x, y, (fpext (fmul u, v))))
5425   //   -> (fma x, y, (fma (fpext u), (fpext v), z))
5426   if (RHS.MI->getOpcode() == PreferredFusedOpcode &&
5427       mi_match(RHS.MI->getOperand(3).getReg(), MRI,
5428                m_GFPExt(m_MInstr(FMulMI))) &&
5429       isContractableFMul(*FMulMI, AllowFusionGlobally) &&
5430       TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
5431                           MRI.getType(FMulMI->getOperand(0).getReg()))) {
5432     MatchInfo = [=](MachineIRBuilder &B) {
5433       buildMatchInfo(FMulMI->getOperand(1).getReg(),
5434                      FMulMI->getOperand(2).getReg(), LHS.Reg,
5435                      RHS.MI->getOperand(1).getReg(),
5436                      RHS.MI->getOperand(2).getReg(), B);
5437     };
5438     return true;
5439   }
5440 
  // fold (fadd z, (fpext (fma x, y, (fmul u, v))))
5442   //   -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
5443   // FIXME: This turns two single-precision and one double-precision
5444   // operation into two double-precision operations, which might not be
5445   // interesting for all targets, especially GPUs.
5446   if (mi_match(RHS.Reg, MRI, m_GFPExt(m_MInstr(FMAMI))) &&
5447       FMAMI->getOpcode() == PreferredFusedOpcode) {
5448     MachineInstr *FMulMI = MRI.getVRegDef(FMAMI->getOperand(3).getReg());
5449     if (isContractableFMul(*FMulMI, AllowFusionGlobally) &&
5450         TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
5451                             MRI.getType(FMAMI->getOperand(0).getReg()))) {
5452       MatchInfo = [=](MachineIRBuilder &B) {
5453         Register X = FMAMI->getOperand(1).getReg();
5454         Register Y = FMAMI->getOperand(2).getReg();
5455         X = B.buildFPExt(DstType, X).getReg(0);
5456         Y = B.buildFPExt(DstType, Y).getReg(0);
5457         buildMatchInfo(FMulMI->getOperand(1).getReg(),
5458                        FMulMI->getOperand(2).getReg(), LHS.Reg, X, Y, B);
5459       };
5460       return true;
5461     }
5462   }
5463 
5464   return false;
5465 }
5466 
5467 bool CombinerHelper::matchCombineFSubFMulToFMadOrFMA(
5468     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
5469   assert(MI.getOpcode() == TargetOpcode::G_FSUB);
5470 
5471   bool AllowFusionGlobally, HasFMAD, Aggressive;
5472   if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
5473     return false;
5474 
5475   Register Op1 = MI.getOperand(1).getReg();
5476   Register Op2 = MI.getOperand(2).getReg();
5477   DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
5478   DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
5479   LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
5480 
5481   // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
5482   // prefer to fold the multiply with fewer uses.
  bool FirstMulHasFewerUses = true;
5484   if (isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
5485       isContractableFMul(*RHS.MI, AllowFusionGlobally) &&
5486       hasMoreUses(*LHS.MI, *RHS.MI, MRI))
5487     FirstMulHasFewerUses = false;
5488 
5489   unsigned PreferredFusedOpcode =
5490       HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5491 
5492   // fold (fsub (fmul x, y), z) -> (fma x, y, -z)
5493   if (FirstMulHasFewerUses &&
5494       (isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
5495        (Aggressive || MRI.hasOneNonDBGUse(LHS.Reg)))) {
5496     MatchInfo = [=, &MI](MachineIRBuilder &B) {
5497       Register NegZ = B.buildFNeg(DstTy, RHS.Reg).getReg(0);
5498       B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5499                    {LHS.MI->getOperand(1).getReg(),
5500                     LHS.MI->getOperand(2).getReg(), NegZ});
5501     };
5502     return true;
5503   }
5504   // fold (fsub x, (fmul y, z)) -> (fma -y, z, x)
5505   else if ((isContractableFMul(*RHS.MI, AllowFusionGlobally) &&
5506             (Aggressive || MRI.hasOneNonDBGUse(RHS.Reg)))) {
5507     MatchInfo = [=, &MI](MachineIRBuilder &B) {
5508       Register NegY =
5509           B.buildFNeg(DstTy, RHS.MI->getOperand(1).getReg()).getReg(0);
5510       B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5511                    {NegY, RHS.MI->getOperand(2).getReg(), LHS.Reg});
5512     };
5513     return true;
5514   }
5515 
5516   return false;
5517 }
5518 
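/// Transform (fsub (fneg (fmul x, y)), z) into (fma (fneg x), y, (fneg z)),
/// or (fsub x, (fneg (fmul y, z))) into (fma y, z, x).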
bool CombinerHelper::matchCombineFSubFNegFMulToFMadOrFMA(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FSUB);

  bool AllowFusionGlobally, HasFMAD, Aggressive;
  if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
    return false;

  Register LHSReg = MI.getOperand(1).getReg();
  Register RHSReg = MI.getOperand(2).getReg();
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;

  MachineInstr *FMulMI;
  // fold (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
  if (mi_match(LHSReg, MRI, m_GFNeg(m_MInstr(FMulMI))) &&
      (Aggressive || (MRI.hasOneNonDBGUse(LHSReg) &&
                      MRI.hasOneNonDBGUse(FMulMI->getOperand(0).getReg()))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally)) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      Register NegX =
          B.buildFNeg(DstTy, FMulMI->getOperand(1).getReg()).getReg(0);
      Register NegZ = B.buildFNeg(DstTy, RHSReg).getReg(0);
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {NegX, FMulMI->getOperand(2).getReg(), NegZ});
    };
    return true;
  }

  // fold (fsub x, (fneg (fmul y, z))) -> (fma y, z, x)
  if (mi_match(RHSReg, MRI, m_GFNeg(m_MInstr(FMulMI))) &&
      (Aggressive || (MRI.hasOneNonDBGUse(RHSReg) &&
                      MRI.hasOneNonDBGUse(FMulMI->getOperand(0).getReg()))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally)) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {FMulMI->getOperand(1).getReg(),
                    FMulMI->getOperand(2).getReg(), LHSReg});
    };
    return true;
  }

  return false;
}

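/// Transform (fsub (fpext (fmul x, y)), z) into
/// (fma (fpext x), (fpext y), (fneg z)), or (fsub x, (fpext (fmul y, z)))
/// into (fma (fneg (fpext y)), (fpext z), x).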
bool CombinerHelper::matchCombineFSubFpExtFMulToFMadOrFMA(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FSUB);

  bool AllowFusionGlobally, HasFMAD, Aggressive;
  if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
    return false;

  Register LHSReg = MI.getOperand(1).getReg();
  Register RHSReg = MI.getOperand(2).getReg();
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;

  MachineInstr *FMulMI;
  // fold (fsub (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), (fneg z))
  if (mi_match(LHSReg, MRI, m_GFPExt(m_MInstr(FMulMI))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally) &&
      (Aggressive || MRI.hasOneNonDBGUse(LHSReg))) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      Register FpExtX =
          B.buildFPExt(DstTy, FMulMI->getOperand(1).getReg()).getReg(0);
      Register FpExtY =
          B.buildFPExt(DstTy, FMulMI->getOperand(2).getReg()).getReg(0);
      Register NegZ = B.buildFNeg(DstTy, RHSReg).getReg(0);
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {FpExtX, FpExtY, NegZ});
    };
    return true;
  }

  // fold (fsub x, (fpext (fmul y, z))) -> (fma (fneg (fpext y)), (fpext z), x)
  if (mi_match(RHSReg, MRI, m_GFPExt(m_MInstr(FMulMI))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally) &&
      (Aggressive || MRI.hasOneNonDBGUse(RHSReg))) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      Register FpExtY =
          B.buildFPExt(DstTy, FMulMI->getOperand(1).getReg()).getReg(0);
      Register NegY = B.buildFNeg(DstTy, FpExtY).getReg(0);
      Register FpExtZ =
          B.buildFPExt(DstTy, FMulMI->getOperand(2).getReg()).getReg(0);
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {NegY, FpExtZ, LHSReg});
    };
    return true;
  }

  return false;
}

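/// Transform G_FSUB patterns whose minuend or subtrahend is a negated,
/// extended multiply, with the G_FNEG and G_FPEXT in either order. E.g.
/// (fsub (fpext (fneg (fmul x, y))), z) becomes
/// (fneg (fma (fpext x), (fpext y), z)).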
bool CombinerHelper::matchCombineFSubFpExtFNegFMulToFMadOrFMA(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FSUB);

  bool AllowFusionGlobally, HasFMAD, Aggressive;
  if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
    return false;

  const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering();
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
  Register LHSReg = MI.getOperand(1).getReg();
  Register RHSReg = MI.getOperand(2).getReg();

  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;

  auto buildMatchInfo = [=](Register Dst, Register X, Register Y, Register Z,
                            MachineIRBuilder &B) {
    Register FpExtX = B.buildFPExt(DstTy, X).getReg(0);
    Register FpExtY = B.buildFPExt(DstTy, Y).getReg(0);
    B.buildInstr(PreferredFusedOpcode, {Dst}, {FpExtX, FpExtY, Z});
  };

  MachineInstr *FMulMI;
  // fold (fsub (fpext (fneg (fmul x, y))), z) ->
  //      (fneg (fma (fpext x), (fpext y), z))
  // fold (fsub (fneg (fpext (fmul x, y))), z) ->
  //      (fneg (fma (fpext x), (fpext y), z))
  if ((mi_match(LHSReg, MRI, m_GFPExt(m_GFNeg(m_MInstr(FMulMI)))) ||
       mi_match(LHSReg, MRI, m_GFNeg(m_GFPExt(m_MInstr(FMulMI))))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally) &&
      TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstTy,
                          MRI.getType(FMulMI->getOperand(0).getReg()))) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      Register FMAReg = MRI.createGenericVirtualRegister(DstTy);
      buildMatchInfo(FMAReg, FMulMI->getOperand(1).getReg(),
                     FMulMI->getOperand(2).getReg(), RHSReg, B);
      B.buildFNeg(MI.getOperand(0).getReg(), FMAReg);
    };
    return true;
  }

  // fold (fsub x, (fpext (fneg (fmul y, z)))) -> (fma (fpext y), (fpext z), x)
  // fold (fsub x, (fneg (fpext (fmul y, z)))) -> (fma (fpext y), (fpext z), x)
  if ((mi_match(RHSReg, MRI, m_GFPExt(m_GFNeg(m_MInstr(FMulMI)))) ||
       mi_match(RHSReg, MRI, m_GFNeg(m_GFPExt(m_MInstr(FMulMI))))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally) &&
      TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstTy,
                          MRI.getType(FMulMI->getOperand(0).getReg()))) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      buildMatchInfo(MI.getOperand(0).getReg(), FMulMI->getOperand(1).getReg(),
                     FMulMI->getOperand(2).getReg(), LHSReg, B);
    };
    return true;
  }

  return false;
}

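/// Fold boolean selects into logic operations when the condition and both
/// operands share the same 1-bit scalar (or vector-of-1-bit) type. E.g.
/// (select Cond, 1, F) -> (or Cond, F) and (select Cond, T, 0) ->
/// (and Cond, T).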
bool CombinerHelper::matchSelectToLogical(MachineInstr &MI,
                                          BuildFnTy &MatchInfo) {
  GSelect &Sel = cast<GSelect>(MI);
  Register DstReg = Sel.getReg(0);
  Register Cond = Sel.getCondReg();
  Register TrueReg = Sel.getTrueReg();
  Register FalseReg = Sel.getFalseReg();

  auto *TrueDef = getDefIgnoringCopies(TrueReg, MRI);
  auto *FalseDef = getDefIgnoringCopies(FalseReg, MRI);

  const LLT CondTy = MRI.getType(Cond);
  const LLT OpTy = MRI.getType(TrueReg);
  if (CondTy != OpTy || OpTy.getScalarSizeInBits() != 1)
    return false;

  // We have a boolean select.

  // select Cond, Cond, F --> or Cond, F
  // select Cond, 1, F    --> or Cond, F
  auto MaybeCstTrue = isConstantOrConstantSplatVector(*TrueDef, MRI);
  if (Cond == TrueReg || (MaybeCstTrue && MaybeCstTrue->isOne())) {
    MatchInfo = [=](MachineIRBuilder &MIB) {
      MIB.buildOr(DstReg, Cond, FalseReg);
    };
    return true;
  }

  // select Cond, T, Cond --> and Cond, T
  // select Cond, T, 0    --> and Cond, T
  auto MaybeCstFalse = isConstantOrConstantSplatVector(*FalseDef, MRI);
  if (Cond == FalseReg || (MaybeCstFalse && MaybeCstFalse->isZero())) {
    MatchInfo = [=](MachineIRBuilder &MIB) {
      MIB.buildAnd(DstReg, Cond, TrueReg);
    };
    return true;
  }

  // select Cond, T, 1 --> or (not Cond), T
  if (MaybeCstFalse && MaybeCstFalse->isOne()) {
    MatchInfo = [=](MachineIRBuilder &MIB) {
      MIB.buildOr(DstReg, MIB.buildNot(OpTy, Cond), TrueReg);
    };
    return true;
  }

  // select Cond, 0, F --> and (not Cond), F
  if (MaybeCstTrue && MaybeCstTrue->isZero()) {
    MatchInfo = [=](MachineIRBuilder &MIB) {
      MIB.buildAnd(DstReg, MIB.buildNot(OpTy, Cond), FalseReg);
    };
    return true;
  }
  return false;
}

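/// Match a G_FMINNUM/G_FMAXNUM/G_FMINIMUM/G_FMAXIMUM whose first or second
/// operand is a constant NaN. \p IdxToPropagate is set to the operand index
/// that should replace the result: the NaN operand for the NaN-propagating
/// minimum/maximum variants, the other operand for minnum/maxnum.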
bool CombinerHelper::matchCombineFMinMaxNaN(MachineInstr &MI,
                                            unsigned &IdxToPropagate) {
  bool PropagateNaN;
  switch (MI.getOpcode()) {
  default:
    return false;
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMAXNUM:
    PropagateNaN = false;
    break;
  case TargetOpcode::G_FMINIMUM:
  case TargetOpcode::G_FMAXIMUM:
    PropagateNaN = true;
    break;
  }

  auto MatchNaN = [&](unsigned Idx) {
    Register MaybeNaNReg = MI.getOperand(Idx).getReg();
    const ConstantFP *MaybeCst = getConstantFPVRegVal(MaybeNaNReg, MRI);
    if (!MaybeCst || !MaybeCst->getValueAPF().isNaN())
      return false;
    IdxToPropagate = PropagateNaN ? Idx : (Idx == 1 ? 2 : 1);
    return true;
  };

  return MatchNaN(1) || MatchNaN(2);
}

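/// Match (A + (B - A)) or ((B - A) + A) and set \p Src to B, so the G_ADD
/// can be replaced with a copy of B.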
bool CombinerHelper::matchAddSubSameReg(MachineInstr &MI, Register &Src) {
  assert(MI.getOpcode() == TargetOpcode::G_ADD && "Expected a G_ADD");
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();

  // Helper lambda to check for opportunities for
  // A + (B - A) -> B
  // (B - A) + A -> B
  auto CheckFold = [&](Register MaybeSub, Register MaybeSameReg) {
    Register Reg;
    return mi_match(MaybeSub, MRI, m_GSub(m_Reg(Src), m_Reg(Reg))) &&
           Reg == MaybeSameReg;
  };
  return CheckFold(LHS, RHS) || CheckFold(RHS, LHS);
}

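/// Fold a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC that merely reassembles
/// the low and high halves of a bitcast value x back into x itself.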
bool CombinerHelper::matchBuildVectorIdentityFold(MachineInstr &MI,
                                                  Register &MatchInfo) {
  // This combine folds the following patterns:
  //
  //  G_BUILD_VECTOR_TRUNC (G_BITCAST(x), G_LSHR(G_BITCAST(x), k))
  //  G_BUILD_VECTOR(G_TRUNC(G_BITCAST(x)), G_TRUNC(G_LSHR(G_BITCAST(x), k)))
  //    into
  //      x
  //    if
  //      k == sizeof(VecEltTy)/2
  //      type(x) == type(dst)
  //
  //  G_BUILD_VECTOR(G_TRUNC(G_BITCAST(x)), undef)
  //    into
  //      x
  //    if
  //      type(x) == type(dst)

  LLT DstVecTy = MRI.getType(MI.getOperand(0).getReg());
  LLT DstEltTy = DstVecTy.getElementType();

  Register Lo, Hi;

  if (mi_match(
          MI, MRI,
          m_GBuildVector(m_GTrunc(m_GBitcast(m_Reg(Lo))), m_GImplicitDef()))) {
    MatchInfo = Lo;
    return MRI.getType(MatchInfo) == DstVecTy;
  }

  std::optional<ValueAndVReg> ShiftAmount;
  const auto LoPattern = m_GBitcast(m_Reg(Lo));
  const auto HiPattern = m_GLShr(m_GBitcast(m_Reg(Hi)), m_GCst(ShiftAmount));
  if (mi_match(
          MI, MRI,
          m_any_of(m_GBuildVectorTrunc(LoPattern, HiPattern),
                   m_GBuildVector(m_GTrunc(LoPattern), m_GTrunc(HiPattern))))) {
    if (Lo == Hi && ShiftAmount->Value == DstEltTy.getSizeInBits()) {
      MatchInfo = Lo;
      return MRI.getType(MatchInfo) == DstVecTy;
    }
  }

  return false;
}

bool CombinerHelper::matchTruncBuildVectorFold(MachineInstr &MI,
                                               Register &MatchInfo) {
  // Replace (G_TRUNC (G_BITCAST (G_BUILD_VECTOR x, y))) with just x
  // if type(x) == type(G_TRUNC)
  if (!mi_match(MI.getOperand(1).getReg(), MRI,
                m_GBitcast(m_GBuildVector(m_Reg(MatchInfo), m_Reg()))))
    return false;

  return MRI.getType(MatchInfo) == MRI.getType(MI.getOperand(0).getReg());
}

bool CombinerHelper::matchTruncLshrBuildVectorFold(MachineInstr &MI,
                                                   Register &MatchInfo) {
  // Replace (G_TRUNC (G_LSHR (G_BITCAST (G_BUILD_VECTOR x, y)), K)) with
  //    y if K == size of vector element type
  std::optional<ValueAndVReg> ShiftAmt;
  if (!mi_match(MI.getOperand(1).getReg(), MRI,
                m_GLShr(m_GBitcast(m_GBuildVector(m_Reg(), m_Reg(MatchInfo))),
                        m_GCst(ShiftAmt))))
    return false;

  LLT MatchTy = MRI.getType(MatchInfo);
  return ShiftAmt->Value.getZExtValue() == MatchTy.getSizeInBits() &&
         MatchTy == MRI.getType(MI.getOperand(0).getReg());
}

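/// \returns the floating point min/max opcode that implements a select fed
/// by a compare with predicate \p Pred, given the known NaN behaviour
/// \p VsNaNRetVal and opcode legality for \p DstTy, or 0 if no suitable
/// opcode exists.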
unsigned CombinerHelper::getFPMinMaxOpcForSelect(
    CmpInst::Predicate Pred, LLT DstTy,
    SelectPatternNaNBehaviour VsNaNRetVal) const {
  assert(VsNaNRetVal != SelectPatternNaNBehaviour::NOT_APPLICABLE &&
         "Expected a NaN behaviour?");
  // Choose an opcode based on legality or the behaviour when one of the
  // LHS/RHS may be NaN.
  switch (Pred) {
  default:
    return 0;
  case CmpInst::FCMP_UGT:
  case CmpInst::FCMP_UGE:
  case CmpInst::FCMP_OGT:
  case CmpInst::FCMP_OGE:
    if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_OTHER)
      return TargetOpcode::G_FMAXNUM;
    if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_NAN)
      return TargetOpcode::G_FMAXIMUM;
    if (isLegal({TargetOpcode::G_FMAXNUM, {DstTy}}))
      return TargetOpcode::G_FMAXNUM;
    if (isLegal({TargetOpcode::G_FMAXIMUM, {DstTy}}))
      return TargetOpcode::G_FMAXIMUM;
    return 0;
  case CmpInst::FCMP_ULT:
  case CmpInst::FCMP_ULE:
  case CmpInst::FCMP_OLT:
  case CmpInst::FCMP_OLE:
    if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_OTHER)
      return TargetOpcode::G_FMINNUM;
    if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_NAN)
      return TargetOpcode::G_FMINIMUM;
    if (isLegal({TargetOpcode::G_FMINNUM, {DstTy}}))
      return TargetOpcode::G_FMINNUM;
    if (isLegal({TargetOpcode::G_FMINIMUM, {DstTy}}))
      return TargetOpcode::G_FMINIMUM;
    return 0;
  }
}

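/// \returns what a select between \p LHS and \p RHS, fed by a comparison of
/// the two, would return if the possibly-NaN operand were NaN: RETURNS_NAN
/// or RETURNS_OTHER when exactly one operand is known NaN-free, RETURNS_ANY
/// when both are, and NOT_APPLICABLE when neither is.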
CombinerHelper::SelectPatternNaNBehaviour
CombinerHelper::computeRetValAgainstNaN(Register LHS, Register RHS,
                                        bool IsOrderedComparison) const {
  bool LHSSafe = isKnownNeverNaN(LHS, MRI);
  bool RHSSafe = isKnownNeverNaN(RHS, MRI);
  // Completely unsafe.
  if (!LHSSafe && !RHSSafe)
    return SelectPatternNaNBehaviour::NOT_APPLICABLE;
  if (LHSSafe && RHSSafe)
    return SelectPatternNaNBehaviour::RETURNS_ANY;
  // An ordered comparison will return false when given a NaN, so it
  // returns the RHS.
  if (IsOrderedComparison)
    return LHSSafe ? SelectPatternNaNBehaviour::RETURNS_NAN
                   : SelectPatternNaNBehaviour::RETURNS_OTHER;
  // An unordered comparison will return true when given a NaN, so it
  // returns the LHS.
  return LHSSafe ? SelectPatternNaNBehaviour::RETURNS_OTHER
                 : SelectPatternNaNBehaviour::RETURNS_NAN;
}

bool CombinerHelper::matchFPSelectToMinMax(Register Dst, Register Cond,
                                           Register TrueVal, Register FalseVal,
                                           BuildFnTy &MatchInfo) {
  // Match: select (fcmp cond x, y) x, y
  //        select (fcmp cond x, y) y, x
  // And turn it into fminnum/fmaxnum or fminimum/fmaximum based on the
  // predicate.
  LLT DstTy = MRI.getType(Dst);
  // Bail out early on pointers, since we'll never want to fold to a min/max.
  if (DstTy.isPointer())
    return false;
  // Match a floating point compare with a less-than/greater-than predicate.
  // TODO: Allow multiple users of the compare if they are all selects.
  CmpInst::Predicate Pred;
  Register CmpLHS, CmpRHS;
  if (!mi_match(Cond, MRI,
                m_OneNonDBGUse(
                    m_GFCmp(m_Pred(Pred), m_Reg(CmpLHS), m_Reg(CmpRHS)))) ||
      CmpInst::isEquality(Pred))
    return false;
  SelectPatternNaNBehaviour ResWithKnownNaNInfo =
      computeRetValAgainstNaN(CmpLHS, CmpRHS, CmpInst::isOrdered(Pred));
  if (ResWithKnownNaNInfo == SelectPatternNaNBehaviour::NOT_APPLICABLE)
    return false;
  if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
    std::swap(CmpLHS, CmpRHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
    if (ResWithKnownNaNInfo == SelectPatternNaNBehaviour::RETURNS_NAN)
      ResWithKnownNaNInfo = SelectPatternNaNBehaviour::RETURNS_OTHER;
    else if (ResWithKnownNaNInfo == SelectPatternNaNBehaviour::RETURNS_OTHER)
      ResWithKnownNaNInfo = SelectPatternNaNBehaviour::RETURNS_NAN;
  }
  if (TrueVal != CmpLHS || FalseVal != CmpRHS)
    return false;
  // Decide what type of max/min this should be based on the predicate.
  unsigned Opc = getFPMinMaxOpcForSelect(Pred, DstTy, ResWithKnownNaNInfo);
  if (!Opc || !isLegal({Opc, {DstTy}}))
    return false;
  // Comparisons between signed zero and zero may have different results...
  // unless we have fmaximum/fminimum. In that case, we know -0 < 0.
  if (Opc != TargetOpcode::G_FMAXIMUM && Opc != TargetOpcode::G_FMINIMUM) {
    // We don't know if a comparison between two 0s will give us a consistent
    // result. Be conservative and only proceed if at least one side is
    // non-zero.
    auto KnownNonZeroSide = getFConstantVRegValWithLookThrough(CmpLHS, MRI);
    if (!KnownNonZeroSide || !KnownNonZeroSide->Value.isNonZero()) {
      KnownNonZeroSide = getFConstantVRegValWithLookThrough(CmpRHS, MRI);
      if (!KnownNonZeroSide || !KnownNonZeroSide->Value.isNonZero())
        return false;
    }
  }
  MatchInfo = [=](MachineIRBuilder &B) {
    B.buildInstr(Opc, {Dst}, {CmpLHS, CmpRHS});
  };
  return true;
}

bool CombinerHelper::matchSimplifySelectToMinMax(MachineInstr &MI,
                                                 BuildFnTy &MatchInfo) {
  // TODO: Handle integer cases.
  assert(MI.getOpcode() == TargetOpcode::G_SELECT);
  // Condition may be fed by a truncated compare.
  Register Cond = MI.getOperand(1).getReg();
  Register MaybeTrunc;
  if (mi_match(Cond, MRI, m_OneNonDBGUse(m_GTrunc(m_Reg(MaybeTrunc)))))
    Cond = MaybeTrunc;
  Register Dst = MI.getOperand(0).getReg();
  Register TrueVal = MI.getOperand(2).getReg();
  Register FalseVal = MI.getOperand(3).getReg();
  return matchFPSelectToMinMax(Dst, Cond, TrueVal, FalseVal, MatchInfo);
}

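/// Fold an equality compare of a binary op against one of its own operands,
/// e.g. (X + Y) == X --> Y == 0. G_ADD, G_SUB, and G_XOR qualify; the
/// predicate must be an equality.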
bool CombinerHelper::matchRedundantBinOpInEquality(MachineInstr &MI,
                                                   BuildFnTy &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);
  // (X + Y) == X --> Y == 0
  // (X + Y) != X --> Y != 0
  // (X - Y) == X --> Y == 0
  // (X - Y) != X --> Y != 0
  // (X ^ Y) == X --> Y == 0
  // (X ^ Y) != X --> Y != 0
  Register Dst = MI.getOperand(0).getReg();
  CmpInst::Predicate Pred;
  Register X, Y, OpLHS, OpRHS;
  bool MatchedSub = mi_match(
      Dst, MRI,
      m_c_GICmp(m_Pred(Pred), m_Reg(X), m_GSub(m_Reg(OpLHS), m_Reg(Y))));
  if (MatchedSub && X != OpLHS)
    return false;
  if (!MatchedSub) {
    if (!mi_match(Dst, MRI,
                  m_c_GICmp(m_Pred(Pred), m_Reg(X),
                            m_any_of(m_GAdd(m_Reg(OpLHS), m_Reg(OpRHS)),
                                     m_GXor(m_Reg(OpLHS), m_Reg(OpRHS))))))
      return false;
    Y = X == OpLHS ? OpRHS : X == OpRHS ? OpLHS : Register();
  }
  MatchInfo = [=](MachineIRBuilder &B) {
    auto Zero = B.buildConstant(MRI.getType(Y), 0);
    B.buildICmp(Pred, Dst, Y, Zero);
  };
  return CmpInst::isEquality(Pred) && Y.isValid();
}

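/// Match a shift whose constant shift amount is greater than or equal to
/// the scalar bit width of the result, i.e. a shift whose result is
/// undefined.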
bool CombinerHelper::matchShiftsTooBig(MachineInstr &MI) {
  Register ShiftReg = MI.getOperand(2).getReg();
  LLT ResTy = MRI.getType(MI.getOperand(0).getReg());
  auto IsShiftTooBig = [&](const Constant *C) {
    auto *CI = dyn_cast<ConstantInt>(C);
    return CI && CI->uge(ResTy.getScalarSizeInBits());
  };
  return matchUnaryPredicate(MRI, ShiftReg, IsShiftTooBig);
}

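/// Try the generic, always-applicable combines: copy folding, extending
/// loads, and indexed load/store formation.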
bool CombinerHelper::tryCombine(MachineInstr &MI) {
  if (tryCombineCopy(MI))
    return true;
  if (tryCombineExtendingLoads(MI))
    return true;
  if (tryCombineIndexedLoadStore(MI))
    return true;
  return false;
}