1 //===- InstructionCombining.cpp - Combine multiple instructions -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // InstructionCombining - Combine instructions to form fewer, simple
10 // instructions.  This pass does not modify the CFG.  This pass is where
11 // algebraic simplification happens.
12 //
13 // This pass combines things like:
14 //    %Y = add i32 %X, 1
15 //    %Z = add i32 %Y, 1
16 // into:
17 //    %Z = add i32 %X, 2
18 //
19 // This is a simple worklist driven algorithm.
20 //
21 // This pass guarantees that the following canonicalizations are performed on
22 // the program:
23 //    1. If a binary operator has a constant operand, it is moved to the RHS
24 //    2. Bitwise operators with constant operands are always grouped so that
25 //       shifts are performed first, then or's, then and's, then xor's.
26 //    3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
27 //    4. All cmp instructions on boolean values are replaced with logical ops
28 //    5. add X, X is represented as (X*2) => (X << 1)
29 //    6. Multiplies with a power-of-two constant argument are transformed into
30 //       shifts.
31 //   ... etc.
32 //
33 //===----------------------------------------------------------------------===//
34 
35 #include "InstCombineInternal.h"
36 #include "llvm/ADT/APInt.h"
37 #include "llvm/ADT/ArrayRef.h"
38 #include "llvm/ADT/DenseMap.h"
39 #include "llvm/ADT/SmallPtrSet.h"
40 #include "llvm/ADT/SmallVector.h"
41 #include "llvm/ADT/Statistic.h"
42 #include "llvm/Analysis/AliasAnalysis.h"
43 #include "llvm/Analysis/AssumptionCache.h"
44 #include "llvm/Analysis/BasicAliasAnalysis.h"
45 #include "llvm/Analysis/BlockFrequencyInfo.h"
46 #include "llvm/Analysis/CFG.h"
47 #include "llvm/Analysis/ConstantFolding.h"
48 #include "llvm/Analysis/GlobalsModRef.h"
49 #include "llvm/Analysis/InstructionSimplify.h"
50 #include "llvm/Analysis/LazyBlockFrequencyInfo.h"
51 #include "llvm/Analysis/LoopInfo.h"
52 #include "llvm/Analysis/MemoryBuiltins.h"
53 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
54 #include "llvm/Analysis/ProfileSummaryInfo.h"
55 #include "llvm/Analysis/TargetFolder.h"
56 #include "llvm/Analysis/TargetLibraryInfo.h"
57 #include "llvm/Analysis/TargetTransformInfo.h"
58 #include "llvm/Analysis/Utils/Local.h"
59 #include "llvm/Analysis/ValueTracking.h"
60 #include "llvm/Analysis/VectorUtils.h"
61 #include "llvm/IR/BasicBlock.h"
62 #include "llvm/IR/CFG.h"
63 #include "llvm/IR/Constant.h"
64 #include "llvm/IR/Constants.h"
65 #include "llvm/IR/DIBuilder.h"
66 #include "llvm/IR/DataLayout.h"
67 #include "llvm/IR/DebugInfo.h"
68 #include "llvm/IR/DerivedTypes.h"
69 #include "llvm/IR/Dominators.h"
70 #include "llvm/IR/EHPersonalities.h"
71 #include "llvm/IR/Function.h"
72 #include "llvm/IR/GetElementPtrTypeIterator.h"
73 #include "llvm/IR/IRBuilder.h"
74 #include "llvm/IR/InstrTypes.h"
75 #include "llvm/IR/Instruction.h"
76 #include "llvm/IR/Instructions.h"
77 #include "llvm/IR/IntrinsicInst.h"
78 #include "llvm/IR/Intrinsics.h"
79 #include "llvm/IR/Metadata.h"
80 #include "llvm/IR/Operator.h"
81 #include "llvm/IR/PassManager.h"
82 #include "llvm/IR/PatternMatch.h"
83 #include "llvm/IR/Type.h"
84 #include "llvm/IR/Use.h"
85 #include "llvm/IR/User.h"
86 #include "llvm/IR/Value.h"
87 #include "llvm/IR/ValueHandle.h"
88 #include "llvm/InitializePasses.h"
89 #include "llvm/Support/Casting.h"
90 #include "llvm/Support/CommandLine.h"
91 #include "llvm/Support/Compiler.h"
92 #include "llvm/Support/Debug.h"
93 #include "llvm/Support/DebugCounter.h"
94 #include "llvm/Support/ErrorHandling.h"
95 #include "llvm/Support/KnownBits.h"
96 #include "llvm/Support/raw_ostream.h"
97 #include "llvm/Transforms/InstCombine/InstCombine.h"
98 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
99 #include "llvm/Transforms/Utils/Local.h"
100 #include <algorithm>
101 #include <cassert>
102 #include <cstdint>
103 #include <memory>
104 #include <optional>
105 #include <string>
106 #include <utility>
107 
108 #define DEBUG_TYPE "instcombine"
109 #include "llvm/Transforms/Utils/InstructionWorklist.h"
110 #include <optional>
111 
112 using namespace llvm;
113 using namespace llvm::PatternMatch;
114 
115 STATISTIC(NumWorklistIterations,
116           "Number of instruction combining iterations performed");
117 STATISTIC(NumOneIteration, "Number of functions with one iteration");
118 STATISTIC(NumTwoIterations, "Number of functions with two iterations");
119 STATISTIC(NumThreeIterations, "Number of functions with three iterations");
120 STATISTIC(NumFourOrMoreIterations,
121           "Number of functions with four or more iterations");
122 
123 STATISTIC(NumCombined , "Number of insts combined");
124 STATISTIC(NumConstProp, "Number of constant folds");
125 STATISTIC(NumDeadInst , "Number of dead inst eliminated");
126 STATISTIC(NumSunkInst , "Number of instructions sunk");
127 STATISTIC(NumExpand,    "Number of expansions");
128 STATISTIC(NumFactor   , "Number of factorizations");
129 STATISTIC(NumReassoc  , "Number of reassociations");
130 DEBUG_COUNTER(VisitCounter, "instcombine-visit",
131               "Controls which instructions are visited");
132 
133 // FIXME: these limits eventually should be as low as 2.
134 #ifndef NDEBUG
135 static constexpr unsigned InstCombineDefaultInfiniteLoopThreshold = 100;
136 #else
137 static constexpr unsigned InstCombineDefaultInfiniteLoopThreshold = 1000;
138 #endif
139 
140 static cl::opt<bool>
141 EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"),
142                                               cl::init(true));
143 
144 static cl::opt<unsigned> MaxSinkNumUsers(
145     "instcombine-max-sink-users", cl::init(32),
146     cl::desc("Maximum number of undroppable users for instruction sinking"));
147 
148 static cl::opt<unsigned> InfiniteLoopDetectionThreshold(
149     "instcombine-infinite-loop-threshold",
150     cl::desc("Number of instruction combining iterations considered an "
151              "infinite loop"),
152     cl::init(InstCombineDefaultInfiniteLoopThreshold), cl::Hidden);
153 
154 static cl::opt<unsigned>
155 MaxArraySize("instcombine-maxarray-size", cl::init(1024),
156              cl::desc("Maximum array size considered when doing a combine"));
157 
158 // FIXME: Remove this flag when it is no longer necessary to convert
159 // llvm.dbg.declare to avoid inaccurate debug info. Setting this to false
160 // increases variable availability at the cost of accuracy. Variables that
161 // cannot be promoted by mem2reg or SROA will be described as living in memory
162 // for their entire lifetime. However, passes like DSE and instcombine can
163 // delete stores to the alloca, leading to misleading and inaccurate debug
164 // information. This flag can be removed when those passes are fixed.
165 static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare",
166                                                cl::Hidden, cl::init(true));
167 
168 std::optional<Instruction *>
169 InstCombiner::targetInstCombineIntrinsic(IntrinsicInst &II) {
170   // Handle target specific intrinsics
171   if (II.getCalledFunction()->isTargetIntrinsic()) {
172     return TTI.instCombineIntrinsic(*this, II);
173   }
174   return std::nullopt;
175 }
176 
177 std::optional<Value *> InstCombiner::targetSimplifyDemandedUseBitsIntrinsic(
178     IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
179     bool &KnownBitsComputed) {
180   // Handle target specific intrinsics
181   if (II.getCalledFunction()->isTargetIntrinsic()) {
182     return TTI.simplifyDemandedUseBitsIntrinsic(*this, II, DemandedMask, Known,
183                                                 KnownBitsComputed);
184   }
185   return std::nullopt;
186 }
187 
188 std::optional<Value *> InstCombiner::targetSimplifyDemandedVectorEltsIntrinsic(
189     IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2,
190     APInt &UndefElts3,
191     std::function<void(Instruction *, unsigned, APInt, APInt &)>
192         SimplifyAndSetOp) {
193   // Handle target specific intrinsics
194   if (II.getCalledFunction()->isTargetIntrinsic()) {
195     return TTI.simplifyDemandedVectorEltsIntrinsic(
196         *this, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
197         SimplifyAndSetOp);
198   }
199   return std::nullopt;
200 }
201 
202 bool InstCombiner::isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
203   return TTI.isValidAddrSpaceCast(FromAS, ToAS);
204 }
205 
206 Value *InstCombinerImpl::EmitGEPOffset(User *GEP) {
207   return llvm::emitGEPOffset(&Builder, DL, GEP);
208 }
209 
210 /// Legal integers and common types are considered desirable. This is used to
211 /// avoid creating instructions with types that may not be supported well by the
212 /// the backend.
213 /// NOTE: This treats i8, i16 and i32 specially because they are common
214 ///       types in frontend languages.
215 bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {
216   switch (BitWidth) {
217   case 8:
218   case 16:
219   case 32:
220     return true;
221   default:
222     return DL.isLegalInteger(BitWidth);
223   }
224 }
225 
226 /// Return true if it is desirable to convert an integer computation from a
227 /// given bit width to a new bit width.
228 /// We don't want to convert from a legal or desirable type (like i8) to an
229 /// illegal type or from a smaller to a larger illegal type. A width of '1'
230 /// is always treated as a desirable type because i1 is a fundamental type in
231 /// IR, and there are many specialized optimizations for i1 types.
232 /// Common/desirable widths are equally treated as legal to convert to, in
233 /// order to open up more combining opportunities.
234 bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
235                                         unsigned ToWidth) const {
236   bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
237   bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);
238 
239   // Convert to desirable widths even if they are not legal types.
240   // Only shrink types, to prevent infinite loops.
241   if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
242     return true;
243 
244   // If this is a legal or desiable integer from type, and the result would be
245   // an illegal type, don't do the transformation.
246   if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
247     return false;
248 
249   // Otherwise, if both are illegal, do not increase the size of the result. We
250   // do allow things like i160 -> i64, but not i64 -> i160.
251   if (!FromLegal && !ToLegal && ToWidth > FromWidth)
252     return false;
253 
254   return true;
255 }
256 
257 /// Return true if it is desirable to convert a computation from 'From' to 'To'.
258 /// We don't want to convert from a legal to an illegal type or from a smaller
259 /// to a larger illegal type. i1 is always treated as a legal type because it is
260 /// a fundamental type in IR, and there are many specialized optimizations for
261 /// i1 types.
262 bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {
263   // TODO: This could be extended to allow vectors. Datalayout changes might be
264   // needed to properly support that.
265   if (!From->isIntegerTy() || !To->isIntegerTy())
266     return false;
267 
268   unsigned FromWidth = From->getPrimitiveSizeInBits();
269   unsigned ToWidth = To->getPrimitiveSizeInBits();
270   return shouldChangeType(FromWidth, ToWidth);
271 }
272 
273 // Return true, if No Signed Wrap should be maintained for I.
274 // The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
275 // where both B and C should be ConstantInts, results in a constant that does
276 // not overflow. This function only handles the Add and Sub opcodes. For
277 // all other opcodes, the function conservatively returns false.
278 static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
279   auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
280   if (!OBO || !OBO->hasNoSignedWrap())
281     return false;
282 
283   // We reason about Add and Sub Only.
284   Instruction::BinaryOps Opcode = I.getOpcode();
285   if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
286     return false;
287 
288   const APInt *BVal, *CVal;
289   if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal)))
290     return false;
291 
292   bool Overflow = false;
293   if (Opcode == Instruction::Add)
294     (void)BVal->sadd_ov(*CVal, Overflow);
295   else
296     (void)BVal->ssub_ov(*CVal, Overflow);
297 
298   return !Overflow;
299 }
300 
301 static bool hasNoUnsignedWrap(BinaryOperator &I) {
302   auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
303   return OBO && OBO->hasNoUnsignedWrap();
304 }
305 
306 static bool hasNoSignedWrap(BinaryOperator &I) {
307   auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
308   return OBO && OBO->hasNoSignedWrap();
309 }
310 
311 /// Conservatively clears subclassOptionalData after a reassociation or
312 /// commutation. We preserve fast-math flags when applicable as they can be
313 /// preserved.
314 static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {
315   FPMathOperator *FPMO = dyn_cast<FPMathOperator>(&I);
316   if (!FPMO) {
317     I.clearSubclassOptionalData();
318     return;
319   }
320 
321   FastMathFlags FMF = I.getFastMathFlags();
322   I.clearSubclassOptionalData();
323   I.setFastMathFlags(FMF);
324 }
325 
326 /// Combine constant operands of associative operations either before or after a
327 /// cast to eliminate one of the associative operations:
328 /// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
329 /// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
330 static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1,
331                                    InstCombinerImpl &IC) {
332   auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
333   if (!Cast || !Cast->hasOneUse())
334     return false;
335 
336   // TODO: Enhance logic for other casts and remove this check.
337   auto CastOpcode = Cast->getOpcode();
338   if (CastOpcode != Instruction::ZExt)
339     return false;
340 
341   // TODO: Enhance logic for other BinOps and remove this check.
342   if (!BinOp1->isBitwiseLogicOp())
343     return false;
344 
345   auto AssocOpcode = BinOp1->getOpcode();
346   auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
347   if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
348     return false;
349 
350   Constant *C1, *C2;
351   if (!match(BinOp1->getOperand(1), m_Constant(C1)) ||
352       !match(BinOp2->getOperand(1), m_Constant(C2)))
353     return false;
354 
355   // TODO: This assumes a zext cast.
356   // Eg, if it was a trunc, we'd cast C1 to the source type because casting C2
357   // to the destination type might lose bits.
358 
359   // Fold the constants together in the destination type:
360   // (op (cast (op X, C2)), C1) --> (op (cast X), FoldedC)
361   Type *DestTy = C1->getType();
362   Constant *CastC2 = ConstantExpr::getCast(CastOpcode, C2, DestTy);
363   Constant *FoldedC =
364       ConstantFoldBinaryOpOperands(AssocOpcode, C1, CastC2, IC.getDataLayout());
365   if (!FoldedC)
366     return false;
367 
368   IC.replaceOperand(*Cast, 0, BinOp2->getOperand(0));
369   IC.replaceOperand(*BinOp1, 1, FoldedC);
370   return true;
371 }
372 
373 // Simplifies IntToPtr/PtrToInt RoundTrip Cast.
374 // inttoptr ( ptrtoint (x) ) --> x
375 Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {
376   auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
377   if (IntToPtr && DL.getTypeSizeInBits(IntToPtr->getDestTy()) ==
378                       DL.getTypeSizeInBits(IntToPtr->getSrcTy())) {
379     auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
380     Type *CastTy = IntToPtr->getDestTy();
381     if (PtrToInt &&
382         CastTy->getPointerAddressSpace() ==
383             PtrToInt->getSrcTy()->getPointerAddressSpace() &&
384         DL.getTypeSizeInBits(PtrToInt->getSrcTy()) ==
385             DL.getTypeSizeInBits(PtrToInt->getDestTy()))
386       return PtrToInt->getOperand(0);
387   }
388   return nullptr;
389 }
390 
391 /// This performs a few simplifications for operators that are associative or
392 /// commutative:
393 ///
394 ///  Commutative operators:
395 ///
396 ///  1. Order operands such that they are listed from right (least complex) to
397 ///     left (most complex).  This puts constants before unary operators before
398 ///     binary operators.
399 ///
400 ///  Associative operators:
401 ///
402 ///  2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
403 ///  3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
404 ///
405 ///  Associative and commutative operators:
406 ///
407 ///  4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
408 ///  5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
409 ///  6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
410 ///     if C1 and C2 are constants.
411 bool InstCombinerImpl::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
412   Instruction::BinaryOps Opcode = I.getOpcode();
413   bool Changed = false;
414 
415   do {
416     // Order operands such that they are listed from right (least complex) to
417     // left (most complex).  This puts constants before unary operators before
418     // binary operators.
419     if (I.isCommutative() && getComplexity(I.getOperand(0)) <
420         getComplexity(I.getOperand(1)))
421       Changed = !I.swapOperands();
422 
423     BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
424     BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
425 
426     if (I.isAssociative()) {
427       // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
428       if (Op0 && Op0->getOpcode() == Opcode) {
429         Value *A = Op0->getOperand(0);
430         Value *B = Op0->getOperand(1);
431         Value *C = I.getOperand(1);
432 
433         // Does "B op C" simplify?
434         if (Value *V = simplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) {
435           // It simplifies to V.  Form "A op V".
436           replaceOperand(I, 0, A);
437           replaceOperand(I, 1, V);
438           bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0);
439           bool IsNSW = maintainNoSignedWrap(I, B, C) && hasNoSignedWrap(*Op0);
440 
441           // Conservatively clear all optional flags since they may not be
442           // preserved by the reassociation. Reset nsw/nuw based on the above
443           // analysis.
444           ClearSubclassDataAfterReassociation(I);
445 
446           // Note: this is only valid because SimplifyBinOp doesn't look at
447           // the operands to Op0.
448           if (IsNUW)
449             I.setHasNoUnsignedWrap(true);
450 
451           if (IsNSW)
452             I.setHasNoSignedWrap(true);
453 
454           Changed = true;
455           ++NumReassoc;
456           continue;
457         }
458       }
459 
460       // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
461       if (Op1 && Op1->getOpcode() == Opcode) {
462         Value *A = I.getOperand(0);
463         Value *B = Op1->getOperand(0);
464         Value *C = Op1->getOperand(1);
465 
466         // Does "A op B" simplify?
467         if (Value *V = simplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) {
468           // It simplifies to V.  Form "V op C".
469           replaceOperand(I, 0, V);
470           replaceOperand(I, 1, C);
471           // Conservatively clear the optional flags, since they may not be
472           // preserved by the reassociation.
473           ClearSubclassDataAfterReassociation(I);
474           Changed = true;
475           ++NumReassoc;
476           continue;
477         }
478       }
479     }
480 
481     if (I.isAssociative() && I.isCommutative()) {
482       if (simplifyAssocCastAssoc(&I, *this)) {
483         Changed = true;
484         ++NumReassoc;
485         continue;
486       }
487 
488       // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
489       if (Op0 && Op0->getOpcode() == Opcode) {
490         Value *A = Op0->getOperand(0);
491         Value *B = Op0->getOperand(1);
492         Value *C = I.getOperand(1);
493 
494         // Does "C op A" simplify?
495         if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
496           // It simplifies to V.  Form "V op B".
497           replaceOperand(I, 0, V);
498           replaceOperand(I, 1, B);
499           // Conservatively clear the optional flags, since they may not be
500           // preserved by the reassociation.
501           ClearSubclassDataAfterReassociation(I);
502           Changed = true;
503           ++NumReassoc;
504           continue;
505         }
506       }
507 
508       // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
509       if (Op1 && Op1->getOpcode() == Opcode) {
510         Value *A = I.getOperand(0);
511         Value *B = Op1->getOperand(0);
512         Value *C = Op1->getOperand(1);
513 
514         // Does "C op A" simplify?
515         if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
516           // It simplifies to V.  Form "B op V".
517           replaceOperand(I, 0, B);
518           replaceOperand(I, 1, V);
519           // Conservatively clear the optional flags, since they may not be
520           // preserved by the reassociation.
521           ClearSubclassDataAfterReassociation(I);
522           Changed = true;
523           ++NumReassoc;
524           continue;
525         }
526       }
527 
528       // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
529       // if C1 and C2 are constants.
530       Value *A, *B;
531       Constant *C1, *C2, *CRes;
532       if (Op0 && Op1 &&
533           Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
534           match(Op0, m_OneUse(m_BinOp(m_Value(A), m_Constant(C1)))) &&
535           match(Op1, m_OneUse(m_BinOp(m_Value(B), m_Constant(C2)))) &&
536           (CRes = ConstantFoldBinaryOpOperands(Opcode, C1, C2, DL))) {
537         bool IsNUW = hasNoUnsignedWrap(I) &&
538            hasNoUnsignedWrap(*Op0) &&
539            hasNoUnsignedWrap(*Op1);
540          BinaryOperator *NewBO = (IsNUW && Opcode == Instruction::Add) ?
541            BinaryOperator::CreateNUW(Opcode, A, B) :
542            BinaryOperator::Create(Opcode, A, B);
543 
544          if (isa<FPMathOperator>(NewBO)) {
545           FastMathFlags Flags = I.getFastMathFlags();
546           Flags &= Op0->getFastMathFlags();
547           Flags &= Op1->getFastMathFlags();
548           NewBO->setFastMathFlags(Flags);
549         }
550         InsertNewInstWith(NewBO, I);
551         NewBO->takeName(Op1);
552         replaceOperand(I, 0, NewBO);
553         replaceOperand(I, 1, CRes);
554         // Conservatively clear the optional flags, since they may not be
555         // preserved by the reassociation.
556         ClearSubclassDataAfterReassociation(I);
557         if (IsNUW)
558           I.setHasNoUnsignedWrap(true);
559 
560         Changed = true;
561         continue;
562       }
563     }
564 
565     // No further simplifications.
566     return Changed;
567   } while (true);
568 }
569 
570 /// Return whether "X LOp (Y ROp Z)" is always equal to
571 /// "(X LOp Y) ROp (X LOp Z)".
572 static bool leftDistributesOverRight(Instruction::BinaryOps LOp,
573                                      Instruction::BinaryOps ROp) {
574   // X & (Y | Z) <--> (X & Y) | (X & Z)
575   // X & (Y ^ Z) <--> (X & Y) ^ (X & Z)
576   if (LOp == Instruction::And)
577     return ROp == Instruction::Or || ROp == Instruction::Xor;
578 
579   // X | (Y & Z) <--> (X | Y) & (X | Z)
580   if (LOp == Instruction::Or)
581     return ROp == Instruction::And;
582 
583   // X * (Y + Z) <--> (X * Y) + (X * Z)
584   // X * (Y - Z) <--> (X * Y) - (X * Z)
585   if (LOp == Instruction::Mul)
586     return ROp == Instruction::Add || ROp == Instruction::Sub;
587 
588   return false;
589 }
590 
591 /// Return whether "(X LOp Y) ROp Z" is always equal to
592 /// "(X ROp Z) LOp (Y ROp Z)".
593 static bool rightDistributesOverLeft(Instruction::BinaryOps LOp,
594                                      Instruction::BinaryOps ROp) {
595   if (Instruction::isCommutative(ROp))
596     return leftDistributesOverRight(ROp, LOp);
597 
598   // (X {&|^} Y) >> Z <--> (X >> Z) {&|^} (Y >> Z) for all shifts.
599   return Instruction::isBitwiseLogicOp(LOp) && Instruction::isShift(ROp);
600 
601   // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
602   // but this requires knowing that the addition does not overflow and other
603   // such subtleties.
604 }
605 
606 /// This function returns identity value for given opcode, which can be used to
607 /// factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1).
608 static Value *getIdentityValue(Instruction::BinaryOps Opcode, Value *V) {
609   if (isa<Constant>(V))
610     return nullptr;
611 
612   return ConstantExpr::getBinOpIdentity(Opcode, V->getType());
613 }
614 
615 /// This function predicates factorization using distributive laws. By default,
616 /// it just returns the 'Op' inputs. But for special-cases like
617 /// 'add(shl(X, 5), ...)', this function will have TopOpcode == Instruction::Add
618 /// and Op = shl(X, 5). The 'shl' is treated as the more general 'mul X, 32' to
619 /// allow more factorization opportunities.
620 static Instruction::BinaryOps
621 getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op,
622                           Value *&LHS, Value *&RHS) {
623   assert(Op && "Expected a binary operator");
624   LHS = Op->getOperand(0);
625   RHS = Op->getOperand(1);
626   if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
627     Constant *C;
628     if (match(Op, m_Shl(m_Value(), m_Constant(C)))) {
629       // X << C --> X * (1 << C)
630       RHS = ConstantExpr::getShl(ConstantInt::get(Op->getType(), 1), C);
631       return Instruction::Mul;
632     }
633     // TODO: We can add other conversions e.g. shr => div etc.
634   }
635   return Op->getOpcode();
636 }
637 
638 /// This tries to simplify binary operations by factorizing out common terms
639 /// (e. g. "(A*B)+(A*C)" -> "A*(B+C)").
640 static Value *tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ,
641                                InstCombiner::BuilderTy &Builder,
642                                Instruction::BinaryOps InnerOpcode, Value *A,
643                                Value *B, Value *C, Value *D) {
644   assert(A && B && C && D && "All values must be provided");
645 
646   Value *V = nullptr;
647   Value *RetVal = nullptr;
648   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
649   Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
650 
651   // Does "X op' Y" always equal "Y op' X"?
652   bool InnerCommutative = Instruction::isCommutative(InnerOpcode);
653 
654   // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
655   if (leftDistributesOverRight(InnerOpcode, TopLevelOpcode)) {
656     // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
657     // commutative case, "(A op' B) op (C op' A)"?
658     if (A == C || (InnerCommutative && A == D)) {
659       if (A != C)
660         std::swap(C, D);
661       // Consider forming "A op' (B op D)".
662       // If "B op D" simplifies then it can be formed with no cost.
663       V = simplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));
664 
665       // If "B op D" doesn't simplify then only go on if one of the existing
666       // operations "A op' B" and "C op' D" will be zapped as no longer used.
667       if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
668         V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
669       if (V)
670         RetVal = Builder.CreateBinOp(InnerOpcode, A, V);
671     }
672   }
673 
674   // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
675   if (!RetVal && rightDistributesOverLeft(TopLevelOpcode, InnerOpcode)) {
676     // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
677     // commutative case, "(A op' B) op (B op' D)"?
678     if (B == D || (InnerCommutative && B == C)) {
679       if (B != D)
680         std::swap(C, D);
681       // Consider forming "(A op C) op' B".
682       // If "A op C" simplifies then it can be formed with no cost.
683       V = simplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));
684 
685       // If "A op C" doesn't simplify then only go on if one of the existing
686       // operations "A op' B" and "C op' D" will be zapped as no longer used.
687       if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
688         V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
689       if (V)
690         RetVal = Builder.CreateBinOp(InnerOpcode, V, B);
691     }
692   }
693 
694   if (!RetVal)
695     return nullptr;
696 
697   ++NumFactor;
698   RetVal->takeName(&I);
699 
700   // Try to add no-overflow flags to the final value.
701   if (isa<OverflowingBinaryOperator>(RetVal)) {
702     bool HasNSW = false;
703     bool HasNUW = false;
704     if (isa<OverflowingBinaryOperator>(&I)) {
705       HasNSW = I.hasNoSignedWrap();
706       HasNUW = I.hasNoUnsignedWrap();
707     }
708     if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS)) {
709       HasNSW &= LOBO->hasNoSignedWrap();
710       HasNUW &= LOBO->hasNoUnsignedWrap();
711     }
712 
713     if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS)) {
714       HasNSW &= ROBO->hasNoSignedWrap();
715       HasNUW &= ROBO->hasNoUnsignedWrap();
716     }
717 
718     if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
719       // We can propagate 'nsw' if we know that
720       //  %Y = mul nsw i16 %X, C
721       //  %Z = add nsw i16 %Y, %X
722       // =>
723       //  %Z = mul nsw i16 %X, C+1
724       //
725       // iff C+1 isn't INT_MIN
726       const APInt *CInt;
727       if (match(V, m_APInt(CInt)) && !CInt->isMinSignedValue())
728         cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);
729 
730       // nuw can be propagated with any constant or nuw value.
731       cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
732     }
733   }
734   return RetVal;
735 }
736 
737 // (Binop1 (Binop2 (logic_shift X, C), C1), (logic_shift Y, C))
738 //   IFF
739 //    1) the logic_shifts match
740 //    2) either both binops are binops and one is `and` or
741 //       BinOp1 is `and`
742 //       (logic_shift (inv_logic_shift C1, C), C) == C1 or
743 //
744 //    -> (logic_shift (Binop1 (Binop2 X, inv_logic_shift(C1, C)), Y), C)
745 //
746 // (Binop1 (Binop2 (logic_shift X, Amt), Mask), (logic_shift Y, Amt))
747 //   IFF
748 //    1) the logic_shifts match
749 //    2) BinOp1 == BinOp2 (if BinOp ==  `add`, then also requires `shl`).
750 //
751 //    -> (BinOp (logic_shift (BinOp X, Y)), Mask)
752 Instruction *InstCombinerImpl::foldBinOpShiftWithShift(BinaryOperator &I) {
753   auto IsValidBinOpc = [](unsigned Opc) {
754     switch (Opc) {
755     default:
756       return false;
757     case Instruction::And:
758     case Instruction::Or:
759     case Instruction::Xor:
760     case Instruction::Add:
761       // Skip Sub as we only match constant masks which will canonicalize to use
762       // add.
763       return true;
764     }
765   };
766 
767   // Check if we can distribute binop arbitrarily. `add` + `lshr` has extra
768   // constraints.
769   auto IsCompletelyDistributable = [](unsigned BinOpc1, unsigned BinOpc2,
770                                       unsigned ShOpc) {
771     return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
772            ShOpc == Instruction::Shl;
773   };
774 
775   auto GetInvShift = [](unsigned ShOpc) {
776     return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
777   };
778 
779   auto CanDistributeBinops = [&](unsigned BinOpc1, unsigned BinOpc2,
780                                  unsigned ShOpc, Constant *CMask,
781                                  Constant *CShift) {
782     // If the BinOp1 is `and` we don't need to check the mask.
783     if (BinOpc1 == Instruction::And)
784       return true;
785 
786     // For all other possible transfers we need complete distributable
787     // binop/shift (anything but `add` + `lshr`).
788     if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
789       return false;
790 
791     // If BinOp2 is `and`, any mask works (this only really helps for non-splat
792     // vecs, otherwise the mask will be simplified and the following check will
793     // handle it).
794     if (BinOpc2 == Instruction::And)
795       return true;
796 
797     // Otherwise, need mask that meets the below requirement.
798     // (logic_shift (inv_logic_shift Mask, ShAmt), ShAmt) == Mask
799     return ConstantExpr::get(
800                ShOpc, ConstantExpr::get(GetInvShift(ShOpc), CMask, CShift),
801                CShift) == CMask;
802   };
803 
804   auto MatchBinOp = [&](unsigned ShOpnum) -> Instruction * {
805     Constant *CMask, *CShift;
806     Value *X, *Y, *ShiftedX, *Mask, *Shift;
807     if (!match(I.getOperand(ShOpnum),
808                m_OneUse(m_LogicalShift(m_Value(Y), m_Value(Shift)))))
809       return nullptr;
810     if (!match(I.getOperand(1 - ShOpnum),
811                m_BinOp(m_Value(ShiftedX), m_Value(Mask))))
812       return nullptr;
813 
814     if (!match(ShiftedX,
815                m_OneUse(m_LogicalShift(m_Value(X), m_Specific(Shift)))))
816       return nullptr;
817 
818     // Make sure we are matching instruction shifts and not ConstantExpr
819     auto *IY = dyn_cast<Instruction>(I.getOperand(ShOpnum));
820     auto *IX = dyn_cast<Instruction>(ShiftedX);
821     if (!IY || !IX)
822       return nullptr;
823 
824     // LHS and RHS need same shift opcode
825     unsigned ShOpc = IY->getOpcode();
826     if (ShOpc != IX->getOpcode())
827       return nullptr;
828 
829     // Make sure binop is real instruction and not ConstantExpr
830     auto *BO2 = dyn_cast<Instruction>(I.getOperand(1 - ShOpnum));
831     if (!BO2)
832       return nullptr;
833 
834     unsigned BinOpc = BO2->getOpcode();
835     // Make sure we have valid binops.
836     if (!IsValidBinOpc(I.getOpcode()) || !IsValidBinOpc(BinOpc))
837       return nullptr;
838 
839     // If BinOp1 == BinOp2 and it's bitwise or shl with add, then just
840     // distribute to drop the shift irrelevant of constants.
841     if (BinOpc == I.getOpcode() &&
842         IsCompletelyDistributable(I.getOpcode(), BinOpc, ShOpc)) {
843       Value *NewBinOp2 = Builder.CreateBinOp(I.getOpcode(), X, Y);
844       Value *NewBinOp1 = Builder.CreateBinOp(
845           static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp2, Shift);
846       return BinaryOperator::Create(I.getOpcode(), NewBinOp1, Mask);
847     }
848 
849     // Otherwise we can only distribute by constant shifting the mask, so
850     // ensure we have constants.
851     if (!match(Shift, m_ImmConstant(CShift)))
852       return nullptr;
853     if (!match(Mask, m_ImmConstant(CMask)))
854       return nullptr;
855 
856     // Check if we can distribute the binops.
857     if (!CanDistributeBinops(I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
858       return nullptr;
859 
860     Constant *NewCMask = ConstantExpr::get(GetInvShift(ShOpc), CMask, CShift);
861     Value *NewBinOp2 = Builder.CreateBinOp(
862         static_cast<Instruction::BinaryOps>(BinOpc), X, NewCMask);
863     Value *NewBinOp1 = Builder.CreateBinOp(I.getOpcode(), Y, NewBinOp2);
864     return BinaryOperator::Create(static_cast<Instruction::BinaryOps>(ShOpc),
865                                   NewBinOp1, CShift);
866   };
867 
868   if (Instruction *R = MatchBinOp(0))
869     return R;
870   return MatchBinOp(1);
871 }
872 
873 // (Binop (zext C), (select C, T, F))
874 //    -> (select C, (binop 1, T), (binop 0, F))
875 //
876 // (Binop (sext C), (select C, T, F))
877 //    -> (select C, (binop -1, T), (binop 0, F))
878 //
879 // Attempt to simplify binary operations into a select with folded args, when
880 // one operand of the binop is a select instruction and the other operand is a
881 // zext/sext extension, whose value is the select condition.
882 Instruction *
883 InstCombinerImpl::foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I) {
884   // TODO: this simplification may be extended to any speculatable instruction,
885   // not just binops, and would possibly be handled better in FoldOpIntoSelect.
886   Instruction::BinaryOps Opc = I.getOpcode();
887   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
888   Value *A, *CondVal, *TrueVal, *FalseVal;
889   Value *CastOp;
890 
891   auto MatchSelectAndCast = [&](Value *CastOp, Value *SelectOp) {
892     return match(CastOp, m_ZExtOrSExt(m_Value(A))) &&
893            A->getType()->getScalarSizeInBits() == 1 &&
894            match(SelectOp, m_Select(m_Value(CondVal), m_Value(TrueVal),
895                                     m_Value(FalseVal)));
896   };
897 
898   // Make sure one side of the binop is a select instruction, and the other is a
899   // zero/sign extension operating on a i1.
900   if (MatchSelectAndCast(LHS, RHS))
901     CastOp = LHS;
902   else if (MatchSelectAndCast(RHS, LHS))
903     CastOp = RHS;
904   else
905     return nullptr;
906 
907   auto NewFoldedConst = [&](bool IsTrueArm, Value *V) {
908     bool IsCastOpRHS = (CastOp == RHS);
909     bool IsZExt = isa<ZExtOperator>(CastOp);
910     Constant *C;
911 
912     if (IsTrueArm) {
913       C = Constant::getNullValue(V->getType());
914     } else if (IsZExt) {
915       unsigned BitWidth = V->getType()->getScalarSizeInBits();
916       C = Constant::getIntegerValue(V->getType(), APInt(BitWidth, 1));
917     } else {
918       C = Constant::getAllOnesValue(V->getType());
919     }
920 
921     return IsCastOpRHS ? Builder.CreateBinOp(Opc, V, C)
922                        : Builder.CreateBinOp(Opc, C, V);
923   };
924 
925   // If the value used in the zext/sext is the select condition, or the negated
926   // of the select condition, the binop can be simplified.
927   if (CondVal == A)
928     return SelectInst::Create(CondVal, NewFoldedConst(false, TrueVal),
929                               NewFoldedConst(true, FalseVal));
930 
931   if (match(A, m_Not(m_Specific(CondVal))))
932     return SelectInst::Create(CondVal, NewFoldedConst(true, TrueVal),
933                               NewFoldedConst(false, FalseVal));
934 
935   return nullptr;
936 }
937 
938 Value *InstCombinerImpl::tryFactorizationFolds(BinaryOperator &I) {
939   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
940   BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
941   BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
942   Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
943   Value *A, *B, *C, *D;
944   Instruction::BinaryOps LHSOpcode, RHSOpcode;
945 
946   if (Op0)
947     LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B);
948   if (Op1)
949     RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D);
950 
951   // The instruction has the form "(A op' B) op (C op' D)".  Try to factorize
952   // a common term.
953   if (Op0 && Op1 && LHSOpcode == RHSOpcode)
954     if (Value *V = tryFactorization(I, SQ, Builder, LHSOpcode, A, B, C, D))
955       return V;
956 
957   // The instruction has the form "(A op' B) op (C)".  Try to factorize common
958   // term.
959   if (Op0)
960     if (Value *Ident = getIdentityValue(LHSOpcode, RHS))
961       if (Value *V =
962               tryFactorization(I, SQ, Builder, LHSOpcode, A, B, RHS, Ident))
963         return V;
964 
965   // The instruction has the form "(B) op (C op' D)".  Try to factorize common
966   // term.
967   if (Op1)
968     if (Value *Ident = getIdentityValue(RHSOpcode, LHS))
969       if (Value *V =
970               tryFactorization(I, SQ, Builder, RHSOpcode, LHS, Ident, C, D))
971         return V;
972 
973   return nullptr;
974 }
975 
976 /// This tries to simplify binary operations which some other binary operation
977 /// distributes over either by factorizing out common terms
978 /// (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this results in
979 /// simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is a win).
980 /// Returns the simplified value, or null if it didn't simplify.
981 Value *InstCombinerImpl::foldUsingDistributiveLaws(BinaryOperator &I) {
982   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
983   BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
984   BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
985   Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
986 
987   // Factorization.
988   if (Value *R = tryFactorizationFolds(I))
989     return R;
990 
991   // Expansion.
992   if (Op0 && rightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
993     // The instruction has the form "(A op' B) op C".  See if expanding it out
994     // to "(A op C) op' (B op C)" results in simplifications.
995     Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
996     Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'
997 
998     // Disable the use of undef because it's not safe to distribute undef.
999     auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
1000     Value *L = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
1001     Value *R = simplifyBinOp(TopLevelOpcode, B, C, SQDistributive);
1002 
1003     // Do "A op C" and "B op C" both simplify?
1004     if (L && R) {
1005       // They do! Return "L op' R".
1006       ++NumExpand;
1007       C = Builder.CreateBinOp(InnerOpcode, L, R);
1008       C->takeName(&I);
1009       return C;
1010     }
1011 
1012     // Does "A op C" simplify to the identity value for the inner opcode?
1013     if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
1014       // They do! Return "B op C".
1015       ++NumExpand;
1016       C = Builder.CreateBinOp(TopLevelOpcode, B, C);
1017       C->takeName(&I);
1018       return C;
1019     }
1020 
1021     // Does "B op C" simplify to the identity value for the inner opcode?
1022     if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
1023       // They do! Return "A op C".
1024       ++NumExpand;
1025       C = Builder.CreateBinOp(TopLevelOpcode, A, C);
1026       C->takeName(&I);
1027       return C;
1028     }
1029   }
1030 
1031   if (Op1 && leftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
1032     // The instruction has the form "A op (B op' C)".  See if expanding it out
1033     // to "(A op B) op' (A op C)" results in simplifications.
1034     Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
1035     Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'
1036 
1037     // Disable the use of undef because it's not safe to distribute undef.
1038     auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
1039     Value *L = simplifyBinOp(TopLevelOpcode, A, B, SQDistributive);
1040     Value *R = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
1041 
1042     // Do "A op B" and "A op C" both simplify?
1043     if (L && R) {
1044       // They do! Return "L op' R".
1045       ++NumExpand;
1046       A = Builder.CreateBinOp(InnerOpcode, L, R);
1047       A->takeName(&I);
1048       return A;
1049     }
1050 
1051     // Does "A op B" simplify to the identity value for the inner opcode?
1052     if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
1053       // They do! Return "A op C".
1054       ++NumExpand;
1055       A = Builder.CreateBinOp(TopLevelOpcode, A, C);
1056       A->takeName(&I);
1057       return A;
1058     }
1059 
1060     // Does "A op C" simplify to the identity value for the inner opcode?
1061     if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
1062       // They do! Return "A op B".
1063       ++NumExpand;
1064       A = Builder.CreateBinOp(TopLevelOpcode, A, B);
1065       A->takeName(&I);
1066       return A;
1067     }
1068   }
1069 
1070   return SimplifySelectsFeedingBinaryOp(I, LHS, RHS);
1071 }
1072 
1073 Value *InstCombinerImpl::SimplifySelectsFeedingBinaryOp(BinaryOperator &I,
1074                                                         Value *LHS,
1075                                                         Value *RHS) {
1076   Value *A, *B, *C, *D, *E, *F;
1077   bool LHSIsSelect = match(LHS, m_Select(m_Value(A), m_Value(B), m_Value(C)));
1078   bool RHSIsSelect = match(RHS, m_Select(m_Value(D), m_Value(E), m_Value(F)));
1079   if (!LHSIsSelect && !RHSIsSelect)
1080     return nullptr;
1081 
1082   FastMathFlags FMF;
1083   BuilderTy::FastMathFlagGuard Guard(Builder);
1084   if (isa<FPMathOperator>(&I)) {
1085     FMF = I.getFastMathFlags();
1086     Builder.setFastMathFlags(FMF);
1087   }
1088 
1089   Instruction::BinaryOps Opcode = I.getOpcode();
1090   SimplifyQuery Q = SQ.getWithInstruction(&I);
1091 
1092   Value *Cond, *True = nullptr, *False = nullptr;
1093 
1094   // Special-case for add/negate combination. Replace the zero in the negation
1095   // with the trailing add operand:
1096   // (Cond ? TVal : -N) + Z --> Cond ? True : (Z - N)
1097   // (Cond ? -N : FVal) + Z --> Cond ? (Z - N) : False
1098   auto foldAddNegate = [&](Value *TVal, Value *FVal, Value *Z) -> Value * {
1099     // We need an 'add' and exactly 1 arm of the select to have been simplified.
1100     if (Opcode != Instruction::Add || (!True && !False) || (True && False))
1101       return nullptr;
1102 
1103     Value *N;
1104     if (True && match(FVal, m_Neg(m_Value(N)))) {
1105       Value *Sub = Builder.CreateSub(Z, N);
1106       return Builder.CreateSelect(Cond, True, Sub, I.getName());
1107     }
1108     if (False && match(TVal, m_Neg(m_Value(N)))) {
1109       Value *Sub = Builder.CreateSub(Z, N);
1110       return Builder.CreateSelect(Cond, Sub, False, I.getName());
1111     }
1112     return nullptr;
1113   };
1114 
1115   if (LHSIsSelect && RHSIsSelect && A == D) {
1116     // (A ? B : C) op (A ? E : F) -> A ? (B op E) : (C op F)
1117     Cond = A;
1118     True = simplifyBinOp(Opcode, B, E, FMF, Q);
1119     False = simplifyBinOp(Opcode, C, F, FMF, Q);
1120 
1121     if (LHS->hasOneUse() && RHS->hasOneUse()) {
1122       if (False && !True)
1123         True = Builder.CreateBinOp(Opcode, B, E);
1124       else if (True && !False)
1125         False = Builder.CreateBinOp(Opcode, C, F);
1126     }
1127   } else if (LHSIsSelect && LHS->hasOneUse()) {
1128     // (A ? B : C) op Y -> A ? (B op Y) : (C op Y)
1129     Cond = A;
1130     True = simplifyBinOp(Opcode, B, RHS, FMF, Q);
1131     False = simplifyBinOp(Opcode, C, RHS, FMF, Q);
1132     if (Value *NewSel = foldAddNegate(B, C, RHS))
1133       return NewSel;
1134   } else if (RHSIsSelect && RHS->hasOneUse()) {
1135     // X op (D ? E : F) -> D ? (X op E) : (X op F)
1136     Cond = D;
1137     True = simplifyBinOp(Opcode, LHS, E, FMF, Q);
1138     False = simplifyBinOp(Opcode, LHS, F, FMF, Q);
1139     if (Value *NewSel = foldAddNegate(E, F, LHS))
1140       return NewSel;
1141   }
1142 
1143   if (!True || !False)
1144     return nullptr;
1145 
1146   Value *SI = Builder.CreateSelect(Cond, True, False);
1147   SI->takeName(&I);
1148   return SI;
1149 }
1150 
1151 /// Freely adapt every user of V as-if V was changed to !V.
1152 /// WARNING: only if canFreelyInvertAllUsersOf() said this can be done.
1153 void InstCombinerImpl::freelyInvertAllUsersOf(Value *I, Value *IgnoredUser) {
1154   assert(!isa<Constant>(I) && "Shouldn't invert users of constant");
1155   for (User *U : make_early_inc_range(I->users())) {
1156     if (U == IgnoredUser)
1157       continue; // Don't consider this user.
1158     switch (cast<Instruction>(U)->getOpcode()) {
1159     case Instruction::Select: {
1160       auto *SI = cast<SelectInst>(U);
1161       SI->swapValues();
1162       SI->swapProfMetadata();
1163       break;
1164     }
1165     case Instruction::Br:
1166       cast<BranchInst>(U)->swapSuccessors(); // swaps prof metadata too
1167       break;
1168     case Instruction::Xor:
1169       replaceInstUsesWith(cast<Instruction>(*U), I);
1170       break;
1171     default:
1172       llvm_unreachable("Got unexpected user - out of sync with "
1173                        "canFreelyInvertAllUsersOf() ?");
1174     }
1175   }
1176 }
1177 
1178 /// Given a 'sub' instruction, return the RHS of the instruction if the LHS is a
1179 /// constant zero (which is the 'negate' form).
1180 Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {
1181   Value *NegV;
1182   if (match(V, m_Neg(m_Value(NegV))))
1183     return NegV;
1184 
1185   // Constants can be considered to be negated values if they can be folded.
1186   if (ConstantInt *C = dyn_cast<ConstantInt>(V))
1187     return ConstantExpr::getNeg(C);
1188 
1189   if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
1190     if (C->getType()->getElementType()->isIntegerTy())
1191       return ConstantExpr::getNeg(C);
1192 
1193   if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
1194     for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1195       Constant *Elt = CV->getAggregateElement(i);
1196       if (!Elt)
1197         return nullptr;
1198 
1199       if (isa<UndefValue>(Elt))
1200         continue;
1201 
1202       if (!isa<ConstantInt>(Elt))
1203         return nullptr;
1204     }
1205     return ConstantExpr::getNeg(CV);
1206   }
1207 
1208   // Negate integer vector splats.
1209   if (auto *CV = dyn_cast<Constant>(V))
1210     if (CV->getType()->isVectorTy() &&
1211         CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
1212       return ConstantExpr::getNeg(CV);
1213 
1214   return nullptr;
1215 }
1216 
1217 /// A binop with a constant operand and a sign-extended boolean operand may be
1218 /// converted into a select of constants by applying the binary operation to
1219 /// the constant with the two possible values of the extended boolean (0 or -1).
1220 Instruction *InstCombinerImpl::foldBinopOfSextBoolToSelect(BinaryOperator &BO) {
1221   // TODO: Handle non-commutative binop (constant is operand 0).
1222   // TODO: Handle zext.
1223   // TODO: Peek through 'not' of cast.
1224   Value *BO0 = BO.getOperand(0);
1225   Value *BO1 = BO.getOperand(1);
1226   Value *X;
1227   Constant *C;
1228   if (!match(BO0, m_SExt(m_Value(X))) || !match(BO1, m_ImmConstant(C)) ||
1229       !X->getType()->isIntOrIntVectorTy(1))
1230     return nullptr;
1231 
1232   // bo (sext i1 X), C --> select X, (bo -1, C), (bo 0, C)
1233   Constant *Ones = ConstantInt::getAllOnesValue(BO.getType());
1234   Constant *Zero = ConstantInt::getNullValue(BO.getType());
1235   Value *TVal = Builder.CreateBinOp(BO.getOpcode(), Ones, C);
1236   Value *FVal = Builder.CreateBinOp(BO.getOpcode(), Zero, C);
1237   return SelectInst::Create(X, TVal, FVal);
1238 }
1239 
1240 static Constant *constantFoldOperationIntoSelectOperand(Instruction &I,
1241                                                         SelectInst *SI,
1242                                                         bool IsTrueArm) {
1243   SmallVector<Constant *> ConstOps;
1244   for (Value *Op : I.operands()) {
1245     CmpInst::Predicate Pred;
1246     Constant *C = nullptr;
1247     if (Op == SI) {
1248       C = dyn_cast<Constant>(IsTrueArm ? SI->getTrueValue()
1249                                        : SI->getFalseValue());
1250     } else if (match(SI->getCondition(),
1251                      m_ICmp(Pred, m_Specific(Op), m_Constant(C))) &&
1252                Pred == (IsTrueArm ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) &&
1253                isGuaranteedNotToBeUndefOrPoison(C)) {
1254       // Pass
1255     } else {
1256       C = dyn_cast<Constant>(Op);
1257     }
1258     if (C == nullptr)
1259       return nullptr;
1260 
1261     ConstOps.push_back(C);
1262   }
1263 
1264   return ConstantFoldInstOperands(&I, ConstOps, I.getModule()->getDataLayout());
1265 }
1266 
1267 static Value *foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1268                                              Value *NewOp, InstCombiner &IC) {
1269   Instruction *Clone = I.clone();
1270   Clone->replaceUsesOfWith(SI, NewOp);
1271   IC.InsertNewInstBefore(Clone, *SI);
1272   return Clone;
1273 }
1274 
1275 Instruction *InstCombinerImpl::FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
1276                                                 bool FoldWithMultiUse) {
1277   // Don't modify shared select instructions unless set FoldWithMultiUse
1278   if (!SI->hasOneUse() && !FoldWithMultiUse)
1279     return nullptr;
1280 
1281   Value *TV = SI->getTrueValue();
1282   Value *FV = SI->getFalseValue();
1283   if (!(isa<Constant>(TV) || isa<Constant>(FV)))
1284     return nullptr;
1285 
1286   // Bool selects with constant operands can be folded to logical ops.
1287   if (SI->getType()->isIntOrIntVectorTy(1))
1288     return nullptr;
1289 
1290   // If it's a bitcast involving vectors, make sure it has the same number of
1291   // elements on both sides.
1292   if (auto *BC = dyn_cast<BitCastInst>(&Op)) {
1293     VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
1294     VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());
1295 
1296     // Verify that either both or neither are vectors.
1297     if ((SrcTy == nullptr) != (DestTy == nullptr))
1298       return nullptr;
1299 
1300     // If vectors, verify that they have the same number of elements.
1301     if (SrcTy && SrcTy->getElementCount() != DestTy->getElementCount())
1302       return nullptr;
1303   }
1304 
1305   // Make sure that one of the select arms constant folds successfully.
1306   Value *NewTV = constantFoldOperationIntoSelectOperand(Op, SI, /*IsTrueArm*/ true);
1307   Value *NewFV = constantFoldOperationIntoSelectOperand(Op, SI, /*IsTrueArm*/ false);
1308   if (!NewTV && !NewFV)
1309     return nullptr;
1310 
1311   // Create an instruction for the arm that did not fold.
1312   if (!NewTV)
1313     NewTV = foldOperationIntoSelectOperand(Op, SI, TV, *this);
1314   if (!NewFV)
1315     NewFV = foldOperationIntoSelectOperand(Op, SI, FV, *this);
1316   return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
1317 }
1318 
1319 Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN) {
1320   unsigned NumPHIValues = PN->getNumIncomingValues();
1321   if (NumPHIValues == 0)
1322     return nullptr;
1323 
1324   // We normally only transform phis with a single use.  However, if a PHI has
1325   // multiple uses and they are all the same operation, we can fold *all* of the
1326   // uses into the PHI.
1327   if (!PN->hasOneUse()) {
1328     // Walk the use list for the instruction, comparing them to I.
1329     for (User *U : PN->users()) {
1330       Instruction *UI = cast<Instruction>(U);
1331       if (UI != &I && !I.isIdenticalTo(UI))
1332         return nullptr;
1333     }
1334     // Otherwise, we can replace *all* users with the new PHI we form.
1335   }
1336 
1337   // Check to see whether the instruction can be folded into each phi operand.
1338   // If there is one operand that does not fold, remember the BB it is in.
1339   // If there is more than one or if *it* is a PHI, bail out.
1340   SmallVector<Value *> NewPhiValues;
1341   BasicBlock *NonSimplifiedBB = nullptr;
1342   Value *NonSimplifiedInVal = nullptr;
1343   for (unsigned i = 0; i != NumPHIValues; ++i) {
1344     Value *InVal = PN->getIncomingValue(i);
1345     BasicBlock *InBB = PN->getIncomingBlock(i);
1346 
1347     // NB: It is a precondition of this transform that the operands be
1348     // phi translatable! This is usually trivially satisfied by limiting it
1349     // to constant ops, and for selects we do a more sophisticated check.
1350     SmallVector<Value *> Ops;
1351     for (Value *Op : I.operands()) {
1352       if (Op == PN)
1353         Ops.push_back(InVal);
1354       else
1355         Ops.push_back(Op->DoPHITranslation(PN->getParent(), InBB));
1356     }
1357 
1358     // Don't consider the simplification successful if we get back a constant
1359     // expression. That's just an instruction in hiding.
1360     // Also reject the case where we simplify back to the phi node. We wouldn't
1361     // be able to remove it in that case.
1362     Value *NewVal = simplifyInstructionWithOperands(
1363         &I, Ops, SQ.getWithInstruction(InBB->getTerminator()));
1364     if (NewVal && NewVal != PN && !match(NewVal, m_ConstantExpr())) {
1365       NewPhiValues.push_back(NewVal);
1366       continue;
1367     }
1368 
1369     if (isa<PHINode>(InVal)) return nullptr;  // Itself a phi.
1370     if (NonSimplifiedBB) return nullptr;  // More than one non-simplified value.
1371 
1372     NonSimplifiedBB = InBB;
1373     NonSimplifiedInVal = InVal;
1374     NewPhiValues.push_back(nullptr);
1375 
1376     // If the InVal is an invoke at the end of the pred block, then we can't
1377     // insert a computation after it without breaking the edge.
1378     if (isa<InvokeInst>(InVal))
1379       if (cast<Instruction>(InVal)->getParent() == NonSimplifiedBB)
1380         return nullptr;
1381 
1382     // If the incoming non-constant value is reachable from the phis block,
1383     // we'll push the operation across a loop backedge. This could result in
1384     // an infinite combine loop, and is generally non-profitable (especially
1385     // if the operation was originally outside the loop).
1386     if (isPotentiallyReachable(PN->getParent(), NonSimplifiedBB, nullptr, &DT,
1387                                LI))
1388       return nullptr;
1389   }
1390 
1391   // If there is exactly one non-simplified value, we can insert a copy of the
1392   // operation in that block.  However, if this is a critical edge, we would be
1393   // inserting the computation on some other paths (e.g. inside a loop).  Only
1394   // do this if the pred block is unconditionally branching into the phi block.
1395   // Also, make sure that the pred block is not dead code.
1396   if (NonSimplifiedBB != nullptr) {
1397     BranchInst *BI = dyn_cast<BranchInst>(NonSimplifiedBB->getTerminator());
1398     if (!BI || !BI->isUnconditional() ||
1399         !DT.isReachableFromEntry(NonSimplifiedBB))
1400       return nullptr;
1401   }
1402 
1403   // Okay, we can do the transformation: create the new PHI node.
1404   PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
1405   InsertNewInstBefore(NewPN, *PN);
1406   NewPN->takeName(PN);
1407   NewPN->setDebugLoc(PN->getDebugLoc());
1408 
1409   // If we are going to have to insert a new computation, do so right before the
1410   // predecessor's terminator.
1411   Instruction *Clone = nullptr;
1412   if (NonSimplifiedBB) {
1413     Clone = I.clone();
1414     for (Use &U : Clone->operands()) {
1415       if (U == PN)
1416         U = NonSimplifiedInVal;
1417       else
1418         U = U->DoPHITranslation(PN->getParent(), NonSimplifiedBB);
1419     }
1420     InsertNewInstBefore(Clone, *NonSimplifiedBB->getTerminator());
1421   }
1422 
1423   for (unsigned i = 0; i != NumPHIValues; ++i) {
1424     if (NewPhiValues[i])
1425       NewPN->addIncoming(NewPhiValues[i], PN->getIncomingBlock(i));
1426     else
1427       NewPN->addIncoming(Clone, PN->getIncomingBlock(i));
1428   }
1429 
1430   for (User *U : make_early_inc_range(PN->users())) {
1431     Instruction *User = cast<Instruction>(U);
1432     if (User == &I) continue;
1433     replaceInstUsesWith(*User, NewPN);
1434     eraseInstFromFunction(*User);
1435   }
1436 
1437   replaceAllDbgUsesWith(const_cast<PHINode &>(*PN),
1438                         const_cast<PHINode &>(*NewPN),
1439                         const_cast<PHINode &>(*PN), DT);
1440   return replaceInstUsesWith(I, NewPN);
1441 }
1442 
1443 Instruction *InstCombinerImpl::foldBinopWithPhiOperands(BinaryOperator &BO) {
1444   // TODO: This should be similar to the incoming values check in foldOpIntoPhi:
1445   //       we are guarding against replicating the binop in >1 predecessor.
1446   //       This could miss matching a phi with 2 constant incoming values.
1447   auto *Phi0 = dyn_cast<PHINode>(BO.getOperand(0));
1448   auto *Phi1 = dyn_cast<PHINode>(BO.getOperand(1));
1449   if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
1450       Phi0->getNumOperands() != Phi1->getNumOperands())
1451     return nullptr;
1452 
1453   // TODO: Remove the restriction for binop being in the same block as the phis.
1454   if (BO.getParent() != Phi0->getParent() ||
1455       BO.getParent() != Phi1->getParent())
1456     return nullptr;
1457 
1458   // Fold if there is at least one specific constant value in phi0 or phi1's
1459   // incoming values that comes from the same block and this specific constant
1460   // value can be used to do optimization for specific binary operator.
1461   // For example:
1462   // %phi0 = phi i32 [0, %bb0], [%i, %bb1]
1463   // %phi1 = phi i32 [%j, %bb0], [0, %bb1]
1464   // %add = add i32 %phi0, %phi1
1465   // ==>
1466   // %add = phi i32 [%j, %bb0], [%i, %bb1]
1467   Constant *C = ConstantExpr::getBinOpIdentity(BO.getOpcode(), BO.getType(),
1468                                                /*AllowRHSConstant*/ false);
1469   if (C) {
1470     SmallVector<Value *, 4> NewIncomingValues;
1471     auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &> T) {
1472       auto &Phi0Use = std::get<0>(T);
1473       auto &Phi1Use = std::get<1>(T);
1474       if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
1475         return false;
1476       Value *Phi0UseV = Phi0Use.get();
1477       Value *Phi1UseV = Phi1Use.get();
1478       if (Phi0UseV == C)
1479         NewIncomingValues.push_back(Phi1UseV);
1480       else if (Phi1UseV == C)
1481         NewIncomingValues.push_back(Phi0UseV);
1482       else
1483         return false;
1484       return true;
1485     };
1486 
1487     if (all_of(zip(Phi0->operands(), Phi1->operands()),
1488                CanFoldIncomingValuePair)) {
1489       PHINode *NewPhi =
1490           PHINode::Create(Phi0->getType(), Phi0->getNumOperands());
1491       assert(NewIncomingValues.size() == Phi0->getNumOperands() &&
1492              "The number of collected incoming values should equal the number "
1493              "of the original PHINode operands!");
1494       for (unsigned I = 0; I < Phi0->getNumOperands(); I++)
1495         NewPhi->addIncoming(NewIncomingValues[I], Phi0->getIncomingBlock(I));
1496       return NewPhi;
1497     }
1498   }
1499 
1500   if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
1501     return nullptr;
1502 
1503   // Match a pair of incoming constants for one of the predecessor blocks.
1504   BasicBlock *ConstBB, *OtherBB;
1505   Constant *C0, *C1;
1506   if (match(Phi0->getIncomingValue(0), m_ImmConstant(C0))) {
1507     ConstBB = Phi0->getIncomingBlock(0);
1508     OtherBB = Phi0->getIncomingBlock(1);
1509   } else if (match(Phi0->getIncomingValue(1), m_ImmConstant(C0))) {
1510     ConstBB = Phi0->getIncomingBlock(1);
1511     OtherBB = Phi0->getIncomingBlock(0);
1512   } else {
1513     return nullptr;
1514   }
1515   if (!match(Phi1->getIncomingValueForBlock(ConstBB), m_ImmConstant(C1)))
1516     return nullptr;
1517 
1518   // The block that we are hoisting to must reach here unconditionally.
1519   // Otherwise, we could be speculatively executing an expensive or
1520   // non-speculative op.
1521   auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->getTerminator());
1522   if (!PredBlockBranch || PredBlockBranch->isConditional() ||
1523       !DT.isReachableFromEntry(OtherBB))
1524     return nullptr;
1525 
1526   // TODO: This check could be tightened to only apply to binops (div/rem) that
1527   //       are not safe to speculatively execute. But that could allow hoisting
1528   //       potentially expensive instructions (fdiv for example).
1529   for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter)
1530     if (!isGuaranteedToTransferExecutionToSuccessor(&*BBIter))
1531       return nullptr;
1532 
1533   // Fold constants for the predecessor block with constant incoming values.
1534   Constant *NewC = ConstantFoldBinaryOpOperands(BO.getOpcode(), C0, C1, DL);
1535   if (!NewC)
1536     return nullptr;
1537 
1538   // Make a new binop in the predecessor block with the non-constant incoming
1539   // values.
1540   Builder.SetInsertPoint(PredBlockBranch);
1541   Value *NewBO = Builder.CreateBinOp(BO.getOpcode(),
1542                                      Phi0->getIncomingValueForBlock(OtherBB),
1543                                      Phi1->getIncomingValueForBlock(OtherBB));
1544   if (auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
1545     NotFoldedNewBO->copyIRFlags(&BO);
1546 
1547   // Replace the binop with a phi of the new values. The old phis are dead.
1548   PHINode *NewPhi = PHINode::Create(BO.getType(), 2);
1549   NewPhi->addIncoming(NewBO, OtherBB);
1550   NewPhi->addIncoming(NewC, ConstBB);
1551   return NewPhi;
1552 }
1553 
1554 Instruction *InstCombinerImpl::foldBinOpIntoSelectOrPhi(BinaryOperator &I) {
1555   if (!isa<Constant>(I.getOperand(1)))
1556     return nullptr;
1557 
1558   if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(0))) {
1559     if (Instruction *NewSel = FoldOpIntoSelect(I, Sel))
1560       return NewSel;
1561   } else if (auto *PN = dyn_cast<PHINode>(I.getOperand(0))) {
1562     if (Instruction *NewPhi = foldOpIntoPhi(I, PN))
1563       return NewPhi;
1564   }
1565   return nullptr;
1566 }
1567 
1568 static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
1569   // If this GEP has only 0 indices, it is the same pointer as
1570   // Src. If Src is not a trivial GEP too, don't combine
1571   // the indices.
1572   if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
1573       !Src.hasOneUse())
1574     return false;
1575   return true;
1576 }
1577 
1578 Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) {
1579   if (!isa<VectorType>(Inst.getType()))
1580     return nullptr;
1581 
1582   BinaryOperator::BinaryOps Opcode = Inst.getOpcode();
1583   Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
1584   assert(cast<VectorType>(LHS->getType())->getElementCount() ==
1585          cast<VectorType>(Inst.getType())->getElementCount());
1586   assert(cast<VectorType>(RHS->getType())->getElementCount() ==
1587          cast<VectorType>(Inst.getType())->getElementCount());
1588 
1589   // If both operands of the binop are vector concatenations, then perform the
1590   // narrow binop on each pair of the source operands followed by concatenation
1591   // of the results.
1592   Value *L0, *L1, *R0, *R1;
1593   ArrayRef<int> Mask;
1594   if (match(LHS, m_Shuffle(m_Value(L0), m_Value(L1), m_Mask(Mask))) &&
1595       match(RHS, m_Shuffle(m_Value(R0), m_Value(R1), m_SpecificMask(Mask))) &&
1596       LHS->hasOneUse() && RHS->hasOneUse() &&
1597       cast<ShuffleVectorInst>(LHS)->isConcat() &&
1598       cast<ShuffleVectorInst>(RHS)->isConcat()) {
1599     // This transform does not have the speculative execution constraint as
1600     // below because the shuffle is a concatenation. The new binops are
1601     // operating on exactly the same elements as the existing binop.
1602     // TODO: We could ease the mask requirement to allow different undef lanes,
1603     //       but that requires an analysis of the binop-with-undef output value.
1604     Value *NewBO0 = Builder.CreateBinOp(Opcode, L0, R0);
1605     if (auto *BO = dyn_cast<BinaryOperator>(NewBO0))
1606       BO->copyIRFlags(&Inst);
1607     Value *NewBO1 = Builder.CreateBinOp(Opcode, L1, R1);
1608     if (auto *BO = dyn_cast<BinaryOperator>(NewBO1))
1609       BO->copyIRFlags(&Inst);
1610     return new ShuffleVectorInst(NewBO0, NewBO1, Mask);
1611   }
1612 
1613   auto createBinOpReverse = [&](Value *X, Value *Y) {
1614     Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
1615     if (auto *BO = dyn_cast<BinaryOperator>(V))
1616       BO->copyIRFlags(&Inst);
1617     Module *M = Inst.getModule();
1618     Function *F = Intrinsic::getDeclaration(
1619         M, Intrinsic::experimental_vector_reverse, V->getType());
1620     return CallInst::Create(F, V);
1621   };
1622 
1623   // NOTE: Reverse shuffles don't require the speculative execution protection
1624   // below because they don't affect which lanes take part in the computation.
1625 
1626   Value *V1, *V2;
1627   if (match(LHS, m_VecReverse(m_Value(V1)))) {
1628     // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
1629     if (match(RHS, m_VecReverse(m_Value(V2))) &&
1630         (LHS->hasOneUse() || RHS->hasOneUse() ||
1631          (LHS == RHS && LHS->hasNUses(2))))
1632       return createBinOpReverse(V1, V2);
1633 
1634     // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
1635     if (LHS->hasOneUse() && isSplatValue(RHS))
1636       return createBinOpReverse(V1, RHS);
1637   }
1638   // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
1639   else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2)))))
1640     return createBinOpReverse(LHS, V2);
1641 
1642   // It may not be safe to reorder shuffles and things like div, urem, etc.
1643   // because we may trap when executing those ops on unknown vector elements.
1644   // See PR20059.
1645   if (!isSafeToSpeculativelyExecute(&Inst))
1646     return nullptr;
1647 
1648   auto createBinOpShuffle = [&](Value *X, Value *Y, ArrayRef<int> M) {
1649     Value *XY = Builder.CreateBinOp(Opcode, X, Y);
1650     if (auto *BO = dyn_cast<BinaryOperator>(XY))
1651       BO->copyIRFlags(&Inst);
1652     return new ShuffleVectorInst(XY, M);
1653   };
1654 
1655   // If both arguments of the binary operation are shuffles that use the same
1656   // mask and shuffle within a single vector, move the shuffle after the binop.
1657   if (match(LHS, m_Shuffle(m_Value(V1), m_Undef(), m_Mask(Mask))) &&
1658       match(RHS, m_Shuffle(m_Value(V2), m_Undef(), m_SpecificMask(Mask))) &&
1659       V1->getType() == V2->getType() &&
1660       (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) {
1661     // Op(shuffle(V1, Mask), shuffle(V2, Mask)) -> shuffle(Op(V1, V2), Mask)
1662     return createBinOpShuffle(V1, V2, Mask);
1663   }
1664 
1665   // If both arguments of a commutative binop are select-shuffles that use the
1666   // same mask with commuted operands, the shuffles are unnecessary.
1667   if (Inst.isCommutative() &&
1668       match(LHS, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))) &&
1669       match(RHS,
1670             m_Shuffle(m_Specific(V2), m_Specific(V1), m_SpecificMask(Mask)))) {
1671     auto *LShuf = cast<ShuffleVectorInst>(LHS);
1672     auto *RShuf = cast<ShuffleVectorInst>(RHS);
1673     // TODO: Allow shuffles that contain undefs in the mask?
1674     //       That is legal, but it reduces undef knowledge.
1675     // TODO: Allow arbitrary shuffles by shuffling after binop?
1676     //       That might be legal, but we have to deal with poison.
1677     if (LShuf->isSelect() &&
1678         !is_contained(LShuf->getShuffleMask(), PoisonMaskElem) &&
1679         RShuf->isSelect() &&
1680         !is_contained(RShuf->getShuffleMask(), PoisonMaskElem)) {
1681       // Example:
1682       // LHS = shuffle V1, V2, <0, 5, 6, 3>
1683       // RHS = shuffle V2, V1, <0, 5, 6, 3>
1684       // LHS + RHS --> (V10+V20, V21+V11, V22+V12, V13+V23) --> V1 + V2
1685       Instruction *NewBO = BinaryOperator::Create(Opcode, V1, V2);
1686       NewBO->copyIRFlags(&Inst);
1687       return NewBO;
1688     }
1689   }
1690 
1691   // If one argument is a shuffle within one vector and the other is a constant,
1692   // try moving the shuffle after the binary operation. This canonicalization
1693   // intends to move shuffles closer to other shuffles and binops closer to
1694   // other binops, so they can be folded. It may also enable demanded elements
1695   // transforms.
1696   Constant *C;
1697   auto *InstVTy = dyn_cast<FixedVectorType>(Inst.getType());
1698   if (InstVTy &&
1699       match(&Inst,
1700             m_c_BinOp(m_OneUse(m_Shuffle(m_Value(V1), m_Undef(), m_Mask(Mask))),
1701                       m_ImmConstant(C))) &&
1702       cast<FixedVectorType>(V1->getType())->getNumElements() <=
1703           InstVTy->getNumElements()) {
1704     assert(InstVTy->getScalarType() == V1->getType()->getScalarType() &&
1705            "Shuffle should not change scalar type");
1706 
1707     // Find constant NewC that has property:
1708     //   shuffle(NewC, ShMask) = C
1709     // If such constant does not exist (example: ShMask=<0,0> and C=<1,2>)
1710     // reorder is not possible. A 1-to-1 mapping is not required. Example:
1711     // ShMask = <1,1,2,2> and C = <5,5,6,6> --> NewC = <undef,5,6,undef>
1712     bool ConstOp1 = isa<Constant>(RHS);
1713     ArrayRef<int> ShMask = Mask;
1714     unsigned SrcVecNumElts =
1715         cast<FixedVectorType>(V1->getType())->getNumElements();
1716     UndefValue *UndefScalar = UndefValue::get(C->getType()->getScalarType());
1717     SmallVector<Constant *, 16> NewVecC(SrcVecNumElts, UndefScalar);
1718     bool MayChange = true;
1719     unsigned NumElts = InstVTy->getNumElements();
1720     for (unsigned I = 0; I < NumElts; ++I) {
1721       Constant *CElt = C->getAggregateElement(I);
1722       if (ShMask[I] >= 0) {
1723         assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle");
1724         Constant *NewCElt = NewVecC[ShMask[I]];
1725         // Bail out if:
1726         // 1. The constant vector contains a constant expression.
1727         // 2. The shuffle needs an element of the constant vector that can't
1728         //    be mapped to a new constant vector.
1729         // 3. This is a widening shuffle that copies elements of V1 into the
1730         //    extended elements (extending with undef is allowed).
1731         if (!CElt || (!isa<UndefValue>(NewCElt) && NewCElt != CElt) ||
1732             I >= SrcVecNumElts) {
1733           MayChange = false;
1734           break;
1735         }
1736         NewVecC[ShMask[I]] = CElt;
1737       }
1738       // If this is a widening shuffle, we must be able to extend with undef
1739       // elements. If the original binop does not produce an undef in the high
1740       // lanes, then this transform is not safe.
1741       // Similarly for undef lanes due to the shuffle mask, we can only
1742       // transform binops that preserve undef.
1743       // TODO: We could shuffle those non-undef constant values into the
1744       //       result by using a constant vector (rather than an undef vector)
1745       //       as operand 1 of the new binop, but that might be too aggressive
1746       //       for target-independent shuffle creation.
1747       if (I >= SrcVecNumElts || ShMask[I] < 0) {
1748         Constant *MaybeUndef =
1749             ConstOp1
1750                 ? ConstantFoldBinaryOpOperands(Opcode, UndefScalar, CElt, DL)
1751                 : ConstantFoldBinaryOpOperands(Opcode, CElt, UndefScalar, DL);
1752         if (!MaybeUndef || !match(MaybeUndef, m_Undef())) {
1753           MayChange = false;
1754           break;
1755         }
1756       }
1757     }
1758     if (MayChange) {
1759       Constant *NewC = ConstantVector::get(NewVecC);
1760       // It may not be safe to execute a binop on a vector with undef elements
1761       // because the entire instruction can be folded to undef or create poison
1762       // that did not exist in the original code.
1763       if (Inst.isIntDivRem() || (Inst.isShift() && ConstOp1))
1764         NewC = getSafeVectorConstantForBinop(Opcode, NewC, ConstOp1);
1765 
1766       // Op(shuffle(V1, Mask), C) -> shuffle(Op(V1, NewC), Mask)
1767       // Op(C, shuffle(V1, Mask)) -> shuffle(Op(NewC, V1), Mask)
1768       Value *NewLHS = ConstOp1 ? V1 : NewC;
1769       Value *NewRHS = ConstOp1 ? NewC : V1;
1770       return createBinOpShuffle(NewLHS, NewRHS, Mask);
1771     }
1772   }
1773 
1774   // Try to reassociate to sink a splat shuffle after a binary operation.
1775   if (Inst.isAssociative() && Inst.isCommutative()) {
1776     // Canonicalize shuffle operand as LHS.
1777     if (isa<ShuffleVectorInst>(RHS))
1778       std::swap(LHS, RHS);
1779 
1780     Value *X;
1781     ArrayRef<int> MaskC;
1782     int SplatIndex;
1783     Value *Y, *OtherOp;
1784     if (!match(LHS,
1785                m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(MaskC)))) ||
1786         !match(MaskC, m_SplatOrUndefMask(SplatIndex)) ||
1787         X->getType() != Inst.getType() ||
1788         !match(RHS, m_OneUse(m_BinOp(Opcode, m_Value(Y), m_Value(OtherOp)))))
1789       return nullptr;
1790 
1791     // FIXME: This may not be safe if the analysis allows undef elements. By
1792     //        moving 'Y' before the splat shuffle, we are implicitly assuming
1793     //        that it is not undef/poison at the splat index.
1794     if (isSplatValue(OtherOp, SplatIndex)) {
1795       std::swap(Y, OtherOp);
1796     } else if (!isSplatValue(Y, SplatIndex)) {
1797       return nullptr;
1798     }
1799 
1800     // X and Y are splatted values, so perform the binary operation on those
1801     // values followed by a splat followed by the 2nd binary operation:
1802     // bo (splat X), (bo Y, OtherOp) --> bo (splat (bo X, Y)), OtherOp
1803     Value *NewBO = Builder.CreateBinOp(Opcode, X, Y);
1804     SmallVector<int, 8> NewMask(MaskC.size(), SplatIndex);
1805     Value *NewSplat = Builder.CreateShuffleVector(NewBO, NewMask);
1806     Instruction *R = BinaryOperator::Create(Opcode, NewSplat, OtherOp);
1807 
1808     // Intersect FMF on both new binops. Other (poison-generating) flags are
1809     // dropped to be safe.
1810     if (isa<FPMathOperator>(R)) {
1811       R->copyFastMathFlags(&Inst);
1812       R->andIRFlags(RHS);
1813     }
1814     if (auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
1815       NewInstBO->copyIRFlags(R);
1816     return R;
1817   }
1818 
1819   return nullptr;
1820 }
1821 
1822 /// Try to narrow the width of a binop if at least 1 operand is an extend of
1823 /// of a value. This requires a potentially expensive known bits check to make
1824 /// sure the narrow op does not overflow.
1825 Instruction *InstCombinerImpl::narrowMathIfNoOverflow(BinaryOperator &BO) {
1826   // We need at least one extended operand.
1827   Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1);
1828 
1829   // If this is a sub, we swap the operands since we always want an extension
1830   // on the RHS. The LHS can be an extension or a constant.
1831   if (BO.getOpcode() == Instruction::Sub)
1832     std::swap(Op0, Op1);
1833 
1834   Value *X;
1835   bool IsSext = match(Op0, m_SExt(m_Value(X)));
1836   if (!IsSext && !match(Op0, m_ZExt(m_Value(X))))
1837     return nullptr;
1838 
1839   // If both operands are the same extension from the same source type and we
1840   // can eliminate at least one (hasOneUse), this might work.
1841   CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt;
1842   Value *Y;
1843   if (!(match(Op1, m_ZExtOrSExt(m_Value(Y))) && X->getType() == Y->getType() &&
1844         cast<Operator>(Op1)->getOpcode() == CastOpc &&
1845         (Op0->hasOneUse() || Op1->hasOneUse()))) {
1846     // If that did not match, see if we have a suitable constant operand.
1847     // Truncating and extending must produce the same constant.
1848     Constant *WideC;
1849     if (!Op0->hasOneUse() || !match(Op1, m_Constant(WideC)))
1850       return nullptr;
1851     Constant *NarrowC = ConstantExpr::getTrunc(WideC, X->getType());
1852     if (ConstantExpr::getCast(CastOpc, NarrowC, BO.getType()) != WideC)
1853       return nullptr;
1854     Y = NarrowC;
1855   }
1856 
1857   // Swap back now that we found our operands.
1858   if (BO.getOpcode() == Instruction::Sub)
1859     std::swap(X, Y);
1860 
1861   // Both operands have narrow versions. Last step: the math must not overflow
1862   // in the narrow width.
1863   if (!willNotOverflow(BO.getOpcode(), X, Y, BO, IsSext))
1864     return nullptr;
1865 
1866   // bo (ext X), (ext Y) --> ext (bo X, Y)
1867   // bo (ext X), C       --> ext (bo X, C')
1868   Value *NarrowBO = Builder.CreateBinOp(BO.getOpcode(), X, Y, "narrow");
1869   if (auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
1870     if (IsSext)
1871       NewBinOp->setHasNoSignedWrap();
1872     else
1873       NewBinOp->setHasNoUnsignedWrap();
1874   }
1875   return CastInst::Create(CastOpc, NarrowBO, BO.getType());
1876 }
1877 
1878 static bool isMergedGEPInBounds(GEPOperator &GEP1, GEPOperator &GEP2) {
1879   // At least one GEP must be inbounds.
1880   if (!GEP1.isInBounds() && !GEP2.isInBounds())
1881     return false;
1882 
1883   return (GEP1.isInBounds() || GEP1.hasAllZeroIndices()) &&
1884          (GEP2.isInBounds() || GEP2.hasAllZeroIndices());
1885 }
1886 
1887 /// Thread a GEP operation with constant indices through the constant true/false
1888 /// arms of a select.
1889 static Instruction *foldSelectGEP(GetElementPtrInst &GEP,
1890                                   InstCombiner::BuilderTy &Builder) {
1891   if (!GEP.hasAllConstantIndices())
1892     return nullptr;
1893 
1894   Instruction *Sel;
1895   Value *Cond;
1896   Constant *TrueC, *FalseC;
1897   if (!match(GEP.getPointerOperand(), m_Instruction(Sel)) ||
1898       !match(Sel,
1899              m_Select(m_Value(Cond), m_Constant(TrueC), m_Constant(FalseC))))
1900     return nullptr;
1901 
1902   // gep (select Cond, TrueC, FalseC), IndexC --> select Cond, TrueC', FalseC'
1903   // Propagate 'inbounds' and metadata from existing instructions.
1904   // Note: using IRBuilder to create the constants for efficiency.
1905   SmallVector<Value *, 4> IndexC(GEP.indices());
1906   bool IsInBounds = GEP.isInBounds();
1907   Type *Ty = GEP.getSourceElementType();
1908   Value *NewTrueC = Builder.CreateGEP(Ty, TrueC, IndexC, "", IsInBounds);
1909   Value *NewFalseC = Builder.CreateGEP(Ty, FalseC, IndexC, "", IsInBounds);
1910   return SelectInst::Create(Cond, NewTrueC, NewFalseC, "", nullptr, Sel);
1911 }
1912 
1913 Instruction *InstCombinerImpl::visitGEPOfGEP(GetElementPtrInst &GEP,
1914                                              GEPOperator *Src) {
1915   // Combine Indices - If the source pointer to this getelementptr instruction
1916   // is a getelementptr instruction with matching element type, combine the
1917   // indices of the two getelementptr instructions into a single instruction.
1918   if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
1919     return nullptr;
1920 
1921   // For constant GEPs, use a more general offset-based folding approach.
1922   Type *PtrTy = Src->getType()->getScalarType();
1923   if (GEP.hasAllConstantIndices() &&
1924       (Src->hasOneUse() || Src->hasAllConstantIndices())) {
1925     // Split Src into a variable part and a constant suffix.
1926     gep_type_iterator GTI = gep_type_begin(*Src);
1927     Type *BaseType = GTI.getIndexedType();
1928     bool IsFirstType = true;
1929     unsigned NumVarIndices = 0;
1930     for (auto Pair : enumerate(Src->indices())) {
1931       if (!isa<ConstantInt>(Pair.value())) {
1932         BaseType = GTI.getIndexedType();
1933         IsFirstType = false;
1934         NumVarIndices = Pair.index() + 1;
1935       }
1936       ++GTI;
1937     }
1938 
1939     // Determine the offset for the constant suffix of Src.
1940     APInt Offset(DL.getIndexTypeSizeInBits(PtrTy), 0);
1941     if (NumVarIndices != Src->getNumIndices()) {
1942       // FIXME: getIndexedOffsetInType() does not handled scalable vectors.
1943       if (isa<ScalableVectorType>(BaseType))
1944         return nullptr;
1945 
1946       SmallVector<Value *> ConstantIndices;
1947       if (!IsFirstType)
1948         ConstantIndices.push_back(
1949             Constant::getNullValue(Type::getInt32Ty(GEP.getContext())));
1950       append_range(ConstantIndices, drop_begin(Src->indices(), NumVarIndices));
1951       Offset += DL.getIndexedOffsetInType(BaseType, ConstantIndices);
1952     }
1953 
1954     // Add the offset for GEP (which is fully constant).
1955     if (!GEP.accumulateConstantOffset(DL, Offset))
1956       return nullptr;
1957 
1958     APInt OffsetOld = Offset;
1959     // Convert the total offset back into indices.
1960     SmallVector<APInt> ConstIndices =
1961         DL.getGEPIndicesForOffset(BaseType, Offset);
1962     if (!Offset.isZero() || (!IsFirstType && !ConstIndices[0].isZero())) {
1963       // If both GEP are constant-indexed, and cannot be merged in either way,
1964       // convert them to a GEP of i8.
1965       if (Src->hasAllConstantIndices())
1966         return replaceInstUsesWith(
1967             GEP, Builder.CreateGEP(
1968                      Builder.getInt8Ty(), Src->getOperand(0),
1969                      Builder.getInt(OffsetOld), "",
1970                      isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP))));
1971       return nullptr;
1972     }
1973 
1974     bool IsInBounds = isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP));
1975     SmallVector<Value *> Indices;
1976     append_range(Indices, drop_end(Src->indices(),
1977                                    Src->getNumIndices() - NumVarIndices));
1978     for (const APInt &Idx : drop_begin(ConstIndices, !IsFirstType)) {
1979       Indices.push_back(ConstantInt::get(GEP.getContext(), Idx));
1980       // Even if the total offset is inbounds, we may end up representing it
1981       // by first performing a larger negative offset, and then a smaller
1982       // positive one. The large negative offset might go out of bounds. Only
1983       // preserve inbounds if all signs are the same.
1984       IsInBounds &= Idx.isNonNegative() == ConstIndices[0].isNonNegative();
1985     }
1986 
1987     return replaceInstUsesWith(
1988         GEP, Builder.CreateGEP(Src->getSourceElementType(), Src->getOperand(0),
1989                                Indices, "", IsInBounds));
1990   }
1991 
1992   if (Src->getResultElementType() != GEP.getSourceElementType())
1993     return nullptr;
1994 
1995   SmallVector<Value*, 8> Indices;
1996 
1997   // Find out whether the last index in the source GEP is a sequential idx.
1998   bool EndsWithSequential = false;
1999   for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
2000        I != E; ++I)
2001     EndsWithSequential = I.isSequential();
2002 
2003   // Can we combine the two pointer arithmetics offsets?
2004   if (EndsWithSequential) {
2005     // Replace: gep (gep %P, long B), long A, ...
2006     // With:    T = long A+B; gep %P, T, ...
2007     Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
2008     Value *GO1 = GEP.getOperand(1);
2009 
2010     // If they aren't the same type, then the input hasn't been processed
2011     // by the loop above yet (which canonicalizes sequential index types to
2012     // intptr_t).  Just avoid transforming this until the input has been
2013     // normalized.
2014     if (SO1->getType() != GO1->getType())
2015       return nullptr;
2016 
2017     Value *Sum =
2018         simplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
2019     // Only do the combine when we are sure the cost after the
2020     // merge is never more than that before the merge.
2021     if (Sum == nullptr)
2022       return nullptr;
2023 
2024     // Update the GEP in place if possible.
2025     if (Src->getNumOperands() == 2) {
2026       GEP.setIsInBounds(isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP)));
2027       replaceOperand(GEP, 0, Src->getOperand(0));
2028       replaceOperand(GEP, 1, Sum);
2029       return &GEP;
2030     }
2031     Indices.append(Src->op_begin()+1, Src->op_end()-1);
2032     Indices.push_back(Sum);
2033     Indices.append(GEP.op_begin()+2, GEP.op_end());
2034   } else if (isa<Constant>(*GEP.idx_begin()) &&
2035              cast<Constant>(*GEP.idx_begin())->isNullValue() &&
2036              Src->getNumOperands() != 1) {
2037     // Otherwise we can do the fold if the first index of the GEP is a zero
2038     Indices.append(Src->op_begin()+1, Src->op_end());
2039     Indices.append(GEP.idx_begin()+1, GEP.idx_end());
2040   }
2041 
2042   if (!Indices.empty())
2043     return replaceInstUsesWith(
2044         GEP, Builder.CreateGEP(
2045                  Src->getSourceElementType(), Src->getOperand(0), Indices, "",
2046                  isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP))));
2047 
2048   return nullptr;
2049 }
2050 
2051 Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
2052   Value *PtrOp = GEP.getOperand(0);
2053   SmallVector<Value *, 8> Indices(GEP.indices());
2054   Type *GEPType = GEP.getType();
2055   Type *GEPEltType = GEP.getSourceElementType();
2056   bool IsGEPSrcEleScalable = isa<ScalableVectorType>(GEPEltType);
2057   if (Value *V = simplifyGEPInst(GEPEltType, PtrOp, Indices, GEP.isInBounds(),
2058                                  SQ.getWithInstruction(&GEP)))
2059     return replaceInstUsesWith(GEP, V);
2060 
2061   // For vector geps, use the generic demanded vector support.
2062   // Skip if GEP return type is scalable. The number of elements is unknown at
2063   // compile-time.
2064   if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
2065     auto VWidth = GEPFVTy->getNumElements();
2066     APInt UndefElts(VWidth, 0);
2067     APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
2068     if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask,
2069                                               UndefElts)) {
2070       if (V != &GEP)
2071         return replaceInstUsesWith(GEP, V);
2072       return &GEP;
2073     }
2074 
2075     // TODO: 1) Scalarize splat operands, 2) scalarize entire instruction if
2076     // possible (decide on canonical form for pointer broadcast), 3) exploit
2077     // undef elements to decrease demanded bits
2078   }
2079 
2080   // Eliminate unneeded casts for indices, and replace indices which displace
2081   // by multiples of a zero size type with zero.
2082   bool MadeChange = false;
2083 
2084   // Index width may not be the same width as pointer width.
2085   // Data layout chooses the right type based on supported integer types.
2086   Type *NewScalarIndexTy =
2087       DL.getIndexType(GEP.getPointerOperandType()->getScalarType());
2088 
2089   gep_type_iterator GTI = gep_type_begin(GEP);
2090   for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
2091        ++I, ++GTI) {
2092     // Skip indices into struct types.
2093     if (GTI.isStruct())
2094       continue;
2095 
2096     Type *IndexTy = (*I)->getType();
2097     Type *NewIndexType =
2098         IndexTy->isVectorTy()
2099             ? VectorType::get(NewScalarIndexTy,
2100                               cast<VectorType>(IndexTy)->getElementCount())
2101             : NewScalarIndexTy;
2102 
2103     // If the element type has zero size then any index over it is equivalent
2104     // to an index of zero, so replace it with zero if it is not zero already.
2105     Type *EltTy = GTI.getIndexedType();
2106     if (EltTy->isSized() && DL.getTypeAllocSize(EltTy).isZero())
2107       if (!isa<Constant>(*I) || !match(I->get(), m_Zero())) {
2108         *I = Constant::getNullValue(NewIndexType);
2109         MadeChange = true;
2110       }
2111 
2112     if (IndexTy != NewIndexType) {
2113       // If we are using a wider index than needed for this platform, shrink
2114       // it to what we need.  If narrower, sign-extend it to what we need.
2115       // This explicit cast can make subsequent optimizations more obvious.
2116       *I = Builder.CreateIntCast(*I, NewIndexType, true);
2117       MadeChange = true;
2118     }
2119   }
2120   if (MadeChange)
2121     return &GEP;
2122 
2123   // Check to see if the inputs to the PHI node are getelementptr instructions.
2124   if (auto *PN = dyn_cast<PHINode>(PtrOp)) {
2125     auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
2126     if (!Op1)
2127       return nullptr;
2128 
2129     // Don't fold a GEP into itself through a PHI node. This can only happen
2130     // through the back-edge of a loop. Folding a GEP into itself means that
2131     // the value of the previous iteration needs to be stored in the meantime,
2132     // thus requiring an additional register variable to be live, but not
2133     // actually achieving anything (the GEP still needs to be executed once per
2134     // loop iteration).
2135     if (Op1 == &GEP)
2136       return nullptr;
2137 
2138     int DI = -1;
2139 
2140     for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) {
2141       auto *Op2 = dyn_cast<GetElementPtrInst>(*I);
2142       if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
2143           Op1->getSourceElementType() != Op2->getSourceElementType())
2144         return nullptr;
2145 
2146       // As for Op1 above, don't try to fold a GEP into itself.
2147       if (Op2 == &GEP)
2148         return nullptr;
2149 
2150       // Keep track of the type as we walk the GEP.
2151       Type *CurTy = nullptr;
2152 
2153       for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
2154         if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
2155           return nullptr;
2156 
2157         if (Op1->getOperand(J) != Op2->getOperand(J)) {
2158           if (DI == -1) {
2159             // We have not seen any differences yet in the GEPs feeding the
2160             // PHI yet, so we record this one if it is allowed to be a
2161             // variable.
2162 
2163             // The first two arguments can vary for any GEP, the rest have to be
2164             // static for struct slots
2165             if (J > 1) {
2166               assert(CurTy && "No current type?");
2167               if (CurTy->isStructTy())
2168                 return nullptr;
2169             }
2170 
2171             DI = J;
2172           } else {
2173             // The GEP is different by more than one input. While this could be
2174             // extended to support GEPs that vary by more than one variable it
2175             // doesn't make sense since it greatly increases the complexity and
2176             // would result in an R+R+R addressing mode which no backend
2177             // directly supports and would need to be broken into several
2178             // simpler instructions anyway.
2179             return nullptr;
2180           }
2181         }
2182 
2183         // Sink down a layer of the type for the next iteration.
2184         if (J > 0) {
2185           if (J == 1) {
2186             CurTy = Op1->getSourceElementType();
2187           } else {
2188             CurTy =
2189                 GetElementPtrInst::getTypeAtIndex(CurTy, Op1->getOperand(J));
2190           }
2191         }
2192       }
2193     }
2194 
2195     // If not all GEPs are identical we'll have to create a new PHI node.
2196     // Check that the old PHI node has only one use so that it will get
2197     // removed.
2198     if (DI != -1 && !PN->hasOneUse())
2199       return nullptr;
2200 
2201     auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
2202     if (DI == -1) {
2203       // All the GEPs feeding the PHI are identical. Clone one down into our
2204       // BB so that it can be merged with the current GEP.
2205     } else {
2206       // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
2207       // into the current block so it can be merged, and create a new PHI to
2208       // set that index.
2209       PHINode *NewPN;
2210       {
2211         IRBuilderBase::InsertPointGuard Guard(Builder);
2212         Builder.SetInsertPoint(PN);
2213         NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
2214                                   PN->getNumOperands());
2215       }
2216 
2217       for (auto &I : PN->operands())
2218         NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
2219                            PN->getIncomingBlock(I));
2220 
2221       NewGEP->setOperand(DI, NewPN);
2222     }
2223 
2224     NewGEP->insertInto(GEP.getParent(), GEP.getParent()->getFirstInsertionPt());
2225     return replaceOperand(GEP, 0, NewGEP);
2226   }
2227 
2228   if (auto *Src = dyn_cast<GEPOperator>(PtrOp))
2229     if (Instruction *I = visitGEPOfGEP(GEP, Src))
2230       return I;
2231 
2232   // Skip if GEP source element type is scalable. The type alloc size is unknown
2233   // at compile-time.
2234   if (GEP.getNumIndices() == 1 && !IsGEPSrcEleScalable) {
2235     unsigned AS = GEP.getPointerAddressSpace();
2236     if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
2237         DL.getIndexSizeInBits(AS)) {
2238       uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedValue();
2239 
2240       bool Matched = false;
2241       uint64_t C;
2242       Value *V = nullptr;
2243       if (TyAllocSize == 1) {
2244         V = GEP.getOperand(1);
2245         Matched = true;
2246       } else if (match(GEP.getOperand(1),
2247                        m_AShr(m_Value(V), m_ConstantInt(C)))) {
2248         if (TyAllocSize == 1ULL << C)
2249           Matched = true;
2250       } else if (match(GEP.getOperand(1),
2251                        m_SDiv(m_Value(V), m_ConstantInt(C)))) {
2252         if (TyAllocSize == C)
2253           Matched = true;
2254       }
2255 
2256       // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X)) to (bitcast Y), but
2257       // only if both point to the same underlying object (otherwise provenance
2258       // is not necessarily retained).
2259       Value *Y;
2260       Value *X = GEP.getOperand(0);
2261       if (Matched &&
2262           match(V, m_Sub(m_PtrToInt(m_Value(Y)), m_PtrToInt(m_Specific(X)))) &&
2263           getUnderlyingObject(X) == getUnderlyingObject(Y))
2264         return CastInst::CreatePointerBitCastOrAddrSpaceCast(Y, GEPType);
2265     }
2266   }
2267 
2268   // We do not handle pointer-vector geps here.
2269   if (GEPType->isVectorTy())
2270     return nullptr;
2271 
2272   if (!GEP.isInBounds()) {
2273     unsigned IdxWidth =
2274         DL.getIndexSizeInBits(PtrOp->getType()->getPointerAddressSpace());
2275     APInt BasePtrOffset(IdxWidth, 0);
2276     Value *UnderlyingPtrOp =
2277             PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL,
2278                                                              BasePtrOffset);
2279     bool CanBeNull, CanBeFreed;
2280     uint64_t DerefBytes = UnderlyingPtrOp->getPointerDereferenceableBytes(
2281         DL, CanBeNull, CanBeFreed);
2282     if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
2283       if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
2284           BasePtrOffset.isNonNegative()) {
2285         APInt AllocSize(IdxWidth, DerefBytes);
2286         if (BasePtrOffset.ule(AllocSize)) {
2287           return GetElementPtrInst::CreateInBounds(
2288               GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());
2289         }
2290       }
2291     }
2292   }
2293 
2294   if (Instruction *R = foldSelectGEP(GEP, Builder))
2295     return R;
2296 
2297   return nullptr;
2298 }
2299 
2300 static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI,
2301                                          Instruction *AI) {
2302   if (isa<ConstantPointerNull>(V))
2303     return true;
2304   if (auto *LI = dyn_cast<LoadInst>(V))
2305     return isa<GlobalVariable>(LI->getPointerOperand());
2306   // Two distinct allocations will never be equal.
2307   return isAllocLikeFn(V, &TLI) && V != AI;
2308 }
2309 
2310 /// Given a call CB which uses an address UsedV, return true if we can prove the
2311 /// call's only possible effect is storing to V.
2312 static bool isRemovableWrite(CallBase &CB, Value *UsedV,
2313                              const TargetLibraryInfo &TLI) {
2314   if (!CB.use_empty())
2315     // TODO: add recursion if returned attribute is present
2316     return false;
2317 
2318   if (CB.isTerminator())
2319     // TODO: remove implementation restriction
2320     return false;
2321 
2322   if (!CB.willReturn() || !CB.doesNotThrow())
2323     return false;
2324 
2325   // If the only possible side effect of the call is writing to the alloca,
2326   // and the result isn't used, we can safely remove any reads implied by the
2327   // call including those which might read the alloca itself.
2328   std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(&CB, TLI);
2329   return Dest && Dest->Ptr == UsedV;
2330 }
2331 
2332 static bool isAllocSiteRemovable(Instruction *AI,
2333                                  SmallVectorImpl<WeakTrackingVH> &Users,
2334                                  const TargetLibraryInfo &TLI) {
2335   SmallVector<Instruction*, 4> Worklist;
2336   const std::optional<StringRef> Family = getAllocationFamily(AI, &TLI);
2337   Worklist.push_back(AI);
2338 
2339   do {
2340     Instruction *PI = Worklist.pop_back_val();
2341     for (User *U : PI->users()) {
2342       Instruction *I = cast<Instruction>(U);
2343       switch (I->getOpcode()) {
2344       default:
2345         // Give up the moment we see something we can't handle.
2346         return false;
2347 
2348       case Instruction::AddrSpaceCast:
2349       case Instruction::BitCast:
2350       case Instruction::GetElementPtr:
2351         Users.emplace_back(I);
2352         Worklist.push_back(I);
2353         continue;
2354 
2355       case Instruction::ICmp: {
2356         ICmpInst *ICI = cast<ICmpInst>(I);
2357         // We can fold eq/ne comparisons with null to false/true, respectively.
2358         // We also fold comparisons in some conditions provided the alloc has
2359         // not escaped (see isNeverEqualToUnescapedAlloc).
2360         if (!ICI->isEquality())
2361           return false;
2362         unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
2363         if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
2364           return false;
2365         Users.emplace_back(I);
2366         continue;
2367       }
2368 
2369       case Instruction::Call:
2370         // Ignore no-op and store intrinsics.
2371         if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
2372           switch (II->getIntrinsicID()) {
2373           default:
2374             return false;
2375 
2376           case Intrinsic::memmove:
2377           case Intrinsic::memcpy:
2378           case Intrinsic::memset: {
2379             MemIntrinsic *MI = cast<MemIntrinsic>(II);
2380             if (MI->isVolatile() || MI->getRawDest() != PI)
2381               return false;
2382             [[fallthrough]];
2383           }
2384           case Intrinsic::assume:
2385           case Intrinsic::invariant_start:
2386           case Intrinsic::invariant_end:
2387           case Intrinsic::lifetime_start:
2388           case Intrinsic::lifetime_end:
2389           case Intrinsic::objectsize:
2390             Users.emplace_back(I);
2391             continue;
2392           case Intrinsic::launder_invariant_group:
2393           case Intrinsic::strip_invariant_group:
2394             Users.emplace_back(I);
2395             Worklist.push_back(I);
2396             continue;
2397           }
2398         }
2399 
2400         if (isRemovableWrite(*cast<CallBase>(I), PI, TLI)) {
2401           Users.emplace_back(I);
2402           continue;
2403         }
2404 
2405         if (getFreedOperand(cast<CallBase>(I), &TLI) == PI &&
2406             getAllocationFamily(I, &TLI) == Family) {
2407           assert(Family);
2408           Users.emplace_back(I);
2409           continue;
2410         }
2411 
2412         if (getReallocatedOperand(cast<CallBase>(I)) == PI &&
2413             getAllocationFamily(I, &TLI) == Family) {
2414           assert(Family);
2415           Users.emplace_back(I);
2416           Worklist.push_back(I);
2417           continue;
2418         }
2419 
2420         return false;
2421 
2422       case Instruction::Store: {
2423         StoreInst *SI = cast<StoreInst>(I);
2424         if (SI->isVolatile() || SI->getPointerOperand() != PI)
2425           return false;
2426         Users.emplace_back(I);
2427         continue;
2428       }
2429       }
2430       llvm_unreachable("missing a return?");
2431     }
2432   } while (!Worklist.empty());
2433   return true;
2434 }
2435 
2436 Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) {
2437   assert(isa<AllocaInst>(MI) || isRemovableAlloc(&cast<CallBase>(MI), &TLI));
2438 
2439   // If we have a malloc call which is only used in any amount of comparisons to
2440   // null and free calls, delete the calls and replace the comparisons with true
2441   // or false as appropriate.
2442 
2443   // This is based on the principle that we can substitute our own allocation
2444   // function (which will never return null) rather than knowledge of the
2445   // specific function being called. In some sense this can change the permitted
2446   // outputs of a program (when we convert a malloc to an alloca, the fact that
2447   // the allocation is now on the stack is potentially visible, for example),
2448   // but we believe in a permissible manner.
2449   SmallVector<WeakTrackingVH, 64> Users;
2450 
2451   // If we are removing an alloca with a dbg.declare, insert dbg.value calls
2452   // before each store.
2453   SmallVector<DbgVariableIntrinsic *, 8> DVIs;
2454   std::unique_ptr<DIBuilder> DIB;
2455   if (isa<AllocaInst>(MI)) {
2456     findDbgUsers(DVIs, &MI);
2457     DIB.reset(new DIBuilder(*MI.getModule(), /*AllowUnresolved=*/false));
2458   }
2459 
2460   if (isAllocSiteRemovable(&MI, Users, TLI)) {
2461     for (unsigned i = 0, e = Users.size(); i != e; ++i) {
2462       // Lowering all @llvm.objectsize calls first because they may
2463       // use a bitcast/GEP of the alloca we are removing.
2464       if (!Users[i])
2465        continue;
2466 
2467       Instruction *I = cast<Instruction>(&*Users[i]);
2468 
2469       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
2470         if (II->getIntrinsicID() == Intrinsic::objectsize) {
2471           SmallVector<Instruction *> InsertedInstructions;
2472           Value *Result = lowerObjectSizeCall(
2473               II, DL, &TLI, AA, /*MustSucceed=*/true, &InsertedInstructions);
2474           for (Instruction *Inserted : InsertedInstructions)
2475             Worklist.add(Inserted);
2476           replaceInstUsesWith(*I, Result);
2477           eraseInstFromFunction(*I);
2478           Users[i] = nullptr; // Skip examining in the next loop.
2479         }
2480       }
2481     }
2482     for (unsigned i = 0, e = Users.size(); i != e; ++i) {
2483       if (!Users[i])
2484         continue;
2485 
2486       Instruction *I = cast<Instruction>(&*Users[i]);
2487 
2488       if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
2489         replaceInstUsesWith(*C,
2490                             ConstantInt::get(Type::getInt1Ty(C->getContext()),
2491                                              C->isFalseWhenEqual()));
2492       } else if (auto *SI = dyn_cast<StoreInst>(I)) {
2493         for (auto *DVI : DVIs)
2494           if (DVI->isAddressOfVariable())
2495             ConvertDebugDeclareToDebugValue(DVI, SI, *DIB);
2496       } else {
2497         // Casts, GEP, or anything else: we're about to delete this instruction,
2498         // so it can not have any valid uses.
2499         replaceInstUsesWith(*I, PoisonValue::get(I->getType()));
2500       }
2501       eraseInstFromFunction(*I);
2502     }
2503 
2504     if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
2505       // Replace invoke with a NOP intrinsic to maintain the original CFG
2506       Module *M = II->getModule();
2507       Function *F = Intrinsic::getDeclaration(M, Intrinsic::donothing);
2508       InvokeInst::Create(F, II->getNormalDest(), II->getUnwindDest(),
2509                          std::nullopt, "", II->getParent());
2510     }
2511 
2512     // Remove debug intrinsics which describe the value contained within the
2513     // alloca. In addition to removing dbg.{declare,addr} which simply point to
2514     // the alloca, remove dbg.value(<alloca>, ..., DW_OP_deref)'s as well, e.g.:
2515     //
2516     // ```
2517     //   define void @foo(i32 %0) {
2518     //     %a = alloca i32                              ; Deleted.
2519     //     store i32 %0, i32* %a
2520     //     dbg.value(i32 %0, "arg0")                    ; Not deleted.
2521     //     dbg.value(i32* %a, "arg0", DW_OP_deref)      ; Deleted.
2522     //     call void @trivially_inlinable_no_op(i32* %a)
2523     //     ret void
2524     //  }
2525     // ```
2526     //
2527     // This may not be required if we stop describing the contents of allocas
2528     // using dbg.value(<alloca>, ..., DW_OP_deref), but we currently do this in
2529     // the LowerDbgDeclare utility.
2530     //
2531     // If there is a dead store to `%a` in @trivially_inlinable_no_op, the
2532     // "arg0" dbg.value may be stale after the call. However, failing to remove
2533     // the DW_OP_deref dbg.value causes large gaps in location coverage.
2534     for (auto *DVI : DVIs)
2535       if (DVI->isAddressOfVariable() || DVI->getExpression()->startsWithDeref())
2536         DVI->eraseFromParent();
2537 
2538     return eraseInstFromFunction(MI);
2539   }
2540   return nullptr;
2541 }
2542 
2543 /// Move the call to free before a NULL test.
2544 ///
2545 /// Check if this free is accessed after its argument has been test
2546 /// against NULL (property 0).
2547 /// If yes, it is legal to move this call in its predecessor block.
2548 ///
2549 /// The move is performed only if the block containing the call to free
2550 /// will be removed, i.e.:
2551 /// 1. it has only one predecessor P, and P has two successors
2552 /// 2. it contains the call, noops, and an unconditional branch
2553 /// 3. its successor is the same as its predecessor's successor
2554 ///
2555 /// The profitability is out-of concern here and this function should
2556 /// be called only if the caller knows this transformation would be
2557 /// profitable (e.g., for code size).
2558 static Instruction *tryToMoveFreeBeforeNullTest(CallInst &FI,
2559                                                 const DataLayout &DL) {
2560   Value *Op = FI.getArgOperand(0);
2561   BasicBlock *FreeInstrBB = FI.getParent();
2562   BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
2563 
2564   // Validate part of constraint #1: Only one predecessor
2565   // FIXME: We can extend the number of predecessor, but in that case, we
2566   //        would duplicate the call to free in each predecessor and it may
2567   //        not be profitable even for code size.
2568   if (!PredBB)
2569     return nullptr;
2570 
2571   // Validate constraint #2: Does this block contains only the call to
2572   //                         free, noops, and an unconditional branch?
2573   BasicBlock *SuccBB;
2574   Instruction *FreeInstrBBTerminator = FreeInstrBB->getTerminator();
2575   if (!match(FreeInstrBBTerminator, m_UnconditionalBr(SuccBB)))
2576     return nullptr;
2577 
2578   // If there are only 2 instructions in the block, at this point,
2579   // this is the call to free and unconditional.
2580   // If there are more than 2 instructions, check that they are noops
2581   // i.e., they won't hurt the performance of the generated code.
2582   if (FreeInstrBB->size() != 2) {
2583     for (const Instruction &Inst : FreeInstrBB->instructionsWithoutDebug()) {
2584       if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
2585         continue;
2586       auto *Cast = dyn_cast<CastInst>(&Inst);
2587       if (!Cast || !Cast->isNoopCast(DL))
2588         return nullptr;
2589     }
2590   }
2591   // Validate the rest of constraint #1 by matching on the pred branch.
2592   Instruction *TI = PredBB->getTerminator();
2593   BasicBlock *TrueBB, *FalseBB;
2594   ICmpInst::Predicate Pred;
2595   if (!match(TI, m_Br(m_ICmp(Pred,
2596                              m_CombineOr(m_Specific(Op),
2597                                          m_Specific(Op->stripPointerCasts())),
2598                              m_Zero()),
2599                       TrueBB, FalseBB)))
2600     return nullptr;
2601   if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
2602     return nullptr;
2603 
2604   // Validate constraint #3: Ensure the null case just falls through.
2605   if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
2606     return nullptr;
2607   assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
2608          "Broken CFG: missing edge from predecessor to successor");
2609 
2610   // At this point, we know that everything in FreeInstrBB can be moved
2611   // before TI.
2612   for (Instruction &Instr : llvm::make_early_inc_range(*FreeInstrBB)) {
2613     if (&Instr == FreeInstrBBTerminator)
2614       break;
2615     Instr.moveBefore(TI);
2616   }
2617   assert(FreeInstrBB->size() == 1 &&
2618          "Only the branch instruction should remain");
2619 
2620   // Now that we've moved the call to free before the NULL check, we have to
2621   // remove any attributes on its parameter that imply it's non-null, because
2622   // those attributes might have only been valid because of the NULL check, and
2623   // we can get miscompiles if we keep them. This is conservative if non-null is
2624   // also implied by something other than the NULL check, but it's guaranteed to
2625   // be correct, and the conservativeness won't matter in practice, since the
2626   // attributes are irrelevant for the call to free itself and the pointer
2627   // shouldn't be used after the call.
2628   AttributeList Attrs = FI.getAttributes();
2629   Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, Attribute::NonNull);
2630   Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
2631   if (Dereferenceable.isValid()) {
2632     uint64_t Bytes = Dereferenceable.getDereferenceableBytes();
2633     Attrs = Attrs.removeParamAttribute(FI.getContext(), 0,
2634                                        Attribute::Dereferenceable);
2635     Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.getContext(), 0, Bytes);
2636   }
2637   FI.setAttributes(Attrs);
2638 
2639   return &FI;
2640 }
2641 
2642 Instruction *InstCombinerImpl::visitFree(CallInst &FI, Value *Op) {
2643   // free undef -> unreachable.
2644   if (isa<UndefValue>(Op)) {
2645     // Leave a marker since we can't modify the CFG here.
2646     CreateNonTerminatorUnreachable(&FI);
2647     return eraseInstFromFunction(FI);
2648   }
2649 
2650   // If we have 'free null' delete the instruction.  This can happen in stl code
2651   // when lots of inlining happens.
2652   if (isa<ConstantPointerNull>(Op))
2653     return eraseInstFromFunction(FI);
2654 
2655   // If we had free(realloc(...)) with no intervening uses, then eliminate the
2656   // realloc() entirely.
2657   CallInst *CI = dyn_cast<CallInst>(Op);
2658   if (CI && CI->hasOneUse())
2659     if (Value *ReallocatedOp = getReallocatedOperand(CI))
2660       return eraseInstFromFunction(*replaceInstUsesWith(*CI, ReallocatedOp));
2661 
2662   // If we optimize for code size, try to move the call to free before the null
2663   // test so that simplify cfg can remove the empty block and dead code
2664   // elimination the branch. I.e., helps to turn something like:
2665   // if (foo) free(foo);
2666   // into
2667   // free(foo);
2668   //
2669   // Note that we can only do this for 'free' and not for any flavor of
2670   // 'operator delete'; there is no 'operator delete' symbol for which we are
2671   // permitted to invent a call, even if we're passing in a null pointer.
2672   if (MinimizeSize) {
2673     LibFunc Func;
2674     if (TLI.getLibFunc(FI, Func) && TLI.has(Func) && Func == LibFunc_free)
2675       if (Instruction *I = tryToMoveFreeBeforeNullTest(FI, DL))
2676         return I;
2677   }
2678 
2679   return nullptr;
2680 }
2681 
2682 Instruction *InstCombinerImpl::visitReturnInst(ReturnInst &RI) {
2683   // Nothing for now.
2684   return nullptr;
2685 }
2686 
2687 // WARNING: keep in sync with SimplifyCFGOpt::simplifyUnreachable()!
2688 bool InstCombinerImpl::removeInstructionsBeforeUnreachable(Instruction &I) {
2689   // Try to remove the previous instruction if it must lead to unreachable.
2690   // This includes instructions like stores and "llvm.assume" that may not get
2691   // removed by simple dead code elimination.
2692   bool Changed = false;
2693   while (Instruction *Prev = I.getPrevNonDebugInstruction()) {
2694     // While we theoretically can erase EH, that would result in a block that
2695     // used to start with an EH no longer starting with EH, which is invalid.
2696     // To make it valid, we'd need to fixup predecessors to no longer refer to
2697     // this block, but that changes CFG, which is not allowed in InstCombine.
2698     if (Prev->isEHPad())
2699       break; // Can not drop any more instructions. We're done here.
2700 
2701     if (!isGuaranteedToTransferExecutionToSuccessor(Prev))
2702       break; // Can not drop any more instructions. We're done here.
2703     // Otherwise, this instruction can be freely erased,
2704     // even if it is not side-effect free.
2705 
2706     // A value may still have uses before we process it here (for example, in
2707     // another unreachable block), so convert those to poison.
2708     replaceInstUsesWith(*Prev, PoisonValue::get(Prev->getType()));
2709     eraseInstFromFunction(*Prev);
2710     Changed = true;
2711   }
2712   return Changed;
2713 }
2714 
2715 Instruction *InstCombinerImpl::visitUnreachableInst(UnreachableInst &I) {
2716   removeInstructionsBeforeUnreachable(I);
2717   return nullptr;
2718 }
2719 
2720 Instruction *InstCombinerImpl::visitUnconditionalBranchInst(BranchInst &BI) {
2721   assert(BI.isUnconditional() && "Only for unconditional branches.");
2722 
2723   // If this store is the second-to-last instruction in the basic block
2724   // (excluding debug info and bitcasts of pointers) and if the block ends with
2725   // an unconditional branch, try to move the store to the successor block.
2726 
2727   auto GetLastSinkableStore = [](BasicBlock::iterator BBI) {
2728     auto IsNoopInstrForStoreMerging = [](BasicBlock::iterator BBI) {
2729       return BBI->isDebugOrPseudoInst() ||
2730              (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy());
2731     };
2732 
2733     BasicBlock::iterator FirstInstr = BBI->getParent()->begin();
2734     do {
2735       if (BBI != FirstInstr)
2736         --BBI;
2737     } while (BBI != FirstInstr && IsNoopInstrForStoreMerging(BBI));
2738 
2739     return dyn_cast<StoreInst>(BBI);
2740   };
2741 
2742   if (StoreInst *SI = GetLastSinkableStore(BasicBlock::iterator(BI)))
2743     if (mergeStoreIntoSuccessor(*SI))
2744       return &BI;
2745 
2746   return nullptr;
2747 }
2748 
2749 // Under the assumption that I is unreachable, remove it and following
2750 // instructions.
2751 bool InstCombinerImpl::handleUnreachableFrom(Instruction *I) {
2752   bool Changed = false;
2753   BasicBlock *BB = I->getParent();
2754   for (Instruction &Inst : make_early_inc_range(
2755            make_range(std::next(BB->getTerminator()->getReverseIterator()),
2756                       std::next(I->getReverseIterator())))) {
2757     if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
2758       replaceInstUsesWith(Inst, PoisonValue::get(Inst.getType()));
2759       Changed = true;
2760     }
2761     if (Inst.isEHPad() || Inst.getType()->isTokenTy())
2762       continue;
2763     eraseInstFromFunction(Inst);
2764     Changed = true;
2765   }
2766 
2767   // Replace phi node operands in successor blocks with poison.
2768   for (BasicBlock *Succ : successors(BB))
2769     for (PHINode &PN : Succ->phis())
2770       for (Use &U : PN.incoming_values())
2771         if (PN.getIncomingBlock(U) == BB && !isa<PoisonValue>(U)) {
2772           replaceUse(U, PoisonValue::get(PN.getType()));
2773           addToWorklist(&PN);
2774           Changed = true;
2775         }
2776 
2777   // TODO: Successor blocks may also be dead.
2778   return Changed;
2779 }
2780 
2781 bool InstCombinerImpl::handlePotentiallyDeadSuccessors(BasicBlock *BB,
2782                                                        BasicBlock *LiveSucc) {
2783   bool Changed = false;
2784   for (BasicBlock *Succ : successors(BB)) {
2785     // The live successor isn't dead.
2786     if (Succ == LiveSucc)
2787       continue;
2788 
2789     if (!all_of(predecessors(Succ), [&](BasicBlock *Pred) {
2790           return DT.dominates(BasicBlockEdge(BB, Succ),
2791                               BasicBlockEdge(Pred, Succ));
2792         }))
2793       continue;
2794 
2795     Changed |= handleUnreachableFrom(&Succ->front());
2796   }
2797   return Changed;
2798 }
2799 
2800 Instruction *InstCombinerImpl::visitBranchInst(BranchInst &BI) {
2801   if (BI.isUnconditional())
2802     return visitUnconditionalBranchInst(BI);
2803 
2804   // Change br (not X), label True, label False to: br X, label False, True
2805   Value *Cond = BI.getCondition();
2806   Value *X;
2807   if (match(Cond, m_Not(m_Value(X))) && !isa<Constant>(X)) {
2808     // Swap Destinations and condition...
2809     BI.swapSuccessors();
2810     return replaceOperand(BI, 0, X);
2811   }
2812 
2813   // Canonicalize logical-and-with-invert as logical-or-with-invert.
2814   // This is done by inverting the condition and swapping successors:
2815   // br (X && !Y), T, F --> br !(X && !Y), F, T --> br (!X || Y), F, T
2816   Value *Y;
2817   if (isa<SelectInst>(Cond) &&
2818       match(Cond,
2819             m_OneUse(m_LogicalAnd(m_Value(X), m_OneUse(m_Not(m_Value(Y))))))) {
2820     Value *NotX = Builder.CreateNot(X, "not." + X->getName());
2821     Value *Or = Builder.CreateLogicalOr(NotX, Y);
2822     BI.swapSuccessors();
2823     return replaceOperand(BI, 0, Or);
2824   }
2825 
2826   // If the condition is irrelevant, remove the use so that other
2827   // transforms on the condition become more effective.
2828   if (!isa<ConstantInt>(Cond) && BI.getSuccessor(0) == BI.getSuccessor(1))
2829     return replaceOperand(BI, 0, ConstantInt::getFalse(Cond->getType()));
2830 
2831   // Canonicalize, for example, fcmp_one -> fcmp_oeq.
2832   CmpInst::Predicate Pred;
2833   if (match(Cond, m_OneUse(m_FCmp(Pred, m_Value(), m_Value()))) &&
2834       !isCanonicalPredicate(Pred)) {
2835     // Swap destinations and condition.
2836     auto *Cmp = cast<CmpInst>(Cond);
2837     Cmp->setPredicate(CmpInst::getInversePredicate(Pred));
2838     BI.swapSuccessors();
2839     Worklist.push(Cmp);
2840     return &BI;
2841   }
2842 
2843   if (isa<UndefValue>(Cond) &&
2844       handlePotentiallyDeadSuccessors(BI.getParent(), /*LiveSucc*/ nullptr))
2845     return &BI;
2846   if (auto *CI = dyn_cast<ConstantInt>(Cond))
2847     if (handlePotentiallyDeadSuccessors(BI.getParent(),
2848                                         BI.getSuccessor(!CI->getZExtValue())))
2849       return &BI;
2850 
2851   return nullptr;
2852 }
2853 
2854 Instruction *InstCombinerImpl::visitSwitchInst(SwitchInst &SI) {
2855   Value *Cond = SI.getCondition();
2856   Value *Op0;
2857   ConstantInt *AddRHS;
2858   if (match(Cond, m_Add(m_Value(Op0), m_ConstantInt(AddRHS)))) {
2859     // Change 'switch (X+4) case 1:' into 'switch (X) case -3'.
2860     for (auto Case : SI.cases()) {
2861       Constant *NewCase = ConstantExpr::getSub(Case.getCaseValue(), AddRHS);
2862       assert(isa<ConstantInt>(NewCase) &&
2863              "Result of expression should be constant");
2864       Case.setValue(cast<ConstantInt>(NewCase));
2865     }
2866     return replaceOperand(SI, 0, Op0);
2867   }
2868 
2869   if (isa<UndefValue>(Cond) &&
2870       handlePotentiallyDeadSuccessors(SI.getParent(), /*LiveSucc*/ nullptr))
2871     return &SI;
2872   if (auto *CI = dyn_cast<ConstantInt>(Cond))
2873     if (handlePotentiallyDeadSuccessors(
2874             SI.getParent(), SI.findCaseValue(CI)->getCaseSuccessor()))
2875       return &SI;
2876 
2877   KnownBits Known = computeKnownBits(Cond, 0, &SI);
2878   unsigned LeadingKnownZeros = Known.countMinLeadingZeros();
2879   unsigned LeadingKnownOnes = Known.countMinLeadingOnes();
2880 
2881   // Compute the number of leading bits we can ignore.
2882   // TODO: A better way to determine this would use ComputeNumSignBits().
2883   for (const auto &C : SI.cases()) {
2884     LeadingKnownZeros =
2885         std::min(LeadingKnownZeros, C.getCaseValue()->getValue().countl_zero());
2886     LeadingKnownOnes =
2887         std::min(LeadingKnownOnes, C.getCaseValue()->getValue().countl_one());
2888   }
2889 
2890   unsigned NewWidth = Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
2891 
2892   // Shrink the condition operand if the new type is smaller than the old type.
2893   // But do not shrink to a non-standard type, because backend can't generate
2894   // good code for that yet.
2895   // TODO: We can make it aggressive again after fixing PR39569.
2896   if (NewWidth > 0 && NewWidth < Known.getBitWidth() &&
2897       shouldChangeType(Known.getBitWidth(), NewWidth)) {
2898     IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
2899     Builder.SetInsertPoint(&SI);
2900     Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc");
2901 
2902     for (auto Case : SI.cases()) {
2903       APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
2904       Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
2905     }
2906     return replaceOperand(SI, 0, NewCond);
2907   }
2908 
2909   return nullptr;
2910 }
2911 
2912 Instruction *
2913 InstCombinerImpl::foldExtractOfOverflowIntrinsic(ExtractValueInst &EV) {
2914   auto *WO = dyn_cast<WithOverflowInst>(EV.getAggregateOperand());
2915   if (!WO)
2916     return nullptr;
2917 
2918   Intrinsic::ID OvID = WO->getIntrinsicID();
2919   const APInt *C = nullptr;
2920   if (match(WO->getRHS(), m_APIntAllowUndef(C))) {
2921     if (*EV.idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
2922                                  OvID == Intrinsic::umul_with_overflow)) {
2923       // extractvalue (any_mul_with_overflow X, -1), 0 --> -X
2924       if (C->isAllOnes())
2925         return BinaryOperator::CreateNeg(WO->getLHS());
2926       // extractvalue (any_mul_with_overflow X, 2^n), 0 --> X << n
2927       if (C->isPowerOf2()) {
2928         return BinaryOperator::CreateShl(
2929             WO->getLHS(),
2930             ConstantInt::get(WO->getLHS()->getType(), C->logBase2()));
2931       }
2932     }
2933   }
2934 
2935   // We're extracting from an overflow intrinsic. See if we're the only user.
2936   // That allows us to simplify multiple result intrinsics to simpler things
2937   // that just get one value.
2938   if (!WO->hasOneUse())
2939     return nullptr;
2940 
2941   // Check if we're grabbing only the result of a 'with overflow' intrinsic
2942   // and replace it with a traditional binary instruction.
2943   if (*EV.idx_begin() == 0) {
2944     Instruction::BinaryOps BinOp = WO->getBinaryOp();
2945     Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
2946     // Replace the old instruction's uses with poison.
2947     replaceInstUsesWith(*WO, PoisonValue::get(WO->getType()));
2948     eraseInstFromFunction(*WO);
2949     return BinaryOperator::Create(BinOp, LHS, RHS);
2950   }
2951 
2952   assert(*EV.idx_begin() == 1 && "Unexpected extract index for overflow inst");
2953 
2954   // (usub LHS, RHS) overflows when LHS is unsigned-less-than RHS.
2955   if (OvID == Intrinsic::usub_with_overflow)
2956     return new ICmpInst(ICmpInst::ICMP_ULT, WO->getLHS(), WO->getRHS());
2957 
2958   // smul with i1 types overflows when both sides are set: -1 * -1 == +1, but
2959   // +1 is not possible because we assume signed values.
2960   if (OvID == Intrinsic::smul_with_overflow &&
2961       WO->getLHS()->getType()->isIntOrIntVectorTy(1))
2962     return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
2963 
2964   // If only the overflow result is used, and the right hand side is a
2965   // constant (or constant splat), we can remove the intrinsic by directly
2966   // checking for overflow.
2967   if (C) {
2968     // Compute the no-wrap range for LHS given RHS=C, then construct an
2969     // equivalent icmp, potentially using an offset.
2970     ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
2971         WO->getBinaryOp(), *C, WO->getNoWrapKind());
2972 
2973     CmpInst::Predicate Pred;
2974     APInt NewRHSC, Offset;
2975     NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
2976     auto *OpTy = WO->getRHS()->getType();
2977     auto *NewLHS = WO->getLHS();
2978     if (Offset != 0)
2979       NewLHS = Builder.CreateAdd(NewLHS, ConstantInt::get(OpTy, Offset));
2980     return new ICmpInst(ICmpInst::getInversePredicate(Pred), NewLHS,
2981                         ConstantInt::get(OpTy, NewRHSC));
2982   }
2983 
2984   return nullptr;
2985 }
2986 
2987 Instruction *InstCombinerImpl::visitExtractValueInst(ExtractValueInst &EV) {
2988   Value *Agg = EV.getAggregateOperand();
2989 
2990   if (!EV.hasIndices())
2991     return replaceInstUsesWith(EV, Agg);
2992 
2993   if (Value *V = simplifyExtractValueInst(Agg, EV.getIndices(),
2994                                           SQ.getWithInstruction(&EV)))
2995     return replaceInstUsesWith(EV, V);
2996 
2997   if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
2998     // We're extracting from an insertvalue instruction, compare the indices
2999     const unsigned *exti, *exte, *insi, *inse;
3000     for (exti = EV.idx_begin(), insi = IV->idx_begin(),
3001          exte = EV.idx_end(), inse = IV->idx_end();
3002          exti != exte && insi != inse;
3003          ++exti, ++insi) {
3004       if (*insi != *exti)
3005         // The insert and extract both reference distinctly different elements.
3006         // This means the extract is not influenced by the insert, and we can
3007         // replace the aggregate operand of the extract with the aggregate
3008         // operand of the insert. i.e., replace
3009         // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
3010         // %E = extractvalue { i32, { i32 } } %I, 0
3011         // with
3012         // %E = extractvalue { i32, { i32 } } %A, 0
3013         return ExtractValueInst::Create(IV->getAggregateOperand(),
3014                                         EV.getIndices());
3015     }
3016     if (exti == exte && insi == inse)
3017       // Both iterators are at the end: Index lists are identical. Replace
3018       // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
3019       // %C = extractvalue { i32, { i32 } } %B, 1, 0
3020       // with "i32 42"
3021       return replaceInstUsesWith(EV, IV->getInsertedValueOperand());
3022     if (exti == exte) {
3023       // The extract list is a prefix of the insert list. i.e. replace
3024       // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
3025       // %E = extractvalue { i32, { i32 } } %I, 1
3026       // with
3027       // %X = extractvalue { i32, { i32 } } %A, 1
3028       // %E = insertvalue { i32 } %X, i32 42, 0
3029       // by switching the order of the insert and extract (though the
3030       // insertvalue should be left in, since it may have other uses).
3031       Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(),
3032                                                 EV.getIndices());
3033       return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
3034                                      ArrayRef(insi, inse));
3035     }
3036     if (insi == inse)
3037       // The insert list is a prefix of the extract list
3038       // We can simply remove the common indices from the extract and make it
3039       // operate on the inserted value instead of the insertvalue result.
3040       // i.e., replace
3041       // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
3042       // %E = extractvalue { i32, { i32 } } %I, 1, 0
3043       // with
3044       // %E extractvalue { i32 } { i32 42 }, 0
3045       return ExtractValueInst::Create(IV->getInsertedValueOperand(),
3046                                       ArrayRef(exti, exte));
3047   }
3048 
3049   if (Instruction *R = foldExtractOfOverflowIntrinsic(EV))
3050     return R;
3051 
3052   if (LoadInst *L = dyn_cast<LoadInst>(Agg)) {
3053     // Bail out if the aggregate contains scalable vector type
3054     if (auto *STy = dyn_cast<StructType>(Agg->getType());
3055         STy && STy->containsScalableVectorType())
3056       return nullptr;
3057 
3058     // If the (non-volatile) load only has one use, we can rewrite this to a
3059     // load from a GEP. This reduces the size of the load. If a load is used
3060     // only by extractvalue instructions then this either must have been
3061     // optimized before, or it is a struct with padding, in which case we
3062     // don't want to do the transformation as it loses padding knowledge.
3063     if (L->isSimple() && L->hasOneUse()) {
3064       // extractvalue has integer indices, getelementptr has Value*s. Convert.
3065       SmallVector<Value*, 4> Indices;
3066       // Prefix an i32 0 since we need the first element.
3067       Indices.push_back(Builder.getInt32(0));
3068       for (unsigned Idx : EV.indices())
3069         Indices.push_back(Builder.getInt32(Idx));
3070 
3071       // We need to insert these at the location of the old load, not at that of
3072       // the extractvalue.
3073       Builder.SetInsertPoint(L);
3074       Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
3075                                              L->getPointerOperand(), Indices);
3076       Instruction *NL = Builder.CreateLoad(EV.getType(), GEP);
3077       // Whatever aliasing information we had for the orignal load must also
3078       // hold for the smaller load, so propagate the annotations.
3079       NL->setAAMetadata(L->getAAMetadata());
3080       // Returning the load directly will cause the main loop to insert it in
3081       // the wrong spot, so use replaceInstUsesWith().
3082       return replaceInstUsesWith(EV, NL);
3083     }
3084   }
3085 
3086   if (auto *PN = dyn_cast<PHINode>(Agg))
3087     if (Instruction *Res = foldOpIntoPhi(EV, PN))
3088       return Res;
3089 
3090   // We could simplify extracts from other values. Note that nested extracts may
3091   // already be simplified implicitly by the above: extract (extract (insert) )
3092   // will be translated into extract ( insert ( extract ) ) first and then just
3093   // the value inserted, if appropriate. Similarly for extracts from single-use
3094   // loads: extract (extract (load)) will be translated to extract (load (gep))
3095   // and if again single-use then via load (gep (gep)) to load (gep).
3096   // However, double extracts from e.g. function arguments or return values
3097   // aren't handled yet.
3098   return nullptr;
3099 }
3100 
3101 /// Return 'true' if the given typeinfo will match anything.
3102 static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
3103   switch (Personality) {
3104   case EHPersonality::GNU_C:
3105   case EHPersonality::GNU_C_SjLj:
3106   case EHPersonality::Rust:
3107     // The GCC C EH and Rust personality only exists to support cleanups, so
3108     // it's not clear what the semantics of catch clauses are.
3109     return false;
3110   case EHPersonality::Unknown:
3111     return false;
3112   case EHPersonality::GNU_Ada:
3113     // While __gnat_all_others_value will match any Ada exception, it doesn't
3114     // match foreign exceptions (or didn't, before gcc-4.7).
3115     return false;
3116   case EHPersonality::GNU_CXX:
3117   case EHPersonality::GNU_CXX_SjLj:
3118   case EHPersonality::GNU_ObjC:
3119   case EHPersonality::MSVC_X86SEH:
3120   case EHPersonality::MSVC_TableSEH:
3121   case EHPersonality::MSVC_CXX:
3122   case EHPersonality::CoreCLR:
3123   case EHPersonality::Wasm_CXX:
3124   case EHPersonality::XL_CXX:
3125     return TypeInfo->isNullValue();
3126   }
3127   llvm_unreachable("invalid enum");
3128 }
3129 
3130 static bool shorter_filter(const Value *LHS, const Value *RHS) {
3131   return
3132     cast<ArrayType>(LHS->getType())->getNumElements()
3133   <
3134     cast<ArrayType>(RHS->getType())->getNumElements();
3135 }
3136 
3137 Instruction *InstCombinerImpl::visitLandingPadInst(LandingPadInst &LI) {
3138   // The logic here should be correct for any real-world personality function.
3139   // However if that turns out not to be true, the offending logic can always
3140   // be conditioned on the personality function, like the catch-all logic is.
3141   EHPersonality Personality =
3142       classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());
3143 
3144   // Simplify the list of clauses, eg by removing repeated catch clauses
3145   // (these are often created by inlining).
3146   bool MakeNewInstruction = false; // If true, recreate using the following:
3147   SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
3148   bool CleanupFlag = LI.isCleanup();   // - The new instruction is a cleanup.
3149 
3150   SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
3151   for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
3152     bool isLastClause = i + 1 == e;
3153     if (LI.isCatch(i)) {
3154       // A catch clause.
3155       Constant *CatchClause = LI.getClause(i);
3156       Constant *TypeInfo = CatchClause->stripPointerCasts();
3157 
3158       // If we already saw this clause, there is no point in having a second
3159       // copy of it.
3160       if (AlreadyCaught.insert(TypeInfo).second) {
3161         // This catch clause was not already seen.
3162         NewClauses.push_back(CatchClause);
3163       } else {
3164         // Repeated catch clause - drop the redundant copy.
3165         MakeNewInstruction = true;
3166       }
3167 
3168       // If this is a catch-all then there is no point in keeping any following
3169       // clauses or marking the landingpad as having a cleanup.
3170       if (isCatchAll(Personality, TypeInfo)) {
3171         if (!isLastClause)
3172           MakeNewInstruction = true;
3173         CleanupFlag = false;
3174         break;
3175       }
3176     } else {
3177       // A filter clause.  If any of the filter elements were already caught
3178       // then they can be dropped from the filter.  It is tempting to try to
3179       // exploit the filter further by saying that any typeinfo that does not
3180       // occur in the filter can't be caught later (and thus can be dropped).
3181       // However this would be wrong, since typeinfos can match without being
3182       // equal (for example if one represents a C++ class, and the other some
3183       // class derived from it).
3184       assert(LI.isFilter(i) && "Unsupported landingpad clause!");
3185       Constant *FilterClause = LI.getClause(i);
3186       ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
3187       unsigned NumTypeInfos = FilterType->getNumElements();
3188 
3189       // An empty filter catches everything, so there is no point in keeping any
3190       // following clauses or marking the landingpad as having a cleanup.  By
3191       // dealing with this case here the following code is made a bit simpler.
3192       if (!NumTypeInfos) {
3193         NewClauses.push_back(FilterClause);
3194         if (!isLastClause)
3195           MakeNewInstruction = true;
3196         CleanupFlag = false;
3197         break;
3198       }
3199 
3200       bool MakeNewFilter = false; // If true, make a new filter.
3201       SmallVector<Constant *, 16> NewFilterElts; // New elements.
3202       if (isa<ConstantAggregateZero>(FilterClause)) {
3203         // Not an empty filter - it contains at least one null typeinfo.
3204         assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
3205         Constant *TypeInfo =
3206           Constant::getNullValue(FilterType->getElementType());
3207         // If this typeinfo is a catch-all then the filter can never match.
3208         if (isCatchAll(Personality, TypeInfo)) {
3209           // Throw the filter away.
3210           MakeNewInstruction = true;
3211           continue;
3212         }
3213 
3214         // There is no point in having multiple copies of this typeinfo, so
3215         // discard all but the first copy if there is more than one.
3216         NewFilterElts.push_back(TypeInfo);
3217         if (NumTypeInfos > 1)
3218           MakeNewFilter = true;
3219       } else {
3220         ConstantArray *Filter = cast<ConstantArray>(FilterClause);
3221         SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
3222         NewFilterElts.reserve(NumTypeInfos);
3223 
3224         // Remove any filter elements that were already caught or that already
3225         // occurred in the filter.  While there, see if any of the elements are
3226         // catch-alls.  If so, the filter can be discarded.
3227         bool SawCatchAll = false;
3228         for (unsigned j = 0; j != NumTypeInfos; ++j) {
3229           Constant *Elt = Filter->getOperand(j);
3230           Constant *TypeInfo = Elt->stripPointerCasts();
3231           if (isCatchAll(Personality, TypeInfo)) {
3232             // This element is a catch-all.  Bail out, noting this fact.
3233             SawCatchAll = true;
3234             break;
3235           }
3236 
3237           // Even if we've seen a type in a catch clause, we don't want to
3238           // remove it from the filter.  An unexpected type handler may be
3239           // set up for a call site which throws an exception of the same
3240           // type caught.  In order for the exception thrown by the unexpected
3241           // handler to propagate correctly, the filter must be correctly
3242           // described for the call site.
3243           //
3244           // Example:
3245           //
3246           // void unexpected() { throw 1;}
3247           // void foo() throw (int) {
3248           //   std::set_unexpected(unexpected);
3249           //   try {
3250           //     throw 2.0;
3251           //   } catch (int i) {}
3252           // }
3253 
3254           // There is no point in having multiple copies of the same typeinfo in
3255           // a filter, so only add it if we didn't already.
3256           if (SeenInFilter.insert(TypeInfo).second)
3257             NewFilterElts.push_back(cast<Constant>(Elt));
3258         }
3259         // A filter containing a catch-all cannot match anything by definition.
3260         if (SawCatchAll) {
3261           // Throw the filter away.
3262           MakeNewInstruction = true;
3263           continue;
3264         }
3265 
3266         // If we dropped something from the filter, make a new one.
3267         if (NewFilterElts.size() < NumTypeInfos)
3268           MakeNewFilter = true;
3269       }
3270       if (MakeNewFilter) {
3271         FilterType = ArrayType::get(FilterType->getElementType(),
3272                                     NewFilterElts.size());
3273         FilterClause = ConstantArray::get(FilterType, NewFilterElts);
3274         MakeNewInstruction = true;
3275       }
3276 
3277       NewClauses.push_back(FilterClause);
3278 
3279       // If the new filter is empty then it will catch everything so there is
3280       // no point in keeping any following clauses or marking the landingpad
3281       // as having a cleanup.  The case of the original filter being empty was
3282       // already handled above.
3283       if (MakeNewFilter && !NewFilterElts.size()) {
3284         assert(MakeNewInstruction && "New filter but not a new instruction!");
3285         CleanupFlag = false;
3286         break;
3287       }
3288     }
3289   }
3290 
3291   // If several filters occur in a row then reorder them so that the shortest
3292   // filters come first (those with the smallest number of elements).  This is
3293   // advantageous because shorter filters are more likely to match, speeding up
3294   // unwinding, but mostly because it increases the effectiveness of the other
3295   // filter optimizations below.
3296   for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
3297     unsigned j;
3298     // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
3299     for (j = i; j != e; ++j)
3300       if (!isa<ArrayType>(NewClauses[j]->getType()))
3301         break;
3302 
3303     // Check whether the filters are already sorted by length.  We need to know
3304     // if sorting them is actually going to do anything so that we only make a
3305     // new landingpad instruction if it does.
3306     for (unsigned k = i; k + 1 < j; ++k)
3307       if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
3308         // Not sorted, so sort the filters now.  Doing an unstable sort would be
3309         // correct too but reordering filters pointlessly might confuse users.
3310         std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
3311                          shorter_filter);
3312         MakeNewInstruction = true;
3313         break;
3314       }
3315 
3316     // Look for the next batch of filters.
3317     i = j + 1;
3318   }
3319 
3320   // If typeinfos matched if and only if equal, then the elements of a filter L
3321   // that occurs later than a filter F could be replaced by the intersection of
3322   // the elements of F and L.  In reality two typeinfos can match without being
3323   // equal (for example if one represents a C++ class, and the other some class
3324   // derived from it) so it would be wrong to perform this transform in general.
3325   // However the transform is correct and useful if F is a subset of L.  In that
3326   // case L can be replaced by F, and thus removed altogether since repeating a
3327   // filter is pointless.  So here we look at all pairs of filters F and L where
3328   // L follows F in the list of clauses, and remove L if every element of F is
3329   // an element of L.  This can occur when inlining C++ functions with exception
3330   // specifications.
3331   for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
3332     // Examine each filter in turn.
3333     Value *Filter = NewClauses[i];
3334     ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
3335     if (!FTy)
3336       // Not a filter - skip it.
3337       continue;
3338     unsigned FElts = FTy->getNumElements();
3339     // Examine each filter following this one.  Doing this backwards means that
3340     // we don't have to worry about filters disappearing under us when removed.
3341     for (unsigned j = NewClauses.size() - 1; j != i; --j) {
3342       Value *LFilter = NewClauses[j];
3343       ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
3344       if (!LTy)
3345         // Not a filter - skip it.
3346         continue;
3347       // If Filter is a subset of LFilter, i.e. every element of Filter is also
3348       // an element of LFilter, then discard LFilter.
3349       SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
3350       // If Filter is empty then it is a subset of LFilter.
3351       if (!FElts) {
3352         // Discard LFilter.
3353         NewClauses.erase(J);
3354         MakeNewInstruction = true;
3355         // Move on to the next filter.
3356         continue;
3357       }
3358       unsigned LElts = LTy->getNumElements();
3359       // If Filter is longer than LFilter then it cannot be a subset of it.
3360       if (FElts > LElts)
3361         // Move on to the next filter.
3362         continue;
3363       // At this point we know that LFilter has at least one element.
3364       if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
3365         // Filter is a subset of LFilter iff Filter contains only zeros (as we
3366         // already know that Filter is not longer than LFilter).
3367         if (isa<ConstantAggregateZero>(Filter)) {
3368           assert(FElts <= LElts && "Should have handled this case earlier!");
3369           // Discard LFilter.
3370           NewClauses.erase(J);
3371           MakeNewInstruction = true;
3372         }
3373         // Move on to the next filter.
3374         continue;
3375       }
3376       ConstantArray *LArray = cast<ConstantArray>(LFilter);
3377       if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
3378         // Since Filter is non-empty and contains only zeros, it is a subset of
3379         // LFilter iff LFilter contains a zero.
3380         assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
3381         for (unsigned l = 0; l != LElts; ++l)
3382           if (LArray->getOperand(l)->isNullValue()) {
3383             // LFilter contains a zero - discard it.
3384             NewClauses.erase(J);
3385             MakeNewInstruction = true;
3386             break;
3387           }
3388         // Move on to the next filter.
3389         continue;
3390       }
3391       // At this point we know that both filters are ConstantArrays.  Loop over
3392       // operands to see whether every element of Filter is also an element of
3393       // LFilter.  Since filters tend to be short this is probably faster than
3394       // using a method that scales nicely.
3395       ConstantArray *FArray = cast<ConstantArray>(Filter);
3396       bool AllFound = true;
3397       for (unsigned f = 0; f != FElts; ++f) {
3398         Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
3399         AllFound = false;
3400         for (unsigned l = 0; l != LElts; ++l) {
3401           Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
3402           if (LTypeInfo == FTypeInfo) {
3403             AllFound = true;
3404             break;
3405           }
3406         }
3407         if (!AllFound)
3408           break;
3409       }
3410       if (AllFound) {
3411         // Discard LFilter.
3412         NewClauses.erase(J);
3413         MakeNewInstruction = true;
3414       }
3415       // Move on to the next filter.
3416     }
3417   }
3418 
3419   // If we changed any of the clauses, replace the old landingpad instruction
3420   // with a new one.
3421   if (MakeNewInstruction) {
3422     LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
3423                                                  NewClauses.size());
3424     for (unsigned i = 0, e = NewClauses.size(); i != e; ++i)
3425       NLI->addClause(NewClauses[i]);
3426     // A landing pad with no clauses must have the cleanup flag set.  It is
3427     // theoretically possible, though highly unlikely, that we eliminated all
3428     // clauses.  If so, force the cleanup flag to true.
3429     if (NewClauses.empty())
3430       CleanupFlag = true;
3431     NLI->setCleanup(CleanupFlag);
3432     return NLI;
3433   }
3434 
3435   // Even if none of the clauses changed, we may nonetheless have understood
3436   // that the cleanup flag is pointless.  Clear it if so.
3437   if (LI.isCleanup() != CleanupFlag) {
3438     assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
3439     LI.setCleanup(CleanupFlag);
3440     return &LI;
3441   }
3442 
3443   return nullptr;
3444 }
3445 
3446 Value *
3447 InstCombinerImpl::pushFreezeToPreventPoisonFromPropagating(FreezeInst &OrigFI) {
3448   // Try to push freeze through instructions that propagate but don't produce
3449   // poison as far as possible.  If an operand of freeze follows three
3450   // conditions 1) one-use, 2) does not produce poison, and 3) has all but one
3451   // guaranteed-non-poison operands then push the freeze through to the one
3452   // operand that is not guaranteed non-poison.  The actual transform is as
3453   // follows.
3454   //   Op1 = ...                        ; Op1 can be posion
3455   //   Op0 = Inst(Op1, NonPoisonOps...) ; Op0 has only one use and only have
3456   //                                    ; single guaranteed-non-poison operands
3457   //   ... = Freeze(Op0)
3458   // =>
3459   //   Op1 = ...
3460   //   Op1.fr = Freeze(Op1)
3461   //   ... = Inst(Op1.fr, NonPoisonOps...)
3462   auto *OrigOp = OrigFI.getOperand(0);
3463   auto *OrigOpInst = dyn_cast<Instruction>(OrigOp);
3464 
3465   // While we could change the other users of OrigOp to use freeze(OrigOp), that
3466   // potentially reduces their optimization potential, so let's only do this iff
3467   // the OrigOp is only used by the freeze.
3468   if (!OrigOpInst || !OrigOpInst->hasOneUse() || isa<PHINode>(OrigOp))
3469     return nullptr;
3470 
3471   // We can't push the freeze through an instruction which can itself create
3472   // poison.  If the only source of new poison is flags, we can simply
3473   // strip them (since we know the only use is the freeze and nothing can
3474   // benefit from them.)
3475   if (canCreateUndefOrPoison(cast<Operator>(OrigOp),
3476                              /*ConsiderFlagsAndMetadata*/ false))
3477     return nullptr;
3478 
3479   // If operand is guaranteed not to be poison, there is no need to add freeze
3480   // to the operand. So we first find the operand that is not guaranteed to be
3481   // poison.
3482   Use *MaybePoisonOperand = nullptr;
3483   for (Use &U : OrigOpInst->operands()) {
3484     if (isa<MetadataAsValue>(U.get()) ||
3485         isGuaranteedNotToBeUndefOrPoison(U.get()))
3486       continue;
3487     if (!MaybePoisonOperand)
3488       MaybePoisonOperand = &U;
3489     else
3490       return nullptr;
3491   }
3492 
3493   OrigOpInst->dropPoisonGeneratingFlagsAndMetadata();
3494 
3495   // If all operands are guaranteed to be non-poison, we can drop freeze.
3496   if (!MaybePoisonOperand)
3497     return OrigOp;
3498 
3499   Builder.SetInsertPoint(OrigOpInst);
3500   auto *FrozenMaybePoisonOperand = Builder.CreateFreeze(
3501       MaybePoisonOperand->get(), MaybePoisonOperand->get()->getName() + ".fr");
3502 
3503   replaceUse(*MaybePoisonOperand, FrozenMaybePoisonOperand);
3504   return OrigOp;
3505 }
3506 
3507 Instruction *InstCombinerImpl::foldFreezeIntoRecurrence(FreezeInst &FI,
3508                                                         PHINode *PN) {
3509   // Detect whether this is a recurrence with a start value and some number of
3510   // backedge values. We'll check whether we can push the freeze through the
3511   // backedge values (possibly dropping poison flags along the way) until we
3512   // reach the phi again. In that case, we can move the freeze to the start
3513   // value.
3514   Use *StartU = nullptr;
3515   SmallVector<Value *> Worklist;
3516   for (Use &U : PN->incoming_values()) {
3517     if (DT.dominates(PN->getParent(), PN->getIncomingBlock(U))) {
3518       // Add backedge value to worklist.
3519       Worklist.push_back(U.get());
3520       continue;
3521     }
3522 
3523     // Don't bother handling multiple start values.
3524     if (StartU)
3525       return nullptr;
3526     StartU = &U;
3527   }
3528 
3529   if (!StartU || Worklist.empty())
3530     return nullptr; // Not a recurrence.
3531 
3532   Value *StartV = StartU->get();
3533   BasicBlock *StartBB = PN->getIncomingBlock(*StartU);
3534   bool StartNeedsFreeze = !isGuaranteedNotToBeUndefOrPoison(StartV);
3535   // We can't insert freeze if the the start value is the result of the
3536   // terminator (e.g. an invoke).
3537   if (StartNeedsFreeze && StartBB->getTerminator() == StartV)
3538     return nullptr;
3539 
3540   SmallPtrSet<Value *, 32> Visited;
3541   SmallVector<Instruction *> DropFlags;
3542   while (!Worklist.empty()) {
3543     Value *V = Worklist.pop_back_val();
3544     if (!Visited.insert(V).second)
3545       continue;
3546 
3547     if (Visited.size() > 32)
3548       return nullptr; // Limit the total number of values we inspect.
3549 
3550     // Assume that PN is non-poison, because it will be after the transform.
3551     if (V == PN || isGuaranteedNotToBeUndefOrPoison(V))
3552       continue;
3553 
3554     Instruction *I = dyn_cast<Instruction>(V);
3555     if (!I || canCreateUndefOrPoison(cast<Operator>(I),
3556                                      /*ConsiderFlagsAndMetadata*/ false))
3557       return nullptr;
3558 
3559     DropFlags.push_back(I);
3560     append_range(Worklist, I->operands());
3561   }
3562 
3563   for (Instruction *I : DropFlags)
3564     I->dropPoisonGeneratingFlagsAndMetadata();
3565 
3566   if (StartNeedsFreeze) {
3567     Builder.SetInsertPoint(StartBB->getTerminator());
3568     Value *FrozenStartV = Builder.CreateFreeze(StartV,
3569                                                StartV->getName() + ".fr");
3570     replaceUse(*StartU, FrozenStartV);
3571   }
3572   return replaceInstUsesWith(FI, PN);
3573 }
3574 
3575 bool InstCombinerImpl::freezeOtherUses(FreezeInst &FI) {
3576   Value *Op = FI.getOperand(0);
3577 
3578   if (isa<Constant>(Op) || Op->hasOneUse())
3579     return false;
3580 
3581   // Move the freeze directly after the definition of its operand, so that
3582   // it dominates the maximum number of uses. Note that it may not dominate
3583   // *all* uses if the operand is an invoke/callbr and the use is in a phi on
3584   // the normal/default destination. This is why the domination check in the
3585   // replacement below is still necessary.
3586   Instruction *MoveBefore;
3587   if (isa<Argument>(Op)) {
3588     MoveBefore =
3589         &*FI.getFunction()->getEntryBlock().getFirstNonPHIOrDbgOrAlloca();
3590   } else {
3591     MoveBefore = cast<Instruction>(Op)->getInsertionPointAfterDef();
3592     if (!MoveBefore)
3593       return false;
3594   }
3595 
3596   bool Changed = false;
3597   if (&FI != MoveBefore) {
3598     FI.moveBefore(MoveBefore);
3599     Changed = true;
3600   }
3601 
3602   Op->replaceUsesWithIf(&FI, [&](Use &U) -> bool {
3603     bool Dominates = DT.dominates(&FI, U);
3604     Changed |= Dominates;
3605     return Dominates;
3606   });
3607 
3608   return Changed;
3609 }
3610 
3611 // Check if any direct or bitcast user of this value is a shuffle instruction.
3612 static bool isUsedWithinShuffleVector(Value *V) {
3613   for (auto *U : V->users()) {
3614     if (isa<ShuffleVectorInst>(U))
3615       return true;
3616     else if (match(U, m_BitCast(m_Specific(V))) && isUsedWithinShuffleVector(U))
3617       return true;
3618   }
3619   return false;
3620 }
3621 
3622 Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) {
3623   Value *Op0 = I.getOperand(0);
3624 
3625   if (Value *V = simplifyFreezeInst(Op0, SQ.getWithInstruction(&I)))
3626     return replaceInstUsesWith(I, V);
3627 
3628   // freeze (phi const, x) --> phi const, (freeze x)
3629   if (auto *PN = dyn_cast<PHINode>(Op0)) {
3630     if (Instruction *NV = foldOpIntoPhi(I, PN))
3631       return NV;
3632     if (Instruction *NV = foldFreezeIntoRecurrence(I, PN))
3633       return NV;
3634   }
3635 
3636   if (Value *NI = pushFreezeToPreventPoisonFromPropagating(I))
3637     return replaceInstUsesWith(I, NI);
3638 
3639   // If I is freeze(undef), check its uses and fold it to a fixed constant.
3640   // - or: pick -1
3641   // - select's condition: if the true value is constant, choose it by making
3642   //                       the condition true.
3643   // - default: pick 0
3644   //
3645   // Note that this transform is intentionally done here rather than
3646   // via an analysis in InstSimplify or at individual user sites. That is
3647   // because we must produce the same value for all uses of the freeze -
3648   // it's the reason "freeze" exists!
3649   //
3650   // TODO: This could use getBinopAbsorber() / getBinopIdentity() to avoid
3651   //       duplicating logic for binops at least.
3652   auto getUndefReplacement = [&I](Type *Ty) {
3653     Constant *BestValue = nullptr;
3654     Constant *NullValue = Constant::getNullValue(Ty);
3655     for (const auto *U : I.users()) {
3656       Constant *C = NullValue;
3657       if (match(U, m_Or(m_Value(), m_Value())))
3658         C = ConstantInt::getAllOnesValue(Ty);
3659       else if (match(U, m_Select(m_Specific(&I), m_Constant(), m_Value())))
3660         C = ConstantInt::getTrue(Ty);
3661 
3662       if (!BestValue)
3663         BestValue = C;
3664       else if (BestValue != C)
3665         BestValue = NullValue;
3666     }
3667     assert(BestValue && "Must have at least one use");
3668     return BestValue;
3669   };
3670 
3671   if (match(Op0, m_Undef())) {
3672     // Don't fold freeze(undef/poison) if it's used as a vector operand in
3673     // a shuffle. This may improve codegen for shuffles that allow
3674     // unspecified inputs.
3675     if (isUsedWithinShuffleVector(&I))
3676       return nullptr;
3677     return replaceInstUsesWith(I, getUndefReplacement(I.getType()));
3678   }
3679 
3680   Constant *C;
3681   if (match(Op0, m_Constant(C)) && C->containsUndefOrPoisonElement()) {
3682     Constant *ReplaceC = getUndefReplacement(I.getType()->getScalarType());
3683     return replaceInstUsesWith(I, Constant::replaceUndefsWith(C, ReplaceC));
3684   }
3685 
3686   // Replace uses of Op with freeze(Op).
3687   if (freezeOtherUses(I))
3688     return &I;
3689 
3690   return nullptr;
3691 }
3692 
3693 /// Check for case where the call writes to an otherwise dead alloca.  This
3694 /// shows up for unused out-params in idiomatic C/C++ code.   Note that this
3695 /// helper *only* analyzes the write; doesn't check any other legality aspect.
3696 static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI) {
3697   auto *CB = dyn_cast<CallBase>(I);
3698   if (!CB)
3699     // TODO: handle e.g. store to alloca here - only worth doing if we extend
3700     // to allow reload along used path as described below.  Otherwise, this
3701     // is simply a store to a dead allocation which will be removed.
3702     return false;
3703   std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(CB, TLI);
3704   if (!Dest)
3705     return false;
3706   auto *AI = dyn_cast<AllocaInst>(getUnderlyingObject(Dest->Ptr));
3707   if (!AI)
3708     // TODO: allow malloc?
3709     return false;
3710   // TODO: allow memory access dominated by move point?  Note that since AI
3711   // could have a reference to itself captured by the call, we would need to
3712   // account for cycles in doing so.
3713   SmallVector<const User *> AllocaUsers;
3714   SmallPtrSet<const User *, 4> Visited;
3715   auto pushUsers = [&](const Instruction &I) {
3716     for (const User *U : I.users()) {
3717       if (Visited.insert(U).second)
3718         AllocaUsers.push_back(U);
3719     }
3720   };
3721   pushUsers(*AI);
3722   while (!AllocaUsers.empty()) {
3723     auto *UserI = cast<Instruction>(AllocaUsers.pop_back_val());
3724     if (isa<BitCastInst>(UserI) || isa<GetElementPtrInst>(UserI) ||
3725         isa<AddrSpaceCastInst>(UserI)) {
3726       pushUsers(*UserI);
3727       continue;
3728     }
3729     if (UserI == CB)
3730       continue;
3731     // TODO: support lifetime.start/end here
3732     return false;
3733   }
3734   return true;
3735 }
3736 
3737 /// Try to move the specified instruction from its current block into the
3738 /// beginning of DestBlock, which can only happen if it's safe to move the
3739 /// instruction past all of the instructions between it and the end of its
3740 /// block.
3741 bool InstCombinerImpl::tryToSinkInstruction(Instruction *I,
3742                                             BasicBlock *DestBlock) {
3743   BasicBlock *SrcBlock = I->getParent();
3744 
3745   // Cannot move control-flow-involving, volatile loads, vaarg, etc.
3746   if (isa<PHINode>(I) || I->isEHPad() || I->mayThrow() || !I->willReturn() ||
3747       I->isTerminator())
3748     return false;
3749 
3750   // Do not sink static or dynamic alloca instructions. Static allocas must
3751   // remain in the entry block, and dynamic allocas must not be sunk in between
3752   // a stacksave / stackrestore pair, which would incorrectly shorten its
3753   // lifetime.
3754   if (isa<AllocaInst>(I))
3755     return false;
3756 
3757   // Do not sink into catchswitch blocks.
3758   if (isa<CatchSwitchInst>(DestBlock->getTerminator()))
3759     return false;
3760 
3761   // Do not sink convergent call instructions.
3762   if (auto *CI = dyn_cast<CallInst>(I)) {
3763     if (CI->isConvergent())
3764       return false;
3765   }
3766 
3767   // Unless we can prove that the memory write isn't visibile except on the
3768   // path we're sinking to, we must bail.
3769   if (I->mayWriteToMemory()) {
3770     if (!SoleWriteToDeadLocal(I, TLI))
3771       return false;
3772   }
3773 
3774   // We can only sink load instructions if there is nothing between the load and
3775   // the end of block that could change the value.
3776   if (I->mayReadFromMemory()) {
3777     // We don't want to do any sophisticated alias analysis, so we only check
3778     // the instructions after I in I's parent block if we try to sink to its
3779     // successor block.
3780     if (DestBlock->getUniquePredecessor() != I->getParent())
3781       return false;
3782     for (BasicBlock::iterator Scan = std::next(I->getIterator()),
3783                               E = I->getParent()->end();
3784          Scan != E; ++Scan)
3785       if (Scan->mayWriteToMemory())
3786         return false;
3787   }
3788 
3789   I->dropDroppableUses([&](const Use *U) {
3790     auto *I = dyn_cast<Instruction>(U->getUser());
3791     if (I && I->getParent() != DestBlock) {
3792       Worklist.add(I);
3793       return true;
3794     }
3795     return false;
3796   });
3797   /// FIXME: We could remove droppable uses that are not dominated by
3798   /// the new position.
3799 
3800   BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
3801   I->moveBefore(&*InsertPos);
3802   ++NumSunkInst;
3803 
3804   // Also sink all related debug uses from the source basic block. Otherwise we
3805   // get debug use before the def. Attempt to salvage debug uses first, to
3806   // maximise the range variables have location for. If we cannot salvage, then
3807   // mark the location undef: we know it was supposed to receive a new location
3808   // here, but that computation has been sunk.
3809   SmallVector<DbgVariableIntrinsic *, 2> DbgUsers;
3810   findDbgUsers(DbgUsers, I);
3811   // Process the sinking DbgUsers in reverse order, as we only want to clone the
3812   // last appearing debug intrinsic for each given variable.
3813   SmallVector<DbgVariableIntrinsic *, 2> DbgUsersToSink;
3814   for (DbgVariableIntrinsic *DVI : DbgUsers)
3815     if (DVI->getParent() == SrcBlock)
3816       DbgUsersToSink.push_back(DVI);
3817   llvm::sort(DbgUsersToSink,
3818              [](auto *A, auto *B) { return B->comesBefore(A); });
3819 
3820   SmallVector<DbgVariableIntrinsic *, 2> DIIClones;
3821   SmallSet<DebugVariable, 4> SunkVariables;
3822   for (auto *User : DbgUsersToSink) {
3823     // A dbg.declare instruction should not be cloned, since there can only be
3824     // one per variable fragment. It should be left in the original place
3825     // because the sunk instruction is not an alloca (otherwise we could not be
3826     // here).
3827     if (isa<DbgDeclareInst>(User))
3828       continue;
3829 
3830     DebugVariable DbgUserVariable =
3831         DebugVariable(User->getVariable(), User->getExpression(),
3832                       User->getDebugLoc()->getInlinedAt());
3833 
3834     if (!SunkVariables.insert(DbgUserVariable).second)
3835       continue;
3836 
3837     // Leave dbg.assign intrinsics in their original positions and there should
3838     // be no need to insert a clone.
3839     if (isa<DbgAssignIntrinsic>(User))
3840       continue;
3841 
3842     DIIClones.emplace_back(cast<DbgVariableIntrinsic>(User->clone()));
3843     if (isa<DbgDeclareInst>(User) && isa<CastInst>(I))
3844       DIIClones.back()->replaceVariableLocationOp(I, I->getOperand(0));
3845     LLVM_DEBUG(dbgs() << "CLONE: " << *DIIClones.back() << '\n');
3846   }
3847 
3848   // Perform salvaging without the clones, then sink the clones.
3849   if (!DIIClones.empty()) {
3850     salvageDebugInfoForDbgValues(*I, DbgUsers);
3851     // The clones are in reverse order of original appearance, reverse again to
3852     // maintain the original order.
3853     for (auto &DIIClone : llvm::reverse(DIIClones)) {
3854       DIIClone->insertBefore(&*InsertPos);
3855       LLVM_DEBUG(dbgs() << "SINK: " << *DIIClone << '\n');
3856     }
3857   }
3858 
3859   return true;
3860 }
3861 
3862 bool InstCombinerImpl::run() {
3863   while (!Worklist.isEmpty()) {
3864     // Walk deferred instructions in reverse order, and push them to the
3865     // worklist, which means they'll end up popped from the worklist in-order.
3866     while (Instruction *I = Worklist.popDeferred()) {
3867       // Check to see if we can DCE the instruction. We do this already here to
3868       // reduce the number of uses and thus allow other folds to trigger.
3869       // Note that eraseInstFromFunction() may push additional instructions on
3870       // the deferred worklist, so this will DCE whole instruction chains.
3871       if (isInstructionTriviallyDead(I, &TLI)) {
3872         eraseInstFromFunction(*I);
3873         ++NumDeadInst;
3874         continue;
3875       }
3876 
3877       Worklist.push(I);
3878     }
3879 
3880     Instruction *I = Worklist.removeOne();
3881     if (I == nullptr) continue;  // skip null values.
3882 
3883     // Check to see if we can DCE the instruction.
3884     if (isInstructionTriviallyDead(I, &TLI)) {
3885       eraseInstFromFunction(*I);
3886       ++NumDeadInst;
3887       continue;
3888     }
3889 
3890     if (!DebugCounter::shouldExecute(VisitCounter))
3891       continue;
3892 
3893     // See if we can trivially sink this instruction to its user if we can
3894     // prove that the successor is not executed more frequently than our block.
3895     // Return the UserBlock if successful.
3896     auto getOptionalSinkBlockForInst =
3897         [this](Instruction *I) -> std::optional<BasicBlock *> {
3898       if (!EnableCodeSinking)
3899         return std::nullopt;
3900 
3901       BasicBlock *BB = I->getParent();
3902       BasicBlock *UserParent = nullptr;
3903       unsigned NumUsers = 0;
3904 
3905       for (auto *U : I->users()) {
3906         if (U->isDroppable())
3907           continue;
3908         if (NumUsers > MaxSinkNumUsers)
3909           return std::nullopt;
3910 
3911         Instruction *UserInst = cast<Instruction>(U);
3912         // Special handling for Phi nodes - get the block the use occurs in.
3913         if (PHINode *PN = dyn_cast<PHINode>(UserInst)) {
3914           for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
3915             if (PN->getIncomingValue(i) == I) {
3916               // Bail out if we have uses in different blocks. We don't do any
3917               // sophisticated analysis (i.e finding NearestCommonDominator of
3918               // these use blocks).
3919               if (UserParent && UserParent != PN->getIncomingBlock(i))
3920                 return std::nullopt;
3921               UserParent = PN->getIncomingBlock(i);
3922             }
3923           }
3924           assert(UserParent && "expected to find user block!");
3925         } else {
3926           if (UserParent && UserParent != UserInst->getParent())
3927             return std::nullopt;
3928           UserParent = UserInst->getParent();
3929         }
3930 
3931         // Make sure these checks are done only once, naturally we do the checks
3932         // the first time we get the userparent, this will save compile time.
3933         if (NumUsers == 0) {
3934           // Try sinking to another block. If that block is unreachable, then do
3935           // not bother. SimplifyCFG should handle it.
3936           if (UserParent == BB || !DT.isReachableFromEntry(UserParent))
3937             return std::nullopt;
3938 
3939           auto *Term = UserParent->getTerminator();
3940           // See if the user is one of our successors that has only one
3941           // predecessor, so that we don't have to split the critical edge.
3942           // Another option where we can sink is a block that ends with a
3943           // terminator that does not pass control to other block (such as
3944           // return or unreachable or resume). In this case:
3945           //   - I dominates the User (by SSA form);
3946           //   - the User will be executed at most once.
3947           // So sinking I down to User is always profitable or neutral.
3948           if (UserParent->getUniquePredecessor() != BB && !succ_empty(Term))
3949             return std::nullopt;
3950 
3951           assert(DT.dominates(BB, UserParent) && "Dominance relation broken?");
3952         }
3953 
3954         NumUsers++;
3955       }
3956 
3957       // No user or only has droppable users.
3958       if (!UserParent)
3959         return std::nullopt;
3960 
3961       return UserParent;
3962     };
3963 
3964     auto OptBB = getOptionalSinkBlockForInst(I);
3965     if (OptBB) {
3966       auto *UserParent = *OptBB;
3967       // Okay, the CFG is simple enough, try to sink this instruction.
3968       if (tryToSinkInstruction(I, UserParent)) {
3969         LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
3970         MadeIRChange = true;
3971         // We'll add uses of the sunk instruction below, but since
3972         // sinking can expose opportunities for it's *operands* add
3973         // them to the worklist
3974         for (Use &U : I->operands())
3975           if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
3976             Worklist.push(OpI);
3977       }
3978     }
3979 
3980     // Now that we have an instruction, try combining it to simplify it.
3981     Builder.SetInsertPoint(I);
3982     Builder.CollectMetadataToCopy(
3983         I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
3984 
3985 #ifndef NDEBUG
3986     std::string OrigI;
3987 #endif
3988     LLVM_DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
3989     LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
3990 
3991     if (Instruction *Result = visit(*I)) {
3992       ++NumCombined;
3993       // Should we replace the old instruction with a new one?
3994       if (Result != I) {
3995         LLVM_DEBUG(dbgs() << "IC: Old = " << *I << '\n'
3996                           << "    New = " << *Result << '\n');
3997 
3998         Result->copyMetadata(*I,
3999                              {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
4000         // Everything uses the new instruction now.
4001         I->replaceAllUsesWith(Result);
4002 
4003         // Move the name to the new instruction first.
4004         Result->takeName(I);
4005 
4006         // Insert the new instruction into the basic block...
4007         BasicBlock *InstParent = I->getParent();
4008         BasicBlock::iterator InsertPos = I->getIterator();
4009 
4010         // Are we replace a PHI with something that isn't a PHI, or vice versa?
4011         if (isa<PHINode>(Result) != isa<PHINode>(I)) {
4012           // We need to fix up the insertion point.
4013           if (isa<PHINode>(I)) // PHI -> Non-PHI
4014             InsertPos = InstParent->getFirstInsertionPt();
4015           else // Non-PHI -> PHI
4016             InsertPos = InstParent->getFirstNonPHI()->getIterator();
4017         }
4018 
4019         Result->insertInto(InstParent, InsertPos);
4020 
4021         // Push the new instruction and any users onto the worklist.
4022         Worklist.pushUsersToWorkList(*Result);
4023         Worklist.push(Result);
4024 
4025         eraseInstFromFunction(*I);
4026       } else {
4027         LLVM_DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
4028                           << "    New = " << *I << '\n');
4029 
4030         // If the instruction was modified, it's possible that it is now dead.
4031         // if so, remove it.
4032         if (isInstructionTriviallyDead(I, &TLI)) {
4033           eraseInstFromFunction(*I);
4034         } else {
4035           Worklist.pushUsersToWorkList(*I);
4036           Worklist.push(I);
4037         }
4038       }
4039       MadeIRChange = true;
4040     }
4041   }
4042 
4043   Worklist.zap();
4044   return MadeIRChange;
4045 }
4046 
4047 // Track the scopes used by !alias.scope and !noalias. In a function, a
4048 // @llvm.experimental.noalias.scope.decl is only useful if that scope is used
4049 // by both sets. If not, the declaration of the scope can be safely omitted.
4050 // The MDNode of the scope can be omitted as well for the instructions that are
4051 // part of this function. We do not do that at this point, as this might become
4052 // too time consuming to do.
4053 class AliasScopeTracker {
4054   SmallPtrSet<const MDNode *, 8> UsedAliasScopesAndLists;
4055   SmallPtrSet<const MDNode *, 8> UsedNoAliasScopesAndLists;
4056 
4057 public:
4058   void analyse(Instruction *I) {
4059     // This seems to be faster than checking 'mayReadOrWriteMemory()'.
4060     if (!I->hasMetadataOtherThanDebugLoc())
4061       return;
4062 
4063     auto Track = [](Metadata *ScopeList, auto &Container) {
4064       const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
4065       if (!MDScopeList || !Container.insert(MDScopeList).second)
4066         return;
4067       for (const auto &MDOperand : MDScopeList->operands())
4068         if (auto *MDScope = dyn_cast<MDNode>(MDOperand))
4069           Container.insert(MDScope);
4070     };
4071 
4072     Track(I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
4073     Track(I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
4074   }
4075 
4076   bool isNoAliasScopeDeclDead(Instruction *Inst) {
4077     NoAliasScopeDeclInst *Decl = dyn_cast<NoAliasScopeDeclInst>(Inst);
4078     if (!Decl)
4079       return false;
4080 
4081     assert(Decl->use_empty() &&
4082            "llvm.experimental.noalias.scope.decl in use ?");
4083     const MDNode *MDSL = Decl->getScopeList();
4084     assert(MDSL->getNumOperands() == 1 &&
4085            "llvm.experimental.noalias.scope should refer to a single scope");
4086     auto &MDOperand = MDSL->getOperand(0);
4087     if (auto *MD = dyn_cast<MDNode>(MDOperand))
4088       return !UsedAliasScopesAndLists.contains(MD) ||
4089              !UsedNoAliasScopesAndLists.contains(MD);
4090 
4091     // Not an MDNode ? throw away.
4092     return true;
4093   }
4094 };
4095 
4096 /// Populate the IC worklist from a function, by walking it in depth-first
4097 /// order and adding all reachable code to the worklist.
4098 ///
4099 /// This has a couple of tricks to make the code faster and more powerful.  In
4100 /// particular, we constant fold and DCE instructions as we go, to avoid adding
4101 /// them to the worklist (this significantly speeds up instcombine on code where
4102 /// many instructions are dead or constant).  Additionally, if we find a branch
4103 /// whose condition is a known constant, we only visit the reachable successors.
4104 static bool prepareICWorklistFromFunction(Function &F, const DataLayout &DL,
4105                                           const TargetLibraryInfo *TLI,
4106                                           InstructionWorklist &ICWorklist) {
4107   bool MadeIRChange = false;
4108   SmallPtrSet<BasicBlock *, 32> Visited;
4109   SmallVector<BasicBlock*, 256> Worklist;
4110   Worklist.push_back(&F.front());
4111 
4112   SmallVector<Instruction *, 128> InstrsForInstructionWorklist;
4113   DenseMap<Constant *, Constant *> FoldedConstants;
4114   AliasScopeTracker SeenAliasScopes;
4115 
4116   do {
4117     BasicBlock *BB = Worklist.pop_back_val();
4118 
4119     // We have now visited this block!  If we've already been here, ignore it.
4120     if (!Visited.insert(BB).second)
4121       continue;
4122 
4123     for (Instruction &Inst : llvm::make_early_inc_range(*BB)) {
4124       // ConstantProp instruction if trivially constant.
4125       if (!Inst.use_empty() &&
4126           (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
4127         if (Constant *C = ConstantFoldInstruction(&Inst, DL, TLI)) {
4128           LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << Inst
4129                             << '\n');
4130           Inst.replaceAllUsesWith(C);
4131           ++NumConstProp;
4132           if (isInstructionTriviallyDead(&Inst, TLI))
4133             Inst.eraseFromParent();
4134           MadeIRChange = true;
4135           continue;
4136         }
4137 
4138       // See if we can constant fold its operands.
4139       for (Use &U : Inst.operands()) {
4140         if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
4141           continue;
4142 
4143         auto *C = cast<Constant>(U);
4144         Constant *&FoldRes = FoldedConstants[C];
4145         if (!FoldRes)
4146           FoldRes = ConstantFoldConstant(C, DL, TLI);
4147 
4148         if (FoldRes != C) {
4149           LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << Inst
4150                             << "\n    Old = " << *C
4151                             << "\n    New = " << *FoldRes << '\n');
4152           U = FoldRes;
4153           MadeIRChange = true;
4154         }
4155       }
4156 
4157       // Skip processing debug and pseudo intrinsics in InstCombine. Processing
4158       // these call instructions consumes non-trivial amount of time and
4159       // provides no value for the optimization.
4160       if (!Inst.isDebugOrPseudoInst()) {
4161         InstrsForInstructionWorklist.push_back(&Inst);
4162         SeenAliasScopes.analyse(&Inst);
4163       }
4164     }
4165 
4166     // Recursively visit successors.  If this is a branch or switch on a
4167     // constant, only visit the reachable successor.
4168     Instruction *TI = BB->getTerminator();
4169     if (BranchInst *BI = dyn_cast<BranchInst>(TI); BI && BI->isConditional()) {
4170       if (isa<UndefValue>(BI->getCondition()))
4171         // Branch on undef is UB.
4172         continue;
4173       if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
4174         bool CondVal = Cond->getZExtValue();
4175         BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
4176         Worklist.push_back(ReachableBB);
4177         continue;
4178       }
4179     } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
4180       if (isa<UndefValue>(SI->getCondition()))
4181         // Switch on undef is UB.
4182         continue;
4183       if (auto *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
4184         Worklist.push_back(SI->findCaseValue(Cond)->getCaseSuccessor());
4185         continue;
4186       }
4187     }
4188 
4189     append_range(Worklist, successors(TI));
4190   } while (!Worklist.empty());
4191 
4192   // Remove instructions inside unreachable blocks. This prevents the
4193   // instcombine code from having to deal with some bad special cases, and
4194   // reduces use counts of instructions.
4195   for (BasicBlock &BB : F) {
4196     if (Visited.count(&BB))
4197       continue;
4198 
4199     unsigned NumDeadInstInBB;
4200     unsigned NumDeadDbgInstInBB;
4201     std::tie(NumDeadInstInBB, NumDeadDbgInstInBB) =
4202         removeAllNonTerminatorAndEHPadInstructions(&BB);
4203 
4204     MadeIRChange |= NumDeadInstInBB + NumDeadDbgInstInBB > 0;
4205     NumDeadInst += NumDeadInstInBB;
4206   }
4207 
4208   // Once we've found all of the instructions to add to instcombine's worklist,
4209   // add them in reverse order.  This way instcombine will visit from the top
4210   // of the function down.  This jives well with the way that it adds all uses
4211   // of instructions to the worklist after doing a transformation, thus avoiding
4212   // some N^2 behavior in pathological cases.
4213   ICWorklist.reserve(InstrsForInstructionWorklist.size());
4214   for (Instruction *Inst : reverse(InstrsForInstructionWorklist)) {
4215     // DCE instruction if trivially dead. As we iterate in reverse program
4216     // order here, we will clean up whole chains of dead instructions.
4217     if (isInstructionTriviallyDead(Inst, TLI) ||
4218         SeenAliasScopes.isNoAliasScopeDeclDead(Inst)) {
4219       ++NumDeadInst;
4220       LLVM_DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
4221       salvageDebugInfo(*Inst);
4222       Inst->eraseFromParent();
4223       MadeIRChange = true;
4224       continue;
4225     }
4226 
4227     ICWorklist.push(Inst);
4228   }
4229 
4230   return MadeIRChange;
4231 }
4232 
4233 static bool combineInstructionsOverFunction(
4234     Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA,
4235     AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
4236     DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI,
4237     ProfileSummaryInfo *PSI, unsigned MaxIterations, LoopInfo *LI) {
4238   auto &DL = F.getParent()->getDataLayout();
4239 
4240   /// Builder - This is an IRBuilder that automatically inserts new
4241   /// instructions into the worklist when they are created.
4242   IRBuilder<TargetFolder, IRBuilderCallbackInserter> Builder(
4243       F.getContext(), TargetFolder(DL),
4244       IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
4245         Worklist.add(I);
4246         if (auto *Assume = dyn_cast<AssumeInst>(I))
4247           AC.registerAssumption(Assume);
4248       }));
4249 
4250   // Lower dbg.declare intrinsics otherwise their value may be clobbered
4251   // by instcombiner.
4252   bool MadeIRChange = false;
4253   if (ShouldLowerDbgDeclare)
4254     MadeIRChange = LowerDbgDeclare(F);
4255 
4256   // Iterate while there is work to do.
4257   unsigned Iteration = 0;
4258   while (true) {
4259     ++NumWorklistIterations;
4260     ++Iteration;
4261 
4262     if (Iteration > InfiniteLoopDetectionThreshold) {
4263       report_fatal_error(
4264           "Instruction Combining seems stuck in an infinite loop after " +
4265           Twine(InfiniteLoopDetectionThreshold) + " iterations.");
4266     }
4267 
4268     if (Iteration > MaxIterations) {
4269       LLVM_DEBUG(dbgs() << "\n\n[IC] Iteration limit #" << MaxIterations
4270                         << " on " << F.getName()
4271                         << " reached; stopping before reaching a fixpoint\n");
4272       break;
4273     }
4274 
4275     LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
4276                       << F.getName() << "\n");
4277 
4278     MadeIRChange |= prepareICWorklistFromFunction(F, DL, &TLI, Worklist);
4279 
4280     InstCombinerImpl IC(Worklist, Builder, F.hasMinSize(), AA, AC, TLI, TTI, DT,
4281                         ORE, BFI, PSI, DL, LI);
4282     IC.MaxArraySizeForCombine = MaxArraySize;
4283 
4284     if (!IC.run())
4285       break;
4286 
4287     MadeIRChange = true;
4288   }
4289 
4290   if (Iteration == 1)
4291     ++NumOneIteration;
4292   else if (Iteration == 2)
4293     ++NumTwoIterations;
4294   else if (Iteration == 3)
4295     ++NumThreeIterations;
4296   else
4297     ++NumFourOrMoreIterations;
4298 
4299   return MadeIRChange;
4300 }
4301 
4302 InstCombinePass::InstCombinePass(InstCombineOptions Opts) : Options(Opts) {}
4303 
4304 void InstCombinePass::printPipeline(
4305     raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
4306   static_cast<PassInfoMixin<InstCombinePass> *>(this)->printPipeline(
4307       OS, MapClassName2PassName);
4308   OS << '<';
4309   OS << "max-iterations=" << Options.MaxIterations << ";";
4310   OS << (Options.UseLoopInfo ? "" : "no-") << "use-loop-info";
4311   OS << '>';
4312 }
4313 
4314 PreservedAnalyses InstCombinePass::run(Function &F,
4315                                        FunctionAnalysisManager &AM) {
4316   auto &AC = AM.getResult<AssumptionAnalysis>(F);
4317   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
4318   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
4319   auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
4320   auto &TTI = AM.getResult<TargetIRAnalysis>(F);
4321 
4322   // TODO: Only use LoopInfo when the option is set. This requires that the
4323   //       callers in the pass pipeline explicitly set the option.
4324   auto *LI = AM.getCachedResult<LoopAnalysis>(F);
4325   if (!LI && Options.UseLoopInfo)
4326     LI = &AM.getResult<LoopAnalysis>(F);
4327 
4328   auto *AA = &AM.getResult<AAManager>(F);
4329   auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
4330   ProfileSummaryInfo *PSI =
4331       MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
4332   auto *BFI = (PSI && PSI->hasProfileSummary()) ?
4333       &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;
4334 
4335   if (!combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
4336                                        BFI, PSI, Options.MaxIterations, LI))
4337     // No changes, all analyses are preserved.
4338     return PreservedAnalyses::all();
4339 
4340   // Mark all the analyses that instcombine updates as preserved.
4341   PreservedAnalyses PA;
4342   PA.preserveSet<CFGAnalyses>();
4343   return PA;
4344 }
4345 
4346 void InstructionCombiningPass::getAnalysisUsage(AnalysisUsage &AU) const {
4347   AU.setPreservesCFG();
4348   AU.addRequired<AAResultsWrapperPass>();
4349   AU.addRequired<AssumptionCacheTracker>();
4350   AU.addRequired<TargetLibraryInfoWrapperPass>();
4351   AU.addRequired<TargetTransformInfoWrapperPass>();
4352   AU.addRequired<DominatorTreeWrapperPass>();
4353   AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
4354   AU.addPreserved<DominatorTreeWrapperPass>();
4355   AU.addPreserved<AAResultsWrapperPass>();
4356   AU.addPreserved<BasicAAWrapperPass>();
4357   AU.addPreserved<GlobalsAAWrapperPass>();
4358   AU.addRequired<ProfileSummaryInfoWrapperPass>();
4359   LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
4360 }
4361 
4362 bool InstructionCombiningPass::runOnFunction(Function &F) {
4363   if (skipFunction(F))
4364     return false;
4365 
4366   // Required analyses.
4367   auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
4368   auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
4369   auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
4370   auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
4371   auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
4372   auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
4373 
4374   // Optional analyses.
4375   auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
4376   auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
4377   ProfileSummaryInfo *PSI =
4378       &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
4379   BlockFrequencyInfo *BFI =
4380       (PSI && PSI->hasProfileSummary()) ?
4381       &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
4382       nullptr;
4383 
4384   return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
4385                                          BFI, PSI,
4386                                          InstCombineDefaultMaxIterations, LI);
4387 }
4388 
4389 char InstructionCombiningPass::ID = 0;
4390 
4391 InstructionCombiningPass::InstructionCombiningPass() : FunctionPass(ID) {
4392   initializeInstructionCombiningPassPass(*PassRegistry::getPassRegistry());
4393 }
4394 
4395 INITIALIZE_PASS_BEGIN(InstructionCombiningPass, "instcombine",
4396                       "Combine redundant instructions", false, false)
4397 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
4398 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
4399 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
4400 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
4401 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
4402 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
4403 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
4404 INITIALIZE_PASS_DEPENDENCY(LazyBlockFrequencyInfoPass)
4405 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
4406 INITIALIZE_PASS_END(InstructionCombiningPass, "instcombine",
4407                     "Combine redundant instructions", false, false)
4408 
4409 // Initialization Routines
4410 void llvm::initializeInstCombine(PassRegistry &Registry) {
4411   initializeInstructionCombiningPassPass(Registry);
4412 }
4413 
4414 FunctionPass *llvm::createInstructionCombiningPass() {
4415   return new InstructionCombiningPass();
4416 }
4417