//===- AggressiveInstCombine.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the aggressive expression pattern combiner classes.
// Currently, it handles expression patterns for:
//  * Truncate instruction
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h"
#include "AggressiveInstCombineInternal.h"
#include "llvm-c/Initialization.h"
#include "llvm-c/Transforms/AggressiveInstCombine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;
using namespace PatternMatch;

namespace llvm {
class DataLayout;
}

#define DEBUG_TYPE "aggressive-instcombine"

STATISTIC(NumAnyOrAllBitsSet, "Number of any/all-bits-set patterns folded");
STATISTIC(NumGuardedRotates,
          "Number of guarded rotates transformed into funnel shifts");
STATISTIC(NumGuardedFunnelShifts,
          "Number of guarded funnel shifts transformed into funnel shifts");
STATISTIC(NumPopCountRecognized, "Number of popcount idioms recognized");

namespace {
/// Contains expression pattern combiner logic.
/// This class provides both the logic to match expression patterns and
/// combine them. It differs from the InstCombiner class in that each pattern
/// combiner runs only once, as opposed to InstCombine's multi-iteration
/// approach, which allows each pattern combiner to have higher complexity
/// than the O(1) required by the instruction combiner.
class AggressiveInstCombinerLegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  AggressiveInstCombinerLegacyPass() : FunctionPass(ID) {
    initializeAggressiveInstCombinerLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Run all expression pattern optimizations on the given function \p F.
  ///
  /// \param F function to optimize.
  /// \returns true if the IR is changed.
  bool runOnFunction(Function &F) override;
};
} // namespace

/// Match a pattern for a bitwise funnel/rotate operation that partially guards
/// against undefined behavior by branching around the funnel-shift/rotation
/// when the shift amount is 0.
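///
/// As a sketch, this recognizes guarded-rotate idioms of the kind below
/// (illustrative C only; the function name and the 32-bit width are
/// assumptions for the example, not taken from the matcher):
///   unsigned rotl32(unsigned x, unsigned n) {
///     return n == 0 ? x : (x << n) | (x >> (32 - n));
///   }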
static bool foldGuardedFunnelShift(Instruction &I, const DominatorTree &DT) {
  if (I.getOpcode() != Instruction::PHI || I.getNumOperands() != 2)
    return false;

  // As with the one-use checks below, this is not strictly necessary, but we
  // are being cautious to avoid potential perf regressions on targets that
  // do not actually have a funnel/rotate instruction (where the funnel shift
  // would be expanded back into math/shift/logic ops).
  if (!isPowerOf2_32(I.getType()->getScalarSizeInBits()))
    return false;

  // Match V to funnel shift left/right and capture the source operands and
  // shift amount.
  auto matchFunnelShift = [](Value *V, Value *&ShVal0, Value *&ShVal1,
                             Value *&ShAmt) {
    Value *SubAmt;
    unsigned Width = V->getType()->getScalarSizeInBits();

    // fshl(ShVal0, ShVal1, ShAmt)
    //  == (ShVal0 << ShAmt) | (ShVal1 >> (Width - ShAmt))
    if (match(V, m_OneUse(m_c_Or(
                     m_Shl(m_Value(ShVal0), m_Value(ShAmt)),
                     m_LShr(m_Value(ShVal1),
                            m_Sub(m_SpecificInt(Width), m_Value(SubAmt))))))) {
      if (ShAmt == SubAmt) // TODO: Use m_Specific
        return Intrinsic::fshl;
    }

    // fshr(ShVal0, ShVal1, ShAmt)
    //  == (ShVal0 << (Width - ShAmt)) | (ShVal1 >> ShAmt)
    if (match(V,
              m_OneUse(m_c_Or(m_Shl(m_Value(ShVal0), m_Sub(m_SpecificInt(Width),
                                                           m_Value(SubAmt))),
                              m_LShr(m_Value(ShVal1), m_Value(ShAmt)))))) {
      if (ShAmt == SubAmt) // TODO: Use m_Specific
        return Intrinsic::fshr;
    }

    return Intrinsic::not_intrinsic;
  };

  // One phi operand must be a funnel/rotate operation, and the other phi
  // operand must be the source value of that funnel/rotate operation:
  // phi [ rotate(RotSrc, ShAmt), FunnelBB ], [ RotSrc, GuardBB ]
  // phi [ fshl(ShVal0, ShVal1, ShAmt), FunnelBB ], [ ShVal0, GuardBB ]
  // phi [ fshr(ShVal0, ShVal1, ShAmt), FunnelBB ], [ ShVal1, GuardBB ]
  PHINode &Phi = cast<PHINode>(I);
  unsigned FunnelOp = 0, GuardOp = 1;
  Value *P0 = Phi.getOperand(0), *P1 = Phi.getOperand(1);
  Value *ShVal0, *ShVal1, *ShAmt;
  Intrinsic::ID IID = matchFunnelShift(P0, ShVal0, ShVal1, ShAmt);
  if (IID == Intrinsic::not_intrinsic ||
      (IID == Intrinsic::fshl && ShVal0 != P1) ||
      (IID == Intrinsic::fshr && ShVal1 != P1)) {
    IID = matchFunnelShift(P1, ShVal0, ShVal1, ShAmt);
    if (IID == Intrinsic::not_intrinsic ||
        (IID == Intrinsic::fshl && ShVal0 != P0) ||
        (IID == Intrinsic::fshr && ShVal1 != P0))
      return false;
    assert((IID == Intrinsic::fshl || IID == Intrinsic::fshr) &&
           "Pattern must match funnel shift left or right");
    std::swap(FunnelOp, GuardOp);
  }

  // The incoming block with our source operand must be the "guard" block.
  // That block must contain a cmp+branch to avoid the funnel/rotate when the
  // shift amount is equal to 0. The other incoming block is the block with
  // the funnel/rotate.
  BasicBlock *GuardBB = Phi.getIncomingBlock(GuardOp);
  BasicBlock *FunnelBB = Phi.getIncomingBlock(FunnelOp);
  Instruction *TermI = GuardBB->getTerminator();

  // Ensure that the shift values dominate the guard block's terminator.
  if (!DT.dominates(ShVal0, TermI) || !DT.dominates(ShVal1, TermI))
    return false;

  ICmpInst::Predicate Pred;
  BasicBlock *PhiBB = Phi.getParent();
  if (!match(TermI, m_Br(m_ICmp(Pred, m_Specific(ShAmt), m_ZeroInt()),
                         m_SpecificBB(PhiBB), m_SpecificBB(FunnelBB))))
    return false;

  if (Pred != CmpInst::ICMP_EQ)
    return false;

  IRBuilder<> Builder(PhiBB, PhiBB->getFirstInsertionPt());

  if (ShVal0 == ShVal1)
    ++NumGuardedRotates;
  else
    ++NumGuardedFunnelShifts;

  // If this is not a rotate then the guarding branch-and-phi was blocking
  // poison from the operand that is not selected on the 'shift-by-zero' path,
  // but a funnel shift won't - so freeze it.
  bool IsFshl = IID == Intrinsic::fshl;
  if (ShVal0 != ShVal1) {
    if (IsFshl && !llvm::isGuaranteedNotToBePoison(ShVal1))
      ShVal1 = Builder.CreateFreeze(ShVal1);
    else if (!IsFshl && !llvm::isGuaranteedNotToBePoison(ShVal0))
      ShVal0 = Builder.CreateFreeze(ShVal0);
  }

  // We matched a variation of this IR pattern:
  // GuardBB:
  //   %cmp = icmp eq i32 %ShAmt, 0
  //   br i1 %cmp, label %PhiBB, label %FunnelBB
  // FunnelBB:
  //   %sub = sub i32 32, %ShAmt
  //   %shr = lshr i32 %ShVal1, %sub
  //   %shl = shl i32 %ShVal0, %ShAmt
  //   %fsh = or i32 %shr, %shl
  //   br label %PhiBB
  // PhiBB:
  //   %cond = phi i32 [ %fsh, %FunnelBB ], [ %ShVal0, %GuardBB ]
  // -->
  // llvm.fshl.i32(i32 %ShVal0, i32 %ShVal1, i32 %ShAmt)
  Function *F = Intrinsic::getDeclaration(Phi.getModule(), IID, Phi.getType());
  Phi.replaceAllUsesWith(Builder.CreateCall(F, {ShVal0, ShVal1, ShAmt}));
  return true;
}

/// This is used by foldAnyOrAllBitsSet() to capture a source value (Root) and
/// the bit indexes (Mask) needed by a masked compare. If we're matching a chain
/// of 'and' ops, then we also need to capture the fact that we saw an
/// "and X, 1", so that's an extra return value for that case.
struct MaskOps {
  Value *Root = nullptr;
  APInt Mask;
  bool MatchAndChain;
  bool FoundAnd1 = false;

  MaskOps(unsigned BitWidth, bool MatchAnds)
      : Mask(APInt::getZero(BitWidth)), MatchAndChain(MatchAnds) {}
};

/// This is a recursive helper for foldAnyOrAllBitsSet() that walks through a
/// chain of 'and' or 'or' instructions looking for shift ops of a common source
/// value. Examples:
///   or (or (or X, (X >> 3)), (X >> 5)), (X >> 8)
/// returns { X, 0x129 }
///   and (and (X >> 1), 1), (X >> 4)
/// returns { X, 0x12 }
static bool matchAndOrChain(Value *V, MaskOps &MOps) {
  Value *Op0, *Op1;
  if (MOps.MatchAndChain) {
    // Recurse through a chain of 'and' operands. This requires an extra check
    // vs. the 'or' matcher: we must find an "and X, 1" instruction somewhere
    // in the chain to know that all of the high bits are cleared.
    if (match(V, m_And(m_Value(Op0), m_One()))) {
      MOps.FoundAnd1 = true;
      return matchAndOrChain(Op0, MOps);
    }
    if (match(V, m_And(m_Value(Op0), m_Value(Op1))))
      return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
  } else {
    // Recurse through a chain of 'or' operands.
    if (match(V, m_Or(m_Value(Op0), m_Value(Op1))))
      return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
  }

  // We need a shift-right or a bare value representing a compare of bit 0 of
  // the original source operand.
  Value *Candidate;
  const APInt *BitIndex = nullptr;
  if (!match(V, m_LShr(m_Value(Candidate), m_APInt(BitIndex))))
    Candidate = V;

  // Initialize result source operand.
  if (!MOps.Root)
    MOps.Root = Candidate;

  // If the shift constant is out-of-range, then this code has not been
  // simplified; bail out.
  if (BitIndex && BitIndex->uge(MOps.Mask.getBitWidth()))
    return false;

  // Fill in the mask bit derived from the shift constant.
  MOps.Mask.setBit(BitIndex ? BitIndex->getZExtValue() : 0);
  return MOps.Root == Candidate;
}

/// Match patterns that correspond to "any-bits-set" and "all-bits-set".
/// These will include a chain of 'or' or 'and'-shifted bits from a
/// common source value:
/// and (or  (lshr X, C), ...), 1 --> (X & CMask) != 0
/// and (and (lshr X, C), ...), 1 --> (X & CMask) == CMask
/// Note: "any-bits-clear" and "all-bits-clear" are variations of these patterns
/// that differ only with a final 'not' of the result. We expect that final
/// 'not' to be folded with the compare that we create here (invert predicate).
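///
/// For example (an illustrative sketch of the 'or' form; the i8 width and the
/// specific shift amounts are assumptions for the example):
///   %t1 = lshr i8 %x, 3
///   %t2 = or i8 %x, %t1
///   %t3 = lshr i8 %x, 5
///   %t4 = or i8 %t2, %t3
///   %r  = and i8 %t4, 1
/// -->
///   %m  = and i8 %x, 41       ; mask 0b00101001 covers bits 0, 3, and 5
///   %c  = icmp ne i8 %m, 0
///   %r  = zext i1 %c to i8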
static bool foldAnyOrAllBitsSet(Instruction &I) {
  // The 'any-bits-set' ('or' chain) pattern is simpler to match because the
  // "and X, 1" instruction must be the final op in the sequence.
  bool MatchAllBitsSet;
  if (match(&I, m_c_And(m_OneUse(m_And(m_Value(), m_Value())), m_Value())))
    MatchAllBitsSet = true;
  else if (match(&I, m_And(m_OneUse(m_Or(m_Value(), m_Value())), m_One())))
    MatchAllBitsSet = false;
  else
    return false;

  MaskOps MOps(I.getType()->getScalarSizeInBits(), MatchAllBitsSet);
  if (MatchAllBitsSet) {
    if (!matchAndOrChain(cast<BinaryOperator>(&I), MOps) || !MOps.FoundAnd1)
      return false;
  } else {
    if (!matchAndOrChain(cast<BinaryOperator>(&I)->getOperand(0), MOps))
      return false;
  }

  // The pattern was found. Create a masked compare that replaces all of the
  // shift and logic ops.
  IRBuilder<> Builder(&I);
  Constant *Mask = ConstantInt::get(I.getType(), MOps.Mask);
  Value *And = Builder.CreateAnd(MOps.Root, Mask);
  Value *Cmp = MatchAllBitsSet ? Builder.CreateICmpEQ(And, Mask)
                               : Builder.CreateIsNotNull(And);
  Value *Zext = Builder.CreateZExt(Cmp, I.getType());
  I.replaceAllUsesWith(Zext);
  ++NumAnyOrAllBitsSet;
  return true;
}

// Try to recognize the function below as a popcount idiom and convert it to
// the ctpop intrinsic.
// This is the "best" algorithm from
// http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
// Also used in TargetLowering::expandCTPOP().
//
// int popcount(unsigned int i) {
//   i = i - ((i >> 1) & 0x55555555);
//   i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
//   i = ((i + (i >> 4)) & 0x0F0F0F0F);
//   return (i * 0x01010101) >> 24;
// }
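//
// On a successful match, all uses of the final shift are replaced with a
// single intrinsic call, e.g. (illustrative, for the i32 case):
//   %res = call i32 @llvm.ctpop.i32(i32 %i)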
static bool tryToRecognizePopCount(Instruction &I) {
  if (I.getOpcode() != Instruction::LShr)
    return false;

  Type *Ty = I.getType();
  if (!Ty->isIntOrIntVectorTy())
    return false;

  unsigned Len = Ty->getScalarSizeInBits();
  // FIXME: fix Len == 8 and other irregular type lengths.
  if (!(Len <= 128 && Len > 8 && Len % 8 == 0))
    return false;

  APInt Mask55 = APInt::getSplat(Len, APInt(8, 0x55));
  APInt Mask33 = APInt::getSplat(Len, APInt(8, 0x33));
  APInt Mask0F = APInt::getSplat(Len, APInt(8, 0x0F));
  APInt Mask01 = APInt::getSplat(Len, APInt(8, 0x01));
  APInt MaskShift = APInt(Len, Len - 8);

  Value *Op0 = I.getOperand(0);
  Value *Op1 = I.getOperand(1);
  Value *MulOp0;
  // Matching "(i * 0x01010101...) >> 24".
  if ((match(Op0, m_Mul(m_Value(MulOp0), m_SpecificInt(Mask01)))) &&
      match(Op1, m_SpecificInt(MaskShift))) {
    Value *ShiftOp0;
    // Matching "((i + (i >> 4)) & 0x0F0F0F0F...)".
    if (match(MulOp0, m_And(m_c_Add(m_LShr(m_Value(ShiftOp0), m_SpecificInt(4)),
                                    m_Deferred(ShiftOp0)),
                            m_SpecificInt(Mask0F)))) {
      Value *AndOp0;
      // Matching "(i & 0x33333333...) + ((i >> 2) & 0x33333333...)".
      if (match(ShiftOp0,
                m_c_Add(m_And(m_Value(AndOp0), m_SpecificInt(Mask33)),
                        m_And(m_LShr(m_Deferred(AndOp0), m_SpecificInt(2)),
                              m_SpecificInt(Mask33))))) {
        Value *Root, *SubOp1;
        // Matching "i - ((i >> 1) & 0x55555555...)".
        if (match(AndOp0, m_Sub(m_Value(Root), m_Value(SubOp1))) &&
            match(SubOp1, m_And(m_LShr(m_Specific(Root), m_SpecificInt(1)),
                                m_SpecificInt(Mask55)))) {
          LLVM_DEBUG(dbgs() << "Recognized popcount intrinsic\n");
          IRBuilder<> Builder(&I);
          Function *Func = Intrinsic::getDeclaration(
              I.getModule(), Intrinsic::ctpop, I.getType());
          I.replaceAllUsesWith(Builder.CreateCall(Func, {Root}));
          ++NumPopCountRecognized;
          return true;
        }
      }
    }
  }

  return false;
}

/// Fold smin(smax(fptosi(x), C1), C2) to llvm.fptosi.sat(x), provided C1 and
/// C2 saturate the value of the fp conversion. The transform is not reversible
/// because the fptosi.sat is more defined than the input: every input produces
/// a valid value for the fptosi.sat, whereas inputs outside the range of the
/// integer conversion produce poison for the original. The reversed pattern
/// may use fmax and fmin instead. As we cannot directly reverse the transform,
/// and it is not always profitable, we make it conditional on the cost being
/// reported as lower by TTI.
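///
/// For example (an illustrative sketch; the f32/i32 types and the i8 clamp
/// range are assumptions for the example, and the fold only fires when the
/// TTI cost check below favors it):
///   %conv = fptosi float %x to i32
///   %min  = call i32 @llvm.smin.i32(i32 %conv, i32 127)
///   %max  = call i32 @llvm.smax.i32(i32 %min, i32 -128)
/// -->
///   %sat  = call i8 @llvm.fptosi.sat.i8.f32(float %x)
///   %ext  = sext i8 %sat to i32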
static bool tryToFPToSat(Instruction &I, TargetTransformInfo &TTI) {
  // Look for smin/smax of an fptosi that clamps to a signed range, and
  // convert it to fptosi_sat.
  Value *In;
  const APInt *MinC, *MaxC;
  if (!match(&I, m_SMax(m_OneUse(m_SMin(m_OneUse(m_FPToSI(m_Value(In))),
                                        m_APInt(MinC))),
                        m_APInt(MaxC))) &&
      !match(&I, m_SMin(m_OneUse(m_SMax(m_OneUse(m_FPToSI(m_Value(In))),
                                        m_APInt(MaxC))),
                        m_APInt(MinC))))
    return false;

  // Check that the constants clamp to the full range of a narrower signed
  // type, i.e. [-2^k, 2^k - 1].
  if (!(*MinC + 1).isPowerOf2() || -*MaxC != *MinC + 1)
    return false;

  Type *IntTy = I.getType();
  Type *FpTy = In->getType();
  Type *SatTy =
      IntegerType::get(IntTy->getContext(), (*MinC + 1).exactLogBase2() + 1);
  if (auto *VecTy = dyn_cast<VectorType>(IntTy))
    SatTy = VectorType::get(SatTy, VecTy->getElementCount());

  // Get the cost of the intrinsic, and check that against the cost of
  // fptosi+smin+smax.
  InstructionCost SatCost = TTI.getIntrinsicInstrCost(
      IntrinsicCostAttributes(Intrinsic::fptosi_sat, SatTy, {In}, {FpTy}),
      TTI::TCK_RecipThroughput);
  SatCost += TTI.getCastInstrCost(Instruction::SExt, IntTy, SatTy,
                                  TTI::CastContextHint::None,
                                  TTI::TCK_RecipThroughput);

  InstructionCost MinMaxCost = TTI.getCastInstrCost(
      Instruction::FPToSI, IntTy, FpTy, TTI::CastContextHint::None,
      TTI::TCK_RecipThroughput);
  MinMaxCost += TTI.getIntrinsicInstrCost(
      IntrinsicCostAttributes(Intrinsic::smin, IntTy, {IntTy}),
      TTI::TCK_RecipThroughput);
  MinMaxCost += TTI.getIntrinsicInstrCost(
      IntrinsicCostAttributes(Intrinsic::smax, IntTy, {IntTy}),
      TTI::TCK_RecipThroughput);

  if (SatCost >= MinMaxCost)
    return false;

  IRBuilder<> Builder(&I);
  Function *Fn = Intrinsic::getDeclaration(I.getModule(), Intrinsic::fptosi_sat,
                                           {SatTy, FpTy});
  Value *Sat = Builder.CreateCall(Fn, In);
  I.replaceAllUsesWith(Builder.CreateSExt(Sat, IntTy));
  return true;
}

/// Try to replace a mathlib call to sqrt with the LLVM intrinsic. This avoids
/// pessimistic codegen that has to account for setting errno and can enable
/// vectorization.
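///
/// For example (an illustrative sketch; assumes the call carries the nnan
/// fast-math flag that the code below requires):
///   %r = call nnan double @sqrt(double %x)
/// -->
///   %r = call nnan double @llvm.sqrt.f64(double %x)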
static bool
foldSqrt(Instruction &I, TargetTransformInfo &TTI, TargetLibraryInfo &TLI) {
  // Match a call to the sqrt mathlib function.
  auto *Call = dyn_cast<CallInst>(&I);
  if (!Call)
    return false;

  Module *M = Call->getModule();
  LibFunc Func;
  if (!TLI.getLibFunc(*Call, Func) || !isLibFuncEmittable(M, &TLI, Func))
    return false;

  if (Func != LibFunc_sqrt && Func != LibFunc_sqrtf && Func != LibFunc_sqrtl)
    return false;

  // If (1) this is a sqrt libcall, (2) we can assume that NaN is not created
  // (because the no-NaNs flag checked below implies the operand is not less
  // than -0.0), and (3) we would not end up lowering to a libcall anyway
  // (which could change the value of errno), then:
  //   - errno won't be set, and
  //   - it is safe to convert this to an intrinsic call.
  // TODO: Check if the arg is known non-negative.
  Type *Ty = Call->getType();
  if (TTI.haveFastSqrt(Ty) && Call->hasNoNaNs()) {
    IRBuilder<> Builder(&I);
    IRBuilderBase::FastMathFlagGuard Guard(Builder);
    Builder.setFastMathFlags(Call->getFastMathFlags());

    Function *Sqrt = Intrinsic::getDeclaration(M, Intrinsic::sqrt, Ty);
    Value *NewSqrt = Builder.CreateCall(Sqrt, Call->getArgOperand(0), "sqrt");
    I.replaceAllUsesWith(NewSqrt);

    // Explicitly erase the old call because a call with side effects is not
    // trivially dead.
    I.eraseFromParent();
    return true;
  }

  return false;
}

/// This is the entry point for folds that could be implemented in regular
/// InstCombine, but they are separated because they are not expected to
/// occur frequently and/or have more than a constant-length pattern match.
static bool foldUnusualPatterns(Function &F, DominatorTree &DT,
                                TargetTransformInfo &TTI,
                                TargetLibraryInfo &TLI) {
  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    // Ignore unreachable basic blocks.
    if (!DT.isReachableFromEntry(&BB))
      continue;

    // Walk the block backwards for efficiency. We're matching a chain of
    // use->defs, so we're more likely to succeed by starting from the bottom.
    // Also, we want to avoid matching partial patterns.
    // TODO: It would be more efficient if we removed dead instructions
    // iteratively in this loop rather than waiting until the end.
    for (Instruction &I : make_early_inc_range(llvm::reverse(BB))) {
      MadeChange |= foldAnyOrAllBitsSet(I);
      MadeChange |= foldGuardedFunnelShift(I, DT);
      MadeChange |= tryToRecognizePopCount(I);
      MadeChange |= tryToFPToSat(I, TTI);
      MadeChange |= foldSqrt(I, TTI, TLI);
    }
  }

  // We're done with transforms, so remove dead instructions.
  if (MadeChange)
    for (BasicBlock &BB : F)
      SimplifyInstructionsInBlock(&BB);

  return MadeChange;
}

/// This is the entry point for all transforms. Pass manager differences are
/// handled in the callers of this function.
static bool runImpl(Function &F, AssumptionCache &AC, TargetTransformInfo &TTI,
                    TargetLibraryInfo &TLI, DominatorTree &DT) {
  bool MadeChange = false;
  const DataLayout &DL = F.getParent()->getDataLayout();
  TruncInstCombine TIC(AC, TLI, DL, DT);
  MadeChange |= TIC.run(F);
  MadeChange |= foldUnusualPatterns(F, DT, TTI, TLI);
  return MadeChange;
}

void AggressiveInstCombinerLegacyPass::getAnalysisUsage(
    AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addRequired<TargetTransformInfoWrapperPass>();
  AU.addPreserved<AAResultsWrapperPass>();
  AU.addPreserved<BasicAAWrapperPass>();
  AU.addPreserved<DominatorTreeWrapperPass>();
  AU.addPreserved<GlobalsAAWrapperPass>();
}

bool AggressiveInstCombinerLegacyPass::runOnFunction(Function &F) {
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  return runImpl(F, AC, TTI, TLI, DT);
}

PreservedAnalyses AggressiveInstCombinePass::run(Function &F,
                                                 FunctionAnalysisManager &AM) {
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  if (!runImpl(F, AC, TTI, TLI, DT)) {
    // No changes, all analyses are preserved.
    return PreservedAnalyses::all();
  }
  // Mark all the analyses that instcombine updates as preserved.
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return PA;
}
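
// For ad-hoc testing, this pass can be exercised on its own via the new pass
// manager (illustrative invocation; the input file name is a placeholder):
//   opt -passes=aggressive-instcombine -S input.ll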

char AggressiveInstCombinerLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(AggressiveInstCombinerLegacyPass,
                      "aggressive-instcombine",
                      "Combine pattern based expressions", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(AggressiveInstCombinerLegacyPass, "aggressive-instcombine",
                    "Combine pattern based expressions", false, false)

// Initialization Routines
void llvm::initializeAggressiveInstCombine(PassRegistry &Registry) {
  initializeAggressiveInstCombinerLegacyPassPass(Registry);
}

void LLVMInitializeAggressiveInstCombiner(LLVMPassRegistryRef R) {
  initializeAggressiveInstCombinerLegacyPassPass(*unwrap(R));
}

FunctionPass *llvm::createAggressiveInstCombinerPass() {
  return new AggressiveInstCombinerLegacyPass();
}

void LLVMAddAggressiveInstCombinerPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createAggressiveInstCombinerPass());
}