1 //===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass munges the code in the input function to better prepare it for
10 // SelectionDAG-based code generation. This works around limitations in its
11 // basic-block-at-a-time approach. It should eventually be removed.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/ArrayRef.h"
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/ADT/MapVector.h"
19 #include "llvm/ADT/PointerIntPair.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/Analysis/BlockFrequencyInfo.h"
25 #include "llvm/Analysis/BranchProbabilityInfo.h"
26 #include "llvm/Analysis/InstructionSimplify.h"
27 #include "llvm/Analysis/LoopInfo.h"
28 #include "llvm/Analysis/ProfileSummaryInfo.h"
29 #include "llvm/Analysis/TargetLibraryInfo.h"
30 #include "llvm/Analysis/TargetTransformInfo.h"
31 #include "llvm/Analysis/ValueTracking.h"
32 #include "llvm/Analysis/VectorUtils.h"
33 #include "llvm/CodeGen/Analysis.h"
34 #include "llvm/CodeGen/BasicBlockSectionsProfileReader.h"
35 #include "llvm/CodeGen/ISDOpcodes.h"
36 #include "llvm/CodeGen/SelectionDAGNodes.h"
37 #include "llvm/CodeGen/TargetLowering.h"
38 #include "llvm/CodeGen/TargetPassConfig.h"
39 #include "llvm/CodeGen/TargetSubtargetInfo.h"
40 #include "llvm/CodeGen/ValueTypes.h"
41 #include "llvm/Config/llvm-config.h"
42 #include "llvm/IR/Argument.h"
43 #include "llvm/IR/Attributes.h"
44 #include "llvm/IR/BasicBlock.h"
45 #include "llvm/IR/Constant.h"
46 #include "llvm/IR/Constants.h"
47 #include "llvm/IR/DataLayout.h"
48 #include "llvm/IR/DebugInfo.h"
49 #include "llvm/IR/DerivedTypes.h"
50 #include "llvm/IR/Dominators.h"
51 #include "llvm/IR/Function.h"
52 #include "llvm/IR/GetElementPtrTypeIterator.h"
53 #include "llvm/IR/GlobalValue.h"
54 #include "llvm/IR/GlobalVariable.h"
55 #include "llvm/IR/IRBuilder.h"
56 #include "llvm/IR/InlineAsm.h"
57 #include "llvm/IR/InstrTypes.h"
58 #include "llvm/IR/Instruction.h"
59 #include "llvm/IR/Instructions.h"
60 #include "llvm/IR/IntrinsicInst.h"
61 #include "llvm/IR/Intrinsics.h"
62 #include "llvm/IR/IntrinsicsAArch64.h"
63 #include "llvm/IR/LLVMContext.h"
64 #include "llvm/IR/MDBuilder.h"
65 #include "llvm/IR/Module.h"
66 #include "llvm/IR/Operator.h"
67 #include "llvm/IR/PatternMatch.h"
68 #include "llvm/IR/ProfDataUtils.h"
69 #include "llvm/IR/Statepoint.h"
70 #include "llvm/IR/Type.h"
71 #include "llvm/IR/Use.h"
72 #include "llvm/IR/User.h"
73 #include "llvm/IR/Value.h"
74 #include "llvm/IR/ValueHandle.h"
75 #include "llvm/IR/ValueMap.h"
76 #include "llvm/InitializePasses.h"
77 #include "llvm/Pass.h"
78 #include "llvm/Support/BlockFrequency.h"
79 #include "llvm/Support/BranchProbability.h"
80 #include "llvm/Support/Casting.h"
81 #include "llvm/Support/CommandLine.h"
82 #include "llvm/Support/Compiler.h"
83 #include "llvm/Support/Debug.h"
84 #include "llvm/Support/ErrorHandling.h"
85 #include "llvm/Support/MachineValueType.h"
86 #include "llvm/Support/MathExtras.h"
87 #include "llvm/Support/raw_ostream.h"
88 #include "llvm/Target/TargetMachine.h"
89 #include "llvm/Target/TargetOptions.h"
90 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
91 #include "llvm/Transforms/Utils/BypassSlowDivision.h"
92 #include "llvm/Transforms/Utils/Local.h"
93 #include "llvm/Transforms/Utils/SimplifyLibCalls.h"
94 #include "llvm/Transforms/Utils/SizeOpts.h"
95 #include <algorithm>
96 #include <cassert>
97 #include <cstdint>
98 #include <iterator>
99 #include <limits>
100 #include <memory>
101 #include <optional>
102 #include <utility>
103 #include <vector>
104 
105 using namespace llvm;
106 using namespace llvm::PatternMatch;
107 
108 #define DEBUG_TYPE "codegenprepare"
109 
110 STATISTIC(NumBlocksElim, "Number of blocks eliminated");
111 STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
112 STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
113 STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
114                       "sunken Cmps");
115 STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
116                        "of sunken Casts");
117 STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
118                           "computations were sunk");
119 STATISTIC(NumMemoryInstsPhiCreated,
120           "Number of phis created when address "
121           "computations were sunk to memory instructions");
122 STATISTIC(NumMemoryInstsSelectCreated,
123           "Number of selects created when address "
124           "computations were sunk to memory instructions");
125 STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
126 STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
127 STATISTIC(NumAndsAdded,
128           "Number of and mask instructions added to form ext loads");
129 STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
130 STATISTIC(NumRetsDup, "Number of return instructions duplicated");
131 STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
132 STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
133 STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");
134 
135 static cl::opt<bool> DisableBranchOpts(
136     "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
137     cl::desc("Disable branch optimizations in CodeGenPrepare"));
138 
139 static cl::opt<bool>
140     DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
141                   cl::desc("Disable GC optimizations in CodeGenPrepare"));
142 
143 static cl::opt<bool>
144     DisableSelectToBranch("disable-cgp-select2branch", cl::Hidden,
145                           cl::init(false),
146                           cl::desc("Disable select to branch conversion."));
147 
148 static cl::opt<bool>
149     AddrSinkUsingGEPs("addr-sink-using-gep", cl::Hidden, cl::init(true),
150                       cl::desc("Address sinking in CGP using GEPs."));
151 
152 static cl::opt<bool>
153     EnableAndCmpSinking("enable-andcmp-sinking", cl::Hidden, cl::init(true),
154                         cl::desc("Enable sinking and/cmp into branches."));
155 
156 static cl::opt<bool> DisableStoreExtract(
157     "disable-cgp-store-extract", cl::Hidden, cl::init(false),
158     cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));
159 
160 static cl::opt<bool> StressStoreExtract(
161     "stress-cgp-store-extract", cl::Hidden, cl::init(false),
162     cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));
163 
164 static cl::opt<bool> DisableExtLdPromotion(
165     "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
166     cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
167              "CodeGenPrepare"));
168 
169 static cl::opt<bool> StressExtLdPromotion(
170     "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
171     cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
172              "optimization in CodeGenPrepare"));
173 
174 static cl::opt<bool> DisablePreheaderProtect(
175     "disable-preheader-prot", cl::Hidden, cl::init(false),
176     cl::desc("Disable protection against removing loop preheaders"));
177 
178 static cl::opt<bool> ProfileGuidedSectionPrefix(
179     "profile-guided-section-prefix", cl::Hidden, cl::init(true),
180     cl::desc("Use profile info to add section prefix for hot/cold functions"));
181 
182 static cl::opt<bool> ProfileUnknownInSpecialSection(
183     "profile-unknown-in-special-section", cl::Hidden,
184     cl::desc("In a profiling mode like sampleFDO, if a function doesn't "
185              "have a profile, we cannot tell for sure that it is cold, "
186              "because it may be a newly added function that has never been "
187              "sampled. With this flag enabled, the compiler can put such "
188              "profile-unknown functions into a special section, so the "
189              "runtime system can choose to handle them differently from "
190              "the .text section, to save RAM for example."));
191 
192 static cl::opt<bool> BBSectionsGuidedSectionPrefix(
193     "bbsections-guided-section-prefix", cl::Hidden, cl::init(true),
194     cl::desc("Use the basic-block-sections profile to determine the text "
195              "section prefix for hot functions. Functions with "
196              "basic-block-sections profile will be placed in `.text.hot` "
197              "regardless of their FDO profile info. Other functions won't be "
198              "impacted, i.e., their prefixes will be decided by FDO/sampleFDO "
199              "profiles."));
200 
201 static cl::opt<unsigned> FreqRatioToSkipMerge(
202     "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
203     cl::desc("Skip merging empty blocks if (frequency of empty block) / "
204              "(frequency of destination block) is greater than this ratio"));
205 
206 static cl::opt<bool> ForceSplitStore(
207     "force-split-store", cl::Hidden, cl::init(false),
208     cl::desc("Force store splitting no matter what the target query says."));
209 
210 static cl::opt<bool> EnableTypePromotionMerge(
211     "cgp-type-promotion-merge", cl::Hidden,
212     cl::desc("Enable merging of redundant sexts when one dominates"
213              " the other."),
214     cl::init(true));
215 
216 static cl::opt<bool> DisableComplexAddrModes(
217     "disable-complex-addr-modes", cl::Hidden, cl::init(false),
218     cl::desc("Disables combining addressing modes with different parts "
219              "in optimizeMemoryInst."));
220 
221 static cl::opt<bool>
222     AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false),
223                     cl::desc("Allow creation of Phis in Address sinking."));
224 
225 static cl::opt<bool> AddrSinkNewSelects(
226     "addr-sink-new-select", cl::Hidden, cl::init(true),
227     cl::desc("Allow creation of selects in Address sinking."));
228 
229 static cl::opt<bool> AddrSinkCombineBaseReg(
230     "addr-sink-combine-base-reg", cl::Hidden, cl::init(true),
231     cl::desc("Allow combining of BaseReg field in Address sinking."));
232 
233 static cl::opt<bool> AddrSinkCombineBaseGV(
234     "addr-sink-combine-base-gv", cl::Hidden, cl::init(true),
235     cl::desc("Allow combining of BaseGV field in Address sinking."));
236 
237 static cl::opt<bool> AddrSinkCombineBaseOffs(
238     "addr-sink-combine-base-offs", cl::Hidden, cl::init(true),
239     cl::desc("Allow combining of BaseOffs field in Address sinking."));
240 
241 static cl::opt<bool> AddrSinkCombineScaledReg(
242     "addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true),
243     cl::desc("Allow combining of ScaledReg field in Address sinking."));
244 
245 static cl::opt<bool>
246     EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden,
247                          cl::init(true),
248                          cl::desc("Enable splitting large offset of GEP."));
249 
250 static cl::opt<bool> EnableICMP_EQToICMP_ST(
251     "cgp-icmp-eq2icmp-st", cl::Hidden, cl::init(false),
252     cl::desc("Enable ICMP_EQ to ICMP_S(L|G)T conversion."));
253 
254 static cl::opt<bool>
255     VerifyBFIUpdates("cgp-verify-bfi-updates", cl::Hidden, cl::init(false),
256                      cl::desc("Enable BFI update verification for "
257                               "CodeGenPrepare."));
258 
259 static cl::opt<bool>
260     OptimizePhiTypes("cgp-optimize-phi-types", cl::Hidden, cl::init(false),
261                      cl::desc("Enable converting phi types in CodeGenPrepare"));
262 
263 static cl::opt<unsigned>
264     HugeFuncThresholdInCGPP("cgpp-huge-func", cl::init(10000), cl::Hidden,
265                             cl::desc("Number of basic blocks above which a function is considered huge."));
266 
267 namespace {
268 
269 enum ExtType {
270   ZeroExtension, // Zero extension has been seen.
271   SignExtension, // Sign extension has been seen.
272   BothExtension  // This extension type is used if we saw sext after
273                  // ZeroExtension had been set, or if we saw zext after
274                  // SignExtension had been set. It makes the type
275                  // information of a promoted instruction invalid.
276 };
277 
278 enum ModifyDT {
279   NotModifyDT, // Does not modify any dominator tree.
280   ModifyBBDT,  // Modifies the basic block dominator tree.
281   ModifyInstDT // Modifies instruction dominance within a basic block.
282                // This usually means we moved/deleted/inserted an
283                // instruction in a basic block, so we should re-iterate
284                // the instructions in that basic block.
285 };
286 
287 using SetOfInstrs = SmallPtrSet<Instruction *, 16>;
288 using TypeIsSExt = PointerIntPair<Type *, 2, ExtType>;
289 using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>;
290 using SExts = SmallVector<Instruction *, 16>;
291 using ValueToSExts = MapVector<Value *, SExts>;
292 
293 class TypePromotionTransaction;
294 
295 class CodeGenPrepare : public FunctionPass {
296   const TargetMachine *TM = nullptr;
297   const TargetSubtargetInfo *SubtargetInfo;
298   const TargetLowering *TLI = nullptr;
299   const TargetRegisterInfo *TRI;
300   const TargetTransformInfo *TTI = nullptr;
301   const BasicBlockSectionsProfileReader *BBSectionsProfileReader = nullptr;
302   const TargetLibraryInfo *TLInfo;
303   const LoopInfo *LI;
304   std::unique_ptr<BlockFrequencyInfo> BFI;
305   std::unique_ptr<BranchProbabilityInfo> BPI;
306   ProfileSummaryInfo *PSI;
307 
308   /// As we scan instructions optimizing them, this is the next instruction
309   /// to optimize. Transforms that can invalidate this should update it.
310   BasicBlock::iterator CurInstIterator;
311 
312   /// Keeps track of non-local addresses that have been sunk into a block.
313   /// This allows us to avoid inserting duplicate code for blocks with
314   /// multiple load/stores of the same address. The usage of WeakTrackingVH
315   /// enables SunkAddrs to be treated as a cache whose entries can be
316   /// invalidated if a sunken address computation has been erased.
317   ValueMap<Value *, WeakTrackingVH> SunkAddrs;
318 
319   /// Keeps track of all instructions inserted for the current function.
320   SetOfInstrs InsertedInsts;
321 
322   /// Keeps track of the type of each promoted instruction before its
323   /// promotion, for the current function.
324   InstrToOrigTy PromotedInsts;
325 
326   /// Keep track of instructions removed during promotion.
327   SetOfInstrs RemovedInsts;
328 
329   /// Keep track of sext chains based on their initial value.
330   DenseMap<Value *, Instruction *> SeenChainsForSExt;
331 
332   /// Keep track of GEPs accessing the same data structures such as structs or
333   /// arrays that are candidates to be split later because of their large
334   /// size.
335   MapVector<AssertingVH<Value>,
336             SmallVector<std::pair<AssertingVH<GetElementPtrInst>, int64_t>, 32>>
337       LargeOffsetGEPMap;
338 
339   /// Keep track of new GEP bases after splitting GEPs that have large offsets.
340   SmallSet<AssertingVH<Value>, 2> NewGEPBases;
341 
342   /// Map large-offset GEPs to their serial numbers.
343   DenseMap<AssertingVH<GetElementPtrInst>, int> LargeOffsetGEPID;
344 
345   /// Keep track of SExt promoted.
346   ValueToSExts ValToSExtendedUses;
347 
348   /// True if the function has the OptSize attribute.
349   bool OptSize;
350 
351   /// DataLayout for the Function being processed.
352   const DataLayout *DL = nullptr;
353 
354   /// Building the dominator tree can be expensive, so we only build it
355   /// lazily and update it when required.
356   std::unique_ptr<DominatorTree> DT;
357 
358 public:
359   /// If we encounter a huge function, we need to limit the build time.
360   bool IsHugeFunc = false;
361 
362   /// FreshBBs is like a worklist: it collects the updated BBs which need
363   /// to be optimized again.
364   /// Note: To limit the build time of this pass, whenever a BB is updated
365   /// in a huge function, we need to insert it into FreshBBs.
366   SmallSet<BasicBlock *, 32> FreshBBs;
367 
368   static char ID; // Pass identification, replacement for typeid
369 
370   CodeGenPrepare() : FunctionPass(ID) {
371     initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
372   }
373 
374   bool runOnFunction(Function &F) override;
375 
376   StringRef getPassName() const override { return "CodeGen Prepare"; }
377 
378   void getAnalysisUsage(AnalysisUsage &AU) const override {
379     // FIXME: When we can selectively preserve passes, preserve the domtree.
380     AU.addRequired<ProfileSummaryInfoWrapperPass>();
381     AU.addRequired<TargetLibraryInfoWrapperPass>();
382     AU.addRequired<TargetPassConfig>();
383     AU.addRequired<TargetTransformInfoWrapperPass>();
384     AU.addRequired<LoopInfoWrapperPass>();
385     AU.addUsedIfAvailable<BasicBlockSectionsProfileReader>();
386   }
387 
388 private:
389   template <typename F>
390   void resetIteratorIfInvalidatedWhileCalling(BasicBlock *BB, F f) {
391     // Substituting can cause recursive simplifications, which can invalidate
392     // our iterator.  Use a WeakTrackingVH to hold onto it in case this
393     // happens.
394     Value *CurValue = &*CurInstIterator;
395     WeakTrackingVH IterHandle(CurValue);
396 
397     f();
398 
399     // If the iterator instruction was recursively deleted, start over at the
400     // start of the block.
401     if (IterHandle != CurValue) {
402       CurInstIterator = BB->begin();
403       SunkAddrs.clear();
404     }
405   }
406 
407   // Get the DominatorTree, building it if necessary.
408   DominatorTree &getDT(Function &F) {
409     if (!DT)
410       DT = std::make_unique<DominatorTree>(F);
411     return *DT;
412   }
413 
414   void removeAllAssertingVHReferences(Value *V);
415   bool eliminateAssumptions(Function &F);
416   bool eliminateFallThrough(Function &F);
417   bool eliminateMostlyEmptyBlocks(Function &F);
418   BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB);
419   bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
420   void eliminateMostlyEmptyBlock(BasicBlock *BB);
421   bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
422                                      bool isPreheader);
423   bool makeBitReverse(Instruction &I);
424   bool optimizeBlock(BasicBlock &BB, ModifyDT &ModifiedDT);
425   bool optimizeInst(Instruction *I, ModifyDT &ModifiedDT);
426   bool optimizeMemoryInst(Instruction *MemoryInst, Value *Addr, Type *AccessTy,
427                           unsigned AddrSpace);
428   bool optimizeGatherScatterInst(Instruction *MemoryInst, Value *Ptr);
429   bool optimizeInlineAsmInst(CallInst *CS);
430   bool optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT);
431   bool optimizeExt(Instruction *&I);
432   bool optimizeExtUses(Instruction *I);
433   bool optimizeLoadExt(LoadInst *Load);
434   bool optimizeShiftInst(BinaryOperator *BO);
435   bool optimizeFunnelShift(IntrinsicInst *Fsh);
436   bool optimizeSelectInst(SelectInst *SI);
437   bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI);
438   bool optimizeSwitchType(SwitchInst *SI);
439   bool optimizeSwitchPhiConstants(SwitchInst *SI);
440   bool optimizeSwitchInst(SwitchInst *SI);
441   bool optimizeExtractElementInst(Instruction *Inst);
442   bool dupRetToEnableTailCallOpts(BasicBlock *BB, ModifyDT &ModifiedDT);
443   bool fixupDbgValue(Instruction *I);
444   bool placeDbgValues(Function &F);
445   bool placePseudoProbes(Function &F);
446   bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts,
447                     LoadInst *&LI, Instruction *&Inst, bool HasPromoted);
448   bool tryToPromoteExts(TypePromotionTransaction &TPT,
449                         const SmallVectorImpl<Instruction *> &Exts,
450                         SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
451                         unsigned CreatedInstsCost = 0);
452   bool mergeSExts(Function &F);
453   bool splitLargeGEPOffsets();
454   bool optimizePhiType(PHINode *Inst, SmallPtrSetImpl<PHINode *> &Visited,
455                        SmallPtrSetImpl<Instruction *> &DeletedInstrs);
456   bool optimizePhiTypes(Function &F);
457   bool performAddressTypePromotion(
458       Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
459       bool HasPromoted, TypePromotionTransaction &TPT,
460       SmallVectorImpl<Instruction *> &SpeculativelyMovedExts);
461   bool splitBranchCondition(Function &F, ModifyDT &ModifiedDT);
462   bool simplifyOffsetableRelocate(GCStatepointInst &I);
463 
464   bool tryToSinkFreeOperands(Instruction *I);
465   bool replaceMathCmpWithIntrinsic(BinaryOperator *BO, Value *Arg0, Value *Arg1,
466                                    CmpInst *Cmp, Intrinsic::ID IID);
467   bool optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT);
468   bool combineToUSubWithOverflow(CmpInst *Cmp, ModifyDT &ModifiedDT);
469   bool combineToUAddWithOverflow(CmpInst *Cmp, ModifyDT &ModifiedDT);
470   void verifyBFIUpdates(Function &F);
471 };
472 
473 } // end anonymous namespace
474 
475 char CodeGenPrepare::ID = 0;
476 
477 INITIALIZE_PASS_BEGIN(CodeGenPrepare, DEBUG_TYPE,
478                       "Optimize for code generation", false, false)
479 INITIALIZE_PASS_DEPENDENCY(BasicBlockSectionsProfileReader)
480 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
481 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
482 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
483 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
484 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
485 INITIALIZE_PASS_END(CodeGenPrepare, DEBUG_TYPE, "Optimize for code generation",
486                     false, false)
487 
488 FunctionPass *llvm::createCodeGenPreparePass() { return new CodeGenPrepare(); }
489 
490 bool CodeGenPrepare::runOnFunction(Function &F) {
491   if (skipFunction(F))
492     return false;
493 
494   DL = &F.getParent()->getDataLayout();
495 
496   bool EverMadeChange = false;
497   // Clear per function information.
498   InsertedInsts.clear();
499   PromotedInsts.clear();
500   FreshBBs.clear();
501 
502   TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
503   SubtargetInfo = TM->getSubtargetImpl(F);
504   TLI = SubtargetInfo->getTargetLowering();
505   TRI = SubtargetInfo->getRegisterInfo();
506   TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
507   TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
508   LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
509   BPI.reset(new BranchProbabilityInfo(F, *LI));
510   BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI));
511   PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
512   BBSectionsProfileReader =
513       getAnalysisIfAvailable<BasicBlockSectionsProfileReader>();
514   OptSize = F.hasOptSize();
515   // Use the basic-block-sections profile to promote hot functions to .text.hot
516   // if requested.
517   if (BBSectionsGuidedSectionPrefix && BBSectionsProfileReader &&
518       BBSectionsProfileReader->isFunctionHot(F.getName())) {
519     F.setSectionPrefix("hot");
520   } else if (ProfileGuidedSectionPrefix) {
521     // The hot attribute overrides profile-count-based hotness, while
522     // profile-count-based hotness overrides the cold attribute.
523     // This is conservative behavior.
524     if (F.hasFnAttribute(Attribute::Hot) ||
525         PSI->isFunctionHotInCallGraph(&F, *BFI))
526       F.setSectionPrefix("hot");
527     // If PSI shows this function is not hot, we place the function into the
528     // unlikely section if (1) PSI shows this is a cold function, or
529     // (2) the function has the cold attribute.
530     else if (PSI->isFunctionColdInCallGraph(&F, *BFI) ||
531              F.hasFnAttribute(Attribute::Cold))
532       F.setSectionPrefix("unlikely");
533     else if (ProfileUnknownInSpecialSection && PSI->hasPartialSampleProfile() &&
534              PSI->isFunctionHotnessUnknown(F))
535       F.setSectionPrefix("unknown");
536   }
537 
538   /// This optimization identifies DIV instructions that can be
539   /// profitably bypassed and carried out with a shorter, faster divide.
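  /// For illustration only (roughly what bypassSlowDivision emits): a slow
  /// `udiv i64 %a, %b` can be guarded by a runtime check that the high bits of
  /// both operands are zero and, when they are, carried out as a 32-bit divide
  /// whose result is zero-extended back to i64.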
540   if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI->isSlowDivBypassed()) {
541     const DenseMap<unsigned int, unsigned int> &BypassWidths =
542         TLI->getBypassSlowDivWidths();
543     BasicBlock *BB = &*F.begin();
544     while (BB != nullptr) {
545       // bypassSlowDivision may create new BBs, but we don't want to reapply the
546       // optimization to those blocks.
547       BasicBlock *Next = BB->getNextNode();
548       // F.hasOptSize is already checked in the outer if statement.
549       if (!llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
550         EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
551       BB = Next;
552     }
553   }
554 
555   // Get rid of @llvm.assume builtins before attempting to eliminate empty
556   // blocks, since there might be blocks that only contain @llvm.assume calls
557   // (plus arguments that we can get rid of).
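  // For example (illustrative IR), a block containing only
  //   %c = icmp ult i32 %x, 100
  //   call void @llvm.assume(i1 %c)
  //   br label %next
  // becomes mostly empty once the assume and its now-dead condition are
  // removed.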
558   EverMadeChange |= eliminateAssumptions(F);
559 
560   // Eliminate blocks that contain only PHI nodes and an
561   // unconditional branch.
562   EverMadeChange |= eliminateMostlyEmptyBlocks(F);
563 
564   ModifyDT ModifiedDT = ModifyDT::NotModifyDT;
565   if (!DisableBranchOpts)
566     EverMadeChange |= splitBranchCondition(F, ModifiedDT);
567 
568   // Split some critical edges where one of the sources is an indirect branch,
569   // to help generate sane code for PHIs involving such edges.
570   EverMadeChange |=
571       SplitIndirectBrCriticalEdges(F, /*IgnoreBlocksWithoutPHI=*/true);
572 
573   // If we are optimizing a huge function, we need to consider the build time
574   // because the basic algorithm's complexity is near O(N!).
575   IsHugeFunc = F.size() > HugeFuncThresholdInCGPP;
576 
577   bool MadeChange = true;
578   bool FuncIterated = false;
579   while (MadeChange) {
580     MadeChange = false;
581     DT.reset();
582 
583     for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
584       if (FuncIterated && !FreshBBs.contains(&BB))
585         continue;
586 
587       ModifyDT ModifiedDTOnIteration = ModifyDT::NotModifyDT;
588       bool Changed = optimizeBlock(BB, ModifiedDTOnIteration);
589 
590       MadeChange |= Changed;
591       if (IsHugeFunc) {
592         // If the BB is updated, it may still have a chance to be optimized.
593         // This usually happens during sink optimization.
594         // For example:
595         //
596         // bb0:
597         // %and = and i32 %a, 4
598         // %cmp = icmp eq i32 %and, 0
599         //
600         // If %cmp is sunk into another BB, %and will also have a chance to sink.
601         if (Changed)
602           FreshBBs.insert(&BB);
603         else if (FuncIterated)
604           FreshBBs.erase(&BB);
605 
606         if (ModifiedDTOnIteration == ModifyDT::ModifyBBDT)
607           DT.reset();
608       } else {
609         // For small/normal functions, we restart BB iteration if the dominator
610         // tree of the Function was changed.
611         if (ModifiedDTOnIteration != ModifyDT::NotModifyDT)
612           break;
613       }
614     }
615     // We have iterated over all BBs in the function (only relevant for huge functions).
616     FuncIterated = IsHugeFunc;
617 
618     if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
619       MadeChange |= mergeSExts(F);
620     if (!LargeOffsetGEPMap.empty())
621       MadeChange |= splitLargeGEPOffsets();
622     MadeChange |= optimizePhiTypes(F);
623 
624     if (MadeChange)
625       eliminateFallThrough(F);
626 
627     // Really free removed instructions during promotion.
628     for (Instruction *I : RemovedInsts)
629       I->deleteValue();
630 
631     EverMadeChange |= MadeChange;
632     SeenChainsForSExt.clear();
633     ValToSExtendedUses.clear();
634     RemovedInsts.clear();
635     LargeOffsetGEPMap.clear();
636     LargeOffsetGEPID.clear();
637   }
638 
639   NewGEPBases.clear();
640   SunkAddrs.clear();
641 
642   if (!DisableBranchOpts) {
643     MadeChange = false;
644     // Use a set vector to get deterministic iteration order. The order the
645     // blocks are removed may affect whether or not PHI nodes in successors
646     // are removed.
647     SmallSetVector<BasicBlock *, 8> WorkList;
648     for (BasicBlock &BB : F) {
649       SmallVector<BasicBlock *, 2> Successors(successors(&BB));
650       MadeChange |= ConstantFoldTerminator(&BB, true);
651       if (!MadeChange)
652         continue;
653 
654       for (BasicBlock *Succ : Successors)
655         if (pred_empty(Succ))
656           WorkList.insert(Succ);
657     }
658 
659     // Delete the dead blocks and any of their dead successors.
660     MadeChange |= !WorkList.empty();
661     while (!WorkList.empty()) {
662       BasicBlock *BB = WorkList.pop_back_val();
663       SmallVector<BasicBlock *, 2> Successors(successors(BB));
664 
665       DeleteDeadBlock(BB);
666 
667       for (BasicBlock *Succ : Successors)
668         if (pred_empty(Succ))
669           WorkList.insert(Succ);
670     }
671 
672     // Merge pairs of basic blocks with unconditional branches, connected by
673     // a single edge.
674     if (EverMadeChange || MadeChange)
675       MadeChange |= eliminateFallThrough(F);
676 
677     EverMadeChange |= MadeChange;
678   }
679 
680   if (!DisableGCOpts) {
681     SmallVector<GCStatepointInst *, 2> Statepoints;
682     for (BasicBlock &BB : F)
683       for (Instruction &I : BB)
684         if (auto *SP = dyn_cast<GCStatepointInst>(&I))
685           Statepoints.push_back(SP);
686     for (auto &I : Statepoints)
687       EverMadeChange |= simplifyOffsetableRelocate(*I);
688   }
689 
690   // Do this last to clean up use-before-def scenarios introduced by other
691   // preparatory transforms.
692   EverMadeChange |= placeDbgValues(F);
693   EverMadeChange |= placePseudoProbes(F);
694 
695 #ifndef NDEBUG
696   if (VerifyBFIUpdates)
697     verifyBFIUpdates(F);
698 #endif
699 
700   return EverMadeChange;
701 }
702 
703 bool CodeGenPrepare::eliminateAssumptions(Function &F) {
704   bool MadeChange = false;
705   for (BasicBlock &BB : F) {
706     CurInstIterator = BB.begin();
707     while (CurInstIterator != BB.end()) {
708       Instruction *I = &*(CurInstIterator++);
709       if (auto *Assume = dyn_cast<AssumeInst>(I)) {
710         MadeChange = true;
711         Value *Operand = Assume->getOperand(0);
712         Assume->eraseFromParent();
713 
714         resetIteratorIfInvalidatedWhileCalling(&BB, [&]() {
715           RecursivelyDeleteTriviallyDeadInstructions(Operand, TLInfo, nullptr);
716         });
717       }
718     }
719   }
720   return MadeChange;
721 }
722 
723 /// An instruction is about to be deleted, so remove all references to it in our
724 /// GEP-tracking data structures.
725 void CodeGenPrepare::removeAllAssertingVHReferences(Value *V) {
726   LargeOffsetGEPMap.erase(V);
727   NewGEPBases.erase(V);
728 
729   auto GEP = dyn_cast<GetElementPtrInst>(V);
730   if (!GEP)
731     return;
732 
733   LargeOffsetGEPID.erase(GEP);
734 
735   auto VecI = LargeOffsetGEPMap.find(GEP->getPointerOperand());
736   if (VecI == LargeOffsetGEPMap.end())
737     return;
738 
739   auto &GEPVector = VecI->second;
740   llvm::erase_if(GEPVector, [=](auto &Elt) { return Elt.first == GEP; });
741 
742   if (GEPVector.empty())
743     LargeOffsetGEPMap.erase(VecI);
744 }
745 
746 // Verify BFI has been updated correctly by recomputing BFI and comparing them.
747 void LLVM_ATTRIBUTE_UNUSED CodeGenPrepare::verifyBFIUpdates(Function &F) {
748   DominatorTree NewDT(F);
749   LoopInfo NewLI(NewDT);
750   BranchProbabilityInfo NewBPI(F, NewLI, TLInfo);
751   BlockFrequencyInfo NewBFI(F, NewBPI, NewLI);
752   NewBFI.verifyMatch(*BFI);
753 }
754 
755 /// Merge basic blocks which are connected by a single edge, where one of the
756 /// basic blocks has a single successor pointing to the other basic block,
757 /// which has a single predecessor.
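/// E.g. (illustrative), if a block ends in `br label %next`, %next has no
/// other predecessors, and %next's address is not taken, then %next is folded
/// into its predecessor via MergeBlockIntoPredecessor below.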
758 bool CodeGenPrepare::eliminateFallThrough(Function &F) {
759   bool Changed = false;
760   // Scan all of the blocks in the function, except for the entry block.
761   // Use a temporary array to avoid iterator being invalidated when
762   // deleting blocks.
763   SmallVector<WeakTrackingVH, 16> Blocks;
764   for (auto &Block : llvm::drop_begin(F))
765     Blocks.push_back(&Block);
766 
767   SmallSet<WeakTrackingVH, 16> Preds;
768   for (auto &Block : Blocks) {
769     auto *BB = cast_or_null<BasicBlock>(Block);
770     if (!BB)
771       continue;
772     // If the destination block has a single pred, then this is a trivial
773     // edge, just collapse it.
774     BasicBlock *SinglePred = BB->getSinglePredecessor();
775 
776     // Don't merge if BB's address is taken.
777     if (!SinglePred || SinglePred == BB || BB->hasAddressTaken())
778       continue;
779 
780     BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
781     if (Term && !Term->isConditional()) {
782       Changed = true;
783       LLVM_DEBUG(dbgs() << "To merge:\n" << *BB << "\n\n\n");
784 
785       // Merge BB into SinglePred and delete it.
786       MergeBlockIntoPredecessor(BB);
787       Preds.insert(SinglePred);
788 
789       if (IsHugeFunc) {
790         // Update FreshBBs to optimize the merged BB.
791         FreshBBs.insert(SinglePred);
792         FreshBBs.erase(BB);
793       }
794     }
795   }
796 
797   // (Repeatedly) merging blocks into their predecessors can create redundant
798   // debug intrinsics.
799   for (const auto &Pred : Preds)
800     if (auto *BB = cast_or_null<BasicBlock>(Pred))
801       RemoveRedundantDbgInstrs(BB);
802 
803   return Changed;
804 }
805 
806 /// Find a destination block for BB if BB is a mergeable empty block.
807 BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) {
808   // If this block doesn't end with an uncond branch, ignore it.
809   BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
810   if (!BI || !BI->isUnconditional())
811     return nullptr;
812 
813   // If the instruction before the branch (skipping debug info) isn't a phi
814   // node, then other stuff is happening here.
815   BasicBlock::iterator BBI = BI->getIterator();
816   if (BBI != BB->begin()) {
817     --BBI;
818     while (isa<DbgInfoIntrinsic>(BBI)) {
819       if (BBI == BB->begin())
820         break;
821       --BBI;
822     }
823     if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
824       return nullptr;
825   }
826 
827   // Do not break infinite loops.
828   BasicBlock *DestBB = BI->getSuccessor(0);
829   if (DestBB == BB)
830     return nullptr;
831 
832   if (!canMergeBlocks(BB, DestBB))
833     DestBB = nullptr;
834 
835   return DestBB;
836 }
837 
838 /// Eliminate blocks that contain only PHI nodes, debug info directives, and an
839 /// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
840 /// edges in ways that are non-optimal for isel. Start by eliminating these
841 /// blocks so we can split them the way we want them.
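/// An illustrative (hypothetical) candidate looks like:
///   bb:                                   ; preds = %a, %b
///     %p = phi i32 [ 0, %a ], [ 1, %b ]
///     br label %dest
/// where %p is only used by PHIs in %dest, so the block can be folded into
/// %dest by rewriting the incoming values of those PHIs.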
842 bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
843   SmallPtrSet<BasicBlock *, 16> Preheaders;
844   SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
845   while (!LoopList.empty()) {
846     Loop *L = LoopList.pop_back_val();
847     llvm::append_range(LoopList, *L);
848     if (BasicBlock *Preheader = L->getLoopPreheader())
849       Preheaders.insert(Preheader);
850   }
851 
852   bool MadeChange = false;
853   // Copy blocks into a temporary array to avoid iterator invalidation issues
854   // as we remove them.
855   // Note that this intentionally skips the entry block.
856   SmallVector<WeakTrackingVH, 16> Blocks;
857   for (auto &Block : llvm::drop_begin(F))
858     Blocks.push_back(&Block);
859 
860   for (auto &Block : Blocks) {
861     BasicBlock *BB = cast_or_null<BasicBlock>(Block);
862     if (!BB)
863       continue;
864     BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
865     if (!DestBB ||
866         !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
867       continue;
868 
869     eliminateMostlyEmptyBlock(BB);
870     MadeChange = true;
871   }
872   return MadeChange;
873 }
874 
875 bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
876                                                    BasicBlock *DestBB,
877                                                    bool isPreheader) {
878   // Do not delete loop preheaders if doing so would create a critical edge.
879   // Loop preheaders can be good locations to spill registers. If the
880   // preheader is deleted and we create a critical edge, registers may be
881   // spilled in the loop body instead.
882   if (!DisablePreheaderProtect && isPreheader &&
883       !(BB->getSinglePredecessor() &&
884         BB->getSinglePredecessor()->getSingleSuccessor()))
885     return false;
886 
887   // Skip merging if the block's successor is also a successor to any callbr
888   // that leads to this block.
889   // FIXME: Is this really needed? Is this a correctness issue?
890   for (BasicBlock *Pred : predecessors(BB)) {
891     if (auto *CBI = dyn_cast<CallBrInst>((Pred)->getTerminator()))
892       for (unsigned i = 0, e = CBI->getNumSuccessors(); i != e; ++i)
893         if (DestBB == CBI->getSuccessor(i))
894           return false;
895   }
896 
897   // Try to skip merging if the unique predecessor of BB is terminated by a
898   // switch or indirect branch instruction, and BB is used as an incoming block
899   // of PHIs in DestBB. In such a case, merging BB and DestBB would cause ISel
900   // to add COPY instructions in the predecessor of BB instead of in BB (if it
901   // is not merged). Note that the critical edge created by merging such blocks
902   // won't be split in MachineSink because the jump table is not analyzable. By
903   // keeping such an empty block (BB), ISel will place COPY instructions in BB,
904   // not in the predecessor of BB.
905   BasicBlock *Pred = BB->getUniquePredecessor();
906   if (!Pred || !(isa<SwitchInst>(Pred->getTerminator()) ||
907                  isa<IndirectBrInst>(Pred->getTerminator())))
908     return true;
909 
910   if (BB->getTerminator() != BB->getFirstNonPHIOrDbg())
911     return true;
912 
913   // We use a simple cost heuristic which determines that skipping the merge
914   // is profitable if the cost of skipping it is less than the cost of
915   // merging: Cost(skipping merging) < Cost(merging BB), where
916   // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and
917   // the Cost(merging BB) is Freq(Pred) * Cost(Copy).
918   // Assuming Cost(Copy) == Cost(Branch), we could simplify it to :
919   //   Freq(Pred) / Freq(BB) > 2.
920   // Note that if there are multiple empty blocks sharing the same incoming
921   // value for the PHIs in the DestBB, we consider them together. In such
922   // case, Cost(merging BB) will be the sum of their frequencies.
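  // As a worked example with the default FreqRatioToSkipMerge of 2: if
  // Freq(Pred) = 300 and Freq(BB) = 100, then 300 > 100 * 2 and we skip the
  // merge (return false); if instead Freq(Pred) = 150, then 150 <= 200 and
  // merging is still considered profitable.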
923 
924   if (!isa<PHINode>(DestBB->begin()))
925     return true;
926 
927   SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;
928 
929   // Find all other incoming blocks from which incoming values of all PHIs in
930   // DestBB are the same as the ones from BB.
931   for (BasicBlock *DestBBPred : predecessors(DestBB)) {
932     if (DestBBPred == BB)
933       continue;
934 
935     if (llvm::all_of(DestBB->phis(), [&](const PHINode &DestPN) {
936           return DestPN.getIncomingValueForBlock(BB) ==
937                  DestPN.getIncomingValueForBlock(DestBBPred);
938         }))
939       SameIncomingValueBBs.insert(DestBBPred);
940   }
941 
942   // See if all of BB's incoming values are the same as the value from Pred.
943   // In this case, there is no reason to skip merging because COPYs are
944   // expected to be placed in Pred already.
945   if (SameIncomingValueBBs.count(Pred))
946     return true;
947 
948   BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
949   BlockFrequency BBFreq = BFI->getBlockFreq(BB);
950 
951   for (auto *SameValueBB : SameIncomingValueBBs)
952     if (SameValueBB->getUniquePredecessor() == Pred &&
953         DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
954       BBFreq += BFI->getBlockFreq(SameValueBB);
955 
956   return PredFreq.getFrequency() <=
957          BBFreq.getFrequency() * FreqRatioToSkipMerge;
958 }
959 
960 /// Return true if we can merge BB into DestBB if there is a single
961 /// unconditional branch between them, and BB contains no other non-phi
962 /// instructions.
963 bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
964                                     const BasicBlock *DestBB) const {
965   // We only want to eliminate blocks whose phi nodes are used by phi nodes in
966   // the successor.  If there are more complex conditions (e.g. preheaders),
967   // don't mess around with them.
968   for (const PHINode &PN : BB->phis()) {
969     for (const User *U : PN.users()) {
970       const Instruction *UI = cast<Instruction>(U);
971       if (UI->getParent() != DestBB || !isa<PHINode>(UI))
972         return false;
973       // If User is inside DestBB block and it is a PHINode then check
974       // incoming value. If incoming value is not from BB then this is
975       // a complex condition (e.g. preheaders) we want to avoid here.
976       if (UI->getParent() == DestBB) {
977         if (const PHINode *UPN = dyn_cast<PHINode>(UI))
978           for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
979             Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
980             if (Insn && Insn->getParent() == BB &&
981                 Insn->getParent() != UPN->getIncomingBlock(I))
982               return false;
983           }
984       }
985     }
986   }
987 
988   // If BB and DestBB contain any common predecessors, then the phi nodes in BB
989   // and DestBB may have conflicting incoming values for the block.  If so, we
990   // can't merge the block.
991   const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
992   if (!DestBBPN)
993     return true; // no conflict.
994 
995   // Collect the preds of BB.
996   SmallPtrSet<const BasicBlock *, 16> BBPreds;
997   if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
998     // It is faster to get preds from a PHI than with pred_iterator.
999     for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
1000       BBPreds.insert(BBPN->getIncomingBlock(i));
1001   } else {
1002     BBPreds.insert(pred_begin(BB), pred_end(BB));
1003   }
1004 
1005   // Walk the preds of DestBB.
1006   for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
1007     BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
1008     if (BBPreds.count(Pred)) { // Common predecessor?
1009       for (const PHINode &PN : DestBB->phis()) {
1010         const Value *V1 = PN.getIncomingValueForBlock(Pred);
1011         const Value *V2 = PN.getIncomingValueForBlock(BB);
1012 
1013         // If V2 is a phi node in BB, look up what the mapped value will be.
1014         if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
1015           if (V2PN->getParent() == BB)
1016             V2 = V2PN->getIncomingValueForBlock(Pred);
1017 
1018         // If there is a conflict, bail out.
1019         if (V1 != V2)
1020           return false;
1021       }
1022     }
1023   }
1024 
1025   return true;
1026 }
1027 
1028 /// Replace all old uses with new ones, and push the updated BBs into FreshBBs.
1029 static void replaceAllUsesWith(Value *Old, Value *New,
1030                                SmallSet<BasicBlock *, 32> &FreshBBs,
1031                                bool IsHuge) {
1032   auto *OldI = dyn_cast<Instruction>(Old);
1033   if (OldI) {
1034     for (Value::user_iterator UI = OldI->user_begin(), E = OldI->user_end();
1035          UI != E; ++UI) {
1036       Instruction *User = cast<Instruction>(*UI);
1037       if (IsHuge)
1038         FreshBBs.insert(User->getParent());
1039     }
1040   }
1041   Old->replaceAllUsesWith(New);
1042 }
1043 
1044 /// Eliminate a basic block that has only phi's and an unconditional branch in
1045 /// it.
1046 void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
1047   BranchInst *BI = cast<BranchInst>(BB->getTerminator());
1048   BasicBlock *DestBB = BI->getSuccessor(0);
1049 
1050   LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n"
1051                     << *BB << *DestBB);
1052 
1053   // If the destination block has a single pred, then this is a trivial edge,
1054   // just collapse it.
1055   if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
1056     if (SinglePred != DestBB) {
1057       assert(SinglePred == BB &&
1058              "Single predecessor not the same as predecessor");
1059       // Merge DestBB into SinglePred/BB and delete it.
1060       MergeBlockIntoPredecessor(DestBB);
1061       // Note: BB(=SinglePred) will not be deleted on this path.
1062       // DestBB(=its single successor) is the one that was deleted.
1063       LLVM_DEBUG(dbgs() << "AFTER:\n" << *SinglePred << "\n\n\n");
1064 
1065       if (IsHugeFunc) {
1066         // Update FreshBBs to optimize the merged BB.
1067         FreshBBs.insert(SinglePred);
1068         FreshBBs.erase(DestBB);
1069       }
1070       return;
1071     }
1072   }
1073 
1074   // Otherwise, we have multiple predecessors of BB.  Update the PHIs in DestBB
1075   // to handle the new incoming edges it is about to have.
1076   for (PHINode &PN : DestBB->phis()) {
1077     // Remove the incoming value for BB, and remember it.
1078     Value *InVal = PN.removeIncomingValue(BB, false);
1079 
1080     // Two options: either the InVal is a phi node defined in BB or it is some
1081     // value that dominates BB.
1082     PHINode *InValPhi = dyn_cast<PHINode>(InVal);
1083     if (InValPhi && InValPhi->getParent() == BB) {
1084       // Add all of the input values of the input PHI as inputs of this phi.
1085       for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
1086         PN.addIncoming(InValPhi->getIncomingValue(i),
1087                        InValPhi->getIncomingBlock(i));
1088     } else {
1089       // Otherwise, add one instance of the dominating value for each edge that
1090       // we will be adding.
1091       if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
1092         for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
1093           PN.addIncoming(InVal, BBPN->getIncomingBlock(i));
1094       } else {
1095         for (BasicBlock *Pred : predecessors(BB))
1096           PN.addIncoming(InVal, Pred);
1097       }
1098     }
1099   }
1100 
1101   // The PHIs are now updated, change everything that refers to BB to use
1102   // DestBB and remove BB.
1103   BB->replaceAllUsesWith(DestBB);
1104   BB->eraseFromParent();
1105   ++NumBlocksElim;
1106 
1107   LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
1108 }
1109 
1110 // Computes a map of base pointer relocation instructions to corresponding
1111 // derived pointer relocation instructions given a vector of all relocate calls
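// For example, using the statepoint notation from simplifyOffsetableRelocate
// below, relocate(%tok, i32 4, i32 4) is a base relocation (base index ==
// derived index) and relocate(%tok, i32 4, i32 5) is a derived relocation that
// gets keyed to it in RelocateInstMap.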
1112 static void computeBaseDerivedRelocateMap(
1113     const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
1114     DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>>
1115         &RelocateInstMap) {
1116   // Collect information in two maps: one primarily for locating the base object
1117   // while filling the second map; the second map is the final structure holding
1118   // a mapping between Base and corresponding Derived relocate calls
1119   DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap;
1120   for (auto *ThisRelocate : AllRelocateCalls) {
1121     auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
1122                             ThisRelocate->getDerivedPtrIndex());
1123     RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
1124   }
1125   for (auto &Item : RelocateIdxMap) {
1126     std::pair<unsigned, unsigned> Key = Item.first;
1127     if (Key.first == Key.second)
1128       // Base relocation: nothing to insert
1129       continue;
1130 
1131     GCRelocateInst *I = Item.second;
1132     auto BaseKey = std::make_pair(Key.first, Key.first);
1133 
1134     // We're iterating over RelocateIdxMap so we cannot modify it.
1135     auto MaybeBase = RelocateIdxMap.find(BaseKey);
1136     if (MaybeBase == RelocateIdxMap.end())
1137       // TODO: We might want to insert a new base object relocate and gep off
1138       // that, if there are enough derived object relocates.
1139       continue;
1140 
1141     RelocateInstMap[MaybeBase->second].push_back(I);
1142   }
1143 }
1144 
1145 // Accepts a GEP and extracts the operands into a vector provided they're all
1146 // small integer constants
1147 static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
1148                                           SmallVectorImpl<Value *> &OffsetV) {
1149   for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
1150     // Only accept small constant integer operands
1151     auto *Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
1152     if (!Op || Op->getZExtValue() > 20)
1153       return false;
1154   }
1155 
1156   for (unsigned i = 1; i < GEP->getNumOperands(); i++)
1157     OffsetV.push_back(GEP->getOperand(i));
1158   return true;
1159 }
1160 
1161 // Takes a RelocatedBase (base pointer relocation instruction) and Targets to
1162 // replace, computes a replacement, and applies it.
1163 static bool
1164 simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase,
1165                           const SmallVectorImpl<GCRelocateInst *> &Targets) {
1166   bool MadeChange = false;
1167   // We must ensure that the relocation of a derived pointer is defined after
1168   // the relocation of its base pointer. If we find a relocation corresponding
1169   // to this base that is defined earlier than the base relocation, we move the
1170   // base relocation right before that relocation. We only consider relocations
1171   // in the same basic block as the base relocation; relocations from other
1172   // basic blocks are skipped by this optimization and we do not care about them.
1173   for (auto R = RelocatedBase->getParent()->getFirstInsertionPt();
1174        &*R != RelocatedBase; ++R)
1175     if (auto *RI = dyn_cast<GCRelocateInst>(R))
1176       if (RI->getStatepoint() == RelocatedBase->getStatepoint())
1177         if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) {
1178           RelocatedBase->moveBefore(RI);
1179           break;
1180         }
1181 
1182   for (GCRelocateInst *ToReplace : Targets) {
1183     assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
1184            "Not relocating a derived object of the original base object");
1185     if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
1186       // A duplicate relocate call. TODO: coalesce duplicates.
1187       continue;
1188     }
1189 
1190     if (RelocatedBase->getParent() != ToReplace->getParent()) {
1191       // Base and derived relocates are in different basic blocks.
1192       // In this case the transform is only valid when the base dominates the
1193       // derived relocate. However, it would be too expensive to check dominance
1194       // for each such relocate, so we skip the whole transformation.
1195       continue;
1196     }
1197 
1198     Value *Base = ToReplace->getBasePtr();
1199     auto *Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
1200     if (!Derived || Derived->getPointerOperand() != Base)
1201       continue;
1202 
1203     SmallVector<Value *, 2> OffsetV;
1204     if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
1205       continue;
1206 
1207     // Create a Builder and replace the target callsite with a gep
1208     assert(RelocatedBase->getNextNode() &&
1209            "Should always have one since it's not a terminator");
1210 
1211     // Insert after RelocatedBase
1212     IRBuilder<> Builder(RelocatedBase->getNextNode());
1213     Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());
1214 
1215     // If gc_relocate does not match the actual type, cast it to the right type.
1216     // In theory, there must be a bitcast after gc_relocate if the type does not
1217     // match, and we should reuse it to get the derived pointer. But there could
1218     // be cases like this:
1219     // bb1:
1220     //  ...
1221     //  %g1 = call coldcc i8 addrspace(1)*
1222     //  @llvm.experimental.gc.relocate.p1i8(...) br label %merge
1223     //
1224     // bb2:
1225     //  ...
1226     //  %g2 = call coldcc i8 addrspace(1)*
1227     //  @llvm.experimental.gc.relocate.p1i8(...) br label %merge
1228     //
1229     // merge:
1230     //  %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
1231     //  %cast = bitcast i8 addrspace(1)* %p1 in to i32 addrspace(1)*
1232     //
1233     // In this case, we cannot find the bitcast anymore, so we insert a new
1234     // bitcast whether or not one already exists. In this way, we can handle
1235     // all cases, and the extra bitcast should be optimized away in later
1236     // passes.
1237     Value *ActualRelocatedBase = RelocatedBase;
1238     if (RelocatedBase->getType() != Base->getType()) {
1239       ActualRelocatedBase =
1240           Builder.CreateBitCast(RelocatedBase, Base->getType());
1241     }
1242     Value *Replacement =
1243         Builder.CreateGEP(Derived->getSourceElementType(), ActualRelocatedBase,
1244                           ArrayRef(OffsetV));
1245     Replacement->takeName(ToReplace);
1246     // If the newly generated derived pointer's type does not match the original
1247     // derived pointer's type, cast the new derived pointer to match it. Same
1248     // reasoning as above.
1249     Value *ActualReplacement = Replacement;
1250     if (Replacement->getType() != ToReplace->getType()) {
1251       ActualReplacement =
1252           Builder.CreateBitCast(Replacement, ToReplace->getType());
1253     }
1254     ToReplace->replaceAllUsesWith(ActualReplacement);
1255     ToReplace->eraseFromParent();
1256 
1257     MadeChange = true;
1258   }
1259   return MadeChange;
1260 }
1261 
1262 // Turns this:
1263 //
1264 // %base = ...
1265 // %ptr = gep %base + 15
1266 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1267 // %base' = relocate(%tok, i32 4, i32 4)
1268 // %ptr' = relocate(%tok, i32 4, i32 5)
1269 // %val = load %ptr'
1270 //
1271 // into this:
1272 //
1273 // %base = ...
1274 // %ptr = gep %base + 15
1275 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1276 // %base' = gc.relocate(%tok, i32 4, i32 4)
1277 // %ptr' = gep %base' + 15
1278 // %val = load %ptr'
1279 bool CodeGenPrepare::simplifyOffsetableRelocate(GCStatepointInst &I) {
1280   bool MadeChange = false;
1281   SmallVector<GCRelocateInst *, 2> AllRelocateCalls;
1282   for (auto *U : I.users())
1283     if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U))
1284       // Collect all the relocate calls associated with a statepoint
1285       AllRelocateCalls.push_back(Relocate);
1286 
1287   // We need at least one base pointer relocation + one derived pointer
1288   // relocation for this transformation to apply.
1289   if (AllRelocateCalls.size() < 2)
1290     return false;
1291 
1292   // RelocateInstMap is a mapping from the base relocate instruction to the
1293   // corresponding derived relocate instructions
1294   DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap;
1295   computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
1296   if (RelocateInstMap.empty())
1297     return false;
1298 
1299   for (auto &Item : RelocateInstMap)
1300     // Item.first is the RelocatedBase to offset against
1301     // Item.second is the vector of Targets to replace
1302     MadeChange = simplifyRelocatesOffABase(Item.first, Item.second);
1303   return MadeChange;
1304 }
1305 
1306 /// Sink the specified cast instruction into its user blocks.
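/// For example (block and value names here are illustrative), a cast whose
/// only use lives in another block is re-created next to that use, so isel
/// sees it locally and no cross-block virtual register is needed:
///
/// bb0:
///   %c = bitcast i8* %p to i32*
///   br label %bb1
/// bb1:
///   %v = load i32, i32* %c
/// ==>
/// bb0:
///   br label %bb1
/// bb1:
///   %c.sunk = bitcast i8* %p to i32*
///   %v = load i32, i32* %c.sunk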
1307 static bool SinkCast(CastInst *CI) {
1308   BasicBlock *DefBB = CI->getParent();
1309 
1310   /// InsertedCasts - Only insert a cast in each block once.
1311   DenseMap<BasicBlock *, CastInst *> InsertedCasts;
1312 
1313   bool MadeChange = false;
1314   for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
1315        UI != E;) {
1316     Use &TheUse = UI.getUse();
1317     Instruction *User = cast<Instruction>(*UI);
1318 
1319     // Figure out which BB this cast is used in.  For PHI's this is the
1320     // appropriate predecessor block.
1321     BasicBlock *UserBB = User->getParent();
1322     if (PHINode *PN = dyn_cast<PHINode>(User)) {
1323       UserBB = PN->getIncomingBlock(TheUse);
1324     }
1325 
1326     // Preincrement use iterator so we don't invalidate it.
1327     ++UI;
1328 
1329     // The first insertion point of a block containing an EH pad is after the
1330     // pad.  If the pad is the user, we cannot sink the cast past the pad.
1331     if (User->isEHPad())
1332       continue;
1333 
1334     // If the block selected to receive the cast is an EH pad that does not
1335     // allow non-PHI instructions before the terminator, we can't sink the
1336     // cast.
1337     if (UserBB->getTerminator()->isEHPad())
1338       continue;
1339 
1340     // If this user is in the same block as the cast, don't change the cast.
1341     if (UserBB == DefBB)
1342       continue;
1343 
1344     // If we have already inserted a cast into this block, use it.
1345     CastInst *&InsertedCast = InsertedCasts[UserBB];
1346 
1347     if (!InsertedCast) {
1348       BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1349       assert(InsertPt != UserBB->end());
1350       InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0),
1351                                       CI->getType(), "", &*InsertPt);
1352       InsertedCast->setDebugLoc(CI->getDebugLoc());
1353     }
1354 
1355     // Replace a use of the cast with a use of the new cast.
1356     TheUse = InsertedCast;
1357     MadeChange = true;
1358     ++NumCastUses;
1359   }
1360 
1361   // If we removed all uses, nuke the cast.
1362   if (CI->use_empty()) {
1363     salvageDebugInfo(*CI);
1364     CI->eraseFromParent();
1365     MadeChange = true;
1366   }
1367 
1368   return MadeChange;
1369 }
1370 
1371 /// If the specified cast instruction is a noop copy (e.g. it's casting from
1372 /// one pointer type to another, i32->i8 on PPC), sink it into user blocks to
1373 /// reduce the number of virtual registers that must be created and coalesced.
1374 ///
1375 /// Return true if any changes are made.
1376 static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI,
1377                                        const DataLayout &DL) {
1378   // Sink only "cheap" (or nop) address-space casts.  This is a weaker condition
1379   // than sinking only nop casts, but is helpful on some platforms.
1380   if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) {
1381     if (!TLI.isFreeAddrSpaceCast(ASC->getSrcAddressSpace(),
1382                                  ASC->getDestAddressSpace()))
1383       return false;
1384   }
1385 
1386   // If this is a noop copy,
1387   EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType());
1388   EVT DstVT = TLI.getValueType(DL, CI->getType());
1389 
1390   // Bail if this is an fp<->int conversion.
1391   if (SrcVT.isInteger() != DstVT.isInteger())
1392     return false;
1393 
1394   // If this is an extension, it will be a zero or sign extension, which
1395   // isn't a noop.
1396   if (SrcVT.bitsLT(DstVT))
1397     return false;
1398 
1399   // If these values will be promoted, find out what they will be promoted
1400   // to.  This helps us consider truncates on PPC as noop copies when they
1401   // are.
1402   if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
1403       TargetLowering::TypePromoteInteger)
1404     SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
1405   if (TLI.getTypeAction(CI->getContext(), DstVT) ==
1406       TargetLowering::TypePromoteInteger)
1407     DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);
1408 
1409   // If, after promotion, these are the same types, this is a noop copy.
1410   if (SrcVT != DstVT)
1411     return false;
1412 
1413   return SinkCast(CI);
1414 }
1415 
1416 // Match a simple increment by constant operation.  Note that if a sub is
1417 // matched, the step is negated (as if the step had been canonicalized to
1418 // an add, even though we leave the instruction alone.)
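// For example, "%iv.next = sub i32 %iv, 4" matches with LHS = %iv and
// Step = i32 -4, just as if it had been written "%iv.next = add i32 %iv, -4".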
1419 bool matchIncrement(const Instruction *IVInc, Instruction *&LHS,
1420                     Constant *&Step) {
1421   if (match(IVInc, m_Add(m_Instruction(LHS), m_Constant(Step))) ||
1422       match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::uadd_with_overflow>(
1423                        m_Instruction(LHS), m_Constant(Step)))))
1424     return true;
1425   if (match(IVInc, m_Sub(m_Instruction(LHS), m_Constant(Step))) ||
1426       match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::usub_with_overflow>(
1427                        m_Instruction(LHS), m_Constant(Step))))) {
1428     Step = ConstantExpr::getNeg(Step);
1429     return true;
1430   }
1431   return false;
1432 }
1433 
1434 /// If given \p PN is an inductive variable with value IVInc coming from the
1435 /// backedge, and on each iteration it gets increased by Step, return pair
1436 /// <IVInc, Step>. Otherwise, return std::nullopt.
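/// For example (names are illustrative), for the canonical loop below this
/// returns the pair <%iv.next, i32 1>:
///
/// loop:
///   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
///   ...
///   %iv.next = add i32 %iv, 1
///   br i1 %done, label %exit, label %loop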
1437 static std::optional<std::pair<Instruction *, Constant *>>
1438 getIVIncrement(const PHINode *PN, const LoopInfo *LI) {
1439   const Loop *L = LI->getLoopFor(PN->getParent());
1440   if (!L || L->getHeader() != PN->getParent() || !L->getLoopLatch())
1441     return std::nullopt;
1442   auto *IVInc =
1443       dyn_cast<Instruction>(PN->getIncomingValueForBlock(L->getLoopLatch()));
1444   if (!IVInc || LI->getLoopFor(IVInc->getParent()) != L)
1445     return std::nullopt;
1446   Instruction *LHS = nullptr;
1447   Constant *Step = nullptr;
1448   if (matchIncrement(IVInc, LHS, Step) && LHS == PN)
1449     return std::make_pair(IVInc, Step);
1450   return std::nullopt;
1451 }
1452 
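/// Return true if \p V is the increment of an induction variable recognized by
/// getIVIncrement, i.e. the value fed back into the indvar phi along the loop
/// latch.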
1453 static bool isIVIncrement(const Value *V, const LoopInfo *LI) {
1454   auto *I = dyn_cast<Instruction>(V);
1455   if (!I)
1456     return false;
1457   Instruction *LHS = nullptr;
1458   Constant *Step = nullptr;
1459   if (!matchIncrement(I, LHS, Step))
1460     return false;
1461   if (auto *PN = dyn_cast<PHINode>(LHS))
1462     if (auto IVInc = getIVIncrement(PN, LI))
1463       return IVInc->first == I;
1464   return false;
1465 }
1466 
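/// Replace the math/compare pair rooted at \p BO and \p Cmp with a single call
/// to the overflow intrinsic \p IID. For example (illustrative value names),
/// with IID == Intrinsic::uadd_with_overflow the pair
///
///   %add = add i64 %x, %y
///   %ov  = icmp ult i64 %add, %x
///
/// is rewritten to
///
///   %m   = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %x, i64 %y)
///   %add = extractvalue { i64, i1 } %m, 0
///   %ov  = extractvalue { i64, i1 } %m, 1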
1467 bool CodeGenPrepare::replaceMathCmpWithIntrinsic(BinaryOperator *BO,
1468                                                  Value *Arg0, Value *Arg1,
1469                                                  CmpInst *Cmp,
1470                                                  Intrinsic::ID IID) {
1471   auto IsReplacableIVIncrement = [this, &Cmp](BinaryOperator *BO) {
1472     if (!isIVIncrement(BO, LI))
1473       return false;
1474     const Loop *L = LI->getLoopFor(BO->getParent());
1475     assert(L && "L should not be null after isIVIncrement()");
1476     // Do not risk moving the increment into a child loop.
1477     if (LI->getLoopFor(Cmp->getParent()) != L)
1478       return false;
1479 
1480     // Finally, we need to ensure that the insert point will dominate all
1481     // existing uses of the increment.
1482 
1483     auto &DT = getDT(*BO->getParent()->getParent());
1484     if (DT.dominates(Cmp->getParent(), BO->getParent()))
1485       // If we're moving up the dom tree, all uses are trivially dominated.
1486       // (This is the common case for code produced by LSR.)
1487       return true;
1488 
1489     // Otherwise, special case the single use in the phi recurrence.
1490     return BO->hasOneUse() && DT.dominates(Cmp->getParent(), L->getLoopLatch());
1491   };
1492   if (BO->getParent() != Cmp->getParent() && !IsReplacableIVIncrement(BO)) {
1493     // We used to use a dominator tree here to allow multi-block optimization.
1494     // But that was problematic because:
1495     // 1. It could cause a perf regression by hoisting the math op into the
1496     //    critical path.
1497     // 2. It could cause a perf regression by creating a value that was live
1498     //    across multiple blocks and increasing register pressure.
1499     // 3. Use of a dominator tree could cause large compile-time regression.
1500     //    This is because we recompute the DT on every change in the main CGP
1501     //    run-loop. The recomputing is probably unnecessary in many cases, so if
1502     //    that was fixed, using a DT here would be ok.
1503     //
1504     // There is one important particular case we still want to handle: if BO is
1505     // the IV increment. Important properties that make it profitable:
1506     // - We can speculate IV increment anywhere in the loop (as long as the
1507     //   indvar Phi is its only user);
1508     // - Upon computing Cmp, we effectively compute something equivalent to the
1509     //   IV increment (even though it looks different in the IR). So moving it
1510     //   up to the cmp point does not really increase register pressure.
1511     return false;
1512   }
1513 
1514   // We allow matching the canonical IR (add X, C) back to (usubo X, -C).
1515   if (BO->getOpcode() == Instruction::Add &&
1516       IID == Intrinsic::usub_with_overflow) {
1517     assert(isa<Constant>(Arg1) && "Unexpected input for usubo");
1518     Arg1 = ConstantExpr::getNeg(cast<Constant>(Arg1));
1519   }
1520 
1521   // Insert at the first instruction of the pair.
1522   Instruction *InsertPt = nullptr;
1523   for (Instruction &Iter : *Cmp->getParent()) {
1524     // If BO is an XOR, it is not guaranteed that it comes after both inputs to
1525     // the overflow intrinsic are defined.
1526     if ((BO->getOpcode() != Instruction::Xor && &Iter == BO) || &Iter == Cmp) {
1527       InsertPt = &Iter;
1528       break;
1529     }
1530   }
1531   assert(InsertPt != nullptr && "Parent block did not contain cmp or binop");
1532 
1533   IRBuilder<> Builder(InsertPt);
1534   Value *MathOV = Builder.CreateBinaryIntrinsic(IID, Arg0, Arg1);
1535   if (BO->getOpcode() != Instruction::Xor) {
1536     Value *Math = Builder.CreateExtractValue(MathOV, 0, "math");
1537     replaceAllUsesWith(BO, Math, FreshBBs, IsHugeFunc);
1538   } else
1539     assert(BO->hasOneUse() &&
1540            "Patterns with XOr should use the BO only in the compare");
1541   Value *OV = Builder.CreateExtractValue(MathOV, 1, "ov");
1542   replaceAllUsesWith(Cmp, OV, FreshBBs, IsHugeFunc);
1543   Cmp->eraseFromParent();
1544   BO->eraseFromParent();
1545   return true;
1546 }
1547 
1548 /// Match special-case patterns that check for unsigned add overflow.
1549 static bool matchUAddWithOverflowConstantEdgeCases(CmpInst *Cmp,
1550                                                    BinaryOperator *&Add) {
1551   // Add = add A, 1; Cmp = icmp eq A,-1 (overflow if A is max val)
1552   // Add = add A,-1; Cmp = icmp ne A, 0 (overflow if A is non-zero)
1553   Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
1554 
1555   // We are not expecting non-canonical/degenerate code. Just bail out.
1556   if (isa<Constant>(A))
1557     return false;
1558 
1559   ICmpInst::Predicate Pred = Cmp->getPredicate();
1560   if (Pred == ICmpInst::ICMP_EQ && match(B, m_AllOnes()))
1561     B = ConstantInt::get(B->getType(), 1);
1562   else if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt()))
1563     B = ConstantInt::get(B->getType(), -1);
1564   else
1565     return false;
1566 
1567   // Check the users of the variable operand of the compare looking for an add
1568   // with the adjusted constant.
1569   for (User *U : A->users()) {
1570     if (match(U, m_Add(m_Specific(A), m_Specific(B)))) {
1571       Add = cast<BinaryOperator>(U);
1572       return true;
1573     }
1574   }
1575   return false;
1576 }
1577 
1578 /// Try to combine the compare into a call to the llvm.uadd.with.overflow
1579 /// intrinsic. Return true if any changes were made.
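/// In addition to the canonical form "icmp ult (add X, Y), X", the constant
/// edge cases are handled as well. For example (illustrative names):
///
///   %a1  = add i32 %a, 1
///   %cmp = icmp eq i32 %a, -1
///
/// overflows exactly when %a is the maximum value, so it is also rewritten to
/// use @llvm.uadd.with.overflow.i32(i32 %a, i32 1).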
1580 bool CodeGenPrepare::combineToUAddWithOverflow(CmpInst *Cmp,
1581                                                ModifyDT &ModifiedDT) {
1582   Value *A, *B;
1583   BinaryOperator *Add;
1584   if (!match(Cmp, m_UAddWithOverflow(m_Value(A), m_Value(B), m_BinOp(Add)))) {
1585     if (!matchUAddWithOverflowConstantEdgeCases(Cmp, Add))
1586       return false;
1587     // Set A and B from the add found by matchUAddWithOverflowConstantEdgeCases.
1588     A = Add->getOperand(0);
1589     B = Add->getOperand(1);
1590   }
1591 
1592   if (!TLI->shouldFormOverflowOp(ISD::UADDO,
1593                                  TLI->getValueType(*DL, Add->getType()),
1594                                  Add->hasNUsesOrMore(2)))
1595     return false;
1596 
1597   // We don't want to move around uses of condition values this late, so we
1598   // check if it is legal to create the call to the intrinsic in the basic
1599   // block containing the icmp.
1600   if (Add->getParent() != Cmp->getParent() && !Add->hasOneUse())
1601     return false;
1602 
1603   if (!replaceMathCmpWithIntrinsic(Add, A, B, Cmp,
1604                                    Intrinsic::uadd_with_overflow))
1605     return false;
1606 
1607   // Reset callers - do not crash by iterating over a dead instruction.
1608   ModifiedDT = ModifyDT::ModifyInstDT;
1609   return true;
1610 }
1611 
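/// Try to combine a compare and subtract into a call to the
/// llvm.usub.with.overflow intrinsic. For example (illustrative names):
///
///   %sub = sub i32 %a, %b
///   %cmp = icmp ult i32 %a, %b   ; borrow check
/// ==>
///   %m   = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
///   %sub = extractvalue { i32, i1 } %m, 0
///   %cmp = extractvalue { i32, i1 } %m, 1
///
/// Return true if any changes were made.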
1612 bool CodeGenPrepare::combineToUSubWithOverflow(CmpInst *Cmp,
1613                                                ModifyDT &ModifiedDT) {
1614   // We are not expecting non-canonical/degenerate code. Just bail out.
1615   Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
1616   if (isa<Constant>(A) && isa<Constant>(B))
1617     return false;
1618 
1619   // Convert (A u> B) to (A u< B) to simplify pattern matching.
1620   ICmpInst::Predicate Pred = Cmp->getPredicate();
1621   if (Pred == ICmpInst::ICMP_UGT) {
1622     std::swap(A, B);
1623     Pred = ICmpInst::ICMP_ULT;
1624   }
1625   // Convert special-case: (A == 0) is the same as (A u< 1).
1626   if (Pred == ICmpInst::ICMP_EQ && match(B, m_ZeroInt())) {
1627     B = ConstantInt::get(B->getType(), 1);
1628     Pred = ICmpInst::ICMP_ULT;
1629   }
1630   // Convert special-case: (A != 0) is the same as (0 u< A).
1631   if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) {
1632     std::swap(A, B);
1633     Pred = ICmpInst::ICMP_ULT;
1634   }
1635   if (Pred != ICmpInst::ICMP_ULT)
1636     return false;
1637 
1638   // Walk the users of a variable operand of a compare looking for a subtract or
1639   // add with that same operand. Also match the 2nd operand of the compare to
1640   // the add/sub, but that may be a negated constant operand of an add.
1641   Value *CmpVariableOperand = isa<Constant>(A) ? B : A;
1642   BinaryOperator *Sub = nullptr;
1643   for (User *U : CmpVariableOperand->users()) {
1644     // A - B, A u< B --> usubo(A, B)
1645     if (match(U, m_Sub(m_Specific(A), m_Specific(B)))) {
1646       Sub = cast<BinaryOperator>(U);
1647       break;
1648     }
1649 
1650     // A + (-C), A u< C (canonicalized form of (sub A, C))
1651     const APInt *CmpC, *AddC;
1652     if (match(U, m_Add(m_Specific(A), m_APInt(AddC))) &&
1653         match(B, m_APInt(CmpC)) && *AddC == -(*CmpC)) {
1654       Sub = cast<BinaryOperator>(U);
1655       break;
1656     }
1657   }
1658   if (!Sub)
1659     return false;
1660 
1661   if (!TLI->shouldFormOverflowOp(ISD::USUBO,
1662                                  TLI->getValueType(*DL, Sub->getType()),
1663                                  Sub->hasNUsesOrMore(2)))
1664     return false;
1665 
1666   if (!replaceMathCmpWithIntrinsic(Sub, Sub->getOperand(0), Sub->getOperand(1),
1667                                    Cmp, Intrinsic::usub_with_overflow))
1668     return false;
1669 
1670   // Reset callers - do not crash by iterating over a dead instruction.
1671   ModifiedDT = ModifyDT::ModifyInstDT;
1672   return true;
1673 }
1674 
1675 /// Sink the given CmpInst into user blocks to reduce the number of virtual
1676 /// registers that must be created and coalesced. This is a clear win except on
1677 /// targets with multiple condition code registers (PowerPC), where it might
1678 /// lose; some adjustment may be wanted there.
1679 ///
1680 /// Return true if any changes are made.
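/// For example (illustrative names), a cmp whose only user is a select in
/// another block is re-created next to that user:
///
/// bb0:
///   %c = icmp eq i32 %x, 0
///   br label %bb1
/// bb1:
///   %r = select i1 %c, i32 %a, i32 %b
/// ==>
/// bb0:
///   br label %bb1
/// bb1:
///   %c.sunk = icmp eq i32 %x, 0
///   %r = select i1 %c.sunk, i32 %a, i32 %b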
1681 static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) {
1682   if (TLI.hasMultipleConditionRegisters())
1683     return false;
1684 
1685   // Avoid sinking soft-FP comparisons, since this can move them into a loop.
1686   if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp))
1687     return false;
1688 
1689   // Only insert a cmp in each block once.
1690   DenseMap<BasicBlock *, CmpInst *> InsertedCmps;
1691 
1692   bool MadeChange = false;
1693   for (Value::user_iterator UI = Cmp->user_begin(), E = Cmp->user_end();
1694        UI != E;) {
1695     Use &TheUse = UI.getUse();
1696     Instruction *User = cast<Instruction>(*UI);
1697 
1698     // Preincrement use iterator so we don't invalidate it.
1699     ++UI;
1700 
1701     // Don't bother for PHI nodes.
1702     if (isa<PHINode>(User))
1703       continue;
1704 
1705     // Figure out which BB this cmp is used in.
1706     BasicBlock *UserBB = User->getParent();
1707     BasicBlock *DefBB = Cmp->getParent();
1708 
1709     // If this user is in the same block as the cmp, don't change the cmp.
1710     if (UserBB == DefBB)
1711       continue;
1712 
1713     // If we have already inserted a cmp into this block, use it.
1714     CmpInst *&InsertedCmp = InsertedCmps[UserBB];
1715 
1716     if (!InsertedCmp) {
1717       BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1718       assert(InsertPt != UserBB->end());
1719       InsertedCmp = CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(),
1720                                     Cmp->getOperand(0), Cmp->getOperand(1), "",
1721                                     &*InsertPt);
1722       // Propagate the debug info.
1723       InsertedCmp->setDebugLoc(Cmp->getDebugLoc());
1724     }
1725 
1726     // Replace a use of the cmp with a use of the new cmp.
1727     TheUse = InsertedCmp;
1728     MadeChange = true;
1729     ++NumCmpUses;
1730   }
1731 
1732   // If we removed all uses, nuke the cmp.
1733   if (Cmp->use_empty()) {
1734     Cmp->eraseFromParent();
1735     MadeChange = true;
1736   }
1737 
1738   return MadeChange;
1739 }
1740 
1741 /// For pattern like:
1742 ///
1743 ///   DomCond = icmp sgt/slt CmpOp0, CmpOp1 (might not be in DomBB)
1744 ///   ...
1745 /// DomBB:
1746 ///   ...
1747 ///   br DomCond, TrueBB, CmpBB
1748 /// CmpBB: (with DomBB being the single predecessor)
1749 ///   ...
1750 ///   Cmp = icmp eq CmpOp0, CmpOp1
1751 ///   ...
1752 ///
1753 /// The pattern above would use two comparisons on targets where the lowering
1754 /// of icmp sgt/slt differs from that of icmp eq (e.g. PowerPC). This function
1755 /// tries to convert 'Cmp = icmp eq CmpOp0, CmpOp1' into
1756 /// 'Cmp = icmp slt/sgt CmpOp0, CmpOp1'. After that, DomCond and Cmp can share
1757 /// the same comparison, eliminating one comparison.
1758 ///
1759 /// Return true if any changes are made.
1760 static bool foldICmpWithDominatingICmp(CmpInst *Cmp,
1761                                        const TargetLowering &TLI) {
1762   if (!EnableICMP_EQToICMP_ST && TLI.isEqualityCmpFoldedWithSignedCmp())
1763     return false;
1764 
1765   ICmpInst::Predicate Pred = Cmp->getPredicate();
1766   if (Pred != ICmpInst::ICMP_EQ)
1767     return false;
1768 
1769   // If icmp eq has users other than BranchInst and SelectInst, converting it to
1770   // icmp slt/sgt would introduce more redundant LLVM IR.
1771   for (User *U : Cmp->users()) {
1772     if (isa<BranchInst>(U))
1773       continue;
1774     if (isa<SelectInst>(U) && cast<SelectInst>(U)->getCondition() == Cmp)
1775       continue;
1776     return false;
1777   }
1778 
1779   // This is a cheap/incomplete check for dominance - just match a single
1780   // predecessor with a conditional branch.
1781   BasicBlock *CmpBB = Cmp->getParent();
1782   BasicBlock *DomBB = CmpBB->getSinglePredecessor();
1783   if (!DomBB)
1784     return false;
1785 
1786   // We want to ensure that the only way control gets to the comparison of
1787   // interest is that a less/greater than comparison on the same operands is
1788   // false.
1789   Value *DomCond;
1790   BasicBlock *TrueBB, *FalseBB;
1791   if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB)))
1792     return false;
1793   if (CmpBB != FalseBB)
1794     return false;
1795 
1796   Value *CmpOp0 = Cmp->getOperand(0), *CmpOp1 = Cmp->getOperand(1);
1797   ICmpInst::Predicate DomPred;
1798   if (!match(DomCond, m_ICmp(DomPred, m_Specific(CmpOp0), m_Specific(CmpOp1))))
1799     return false;
1800   if (DomPred != ICmpInst::ICMP_SGT && DomPred != ICmpInst::ICMP_SLT)
1801     return false;
1802 
1803   // Convert the equality comparison to the opposite of the dominating
1804   // comparison and swap the direction for all branch/select users.
1805   // We have conceptually converted:
1806   // Res = (a < b) ? <LT_RES> : (a == b) ? <EQ_RES> : <GT_RES>;
1807   // to
1808   // Res = (a < b) ? <LT_RES> : (a > b)  ? <GT_RES> : <EQ_RES>;
1809   // And similarly for branches.
1810   for (User *U : Cmp->users()) {
1811     if (auto *BI = dyn_cast<BranchInst>(U)) {
1812       assert(BI->isConditional() && "Must be conditional");
1813       BI->swapSuccessors();
1814       continue;
1815     }
1816     if (auto *SI = dyn_cast<SelectInst>(U)) {
1817       // Swap operands
1818       SI->swapValues();
1819       SI->swapProfMetadata();
1820       continue;
1821     }
1822     llvm_unreachable("Must be a branch or a select");
1823   }
1824   Cmp->setPredicate(CmpInst::getSwappedPredicate(DomPred));
1825   return true;
1826 }
1827 
1828 bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT) {
1829   if (sinkCmpExpression(Cmp, *TLI))
1830     return true;
1831 
1832   if (combineToUAddWithOverflow(Cmp, ModifiedDT))
1833     return true;
1834 
1835   if (combineToUSubWithOverflow(Cmp, ModifiedDT))
1836     return true;
1837 
1838   if (foldICmpWithDominatingICmp(Cmp, *TLI))
1839     return true;
1840 
1841   return false;
1842 }
1843 
1844 /// Duplicate and sink the given 'and' instruction into user blocks where it is
1845 /// used in a compare to allow isel to generate better code for targets where
1846 /// this operation can be combined.
1847 ///
1848 /// Return true if any changes are made.
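/// For example (illustrative names):
///
/// bb0:
///   %and = and i32 %x, 255
///   br label %bb1
/// bb1:
///   %cmp = icmp eq i32 %and, 0
/// ==>
/// bb0:
///   br label %bb1
/// bb1:
///   %and.sunk = and i32 %x, 255
///   %cmp = icmp eq i32 %and.sunk, 0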
1849 static bool sinkAndCmp0Expression(Instruction *AndI, const TargetLowering &TLI,
1850                                   SetOfInstrs &InsertedInsts) {
1851   // Double-check that we're not trying to optimize an instruction that was
1852   // already optimized by some other part of this pass.
1853   assert(!InsertedInsts.count(AndI) &&
1854          "Attempting to optimize already optimized and instruction");
1855   (void)InsertedInsts;
1856 
1857   // Nothing to do for single use in same basic block.
1858   if (AndI->hasOneUse() &&
1859       AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent())
1860     return false;
1861 
1862   // Try to avoid cases where sinking/duplicating is likely to increase register
1863   // pressure.
1864   if (!isa<ConstantInt>(AndI->getOperand(0)) &&
1865       !isa<ConstantInt>(AndI->getOperand(1)) &&
1866       AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse())
1867     return false;
1868 
1869   for (auto *U : AndI->users()) {
1870     Instruction *User = cast<Instruction>(U);
1871 
1872     // Only sink 'and' feeding icmp with 0.
1873     if (!isa<ICmpInst>(User))
1874       return false;
1875 
1876     auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1));
1877     if (!CmpC || !CmpC->isZero())
1878       return false;
1879   }
1880 
1881   if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI))
1882     return false;
1883 
1884   LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n");
1885   LLVM_DEBUG(AndI->getParent()->dump());
1886 
1887   // Push the 'and' into the same block as the icmp 0.  There should only be
1888   // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any
1889   // others, so we don't need to keep track of which BBs we insert into.
1890   for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end();
1891        UI != E;) {
1892     Use &TheUse = UI.getUse();
1893     Instruction *User = cast<Instruction>(*UI);
1894 
1895     // Preincrement use iterator so we don't invalidate it.
1896     ++UI;
1897 
1898     LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n");
1899 
1900     // Keep the 'and' in the same place if the use is already in the same block.
1901     Instruction *InsertPt =
1902         User->getParent() == AndI->getParent() ? AndI : User;
1903     Instruction *InsertedAnd =
1904         BinaryOperator::Create(Instruction::And, AndI->getOperand(0),
1905                                AndI->getOperand(1), "", InsertPt);
1906     // Propagate the debug info.
1907     InsertedAnd->setDebugLoc(AndI->getDebugLoc());
1908 
1909     // Replace a use of the 'and' with a use of the new 'and'.
1910     TheUse = InsertedAnd;
1911     ++NumAndUses;
1912     LLVM_DEBUG(User->getParent()->dump());
1913   }
1914 
1915   // We removed all uses, nuke the and.
1916   AndI->eraseFromParent();
1917   return true;
1918 }
1919 
1920 /// Check if the candidates could be combined with a shift instruction, which
1921 /// includes:
1922 /// 1. Truncate instruction
1923 /// 2. An 'and' instruction whose immediate is a mask of the low bits:
1924 ///    imm & (imm+1) == 0
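///    For example, imm = 0xFFFF qualifies (0xFFFF & 0x10000 == 0), whereas
///    imm = 0xFF00 does not (0xFF00 & 0xFF01 != 0).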
1925 static bool isExtractBitsCandidateUse(Instruction *User) {
1926   if (!isa<TruncInst>(User)) {
1927     if (User->getOpcode() != Instruction::And ||
1928         !isa<ConstantInt>(User->getOperand(1)))
1929       return false;
1930 
1931     const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();
1932 
1933     if ((Cimm & (Cimm + 1)).getBoolValue())
1934       return false;
1935   }
1936   return true;
1937 }
1938 
1939 /// Sink both the shift and the truncate into the blocks of the truncate's users.
1940 static bool
1941 SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
1942                      DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
1943                      const TargetLowering &TLI, const DataLayout &DL) {
1944   BasicBlock *UserBB = User->getParent();
1945   DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
1946   auto *TruncI = cast<TruncInst>(User);
1947   bool MadeChange = false;
1948 
1949   for (Value::user_iterator TruncUI = TruncI->user_begin(),
1950                             TruncE = TruncI->user_end();
1951        TruncUI != TruncE;) {
1952 
1953     Use &TruncTheUse = TruncUI.getUse();
1954     Instruction *TruncUser = cast<Instruction>(*TruncUI);
1955     // Preincrement use iterator so we don't invalidate it.
1956 
1957     ++TruncUI;
1958 
1959     int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
1960     if (!ISDOpcode)
1961       continue;
1962 
1963     // If the use is actually a legal node, there will not be an
1964     // implicit truncate.
1965     // FIXME: always querying the result type is just an
1966     // approximation; some nodes' legality is determined by the
1967     // operand or other means. There's no good way to find out though.
1968     if (TLI.isOperationLegalOrCustom(
1969             ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true)))
1970       continue;
1971 
1972     // Don't bother for PHI nodes.
1973     if (isa<PHINode>(TruncUser))
1974       continue;
1975 
1976     BasicBlock *TruncUserBB = TruncUser->getParent();
1977 
1978     if (UserBB == TruncUserBB)
1979       continue;
1980 
1981     BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
1982     CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];
1983 
1984     if (!InsertedShift && !InsertedTrunc) {
1985       BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
1986       assert(InsertPt != TruncUserBB->end());
1987       // Sink the shift
1988       if (ShiftI->getOpcode() == Instruction::AShr)
1989         InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
1990                                                    "", &*InsertPt);
1991       else
1992         InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
1993                                                    "", &*InsertPt);
1994       InsertedShift->setDebugLoc(ShiftI->getDebugLoc());
1995 
1996       // Sink the trunc
1997       BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
1998       TruncInsertPt++;
1999       assert(TruncInsertPt != TruncUserBB->end());
2000 
2001       InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
2002                                        TruncI->getType(), "", &*TruncInsertPt);
2003       InsertedTrunc->setDebugLoc(TruncI->getDebugLoc());
2004 
2005       MadeChange = true;
2006 
2007       TruncTheUse = InsertedTrunc;
2008     }
2009   }
2010   return MadeChange;
2011 }
2012 
2013 /// Sink the shift *right* instruction into user blocks if the uses could
2014 /// potentially be combined with this shift instruction and generate BitExtract
2015 /// instruction. It will only be applied if the architecture supports BitExtract
2016 /// instruction. Here is an example:
2017 /// BB1:
2018 ///   %x.extract.shift = lshr i64 %arg1, 32
2019 /// BB2:
2020 ///   %x.extract.trunc = trunc i64 %x.extract.shift to i16
2021 /// ==>
2022 ///
2023 /// BB2:
2024 ///   %x.extract.shift.1 = lshr i64 %arg1, 32
2025 ///   %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
2026 ///
2027 /// CodeGen will recognize the pattern in BB2 and generate BitExtract
2028 /// instruction.
2029 /// Return true if any changes are made.
2030 static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
2031                                 const TargetLowering &TLI,
2032                                 const DataLayout &DL) {
2033   BasicBlock *DefBB = ShiftI->getParent();
2034 
2035   /// Only insert instructions in each block once.
2036   DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;
2037 
2038   bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType()));
2039 
2040   bool MadeChange = false;
2041   for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
2042        UI != E;) {
2043     Use &TheUse = UI.getUse();
2044     Instruction *User = cast<Instruction>(*UI);
2045     // Preincrement use iterator so we don't invalidate it.
2046     ++UI;
2047 
2048     // Don't bother for PHI nodes.
2049     if (isa<PHINode>(User))
2050       continue;
2051 
2052     if (!isExtractBitsCandidateUse(User))
2053       continue;
2054 
2055     BasicBlock *UserBB = User->getParent();
2056 
2057     if (UserBB == DefBB) {
2058       // If the shift and truncate instructions are in the same BB, the use of
2059       // the truncate (TruncUse) may still introduce another truncate if its
2060       // type is not legal. In this case, we would like to sink both the shift
2061       // and the truncate instruction into the BB of TruncUse.
2062       // for example:
2063       // BB1:
2064       // i64 shift.result = lshr i64 opnd, imm
2065       // trunc.result = trunc shift.result to i16
2066       //
2067       // BB2:
2068       //   ----> We will have an implicit truncate here if the architecture does
2069       //   not have i16 compare.
2070       // cmp i16 trunc.result, opnd2
2071       //
2072       if (isa<TruncInst>(User) &&
2073           shiftIsLegal
2074           // If the type of the truncate is legal, no truncate will be
2075           // introduced in other basic blocks.
2076           && (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType()))))
2077         MadeChange =
2078             SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL);
2079 
2080       continue;
2081     }
2082     // If we have already inserted a shift into this block, use it.
2083     BinaryOperator *&InsertedShift = InsertedShifts[UserBB];
2084 
2085     if (!InsertedShift) {
2086       BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
2087       assert(InsertPt != UserBB->end());
2088 
2089       if (ShiftI->getOpcode() == Instruction::AShr)
2090         InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
2091                                                    "", &*InsertPt);
2092       else
2093         InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
2094                                                    "", &*InsertPt);
2095       InsertedShift->setDebugLoc(ShiftI->getDebugLoc());
2096 
2097       MadeChange = true;
2098     }
2099 
2100     // Replace a use of the shift with a use of the new shift.
2101     TheUse = InsertedShift;
2102   }
2103 
2104   // If we removed all uses, or there are none, nuke the shift.
2105   if (ShiftI->use_empty()) {
2106     salvageDebugInfo(*ShiftI);
2107     ShiftI->eraseFromParent();
2108     MadeChange = true;
2109   }
2110 
2111   return MadeChange;
2112 }
2113 
2114 /// If counting leading or trailing zeros is an expensive operation and a zero
2115 /// input is defined, add a check for zero to avoid calling the intrinsic.
2116 ///
2117 /// We want to transform:
2118 ///     %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)
2119 ///
2120 /// into:
2121 ///   entry:
2122 ///     %cmpz = icmp eq i64 %A, 0
2123 ///     br i1 %cmpz, label %cond.end, label %cond.false
2124 ///   cond.false:
2125 ///     %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
2126 ///     br label %cond.end
2127 ///   cond.end:
2128 ///     %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
2129 ///
2130 /// If the transform is performed, return true and set ModifiedDT to true.
2131 static bool despeculateCountZeros(IntrinsicInst *CountZeros,
2132                                   const TargetLowering *TLI,
2133                                   const DataLayout *DL, ModifyDT &ModifiedDT,
2134                                   SmallSet<BasicBlock *, 32> &FreshBBs,
2135                                   bool IsHugeFunc) {
2136   // If a zero input is undefined, it doesn't make sense to despeculate that.
2137   if (match(CountZeros->getOperand(1), m_One()))
2138     return false;
2139 
2140   // If it's cheap to speculate, there's nothing to do.
2141   Type *Ty = CountZeros->getType();
2142   auto IntrinsicID = CountZeros->getIntrinsicID();
2143   if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz(Ty)) ||
2144       (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz(Ty)))
2145     return false;
2146 
2147   // Only handle legal scalar cases. Anything else requires too much work.
2148   unsigned SizeInBits = Ty->getScalarSizeInBits();
2149   if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits())
2150     return false;
2151 
2152   // Bail if the value is never zero.
2153   Use &Op = CountZeros->getOperandUse(0);
2154   if (isKnownNonZero(Op, *DL))
2155     return false;
2156 
2157   // The intrinsic will be sunk behind a compare against zero and branch.
2158   BasicBlock *StartBlock = CountZeros->getParent();
2159   BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false");
2160   if (IsHugeFunc)
2161     FreshBBs.insert(CallBlock);
2162 
2163   // Create another block after the count zero intrinsic. A PHI will be added
2164   // in this block to select the result of the intrinsic or the bit-width
2165   // constant if the input to the intrinsic is zero.
2166   BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros));
2167   BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end");
2168   if (IsHugeFunc)
2169     FreshBBs.insert(EndBlock);
2170 
2171   // Set up a builder to create a compare, conditional branch, and PHI.
2172   IRBuilder<> Builder(CountZeros->getContext());
2173   Builder.SetInsertPoint(StartBlock->getTerminator());
2174   Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc());
2175 
2176   // Replace the unconditional branch that was created by the first split with
2177   // a compare against zero and a conditional branch.
2178   Value *Zero = Constant::getNullValue(Ty);
2179   // Avoid introducing branch on poison. This also replaces the ctz operand.
2180   if (!isGuaranteedNotToBeUndefOrPoison(Op))
2181     Op = Builder.CreateFreeze(Op, Op->getName() + ".fr");
2182   Value *Cmp = Builder.CreateICmpEQ(Op, Zero, "cmpz");
2183   Builder.CreateCondBr(Cmp, EndBlock, CallBlock);
2184   StartBlock->getTerminator()->eraseFromParent();
2185 
2186   // Create a PHI in the end block to select either the output of the intrinsic
2187   // or the bit width of the operand.
2188   Builder.SetInsertPoint(&EndBlock->front());
2189   PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz");
2190   replaceAllUsesWith(CountZeros, PN, FreshBBs, IsHugeFunc);
2191   Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits));
2192   PN->addIncoming(BitWidth, StartBlock);
2193   PN->addIncoming(CountZeros, CallBlock);
2194 
2195   // We are explicitly handling the zero case, so we can set the intrinsic's
2196   // undefined zero argument to 'true'. This will also prevent reprocessing the
2197   // intrinsic; we only despeculate when a zero input is defined.
2198   CountZeros->setArgOperand(1, Builder.getTrue());
2199   ModifiedDT = ModifyDT::ModifyBBDT;
2200   return true;
2201 }
2202 
2203 bool CodeGenPrepare::optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT) {
2204   BasicBlock *BB = CI->getParent();
2205 
2206   // Lower inline assembly if we can.
2207   // If we found an inline asm expression, and if the target knows how to
2208   // lower it to normal LLVM code, do so now.
2209   if (CI->isInlineAsm()) {
2210     if (TLI->ExpandInlineAsm(CI)) {
2211       // Avoid invalidating the iterator.
2212       CurInstIterator = BB->begin();
2213       // Avoid processing instructions out of order, which could cause
2214       // reuse before a value is defined.
2215       SunkAddrs.clear();
2216       return true;
2217     }
2218     // Sink address computing for memory operands into the block.
2219     if (optimizeInlineAsmInst(CI))
2220       return true;
2221   }
2222 
2223   // Align the pointer arguments to this call if the target thinks it's a good
2224   // idea.
2225   unsigned MinSize;
2226   Align PrefAlign;
2227   if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
2228     for (auto &Arg : CI->args()) {
2229       // We want to align both objects whose address is used directly and
2230       // objects whose address is used in casts and GEPs, though it only makes
2231       // sense for GEPs if the offset is a multiple of the desired alignment and
2232       // if size - offset meets the size threshold.
2233       if (!Arg->getType()->isPointerTy())
2234         continue;
2235       APInt Offset(DL->getIndexSizeInBits(
2236                        cast<PointerType>(Arg->getType())->getAddressSpace()),
2237                    0);
2238       Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
2239       uint64_t Offset2 = Offset.getLimitedValue();
2240       if (!isAligned(PrefAlign, Offset2))
2241         continue;
2242       AllocaInst *AI;
2243       if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlign() < PrefAlign &&
2244           DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
2245         AI->setAlignment(PrefAlign);
2246       // Global variables can only be aligned if they are defined in this
2247       // object (i.e. they are uniquely initialized in this object), and
2248       // over-aligning global variables that have an explicit section is
2249       // forbidden.
2250       GlobalVariable *GV;
2251       if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() &&
2252           GV->getPointerAlignment(*DL) < PrefAlign &&
2253           DL->getTypeAllocSize(GV->getValueType()) >= MinSize + Offset2)
2254         GV->setAlignment(PrefAlign);
2255     }
2256   }
2257   // If this is a memcpy (or similar) then we may be able to improve the
2258   // alignment.
2259   if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
2260     Align DestAlign = getKnownAlignment(MI->getDest(), *DL);
2261     MaybeAlign MIDestAlign = MI->getDestAlign();
2262     if (!MIDestAlign || DestAlign > *MIDestAlign)
2263       MI->setDestAlignment(DestAlign);
2264     if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
2265       MaybeAlign MTISrcAlign = MTI->getSourceAlign();
2266       Align SrcAlign = getKnownAlignment(MTI->getSource(), *DL);
2267       if (!MTISrcAlign || SrcAlign > *MTISrcAlign)
2268         MTI->setSourceAlignment(SrcAlign);
2269     }
2270   }
2271 
2272   // If we have a cold call site, try to sink addressing computation into the
2273   // cold block.  This interacts with our handling for loads and stores to
2274   // ensure that we can fold all uses of a potential addressing computation
2275   // into their uses.  TODO: generalize this to work over profiling data
2276   if (CI->hasFnAttr(Attribute::Cold) && !OptSize &&
2277       !llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
2278     for (auto &Arg : CI->args()) {
2279       if (!Arg->getType()->isPointerTy())
2280         continue;
2281       unsigned AS = Arg->getType()->getPointerAddressSpace();
2282       return optimizeMemoryInst(CI, Arg, Arg->getType(), AS);
2283     }
2284 
2285   IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
2286   if (II) {
2287     switch (II->getIntrinsicID()) {
2288     default:
2289       break;
2290     case Intrinsic::assume:
2291       llvm_unreachable("llvm.assume should have been removed already");
2292     case Intrinsic::experimental_widenable_condition: {
2293       // Give up on future widening opportunities so that we can fold away dead
2294       // paths and merge blocks before going into block-local instruction
2295       // selection.
2296       if (II->use_empty()) {
2297         II->eraseFromParent();
2298         return true;
2299       }
2300       Constant *RetVal = ConstantInt::getTrue(II->getContext());
2301       resetIteratorIfInvalidatedWhileCalling(BB, [&]() {
2302         replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr);
2303       });
2304       return true;
2305     }
2306     case Intrinsic::objectsize:
2307       llvm_unreachable("llvm.objectsize.* should have been lowered already");
2308     case Intrinsic::is_constant:
2309       llvm_unreachable("llvm.is.constant.* should have been lowered already");
2310     case Intrinsic::aarch64_stlxr:
2311     case Intrinsic::aarch64_stxr: {
2312       ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0));
2313       if (!ExtVal || !ExtVal->hasOneUse() ||
2314           ExtVal->getParent() == CI->getParent())
2315         return false;
2316       // Sink a zext feeding stlxr/stxr before it, so it can be folded into it.
2317       ExtVal->moveBefore(CI);
2318       // Mark this instruction as "inserted by CGP", so that other
2319       // optimizations don't touch it.
2320       InsertedInsts.insert(ExtVal);
2321       return true;
2322     }
2323 
2324     case Intrinsic::launder_invariant_group:
2325     case Intrinsic::strip_invariant_group: {
2326       Value *ArgVal = II->getArgOperand(0);
2327       auto it = LargeOffsetGEPMap.find(II);
2328       if (it != LargeOffsetGEPMap.end()) {
2329         // Merge entries in LargeOffsetGEPMap to reflect the RAUW.
2330         // Make sure not to have to deal with iterator invalidation
2331         // after possibly adding ArgVal to LargeOffsetGEPMap.
2332         auto GEPs = std::move(it->second);
2333         LargeOffsetGEPMap[ArgVal].append(GEPs.begin(), GEPs.end());
2334         LargeOffsetGEPMap.erase(II);
2335       }
2336 
2337       replaceAllUsesWith(II, ArgVal, FreshBBs, IsHugeFunc);
2338       II->eraseFromParent();
2339       return true;
2340     }
2341     case Intrinsic::cttz:
2342     case Intrinsic::ctlz:
2343       // If counting zeros is expensive, try to avoid it.
2344       return despeculateCountZeros(II, TLI, DL, ModifiedDT, FreshBBs,
2345                                    IsHugeFunc);
2346     case Intrinsic::fshl:
2347     case Intrinsic::fshr:
2348       return optimizeFunnelShift(II);
2349     case Intrinsic::dbg_assign:
2350     case Intrinsic::dbg_value:
2351       return fixupDbgValue(II);
2352     case Intrinsic::vscale: {
2353       // If datalayout has no special restrictions on vector data layout,
2354       // replace `llvm.vscale` by an equivalent constant expression
2355       // to benefit from cheap constant propagation.
2356       Type *ScalableVectorTy =
2357           VectorType::get(Type::getInt8Ty(II->getContext()), 1, true);
2358       if (DL->getTypeAllocSize(ScalableVectorTy).getKnownMinValue() == 8) {
2359         auto *Null = Constant::getNullValue(ScalableVectorTy->getPointerTo());
2360         auto *One = ConstantInt::getSigned(II->getType(), 1);
2361         auto *CGep =
2362             ConstantExpr::getGetElementPtr(ScalableVectorTy, Null, One);
2363         replaceAllUsesWith(II, ConstantExpr::getPtrToInt(CGep, II->getType()),
2364                            FreshBBs, IsHugeFunc);
2365         II->eraseFromParent();
2366         return true;
2367       }
2368       break;
2369     }
2370     case Intrinsic::masked_gather:
2371       return optimizeGatherScatterInst(II, II->getArgOperand(0));
2372     case Intrinsic::masked_scatter:
2373       return optimizeGatherScatterInst(II, II->getArgOperand(1));
2374     }
2375 
2376     SmallVector<Value *, 2> PtrOps;
2377     Type *AccessTy;
2378     if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
2379       while (!PtrOps.empty()) {
2380         Value *PtrVal = PtrOps.pop_back_val();
2381         unsigned AS = PtrVal->getType()->getPointerAddressSpace();
2382         if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
2383           return true;
2384       }
2385   }
2386 
2387   // From here on out we're working with named functions.
2388   if (!CI->getCalledFunction())
2389     return false;
2390 
2391   // Lower all default uses of _chk calls.  This is very similar
2392   // to what InstCombineCalls does, but here we are only lowering calls
2393   // to fortified library functions (e.g. __memcpy_chk) that have the default
2394   // "don't know" as the objectsize.  Anything else should be left alone.
2395   FortifiedLibCallSimplifier Simplifier(TLInfo, true);
2396   IRBuilder<> Builder(CI);
2397   if (Value *V = Simplifier.optimizeCall(CI, Builder)) {
2398     replaceAllUsesWith(CI, V, FreshBBs, IsHugeFunc);
2399     CI->eraseFromParent();
2400     return true;
2401   }
2402 
2403   return false;
2404 }
2405 
2406 /// Look for opportunities to duplicate return instructions to the predecessor
2407 /// to enable tail call optimizations. The case it is currently looking for is:
2408 /// @code
2409 /// bb0:
2410 ///   %tmp0 = tail call i32 @f0()
2411 ///   br label %return
2412 /// bb1:
2413 ///   %tmp1 = tail call i32 @f1()
2414 ///   br label %return
2415 /// bb2:
2416 ///   %tmp2 = tail call i32 @f2()
2417 ///   br label %return
2418 /// return:
2419 ///   %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
2420 ///   ret i32 %retval
2421 /// @endcode
2422 ///
2423 /// =>
2424 ///
2425 /// @code
2426 /// bb0:
2427 ///   %tmp0 = tail call i32 @f0()
2428 ///   ret i32 %tmp0
2429 /// bb1:
2430 ///   %tmp1 = tail call i32 @f1()
2431 ///   ret i32 %tmp1
2432 /// bb2:
2433 ///   %tmp2 = tail call i32 @f2()
2434 ///   ret i32 %tmp2
2435 /// @endcode
2436 bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB,
2437                                                 ModifyDT &ModifiedDT) {
2438   if (!BB->getTerminator())
2439     return false;
2440 
2441   ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
2442   if (!RetI)
2443     return false;
2444 
2445   PHINode *PN = nullptr;
2446   ExtractValueInst *EVI = nullptr;
2447   BitCastInst *BCI = nullptr;
2448   Value *V = RetI->getReturnValue();
2449   if (V) {
2450     BCI = dyn_cast<BitCastInst>(V);
2451     if (BCI)
2452       V = BCI->getOperand(0);
2453 
2454     EVI = dyn_cast<ExtractValueInst>(V);
2455     if (EVI) {
2456       V = EVI->getOperand(0);
2457       if (!llvm::all_of(EVI->indices(), [](unsigned idx) { return idx == 0; }))
2458         return false;
2459     }
2460 
2461     PN = dyn_cast<PHINode>(V);
2462     if (!PN)
2463       return false;
2464   }
2465 
2466   if (PN && PN->getParent() != BB)
2467     return false;
2468 
2469   auto isLifetimeEndOrBitCastFor = [](const Instruction *Inst) {
2470     const BitCastInst *BC = dyn_cast<BitCastInst>(Inst);
2471     if (BC && BC->hasOneUse())
2472       Inst = BC->user_back();
2473 
2474     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
2475       return II->getIntrinsicID() == Intrinsic::lifetime_end;
2476     return false;
2477   };
2478 
2479   // Make sure there are no instructions between the first instruction
2480   // and return.
2481   const Instruction *BI = BB->getFirstNonPHI();
2482   // Skip over debug and the bitcast.
2483   while (isa<DbgInfoIntrinsic>(BI) || BI == BCI || BI == EVI ||
2484          isa<PseudoProbeInst>(BI) || isLifetimeEndOrBitCastFor(BI))
2485     BI = BI->getNextNode();
2486   if (BI != RetI)
2487     return false;
2488 
2489   /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
2490   /// call.
2491   const Function *F = BB->getParent();
2492   SmallVector<BasicBlock *, 4> TailCallBBs;
2493   if (PN) {
2494     for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
2495       // Look through bitcasts.
2496       Value *IncomingVal = PN->getIncomingValue(I)->stripPointerCasts();
2497       CallInst *CI = dyn_cast<CallInst>(IncomingVal);
2498       BasicBlock *PredBB = PN->getIncomingBlock(I);
2499       // Make sure the phi value is indeed produced by the tail call.
2500       if (CI && CI->hasOneUse() && CI->getParent() == PredBB &&
2501           TLI->mayBeEmittedAsTailCall(CI) &&
2502           attributesPermitTailCall(F, CI, RetI, *TLI))
2503         TailCallBBs.push_back(PredBB);
2504     }
2505   } else {
2506     SmallPtrSet<BasicBlock *, 4> VisitedBBs;
2507     for (BasicBlock *Pred : predecessors(BB)) {
2508       if (!VisitedBBs.insert(Pred).second)
2509         continue;
2510       if (Instruction *I = Pred->rbegin()->getPrevNonDebugInstruction(true)) {
2511         CallInst *CI = dyn_cast<CallInst>(I);
2512         if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) &&
2513             attributesPermitTailCall(F, CI, RetI, *TLI))
2514           TailCallBBs.push_back(Pred);
2515       }
2516     }
2517   }
2518 
2519   bool Changed = false;
2520   for (auto const &TailCallBB : TailCallBBs) {
2521     // Make sure the call instruction is followed by an unconditional branch to
2522     // the return block.
2523     BranchInst *BI = dyn_cast<BranchInst>(TailCallBB->getTerminator());
2524     if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
2525       continue;
2526 
2527     // Duplicate the return into TailCallBB.
2528     (void)FoldReturnIntoUncondBranch(RetI, BB, TailCallBB);
2529     assert(!VerifyBFIUpdates ||
2530            BFI->getBlockFreq(BB) >= BFI->getBlockFreq(TailCallBB));
2531     BFI->setBlockFreq(
2532         BB,
2533         (BFI->getBlockFreq(BB) - BFI->getBlockFreq(TailCallBB)).getFrequency());
2534     ModifiedDT = ModifyDT::ModifyBBDT;
2535     Changed = true;
2536     ++NumRetsDup;
2537   }
2538 
2539   // If we eliminated all predecessors of the block, delete the block now.
2540   if (Changed && !BB->hasAddressTaken() && pred_empty(BB))
2541     BB->eraseFromParent();
2542 
2543   return Changed;
2544 }
2545 
2546 //===----------------------------------------------------------------------===//
2547 // Memory Optimization
2548 //===----------------------------------------------------------------------===//
2549 
2550 namespace {
2551 
2552 /// This is an extended version of TargetLowering::AddrMode
2553 /// which holds actual Value*'s for register values.
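/// For example, an address computed as "%base + 4*%idx + 16" is held (roughly)
/// as BaseReg = %base, ScaledReg = %idx, Scale = 4, BaseOffs = 16, i.e. the
/// target form BaseGV + BaseReg + Scale*ScaledReg + BaseOffs.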
2554 struct ExtAddrMode : public TargetLowering::AddrMode {
2555   Value *BaseReg = nullptr;
2556   Value *ScaledReg = nullptr;
2557   Value *OriginalValue = nullptr;
2558   bool InBounds = true;
2559 
2560   enum FieldName {
2561     NoField = 0x00,
2562     BaseRegField = 0x01,
2563     BaseGVField = 0x02,
2564     BaseOffsField = 0x04,
2565     ScaledRegField = 0x08,
2566     ScaleField = 0x10,
2567     MultipleFields = 0xff
2568   };
2569 
2570   ExtAddrMode() = default;
2571 
2572   void print(raw_ostream &OS) const;
2573   void dump() const;
2574 
2575   FieldName compare(const ExtAddrMode &other) {
2576     // First check that the types are the same on each field, as differing types
2577     // are something we can't cope with later on.
2578     if (BaseReg && other.BaseReg &&
2579         BaseReg->getType() != other.BaseReg->getType())
2580       return MultipleFields;
2581     if (BaseGV && other.BaseGV && BaseGV->getType() != other.BaseGV->getType())
2582       return MultipleFields;
2583     if (ScaledReg && other.ScaledReg &&
2584         ScaledReg->getType() != other.ScaledReg->getType())
2585       return MultipleFields;
2586 
2587     // Conservatively reject 'inbounds' mismatches.
2588     if (InBounds != other.InBounds)
2589       return MultipleFields;
2590 
2591     // Check each field to see if it differs.
2592     unsigned Result = NoField;
2593     if (BaseReg != other.BaseReg)
2594       Result |= BaseRegField;
2595     if (BaseGV != other.BaseGV)
2596       Result |= BaseGVField;
2597     if (BaseOffs != other.BaseOffs)
2598       Result |= BaseOffsField;
2599     if (ScaledReg != other.ScaledReg)
2600       Result |= ScaledRegField;
2601     // Don't count 0 as being a different scale, because that actually means
2602     // unscaled (which will already be counted by having no ScaledReg).
2603     if (Scale && other.Scale && Scale != other.Scale)
2604       Result |= ScaleField;
2605 
2606     if (llvm::popcount(Result) > 1)
2607       return MultipleFields;
2608     else
2609       return static_cast<FieldName>(Result);
2610   }
2611 
2612   // An AddrMode is trivial if it involves no calculation i.e. it is just a base
2613   // with no offset.
2614   bool isTrivial() {
2615     // An AddrMode is (BaseGV + BaseReg + BaseOffs + ScaleReg * Scale) so it is
2616     // trivial if at most one of these terms is nonzero, except that BaseGV and
2617     // BaseReg both being zero actually means a null pointer value, which we
2618     // consider to be 'non-zero' here.
2619     return !BaseOffs && !Scale && !(BaseGV && BaseReg);
2620   }
2621 
2622   Value *GetFieldAsValue(FieldName Field, Type *IntPtrTy) {
2623     switch (Field) {
2624     default:
2625       return nullptr;
2626     case BaseRegField:
2627       return BaseReg;
2628     case BaseGVField:
2629       return BaseGV;
2630     case ScaledRegField:
2631       return ScaledReg;
2632     case BaseOffsField:
2633       return ConstantInt::get(IntPtrTy, BaseOffs);
2634     }
2635   }
2636 
2637   void SetCombinedField(FieldName Field, Value *V,
2638                         const SmallVectorImpl<ExtAddrMode> &AddrModes) {
2639     switch (Field) {
2640     default:
2641       llvm_unreachable("Unhandled fields are expected to be rejected earlier");
2642       break;
2643     case ExtAddrMode::BaseRegField:
2644       BaseReg = V;
2645       break;
2646     case ExtAddrMode::BaseGVField:
2647       // A combined BaseGV is an Instruction, not a GlobalValue, so it goes
2648       // in the BaseReg field.
2649       assert(BaseReg == nullptr);
2650       BaseReg = V;
2651       BaseGV = nullptr;
2652       break;
2653     case ExtAddrMode::ScaledRegField:
2654       ScaledReg = V;
2655       // If we have a mix of scaled and unscaled addrmodes then we want scale
2656       // to be the scale and not zero.
2657       if (!Scale)
2658         for (const ExtAddrMode &AM : AddrModes)
2659           if (AM.Scale) {
2660             Scale = AM.Scale;
2661             break;
2662           }
2663       break;
2664     case ExtAddrMode::BaseOffsField:
2665       // The offset is no longer a constant, so it goes in ScaledReg with a
2666       // scale of 1.
2667       assert(ScaledReg == nullptr);
2668       ScaledReg = V;
2669       Scale = 1;
2670       BaseOffs = 0;
2671       break;
2672     }
2673   }
2674 };
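
// Editorial illustration (hypothetical IR and field values, not taken from the
// pass): for an access such as
//   %addr = getelementptr inbounds i32, ptr %base, i64 %idx
//   %v    = load i32, ptr %addr
// the matcher may end up with something like
//   ExtAddrMode { BaseReg = %base, ScaledReg = %idx, Scale = 4, BaseOffs = 0 }
// (assuming the target accepts a scale of 4). compare() against a second mode
// that is identical except for its BaseReg returns BaseRegField, the kind of
// single-field difference the combining logic further below can merge.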
2675 
2676 #ifndef NDEBUG
2677 static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
2678   AM.print(OS);
2679   return OS;
2680 }
2681 #endif
2682 
2683 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2684 void ExtAddrMode::print(raw_ostream &OS) const {
2685   bool NeedPlus = false;
2686   OS << "[";
2687   if (InBounds)
2688     OS << "inbounds ";
2689   if (BaseGV) {
2690     OS << (NeedPlus ? " + " : "") << "GV:";
2691     BaseGV->printAsOperand(OS, /*PrintType=*/false);
2692     NeedPlus = true;
2693   }
2694 
2695   if (BaseOffs) {
2696     OS << (NeedPlus ? " + " : "") << BaseOffs;
2697     NeedPlus = true;
2698   }
2699 
2700   if (BaseReg) {
2701     OS << (NeedPlus ? " + " : "") << "Base:";
2702     BaseReg->printAsOperand(OS, /*PrintType=*/false);
2703     NeedPlus = true;
2704   }
2705   if (Scale) {
2706     OS << (NeedPlus ? " + " : "") << Scale << "*";
2707     ScaledReg->printAsOperand(OS, /*PrintType=*/false);
2708   }
2709 
2710   OS << ']';
2711 }
2712 
2713 LLVM_DUMP_METHOD void ExtAddrMode::dump() const {
2714   print(dbgs());
2715   dbgs() << '\n';
2716 }
2717 #endif
2718 
2719 } // end anonymous namespace
2720 
2721 namespace {
2722 
2723 /// This class provides transaction based operation on the IR.
2724 /// Every change made through this class is recorded in the internal state and
2725 /// can be undone (rollback) until commit is called.
2726 /// CGP does not check if instructions could be speculatively executed when
2727 /// moved. Preserving the original location would pessimize the debugging
2728 /// experience, as well as negatively impact the quality of sample PGO.
2729 class TypePromotionTransaction {
2730   /// This represents the common interface of the individual transaction.
2731   /// Each class implements the logic for doing one specific modification on
2732   /// the IR via the TypePromotionTransaction.
2733   class TypePromotionAction {
2734   protected:
2735     /// The Instruction modified.
2736     Instruction *Inst;
2737 
2738   public:
2739     /// Constructor of the action.
2740     /// The constructor performs the related action on the IR.
2741     TypePromotionAction(Instruction *Inst) : Inst(Inst) {}
2742 
2743     virtual ~TypePromotionAction() = default;
2744 
2745     /// Undo the modification done by this action.
2746     /// When this method is called, the IR must be in the same state as it was
2747     /// before this action was applied.
2748     /// \pre Undoing the action works if and only if the IR is in the exact same
2749     /// state as it was directly after this action was applied.
2750     virtual void undo() = 0;
2751 
2752     /// Commit every change made by this action.
2753     /// When the action's results on the IR are to be kept, it is important
2754     /// to call this function, otherwise hidden information may be kept forever.
2755     virtual void commit() {
2756       // Nothing to be done, this action is not doing anything.
2757     }
2758   };
2759 
2760   /// Utility to remember the position of an instruction.
2761   class InsertionHandler {
2762     /// Position of an instruction.
2763     /// Either the instruction:
2764     /// - is the first in a basic block: BB is used, or
2765     /// - has a previous instruction: PrevInst is used.
2766     union {
2767       Instruction *PrevInst;
2768       BasicBlock *BB;
2769     } Point;
2770 
2771     /// Remember whether or not the instruction had a previous instruction.
2772     bool HasPrevInstruction;
2773 
2774   public:
2775     /// Record the position of \p Inst.
2776     InsertionHandler(Instruction *Inst) {
2777       BasicBlock::iterator It = Inst->getIterator();
2778       HasPrevInstruction = (It != (Inst->getParent()->begin()));
2779       if (HasPrevInstruction)
2780         Point.PrevInst = &*--It;
2781       else
2782         Point.BB = Inst->getParent();
2783     }
2784 
2785     /// Insert \p Inst at the recorded position.
2786     void insert(Instruction *Inst) {
2787       if (HasPrevInstruction) {
2788         if (Inst->getParent())
2789           Inst->removeFromParent();
2790         Inst->insertAfter(Point.PrevInst);
2791       } else {
2792         Instruction *Position = &*Point.BB->getFirstInsertionPt();
2793         if (Inst->getParent())
2794           Inst->moveBefore(Position);
2795         else
2796           Inst->insertBefore(Position);
2797       }
2798     }
2799   };
2800 
2801   /// Move an instruction before another.
2802   class InstructionMoveBefore : public TypePromotionAction {
2803     /// Original position of the instruction.
2804     InsertionHandler Position;
2805 
2806   public:
2807     /// Move \p Inst before \p Before.
2808     InstructionMoveBefore(Instruction *Inst, Instruction *Before)
2809         : TypePromotionAction(Inst), Position(Inst) {
2810       LLVM_DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before
2811                         << "\n");
2812       Inst->moveBefore(Before);
2813     }
2814 
2815     /// Move the instruction back to its original position.
2816     void undo() override {
2817       LLVM_DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
2818       Position.insert(Inst);
2819     }
2820   };
2821 
2822   /// Set the operand of an instruction with a new value.
2823   class OperandSetter : public TypePromotionAction {
2824     /// Original operand of the instruction.
2825     Value *Origin;
2826 
2827     /// Index of the modified operand.
2828     unsigned Idx;
2829 
2830   public:
2831     /// Set \p Idx operand of \p Inst with \p NewVal.
2832     OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
2833         : TypePromotionAction(Inst), Idx(Idx) {
2834       LLVM_DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
2835                         << "for:" << *Inst << "\n"
2836                         << "with:" << *NewVal << "\n");
2837       Origin = Inst->getOperand(Idx);
2838       Inst->setOperand(Idx, NewVal);
2839     }
2840 
2841     /// Restore the original value of the instruction.
2842     void undo() override {
2843       LLVM_DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
2844                         << "for: " << *Inst << "\n"
2845                         << "with: " << *Origin << "\n");
2846       Inst->setOperand(Idx, Origin);
2847     }
2848   };
2849 
2850   /// Hide the operands of an instruction.
2851   /// Act as if this instruction were not using any of its operands.
2852   class OperandsHider : public TypePromotionAction {
2853     /// The list of original operands.
2854     SmallVector<Value *, 4> OriginalValues;
2855 
2856   public:
2857     /// Remove \p Inst from the uses of the operands of \p Inst.
2858     OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
2859       LLVM_DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
2860       unsigned NumOpnds = Inst->getNumOperands();
2861       OriginalValues.reserve(NumOpnds);
2862       for (unsigned It = 0; It < NumOpnds; ++It) {
2863         // Save the current operand.
2864         Value *Val = Inst->getOperand(It);
2865         OriginalValues.push_back(Val);
2866         // Set a dummy one.
2867         // We could use OperandSetter here, but that would imply an overhead
2868         // that we are not willing to pay.
2869         Inst->setOperand(It, UndefValue::get(Val->getType()));
2870       }
2871     }
2872 
2873     /// Restore the original list of uses.
2874     void undo() override {
2875       LLVM_DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
2876       for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
2877         Inst->setOperand(It, OriginalValues[It]);
2878     }
2879   };
2880 
2881   /// Build a truncate instruction.
2882   class TruncBuilder : public TypePromotionAction {
2883     Value *Val;
2884 
2885   public:
2886     /// Build a truncate instruction of \p Opnd producing a \p Ty
2887     /// result.
2888     /// trunc Opnd to Ty.
2889     TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
2890       IRBuilder<> Builder(Opnd);
2891       Builder.SetCurrentDebugLocation(DebugLoc());
2892       Val = Builder.CreateTrunc(Opnd, Ty, "promoted");
2893       LLVM_DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
2894     }
2895 
2896     /// Get the built value.
2897     Value *getBuiltValue() { return Val; }
2898 
2899     /// Remove the built instruction.
2900     void undo() override {
2901       LLVM_DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
2902       if (Instruction *IVal = dyn_cast<Instruction>(Val))
2903         IVal->eraseFromParent();
2904     }
2905   };
2906 
2907   /// Build a sign extension instruction.
2908   class SExtBuilder : public TypePromotionAction {
2909     Value *Val;
2910 
2911   public:
2912     /// Build a sign extension instruction of \p Opnd producing a \p Ty
2913     /// result.
2914     /// sext Opnd to Ty.
2915     SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
2916         : TypePromotionAction(InsertPt) {
2917       IRBuilder<> Builder(InsertPt);
2918       Val = Builder.CreateSExt(Opnd, Ty, "promoted");
2919       LLVM_DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
2920     }
2921 
2922     /// Get the built value.
2923     Value *getBuiltValue() { return Val; }
2924 
2925     /// Remove the built instruction.
2926     void undo() override {
2927       LLVM_DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
2928       if (Instruction *IVal = dyn_cast<Instruction>(Val))
2929         IVal->eraseFromParent();
2930     }
2931   };
2932 
2933   /// Build a zero extension instruction.
2934   class ZExtBuilder : public TypePromotionAction {
2935     Value *Val;
2936 
2937   public:
2938     /// Build a zero extension instruction of \p Opnd producing a \p Ty
2939     /// result.
2940     /// zext Opnd to Ty.
2941     ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
2942         : TypePromotionAction(InsertPt) {
2943       IRBuilder<> Builder(InsertPt);
2944       Builder.SetCurrentDebugLocation(DebugLoc());
2945       Val = Builder.CreateZExt(Opnd, Ty, "promoted");
2946       LLVM_DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
2947     }
2948 
2949     /// Get the built value.
2950     Value *getBuiltValue() { return Val; }
2951 
2952     /// Remove the built instruction.
2953     void undo() override {
2954       LLVM_DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
2955       if (Instruction *IVal = dyn_cast<Instruction>(Val))
2956         IVal->eraseFromParent();
2957     }
2958   };
2959 
2960   /// Mutate an instruction to another type.
2961   class TypeMutator : public TypePromotionAction {
2962     /// Record the original type.
2963     Type *OrigTy;
2964 
2965   public:
2966     /// Mutate the type of \p Inst into \p NewTy.
2967     TypeMutator(Instruction *Inst, Type *NewTy)
2968         : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
2969       LLVM_DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
2970                         << "\n");
2971       Inst->mutateType(NewTy);
2972     }
2973 
2974     /// Mutate the instruction back to its original type.
2975     void undo() override {
2976       LLVM_DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
2977                         << "\n");
2978       Inst->mutateType(OrigTy);
2979     }
2980   };
2981 
2982   /// Replace the uses of an instruction by another instruction.
2983   class UsesReplacer : public TypePromotionAction {
2984     /// Helper structure to keep track of the replaced uses.
2985     struct InstructionAndIdx {
2986       /// The instruction that was using the original instruction.
2987       Instruction *Inst;
2988 
2989       /// The operand index at which the original instruction was used.
2990       unsigned Idx;
2991 
2992       InstructionAndIdx(Instruction *Inst, unsigned Idx)
2993           : Inst(Inst), Idx(Idx) {}
2994     };
2995 
2996     /// Keep track of the original uses (pair Instruction, Index).
2997     SmallVector<InstructionAndIdx, 4> OriginalUses;
2998     /// Keep track of the debug users.
2999     SmallVector<DbgValueInst *, 1> DbgValues;
3000 
3001     /// Keep track of the new value so that we can undo it by replacing
3002     /// instances of the new value with the original value.
3003     Value *New;
3004 
3005     using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator;
3006 
3007   public:
3008     /// Replace all the uses of \p Inst by \p New.
3009     UsesReplacer(Instruction *Inst, Value *New)
3010         : TypePromotionAction(Inst), New(New) {
3011       LLVM_DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
3012                         << "\n");
3013       // Record the original uses.
3014       for (Use &U : Inst->uses()) {
3015         Instruction *UserI = cast<Instruction>(U.getUser());
3016         OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
3017       }
3018       // Record the debug uses separately. They are not in the instruction's
3019       // use list, but they are replaced by RAUW.
3020       findDbgValues(DbgValues, Inst);
3021 
3022       // Now, we can replace the uses.
3023       Inst->replaceAllUsesWith(New);
3024     }
3025 
3026     /// Reassign the original uses of Inst to Inst.
3027     void undo() override {
3028       LLVM_DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
3029       for (InstructionAndIdx &Use : OriginalUses)
3030         Use.Inst->setOperand(Use.Idx, Inst);
3031       // RAUW has replaced all original uses with references to the new value,
3032       // including the debug uses. Since we are undoing the replacements,
3033       // the original debug uses must also be reinstated to maintain the
3034       // correctness and utility of debug value instructions.
3035       for (auto *DVI : DbgValues)
3036         DVI->replaceVariableLocationOp(New, Inst);
3037     }
3038   };
3039 
3040   /// Remove an instruction from the IR.
3041   class InstructionRemover : public TypePromotionAction {
3042     /// Original position of the instruction.
3043     InsertionHandler Inserter;
3044 
3045     /// Helper structure to hide all the links to the instruction. In other
3046     /// words, this helps to act as if the instruction was removed.
3047     OperandsHider Hider;
3048 
3049     /// Keep track of the uses replaced, if any.
3050     UsesReplacer *Replacer = nullptr;
3051 
3052     /// Keep track of instructions removed.
3053     SetOfInstrs &RemovedInsts;
3054 
3055   public:
3056     /// Remove all references to \p Inst and optionally replace all its
3057     /// uses with New.
3058     /// \p RemovedInsts Keep track of the instructions removed by this Action.
3059     /// \pre If !Inst->use_empty(), then New != nullptr
3060     InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts,
3061                        Value *New = nullptr)
3062         : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
3063           RemovedInsts(RemovedInsts) {
3064       if (New)
3065         Replacer = new UsesReplacer(Inst, New);
3066       LLVM_DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
3067       RemovedInsts.insert(Inst);
3068       /// The instructions removed here will be freed after completing
3069       /// optimizeBlock() for all blocks as we need to keep track of the
3070       /// removed instructions during promotion.
3071       Inst->removeFromParent();
3072     }
3073 
3074     ~InstructionRemover() override { delete Replacer; }
3075 
3076     /// Resurrect the instruction and reassign it to the proper uses if a
3077     /// new value was provided when building this action.
3078     void undo() override {
3079       LLVM_DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
3080       Inserter.insert(Inst);
3081       if (Replacer)
3082         Replacer->undo();
3083       Hider.undo();
3084       RemovedInsts.erase(Inst);
3085     }
3086   };
3087 
3088 public:
3089   /// Restoration point.
3090   /// The restoration point is a pointer to an action instead of an iterator
3091   /// because the iterator may be invalidated but not the pointer.
3092   using ConstRestorationPt = const TypePromotionAction *;
3093 
3094   TypePromotionTransaction(SetOfInstrs &RemovedInsts)
3095       : RemovedInsts(RemovedInsts) {}
3096 
3097   /// Commit every change made in this transaction. Return true if any change
3098   /// happened.
3099   bool commit();
3100 
3101   /// Undo all the changes made after the given point.
3102   void rollback(ConstRestorationPt Point);
3103 
3104   /// Get the current restoration point.
3105   ConstRestorationPt getRestorationPoint() const;
3106 
3107   /// \name API for IR modification with state keeping to support rollback.
3108   /// @{
3109   /// Same as Instruction::setOperand.
3110   void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);
3111 
3112   /// Same as Instruction::eraseFromParent.
3113   void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);
3114 
3115   /// Same as Value::replaceAllUsesWith.
3116   void replaceAllUsesWith(Instruction *Inst, Value *New);
3117 
3118   /// Same as Value::mutateType.
3119   void mutateType(Instruction *Inst, Type *NewTy);
3120 
3121   /// Same as IRBuilder::createTrunc.
3122   Value *createTrunc(Instruction *Opnd, Type *Ty);
3123 
3124   /// Same as IRBuilder::createSExt.
3125   Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);
3126 
3127   /// Same as IRBuilder::createZExt.
3128   Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty);
3129 
3130   /// Same as Instruction::moveBefore.
3131   void moveBefore(Instruction *Inst, Instruction *Before);
3132   /// @}
3133 
3134 private:
3135   /// The ordered list of actions made so far.
3136   SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions;
3137 
3138   using CommitPt =
3139       SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator;
3140 
3141   SetOfInstrs &RemovedInsts;
3142 };
3143 
3144 } // end anonymous namespace
3145 
3146 void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx,
3147                                           Value *NewVal) {
3148   Actions.push_back(std::make_unique<TypePromotionTransaction::OperandSetter>(
3149       Inst, Idx, NewVal));
3150 }
3151 
3152 void TypePromotionTransaction::eraseInstruction(Instruction *Inst,
3153                                                 Value *NewVal) {
3154   Actions.push_back(
3155       std::make_unique<TypePromotionTransaction::InstructionRemover>(
3156           Inst, RemovedInsts, NewVal));
3157 }
3158 
3159 void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst,
3160                                                   Value *New) {
3161   Actions.push_back(
3162       std::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New));
3163 }
3164 
3165 void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) {
3166   Actions.push_back(
3167       std::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy));
3168 }
3169 
3170 Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, Type *Ty) {
3171   std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty));
3172   Value *Val = Ptr->getBuiltValue();
3173   Actions.push_back(std::move(Ptr));
3174   return Val;
3175 }
3176 
3177 Value *TypePromotionTransaction::createSExt(Instruction *Inst, Value *Opnd,
3178                                             Type *Ty) {
3179   std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty));
3180   Value *Val = Ptr->getBuiltValue();
3181   Actions.push_back(std::move(Ptr));
3182   return Val;
3183 }
3184 
3185 Value *TypePromotionTransaction::createZExt(Instruction *Inst, Value *Opnd,
3186                                             Type *Ty) {
3187   std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty));
3188   Value *Val = Ptr->getBuiltValue();
3189   Actions.push_back(std::move(Ptr));
3190   return Val;
3191 }
3192 
3193 void TypePromotionTransaction::moveBefore(Instruction *Inst,
3194                                           Instruction *Before) {
3195   Actions.push_back(
3196       std::make_unique<TypePromotionTransaction::InstructionMoveBefore>(
3197           Inst, Before));
3198 }
3199 
3200 TypePromotionTransaction::ConstRestorationPt
3201 TypePromotionTransaction::getRestorationPoint() const {
3202   return !Actions.empty() ? Actions.back().get() : nullptr;
3203 }
3204 
3205 bool TypePromotionTransaction::commit() {
3206   for (std::unique_ptr<TypePromotionAction> &Action : Actions)
3207     Action->commit();
3208   bool Modified = !Actions.empty();
3209   Actions.clear();
3210   return Modified;
3211 }
3212 
3213 void TypePromotionTransaction::rollback(
3214     TypePromotionTransaction::ConstRestorationPt Point) {
3215   while (!Actions.empty() && Point != Actions.back().get()) {
3216     std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val();
3217     Curr->undo();
3218   }
3219 }
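
// Editorial usage sketch (hypothetical caller and variable names, not code
// from this file): clients typically record a restoration point, mutate the
// IR through the transaction, and then either keep or discard the changes.
//
//   TypePromotionTransaction TPT(RemovedInsts);
//   TypePromotionTransaction::ConstRestorationPt Start =
//       TPT.getRestorationPoint();
//   Value *Wide = TPT.createSExt(UserInst, NarrowOpnd, WideTy);
//   TPT.replaceAllUsesWith(NarrowInst, Wide);
//   if (ProfitableToKeep)
//     TPT.commit();        // Keep the changes; the recorded actions are cleared.
//   else
//     TPT.rollback(Start); // Undo everything back to the recorded point.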
3220 
3221 namespace {
3222 
3223 /// A helper class for matching addressing modes.
3224 ///
3225 /// This encapsulates the logic for matching the target-legal addressing modes.
3226 class AddressingModeMatcher {
3227   SmallVectorImpl<Instruction *> &AddrModeInsts;
3228   const TargetLowering &TLI;
3229   const TargetRegisterInfo &TRI;
3230   const DataLayout &DL;
3231   const LoopInfo &LI;
3232   const std::function<const DominatorTree &()> getDTFn;
3233 
3234   /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
3235   /// the memory instruction that we're computing this address for.
3236   Type *AccessTy;
3237   unsigned AddrSpace;
3238   Instruction *MemoryInst;
3239 
3240   /// This is the addressing mode that we're building up. This is
3241   /// part of the return value of this addressing mode matching stuff.
3242   ExtAddrMode &AddrMode;
3243 
3244   /// The instructions inserted by other CodeGenPrepare optimizations.
3245   const SetOfInstrs &InsertedInsts;
3246 
3247   /// A map from the instructions to their type before promotion.
3248   InstrToOrigTy &PromotedInsts;
3249 
3250   /// The ongoing transaction where every action should be registered.
3251   TypePromotionTransaction &TPT;
3252 
3253   // A GEP which has too large an offset to be folded into the addressing mode.
3254   std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP;
3255 
3256   /// This is set to true when we should not do profitability checks.
3257   /// When true, IsProfitableToFoldIntoAddressingMode always returns true.
3258   bool IgnoreProfitability;
3259 
3260   /// True if we are optimizing for size.
3261   bool OptSize;
3262 
3263   ProfileSummaryInfo *PSI;
3264   BlockFrequencyInfo *BFI;
3265 
3266   AddressingModeMatcher(
3267       SmallVectorImpl<Instruction *> &AMI, const TargetLowering &TLI,
3268       const TargetRegisterInfo &TRI, const LoopInfo &LI,
3269       const std::function<const DominatorTree &()> getDTFn, Type *AT,
3270       unsigned AS, Instruction *MI, ExtAddrMode &AM,
3271       const SetOfInstrs &InsertedInsts, InstrToOrigTy &PromotedInsts,
3272       TypePromotionTransaction &TPT,
3273       std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
3274       bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
3275       : AddrModeInsts(AMI), TLI(TLI), TRI(TRI),
3276         DL(MI->getModule()->getDataLayout()), LI(LI), getDTFn(getDTFn),
3277         AccessTy(AT), AddrSpace(AS), MemoryInst(MI), AddrMode(AM),
3278         InsertedInsts(InsertedInsts), PromotedInsts(PromotedInsts), TPT(TPT),
3279         LargeOffsetGEP(LargeOffsetGEP), OptSize(OptSize), PSI(PSI), BFI(BFI) {
3280     IgnoreProfitability = false;
3281   }
3282 
3283 public:
3284   /// Find the maximal addressing mode that a load/store of V can fold,
3285   /// given an access type of AccessTy.  This returns a list of involved
3286   /// instructions in AddrModeInsts.
3287   /// \p InsertedInsts The instructions inserted by other CodeGenPrepare
3288   /// optimizations.
3289   /// \p PromotedInsts maps the instructions to their type before promotion.
3290   /// \p TPT The ongoing transaction where every action should be registered.
3291   static ExtAddrMode
3292   Match(Value *V, Type *AccessTy, unsigned AS, Instruction *MemoryInst,
3293         SmallVectorImpl<Instruction *> &AddrModeInsts,
3294         const TargetLowering &TLI, const LoopInfo &LI,
3295         const std::function<const DominatorTree &()> getDTFn,
3296         const TargetRegisterInfo &TRI, const SetOfInstrs &InsertedInsts,
3297         InstrToOrigTy &PromotedInsts, TypePromotionTransaction &TPT,
3298         std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
3299         bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
3300     ExtAddrMode Result;
3301 
3302     bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI, LI, getDTFn,
3303                                          AccessTy, AS, MemoryInst, Result,
3304                                          InsertedInsts, PromotedInsts, TPT,
3305                                          LargeOffsetGEP, OptSize, PSI, BFI)
3306                        .matchAddr(V, 0);
3307     (void)Success;
3308     assert(Success && "Couldn't select *anything*?");
3309     return Result;
3310   }
3311 
3312 private:
3313   bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
3314   bool matchAddr(Value *Addr, unsigned Depth);
3315   bool matchOperationAddr(User *AddrInst, unsigned Opcode, unsigned Depth,
3316                           bool *MovedAway = nullptr);
3317   bool isProfitableToFoldIntoAddressingMode(Instruction *I,
3318                                             ExtAddrMode &AMBefore,
3319                                             ExtAddrMode &AMAfter);
3320   bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
3321   bool isPromotionProfitable(unsigned NewCost, unsigned OldCost,
3322                              Value *PromotedOperand) const;
3323 };
3324 
3325 class PhiNodeSet;
3326 
3327 /// An iterator for PhiNodeSet.
3328 class PhiNodeSetIterator {
3329   PhiNodeSet *const Set;
3330   size_t CurrentIndex = 0;
3331 
3332 public:
3333   /// The constructor. Start should point to either a valid element, or be equal
3334   /// to the size of the underlying SmallVector of the PhiNodeSet.
3335   PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start);
3336   PHINode *operator*() const;
3337   PhiNodeSetIterator &operator++();
3338   bool operator==(const PhiNodeSetIterator &RHS) const;
3339   bool operator!=(const PhiNodeSetIterator &RHS) const;
3340 };
3341 
3342 /// Keeps a set of PHINodes.
3343 ///
3344 /// This is a minimal set implementation for a specific use case:
3345 /// It is very fast when there are very few elements, but also provides good
3346 /// performance when there are many. It is similar to SmallPtrSet, but also
3347 /// provides iteration by insertion order, which is deterministic and stable
3348 /// across runs. It is also similar to SmallSetVector, but provides removing
3349 /// elements in O(1) time. This is achieved by not actually removing the element
3350 /// from the underlying vector, so comes at the cost of using more memory, but
3351 /// that is fine, since PhiNodeSets are used as short lived objects.
3352 class PhiNodeSet {
3353   friend class PhiNodeSetIterator;
3354 
3355   using MapType = SmallDenseMap<PHINode *, size_t, 32>;
3356   using iterator = PhiNodeSetIterator;
3357 
3358   /// Keeps the elements in the order of their insertion in the underlying
3359   /// vector. To achieve constant time removal, it never deletes any element.
3360   SmallVector<PHINode *, 32> NodeList;
3361 
3362   /// Keeps the elements in the underlying set implementation. This (and not the
3363   /// NodeList defined above) is the source of truth on whether an element
3364   /// is actually in the collection.
3365   MapType NodeMap;
3366 
3367   /// Points to the first valid (not deleted) element when the set is not empty
3368   /// and the value is not zero. Equal to the size of the underlying vector
3369   /// when the set is empty. When the value is 0, as in the beginning, the
3370   /// first element may or may not be valid.
3371   size_t FirstValidElement = 0;
3372 
3373 public:
3374   /// Inserts a new element to the collection.
3375   /// \returns true if the element is actually added, i.e. was not in the
3376   /// collection before the operation.
3377   bool insert(PHINode *Ptr) {
3378     if (NodeMap.insert(std::make_pair(Ptr, NodeList.size())).second) {
3379       NodeList.push_back(Ptr);
3380       return true;
3381     }
3382     return false;
3383   }
3384 
3385   /// Removes the element from the collection.
3386   /// \returns whether the element is actually removed, i.e. was in the
3387   /// collection before the operation.
3388   bool erase(PHINode *Ptr) {
3389     if (NodeMap.erase(Ptr)) {
3390       SkipRemovedElements(FirstValidElement);
3391       return true;
3392     }
3393     return false;
3394   }
3395 
3396   /// Removes all elements and clears the collection.
3397   void clear() {
3398     NodeMap.clear();
3399     NodeList.clear();
3400     FirstValidElement = 0;
3401   }
3402 
3403   /// \returns an iterator that will iterate the elements in the order of
3404   /// insertion.
3405   iterator begin() {
3406     if (FirstValidElement == 0)
3407       SkipRemovedElements(FirstValidElement);
3408     return PhiNodeSetIterator(this, FirstValidElement);
3409   }
3410 
3411   /// \returns an iterator that points to the end of the collection.
3412   iterator end() { return PhiNodeSetIterator(this, NodeList.size()); }
3413 
3414   /// Returns the number of elements in the collection.
3415   size_t size() const { return NodeMap.size(); }
3416 
3417   /// \returns 1 if the given element is in the collection, and 0 otherwise.
3418   size_t count(PHINode *Ptr) const { return NodeMap.count(Ptr); }
3419 
3420 private:
3421   /// Updates the CurrentIndex so that it will point to a valid element.
3422   ///
3423   /// If the element of NodeList at CurrentIndex is valid, it does not
3424   /// change it. If there are no more valid elements, it updates CurrentIndex
3425   /// to point to the end of the NodeList.
3426   void SkipRemovedElements(size_t &CurrentIndex) {
3427     while (CurrentIndex < NodeList.size()) {
3428       auto it = NodeMap.find(NodeList[CurrentIndex]);
3429       // If the element has been deleted and added again later, NodeMap will
3430       // point to a different index, so CurrentIndex will still be invalid.
3431       if (it != NodeMap.end() && it->second == CurrentIndex)
3432         break;
3433       ++CurrentIndex;
3434     }
3435   }
3436 };
3437 
3438 PhiNodeSetIterator::PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start)
3439     : Set(Set), CurrentIndex(Start) {}
3440 
3441 PHINode *PhiNodeSetIterator::operator*() const {
3442   assert(CurrentIndex < Set->NodeList.size() &&
3443          "PhiNodeSet access out of range");
3444   return Set->NodeList[CurrentIndex];
3445 }
3446 
3447 PhiNodeSetIterator &PhiNodeSetIterator::operator++() {
3448   assert(CurrentIndex < Set->NodeList.size() &&
3449          "PhiNodeSet access out of range");
3450   ++CurrentIndex;
3451   Set->SkipRemovedElements(CurrentIndex);
3452   return *this;
3453 }
3454 
3455 bool PhiNodeSetIterator::operator==(const PhiNodeSetIterator &RHS) const {
3456   return CurrentIndex == RHS.CurrentIndex;
3457 }
3458 
3459 bool PhiNodeSetIterator::operator!=(const PhiNodeSetIterator &RHS) const {
3460   return !((*this) == RHS);
3461 }
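
// Editorial sketch of the PhiNodeSet semantics described above (P1 and P2 are
// hypothetical PHINode pointers): erase() only drops the node from NodeMap, so
// it is O(1); iteration skips the stale NodeList slot via SkipRemovedElements.
//
//   PhiNodeSet Set;
//   Set.insert(P1);          // true: newly added, recorded at index 0
//   Set.insert(P1);          // false: already present
//   Set.insert(P2);          // true
//   Set.erase(P1);           // true: removed from NodeMap only
//   for (PHINode *P : Set)   // visits P2 only, in insertion order
//     LLVM_DEBUG(dbgs() << *P << "\n");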
3462 
3463 /// Keep track of simplification of Phi nodes.
3464 /// Accept the set of all phi nodes and erase a phi node from this set
3465 /// if it is simplified.
3466 class SimplificationTracker {
3467   DenseMap<Value *, Value *> Storage;
3468   const SimplifyQuery &SQ;
3469   // Tracks newly created Phi nodes. The elements are iterated by insertion
3470   // order.
3471   PhiNodeSet AllPhiNodes;
3472   // Tracks newly created Select nodes.
3473   SmallPtrSet<SelectInst *, 32> AllSelectNodes;
3474 
3475 public:
3476   SimplificationTracker(const SimplifyQuery &sq) : SQ(sq) {}
3477 
3478   Value *Get(Value *V) {
3479     do {
3480       auto SV = Storage.find(V);
3481       if (SV == Storage.end())
3482         return V;
3483       V = SV->second;
3484     } while (true);
3485   }
3486 
3487   Value *Simplify(Value *Val) {
3488     SmallVector<Value *, 32> WorkList;
3489     SmallPtrSet<Value *, 32> Visited;
3490     WorkList.push_back(Val);
3491     while (!WorkList.empty()) {
3492       auto *P = WorkList.pop_back_val();
3493       if (!Visited.insert(P).second)
3494         continue;
3495       if (auto *PI = dyn_cast<Instruction>(P))
3496         if (Value *V = simplifyInstruction(cast<Instruction>(PI), SQ)) {
3497           for (auto *U : PI->users())
3498             WorkList.push_back(cast<Value>(U));
3499           Put(PI, V);
3500           PI->replaceAllUsesWith(V);
3501           if (auto *PHI = dyn_cast<PHINode>(PI))
3502             AllPhiNodes.erase(PHI);
3503           if (auto *Select = dyn_cast<SelectInst>(PI))
3504             AllSelectNodes.erase(Select);
3505           PI->eraseFromParent();
3506         }
3507     }
3508     return Get(Val);
3509   }
3510 
3511   void Put(Value *From, Value *To) { Storage.insert({From, To}); }
3512 
3513   void ReplacePhi(PHINode *From, PHINode *To) {
3514     Value *OldReplacement = Get(From);
3515     while (OldReplacement != From) {
3516       From = To;
3517       To = dyn_cast<PHINode>(OldReplacement);
3518       OldReplacement = Get(From);
3519     }
3520     assert(To && Get(To) == To && "Replacement PHI node is already replaced.");
3521     Put(From, To);
3522     From->replaceAllUsesWith(To);
3523     AllPhiNodes.erase(From);
3524     From->eraseFromParent();
3525   }
3526 
3527   PhiNodeSet &newPhiNodes() { return AllPhiNodes; }
3528 
3529   void insertNewPhi(PHINode *PN) { AllPhiNodes.insert(PN); }
3530 
3531   void insertNewSelect(SelectInst *SI) { AllSelectNodes.insert(SI); }
3532 
3533   unsigned countNewPhiNodes() const { return AllPhiNodes.size(); }
3534 
3535   unsigned countNewSelectNodes() const { return AllSelectNodes.size(); }
3536 
3537   void destroyNewNodes(Type *CommonType) {
3538     // For safe erasing, replace the uses with dummy value first.
3539     auto *Dummy = PoisonValue::get(CommonType);
3540     for (auto *I : AllPhiNodes) {
3541       I->replaceAllUsesWith(Dummy);
3542       I->eraseFromParent();
3543     }
3544     AllPhiNodes.clear();
3545     for (auto *I : AllSelectNodes) {
3546       I->replaceAllUsesWith(Dummy);
3547       I->eraseFromParent();
3548     }
3549     AllSelectNodes.clear();
3550   }
3551 };
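
// Editorial note (hypothetical values A, B and C): SimplificationTracker::Get()
// follows the Storage chain transitively, so after Put(A, B) and Put(B, C),
// Get(A) returns C. This is why the combiner below resolves Map entries through
// ST.Get() once placeholders may have been simplified away.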
3552 
3553 /// A helper class for combining addressing modes.
3554 class AddressingModeCombiner {
3555   typedef DenseMap<Value *, Value *> FoldAddrToValueMapping;
3556   typedef std::pair<PHINode *, PHINode *> PHIPair;
3557 
3558 private:
3559   /// The addressing modes we've collected.
3560   SmallVector<ExtAddrMode, 16> AddrModes;
3561 
3562   /// The field in which the AddrModes differ, when we have more than one.
3563   ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField;
3564 
3565   /// Are the AddrModes that we have all just equal to their original values?
3566   bool AllAddrModesTrivial = true;
3567 
3568   /// Common Type for all different fields in addressing modes.
3569   Type *CommonType = nullptr;
3570 
3571   /// SimplifyQuery for simplifyInstruction utility.
3572   const SimplifyQuery &SQ;
3573 
3574   /// Original Address.
3575   Value *Original;
3576 
3577 public:
3578   AddressingModeCombiner(const SimplifyQuery &_SQ, Value *OriginalValue)
3579       : SQ(_SQ), Original(OriginalValue) {}
3580 
3581   /// Get the combined AddrMode
3582   const ExtAddrMode &getAddrMode() const { return AddrModes[0]; }
3583 
3584   /// Add a new AddrMode if it's compatible with the AddrModes we already
3585   /// have.
3586   /// \return True iff we succeeded in doing so.
3587   bool addNewAddrMode(ExtAddrMode &NewAddrMode) {
3588     // Take note of whether we have any non-trivial AddrModes, as we need to
3589     // detect when all AddrModes are trivial, as then we would introduce a phi
3590     // or select which just duplicates what's already there.
3591     AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial();
3592 
3593     // If this is the first addrmode then everything is fine.
3594     if (AddrModes.empty()) {
3595       AddrModes.emplace_back(NewAddrMode);
3596       return true;
3597     }
3598 
3599     // Figure out how different this is from the other address modes, which we
3600     // can do just by comparing against the first one given that we only care
3601     // about the cumulative difference.
3602     ExtAddrMode::FieldName ThisDifferentField =
3603         AddrModes[0].compare(NewAddrMode);
3604     if (DifferentField == ExtAddrMode::NoField)
3605       DifferentField = ThisDifferentField;
3606     else if (DifferentField != ThisDifferentField)
3607       DifferentField = ExtAddrMode::MultipleFields;
3608 
3609     // If NewAddrMode differs in more than one dimension we cannot handle it.
3610     bool CanHandle = DifferentField != ExtAddrMode::MultipleFields;
3611 
3612     // If Scale Field is different then we reject.
3613     CanHandle = CanHandle && DifferentField != ExtAddrMode::ScaleField;
3614 
3615     // We must also reject the case where the base offset differs and the
3616     // scale reg is not null: we cannot handle this case because the merge of
3617     // the different offsets would have to be used as the ScaleReg.
3618     CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseOffsField ||
3619                               !NewAddrMode.ScaledReg);
3620 
3621     // We must also reject the case where the GV differs and a BaseReg is
3622     // installed, because we want to use the base reg as a merge of GV values.
3623     CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseGVField ||
3624                               !NewAddrMode.HasBaseReg);
3625 
3626     // Even if NewAddrMode is the same we still need to collect it, because the
3627     // original value is different, and later we will need all original values
3628     // as anchors when finding the common Phi node.
3629     if (CanHandle)
3630       AddrModes.emplace_back(NewAddrMode);
3631     else
3632       AddrModes.clear();
3633 
3634     return CanHandle;
3635   }
3636 
3637   /// Combine the addressing modes we've collected into a single
3638   /// addressing mode.
3639   /// \return True iff we successfully combined them or we only had one so
3640   /// didn't need to combine them anyway.
3641   bool combineAddrModes() {
3642     // If we have no AddrModes then they can't be combined.
3643     if (AddrModes.size() == 0)
3644       return false;
3645 
3646     // A single AddrMode can trivially be combined.
3647     if (AddrModes.size() == 1 || DifferentField == ExtAddrMode::NoField)
3648       return true;
3649 
3650     // If the AddrModes we collected are all just equal to the value they are
3651     // derived from then combining them wouldn't do anything useful.
3652     if (AllAddrModesTrivial)
3653       return false;
3654 
3655     if (!addrModeCombiningAllowed())
3656       return false;
3657 
3658     // Build a map from <original value, basic block where we saw it> to the
3659     // value of the base register.
3660     // Bail out if there is no common type.
3661     FoldAddrToValueMapping Map;
3662     if (!initializeMap(Map))
3663       return false;
3664 
3665     Value *CommonValue = findCommon(Map);
3666     if (CommonValue)
3667       AddrModes[0].SetCombinedField(DifferentField, CommonValue, AddrModes);
3668     return CommonValue != nullptr;
3669   }
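
  // Editorial illustration of the single-field merge (hypothetical IR, in the
  // same spirit as the findCommon() example below): given
  //   BB1: p1 = gep b1, 16   -> { BaseReg = b1, BaseOffs = 16 }
  //   BB2: p2 = gep b2, 16   -> { BaseReg = b2, BaseOffs = 16 }
  //   BB3: p  = phi [p1, BB1], [p2, BB2]; v = load p
  // the modes differ only in BaseRegField, so combineAddrModes() asks
  // findCommon() to build (or reuse) "phi [b1, BB1], [b2, BB2]" in BB3 and
  // installs it via SetCombinedField(), leaving one ExtAddrMode for both paths.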
3670 
3671 private:
3672   /// Initialize Map with anchor values. For each address seen we set the
3673   /// value of the differing field seen in that address.
3674   /// At the same time we find a common type for the differing field, which we
3675   /// will use to create new Phi/Select nodes. Keep it in the CommonType field.
3676   /// Return false if no common type is found.
3677   bool initializeMap(FoldAddrToValueMapping &Map) {
3678     // Keep track of keys where the value is null. We will need to replace them
3679     // with a constant null once we know the common type.
3680     SmallVector<Value *, 2> NullValue;
3681     Type *IntPtrTy = SQ.DL.getIntPtrType(AddrModes[0].OriginalValue->getType());
3682     for (auto &AM : AddrModes) {
3683       Value *DV = AM.GetFieldAsValue(DifferentField, IntPtrTy);
3684       if (DV) {
3685         auto *Type = DV->getType();
3686         if (CommonType && CommonType != Type)
3687           return false;
3688         CommonType = Type;
3689         Map[AM.OriginalValue] = DV;
3690       } else {
3691         NullValue.push_back(AM.OriginalValue);
3692       }
3693     }
3694     assert(CommonType && "At least one non-null value must be!");
3695     for (auto *V : NullValue)
3696       Map[V] = Constant::getNullValue(CommonType);
3697     return true;
3698   }
3699 
3700   /// We have a mapping between a value A and another value B, where B was a
3701   /// field in the addressing mode represented by A. We also have an original
3702   /// value C representing the address we start with. Traversing from C through
3703   /// phis and selects we ended up with the A's in the map. This utility
3704   /// function tries to find a value V which is a field in the addressing mode
3705   /// of C such that, traversing through phi nodes and selects, we end up at
3706   /// the corresponding values B in the map. It creates new Phis/Selects if needed.
3707   // The simple example looks as follows:
3708   // BB1:
3709   //   p1 = b1 + 40
3710   //   br cond BB2, BB3
3711   // BB2:
3712   //   p2 = b2 + 40
3713   //   br BB3
3714   // BB3:
3715   //   p = phi [p1, BB1], [p2, BB2]
3716   //   v = load p
3717   // Map is
3718   //   p1 -> b1
3719   //   p2 -> b2
3720   // Request is
3721   //   p -> ?
3722   // The function tries to find or build phi [b1, BB1], [b2, BB2] in BB3.
3723   Value *findCommon(FoldAddrToValueMapping &Map) {
3724     // Tracks the simplification of newly created phi nodes. The reason we use
3725     // this mapping is that we will add newly created Phi nodes to AddrToBase.
3726     // Simplification of Phi nodes is recursive, so some Phi node may
3727     // be simplified after we have added it to AddrToBase. In reality this
3728     // simplification is possible only if the original phis/selects were not
3729     // simplified yet.
3730     // Using this mapping we can find the current value in AddrToBase.
3731     SimplificationTracker ST(SQ);
3732 
3733     // First step, DFS to create PHI nodes for all intermediate blocks.
3734     // Also fill traverse order for the second step.
3735     SmallVector<Value *, 32> TraverseOrder;
3736     InsertPlaceholders(Map, TraverseOrder, ST);
3737 
3738     // Second Step, fill new nodes by merged values and simplify if possible.
3739     FillPlaceholders(Map, TraverseOrder, ST);
3740 
3741     if (!AddrSinkNewSelects && ST.countNewSelectNodes() > 0) {
3742       ST.destroyNewNodes(CommonType);
3743       return nullptr;
3744     }
3745 
3746     // Now we'd like to match the new Phi nodes to existing ones.
3747     unsigned PhiNotMatchedCount = 0;
3748     if (!MatchPhiSet(ST, AddrSinkNewPhis, PhiNotMatchedCount)) {
3749       ST.destroyNewNodes(CommonType);
3750       return nullptr;
3751     }
3752 
3753     auto *Result = ST.Get(Map.find(Original)->second);
3754     if (Result) {
3755       NumMemoryInstsPhiCreated += ST.countNewPhiNodes() + PhiNotMatchedCount;
3756       NumMemoryInstsSelectCreated += ST.countNewSelectNodes();
3757     }
3758     return Result;
3759   }
3760 
3761   /// Try to match PHI node to Candidate.
3762   /// Matcher tracks the matched Phi nodes.
3763   bool MatchPhiNode(PHINode *PHI, PHINode *Candidate,
3764                     SmallSetVector<PHIPair, 8> &Matcher,
3765                     PhiNodeSet &PhiNodesToMatch) {
3766     SmallVector<PHIPair, 8> WorkList;
3767     Matcher.insert({PHI, Candidate});
3768     SmallSet<PHINode *, 8> MatchedPHIs;
3769     MatchedPHIs.insert(PHI);
3770     WorkList.push_back({PHI, Candidate});
3771     SmallSet<PHIPair, 8> Visited;
3772     while (!WorkList.empty()) {
3773       auto Item = WorkList.pop_back_val();
3774       if (!Visited.insert(Item).second)
3775         continue;
3776       // We iterate over all incoming values of the Phi to compare them.
3777       // If the values differ, both of them are Phis, the first one is a Phi
3778       // we added (subject to match), and both are in the same basic block,
3779       // then we can match our pair if those values match. So we state that
3780       // these values match and add them to the work list to verify that.
3781       for (auto *B : Item.first->blocks()) {
3782         Value *FirstValue = Item.first->getIncomingValueForBlock(B);
3783         Value *SecondValue = Item.second->getIncomingValueForBlock(B);
3784         if (FirstValue == SecondValue)
3785           continue;
3786 
3787         PHINode *FirstPhi = dyn_cast<PHINode>(FirstValue);
3788         PHINode *SecondPhi = dyn_cast<PHINode>(SecondValue);
3789 
3790         // If one of them is not a Phi, or
3791         // the first one is not a Phi node from the set we'd like to match, or
3792         // the Phi nodes are from different basic blocks, then
3793         // we will not be able to match.
3794         if (!FirstPhi || !SecondPhi || !PhiNodesToMatch.count(FirstPhi) ||
3795             FirstPhi->getParent() != SecondPhi->getParent())
3796           return false;
3797 
3798         // If we already matched them then continue.
3799         if (Matcher.count({FirstPhi, SecondPhi}))
3800           continue;
3801         // So the values are different and do not match. So we need them to
3802         // match. (But we register no more than one match per PHI node, so that
3803         // we won't later try to replace them twice.)
3804         if (MatchedPHIs.insert(FirstPhi).second)
3805           Matcher.insert({FirstPhi, SecondPhi});
3806         // But we must check it.
3807         WorkList.push_back({FirstPhi, SecondPhi});
3808       }
3809     }
3810     return true;
3811   }
3812 
3813   /// For the given set of PHI nodes (in the SimplificationTracker) try
3814   /// to find their equivalents.
3815   /// Returns false if this matching fails and creation of new Phi is disabled.
3816   bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes,
3817                    unsigned &PhiNotMatchedCount) {
3818     // Matched and PhiNodesToMatch iterate their elements in a deterministic
3819     // order, so the replacements (ReplacePhi) are also done in a deterministic
3820     // order.
3821     SmallSetVector<PHIPair, 8> Matched;
3822     SmallPtrSet<PHINode *, 8> WillNotMatch;
3823     PhiNodeSet &PhiNodesToMatch = ST.newPhiNodes();
3824     while (PhiNodesToMatch.size()) {
3825       PHINode *PHI = *PhiNodesToMatch.begin();
3826 
3827       // Add ourselves; if no Phi node in the basic block matches, we do not match.
3828       WillNotMatch.clear();
3829       WillNotMatch.insert(PHI);
3830 
3831       // Traverse all Phis until we find an equivalent or fail to do so.
3832       bool IsMatched = false;
3833       for (auto &P : PHI->getParent()->phis()) {
3834         // Skip new Phi nodes.
3835         if (PhiNodesToMatch.count(&P))
3836           continue;
3837         if ((IsMatched = MatchPhiNode(PHI, &P, Matched, PhiNodesToMatch)))
3838           break;
3839         // If it does not match, collect all Phi nodes from the matcher.
3840         // If we end up with no match, then all these Phi nodes will not match
3841         // later.
3842         for (auto M : Matched)
3843           WillNotMatch.insert(M.first);
3844         Matched.clear();
3845       }
3846       if (IsMatched) {
3847         // Replace all matched values and erase them.
3848         for (auto MV : Matched)
3849           ST.ReplacePhi(MV.first, MV.second);
3850         Matched.clear();
3851         continue;
3852       }
3853       // If we are not allowed to create new nodes then bail out.
3854       if (!AllowNewPhiNodes)
3855         return false;
3856       // Just remove all seen values in matcher. They will not match anything.
3857       PhiNotMatchedCount += WillNotMatch.size();
3858       for (auto *P : WillNotMatch)
3859         PhiNodesToMatch.erase(P);
3860     }
3861     return true;
3862   }
3863   /// Fill the placeholders with values from predecessors and simplify them.
3864   void FillPlaceholders(FoldAddrToValueMapping &Map,
3865                         SmallVectorImpl<Value *> &TraverseOrder,
3866                         SimplificationTracker &ST) {
3867     while (!TraverseOrder.empty()) {
3868       Value *Current = TraverseOrder.pop_back_val();
3869       assert(Map.find(Current) != Map.end() && "No node to fill!!!");
3870       Value *V = Map[Current];
3871 
3872       if (SelectInst *Select = dyn_cast<SelectInst>(V)) {
3873         // CurrentValue also must be Select.
3874         auto *CurrentSelect = cast<SelectInst>(Current);
3875         auto *TrueValue = CurrentSelect->getTrueValue();
3876         assert(Map.find(TrueValue) != Map.end() && "No True Value!");
3877         Select->setTrueValue(ST.Get(Map[TrueValue]));
3878         auto *FalseValue = CurrentSelect->getFalseValue();
3879         assert(Map.find(FalseValue) != Map.end() && "No False Value!");
3880         Select->setFalseValue(ST.Get(Map[FalseValue]));
3881       } else {
3882         // Must be a Phi node then.
3883         auto *PHI = cast<PHINode>(V);
3884         // Fill the Phi node with values from predecessors.
3885         for (auto *B : predecessors(PHI->getParent())) {
3886           Value *PV = cast<PHINode>(Current)->getIncomingValueForBlock(B);
3887           assert(Map.find(PV) != Map.end() && "No predecessor Value!");
3888           PHI->addIncoming(ST.Get(Map[PV]), B);
3889         }
3890       }
3891       Map[Current] = ST.Simplify(V);
3892     }
3893   }
3894 
3895   /// Starting from the original value, recursively iterates over the def-use
3896   /// chain up to known ending values represented in a map. For each traversed
3897   /// phi/select inserts a placeholder Phi or Select.
3898   /// Reports all newly created Phi/Select nodes by adding them to the set.
3899   /// Also reports the order in which the values have been traversed.
3900   void InsertPlaceholders(FoldAddrToValueMapping &Map,
3901                           SmallVectorImpl<Value *> &TraverseOrder,
3902                           SimplificationTracker &ST) {
3903     SmallVector<Value *, 32> Worklist;
3904     assert((isa<PHINode>(Original) || isa<SelectInst>(Original)) &&
3905            "Address must be a Phi or Select node");
3906     auto *Dummy = PoisonValue::get(CommonType);
3907     Worklist.push_back(Original);
3908     while (!Worklist.empty()) {
3909       Value *Current = Worklist.pop_back_val();
3910       // If it is already visited or is an ending value, then skip it.
3911       if (Map.find(Current) != Map.end())
3912         continue;
3913       TraverseOrder.push_back(Current);
3914 
3915       // CurrentValue must be a Phi node or select. All others must be covered
3916       // by anchors.
3917       if (SelectInst *CurrentSelect = dyn_cast<SelectInst>(Current)) {
3918         // Is it OK to get metadata from OrigSelect?!
3919         // Create a Select placeholder with dummy value.
3920         SelectInst *Select = SelectInst::Create(
3921             CurrentSelect->getCondition(), Dummy, Dummy,
3922             CurrentSelect->getName(), CurrentSelect, CurrentSelect);
3923         Map[Current] = Select;
3924         ST.insertNewSelect(Select);
3925         // We are interested in True and False values.
3926         Worklist.push_back(CurrentSelect->getTrueValue());
3927         Worklist.push_back(CurrentSelect->getFalseValue());
3928       } else {
3929         // It must be a Phi node then.
3930         PHINode *CurrentPhi = cast<PHINode>(Current);
3931         unsigned PredCount = CurrentPhi->getNumIncomingValues();
3932         PHINode *PHI =
3933             PHINode::Create(CommonType, PredCount, "sunk_phi", CurrentPhi);
3934         Map[Current] = PHI;
3935         ST.insertNewPhi(PHI);
3936         append_range(Worklist, CurrentPhi->incoming_values());
3937       }
3938     }
3939   }
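  // A minimal sketch of placeholder insertion (illustration only): for a
  // hypothetical address
  //   %a = phi ptr [ %p, %BB1 ], [ %s, %BB2 ]
  // the traversal creates an empty "sunk_phi" placeholder of CommonType,
  // records %a -> sunk_phi in Map, and pushes %p and %s onto the worklist
  // (Select placeholders get the poison Dummy as both arms instead).
  // FillPlaceholders then walks TraverseOrder in reverse and fills each
  // placeholder's operands from whatever Map associates with %p and %s.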
3940 
3941   bool addrModeCombiningAllowed() {
3942     if (DisableComplexAddrModes)
3943       return false;
3944     switch (DifferentField) {
3945     default:
3946       return false;
3947     case ExtAddrMode::BaseRegField:
3948       return AddrSinkCombineBaseReg;
3949     case ExtAddrMode::BaseGVField:
3950       return AddrSinkCombineBaseGV;
3951     case ExtAddrMode::BaseOffsField:
3952       return AddrSinkCombineBaseOffs;
3953     case ExtAddrMode::ScaledRegField:
3954       return AddrSinkCombineScaledReg;
3955     }
3956   }
3957 };
3958 } // end anonymous namespace
3959 
3960 /// Try adding ScaleReg*Scale to the current addressing mode.
3961 /// Return true and update AddrMode if this addr mode is legal for the target,
3962 /// false if not.
3963 bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale,
3964                                              unsigned Depth) {
3965   // If Scale is 1, then this is the same as adding ScaleReg to the addressing
3966   // mode.  Just process that directly.
3967   if (Scale == 1)
3968     return matchAddr(ScaleReg, Depth);
3969 
3970   // If the scale is 0, it takes nothing to add this.
3971   if (Scale == 0)
3972     return true;
3973 
3974   // If we already have a scale of this value, we can add to it, otherwise, we
3975   // need an available scale field.
3976   if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
3977     return false;
3978 
3979   ExtAddrMode TestAddrMode = AddrMode;
3980 
3981   // Add scale to turn X*4+X*3 -> X*7.  This could also do things like
3982   // [A+B + A*7] -> [B+A*8].
3983   TestAddrMode.Scale += Scale;
3984   TestAddrMode.ScaledReg = ScaleReg;
3985 
3986   // If the new address isn't legal, bail out.
3987   if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
3988     return false;
3989 
3990   // It was legal, so commit it.
3991   AddrMode = TestAddrMode;
3992 
3993   // Okay, we decided that we can add ScaleReg+Scale to AddrMode.  Check now
3994   // to see if ScaleReg is actually X+C.  If so, we can turn this into adding
3995   // X*Scale + C*Scale to addr mode. If we found available IV increment, do not
3996   // go any further: we can reuse it and cannot eliminate it.
3997   ConstantInt *CI = nullptr;
3998   Value *AddLHS = nullptr;
3999   if (isa<Instruction>(ScaleReg) && // not a constant expr.
4000       match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI))) &&
4001       !isIVIncrement(ScaleReg, &LI) && CI->getValue().isSignedIntN(64)) {
4002     TestAddrMode.InBounds = false;
4003     TestAddrMode.ScaledReg = AddLHS;
4004     TestAddrMode.BaseOffs += CI->getSExtValue() * TestAddrMode.Scale;
4005 
4006     // If this addressing mode is legal, commit it and remember that we folded
4007     // this instruction.
4008     if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
4009       AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
4010       AddrMode = TestAddrMode;
4011       return true;
4012     }
4013     // Restore status quo.
4014     TestAddrMode = AddrMode;
4015   }
4016 
4017   // If this is an add recurrence with a constant step, return the increment
4018   // instruction and the canonicalized step.
4019   auto GetConstantStep =
4020       [this](const Value *V) -> std::optional<std::pair<Instruction *, APInt>> {
4021     auto *PN = dyn_cast<PHINode>(V);
4022     if (!PN)
4023       return std::nullopt;
4024     auto IVInc = getIVIncrement(PN, &LI);
4025     if (!IVInc)
4026       return std::nullopt;
4027     // TODO: The result of the intrinsics above is two's complement. However when
4028     // the IV increment is expressed as add or sub, iv.next is potentially a poison
4029     // value. If it has nuw or nsw flags, we need to make sure that these flags are
4030     // inferable at the point of the memory instruction. Otherwise we are replacing
4031     // a well-defined two's complement computation with poison. Currently, to avoid
4032     // the potentially complex analysis needed to prove this, we reject such cases.
4033     if (auto *OIVInc = dyn_cast<OverflowingBinaryOperator>(IVInc->first))
4034       if (OIVInc->hasNoSignedWrap() || OIVInc->hasNoUnsignedWrap())
4035         return std::nullopt;
4036     if (auto *ConstantStep = dyn_cast<ConstantInt>(IVInc->second))
4037       return std::make_pair(IVInc->first, ConstantStep->getValue());
4038     return std::nullopt;
4039   };
4040 
4041   // Try to account for the following special case:
4042   // 1. ScaleReg is an induction variable;
4043   // 2. We use it with non-zero offset;
4044   // 3. IV's increment is available at the point of memory instruction.
4045   //
4046   // In this case, we may reuse the IV increment instead of the IV Phi to
4047   // achieve the following advantages:
4048   // 1. If the IV step matches the offset, we will have no need for the offset;
4049   // 2. Even if they don't match, we will reduce the overlap of the live ranges
4050   //    of the IV and its increment, which may lead to better register
4051   //    assignment.
4052   if (AddrMode.BaseOffs) {
4053     if (auto IVStep = GetConstantStep(ScaleReg)) {
4054       Instruction *IVInc = IVStep->first;
4055       // The following assert is important to ensure a lack of infinite loops.
4056       // This transform is (intentionally) the inverse of the one just above.
4057       // If they don't agree on the definition of an increment, we'd alternate
4058       // back and forth indefinitely.
4059       assert(isIVIncrement(IVInc, &LI) && "implied by GetConstantStep");
4060       APInt Step = IVStep->second;
4061       APInt Offset = Step * AddrMode.Scale;
4062       if (Offset.isSignedIntN(64)) {
4063         TestAddrMode.InBounds = false;
4064         TestAddrMode.ScaledReg = IVInc;
4065         TestAddrMode.BaseOffs -= Offset.getLimitedValue();
4066         // If this addressing mode is legal, commit it.
4067         // (Note that we defer the (expensive) domtree base legality check
4068         // to the very last possible point.)
4069         if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace) &&
4070             getDTFn().dominates(IVInc, MemoryInst)) {
4071           AddrModeInsts.push_back(cast<Instruction>(IVInc));
4072           AddrMode = TestAddrMode;
4073           return true;
4074         }
4075         // Restore status quo.
4076         TestAddrMode = AddrMode;
4077       }
4078     }
4079   }
4080 
4081   // Otherwise, just return what we have.
4082   return true;
4083 }
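// A worked example of the matching above (illustration only, hypothetical IR):
//   %idx  = add i64 %i, 4
//   %addr = getelementptr i64, ptr %base, i64 %idx    ; base + %idx*8
// With Scale == 8 and ScaleReg == %idx, the add is folded so the candidate
// mode becomes [%base + %i*8 + 32], kept only if the target reports it legal.
// The IV special case rewrites base + %iv*S + C into base + %iv.next*S +
// (C - Step*S), which is valid because %iv == %iv.next - Step, and is kept
// only when %iv.next dominates the memory instruction.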
4084 
4085 /// This is a little filter, which returns true if an addressing computation
4086 /// involving I might be folded into a load/store accessing it.
4087 /// This doesn't need to be perfect, but needs to accept at least
4088 /// the set of instructions that MatchOperationAddr can.
4089 static bool MightBeFoldableInst(Instruction *I) {
4090   switch (I->getOpcode()) {
4091   case Instruction::BitCast:
4092   case Instruction::AddrSpaceCast:
4093     // Don't touch identity bitcasts.
4094     if (I->getType() == I->getOperand(0)->getType())
4095       return false;
4096     return I->getType()->isIntOrPtrTy();
4097   case Instruction::PtrToInt:
4098     // PtrToInt is always a noop, as we know that the int type is pointer sized.
4099     return true;
4100   case Instruction::IntToPtr:
4101     // We know the input is intptr_t, so this is foldable.
4102     return true;
4103   case Instruction::Add:
4104     return true;
4105   case Instruction::Mul:
4106   case Instruction::Shl:
4107     // Can only handle X*C and X << C.
4108     return isa<ConstantInt>(I->getOperand(1));
4109   case Instruction::GetElementPtr:
4110     return true;
4111   default:
4112     return false;
4113   }
4114 }
4115 
4116 /// Check whether or not \p Val is a legal instruction for \p TLI.
4117 /// \note \p Val is assumed to be the product of some type promotion.
4118 /// Therefore if \p Val has an undefined state in \p TLI, this is assumed
4119 /// to be legal, as the non-promoted value would have had the same state.
4120 static bool isPromotedInstructionLegal(const TargetLowering &TLI,
4121                                        const DataLayout &DL, Value *Val) {
4122   Instruction *PromotedInst = dyn_cast<Instruction>(Val);
4123   if (!PromotedInst)
4124     return false;
4125   int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
4126   // If the ISDOpcode is undefined, it was undefined before the promotion.
4127   if (!ISDOpcode)
4128     return true;
4129   // Otherwise, check if the promoted instruction is legal or not.
4130   return TLI.isOperationLegalOrCustom(
4131       ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
4132 }
4133 
4134 namespace {
4135 
4136 /// Helper class to perform type promotion.
4137 class TypePromotionHelper {
4138   /// Utility function to add a promoted instruction \p ExtOpnd to
4139   /// \p PromotedInsts and record the type of extension we have seen.
4140   static void addPromotedInst(InstrToOrigTy &PromotedInsts,
4141                               Instruction *ExtOpnd, bool IsSExt) {
4142     ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
4143     InstrToOrigTy::iterator It = PromotedInsts.find(ExtOpnd);
4144     if (It != PromotedInsts.end()) {
4145       // If the new extension is same as original, the information in
4146       // PromotedInsts[ExtOpnd] is still correct.
4147       if (It->second.getInt() == ExtTy)
4148         return;
4149 
4150       // Now the new extension is different from old extension, we make
4151       // the type information invalid by setting extension type to
4152       // BothExtension.
4153       ExtTy = BothExtension;
4154     }
4155     PromotedInsts[ExtOpnd] = TypeIsSExt(ExtOpnd->getType(), ExtTy);
4156   }
4157 
4158   /// Utility function to query the original type of instruction \p Opnd
4159   /// with a matched extension type. If the extension doesn't match, we
4160   /// cannot use the information we had on the original type.
4161   /// BothExtension doesn't match any extension type.
4162   static const Type *getOrigType(const InstrToOrigTy &PromotedInsts,
4163                                  Instruction *Opnd, bool IsSExt) {
4164     ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
4165     InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
4166     if (It != PromotedInsts.end() && It->second.getInt() == ExtTy)
4167       return It->second.getPointer();
4168     return nullptr;
4169   }
4170 
4171   /// Utility function to check whether or not a sign or zero extension
4172   /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by
4173   /// either using the operands of \p Inst or promoting \p Inst.
4174   /// The type of the extension is defined by \p IsSExt.
4175   /// In other words, check if:
4176   /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType.
4177   /// #1 Promotion applies:
4178   /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...).
4179   /// #2 Operand reuses:
4180   /// ext opnd1 to ConsideredExtType.
4181   /// \p PromotedInsts maps the instructions to their type before promotion.
4182   static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType,
4183                             const InstrToOrigTy &PromotedInsts, bool IsSExt);
4184 
4185   /// Utility function to determine if \p OpIdx should be promoted when
4186   /// promoting \p Inst.
4187   static bool shouldExtOperand(const Instruction *Inst, int OpIdx) {
4188     return !(isa<SelectInst>(Inst) && OpIdx == 0);
4189   }
4190 
4191   /// Utility function to promote the operand of \p Ext when this
4192   /// operand is a promotable trunc or sext or zext.
4193   /// \p PromotedInsts maps the instructions to their type before promotion.
4194   /// \p CreatedInstsCost[out] contains the cost of all instructions
4195   /// created to promote the operand of Ext.
4196   /// Newly added extensions are inserted in \p Exts.
4197   /// Newly added truncates are inserted in \p Truncs.
4198   /// Should never be called directly.
4199   /// \return The promoted value which is used instead of Ext.
4200   static Value *promoteOperandForTruncAndAnyExt(
4201       Instruction *Ext, TypePromotionTransaction &TPT,
4202       InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4203       SmallVectorImpl<Instruction *> *Exts,
4204       SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI);
4205 
4206   /// Utility function to promote the operand of \p Ext when this
4207   /// operand is promotable and is not a supported trunc or sext.
4208   /// \p PromotedInsts maps the instructions to their type before promotion.
4209   /// \p CreatedInstsCost[out] contains the cost of all the instructions
4210   /// created to promote the operand of Ext.
4211   /// Newly added extensions are inserted in \p Exts.
4212   /// Newly added truncates are inserted in \p Truncs.
4213   /// Should never be called directly.
4214   /// \return The promoted value which is used instead of Ext.
4215   static Value *promoteOperandForOther(Instruction *Ext,
4216                                        TypePromotionTransaction &TPT,
4217                                        InstrToOrigTy &PromotedInsts,
4218                                        unsigned &CreatedInstsCost,
4219                                        SmallVectorImpl<Instruction *> *Exts,
4220                                        SmallVectorImpl<Instruction *> *Truncs,
4221                                        const TargetLowering &TLI, bool IsSExt);
4222 
4223   /// \see promoteOperandForOther.
4224   static Value *signExtendOperandForOther(
4225       Instruction *Ext, TypePromotionTransaction &TPT,
4226       InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4227       SmallVectorImpl<Instruction *> *Exts,
4228       SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4229     return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
4230                                   Exts, Truncs, TLI, true);
4231   }
4232 
4233   /// \see promoteOperandForOther.
4234   static Value *zeroExtendOperandForOther(
4235       Instruction *Ext, TypePromotionTransaction &TPT,
4236       InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4237       SmallVectorImpl<Instruction *> *Exts,
4238       SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4239     return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
4240                                   Exts, Truncs, TLI, false);
4241   }
4242 
4243 public:
4244   /// Type for the utility function that promotes the operand of Ext.
4245   using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT,
4246                             InstrToOrigTy &PromotedInsts,
4247                             unsigned &CreatedInstsCost,
4248                             SmallVectorImpl<Instruction *> *Exts,
4249                             SmallVectorImpl<Instruction *> *Truncs,
4250                             const TargetLowering &TLI);
4251 
4252   /// Given a sign/zero extend instruction \p Ext, return the appropriate
4253   /// action to promote the operand of \p Ext instead of using Ext.
4254   /// \return NULL if no promotable action is possible with the current
4255   /// sign extension.
4256   /// \p InsertedInsts keeps track of all the instructions inserted by the
4257   /// other CodeGenPrepare optimizations. This information is important
4258   /// because we do not want to promote these instructions as CodeGenPrepare
4259   /// will reinsert them later. Thus creating an infinite loop: create/remove.
4260   /// \p PromotedInsts maps the instructions to their type before promotion.
4261   static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
4262                           const TargetLowering &TLI,
4263                           const InstrToOrigTy &PromotedInsts);
4264 };
4265 
4266 } // end anonymous namespace
4267 
4268 bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
4269                                         Type *ConsideredExtType,
4270                                         const InstrToOrigTy &PromotedInsts,
4271                                         bool IsSExt) {
4272   // The promotion helper does not know how to deal with vector types yet.
4273   // To be able to fix that, we would need to fix the places where we
4274   // statically extend, e.g., constants and such.
4275   if (Inst->getType()->isVectorTy())
4276     return false;
4277 
4278   // We can always get through zext.
4279   if (isa<ZExtInst>(Inst))
4280     return true;
4281 
4282   // sext(sext) is ok too.
4283   if (IsSExt && isa<SExtInst>(Inst))
4284     return true;
4285 
4286   // We can get through binary operator, if it is legal. In other words, the
4287   // binary operator must have a nuw or nsw flag.
4288   if (const auto *BinOp = dyn_cast<BinaryOperator>(Inst))
4289     if (isa<OverflowingBinaryOperator>(BinOp) &&
4290         ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
4291          (IsSExt && BinOp->hasNoSignedWrap())))
4292       return true;
4293 
4294   // ext(and(opnd, cst)) --> and(ext(opnd), ext(cst))
4295   if ((Inst->getOpcode() == Instruction::And ||
4296        Inst->getOpcode() == Instruction::Or))
4297     return true;
4298 
4299   // ext(xor(opnd, cst)) --> xor(ext(opnd), ext(cst))
4300   if (Inst->getOpcode() == Instruction::Xor) {
4301     // Make sure it is not a NOT.
4302     if (const auto *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1)))
4303       if (!Cst->getValue().isAllOnes())
4304         return true;
4305   }
4306 
4307   // zext(shrl(opnd, cst)) --> shrl(zext(opnd), zext(cst))
4308   // It may change a poisoned value into a regular value, like
4309   //     zext i32 (shrl i8 %val, 12)  -->  shrl i32 (zext i8 %val), 12
4310   //          poisoned value                    regular value
4311   // It should be OK since undef covers valid value.
4312   if (Inst->getOpcode() == Instruction::LShr && !IsSExt)
4313     return true;
4314 
4315   // and(ext(shl(opnd, cst)), cst) --> and(shl(ext(opnd), ext(cst)), cst)
4316   // It may change a poisoned value into a regular value, like
4317   //     zext i32 (shl i8 %val, 12)  -->  shl i32 (zext i8 %val), 12
4318   //          poisoned value                    regular value
4319   // It should be OK since undef covers valid value.
4320   if (Inst->getOpcode() == Instruction::Shl && Inst->hasOneUse()) {
4321     const auto *ExtInst = cast<const Instruction>(*Inst->user_begin());
4322     if (ExtInst->hasOneUse()) {
4323       const auto *AndInst = dyn_cast<const Instruction>(*ExtInst->user_begin());
4324       if (AndInst && AndInst->getOpcode() == Instruction::And) {
4325         const auto *Cst = dyn_cast<ConstantInt>(AndInst->getOperand(1));
4326         if (Cst &&
4327             Cst->getValue().isIntN(Inst->getType()->getIntegerBitWidth()))
4328           return true;
4329       }
4330     }
4331   }
4332 
4333   // Check if we can do the following simplification.
4334   // ext(trunc(opnd)) --> ext(opnd)
4335   if (!isa<TruncInst>(Inst))
4336     return false;
4337 
4338   Value *OpndVal = Inst->getOperand(0);
4339   // Check if we can use this operand in the extension.
4340   // If the type is larger than the result type of the extension, we cannot.
4341   if (!OpndVal->getType()->isIntegerTy() ||
4342       OpndVal->getType()->getIntegerBitWidth() >
4343           ConsideredExtType->getIntegerBitWidth())
4344     return false;
4345 
4346   // If the operand of the truncate is not an instruction, we will not have
4347   // any information on the dropped bits.
4348   // (Actually we could for constants, but it is not worth the extra logic.)
4349   Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
4350   if (!Opnd)
4351     return false;
4352 
4353   // Check if the source of the truncate is narrow enough.
4354   // I.e., check that the trunc just drops extended bits of the same kind as
4355   // the extension.
4356   // #1 get the type of the operand and check the kind of the extended bits.
4357   const Type *OpndType = getOrigType(PromotedInsts, Opnd, IsSExt);
4358   if (OpndType)
4359     ;
4360   else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
4361     OpndType = Opnd->getOperand(0)->getType();
4362   else
4363     return false;
4364 
4365   // #2 check that the truncate just drops extended bits.
4366   return Inst->getType()->getIntegerBitWidth() >=
4367          OpndType->getIntegerBitWidth();
4368 }
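// A minimal sketch of the ext(trunc) case above (illustration only):
//   %x = sext i16 %y to i64
//   %t = trunc i64 %x to i32
//   %e = sext i32 %t to i64
// The trunc only drops bits that are copies of %y's sign bit (i32 is still
// wider than the original i16), so canGetThrough returns true and the chain
// can later be rebuilt on top of %x. If %x were an arbitrary i64, the trunc
// would drop unknown bits and the function returns false.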
4369 
4370 TypePromotionHelper::Action TypePromotionHelper::getAction(
4371     Instruction *Ext, const SetOfInstrs &InsertedInsts,
4372     const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
4373   assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
4374          "Unexpected instruction type");
4375   Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
4376   Type *ExtTy = Ext->getType();
4377   bool IsSExt = isa<SExtInst>(Ext);
4378   // If the operand of the extension is not an instruction, we cannot
4379   // get through.
4380   // If it is, check that we can get through it.
4381   if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt))
4382     return nullptr;
4383 
4384   // Do not promote if the operand has been added by codegenprepare.
4385   // Otherwise, it means we are undoing an optimization that is likely to be
4386   // redone, thus causing a potential infinite loop.
4387   if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
4388     return nullptr;
4389 
4390   // SExt, ZExt or Trunc instructions.
4391   // Return the related handler.
4392   if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) ||
4393       isa<ZExtInst>(ExtOpnd))
4394     return promoteOperandForTruncAndAnyExt;
4395 
4396   // Regular instruction.
4397   // Abort early if we will have to insert non-free instructions.
4398   if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType()))
4399     return nullptr;
4400   return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther;
4401 }
4402 
4403 Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt(
4404     Instruction *SExt, TypePromotionTransaction &TPT,
4405     InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4406     SmallVectorImpl<Instruction *> *Exts,
4407     SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4408   // By construction, the operand of SExt is an instruction. Otherwise we cannot
4409   // get through it and this method should not be called.
4410   Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
4411   Value *ExtVal = SExt;
4412   bool HasMergedNonFreeExt = false;
4413   if (isa<ZExtInst>(SExtOpnd)) {
4414     // Replace s|zext(zext(opnd))
4415     // => zext(opnd).
4416     HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd);
4417     Value *ZExt =
4418         TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType());
4419     TPT.replaceAllUsesWith(SExt, ZExt);
4420     TPT.eraseInstruction(SExt);
4421     ExtVal = ZExt;
4422   } else {
4423     // Replace z|sext(trunc(opnd)) or sext(sext(opnd))
4424     // => z|sext(opnd).
4425     TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
4426   }
4427   CreatedInstsCost = 0;
4428 
4429   // Remove dead code.
4430   if (SExtOpnd->use_empty())
4431     TPT.eraseInstruction(SExtOpnd);
4432 
4433   // Check if the extension is still needed.
4434   Instruction *ExtInst = dyn_cast<Instruction>(ExtVal);
4435   if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) {
4436     if (ExtInst) {
4437       if (Exts)
4438         Exts->push_back(ExtInst);
4439       CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt;
4440     }
4441     return ExtVal;
4442   }
4443 
4444   // At this point we have: ext ty opnd to ty.
4445   // Reassign the uses of ExtInst to the opnd and remove ExtInst.
4446   Value *NextVal = ExtInst->getOperand(0);
4447   TPT.eraseInstruction(ExtInst, NextVal);
4448   return NextVal;
4449 }
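// A minimal sketch of the s|zext(zext) case above (illustration only):
//   %z = zext i16 %v to i32
//   %e = sext i32 %z to i64
// becomes a single
//   %z2 = zext i16 %v to i64
// since sign-extending a zero-extended (hence non-negative) value is the same
// as zero-extending it; the original pair is erased once it is dead. For
// sext(sext(x)) and ext(trunc(x)) the outer ext is simply re-pointed at the
// inner operand instead.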
4450 
4451 Value *TypePromotionHelper::promoteOperandForOther(
4452     Instruction *Ext, TypePromotionTransaction &TPT,
4453     InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4454     SmallVectorImpl<Instruction *> *Exts,
4455     SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI,
4456     bool IsSExt) {
4457   // By construction, the operand of Ext is an instruction. Otherwise we cannot
4458   // get through it and this method should not be called.
4459   Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0));
4460   CreatedInstsCost = 0;
4461   if (!ExtOpnd->hasOneUse()) {
4462     // ExtOpnd will be promoted.
4463     // All its uses, except Ext, will need to use a truncated value of the
4464     // promoted version.
4465     // Create the truncate now.
4466     Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType());
4467     if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) {
4468       // Insert it just after the definition.
4469       ITrunc->moveAfter(ExtOpnd);
4470       if (Truncs)
4471         Truncs->push_back(ITrunc);
4472     }
4473 
4474     TPT.replaceAllUsesWith(ExtOpnd, Trunc);
4475     // Restore the operand of Ext (which has been replaced by the previous call
4476     // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext.
4477     TPT.setOperand(Ext, 0, ExtOpnd);
4478   }
4479 
4480   // Get through the Instruction:
4481   // 1. Update its type.
4482   // 2. Replace the uses of Ext by Inst.
4483   // 3. Extend each operand that needs to be extended.
4484 
4485   // Remember the original type of the instruction before promotion.
4486   // This is useful to know that the high bits are sign extended bits.
4487   addPromotedInst(PromotedInsts, ExtOpnd, IsSExt);
4488   // Step #1.
4489   TPT.mutateType(ExtOpnd, Ext->getType());
4490   // Step #2.
4491   TPT.replaceAllUsesWith(Ext, ExtOpnd);
4492   // Step #3.
4493   Instruction *ExtForOpnd = Ext;
4494 
4495   LLVM_DEBUG(dbgs() << "Propagate Ext to operands\n");
4496   for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
4497        ++OpIdx) {
4498     LLVM_DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n');
4499     if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() ||
4500         !shouldExtOperand(ExtOpnd, OpIdx)) {
4501       LLVM_DEBUG(dbgs() << "No need to propagate\n");
4502       continue;
4503     }
4504     // Check if we can statically extend the operand.
4505     Value *Opnd = ExtOpnd->getOperand(OpIdx);
4506     if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
4507       LLVM_DEBUG(dbgs() << "Statically extend\n");
4508       unsigned BitWidth = Ext->getType()->getIntegerBitWidth();
4509       APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)
4510                             : Cst->getValue().zext(BitWidth);
4511       TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal));
4512       continue;
4513     }
4514     // UndefValues are typed, so we have to statically extend them.
4515     if (isa<UndefValue>(Opnd)) {
4516       LLVM_DEBUG(dbgs() << "Statically extend\n");
4517       TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
4518       continue;
4519     }
4520 
4521     // Otherwise we have to explicitly sign extend the operand.
4522     // Check if Ext was reused to extend an operand.
4523     if (!ExtForOpnd) {
4524       // If yes, create a new one.
4525       LLVM_DEBUG(dbgs() << "More operands to ext\n");
4526       Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType())
4527                                     : TPT.createZExt(Ext, Opnd, Ext->getType());
4528       if (!isa<Instruction>(ValForExtOpnd)) {
4529         TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
4530         continue;
4531       }
4532       ExtForOpnd = cast<Instruction>(ValForExtOpnd);
4533     }
4534     if (Exts)
4535       Exts->push_back(ExtForOpnd);
4536     TPT.setOperand(ExtForOpnd, 0, Opnd);
4537 
4538     // Move the sign extension before the insertion point.
4539     TPT.moveBefore(ExtForOpnd, ExtOpnd);
4540     TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd);
4541     CreatedInstsCost += !TLI.isExtFree(ExtForOpnd);
4542     // If more sext are required, new instructions will have to be created.
4543     ExtForOpnd = nullptr;
4544   }
4545   if (ExtForOpnd == Ext) {
4546     LLVM_DEBUG(dbgs() << "Extension is useless now\n");
4547     TPT.eraseInstruction(Ext);
4548   }
4549   return ExtOpnd;
4550 }
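// A minimal sketch of promoteOperandForOther (illustration only): promoting
//   %a = add nuw i32 %x, %y
//   %e = zext i32 %a to i64
// mutates %a to i64 and extends its operands, giving roughly
//   %x64 = zext i32 %x to i64
//   %y64 = zext i32 %y to i64
//   %a64 = add nuw i64 %x64, %y64       ; replaces all uses of %e
// and, if %a had users other than %e, a trunc of %a64 back to i32 is inserted
// for them. CreatedInstsCost counts only the extensions the target considers
// non-free.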
4551 
4552 /// Check whether or not promoting an instruction to a wider type is profitable.
4553 /// \p NewCost gives the cost of extension instructions created by the
4554 /// promotion.
4555 /// \p OldCost gives the cost of extension instructions before the promotion
4556 /// plus the number of instructions that have been
4557 /// matched in the addressing mode thanks to the promotion.
4558 /// \p PromotedOperand is the value that has been promoted.
4559 /// \return True if the promotion is profitable, false otherwise.
4560 bool AddressingModeMatcher::isPromotionProfitable(
4561     unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
4562   LLVM_DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost
4563                     << '\n');
4564   // The cost of the new extensions is greater than the cost of the
4565   // old extension plus what we folded.
4566   // This is not profitable.
4567   if (NewCost > OldCost)
4568     return false;
4569   if (NewCost < OldCost)
4570     return true;
4571   // The promotion is neutral but it may help folding the sign extension in
4572   // loads for instance.
4573   // Check that we did not create an illegal instruction.
4574   return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
4575 }
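// A worked example of the cost comparison above (illustration only): suppose
// the original ext is not free (cost 1) and promoting it let the matcher fold
// one extra instruction into the addressing mode, so OldCost == 2. If the
// promotion created one non-free extension, NewCost == 1 < 2 and it is kept;
// if it created three, NewCost == 3 > 2 and it is rolled back; on a tie the
// decision falls back to whether the promoted instruction is legal for the
// target.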
4576 
4577 /// Given an instruction or constant expr, see if we can fold the operation
4578 /// into the addressing mode. If so, update the addressing mode and return
4579 /// true, otherwise return false without modifying AddrMode.
4580 /// If \p MovedAway is not NULL, it reports whether or not \p AddrInst has
4581 /// been moved away instead of being folded into the addressing mode on success.
4582 /// If \p MovedAway == true, \p AddrInst will not be part of the addressing mode
4583 /// because it has been moved away.
4584 /// Thus AddrInst must not be added to the matched instructions.
4585 /// This state can happen when AddrInst is a sext, since it may be moved away.
4586 /// Therefore, AddrInst may not be valid when MovedAway is true and it must
4587 /// not be referenced anymore.
4588 bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
4589                                                unsigned Depth,
4590                                                bool *MovedAway) {
4591   // Avoid exponential behavior on extremely deep expression trees.
4592   if (Depth >= 5)
4593     return false;
4594 
4595   // By default, all matched instructions stay in place.
4596   if (MovedAway)
4597     *MovedAway = false;
4598 
4599   switch (Opcode) {
4600   case Instruction::PtrToInt:
4601     // PtrToInt is always a noop, as we know that the int type is pointer sized.
4602     return matchAddr(AddrInst->getOperand(0), Depth);
4603   case Instruction::IntToPtr: {
4604     auto AS = AddrInst->getType()->getPointerAddressSpace();
4605     auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
4606     // This inttoptr is a no-op if the integer type is pointer sized.
4607     if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy)
4608       return matchAddr(AddrInst->getOperand(0), Depth);
4609     return false;
4610   }
4611   case Instruction::BitCast:
4612     // BitCast is always a noop, and we can handle it as long as it is
4613     // int->int or pointer->pointer (we don't want int<->fp or something).
4614     if (AddrInst->getOperand(0)->getType()->isIntOrPtrTy() &&
4615         // Don't touch identity bitcasts.  These were probably put here by LSR,
4616         // and we don't want to mess around with them.  Assume it knows what it
4617         // is doing.
4618         AddrInst->getOperand(0)->getType() != AddrInst->getType())
4619       return matchAddr(AddrInst->getOperand(0), Depth);
4620     return false;
4621   case Instruction::AddrSpaceCast: {
4622     unsigned SrcAS =
4623         AddrInst->getOperand(0)->getType()->getPointerAddressSpace();
4624     unsigned DestAS = AddrInst->getType()->getPointerAddressSpace();
4625     if (TLI.getTargetMachine().isNoopAddrSpaceCast(SrcAS, DestAS))
4626       return matchAddr(AddrInst->getOperand(0), Depth);
4627     return false;
4628   }
4629   case Instruction::Add: {
4630     // Check to see if we can merge in the RHS then the LHS.  If so, we win.
4631     ExtAddrMode BackupAddrMode = AddrMode;
4632     unsigned OldSize = AddrModeInsts.size();
4633     // Start a transaction at this point.
4634     // The LHS may match but not the RHS.
4635     // Therefore, we need a higher level restoration point to undo partially
4636     // matched operation.
4637     TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4638         TPT.getRestorationPoint();
4639 
4640     AddrMode.InBounds = false;
4641     if (matchAddr(AddrInst->getOperand(1), Depth + 1) &&
4642         matchAddr(AddrInst->getOperand(0), Depth + 1))
4643       return true;
4644 
4645     // Restore the old addr mode info.
4646     AddrMode = BackupAddrMode;
4647     AddrModeInsts.resize(OldSize);
4648     TPT.rollback(LastKnownGood);
4649 
4650     // Otherwise this was over-aggressive.  Try merging in the LHS then the RHS.
4651     if (matchAddr(AddrInst->getOperand(0), Depth + 1) &&
4652         matchAddr(AddrInst->getOperand(1), Depth + 1))
4653       return true;
4654 
4655     // Otherwise we definitely can't merge the ADD in.
4656     AddrMode = BackupAddrMode;
4657     AddrModeInsts.resize(OldSize);
4658     TPT.rollback(LastKnownGood);
4659     break;
4660   }
4661   // case Instruction::Or:
4662   //  TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
4663   // break;
4664   case Instruction::Mul:
4665   case Instruction::Shl: {
4666     // Can only handle X*C and X << C.
4667     AddrMode.InBounds = false;
4668     ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
4669     if (!RHS || RHS->getBitWidth() > 64)
4670       return false;
4671     int64_t Scale = Opcode == Instruction::Shl
4672                         ? 1LL << RHS->getLimitedValue(RHS->getBitWidth() - 1)
4673                         : RHS->getSExtValue();
4674 
4675     return matchScaledValue(AddrInst->getOperand(0), Scale, Depth);
4676   }
4677   case Instruction::GetElementPtr: {
4678     // Scan the GEP.  We check whether it contains constant offsets and at most
4679     // one variable offset.
4680     int VariableOperand = -1;
4681     unsigned VariableScale = 0;
4682 
4683     int64_t ConstantOffset = 0;
4684     gep_type_iterator GTI = gep_type_begin(AddrInst);
4685     for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
4686       if (StructType *STy = GTI.getStructTypeOrNull()) {
4687         const StructLayout *SL = DL.getStructLayout(STy);
4688         unsigned Idx =
4689             cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
4690         ConstantOffset += SL->getElementOffset(Idx);
4691       } else {
4692         TypeSize TS = DL.getTypeAllocSize(GTI.getIndexedType());
4693         if (TS.isNonZero()) {
4694           // The optimisations below currently only work for fixed offsets.
4695           if (TS.isScalable())
4696             return false;
4697           int64_t TypeSize = TS.getFixedValue();
4698           if (ConstantInt *CI =
4699                   dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
4700             const APInt &CVal = CI->getValue();
4701             if (CVal.getMinSignedBits() <= 64) {
4702               ConstantOffset += CVal.getSExtValue() * TypeSize;
4703               continue;
4704             }
4705           }
4706           // We only allow one variable index at the moment.
4707           if (VariableOperand != -1)
4708             return false;
4709 
4710           // Remember the variable index.
4711           VariableOperand = i;
4712           VariableScale = TypeSize;
4713         }
4714       }
4715     }
4716 
4717     // A common case is for the GEP to only do a constant offset.  In this case,
4718     // just add it to the disp field and check validity.
4719     if (VariableOperand == -1) {
4720       AddrMode.BaseOffs += ConstantOffset;
4721       if (ConstantOffset == 0 ||
4722           TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) {
4723         // Check to see if we can fold the base pointer in too.
4724         if (matchAddr(AddrInst->getOperand(0), Depth + 1)) {
4725           if (!cast<GEPOperator>(AddrInst)->isInBounds())
4726             AddrMode.InBounds = false;
4727           return true;
4728         }
4729       } else if (EnableGEPOffsetSplit && isa<GetElementPtrInst>(AddrInst) &&
4730                  TLI.shouldConsiderGEPOffsetSplit() && Depth == 0 &&
4731                  ConstantOffset > 0) {
4732         // Record GEPs with non-zero offsets as candidates for splitting in the
4733         // event that the offset cannot fit into the r+i addressing mode.
4734         // Simple and common case that only one GEP is used in calculating the
4735         // address for the memory access.
4736         Value *Base = AddrInst->getOperand(0);
4737         auto *BaseI = dyn_cast<Instruction>(Base);
4738         auto *GEP = cast<GetElementPtrInst>(AddrInst);
4739         if (isa<Argument>(Base) || isa<GlobalValue>(Base) ||
4740             (BaseI && !isa<CastInst>(BaseI) &&
4741              !isa<GetElementPtrInst>(BaseI))) {
4742           // Make sure the parent block allows inserting non-PHI instructions
4743           // before the terminator.
4744           BasicBlock *Parent =
4745               BaseI ? BaseI->getParent() : &GEP->getFunction()->getEntryBlock();
4746           if (!Parent->getTerminator()->isEHPad())
4747             LargeOffsetGEP = std::make_pair(GEP, ConstantOffset);
4748         }
4749       }
4750       AddrMode.BaseOffs -= ConstantOffset;
4751       return false;
4752     }
4753 
4754     // Save the valid addressing mode in case we can't match.
4755     ExtAddrMode BackupAddrMode = AddrMode;
4756     unsigned OldSize = AddrModeInsts.size();
4757 
4758     // See if the scale and offset amount is valid for this target.
4759     AddrMode.BaseOffs += ConstantOffset;
4760     if (!cast<GEPOperator>(AddrInst)->isInBounds())
4761       AddrMode.InBounds = false;
4762 
4763     // Match the base operand of the GEP.
4764     if (!matchAddr(AddrInst->getOperand(0), Depth + 1)) {
4765       // If it couldn't be matched, just stuff the value in a register.
4766       if (AddrMode.HasBaseReg) {
4767         AddrMode = BackupAddrMode;
4768         AddrModeInsts.resize(OldSize);
4769         return false;
4770       }
4771       AddrMode.HasBaseReg = true;
4772       AddrMode.BaseReg = AddrInst->getOperand(0);
4773     }
4774 
4775     // Match the remaining variable portion of the GEP.
4776     if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
4777                           Depth)) {
4778       // If it couldn't be matched, try stuffing the base into a register
4779       // instead of matching it, and retrying the match of the scale.
4780       AddrMode = BackupAddrMode;
4781       AddrModeInsts.resize(OldSize);
4782       if (AddrMode.HasBaseReg)
4783         return false;
4784       AddrMode.HasBaseReg = true;
4785       AddrMode.BaseReg = AddrInst->getOperand(0);
4786       AddrMode.BaseOffs += ConstantOffset;
4787       if (!matchScaledValue(AddrInst->getOperand(VariableOperand),
4788                             VariableScale, Depth)) {
4789         // If even that didn't work, bail.
4790         AddrMode = BackupAddrMode;
4791         AddrModeInsts.resize(OldSize);
4792         return false;
4793       }
4794     }
4795 
4796     return true;
4797   }
4798   case Instruction::SExt:
4799   case Instruction::ZExt: {
4800     Instruction *Ext = dyn_cast<Instruction>(AddrInst);
4801     if (!Ext)
4802       return false;
4803 
4804     // Try to move this ext out of the way of the addressing mode.
4805     // Ask for a method for doing so.
4806     TypePromotionHelper::Action TPH =
4807         TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts);
4808     if (!TPH)
4809       return false;
4810 
4811     TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4812         TPT.getRestorationPoint();
4813     unsigned CreatedInstsCost = 0;
4814     unsigned ExtCost = !TLI.isExtFree(Ext);
4815     Value *PromotedOperand =
4816         TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI);
4817     // SExt has been moved away.
4818     // Thus either it will be rematched later in the recursive calls or it is
4819     // gone. Anyway, we must not fold it into the addressing mode at this point.
4820     // E.g.,
4821     // op = add opnd, 1
4822     // idx = ext op
4823     // addr = gep base, idx
4824     // is now:
4825     // promotedOpnd = ext opnd            <- no match here
4826     // op = promoted_add promotedOpnd, 1  <- match (later in recursive calls)
4827     // addr = gep base, op                <- match
4828     if (MovedAway)
4829       *MovedAway = true;
4830 
4831     assert(PromotedOperand &&
4832            "TypePromotionHelper should have filtered out those cases");
4833 
4834     ExtAddrMode BackupAddrMode = AddrMode;
4835     unsigned OldSize = AddrModeInsts.size();
4836 
4837     if (!matchAddr(PromotedOperand, Depth) ||
4838         // The total of the new cost is equal to the cost of the created
4839         // instructions.
4840         // The total of the old cost is equal to the cost of the extension plus
4841         // what we have saved in the addressing mode.
4842         !isPromotionProfitable(CreatedInstsCost,
4843                                ExtCost + (AddrModeInsts.size() - OldSize),
4844                                PromotedOperand)) {
4845       AddrMode = BackupAddrMode;
4846       AddrModeInsts.resize(OldSize);
4847       LLVM_DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
4848       TPT.rollback(LastKnownGood);
4849       return false;
4850     }
4851     return true;
4852   }
4853   }
4854   return false;
4855 }
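// A worked example of the GEP case above (illustration only, hypothetical IR):
//   %p = getelementptr inbounds { i32, [8 x i32] }, ptr %b, i64 0, i32 1, i64 %i
//   %v = load i32, ptr %p
// The struct field offset (4 bytes) goes into BaseOffs and the array index
// into the scaled register, so the candidate ExtAddrMode is roughly
//   BaseReg = %b, BaseOffs = 4, ScaledReg = %i, Scale = 4
// and it survives only if TLI.isLegalAddressingMode accepts that combination
// for an i32 access in this address space.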
4856 
4857 /// If we can, try to add the value of 'Addr' into the current addressing mode.
4858 /// If Addr can't be added to AddrMode this returns false and leaves AddrMode
4859 /// unmodified. This assumes that Addr is either a pointer type or intptr_t
4860 /// for the target.
4861 ///
4862 bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) {
4863   // Start a transaction at this point that we will rollback if the matching
4864   // fails.
4865   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4866       TPT.getRestorationPoint();
4867   if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
4868     if (CI->getValue().isSignedIntN(64)) {
4869       // Fold in immediates if legal for the target.
4870       AddrMode.BaseOffs += CI->getSExtValue();
4871       if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4872         return true;
4873       AddrMode.BaseOffs -= CI->getSExtValue();
4874     }
4875   } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
4876     // If this is a global variable, try to fold it into the addressing mode.
4877     if (!AddrMode.BaseGV) {
4878       AddrMode.BaseGV = GV;
4879       if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4880         return true;
4881       AddrMode.BaseGV = nullptr;
4882     }
4883   } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
4884     ExtAddrMode BackupAddrMode = AddrMode;
4885     unsigned OldSize = AddrModeInsts.size();
4886 
4887     // Check to see if it is possible to fold this operation.
4888     bool MovedAway = false;
4889     if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) {
4890       // This instruction may have been moved away. If so, there is nothing
4891       // to check here.
4892       if (MovedAway)
4893         return true;
4894       // Okay, it's possible to fold this.  Check to see if it is actually
4895       // *profitable* to do so.  We use a simple cost model to avoid increasing
4896       // register pressure too much.
4897       if (I->hasOneUse() ||
4898           isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
4899         AddrModeInsts.push_back(I);
4900         return true;
4901       }
4902 
4903       // It isn't profitable to do this, roll back.
4904       AddrMode = BackupAddrMode;
4905       AddrModeInsts.resize(OldSize);
4906       TPT.rollback(LastKnownGood);
4907     }
4908   } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
4909     if (matchOperationAddr(CE, CE->getOpcode(), Depth))
4910       return true;
4911     TPT.rollback(LastKnownGood);
4912   } else if (isa<ConstantPointerNull>(Addr)) {
4913     // Null pointer gets folded without affecting the addressing mode.
4914     return true;
4915   }
4916 
4917   // Worst case, the target should support [reg] addressing modes. :)
4918   if (!AddrMode.HasBaseReg) {
4919     AddrMode.HasBaseReg = true;
4920     AddrMode.BaseReg = Addr;
4921     // Still check for legality in case the target supports [imm] but not [i+r].
4922     if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4923       return true;
4924     AddrMode.HasBaseReg = false;
4925     AddrMode.BaseReg = nullptr;
4926   }
4927 
4928   // If the base register is already taken, see if we can do [r+r].
4929   if (AddrMode.Scale == 0) {
4930     AddrMode.Scale = 1;
4931     AddrMode.ScaledReg = Addr;
4932     if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4933       return true;
4934     AddrMode.Scale = 0;
4935     AddrMode.ScaledReg = nullptr;
4936   }
4937   // Couldn't match.
4938   TPT.rollback(LastKnownGood);
4939   return false;
4940 }
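// A sketch of the fallback order above (illustration only): when Addr matches
// none of the special cases, matchAddr first tries to place it in the base
// register, i.e. the mode [Addr], and if the base register is already taken it
// tries the scaled register with Scale == 1, i.e. [BaseReg + 1*Addr]; each
// attempt is kept only if the target reports the resulting mode legal,
// otherwise the transaction is rolled back to LastKnownGood.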
4941 
4942 /// Check to see if all uses of OpVal by the specified inline asm call are due
4943 /// to memory operands. If so, return true, otherwise return false.
4944 static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
4945                                     const TargetLowering &TLI,
4946                                     const TargetRegisterInfo &TRI) {
4947   const Function *F = CI->getFunction();
4948   TargetLowering::AsmOperandInfoVector TargetConstraints =
4949       TLI.ParseConstraints(F->getParent()->getDataLayout(), &TRI, *CI);
4950 
4951   for (TargetLowering::AsmOperandInfo &OpInfo : TargetConstraints) {
4952     // Compute the constraint code and ConstraintType to use.
4953     TLI.ComputeConstraintToUse(OpInfo, SDValue());
4954 
4955     // If this asm operand is our Value*, and if it isn't an indirect memory
4956     // operand, we can't fold it!  TODO: Also handle C_Address?
4957     if (OpInfo.CallOperandVal == OpVal &&
4958         (OpInfo.ConstraintType != TargetLowering::C_Memory ||
4959          !OpInfo.isIndirect))
4960       return false;
4961   }
4962 
4963   return true;
4964 }
4965 
4966 // Max number of memory uses to look at before aborting the search to conserve
4967 // compile time.
4968 static constexpr int MaxMemoryUsesToScan = 20;
4969 
4970 /// Recursively walk all the uses of I until we find a memory use.
4971 /// If we find an obviously non-foldable instruction, return true.
4972 /// Add accessed addresses and types to MemoryUses.
4973 static bool FindAllMemoryUses(
4974     Instruction *I, SmallVectorImpl<std::pair<Value *, Type *>> &MemoryUses,
4975     SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI,
4976     const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI,
4977     BlockFrequencyInfo *BFI, int SeenInsts = 0) {
4978   // If we already considered this instruction, we're done.
4979   if (!ConsideredInsts.insert(I).second)
4980     return false;
4981 
4982   // If this is an obviously unfoldable instruction, bail out.
4983   if (!MightBeFoldableInst(I))
4984     return true;
4985 
4986   // Loop over all the uses, recursively processing them.
4987   for (Use &U : I->uses()) {
4988     // Conservatively return true if we're seeing a large number or a deep chain
4989     // of users. This avoids excessive compilation times in pathological cases.
4990     if (SeenInsts++ >= MaxMemoryUsesToScan)
4991       return true;
4992 
4993     Instruction *UserI = cast<Instruction>(U.getUser());
4994     if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
4995       MemoryUses.push_back({U.get(), LI->getType()});
4996       continue;
4997     }
4998 
4999     if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
5000       if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
5001         return true; // Storing addr, not into addr.
5002       MemoryUses.push_back({U.get(), SI->getValueOperand()->getType()});
5003       continue;
5004     }
5005 
5006     if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) {
5007       if (U.getOperandNo() != AtomicRMWInst::getPointerOperandIndex())
5008         return true; // Storing addr, not into addr.
5009       MemoryUses.push_back({U.get(), RMW->getValOperand()->getType()});
5010       continue;
5011     }
5012 
5013     if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) {
5014       if (U.getOperandNo() != AtomicCmpXchgInst::getPointerOperandIndex())
5015         return true; // Storing addr, not into addr.
5016       MemoryUses.push_back({U.get(), CmpX->getCompareOperand()->getType()});
5017       continue;
5018     }
5019 
5020     if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
5021       if (CI->hasFnAttr(Attribute::Cold)) {
5022         // If this is a cold call, we can sink the addressing calculation into
5023         // the cold path.  See optimizeCallInst
5024         bool OptForSize =
5025             OptSize || llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI);
5026         if (!OptForSize)
5027           continue;
5028       }
5029 
5030       InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand());
5031       if (!IA)
5032         return true;
5033 
5034       // If this is a memory operand, we're cool, otherwise bail out.
5035       if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI))
5036         return true;
5037       continue;
5038     }
5039 
5040     if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
5041                           PSI, BFI, SeenInsts))
5042       return true;
5043   }
5044 
5045   return false;
5046 }
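// A minimal sketch of FindAllMemoryUses (illustration only, hypothetical IR):
//   %sum = add i64 %base, %off
//   %p   = inttoptr i64 %sum to ptr
//   %v   = load i32, ptr %p
//   store i64 %sum, ptr %q
// Starting from %sum, the walk records the load through %p as a memory use,
// but returns true (non-foldable) because the store uses %sum as the stored
// value rather than as the pointer operand.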
5047 
5048 /// Return true if Val is already known to be live at the use site that we're
5049 /// folding it into. If so, there is no cost to include it in the addressing
5050 /// mode. KnownLive1 and KnownLive2 are two values that we know are live at the
5051 /// instruction already.
5052 bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,
5053                                                    Value *KnownLive1,
5054                                                    Value *KnownLive2) {
5055   // If Val is either of the known-live values, we know it is live!
5056   if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2)
5057     return true;
5058 
5059   // All values other than instructions and arguments (e.g. constants) are live.
5060   if (!isa<Instruction>(Val) && !isa<Argument>(Val))
5061     return true;
5062 
5063   // If Val is a constant sized alloca in the entry block, it is live, this is
5064   // true because it is just a reference to the stack/frame pointer, which is
5065   // live for the whole function.
5066   if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
5067     if (AI->isStaticAlloca())
5068       return true;
5069 
5070   // Check to see if this value is already used in the memory instruction's
5071   // block.  If so, it's already live into the block at the very least, so we
5072   // can reasonably fold it.
5073   return Val->isUsedInBasicBlock(MemoryInst->getParent());
5074 }
5075 
5076 /// It is possible for the addressing mode of the machine to fold the specified
5077 /// instruction into a load or store that ultimately uses it.
5078 /// However, the specified instruction has multiple uses.
5079 /// Given this, it may actually increase register pressure to fold it
5080 /// into the load. For example, consider this code:
5081 ///
5082 ///     X = ...
5083 ///     Y = X+1
5084 ///     use(Y)   -> nonload/store
5085 ///     Z = Y+1
5086 ///     load Z
5087 ///
5088 /// In this case, Y has multiple uses, and can be folded into the load of Z
5089 /// (yielding load [X+2]).  However, doing this will cause both "X" and "X+1" to
5090 /// be live at the use(Y) line.  If we don't fold Y into load Z, we use one
5091 /// fewer register.  Since Y can't be folded into "use(Y)" we don't increase the
5092 /// number of computations either.
5093 ///
5094 /// Note that this (like most of CodeGenPrepare) is just a rough heuristic.  If
5095 /// X was live across 'load Z' for other reasons, we actually *would* want to
5096 /// fold the addressing mode in the Z case.  This would make Y die earlier.
5097 bool AddressingModeMatcher::isProfitableToFoldIntoAddressingMode(
5098     Instruction *I, ExtAddrMode &AMBefore, ExtAddrMode &AMAfter) {
5099   if (IgnoreProfitability)
5100     return true;
5101 
5102   // AMBefore is the addressing mode before this instruction was folded into it,
5103   // and AMAfter is the addressing mode after the instruction was folded.  Get
5104   // the set of registers referenced by AMAfter and subtract out those
5105   // referenced by AMBefore: this is the set of values which folding in this
5106   // address extends the lifetime of.
5107   //
5108   // Note that there are only two potential values being referenced here,
5109   // BaseReg and ScaleReg (global addresses are always available, as are any
5110   // folded immediates).
5111   Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;
5112 
5113   // If the BaseReg or ScaledReg was referenced by the previous addrmode, its
5114   // lifetime wasn't extended by adding this instruction.
5115   if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
5116     BaseReg = nullptr;
5117   if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
5118     ScaledReg = nullptr;
5119 
5120   // If folding this instruction (and its subexpressions) didn't extend any live
5121   // ranges, we're ok with it.
5122   if (!BaseReg && !ScaledReg)
5123     return true;
5124 
5125   // If all uses of this instruction can have the address mode sunk into them,
5126   // we can remove the addressing mode and effectively trade one live register
5127   // for another (at worst.)  In this context, folding an addressing mode into
5128   // the use is just a particularly nice way of sinking it.
5129   SmallVector<std::pair<Value *, Type *>, 16> MemoryUses;
5130   SmallPtrSet<Instruction *, 16> ConsideredInsts;
5131   if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI, OptSize, PSI,
5132                         BFI))
5133     return false; // Has a non-memory, non-foldable use!
5134 
5135   // Now that we know that all uses of this instruction are part of a chain of
5136   // computation involving only operations that could theoretically be folded
5137   // into a memory use, loop over each of these memory operation uses and see
5138   // if they could *actually* fold the instruction.  The assumption is that
5139   // addressing modes are cheap and that duplicating the computation involved
5140   // many times is worthwhile, even on a fast path. For sinking candidates
5141   // (i.e. cold call sites), this serves as a way to prevent excessive code
5142   // growth since most architectures have some reasonably small and fast way to
5143   // compute an effective address. (e.g., LEA on x86)
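  // For example (illustrative only), on x86 a computation of the form
  //   base + index*4 + 16
  // can typically be folded into a single instruction such as
  //   leaq 16(%rdi,%rsi,4), %rax
  // so duplicating it once per memory use is usually acceptable.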
5144   SmallVector<Instruction *, 32> MatchedAddrModeInsts;
5145   for (const std::pair<Value *, Type *> &Pair : MemoryUses) {
5146     Value *Address = Pair.first;
5147     Type *AddressAccessTy = Pair.second;
5148     unsigned AS = Address->getType()->getPointerAddressSpace();
5149 
5150     // Do a match against the root of this address, ignoring profitability. This
5151     // will tell us if the addressing mode for the memory operation will
5152     // *actually* cover the shared instruction.
5153     ExtAddrMode Result;
5154     std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
5155                                                                       0);
5156     TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5157         TPT.getRestorationPoint();
5158     AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, TRI, LI, getDTFn,
5159                                   AddressAccessTy, AS, MemoryInst, Result,
5160                                   InsertedInsts, PromotedInsts, TPT,
5161                                   LargeOffsetGEP, OptSize, PSI, BFI);
5162     Matcher.IgnoreProfitability = true;
5163     bool Success = Matcher.matchAddr(Address, 0);
5164     (void)Success;
5165     assert(Success && "Couldn't select *anything*?");
5166 
5167     // The match was only done to check profitability; the changes it made are
5168     // not part of the original matcher. Therefore, they should be dropped,
5169     // otherwise the original matcher will not be left in the right state.
5170     TPT.rollback(LastKnownGood);
5171 
5172     // If the match didn't cover I, then it won't be shared by it.
5173     if (!is_contained(MatchedAddrModeInsts, I))
5174       return false;
5175 
5176     MatchedAddrModeInsts.clear();
5177   }
5178 
5179   return true;
5180 }
5181 
5182 /// Return true if the specified value is defined in a
5183 /// different basic block than BB.
5184 static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
5185   if (Instruction *I = dyn_cast<Instruction>(V))
5186     return I->getParent() != BB;
5187   return false;
5188 }
5189 
5190 /// Sink addressing mode computation immediately before MemoryInst if doing so
5191 /// can be done without increasing register pressure.  The need for the
5192 /// register pressure constraint means this can end up being an all or nothing
5193 /// decision for all uses of the same addressing computation.
5194 ///
5195 /// Load and Store Instructions often have addressing modes that can do
5196 /// significant amounts of computation. As such, instruction selection will try
5197 /// to get the load or store to do as much computation as possible for the
5198 /// program. The problem is that isel can only see within a single block. As
5199 /// such, we sink as much legal addressing mode work into the block as possible.
5200 ///
5201 /// This method is used to optimize both load/store and inline asms with memory
5202 /// operands.  It's also used to sink addressing computations feeding into cold
5203 /// call sites into their (cold) basic block.
5204 ///
5205 /// The motivation for handling sinking into cold blocks is that doing so can
5206 /// both enable other address mode sinking (by satisfying the register pressure
5207 /// constraint above), and reduce register pressure globally (by removing the
5208 /// addressing mode computation from the fast path entirely).
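/// As a rough sketch (assuming the target can fold the whole expression into
/// its addressing mode), the transformation looks like:
/// \code
///   entry:
///     %p = getelementptr i8, ptr %base, i64 %off
///     br i1 %c, label %use, label %exit
///   use:
///     %v = load i32, ptr %p
/// \endcode
/// =>
/// \code
///   use:
///     %sunkaddr = getelementptr i8, ptr %base, i64 %off
///     %v = load i32, ptr %sunkaddr
/// \endcode
/// so that instruction selection, which works one block at a time, can fold
/// the address computation into the load.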
5209 bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
5210                                         Type *AccessTy, unsigned AddrSpace) {
5211   Value *Repl = Addr;
5212 
5213   // Try to collapse single-value PHI nodes.  This is necessary to undo
5214   // unprofitable PRE transformations.
5215   SmallVector<Value *, 8> worklist;
5216   SmallPtrSet<Value *, 16> Visited;
5217   worklist.push_back(Addr);
5218 
5219   // Use a worklist to iteratively look through PHI and select nodes, and
5220   // ensure that the addressing mode obtained from the non-PHI/select roots of
5221   // the graph are compatible.
5222   bool PhiOrSelectSeen = false;
5223   SmallVector<Instruction *, 16> AddrModeInsts;
5224   const SimplifyQuery SQ(*DL, TLInfo);
5225   AddressingModeCombiner AddrModes(SQ, Addr);
5226   TypePromotionTransaction TPT(RemovedInsts);
5227   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5228       TPT.getRestorationPoint();
5229   while (!worklist.empty()) {
5230     Value *V = worklist.pop_back_val();
5231 
5232     // We allow traversing cyclic Phi nodes.
5233     // In case of success, after this loop we ensure that traversing through
5234     // Phi nodes ends up with all cases computing an address of the form
5235     //    BaseGV + Base + Scale * Index + Offset
5236     // where Scale and Offset are constants and BaseGV, Base and Index
5237     // are exactly the same Values in all cases.
5238     // It means that BaseGV, Scale and Offset dominate our memory instruction
5239     // and have the same values as they had in the address computation
5240     // represented as a Phi, so we can safely sink it to the memory instruction.
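    // Illustrative example: both incoming values below compute the same
    // address (%base + 16), so the computation can be rematerialized next to
    // the memory instruction instead of being reached through the Phi:
    //   bb1:   %a1 = getelementptr i8, ptr %base, i64 16
    //   bb2:   %a2 = getelementptr i8, ptr %base, i64 16
    //   merge: %addr = phi ptr [ %a1, %bb1 ], [ %a2, %bb2 ]
    //          %v = load i32, ptr %addr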
5241     if (!Visited.insert(V).second)
5242       continue;
5243 
5244     // For a PHI node, push all of its incoming values.
5245     if (PHINode *P = dyn_cast<PHINode>(V)) {
5246       append_range(worklist, P->incoming_values());
5247       PhiOrSelectSeen = true;
5248       continue;
5249     }
5250     // Similar for select.
5251     if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
5252       worklist.push_back(SI->getFalseValue());
5253       worklist.push_back(SI->getTrueValue());
5254       PhiOrSelectSeen = true;
5255       continue;
5256     }
5257 
5258     // For non-PHIs, determine the addressing mode being computed.  Note that
5259     // the result may differ depending on what other uses our candidate
5260     // addressing instructions might have.
5261     AddrModeInsts.clear();
5262     std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
5263                                                                       0);
5264     // Defer the query (and possible computation) of the dom tree to the point
5265     // of actual use.  It's expected that most address matches don't actually
5266     // need the domtree.
5267     auto getDTFn = [MemoryInst, this]() -> const DominatorTree & {
5268       Function *F = MemoryInst->getParent()->getParent();
5269       return this->getDT(*F);
5270     };
5271     ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
5272         V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *LI, getDTFn,
5273         *TRI, InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI,
5274         BFI.get());
5275 
5276     GetElementPtrInst *GEP = LargeOffsetGEP.first;
5277     if (GEP && !NewGEPBases.count(GEP)) {
5278       // If splitting the underlying data structure can reduce the offset of a
5279       // GEP, collect the GEP.  Skip the GEPs that are the new bases of
5280       // previously split data structures.
5281       LargeOffsetGEPMap[GEP->getPointerOperand()].push_back(LargeOffsetGEP);
5282       LargeOffsetGEPID.insert(std::make_pair(GEP, LargeOffsetGEPID.size()));
5283     }
5284 
5285     NewAddrMode.OriginalValue = V;
5286     if (!AddrModes.addNewAddrMode(NewAddrMode))
5287       break;
5288   }
5289 
5290   // Try to combine the AddrModes we've collected. If we couldn't collect any,
5291   // or we have multiple but either couldn't combine them or combining them
5292   // wouldn't do anything useful, bail out now.
5293   if (!AddrModes.combineAddrModes()) {
5294     TPT.rollback(LastKnownGood);
5295     return false;
5296   }
5297   bool Modified = TPT.commit();
5298 
5299   // Get the combined AddrMode (or the only AddrMode, if we only had one).
5300   ExtAddrMode AddrMode = AddrModes.getAddrMode();
5301 
5302   // If all the instructions matched are already in this BB, don't do anything.
5303   // If we saw a Phi node then it is definitely not local, and if we saw a
5304   // select then we want to push the address calculation past it even if it's
5305   // already in this BB.
5306   if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) {
5307         return IsNonLocalValue(V, MemoryInst->getParent());
5308       })) {
5309     LLVM_DEBUG(dbgs() << "CGP: Found      local addrmode: " << AddrMode
5310                       << "\n");
5311     return Modified;
5312   }
5313 
5314   // Insert this computation right before this user.  Since our caller is
5315   // scanning from the top of the BB to the bottom, reuses of the expr are
5316   // guaranteed to happen later.
5317   IRBuilder<> Builder(MemoryInst);
5318 
5319   // Now that we've determined the addressing expression we want to use and
5320   // know that we have to sink it into this block, check to see if we have
5321   // already done this for some other load/store instr in this block.  If so,
5322   // reuse the computation.  Before attempting reuse, check if the address is
5323   // still valid, as it may have been erased.
5324 
5325   WeakTrackingVH SunkAddrVH = SunkAddrs[Addr];
5326 
5327   Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
5328   Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
5329   if (SunkAddr) {
5330     LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode
5331                       << " for " << *MemoryInst << "\n");
5332     if (SunkAddr->getType() != Addr->getType()) {
5333       if (SunkAddr->getType()->getPointerAddressSpace() !=
5334               Addr->getType()->getPointerAddressSpace() &&
5335           !DL->isNonIntegralPointerType(Addr->getType())) {
5336         // There are two reasons the address spaces might not match: a no-op
5337         // addrspacecast, or a ptrtoint/inttoptr pair. Either way, we emit a
5338         // ptrtoint/inttoptr pair to ensure we match the original semantics.
5339         // TODO: allow bitcast between different address space pointers with the
5340         // same size.
5341         SunkAddr = Builder.CreatePtrToInt(SunkAddr, IntPtrTy, "sunkaddr");
5342         SunkAddr =
5343             Builder.CreateIntToPtr(SunkAddr, Addr->getType(), "sunkaddr");
5344       } else
5345         SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
5346     }
5347   } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() &&
5348                                    SubtargetInfo->addrSinkUsingGEPs())) {
5349     // By default, we use the GEP-based method when AA is used later. This
5350     // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
5351     LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
5352                       << " for " << *MemoryInst << "\n");
5353     Value *ResultPtr = nullptr, *ResultIndex = nullptr;
5354 
5355     // First, find the pointer.
5356     if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
5357       ResultPtr = AddrMode.BaseReg;
5358       AddrMode.BaseReg = nullptr;
5359     }
5360 
5361     if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
5362       // We can't add more than one pointer together, nor can we scale a
5363       // pointer (both of which seem meaningless).
5364       if (ResultPtr || AddrMode.Scale != 1)
5365         return Modified;
5366 
5367       ResultPtr = AddrMode.ScaledReg;
5368       AddrMode.Scale = 0;
5369     }
5370 
5371     // It is only safe to sign extend the BaseReg if we know that the math
5372     // required to create it did not overflow before we extend it. Since
5373     // the original IR value was tossed in favor of a constant back when
5374     // the AddrMode was created we need to bail out gracefully if widths
5375     // do not match instead of extending it.
5376     //
5377     // (See below for code to add the scale.)
5378     if (AddrMode.Scale) {
5379       Type *ScaledRegTy = AddrMode.ScaledReg->getType();
5380       if (cast<IntegerType>(IntPtrTy)->getBitWidth() >
5381           cast<IntegerType>(ScaledRegTy)->getBitWidth())
5382         return Modified;
5383     }
5384 
5385     if (AddrMode.BaseGV) {
5386       if (ResultPtr)
5387         return Modified;
5388 
5389       ResultPtr = AddrMode.BaseGV;
5390     }
5391 
5392     // If the real base value actually came from an inttoptr, then the matcher
5393     // will look through it and provide only the integer value. In that case,
5394     // use it here.
5395     if (!DL->isNonIntegralPointerType(Addr->getType())) {
5396       if (!ResultPtr && AddrMode.BaseReg) {
5397         ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(),
5398                                            "sunkaddr");
5399         AddrMode.BaseReg = nullptr;
5400       } else if (!ResultPtr && AddrMode.Scale == 1) {
5401         ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(),
5402                                            "sunkaddr");
5403         AddrMode.Scale = 0;
5404       }
5405     }
5406 
5407     if (!ResultPtr && !AddrMode.BaseReg && !AddrMode.Scale &&
5408         !AddrMode.BaseOffs) {
5409       SunkAddr = Constant::getNullValue(Addr->getType());
5410     } else if (!ResultPtr) {
5411       return Modified;
5412     } else {
5413       Type *I8PtrTy =
5414           Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace());
5415       Type *I8Ty = Builder.getInt8Ty();
5416 
5417       // Start with the base register. Do this first so that subsequent address
5418       // matching finds it last, which will prevent it from trying to match it
5419       // as the scaled value in case it happens to be a mul. That would be
5420       // problematic if we've sunk a different mul for the scale, because then
5421       // we'd end up sinking both muls.
5422       if (AddrMode.BaseReg) {
5423         Value *V = AddrMode.BaseReg;
5424         if (V->getType() != IntPtrTy)
5425           V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
5426 
5427         ResultIndex = V;
5428       }
5429 
5430       // Add the scale value.
5431       if (AddrMode.Scale) {
5432         Value *V = AddrMode.ScaledReg;
5433         if (V->getType() == IntPtrTy) {
5434           // done.
5435         } else {
5436           assert(cast<IntegerType>(IntPtrTy)->getBitWidth() <
5437                      cast<IntegerType>(V->getType())->getBitWidth() &&
5438                  "We can't transform if ScaledReg is too narrow");
5439           V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
5440         }
5441 
5442         if (AddrMode.Scale != 1)
5443           V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
5444                                 "sunkaddr");
5445         if (ResultIndex)
5446           ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr");
5447         else
5448           ResultIndex = V;
5449       }
5450 
5451       // Add in the Base Offset if present.
5452       if (AddrMode.BaseOffs) {
5453         Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
5454         if (ResultIndex) {
5455           // We need to add this separately from the scale above to help with
5456           // SDAG consecutive load/store merging.
5457           if (ResultPtr->getType() != I8PtrTy)
5458             ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
5459           ResultPtr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex,
5460                                         "sunkaddr", AddrMode.InBounds);
5461         }
5462 
5463         ResultIndex = V;
5464       }
5465 
5466       if (!ResultIndex) {
5467         SunkAddr = ResultPtr;
5468       } else {
5469         if (ResultPtr->getType() != I8PtrTy)
5470           ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
5471         SunkAddr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr",
5472                                      AddrMode.InBounds);
5473       }
5474 
5475       if (SunkAddr->getType() != Addr->getType()) {
5476         if (SunkAddr->getType()->getPointerAddressSpace() !=
5477                 Addr->getType()->getPointerAddressSpace() &&
5478             !DL->isNonIntegralPointerType(Addr->getType())) {
5479           // There are two reasons the address spaces might not match: a no-op
5480           // addrspacecast, or a ptrtoint/inttoptr pair. Either way, we emit a
5481           // ptrtoint/inttoptr pair to ensure we match the original semantics.
5482           // TODO: allow bitcast between different address space pointers with
5483           // the same size.
5484           SunkAddr = Builder.CreatePtrToInt(SunkAddr, IntPtrTy, "sunkaddr");
5485           SunkAddr =
5486               Builder.CreateIntToPtr(SunkAddr, Addr->getType(), "sunkaddr");
5487         } else
5488           SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
5489       }
5490     }
5491   } else {
5492     // We'd require a ptrtoint/inttoptr down the line, which we can't do for
5493     // non-integral pointers, so in that case bail out now.
5494     Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr;
5495     Type *ScaleTy = AddrMode.Scale ? AddrMode.ScaledReg->getType() : nullptr;
5496     PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy);
5497     PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy);
5498     if (DL->isNonIntegralPointerType(Addr->getType()) ||
5499         (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) ||
5500         (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) ||
5501         (AddrMode.BaseGV &&
5502          DL->isNonIntegralPointerType(AddrMode.BaseGV->getType())))
5503       return Modified;
5504 
5505     LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
5506                       << " for " << *MemoryInst << "\n");
5507     Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
5508     Value *Result = nullptr;
5509 
5510     // Start with the base register. Do this first so that subsequent address
5511     // matching finds it last, which will prevent it from trying to match it
5512     // as the scaled value in case it happens to be a mul. That would be
5513     // problematic if we've sunk a different mul for the scale, because then
5514     // we'd end up sinking both muls.
5515     if (AddrMode.BaseReg) {
5516       Value *V = AddrMode.BaseReg;
5517       if (V->getType()->isPointerTy())
5518         V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
5519       if (V->getType() != IntPtrTy)
5520         V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
5521       Result = V;
5522     }
5523 
5524     // Add the scale value.
5525     if (AddrMode.Scale) {
5526       Value *V = AddrMode.ScaledReg;
5527       if (V->getType() == IntPtrTy) {
5528         // done.
5529       } else if (V->getType()->isPointerTy()) {
5530         V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
5531       } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
5532                  cast<IntegerType>(V->getType())->getBitWidth()) {
5533         V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
5534       } else {
5535         // It is only safe to sign extend the BaseReg if we know that the math
5536         // required to create it did not overflow before we extend it. Since
5537         // the original IR value was tossed in favor of a constant back when
5538         // the AddrMode was created we need to bail out gracefully if widths
5539         // do not match instead of extending it.
5540         Instruction *I = dyn_cast_or_null<Instruction>(Result);
5541         if (I && (Result != AddrMode.BaseReg))
5542           I->eraseFromParent();
5543         return Modified;
5544       }
5545       if (AddrMode.Scale != 1)
5546         V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
5547                               "sunkaddr");
5548       if (Result)
5549         Result = Builder.CreateAdd(Result, V, "sunkaddr");
5550       else
5551         Result = V;
5552     }
5553 
5554     // Add in the BaseGV if present.
5555     if (AddrMode.BaseGV) {
5556       Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr");
5557       if (Result)
5558         Result = Builder.CreateAdd(Result, V, "sunkaddr");
5559       else
5560         Result = V;
5561     }
5562 
5563     // Add in the Base Offset if present.
5564     if (AddrMode.BaseOffs) {
5565       Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
5566       if (Result)
5567         Result = Builder.CreateAdd(Result, V, "sunkaddr");
5568       else
5569         Result = V;
5570     }
5571 
5572     if (!Result)
5573       SunkAddr = Constant::getNullValue(Addr->getType());
5574     else
5575       SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
5576   }
5577 
5578   MemoryInst->replaceUsesOfWith(Repl, SunkAddr);
5579   // Store the newly computed address into the cache. In the case we reused a
5580   // value, this should be idempotent.
5581   SunkAddrs[Addr] = WeakTrackingVH(SunkAddr);
5582 
5583   // If we have no uses, recursively delete the value and all dead instructions
5584   // using it.
5585   if (Repl->use_empty()) {
5586     resetIteratorIfInvalidatedWhileCalling(CurInstIterator->getParent(), [&]() {
5587       RecursivelyDeleteTriviallyDeadInstructions(
5588           Repl, TLInfo, nullptr,
5589           [&](Value *V) { removeAllAssertingVHReferences(V); });
5590     });
5591   }
5592   ++NumMemoryInsts;
5593   return true;
5594 }
5595 
5596 /// Rewrite GEP input to gather/scatter to enable SelectionDAGBuilder to find
5597 /// a uniform base to use for ISD::MGATHER/MSCATTER. SelectionDAGBuilder can
5598 /// only handle a 2 operand GEP in the same basic block or a splat constant
5599 /// vector. The 2 operands to the GEP must have a scalar pointer and a vector
5600 /// index.
5601 ///
5602 /// If the existing GEP has a vector base pointer that is splat, we can look
5603 /// through the splat to find the scalar pointer. If we can't find a scalar
5604 /// pointer there's nothing we can do.
5605 ///
5606 /// If we have a GEP with more than 2 indices where the middle indices are all
5607 /// zeroes, we can replace it with 2 GEPs where the second has 2 operands.
5608 ///
5609 /// If the final index isn't a vector or is a splat, we can emit a scalar GEP
5610 /// followed by a GEP with an all zeroes vector index. This will enable
5611 /// SelectionDAGBuilder to use the scalar GEP as the uniform base and have a
5612 /// zero index.
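/// A minimal sketch of the splat-base case (intrinsic signatures abbreviated):
/// \code
///   %ins   = insertelement <4 x ptr> poison, ptr %scalar_base, i64 0
///   %vbase = shufflevector <4 x ptr> %ins, <4 x ptr> poison, <4 x i32> zeroinitializer
///   %gep   = getelementptr i32, <4 x ptr> %vbase, <4 x i64> %idx
///   %g     = call <4 x i32> @llvm.masked.gather...(<4 x ptr> %gep, ...)
/// \endcode
/// is rewritten so the GEP has a scalar pointer and a vector index:
/// \code
///   %newgep = getelementptr i32, ptr %scalar_base, <4 x i64> %idx
///   %g      = call <4 x i32> @llvm.masked.gather...(<4 x ptr> %newgep, ...)
/// \endcode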
5613 bool CodeGenPrepare::optimizeGatherScatterInst(Instruction *MemoryInst,
5614                                                Value *Ptr) {
5615   Value *NewAddr;
5616 
5617   if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
5618     // Don't optimize GEPs that don't have indices.
5619     if (!GEP->hasIndices())
5620       return false;
5621 
5622     // If the GEP and the gather/scatter aren't in the same BB, don't optimize.
5623     // FIXME: We should support this by sinking the GEP.
5624     if (MemoryInst->getParent() != GEP->getParent())
5625       return false;
5626 
5627     SmallVector<Value *, 2> Ops(GEP->operands());
5628 
5629     bool RewriteGEP = false;
5630 
5631     if (Ops[0]->getType()->isVectorTy()) {
5632       Ops[0] = getSplatValue(Ops[0]);
5633       if (!Ops[0])
5634         return false;
5635       RewriteGEP = true;
5636     }
5637 
5638     unsigned FinalIndex = Ops.size() - 1;
5639 
5640     // Ensure all but the last index are 0.
5641     // FIXME: This isn't strictly required. All that's required is that they are
5642     // all scalars or splats.
5643     for (unsigned i = 1; i < FinalIndex; ++i) {
5644       auto *C = dyn_cast<Constant>(Ops[i]);
5645       if (!C)
5646         return false;
5647       if (isa<VectorType>(C->getType()))
5648         C = C->getSplatValue();
5649       auto *CI = dyn_cast_or_null<ConstantInt>(C);
5650       if (!CI || !CI->isZero())
5651         return false;
5652       // Scalarize the index if needed.
5653       Ops[i] = CI;
5654     }
5655 
5656     // Try to scalarize the final index.
5657     if (Ops[FinalIndex]->getType()->isVectorTy()) {
5658       if (Value *V = getSplatValue(Ops[FinalIndex])) {
5659         auto *C = dyn_cast<ConstantInt>(V);
5660         // Don't scalarize an all-zeros vector.
5661         if (!C || !C->isZero()) {
5662           Ops[FinalIndex] = V;
5663           RewriteGEP = true;
5664         }
5665       }
5666     }
5667 
5668     // If we made any changes or we have extra operands, we need to generate
5669     // new instructions.
5670     if (!RewriteGEP && Ops.size() == 2)
5671       return false;
5672 
5673     auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
5674 
5675     IRBuilder<> Builder(MemoryInst);
5676 
5677     Type *SourceTy = GEP->getSourceElementType();
5678     Type *ScalarIndexTy = DL->getIndexType(Ops[0]->getType()->getScalarType());
5679 
5680     // If the final index isn't a vector, emit a scalar GEP containing all ops
5681     // and a vector GEP with all zeroes final index.
5682     if (!Ops[FinalIndex]->getType()->isVectorTy()) {
5683       NewAddr = Builder.CreateGEP(SourceTy, Ops[0], ArrayRef(Ops).drop_front());
5684       auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts);
5685       auto *SecondTy = GetElementPtrInst::getIndexedType(
5686           SourceTy, ArrayRef(Ops).drop_front());
5687       NewAddr =
5688           Builder.CreateGEP(SecondTy, NewAddr, Constant::getNullValue(IndexTy));
5689     } else {
5690       Value *Base = Ops[0];
5691       Value *Index = Ops[FinalIndex];
5692 
5693       // Create a scalar GEP if there are more than 2 operands.
5694       if (Ops.size() != 2) {
5695         // Replace the last index with 0.
5696         Ops[FinalIndex] = Constant::getNullValue(ScalarIndexTy);
5697         Base = Builder.CreateGEP(SourceTy, Base, ArrayRef(Ops).drop_front());
5698         SourceTy = GetElementPtrInst::getIndexedType(
5699             SourceTy, ArrayRef(Ops).drop_front());
5700       }
5701 
5702       // Now create the GEP with scalar pointer and vector index.
5703       NewAddr = Builder.CreateGEP(SourceTy, Base, Index);
5704     }
5705   } else if (!isa<Constant>(Ptr)) {
5706     // Not a GEP, maybe it's a splat and we can create a GEP to enable
5707     // SelectionDAGBuilder to use it as a uniform base.
5708     Value *V = getSplatValue(Ptr);
5709     if (!V)
5710       return false;
5711 
5712     auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
5713 
5714     IRBuilder<> Builder(MemoryInst);
5715 
5716     // Emit a vector GEP with a scalar pointer and all 0s vector index.
5717     Type *ScalarIndexTy = DL->getIndexType(V->getType()->getScalarType());
5718     auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts);
5719     Type *ScalarTy;
5720     if (cast<IntrinsicInst>(MemoryInst)->getIntrinsicID() ==
5721         Intrinsic::masked_gather) {
5722       ScalarTy = MemoryInst->getType()->getScalarType();
5723     } else {
5724       assert(cast<IntrinsicInst>(MemoryInst)->getIntrinsicID() ==
5725              Intrinsic::masked_scatter);
5726       ScalarTy = MemoryInst->getOperand(0)->getType()->getScalarType();
5727     }
5728     NewAddr = Builder.CreateGEP(ScalarTy, V, Constant::getNullValue(IndexTy));
5729   } else {
5730     // Constant, SelectionDAGBuilder knows to check if it's a splat.
5731     return false;
5732   }
5733 
5734   MemoryInst->replaceUsesOfWith(Ptr, NewAddr);
5735 
5736   // If we have no uses, recursively delete the value and all dead instructions
5737   // using it.
5738   if (Ptr->use_empty())
5739     RecursivelyDeleteTriviallyDeadInstructions(
5740         Ptr, TLInfo, nullptr,
5741         [&](Value *V) { removeAllAssertingVHReferences(V); });
5742 
5743   return true;
5744 }
5745 
5746 /// If there are any memory operands, use optimizeMemoryInst to sink their
5747 /// address computation into the block when possible / profitable.
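/// For example (a rough sketch), given an indirect "*m" operand such as
/// \code
///   call void asm sideeffect "prefetcht0 $0", "*m"(ptr elementtype(i32) %gep)
/// \endcode
/// the address %gep is treated like the address of a load/store and may be
/// sunk into this block by optimizeMemoryInst.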
5748 bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) {
5749   bool MadeChange = false;
5750 
5751   const TargetRegisterInfo *TRI =
5752       TM->getSubtargetImpl(*CS->getFunction())->getRegisterInfo();
5753   TargetLowering::AsmOperandInfoVector TargetConstraints =
5754       TLI->ParseConstraints(*DL, TRI, *CS);
5755   unsigned ArgNo = 0;
5756   for (TargetLowering::AsmOperandInfo &OpInfo : TargetConstraints) {
5757     // Compute the constraint code and ConstraintType to use.
5758     TLI->ComputeConstraintToUse(OpInfo, SDValue());
5759 
5760     // TODO: Also handle C_Address?
5761     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
5762         OpInfo.isIndirect) {
5763       Value *OpVal = CS->getArgOperand(ArgNo++);
5764       MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u);
5765     } else if (OpInfo.Type == InlineAsm::isInput)
5766       ArgNo++;
5767   }
5768 
5769   return MadeChange;
5770 }
5771 
5772 /// Check if all the uses of \p Val are equivalent (or free) zero or
5773 /// sign extensions.
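/// For example (illustrative), with
/// \code
///   %e1 = zext i8 %val to i32
///   %e2 = zext i8 %val to i64
/// \endcode
/// the two uses are considered equivalent when the target reports that
/// extending from i32 to i64 is free, since %e2 could then be rederived from
/// %e1 at no cost.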
5774 static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) {
5775   assert(!Val->use_empty() && "Input must have at least one use");
5776   const Instruction *FirstUser = cast<Instruction>(*Val->user_begin());
5777   bool IsSExt = isa<SExtInst>(FirstUser);
5778   Type *ExtTy = FirstUser->getType();
5779   for (const User *U : Val->users()) {
5780     const Instruction *UI = cast<Instruction>(U);
5781     if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI)))
5782       return false;
5783     Type *CurTy = UI->getType();
5784     // Same input and output types: Same instruction after CSE.
5785     if (CurTy == ExtTy)
5786       continue;
5787 
5788     // If IsSExt is true, we are in this situation:
5789     // a = Val
5790     // b = sext ty1 a to ty2
5791     // c = sext ty1 a to ty3
5792     // Assuming ty2 is shorter than ty3, this could be turned into:
5793     // a = Val
5794     // b = sext ty1 a to ty2
5795     // c = sext ty2 b to ty3
5796     // However, the last sext is not free.
5797     if (IsSExt)
5798       return false;
5799 
5800     // This is a ZExt, maybe this is free to extend from one type to another.
5801     // In that case, we would not account for a different use.
5802     Type *NarrowTy;
5803     Type *LargeTy;
5804     if (ExtTy->getScalarType()->getIntegerBitWidth() >
5805         CurTy->getScalarType()->getIntegerBitWidth()) {
5806       NarrowTy = CurTy;
5807       LargeTy = ExtTy;
5808     } else {
5809       NarrowTy = ExtTy;
5810       LargeTy = CurTy;
5811     }
5812 
5813     if (!TLI.isZExtFree(NarrowTy, LargeTy))
5814       return false;
5815   }
5816   // All uses are the same or can be derived from one another for free.
5817   return true;
5818 }
5819 
5820 /// Try to speculatively promote extensions in \p Exts and continue
5821 /// promoting through newly promoted operands recursively as far as doing so is
5822 /// profitable. Save extensions profitably moved up in \p ProfitablyMovedExts.
5823 /// When some promotion happened, \p TPT contains the proper state to revert
5824 /// them.
5825 ///
5826 /// \return true if some promotion happened, false otherwise.
5827 bool CodeGenPrepare::tryToPromoteExts(
5828     TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts,
5829     SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
5830     unsigned CreatedInstsCost) {
5831   bool Promoted = false;
5832 
5833   // Iterate over all the extensions to try to promote them.
5834   for (auto *I : Exts) {
5835     // Early check if we directly have ext(load).
5836     if (isa<LoadInst>(I->getOperand(0))) {
5837       ProfitablyMovedExts.push_back(I);
5838       continue;
5839     }
5840 
5841     // Check whether or not we want to do any promotion.  The reason we have
5842     // this check inside the for loop is to catch the case where an extension
5843     // is directly fed by a load, because in such a case the extension can be
5844     // moved up without any promotion of its operands.
5845     if (!TLI->enableExtLdPromotion() || DisableExtLdPromotion)
5846       return false;
5847 
5848     // Get the action to perform the promotion.
5849     TypePromotionHelper::Action TPH =
5850         TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts);
5851     // Check if we can promote.
5852     if (!TPH) {
5853       // Save the current extension as we cannot move up through its operand.
5854       ProfitablyMovedExts.push_back(I);
5855       continue;
5856     }
5857 
5858     // Save the current state.
5859     TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5860         TPT.getRestorationPoint();
5861     SmallVector<Instruction *, 4> NewExts;
5862     unsigned NewCreatedInstsCost = 0;
5863     unsigned ExtCost = !TLI->isExtFree(I);
5864     // Promote.
5865     Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost,
5866                              &NewExts, nullptr, *TLI);
5867     assert(PromotedVal &&
5868            "TypePromotionHelper should have filtered out those cases");
5869 
5870     // We would be able to merge only one extension in a load.
5871     // Therefore, if we have more than 1 new extension we heuristically
5872     // cut this search path, because it means we degrade the code quality.
5873     // With exactly 2, the transformation is neutral, because we will merge
5874     // one extension but leave one. However, we optimistically keep going,
5875     // because the new extension may be removed too.
5876     long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost;
5877     // FIXME: It would be possible to propagate a negative value instead of
5878     // conservatively ceiling it to 0.
5879     TotalCreatedInstsCost =
5880         std::max((long long)0, (TotalCreatedInstsCost - ExtCost));
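    // Worked example (illustrative): with CreatedInstsCost == 0, if the
    // promotion introduced two new instructions (NewCreatedInstsCost == 2)
    // and the original extension was not free (ExtCost == 1), then
    // TotalCreatedInstsCost == max(0, (0 + 2) - 1) == 1, which does not
    // exceed the "TotalCreatedInstsCost > 1" budget checked below.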
5881     if (!StressExtLdPromotion &&
5882         (TotalCreatedInstsCost > 1 ||
5883          !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) {
5884       // This promotion is not profitable. Roll back to the previous state, and
5885       // save the current extension in ProfitablyMovedExts, as the latest
5886       // speculative promotion turned out to be unprofitable.
5887       TPT.rollback(LastKnownGood);
5888       ProfitablyMovedExts.push_back(I);
5889       continue;
5890     }
5891     // Continue promoting NewExts as far as doing so is profitable.
5892     SmallVector<Instruction *, 2> NewlyMovedExts;
5893     (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost);
5894     bool NewPromoted = false;
5895     for (auto *ExtInst : NewlyMovedExts) {
5896       Instruction *MovedExt = cast<Instruction>(ExtInst);
5897       Value *ExtOperand = MovedExt->getOperand(0);
5898       // If we have reached a load, we need this extra profitability check
5899       // as it could potentially be merged into an ext(load).
5900       if (isa<LoadInst>(ExtOperand) &&
5901           !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost ||
5902             (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI))))
5903         continue;
5904 
5905       ProfitablyMovedExts.push_back(MovedExt);
5906       NewPromoted = true;
5907     }
5908 
5909     // If none of the speculative promotions for NewExts is profitable, roll
5910     // back and save the current extension (I) as the last profitable extension.
5911     if (!NewPromoted) {
5912       TPT.rollback(LastKnownGood);
5913       ProfitablyMovedExts.push_back(I);
5914       continue;
5915     }
5916     // The promotion is profitable.
5917     Promoted = true;
5918   }
5919   return Promoted;
5920 }
5921 
5922 /// Merge redundant sexts when one dominates the other.
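/// For example (illustrative), if %s1 below dominates %s2:
/// \code
///   %s1 = sext i32 %x to i64
///   ...
///   %s2 = sext i32 %x to i64
/// \endcode
/// then uses of %s2 are rewritten to use %s1 and %s2 is removed.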
5923 bool CodeGenPrepare::mergeSExts(Function &F) {
5924   bool Changed = false;
5925   for (auto &Entry : ValToSExtendedUses) {
5926     SExts &Insts = Entry.second;
5927     SExts CurPts;
5928     for (Instruction *Inst : Insts) {
5929       if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) ||
5930           Inst->getOperand(0) != Entry.first)
5931         continue;
5932       bool inserted = false;
5933       for (auto &Pt : CurPts) {
5934         if (getDT(F).dominates(Inst, Pt)) {
5935           replaceAllUsesWith(Pt, Inst, FreshBBs, IsHugeFunc);
5936           RemovedInsts.insert(Pt);
5937           Pt->removeFromParent();
5938           Pt = Inst;
5939           inserted = true;
5940           Changed = true;
5941           break;
5942         }
5943         if (!getDT(F).dominates(Pt, Inst))
5944           // Give up if we need to merge in a common dominator, as experiments
5945           // show it is not profitable.
5946           continue;
5947         replaceAllUsesWith(Inst, Pt, FreshBBs, IsHugeFunc);
5948         RemovedInsts.insert(Inst);
5949         Inst->removeFromParent();
5950         inserted = true;
5951         Changed = true;
5952         break;
5953       }
5954       if (!inserted)
5955         CurPts.push_back(Inst);
5956     }
5957   }
5958   return Changed;
5959 }
5960 
5961 // Split large data structures so that the GEPs accessing them can have
5962 // smaller offsets, allowing them to be sunk to the same blocks as their users.
5963 // For example, a large struct starting from %base is split into two parts
5964 // where the second part starts from %new_base.
5965 //
5966 // Before:
5967 // BB0:
5968 //   %base     =
5969 //
5970 // BB1:
5971 //   %gep0     = gep %base, off0
5972 //   %gep1     = gep %base, off1
5973 //   %gep2     = gep %base, off2
5974 //
5975 // BB2:
5976 //   %load1    = load %gep0
5977 //   %load2    = load %gep1
5978 //   %load3    = load %gep2
5979 //
5980 // After:
5981 // BB0:
5982 //   %base     =
5983 //   %new_base = gep %base, off0
5984 //
5985 // BB1:
5986 //   %new_gep0 = %new_base
5987 //   %new_gep1 = gep %new_base, off1 - off0
5988 //   %new_gep2 = gep %new_base, off2 - off0
5989 //
5990 // BB2:
5991 //   %load1    = load i32, i32* %new_gep0
5992 //   %load2    = load i32, i32* %new_gep1
5993 //   %load3    = load i32, i32* %new_gep2
5994 //
5995 // %new_gep1 and %new_gep2 can be sunk to BB2 now after the splitting because
5996 // their offsets are small enough to fit into the addressing mode.
5997 bool CodeGenPrepare::splitLargeGEPOffsets() {
5998   bool Changed = false;
5999   for (auto &Entry : LargeOffsetGEPMap) {
6000     Value *OldBase = Entry.first;
6001     SmallVectorImpl<std::pair<AssertingVH<GetElementPtrInst>, int64_t>>
6002         &LargeOffsetGEPs = Entry.second;
6003     auto compareGEPOffset =
6004         [&](const std::pair<GetElementPtrInst *, int64_t> &LHS,
6005             const std::pair<GetElementPtrInst *, int64_t> &RHS) {
6006           if (LHS.first == RHS.first)
6007             return false;
6008           if (LHS.second != RHS.second)
6009             return LHS.second < RHS.second;
6010           return LargeOffsetGEPID[LHS.first] < LargeOffsetGEPID[RHS.first];
6011         };
6012     // Sort all the GEPs of the same data structure based on their offsets.
6013     llvm::sort(LargeOffsetGEPs, compareGEPOffset);
6014     LargeOffsetGEPs.erase(
6015         std::unique(LargeOffsetGEPs.begin(), LargeOffsetGEPs.end()),
6016         LargeOffsetGEPs.end());
6017     // Skip if all the GEPs have the same offsets.
6018     if (LargeOffsetGEPs.front().second == LargeOffsetGEPs.back().second)
6019       continue;
6020     GetElementPtrInst *BaseGEP = LargeOffsetGEPs.begin()->first;
6021     int64_t BaseOffset = LargeOffsetGEPs.begin()->second;
6022     Value *NewBaseGEP = nullptr;
6023 
6024     auto *LargeOffsetGEP = LargeOffsetGEPs.begin();
6025     while (LargeOffsetGEP != LargeOffsetGEPs.end()) {
6026       GetElementPtrInst *GEP = LargeOffsetGEP->first;
6027       int64_t Offset = LargeOffsetGEP->second;
6028       if (Offset != BaseOffset) {
6029         TargetLowering::AddrMode AddrMode;
6030         AddrMode.BaseOffs = Offset - BaseOffset;
6031         // The result type of the GEP might not be the type of the memory
6032         // access.
6033         if (!TLI->isLegalAddressingMode(*DL, AddrMode,
6034                                         GEP->getResultElementType(),
6035                                         GEP->getAddressSpace())) {
6036           // We need to create a new base if the offset to the current base is
6037           // too large to fit into the addressing mode. So, a very large struct
6038           // may be split into several parts.
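          // Illustrative example (hypothetical target): if only immediate
          // offsets in [0, 255] can be folded into the addressing mode, GEPs
          // at offsets 0, 100 and 1000 from the same base end up in two
          // groups, and the GEP at offset 1000 becomes the base of the
          // second group.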
6039           BaseGEP = GEP;
6040           BaseOffset = Offset;
6041           NewBaseGEP = nullptr;
6042         }
6043       }
6044 
6045       // Generate a new GEP to replace the current one.
6046       LLVMContext &Ctx = GEP->getContext();
6047       Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
6048       Type *I8PtrTy =
6049           Type::getInt8PtrTy(Ctx, GEP->getType()->getPointerAddressSpace());
6050       Type *I8Ty = Type::getInt8Ty(Ctx);
6051 
6052       if (!NewBaseGEP) {
6053         // Create a new base if we don't have one yet.  Find the insertion
6054         // point for the new base first.
6055         BasicBlock::iterator NewBaseInsertPt;
6056         BasicBlock *NewBaseInsertBB;
6057         if (auto *BaseI = dyn_cast<Instruction>(OldBase)) {
6058           // If the base of the struct is an instruction, the new base will be
6059           // inserted close to it.
6060           NewBaseInsertBB = BaseI->getParent();
6061           if (isa<PHINode>(BaseI))
6062             NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
6063           else if (InvokeInst *Invoke = dyn_cast<InvokeInst>(BaseI)) {
6064             NewBaseInsertBB =
6065                 SplitEdge(NewBaseInsertBB, Invoke->getNormalDest());
6066             NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
6067           } else
6068             NewBaseInsertPt = std::next(BaseI->getIterator());
6069         } else {
6070           // If the current base is an argument or global value, the new base
6071           // will be inserted into the entry block.
6072           NewBaseInsertBB = &BaseGEP->getFunction()->getEntryBlock();
6073           NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
6074         }
6075         IRBuilder<> NewBaseBuilder(NewBaseInsertBB, NewBaseInsertPt);
6076         // Create a new base.
6077         Value *BaseIndex = ConstantInt::get(IntPtrTy, BaseOffset);
6078         NewBaseGEP = OldBase;
6079         if (NewBaseGEP->getType() != I8PtrTy)
6080           NewBaseGEP = NewBaseBuilder.CreatePointerCast(NewBaseGEP, I8PtrTy);
6081         NewBaseGEP =
6082             NewBaseBuilder.CreateGEP(I8Ty, NewBaseGEP, BaseIndex, "splitgep");
6083         NewGEPBases.insert(NewBaseGEP);
6084       }
6085 
6086       IRBuilder<> Builder(GEP);
6087       Value *NewGEP = NewBaseGEP;
6088       if (Offset == BaseOffset) {
6089         if (GEP->getType() != I8PtrTy)
6090           NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType());
6091       } else {
6092         // Calculate the new offset for the new GEP.
6093         Value *Index = ConstantInt::get(IntPtrTy, Offset - BaseOffset);
6094         NewGEP = Builder.CreateGEP(I8Ty, NewBaseGEP, Index);
6095 
6096         if (GEP->getType() != I8PtrTy)
6097           NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType());
6098       }
6099       replaceAllUsesWith(GEP, NewGEP, FreshBBs, IsHugeFunc);
6100       LargeOffsetGEPID.erase(GEP);
6101       LargeOffsetGEP = LargeOffsetGEPs.erase(LargeOffsetGEP);
6102       GEP->eraseFromParent();
6103       Changed = true;
6104     }
6105   }
6106   return Changed;
6107 }
6108 
6109 bool CodeGenPrepare::optimizePhiType(
6110     PHINode *I, SmallPtrSetImpl<PHINode *> &Visited,
6111     SmallPtrSetImpl<Instruction *> &DeletedInstrs) {
6112   // We are looking for a collection of interconnected phi nodes that together
6113   // only use loads/bitcasts and are used by stores/bitcasts, and the bitcasts
6114   // are of the same type. Convert the whole set of nodes to the type of the
6115   // bitcast.
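  // Illustrative example (profitability still depends on
  // TLI->shouldConvertPhiType): a float phi fed only through bitcasts from
  // i32 and consumed only through a bitcast back to i32, e.g.
  //   %b1  = bitcast i32 %x to float
  //   %b2  = bitcast i32 %y to float
  //   %phi = phi float [ %b1, %bb1 ], [ %b2, %bb2 ]
  //   %c   = bitcast float %phi to i32
  //   %add = add i32 %c, 1
  // can be rewritten as an i32 phi, deleting all of the bitcasts.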
6116   Type *PhiTy = I->getType();
6117   Type *ConvertTy = nullptr;
6118   if (Visited.count(I) ||
6119       (!I->getType()->isIntegerTy() && !I->getType()->isFloatingPointTy()))
6120     return false;
6121 
6122   SmallVector<Instruction *, 4> Worklist;
6123   Worklist.push_back(cast<Instruction>(I));
6124   SmallPtrSet<PHINode *, 4> PhiNodes;
6125   SmallPtrSet<ConstantData *, 4> Constants;
6126   PhiNodes.insert(I);
6127   Visited.insert(I);
6128   SmallPtrSet<Instruction *, 4> Defs;
6129   SmallPtrSet<Instruction *, 4> Uses;
6130   // This works by adding extra bitcasts between load/stores and removing
6131   // existing bitcasts. If we have a phi(bitcast(load)) or a store(bitcast(phi))
6132   // we can get in the situation where we remove a bitcast in one iteration
6133   // just to add it again in the next. We need to ensure that at least one
6134   // bitcast we remove is anchored to something that will not change back.
6135   bool AnyAnchored = false;
6136 
6137   while (!Worklist.empty()) {
6138     Instruction *II = Worklist.pop_back_val();
6139 
6140     if (auto *Phi = dyn_cast<PHINode>(II)) {
6141       // Handle Defs, which might also be PHIs.
6142       for (Value *V : Phi->incoming_values()) {
6143         if (auto *OpPhi = dyn_cast<PHINode>(V)) {
6144           if (!PhiNodes.count(OpPhi)) {
6145             if (!Visited.insert(OpPhi).second)
6146               return false;
6147             PhiNodes.insert(OpPhi);
6148             Worklist.push_back(OpPhi);
6149           }
6150         } else if (auto *OpLoad = dyn_cast<LoadInst>(V)) {
6151           if (!OpLoad->isSimple())
6152             return false;
6153           if (Defs.insert(OpLoad).second)
6154             Worklist.push_back(OpLoad);
6155         } else if (auto *OpEx = dyn_cast<ExtractElementInst>(V)) {
6156           if (Defs.insert(OpEx).second)
6157             Worklist.push_back(OpEx);
6158         } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) {
6159           if (!ConvertTy)
6160             ConvertTy = OpBC->getOperand(0)->getType();
6161           if (OpBC->getOperand(0)->getType() != ConvertTy)
6162             return false;
6163           if (Defs.insert(OpBC).second) {
6164             Worklist.push_back(OpBC);
6165             AnyAnchored |= !isa<LoadInst>(OpBC->getOperand(0)) &&
6166                            !isa<ExtractElementInst>(OpBC->getOperand(0));
6167           }
6168         } else if (auto *OpC = dyn_cast<ConstantData>(V))
6169           Constants.insert(OpC);
6170         else
6171           return false;
6172       }
6173     }
6174 
6175     // Handle uses, which might also be phis.
6176     for (User *V : II->users()) {
6177       if (auto *OpPhi = dyn_cast<PHINode>(V)) {
6178         if (!PhiNodes.count(OpPhi)) {
6179           if (Visited.count(OpPhi))
6180             return false;
6181           PhiNodes.insert(OpPhi);
6182           Visited.insert(OpPhi);
6183           Worklist.push_back(OpPhi);
6184         }
6185       } else if (auto *OpStore = dyn_cast<StoreInst>(V)) {
6186         if (!OpStore->isSimple() || OpStore->getOperand(0) != II)
6187           return false;
6188         Uses.insert(OpStore);
6189       } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) {
6190         if (!ConvertTy)
6191           ConvertTy = OpBC->getType();
6192         if (OpBC->getType() != ConvertTy)
6193           return false;
6194         Uses.insert(OpBC);
6195         AnyAnchored |=
6196             any_of(OpBC->users(), [](User *U) { return !isa<StoreInst>(U); });
6197       } else {
6198         return false;
6199       }
6200     }
6201   }
6202 
6203   if (!ConvertTy || !AnyAnchored ||
6204       !TLI->shouldConvertPhiType(PhiTy, ConvertTy))
6205     return false;
6206 
6207   LLVM_DEBUG(dbgs() << "Converting " << *I << "\n  and connected nodes to "
6208                     << *ConvertTy << "\n");
6209 
6210   // Create all the new phi nodes of the new type, and bitcast any loads to the
6211   // correct type.
6212   ValueToValueMap ValMap;
6213   for (ConstantData *C : Constants)
6214     ValMap[C] = ConstantExpr::getCast(Instruction::BitCast, C, ConvertTy);
6215   for (Instruction *D : Defs) {
6216     if (isa<BitCastInst>(D)) {
6217       ValMap[D] = D->getOperand(0);
6218       DeletedInstrs.insert(D);
6219     } else {
6220       ValMap[D] =
6221           new BitCastInst(D, ConvertTy, D->getName() + ".bc", D->getNextNode());
6222     }
6223   }
6224   for (PHINode *Phi : PhiNodes)
6225     ValMap[Phi] = PHINode::Create(ConvertTy, Phi->getNumIncomingValues(),
6226                                   Phi->getName() + ".tc", Phi);
6227   // Pipe together all the PhiNodes.
6228   for (PHINode *Phi : PhiNodes) {
6229     PHINode *NewPhi = cast<PHINode>(ValMap[Phi]);
6230     for (int i = 0, e = Phi->getNumIncomingValues(); i < e; i++)
6231       NewPhi->addIncoming(ValMap[Phi->getIncomingValue(i)],
6232                           Phi->getIncomingBlock(i));
6233     Visited.insert(NewPhi);
6234   }
6235   // And finally pipe up the stores and bitcasts
6236   for (Instruction *U : Uses) {
6237     if (isa<BitCastInst>(U)) {
6238       DeletedInstrs.insert(U);
6239       replaceAllUsesWith(U, ValMap[U->getOperand(0)], FreshBBs, IsHugeFunc);
6240     } else {
6241       U->setOperand(0,
6242                     new BitCastInst(ValMap[U->getOperand(0)], PhiTy, "bc", U));
6243     }
6244   }
6245 
6246   // Save the removed phis to be deleted later.
6247   for (PHINode *Phi : PhiNodes)
6248     DeletedInstrs.insert(Phi);
6249   return true;
6250 }
6251 
6252 bool CodeGenPrepare::optimizePhiTypes(Function &F) {
6253   if (!OptimizePhiTypes)
6254     return false;
6255 
6256   bool Changed = false;
6257   SmallPtrSet<PHINode *, 4> Visited;
6258   SmallPtrSet<Instruction *, 4> DeletedInstrs;
6259 
6260   // Attempt to optimize all the phis in the function to the correct type.
6261   for (auto &BB : F)
6262     for (auto &Phi : BB.phis())
6263       Changed |= optimizePhiType(&Phi, Visited, DeletedInstrs);
6264 
6265   // Remove any old phis that have been converted.
6266   for (auto *I : DeletedInstrs) {
6267     replaceAllUsesWith(I, PoisonValue::get(I->getType()), FreshBBs, IsHugeFunc);
6268     I->eraseFromParent();
6269   }
6270 
6271   return Changed;
6272 }
6273 
6274 /// Return true if an ext(load) can be formed from an extension in
6275 /// \p MovedExts.
6276 bool CodeGenPrepare::canFormExtLd(
6277     const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI,
6278     Instruction *&Inst, bool HasPromoted) {
6279   for (auto *MovedExtInst : MovedExts) {
6280     if (isa<LoadInst>(MovedExtInst->getOperand(0))) {
6281       LI = cast<LoadInst>(MovedExtInst->getOperand(0));
6282       Inst = MovedExtInst;
6283       break;
6284     }
6285   }
6286   if (!LI)
6287     return false;
6288 
6289   // If they're already in the same block, there's nothing to do.
6290   // Make the cheap checks first if we did not promote.
6291   // If we promoted, we need to check if it is indeed profitable.
6292   if (!HasPromoted && LI->getParent() == Inst->getParent())
6293     return false;
6294 
6295   return TLI->isExtLoad(LI, Inst, *DL);
6296 }
6297 
6298 /// Move a zext or sext fed by a load into the same basic block as the load,
6299 /// unless conditions are unfavorable. This allows SelectionDAG to fold the
6300 /// extend into the load.
6301 ///
6302 /// E.g.,
6303 /// \code
6304 /// %ld = load i32* %addr
6305 /// %add = add nuw i32 %ld, 4
6306 /// %zext = zext i32 %add to i64
6307 /// \endcode
6308 /// =>
6309 /// \code
6310 /// %ld = load i32* %addr
6311 /// %zext = zext i32 %ld to i64
6312 /// %add = add nuw i64 %zext, 4
6313 /// \endcode
6314 /// Note that the promotion in %add to i64 is done in tryToPromoteExts(), which
6315 /// allows us to match zext(load i32*) to i64.
6316 ///
6317 /// Also, try to promote the computations used to obtain a sign extended
6318 /// value used in memory accesses.
6319 /// E.g.,
6320 /// \code
6321 /// a = add nsw i32 b, 3
6322 /// d = sext i32 a to i64
6323 /// e = getelementptr ..., i64 d
6324 /// \endcode
6325 /// =>
6326 /// \code
6327 /// f = sext i32 b to i64
6328 /// a = add nsw i64 f, 3
6329 /// e = getelementptr ..., i64 a
6330 /// \endcode
6331 ///
6332 /// \p Inst[in/out] the extension may be modified during the process if some
6333 /// promotions apply.
6334 bool CodeGenPrepare::optimizeExt(Instruction *&Inst) {
6335   bool AllowPromotionWithoutCommonHeader = false;
6336   /// See if it is an interesting sext operation for the address type
6337   /// promotion before trying to promote it, e.g., the ones with the right
6338   /// type and used in memory accesses.
6339   bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion(
6340       *Inst, AllowPromotionWithoutCommonHeader);
6341   TypePromotionTransaction TPT(RemovedInsts);
6342   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
6343       TPT.getRestorationPoint();
6344   SmallVector<Instruction *, 1> Exts;
6345   SmallVector<Instruction *, 2> SpeculativelyMovedExts;
6346   Exts.push_back(Inst);
6347 
6348   bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts);
6349 
6350   // Look for a load being extended.
6351   LoadInst *LI = nullptr;
6352   Instruction *ExtFedByLoad;
6353 
6354   // Try to promote a chain of computation if it allows us to form an extended
6355   // load.
6356   if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) {
6357     assert(LI && ExtFedByLoad && "Expect a valid load and extension");
6358     TPT.commit();
6359     // Move the extend into the same block as the load.
6360     ExtFedByLoad->moveAfter(LI);
6361     ++NumExtsMoved;
6362     Inst = ExtFedByLoad;
6363     return true;
6364   }
6365 
6366   // Continue promoting SExts if the target considers address type promotion worthwhile.
6367   if (ATPConsiderable &&
6368       performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader,
6369                                   HasPromoted, TPT, SpeculativelyMovedExts))
6370     return true;
6371 
6372   TPT.rollback(LastKnownGood);
6373   return false;
6374 }
6375 
6376 // Perform address type promotion if doing so is profitable.
6377 // If AllowPromotionWithoutCommonHeader == false, we only promote when we find
6378 // other sext instructions that sign extended the same initial value. However,
6379 // if AllowPromotionWithoutCommonHeader == true, we expect promoting the
6380 // extension to be profitable on its own.
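// E.g. (illustrative): given two sext chains rooted at the same value %b,
//   %a1 = add nsw i32 %b, 4            %a2 = add nsw i32 %b, 8
//   %s1 = sext i32 %a1 to i64          %s2 = sext i32 %a2 to i64
//   %g1 = getelementptr ..., i64 %s1   %g2 = getelementptr ..., i64 %s2
// the first chain is recorded against header %b and deferred; once the second
// chain with the same header is seen, both are promoted so that the resulting
// sexts of %b can later be merged.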
6381 bool CodeGenPrepare::performAddressTypePromotion(
6382     Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
6383     bool HasPromoted, TypePromotionTransaction &TPT,
6384     SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) {
6385   bool Promoted = false;
6386   SmallPtrSet<Instruction *, 1> UnhandledExts;
6387   bool AllSeenFirst = true;
6388   for (auto *I : SpeculativelyMovedExts) {
6389     Value *HeadOfChain = I->getOperand(0);
6390     DenseMap<Value *, Instruction *>::iterator AlreadySeen =
6391         SeenChainsForSExt.find(HeadOfChain);
6392     // If there is an unhandled SExt which has the same header, try to promote
6393     // it as well.
6394     if (AlreadySeen != SeenChainsForSExt.end()) {
6395       if (AlreadySeen->second != nullptr)
6396         UnhandledExts.insert(AlreadySeen->second);
6397       AllSeenFirst = false;
6398     }
6399   }
6400 
6401   if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader &&
6402                         SpeculativelyMovedExts.size() == 1)) {
6403     TPT.commit();
6404     if (HasPromoted)
6405       Promoted = true;
6406     for (auto *I : SpeculativelyMovedExts) {
6407       Value *HeadOfChain = I->getOperand(0);
6408       SeenChainsForSExt[HeadOfChain] = nullptr;
6409       ValToSExtendedUses[HeadOfChain].push_back(I);
6410     }
6411     // Update Inst as promotion happened.
6412     Inst = SpeculativelyMovedExts.pop_back_val();
6413   } else {
6414     // This is the first chain visited from the header; keep the current chain
6415     // as unhandled. Defer promoting it until we encounter another SExt chain
6416     // derived from the same header.
6417     for (auto *I : SpeculativelyMovedExts) {
6418       Value *HeadOfChain = I->getOperand(0);
6419       SeenChainsForSExt[HeadOfChain] = Inst;
6420     }
6421     return false;
6422   }
6423 
6424   if (!AllSeenFirst && !UnhandledExts.empty())
6425     for (auto *VisitedSExt : UnhandledExts) {
6426       if (RemovedInsts.count(VisitedSExt))
6427         continue;
6428       TypePromotionTransaction TPT(RemovedInsts);
6429       SmallVector<Instruction *, 1> Exts;
6430       SmallVector<Instruction *, 2> Chains;
6431       Exts.push_back(VisitedSExt);
6432       bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains);
6433       TPT.commit();
6434       if (HasPromoted)
6435         Promoted = true;
6436       for (auto *I : Chains) {
6437         Value *HeadOfChain = I->getOperand(0);
6438         // Mark this as handled.
6439         SeenChainsForSExt[HeadOfChain] = nullptr;
6440         ValToSExtendedUses[HeadOfChain].push_back(I);
6441       }
6442     }
6443   return Promoted;
6444 }
6445 
6446 bool CodeGenPrepare::optimizeExtUses(Instruction *I) {
6447   BasicBlock *DefBB = I->getParent();
6448 
6449   // If the result of a {s|z}ext and its source are both live out, rewrite all
6450   // other uses of the source with the result of the extension.
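  // E.g. (illustrative), assuming trunc i64 -> i32 is free:
  //   bb0: %src = add i32 ...              bb0: %src = add i32 ...
  //        %ext = zext i32 %src to i64          %ext = zext i32 %src to i64
  //   bb1: use64(%ext)                =>   bb1: use64(%ext)
  //        use32(%src)                          %t = trunc i64 %ext to i32
  //                                             use32(%t)
  // so that only %ext, rather than both %ext and %src, is live out of bb0.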
6451   Value *Src = I->getOperand(0);
6452   if (Src->hasOneUse())
6453     return false;
6454 
6455   // Only do this xform if truncating is free.
6456   if (!TLI->isTruncateFree(I->getType(), Src->getType()))
6457     return false;
6458 
6459   // Only safe to perform the optimization if the source is also defined in
6460   // this block.
6461   if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
6462     return false;
6463 
6464   bool DefIsLiveOut = false;
6465   for (User *U : I->users()) {
6466     Instruction *UI = cast<Instruction>(U);
6467 
6468     // Figure out which BB this ext is used in.
6469     BasicBlock *UserBB = UI->getParent();
6470     if (UserBB == DefBB)
6471       continue;
6472     DefIsLiveOut = true;
6473     break;
6474   }
6475   if (!DefIsLiveOut)
6476     return false;
6477 
6478   // Make sure none of the uses are PHI nodes.
6479   for (User *U : Src->users()) {
6480     Instruction *UI = cast<Instruction>(U);
6481     BasicBlock *UserBB = UI->getParent();
6482     if (UserBB == DefBB)
6483       continue;
6484     // Be conservative. We don't want this xform to end up introducing
6485     // reloads just before load / store instructions.
6486     if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI))
6487       return false;
6488   }
6489 
6490   // InsertedTruncs - Only insert one trunc in each block once.
6491   DenseMap<BasicBlock *, Instruction *> InsertedTruncs;
6492 
6493   bool MadeChange = false;
6494   for (Use &U : Src->uses()) {
6495     Instruction *User = cast<Instruction>(U.getUser());
6496 
6497     // Figure out which BB this ext is used in.
6498     BasicBlock *UserBB = User->getParent();
6499     if (UserBB == DefBB)
6500       continue;
6501 
6502     // Both src and def are live in this block. Rewrite the use.
6503     Instruction *&InsertedTrunc = InsertedTruncs[UserBB];
6504 
6505     if (!InsertedTrunc) {
6506       BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
6507       assert(InsertPt != UserBB->end());
6508       InsertedTrunc = new TruncInst(I, Src->getType(), "", &*InsertPt);
6509       InsertedInsts.insert(InsertedTrunc);
6510     }
6511 
6512     // Replace a use of the {s|z}ext source with a use of the result.
6513     U = InsertedTrunc;
6514     ++NumExtUses;
6515     MadeChange = true;
6516   }
6517 
6518   return MadeChange;
6519 }
6520 
6521 // Find loads whose uses only use some of the loaded value's bits.  Add an "and"
6522 // just after the load if the target can fold this into one extload instruction,
6523 // with the hope of eliminating some of the other later "and" instructions using
6524 // the loaded value.  "and"s that are made trivially redundant by the insertion
6525 // of the new "and" are removed by this function, while others (e.g. those whose
6526 // path from the load goes through a phi) are left for isel to potentially
6527 // remove.
6528 //
6529 // For example:
6530 //
6531 // b0:
6532 //   x = load i32
6533 //   ...
6534 // b1:
6535 //   y = and x, 0xff
6536 //   z = use y
6537 //
6538 // becomes:
6539 //
6540 // b0:
6541 //   x = load i32
6542 //   x' = and x, 0xff
6543 //   ...
6544 // b1:
6545 //   z = use x'
6546 //
6547 // whereas:
6548 //
6549 // b0:
6550 //   x1 = load i32
6551 //   ...
6552 // b1:
6553 //   x2 = load i32
6554 //   ...
6555 // b2:
6556 //   x = phi x1, x2
6557 //   y = and x, 0xff
6558 //
6559 // becomes (after a call to optimizeLoadExt for each load):
6560 //
6561 // b0:
6562 //   x1 = load i32
6563 //   x1' = and x1, 0xff
6564 //   ...
6565 // b1:
6566 //   x2 = load i32
6567 //   x2' = and x2, 0xff
6568 //   ...
6569 // b2:
6570 //   x = phi x1', x2'
6571 //   y = and x, 0xff
6572 bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) {
6573   if (!Load->isSimple() || !Load->getType()->isIntOrPtrTy())
6574     return false;
6575 
6576   // Skip loads we've already transformed.
6577   if (Load->hasOneUse() &&
6578       InsertedInsts.count(cast<Instruction>(*Load->user_begin())))
6579     return false;
6580 
6581   // Look at all uses of Load, looking through phis, to determine how many bits
6582   // of the loaded value are needed.
6583   SmallVector<Instruction *, 8> WorkList;
6584   SmallPtrSet<Instruction *, 16> Visited;
6585   SmallVector<Instruction *, 8> AndsToMaybeRemove;
6586   for (auto *U : Load->users())
6587     WorkList.push_back(cast<Instruction>(U));
6588 
6589   EVT LoadResultVT = TLI->getValueType(*DL, Load->getType());
6590   unsigned BitWidth = LoadResultVT.getSizeInBits();
6591   // If the BitWidth is 0, do not try to optimize the type
6592   if (BitWidth == 0)
6593     return false;
6594 
6595   APInt DemandBits(BitWidth, 0);
6596   APInt WidestAndBits(BitWidth, 0);
6597 
6598   while (!WorkList.empty()) {
6599     Instruction *I = WorkList.pop_back_val();
6600 
6601     // Break use-def graph loops.
6602     if (!Visited.insert(I).second)
6603       continue;
6604 
6605     // For a PHI node, push all of its users.
6606     if (auto *Phi = dyn_cast<PHINode>(I)) {
6607       for (auto *U : Phi->users())
6608         WorkList.push_back(cast<Instruction>(U));
6609       continue;
6610     }
6611 
6612     switch (I->getOpcode()) {
6613     case Instruction::And: {
6614       auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1));
6615       if (!AndC)
6616         return false;
6617       APInt AndBits = AndC->getValue();
6618       DemandBits |= AndBits;
6619       // Keep track of the widest and mask we see.
6620       if (AndBits.ugt(WidestAndBits))
6621         WidestAndBits = AndBits;
6622       if (AndBits == WidestAndBits && I->getOperand(0) == Load)
6623         AndsToMaybeRemove.push_back(I);
6624       break;
6625     }
6626 
6627     case Instruction::Shl: {
6628       auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1));
6629       if (!ShlC)
6630         return false;
6631       uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1);
6632       DemandBits.setLowBits(BitWidth - ShiftAmt);
6633       break;
6634     }
6635 
6636     case Instruction::Trunc: {
6637       EVT TruncVT = TLI->getValueType(*DL, I->getType());
6638       unsigned TruncBitWidth = TruncVT.getSizeInBits();
6639       DemandBits.setLowBits(TruncBitWidth);
6640       break;
6641     }
6642 
6643     default:
6644       return false;
6645     }
6646   }
6647 
6648   uint32_t ActiveBits = DemandBits.getActiveBits();
6649   // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the
6650   // target even if isLoadExtLegal says an i1 EXTLOAD is valid.  For example,
6651   // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but
6652   // (and (load x) 1) is not matched as a single instruction, rather as a LDR
6653   // followed by an AND.
6654   // TODO: Look into removing this restriction by fixing backends to either
6655   // return false for isLoadExtLegal for i1 or have them select this pattern to
6656   // a single instruction.
6657   //
6658   // Also avoid hoisting if we didn't see any ands with the exact DemandBits
6659   // mask, since these are the only ands that will be removed by isel.
6660   if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) ||
6661       WidestAndBits != DemandBits)
6662     return false;
6663 
6664   LLVMContext &Ctx = Load->getType()->getContext();
6665   Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits);
6666   EVT TruncVT = TLI->getValueType(*DL, TruncTy);
6667 
6668   // Reject cases that won't be matched as extloads.
6669   if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() ||
6670       !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT))
6671     return false;
6672 
6673   IRBuilder<> Builder(Load->getNextNode());
6674   auto *NewAnd = cast<Instruction>(
6675       Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits)));
6676   // Mark this instruction as "inserted by CGP", so that other
6677   // optimizations don't touch it.
6678   InsertedInsts.insert(NewAnd);
6679 
6680   // Replace all uses of load with new and (except for the use of load in the
6681   // new and itself).
6682   replaceAllUsesWith(Load, NewAnd, FreshBBs, IsHugeFunc);
6683   NewAnd->setOperand(0, Load);
6684 
6685   // Remove any and instructions that are now redundant.
6686   for (auto *And : AndsToMaybeRemove)
6687     // Check that the and mask is the same as the one we decided to put on the
6688     // new and.
6689     if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) {
6690       replaceAllUsesWith(And, NewAnd, FreshBBs, IsHugeFunc);
6691       if (&*CurInstIterator == And)
6692         CurInstIterator = std::next(And->getIterator());
6693       And->eraseFromParent();
6694       ++NumAndUses;
6695     }
6696 
6697   ++NumAndsAdded;
6698   return true;
6699 }
6700 
6701 /// Check if V (an operand of a select instruction) is an expensive instruction
6702 /// that is only used once.
6703 static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) {
6704   auto *I = dyn_cast<Instruction>(V);
6705   // If it's safe to speculatively execute, then it should not have side
6706   // effects; therefore, it's safe to sink and possibly *not* execute.
6707   return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) &&
6708          TTI->isExpensiveToSpeculativelyExecute(I);
6709 }
6710 
6711 /// Returns true if a SelectInst should be turned into an explicit branch.
6712 static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI,
6713                                                 const TargetLowering *TLI,
6714                                                 SelectInst *SI) {
6715   // If even a predictable select is cheap, then a branch can't be cheaper.
6716   if (!TLI->isPredictableSelectExpensive())
6717     return false;
6718 
6719   // FIXME: This should use the same heuristics as IfConversion to determine
6720   // whether a select is better represented as a branch.
6721 
6722   // If metadata tells us that the select condition is obviously predictable,
6723   // then we want to replace the select with a branch.
6724   uint64_t TrueWeight, FalseWeight;
6725   if (extractBranchWeights(*SI, TrueWeight, FalseWeight)) {
6726     uint64_t Max = std::max(TrueWeight, FalseWeight);
6727     uint64_t Sum = TrueWeight + FalseWeight;
6728     if (Sum != 0) {
6729       auto Probability = BranchProbability::getBranchProbability(Max, Sum);
6730       if (Probability > TTI->getPredictableBranchThreshold())
6731         return true;
6732     }
6733   }
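  // E.g. (illustrative): branch weights of 1000:1 give a probability of about
  // 99.9%, which exceeds a typical predictable-branch threshold, so such a
  // select is better formed as an explicit branch.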
6734 
6735   CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
6736 
6737   // If a branch is predictable, an out-of-order CPU can avoid blocking on its
6738   // comparison condition. If the compare has more than one use, there's
6739   // probably another cmov or setcc around, so it's not worth emitting a branch.
6740   if (!Cmp || !Cmp->hasOneUse())
6741     return false;
6742 
6743   // If either operand of the select is expensive and only needed on one side
6744   // of the select, we should form a branch.
6745   if (sinkSelectOperand(TTI, SI->getTrueValue()) ||
6746       sinkSelectOperand(TTI, SI->getFalseValue()))
6747     return true;
6748 
6749   return false;
6750 }
6751 
6752 /// If \p isTrue is true, return the true value of \p SI, otherwise return
6753 /// false value of \p SI. If the true/false value of \p SI is defined by any
6754 /// select instructions in \p Selects, look through the defining select
6755 /// instruction until the true/false value is not defined in \p Selects.
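/// E.g. (illustrative): with %s1 = select i1 %c, i32 %a, i32 %b and
/// %s2 = select i1 %c, i32 %s1, i32 %d both in \p Selects, the true value of
/// %s2 is looked through %s1 and resolves to %a.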
6756 static Value *
6757 getTrueOrFalseValue(SelectInst *SI, bool isTrue,
6758                     const SmallPtrSet<const Instruction *, 2> &Selects) {
6759   Value *V = nullptr;
6760 
6761   for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI);
6762        DefSI = dyn_cast<SelectInst>(V)) {
6763     assert(DefSI->getCondition() == SI->getCondition() &&
6764            "The condition of DefSI does not match with SI");
6765     V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue());
6766   }
6767 
6768   assert(V && "Failed to get select true/false value");
6769   return V;
6770 }
6771 
6772 bool CodeGenPrepare::optimizeShiftInst(BinaryOperator *Shift) {
6773   assert(Shift->isShift() && "Expected a shift");
6774 
6775   // If this is (1) a vector shift, (2) shifts by scalars are cheaper than
6776   // general vector shifts, and (3) the shift amount is a select-of-splatted
6777   // values, hoist the shifts before the select:
6778   //   shift Op0, (select Cond, TVal, FVal) -->
6779   //   select Cond, (shift Op0, TVal), (shift Op0, FVal)
6780   //
6781   // This is inverting a generic IR transform when we know that the cost of a
6782   // general vector shift is more than the cost of 2 shift-by-scalars.
6783   // We can't do this effectively in SDAG because we may not be able to
6784   // determine if the select operands are splats from within a basic block.
6785   Type *Ty = Shift->getType();
6786   if (!Ty->isVectorTy() || !TLI->isVectorShiftByScalarCheap(Ty))
6787     return false;
6788   Value *Cond, *TVal, *FVal;
6789   if (!match(Shift->getOperand(1),
6790              m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
6791     return false;
6792   if (!isSplatValue(TVal) || !isSplatValue(FVal))
6793     return false;
6794 
6795   IRBuilder<> Builder(Shift);
6796   BinaryOperator::BinaryOps Opcode = Shift->getOpcode();
6797   Value *NewTVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), TVal);
6798   Value *NewFVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), FVal);
6799   Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal);
6800   replaceAllUsesWith(Shift, NewSel, FreshBBs, IsHugeFunc);
6801   Shift->eraseFromParent();
6802   return true;
6803 }
6804 
6805 bool CodeGenPrepare::optimizeFunnelShift(IntrinsicInst *Fsh) {
6806   Intrinsic::ID Opcode = Fsh->getIntrinsicID();
6807   assert((Opcode == Intrinsic::fshl || Opcode == Intrinsic::fshr) &&
6808          "Expected a funnel shift");
6809 
6810   // If this is (1) a vector funnel shift, (2) shifts by scalars are cheaper
6811   // than general vector shifts, and (3) the shift amount is select-of-splatted
6812   // values, hoist the funnel shifts before the select:
6813   //   fsh Op0, Op1, (select Cond, TVal, FVal) -->
6814   //   select Cond, (fsh Op0, Op1, TVal), (fsh Op0, Op1, FVal)
6815   //
6816   // This is inverting a generic IR transform when we know that the cost of a
6817   // general vector shift is more than the cost of 2 shift-by-scalars.
6818   // We can't do this effectively in SDAG because we may not be able to
6819   // determine if the select operands are splats from within a basic block.
6820   Type *Ty = Fsh->getType();
6821   if (!Ty->isVectorTy() || !TLI->isVectorShiftByScalarCheap(Ty))
6822     return false;
6823   Value *Cond, *TVal, *FVal;
6824   if (!match(Fsh->getOperand(2),
6825              m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
6826     return false;
6827   if (!isSplatValue(TVal) || !isSplatValue(FVal))
6828     return false;
6829 
6830   IRBuilder<> Builder(Fsh);
6831   Value *X = Fsh->getOperand(0), *Y = Fsh->getOperand(1);
6832   Value *NewTVal = Builder.CreateIntrinsic(Opcode, Ty, {X, Y, TVal});
6833   Value *NewFVal = Builder.CreateIntrinsic(Opcode, Ty, {X, Y, FVal});
6834   Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal);
6835   replaceAllUsesWith(Fsh, NewSel, FreshBBs, IsHugeFunc);
6836   Fsh->eraseFromParent();
6837   return true;
6838 }
6839 
6840 /// If we have a SelectInst that will likely profit from branch prediction,
6841 /// turn it into a branch.
6842 bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
6843   if (DisableSelectToBranch)
6844     return false;
6845 
6846   // If the SelectOptimize pass is enabled, selects have already been optimized.
6847   if (!getCGPassBuilderOption().DisableSelectOptimize)
6848     return false;
6849 
6850   // Find all consecutive select instructions that share the same condition.
6851   SmallVector<SelectInst *, 2> ASI;
6852   ASI.push_back(SI);
6853   for (BasicBlock::iterator It = ++BasicBlock::iterator(SI);
6854        It != SI->getParent()->end(); ++It) {
6855     SelectInst *I = dyn_cast<SelectInst>(&*It);
6856     if (I && SI->getCondition() == I->getCondition()) {
6857       ASI.push_back(I);
6858     } else {
6859       break;
6860     }
6861   }
6862 
6863   SelectInst *LastSI = ASI.back();
6864   // Increment the current iterator to skip the rest of the select instructions
6865   // because they will either all be lowered to branches or all be left as is.
6866   CurInstIterator = std::next(LastSI->getIterator());
6867 
6868   bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1);
6869 
6870   // Can we convert the 'select' to CF?
6871   if (VectorCond || SI->getMetadata(LLVMContext::MD_unpredictable))
6872     return false;
6873 
6874   TargetLowering::SelectSupportKind SelectKind;
6875   if (VectorCond)
6876     SelectKind = TargetLowering::VectorMaskSelect;
6877   else if (SI->getType()->isVectorTy())
6878     SelectKind = TargetLowering::ScalarCondVectorVal;
6879   else
6880     SelectKind = TargetLowering::ScalarValSelect;
6881 
6882   if (TLI->isSelectSupported(SelectKind) &&
6883       (!isFormingBranchFromSelectProfitable(TTI, TLI, SI) || OptSize ||
6884        llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI.get())))
6885     return false;
6886 
6887   // The DominatorTree needs to be rebuilt by any consumers after this
6888   // transformation. We simply reset here rather than setting the ModifiedDT
6889   // flag to avoid restarting the function walk in runOnFunction for each
6890   // select optimized.
6891   DT.reset();
6892 
6893   // Transform a sequence like this:
6894   //    start:
6895   //       %cmp = cmp uge i32 %a, %b
6896   //       %sel = select i1 %cmp, i32 %c, i32 %d
6897   //
6898   // Into:
6899   //    start:
6900   //       %cmp = cmp uge i32 %a, %b
6901   //       %cmp.frozen = freeze %cmp
6902   //       br i1 %cmp.frozen, label %select.true, label %select.false
6903   //    select.true:
6904   //       br label %select.end
6905   //    select.false:
6906   //       br label %select.end
6907   //    select.end:
6908   //       %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ]
6909   //
6910   // %cmp should be frozen, otherwise it may introduce undefined behavior.
6911   // In addition, we may sink instructions that produce %c or %d from
6912   // the entry block into the destination(s) of the new branch.
6913   // If the true or false blocks do not contain a sunken instruction, that
6914   // block and its branch may be optimized away. In that case, one side of the
6915   // first branch will point directly to select.end, and the corresponding PHI
6916   // predecessor block will be the start block.
6917 
6918   // First, we split the block containing the select into 2 blocks.
6919   BasicBlock *StartBlock = SI->getParent();
6920   BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(LastSI));
6921   BasicBlock *EndBlock = StartBlock->splitBasicBlock(SplitPt, "select.end");
6922   if (IsHugeFunc)
6923     FreshBBs.insert(EndBlock);
6924   BFI->setBlockFreq(EndBlock, BFI->getBlockFreq(StartBlock).getFrequency());
6925 
6926   // Delete the unconditional branch that was just created by the split.
6927   StartBlock->getTerminator()->eraseFromParent();
6928 
6929   // These are the new basic blocks for the conditional branch.
6930   // At least one will become an actual new basic block.
6931   BasicBlock *TrueBlock = nullptr;
6932   BasicBlock *FalseBlock = nullptr;
6933   BranchInst *TrueBranch = nullptr;
6934   BranchInst *FalseBranch = nullptr;
6935 
6936   // Sink expensive instructions into the conditional blocks to avoid executing
6937   // them speculatively.
6938   for (SelectInst *SI : ASI) {
6939     if (sinkSelectOperand(TTI, SI->getTrueValue())) {
6940       if (TrueBlock == nullptr) {
6941         TrueBlock = BasicBlock::Create(SI->getContext(), "select.true.sink",
6942                                        EndBlock->getParent(), EndBlock);
6943         TrueBranch = BranchInst::Create(EndBlock, TrueBlock);
6944         if (IsHugeFunc)
6945           FreshBBs.insert(TrueBlock);
6946         TrueBranch->setDebugLoc(SI->getDebugLoc());
6947       }
6948       auto *TrueInst = cast<Instruction>(SI->getTrueValue());
6949       TrueInst->moveBefore(TrueBranch);
6950     }
6951     if (sinkSelectOperand(TTI, SI->getFalseValue())) {
6952       if (FalseBlock == nullptr) {
6953         FalseBlock = BasicBlock::Create(SI->getContext(), "select.false.sink",
6954                                         EndBlock->getParent(), EndBlock);
6955         if (IsHugeFunc)
6956           FreshBBs.insert(FalseBlock);
6957         FalseBranch = BranchInst::Create(EndBlock, FalseBlock);
6958         FalseBranch->setDebugLoc(SI->getDebugLoc());
6959       }
6960       auto *FalseInst = cast<Instruction>(SI->getFalseValue());
6961       FalseInst->moveBefore(FalseBranch);
6962     }
6963   }
6964 
6965   // If there was nothing to sink, then arbitrarily choose the 'false' side
6966   // for a new input value to the PHI.
6967   if (TrueBlock == FalseBlock) {
6968     assert(TrueBlock == nullptr &&
6969            "Unexpected basic block transform while optimizing select");
6970 
6971     FalseBlock = BasicBlock::Create(SI->getContext(), "select.false",
6972                                     EndBlock->getParent(), EndBlock);
6973     if (IsHugeFunc)
6974       FreshBBs.insert(FalseBlock);
6975     auto *FalseBranch = BranchInst::Create(EndBlock, FalseBlock);
6976     FalseBranch->setDebugLoc(SI->getDebugLoc());
6977   }
6978 
6979   // Insert the real conditional branch based on the original condition.
6980   // If we did not create a new block for one of the 'true' or 'false' paths
6981   // of the condition, it means that side of the branch goes to the end block
6982   // directly and the path originates from the start block from the point of
6983   // view of the new PHI.
6984   BasicBlock *TT, *FT;
6985   if (TrueBlock == nullptr) {
6986     TT = EndBlock;
6987     FT = FalseBlock;
6988     TrueBlock = StartBlock;
6989   } else if (FalseBlock == nullptr) {
6990     TT = TrueBlock;
6991     FT = EndBlock;
6992     FalseBlock = StartBlock;
6993   } else {
6994     TT = TrueBlock;
6995     FT = FalseBlock;
6996   }
6997   IRBuilder<> IB(SI);
6998   auto *CondFr = IB.CreateFreeze(SI->getCondition(), SI->getName() + ".frozen");
6999   IB.CreateCondBr(CondFr, TT, FT, SI);
7000 
7001   SmallPtrSet<const Instruction *, 2> INS;
7002   INS.insert(ASI.begin(), ASI.end());
7003   // Use a reverse iterator because a later select may use the value of an
7004   // earlier select, and we need to propagate the value through the earlier
7005   // select to get the PHI operand.
7006   for (SelectInst *SI : llvm::reverse(ASI)) {
7007     // The select itself is replaced with a PHI Node.
7008     PHINode *PN = PHINode::Create(SI->getType(), 2, "", &EndBlock->front());
7009     PN->takeName(SI);
7010     PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock);
7011     PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock);
7012     PN->setDebugLoc(SI->getDebugLoc());
7013 
7014     replaceAllUsesWith(SI, PN, FreshBBs, IsHugeFunc);
7015     SI->eraseFromParent();
7016     INS.erase(SI);
7017     ++NumSelectsExpanded;
7018   }
7019 
7020   // Instruct OptimizeBlock to skip to the next block.
7021   CurInstIterator = StartBlock->end();
7022   return true;
7023 }
7024 
7025 /// Some targets only accept certain types for splat inputs. For example a VDUP
7026 /// in MVE takes a GPR (integer) register, and instructions that incorporate
7027 /// a VDUP (such as a VADD qd, qm, rm) also require a GPR register.
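/// E.g. (illustrative, for an MVE-like target that converts f16 splats to i16):
/// \code
/// %i = insertelement <8 x half> poison, half %v, i64 0
/// %s = shufflevector <8 x half> %i, <8 x half> poison, <8 x i32> zeroinitializer
/// \endcode
/// is rebuilt as a bitcast of %v to i16, an <8 x i16> splat of that value, and
/// a bitcast of the splat back to <8 x half>, so the splat input can live in a
/// GPR as the target prefers.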
7028 bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
7029   // Accept shuf(insertelem(undef/poison, val, 0), undef/poison, <0,0,..>) only
7030   if (!match(SVI, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
7031                             m_Undef(), m_ZeroMask())))
7032     return false;
7033   Type *NewType = TLI->shouldConvertSplatType(SVI);
7034   if (!NewType)
7035     return false;
7036 
7037   auto *SVIVecType = cast<FixedVectorType>(SVI->getType());
7038   assert(!NewType->isVectorTy() && "Expected a scalar type!");
7039   assert(NewType->getScalarSizeInBits() == SVIVecType->getScalarSizeInBits() &&
7040          "Expected a type of the same size!");
7041   auto *NewVecType =
7042       FixedVectorType::get(NewType, SVIVecType->getNumElements());
7043 
7044   // Create a bitcast (shuffle (insert (bitcast(..))))
7045   IRBuilder<> Builder(SVI->getContext());
7046   Builder.SetInsertPoint(SVI);
7047   Value *BC1 = Builder.CreateBitCast(
7048       cast<Instruction>(SVI->getOperand(0))->getOperand(1), NewType);
7049   Value *Shuffle = Builder.CreateVectorSplat(NewVecType->getNumElements(), BC1);
7050   Value *BC2 = Builder.CreateBitCast(Shuffle, SVIVecType);
7051 
7052   replaceAllUsesWith(SVI, BC2, FreshBBs, IsHugeFunc);
7053   RecursivelyDeleteTriviallyDeadInstructions(
7054       SVI, TLInfo, nullptr,
7055       [&](Value *V) { removeAllAssertingVHReferences(V); });
7056 
7057   // Also hoist the bitcast up to its operand if they are not in the same
7058   // block.
7059   if (auto *BCI = dyn_cast<Instruction>(BC1))
7060     if (auto *Op = dyn_cast<Instruction>(BCI->getOperand(0)))
7061       if (BCI->getParent() != Op->getParent() && !isa<PHINode>(Op) &&
7062           !Op->isTerminator() && !Op->isEHPad())
7063         BCI->moveAfter(Op);
7064 
7065   return true;
7066 }
7067 
7068 bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) {
7069   // If the operands of I can be folded into a target instruction together with
7070   // I, duplicate and sink them.
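  // E.g. (illustrative, AArch64-like): a zext/sext operand that feeds a mul in
  // this block can be duplicated next to the mul so the pair can be selected
  // as a single widening multiply.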
7071   SmallVector<Use *, 4> OpsToSink;
7072   if (!TLI->shouldSinkOperands(I, OpsToSink))
7073     return false;
7074 
7075   // OpsToSink can contain multiple uses in a use chain (e.g.
7076   // (%u1 with %u1 = shufflevector), (%u2 with %u2 = zext %u1)). The dominating
7077   // uses must come first, so we process the ops in reverse order so as to not
7078   // create invalid IR.
7079   BasicBlock *TargetBB = I->getParent();
7080   bool Changed = false;
7081   SmallVector<Use *, 4> ToReplace;
7082   Instruction *InsertPoint = I;
7083   DenseMap<const Instruction *, unsigned long> InstOrdering;
7084   unsigned long InstNumber = 0;
7085   for (const auto &I : *TargetBB)
7086     InstOrdering[&I] = InstNumber++;
7087 
7088   for (Use *U : reverse(OpsToSink)) {
7089     auto *UI = cast<Instruction>(U->get());
7090     if (isa<PHINode>(UI))
7091       continue;
7092     if (UI->getParent() == TargetBB) {
7093       if (InstOrdering[UI] < InstOrdering[InsertPoint])
7094         InsertPoint = UI;
7095       continue;
7096     }
7097     ToReplace.push_back(U);
7098   }
7099 
7100   SetVector<Instruction *> MaybeDead;
7101   DenseMap<Instruction *, Instruction *> NewInstructions;
7102   for (Use *U : ToReplace) {
7103     auto *UI = cast<Instruction>(U->get());
7104     Instruction *NI = UI->clone();
7105 
7106     if (IsHugeFunc) {
7107       // Now that we have cloned an instruction, its operands' defs may sink to
7108       // this BB, so we put those defs' BBs into FreshBBs for further optimization.
7109       for (unsigned I = 0; I < NI->getNumOperands(); ++I) {
7110         auto *OpDef = dyn_cast<Instruction>(NI->getOperand(I));
7111         if (!OpDef)
7112           continue;
7113         FreshBBs.insert(OpDef->getParent());
7114       }
7115     }
7116 
7117     NewInstructions[UI] = NI;
7118     MaybeDead.insert(UI);
7119     LLVM_DEBUG(dbgs() << "Sinking " << *UI << " to user " << *I << "\n");
7120     NI->insertBefore(InsertPoint);
7121     InsertPoint = NI;
7122     InsertedInsts.insert(NI);
7123 
7124     // Update the use for the new instruction, making sure that we update the
7125     // sunk instruction uses, if it is part of a chain that has already been
7126     // sunk.
7127     Instruction *OldI = cast<Instruction>(U->getUser());
7128     if (NewInstructions.count(OldI))
7129       NewInstructions[OldI]->setOperand(U->getOperandNo(), NI);
7130     else
7131       U->set(NI);
7132     Changed = true;
7133   }
7134 
7135   // Remove instructions that are dead after sinking.
7136   for (auto *I : MaybeDead) {
7137     if (!I->hasNUsesOrMore(1)) {
7138       LLVM_DEBUG(dbgs() << "Removing dead instruction: " << *I << "\n");
7139       I->eraseFromParent();
7140     }
7141   }
7142 
7143   return Changed;
7144 }
7145 
7146 bool CodeGenPrepare::optimizeSwitchType(SwitchInst *SI) {
7147   Value *Cond = SI->getCondition();
7148   Type *OldType = Cond->getType();
7149   LLVMContext &Context = Cond->getContext();
7150   EVT OldVT = TLI->getValueType(*DL, OldType);
7151   MVT RegType = TLI->getPreferredSwitchConditionType(Context, OldVT);
7152   unsigned RegWidth = RegType.getSizeInBits();
7153 
7154   if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth())
7155     return false;
7156 
7157   // If the register width is greater than the type width, expand the condition
7158   // of the switch instruction and each case constant to the width of the
7159   // register. By widening the type of the switch condition, subsequent
7160   // comparisons (for case comparisons) will not need to be extended to the
7161   // preferred register width, so we will potentially eliminate N-1 extends,
7162   // where N is the number of cases in the switch.
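  // E.g. (illustrative), with a 32-bit preferred condition register and zext:
  //   switch i8 %c, label %def [ i8 1, label %bb1 ]
  // becomes
  //   %c.wide = zext i8 %c to i32
  //   switch i32 %c.wide, label %def [ i32 1, label %bb1 ]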
7163   auto *NewType = Type::getIntNTy(Context, RegWidth);
7164 
7165   // Extend the switch condition and case constants using the target preferred
7166   // extend unless the switch condition is a function argument with an extend
7167   // attribute. In that case, we can avoid an unnecessary mask/extension by
7168   // matching the argument extension instead.
7169   Instruction::CastOps ExtType = Instruction::ZExt;
7170   // Some targets prefer SExt over ZExt.
7171   if (TLI->isSExtCheaperThanZExt(OldVT, RegType))
7172     ExtType = Instruction::SExt;
7173 
7174   if (auto *Arg = dyn_cast<Argument>(Cond)) {
7175     if (Arg->hasSExtAttr())
7176       ExtType = Instruction::SExt;
7177     if (Arg->hasZExtAttr())
7178       ExtType = Instruction::ZExt;
7179   }
7180 
7181   auto *ExtInst = CastInst::Create(ExtType, Cond, NewType);
7182   ExtInst->insertBefore(SI);
7183   ExtInst->setDebugLoc(SI->getDebugLoc());
7184   SI->setCondition(ExtInst);
7185   for (auto Case : SI->cases()) {
7186     const APInt &NarrowConst = Case.getCaseValue()->getValue();
7187     APInt WideConst = (ExtType == Instruction::ZExt)
7188                           ? NarrowConst.zext(RegWidth)
7189                           : NarrowConst.sext(RegWidth);
7190     Case.setValue(ConstantInt::get(Context, WideConst));
7191   }
7192 
7193   return true;
7194 }
7195 
7196 bool CodeGenPrepare::optimizeSwitchPhiConstants(SwitchInst *SI) {
7197   // The SCCP optimization tends to produce code like this:
7198   //   switch(x) { case 42: phi(42, ...) }
7199   // Materializing the constant for the phi-argument needs instructions; so we
7200   // change the code to:
7201   //   switch(x) { case 42: phi(x, ...) }
7202 
7203   Value *Condition = SI->getCondition();
7204   // Avoid endless loop in degenerate case.
7205   if (isa<ConstantInt>(*Condition))
7206     return false;
7207 
7208   bool Changed = false;
7209   BasicBlock *SwitchBB = SI->getParent();
7210   Type *ConditionType = Condition->getType();
7211 
7212   for (const SwitchInst::CaseHandle &Case : SI->cases()) {
7213     ConstantInt *CaseValue = Case.getCaseValue();
7214     BasicBlock *CaseBB = Case.getCaseSuccessor();
7215     // Set to true if we previously checked that `CaseBB` is only reached by
7216     // a single case from this switch.
7217     bool CheckedForSinglePred = false;
7218     for (PHINode &PHI : CaseBB->phis()) {
7219       Type *PHIType = PHI.getType();
7220       // If ZExt is free then we can also catch patterns like this:
7221       //   switch((i32)x) { case 42: phi((i64)42, ...); }
7222       // and replace `(i64)42` with `zext i32 %x to i64`.
7223       bool TryZExt =
7224           PHIType->isIntegerTy() &&
7225           PHIType->getIntegerBitWidth() > ConditionType->getIntegerBitWidth() &&
7226           TLI->isZExtFree(ConditionType, PHIType);
7227       if (PHIType == ConditionType || TryZExt) {
7228         // Set to true to skip this case because of multiple preds.
7229         bool SkipCase = false;
7230         Value *Replacement = nullptr;
7231         for (unsigned I = 0, E = PHI.getNumIncomingValues(); I != E; I++) {
7232           Value *PHIValue = PHI.getIncomingValue(I);
7233           if (PHIValue != CaseValue) {
7234             if (!TryZExt)
7235               continue;
7236             ConstantInt *PHIValueInt = dyn_cast<ConstantInt>(PHIValue);
7237             if (!PHIValueInt ||
7238                 PHIValueInt->getValue() !=
7239                     CaseValue->getValue().zext(PHIType->getIntegerBitWidth()))
7240               continue;
7241           }
7242           if (PHI.getIncomingBlock(I) != SwitchBB)
7243             continue;
7244           // We cannot optimize if there are multiple case labels jumping to
7245           // this block.  This check may get expensive when there are many
7246           // case labels so we test for it last.
7247           if (!CheckedForSinglePred) {
7248             CheckedForSinglePred = true;
7249             if (SI->findCaseDest(CaseBB) == nullptr) {
7250               SkipCase = true;
7251               break;
7252             }
7253           }
7254 
7255           if (Replacement == nullptr) {
7256             if (PHIValue == CaseValue) {
7257               Replacement = Condition;
7258             } else {
7259               IRBuilder<> Builder(SI);
7260               Replacement = Builder.CreateZExt(Condition, PHIType);
7261             }
7262           }
7263           PHI.setIncomingValue(I, Replacement);
7264           Changed = true;
7265         }
7266         if (SkipCase)
7267           break;
7268       }
7269     }
7270   }
7271   return Changed;
7272 }
7273 
7274 bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) {
7275   bool Changed = optimizeSwitchType(SI);
7276   Changed |= optimizeSwitchPhiConstants(SI);
7277   return Changed;
7278 }
7279 
7280 namespace {
7281 
7282 /// Helper class to promote a scalar operation to a vector one.
7283 /// This class is used to move an extractelement transition downward.
7284 /// E.g.,
7285 /// a = vector_op <2 x i32>
7286 /// b = extractelement <2 x i32> a, i32 0
7287 /// c = scalar_op b
7288 /// store c
7289 ///
7290 /// =>
7291 /// a = vector_op <2 x i32>
7292 /// c = vector_op a (equivalent to scalar_op on the related lane)
7293 /// * d = extractelement <2 x i32> c, i32 0
7294 /// * store d
7295 /// Assuming both extractelement and store can be combined, we get rid of the
7296 /// transition.
7297 class VectorPromoteHelper {
7298   /// DataLayout associated with the current module.
7299   const DataLayout &DL;
7300 
7301   /// Used to perform some checks on the legality of vector operations.
7302   const TargetLowering &TLI;
7303 
7304   /// Used to estimate the cost of the promoted chain.
7305   const TargetTransformInfo &TTI;
7306 
7307   /// The transition being moved downwards.
7308   Instruction *Transition;
7309 
7310   /// The sequence of instructions to be promoted.
7311   SmallVector<Instruction *, 4> InstsToBePromoted;
7312 
7313   /// Cost of combining a store and an extract.
7314   unsigned StoreExtractCombineCost;
7315 
7316   /// Instruction that will be combined with the transition.
7317   Instruction *CombineInst = nullptr;
7318 
7319   /// The instruction that represents the current end of the transition.
7320   /// Since we are faking the promotion until we reach the end of the chain
7321   /// of computation, we need a way to get the current end of the transition.
7322   Instruction *getEndOfTransition() const {
7323     if (InstsToBePromoted.empty())
7324       return Transition;
7325     return InstsToBePromoted.back();
7326   }
7327 
7328   /// Return the index of the original value in the transition.
7329   /// E.g., for "extractelement <2 x i32> c, i32 1" the original value,
7330   /// c, is at index 0.
7331   unsigned getTransitionOriginalValueIdx() const {
7332     assert(isa<ExtractElementInst>(Transition) &&
7333            "Other kind of transitions are not supported yet");
7334     return 0;
7335   }
7336 
7337   /// Return the index of the index in the transition.
7338   /// E.g., for "extractelement <2 x i32> c, i32 0" the index
7339   /// is at index 1.
7340   unsigned getTransitionIdx() const {
7341     assert(isa<ExtractElementInst>(Transition) &&
7342            "Other kind of transitions are not supported yet");
7343     return 1;
7344   }
7345 
7346   /// Get the type of the transition.
7347   /// This is the type of the original value.
7348   /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the
7349   /// transition is <2 x i32>.
7350   Type *getTransitionType() const {
7351     return Transition->getOperand(getTransitionOriginalValueIdx())->getType();
7352   }
7353 
7354   /// Promote \p ToBePromoted by moving \p Def downward through it.
7355   /// I.e., we have the following sequence:
7356   /// Def = Transition <ty1> a to <ty2>
7357   /// b = ToBePromoted <ty2> Def, ...
7358   /// =>
7359   /// b = ToBePromoted <ty1> a, ...
7360   /// Def = Transition <ty1> ToBePromoted to <ty2>
7361   void promoteImpl(Instruction *ToBePromoted);
7362 
7363   /// Check whether or not it is profitable to promote all the
7364   /// instructions enqueued to be promoted.
7365   bool isProfitableToPromote() {
7366     Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx());
7367     unsigned Index = isa<ConstantInt>(ValIdx)
7368                          ? cast<ConstantInt>(ValIdx)->getZExtValue()
7369                          : -1;
7370     Type *PromotedType = getTransitionType();
7371 
7372     StoreInst *ST = cast<StoreInst>(CombineInst);
7373     unsigned AS = ST->getPointerAddressSpace();
7374     // Check if this store is supported.
7375     if (!TLI.allowsMisalignedMemoryAccesses(
7376             TLI.getValueType(DL, ST->getValueOperand()->getType()), AS,
7377             ST->getAlign())) {
7378       // If this is not supported, there is no way we can combine
7379       // the extract with the store.
7380       return false;
7381     }
7382 
7383     // The scalar chain of computation has to pay for the transition
7384     // scalar to vector.
7385     // The vector chain has to account for the combining cost.
7386     enum TargetTransformInfo::TargetCostKind CostKind =
7387         TargetTransformInfo::TCK_RecipThroughput;
7388     InstructionCost ScalarCost =
7389         TTI.getVectorInstrCost(*Transition, PromotedType, CostKind, Index);
7390     InstructionCost VectorCost = StoreExtractCombineCost;
7391     for (const auto &Inst : InstsToBePromoted) {
7392       // Compute the cost.
7393       // By construction, all instructions being promoted are arithmetic ones.
7394       // Moreover, one argument is a constant that can be viewed as a splat
7395       // constant.
7396       Value *Arg0 = Inst->getOperand(0);
7397       bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) ||
7398                             isa<ConstantFP>(Arg0);
7399       TargetTransformInfo::OperandValueInfo Arg0Info, Arg1Info;
7400       if (IsArg0Constant)
7401         Arg0Info.Kind = TargetTransformInfo::OK_UniformConstantValue;
7402       else
7403         Arg1Info.Kind = TargetTransformInfo::OK_UniformConstantValue;
7404 
7405       ScalarCost += TTI.getArithmeticInstrCost(
7406           Inst->getOpcode(), Inst->getType(), CostKind, Arg0Info, Arg1Info);
7407       VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType,
7408                                                CostKind, Arg0Info, Arg1Info);
7409     }
7410     LLVM_DEBUG(
7411         dbgs() << "Estimated cost of computation to be promoted:\nScalar: "
7412                << ScalarCost << "\nVector: " << VectorCost << '\n');
7413     return ScalarCost > VectorCost;
7414   }
7415 
7416   /// Generate a constant vector with \p Val with the same
7417   /// number of elements as the transition.
7418   /// \p UseSplat defines whether or not \p Val should be replicated
7419   /// across the whole vector.
7420   /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>,
7421   /// otherwise we generate a vector with as many undef as possible:
7422   /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only
7423   /// used at the index of the extract.
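  /// E.g. (illustrative), for a <4 x i32> transition extracting index 2 and
  /// \p Val == 7: with UseSplat we produce <7, 7, 7, 7>, otherwise
  /// <undef, undef, 7, undef>.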
7424   Value *getConstantVector(Constant *Val, bool UseSplat) const {
7425     unsigned ExtractIdx = std::numeric_limits<unsigned>::max();
7426     if (!UseSplat) {
7427       // If we cannot determine where the constant must be, we have to
7428       // use a splat constant.
7429       Value *ValExtractIdx = Transition->getOperand(getTransitionIdx());
7430       if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx))
7431         ExtractIdx = CstVal->getSExtValue();
7432       else
7433         UseSplat = true;
7434     }
7435 
7436     ElementCount EC = cast<VectorType>(getTransitionType())->getElementCount();
7437     if (UseSplat)
7438       return ConstantVector::getSplat(EC, Val);
7439 
7440     if (!EC.isScalable()) {
7441       SmallVector<Constant *, 4> ConstVec;
7442       UndefValue *UndefVal = UndefValue::get(Val->getType());
7443       for (unsigned Idx = 0; Idx != EC.getKnownMinValue(); ++Idx) {
7444         if (Idx == ExtractIdx)
7445           ConstVec.push_back(Val);
7446         else
7447           ConstVec.push_back(UndefVal);
7448       }
7449       return ConstantVector::get(ConstVec);
7450     } else
7451       llvm_unreachable(
7452           "Generate scalable vector for non-splat is unimplemented");
7453   }
7454 
7455   /// Check if promoting the operand at \p OperandIdx in \p Use to a vector
7456   /// type can trigger undefined behavior.
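  /// E.g. (illustrative): promoting the divisor of 'udiv i32 %a, %b' would put
  /// undef in the unused lanes of the divisor vector, which could introduce a
  /// division by undef; such operands are therefore either rejected or filled
  /// with a splat constant rather than undef.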
7457   static bool canCauseUndefinedBehavior(const Instruction *Use,
7458                                         unsigned OperandIdx) {
7459     // It is not safe to introduce undef when the operand is on
7460     // the right-hand side of a division-like instruction.
7461     if (OperandIdx != 1)
7462       return false;
7463     switch (Use->getOpcode()) {
7464     default:
7465       return false;
7466     case Instruction::SDiv:
7467     case Instruction::UDiv:
7468     case Instruction::SRem:
7469     case Instruction::URem:
7470       return true;
7471     case Instruction::FDiv:
7472     case Instruction::FRem:
7473       return !Use->hasNoNaNs();
7474     }
7475     llvm_unreachable(nullptr);
7476   }
7477 
7478 public:
7479   VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI,
7480                       const TargetTransformInfo &TTI, Instruction *Transition,
7481                       unsigned CombineCost)
7482       : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition),
7483         StoreExtractCombineCost(CombineCost) {
7484     assert(Transition && "Do not know how to promote null");
7485   }
7486 
7487   /// Check if we can promote \p ToBePromoted to \p Type.
7488   bool canPromote(const Instruction *ToBePromoted) const {
7489     // We could support CastInst too.
7490     return isa<BinaryOperator>(ToBePromoted);
7491   }
7492 
7493   /// Check if it is profitable to promote \p ToBePromoted
7494   /// by moving the transition downward through it.
7495   bool shouldPromote(const Instruction *ToBePromoted) const {
7496     // Promote only if all the operands can be statically expanded.
7497     // Indeed, we do not want to introduce any new kind of transitions.
7498     for (const Use &U : ToBePromoted->operands()) {
7499       const Value *Val = U.get();
7500       if (Val == getEndOfTransition()) {
7501         // If the use is a division and the transition is on the rhs,
7502         // we cannot promote the operation, otherwise we may create a
7503         // division by zero.
7504         if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()))
7505           return false;
7506         continue;
7507       }
7508       if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) &&
7509           !isa<ConstantFP>(Val))
7510         return false;
7511     }
7512     // Check that the resulting operation is legal.
7513     int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode());
7514     if (!ISDOpcode)
7515       return false;
7516     return StressStoreExtract ||
7517            TLI.isOperationLegalOrCustom(
7518                ISDOpcode, TLI.getValueType(DL, getTransitionType(), true));
7519   }
7520 
7521   /// Check whether or not \p Use can be combined
7522   /// with the transition.
7523   /// I.e., is it possible to do Use(Transition) => AnotherUse?
7524   bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); }
7525 
7526   /// Record \p ToBePromoted as part of the chain to be promoted.
7527   void enqueueForPromotion(Instruction *ToBePromoted) {
7528     InstsToBePromoted.push_back(ToBePromoted);
7529   }
7530 
7531   /// Set the instruction that will be combined with the transition.
7532   void recordCombineInstruction(Instruction *ToBeCombined) {
7533     assert(canCombine(ToBeCombined) && "Unsupported instruction to combine");
7534     CombineInst = ToBeCombined;
7535   }
7536 
7537   /// Promote all the instructions enqueued for promotion if it is
7538   /// profitable.
7539   /// \return True if the promotion happened, false otherwise.
7540   bool promote() {
7541     // Check if there is something to promote.
7542     // Right now, if we do not have anything to combine with,
7543     // we assume the promotion is not profitable.
7544     if (InstsToBePromoted.empty() || !CombineInst)
7545       return false;
7546 
7547     // Check cost.
7548     if (!StressStoreExtract && !isProfitableToPromote())
7549       return false;
7550 
7551     // Promote.
7552     for (auto &ToBePromoted : InstsToBePromoted)
7553       promoteImpl(ToBePromoted);
7554     InstsToBePromoted.clear();
7555     return true;
7556   }
7557 };
7558 
7559 } // end anonymous namespace
7560 
7561 void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) {
7562   // At this point, we know that all the operands of ToBePromoted but Def
7563   // can be statically promoted.
7564   // For Def, we need to use its parameter in ToBePromoted:
7565   // b = ToBePromoted ty1 a
7566   // Def = Transition ty1 b to ty2
7567   // Move the transition down.
7568   // 1. Replace all uses of the promoted operation by the transition.
7569   // = ... b => = ... Def.
7570   assert(ToBePromoted->getType() == Transition->getType() &&
7571          "The type of the result of the transition does not match "
7572          "the final type");
7573   ToBePromoted->replaceAllUsesWith(Transition);
7574   // 2. Update the type of the uses.
7575   // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def.
7576   Type *TransitionTy = getTransitionType();
7577   ToBePromoted->mutateType(TransitionTy);
7578   // 3. Update all the operands of the promoted operation with promoted
7579   // operands.
7580   // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a.
7581   for (Use &U : ToBePromoted->operands()) {
7582     Value *Val = U.get();
7583     Value *NewVal = nullptr;
7584     if (Val == Transition)
7585       NewVal = Transition->getOperand(getTransitionOriginalValueIdx());
7586     else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) ||
7587              isa<ConstantFP>(Val)) {
7588       // Use a splat constant if it is not safe to use undef.
7589       NewVal = getConstantVector(
7590           cast<Constant>(Val),
7591           isa<UndefValue>(Val) ||
7592               canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()));
7593     } else
7594       llvm_unreachable("Did you modify shouldPromote and forget to update "
7595                        "this?");
7596     ToBePromoted->setOperand(U.getOperandNo(), NewVal);
7597   }
7598   Transition->moveAfter(ToBePromoted);
7599   Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted);
7600 }
7601 
7602 /// Some targets can do store(extractelement) with one instruction.
7603 /// Try to push the extractelement towards the stores when the target
7604 /// has this feature and this is profitable.
7605 bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {
7606   unsigned CombineCost = std::numeric_limits<unsigned>::max();
7607   if (DisableStoreExtract ||
7608       (!StressStoreExtract &&
7609        !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(),
7610                                        Inst->getOperand(1), CombineCost)))
7611     return false;
7612 
7613   // At this point we know that Inst is a vector to scalar transition.
7614   // Try to move it down the def-use chain, until:
7615   // - We can combine the transition with its single use
7616   //   => we got rid of the transition.
7617   // - We escape the current basic block
7618   //   => we would need to check that we are moving it to a cheaper place and
7619   //      we do not do that for now.
7620   BasicBlock *Parent = Inst->getParent();
7621   LLVM_DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n');
7622   VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost);
7623   // If the transition has more than one use, assume this is not going to be
7624   // beneficial.
7625   while (Inst->hasOneUse()) {
7626     Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin());
7627     LLVM_DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n');
7628 
7629     if (ToBePromoted->getParent() != Parent) {
7630       LLVM_DEBUG(dbgs() << "Instruction to promote is in a different block ("
7631                         << ToBePromoted->getParent()->getName()
7632                         << ") than the transition (" << Parent->getName()
7633                         << ").\n");
7634       return false;
7635     }
7636 
7637     if (VPH.canCombine(ToBePromoted)) {
7638       LLVM_DEBUG(dbgs() << "Assume " << *Inst << '\n'
7639                         << "will be combined with: " << *ToBePromoted << '\n');
7640       VPH.recordCombineInstruction(ToBePromoted);
7641       bool Changed = VPH.promote();
7642       NumStoreExtractExposed += Changed;
7643       return Changed;
7644     }
7645 
7646     LLVM_DEBUG(dbgs() << "Try promoting.\n");
7647     if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted))
7648       return false;
7649 
7650     LLVM_DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n");
7651 
7652     VPH.enqueueForPromotion(ToBePromoted);
7653     Inst = ToBePromoted;
7654   }
7655   return false;
7656 }
7657 
7658 /// For the store instruction sequence below, the F and I values
7659 /// are bundled together as an i64 value before being stored into memory.
7660 /// Sometimes it is more efficient to generate separate stores for F and I,
7661 /// which can remove the bitwise instructions or sink them to colder places.
7662 ///
7663 ///   (store (or (zext (bitcast F to i32) to i64),
7664 ///              (shl (zext I to i64), 32)), addr)  -->
7665 ///   (store F, addr) and (store I, addr+4)
7666 ///
7667 /// Similarly, splitting other merged stores can also be beneficial, for example:
7668 /// For pair of {i32, i32}, i64 store --> two i32 stores.
7669 /// For pair of {i32, i16}, i64 store --> two i32 stores.
7670 /// For pair of {i16, i16}, i32 store --> two i16 stores.
7671 /// For pair of {i16, i8},  i32 store --> two i16 stores.
7672 /// For pair of {i8, i8},   i16 store --> two i8 stores.
7673 ///
7674 /// We allow each target to determine specifically which kind of splitting is
7675 /// supported.
7676 ///
7677 /// The store patterns are commonly seen from the simple code snippet below
7678 /// if only std::make_pair(...) is SROA-transformed before being inlined into hoo.
7679 ///   void goo(const std::pair<int, float> &);
7680 ///   void hoo() {
7681 ///     ...
7682 ///     goo(std::make_pair(tmp, ftmp));
7683 ///     ...
7684 ///   }
7685 ///
7686 /// Although we already have similar splitting in DAG Combine, we duplicate
7687 /// it in CodeGenPrepare to catch the case in which the pattern spans
7688 /// multiple BBs. The logic in DAG Combine is kept to catch cases generated
7689 /// during code expansion.
7690 static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL,
7691                                 const TargetLowering &TLI) {
7692   // Handle simple but common cases only.
7693   Type *StoreType = SI.getValueOperand()->getType();
7694 
7695   // The code below assumes shifting a value by <number of bits>,
7696   // whereas scalable vectors would have to be shifted by
7697   // <2log(vscale) + number of bits> in order to store the
7698   // low/high parts. Bailing out for now.
7699   if (isa<ScalableVectorType>(StoreType))
7700     return false;
7701 
7702   if (!DL.typeSizeEqualsStoreSize(StoreType) ||
7703       DL.getTypeSizeInBits(StoreType) == 0)
7704     return false;
7705 
7706   unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2;
7707   Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize);
7708   if (!DL.typeSizeEqualsStoreSize(SplitStoreType))
7709     return false;
7710 
7711   // Don't split the store if it is volatile.
7712   if (SI.isVolatile())
7713     return false;
7714 
7715   // Match the following patterns:
7716   // (store (or (zext LValue to i64),
7717   //            (shl (zext HValue to i64), HalfValBitSize)), addr)
7718   //  or
7719   // (store (or (shl (zext HValue to i64), HalfValBitSize),
7720   //            (zext LValue to i64)), addr)
7721   // Expect both operands of the OR and the first operand of the SHL to
7722   // have only one use.
7723   Value *LValue, *HValue;
7724   if (!match(SI.getValueOperand(),
7725              m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))),
7726                     m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))),
7727                                    m_SpecificInt(HalfValBitSize))))))
7728     return false;
7729 
7730   // Check that LValue and HValue are integers no wider than HalfValBitSize.
7731   if (!LValue->getType()->isIntegerTy() ||
7732       DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize ||
7733       !HValue->getType()->isIntegerTy() ||
7734       DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize)
7735     return false;
7736 
7737   // If LValue/HValue is a bitcast instruction, use the EVT before bitcast
7738   // as the input of target query.
7739   auto *LBC = dyn_cast<BitCastInst>(LValue);
7740   auto *HBC = dyn_cast<BitCastInst>(HValue);
7741   EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType())
7742                   : EVT::getEVT(LValue->getType());
7743   EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType())
7744                    : EVT::getEVT(HValue->getType());
7745   if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy))
7746     return false;
7747 
7748   // Start to split store.
7749   IRBuilder<> Builder(SI.getContext());
7750   Builder.SetInsertPoint(&SI);
7751 
7752   // If LValue/HValue is a bitcast in another BB, create a new one in the
7753   // current BB so it may be merged with the split stores by the DAG combiner.
7754   if (LBC && LBC->getParent() != SI.getParent())
7755     LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType());
7756   if (HBC && HBC->getParent() != SI.getParent())
7757     HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType());
7758 
7759   bool IsLE = SI.getModule()->getDataLayout().isLittleEndian();
7760   auto CreateSplitStore = [&](Value *V, bool Upper) {
7761     V = Builder.CreateZExtOrBitCast(V, SplitStoreType);
7762     Value *Addr = Builder.CreateBitCast(
7763         SI.getOperand(1),
7764         SplitStoreType->getPointerTo(SI.getPointerAddressSpace()));
7765     Align Alignment = SI.getAlign();
7766     const bool IsOffsetStore = (IsLE && Upper) || (!IsLE && !Upper);
7767     if (IsOffsetStore) {
7768       Addr = Builder.CreateGEP(
7769           SplitStoreType, Addr,
7770           ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1));
7771 
7772       // When splitting the store in half, naturally one half will retain the
7773       // alignment of the original wider store, regardless of whether it was
7774       // over-aligned or not, while the other will require adjustment.
7775       Alignment = commonAlignment(Alignment, HalfValBitSize / 8);
7776     }
7777     Builder.CreateAlignedStore(V, Addr, Alignment);
7778   };
7779 
7780   CreateSplitStore(LValue, false);
7781   CreateSplitStore(HValue, true);
7782 
7783   // Delete the old store.
7784   SI.eraseFromParent();
7785   return true;
7786 }
7787 
7788 // Return true if the GEP has two operands, the first operand is of a sequential
7789 // type, and the second operand is a constant.
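// For illustration (a hedged sketch, not from the original source): a GEP such
// as
//   %p = getelementptr inbounds i32, ptr %base, i64 4
// has exactly two operands, indexes a sequential (array/pointer) type, and uses
// a constant index, so it is matched; a struct-field GEP such as
//   %f = getelementptr inbounds %struct.S, ptr %s, i32 0, i32 1
// (with %struct.S a hypothetical type) has more than two operands and is
// rejected.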
7790 static bool GEPSequentialConstIndexed(GetElementPtrInst *GEP) {
7791   gep_type_iterator I = gep_type_begin(*GEP);
7792   return GEP->getNumOperands() == 2 && I.isSequential() &&
7793          isa<ConstantInt>(GEP->getOperand(1));
7794 }
7795 
7796 // Try unmerging GEPs to reduce liveness interference (register pressure) across
7797 // IndirectBr edges. Since IndirectBr edges tend to touch on many blocks,
7798 // reducing liveness interference across those edges benefits global register
7799 // allocation. Currently handles only certain cases.
7800 //
7801 // For example, unmerge %GEPI and %UGEPI as below.
7802 //
7803 // ---------- BEFORE ----------
7804 // SrcBlock:
7805 //   ...
7806 //   %GEPIOp = ...
7807 //   ...
7808 //   %GEPI = gep %GEPIOp, Idx
7809 //   ...
7810 //   indirectbr ... [ label %DstB0, label %DstB1, ... label %DstBi ... ]
7811 //   (* %GEPI is alive on the indirectbr edges due to other uses ahead)
7812 //   (* %GEPIOp is alive on the indirectbr edges only because it is used by
7813 //   %UGEPI)
7814 //
7815 // DstB0: ... (there may be a gep similar to %UGEPI to be unmerged)
7816 // DstB1: ... (there may be a gep similar to %UGEPI to be unmerged)
7817 // ...
7818 //
7819 // DstBi:
7820 //   ...
7821 //   %UGEPI = gep %GEPIOp, UIdx
7822 // ...
7823 // ---------------------------
7824 //
7825 // ---------- AFTER ----------
7826 // SrcBlock:
7827 //   ... (same as above)
7828 //    (* %GEPI is still alive on the indirectbr edges)
7829 //    (* %GEPIOp is no longer alive on the indirectbr edges as a result of the
7830 //    unmerging)
7831 // ...
7832 //
7833 // DstBi:
7834 //   ...
7835 //   %UGEPI = gep %GEPI, (UIdx-Idx)
7836 //   ...
7837 // ---------------------------
7838 //
7839 // The register pressure on the IndirectBr edges is reduced because %GEPIOp is
7840 // no longer alive on them.
7841 //
7842 // We try to unmerge GEPs here in CodeGenPrepare, as opposed to limiting merging
7843 // of GEPs in the first place in InstCombiner::visitGetElementPtrInst(), so as
7844 // not to disable further simplifications and optimizations as a result of GEP
7845 // merging.
7846 //
7847 // Note this unmerging may increase the length of the data flow critical path
7848 // (the path from %GEPIOp to %UGEPI would go through %GEPI), which is a tradeoff
7849 // between the register pressure and the length of data-flow critical
7850 // path. Restricting this to the uncommon IndirectBr case would minimize the
7851 // impact of potentially longer critical path, if any, and the impact on compile
7852 // time.
7853 static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI,
7854                                              const TargetTransformInfo *TTI) {
7855   BasicBlock *SrcBlock = GEPI->getParent();
7856   // Check that SrcBlock ends with an IndirectBr. If not, give up. The common
7857   // (non-IndirectBr) cases exit early here.
7858   if (!isa<IndirectBrInst>(SrcBlock->getTerminator()))
7859     return false;
7860   // Check that GEPI is a simple gep with a single constant index.
7861   if (!GEPSequentialConstIndexed(GEPI))
7862     return false;
7863   ConstantInt *GEPIIdx = cast<ConstantInt>(GEPI->getOperand(1));
7864   // Check that GEPI is a cheap one.
7865   if (TTI->getIntImmCost(GEPIIdx->getValue(), GEPIIdx->getType(),
7866                          TargetTransformInfo::TCK_SizeAndLatency) >
7867       TargetTransformInfo::TCC_Basic)
7868     return false;
7869   Value *GEPIOp = GEPI->getOperand(0);
7870   // Check that GEPIOp is an instruction that's also defined in SrcBlock.
7871   if (!isa<Instruction>(GEPIOp))
7872     return false;
7873   auto *GEPIOpI = cast<Instruction>(GEPIOp);
7874   if (GEPIOpI->getParent() != SrcBlock)
7875     return false;
7876   // Check that GEPI is used outside the block, meaning it's alive on the
7877   // IndirectBr edge(s).
7878   if (llvm::none_of(GEPI->users(), [&](User *Usr) {
7879         if (auto *I = dyn_cast<Instruction>(Usr)) {
7880           if (I->getParent() != SrcBlock) {
7881             return true;
7882           }
7883         }
7884         return false;
7885       }))
7886     return false;
7887   // The second elements of the GEP chains to be unmerged.
7888   std::vector<GetElementPtrInst *> UGEPIs;
7889   // Check each user of GEPIOp to check if unmerging would make GEPIOp not alive
7890   // on IndirectBr edges.
7891   for (User *Usr : GEPIOp->users()) {
7892     if (Usr == GEPI)
7893       continue;
7894     // Check if Usr is an Instruction. If not, give up.
7895     if (!isa<Instruction>(Usr))
7896       return false;
7897     auto *UI = cast<Instruction>(Usr);
7898     // If Usr is in the same block as GEPIOp, that is fine; skip it.
7899     if (UI->getParent() == SrcBlock)
7900       continue;
7901     // Check if Usr is a GEP. If not, give up.
7902     if (!isa<GetElementPtrInst>(Usr))
7903       return false;
7904     auto *UGEPI = cast<GetElementPtrInst>(Usr);
7905     // Check if UGEPI is a simple gep with a single constant index and GEPIOp is
7906     // the pointer operand to it. If so, record it in the vector. If not, give
7907     // up.
7908     if (!GEPSequentialConstIndexed(UGEPI))
7909       return false;
7910     if (UGEPI->getOperand(0) != GEPIOp)
7911       return false;
7912     if (GEPIIdx->getType() !=
7913         cast<ConstantInt>(UGEPI->getOperand(1))->getType())
7914       return false;
7915     ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
7916     if (TTI->getIntImmCost(UGEPIIdx->getValue(), UGEPIIdx->getType(),
7917                            TargetTransformInfo::TCK_SizeAndLatency) >
7918         TargetTransformInfo::TCC_Basic)
7919       return false;
7920     UGEPIs.push_back(UGEPI);
7921   }
7922   if (UGEPIs.empty())
7923     return false;
7924   // Check the materialization cost of (UIdx - Idx).
7925   for (GetElementPtrInst *UGEPI : UGEPIs) {
7926     ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
7927     APInt NewIdx = UGEPIIdx->getValue() - GEPIIdx->getValue();
7928     InstructionCost ImmCost = TTI->getIntImmCost(
7929         NewIdx, GEPIIdx->getType(), TargetTransformInfo::TCK_SizeAndLatency);
7930     if (ImmCost > TargetTransformInfo::TCC_Basic)
7931       return false;
7932   }
7933   // Now unmerge between GEPI and UGEPIs.
7934   for (GetElementPtrInst *UGEPI : UGEPIs) {
7935     UGEPI->setOperand(0, GEPI);
7936     ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
7937     Constant *NewUGEPIIdx = ConstantInt::get(
7938         GEPIIdx->getType(), UGEPIIdx->getValue() - GEPIIdx->getValue());
7939     UGEPI->setOperand(1, NewUGEPIIdx);
7940     // If GEPI is not inbounds but UGEPI is inbounds, change UGEPI to not
7941     // inbounds to avoid UB.
7942     if (!GEPI->isInBounds()) {
7943       UGEPI->setIsInBounds(false);
7944     }
7945   }
7946   // After unmerging, verify that GEPIOp is actually only used in SrcBlock (not
7947   // alive on IndirectBr edges).
7948   assert(llvm::none_of(GEPIOp->users(),
7949                        [&](User *Usr) {
7950                          return cast<Instruction>(Usr)->getParent() != SrcBlock;
7951                        }) &&
7952          "GEPIOp is used outside SrcBlock");
7953   return true;
7954 }
7955 
7956 static bool optimizeBranch(BranchInst *Branch, const TargetLowering &TLI,
7957                            SmallSet<BasicBlock *, 32> &FreshBBs,
7958                            bool IsHugeFunc) {
7959   // Try and convert
7960   //  %c = icmp ult %x, 8
7961   //  br %c, bla, blb
7962   //  %tc = lshr %x, 3
7963   // to
7964   //  %tc = lshr %x, 3
7965   //  %c = icmp eq %tc, 0
7966   //  br %c, bla, blb
7967   // Creating the cmp to zero can be better for the backend, especially if the
7968   // lshr produces flags that can be used automatically.
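  // A second, related pattern handled below (illustrative sketch only): when
  // the compare is an equality test against the same constant that a nearby
  // add/sub uses, e.g.
  //  %c = icmp eq %x, 7
  //  %s = sub %x, 7
  // it is rewritten as
  //  %s = sub %x, 7
  //  %c = icmp eq %s, 0
  // so the compare can reuse the flags produced by the subtraction.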
7969   if (!TLI.preferZeroCompareBranch() || !Branch->isConditional())
7970     return false;
7971 
7972   ICmpInst *Cmp = dyn_cast<ICmpInst>(Branch->getCondition());
7973   if (!Cmp || !isa<ConstantInt>(Cmp->getOperand(1)) || !Cmp->hasOneUse())
7974     return false;
7975 
7976   Value *X = Cmp->getOperand(0);
7977   APInt CmpC = cast<ConstantInt>(Cmp->getOperand(1))->getValue();
7978 
7979   for (auto *U : X->users()) {
7980     Instruction *UI = dyn_cast<Instruction>(U);
7981     // A quick dominance check
7982     if (!UI ||
7983         (UI->getParent() != Branch->getParent() &&
7984          UI->getParent() != Branch->getSuccessor(0) &&
7985          UI->getParent() != Branch->getSuccessor(1)) ||
7986         (UI->getParent() != Branch->getParent() &&
7987          !UI->getParent()->getSinglePredecessor()))
7988       continue;
7989 
7990     if (CmpC.isPowerOf2() && Cmp->getPredicate() == ICmpInst::ICMP_ULT &&
7991         match(UI, m_Shr(m_Specific(X), m_SpecificInt(CmpC.logBase2())))) {
7992       IRBuilder<> Builder(Branch);
7993       if (UI->getParent() != Branch->getParent())
7994         UI->moveBefore(Branch);
7995       Value *NewCmp = Builder.CreateCmp(ICmpInst::ICMP_EQ, UI,
7996                                         ConstantInt::get(UI->getType(), 0));
7997       LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n");
7998       LLVM_DEBUG(dbgs() << " to compare on zero: " << *NewCmp << "\n");
7999       replaceAllUsesWith(Cmp, NewCmp, FreshBBs, IsHugeFunc);
8000       return true;
8001     }
8002     if (Cmp->isEquality() &&
8003         (match(UI, m_Add(m_Specific(X), m_SpecificInt(-CmpC))) ||
8004          match(UI, m_Sub(m_Specific(X), m_SpecificInt(CmpC))))) {
8005       IRBuilder<> Builder(Branch);
8006       if (UI->getParent() != Branch->getParent())
8007         UI->moveBefore(Branch);
8008       Value *NewCmp = Builder.CreateCmp(Cmp->getPredicate(), UI,
8009                                         ConstantInt::get(UI->getType(), 0));
8010       LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n");
8011       LLVM_DEBUG(dbgs() << " to compare on zero: " << *NewCmp << "\n");
8012       replaceAllUsesWith(Cmp, NewCmp, FreshBBs, IsHugeFunc);
8013       return true;
8014     }
8015   }
8016   return false;
8017 }
8018 
8019 bool CodeGenPrepare::optimizeInst(Instruction *I, ModifyDT &ModifiedDT) {
8020   // Bail out if we inserted the instruction to prevent optimizations from
8021   // stepping on each other's toes.
8022   if (InsertedInsts.count(I))
8023     return false;
8024 
8025   // TODO: Move into the switch on opcode below here.
8026   if (PHINode *P = dyn_cast<PHINode>(I)) {
8027     // It is possible for very late stage optimizations (such as SimplifyCFG)
8028     // to introduce PHI nodes too late to be cleaned up.  If we detect such a
8029     // trivial PHI, go ahead and zap it here.
8030     if (Value *V = simplifyInstruction(P, {*DL, TLInfo})) {
8031       LargeOffsetGEPMap.erase(P);
8032       replaceAllUsesWith(P, V, FreshBBs, IsHugeFunc);
8033       P->eraseFromParent();
8034       ++NumPHIsElim;
8035       return true;
8036     }
8037     return false;
8038   }
8039 
8040   if (CastInst *CI = dyn_cast<CastInst>(I)) {
8041     // If the source of the cast is a constant, then this should have
8042     // already been constant folded.  The only reason NOT to constant fold
8043     // it is if something (e.g. LSR) was careful to place the constant
8044     // evaluation in a block other than the one that uses it (e.g. to hoist
8045     // the address of globals out of a loop).  If this is the case, we don't
8046     // want to forward-subst the cast.
8047     if (isa<Constant>(CI->getOperand(0)))
8048       return false;
8049 
8050     if (OptimizeNoopCopyExpression(CI, *TLI, *DL))
8051       return true;
8052 
8053     if ((isa<UIToFPInst>(I) || isa<FPToUIInst>(I) || isa<TruncInst>(I)) &&
8054         TLI->optimizeExtendOrTruncateConversion(I,
8055                                                 LI->getLoopFor(I->getParent())))
8056       return true;
8057 
8058     if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
8059       /// Sink a zext or sext into its user blocks if the target type doesn't
8060       /// fit in one register
8061       if (TLI->getTypeAction(CI->getContext(),
8062                              TLI->getValueType(*DL, CI->getType())) ==
8063           TargetLowering::TypeExpandInteger) {
8064         return SinkCast(CI);
8065       } else {
8066         if (TLI->optimizeExtendOrTruncateConversion(
8067                 I, LI->getLoopFor(I->getParent())))
8068           return true;
8069 
8070         bool MadeChange = optimizeExt(I);
8071         return MadeChange | optimizeExtUses(I);
8072       }
8073     }
8074     return false;
8075   }
8076 
8077   if (auto *Cmp = dyn_cast<CmpInst>(I))
8078     if (optimizeCmp(Cmp, ModifiedDT))
8079       return true;
8080 
8081   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
8082     LI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
8083     bool Modified = optimizeLoadExt(LI);
8084     unsigned AS = LI->getPointerAddressSpace();
8085     Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
8086     return Modified;
8087   }
8088 
8089   if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
8090     if (splitMergedValStore(*SI, *DL, *TLI))
8091       return true;
8092     SI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
8093     unsigned AS = SI->getPointerAddressSpace();
8094     return optimizeMemoryInst(I, SI->getOperand(1),
8095                               SI->getOperand(0)->getType(), AS);
8096   }
8097 
8098   if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
8099     unsigned AS = RMW->getPointerAddressSpace();
8100     return optimizeMemoryInst(I, RMW->getPointerOperand(), RMW->getType(), AS);
8101   }
8102 
8103   if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) {
8104     unsigned AS = CmpX->getPointerAddressSpace();
8105     return optimizeMemoryInst(I, CmpX->getPointerOperand(),
8106                               CmpX->getCompareOperand()->getType(), AS);
8107   }
8108 
8109   BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);
8110 
8111   if (BinOp && BinOp->getOpcode() == Instruction::And && EnableAndCmpSinking &&
8112       sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts))
8113     return true;
8114 
8115   // TODO: Move this into the switch on opcode - it handles shifts already.
8116   if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
8117                 BinOp->getOpcode() == Instruction::LShr)) {
8118     ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
8119     if (CI && TLI->hasExtractBitsInsn())
8120       if (OptimizeExtractBits(BinOp, CI, *TLI, *DL))
8121         return true;
8122   }
8123 
8124   if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
8125     if (GEPI->hasAllZeroIndices()) {
8126       // The GEP operand must be a pointer, so its result must be too -> BitCast.
8127       Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
8128                                         GEPI->getName(), GEPI);
8129       NC->setDebugLoc(GEPI->getDebugLoc());
8130       replaceAllUsesWith(GEPI, NC, FreshBBs, IsHugeFunc);
8131       GEPI->eraseFromParent();
8132       ++NumGEPsElim;
8133       optimizeInst(NC, ModifiedDT);
8134       return true;
8135     }
8136     if (tryUnmergingGEPsAcrossIndirectBr(GEPI, TTI)) {
8137       return true;
8138     }
8139     return false;
8140   }
8141 
8142   if (FreezeInst *FI = dyn_cast<FreezeInst>(I)) {
8143     // freeze(icmp a, const) -> icmp (freeze a), const
8144     // This helps generate efficient conditional jumps.
8145     Instruction *CmpI = nullptr;
8146     if (ICmpInst *II = dyn_cast<ICmpInst>(FI->getOperand(0)))
8147       CmpI = II;
8148     else if (FCmpInst *F = dyn_cast<FCmpInst>(FI->getOperand(0)))
8149       CmpI = F->getFastMathFlags().none() ? F : nullptr;
8150 
8151     if (CmpI && CmpI->hasOneUse()) {
8152       auto Op0 = CmpI->getOperand(0), Op1 = CmpI->getOperand(1);
8153       bool Const0 = isa<ConstantInt>(Op0) || isa<ConstantFP>(Op0) ||
8154                     isa<ConstantPointerNull>(Op0);
8155       bool Const1 = isa<ConstantInt>(Op1) || isa<ConstantFP>(Op1) ||
8156                     isa<ConstantPointerNull>(Op1);
8157       if (Const0 || Const1) {
8158         if (!Const0 || !Const1) {
8159           auto *F = new FreezeInst(Const0 ? Op1 : Op0, "", CmpI);
8160           F->takeName(FI);
8161           CmpI->setOperand(Const0 ? 1 : 0, F);
8162         }
8163         replaceAllUsesWith(FI, CmpI, FreshBBs, IsHugeFunc);
8164         FI->eraseFromParent();
8165         return true;
8166       }
8167     }
8168     return false;
8169   }
8170 
8171   if (tryToSinkFreeOperands(I))
8172     return true;
8173 
8174   switch (I->getOpcode()) {
8175   case Instruction::Shl:
8176   case Instruction::LShr:
8177   case Instruction::AShr:
8178     return optimizeShiftInst(cast<BinaryOperator>(I));
8179   case Instruction::Call:
8180     return optimizeCallInst(cast<CallInst>(I), ModifiedDT);
8181   case Instruction::Select:
8182     return optimizeSelectInst(cast<SelectInst>(I));
8183   case Instruction::ShuffleVector:
8184     return optimizeShuffleVectorInst(cast<ShuffleVectorInst>(I));
8185   case Instruction::Switch:
8186     return optimizeSwitchInst(cast<SwitchInst>(I));
8187   case Instruction::ExtractElement:
8188     return optimizeExtractElementInst(cast<ExtractElementInst>(I));
8189   case Instruction::Br:
8190     return optimizeBranch(cast<BranchInst>(I), *TLI, FreshBBs, IsHugeFunc);
8191   }
8192 
8193   return false;
8194 }
8195 
8196 /// Given an OR instruction, check to see if this is a bitreverse
8197 /// idiom. If so, insert the new intrinsic and return true.
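/// For example (an illustrative sketch), a chain of shifts, masks and ors that
/// reverses the bits of an i32 is collapsed into a single intrinsic call,
///   %rev = call i32 @llvm.bitreverse.i32(i32 %x)
/// provided ISD::BITREVERSE is legal or custom for i32 on the target.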
8198 bool CodeGenPrepare::makeBitReverse(Instruction &I) {
8199   if (!I.getType()->isIntegerTy() ||
8200       !TLI->isOperationLegalOrCustom(ISD::BITREVERSE,
8201                                      TLI->getValueType(*DL, I.getType(), true)))
8202     return false;
8203 
8204   SmallVector<Instruction *, 4> Insts;
8205   if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts))
8206     return false;
8207   Instruction *LastInst = Insts.back();
8208   replaceAllUsesWith(&I, LastInst, FreshBBs, IsHugeFunc);
8209   RecursivelyDeleteTriviallyDeadInstructions(
8210       &I, TLInfo, nullptr,
8211       [&](Value *V) { removeAllAssertingVHReferences(V); });
8212   return true;
8213 }
8214 
8215 // In this pass we look for GEP and cast instructions that are used
8216 // across basic blocks and rewrite them to improve basic-block-at-a-time
8217 // selection.
8218 bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, ModifyDT &ModifiedDT) {
8219   SunkAddrs.clear();
8220   bool MadeChange = false;
8221 
8222   do {
8223     CurInstIterator = BB.begin();
8224     ModifiedDT = ModifyDT::NotModifyDT;
8225     while (CurInstIterator != BB.end()) {
8226       MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
8227       if (ModifiedDT != ModifyDT::NotModifyDT) {
8228         // For huge functions we tend to quickly go through the inner optimization
8229         // opportunities in the BB, so we go back to the BB head to re-optimize
8230         // each instruction instead of going back to the function head.
8231         if (IsHugeFunc) {
8232           DT.reset();
8233           getDT(*BB.getParent());
8234           break;
8235         } else {
8236           return true;
8237         }
8238       }
8239     }
8240   } while (ModifiedDT == ModifyDT::ModifyInstDT);
8241 
8242   bool MadeBitReverse = true;
8243   while (MadeBitReverse) {
8244     MadeBitReverse = false;
8245     for (auto &I : reverse(BB)) {
8246       if (makeBitReverse(I)) {
8247         MadeBitReverse = MadeChange = true;
8248         break;
8249       }
8250     }
8251   }
8252   MadeChange |= dupRetToEnableTailCallOpts(&BB, ModifiedDT);
8253 
8254   return MadeChange;
8255 }
8256 
8257 // Some CGP optimizations may move or alter what's computed in a block. Check
8258 // whether a dbg.value intrinsic could be pointed at a more appropriate operand.
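// For example (illustrative, with hypothetical value names): if the address
// feeding a load was re-materialized in this block as %sunkaddr, a dbg.value
// still referring to the original address,
//   call void @llvm.dbg.value(metadata ptr %addr, ...)
// is retargeted to use %sunkaddr instead, so the variable location remains
// expressible after selection.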
8259 bool CodeGenPrepare::fixupDbgValue(Instruction *I) {
8260   assert(isa<DbgValueInst>(I));
8261   DbgValueInst &DVI = *cast<DbgValueInst>(I);
8262 
8263   // Does this dbg.value refer to a sunk address calculation?
8264   bool AnyChange = false;
8265   SmallDenseSet<Value *> LocationOps(DVI.location_ops().begin(),
8266                                      DVI.location_ops().end());
8267   for (Value *Location : LocationOps) {
8268     WeakTrackingVH SunkAddrVH = SunkAddrs[Location];
8269     Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
8270     if (SunkAddr) {
8271       // Point dbg.value at locally computed address, which should give the best
8272       // opportunity to be accurately lowered. This update may change the type
8273       // of pointer being referred to; however this makes no difference to
8274       // debugging information, and we can't generate bitcasts that may affect
8275       // codegen.
8276       DVI.replaceVariableLocationOp(Location, SunkAddr);
8277       AnyChange = true;
8278     }
8279   }
8280   return AnyChange;
8281 }
8282 
8283 // An llvm.dbg.value may be using a value before its definition, due to
8284 // optimizations in this pass and others. Scan for such dbg.values, and rescue
8285 // them by moving the dbg.value to immediately after the value definition.
8286 // FIXME: Ideally this should never be necessary, and this has the potential
8287 // to re-order dbg.value intrinsics.
8288 bool CodeGenPrepare::placeDbgValues(Function &F) {
8289   bool MadeChange = false;
8290   DominatorTree DT(F);
8291 
8292   for (BasicBlock &BB : F) {
8293     for (Instruction &Insn : llvm::make_early_inc_range(BB)) {
8294       DbgValueInst *DVI = dyn_cast<DbgValueInst>(&Insn);
8295       if (!DVI)
8296         continue;
8297 
8298       SmallVector<Instruction *, 4> VIs;
8299       for (Value *V : DVI->getValues())
8300         if (Instruction *VI = dyn_cast_or_null<Instruction>(V))
8301           VIs.push_back(VI);
8302 
8303       // This DVI may depend on multiple instructions, complicating any
8304       // potential sink. This block takes the defensive approach, opting to
8305       // "undef" the DVI if it depends on more than one instruction and any of
8306       // them does not dominate the DVI.
8307       for (Instruction *VI : VIs) {
8308         if (VI->isTerminator())
8309           continue;
8310 
8311         // If VI is a phi in a block with an EHPad terminator, we can't insert
8312         // after it.
8313         if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
8314           continue;
8315 
8316         // If the defining instruction dominates the dbg.value, we do not need
8317         // to move the dbg.value.
8318         if (DT.dominates(VI, DVI))
8319           continue;
8320 
8321         // If we depend on multiple instructions and any of them doesn't
8322         // dominate this DVI, we probably can't salvage it: moving it to
8323         // after any of the instructions could cause us to lose the others.
8324         if (VIs.size() > 1) {
8325           LLVM_DEBUG(
8326               dbgs()
8327               << "Unable to find valid location for Debug Value, undefing:\n"
8328               << *DVI);
8329           DVI->setKillLocation();
8330           break;
8331         }
8332 
8333         LLVM_DEBUG(dbgs() << "Moving Debug Value before :\n"
8334                           << *DVI << ' ' << *VI);
8335         DVI->removeFromParent();
8336         if (isa<PHINode>(VI))
8337           DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
8338         else
8339           DVI->insertAfter(VI);
8340         MadeChange = true;
8341         ++NumDbgValueMoved;
8342       }
8343     }
8344   }
8345   return MadeChange;
8346 }
8347 
8348 // Group scattered pseudo probes in a block to favor SelectionDAG. Scattered
8349 // probes can be chained dependencies of other regular DAG nodes and block DAG
8350 // combine optimizations.
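// For example (illustrative sketch), a probe interleaved with real
// instructions,
//   %a = add i32 %x, 1
//   call void @llvm.pseudoprobe(...)
//   %b = mul i32 %a, 3
// is moved up so that all probes in the block sit together before its first
// non-debug, non-pseudo instruction instead of splitting otherwise combinable
// DAG patterns.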
8351 bool CodeGenPrepare::placePseudoProbes(Function &F) {
8352   bool MadeChange = false;
8353   for (auto &Block : F) {
8354     // Move the remaining probes to the beginning of the block.
8355     auto FirstInst = Block.getFirstInsertionPt();
8356     while (FirstInst != Block.end() && FirstInst->isDebugOrPseudoInst())
8357       ++FirstInst;
8358     BasicBlock::iterator I(FirstInst);
8359     I++;
8360     while (I != Block.end()) {
8361       if (auto *II = dyn_cast<PseudoProbeInst>(I++)) {
8362         II->moveBefore(&*FirstInst);
8363         MadeChange = true;
8364       }
8365     }
8366   }
8367   return MadeChange;
8368 }
8369 
8370 /// Scale down both weights to fit into uint32_t.
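/// For example (illustrative arithmetic): with NewTrue = 2^33 and
/// NewFalse = 2^31, NewMax is 2^33, so Scale becomes
/// (2^33 / (2^32 - 1)) + 1 = 3 and both weights are divided by 3, bringing
/// them back under the uint32_t limit while roughly preserving their ratio.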
8371 static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
8372   uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
8373   uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1;
8374   NewTrue = NewTrue / Scale;
8375   NewFalse = NewFalse / Scale;
8376 }
8377 
8378 /// Some targets prefer to split a conditional branch like:
8379 /// \code
8380 ///   %0 = icmp ne i32 %a, 0
8381 ///   %1 = icmp ne i32 %b, 0
8382 ///   %or.cond = or i1 %0, %1
8383 ///   br i1 %or.cond, label %TrueBB, label %FalseBB
8384 /// \endcode
8385 /// into multiple branch instructions like:
8386 /// \code
8387 ///   bb1:
8388 ///     %0 = icmp ne i32 %a, 0
8389 ///     br i1 %0, label %TrueBB, label %bb2
8390 ///   bb2:
8391 ///     %1 = icmp ne i32 %b, 0
8392 ///     br i1 %1, label %TrueBB, label %FalseBB
8393 /// \endcode
8394 /// This usually allows instruction selection to do even further optimizations
8395 /// and combine the compare with the branch instruction. Currently this is
8396 /// applied for targets which have "cheap" jump instructions.
8397 ///
8398 /// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
8399 ///
8400 bool CodeGenPrepare::splitBranchCondition(Function &F, ModifyDT &ModifiedDT) {
8401   if (!TM->Options.EnableFastISel || TLI->isJumpExpensive())
8402     return false;
8403 
8404   bool MadeChange = false;
8405   for (auto &BB : F) {
8406     // Does this BB end with the following?
8407     //   %cond1 = icmp|fcmp|binary instruction ...
8408     //   %cond2 = icmp|fcmp|binary instruction ...
8409     //   %cond.or = or|and i1 %cond1, %cond2
8410     //   br i1 %cond.or, label %dest1, label %dest2
8411     Instruction *LogicOp;
8412     BasicBlock *TBB, *FBB;
8413     if (!match(BB.getTerminator(),
8414                m_Br(m_OneUse(m_Instruction(LogicOp)), TBB, FBB)))
8415       continue;
8416 
8417     auto *Br1 = cast<BranchInst>(BB.getTerminator());
8418     if (Br1->getMetadata(LLVMContext::MD_unpredictable))
8419       continue;
8420 
8421     // The merging of mostly empty BBs can cause a degenerate branch.
8422     if (TBB == FBB)
8423       continue;
8424 
8425     unsigned Opc;
8426     Value *Cond1, *Cond2;
8427     if (match(LogicOp,
8428               m_LogicalAnd(m_OneUse(m_Value(Cond1)), m_OneUse(m_Value(Cond2)))))
8429       Opc = Instruction::And;
8430     else if (match(LogicOp, m_LogicalOr(m_OneUse(m_Value(Cond1)),
8431                                         m_OneUse(m_Value(Cond2)))))
8432       Opc = Instruction::Or;
8433     else
8434       continue;
8435 
8436     auto IsGoodCond = [](Value *Cond) {
8437       return match(
8438           Cond,
8439           m_CombineOr(m_Cmp(), m_CombineOr(m_LogicalAnd(m_Value(), m_Value()),
8440                                            m_LogicalOr(m_Value(), m_Value()))));
8441     };
8442     if (!IsGoodCond(Cond1) || !IsGoodCond(Cond2))
8443       continue;
8444 
8445     LLVM_DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());
8446 
8447     // Create a new BB.
8448     auto *TmpBB =
8449         BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split",
8450                            BB.getParent(), BB.getNextNode());
8451     if (IsHugeFunc)
8452       FreshBBs.insert(TmpBB);
8453 
8454     // Update the original basic block: make the branch instruction use the
8455     // first condition directly and remove the no longer needed and/or instruction.
8456     Br1->setCondition(Cond1);
8457     LogicOp->eraseFromParent();
8458 
8459     // Depending on the condition we have to either replace the true or the
8460     // false successor of the original branch instruction.
8461     if (Opc == Instruction::And)
8462       Br1->setSuccessor(0, TmpBB);
8463     else
8464       Br1->setSuccessor(1, TmpBB);
8465 
8466     // Fill in the new basic block.
8467     auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
8468     if (auto *I = dyn_cast<Instruction>(Cond2)) {
8469       I->removeFromParent();
8470       I->insertBefore(Br2);
8471     }
8472 
8473     // Update PHI nodes in both successors. The original BB needs to be
8474     // replaced in one successor's PHI nodes, because the branch now comes from
8475     // the newly generated BB (TmpBB). In the other successor we need to add one
8476     // incoming edge to the PHI nodes, because both branch instructions now
8477     // target the same successor. Depending on the original branch condition
8478     // (and/or) we have to swap the successors (TrueDest, FalseDest), so that
8479     // we perform the correct update for the PHI nodes.
8480     // This doesn't change the successor order of the just created branch
8481     // instruction (or any other instruction).
8482     if (Opc == Instruction::Or)
8483       std::swap(TBB, FBB);
8484 
8485     // Replace the old BB with the new BB.
8486     TBB->replacePhiUsesWith(&BB, TmpBB);
8487 
8488     // Add another incoming edge from the new BB.
8489     for (PHINode &PN : FBB->phis()) {
8490       auto *Val = PN.getIncomingValueForBlock(&BB);
8491       PN.addIncoming(Val, TmpBB);
8492     }
8493 
8494     // Update the branch weights (from SelectionDAGBuilder::
8495     // FindMergedConditions).
8496     if (Opc == Instruction::Or) {
8497       // Codegen X | Y as:
8498       // BB1:
8499       //   jmp_if_X TBB
8500       //   jmp TmpBB
8501       // TmpBB:
8502       //   jmp_if_Y TBB
8503       //   jmp FBB
8504       //
8505 
8506       // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
8507       // The requirement is that
8508       //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
8509       //     = TrueProb for original BB.
8510       // Assuming the original weights are A and B, one choice is to set BB1's
8511       // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
8512       // assumes that
8513       //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
8514       // Another choice is to assume TrueProb for BB1 equals to TrueProb for
8515       // TmpBB, but the math is more complicated.
8516       uint64_t TrueWeight, FalseWeight;
8517       if (extractBranchWeights(*Br1, TrueWeight, FalseWeight)) {
8518         uint64_t NewTrueWeight = TrueWeight;
8519         uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
8520         scaleWeights(NewTrueWeight, NewFalseWeight);
8521         Br1->setMetadata(LLVMContext::MD_prof,
8522                          MDBuilder(Br1->getContext())
8523                              .createBranchWeights(NewTrueWeight, NewFalseWeight));
8524 
8525         NewTrueWeight = TrueWeight;
8526         NewFalseWeight = 2 * FalseWeight;
8527         scaleWeights(NewTrueWeight, NewFalseWeight);
8528         Br2->setMetadata(LLVMContext::MD_prof,
8529                          MDBuilder(Br2->getContext())
8530                              .createBranchWeights(NewTrueWeight, NewFalseWeight));
8531       }
8532     } else {
8533       // Codegen X & Y as:
8534       // BB1:
8535       //   jmp_if_X TmpBB
8536       //   jmp FBB
8537       // TmpBB:
8538       //   jmp_if_Y TBB
8539       //   jmp FBB
8540       //
8541       //  This requires creation of TmpBB after CurBB.
8542 
8543       // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
8544       // The requirement is that
8545       //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
8546       //     = FalseProb for original BB.
8547       // Assuming the original weights are A and B, one choice is to set BB1's
8548       // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
8549       // assumes that
8550       //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
8551       uint64_t TrueWeight, FalseWeight;
8552       if (extractBranchWeights(*Br1, TrueWeight, FalseWeight)) {
8553         uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
8554         uint64_t NewFalseWeight = FalseWeight;
8555         scaleWeights(NewTrueWeight, NewFalseWeight);
8556         Br1->setMetadata(LLVMContext::MD_prof,
8557                          MDBuilder(Br1->getContext())
8558                              .createBranchWeights(NewTrueWeight, NewFalseWeight));
8559 
8560         NewTrueWeight = 2 * TrueWeight;
8561         NewFalseWeight = FalseWeight;
8562         scaleWeights(NewTrueWeight, NewFalseWeight);
8563         Br2->setMetadata(LLVMContext::MD_prof,
8564                          MDBuilder(Br2->getContext())
8565                              .createBranchWeights(NewTrueWeight, NewFalseWeight));
8566       }
8567     }
8568 
8569     ModifiedDT = ModifyDT::ModifyBBDT;
8570     MadeChange = true;
8571 
8572     LLVM_DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
8573                TmpBB->dump());
8574   }
8575   return MadeChange;
8576 }
8577