1 //===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass munges the code in the input function to better prepare it for
10 // SelectionDAG-based code generation. This works around limitations in its
11 // basic-block-at-a-time approach. It should eventually be removed.
12 //
13 //===----------------------------------------------------------------------===//
14
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/ArrayRef.h"
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/ADT/MapVector.h"
19 #include "llvm/ADT/PointerIntPair.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/Analysis/BlockFrequencyInfo.h"
25 #include "llvm/Analysis/BranchProbabilityInfo.h"
26 #include "llvm/Analysis/ConstantFolding.h"
27 #include "llvm/Analysis/InstructionSimplify.h"
28 #include "llvm/Analysis/LoopInfo.h"
29 #include "llvm/Analysis/MemoryBuiltins.h"
30 #include "llvm/Analysis/ProfileSummaryInfo.h"
31 #include "llvm/Analysis/TargetLibraryInfo.h"
32 #include "llvm/Analysis/TargetTransformInfo.h"
33 #include "llvm/Analysis/ValueTracking.h"
34 #include "llvm/Analysis/VectorUtils.h"
35 #include "llvm/CodeGen/Analysis.h"
36 #include "llvm/CodeGen/ISDOpcodes.h"
37 #include "llvm/CodeGen/SelectionDAGNodes.h"
38 #include "llvm/CodeGen/TargetLowering.h"
39 #include "llvm/CodeGen/TargetPassConfig.h"
40 #include "llvm/CodeGen/TargetSubtargetInfo.h"
41 #include "llvm/CodeGen/ValueTypes.h"
42 #include "llvm/Config/llvm-config.h"
43 #include "llvm/IR/Argument.h"
44 #include "llvm/IR/Attributes.h"
45 #include "llvm/IR/BasicBlock.h"
46 #include "llvm/IR/Constant.h"
47 #include "llvm/IR/Constants.h"
48 #include "llvm/IR/DataLayout.h"
49 #include "llvm/IR/DerivedTypes.h"
50 #include "llvm/IR/Dominators.h"
51 #include "llvm/IR/Function.h"
52 #include "llvm/IR/GetElementPtrTypeIterator.h"
53 #include "llvm/IR/GlobalValue.h"
54 #include "llvm/IR/GlobalVariable.h"
55 #include "llvm/IR/IRBuilder.h"
56 #include "llvm/IR/InlineAsm.h"
57 #include "llvm/IR/InstrTypes.h"
58 #include "llvm/IR/Instruction.h"
59 #include "llvm/IR/Instructions.h"
60 #include "llvm/IR/IntrinsicInst.h"
61 #include "llvm/IR/Intrinsics.h"
62 #include "llvm/IR/IntrinsicsAArch64.h"
63 #include "llvm/IR/LLVMContext.h"
64 #include "llvm/IR/MDBuilder.h"
65 #include "llvm/IR/Module.h"
66 #include "llvm/IR/Operator.h"
67 #include "llvm/IR/PatternMatch.h"
68 #include "llvm/IR/Statepoint.h"
69 #include "llvm/IR/Type.h"
70 #include "llvm/IR/Use.h"
71 #include "llvm/IR/User.h"
72 #include "llvm/IR/Value.h"
73 #include "llvm/IR/ValueHandle.h"
74 #include "llvm/IR/ValueMap.h"
75 #include "llvm/InitializePasses.h"
76 #include "llvm/Pass.h"
77 #include "llvm/Support/BlockFrequency.h"
78 #include "llvm/Support/BranchProbability.h"
79 #include "llvm/Support/Casting.h"
80 #include "llvm/Support/CommandLine.h"
81 #include "llvm/Support/Compiler.h"
82 #include "llvm/Support/Debug.h"
83 #include "llvm/Support/ErrorHandling.h"
84 #include "llvm/Support/MachineValueType.h"
85 #include "llvm/Support/MathExtras.h"
86 #include "llvm/Support/raw_ostream.h"
87 #include "llvm/Target/TargetMachine.h"
88 #include "llvm/Target/TargetOptions.h"
89 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
90 #include "llvm/Transforms/Utils/BypassSlowDivision.h"
91 #include "llvm/Transforms/Utils/Local.h"
92 #include "llvm/Transforms/Utils/SimplifyLibCalls.h"
93 #include "llvm/Transforms/Utils/SizeOpts.h"
94 #include <algorithm>
95 #include <cassert>
96 #include <cstdint>
97 #include <iterator>
98 #include <limits>
99 #include <memory>
100 #include <utility>
101 #include <vector>
102
103 using namespace llvm;
104 using namespace llvm::PatternMatch;
105
106 #define DEBUG_TYPE "codegenprepare"
107
108 STATISTIC(NumBlocksElim, "Number of blocks eliminated");
109 STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
110 STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
111 STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
112 "sunken Cmps");
113 STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
114 "of sunken Casts");
115 STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
116 "computations were sunk");
117 STATISTIC(NumMemoryInstsPhiCreated,
118 "Number of phis created when address "
119 "computations were sunk to memory instructions");
120 STATISTIC(NumMemoryInstsSelectCreated,
121 "Number of selects created when address "
122 "computations were sunk to memory instructions");
123 STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
124 STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
125 STATISTIC(NumAndsAdded,
126 "Number of and mask instructions added to form ext loads");
127 STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
128 STATISTIC(NumRetsDup, "Number of return instructions duplicated");
129 STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
130 STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
131 STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");
132
133 static cl::opt<bool> DisableBranchOpts(
134 "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
135 cl::desc("Disable branch optimizations in CodeGenPrepare"));
136
137 static cl::opt<bool>
138 DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
139 cl::desc("Disable GC optimizations in CodeGenPrepare"));
140
141 static cl::opt<bool> DisableSelectToBranch(
142 "disable-cgp-select2branch", cl::Hidden, cl::init(false),
143 cl::desc("Disable select to branch conversion."));
144
145 static cl::opt<bool> AddrSinkUsingGEPs(
146 "addr-sink-using-gep", cl::Hidden, cl::init(true),
147 cl::desc("Address sinking in CGP using GEPs."));
148
149 static cl::opt<bool> EnableAndCmpSinking(
150 "enable-andcmp-sinking", cl::Hidden, cl::init(true),
151 cl::desc("Enable sinking and/cmp into branches."));
152
153 static cl::opt<bool> DisableStoreExtract(
154 "disable-cgp-store-extract", cl::Hidden, cl::init(false),
155 cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));
156
157 static cl::opt<bool> StressStoreExtract(
158 "stress-cgp-store-extract", cl::Hidden, cl::init(false),
159 cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));
160
161 static cl::opt<bool> DisableExtLdPromotion(
162 "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
163 cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
164 "CodeGenPrepare"));
165
166 static cl::opt<bool> StressExtLdPromotion(
167 "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
168 cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
169 "optimization in CodeGenPrepare"));
170
171 static cl::opt<bool> DisablePreheaderProtect(
172 "disable-preheader-prot", cl::Hidden, cl::init(false),
173 cl::desc("Disable protection against removing loop preheaders"));
174
175 static cl::opt<bool> ProfileGuidedSectionPrefix(
176 "profile-guided-section-prefix", cl::Hidden, cl::init(true), cl::ZeroOrMore,
177 cl::desc("Use profile info to add section prefix for hot/cold functions"));
178
179 static cl::opt<bool> ProfileUnknownInSpecialSection(
180 "profile-unknown-in-special-section", cl::Hidden, cl::init(false),
181 cl::ZeroOrMore,
182 cl::desc("In a profiling mode like sampleFDO, if a function has no "
183 "profile, we cannot tell for sure that it is cold, because it "
184 "may be a newly added function that has never been sampled. "
185 "With this flag enabled, the compiler can put such "
186 "profile-unknown functions into a special section, so the "
187 "runtime system can choose to handle them differently than the "
188 ".text section, to save RAM for example."));
189
190 static cl::opt<unsigned> FreqRatioToSkipMerge(
191 "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
192 cl::desc("Skip merging empty blocks if (frequency of empty block) / "
193 "(frequency of destination block) is greater than this ratio"));
194
195 static cl::opt<bool> ForceSplitStore(
196 "force-split-store", cl::Hidden, cl::init(false),
197 cl::desc("Force store splitting no matter what the target query says."));
198
199 static cl::opt<bool>
200 EnableTypePromotionMerge("cgp-type-promotion-merge", cl::Hidden,
201 cl::desc("Enable merging of redundant sexts when one dominates"
202 " the other."), cl::init(true));
203
204 static cl::opt<bool> DisableComplexAddrModes(
205 "disable-complex-addr-modes", cl::Hidden, cl::init(false),
206 cl::desc("Disables combining addressing modes with different parts "
207 "in optimizeMemoryInst."));
208
209 static cl::opt<bool>
210 AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false),
211 cl::desc("Allow creation of Phis in Address sinking."));
212
213 static cl::opt<bool>
214 AddrSinkNewSelects("addr-sink-new-select", cl::Hidden, cl::init(true),
215 cl::desc("Allow creation of selects in Address sinking."));
216
217 static cl::opt<bool> AddrSinkCombineBaseReg(
218 "addr-sink-combine-base-reg", cl::Hidden, cl::init(true),
219 cl::desc("Allow combining of BaseReg field in Address sinking."));
220
221 static cl::opt<bool> AddrSinkCombineBaseGV(
222 "addr-sink-combine-base-gv", cl::Hidden, cl::init(true),
223 cl::desc("Allow combining of BaseGV field in Address sinking."));
224
225 static cl::opt<bool> AddrSinkCombineBaseOffs(
226 "addr-sink-combine-base-offs", cl::Hidden, cl::init(true),
227 cl::desc("Allow combining of BaseOffs field in Address sinking."));
228
229 static cl::opt<bool> AddrSinkCombineScaledReg(
230 "addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true),
231 cl::desc("Allow combining of ScaledReg field in Address sinking."));
232
233 static cl::opt<bool>
234 EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden,
235 cl::init(true),
236 cl::desc("Enable splitting large offset of GEP."));
237
238 static cl::opt<bool> EnableICMP_EQToICMP_ST(
239 "cgp-icmp-eq2icmp-st", cl::Hidden, cl::init(false),
240 cl::desc("Enable ICMP_EQ to ICMP_S(L|G)T conversion."));
241
242 static cl::opt<bool>
243 VerifyBFIUpdates("cgp-verify-bfi-updates", cl::Hidden, cl::init(false),
244 cl::desc("Enable BFI update verification for "
245 "CodeGenPrepare."));
246
247 static cl::opt<bool> OptimizePhiTypes(
248 "cgp-optimize-phi-types", cl::Hidden, cl::init(false),
249 cl::desc("Enable converting phi types in CodeGenPrepare"));
250
251 namespace {
252
253 enum ExtType {
254 ZeroExtension, // Zero extension has been seen.
255 SignExtension, // Sign extension has been seen.
256 BothExtension // This extension type is used if we saw sext after
257 // ZeroExtension had been set, or if we saw zext after
258 // SignExtension had been set. It makes the type
259 // information of a promoted instruction invalid.
260 };
261
262 using SetOfInstrs = SmallPtrSet<Instruction *, 16>;
263 using TypeIsSExt = PointerIntPair<Type *, 2, ExtType>;
264 using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>;
265 using SExts = SmallVector<Instruction *, 16>;
266 using ValueToSExts = DenseMap<Value *, SExts>;
267
268 class TypePromotionTransaction;
269
270 class CodeGenPrepare : public FunctionPass {
271 const TargetMachine *TM = nullptr;
272 const TargetSubtargetInfo *SubtargetInfo;
273 const TargetLowering *TLI = nullptr;
274 const TargetRegisterInfo *TRI;
275 const TargetTransformInfo *TTI = nullptr;
276 const TargetLibraryInfo *TLInfo;
277 const LoopInfo *LI;
278 std::unique_ptr<BlockFrequencyInfo> BFI;
279 std::unique_ptr<BranchProbabilityInfo> BPI;
280 ProfileSummaryInfo *PSI;
281
282 /// As we scan instructions optimizing them, this is the next instruction
283 /// to optimize. Transforms that can invalidate this should update it.
284 BasicBlock::iterator CurInstIterator;
285
286 /// Keeps track of non-local addresses that have been sunk into a block.
287 /// This allows us to avoid inserting duplicate code for blocks with
288 /// multiple load/stores of the same address. The usage of WeakTrackingVH
289 /// enables SunkAddrs to be treated as a cache whose entries can be
290 /// invalidated if a sunken address computation has been erased.
291 ValueMap<Value*, WeakTrackingVH> SunkAddrs;
292
293 /// Keeps track of all instructions inserted for the current function.
294 SetOfInstrs InsertedInsts;
295
296 /// Keeps track of the original type of each promoted instruction for the
297 /// current function.
298 InstrToOrigTy PromotedInsts;
299
300 /// Keep track of instructions removed during promotion.
301 SetOfInstrs RemovedInsts;
302
303 /// Keep track of sext chains based on their initial value.
304 DenseMap<Value *, Instruction *> SeenChainsForSExt;
305
306 /// Keep track of GEPs accessing the same data structures such as structs or
307 /// arrays that are candidates to be split later because of their large
308 /// size.
309 MapVector<
310 AssertingVH<Value>,
311 SmallVector<std::pair<AssertingVH<GetElementPtrInst>, int64_t>, 32>>
312 LargeOffsetGEPMap;
313
314 /// Keep track of the new GEP bases created by splitting GEPs with large offsets.
315 SmallSet<AssertingVH<Value>, 2> NewGEPBases;
316
317 /// Map large-offset GEPs to their serial numbers.
318 DenseMap<AssertingVH<GetElementPtrInst>, int> LargeOffsetGEPID;
319
320 /// Keep track of promoted SExts.
321 ValueToSExts ValToSExtendedUses;
322
323 /// True if the function has the OptSize attribute.
324 bool OptSize;
325
326 /// DataLayout for the Function being processed.
327 const DataLayout *DL = nullptr;
328
329 /// Building the dominator tree can be expensive, so we only build it
330 /// lazily and update it when required.
331 std::unique_ptr<DominatorTree> DT;
332
333 public:
334 static char ID; // Pass identification, replacement for typeid
335
336 CodeGenPrepare() : FunctionPass(ID) {
337 initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
338 }
339
340 bool runOnFunction(Function &F) override;
341
342 StringRef getPassName() const override { return "CodeGen Prepare"; }
343
344 void getAnalysisUsage(AnalysisUsage &AU) const override {
345 // FIXME: When we can selectively preserve passes, preserve the domtree.
346 AU.addRequired<ProfileSummaryInfoWrapperPass>();
347 AU.addRequired<TargetLibraryInfoWrapperPass>();
348 AU.addRequired<TargetPassConfig>();
349 AU.addRequired<TargetTransformInfoWrapperPass>();
350 AU.addRequired<LoopInfoWrapperPass>();
351 }
352
353 private:
354 template <typename F>
355 void resetIteratorIfInvalidatedWhileCalling(BasicBlock *BB, F f) {
356 // Substituting can cause recursive simplifications, which can invalidate
357 // our iterator. Use a WeakTrackingVH to hold onto it in case this
358 // happens.
359 Value *CurValue = &*CurInstIterator;
360 WeakTrackingVH IterHandle(CurValue);
361
362 f();
363
364 // If the iterator instruction was recursively deleted, start over at the
365 // start of the block.
366 if (IterHandle != CurValue) {
367 CurInstIterator = BB->begin();
368 SunkAddrs.clear();
369 }
370 }
371
372 // Get the DominatorTree, building if necessary.
373 DominatorTree &getDT(Function &F) {
374 if (!DT)
375 DT = std::make_unique<DominatorTree>(F);
376 return *DT;
377 }
378
379 void removeAllAssertingVHReferences(Value *V);
380 bool eliminateFallThrough(Function &F);
381 bool eliminateMostlyEmptyBlocks(Function &F);
382 BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB);
383 bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
384 void eliminateMostlyEmptyBlock(BasicBlock *BB);
385 bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
386 bool isPreheader);
387 bool makeBitReverse(Instruction &I);
388 bool optimizeBlock(BasicBlock &BB, bool &ModifiedDT);
389 bool optimizeInst(Instruction *I, bool &ModifiedDT);
390 bool optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
391 Type *AccessTy, unsigned AddrSpace);
392 bool optimizeGatherScatterInst(Instruction *MemoryInst, Value *Ptr);
393 bool optimizeInlineAsmInst(CallInst *CS);
394 bool optimizeCallInst(CallInst *CI, bool &ModifiedDT);
395 bool optimizeExt(Instruction *&I);
396 bool optimizeExtUses(Instruction *I);
397 bool optimizeLoadExt(LoadInst *Load);
398 bool optimizeShiftInst(BinaryOperator *BO);
399 bool optimizeFunnelShift(IntrinsicInst *Fsh);
400 bool optimizeSelectInst(SelectInst *SI);
401 bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI);
402 bool optimizeSwitchInst(SwitchInst *SI);
403 bool optimizeExtractElementInst(Instruction *Inst);
404 bool dupRetToEnableTailCallOpts(BasicBlock *BB, bool &ModifiedDT);
405 bool fixupDbgValue(Instruction *I);
406 bool placeDbgValues(Function &F);
407 bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts,
408 LoadInst *&LI, Instruction *&Inst, bool HasPromoted);
409 bool tryToPromoteExts(TypePromotionTransaction &TPT,
410 const SmallVectorImpl<Instruction *> &Exts,
411 SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
412 unsigned CreatedInstsCost = 0);
413 bool mergeSExts(Function &F);
414 bool splitLargeGEPOffsets();
415 bool optimizePhiType(PHINode *Inst, SmallPtrSetImpl<PHINode *> &Visited,
416 SmallPtrSetImpl<Instruction *> &DeletedInstrs);
417 bool optimizePhiTypes(Function &F);
418 bool performAddressTypePromotion(
419 Instruction *&Inst,
420 bool AllowPromotionWithoutCommonHeader,
421 bool HasPromoted, TypePromotionTransaction &TPT,
422 SmallVectorImpl<Instruction *> &SpeculativelyMovedExts);
423 bool splitBranchCondition(Function &F, bool &ModifiedDT);
424 bool simplifyOffsetableRelocate(GCStatepointInst &I);
425
426 bool tryToSinkFreeOperands(Instruction *I);
427 bool replaceMathCmpWithIntrinsic(BinaryOperator *BO, Value *Arg0,
428 Value *Arg1, CmpInst *Cmp,
429 Intrinsic::ID IID);
430 bool optimizeCmp(CmpInst *Cmp, bool &ModifiedDT);
431 bool combineToUSubWithOverflow(CmpInst *Cmp, bool &ModifiedDT);
432 bool combineToUAddWithOverflow(CmpInst *Cmp, bool &ModifiedDT);
433 void verifyBFIUpdates(Function &F);
434 };
435
436 } // end anonymous namespace
437
438 char CodeGenPrepare::ID = 0;
439
440 INITIALIZE_PASS_BEGIN(CodeGenPrepare, DEBUG_TYPE,
441 "Optimize for code generation", false, false)
442 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
443 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
444 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
445 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
446 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
447 INITIALIZE_PASS_END(CodeGenPrepare, DEBUG_TYPE,
448 "Optimize for code generation", false, false)
449
450 FunctionPass *llvm::createCodeGenPreparePass() { return new CodeGenPrepare(); }
451
452 bool CodeGenPrepare::runOnFunction(Function &F) {
453 if (skipFunction(F))
454 return false;
455
456 DL = &F.getParent()->getDataLayout();
457
458 bool EverMadeChange = false;
459 // Clear per function information.
460 InsertedInsts.clear();
461 PromotedInsts.clear();
462
463 TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
464 SubtargetInfo = TM->getSubtargetImpl(F);
465 TLI = SubtargetInfo->getTargetLowering();
466 TRI = SubtargetInfo->getRegisterInfo();
467 TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
468 TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
469 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
470 BPI.reset(new BranchProbabilityInfo(F, *LI));
471 BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI));
472 PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
473 OptSize = F.hasOptSize();
474 if (ProfileGuidedSectionPrefix) {
475 if (PSI->isFunctionHotInCallGraph(&F, *BFI))
476 F.setSectionPrefix(".hot");
477 else if (PSI->isFunctionColdInCallGraph(&F, *BFI))
478 F.setSectionPrefix(".unlikely");
479 else if (ProfileUnknownInSpecialSection && PSI->hasPartialSampleProfile() &&
480 PSI->isFunctionHotnessUnknown(F))
481 F.setSectionPrefix(".unknown");
482 }
483
484 /// This optimization identifies DIV instructions that can be
485 /// profitably bypassed and carried out with a shorter, faster divide.
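/// For illustration (hypothetical target behaviour): on a target that reports
/// 64-bit division as slow and advertises a 32-bit bypass width,
///   %q = udiv i64 %a, %b
/// is wrapped in a run-time check so that it is carried out as a 32-bit
/// divide whenever both operands actually fit in 32 bits.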
486 if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI->isSlowDivBypassed()) {
487 const DenseMap<unsigned int, unsigned int> &BypassWidths =
488 TLI->getBypassSlowDivWidths();
489 BasicBlock* BB = &*F.begin();
490 while (BB != nullptr) {
491 // bypassSlowDivision may create new BBs, but we don't want to reapply the
492 // optimization to those blocks.
493 BasicBlock* Next = BB->getNextNode();
494 // F.hasOptSize is already checked in the outer if statement.
495 if (!llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
496 EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
497 BB = Next;
498 }
499 }
500
501 // Eliminate blocks that contain only PHI nodes and an
502 // unconditional branch.
503 EverMadeChange |= eliminateMostlyEmptyBlocks(F);
504
505 bool ModifiedDT = false;
506 if (!DisableBranchOpts)
507 EverMadeChange |= splitBranchCondition(F, ModifiedDT);
508
509 // Split some critical edges where one of the sources is an indirect branch,
510 // to help generate sane code for PHIs involving such edges.
511 EverMadeChange |= SplitIndirectBrCriticalEdges(F);
512
513 bool MadeChange = true;
514 while (MadeChange) {
515 MadeChange = false;
516 DT.reset();
517 for (Function::iterator I = F.begin(); I != F.end(); ) {
518 BasicBlock *BB = &*I++;
519 bool ModifiedDTOnIteration = false;
520 MadeChange |= optimizeBlock(*BB, ModifiedDTOnIteration);
521
522 // Restart BB iteration if the dominator tree of the Function was changed
523 if (ModifiedDTOnIteration)
524 break;
525 }
526 if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
527 MadeChange |= mergeSExts(F);
528 if (!LargeOffsetGEPMap.empty())
529 MadeChange |= splitLargeGEPOffsets();
530 MadeChange |= optimizePhiTypes(F);
531
532 if (MadeChange)
533 eliminateFallThrough(F);
534
535 // Really free removed instructions during promotion.
536 for (Instruction *I : RemovedInsts)
537 I->deleteValue();
538
539 EverMadeChange |= MadeChange;
540 SeenChainsForSExt.clear();
541 ValToSExtendedUses.clear();
542 RemovedInsts.clear();
543 LargeOffsetGEPMap.clear();
544 LargeOffsetGEPID.clear();
545 }
546
547 SunkAddrs.clear();
548
549 if (!DisableBranchOpts) {
550 MadeChange = false;
551 // Use a set vector to get deterministic iteration order. The order the
552 // blocks are removed may affect whether or not PHI nodes in successors
553 // are removed.
554 SmallSetVector<BasicBlock*, 8> WorkList;
555 for (BasicBlock &BB : F) {
556 SmallVector<BasicBlock *, 2> Successors(succ_begin(&BB), succ_end(&BB));
557 MadeChange |= ConstantFoldTerminator(&BB, true);
558 if (!MadeChange) continue;
559
560 for (SmallVectorImpl<BasicBlock*>::iterator
561 II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
562 if (pred_begin(*II) == pred_end(*II))
563 WorkList.insert(*II);
564 }
565
566 // Delete the dead blocks and any of their dead successors.
567 MadeChange |= !WorkList.empty();
568 while (!WorkList.empty()) {
569 BasicBlock *BB = WorkList.pop_back_val();
570 SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));
571
572 DeleteDeadBlock(BB);
573
574 for (SmallVectorImpl<BasicBlock*>::iterator
575 II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
576 if (pred_begin(*II) == pred_end(*II))
577 WorkList.insert(*II);
578 }
579
580 // Merge pairs of basic blocks with unconditional branches, connected by
581 // a single edge.
582 if (EverMadeChange || MadeChange)
583 MadeChange |= eliminateFallThrough(F);
584
585 EverMadeChange |= MadeChange;
586 }
587
588 if (!DisableGCOpts) {
589 SmallVector<GCStatepointInst *, 2> Statepoints;
590 for (BasicBlock &BB : F)
591 for (Instruction &I : BB)
592 if (auto *SP = dyn_cast<GCStatepointInst>(&I))
593 Statepoints.push_back(SP);
594 for (auto &I : Statepoints)
595 EverMadeChange |= simplifyOffsetableRelocate(*I);
596 }
597
598 // Do this last to clean up use-before-def scenarios introduced by other
599 // preparatory transforms.
600 EverMadeChange |= placeDbgValues(F);
601
602 #ifndef NDEBUG
603 if (VerifyBFIUpdates)
604 verifyBFIUpdates(F);
605 #endif
606
607 return EverMadeChange;
608 }
609
610 /// An instruction is about to be deleted, so remove all references to it in our
611 /// GEP-tracking data structures.
612 void CodeGenPrepare::removeAllAssertingVHReferences(Value *V) {
613 LargeOffsetGEPMap.erase(V);
614 NewGEPBases.erase(V);
615
616 auto GEP = dyn_cast<GetElementPtrInst>(V);
617 if (!GEP)
618 return;
619
620 LargeOffsetGEPID.erase(GEP);
621
622 auto VecI = LargeOffsetGEPMap.find(GEP->getPointerOperand());
623 if (VecI == LargeOffsetGEPMap.end())
624 return;
625
626 auto &GEPVector = VecI->second;
627 const auto &I = std::find_if(GEPVector.begin(), GEPVector.end(),
628 [=](auto &Elt) { return Elt.first == GEP; });
629 if (I == GEPVector.end())
630 return;
631
632 GEPVector.erase(I);
633 if (GEPVector.empty())
634 LargeOffsetGEPMap.erase(VecI);
635 }
636
637 // Verify BFI has been updated correctly by recomputing it and comparing the two.
638 void LLVM_ATTRIBUTE_UNUSED CodeGenPrepare::verifyBFIUpdates(Function &F) {
639 DominatorTree NewDT(F);
640 LoopInfo NewLI(NewDT);
641 BranchProbabilityInfo NewBPI(F, NewLI, TLInfo);
642 BlockFrequencyInfo NewBFI(F, NewBPI, NewLI);
643 NewBFI.verifyMatch(*BFI);
644 }
645
646 /// Merge basic blocks which are connected by a single edge, where one of the
647 /// basic blocks has a single successor pointing to the other basic block,
648 /// which has a single predecessor.
649 bool CodeGenPrepare::eliminateFallThrough(Function &F) {
650 bool Changed = false;
651 // Scan all of the blocks in the function, except for the entry block.
652 // Use a temporary array to avoid iterator being invalidated when
653 // deleting blocks.
654 SmallVector<WeakTrackingVH, 16> Blocks;
655 for (auto &Block : llvm::make_range(std::next(F.begin()), F.end()))
656 Blocks.push_back(&Block);
657
658 SmallSet<WeakTrackingVH, 16> Preds;
659 for (auto &Block : Blocks) {
660 auto *BB = cast_or_null<BasicBlock>(Block);
661 if (!BB)
662 continue;
663 // If the destination block has a single pred, then this is a trivial
664 // edge, just collapse it.
665 BasicBlock *SinglePred = BB->getSinglePredecessor();
666
667 // Don't merge if BB's address is taken.
668 if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;
669
670 BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
671 if (Term && !Term->isConditional()) {
672 Changed = true;
673 LLVM_DEBUG(dbgs() << "To merge:\n" << *BB << "\n\n\n");
674
675 // Merge BB into SinglePred and delete it.
676 MergeBlockIntoPredecessor(BB);
677 Preds.insert(SinglePred);
678 }
679 }
680
681 // (Repeatedly) merging blocks into their predecessors can create redundant
682 // debug intrinsics.
683 for (auto &Pred : Preds)
684 if (auto *BB = cast_or_null<BasicBlock>(Pred))
685 RemoveRedundantDbgInstrs(BB);
686
687 return Changed;
688 }
689
690 /// Find a destination block from BB if BB is a mergeable empty block.
691 BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) {
692 // If this block doesn't end with an uncond branch, ignore it.
693 BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
694 if (!BI || !BI->isUnconditional())
695 return nullptr;
696
697 // If the instruction before the branch (skipping debug info) isn't a phi
698 // node, then other stuff is happening here.
699 BasicBlock::iterator BBI = BI->getIterator();
700 if (BBI != BB->begin()) {
701 --BBI;
702 while (isa<DbgInfoIntrinsic>(BBI)) {
703 if (BBI == BB->begin())
704 break;
705 --BBI;
706 }
707 if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
708 return nullptr;
709 }
710
711 // Do not break infinite loops.
712 BasicBlock *DestBB = BI->getSuccessor(0);
713 if (DestBB == BB)
714 return nullptr;
715
716 if (!canMergeBlocks(BB, DestBB))
717 DestBB = nullptr;
718
719 return DestBB;
720 }
721
722 /// Eliminate blocks that contain only PHI nodes, debug info directives, and an
723 /// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
724 /// edges in ways that are non-optimal for isel. Start by eliminating these
725 /// blocks so we can split them the way we want them.
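/// As a hypothetical example, a block of the form
///   bb:                                        ; preds = %a, %b
///     %v = phi i32 [ 0, %a ], [ 1, %b ]
///     br label %dest
/// is "mostly empty": it can be removed by rewiring %a and %b directly to
/// %dest and folding %v into %dest's PHI nodes.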
726 bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
727 SmallPtrSet<BasicBlock *, 16> Preheaders;
728 SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
729 while (!LoopList.empty()) {
730 Loop *L = LoopList.pop_back_val();
731 LoopList.insert(LoopList.end(), L->begin(), L->end());
732 if (BasicBlock *Preheader = L->getLoopPreheader())
733 Preheaders.insert(Preheader);
734 }
735
736 bool MadeChange = false;
737 // Copy blocks into a temporary array to avoid iterator invalidation issues
738 // as we remove them.
739 // Note that this intentionally skips the entry block.
740 SmallVector<WeakTrackingVH, 16> Blocks;
741 for (auto &Block : llvm::make_range(std::next(F.begin()), F.end()))
742 Blocks.push_back(&Block);
743
744 for (auto &Block : Blocks) {
745 BasicBlock *BB = cast_or_null<BasicBlock>(Block);
746 if (!BB)
747 continue;
748 BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
749 if (!DestBB ||
750 !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
751 continue;
752
753 eliminateMostlyEmptyBlock(BB);
754 MadeChange = true;
755 }
756 return MadeChange;
757 }
758
759 bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
760 BasicBlock *DestBB,
761 bool isPreheader) {
762 // Do not delete loop preheaders if doing so would create a critical edge.
763 // Loop preheaders can be good locations to spill registers. If the
764 // preheader is deleted and we create a critical edge, registers may be
765 // spilled in the loop body instead.
766 if (!DisablePreheaderProtect && isPreheader &&
767 !(BB->getSinglePredecessor() &&
768 BB->getSinglePredecessor()->getSingleSuccessor()))
769 return false;
770
771 // Skip merging if the block's successor is also a successor to any callbr
772 // that leads to this block.
773 // FIXME: Is this really needed? Is this a correctness issue?
774 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
775 if (auto *CBI = dyn_cast<CallBrInst>((*PI)->getTerminator()))
776 for (unsigned i = 0, e = CBI->getNumSuccessors(); i != e; ++i)
777 if (DestBB == CBI->getSuccessor(i))
778 return false;
779 }
780
781 // Try to skip merging if the unique predecessor of BB is terminated by a
782 // switch or indirect branch instruction, and BB is used as an incoming block
783 // of PHIs in DestBB. In such case, merging BB and DestBB would cause ISel to
784 // add COPY instructions in the predecessor of BB instead of BB (if it is not
785 // merged). Note that the critical edge created by merging such blocks won't be
786 // split in MachineSink because the jump table is not analyzable. By keeping
787 // such empty block (BB), ISel will place COPY instructions in BB, not in the
788 // predecessor of BB.
789 BasicBlock *Pred = BB->getUniquePredecessor();
790 if (!Pred ||
791 !(isa<SwitchInst>(Pred->getTerminator()) ||
792 isa<IndirectBrInst>(Pred->getTerminator())))
793 return true;
794
795 if (BB->getTerminator() != BB->getFirstNonPHIOrDbg())
796 return true;
797
798 // We use a simple cost heuristic which determines that skipping merging is
799 // profitable if the cost of skipping merging is less than the cost of
800 // merging : Cost(skipping merging) < Cost(merging BB), where the
801 // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and
802 // the Cost(merging BB) is Freq(Pred) * Cost(Copy).
803 // Assuming Cost(Copy) == Cost(Branch), we could simplify it to :
804 // Freq(Pred) / Freq(BB) > 2.
805 // Note that if there are multiple empty blocks sharing the same incoming
806 // value for the PHIs in the DestBB, we consider them together. In such
807 // case, Cost(merging BB) will be the sum of their frequencies.
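// As a hypothetical example with the default FreqRatioToSkipMerge of 2: if
// Freq(Pred) = 50 and Freq(BB) = 30, then 50 <= 2 * 30 and merging is
// considered profitable; if instead Freq(Pred) = 100, the COPYs that merging
// would place in Pred execute far more often than BB does, so merging is
// skipped.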
808
809 if (!isa<PHINode>(DestBB->begin()))
810 return true;
811
812 SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;
813
814 // Find all other incoming blocks from which incoming values of all PHIs in
815 // DestBB are the same as the ones from BB.
816 for (pred_iterator PI = pred_begin(DestBB), E = pred_end(DestBB); PI != E;
817 ++PI) {
818 BasicBlock *DestBBPred = *PI;
819 if (DestBBPred == BB)
820 continue;
821
822 if (llvm::all_of(DestBB->phis(), [&](const PHINode &DestPN) {
823 return DestPN.getIncomingValueForBlock(BB) ==
824 DestPN.getIncomingValueForBlock(DestBBPred);
825 }))
826 SameIncomingValueBBs.insert(DestBBPred);
827 }
828
829 // See if all of BB's incoming values are the same as the value from Pred. In
830 // this case, there is no reason to skip merging because COPYs are expected to
831 // be placed in Pred already.
832 if (SameIncomingValueBBs.count(Pred))
833 return true;
834
835 BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
836 BlockFrequency BBFreq = BFI->getBlockFreq(BB);
837
838 for (auto *SameValueBB : SameIncomingValueBBs)
839 if (SameValueBB->getUniquePredecessor() == Pred &&
840 DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
841 BBFreq += BFI->getBlockFreq(SameValueBB);
842
843 return PredFreq.getFrequency() <=
844 BBFreq.getFrequency() * FreqRatioToSkipMerge;
845 }
846
847 /// Return true if we can merge BB into DestBB if there is a single
848 /// unconditional branch between them, and BB contains no other non-phi
849 /// instructions.
850 bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
851 const BasicBlock *DestBB) const {
852 // We only want to eliminate blocks whose phi nodes are used by phi nodes in
853 // the successor. If there are more complex conditions (e.g. preheaders),
854 // don't mess around with them.
855 for (const PHINode &PN : BB->phis()) {
856 for (const User *U : PN.users()) {
857 const Instruction *UI = cast<Instruction>(U);
858 if (UI->getParent() != DestBB || !isa<PHINode>(UI))
859 return false;
860 // If User is inside DestBB block and it is a PHINode then check
861 // incoming value. If incoming value is not from BB then this is
862 // a complex condition (e.g. preheaders) we want to avoid here.
863 if (UI->getParent() == DestBB) {
864 if (const PHINode *UPN = dyn_cast<PHINode>(UI))
865 for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
866 Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
867 if (Insn && Insn->getParent() == BB &&
868 Insn->getParent() != UPN->getIncomingBlock(I))
869 return false;
870 }
871 }
872 }
873 }
874
875 // If BB and DestBB contain any common predecessors, then the phi nodes in BB
876 // and DestBB may have conflicting incoming values for the block. If so, we
877 // can't merge the block.
878 const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
879 if (!DestBBPN) return true; // no conflict.
880
881 // Collect the preds of BB.
882 SmallPtrSet<const BasicBlock*, 16> BBPreds;
883 if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
884 // It is faster to get preds from a PHI than with pred_iterator.
885 for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
886 BBPreds.insert(BBPN->getIncomingBlock(i));
887 } else {
888 BBPreds.insert(pred_begin(BB), pred_end(BB));
889 }
890
891 // Walk the preds of DestBB.
892 for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
893 BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
894 if (BBPreds.count(Pred)) { // Common predecessor?
895 for (const PHINode &PN : DestBB->phis()) {
896 const Value *V1 = PN.getIncomingValueForBlock(Pred);
897 const Value *V2 = PN.getIncomingValueForBlock(BB);
898
899 // If V2 is a phi node in BB, look up what the mapped value will be.
900 if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
901 if (V2PN->getParent() == BB)
902 V2 = V2PN->getIncomingValueForBlock(Pred);
903
904 // If there is a conflict, bail out.
905 if (V1 != V2) return false;
906 }
907 }
908 }
909
910 return true;
911 }
912
913 /// Eliminate a basic block that has only phi's and an unconditional branch in
914 /// it.
915 void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
916 BranchInst *BI = cast<BranchInst>(BB->getTerminator());
917 BasicBlock *DestBB = BI->getSuccessor(0);
918
919 LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n"
920 << *BB << *DestBB);
921
922 // If the destination block has a single pred, then this is a trivial edge,
923 // just collapse it.
924 if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
925 if (SinglePred != DestBB) {
926 assert(SinglePred == BB &&
927 "Single predecessor not the same as predecessor");
928 // Merge DestBB into SinglePred/BB and delete it.
929 MergeBlockIntoPredecessor(DestBB);
930 // Note: BB(=SinglePred) will not be deleted on this path.
931 // DestBB(=its single successor) is the one that was deleted.
932 LLVM_DEBUG(dbgs() << "AFTER:\n" << *SinglePred << "\n\n\n");
933 return;
934 }
935 }
936
937 // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
938 // to handle the new incoming edges it is about to have.
939 for (PHINode &PN : DestBB->phis()) {
940 // Remove the incoming value for BB, and remember it.
941 Value *InVal = PN.removeIncomingValue(BB, false);
942
943 // Two options: either the InVal is a phi node defined in BB or it is some
944 // value that dominates BB.
945 PHINode *InValPhi = dyn_cast<PHINode>(InVal);
946 if (InValPhi && InValPhi->getParent() == BB) {
947 // Add all of the input values of the input PHI as inputs of this phi.
948 for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
949 PN.addIncoming(InValPhi->getIncomingValue(i),
950 InValPhi->getIncomingBlock(i));
951 } else {
952 // Otherwise, add one instance of the dominating value for each edge that
953 // we will be adding.
954 if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
955 for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
956 PN.addIncoming(InVal, BBPN->getIncomingBlock(i));
957 } else {
958 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
959 PN.addIncoming(InVal, *PI);
960 }
961 }
962 }
963
964 // The PHIs are now updated, change everything that refers to BB to use
965 // DestBB and remove BB.
966 BB->replaceAllUsesWith(DestBB);
967 BB->eraseFromParent();
968 ++NumBlocksElim;
969
970 LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
971 }
972
973 // Computes a map of base pointer relocation instructions to corresponding
974 // derived pointer relocation instructions given a vector of all relocate calls
975 static void computeBaseDerivedRelocateMap(
976 const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
977 DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>>
978 &RelocateInstMap) {
979 // Collect information in two maps: one primarily for locating the base object
980 // while filling the second map; the second map is the final structure holding
981 // a mapping between Base and corresponding Derived relocate calls
982 DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap;
983 for (auto *ThisRelocate : AllRelocateCalls) {
984 auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
985 ThisRelocate->getDerivedPtrIndex());
986 RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
987 }
988 for (auto &Item : RelocateIdxMap) {
989 std::pair<unsigned, unsigned> Key = Item.first;
990 if (Key.first == Key.second)
991 // Base relocation: nothing to insert
992 continue;
993
994 GCRelocateInst *I = Item.second;
995 auto BaseKey = std::make_pair(Key.first, Key.first);
996
997 // We're iterating over RelocateIdxMap so we cannot modify it.
998 auto MaybeBase = RelocateIdxMap.find(BaseKey);
999 if (MaybeBase == RelocateIdxMap.end())
1000 // TODO: We might want to insert a new base object relocate and gep off
1001 // that, if there are enough derived object relocates.
1002 continue;
1003
1004 RelocateInstMap[MaybeBase->second].push_back(I);
1005 }
1006 }
1007
1008 // Accepts a GEP and extracts the operands into a vector provided they're all
1009 // small integer constants
1010 static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
1011 SmallVectorImpl<Value *> &OffsetV) {
1012 for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
1013 // Only accept small constant integer operands
1014 auto *Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
1015 if (!Op || Op->getZExtValue() > 20)
1016 return false;
1017 }
1018
1019 for (unsigned i = 1; i < GEP->getNumOperands(); i++)
1020 OffsetV.push_back(GEP->getOperand(i));
1021 return true;
1022 }
1023
1024 // Takes a RelocatedBase (base pointer relocation instruction) and Targets to
1025 // replace, computes a replacement, and applies it.
1026 static bool
1027 simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase,
1028 const SmallVectorImpl<GCRelocateInst *> &Targets) {
1029 bool MadeChange = false;
1030 // We must ensure that the relocation of a derived pointer is defined after
1031 // the relocation of its base pointer. If we find a derived relocation that is
1032 // defined earlier than the relocation of its base, we move the base relocation
1033 // right before that derived relocation. We only consider relocations in the
1034 // same basic block as the base relocation; relocations from other basic blocks
1035 // are skipped by this optimization, and we do not care about them.
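// As an illustrative (hypothetical) ordering, if the block initially contains
//   %derived.rel = ... gc.relocate(%tok, i32 4, i32 5)
//   %base.rel    = ... gc.relocate(%tok, i32 4, i32 4)
// the base relocate is moved up in front of the derived relocate so that it
// dominates every derived relocate that is later rewritten as a GEP off of it.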
1036 for (auto R = RelocatedBase->getParent()->getFirstInsertionPt();
1037 &*R != RelocatedBase; ++R)
1038 if (auto *RI = dyn_cast<GCRelocateInst>(R))
1039 if (RI->getStatepoint() == RelocatedBase->getStatepoint())
1040 if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) {
1041 RelocatedBase->moveBefore(RI);
1042 break;
1043 }
1044
1045 for (GCRelocateInst *ToReplace : Targets) {
1046 assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
1047 "Not relocating a derived object of the original base object");
1048 if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
1049 // A duplicate relocate call. TODO: coalesce duplicates.
1050 continue;
1051 }
1052
1053 if (RelocatedBase->getParent() != ToReplace->getParent()) {
1054 // Base and derived relocates are in different basic blocks.
1055 // In this case transform is only valid when base dominates derived
1056 // relocate. However it would be too expensive to check dominance
1057 // for each such relocate, so we skip the whole transformation.
1058 continue;
1059 }
1060
1061 Value *Base = ToReplace->getBasePtr();
1062 auto *Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
1063 if (!Derived || Derived->getPointerOperand() != Base)
1064 continue;
1065
1066 SmallVector<Value *, 2> OffsetV;
1067 if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
1068 continue;
1069
1070 // Create a Builder and replace the target callsite with a gep
1071 assert(RelocatedBase->getNextNode() &&
1072 "Should always have one since it's not a terminator");
1073
1074 // Insert after RelocatedBase
1075 IRBuilder<> Builder(RelocatedBase->getNextNode());
1076 Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());
1077
1078 // If gc_relocate does not match the actual type, cast it to the right type.
1079 // In theory, there must be a bitcast after gc_relocate if the type does not
1080 // match, and we should reuse it to get the derived pointer. But there could be
1081 // cases like this:
1082 // bb1:
1083 // ...
1084 // %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
1085 // br label %merge
1086 //
1087 // bb2:
1088 // ...
1089 // %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
1090 // br label %merge
1091 //
1092 // merge:
1093 // %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
1094 // %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)*
1095 //
1096 // In this case, we cannot find the bitcast anymore. So we insert a new bitcast
1097 // whether or not one already exists. In this way, we can handle all cases, and
1098 // the extra bitcast should be optimized away in later passes.
1099 Value *ActualRelocatedBase = RelocatedBase;
1100 if (RelocatedBase->getType() != Base->getType()) {
1101 ActualRelocatedBase =
1102 Builder.CreateBitCast(RelocatedBase, Base->getType());
1103 }
1104 Value *Replacement = Builder.CreateGEP(
1105 Derived->getSourceElementType(), ActualRelocatedBase, makeArrayRef(OffsetV));
1106 Replacement->takeName(ToReplace);
1107 // If the newly generated derived pointer's type does not match the original derived
1108 // pointer's type, cast the new derived pointer to match it. Same reasoning as above.
1109 Value *ActualReplacement = Replacement;
1110 if (Replacement->getType() != ToReplace->getType()) {
1111 ActualReplacement =
1112 Builder.CreateBitCast(Replacement, ToReplace->getType());
1113 }
1114 ToReplace->replaceAllUsesWith(ActualReplacement);
1115 ToReplace->eraseFromParent();
1116
1117 MadeChange = true;
1118 }
1119 return MadeChange;
1120 }
1121
1122 // Turns this:
1123 //
1124 // %base = ...
1125 // %ptr = gep %base + 15
1126 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1127 // %base' = relocate(%tok, i32 4, i32 4)
1128 // %ptr' = relocate(%tok, i32 4, i32 5)
1129 // %val = load %ptr'
1130 //
1131 // into this:
1132 //
1133 // %base = ...
1134 // %ptr = gep %base + 15
1135 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1136 // %base' = gc.relocate(%tok, i32 4, i32 4)
1137 // %ptr' = gep %base' + 15
1138 // %val = load %ptr'
1139 bool CodeGenPrepare::simplifyOffsetableRelocate(GCStatepointInst &I) {
1140 bool MadeChange = false;
1141 SmallVector<GCRelocateInst *, 2> AllRelocateCalls;
1142 for (auto *U : I.users())
1143 if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U))
1144 // Collect all the relocate calls associated with a statepoint
1145 AllRelocateCalls.push_back(Relocate);
1146
1147 // We need at least one base pointer relocation + one derived pointer
1148 // relocation to mangle
1149 if (AllRelocateCalls.size() < 2)
1150 return false;
1151
1152 // RelocateInstMap is a mapping from the base relocate instruction to the
1153 // corresponding derived relocate instructions
1154 DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap;
1155 computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
1156 if (RelocateInstMap.empty())
1157 return false;
1158
1159 for (auto &Item : RelocateInstMap)
1160 // Item.first is the RelocatedBase to offset against
1161 // Item.second is the vector of Targets to replace
1162 MadeChange = simplifyRelocatesOffABase(Item.first, Item.second);
1163 return MadeChange;
1164 }
1165
1166 /// Sink the specified cast instruction into its user blocks.
1167 static bool SinkCast(CastInst *CI) {
1168 BasicBlock *DefBB = CI->getParent();
1169
1170 /// InsertedCasts - Only insert a cast in each block once.
1171 DenseMap<BasicBlock*, CastInst*> InsertedCasts;
1172
1173 bool MadeChange = false;
1174 for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
1175 UI != E; ) {
1176 Use &TheUse = UI.getUse();
1177 Instruction *User = cast<Instruction>(*UI);
1178
1179 // Figure out which BB this cast is used in. For PHI's this is the
1180 // appropriate predecessor block.
1181 BasicBlock *UserBB = User->getParent();
1182 if (PHINode *PN = dyn_cast<PHINode>(User)) {
1183 UserBB = PN->getIncomingBlock(TheUse);
1184 }
1185
1186 // Preincrement use iterator so we don't invalidate it.
1187 ++UI;
1188
1189 // The first insertion point of a block containing an EH pad is after the
1190 // pad. If the pad is the user, we cannot sink the cast past the pad.
1191 if (User->isEHPad())
1192 continue;
1193
1194 // If the block selected to receive the cast is an EH pad that does not
1195 // allow non-PHI instructions before the terminator, we can't sink the
1196 // cast.
1197 if (UserBB->getTerminator()->isEHPad())
1198 continue;
1199
1200 // If this user is in the same block as the cast, don't change the cast.
1201 if (UserBB == DefBB) continue;
1202
1203 // If we have already inserted a cast into this block, use it.
1204 CastInst *&InsertedCast = InsertedCasts[UserBB];
1205
1206 if (!InsertedCast) {
1207 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1208 assert(InsertPt != UserBB->end());
1209 InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0),
1210 CI->getType(), "", &*InsertPt);
1211 InsertedCast->setDebugLoc(CI->getDebugLoc());
1212 }
1213
1214 // Replace a use of the cast with a use of the new cast.
1215 TheUse = InsertedCast;
1216 MadeChange = true;
1217 ++NumCastUses;
1218 }
1219
1220 // If we removed all uses, nuke the cast.
1221 if (CI->use_empty()) {
1222 salvageDebugInfo(*CI);
1223 CI->eraseFromParent();
1224 MadeChange = true;
1225 }
1226
1227 return MadeChange;
1228 }
1229
1230 /// If the specified cast instruction is a noop copy (e.g. it's casting from
1231 /// one pointer type to another, i32->i8 on PPC), sink it into user blocks to
1232 /// reduce the number of virtual registers that must be created and coalesced.
1233 ///
1234 /// Return true if any changes are made.
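/// For example (illustrative IR), a pointer-to-pointer cast such as
///   %p = bitcast i8* %q to i32*
/// lowers to no machine code, so if its only uses are in other blocks it is
/// cheaper to re-create the cast in each user block than to keep %p live
/// across block boundaries.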
1235 static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI,
1236 const DataLayout &DL) {
1237 // Sink only "cheap" (or nop) address-space casts. This is a weaker condition
1238 // than sinking only nop casts, but is helpful on some platforms.
1239 if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) {
1240 if (!TLI.isFreeAddrSpaceCast(ASC->getSrcAddressSpace(),
1241 ASC->getDestAddressSpace()))
1242 return false;
1243 }
1244
1245 // If this is a noop copy,
1246 EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType());
1247 EVT DstVT = TLI.getValueType(DL, CI->getType());
1248
1249 // If this is an fp<->int conversion, it is not a noop copy.
1250 if (SrcVT.isInteger() != DstVT.isInteger())
1251 return false;
1252
1253 // If this is an extension, it will be a zero or sign extension, which
1254 // isn't a noop.
1255 if (SrcVT.bitsLT(DstVT)) return false;
1256
1257 // If these values will be promoted, find out what they will be promoted
1258 // to. This helps us consider truncates on PPC as noop copies when they
1259 // are.
1260 if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
1261 TargetLowering::TypePromoteInteger)
1262 SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
1263 if (TLI.getTypeAction(CI->getContext(), DstVT) ==
1264 TargetLowering::TypePromoteInteger)
1265 DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);
1266
1267 // If, after promotion, these are the same types, this is a noop copy.
1268 if (SrcVT != DstVT)
1269 return false;
1270
1271 return SinkCast(CI);
1272 }
1273
1274 bool CodeGenPrepare::replaceMathCmpWithIntrinsic(BinaryOperator *BO,
1275 Value *Arg0, Value *Arg1,
1276 CmpInst *Cmp,
1277 Intrinsic::ID IID) {
1278 if (BO->getParent() != Cmp->getParent()) {
1279 // We used to use a dominator tree here to allow multi-block optimization.
1280 // But that was problematic because:
1281 // 1. It could cause a perf regression by hoisting the math op into the
1282 // critical path.
1283 // 2. It could cause a perf regression by creating a value that was live
1284 // across multiple blocks and increasing register pressure.
1285 // 3. Use of a dominator tree could cause large compile-time regression.
1286 // This is because we recompute the DT on every change in the main CGP
1287 // run-loop. The recomputing is probably unnecessary in many cases, so if
1288 // that was fixed, using a DT here would be ok.
1289 return false;
1290 }
1291
1292 // We allow matching the canonical IR (add X, C) back to (usubo X, -C).
1293 if (BO->getOpcode() == Instruction::Add &&
1294 IID == Intrinsic::usub_with_overflow) {
1295 assert(isa<Constant>(Arg1) && "Unexpected input for usubo");
1296 Arg1 = ConstantExpr::getNeg(cast<Constant>(Arg1));
1297 }
1298
1299 // Insert at the first instruction of the pair.
1300 Instruction *InsertPt = nullptr;
1301 for (Instruction &Iter : *Cmp->getParent()) {
1302 // If BO is an XOR, it is not guaranteed that it comes after both inputs to
1303 // the overflow intrinsic are defined.
1304 if ((BO->getOpcode() != Instruction::Xor && &Iter == BO) || &Iter == Cmp) {
1305 InsertPt = &Iter;
1306 break;
1307 }
1308 }
1309 assert(InsertPt != nullptr && "Parent block did not contain cmp or binop");
1310
1311 IRBuilder<> Builder(InsertPt);
1312 Value *MathOV = Builder.CreateBinaryIntrinsic(IID, Arg0, Arg1);
1313 if (BO->getOpcode() != Instruction::Xor) {
1314 Value *Math = Builder.CreateExtractValue(MathOV, 0, "math");
1315 BO->replaceAllUsesWith(Math);
1316 } else
1317 assert(BO->hasOneUse() &&
1318 "Patterns with XOr should use the BO only in the compare");
1319 Value *OV = Builder.CreateExtractValue(MathOV, 1, "ov");
1320 Cmp->replaceAllUsesWith(OV);
1321 Cmp->eraseFromParent();
1322 BO->eraseFromParent();
1323 return true;
1324 }
1325
1326 /// Match special-case patterns that check for unsigned add overflow.
1327 static bool matchUAddWithOverflowConstantEdgeCases(CmpInst *Cmp,
1328 BinaryOperator *&Add) {
1329 // Add = add A, 1; Cmp = icmp eq A,-1 (overflow if A is max val)
1330 // Add = add A,-1; Cmp = icmp ne A, 0 (overflow if A is non-zero)
1331 Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
1332
1333 // We are not expecting non-canonical/degenerate code. Just bail out.
1334 if (isa<Constant>(A))
1335 return false;
1336
1337 ICmpInst::Predicate Pred = Cmp->getPredicate();
1338 if (Pred == ICmpInst::ICMP_EQ && match(B, m_AllOnes()))
1339 B = ConstantInt::get(B->getType(), 1);
1340 else if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt()))
1341 B = ConstantInt::get(B->getType(), -1);
1342 else
1343 return false;
1344
1345 // Check the users of the variable operand of the compare looking for an add
1346 // with the adjusted constant.
1347 for (User *U : A->users()) {
1348 if (match(U, m_Add(m_Specific(A), m_Specific(B)))) {
1349 Add = cast<BinaryOperator>(U);
1350 return true;
1351 }
1352 }
1353 return false;
1354 }
1355
1356 /// Try to combine the compare into a call to the llvm.uadd.with.overflow
1357 /// intrinsic. Return true if any changes were made.
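/// For example (illustrative IR), the canonical overflow check
///   %add = add i32 %a, %b
///   %cmp = icmp ult i32 %add, %a
/// can be rewritten as
///   %m   = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
///   %add = extractvalue { i32, i1 } %m, 0
///   %cmp = extractvalue { i32, i1 } %m, 1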
1358 bool CodeGenPrepare::combineToUAddWithOverflow(CmpInst *Cmp,
1359 bool &ModifiedDT) {
1360 Value *A, *B;
1361 BinaryOperator *Add;
1362 if (!match(Cmp, m_UAddWithOverflow(m_Value(A), m_Value(B), m_BinOp(Add)))) {
1363 if (!matchUAddWithOverflowConstantEdgeCases(Cmp, Add))
1364 return false;
1365 // Set A and B in case we match matchUAddWithOverflowConstantEdgeCases.
1366 A = Add->getOperand(0);
1367 B = Add->getOperand(1);
1368 }
1369
1370 if (!TLI->shouldFormOverflowOp(ISD::UADDO,
1371 TLI->getValueType(*DL, Add->getType()),
1372 Add->hasNUsesOrMore(2)))
1373 return false;
1374
1375 // We don't want to move around uses of condition values this late, so we
1376 // check if it is legal to create the call to the intrinsic in the basic
1377 // block containing the icmp.
1378 if (Add->getParent() != Cmp->getParent() && !Add->hasOneUse())
1379 return false;
1380
1381 if (!replaceMathCmpWithIntrinsic(Add, A, B, Cmp,
1382 Intrinsic::uadd_with_overflow))
1383 return false;
1384
1385 // Reset callers - do not crash by iterating over a dead instruction.
1386 ModifiedDT = true;
1387 return true;
1388 }
1389
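/// Try to combine a compare and a matching subtract into a call to the
/// llvm.usub.with.overflow intrinsic. For example (illustrative IR),
///   %sub = sub i32 %a, %b
///   %cmp = icmp ult i32 %a, %b
/// can be rewritten as
///   %m   = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
///   %sub = extractvalue { i32, i1 } %m, 0
///   %cmp = extractvalue { i32, i1 } %m, 1
/// Return true if any changes were made.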
1390 bool CodeGenPrepare::combineToUSubWithOverflow(CmpInst *Cmp,
1391 bool &ModifiedDT) {
1392 // We are not expecting non-canonical/degenerate code. Just bail out.
1393 Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
1394 if (isa<Constant>(A) && isa<Constant>(B))
1395 return false;
1396
1397 // Convert (A u> B) to (A u< B) to simplify pattern matching.
1398 ICmpInst::Predicate Pred = Cmp->getPredicate();
1399 if (Pred == ICmpInst::ICMP_UGT) {
1400 std::swap(A, B);
1401 Pred = ICmpInst::ICMP_ULT;
1402 }
1403 // Convert special-case: (A == 0) is the same as (A u< 1).
1404 if (Pred == ICmpInst::ICMP_EQ && match(B, m_ZeroInt())) {
1405 B = ConstantInt::get(B->getType(), 1);
1406 Pred = ICmpInst::ICMP_ULT;
1407 }
1408 // Convert special-case: (A != 0) is the same as (0 u< A).
1409 if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) {
1410 std::swap(A, B);
1411 Pred = ICmpInst::ICMP_ULT;
1412 }
1413 if (Pred != ICmpInst::ICMP_ULT)
1414 return false;
1415
1416 // Walk the users of a variable operand of a compare looking for a subtract or
1417 // add with that same operand. Also match the 2nd operand of the compare to
1418 // the add/sub, but that may be a negated constant operand of an add.
1419 Value *CmpVariableOperand = isa<Constant>(A) ? B : A;
1420 BinaryOperator *Sub = nullptr;
1421 for (User *U : CmpVariableOperand->users()) {
1422 // A - B, A u< B --> usubo(A, B)
1423 if (match(U, m_Sub(m_Specific(A), m_Specific(B)))) {
1424 Sub = cast<BinaryOperator>(U);
1425 break;
1426 }
1427
1428 // A + (-C), A u< C (canonicalized form of (sub A, C))
1429 const APInt *CmpC, *AddC;
1430 if (match(U, m_Add(m_Specific(A), m_APInt(AddC))) &&
1431 match(B, m_APInt(CmpC)) && *AddC == -(*CmpC)) {
1432 Sub = cast<BinaryOperator>(U);
1433 break;
1434 }
1435 }
1436 if (!Sub)
1437 return false;
1438
1439 if (!TLI->shouldFormOverflowOp(ISD::USUBO,
1440 TLI->getValueType(*DL, Sub->getType()),
1441 Sub->hasNUsesOrMore(2)))
1442 return false;
1443
1444 if (!replaceMathCmpWithIntrinsic(Sub, Sub->getOperand(0), Sub->getOperand(1),
1445 Cmp, Intrinsic::usub_with_overflow))
1446 return false;
1447
1448 // Reset callers - do not crash by iterating over a dead instruction.
1449 ModifiedDT = true;
1450 return true;
1451 }
1452
1453 /// Sink the given CmpInst into user blocks to reduce the number of virtual
1454 /// registers that must be created and coalesced. This is a clear win except on
1455 /// targets with multiple condition code registers (PowerPC), where it might
1456 /// lose; some adjustment may be wanted there.
1457 ///
1458 /// Return true if any changes are made.
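/// Illustrative sketch: if a compare defined in %entry is used only by branches
/// in %bb1 and %bb2, a copy of the compare is created in each of those blocks,
/// the uses are redirected to the local copies, and the original (now dead)
/// compare is erased, so each compare ends up next to the branch that uses it.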
1459 static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) {
1460 if (TLI.hasMultipleConditionRegisters())
1461 return false;
1462
1463 // Avoid sinking soft-FP comparisons, since this can move them into a loop.
1464 if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp))
1465 return false;
1466
1467 // Only insert a cmp in each block once.
1468 DenseMap<BasicBlock*, CmpInst*> InsertedCmps;
1469
1470 bool MadeChange = false;
1471 for (Value::user_iterator UI = Cmp->user_begin(), E = Cmp->user_end();
1472 UI != E; ) {
1473 Use &TheUse = UI.getUse();
1474 Instruction *User = cast<Instruction>(*UI);
1475
1476 // Preincrement use iterator so we don't invalidate it.
1477 ++UI;
1478
1479 // Don't bother for PHI nodes.
1480 if (isa<PHINode>(User))
1481 continue;
1482
1483 // Figure out which BB this cmp is used in.
1484 BasicBlock *UserBB = User->getParent();
1485 BasicBlock *DefBB = Cmp->getParent();
1486
1487 // If this user is in the same block as the cmp, don't change the cmp.
1488 if (UserBB == DefBB) continue;
1489
1490 // If we have already inserted a cmp into this block, use it.
1491 CmpInst *&InsertedCmp = InsertedCmps[UserBB];
1492
1493 if (!InsertedCmp) {
1494 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1495 assert(InsertPt != UserBB->end());
1496 InsertedCmp =
1497 CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(),
1498 Cmp->getOperand(0), Cmp->getOperand(1), "",
1499 &*InsertPt);
1500 // Propagate the debug info.
1501 InsertedCmp->setDebugLoc(Cmp->getDebugLoc());
1502 }
1503
1504 // Replace a use of the cmp with a use of the new cmp.
1505 TheUse = InsertedCmp;
1506 MadeChange = true;
1507 ++NumCmpUses;
1508 }
1509
1510 // If we removed all uses, nuke the cmp.
1511 if (Cmp->use_empty()) {
1512 Cmp->eraseFromParent();
1513 MadeChange = true;
1514 }
1515
1516 return MadeChange;
1517 }
1518
1519 /// For pattern like:
1520 ///
1521 /// DomCond = icmp sgt/slt CmpOp0, CmpOp1 (might not be in DomBB)
1522 /// ...
1523 /// DomBB:
1524 /// ...
1525 /// br DomCond, TrueBB, CmpBB
1526 /// CmpBB: (with DomBB being the single predecessor)
1527 /// ...
1528 /// Cmp = icmp eq CmpOp0, CmpOp1
1529 /// ...
1530 ///
1531 /// On targets where the lowering of icmp sgt/slt differs from the lowering of
1532 /// icmp eq (e.g. PowerPC), the pattern above needs two comparisons. This
1533 /// function tries to convert 'Cmp = icmp eq CmpOp0, CmpOp1' into
1534 /// 'Cmp = icmp slt/sgt CmpOp0, CmpOp1' so that DomCond and Cmp can share the
1535 /// same comparison, saving one comparison instruction.
1536 ///
1537 /// Return true if any changes are made.
1538 static bool foldICmpWithDominatingICmp(CmpInst *Cmp,
1539 const TargetLowering &TLI) {
1540 if (!EnableICMP_EQToICMP_ST && TLI.isEqualityCmpFoldedWithSignedCmp())
1541 return false;
1542
1543 ICmpInst::Predicate Pred = Cmp->getPredicate();
1544 if (Pred != ICmpInst::ICMP_EQ)
1545 return false;
1546
1547 // If icmp eq has users other than BranchInst and SelectInst, converting it to
1548 // icmp slt/sgt would introduce more redundant LLVM IR.
1549 for (User *U : Cmp->users()) {
1550 if (isa<BranchInst>(U))
1551 continue;
1552 if (isa<SelectInst>(U) && cast<SelectInst>(U)->getCondition() == Cmp)
1553 continue;
1554 return false;
1555 }
1556
1557 // This is a cheap/incomplete check for dominance - just match a single
1558 // predecessor with a conditional branch.
1559 BasicBlock *CmpBB = Cmp->getParent();
1560 BasicBlock *DomBB = CmpBB->getSinglePredecessor();
1561 if (!DomBB)
1562 return false;
1563
1564 // We want to ensure that the only way control gets to the comparison of
1565 // interest is that a less/greater than comparison on the same operands is
1566 // false.
1567 Value *DomCond;
1568 BasicBlock *TrueBB, *FalseBB;
1569 if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB)))
1570 return false;
1571 if (CmpBB != FalseBB)
1572 return false;
1573
1574 Value *CmpOp0 = Cmp->getOperand(0), *CmpOp1 = Cmp->getOperand(1);
1575 ICmpInst::Predicate DomPred;
1576 if (!match(DomCond, m_ICmp(DomPred, m_Specific(CmpOp0), m_Specific(CmpOp1))))
1577 return false;
1578 if (DomPred != ICmpInst::ICMP_SGT && DomPred != ICmpInst::ICMP_SLT)
1579 return false;
1580
1581 // Convert the equality comparison to the opposite of the dominating
1582 // comparison and swap the direction for all branch/select users.
1583 // We have conceptually converted:
1584 // Res = (a < b) ? <LT_RES> : (a == b) ? <EQ_RES> : <GT_RES>;
1585 // to
1586 // Res = (a < b) ? <LT_RES> : (a > b) ? <GT_RES> : <EQ_RES>;
1587 // And similarly for branches.
1588 for (User *U : Cmp->users()) {
1589 if (auto *BI = dyn_cast<BranchInst>(U)) {
1590 assert(BI->isConditional() && "Must be conditional");
1591 BI->swapSuccessors();
1592 continue;
1593 }
1594 if (auto *SI = dyn_cast<SelectInst>(U)) {
1595 // Swap operands
1596 SI->swapValues();
1597 SI->swapProfMetadata();
1598 continue;
1599 }
1600 llvm_unreachable("Must be a branch or a select");
1601 }
1602 Cmp->setPredicate(CmpInst::getSwappedPredicate(DomPred));
1603 return true;
1604 }
1605
1606 bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, bool &ModifiedDT) {
1607 if (sinkCmpExpression(Cmp, *TLI))
1608 return true;
1609
1610 if (combineToUAddWithOverflow(Cmp, ModifiedDT))
1611 return true;
1612
1613 if (combineToUSubWithOverflow(Cmp, ModifiedDT))
1614 return true;
1615
1616 if (foldICmpWithDominatingICmp(Cmp, *TLI))
1617 return true;
1618
1619 return false;
1620 }
1621
1622 /// Duplicate and sink the given 'and' instruction into user blocks where it is
1623 /// used in a compare to allow isel to generate better code for targets where
1624 /// this operation can be combined.
1625 ///
1626 /// Return true if any changes are made.
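/// Illustrative sketch: an 'and' defined in %entry whose only users are
/// 'icmp eq ..., 0' instructions in %bb1 and %bb2 is duplicated into %bb1 and
/// %bb2, so each (and; icmp 0) pair stays in a single block and can be matched
/// as one test-under-mask style instruction where the target supports it.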
1627 static bool sinkAndCmp0Expression(Instruction *AndI,
1628 const TargetLowering &TLI,
1629 SetOfInstrs &InsertedInsts) {
1630 // Double-check that we're not trying to optimize an instruction that was
1631 // already optimized by some other part of this pass.
1632 assert(!InsertedInsts.count(AndI) &&
1633 "Attempting to optimize already optimized and instruction");
1634 (void) InsertedInsts;
1635
1636 // Nothing to do for single use in same basic block.
1637 if (AndI->hasOneUse() &&
1638 AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent())
1639 return false;
1640
1641 // Try to avoid cases where sinking/duplicating is likely to increase register
1642 // pressure.
1643 if (!isa<ConstantInt>(AndI->getOperand(0)) &&
1644 !isa<ConstantInt>(AndI->getOperand(1)) &&
1645 AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse())
1646 return false;
1647
1648 for (auto *U : AndI->users()) {
1649 Instruction *User = cast<Instruction>(U);
1650
1651 // Only sink 'and' feeding icmp with 0.
1652 if (!isa<ICmpInst>(User))
1653 return false;
1654
1655 auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1));
1656 if (!CmpC || !CmpC->isZero())
1657 return false;
1658 }
1659
1660 if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI))
1661 return false;
1662
1663 LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n");
1664 LLVM_DEBUG(AndI->getParent()->dump());
1665
1666 // Push the 'and' into the same block as the icmp 0. There should only be
1667 // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any
1668 // others, so we don't need to keep track of which BBs we insert into.
1669 for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end();
1670 UI != E; ) {
1671 Use &TheUse = UI.getUse();
1672 Instruction *User = cast<Instruction>(*UI);
1673
1674 // Preincrement use iterator so we don't invalidate it.
1675 ++UI;
1676
1677 LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n");
1678
1679 // Keep the 'and' in the same place if the use is already in the same block.
1680 Instruction *InsertPt =
1681 User->getParent() == AndI->getParent() ? AndI : User;
1682 Instruction *InsertedAnd =
1683 BinaryOperator::Create(Instruction::And, AndI->getOperand(0),
1684 AndI->getOperand(1), "", InsertPt);
1685 // Propagate the debug info.
1686 InsertedAnd->setDebugLoc(AndI->getDebugLoc());
1687
1688 // Replace a use of the 'and' with a use of the new 'and'.
1689 TheUse = InsertedAnd;
1690 ++NumAndUses;
1691 LLVM_DEBUG(User->getParent()->dump());
1692 }
1693
1694 // We removed all uses, nuke the and.
1695 AndI->eraseFromParent();
1696 return true;
1697 }
1698
1699 /// Check if the candidates could be combined with a shift instruction, which
1700 /// includes:
1701 /// 1. Truncate instruction
1702 /// 2. And instruction and the imm is a mask of the low bits:
1703 /// imm & (imm+1) == 0
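/// For example, imm = 0x00ff satisfies the condition (0xff & 0x100 == 0),
/// while imm = 0x00f0 does not (0xf0 & 0xf1 == 0xf0).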
1704 static bool isExtractBitsCandidateUse(Instruction *User) {
1705 if (!isa<TruncInst>(User)) {
1706 if (User->getOpcode() != Instruction::And ||
1707 !isa<ConstantInt>(User->getOperand(1)))
1708 return false;
1709
1710 const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();
1711
1712 if ((Cimm & (Cimm + 1)).getBoolValue())
1713 return false;
1714 }
1715 return true;
1716 }
1717
1718 /// Sink both shift and truncate instruction to the use of truncate's BB.
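/// Sketch of the result: a fresh (ashr/lshr; trunc) pair is recreated at the
/// start of the truncate user's block, so the bit-extract pattern can be
/// matched locally there instead of materializing an extra implicit truncate.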
1719 static bool
1720 SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
1721 DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
1722 const TargetLowering &TLI, const DataLayout &DL) {
1723 BasicBlock *UserBB = User->getParent();
1724 DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
1725 auto *TruncI = cast<TruncInst>(User);
1726 bool MadeChange = false;
1727
1728 for (Value::user_iterator TruncUI = TruncI->user_begin(),
1729 TruncE = TruncI->user_end();
1730 TruncUI != TruncE;) {
1731
1732 Use &TruncTheUse = TruncUI.getUse();
1733 Instruction *TruncUser = cast<Instruction>(*TruncUI);
1734 // Preincrement use iterator so we don't invalidate it.
1735
1736 ++TruncUI;
1737
1738 int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
1739 if (!ISDOpcode)
1740 continue;
1741
1742 // If the use is actually a legal node, there will not be an
1743 // implicit truncate.
1744 // FIXME: always querying the result type is just an
1745 // approximation; some nodes' legality is determined by the
1746 // operand or other means. There's no good way to find out though.
1747 if (TLI.isOperationLegalOrCustom(
1748 ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true)))
1749 continue;
1750
1751 // Don't bother for PHI nodes.
1752 if (isa<PHINode>(TruncUser))
1753 continue;
1754
1755 BasicBlock *TruncUserBB = TruncUser->getParent();
1756
1757 if (UserBB == TruncUserBB)
1758 continue;
1759
1760 BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
1761 CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];
1762
1763 if (!InsertedShift && !InsertedTrunc) {
1764 BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
1765 assert(InsertPt != TruncUserBB->end());
1766 // Sink the shift
1767 if (ShiftI->getOpcode() == Instruction::AShr)
1768 InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
1769 "", &*InsertPt);
1770 else
1771 InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
1772 "", &*InsertPt);
1773 InsertedShift->setDebugLoc(ShiftI->getDebugLoc());
1774
1775 // Sink the trunc
1776 BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
1777 TruncInsertPt++;
1778 assert(TruncInsertPt != TruncUserBB->end());
1779
1780 InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
1781 TruncI->getType(), "", &*TruncInsertPt);
1782 InsertedTrunc->setDebugLoc(TruncI->getDebugLoc());
1783
1784 MadeChange = true;
1785
1786 TruncTheUse = InsertedTrunc;
1787 }
1788 }
1789 return MadeChange;
1790 }
1791
1792 /// Sink the shift *right* instruction into user blocks if the uses could
1793 /// potentially be combined with this shift instruction and generate BitExtract
1794 /// instruction. It will only be applied if the architecture supports BitExtract
1795 /// instruction. Here is an example:
1796 /// BB1:
1797 /// %x.extract.shift = lshr i64 %arg1, 32
1798 /// BB2:
1799 /// %x.extract.trunc = trunc i64 %x.extract.shift to i16
1800 /// ==>
1801 ///
1802 /// BB2:
1803 /// %x.extract.shift.1 = lshr i64 %arg1, 32
1804 /// %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
1805 ///
1806 /// CodeGen will recognize the pattern in BB2 and generate BitExtract
1807 /// instruction.
1808 /// Return true if any changes are made.
1809 static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
1810 const TargetLowering &TLI,
1811 const DataLayout &DL) {
1812 BasicBlock *DefBB = ShiftI->getParent();
1813
1814 /// Only insert instructions in each block once.
1815 DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;
1816
1817 bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType()));
1818
1819 bool MadeChange = false;
1820 for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
1821 UI != E;) {
1822 Use &TheUse = UI.getUse();
1823 Instruction *User = cast<Instruction>(*UI);
1824 // Preincrement use iterator so we don't invalidate it.
1825 ++UI;
1826
1827 // Don't bother for PHI nodes.
1828 if (isa<PHINode>(User))
1829 continue;
1830
1831 if (!isExtractBitsCandidateUse(User))
1832 continue;
1833
1834 BasicBlock *UserBB = User->getParent();
1835
1836 if (UserBB == DefBB) {
1837 // The shift and truncate instructions are in the same BB. If the truncate's
1838 // type is not legal, the use of the truncate (TruncUse) may still introduce
1839 // another implicit truncate in its own BB. In that case, we would like to
1840 // sink both the shift and the truncate instruction to the BB of TruncUse.
1841 // for example:
1842 // BB1:
1843 // i64 shift.result = lshr i64 opnd, imm
1844 // trunc.result = trunc shift.result to i16
1845 //
1846 // BB2:
1847 // ----> We will have an implicit truncate here if the architecture does
1848 // not have i16 compare.
1849 // cmp i16 trunc.result, opnd2
1850 //
1851 if (isa<TruncInst>(User) && shiftIsLegal
1852 // If the type of the truncate is legal, no truncate will be
1853 // introduced in other basic blocks.
1854 &&
1855 (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType()))))
1856 MadeChange =
1857 SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL);
1858
1859 continue;
1860 }
1861 // If we have already inserted a shift into this block, use it.
1862 BinaryOperator *&InsertedShift = InsertedShifts[UserBB];
1863
1864 if (!InsertedShift) {
1865 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1866 assert(InsertPt != UserBB->end());
1867
1868 if (ShiftI->getOpcode() == Instruction::AShr)
1869 InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
1870 "", &*InsertPt);
1871 else
1872 InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
1873 "", &*InsertPt);
1874 InsertedShift->setDebugLoc(ShiftI->getDebugLoc());
1875
1876 MadeChange = true;
1877 }
1878
1879 // Replace a use of the shift with a use of the new shift.
1880 TheUse = InsertedShift;
1881 }
1882
1883 // If we removed all uses, or there are none, nuke the shift.
1884 if (ShiftI->use_empty()) {
1885 salvageDebugInfo(*ShiftI);
1886 ShiftI->eraseFromParent();
1887 MadeChange = true;
1888 }
1889
1890 return MadeChange;
1891 }
1892
1893 /// If counting leading or trailing zeros is an expensive operation and a zero
1894 /// input is defined, add a check for zero to avoid calling the intrinsic.
1895 ///
1896 /// We want to transform:
1897 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)
1898 ///
1899 /// into:
1900 /// entry:
1901 /// %cmpz = icmp eq i64 %A, 0
1902 /// br i1 %cmpz, label %cond.end, label %cond.false
1903 /// cond.false:
1904 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
1905 /// br label %cond.end
1906 /// cond.end:
1907 /// %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
1908 ///
1909 /// If the transform is performed, return true and set ModifiedDT to true.
1910 static bool despeculateCountZeros(IntrinsicInst *CountZeros,
1911 const TargetLowering *TLI,
1912 const DataLayout *DL,
1913 bool &ModifiedDT) {
1914 // If a zero input is undefined, it doesn't make sense to despeculate that.
1915 if (match(CountZeros->getOperand(1), m_One()))
1916 return false;
1917
1918 // If it's cheap to speculate, there's nothing to do.
1919 auto IntrinsicID = CountZeros->getIntrinsicID();
1920 if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz()) ||
1921 (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz()))
1922 return false;
1923
1924 // Only handle legal scalar cases. Anything else requires too much work.
1925 Type *Ty = CountZeros->getType();
1926 unsigned SizeInBits = Ty->getPrimitiveSizeInBits();
1927 if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits())
1928 return false;
1929
1930 // The intrinsic will be sunk behind a compare against zero and branch.
1931 BasicBlock *StartBlock = CountZeros->getParent();
1932 BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false");
1933
1934 // Create another block after the count zero intrinsic. A PHI will be added
1935 // in this block to select the result of the intrinsic or the bit-width
1936 // constant if the input to the intrinsic is zero.
1937 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros));
1938 BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end");
1939
1940 // Set up a builder to create a compare, conditional branch, and PHI.
1941 IRBuilder<> Builder(CountZeros->getContext());
1942 Builder.SetInsertPoint(StartBlock->getTerminator());
1943 Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc());
1944
1945 // Replace the unconditional branch that was created by the first split with
1946 // a compare against zero and a conditional branch.
1947 Value *Zero = Constant::getNullValue(Ty);
1948 Value *Cmp = Builder.CreateICmpEQ(CountZeros->getOperand(0), Zero, "cmpz");
1949 Builder.CreateCondBr(Cmp, EndBlock, CallBlock);
1950 StartBlock->getTerminator()->eraseFromParent();
1951
1952 // Create a PHI in the end block to select either the output of the intrinsic
1953 // or the bit width of the operand.
1954 Builder.SetInsertPoint(&EndBlock->front());
1955 PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz");
1956 CountZeros->replaceAllUsesWith(PN);
1957 Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits));
1958 PN->addIncoming(BitWidth, StartBlock);
1959 PN->addIncoming(CountZeros, CallBlock);
1960
1961 // We are explicitly handling the zero case, so we can set the intrinsic's
1962 // undefined zero argument to 'true'. This will also prevent reprocessing the
1963 // intrinsic; we only despeculate when a zero input is defined.
1964 CountZeros->setArgOperand(1, Builder.getTrue());
1965 ModifiedDT = true;
1966 return true;
1967 }
1968
1969 bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
1970 BasicBlock *BB = CI->getParent();
1971
1972 // Lower inline assembly if we can.
1973 // If we found an inline asm expression, and if the target knows how to
1974 // lower it to normal LLVM code, do so now.
1975 if (CI->isInlineAsm()) {
1976 if (TLI->ExpandInlineAsm(CI)) {
1977 // Avoid invalidating the iterator.
1978 CurInstIterator = BB->begin();
1979 // Avoid processing instructions out of order, which could cause
1980 // reuse before a value is defined.
1981 SunkAddrs.clear();
1982 return true;
1983 }
1984 // Sink address computing for memory operands into the block.
1985 if (optimizeInlineAsmInst(CI))
1986 return true;
1987 }
1988
1989 // Align the pointer arguments to this call if the target thinks it's a good
1990 // idea
1991 unsigned MinSize, PrefAlign;
1992 if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
1993 for (auto &Arg : CI->arg_operands()) {
1994 // We want to align both objects whose address is used directly and
1995 // objects whose address is used in casts and GEPs, though it only makes
1996 // sense for GEPs if the offset is a multiple of the desired alignment and
1997 // if size - offset meets the size threshold.
1998 if (!Arg->getType()->isPointerTy())
1999 continue;
2000 APInt Offset(DL->getIndexSizeInBits(
2001 cast<PointerType>(Arg->getType())->getAddressSpace()),
2002 0);
2003 Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
2004 uint64_t Offset2 = Offset.getLimitedValue();
2005 if ((Offset2 & (PrefAlign-1)) != 0)
2006 continue;
2007 AllocaInst *AI;
2008 if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlignment() < PrefAlign &&
2009 DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
2010 AI->setAlignment(Align(PrefAlign));
2011 // Global variables can only be aligned if they are defined in this
2012 // object (i.e. they are uniquely initialized in this object), and
2013 // over-aligning global variables that have an explicit section is
2014 // forbidden.
2015 GlobalVariable *GV;
2016 if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() &&
2017 GV->getPointerAlignment(*DL) < PrefAlign &&
2018 DL->getTypeAllocSize(GV->getValueType()) >=
2019 MinSize + Offset2)
2020 GV->setAlignment(MaybeAlign(PrefAlign));
2021 }
2022 // If this is a memcpy (or similar) then we may be able to improve the
2023 // alignment
2024 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
2025 Align DestAlign = getKnownAlignment(MI->getDest(), *DL);
2026 MaybeAlign MIDestAlign = MI->getDestAlign();
2027 if (!MIDestAlign || DestAlign > *MIDestAlign)
2028 MI->setDestAlignment(DestAlign);
2029 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
2030 MaybeAlign MTISrcAlign = MTI->getSourceAlign();
2031 Align SrcAlign = getKnownAlignment(MTI->getSource(), *DL);
2032 if (!MTISrcAlign || SrcAlign > *MTISrcAlign)
2033 MTI->setSourceAlignment(SrcAlign);
2034 }
2035 }
2036 }
2037
2038 // If we have a cold call site, try to sink addressing computation into the
2039 // cold block. This interacts with our handling for loads and stores to
2040 // ensure that we can fold all uses of a potential addressing computation
2041 // into their uses. TODO: generalize this to work over profiling data
2042 if (CI->hasFnAttr(Attribute::Cold) &&
2043 !OptSize && !llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
2044 for (auto &Arg : CI->arg_operands()) {
2045 if (!Arg->getType()->isPointerTy())
2046 continue;
2047 unsigned AS = Arg->getType()->getPointerAddressSpace();
2048 return optimizeMemoryInst(CI, Arg, Arg->getType(), AS);
2049 }
2050
2051 IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
2052 if (II) {
2053 switch (II->getIntrinsicID()) {
2054 default: break;
2055 case Intrinsic::assume: {
2056 Value *Operand = II->getOperand(0);
2057 II->eraseFromParent();
2058 // Prune the operand, it's most likely dead.
2059 resetIteratorIfInvalidatedWhileCalling(BB, [&]() {
2060 RecursivelyDeleteTriviallyDeadInstructions(
2061 Operand, TLInfo, nullptr,
2062 [&](Value *V) { removeAllAssertingVHReferences(V); });
2063 });
2064 return true;
2065 }
2066
2067 case Intrinsic::experimental_widenable_condition: {
2068 // Give up on future widening opportunities so that we can fold away dead
2069 // paths and merge blocks before going into block-local instruction
2070 // selection.
2071 if (II->use_empty()) {
2072 II->eraseFromParent();
2073 return true;
2074 }
2075 Constant *RetVal = ConstantInt::getTrue(II->getContext());
2076 resetIteratorIfInvalidatedWhileCalling(BB, [&]() {
2077 replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr);
2078 });
2079 return true;
2080 }
2081 case Intrinsic::objectsize:
2082 llvm_unreachable("llvm.objectsize.* should have been lowered already");
2083 case Intrinsic::is_constant:
2084 llvm_unreachable("llvm.is.constant.* should have been lowered already");
2085 case Intrinsic::aarch64_stlxr:
2086 case Intrinsic::aarch64_stxr: {
2087 ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0));
2088 if (!ExtVal || !ExtVal->hasOneUse() ||
2089 ExtVal->getParent() == CI->getParent())
2090 return false;
2091 // Sink a zext feeding stlxr/stxr before it, so it can be folded into it.
2092 ExtVal->moveBefore(CI);
2093 // Mark this instruction as "inserted by CGP", so that other
2094 // optimizations don't touch it.
2095 InsertedInsts.insert(ExtVal);
2096 return true;
2097 }
2098
2099 case Intrinsic::launder_invariant_group:
2100 case Intrinsic::strip_invariant_group: {
2101 Value *ArgVal = II->getArgOperand(0);
2102 auto it = LargeOffsetGEPMap.find(II);
2103 if (it != LargeOffsetGEPMap.end()) {
2104 // Merge entries in LargeOffsetGEPMap to reflect the RAUW.
2105 // Make sure not to have to deal with iterator invalidation
2106 // after possibly adding ArgVal to LargeOffsetGEPMap.
2107 auto GEPs = std::move(it->second);
2108 LargeOffsetGEPMap[ArgVal].append(GEPs.begin(), GEPs.end());
2109 LargeOffsetGEPMap.erase(II);
2110 }
2111
2112 II->replaceAllUsesWith(ArgVal);
2113 II->eraseFromParent();
2114 return true;
2115 }
2116 case Intrinsic::cttz:
2117 case Intrinsic::ctlz:
2118 // If counting zeros is expensive, try to avoid it.
2119 return despeculateCountZeros(II, TLI, DL, ModifiedDT);
2120 case Intrinsic::fshl:
2121 case Intrinsic::fshr:
2122 return optimizeFunnelShift(II);
2123 case Intrinsic::dbg_value:
2124 return fixupDbgValue(II);
2125 case Intrinsic::vscale: {
2126 // If datalayout has no special restrictions on vector data layout,
2127 // replace `llvm.vscale` by an equivalent constant expression
2128 // to benefit from cheap constant propagation.
2129 Type *ScalableVectorTy =
2130 VectorType::get(Type::getInt8Ty(II->getContext()), 1, true);
2131 if (DL->getTypeAllocSize(ScalableVectorTy).getKnownMinSize() == 8) {
2132 auto *Null = Constant::getNullValue(ScalableVectorTy->getPointerTo());
2133 auto *One = ConstantInt::getSigned(II->getType(), 1);
2134 auto *CGep =
2135 ConstantExpr::getGetElementPtr(ScalableVectorTy, Null, One);
2136 II->replaceAllUsesWith(ConstantExpr::getPtrToInt(CGep, II->getType()));
2137 II->eraseFromParent();
2138 return true;
2139 }
2140 break;
2141 }
2142 case Intrinsic::masked_gather:
2143 return optimizeGatherScatterInst(II, II->getArgOperand(0));
2144 case Intrinsic::masked_scatter:
2145 return optimizeGatherScatterInst(II, II->getArgOperand(1));
2146 }
2147
2148 SmallVector<Value *, 2> PtrOps;
2149 Type *AccessTy;
2150 if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
2151 while (!PtrOps.empty()) {
2152 Value *PtrVal = PtrOps.pop_back_val();
2153 unsigned AS = PtrVal->getType()->getPointerAddressSpace();
2154 if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
2155 return true;
2156 }
2157 }
2158
2159 // From here on out we're working with named functions.
2160 if (!CI->getCalledFunction()) return false;
2161
2162 // Lower all default uses of _chk calls. This is very similar
2163 // to what InstCombineCalls does, but here we are only lowering calls
2164 // to fortified library functions (e.g. __memcpy_chk) that have the default
2165 // "don't know" as the objectsize. Anything else should be left alone.
2166 FortifiedLibCallSimplifier Simplifier(TLInfo, true);
2167 IRBuilder<> Builder(CI);
2168 if (Value *V = Simplifier.optimizeCall(CI, Builder)) {
2169 CI->replaceAllUsesWith(V);
2170 CI->eraseFromParent();
2171 return true;
2172 }
2173
2174 return false;
2175 }
2176
2177 /// Look for opportunities to duplicate return instructions to the predecessor
2178 /// to enable tail call optimizations. The case it is currently looking for is:
2179 /// @code
2180 /// bb0:
2181 /// %tmp0 = tail call i32 @f0()
2182 /// br label %return
2183 /// bb1:
2184 /// %tmp1 = tail call i32 @f1()
2185 /// br label %return
2186 /// bb2:
2187 /// %tmp2 = tail call i32 @f2()
2188 /// br label %return
2189 /// return:
2190 /// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
2191 /// ret i32 %retval
2192 /// @endcode
2193 ///
2194 /// =>
2195 ///
2196 /// @code
2197 /// bb0:
2198 /// %tmp0 = tail call i32 @f0()
2199 /// ret i32 %tmp0
2200 /// bb1:
2201 /// %tmp1 = tail call i32 @f1()
2202 /// ret i32 %tmp1
2203 /// bb2:
2204 /// %tmp2 = tail call i32 @f2()
2205 /// ret i32 %tmp2
2206 /// @endcode
2207 bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB, bool &ModifiedDT) {
2208 ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
2209 if (!RetI)
2210 return false;
2211
2212 PHINode *PN = nullptr;
2213 ExtractValueInst *EVI = nullptr;
2214 BitCastInst *BCI = nullptr;
2215 Value *V = RetI->getReturnValue();
2216 if (V) {
2217 BCI = dyn_cast<BitCastInst>(V);
2218 if (BCI)
2219 V = BCI->getOperand(0);
2220
2221 EVI = dyn_cast<ExtractValueInst>(V);
2222 if (EVI) {
2223 V = EVI->getOperand(0);
2224 if (!std::all_of(EVI->idx_begin(), EVI->idx_end(),
2225 [](unsigned idx) { return idx == 0; }))
2226 return false;
2227 }
2228
2229 PN = dyn_cast<PHINode>(V);
2230 if (!PN)
2231 return false;
2232 }
2233
2234 if (PN && PN->getParent() != BB)
2235 return false;
2236
2237 // Make sure there are no instructions between the PHI and return, or that the
2238 // return is the first instruction in the block.
2239 if (PN) {
2240 BasicBlock::iterator BI = BB->begin();
2241 // Skip over debug and the bitcast.
2242 do {
2243 ++BI;
2244 } while (isa<DbgInfoIntrinsic>(BI) || &*BI == BCI || &*BI == EVI);
2245 if (&*BI != RetI)
2246 return false;
2247 } else {
2248 BasicBlock::iterator BI = BB->begin();
2249 while (isa<DbgInfoIntrinsic>(BI)) ++BI;
2250 if (&*BI != RetI)
2251 return false;
2252 }
2253
2254 /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
2255 /// call.
2256 const Function *F = BB->getParent();
2257 SmallVector<BasicBlock*, 4> TailCallBBs;
2258 if (PN) {
2259 for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
2260 // Look through bitcasts.
2261 Value *IncomingVal = PN->getIncomingValue(I)->stripPointerCasts();
2262 CallInst *CI = dyn_cast<CallInst>(IncomingVal);
2263 BasicBlock *PredBB = PN->getIncomingBlock(I);
2264 // Make sure the phi value is indeed produced by the tail call.
2265 if (CI && CI->hasOneUse() && CI->getParent() == PredBB &&
2266 TLI->mayBeEmittedAsTailCall(CI) &&
2267 attributesPermitTailCall(F, CI, RetI, *TLI))
2268 TailCallBBs.push_back(PredBB);
2269 }
2270 } else {
2271 SmallPtrSet<BasicBlock*, 4> VisitedBBs;
2272 for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
2273 if (!VisitedBBs.insert(*PI).second)
2274 continue;
2275
2276 BasicBlock::InstListType &InstList = (*PI)->getInstList();
2277 BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin();
2278 BasicBlock::InstListType::reverse_iterator RE = InstList.rend();
2279 do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI));
2280 if (RI == RE)
2281 continue;
2282
2283 CallInst *CI = dyn_cast<CallInst>(&*RI);
2284 if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) &&
2285 attributesPermitTailCall(F, CI, RetI, *TLI))
2286 TailCallBBs.push_back(*PI);
2287 }
2288 }
2289
2290 bool Changed = false;
2291 for (auto const &TailCallBB : TailCallBBs) {
2292 // Make sure the call instruction is followed by an unconditional branch to
2293 // the return block.
2294 BranchInst *BI = dyn_cast<BranchInst>(TailCallBB->getTerminator());
2295 if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
2296 continue;
2297
2298 // Duplicate the return into TailCallBB.
2299 (void)FoldReturnIntoUncondBranch(RetI, BB, TailCallBB);
2300 assert(!VerifyBFIUpdates ||
2301 BFI->getBlockFreq(BB) >= BFI->getBlockFreq(TailCallBB));
2302 BFI->setBlockFreq(
2303 BB,
2304 (BFI->getBlockFreq(BB) - BFI->getBlockFreq(TailCallBB)).getFrequency());
2305 ModifiedDT = Changed = true;
2306 ++NumRetsDup;
2307 }
2308
2309 // If we eliminated all predecessors of the block, delete the block now.
2310 if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB))
2311 BB->eraseFromParent();
2312
2313 return Changed;
2314 }
2315
2316 //===----------------------------------------------------------------------===//
2317 // Memory Optimization
2318 //===----------------------------------------------------------------------===//
2319
2320 namespace {
2321
2322 /// This is an extended version of TargetLowering::AddrMode
2323 /// which holds actual Value*'s for register values.
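/// Illustrative decomposition (a sketch): an address computed as
/// "%base + 8*%idx + 16" would be represented roughly as BaseReg = %base,
/// ScaledReg = %idx, Scale = 8, BaseOffs = 16, and BaseGV unset.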
2324 struct ExtAddrMode : public TargetLowering::AddrMode {
2325 Value *BaseReg = nullptr;
2326 Value *ScaledReg = nullptr;
2327 Value *OriginalValue = nullptr;
2328 bool InBounds = true;
2329
2330 enum FieldName {
2331 NoField = 0x00,
2332 BaseRegField = 0x01,
2333 BaseGVField = 0x02,
2334 BaseOffsField = 0x04,
2335 ScaledRegField = 0x08,
2336 ScaleField = 0x10,
2337 MultipleFields = 0xff
2338 };
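// Note: compare() below ORs these flags together, one per differing field,
// and collapses any result with more than one bit set into MultipleFields.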
2339
2340
2341 ExtAddrMode() = default;
2342
2343 void print(raw_ostream &OS) const;
2344 void dump() const;
2345
2346 FieldName compare(const ExtAddrMode &other) {
2347 // First check that the types are the same on each field, as differing types
2348 // are something we can't cope with later on.
2349 if (BaseReg && other.BaseReg &&
2350 BaseReg->getType() != other.BaseReg->getType())
2351 return MultipleFields;
2352 if (BaseGV && other.BaseGV &&
2353 BaseGV->getType() != other.BaseGV->getType())
2354 return MultipleFields;
2355 if (ScaledReg && other.ScaledReg &&
2356 ScaledReg->getType() != other.ScaledReg->getType())
2357 return MultipleFields;
2358
2359 // Conservatively reject 'inbounds' mismatches.
2360 if (InBounds != other.InBounds)
2361 return MultipleFields;
2362
2363 // Check each field to see if it differs.
2364 unsigned Result = NoField;
2365 if (BaseReg != other.BaseReg)
2366 Result |= BaseRegField;
2367 if (BaseGV != other.BaseGV)
2368 Result |= BaseGVField;
2369 if (BaseOffs != other.BaseOffs)
2370 Result |= BaseOffsField;
2371 if (ScaledReg != other.ScaledReg)
2372 Result |= ScaledRegField;
2373 // Don't count 0 as being a different scale, because that actually means
2374 // unscaled (which will already be counted by having no ScaledReg).
2375 if (Scale && other.Scale && Scale != other.Scale)
2376 Result |= ScaleField;
2377
2378 if (countPopulation(Result) > 1)
2379 return MultipleFields;
2380 else
2381 return static_cast<FieldName>(Result);
2382 }
2383
2384 // An AddrMode is trivial if it involves no calculation i.e. it is just a base
2385 // with no offset.
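// For example, an AddrMode of just [Base:%p] is trivial, while
// [Base:%p + 8] (non-zero BaseOffs) or [2*%idx] (non-zero Scale) is not.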
2386 bool isTrivial() {
2387 // An AddrMode is (BaseGV + BaseReg + BaseOffs + ScaleReg * Scale) so it is
2388 // trivial if at most one of these terms is nonzero, except that BaseGV and
2389 // BaseReg both being zero actually means a null pointer value, which we
2390 // consider to be 'non-zero' here.
2391 return !BaseOffs && !Scale && !(BaseGV && BaseReg);
2392 }
2393
2394 Value *GetFieldAsValue(FieldName Field, Type *IntPtrTy) {
2395 switch (Field) {
2396 default:
2397 return nullptr;
2398 case BaseRegField:
2399 return BaseReg;
2400 case BaseGVField:
2401 return BaseGV;
2402 case ScaledRegField:
2403 return ScaledReg;
2404 case BaseOffsField:
2405 return ConstantInt::get(IntPtrTy, BaseOffs);
2406 }
2407 }
2408
2409 void SetCombinedField(FieldName Field, Value *V,
2410 const SmallVectorImpl<ExtAddrMode> &AddrModes) {
2411 switch (Field) {
2412 default:
2413 llvm_unreachable("Unhandled fields are expected to be rejected earlier");
2414 break;
2415 case ExtAddrMode::BaseRegField:
2416 BaseReg = V;
2417 break;
2418 case ExtAddrMode::BaseGVField:
2419 // A combined BaseGV is an Instruction, not a GlobalValue, so it goes
2420 // in the BaseReg field.
2421 assert(BaseReg == nullptr);
2422 BaseReg = V;
2423 BaseGV = nullptr;
2424 break;
2425 case ExtAddrMode::ScaledRegField:
2426 ScaledReg = V;
2427 // If we have a mix of scaled and unscaled addrmodes then we want scale
2428 // to be the scale and not zero.
2429 if (!Scale)
2430 for (const ExtAddrMode &AM : AddrModes)
2431 if (AM.Scale) {
2432 Scale = AM.Scale;
2433 break;
2434 }
2435 break;
2436 case ExtAddrMode::BaseOffsField:
2437 // The offset is no longer a constant, so it goes in ScaledReg with a
2438 // scale of 1.
2439 assert(ScaledReg == nullptr);
2440 ScaledReg = V;
2441 Scale = 1;
2442 BaseOffs = 0;
2443 break;
2444 }
2445 }
2446 };
2447
2448 } // end anonymous namespace
2449
2450 #ifndef NDEBUG
2451 static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
2452 AM.print(OS);
2453 return OS;
2454 }
2455 #endif
2456
2457 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2458 void ExtAddrMode::print(raw_ostream &OS) const {
2459 bool NeedPlus = false;
2460 OS << "[";
2461 if (InBounds)
2462 OS << "inbounds ";
2463 if (BaseGV) {
2464 OS << (NeedPlus ? " + " : "")
2465 << "GV:";
2466 BaseGV->printAsOperand(OS, /*PrintType=*/false);
2467 NeedPlus = true;
2468 }
2469
2470 if (BaseOffs) {
2471 OS << (NeedPlus ? " + " : "")
2472 << BaseOffs;
2473 NeedPlus = true;
2474 }
2475
2476 if (BaseReg) {
2477 OS << (NeedPlus ? " + " : "")
2478 << "Base:";
2479 BaseReg->printAsOperand(OS, /*PrintType=*/false);
2480 NeedPlus = true;
2481 }
2482 if (Scale) {
2483 OS << (NeedPlus ? " + " : "")
2484 << Scale << "*";
2485 ScaledReg->printAsOperand(OS, /*PrintType=*/false);
2486 }
2487
2488 OS << ']';
2489 }
2490
2491 LLVM_DUMP_METHOD void ExtAddrMode::dump() const {
2492 print(dbgs());
2493 dbgs() << '\n';
2494 }
2495 #endif
2496
2497 namespace {
2498
2499 /// This class provides transaction based operation on the IR.
2500 /// Every change made through this class is recorded in the internal state and
2501 /// can be undone (rollback) until commit is called.
2502 /// CGP does not check if instructions could be speculatively executed when
2503 /// moved. Preserving the original location would pessimize the debugging
2504 /// experience, as well as negatively impact the quality of sample PGO.
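/// Typical usage within this pass (a sketch; names are illustrative):
///   TypePromotionTransaction TPT(RemovedInsts);
///   auto RestorePt = TPT.getRestorationPoint();
///   TPT.mutateType(I, NewTy);          // the change is recorded
///   if (!Profitable)
///     TPT.rollback(RestorePt);         // undo everything back to RestorePt
///   else
///     TPT.commit();                    // keep all recorded changes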
2505 class TypePromotionTransaction {
2506 /// This represents the common interface of the individual transaction.
2507 /// Each class implements the logic for doing one specific modification on
2508 /// the IR via the TypePromotionTransaction.
2509 class TypePromotionAction {
2510 protected:
2511 /// The Instruction modified.
2512 Instruction *Inst;
2513
2514 public:
2515 /// Constructor of the action.
2516 /// The constructor performs the related action on the IR.
2517 TypePromotionAction(Instruction *Inst) : Inst(Inst) {}
2518
2519 virtual ~TypePromotionAction() = default;
2520
2521 /// Undo the modification done by this action.
2522 /// When this method is called, the IR must be in the same state as it was
2523 /// before this action was applied.
2524 /// \pre Undoing the action works if and only if the IR is in the exact same
2525 /// state as it was directly after this action was applied.
2526 virtual void undo() = 0;
2527
2528 /// Commit every change made by this action.
2529 /// When the action's effects on the IR are to be kept, it is important to
2530 /// call this function; otherwise hidden information may be kept forever.
2531 virtual void commit() {
2532 // Nothing to be done, this action is not doing anything.
2533 }
2534 };
2535
2536 /// Utility to remember the position of an instruction.
2537 class InsertionHandler {
2538 /// Position of an instruction.
2539 /// Either an instruction:
2540 /// - Is the first in a basic block: BB is used.
2541 /// - Has a previous instruction: PrevInst is used.
2542 union {
2543 Instruction *PrevInst;
2544 BasicBlock *BB;
2545 } Point;
2546
2547 /// Remember whether or not the instruction had a previous instruction.
2548 bool HasPrevInstruction;
2549
2550 public:
2551 /// Record the position of \p Inst.
2552 InsertionHandler(Instruction *Inst) {
2553 BasicBlock::iterator It = Inst->getIterator();
2554 HasPrevInstruction = (It != (Inst->getParent()->begin()));
2555 if (HasPrevInstruction)
2556 Point.PrevInst = &*--It;
2557 else
2558 Point.BB = Inst->getParent();
2559 }
2560
2561 /// Insert \p Inst at the recorded position.
2562 void insert(Instruction *Inst) {
2563 if (HasPrevInstruction) {
2564 if (Inst->getParent())
2565 Inst->removeFromParent();
2566 Inst->insertAfter(Point.PrevInst);
2567 } else {
2568 Instruction *Position = &*Point.BB->getFirstInsertionPt();
2569 if (Inst->getParent())
2570 Inst->moveBefore(Position);
2571 else
2572 Inst->insertBefore(Position);
2573 }
2574 }
2575 };
2576
2577 /// Move an instruction before another.
2578 class InstructionMoveBefore : public TypePromotionAction {
2579 /// Original position of the instruction.
2580 InsertionHandler Position;
2581
2582 public:
2583 /// Move \p Inst before \p Before.
2584 InstructionMoveBefore(Instruction *Inst, Instruction *Before)
2585 : TypePromotionAction(Inst), Position(Inst) {
2586 LLVM_DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before
2587 << "\n");
2588 Inst->moveBefore(Before);
2589 }
2590
2591 /// Move the instruction back to its original position.
2592 void undo() override {
2593 LLVM_DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
2594 Position.insert(Inst);
2595 }
2596 };
2597
2598 /// Set the operand of an instruction with a new value.
2599 class OperandSetter : public TypePromotionAction {
2600 /// Original operand of the instruction.
2601 Value *Origin;
2602
2603 /// Index of the modified operand of the instruction.
2604 unsigned Idx;
2605
2606 public:
2607 /// Set \p Idx operand of \p Inst with \p NewVal.
2608 OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
2609 : TypePromotionAction(Inst), Idx(Idx) {
2610 LLVM_DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
2611 << "for:" << *Inst << "\n"
2612 << "with:" << *NewVal << "\n");
2613 Origin = Inst->getOperand(Idx);
2614 Inst->setOperand(Idx, NewVal);
2615 }
2616
2617 /// Restore the original value of the instruction.
2618 void undo() override {
2619 LLVM_DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
2620 << "for: " << *Inst << "\n"
2621 << "with: " << *Origin << "\n");
2622 Inst->setOperand(Idx, Origin);
2623 }
2624 };
2625
2626 /// Hide the operands of an instruction.
2627 /// Behave as if this instruction were not using any of its operands.
2628 class OperandsHider : public TypePromotionAction {
2629 /// The list of original operands.
2630 SmallVector<Value *, 4> OriginalValues;
2631
2632 public:
2633 /// Remove \p Inst from the uses of the operands of \p Inst.
2634 OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
2635 LLVM_DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
2636 unsigned NumOpnds = Inst->getNumOperands();
2637 OriginalValues.reserve(NumOpnds);
2638 for (unsigned It = 0; It < NumOpnds; ++It) {
2639 // Save the current operand.
2640 Value *Val = Inst->getOperand(It);
2641 OriginalValues.push_back(Val);
2642 // Set a dummy one.
2643 // We could use OperandSetter here, but that would imply an overhead
2644 // that we are not willing to pay.
2645 Inst->setOperand(It, UndefValue::get(Val->getType()));
2646 }
2647 }
2648
2649 /// Restore the original list of uses.
2650 void undo() override {
2651 LLVM_DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
2652 for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
2653 Inst->setOperand(It, OriginalValues[It]);
2654 }
2655 };
2656
2657 /// Build a truncate instruction.
2658 class TruncBuilder : public TypePromotionAction {
2659 Value *Val;
2660
2661 public:
2662 /// Build a truncate instruction of \p Opnd producing a \p Ty
2663 /// result.
2664 /// trunc Opnd to Ty.
2665 TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
2666 IRBuilder<> Builder(Opnd);
2667 Builder.SetCurrentDebugLocation(DebugLoc());
2668 Val = Builder.CreateTrunc(Opnd, Ty, "promoted");
2669 LLVM_DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
2670 }
2671
2672 /// Get the built value.
2673 Value *getBuiltValue() { return Val; }
2674
2675 /// Remove the built instruction.
2676 void undo() override {
2677 LLVM_DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
2678 if (Instruction *IVal = dyn_cast<Instruction>(Val))
2679 IVal->eraseFromParent();
2680 }
2681 };
2682
2683 /// Build a sign extension instruction.
2684 class SExtBuilder : public TypePromotionAction {
2685 Value *Val;
2686
2687 public:
2688 /// Build a sign extension instruction of \p Opnd producing a \p Ty
2689 /// result.
2690 /// sext Opnd to Ty.
2691 SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
2692 : TypePromotionAction(InsertPt) {
2693 IRBuilder<> Builder(InsertPt);
2694 Val = Builder.CreateSExt(Opnd, Ty, "promoted");
2695 LLVM_DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
2696 }
2697
2698 /// Get the built value.
2699 Value *getBuiltValue() { return Val; }
2700
2701 /// Remove the built instruction.
2702 void undo() override {
2703 LLVM_DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
2704 if (Instruction *IVal = dyn_cast<Instruction>(Val))
2705 IVal->eraseFromParent();
2706 }
2707 };
2708
2709 /// Build a zero extension instruction.
2710 class ZExtBuilder : public TypePromotionAction {
2711 Value *Val;
2712
2713 public:
2714 /// Build a zero extension instruction of \p Opnd producing a \p Ty
2715 /// result.
2716 /// zext Opnd to Ty.
2717 ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
2718 : TypePromotionAction(InsertPt) {
2719 IRBuilder<> Builder(InsertPt);
2720 Builder.SetCurrentDebugLocation(DebugLoc());
2721 Val = Builder.CreateZExt(Opnd, Ty, "promoted");
2722 LLVM_DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
2723 }
2724
2725 /// Get the built value.
2726 Value *getBuiltValue() { return Val; }
2727
2728 /// Remove the built instruction.
2729 void undo() override {
2730 LLVM_DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
2731 if (Instruction *IVal = dyn_cast<Instruction>(Val))
2732 IVal->eraseFromParent();
2733 }
2734 };
2735
2736 /// Mutate an instruction to another type.
2737 class TypeMutator : public TypePromotionAction {
2738 /// Record the original type.
2739 Type *OrigTy;
2740
2741 public:
2742 /// Mutate the type of \p Inst into \p NewTy.
2743 TypeMutator(Instruction *Inst, Type *NewTy)
2744 : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
2745 LLVM_DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
2746 << "\n");
2747 Inst->mutateType(NewTy);
2748 }
2749
2750 /// Mutate the instruction back to its original type.
2751 void undo() override {
2752 LLVM_DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
2753 << "\n");
2754 Inst->mutateType(OrigTy);
2755 }
2756 };
2757
2758 /// Replace the uses of an instruction by another instruction.
2759 class UsesReplacer : public TypePromotionAction {
2760 /// Helper structure to keep track of the replaced uses.
2761 struct InstructionAndIdx {
2762 /// The instruction that uses the instruction being replaced.
2763 Instruction *Inst;
2764
2765 /// The operand index at which the replaced instruction is used.
2766 unsigned Idx;
2767
2768 InstructionAndIdx(Instruction *Inst, unsigned Idx)
2769 : Inst(Inst), Idx(Idx) {}
2770 };
2771
2772 /// Keep track of the original uses (pair Instruction, Index).
2773 SmallVector<InstructionAndIdx, 4> OriginalUses;
2774 /// Keep track of the debug users.
2775 SmallVector<DbgValueInst *, 1> DbgValues;
2776
2777 using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator;
2778
2779 public:
2780 /// Replace all the use of \p Inst by \p New.
2781 UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) {
2782 LLVM_DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
2783 << "\n");
2784 // Record the original uses.
2785 for (Use &U : Inst->uses()) {
2786 Instruction *UserI = cast<Instruction>(U.getUser());
2787 OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
2788 }
2789 // Record the debug uses separately. They are not in the instruction's
2790 // use list, but they are replaced by RAUW.
2791 findDbgValues(DbgValues, Inst);
2792
2793 // Now, we can replace the uses.
2794 Inst->replaceAllUsesWith(New);
2795 }
2796
2797 /// Reassign the original uses of Inst to Inst.
2798 void undo() override {
2799 LLVM_DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
2800 for (use_iterator UseIt = OriginalUses.begin(),
2801 EndIt = OriginalUses.end();
2802 UseIt != EndIt; ++UseIt) {
2803 UseIt->Inst->setOperand(UseIt->Idx, Inst);
2804 }
2805 // RAUW has replaced all original uses with references to the new value,
2806 // including the debug uses. Since we are undoing the replacements,
2807 // the original debug uses must also be reinstated to maintain the
2808 // correctness and utility of debug value instructions.
2809 for (auto *DVI: DbgValues) {
2810 LLVMContext &Ctx = Inst->getType()->getContext();
2811 auto *MV = MetadataAsValue::get(Ctx, ValueAsMetadata::get(Inst));
2812 DVI->setOperand(0, MV);
2813 }
2814 }
2815 };
2816
2817 /// Remove an instruction from the IR.
2818 class InstructionRemover : public TypePromotionAction {
2819 /// Original position of the instruction.
2820 InsertionHandler Inserter;
2821
2822 /// Helper structure to hide all the links to the instruction. In other
2823 /// words, this helps pretend that the instruction has been removed.
2824 OperandsHider Hider;
2825
2826 /// Keep track of the uses replaced, if any.
2827 UsesReplacer *Replacer = nullptr;
2828
2829 /// Keep track of instructions removed.
2830 SetOfInstrs &RemovedInsts;
2831
2832 public:
2833 /// Remove all reference of \p Inst and optionally replace all its
2834 /// uses with New.
2835 /// \p RemovedInsts Keep track of the instructions removed by this Action.
2836 /// \pre If !Inst->use_empty(), then New != nullptr
2837 InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts,
2838 Value *New = nullptr)
2839 : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
2840 RemovedInsts(RemovedInsts) {
2841 if (New)
2842 Replacer = new UsesReplacer(Inst, New);
2843 LLVM_DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
2844 RemovedInsts.insert(Inst);
2845 /// The instructions removed here will be freed after completing
2846 /// optimizeBlock() for all blocks as we need to keep track of the
2847 /// removed instructions during promotion.
2848 Inst->removeFromParent();
2849 }
2850
2851 ~InstructionRemover() override { delete Replacer; }
2852
2853 /// Resurrect the instruction and reassign it to the proper uses if a
2854 /// new value was provided when building this action.
2855 void undo() override {
2856 LLVM_DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
2857 Inserter.insert(Inst);
2858 if (Replacer)
2859 Replacer->undo();
2860 Hider.undo();
2861 RemovedInsts.erase(Inst);
2862 }
2863 };
2864
2865 public:
2866 /// Restoration point.
2867 /// The restoration point is a pointer to an action instead of an iterator
2868 /// because the iterator may be invalidated but not the pointer.
2869 using ConstRestorationPt = const TypePromotionAction *;
2870
2871 TypePromotionTransaction(SetOfInstrs &RemovedInsts)
2872 : RemovedInsts(RemovedInsts) {}
2873
2874 /// Commit all the changes made in this transaction. Return true if any
2875 /// change happened.
2876 bool commit();
2877
2878 /// Undo all the changes made after the given point.
2879 void rollback(ConstRestorationPt Point);
2880
2881 /// Get the current restoration point.
2882 ConstRestorationPt getRestorationPoint() const;
2883
2884 /// \name API for IR modification with state keeping to support rollback.
2885 /// @{
2886 /// Same as Instruction::setOperand.
2887 void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);
2888
2889 /// Same as Instruction::eraseFromParent.
2890 void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);
2891
2892 /// Same as Value::replaceAllUsesWith.
2893 void replaceAllUsesWith(Instruction *Inst, Value *New);
2894
2895 /// Same as Value::mutateType.
2896 void mutateType(Instruction *Inst, Type *NewTy);
2897
2898 /// Same as IRBuilder::createTrunc.
2899 Value *createTrunc(Instruction *Opnd, Type *Ty);
2900
2901 /// Same as IRBuilder::createSExt.
2902 Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);
2903
2904 /// Same as IRBuilder::createZExt.
2905 Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty);
2906
2907 /// Same as Instruction::moveBefore.
2908 void moveBefore(Instruction *Inst, Instruction *Before);
2909 /// @}
2910
2911 private:
2912 /// The ordered list of actions made so far.
2913 SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions;
2914
2915 using CommitPt = SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator;
2916
2917 SetOfInstrs &RemovedInsts;
2918 };
2919
2920 } // end anonymous namespace
2921
2922 void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx,
2923 Value *NewVal) {
2924 Actions.push_back(std::make_unique<TypePromotionTransaction::OperandSetter>(
2925 Inst, Idx, NewVal));
2926 }
2927
2928 void TypePromotionTransaction::eraseInstruction(Instruction *Inst,
2929 Value *NewVal) {
2930 Actions.push_back(
2931 std::make_unique<TypePromotionTransaction::InstructionRemover>(
2932 Inst, RemovedInsts, NewVal));
2933 }
2934
void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst,
                                                  Value *New) {
  Actions.push_back(
      std::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New));
}

void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) {
  Actions.push_back(
      std::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy));
}

Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, Type *Ty) {
  std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty));
  Value *Val = Ptr->getBuiltValue();
  Actions.push_back(std::move(Ptr));
  return Val;
}

Value *TypePromotionTransaction::createSExt(Instruction *Inst, Value *Opnd,
                                            Type *Ty) {
  std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty));
  Value *Val = Ptr->getBuiltValue();
  Actions.push_back(std::move(Ptr));
  return Val;
}

Value *TypePromotionTransaction::createZExt(Instruction *Inst, Value *Opnd,
                                            Type *Ty) {
  std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty));
  Value *Val = Ptr->getBuiltValue();
  Actions.push_back(std::move(Ptr));
  return Val;
}

void TypePromotionTransaction::moveBefore(Instruction *Inst,
                                          Instruction *Before) {
  Actions.push_back(
      std::make_unique<TypePromotionTransaction::InstructionMoveBefore>(
          Inst, Before));
}
2976
TypePromotionTransaction::ConstRestorationPt
TypePromotionTransaction::getRestorationPoint() const {
  return !Actions.empty() ? Actions.back().get() : nullptr;
}

bool TypePromotionTransaction::commit() {
  for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt;
       ++It)
    (*It)->commit();
  bool Modified = !Actions.empty();
  Actions.clear();
  return Modified;
}

void TypePromotionTransaction::rollback(
    TypePromotionTransaction::ConstRestorationPt Point) {
  while (!Actions.empty() && Point != Actions.back().get()) {
    std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val();
    Curr->undo();
  }
}
2998
2999 namespace {
3000
3001 /// A helper class for matching addressing modes.
3002 ///
3003 /// This encapsulates the logic for matching the target-legal addressing modes.
3004 class AddressingModeMatcher {
3005 SmallVectorImpl<Instruction*> &AddrModeInsts;
3006 const TargetLowering &TLI;
3007 const TargetRegisterInfo &TRI;
3008 const DataLayout &DL;
3009
3010 /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
3011 /// the memory instruction that we're computing this address for.
3012 Type *AccessTy;
3013 unsigned AddrSpace;
3014 Instruction *MemoryInst;
3015
3016 /// This is the addressing mode that we're building up. This is
3017 /// part of the return value of this addressing mode matching stuff.
3018 ExtAddrMode &AddrMode;
3019
3020 /// The instructions inserted by other CodeGenPrepare optimizations.
3021 const SetOfInstrs &InsertedInsts;
3022
3023 /// A map from the instructions to their type before promotion.
3024 InstrToOrigTy &PromotedInsts;
3025
3026 /// The ongoing transaction where every action should be registered.
3027 TypePromotionTransaction &TPT;
3028
  // A GEP whose offset is too large to be folded into the addressing mode.
  std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP;
3031
3032 /// This is set to true when we should not do profitability checks.
3033 /// When true, IsProfitableToFoldIntoAddressingMode always returns true.
3034 bool IgnoreProfitability;
3035
3036 /// True if we are optimizing for size.
3037 bool OptSize;
3038
3039 ProfileSummaryInfo *PSI;
3040 BlockFrequencyInfo *BFI;
3041
  AddressingModeMatcher(
      SmallVectorImpl<Instruction *> &AMI, const TargetLowering &TLI,
      const TargetRegisterInfo &TRI, Type *AT, unsigned AS, Instruction *MI,
      ExtAddrMode &AM, const SetOfInstrs &InsertedInsts,
      InstrToOrigTy &PromotedInsts, TypePromotionTransaction &TPT,
      std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
      bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
      : AddrModeInsts(AMI), TLI(TLI), TRI(TRI),
        DL(MI->getModule()->getDataLayout()), AccessTy(AT), AddrSpace(AS),
        MemoryInst(MI), AddrMode(AM), InsertedInsts(InsertedInsts),
        PromotedInsts(PromotedInsts), TPT(TPT), LargeOffsetGEP(LargeOffsetGEP),
        OptSize(OptSize), PSI(PSI), BFI(BFI) {
    IgnoreProfitability = false;
  }
3056
3057 public:
  /// Find the maximal addressing mode that a load/store of V can fold,
  /// given an access type of AccessTy. This returns a list of the involved
  /// instructions in AddrModeInsts.
  /// \p InsertedInsts The instructions inserted by other CodeGenPrepare
  /// optimizations.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p TPT The ongoing transaction where every action should be registered.
  static ExtAddrMode
  Match(Value *V, Type *AccessTy, unsigned AS, Instruction *MemoryInst,
        SmallVectorImpl<Instruction *> &AddrModeInsts,
        const TargetLowering &TLI, const TargetRegisterInfo &TRI,
        const SetOfInstrs &InsertedInsts, InstrToOrigTy &PromotedInsts,
        TypePromotionTransaction &TPT,
        std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
        bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
3073 ExtAddrMode Result;
3074
3075 bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI, AccessTy, AS,
3076 MemoryInst, Result, InsertedInsts,
3077 PromotedInsts, TPT, LargeOffsetGEP,
3078 OptSize, PSI, BFI)
3079 .matchAddr(V, 0);
3080 (void)Success; assert(Success && "Couldn't select *anything*?");
3081 return Result;
3082 }
3083
3084 private:
3085 bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
3086 bool matchAddr(Value *Addr, unsigned Depth);
3087 bool matchOperationAddr(User *AddrInst, unsigned Opcode, unsigned Depth,
3088 bool *MovedAway = nullptr);
3089 bool isProfitableToFoldIntoAddressingMode(Instruction *I,
3090 ExtAddrMode &AMBefore,
3091 ExtAddrMode &AMAfter);
3092 bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
3093 bool isPromotionProfitable(unsigned NewCost, unsigned OldCost,
3094 Value *PromotedOperand) const;
3095 };
3096
3097 class PhiNodeSet;
3098
3099 /// An iterator for PhiNodeSet.
3100 class PhiNodeSetIterator {
3101 PhiNodeSet * const Set;
3102 size_t CurrentIndex = 0;
3103
3104 public:
3105 /// The constructor. Start should point to either a valid element, or be equal
3106 /// to the size of the underlying SmallVector of the PhiNodeSet.
3107 PhiNodeSetIterator(PhiNodeSet * const Set, size_t Start);
3108 PHINode * operator*() const;
3109 PhiNodeSetIterator& operator++();
3110 bool operator==(const PhiNodeSetIterator &RHS) const;
3111 bool operator!=(const PhiNodeSetIterator &RHS) const;
3112 };
3113
/// Keeps a set of PHINodes.
///
/// This is a minimal set implementation for a specific use case:
/// It is very fast when there are very few elements, but also provides good
/// performance when there are many. It is similar to SmallPtrSet, but also
/// provides iteration by insertion order, which is deterministic and stable
/// across runs. It is also similar to SmallSetVector, but provides removal of
/// elements in O(1) time. This is achieved by never actually removing elements
/// from the underlying vector, which costs some extra memory; that is
/// acceptable because PhiNodeSets are short-lived objects.
3124 class PhiNodeSet {
3125 friend class PhiNodeSetIterator;
3126
3127 using MapType = SmallDenseMap<PHINode *, size_t, 32>;
3128 using iterator = PhiNodeSetIterator;
3129
3130 /// Keeps the elements in the order of their insertion in the underlying
3131 /// vector. To achieve constant time removal, it never deletes any element.
3132 SmallVector<PHINode *, 32> NodeList;
3133
3134 /// Keeps the elements in the underlying set implementation. This (and not the
3135 /// NodeList defined above) is the source of truth on whether an element
3136 /// is actually in the collection.
3137 MapType NodeMap;
3138
  /// Points to the first valid (not deleted) element when the set is not empty
  /// and the value is not zero. Equal to the size of the underlying vector
  /// when the set is empty. When the value is 0, as in the beginning, the
  /// first element may or may not be valid.
  size_t FirstValidElement = 0;
3144
3145 public:
  /// Inserts a new element into the collection.
  /// \returns true if the element was actually added, i.e. was not in the
  /// collection before the operation.
  bool insert(PHINode *Ptr) {
    if (NodeMap.insert(std::make_pair(Ptr, NodeList.size())).second) {
      NodeList.push_back(Ptr);
      return true;
    }
    return false;
  }
3156
  /// Removes the element from the collection.
  /// \returns whether the element was actually removed, i.e. was in the
  /// collection before the operation.
  bool erase(PHINode *Ptr) {
    auto it = NodeMap.find(Ptr);
    if (it != NodeMap.end()) {
      NodeMap.erase(Ptr);
      SkipRemovedElements(FirstValidElement);
      return true;
    }
    return false;
  }

  /// Removes all elements and clears the collection.
  void clear() {
    NodeMap.clear();
    NodeList.clear();
    FirstValidElement = 0;
  }

  /// \returns an iterator that will iterate the elements in the order of
  /// insertion.
  iterator begin() {
    if (FirstValidElement == 0)
      SkipRemovedElements(FirstValidElement);
    return PhiNodeSetIterator(this, FirstValidElement);
  }

  /// \returns an iterator that points to the end of the collection.
  iterator end() { return PhiNodeSetIterator(this, NodeList.size()); }

  /// Returns the number of elements in the collection.
  size_t size() const {
    return NodeMap.size();
  }

  /// \returns 1 if the given element is in the collection, and 0 otherwise.
  size_t count(PHINode *Ptr) const {
    return NodeMap.count(Ptr);
  }
3197
3198 private:
  /// Updates the CurrentIndex so that it will point to a valid element.
  ///
  /// If the element of NodeList at CurrentIndex is valid, it does not
  /// change it. If there are no more valid elements, it updates CurrentIndex
  /// to point to the end of the NodeList.
  void SkipRemovedElements(size_t &CurrentIndex) {
    while (CurrentIndex < NodeList.size()) {
      auto it = NodeMap.find(NodeList[CurrentIndex]);
      // If the element has been deleted and added again later, NodeMap will
      // point to a different index, so CurrentIndex will still be invalid.
      if (it != NodeMap.end() && it->second == CurrentIndex)
        break;
      ++CurrentIndex;
    }
  }
};

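// A minimal usage sketch for PhiNodeSet (illustrative only; the names below
// are hypothetical and do not appear elsewhere in this file):
//
//   PhiNodeSet Phis;
//   Phis.insert(PN);          // returns false if PN was already present
//   Phis.erase(PN);           // O(1); the vector slot is merely invalidated
//   for (PHINode *P : Phis)   // visits live elements in insertion order,
//     process(P);             // silently skipping erased slots
//
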
PhiNodeSetIterator::PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start)
    : Set(Set), CurrentIndex(Start) {}

PHINode *PhiNodeSetIterator::operator*() const {
  assert(CurrentIndex < Set->NodeList.size() &&
         "PhiNodeSet access out of range");
  return Set->NodeList[CurrentIndex];
}

PhiNodeSetIterator &PhiNodeSetIterator::operator++() {
  assert(CurrentIndex < Set->NodeList.size() &&
         "PhiNodeSet access out of range");
  ++CurrentIndex;
  Set->SkipRemovedElements(CurrentIndex);
  return *this;
}

bool PhiNodeSetIterator::operator==(const PhiNodeSetIterator &RHS) const {
  return CurrentIndex == RHS.CurrentIndex;
}

bool PhiNodeSetIterator::operator!=(const PhiNodeSetIterator &RHS) const {
  return !((*this) == RHS);
}
3240
3241 /// Keep track of simplification of Phi nodes.
3242 /// Accept the set of all phi nodes and erase phi node from this set
3243 /// if it is simplified.
3244 class SimplificationTracker {
3245 DenseMap<Value *, Value *> Storage;
3246 const SimplifyQuery &SQ;
3247 // Tracks newly created Phi nodes. The elements are iterated by insertion
3248 // order.
3249 PhiNodeSet AllPhiNodes;
3250 // Tracks newly created Select nodes.
3251 SmallPtrSet<SelectInst *, 32> AllSelectNodes;
3252
public:
  SimplificationTracker(const SimplifyQuery &sq)
      : SQ(sq) {}

  Value *Get(Value *V) {
    do {
      auto SV = Storage.find(V);
      if (SV == Storage.end())
        return V;
      V = SV->second;
    } while (true);
  }
3265
  Value *Simplify(Value *Val) {
    SmallVector<Value *, 32> WorkList;
    SmallPtrSet<Value *, 32> Visited;
    WorkList.push_back(Val);
    while (!WorkList.empty()) {
      auto *P = WorkList.pop_back_val();
      if (!Visited.insert(P).second)
        continue;
      if (auto *PI = dyn_cast<Instruction>(P))
        if (Value *V = SimplifyInstruction(PI, SQ)) {
          for (auto *U : PI->users())
            WorkList.push_back(cast<Value>(U));
          Put(PI, V);
          PI->replaceAllUsesWith(V);
          if (auto *PHI = dyn_cast<PHINode>(PI))
            AllPhiNodes.erase(PHI);
          if (auto *Select = dyn_cast<SelectInst>(PI))
            AllSelectNodes.erase(Select);
          PI->eraseFromParent();
        }
    }
    return Get(Val);
  }
3289
  void Put(Value *From, Value *To) {
    Storage.insert({ From, To });
  }

  void ReplacePhi(PHINode *From, PHINode *To) {
    Value *OldReplacement = Get(From);
    while (OldReplacement != From) {
      From = To;
      To = dyn_cast<PHINode>(OldReplacement);
      OldReplacement = Get(From);
    }
    assert(To && Get(To) == To && "Replacement PHI node is already replaced.");
    Put(From, To);
    From->replaceAllUsesWith(To);
    AllPhiNodes.erase(From);
    From->eraseFromParent();
  }
3307
  PhiNodeSet &newPhiNodes() { return AllPhiNodes; }

  void insertNewPhi(PHINode *PN) { AllPhiNodes.insert(PN); }

  void insertNewSelect(SelectInst *SI) { AllSelectNodes.insert(SI); }

  unsigned countNewPhiNodes() const { return AllPhiNodes.size(); }

  unsigned countNewSelectNodes() const { return AllSelectNodes.size(); }

  void destroyNewNodes(Type *CommonType) {
    // For safe erasing, replace the uses with a dummy value first.
    auto *Dummy = UndefValue::get(CommonType);
    for (auto *I : AllPhiNodes) {
      I->replaceAllUsesWith(Dummy);
      I->eraseFromParent();
    }
    AllPhiNodes.clear();
    for (auto *I : AllSelectNodes) {
      I->replaceAllUsesWith(Dummy);
      I->eraseFromParent();
    }
    AllSelectNodes.clear();
  }
3332 };
3333
3334 /// A helper class for combining addressing modes.
3335 class AddressingModeCombiner {
3336 typedef DenseMap<Value *, Value *> FoldAddrToValueMapping;
3337 typedef std::pair<PHINode *, PHINode *> PHIPair;
3338
3339 private:
3340 /// The addressing modes we've collected.
3341 SmallVector<ExtAddrMode, 16> AddrModes;
3342
3343 /// The field in which the AddrModes differ, when we have more than one.
3344 ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField;
3345
3346 /// Are the AddrModes that we have all just equal to their original values?
3347 bool AllAddrModesTrivial = true;
3348
3349 /// Common Type for all different fields in addressing modes.
3350 Type *CommonType;
3351
3352 /// SimplifyQuery for simplifyInstruction utility.
3353 const SimplifyQuery &SQ;
3354
3355 /// Original Address.
3356 Value *Original;
3357
3358 public:
  AddressingModeCombiner(const SimplifyQuery &_SQ, Value *OriginalValue)
      : CommonType(nullptr), SQ(_SQ), Original(OriginalValue) {}

  /// Get the combined AddrMode
  const ExtAddrMode &getAddrMode() const {
    return AddrModes[0];
  }
3366
  /// Add a new AddrMode if it's compatible with the AddrModes we already
  /// have.
  /// \return True iff we succeeded in doing so.
  bool addNewAddrMode(ExtAddrMode &NewAddrMode) {
    // Take note of whether we have any non-trivial AddrModes, as we need to
    // detect when all AddrModes are trivial, since then we would introduce a
    // phi or select that just duplicates what's already there.
    AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial();
3375
3376 // If this is the first addrmode then everything is fine.
3377 if (AddrModes.empty()) {
3378 AddrModes.emplace_back(NewAddrMode);
3379 return true;
3380 }
3381
3382 // Figure out how different this is from the other address modes, which we
3383 // can do just by comparing against the first one given that we only care
3384 // about the cumulative difference.
3385 ExtAddrMode::FieldName ThisDifferentField =
3386 AddrModes[0].compare(NewAddrMode);
3387 if (DifferentField == ExtAddrMode::NoField)
3388 DifferentField = ThisDifferentField;
3389 else if (DifferentField != ThisDifferentField)
3390 DifferentField = ExtAddrMode::MultipleFields;
3391
3392 // If NewAddrMode differs in more than one dimension we cannot handle it.
3393 bool CanHandle = DifferentField != ExtAddrMode::MultipleFields;
3394
    // If the Scale field is different then we reject.
    CanHandle = CanHandle && DifferentField != ExtAddrMode::ScaleField;

    // We also must reject the case when the base offset is different and the
    // scaled reg is not null: we cannot handle that case because the merge of
    // the different offsets would then have to be used as the ScaleReg.
    CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseOffsField ||
                              !NewAddrMode.ScaledReg);

    // We also must reject the case when the GV is different and a BaseReg is
    // installed, because we want to use the base reg as the merge of the GV
    // values.
    CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseGVField ||
                              !NewAddrMode.HasBaseReg);

    // Even if NewAddrMode is the same, we still need to collect it because the
    // original value is different; later we will need all the original values
    // as anchors while finding the common Phi node.
3412 if (CanHandle)
3413 AddrModes.emplace_back(NewAddrMode);
3414 else
3415 AddrModes.clear();
3416
3417 return CanHandle;
3418 }
3419
  /// Combine the addressing modes we've collected into a single
  /// addressing mode.
  /// \return True iff we successfully combined them or we only had one so
  /// didn't need to combine them anyway.
  bool combineAddrModes() {
    // If we have no AddrModes then they can't be combined.
    if (AddrModes.size() == 0)
      return false;

    // A single AddrMode can trivially be combined.
    if (AddrModes.size() == 1 || DifferentField == ExtAddrMode::NoField)
      return true;
3432
3433 // If the AddrModes we collected are all just equal to the value they are
3434 // derived from then combining them wouldn't do anything useful.
3435 if (AllAddrModesTrivial)
3436 return false;
3437
3438 if (!addrModeCombiningAllowed())
3439 return false;
3440
3441 // Build a map between <original value, basic block where we saw it> to
3442 // value of base register.
3443 // Bail out if there is no common type.
3444 FoldAddrToValueMapping Map;
3445 if (!initializeMap(Map))
3446 return false;
3447
3448 Value *CommonValue = findCommon(Map);
3449 if (CommonValue)
3450 AddrModes[0].SetCombinedField(DifferentField, CommonValue, AddrModes);
3451 return CommonValue != nullptr;
3452 }
3453
3454 private:
  /// Initialize Map with anchor values. For every address seen,
  /// we record the value of the differing field in that address.
  /// At the same time we find a common type for the differing fields which we
  /// will use to create new Phi/Select nodes. Keep it in the CommonType field.
  /// Return false if no common type is found.
  bool initializeMap(FoldAddrToValueMapping &Map) {
    // Keep track of keys where the value is null. We will need to replace it
    // with constant null when we know the common type.
    SmallVector<Value *, 2> NullValue;
    Type *IntPtrTy = SQ.DL.getIntPtrType(AddrModes[0].OriginalValue->getType());
3465 for (auto &AM : AddrModes) {
3466 Value *DV = AM.GetFieldAsValue(DifferentField, IntPtrTy);
3467 if (DV) {
3468 auto *Type = DV->getType();
3469 if (CommonType && CommonType != Type)
3470 return false;
3471 CommonType = Type;
3472 Map[AM.OriginalValue] = DV;
3473 } else {
3474 NullValue.push_back(AM.OriginalValue);
3475 }
3476 }
3477 assert(CommonType && "At least one non-null value must be!");
3478 for (auto *V : NullValue)
3479 Map[V] = Constant::getNullValue(CommonType);
3480 return true;
3481 }
3482
  /// We have a mapping between value A and another value B where B was a field
  /// in the addressing mode represented by A. We also have an original value C
  /// representing the address we start from. Traversing from C through phis
  /// and selects, we ended up with the A's in the map. This utility function
  /// tries to find a value V which is a field in addressing mode C such that,
  /// traversing through phi nodes and selects from V, we end up in the
  /// corresponding values B in the map.
  /// The utility will create new Phis/Selects if needed.
3490 // The simple example looks as follows:
3491 // BB1:
3492 // p1 = b1 + 40
3493 // br cond BB2, BB3
3494 // BB2:
3495 // p2 = b2 + 40
3496 // br BB3
3497 // BB3:
3498 // p = phi [p1, BB1], [p2, BB2]
3499 // v = load p
3500 // Map is
3501 // p1 -> b1
3502 // p2 -> b2
3503 // Request is
3504 // p -> ?
3505 // The function tries to find or build phi [b1, BB1], [b2, BB2] in BB3.
  Value *findCommon(FoldAddrToValueMapping &Map) {
    // Tracks the simplification of newly created phi nodes. The reason we use
    // this mapping is that we will add newly created Phi nodes in AddrToBase.
    // Simplification of Phi nodes is recursive, so some Phi node may
    // be simplified after we added it to AddrToBase. In reality this
    // simplification is possible only if the original phis/selects were not
    // simplified yet.
    // Using this mapping we can find the current value in AddrToBase.
3514 SimplificationTracker ST(SQ);
3515
3516 // First step, DFS to create PHI nodes for all intermediate blocks.
3517 // Also fill traverse order for the second step.
3518 SmallVector<Value *, 32> TraverseOrder;
3519 InsertPlaceholders(Map, TraverseOrder, ST);
3520
3521 // Second Step, fill new nodes by merged values and simplify if possible.
3522 FillPlaceholders(Map, TraverseOrder, ST);
3523
3524 if (!AddrSinkNewSelects && ST.countNewSelectNodes() > 0) {
3525 ST.destroyNewNodes(CommonType);
3526 return nullptr;
3527 }
3528
    // Now we'd like to match the new Phi nodes to existing ones.
3530 unsigned PhiNotMatchedCount = 0;
3531 if (!MatchPhiSet(ST, AddrSinkNewPhis, PhiNotMatchedCount)) {
3532 ST.destroyNewNodes(CommonType);
3533 return nullptr;
3534 }
3535
3536 auto *Result = ST.Get(Map.find(Original)->second);
3537 if (Result) {
3538 NumMemoryInstsPhiCreated += ST.countNewPhiNodes() + PhiNotMatchedCount;
3539 NumMemoryInstsSelectCreated += ST.countNewSelectNodes();
3540 }
3541 return Result;
3542 }
3543
  /// Try to match PHI node to Candidate.
  /// Matcher tracks the matched Phi nodes.
  bool MatchPhiNode(PHINode *PHI, PHINode *Candidate,
                    SmallSetVector<PHIPair, 8> &Matcher,
                    PhiNodeSet &PhiNodesToMatch) {
    SmallVector<PHIPair, 8> WorkList;
    Matcher.insert({ PHI, Candidate });
    SmallSet<PHINode *, 8> MatchedPHIs;
    MatchedPHIs.insert(PHI);
    WorkList.push_back({ PHI, Candidate });
    SmallSet<PHIPair, 8> Visited;
3555 while (!WorkList.empty()) {
3556 auto Item = WorkList.pop_back_val();
3557 if (!Visited.insert(Item).second)
3558 continue;
      // We iterate over all incoming values of the Phi to compare them.
      // If the values are different, both of them are Phis, the first one is
      // a Phi we added (subject to match), and both of them are in the same
      // basic block, then we can match our pair if the incoming values match.
      // So we state that these values match and add them to the work list to
      // verify that.
3564 for (auto B : Item.first->blocks()) {
3565 Value *FirstValue = Item.first->getIncomingValueForBlock(B);
3566 Value *SecondValue = Item.second->getIncomingValueForBlock(B);
3567 if (FirstValue == SecondValue)
3568 continue;
3569
3570 PHINode *FirstPhi = dyn_cast<PHINode>(FirstValue);
3571 PHINode *SecondPhi = dyn_cast<PHINode>(SecondValue);
3572
        // If one of them is not a Phi, or
        // the first one is not a Phi node from the set we'd like to match, or
        // the Phi nodes are from different basic blocks, then
        // we will not be able to match.
3577 if (!FirstPhi || !SecondPhi || !PhiNodesToMatch.count(FirstPhi) ||
3578 FirstPhi->getParent() != SecondPhi->getParent())
3579 return false;
3580
3581 // If we already matched them then continue.
3582 if (Matcher.count({ FirstPhi, SecondPhi }))
3583 continue;
        // So the values are different and do not match. We need them to
        // match. (But we register no more than one match per PHI node, so that
        // we won't later try to replace them twice.)
        if (MatchedPHIs.insert(FirstPhi).second)
          Matcher.insert({ FirstPhi, SecondPhi });
        // But we must check it.
        WorkList.push_back({ FirstPhi, SecondPhi });
3591 }
3592 }
3593 return true;
3594 }
3595
  /// For the given set of PHI nodes (in the SimplificationTracker) try
  /// to find their equivalents.
  /// Returns false if this matching fails and creation of new Phi is disabled.
  bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes,
                   unsigned &PhiNotMatchedCount) {
    // Matched and PhiNodesToMatch iterate their elements in a deterministic
    // order, so the replacements (ReplacePhi) are also done in a deterministic
    // order.
    SmallSetVector<PHIPair, 8> Matched;
    SmallPtrSet<PHINode *, 8> WillNotMatch;
    PhiNodeSet &PhiNodesToMatch = ST.newPhiNodes();
3607 while (PhiNodesToMatch.size()) {
3608 PHINode *PHI = *PhiNodesToMatch.begin();
3609
      // Record this PHI up front: if no Phi node in the basic block matches,
      // none of the nodes collected here will match.
      WillNotMatch.clear();
      WillNotMatch.insert(PHI);

      // Traverse all Phis until we find an equivalent or fail to do so.
      bool IsMatched = false;
      for (auto &P : PHI->getParent()->phis()) {
        if (&P == PHI)
          continue;
        if ((IsMatched = MatchPhiNode(PHI, &P, Matched, PhiNodesToMatch)))
          break;
        // If it does not match, collect all Phi nodes from the matcher.
        // If we end up with no match, then all these Phi nodes will not match
        // later.
3624 for (auto M : Matched)
3625 WillNotMatch.insert(M.first);
3626 Matched.clear();
3627 }
3628 if (IsMatched) {
3629 // Replace all matched values and erase them.
3630 for (auto MV : Matched)
3631 ST.ReplacePhi(MV.first, MV.second);
3632 Matched.clear();
3633 continue;
3634 }
3635 // If we are not allowed to create new nodes then bail out.
3636 if (!AllowNewPhiNodes)
3637 return false;
3638 // Just remove all seen values in matcher. They will not match anything.
3639 PhiNotMatchedCount += WillNotMatch.size();
3640 for (auto *P : WillNotMatch)
3641 PhiNodesToMatch.erase(P);
3642 }
3643 return true;
3644 }
  /// Fill the placeholders with values from predecessors and simplify them.
  void FillPlaceholders(FoldAddrToValueMapping &Map,
                        SmallVectorImpl<Value *> &TraverseOrder,
                        SimplificationTracker &ST) {
3649 while (!TraverseOrder.empty()) {
3650 Value *Current = TraverseOrder.pop_back_val();
3651 assert(Map.find(Current) != Map.end() && "No node to fill!!!");
3652 Value *V = Map[Current];
3653
3654 if (SelectInst *Select = dyn_cast<SelectInst>(V)) {
3655 // CurrentValue also must be Select.
3656 auto *CurrentSelect = cast<SelectInst>(Current);
3657 auto *TrueValue = CurrentSelect->getTrueValue();
3658 assert(Map.find(TrueValue) != Map.end() && "No True Value!");
3659 Select->setTrueValue(ST.Get(Map[TrueValue]));
3660 auto *FalseValue = CurrentSelect->getFalseValue();
3661 assert(Map.find(FalseValue) != Map.end() && "No False Value!");
3662 Select->setFalseValue(ST.Get(Map[FalseValue]));
3663 } else {
3664 // Must be a Phi node then.
3665 auto *PHI = cast<PHINode>(V);
3666 // Fill the Phi node with values from predecessors.
3667 for (auto *B : predecessors(PHI->getParent())) {
3668 Value *PV = cast<PHINode>(Current)->getIncomingValueForBlock(B);
3669 assert(Map.find(PV) != Map.end() && "No predecessor Value!");
3670 PHI->addIncoming(ST.Get(Map[PV]), B);
3671 }
3672 }
3673 Map[Current] = ST.Simplify(V);
3674 }
3675 }
3676
  /// Starting from the original value, recursively iterates over the def-use
  /// chain up to known ending values represented in the map. For each
  /// traversed phi/select, inserts a placeholder Phi or Select.
  /// Reports all newly created Phi/Select nodes by adding them to the set.
  /// Also records the order in which the values have been traversed.
  void InsertPlaceholders(FoldAddrToValueMapping &Map,
                          SmallVectorImpl<Value *> &TraverseOrder,
                          SimplificationTracker &ST) {
    SmallVector<Value *, 32> Worklist;
    assert((isa<PHINode>(Original) || isa<SelectInst>(Original)) &&
           "Address must be a Phi or Select node");
    auto *Dummy = UndefValue::get(CommonType);
3689 Worklist.push_back(Original);
3690 while (!Worklist.empty()) {
3691 Value *Current = Worklist.pop_back_val();
3692 // if it is already visited or it is an ending value then skip it.
3693 if (Map.find(Current) != Map.end())
3694 continue;
3695 TraverseOrder.push_back(Current);
3696
3697 // CurrentValue must be a Phi node or select. All others must be covered
3698 // by anchors.
3699 if (SelectInst *CurrentSelect = dyn_cast<SelectInst>(Current)) {
3700 // Is it OK to get metadata from OrigSelect?!
3701 // Create a Select placeholder with dummy value.
3702 SelectInst *Select = SelectInst::Create(
3703 CurrentSelect->getCondition(), Dummy, Dummy,
3704 CurrentSelect->getName(), CurrentSelect, CurrentSelect);
3705 Map[Current] = Select;
3706 ST.insertNewSelect(Select);
3707 // We are interested in True and False values.
3708 Worklist.push_back(CurrentSelect->getTrueValue());
3709 Worklist.push_back(CurrentSelect->getFalseValue());
3710 } else {
3711 // It must be a Phi node then.
3712 PHINode *CurrentPhi = cast<PHINode>(Current);
3713 unsigned PredCount = CurrentPhi->getNumIncomingValues();
3714 PHINode *PHI =
3715 PHINode::Create(CommonType, PredCount, "sunk_phi", CurrentPhi);
3716 Map[Current] = PHI;
3717 ST.insertNewPhi(PHI);
3718 for (Value *P : CurrentPhi->incoming_values())
3719 Worklist.push_back(P);
3720 }
3721 }
3722 }
3723
  bool addrModeCombiningAllowed() {
3725 if (DisableComplexAddrModes)
3726 return false;
3727 switch (DifferentField) {
3728 default:
3729 return false;
3730 case ExtAddrMode::BaseRegField:
3731 return AddrSinkCombineBaseReg;
3732 case ExtAddrMode::BaseGVField:
3733 return AddrSinkCombineBaseGV;
3734 case ExtAddrMode::BaseOffsField:
3735 return AddrSinkCombineBaseOffs;
3736 case ExtAddrMode::ScaledRegField:
3737 return AddrSinkCombineScaledReg;
3738 }
3739 }
3740 };
3741 } // end anonymous namespace
3742
/// Try adding ScaleReg*Scale to the current addressing mode.
/// Return true and update AddrMode if this addr mode is legal for the target,
/// false if not.
bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale,
                                             unsigned Depth) {
3748 // If Scale is 1, then this is the same as adding ScaleReg to the addressing
3749 // mode. Just process that directly.
3750 if (Scale == 1)
3751 return matchAddr(ScaleReg, Depth);
3752
3753 // If the scale is 0, it takes nothing to add this.
3754 if (Scale == 0)
3755 return true;
3756
3757 // If we already have a scale of this value, we can add to it, otherwise, we
3758 // need an available scale field.
3759 if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
3760 return false;
3761
3762 ExtAddrMode TestAddrMode = AddrMode;
3763
3764 // Add scale to turn X*4+X*3 -> X*7. This could also do things like
3765 // [A+B + A*7] -> [B+A*8].
3766 TestAddrMode.Scale += Scale;
3767 TestAddrMode.ScaledReg = ScaleReg;
3768
3769 // If the new address isn't legal, bail out.
3770 if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
3771 return false;
3772
3773 // It was legal, so commit it.
3774 AddrMode = TestAddrMode;
3775
  // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now
  // to see if ScaleReg is actually X+C. If so, we can turn this into adding
  // X*Scale + C*Scale to the addr mode.
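  // For example (an illustrative case, assuming no scale was matched before):
  // with Scale == 4 and ScaleReg == (add %x, 3), the code below retries with
  // ScaledReg = %x, Scale = 4 and BaseOffs increased by 12, and keeps that
  // mode if the target still reports it as legal.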
3779 ConstantInt *CI = nullptr; Value *AddLHS = nullptr;
3780 if (isa<Instruction>(ScaleReg) && // not a constant expr.
3781 match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI))) &&
3782 CI->getValue().isSignedIntN(64)) {
3783 TestAddrMode.InBounds = false;
3784 TestAddrMode.ScaledReg = AddLHS;
3785 TestAddrMode.BaseOffs += CI->getSExtValue() * TestAddrMode.Scale;
3786
3787 // If this addressing mode is legal, commit it and remember that we folded
3788 // this instruction.
3789 if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
3790 AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
3791 AddrMode = TestAddrMode;
3792 return true;
3793 }
3794 }
3795
3796 // Otherwise, not (x+c)*scale, just return what we have.
3797 return true;
3798 }
3799
/// This is a little filter, which returns true if an addressing computation
/// involving I might be folded into a load/store accessing it.
/// This doesn't need to be perfect, but needs to accept at least
/// the set of instructions that MatchOperationAddr can.
static bool MightBeFoldableInst(Instruction *I) {
  switch (I->getOpcode()) {
3806 case Instruction::BitCast:
3807 case Instruction::AddrSpaceCast:
3808 // Don't touch identity bitcasts.
3809 if (I->getType() == I->getOperand(0)->getType())
3810 return false;
3811 return I->getType()->isIntOrPtrTy();
3812 case Instruction::PtrToInt:
3813 // PtrToInt is always a noop, as we know that the int type is pointer sized.
3814 return true;
3815 case Instruction::IntToPtr:
3816 // We know the input is intptr_t, so this is foldable.
3817 return true;
3818 case Instruction::Add:
3819 return true;
3820 case Instruction::Mul:
3821 case Instruction::Shl:
3822 // Can only handle X*C and X << C.
3823 return isa<ConstantInt>(I->getOperand(1));
3824 case Instruction::GetElementPtr:
3825 return true;
3826 default:
3827 return false;
3828 }
3829 }
3830
/// Check whether or not \p Val is a legal instruction for \p TLI.
/// \note \p Val is assumed to be the product of some type promotion.
/// Therefore if \p Val has an undefined state in \p TLI, this is assumed
/// to be legal, as the non-promoted value would have had the same state.
static bool isPromotedInstructionLegal(const TargetLowering &TLI,
                                       const DataLayout &DL, Value *Val) {
  Instruction *PromotedInst = dyn_cast<Instruction>(Val);
3838 if (!PromotedInst)
3839 return false;
3840 int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
3841 // If the ISDOpcode is undefined, it was undefined before the promotion.
3842 if (!ISDOpcode)
3843 return true;
3844 // Otherwise, check if the promoted instruction is legal or not.
3845 return TLI.isOperationLegalOrCustom(
3846 ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
3847 }
3848
3849 namespace {
3850
/// Helper class to perform type promotion.
class TypePromotionHelper {
  /// Utility function to add a promoted instruction \p ExtOpnd to
  /// \p PromotedInsts and record the type of extension we have seen.
  static void addPromotedInst(InstrToOrigTy &PromotedInsts,
                              Instruction *ExtOpnd,
                              bool IsSExt) {
3858 ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
3859 InstrToOrigTy::iterator It = PromotedInsts.find(ExtOpnd);
3860 if (It != PromotedInsts.end()) {
3861 // If the new extension is same as original, the information in
3862 // PromotedInsts[ExtOpnd] is still correct.
3863 if (It->second.getInt() == ExtTy)
3864 return;
3865
3866 // Now the new extension is different from old extension, we make
3867 // the type information invalid by setting extension type to
3868 // BothExtension.
3869 ExtTy = BothExtension;
3870 }
3871 PromotedInsts[ExtOpnd] = TypeIsSExt(ExtOpnd->getType(), ExtTy);
3872 }
3873
3874 /// Utility function to query the original type of instruction \p Opnd
3875 /// with a matched extension type. If the extension doesn't match, we
3876 /// cannot use the information we had on the original type.
3877 /// BothExtension doesn't match any extension type.
  static const Type *getOrigType(const InstrToOrigTy &PromotedInsts,
                                 Instruction *Opnd,
                                 bool IsSExt) {
3881 ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
3882 InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
3883 if (It != PromotedInsts.end() && It->second.getInt() == ExtTy)
3884 return It->second.getPointer();
3885 return nullptr;
3886 }
3887
3888 /// Utility function to check whether or not a sign or zero extension
3889 /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by
3890 /// either using the operands of \p Inst or promoting \p Inst.
3891 /// The type of the extension is defined by \p IsSExt.
3892 /// In other words, check if:
3893 /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType.
3894 /// #1 Promotion applies:
3895 /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...).
3896 /// #2 Operand reuses:
3897 /// ext opnd1 to ConsideredExtType.
3898 /// \p PromotedInsts maps the instructions to their type before promotion.
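  /// For instance (an illustrative example): for
  ///   %t = trunc i32 %x to i16
  ///   %e = sext i16 %t to i32
  /// case #2 lets the sext reuse %x directly, provided %x is known (either as
  /// a sext itself, or via PromotedInsts) to carry sign-extended bits from a
  /// sufficiently narrow original type.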
3899 static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType,
3900 const InstrToOrigTy &PromotedInsts, bool IsSExt);
3901
3902 /// Utility function to determine if \p OpIdx should be promoted when
3903 /// promoting \p Inst.
  static bool shouldExtOperand(const Instruction *Inst, int OpIdx) {
    return !(isa<SelectInst>(Inst) && OpIdx == 0);
  }
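
  // For instance (illustrative): given 'select i1 %c, i16 %a, i16 %b', only
  // operands 1 and 2 (%a and %b) are candidates for extension; the i1
  // condition in operand 0 always keeps its type.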
3907
3908 /// Utility function to promote the operand of \p Ext when this
3909 /// operand is a promotable trunc or sext or zext.
3910 /// \p PromotedInsts maps the instructions to their type before promotion.
3911 /// \p CreatedInstsCost[out] contains the cost of all instructions
3912 /// created to promote the operand of Ext.
3913 /// Newly added extensions are inserted in \p Exts.
3914 /// Newly added truncates are inserted in \p Truncs.
3915 /// Should never be called directly.
3916 /// \return The promoted value which is used instead of Ext.
3917 static Value *promoteOperandForTruncAndAnyExt(
3918 Instruction *Ext, TypePromotionTransaction &TPT,
3919 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
3920 SmallVectorImpl<Instruction *> *Exts,
3921 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI);
3922
3923 /// Utility function to promote the operand of \p Ext when this
3924 /// operand is promotable and is not a supported trunc or sext.
3925 /// \p PromotedInsts maps the instructions to their type before promotion.
3926 /// \p CreatedInstsCost[out] contains the cost of all the instructions
3927 /// created to promote the operand of Ext.
3928 /// Newly added extensions are inserted in \p Exts.
3929 /// Newly added truncates are inserted in \p Truncs.
3930 /// Should never be called directly.
3931 /// \return The promoted value which is used instead of Ext.
3932 static Value *promoteOperandForOther(Instruction *Ext,
3933 TypePromotionTransaction &TPT,
3934 InstrToOrigTy &PromotedInsts,
3935 unsigned &CreatedInstsCost,
3936 SmallVectorImpl<Instruction *> *Exts,
3937 SmallVectorImpl<Instruction *> *Truncs,
3938 const TargetLowering &TLI, bool IsSExt);
3939
3940 /// \see promoteOperandForOther.
  static Value *signExtendOperandForOther(
      Instruction *Ext, TypePromotionTransaction &TPT,
      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
      SmallVectorImpl<Instruction *> *Exts,
      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
    return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
                                  Exts, Truncs, TLI, true);
  }

  /// \see promoteOperandForOther.
  static Value *zeroExtendOperandForOther(
      Instruction *Ext, TypePromotionTransaction &TPT,
      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
      SmallVectorImpl<Instruction *> *Exts,
      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
    return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
                                  Exts, Truncs, TLI, false);
  }
3959
3960 public:
3961 /// Type for the utility function that promotes the operand of Ext.
3962 using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT,
3963 InstrToOrigTy &PromotedInsts,
3964 unsigned &CreatedInstsCost,
3965 SmallVectorImpl<Instruction *> *Exts,
3966 SmallVectorImpl<Instruction *> *Truncs,
3967 const TargetLowering &TLI);
3968
3969 /// Given a sign/zero extend instruction \p Ext, return the appropriate
3970 /// action to promote the operand of \p Ext instead of using Ext.
3971 /// \return NULL if no promotable action is possible with the current
3972 /// sign extension.
3973 /// \p InsertedInsts keeps track of all the instructions inserted by the
3974 /// other CodeGenPrepare optimizations. This information is important
3975 /// because we do not want to promote these instructions as CodeGenPrepare
3976 /// will reinsert them later. Thus creating an infinite loop: create/remove.
3977 /// \p PromotedInsts maps the instructions to their type before promotion.
3978 static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
3979 const TargetLowering &TLI,
3980 const InstrToOrigTy &PromotedInsts);
3981 };
3982
3983 } // end anonymous namespace
3984
bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
                                        Type *ConsideredExtType,
                                        const InstrToOrigTy &PromotedInsts,
                                        bool IsSExt) {
3989 // The promotion helper does not know how to deal with vector types yet.
3990 // To be able to fix that, we would need to fix the places where we
3991 // statically extend, e.g., constants and such.
3992 if (Inst->getType()->isVectorTy())
3993 return false;
3994
3995 // We can always get through zext.
3996 if (isa<ZExtInst>(Inst))
3997 return true;
3998
3999 // sext(sext) is ok too.
4000 if (IsSExt && isa<SExtInst>(Inst))
4001 return true;
4002
4003 // We can get through binary operator, if it is legal. In other words, the
4004 // binary operator must have a nuw or nsw flag.
4005 const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
4006 if (isa_and_nonnull<OverflowingBinaryOperator>(BinOp) &&
4007 ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
4008 (IsSExt && BinOp->hasNoSignedWrap())))
4009 return true;
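  // For example (an illustrative case, not taken from a particular test):
  // 'sext i32 (add nsw i16 %a, %b)' can be promoted to
  // 'add nsw i32 (sext %a), (sext %b)', because nsw guarantees the narrow
  // addition never wrapped in the signed sense.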
4010
4011 // ext(and(opnd, cst)) --> and(ext(opnd), ext(cst))
4012 if ((Inst->getOpcode() == Instruction::And ||
4013 Inst->getOpcode() == Instruction::Or))
4014 return true;
4015
4016 // ext(xor(opnd, cst)) --> xor(ext(opnd), ext(cst))
4017 if (Inst->getOpcode() == Instruction::Xor) {
4018 const ConstantInt *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1));
4019 // Make sure it is not a NOT.
4020 if (Cst && !Cst->getValue().isAllOnesValue())
4021 return true;
4022 }
4023
  // zext(lshr(opnd, cst)) --> lshr(zext(opnd), zext(cst))
  // It may change a poisoned value into a regular value, like
  //     zext i32 (lshr i8 %val, 12) --> lshr i32 (zext i8 %val), 12
  //            poisoned value                    regular value
  // It should be OK since undef covers a valid value.
4029 if (Inst->getOpcode() == Instruction::LShr && !IsSExt)
4030 return true;
4031
4032 // and(ext(shl(opnd, cst)), cst) --> and(shl(ext(opnd), ext(cst)), cst)
4033 // It may change a poisoned value into a regular value, like
4034 // zext i32 (shl i8 %val, 12) --> shl i32 (zext i8 %val), 12
4035 // poisoned value regular value
4036 // It should be OK since undef covers valid value.
4037 if (Inst->getOpcode() == Instruction::Shl && Inst->hasOneUse()) {
4038 const auto *ExtInst = cast<const Instruction>(*Inst->user_begin());
4039 if (ExtInst->hasOneUse()) {
4040 const auto *AndInst = dyn_cast<const Instruction>(*ExtInst->user_begin());
4041 if (AndInst && AndInst->getOpcode() == Instruction::And) {
4042 const auto *Cst = dyn_cast<ConstantInt>(AndInst->getOperand(1));
4043 if (Cst &&
4044 Cst->getValue().isIntN(Inst->getType()->getIntegerBitWidth()))
4045 return true;
4046 }
4047 }
4048 }
4049
4050 // Check if we can do the following simplification.
4051 // ext(trunc(opnd)) --> ext(opnd)
4052 if (!isa<TruncInst>(Inst))
4053 return false;
4054
4055 Value *OpndVal = Inst->getOperand(0);
4056 // Check if we can use this operand in the extension.
4057 // If the type is larger than the result type of the extension, we cannot.
4058 if (!OpndVal->getType()->isIntegerTy() ||
4059 OpndVal->getType()->getIntegerBitWidth() >
4060 ConsideredExtType->getIntegerBitWidth())
4061 return false;
4062
4063 // If the operand of the truncate is not an instruction, we will not have
4064 // any information on the dropped bits.
4065 // (Actually we could for constant but it is not worth the extra logic).
4066 Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
4067 if (!Opnd)
4068 return false;
4069
4070 // Check if the source of the type is narrow enough.
4071 // I.e., check that trunc just drops extended bits of the same kind of
4072 // the extension.
4073 // #1 get the type of the operand and check the kind of the extended bits.
4074 const Type *OpndType = getOrigType(PromotedInsts, Opnd, IsSExt);
4075 if (OpndType)
4076 ;
4077 else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
4078 OpndType = Opnd->getOperand(0)->getType();
4079 else
4080 return false;
4081
4082 // #2 check that the truncate just drops extended bits.
4083 return Inst->getType()->getIntegerBitWidth() >=
4084 OpndType->getIntegerBitWidth();
4085 }
4086
TypePromotionHelper::Action TypePromotionHelper::getAction(
    Instruction *Ext, const SetOfInstrs &InsertedInsts,
    const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
  assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
         "Unexpected instruction type");
  Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
  Type *ExtTy = Ext->getType();
  bool IsSExt = isa<SExtInst>(Ext);
  // If the operand of the extension is not an instruction, we cannot
  // get through.
  // If it is, check whether we can get through.
  if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt))
    return nullptr;
4100
4101 // Do not promote if the operand has been added by codegenprepare.
4102 // Otherwise, it means we are undoing an optimization that is likely to be
4103 // redone, thus causing potential infinite loop.
4104 if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
4105 return nullptr;
4106
4107 // SExt or Trunc instructions.
4108 // Return the related handler.
4109 if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) ||
4110 isa<ZExtInst>(ExtOpnd))
4111 return promoteOperandForTruncAndAnyExt;
4112
4113 // Regular instruction.
4114 // Abort early if we will have to insert non-free instructions.
4115 if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType()))
4116 return nullptr;
4117 return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther;
4118 }
4119
Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt(
    Instruction *SExt, TypePromotionTransaction &TPT,
    InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
    SmallVectorImpl<Instruction *> *Exts,
    SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
  // By construction, the operand of SExt is an instruction. Otherwise we
  // cannot get through it and this method should not be called.
  Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
4128 Value *ExtVal = SExt;
4129 bool HasMergedNonFreeExt = false;
4130 if (isa<ZExtInst>(SExtOpnd)) {
4131 // Replace s|zext(zext(opnd))
4132 // => zext(opnd).
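    // An illustrative instance: 'sext i64 (zext i16 %v to i32)' collapses into
    // a single 'zext i16 %v to i64', since the zero extension already clears
    // the top bit, making the outer sign extension equivalent to a zext.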
4133 HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd);
4134 Value *ZExt =
4135 TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType());
4136 TPT.replaceAllUsesWith(SExt, ZExt);
4137 TPT.eraseInstruction(SExt);
4138 ExtVal = ZExt;
4139 } else {
4140 // Replace z|sext(trunc(opnd)) or sext(sext(opnd))
4141 // => z|sext(opnd).
4142 TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
4143 }
4144 CreatedInstsCost = 0;
4145
4146 // Remove dead code.
4147 if (SExtOpnd->use_empty())
4148 TPT.eraseInstruction(SExtOpnd);
4149
4150 // Check if the extension is still needed.
4151 Instruction *ExtInst = dyn_cast<Instruction>(ExtVal);
4152 if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) {
4153 if (ExtInst) {
4154 if (Exts)
4155 Exts->push_back(ExtInst);
4156 CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt;
4157 }
4158 return ExtVal;
4159 }
4160
4161 // At this point we have: ext ty opnd to ty.
4162 // Reassign the uses of ExtInst to the opnd and remove ExtInst.
4163 Value *NextVal = ExtInst->getOperand(0);
4164 TPT.eraseInstruction(ExtInst, NextVal);
4165 return NextVal;
4166 }
4167
Value *TypePromotionHelper::promoteOperandForOther(
    Instruction *Ext, TypePromotionTransaction &TPT,
    InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
    SmallVectorImpl<Instruction *> *Exts,
    SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI,
    bool IsSExt) {
4174 // By construction, the operand of Ext is an instruction. Otherwise we cannot
4175 // get through it and this method should not be called.
4176 Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0));
4177 CreatedInstsCost = 0;
4178 if (!ExtOpnd->hasOneUse()) {
4179 // ExtOpnd will be promoted.
4180 // All its uses, but Ext, will need to use a truncated value of the
4181 // promoted version.
4182 // Create the truncate now.
4183 Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType());
4184 if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) {
4185 // Insert it just after the definition.
4186 ITrunc->moveAfter(ExtOpnd);
4187 if (Truncs)
4188 Truncs->push_back(ITrunc);
4189 }
4190
4191 TPT.replaceAllUsesWith(ExtOpnd, Trunc);
4192 // Restore the operand of Ext (which has been replaced by the previous call
4193 // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext.
4194 TPT.setOperand(Ext, 0, ExtOpnd);
4195 }
4196
4197 // Get through the Instruction:
4198 // 1. Update its type.
4199 // 2. Replace the uses of Ext by Inst.
4200 // 3. Extend each operand that needs to be extended.
4201
4202 // Remember the original type of the instruction before promotion.
4203 // This is useful to know that the high bits are sign extended bits.
4204 addPromotedInst(PromotedInsts, ExtOpnd, IsSExt);
4205 // Step #1.
4206 TPT.mutateType(ExtOpnd, Ext->getType());
4207 // Step #2.
4208 TPT.replaceAllUsesWith(Ext, ExtOpnd);
4209 // Step #3.
4210 Instruction *ExtForOpnd = Ext;
4211
4212 LLVM_DEBUG(dbgs() << "Propagate Ext to operands\n");
4213 for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
4214 ++OpIdx) {
4215 LLVM_DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n');
4216 if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() ||
4217 !shouldExtOperand(ExtOpnd, OpIdx)) {
4218 LLVM_DEBUG(dbgs() << "No need to propagate\n");
4219 continue;
4220 }
4221 // Check if we can statically extend the operand.
4222 Value *Opnd = ExtOpnd->getOperand(OpIdx);
4223 if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
4224 LLVM_DEBUG(dbgs() << "Statically extend\n");
4225 unsigned BitWidth = Ext->getType()->getIntegerBitWidth();
4226 APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)
4227 : Cst->getValue().zext(BitWidth);
4228 TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal));
4229 continue;
4230 }
    // UndefValues are typed, so we have to statically extend them.
4232 if (isa<UndefValue>(Opnd)) {
4233 LLVM_DEBUG(dbgs() << "Statically extend\n");
4234 TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
4235 continue;
4236 }
4237
    // Otherwise we have to explicitly extend the operand.
    // Check whether Ext was already reused to extend another operand.
    if (!ExtForOpnd) {
      // If so, create a new extension.
      LLVM_DEBUG(dbgs() << "More operands to ext\n");
4243 Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType())
4244 : TPT.createZExt(Ext, Opnd, Ext->getType());
4245 if (!isa<Instruction>(ValForExtOpnd)) {
4246 TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
4247 continue;
4248 }
4249 ExtForOpnd = cast<Instruction>(ValForExtOpnd);
4250 }
4251 if (Exts)
4252 Exts->push_back(ExtForOpnd);
4253 TPT.setOperand(ExtForOpnd, 0, Opnd);
4254
4255 // Move the sign extension before the insertion point.
4256 TPT.moveBefore(ExtForOpnd, ExtOpnd);
4257 TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd);
4258 CreatedInstsCost += !TLI.isExtFree(ExtForOpnd);
4259 // If more sexts are required, new instructions will have to be created.
4260 ExtForOpnd = nullptr;
4261 }
4262 if (ExtForOpnd == Ext) {
4263 LLVM_DEBUG(dbgs() << "Extension is useless now\n");
4264 TPT.eraseInstruction(Ext);
4265 }
4266 return ExtOpnd;
4267 }
4268
4269 /// Check whether or not promoting an instruction to a wider type is profitable.
4270 /// \p NewCost gives the cost of extension instructions created by the
4271 /// promotion.
4272 /// \p OldCost gives the cost of extension instructions before the promotion
4273 /// plus the number of instructions that have been
4274 /// matched in the addressing mode by the promotion.
4275 /// \p PromotedOperand is the value that has been promoted.
4276 /// \return True if the promotion is profitable, false otherwise.
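/// For example, if promoting through an 'add' creates one non-free extension
/// (NewCost = 1) while the original extension was non-free and one extra
/// instruction was folded into the addressing mode (OldCost = 1 + 1 = 2), the
/// promotion is profitable. When the two costs are equal, the promotion is
/// kept only if the promoted instruction is legal for the target.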
4277 bool AddressingModeMatcher::isPromotionProfitable(
4278 unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
4279 LLVM_DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost
4280 << '\n');
4281 // The cost of the new extensions is greater than the cost of the
4282 // old extension plus what we folded.
4283 // This is not profitable.
4284 if (NewCost > OldCost)
4285 return false;
4286 if (NewCost < OldCost)
4287 return true;
4288 // The promotion is neutral but it may help folding the sign extension in
4289 // loads for instance.
4290 // Check that we did not create an illegal instruction.
4291 return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
4292 }
4293
4294 /// Given an instruction or constant expr, see if we can fold the operation
4295 /// into the addressing mode. If so, update the addressing mode and return
4296 /// true, otherwise return false without modifying AddrMode.
4297 /// If \p MovedAway is not NULL, it indicates whether or not AddrInst has to
4298 /// be folded into the addressing mode on success.
4299 /// If \p MovedAway == true, \p AddrInst will not be part of the addressing
4300 /// mode because it has been moved away.
4301 /// Thus AddrInst must not be added to the matched instructions.
4302 /// This state can happen when AddrInst is a sext, since it may be moved away.
4303 /// Therefore, AddrInst may not be valid when MovedAway is true and it must
4304 /// not be referenced anymore.
4305 bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
4306 unsigned Depth,
4307 bool *MovedAway) {
4308 // Avoid exponential behavior on extremely deep expression trees.
4309 if (Depth >= 5) return false;
4310
4311 // By default, all matched instructions stay in place.
4312 if (MovedAway)
4313 *MovedAway = false;
4314
4315 switch (Opcode) {
4316 case Instruction::PtrToInt:
4317 // PtrToInt is always a noop, as we know that the int type is pointer sized.
4318 return matchAddr(AddrInst->getOperand(0), Depth);
4319 case Instruction::IntToPtr: {
4320 auto AS = AddrInst->getType()->getPointerAddressSpace();
4321 auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
4322 // This inttoptr is a no-op if the integer type is pointer sized.
4323 if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy)
4324 return matchAddr(AddrInst->getOperand(0), Depth);
4325 return false;
4326 }
4327 case Instruction::BitCast:
4328 // BitCast is always a noop, and we can handle it as long as it is
4329 // int->int or pointer->pointer (we don't want int<->fp or something).
4330 if (AddrInst->getOperand(0)->getType()->isIntOrPtrTy() &&
4331 // Don't touch identity bitcasts. These were probably put here by LSR,
4332 // and we don't want to mess around with them. Assume it knows what it
4333 // is doing.
4334 AddrInst->getOperand(0)->getType() != AddrInst->getType())
4335 return matchAddr(AddrInst->getOperand(0), Depth);
4336 return false;
4337 case Instruction::AddrSpaceCast: {
4338 unsigned SrcAS
4339 = AddrInst->getOperand(0)->getType()->getPointerAddressSpace();
4340 unsigned DestAS = AddrInst->getType()->getPointerAddressSpace();
4341 if (TLI.getTargetMachine().isNoopAddrSpaceCast(SrcAS, DestAS))
4342 return matchAddr(AddrInst->getOperand(0), Depth);
4343 return false;
4344 }
4345 case Instruction::Add: {
4346 // Check to see if we can merge in the RHS then the LHS. If so, we win.
4347 ExtAddrMode BackupAddrMode = AddrMode;
4348 unsigned OldSize = AddrModeInsts.size();
4349 // Start a transaction at this point.
4350 // The LHS may match but not the RHS.
4351 // Therefore, we need a higher level restoration point to undo partially
4352 // matched operations.
4353 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4354 TPT.getRestorationPoint();
4355
4356 AddrMode.InBounds = false;
4357 if (matchAddr(AddrInst->getOperand(1), Depth+1) &&
4358 matchAddr(AddrInst->getOperand(0), Depth+1))
4359 return true;
4360
4361 // Restore the old addr mode info.
4362 AddrMode = BackupAddrMode;
4363 AddrModeInsts.resize(OldSize);
4364 TPT.rollback(LastKnownGood);
4365
4366 // Otherwise this was over-aggressive. Try merging in the LHS then the RHS.
4367 if (matchAddr(AddrInst->getOperand(0), Depth+1) &&
4368 matchAddr(AddrInst->getOperand(1), Depth+1))
4369 return true;
4370
4371 // Otherwise we definitely can't merge the ADD in.
4372 AddrMode = BackupAddrMode;
4373 AddrModeInsts.resize(OldSize);
4374 TPT.rollback(LastKnownGood);
4375 break;
4376 }
4377 //case Instruction::Or:
4378 // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
4379 //break;
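// For example, "or i64 %x, 4" is equivalent to "add i64 %x, 4" whenever bit 2
// of %x is known to be zero (say, %x = shl i64 %y, 3), so such an OR could be
// folded as a constant offset just like the Add case above.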
4380 case Instruction::Mul:
4381 case Instruction::Shl: {
4382 // Can only handle X*C and X << C.
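// For example, both "mul i64 %x, 8" and "shl i64 %x, 3" are matched as
// ScaledReg = %x with Scale = 8; the shift amount is converted into a
// multiplicative scale below.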
4383 AddrMode.InBounds = false;
4384 ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
4385 if (!RHS || RHS->getBitWidth() > 64)
4386 return false;
4387 int64_t Scale = RHS->getSExtValue();
4388 if (Opcode == Instruction::Shl)
4389 Scale = 1LL << Scale;
4390
4391 return matchScaledValue(AddrInst->getOperand(0), Scale, Depth);
4392 }
4393 case Instruction::GetElementPtr: {
4394 // Scan the GEP. We check if it contains constant offsets and at most
4395 // one variable offset.
4396 int VariableOperand = -1;
4397 unsigned VariableScale = 0;
4398
4399 int64_t ConstantOffset = 0;
4400 gep_type_iterator GTI = gep_type_begin(AddrInst);
4401 for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
4402 if (StructType *STy = GTI.getStructTypeOrNull()) {
4403 const StructLayout *SL = DL.getStructLayout(STy);
4404 unsigned Idx =
4405 cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
4406 ConstantOffset += SL->getElementOffset(Idx);
4407 } else {
4408 TypeSize TS = DL.getTypeAllocSize(GTI.getIndexedType());
4409 if (TS.isNonZero()) {
4410 // The optimisations below currently only work for fixed offsets.
4411 if (TS.isScalable())
4412 return false;
4413 int64_t TypeSize = TS.getFixedSize();
4414 if (ConstantInt *CI =
4415 dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
4416 const APInt &CVal = CI->getValue();
4417 if (CVal.getMinSignedBits() <= 64) {
4418 ConstantOffset += CVal.getSExtValue() * TypeSize;
4419 continue;
4420 }
4421 }
4422 // We only allow one variable index at the moment.
4423 if (VariableOperand != -1)
4424 return false;
4425
4426 // Remember the variable index.
4427 VariableOperand = i;
4428 VariableScale = TypeSize;
4429 }
4430 }
4431 }
4432
4433 // A common case is for the GEP to only do a constant offset. In this case,
4434 // just add it to the disp field and check validity.
4435 if (VariableOperand == -1) {
4436 AddrMode.BaseOffs += ConstantOffset;
4437 if (ConstantOffset == 0 ||
4438 TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) {
4439 // Check to see if we can fold the base pointer in too.
4440 if (matchAddr(AddrInst->getOperand(0), Depth+1)) {
4441 if (!cast<GEPOperator>(AddrInst)->isInBounds())
4442 AddrMode.InBounds = false;
4443 return true;
4444 }
4445 } else if (EnableGEPOffsetSplit && isa<GetElementPtrInst>(AddrInst) &&
4446 TLI.shouldConsiderGEPOffsetSplit() && Depth == 0 &&
4447 ConstantOffset > 0) {
4448 // Record GEPs with non-zero offsets as candidates for splitting in the
4449 // event that the offset cannot fit into the r+i addressing mode.
4450 // Simple and common case that only one GEP is used in calculating the
4451 // address for the memory access.
4452 Value *Base = AddrInst->getOperand(0);
4453 auto *BaseI = dyn_cast<Instruction>(Base);
4454 auto *GEP = cast<GetElementPtrInst>(AddrInst);
4455 if (isa<Argument>(Base) || isa<GlobalValue>(Base) ||
4456 (BaseI && !isa<CastInst>(BaseI) &&
4457 !isa<GetElementPtrInst>(BaseI))) {
4458 // Make sure the parent block allows inserting non-PHI instructions
4459 // before the terminator.
4460 BasicBlock *Parent =
4461 BaseI ? BaseI->getParent() : &GEP->getFunction()->getEntryBlock();
4462 if (!Parent->getTerminator()->isEHPad())
4463 LargeOffsetGEP = std::make_pair(GEP, ConstantOffset);
4464 }
4465 }
4466 AddrMode.BaseOffs -= ConstantOffset;
4467 return false;
4468 }
4469
4470 // Save the valid addressing mode in case we can't match.
4471 ExtAddrMode BackupAddrMode = AddrMode;
4472 unsigned OldSize = AddrModeInsts.size();
4473
4474 // See if the scale and offset amount is valid for this target.
4475 AddrMode.BaseOffs += ConstantOffset;
4476 if (!cast<GEPOperator>(AddrInst)->isInBounds())
4477 AddrMode.InBounds = false;
4478
4479 // Match the base operand of the GEP.
4480 if (!matchAddr(AddrInst->getOperand(0), Depth+1)) {
4481 // If it couldn't be matched, just stuff the value in a register.
4482 if (AddrMode.HasBaseReg) {
4483 AddrMode = BackupAddrMode;
4484 AddrModeInsts.resize(OldSize);
4485 return false;
4486 }
4487 AddrMode.HasBaseReg = true;
4488 AddrMode.BaseReg = AddrInst->getOperand(0);
4489 }
4490
4491 // Match the remaining variable portion of the GEP.
4492 if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
4493 Depth)) {
4494 // If it couldn't be matched, try stuffing the base into a register
4495 // instead of matching it, and retrying the match of the scale.
4496 AddrMode = BackupAddrMode;
4497 AddrModeInsts.resize(OldSize);
4498 if (AddrMode.HasBaseReg)
4499 return false;
4500 AddrMode.HasBaseReg = true;
4501 AddrMode.BaseReg = AddrInst->getOperand(0);
4502 AddrMode.BaseOffs += ConstantOffset;
4503 if (!matchScaledValue(AddrInst->getOperand(VariableOperand),
4504 VariableScale, Depth)) {
4505 // If even that didn't work, bail.
4506 AddrMode = BackupAddrMode;
4507 AddrModeInsts.resize(OldSize);
4508 return false;
4509 }
4510 }
4511
4512 return true;
4513 }
4514 case Instruction::SExt:
4515 case Instruction::ZExt: {
4516 Instruction *Ext = dyn_cast<Instruction>(AddrInst);
4517 if (!Ext)
4518 return false;
4519
4520 // Try to move this ext out of the way of the addressing mode.
4521 // Ask for a method for doing so.
4522 TypePromotionHelper::Action TPH =
4523 TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts);
4524 if (!TPH)
4525 return false;
4526
4527 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4528 TPT.getRestorationPoint();
4529 unsigned CreatedInstsCost = 0;
4530 unsigned ExtCost = !TLI.isExtFree(Ext);
4531 Value *PromotedOperand =
4532 TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI);
4533 // SExt has been moved away.
4534 // Thus either it will be rematched later in the recursive calls or it is
4535 // gone. Anyway, we must not fold it into the addressing mode at this point.
4536 // E.g.,
4537 // op = add opnd, 1
4538 // idx = ext op
4539 // addr = gep base, idx
4540 // is now:
4541 // promotedOpnd = ext opnd <- no match here
4542 // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls)
4543 // addr = gep base, op <- match
4544 if (MovedAway)
4545 *MovedAway = true;
4546
4547 assert(PromotedOperand &&
4548 "TypePromotionHelper should have filtered out those cases");
4549
4550 ExtAddrMode BackupAddrMode = AddrMode;
4551 unsigned OldSize = AddrModeInsts.size();
4552
4553 if (!matchAddr(PromotedOperand, Depth) ||
4554 // The total of the new cost is equal to the cost of the created
4555 // instructions.
4556 // The total of the old cost is equal to the cost of the extension plus
4557 // what we have saved in the addressing mode.
4558 !isPromotionProfitable(CreatedInstsCost,
4559 ExtCost + (AddrModeInsts.size() - OldSize),
4560 PromotedOperand)) {
4561 AddrMode = BackupAddrMode;
4562 AddrModeInsts.resize(OldSize);
4563 LLVM_DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
4564 TPT.rollback(LastKnownGood);
4565 return false;
4566 }
4567 return true;
4568 }
4569 }
4570 return false;
4571 }
4572
4573 /// If we can, try to add the value of 'Addr' into the current addressing mode.
4574 /// If Addr can't be added to AddrMode this returns false and leaves AddrMode
4575 /// unmodified. This assumes that Addr is either a pointer type or intptr_t
4576 /// for the target.
4577 ///
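/// Conceptually, the matched addressing mode has the form
///   BaseGV + BaseReg + Scale * ScaledReg + BaseOffs
/// For example, a small constant integer address is folded into BaseOffs and a
/// global goes into BaseGV, while anything else falls back to BaseReg (or to
/// ScaledReg with Scale = 1 if the base register is already taken), with each
/// step checked against TLI.isLegalAddressingMode.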
4578 bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) {
4579 // Start a transaction at this point that we will rollback if the matching
4580 // fails.
4581 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4582 TPT.getRestorationPoint();
4583 if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
4584 if (CI->getValue().isSignedIntN(64)) {
4585 // Fold in immediates if legal for the target.
4586 AddrMode.BaseOffs += CI->getSExtValue();
4587 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4588 return true;
4589 AddrMode.BaseOffs -= CI->getSExtValue();
4590 }
4591 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
4592 // If this is a global variable, try to fold it into the addressing mode.
4593 if (!AddrMode.BaseGV) {
4594 AddrMode.BaseGV = GV;
4595 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4596 return true;
4597 AddrMode.BaseGV = nullptr;
4598 }
4599 } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
4600 ExtAddrMode BackupAddrMode = AddrMode;
4601 unsigned OldSize = AddrModeInsts.size();
4602
4603 // Check to see if it is possible to fold this operation.
4604 bool MovedAway = false;
4605 if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) {
4606 // This instruction may have been moved away. If so, there is nothing
4607 // to check here.
4608 if (MovedAway)
4609 return true;
4610 // Okay, it's possible to fold this. Check to see if it is actually
4611 // *profitable* to do so. We use a simple cost model to avoid increasing
4612 // register pressure too much.
4613 if (I->hasOneUse() ||
4614 isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
4615 AddrModeInsts.push_back(I);
4616 return true;
4617 }
4618
4619 // It isn't profitable to do this, roll back.
4620 //cerr << "NOT FOLDING: " << *I;
4621 AddrMode = BackupAddrMode;
4622 AddrModeInsts.resize(OldSize);
4623 TPT.rollback(LastKnownGood);
4624 }
4625 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
4626 if (matchOperationAddr(CE, CE->getOpcode(), Depth))
4627 return true;
4628 TPT.rollback(LastKnownGood);
4629 } else if (isa<ConstantPointerNull>(Addr)) {
4630 // Null pointer gets folded without affecting the addressing mode.
4631 return true;
4632 }
4633
4634 // Worst case, the target should support [reg] addressing modes. :)
4635 if (!AddrMode.HasBaseReg) {
4636 AddrMode.HasBaseReg = true;
4637 AddrMode.BaseReg = Addr;
4638 // Still check for legality in case the target supports [imm] but not [i+r].
4639 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4640 return true;
4641 AddrMode.HasBaseReg = false;
4642 AddrMode.BaseReg = nullptr;
4643 }
4644
4645 // If the base register is already taken, see if we can do [r+r].
4646 if (AddrMode.Scale == 0) {
4647 AddrMode.Scale = 1;
4648 AddrMode.ScaledReg = Addr;
4649 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4650 return true;
4651 AddrMode.Scale = 0;
4652 AddrMode.ScaledReg = nullptr;
4653 }
4654 // Couldn't match.
4655 TPT.rollback(LastKnownGood);
4656 return false;
4657 }
4658
4659 /// Check to see if all uses of OpVal by the specified inline asm call are due
4660 /// to memory operands. If so, return true, otherwise return false.
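/// For example, a use of OpVal only through an indirect memory constraint
/// (such as "*m") can have its address computation folded, whereas a use under
/// a register constraint (such as "r") cannot.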
4661 static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
4662 const TargetLowering &TLI,
4663 const TargetRegisterInfo &TRI) {
4664 const Function *F = CI->getFunction();
4665 TargetLowering::AsmOperandInfoVector TargetConstraints =
4666 TLI.ParseConstraints(F->getParent()->getDataLayout(), &TRI, *CI);
4667
4668 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
4669 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
4670
4671 // Compute the constraint code and ConstraintType to use.
4672 TLI.ComputeConstraintToUse(OpInfo, SDValue());
4673
4674 // If this asm operand is our Value*, and if it isn't an indirect memory
4675 // operand, we can't fold it!
4676 if (OpInfo.CallOperandVal == OpVal &&
4677 (OpInfo.ConstraintType != TargetLowering::C_Memory ||
4678 !OpInfo.isIndirect))
4679 return false;
4680 }
4681
4682 return true;
4683 }
4684
4685 // Max number of memory uses to look at before aborting the search to conserve
4686 // compile time.
4687 static constexpr int MaxMemoryUsesToScan = 20;
4688
4689 /// Recursively walk all the uses of I until we find a memory use.
4690 /// If we find an obviously non-foldable instruction, return true.
4691 /// Add the ultimately found memory instructions to MemoryUses.
4692 static bool FindAllMemoryUses(
4693 Instruction *I,
4694 SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses,
4695 SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI,
4696 const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI,
4697 BlockFrequencyInfo *BFI, int SeenInsts = 0) {
4698 // If we already considered this instruction, we're done.
4699 if (!ConsideredInsts.insert(I).second)
4700 return false;
4701
4702 // If this is an obviously unfoldable instruction, bail out.
4703 if (!MightBeFoldableInst(I))
4704 return true;
4705
4706 // Loop over all the uses, recursively processing them.
4707 for (Use &U : I->uses()) {
4708 // Conservatively return true if we're seeing a large number or a deep chain
4709 // of users. This avoids excessive compilation times in pathological cases.
4710 if (SeenInsts++ >= MaxMemoryUsesToScan)
4711 return true;
4712
4713 Instruction *UserI = cast<Instruction>(U.getUser());
4714 if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
4715 MemoryUses.push_back(std::make_pair(LI, U.getOperandNo()));
4716 continue;
4717 }
4718
4719 if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
4720 unsigned opNo = U.getOperandNo();
4721 if (opNo != StoreInst::getPointerOperandIndex())
4722 return true; // Storing addr, not into addr.
4723 MemoryUses.push_back(std::make_pair(SI, opNo));
4724 continue;
4725 }
4726
4727 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) {
4728 unsigned opNo = U.getOperandNo();
4729 if (opNo != AtomicRMWInst::getPointerOperandIndex())
4730 return true; // Storing addr, not into addr.
4731 MemoryUses.push_back(std::make_pair(RMW, opNo));
4732 continue;
4733 }
4734
4735 if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) {
4736 unsigned opNo = U.getOperandNo();
4737 if (opNo != AtomicCmpXchgInst::getPointerOperandIndex())
4738 return true; // Storing addr, not into addr.
4739 MemoryUses.push_back(std::make_pair(CmpX, opNo));
4740 continue;
4741 }
4742
4743 if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
4744 if (CI->hasFnAttr(Attribute::Cold)) {
4745 // If this is a cold call, we can sink the addressing calculation into
4746 // the cold path. See optimizeCallInst
4747 bool OptForSize = OptSize ||
4748 llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI);
4749 if (!OptForSize)
4750 continue;
4751 }
4752
4753 InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand());
4754 if (!IA) return true;
4755
4756 // If this is a memory operand, we're cool, otherwise bail out.
4757 if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI))
4758 return true;
4759 continue;
4760 }
4761
4762 if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
4763 PSI, BFI, SeenInsts))
4764 return true;
4765 }
4766
4767 return false;
4768 }
4769
4770 /// Return true if Val is already known to be live at the use site that we're
4771 /// folding it into. If so, there is no cost to include it in the addressing
4772 /// mode. KnownLive1 and KnownLive2 are two values that we know are live at the
4773 /// instruction already.
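/// For example, constants, the two known-live values themselves, static
/// allocas in the entry block (which are just frame-pointer offsets), and
/// values that already have a use in MemoryInst's block are all considered
/// free to reference here.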
4774 bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,Value *KnownLive1,
4775 Value *KnownLive2) {
4776 // If Val is either of the known-live values, we know it is live!
4777 if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2)
4778 return true;
4779
4780 // All values other than instructions and arguments (e.g. constants) are live.
4781 if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true;
4782
4783 // If Val is a constant sized alloca in the entry block, it is live. This is
4784 // true because it is just a reference to the stack/frame pointer, which is
4785 // live for the whole function.
4786 if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
4787 if (AI->isStaticAlloca())
4788 return true;
4789
4790 // Check to see if this value is already used in the memory instruction's
4791 // block. If so, it's already live into the block at the very least, so we
4792 // can reasonably fold it.
4793 return Val->isUsedInBasicBlock(MemoryInst->getParent());
4794 }
4795
4796 /// It is possible for the addressing mode of the machine to fold the specified
4797 /// instruction into a load or store that ultimately uses it.
4798 /// However, the specified instruction has multiple uses.
4799 /// Given this, it may actually increase register pressure to fold it
4800 /// into the load. For example, consider this code:
4801 ///
4802 /// X = ...
4803 /// Y = X+1
4804 /// use(Y) -> nonload/store
4805 /// Z = Y+1
4806 /// load Z
4807 ///
4808 /// In this case, Y has multiple uses, and can be folded into the load of Z
4809 /// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to
4810 /// be live at the use(Y) line. If we don't fold Y into load Z, we use one
4811 /// fewer register. Since Y can't be folded into "use(Y)" we don't increase the
4812 /// number of computations either.
4813 ///
4814 /// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If
4815 /// X was live across 'load Z' for other reasons, we actually *would* want to
4816 /// fold the addressing mode in the Z case. This would make Y die earlier.
4817 bool AddressingModeMatcher::
4818 isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
4819 ExtAddrMode &AMAfter) {
4820 if (IgnoreProfitability) return true;
4821
4822 // AMBefore is the addressing mode before this instruction was folded into it,
4823 // and AMAfter is the addressing mode after the instruction was folded. Get
4824 // the set of registers referenced by AMAfter and subtract out those
4825 // referenced by AMBefore: this is the set of values which folding in this
4826 // address extends the lifetime of.
4827 //
4828 // Note that there are only two potential values being referenced here,
4829 // BaseReg and ScaleReg (global addresses are always available, as are any
4830 // folded immediates).
4831 Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;
4832
4833 // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
4834 // lifetime wasn't extended by adding this instruction.
4835 if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
4836 BaseReg = nullptr;
4837 if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
4838 ScaledReg = nullptr;
4839
4840 // If folding this instruction (and its subexprs) didn't extend any live
4841 // ranges, we're ok with it.
4842 if (!BaseReg && !ScaledReg)
4843 return true;
4844
4845 // If all uses of this instruction can have the address mode sunk into them,
4846 // we can remove the addressing mode and effectively trade one live register
4847 // for another (at worst.) In this context, folding an addressing mode into
4848 // the use is just a particularly nice way of sinking it.
4849 SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
4850 SmallPtrSet<Instruction*, 16> ConsideredInsts;
4851 if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
4852 PSI, BFI))
4853 return false; // Has a non-memory, non-foldable use!
4854
4855 // Now that we know that all uses of this instruction are part of a chain of
4856 // computation involving only operations that could theoretically be folded
4857 // into a memory use, loop over each of these memory operation uses and see
4858 // if they could *actually* fold the instruction. The assumption is that
4859 // addressing modes are cheap and that duplicating the computation involved
4860 // many times is worthwhile, even on a fastpath. For sinking candidates
4861 // (i.e. cold call sites), this serves as a way to prevent excessive code
4862 // growth since most architectures have some reasonably small and fast way to
4863 // compute an effective address (e.g., LEA on x86).
4864 SmallVector<Instruction*, 32> MatchedAddrModeInsts;
4865 for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
4866 Instruction *User = MemoryUses[i].first;
4867 unsigned OpNo = MemoryUses[i].second;
4868
4869 // Get the access type of this use. If the use isn't a pointer, we don't
4870 // know what it accesses.
4871 Value *Address = User->getOperand(OpNo);
4872 PointerType *AddrTy = dyn_cast<PointerType>(Address->getType());
4873 if (!AddrTy)
4874 return false;
4875 Type *AddressAccessTy = AddrTy->getElementType();
4876 unsigned AS = AddrTy->getAddressSpace();
4877
4878 // Do a match against the root of this address, ignoring profitability. This
4879 // will tell us if the addressing mode for the memory operation will
4880 // *actually* cover the shared instruction.
4881 ExtAddrMode Result;
4882 std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
4883 0);
4884 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4885 TPT.getRestorationPoint();
4886 AddressingModeMatcher Matcher(
4887 MatchedAddrModeInsts, TLI, TRI, AddressAccessTy, AS, MemoryInst, Result,
4888 InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI, BFI);
4889 Matcher.IgnoreProfitability = true;
4890 bool Success = Matcher.matchAddr(Address, 0);
4891 (void)Success; assert(Success && "Couldn't select *anything*?");
4892
4893 // The match was only to check profitability; the changes made are not
4894 // part of the original matcher. Therefore, they should be dropped,
4895 // otherwise the original matcher will not be in the right state.
4896 TPT.rollback(LastKnownGood);
4897
4898 // If the match didn't cover I, then it won't be shared by it.
4899 if (!is_contained(MatchedAddrModeInsts, I))
4900 return false;
4901
4902 MatchedAddrModeInsts.clear();
4903 }
4904
4905 return true;
4906 }
4907
4908 /// Return true if the specified value is defined in a
4909 /// different basic block than BB.
4910 static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
4911 if (Instruction *I = dyn_cast<Instruction>(V))
4912 return I->getParent() != BB;
4913 return false;
4914 }
4915
4916 /// Sink addressing mode computation immediately before MemoryInst if doing so
4917 /// can be done without increasing register pressure. The need for the
4918 /// register pressure constraint means this can end up being an all or nothing
4919 /// decision for all uses of the same addressing computation.
4920 ///
4921 /// Load and Store Instructions often have addressing modes that can do
4922 /// significant amounts of computation. As such, instruction selection will try
4923 /// to get the load or store to do as much computation as possible for the
4924 /// program. The problem is that isel can only see within a single block. As
4925 /// such, we sink as much legal addressing mode work into the block as possible.
4926 ///
4927 /// This method is used to optimize both load/store and inline asms with memory
4928 /// operands. It's also used to sink addressing computations feeding into cold
4929 /// call sites into their (cold) basic block.
4930 ///
4931 /// The motivation for handling sinking into cold blocks is that doing so can
4932 /// both enable other address mode sinking (by satisfying the register pressure
4933 /// constraint above), and reduce register pressure globally (by removing the
4934 /// addressing mode computation from the fast path entirely).
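/// As a rough sketch (illustrative IR): if the address
///   %addr = getelementptr i8, i8* %base, i64 %off
/// is computed in a predecessor block and only feeds a load in this block, an
/// equivalent "sunkaddr" computation is rebuilt immediately before the load so
/// that instruction selection, which works one basic block at a time, can fold
/// it into the load's addressing mode.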
4935 bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
4936 Type *AccessTy, unsigned AddrSpace) {
4937 Value *Repl = Addr;
4938
4939 // Try to collapse single-value PHI nodes. This is necessary to undo
4940 // unprofitable PRE transformations.
4941 SmallVector<Value*, 8> worklist;
4942 SmallPtrSet<Value*, 16> Visited;
4943 worklist.push_back(Addr);
4944
4945 // Use a worklist to iteratively look through PHI and select nodes, and
4946 // ensure that the addressing mode obtained from the non-PHI/select roots of
4947 // the graph are compatible.
4948 bool PhiOrSelectSeen = false;
4949 SmallVector<Instruction*, 16> AddrModeInsts;
4950 const SimplifyQuery SQ(*DL, TLInfo);
4951 AddressingModeCombiner AddrModes(SQ, Addr);
4952 TypePromotionTransaction TPT(RemovedInsts);
4953 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4954 TPT.getRestorationPoint();
4955 while (!worklist.empty()) {
4956 Value *V = worklist.back();
4957 worklist.pop_back();
4958
4959 // We allow traversing cyclic Phi nodes.
4960 // In case of success after this loop we ensure that traversing through
4961 // Phi nodes ends up, in all cases, computing an address of the form
4962 // BaseGV + Base + Scale * Index + Offset
4963 // where Scale and Offset are constants and BaseGV, Base and Index
4964 // are exactly the same Values in all cases.
4965 // It means that BaseGV, Scale and Offset dominate our memory instruction
4966 // and have the same value as they had in address computation represented
4967 // as Phi. So we can safely sink address computation to memory instruction.
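// For example, if every incoming value of such a Phi computes
// "%base + 4 * %index + 16" with the same %base and %index, the combined
// addressing mode can be rebuilt next to the memory instruction regardless of
// which Phi arm was taken.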
4968 if (!Visited.insert(V).second)
4969 continue;
4970
4971 // For a PHI node, push all of its incoming values.
4972 if (PHINode *P = dyn_cast<PHINode>(V)) {
4973 for (Value *IncValue : P->incoming_values())
4974 worklist.push_back(IncValue);
4975 PhiOrSelectSeen = true;
4976 continue;
4977 }
4978 // Similar for select.
4979 if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
4980 worklist.push_back(SI->getFalseValue());
4981 worklist.push_back(SI->getTrueValue());
4982 PhiOrSelectSeen = true;
4983 continue;
4984 }
4985
4986 // For non-PHIs, determine the addressing mode being computed. Note that
4987 // the result may differ depending on what other uses our candidate
4988 // addressing instructions might have.
4989 AddrModeInsts.clear();
4990 std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
4991 0);
4992 ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
4993 V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *TRI,
4994 InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI,
4995 BFI.get());
4996
4997 GetElementPtrInst *GEP = LargeOffsetGEP.first;
4998 if (GEP && !NewGEPBases.count(GEP)) {
4999 // If splitting the underlying data structure can reduce the offset of a
5000 // GEP, collect the GEP. Skip the GEPs that are the new bases of
5001 // previously split data structures.
5002 LargeOffsetGEPMap[GEP->getPointerOperand()].push_back(LargeOffsetGEP);
5003 if (LargeOffsetGEPID.find(GEP) == LargeOffsetGEPID.end())
5004 LargeOffsetGEPID[GEP] = LargeOffsetGEPID.size();
5005 }
5006
5007 NewAddrMode.OriginalValue = V;
5008 if (!AddrModes.addNewAddrMode(NewAddrMode))
5009 break;
5010 }
5011
5012 // Try to combine the AddrModes we've collected. If we couldn't collect any,
5013 // or we have multiple but either couldn't combine them or combining them
5014 // wouldn't do anything useful, bail out now.
5015 if (!AddrModes.combineAddrModes()) {
5016 TPT.rollback(LastKnownGood);
5017 return false;
5018 }
5019 bool Modified = TPT.commit();
5020
5021 // Get the combined AddrMode (or the only AddrMode, if we only had one).
5022 ExtAddrMode AddrMode = AddrModes.getAddrMode();
5023
5024 // If all the instructions matched are already in this BB, don't do anything.
5025 // If we saw a Phi node then it is definitely not local, and if we saw a select
5026 // then we want to push the address calculation past it even if it's already
5027 // in this BB.
5028 if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) {
5029 return IsNonLocalValue(V, MemoryInst->getParent());
5030 })) {
5031 LLVM_DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode
5032 << "\n");
5033 return Modified;
5034 }
5035
5036 // Insert this computation right after this user. Since our caller is
5037 // scanning from the top of the BB to the bottom, reuses of the expr are
5038 // guaranteed to happen later.
5039 IRBuilder<> Builder(MemoryInst);
5040
5041 // Now that we have determined the addressing expression we want to use and
5042 // know that we have to sink it into this block, check to see if we have already
5043 // done this for some other load/store instr in this block. If so, reuse
5044 // the computation. Before attempting reuse, check if the address is valid
5045 // as it may have been erased.
5046
5047 WeakTrackingVH SunkAddrVH = SunkAddrs[Addr];
5048
5049 Value * SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
5050 if (SunkAddr) {
5051 LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode
5052 << " for " << *MemoryInst << "\n");
5053 if (SunkAddr->getType() != Addr->getType())
5054 SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
5055 } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() &&
5056 SubtargetInfo->addrSinkUsingGEPs())) {
5057 // By default, we use the GEP-based method when AA is used later. This
5058 // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
5059 LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
5060 << " for " << *MemoryInst << "\n");
5061 Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
5062 Value *ResultPtr = nullptr, *ResultIndex = nullptr;
5063
5064 // First, find the pointer.
5065 if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
5066 ResultPtr = AddrMode.BaseReg;
5067 AddrMode.BaseReg = nullptr;
5068 }
5069
5070 if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
5071 // We can't add more than one pointer together, nor can we scale a
5072 // pointer (both of which seem meaningless).
5073 if (ResultPtr || AddrMode.Scale != 1)
5074 return Modified;
5075
5076 ResultPtr = AddrMode.ScaledReg;
5077 AddrMode.Scale = 0;
5078 }
5079
5080 // It is only safe to sign extend the BaseReg if we know that the math
5081 // required to create it did not overflow before we extend it. Since
5082 // the original IR value was tossed in favor of a constant back when
5083 // the AddrMode was created we need to bail out gracefully if widths
5084 // do not match instead of extending it.
5085 //
5086 // (See below for code to add the scale.)
5087 if (AddrMode.Scale) {
5088 Type *ScaledRegTy = AddrMode.ScaledReg->getType();
5089 if (cast<IntegerType>(IntPtrTy)->getBitWidth() >
5090 cast<IntegerType>(ScaledRegTy)->getBitWidth())
5091 return Modified;
5092 }
5093
5094 if (AddrMode.BaseGV) {
5095 if (ResultPtr)
5096 return Modified;
5097
5098 ResultPtr = AddrMode.BaseGV;
5099 }
5100
5101 // If the real base value actually came from an inttoptr, then the matcher
5102 // will look through it and provide only the integer value. In that case,
5103 // use it here.
5104 if (!DL->isNonIntegralPointerType(Addr->getType())) {
5105 if (!ResultPtr && AddrMode.BaseReg) {
5106 ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(),
5107 "sunkaddr");
5108 AddrMode.BaseReg = nullptr;
5109 } else if (!ResultPtr && AddrMode.Scale == 1) {
5110 ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(),
5111 "sunkaddr");
5112 AddrMode.Scale = 0;
5113 }
5114 }
5115
5116 if (!ResultPtr &&
5117 !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) {
5118 SunkAddr = Constant::getNullValue(Addr->getType());
5119 } else if (!ResultPtr) {
5120 return Modified;
5121 } else {
5122 Type *I8PtrTy =
5123 Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace());
5124 Type *I8Ty = Builder.getInt8Ty();
5125
5126 // Start with the base register. Do this first so that subsequent address
5127 // matching finds it last, which will prevent it from trying to match it
5128 // as the scaled value in case it happens to be a mul. That would be
5129 // problematic if we've sunk a different mul for the scale, because then
5130 // we'd end up sinking both muls.
5131 if (AddrMode.BaseReg) {
5132 Value *V = AddrMode.BaseReg;
5133 if (V->getType() != IntPtrTy)
5134 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
5135
5136 ResultIndex = V;
5137 }
5138
5139 // Add the scale value.
5140 if (AddrMode.Scale) {
5141 Value *V = AddrMode.ScaledReg;
5142 if (V->getType() == IntPtrTy) {
5143 // done.
5144 } else {
5145 assert(cast<IntegerType>(IntPtrTy)->getBitWidth() <
5146 cast<IntegerType>(V->getType())->getBitWidth() &&
5147 "We can't transform if ScaledReg is too narrow");
5148 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
5149 }
5150
5151 if (AddrMode.Scale != 1)
5152 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
5153 "sunkaddr");
5154 if (ResultIndex)
5155 ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr");
5156 else
5157 ResultIndex = V;
5158 }
5159
5160 // Add in the Base Offset if present.
5161 if (AddrMode.BaseOffs) {
5162 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
5163 if (ResultIndex) {
5164 // We need to add this separately from the scale above to help with
5165 // SDAG consecutive load/store merging.
5166 if (ResultPtr->getType() != I8PtrTy)
5167 ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
5168 ResultPtr =
5169 AddrMode.InBounds
5170 ? Builder.CreateInBoundsGEP(I8Ty, ResultPtr, ResultIndex,
5171 "sunkaddr")
5172 : Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr");
5173 }
5174
5175 ResultIndex = V;
5176 }
5177
5178 if (!ResultIndex) {
5179 SunkAddr = ResultPtr;
5180 } else {
5181 if (ResultPtr->getType() != I8PtrTy)
5182 ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
5183 SunkAddr =
5184 AddrMode.InBounds
5185 ? Builder.CreateInBoundsGEP(I8Ty, ResultPtr, ResultIndex,
5186 "sunkaddr")
5187 : Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr");
5188 }
5189
5190 if (SunkAddr->getType() != Addr->getType())
5191 SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
5192 }
5193 } else {
5194 // We'd require a ptrtoint/inttoptr down the line, which we can't do for
5195 // non-integral pointers, so in that case bail out now.
5196 Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr;
5197 Type *ScaleTy = AddrMode.Scale ? AddrMode.ScaledReg->getType() : nullptr;
5198 PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy);
5199 PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy);
5200 if (DL->isNonIntegralPointerType(Addr->getType()) ||
5201 (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) ||
5202 (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) ||
5203 (AddrMode.BaseGV &&
5204 DL->isNonIntegralPointerType(AddrMode.BaseGV->getType())))
5205 return Modified;
5206
5207 LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
5208 << " for " << *MemoryInst << "\n");
5209 Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
5210 Value *Result = nullptr;
5211
5212 // Start with the base register. Do this first so that subsequent address
5213 // matching finds it last, which will prevent it from trying to match it
5214 // as the scaled value in case it happens to be a mul. That would be
5215 // problematic if we've sunk a different mul for the scale, because then
5216 // we'd end up sinking both muls.
5217 if (AddrMode.BaseReg) {
5218 Value *V = AddrMode.BaseReg;
5219 if (V->getType()->isPointerTy())
5220 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
5221 if (V->getType() != IntPtrTy)
5222 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
5223 Result = V;
5224 }
5225
5226 // Add the scale value.
5227 if (AddrMode.Scale) {
5228 Value *V = AddrMode.ScaledReg;
5229 if (V->getType() == IntPtrTy) {
5230 // done.
5231 } else if (V->getType()->isPointerTy()) {
5232 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
5233 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
5234 cast<IntegerType>(V->getType())->getBitWidth()) {
5235 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
5236 } else {
5237 // It is only safe to sign extend the BaseReg if we know that the math
5238 // required to create it did not overflow before we extend it. Since
5239 // the original IR value was tossed in favor of a constant back when
5240 // the AddrMode was created we need to bail out gracefully if widths
5241 // do not match instead of extending it.
5242 Instruction *I = dyn_cast_or_null<Instruction>(Result);
5243 if (I && (Result != AddrMode.BaseReg))
5244 I->eraseFromParent();
5245 return Modified;
5246 }
5247 if (AddrMode.Scale != 1)
5248 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
5249 "sunkaddr");
5250 if (Result)
5251 Result = Builder.CreateAdd(Result, V, "sunkaddr");
5252 else
5253 Result = V;
5254 }
5255
5256 // Add in the BaseGV if present.
5257 if (AddrMode.BaseGV) {
5258 Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr");
5259 if (Result)
5260 Result = Builder.CreateAdd(Result, V, "sunkaddr");
5261 else
5262 Result = V;
5263 }
5264
5265 // Add in the Base Offset if present.
5266 if (AddrMode.BaseOffs) {
5267 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
5268 if (Result)
5269 Result = Builder.CreateAdd(Result, V, "sunkaddr");
5270 else
5271 Result = V;
5272 }
5273
5274 if (!Result)
5275 SunkAddr = Constant::getNullValue(Addr->getType());
5276 else
5277 SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
5278 }
5279
5280 MemoryInst->replaceUsesOfWith(Repl, SunkAddr);
5281 // Store the newly computed address into the cache. In the case we reused a
5282 // value, this should be idempotent.
5283 SunkAddrs[Addr] = WeakTrackingVH(SunkAddr);
5284
5285 // If we have no uses, recursively delete the value and all dead instructions
5286 // using it.
5287 if (Repl->use_empty()) {
5288 resetIteratorIfInvalidatedWhileCalling(CurInstIterator->getParent(), [&]() {
5289 RecursivelyDeleteTriviallyDeadInstructions(
5290 Repl, TLInfo, nullptr,
5291 [&](Value *V) { removeAllAssertingVHReferences(V); });
5292 });
5293 }
5294 ++NumMemoryInsts;
5295 return true;
5296 }
5297
5298 /// Rewrite GEP input to gather/scatter to enable SelectionDAGBuilder to find
5299 /// a uniform base to use for ISD::MGATHER/MSCATTER. SelectionDAGBuilder can
5300 /// only handle a 2 operand GEP in the same basic block or a splat constant
5301 /// vector. The 2 operands to the GEP must have a scalar pointer and a vector
5302 /// index.
5303 ///
5304 /// If the existing GEP has a vector base pointer that is splat, we can look
5305 /// through the splat to find the scalar pointer. If we can't find a scalar
5306 /// pointer there's nothing we can do.
5307 ///
5308 /// If we have a GEP with more than 2 indices where the middle indices are all
5309 /// zeroes, we can replace it with 2 GEPs where the second has 2 operands.
5310 ///
5311 /// If the final index isn't a vector or is a splat, we can emit a scalar GEP
5312 /// followed by a GEP with an all zeroes vector index. This will enable
5313 /// SelectionDAGBuilder to use the scalar GEP as the uniform base and have a
5314 /// zero index.
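/// For example (illustrative IR), a gather whose pointer operand is
///   %ptrs = getelementptr [4 x i32], [4 x i32]* %base, i64 0, <4 x i64> %idx
/// is rewritten into a scalar GEP that folds the leading zero index plus a
/// trailing two-operand GEP carrying the vector index, which lets
/// SelectionDAGBuilder recover a uniform scalar base and a single vector index.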
5315 bool CodeGenPrepare::optimizeGatherScatterInst(Instruction *MemoryInst,
5316 Value *Ptr) {
5317 // FIXME: Support scalable vectors.
5318 if (isa<ScalableVectorType>(Ptr->getType()))
5319 return false;
5320
5321 Value *NewAddr;
5322
5323 if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
5324 // Don't optimize GEPs that don't have indices.
5325 if (!GEP->hasIndices())
5326 return false;
5327
5328 // If the GEP and the gather/scatter aren't in the same BB, don't optimize.
5329 // FIXME: We should support this by sinking the GEP.
5330 if (MemoryInst->getParent() != GEP->getParent())
5331 return false;
5332
5333 SmallVector<Value *, 2> Ops(GEP->op_begin(), GEP->op_end());
5334
5335 bool RewriteGEP = false;
5336
5337 if (Ops[0]->getType()->isVectorTy()) {
5338 Ops[0] = getSplatValue(Ops[0]);
5339 if (!Ops[0])
5340 return false;
5341 RewriteGEP = true;
5342 }
5343
5344 unsigned FinalIndex = Ops.size() - 1;
5345
5346 // Ensure all but the last index is 0.
5347 // FIXME: This isn't strictly required. All that's required is that they are
5348 // all scalars or splats.
5349 for (unsigned i = 1; i < FinalIndex; ++i) {
5350 auto *C = dyn_cast<Constant>(Ops[i]);
5351 if (!C)
5352 return false;
5353 if (isa<VectorType>(C->getType()))
5354 C = C->getSplatValue();
5355 auto *CI = dyn_cast_or_null<ConstantInt>(C);
5356 if (!CI || !CI->isZero())
5357 return false;
5358 // Scalarize the index if needed.
5359 Ops[i] = CI;
5360 }
5361
5362 // Try to scalarize the final index.
5363 if (Ops[FinalIndex]->getType()->isVectorTy()) {
5364 if (Value *V = getSplatValue(Ops[FinalIndex])) {
5365 auto *C = dyn_cast<ConstantInt>(V);
5366 // Don't scalarize all zeros vector.
5367 if (!C || !C->isZero()) {
5368 Ops[FinalIndex] = V;
5369 RewriteGEP = true;
5370 }
5371 }
5372 }
5373
5374 // If we made any changes or we have extra operands, we need to generate
5375 // new instructions.
5376 if (!RewriteGEP && Ops.size() == 2)
5377 return false;
5378
5379 unsigned NumElts = cast<FixedVectorType>(Ptr->getType())->getNumElements();
5380
5381 IRBuilder<> Builder(MemoryInst);
5382
5383 Type *ScalarIndexTy = DL->getIndexType(Ops[0]->getType()->getScalarType());
5384
5385 // If the final index isn't a vector, emit a scalar GEP containing all ops
5386 // and a vector GEP with all zeroes final index.
5387 if (!Ops[FinalIndex]->getType()->isVectorTy()) {
5388 NewAddr = Builder.CreateGEP(Ops[0], makeArrayRef(Ops).drop_front());
5389 auto *IndexTy = FixedVectorType::get(ScalarIndexTy, NumElts);
5390 NewAddr = Builder.CreateGEP(NewAddr, Constant::getNullValue(IndexTy));
5391 } else {
5392 Value *Base = Ops[0];
5393 Value *Index = Ops[FinalIndex];
5394
5395 // Create a scalar GEP if there are more than 2 operands.
5396 if (Ops.size() != 2) {
5397 // Replace the last index with 0.
5398 Ops[FinalIndex] = Constant::getNullValue(ScalarIndexTy);
5399 Base = Builder.CreateGEP(Base, makeArrayRef(Ops).drop_front());
5400 }
5401
5402 // Now create the GEP with scalar pointer and vector index.
5403 NewAddr = Builder.CreateGEP(Base, Index);
5404 }
5405 } else if (!isa<Constant>(Ptr)) {
5406 // Not a GEP, maybe it's a splat and we can create a GEP to enable
5407 // SelectionDAGBuilder to use it as a uniform base.
5408 Value *V = getSplatValue(Ptr);
5409 if (!V)
5410 return false;
5411
5412 unsigned NumElts = cast<FixedVectorType>(Ptr->getType())->getNumElements();
5413
5414 IRBuilder<> Builder(MemoryInst);
5415
5416 // Emit a vector GEP with a scalar pointer and all 0s vector index.
5417 Type *ScalarIndexTy = DL->getIndexType(V->getType()->getScalarType());
5418 auto *IndexTy = FixedVectorType::get(ScalarIndexTy, NumElts);
5419 NewAddr = Builder.CreateGEP(V, Constant::getNullValue(IndexTy));
5420 } else {
5421 // Constant, SelectionDAGBuilder knows to check if it's a splat.
5422 return false;
5423 }
5424
5425 MemoryInst->replaceUsesOfWith(Ptr, NewAddr);
5426
5427 // If we have no uses, recursively delete the value and all dead instructions
5428 // using it.
5429 if (Ptr->use_empty())
5430 RecursivelyDeleteTriviallyDeadInstructions(
5431 Ptr, TLInfo, nullptr,
5432 [&](Value *V) { removeAllAssertingVHReferences(V); });
5433
5434 return true;
5435 }
5436
5437 /// If there are any memory operands, use OptimizeMemoryInst to sink their
5438 /// address computing into the block when possible / profitable.
5439 bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) {
5440 bool MadeChange = false;
5441
5442 const TargetRegisterInfo *TRI =
5443 TM->getSubtargetImpl(*CS->getFunction())->getRegisterInfo();
5444 TargetLowering::AsmOperandInfoVector TargetConstraints =
5445 TLI->ParseConstraints(*DL, TRI, *CS);
5446 unsigned ArgNo = 0;
5447 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
5448 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
5449
5450 // Compute the constraint code and ConstraintType to use.
5451 TLI->ComputeConstraintToUse(OpInfo, SDValue());
5452
5453 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
5454 OpInfo.isIndirect) {
5455 Value *OpVal = CS->getArgOperand(ArgNo++);
5456 MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u);
5457 } else if (OpInfo.Type == InlineAsm::isInput)
5458 ArgNo++;
5459 }
5460
5461 return MadeChange;
5462 }
5463
5464 /// Check if all the uses of \p Val are equivalent (or free) zero or
5465 /// sign extensions.
5466 static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) {
5467 assert(!Val->use_empty() && "Input must have at least one use");
5468 const Instruction *FirstUser = cast<Instruction>(*Val->user_begin());
5469 bool IsSExt = isa<SExtInst>(FirstUser);
5470 Type *ExtTy = FirstUser->getType();
5471 for (const User *U : Val->users()) {
5472 const Instruction *UI = cast<Instruction>(U);
5473 if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI)))
5474 return false;
5475 Type *CurTy = UI->getType();
5476 // Same input and output types: Same instruction after CSE.
5477 if (CurTy == ExtTy)
5478 continue;
5479
5480 // If IsSExt is true, we are in this situation:
5481 // a = Val
5482 // b = sext ty1 a to ty2
5483 // c = sext ty1 a to ty3
5484 // Assuming ty2 is shorter than ty3, this could be turned into:
5485 // a = Val
5486 // b = sext ty1 a to ty2
5487 // c = sext ty2 b to ty3
5488 // However, the last sext is not free.
5489 if (IsSExt)
5490 return false;
5491
5492 // This is a ZExt, maybe this is free to extend from one type to another.
5493 // In that case, we would not account for a different use.
5494 Type *NarrowTy;
5495 Type *LargeTy;
5496 if (ExtTy->getScalarType()->getIntegerBitWidth() >
5497 CurTy->getScalarType()->getIntegerBitWidth()) {
5498 NarrowTy = CurTy;
5499 LargeTy = ExtTy;
5500 } else {
5501 NarrowTy = ExtTy;
5502 LargeTy = CurTy;
5503 }
5504
5505 if (!TLI.isZExtFree(NarrowTy, LargeTy))
5506 return false;
5507 }
5508 // All uses are the same or can be derived from one another for free.
5509 return true;
5510 }
5511
5512 /// Try to speculatively promote extensions in \p Exts and continue
5513 /// promoting through newly promoted operands recursively as far as doing so is
5514 /// profitable. Save extensions profitably moved up, in \p ProfitablyMovedExts.
5515 /// When some promotion happened, \p TPT contains the proper state to revert
5516 /// them.
5517 ///
5518 /// \return true if some promotion happened, false otherwise.
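/// For example (illustrative IR), starting from
///   %ld = load i32, i32* %p
///   %add = add nsw i32 %ld, 1
///   %ext = sext i32 %add to i64
/// the sext can be speculatively hoisted through the add, giving
///   %promoted = sext i32 %ld to i64
///   %ext = add nsw i64 %promoted, 1
/// so that the extension now sits directly on the load and may later be merged
/// into an extending load.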
5519 bool CodeGenPrepare::tryToPromoteExts(
5520 TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts,
5521 SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
5522 unsigned CreatedInstsCost) {
5523 bool Promoted = false;
5524
5525 // Iterate over all the extensions to try to promote them.
5526 for (auto *I : Exts) {
5527 // Early check if we directly have ext(load).
5528 if (isa<LoadInst>(I->getOperand(0))) {
5529 ProfitablyMovedExts.push_back(I);
5530 continue;
5531 }
5532
5533 // Check whether or not we want to do any promotion. The reason we have
5534 // this check inside the for loop is to catch the case where an extension
5535 // is directly fed by a load because in such case the extension can be moved
5536 // up without any promotion on its operands.
5537 if (!TLI->enableExtLdPromotion() || DisableExtLdPromotion)
5538 return false;
5539
5540 // Get the action to perform the promotion.
5541 TypePromotionHelper::Action TPH =
5542 TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts);
5543 // Check if we can promote.
5544 if (!TPH) {
5545 // Save the current extension as we cannot move up through its operand.
5546 ProfitablyMovedExts.push_back(I);
5547 continue;
5548 }
5549
5550 // Save the current state.
5551 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5552 TPT.getRestorationPoint();
5553 SmallVector<Instruction *, 4> NewExts;
5554 unsigned NewCreatedInstsCost = 0;
5555 unsigned ExtCost = !TLI->isExtFree(I);
5556 // Promote.
5557 Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost,
5558 &NewExts, nullptr, *TLI);
5559 assert(PromotedVal &&
5560 "TypePromotionHelper should have filtered out those cases");
5561
5562 // We would be able to merge only one extension in a load.
5563 // Therefore, if we have more than 1 new extension we heuristically
5564 // cut this search path, because it means we degrade the code quality.
5565 // With exactly 2, the transformation is neutral, because we will merge
5566 // one extension but leave one. However, we optimistically keep going,
5567 // because the new extension may be removed too.
5568 long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost;
5569 // FIXME: It would be possible to propagate a negative value instead of
5570 // conservatively ceiling it to 0.
5571 TotalCreatedInstsCost =
5572 std::max((long long)0, (TotalCreatedInstsCost - ExtCost));
5573 if (!StressExtLdPromotion &&
5574 (TotalCreatedInstsCost > 1 ||
5575 !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) {
5576 // This promotion is not profitable, rollback to the previous state, and
5577 // save the current extension in ProfitablyMovedExts as the latest
5578 // speculative promotion turned out to be unprofitable.
5579 TPT.rollback(LastKnownGood);
5580 ProfitablyMovedExts.push_back(I);
5581 continue;
5582 }
5583 // Continue promoting NewExts as far as doing so is profitable.
5584 SmallVector<Instruction *, 2> NewlyMovedExts;
5585 (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost);
5586 bool NewPromoted = false;
5587 for (auto *ExtInst : NewlyMovedExts) {
5588 Instruction *MovedExt = cast<Instruction>(ExtInst);
5589 Value *ExtOperand = MovedExt->getOperand(0);
5590 // If we have reached a load, we need this extra profitability check
5591 // as it could potentially be merged into an ext(load).
5592 if (isa<LoadInst>(ExtOperand) &&
5593 !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost ||
5594 (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI))))
5595 continue;
5596
5597 ProfitablyMovedExts.push_back(MovedExt);
5598 NewPromoted = true;
5599 }
5600
5601 // If none of speculative promotions for NewExts is profitable, rollback
5602 // and save the current extension (I) as the last profitable extension.
5603 if (!NewPromoted) {
5604 TPT.rollback(LastKnownGood);
5605 ProfitablyMovedExts.push_back(I);
5606 continue;
5607 }
5608 // The promotion is profitable.
5609 Promoted = true;
5610 }
5611 return Promoted;
5612 }
5613
5614 /// Merge redundant sexts when one dominates the other.
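/// For example, if both "%s1 = sext i32 %v to i64" in a dominating block and
/// "%s2 = sext i32 %v to i64" in a block it dominates survive to this point,
/// %s2 is replaced by %s1 and removed; sexts of the same value in blocks with
/// no dominance relation are left alone.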
5615 bool CodeGenPrepare::mergeSExts(Function &F) {
5616 bool Changed = false;
5617 for (auto &Entry : ValToSExtendedUses) {
5618 SExts &Insts = Entry.second;
5619 SExts CurPts;
5620 for (Instruction *Inst : Insts) {
5621 if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) ||
5622 Inst->getOperand(0) != Entry.first)
5623 continue;
5624 bool inserted = false;
5625 for (auto &Pt : CurPts) {
5626 if (getDT(F).dominates(Inst, Pt)) {
5627 Pt->replaceAllUsesWith(Inst);
5628 RemovedInsts.insert(Pt);
5629 Pt->removeFromParent();
5630 Pt = Inst;
5631 inserted = true;
5632 Changed = true;
5633 break;
5634 }
5635 if (!getDT(F).dominates(Pt, Inst))
5636 // Give up if we need to merge in a common dominator as the
5637 // experiments show it is not profitable.
5638 continue;
5639 Inst->replaceAllUsesWith(Pt);
5640 RemovedInsts.insert(Inst);
5641 Inst->removeFromParent();
5642 inserted = true;
5643 Changed = true;
5644 break;
5645 }
5646 if (!inserted)
5647 CurPts.push_back(Inst);
5648 }
5649 }
5650 return Changed;
5651 }
5652
5653 // Split large data structures so that the GEPs accessing them can have
5654 // smaller offsets, allowing them to be sunk to the same blocks as their users.
5655 // For example, a large struct starting from %base is split into two parts
5656 // where the second part starts from %new_base.
5657 //
5658 // Before:
5659 // BB0:
5660 // %base =
5661 //
5662 // BB1:
5663 // %gep0 = gep %base, off0
5664 // %gep1 = gep %base, off1
5665 // %gep2 = gep %base, off2
5666 //
5667 // BB2:
5668 // %load1 = load %gep0
5669 // %load2 = load %gep1
5670 // %load3 = load %gep2
5671 //
5672 // After:
5673 // BB0:
5674 // %base =
5675 // %new_base = gep %base, off0
5676 //
5677 // BB1:
5678 // %new_gep0 = %new_base
5679 // %new_gep1 = gep %new_base, off1 - off0
5680 // %new_gep2 = gep %new_base, off2 - off0
5681 //
5682 // BB2:
5683 // %load1 = load i32, i32* %new_gep0
5684 // %load2 = load i32, i32* %new_gep1
5685 // %load3 = load i32, i32* %new_gep2
5686 //
5687 // %new_gep1 and %new_gep2 can now be sunk to BB2 after the splitting because
5688 // their offsets are small enough to fit into the addressing mode.
5689 bool CodeGenPrepare::splitLargeGEPOffsets() {
5690 bool Changed = false;
5691 for (auto &Entry : LargeOffsetGEPMap) {
5692 Value *OldBase = Entry.first;
5693 SmallVectorImpl<std::pair<AssertingVH<GetElementPtrInst>, int64_t>>
5694 &LargeOffsetGEPs = Entry.second;
5695 auto compareGEPOffset =
5696 [&](const std::pair<GetElementPtrInst *, int64_t> &LHS,
5697 const std::pair<GetElementPtrInst *, int64_t> &RHS) {
5698 if (LHS.first == RHS.first)
5699 return false;
5700 if (LHS.second != RHS.second)
5701 return LHS.second < RHS.second;
5702 return LargeOffsetGEPID[LHS.first] < LargeOffsetGEPID[RHS.first];
5703 };
5704 // Sort all the GEPs of the same data structure based on their offsets.
5705 llvm::sort(LargeOffsetGEPs, compareGEPOffset);
5706 LargeOffsetGEPs.erase(
5707 std::unique(LargeOffsetGEPs.begin(), LargeOffsetGEPs.end()),
5708 LargeOffsetGEPs.end());
5709 // Skip if all the GEPs have the same offsets.
5710 if (LargeOffsetGEPs.front().second == LargeOffsetGEPs.back().second)
5711 continue;
5712 GetElementPtrInst *BaseGEP = LargeOffsetGEPs.begin()->first;
5713 int64_t BaseOffset = LargeOffsetGEPs.begin()->second;
5714 Value *NewBaseGEP = nullptr;
5715
5716 auto *LargeOffsetGEP = LargeOffsetGEPs.begin();
5717 while (LargeOffsetGEP != LargeOffsetGEPs.end()) {
5718 GetElementPtrInst *GEP = LargeOffsetGEP->first;
5719 int64_t Offset = LargeOffsetGEP->second;
5720 if (Offset != BaseOffset) {
5721 TargetLowering::AddrMode AddrMode;
5722 AddrMode.BaseOffs = Offset - BaseOffset;
5723 // The result type of the GEP might not be the type of the memory
5724 // access.
5725 if (!TLI->isLegalAddressingMode(*DL, AddrMode,
5726 GEP->getResultElementType(),
5727 GEP->getAddressSpace())) {
5728 // We need to create a new base if the offset to the current base is
5729 // too large to fit into the addressing mode. So, a very large struct
5730 // may be split into several parts.
5731 BaseGEP = GEP;
5732 BaseOffset = Offset;
5733 NewBaseGEP = nullptr;
5734 }
5735 }
5736
5737 // Generate a new GEP to replace the current one.
5738 LLVMContext &Ctx = GEP->getContext();
5739 Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
5740 Type *I8PtrTy =
5741 Type::getInt8PtrTy(Ctx, GEP->getType()->getPointerAddressSpace());
5742 Type *I8Ty = Type::getInt8Ty(Ctx);
5743
5744 if (!NewBaseGEP) {
5745 // Create a new base if we don't have one yet. Find the insertion
5746 // point for the new base first.
5747 BasicBlock::iterator NewBaseInsertPt;
5748 BasicBlock *NewBaseInsertBB;
5749 if (auto *BaseI = dyn_cast<Instruction>(OldBase)) {
5750 // If the base of the struct is an instruction, the new base will be
5751 // inserted close to it.
5752 NewBaseInsertBB = BaseI->getParent();
5753 if (isa<PHINode>(BaseI))
5754 NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
5755 else if (InvokeInst *Invoke = dyn_cast<InvokeInst>(BaseI)) {
5756 NewBaseInsertBB =
5757 SplitEdge(NewBaseInsertBB, Invoke->getNormalDest());
5758 NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
5759 } else
5760 NewBaseInsertPt = std::next(BaseI->getIterator());
5761 } else {
5762 // If the current base is an argument or global value, the new base
5763 // will be inserted in the entry block.
5764 NewBaseInsertBB = &BaseGEP->getFunction()->getEntryBlock();
5765 NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
5766 }
5767 IRBuilder<> NewBaseBuilder(NewBaseInsertBB, NewBaseInsertPt);
5768 // Create a new base.
5769 Value *BaseIndex = ConstantInt::get(IntPtrTy, BaseOffset);
5770 NewBaseGEP = OldBase;
5771 if (NewBaseGEP->getType() != I8PtrTy)
5772 NewBaseGEP = NewBaseBuilder.CreatePointerCast(NewBaseGEP, I8PtrTy);
5773 NewBaseGEP =
5774 NewBaseBuilder.CreateGEP(I8Ty, NewBaseGEP, BaseIndex, "splitgep");
5775 NewGEPBases.insert(NewBaseGEP);
5776 }
5777
5778 IRBuilder<> Builder(GEP);
5779 Value *NewGEP = NewBaseGEP;
5780 if (Offset == BaseOffset) {
5781 if (GEP->getType() != I8PtrTy)
5782 NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType());
5783 } else {
5784 // Calculate the new offset for the new GEP.
5785 Value *Index = ConstantInt::get(IntPtrTy, Offset - BaseOffset);
5786 NewGEP = Builder.CreateGEP(I8Ty, NewBaseGEP, Index);
5787
5788 if (GEP->getType() != I8PtrTy)
5789 NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType());
5790 }
5791 GEP->replaceAllUsesWith(NewGEP);
5792 LargeOffsetGEPID.erase(GEP);
5793 LargeOffsetGEP = LargeOffsetGEPs.erase(LargeOffsetGEP);
5794 GEP->eraseFromParent();
5795 Changed = true;
5796 }
5797 }
5798 return Changed;
5799 }
5800
5801 bool CodeGenPrepare::optimizePhiType(
5802 PHINode *I, SmallPtrSetImpl<PHINode *> &Visited,
5803 SmallPtrSetImpl<Instruction *> &DeletedInstrs) {
5804 // We are looking for a collection of interconnected phi nodes that together
5805 // only use loads/bitcasts and are used by stores/bitcasts, and the bitcasts
5806 // are of the same type. Convert the whole set of nodes to the type of the
5807 // bitcast.
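// An illustrative sketch (names are hypothetical): a float-typed phi that is
// only fed and consumed through i32 bitcasts can be rewritten as an i32 phi,
// e.g. roughly
//   %a.bc = bitcast i32 %a to float
//   %b.bc = bitcast i32 %b to float
//   %phi  = phi float [ %a.bc, %bb0 ], [ %b.bc, %bb1 ]
//   %r.bc = bitcast float %phi to i32
//   store i32 %r.bc, i32* %p
// becomes
//   %phi = phi i32 [ %a, %bb0 ], [ %b, %bb1 ]
//   store i32 %phi, i32* %p
// so the value never has to move between integer and FP register classes.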
5808 Type *PhiTy = I->getType();
5809 Type *ConvertTy = nullptr;
5810 if (Visited.count(I) ||
5811 (!I->getType()->isIntegerTy() && !I->getType()->isFloatingPointTy()))
5812 return false;
5813
5814 SmallVector<Instruction *, 4> Worklist;
5815 Worklist.push_back(cast<Instruction>(I));
5816 SmallPtrSet<PHINode *, 4> PhiNodes;
5817 PhiNodes.insert(I);
5818 Visited.insert(I);
5819 SmallPtrSet<Instruction *, 4> Defs;
5820 SmallPtrSet<Instruction *, 4> Uses;
5821 // This works by adding extra bitcasts between load/stores and removing
5822 // existing bitcasts. If we have a phi(bitcast(load)) or a store(bitcast(phi)),
5823 // we can get into a situation where we remove a bitcast in one iteration
5824 // just to add it again in the next. We need to ensure that at least one of
5825 // the bitcasts we remove is anchored to something that will not change back.
5826 bool AnyAnchored = false;
5827
5828 while (!Worklist.empty()) {
5829 Instruction *II = Worklist.pop_back_val();
5830
5831 if (auto *Phi = dyn_cast<PHINode>(II)) {
5832 // Handle Defs, which might also be PHIs.
5833 for (Value *V : Phi->incoming_values()) {
5834 if (auto *OpPhi = dyn_cast<PHINode>(V)) {
5835 if (!PhiNodes.count(OpPhi)) {
5836 if (Visited.count(OpPhi))
5837 return false;
5838 PhiNodes.insert(OpPhi);
5839 Visited.insert(OpPhi);
5840 Worklist.push_back(OpPhi);
5841 }
5842 } else if (auto *OpLoad = dyn_cast<LoadInst>(V)) {
5843 if (!OpLoad->isSimple())
5844 return false;
5845 if (!Defs.count(OpLoad)) {
5846 Defs.insert(OpLoad);
5847 Worklist.push_back(OpLoad);
5848 }
5849 } else if (auto *OpEx = dyn_cast<ExtractElementInst>(V)) {
5850 if (!Defs.count(OpEx)) {
5851 Defs.insert(OpEx);
5852 Worklist.push_back(OpEx);
5853 }
5854 } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) {
5855 if (!ConvertTy)
5856 ConvertTy = OpBC->getOperand(0)->getType();
5857 if (OpBC->getOperand(0)->getType() != ConvertTy)
5858 return false;
5859 if (!Defs.count(OpBC)) {
5860 Defs.insert(OpBC);
5861 Worklist.push_back(OpBC);
5862 AnyAnchored |= !isa<LoadInst>(OpBC->getOperand(0)) &&
5863 !isa<ExtractElementInst>(OpBC->getOperand(0));
5864 }
5865 } else if (!isa<UndefValue>(V)) {
5866 return false;
5867 }
5868 }
5869 }
5870
5871 // Handle uses, which might also be PHIs.
5872 for (User *V : II->users()) {
5873 if (auto *OpPhi = dyn_cast<PHINode>(V)) {
5874 if (!PhiNodes.count(OpPhi)) {
5875 if (Visited.count(OpPhi))
5876 return false;
5877 PhiNodes.insert(OpPhi);
5878 Visited.insert(OpPhi);
5879 Worklist.push_back(OpPhi);
5880 }
5881 } else if (auto *OpStore = dyn_cast<StoreInst>(V)) {
5882 if (!OpStore->isSimple() || OpStore->getOperand(0) != II)
5883 return false;
5884 Uses.insert(OpStore);
5885 } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) {
5886 if (!ConvertTy)
5887 ConvertTy = OpBC->getType();
5888 if (OpBC->getType() != ConvertTy)
5889 return false;
5890 Uses.insert(OpBC);
5891 AnyAnchored |=
5892 any_of(OpBC->users(), [](User *U) { return !isa<StoreInst>(U); });
5893 } else {
5894 return false;
5895 }
5896 }
5897 }
5898
5899 if (!ConvertTy || !AnyAnchored || !TLI->shouldConvertPhiType(PhiTy, ConvertTy))
5900 return false;
5901
5902 LLVM_DEBUG(dbgs() << "Converting " << *I << "\n and connected nodes to "
5903 << *ConvertTy << "\n");
5904
5905 // Create all the new phi nodes of the new type, and bitcast any loads to the
5906 // correct type.
5907 ValueToValueMap ValMap;
5908 ValMap[UndefValue::get(PhiTy)] = UndefValue::get(ConvertTy);
5909 for (Instruction *D : Defs) {
5910 if (isa<BitCastInst>(D)) {
5911 ValMap[D] = D->getOperand(0);
5912 DeletedInstrs.insert(D);
5913 } else {
5914 ValMap[D] =
5915 new BitCastInst(D, ConvertTy, D->getName() + ".bc", D->getNextNode());
5916 }
5917 }
5918 for (PHINode *Phi : PhiNodes)
5919 ValMap[Phi] = PHINode::Create(ConvertTy, Phi->getNumIncomingValues(),
5920 Phi->getName() + ".tc", Phi);
5921 // Pipe together all the PhiNodes.
5922 for (PHINode *Phi : PhiNodes) {
5923 PHINode *NewPhi = cast<PHINode>(ValMap[Phi]);
5924 for (int i = 0, e = Phi->getNumIncomingValues(); i < e; i++)
5925 NewPhi->addIncoming(ValMap[Phi->getIncomingValue(i)],
5926 Phi->getIncomingBlock(i));
5927 Visited.insert(NewPhi);
5928 }
5929 // Finally, wire up the stores and bitcasts.
5930 for (Instruction *U : Uses) {
5931 if (isa<BitCastInst>(U)) {
5932 DeletedInstrs.insert(U);
5933 U->replaceAllUsesWith(ValMap[U->getOperand(0)]);
5934 } else {
5935 U->setOperand(0,
5936 new BitCastInst(ValMap[U->getOperand(0)], PhiTy, "bc", U));
5937 }
5938 }
5939
5940 // Save the removed phis to be deleted later.
5941 for (PHINode *Phi : PhiNodes)
5942 DeletedInstrs.insert(Phi);
5943 return true;
5944 }
5945
5946 bool CodeGenPrepare::optimizePhiTypes(Function &F) {
5947 if (!OptimizePhiTypes)
5948 return false;
5949
5950 bool Changed = false;
5951 SmallPtrSet<PHINode *, 4> Visited;
5952 SmallPtrSet<Instruction *, 4> DeletedInstrs;
5953
5954 // Attempt to optimize all the phis in the function to the correct type.
5955 for (auto &BB : F)
5956 for (auto &Phi : BB.phis())
5957 Changed |= optimizePhiType(&Phi, Visited, DeletedInstrs);
5958
5959 // Remove any old phis that have been converted.
5960 for (auto *I : DeletedInstrs) {
5961 I->replaceAllUsesWith(UndefValue::get(I->getType()));
5962 I->eraseFromParent();
5963 }
5964
5965 return Changed;
5966 }
5967
5968 /// Return true if an ext(load) can be formed from an extension in
5969 /// \p MovedExts.
5970 bool CodeGenPrepare::canFormExtLd(
5971 const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI,
5972 Instruction *&Inst, bool HasPromoted) {
5973 for (auto *MovedExtInst : MovedExts) {
5974 if (isa<LoadInst>(MovedExtInst->getOperand(0))) {
5975 LI = cast<LoadInst>(MovedExtInst->getOperand(0));
5976 Inst = MovedExtInst;
5977 break;
5978 }
5979 }
5980 if (!LI)
5981 return false;
5982
5983 // If they're already in the same block, there's nothing to do.
5984 // Make the cheap checks first if we did not promote.
5985 // If we promoted, we need to check if it is indeed profitable.
5986 if (!HasPromoted && LI->getParent() == Inst->getParent())
5987 return false;
5988
5989 return TLI->isExtLoad(LI, Inst, *DL);
5990 }
5991
5992 /// Move a zext or sext fed by a load into the same basic block as the load,
5993 /// unless conditions are unfavorable. This allows SelectionDAG to fold the
5994 /// extend into the load.
5995 ///
5996 /// E.g.,
5997 /// \code
5998 /// %ld = load i32* %addr
5999 /// %add = add nuw i32 %ld, 4
6000 /// %zext = zext i32 %add to i64
6001 /// \endcode
6002 /// =>
6003 /// \code
6004 /// %ld = load i32* %addr
6005 /// %zext = zext i32 %ld to i64
6006 /// %add = add nuw i64 %zext, 4
6007 /// \endcode
6008 /// Note that the promotion of %add to i64 is done in tryToPromoteExts(), which
6009 /// allows us to match zext(load i32*) to i64.
6010 ///
6011 /// Also, try to promote the computations used to obtain a sign-extended
6012 /// value that is used in memory accesses.
6013 /// E.g.,
6014 /// \code
6015 /// a = add nsw i32 b, 3
6016 /// d = sext i32 a to i64
6017 /// e = getelementptr ..., i64 d
6018 /// \endcode
6019 /// =>
6020 /// \code
6021 /// f = sext i32 b to i64
6022 /// a = add nsw i64 f, 3
6023 /// e = getelementptr ..., i64 a
6024 /// \endcode
6025 ///
6026 /// \p Inst[in/out] the extension may be modified during the process if some
6027 /// promotions apply.
6028 bool CodeGenPrepare::optimizeExt(Instruction *&Inst) {
6029 bool AllowPromotionWithoutCommonHeader = false;
6030 /// See if this is an interesting sext operation for address type promotion
6031 /// before trying to promote it, e.g., one with the right type that is used
6032 /// in memory accesses.
6033 bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion(
6034 *Inst, AllowPromotionWithoutCommonHeader);
6035 TypePromotionTransaction TPT(RemovedInsts);
6036 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
6037 TPT.getRestorationPoint();
6038 SmallVector<Instruction *, 1> Exts;
6039 SmallVector<Instruction *, 2> SpeculativelyMovedExts;
6040 Exts.push_back(Inst);
6041
6042 bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts);
6043
6044 // Look for a load being extended.
6045 LoadInst *LI = nullptr;
6046 Instruction *ExtFedByLoad;
6047
6048 // Try to promote a chain of computation if it allows us to form an extended
6049 // load.
6050 if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) {
6051 assert(LI && ExtFedByLoad && "Expect a valid load and extension");
6052 TPT.commit();
6053 // Move the extend into the same block as the load.
6054 ExtFedByLoad->moveAfter(LI);
6055 ++NumExtsMoved;
6056 Inst = ExtFedByLoad;
6057 return true;
6058 }
6059
6060 // Continue promoting SExts if the target considers address type promotion worthwhile.
6061 if (ATPConsiderable &&
6062 performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader,
6063 HasPromoted, TPT, SpeculativelyMovedExts))
6064 return true;
6065
6066 TPT.rollback(LastKnownGood);
6067 return false;
6068 }
6069
6070 // Perform address type promotion if doing so is profitable.
6071 // If AllowPromotionWithoutCommonHeader == false, we should find other sext
6072 // instructions that sign extended the same initial value. However, if
6073 // AllowPromotionWithoutCommonHeader == true, we assume promoting the
6074 // extension is profitable on its own.
6075 bool CodeGenPrepare::performAddressTypePromotion(
6076 Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
6077 bool HasPromoted, TypePromotionTransaction &TPT,
6078 SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) {
6079 bool Promoted = false;
6080 SmallPtrSet<Instruction *, 1> UnhandledExts;
6081 bool AllSeenFirst = true;
6082 for (auto *I : SpeculativelyMovedExts) {
6083 Value *HeadOfChain = I->getOperand(0);
6084 DenseMap<Value *, Instruction *>::iterator AlreadySeen =
6085 SeenChainsForSExt.find(HeadOfChain);
6086 // If there is an unhandled SExt which has the same header, try to promote
6087 // it as well.
6088 if (AlreadySeen != SeenChainsForSExt.end()) {
6089 if (AlreadySeen->second != nullptr)
6090 UnhandledExts.insert(AlreadySeen->second);
6091 AllSeenFirst = false;
6092 }
6093 }
6094
6095 if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader &&
6096 SpeculativelyMovedExts.size() == 1)) {
6097 TPT.commit();
6098 if (HasPromoted)
6099 Promoted = true;
6100 for (auto *I : SpeculativelyMovedExts) {
6101 Value *HeadOfChain = I->getOperand(0);
6102 SeenChainsForSExt[HeadOfChain] = nullptr;
6103 ValToSExtendedUses[HeadOfChain].push_back(I);
6104 }
6105 // Update Inst as the promotion happened.
6106 Inst = SpeculativelyMovedExts.pop_back_val();
6107 } else {
6108 // This is the first chain visited from the header; keep the current chain
6109 // as unhandled. Defer promoting it until we encounter another SExt
6110 // chain derived from the same header.
6111 for (auto *I : SpeculativelyMovedExts) {
6112 Value *HeadOfChain = I->getOperand(0);
6113 SeenChainsForSExt[HeadOfChain] = Inst;
6114 }
6115 return false;
6116 }
6117
6118 if (!AllSeenFirst && !UnhandledExts.empty())
6119 for (auto *VisitedSExt : UnhandledExts) {
6120 if (RemovedInsts.count(VisitedSExt))
6121 continue;
6122 TypePromotionTransaction TPT(RemovedInsts);
6123 SmallVector<Instruction *, 1> Exts;
6124 SmallVector<Instruction *, 2> Chains;
6125 Exts.push_back(VisitedSExt);
6126 bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains);
6127 TPT.commit();
6128 if (HasPromoted)
6129 Promoted = true;
6130 for (auto *I : Chains) {
6131 Value *HeadOfChain = I->getOperand(0);
6132 // Mark this as handled.
6133 SeenChainsForSExt[HeadOfChain] = nullptr;
6134 ValToSExtendedUses[HeadOfChain].push_back(I);
6135 }
6136 }
6137 return Promoted;
6138 }
6139
6140 bool CodeGenPrepare::optimizeExtUses(Instruction *I) {
6141 BasicBlock *DefBB = I->getParent();
6142
6143 // If the result of a {s|z}ext and its source are both live out, rewrite all
6144 // other uses of the source with the result of the extension.
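// An illustrative sketch (names are hypothetical):
//   DefBB:
//     %src = ...
//     %ext = zext i32 %src to i64   ; both %src and %ext are live out
//   UserBB:
//     use(i32 %src)
// becomes
//   UserBB:
//     %src.tr = trunc i64 %ext to i32
//     use(i32 %src.tr)
// so only %ext needs to stay live across the block boundary.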
6145 Value *Src = I->getOperand(0);
6146 if (Src->hasOneUse())
6147 return false;
6148
6149 // Only do this xform if truncating is free.
6150 if (!TLI->isTruncateFree(I->getType(), Src->getType()))
6151 return false;
6152
6153 // Only safe to perform the optimization if the source is also defined in
6154 // this block.
6155 if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
6156 return false;
6157
6158 bool DefIsLiveOut = false;
6159 for (User *U : I->users()) {
6160 Instruction *UI = cast<Instruction>(U);
6161
6162 // Figure out which BB this ext is used in.
6163 BasicBlock *UserBB = UI->getParent();
6164 if (UserBB == DefBB) continue;
6165 DefIsLiveOut = true;
6166 break;
6167 }
6168 if (!DefIsLiveOut)
6169 return false;
6170
6171 // Make sure none of the uses are PHI nodes.
6172 for (User *U : Src->users()) {
6173 Instruction *UI = cast<Instruction>(U);
6174 BasicBlock *UserBB = UI->getParent();
6175 if (UserBB == DefBB) continue;
6176 // Be conservative. We don't want this xform to end up introducing
6177 // reloads just before load / store instructions.
6178 if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI))
6179 return false;
6180 }
6181
6182 // InsertedTruncs - Only insert one trunc in each block.
6183 DenseMap<BasicBlock*, Instruction*> InsertedTruncs;
6184
6185 bool MadeChange = false;
6186 for (Use &U : Src->uses()) {
6187 Instruction *User = cast<Instruction>(U.getUser());
6188
6189 // Figure out which BB this ext is used in.
6190 BasicBlock *UserBB = User->getParent();
6191 if (UserBB == DefBB) continue;
6192
6193 // Both src and def are live in this block. Rewrite the use.
6194 Instruction *&InsertedTrunc = InsertedTruncs[UserBB];
6195
6196 if (!InsertedTrunc) {
6197 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
6198 assert(InsertPt != UserBB->end());
6199 InsertedTrunc = new TruncInst(I, Src->getType(), "", &*InsertPt);
6200 InsertedInsts.insert(InsertedTrunc);
6201 }
6202
6203 // Replace a use of the {s|z}ext source with a use of the result.
6204 U = InsertedTrunc;
6205 ++NumExtUses;
6206 MadeChange = true;
6207 }
6208
6209 return MadeChange;
6210 }
6211
6212 // Find loads whose uses only use some of the loaded value's bits. Add an "and"
6213 // just after the load if the target can fold this into one extload instruction,
6214 // with the hope of eliminating some of the other later "and" instructions using
6215 // the loaded value. "and"s that are made trivially redundant by the insertion
6216 // of the new "and" are removed by this function, while others (e.g. those whose
6217 // path from the load goes through a phi) are left for isel to potentially
6218 // remove.
6219 //
6220 // For example:
6221 //
6222 // b0:
6223 // x = load i32
6224 // ...
6225 // b1:
6226 // y = and x, 0xff
6227 // z = use y
6228 //
6229 // becomes:
6230 //
6231 // b0:
6232 // x = load i32
6233 // x' = and x, 0xff
6234 // ...
6235 // b1:
6236 // z = use x'
6237 //
6238 // whereas:
6239 //
6240 // b0:
6241 // x1 = load i32
6242 // ...
6243 // b1:
6244 // x2 = load i32
6245 // ...
6246 // b2:
6247 // x = phi x1, x2
6248 // y = and x, 0xff
6249 //
6250 // becomes (after a call to optimizeLoadExt for each load):
6251 //
6252 // b0:
6253 // x1 = load i32
6254 // x1' = and x1, 0xff
6255 // ...
6256 // b1:
6257 // x2 = load i32
6258 // x2' = and x2, 0xff
6259 // ...
6260 // b2:
6261 // x = phi x1', x2'
6262 // y = and x, 0xff
6263 bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) {
6264 if (!Load->isSimple() || !Load->getType()->isIntOrPtrTy())
6265 return false;
6266
6267 // Skip loads we've already transformed.
6268 if (Load->hasOneUse() &&
6269 InsertedInsts.count(cast<Instruction>(*Load->user_begin())))
6270 return false;
6271
6272 // Look at all uses of Load, looking through phis, to determine how many bits
6273 // of the loaded value are needed.
6274 SmallVector<Instruction *, 8> WorkList;
6275 SmallPtrSet<Instruction *, 16> Visited;
6276 SmallVector<Instruction *, 8> AndsToMaybeRemove;
6277 for (auto *U : Load->users())
6278 WorkList.push_back(cast<Instruction>(U));
6279
6280 EVT LoadResultVT = TLI->getValueType(*DL, Load->getType());
6281 unsigned BitWidth = LoadResultVT.getSizeInBits();
6282 APInt DemandBits(BitWidth, 0);
6283 APInt WidestAndBits(BitWidth, 0);
6284
6285 while (!WorkList.empty()) {
6286 Instruction *I = WorkList.back();
6287 WorkList.pop_back();
6288
6289 // Break use-def graph loops.
6290 if (!Visited.insert(I).second)
6291 continue;
6292
6293 // For a PHI node, push all of its users.
6294 if (auto *Phi = dyn_cast<PHINode>(I)) {
6295 for (auto *U : Phi->users())
6296 WorkList.push_back(cast<Instruction>(U));
6297 continue;
6298 }
6299
6300 switch (I->getOpcode()) {
6301 case Instruction::And: {
6302 auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1));
6303 if (!AndC)
6304 return false;
6305 APInt AndBits = AndC->getValue();
6306 DemandBits |= AndBits;
6307 // Keep track of the widest and mask we see.
6308 if (AndBits.ugt(WidestAndBits))
6309 WidestAndBits = AndBits;
6310 if (AndBits == WidestAndBits && I->getOperand(0) == Load)
6311 AndsToMaybeRemove.push_back(I);
6312 break;
6313 }
6314
6315 case Instruction::Shl: {
6316 auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1));
6317 if (!ShlC)
6318 return false;
6319 uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1);
6320 DemandBits.setLowBits(BitWidth - ShiftAmt);
6321 break;
6322 }
6323
6324 case Instruction::Trunc: {
6325 EVT TruncVT = TLI->getValueType(*DL, I->getType());
6326 unsigned TruncBitWidth = TruncVT.getSizeInBits();
6327 DemandBits.setLowBits(TruncBitWidth);
6328 break;
6329 }
6330
6331 default:
6332 return false;
6333 }
6334 }
6335
6336 uint32_t ActiveBits = DemandBits.getActiveBits();
6337 // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the
6338 // target even if isLoadExtLegal says an i1 EXTLOAD is valid. For example,
6339 // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but
6340 // (and (load x) 1) is not matched as a single instruction, rather as a LDR
6341 // followed by an AND.
6342 // TODO: Look into removing this restriction by fixing backends to either
6343 // return false for isLoadExtLegal for i1 or have them select this pattern to
6344 // a single instruction.
6345 //
6346 // Also avoid hoisting if we didn't see any ands with the exact DemandBits
6347 // mask, since these are the only ands that will be removed by isel.
6348 if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) ||
6349 WidestAndBits != DemandBits)
6350 return false;
6351
6352 LLVMContext &Ctx = Load->getType()->getContext();
6353 Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits);
6354 EVT TruncVT = TLI->getValueType(*DL, TruncTy);
6355
6356 // Reject cases that won't be matched as extloads.
6357 if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() ||
6358 !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT))
6359 return false;
6360
6361 IRBuilder<> Builder(Load->getNextNode());
6362 auto *NewAnd = cast<Instruction>(
6363 Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits)));
6364 // Mark this instruction as "inserted by CGP", so that other
6365 // optimizations don't touch it.
6366 InsertedInsts.insert(NewAnd);
6367
6368 // Replace all uses of load with new and (except for the use of load in the
6369 // new and itself).
6370 Load->replaceAllUsesWith(NewAnd);
6371 NewAnd->setOperand(0, Load);
6372
6373 // Remove any and instructions that are now redundant.
6374 for (auto *And : AndsToMaybeRemove)
6375 // Check that the and mask is the same as the one we decided to put on the
6376 // new and.
6377 if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) {
6378 And->replaceAllUsesWith(NewAnd);
6379 if (&*CurInstIterator == And)
6380 CurInstIterator = std::next(And->getIterator());
6381 And->eraseFromParent();
6382 ++NumAndUses;
6383 }
6384
6385 ++NumAndsAdded;
6386 return true;
6387 }
6388
6389 /// Check if V (an operand of a select instruction) is an expensive instruction
6390 /// that is only used once.
6391 static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) {
6392 auto *I = dyn_cast<Instruction>(V);
6393 // If it's safe to speculatively execute, then it should not have side
6394 // effects; therefore, it's safe to sink and possibly *not* execute.
6395 return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) &&
6396 TTI->getUserCost(I, TargetTransformInfo::TCK_SizeAndLatency) >=
6397 TargetTransformInfo::TCC_Expensive;
6398 }
6399
6400 /// Returns true if a SelectInst should be turned into an explicit branch.
6401 static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI,
6402 const TargetLowering *TLI,
6403 SelectInst *SI) {
6404 // If even a predictable select is cheap, then a branch can't be cheaper.
6405 if (!TLI->isPredictableSelectExpensive())
6406 return false;
6407
6408 // FIXME: This should use the same heuristics as IfConversion to determine
6409 // whether a select is better represented as a branch.
6410
6411 // If metadata tells us that the select condition is obviously predictable,
6412 // then we want to replace the select with a branch.
6413 uint64_t TrueWeight, FalseWeight;
6414 if (SI->extractProfMetadata(TrueWeight, FalseWeight)) {
6415 uint64_t Max = std::max(TrueWeight, FalseWeight);
6416 uint64_t Sum = TrueWeight + FalseWeight;
6417 if (Sum != 0) {
6418 auto Probability = BranchProbability::getBranchProbability(Max, Sum);
6419 if (Probability > TLI->getPredictableBranchThreshold())
6420 return true;
6421 }
6422 }
6423
6424 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
6425
6426 // If a branch is predictable, an out-of-order CPU can avoid blocking on its
6427 // comparison condition. If the compare has more than one use, there's
6428 // probably another cmov or setcc around, so it's not worth emitting a branch.
6429 if (!Cmp || !Cmp->hasOneUse())
6430 return false;
6431
6432 // If either operand of the select is expensive and only needed on one side
6433 // of the select, we should form a branch.
6434 if (sinkSelectOperand(TTI, SI->getTrueValue()) ||
6435 sinkSelectOperand(TTI, SI->getFalseValue()))
6436 return true;
6437
6438 return false;
6439 }
6440
6441 /// If \p isTrue is true, return the true value of \p SI, otherwise return
6442 /// false value of \p SI. If the true/false value of \p SI is defined by any
6443 /// select instructions in \p Selects, look through the defining select
6444 /// instruction until the true/false value is not defined in \p Selects.
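/// E.g. (an illustrative sketch), with both selects in \p Selects:
/// \code
///   %s1 = select i1 %cond, i32 %a, i32 %b
///   %s2 = select i1 %cond, i32 %s1, i32 %c
/// \endcode
/// getTrueOrFalseValue(%s2, /*isTrue=*/true, Selects) looks through %s1 and
/// returns %a.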
6445 static Value *getTrueOrFalseValue(
6446 SelectInst *SI, bool isTrue,
6447 const SmallPtrSet<const Instruction *, 2> &Selects) {
6448 Value *V = nullptr;
6449
6450 for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI);
6451 DefSI = dyn_cast<SelectInst>(V)) {
6452 assert(DefSI->getCondition() == SI->getCondition() &&
6453 "The condition of DefSI does not match with SI");
6454 V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue());
6455 }
6456
6457 assert(V && "Failed to get select true/false value");
6458 return V;
6459 }
6460
6461 bool CodeGenPrepare::optimizeShiftInst(BinaryOperator *Shift) {
6462 assert(Shift->isShift() && "Expected a shift");
6463
6464 // If this is (1) a vector shift, (2) shifts by scalars are cheaper than
6465 // general vector shifts, and (3) the shift amount is a select-of-splatted
6466 // values, hoist the shifts before the select:
6467 // shift Op0, (select Cond, TVal, FVal) -->
6468 // select Cond, (shift Op0, TVal), (shift Op0, FVal)
6469 //
6470 // This is inverting a generic IR transform when we know that the cost of a
6471 // general vector shift is more than the cost of 2 shift-by-scalars.
6472 // We can't do this effectively in SDAG because we may not be able to
6473 // determine if the select operands are splats from within a basic block.
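// A concrete sketch of the rewrite (values are hypothetical):
//   %amt = select i1 %c, <4 x i32> <i32 2, i32 2, i32 2, i32 2>,
//                        <4 x i32> <i32 3, i32 3, i32 3, i32 3>
//   %sh  = shl <4 x i32> %x, %amt
// becomes
//   %t  = shl <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
//   %f  = shl <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
//   %sh = select i1 %c, <4 x i32> %t, <4 x i32> %f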
6474 Type *Ty = Shift->getType();
6475 if (!Ty->isVectorTy() || !TLI->isVectorShiftByScalarCheap(Ty))
6476 return false;
6477 Value *Cond, *TVal, *FVal;
6478 if (!match(Shift->getOperand(1),
6479 m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
6480 return false;
6481 if (!isSplatValue(TVal) || !isSplatValue(FVal))
6482 return false;
6483
6484 IRBuilder<> Builder(Shift);
6485 BinaryOperator::BinaryOps Opcode = Shift->getOpcode();
6486 Value *NewTVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), TVal);
6487 Value *NewFVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), FVal);
6488 Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal);
6489 Shift->replaceAllUsesWith(NewSel);
6490 Shift->eraseFromParent();
6491 return true;
6492 }
6493
6494 bool CodeGenPrepare::optimizeFunnelShift(IntrinsicInst *Fsh) {
6495 Intrinsic::ID Opcode = Fsh->getIntrinsicID();
6496 assert((Opcode == Intrinsic::fshl || Opcode == Intrinsic::fshr) &&
6497 "Expected a funnel shift");
6498
6499 // If this is (1) a vector funnel shift, (2) shifts by scalars are cheaper
6500 // than general vector shifts, and (3) the shift amount is select-of-splatted
6501 // values, hoist the funnel shifts before the select:
6502 // fsh Op0, Op1, (select Cond, TVal, FVal) -->
6503 // select Cond, (fsh Op0, Op1, TVal), (fsh Op0, Op1, FVal)
6504 //
6505 // This is inverting a generic IR transform when we know that the cost of a
6506 // general vector shift is more than the cost of 2 shift-by-scalars.
6507 // We can't do this effectively in SDAG because we may not be able to
6508 // determine if the select operands are splats from within a basic block.
6509 Type *Ty = Fsh->getType();
6510 if (!Ty->isVectorTy() || !TLI->isVectorShiftByScalarCheap(Ty))
6511 return false;
6512 Value *Cond, *TVal, *FVal;
6513 if (!match(Fsh->getOperand(2),
6514 m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
6515 return false;
6516 if (!isSplatValue(TVal) || !isSplatValue(FVal))
6517 return false;
6518
6519 IRBuilder<> Builder(Fsh);
6520 Value *X = Fsh->getOperand(0), *Y = Fsh->getOperand(1);
6521 Value *NewTVal = Builder.CreateIntrinsic(Opcode, Ty, { X, Y, TVal });
6522 Value *NewFVal = Builder.CreateIntrinsic(Opcode, Ty, { X, Y, FVal });
6523 Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal);
6524 Fsh->replaceAllUsesWith(NewSel);
6525 Fsh->eraseFromParent();
6526 return true;
6527 }
6528
6529 /// If we have a SelectInst that will likely profit from branch prediction,
6530 /// turn it into a branch.
6531 bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
6532 if (DisableSelectToBranch)
6533 return false;
6534
6535 // Find all consecutive select instructions that share the same condition.
6536 SmallVector<SelectInst *, 2> ASI;
6537 ASI.push_back(SI);
6538 for (BasicBlock::iterator It = ++BasicBlock::iterator(SI);
6539 It != SI->getParent()->end(); ++It) {
6540 SelectInst *I = dyn_cast<SelectInst>(&*It);
6541 if (I && SI->getCondition() == I->getCondition()) {
6542 ASI.push_back(I);
6543 } else {
6544 break;
6545 }
6546 }
6547
6548 SelectInst *LastSI = ASI.back();
6549 // Increment the current iterator to skip all the rest of select instructions
6550 // because they will be either "not lowered" or "all lowered" to branch.
6551 CurInstIterator = std::next(LastSI->getIterator());
6552
6553 bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1);
6554
6555 // Can we convert the 'select' to CF ?
6556 if (VectorCond || SI->getMetadata(LLVMContext::MD_unpredictable))
6557 return false;
6558
6559 TargetLowering::SelectSupportKind SelectKind;
6560 if (VectorCond)
6561 SelectKind = TargetLowering::VectorMaskSelect;
6562 else if (SI->getType()->isVectorTy())
6563 SelectKind = TargetLowering::ScalarCondVectorVal;
6564 else
6565 SelectKind = TargetLowering::ScalarValSelect;
6566
6567 if (TLI->isSelectSupported(SelectKind) &&
6568 (!isFormingBranchFromSelectProfitable(TTI, TLI, SI) || OptSize ||
6569 llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI.get())))
6570 return false;
6571
6572 // The DominatorTree needs to be rebuilt by any consumers after this
6573 // transformation. We simply reset here rather than setting the ModifiedDT
6574 // flag to avoid restarting the function walk in runOnFunction for each
6575 // select optimized.
6576 DT.reset();
6577
6578 // Transform a sequence like this:
6579 // start:
6580 // %cmp = cmp uge i32 %a, %b
6581 // %sel = select i1 %cmp, i32 %c, i32 %d
6582 //
6583 // Into:
6584 // start:
6585 // %cmp = cmp uge i32 %a, %b
6586 // %cmp.frozen = freeze %cmp
6587 // br i1 %cmp.frozen, label %select.true, label %select.false
6588 // select.true:
6589 // br label %select.end
6590 // select.false:
6591 // br label %select.end
6592 // select.end:
6593 // %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ]
6594 //
6595 // %cmp should be frozen, otherwise it may introduce undefined behavior.
6596 // In addition, we may sink instructions that produce %c or %d from
6597 // the entry block into the destination(s) of the new branch.
6598 // If the true or false blocks do not contain a sunken instruction, that
6599 // block and its branch may be optimized away. In that case, one side of the
6600 // first branch will point directly to select.end, and the corresponding PHI
6601 // predecessor block will be the start block.
6602
6603 // First, we split the block containing the select into 2 blocks.
6604 BasicBlock *StartBlock = SI->getParent();
6605 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(LastSI));
6606 BasicBlock *EndBlock = StartBlock->splitBasicBlock(SplitPt, "select.end");
6607 BFI->setBlockFreq(EndBlock, BFI->getBlockFreq(StartBlock).getFrequency());
6608
6609 // Delete the unconditional branch that was just created by the split.
6610 StartBlock->getTerminator()->eraseFromParent();
6611
6612 // These are the new basic blocks for the conditional branch.
6613 // At least one will become an actual new basic block.
6614 BasicBlock *TrueBlock = nullptr;
6615 BasicBlock *FalseBlock = nullptr;
6616 BranchInst *TrueBranch = nullptr;
6617 BranchInst *FalseBranch = nullptr;
6618
6619 // Sink expensive instructions into the conditional blocks to avoid executing
6620 // them speculatively.
6621 for (SelectInst *SI : ASI) {
6622 if (sinkSelectOperand(TTI, SI->getTrueValue())) {
6623 if (TrueBlock == nullptr) {
6624 TrueBlock = BasicBlock::Create(SI->getContext(), "select.true.sink",
6625 EndBlock->getParent(), EndBlock);
6626 TrueBranch = BranchInst::Create(EndBlock, TrueBlock);
6627 TrueBranch->setDebugLoc(SI->getDebugLoc());
6628 }
6629 auto *TrueInst = cast<Instruction>(SI->getTrueValue());
6630 TrueInst->moveBefore(TrueBranch);
6631 }
6632 if (sinkSelectOperand(TTI, SI->getFalseValue())) {
6633 if (FalseBlock == nullptr) {
6634 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false.sink",
6635 EndBlock->getParent(), EndBlock);
6636 FalseBranch = BranchInst::Create(EndBlock, FalseBlock);
6637 FalseBranch->setDebugLoc(SI->getDebugLoc());
6638 }
6639 auto *FalseInst = cast<Instruction>(SI->getFalseValue());
6640 FalseInst->moveBefore(FalseBranch);
6641 }
6642 }
6643
6644 // If there was nothing to sink, then arbitrarily choose the 'false' side
6645 // for a new input value to the PHI.
6646 if (TrueBlock == FalseBlock) {
6647 assert(TrueBlock == nullptr &&
6648 "Unexpected basic block transform while optimizing select");
6649
6650 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false",
6651 EndBlock->getParent(), EndBlock);
6652 auto *FalseBranch = BranchInst::Create(EndBlock, FalseBlock);
6653 FalseBranch->setDebugLoc(SI->getDebugLoc());
6654 }
6655
6656 // Insert the real conditional branch based on the original condition.
6657 // If we did not create a new block for one of the 'true' or 'false' paths
6658 // of the condition, it means that side of the branch goes to the end block
6659 // directly and the path originates from the start block from the point of
6660 // view of the new PHI.
6661 BasicBlock *TT, *FT;
6662 if (TrueBlock == nullptr) {
6663 TT = EndBlock;
6664 FT = FalseBlock;
6665 TrueBlock = StartBlock;
6666 } else if (FalseBlock == nullptr) {
6667 TT = TrueBlock;
6668 FT = EndBlock;
6669 FalseBlock = StartBlock;
6670 } else {
6671 TT = TrueBlock;
6672 FT = FalseBlock;
6673 }
6674 IRBuilder<> IB(SI);
6675 auto *CondFr = IB.CreateFreeze(SI->getCondition(), SI->getName() + ".frozen");
6676 IB.CreateCondBr(CondFr, TT, FT, SI);
6677
6678 SmallPtrSet<const Instruction *, 2> INS;
6679 INS.insert(ASI.begin(), ASI.end());
6680 // Use a reverse iterator because a later select may use the value of an
6681 // earlier select, and we need to propagate the value through the earlier
6682 // select to get the PHI operand.
6683 for (auto It = ASI.rbegin(); It != ASI.rend(); ++It) {
6684 SelectInst *SI = *It;
6685 // The select itself is replaced with a PHI Node.
6686 PHINode *PN = PHINode::Create(SI->getType(), 2, "", &EndBlock->front());
6687 PN->takeName(SI);
6688 PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock);
6689 PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock);
6690 PN->setDebugLoc(SI->getDebugLoc());
6691
6692 SI->replaceAllUsesWith(PN);
6693 SI->eraseFromParent();
6694 INS.erase(SI);
6695 ++NumSelectsExpanded;
6696 }
6697
6698 // Instruct OptimizeBlock to skip to the next block.
6699 CurInstIterator = StartBlock->end();
6700 return true;
6701 }
6702
6703 /// Some targets only accept certain types for splat inputs. For example, a VDUP
6704 /// in MVE takes a GPR (integer) register, and the instructions that incorporate
6705 /// a VDUP (such as a VADD qd, qm, rm) also require a GPR register.
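/// An illustrative sketch (assuming shouldConvertSplatType asks for an i32
/// splat input for a <4 x float> splat):
/// \code
///   %ins   = insertelement <4 x float> undef, float %s, i32 0
///   %splat = shufflevector <4 x float> %ins, <4 x float> undef,
///                          <4 x i32> zeroinitializer
/// \endcode
/// becomes
/// \code
///   %s.bc     = bitcast float %s to i32
///   %ins      = insertelement <4 x i32> undef, i32 %s.bc, i32 0
///   %splat    = shufflevector <4 x i32> %ins, <4 x i32> undef,
///                             <4 x i32> zeroinitializer
///   %splat.bc = bitcast <4 x i32> %splat to <4 x float>
/// \endcode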
6706 bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
6707 if (!match(SVI, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
6708 m_Undef(), m_ZeroMask())))
6709 return false;
6710 Type *NewType = TLI->shouldConvertSplatType(SVI);
6711 if (!NewType)
6712 return false;
6713
6714 auto *SVIVecType = cast<FixedVectorType>(SVI->getType());
6715 assert(!NewType->isVectorTy() && "Expected a scalar type!");
6716 assert(NewType->getScalarSizeInBits() == SVIVecType->getScalarSizeInBits() &&
6717 "Expected a type of the same size!");
6718 auto *NewVecType =
6719 FixedVectorType::get(NewType, SVIVecType->getNumElements());
6720
6721 // Create a bitcast (shuffle (insert (bitcast(..))))
6722 IRBuilder<> Builder(SVI->getContext());
6723 Builder.SetInsertPoint(SVI);
6724 Value *BC1 = Builder.CreateBitCast(
6725 cast<Instruction>(SVI->getOperand(0))->getOperand(1), NewType);
6726 Value *Insert = Builder.CreateInsertElement(UndefValue::get(NewVecType), BC1,
6727 (uint64_t)0);
6728 Value *Shuffle = Builder.CreateShuffleVector(
6729 Insert, UndefValue::get(NewVecType), SVI->getShuffleMask());
6730 Value *BC2 = Builder.CreateBitCast(Shuffle, SVIVecType);
6731
6732 SVI->replaceAllUsesWith(BC2);
6733 RecursivelyDeleteTriviallyDeadInstructions(
6734 SVI, TLInfo, nullptr, [&](Value *V) { removeAllAssertingVHReferences(V); });
6735
6736 // Also hoist the bitcast up to its operand if they are not in the same
6737 // block.
6738 if (auto *BCI = dyn_cast<Instruction>(BC1))
6739 if (auto *Op = dyn_cast<Instruction>(BCI->getOperand(0)))
6740 if (BCI->getParent() != Op->getParent() && !isa<PHINode>(Op) &&
6741 !Op->isTerminator() && !Op->isEHPad())
6742 BCI->moveAfter(Op);
6743
6744 return true;
6745 }
6746
6747 bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) {
6748 // If the operands of I can be folded into a target instruction together with
6749 // I, duplicate and sink them.
6750 SmallVector<Use *, 4> OpsToSink;
6751 if (!TLI->shouldSinkOperands(I, OpsToSink))
6752 return false;
6753
6754 // OpsToSink can contain multiple uses in a use chain (e.g.
6755 // (%u1 with %u1 = shufflevector), (%u2 with %u2 = zext %u1)). The dominating
6756 // uses must come first, so we process the ops in reverse order so as to not
6757 // create invalid IR.
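// For instance (a hypothetical sketch), if OpsToSink holds the use of
//   %u1 = shufflevector ...
// inside
//   %u2 = zext ... %u1 ...
// followed by the use of %u2 inside I, the reverse walk first clones %u2 next
// to I and then clones %u1; the NewInstructions map below then rewires the
// cloned %u2 to use the cloned %u1, keeping the cloned chain well formed.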
6758 BasicBlock *TargetBB = I->getParent();
6759 bool Changed = false;
6760 SmallVector<Use *, 4> ToReplace;
6761 for (Use *U : reverse(OpsToSink)) {
6762 auto *UI = cast<Instruction>(U->get());
6763 if (UI->getParent() == TargetBB || isa<PHINode>(UI))
6764 continue;
6765 ToReplace.push_back(U);
6766 }
6767
6768 SetVector<Instruction *> MaybeDead;
6769 DenseMap<Instruction *, Instruction *> NewInstructions;
6770 Instruction *InsertPoint = I;
6771 for (Use *U : ToReplace) {
6772 auto *UI = cast<Instruction>(U->get());
6773 Instruction *NI = UI->clone();
6774 NewInstructions[UI] = NI;
6775 MaybeDead.insert(UI);
6776 LLVM_DEBUG(dbgs() << "Sinking " << *UI << " to user " << *I << "\n");
6777 NI->insertBefore(InsertPoint);
6778 InsertPoint = NI;
6779 InsertedInsts.insert(NI);
6780
6781 // Update the use for the new instruction, making sure that we update the
6782 // sunk instruction uses, if it is part of a chain that has already been
6783 // sunk.
6784 Instruction *OldI = cast<Instruction>(U->getUser());
6785 if (NewInstructions.count(OldI))
6786 NewInstructions[OldI]->setOperand(U->getOperandNo(), NI);
6787 else
6788 U->set(NI);
6789 Changed = true;
6790 }
6791
6792 // Remove instructions that are dead after sinking.
6793 for (auto *I : MaybeDead) {
6794 if (!I->hasNUsesOrMore(1)) {
6795 LLVM_DEBUG(dbgs() << "Removing dead instruction: " << *I << "\n");
6796 I->eraseFromParent();
6797 }
6798 }
6799
6800 return Changed;
6801 }
6802
6803 bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) {
6804 Value *Cond = SI->getCondition();
6805 Type *OldType = Cond->getType();
6806 LLVMContext &Context = Cond->getContext();
6807 MVT RegType = TLI->getRegisterType(Context, TLI->getValueType(*DL, OldType));
6808 unsigned RegWidth = RegType.getSizeInBits();
6809
6810 if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth())
6811 return false;
6812
6813 // If the register width is greater than the type width, expand the condition
6814 // of the switch instruction and each case constant to the width of the
6815 // register. By widening the type of the switch condition, subsequent
6816 // comparisons (for case comparisons) will not need to be extended to the
6817 // preferred register width, so we will potentially eliminate N-1 extends,
6818 // where N is the number of cases in the switch.
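// A sketch of the effect (assuming a target with 32-bit GPRs):
//   switch i8 %cond, label %def [ i8 1, label %bb1
//                                 i8 2, label %bb2 ]
// becomes
//   %cond.wide = zext i8 %cond to i32
//   switch i32 %cond.wide, label %def [ i32 1, label %bb1
//                                       i32 2, label %bb2 ]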
6819 auto *NewType = Type::getIntNTy(Context, RegWidth);
6820
6821 // Zero-extend the switch condition and case constants unless the switch
6822 // condition is a function argument that is already being sign-extended.
6823 // In that case, we can avoid an unnecessary mask/extension by sign-extending
6824 // everything instead.
6825 Instruction::CastOps ExtType = Instruction::ZExt;
6826 if (auto *Arg = dyn_cast<Argument>(Cond))
6827 if (Arg->hasSExtAttr())
6828 ExtType = Instruction::SExt;
6829
6830 auto *ExtInst = CastInst::Create(ExtType, Cond, NewType);
6831 ExtInst->insertBefore(SI);
6832 ExtInst->setDebugLoc(SI->getDebugLoc());
6833 SI->setCondition(ExtInst);
6834 for (auto Case : SI->cases()) {
6835 APInt NarrowConst = Case.getCaseValue()->getValue();
6836 APInt WideConst = (ExtType == Instruction::ZExt) ?
6837 NarrowConst.zext(RegWidth) : NarrowConst.sext(RegWidth);
6838 Case.setValue(ConstantInt::get(Context, WideConst));
6839 }
6840
6841 return true;
6842 }
6843
6844
6845 namespace {
6846
6847 /// Helper class to promote a scalar operation to a vector one.
6848 /// This class is used to move an extractelement transition downward.
6849 /// E.g.,
6850 /// a = vector_op <2 x i32>
6851 /// b = extractelement <2 x i32> a, i32 0
6852 /// c = scalar_op b
6853 /// store c
6854 ///
6855 /// =>
6856 /// a = vector_op <2 x i32>
6857 /// c = vector_op a (equivalent to scalar_op on the related lane)
6858 /// * d = extractelement <2 x i32> c, i32 0
6859 /// * store d
6860 /// Assuming both extractelement and store can be combined, we get rid of the
6861 /// transition.
6862 class VectorPromoteHelper {
6863 /// DataLayout associated with the current module.
6864 const DataLayout &DL;
6865
6866 /// Used to perform some checks on the legality of vector operations.
6867 const TargetLowering &TLI;
6868
6869 /// Used to estimate the cost of the promoted chain.
6870 const TargetTransformInfo &TTI;
6871
6872 /// The transition being moved downwards.
6873 Instruction *Transition;
6874
6875 /// The sequence of instructions to be promoted.
6876 SmallVector<Instruction *, 4> InstsToBePromoted;
6877
6878 /// Cost of combining a store and an extract.
6879 unsigned StoreExtractCombineCost;
6880
6881 /// Instruction that will be combined with the transition.
6882 Instruction *CombineInst = nullptr;
6883
6884 /// The instruction that represents the current end of the transition.
6885 /// Since we are faking the promotion until we reach the end of the chain
6886 /// of computation, we need a way to get the current end of the transition.
6887 Instruction *getEndOfTransition() const {
6888 if (InstsToBePromoted.empty())
6889 return Transition;
6890 return InstsToBePromoted.back();
6891 }
6892
6893 /// Return the index of the original value in the transition.
6894 /// E.g., for "extractelement <2 x i32> c, i32 1" the original value,
6895 /// c, is at index 0.
6896 unsigned getTransitionOriginalValueIdx() const {
6897 assert(isa<ExtractElementInst>(Transition) &&
6898 "Other kind of transitions are not supported yet");
6899 return 0;
6900 }
6901
6902 /// Return the index of the index in the transition.
6903 /// E.g., for "extractelement <2 x i32> c, i32 0" the index
6904 /// is at index 1.
6905 unsigned getTransitionIdx() const {
6906 assert(isa<ExtractElementInst>(Transition) &&
6907 "Other kind of transitions are not supported yet");
6908 return 1;
6909 }
6910
6911 /// Get the type of the transition.
6912 /// This is the type of the original value.
6913 /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the
6914 /// transition is <2 x i32>.
6915 Type *getTransitionType() const {
6916 return Transition->getOperand(getTransitionOriginalValueIdx())->getType();
6917 }
6918
6919 /// Promote \p ToBePromoted by moving \p Def downward through it.
6920 /// I.e., we have the following sequence:
6921 /// Def = Transition <ty1> a to <ty2>
6922 /// b = ToBePromoted <ty2> Def, ...
6923 /// =>
6924 /// b = ToBePromoted <ty1> a, ...
6925 /// Def = Transition <ty1> ToBePromoted to <ty2>
6926 void promoteImpl(Instruction *ToBePromoted);
6927
6928 /// Check whether or not it is profitable to promote all the
6929 /// instructions enqueued to be promoted.
6930 bool isProfitableToPromote() {
6931 Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx());
6932 unsigned Index = isa<ConstantInt>(ValIdx)
6933 ? cast<ConstantInt>(ValIdx)->getZExtValue()
6934 : -1;
6935 Type *PromotedType = getTransitionType();
6936
6937 StoreInst *ST = cast<StoreInst>(CombineInst);
6938 unsigned AS = ST->getPointerAddressSpace();
6939 unsigned Align = ST->getAlignment();
6940 // Check if this store is supported.
6941 if (!TLI.allowsMisalignedMemoryAccesses(
6942 TLI.getValueType(DL, ST->getValueOperand()->getType()), AS,
6943 Align)) {
6944 // If this is not supported, there is no way we can combine
6945 // the extract with the store.
6946 return false;
6947 }
6948
6949 // The scalar chain of computation has to pay for the transition
6950 // scalar to vector.
6951 // The vector chain has to account for the combining cost.
6952 uint64_t ScalarCost =
6953 TTI.getVectorInstrCost(Transition->getOpcode(), PromotedType, Index);
6954 uint64_t VectorCost = StoreExtractCombineCost;
6955 enum TargetTransformInfo::TargetCostKind CostKind =
6956 TargetTransformInfo::TCK_RecipThroughput;
6957 for (const auto &Inst : InstsToBePromoted) {
6958 // Compute the cost.
6959 // By construction, all instructions being promoted are arithmetic ones.
6960 // Moreover, one argument is a constant that can be viewed as a splat
6961 // constant.
6962 Value *Arg0 = Inst->getOperand(0);
6963 bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) ||
6964 isa<ConstantFP>(Arg0);
6965 TargetTransformInfo::OperandValueKind Arg0OVK =
6966 IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue
6967 : TargetTransformInfo::OK_AnyValue;
6968 TargetTransformInfo::OperandValueKind Arg1OVK =
6969 !IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue
6970 : TargetTransformInfo::OK_AnyValue;
6971 ScalarCost += TTI.getArithmeticInstrCost(
6972 Inst->getOpcode(), Inst->getType(), CostKind, Arg0OVK, Arg1OVK);
6973 VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType,
6974 CostKind,
6975 Arg0OVK, Arg1OVK);
6976 }
6977 LLVM_DEBUG(
6978 dbgs() << "Estimated cost of computation to be promoted:\nScalar: "
6979 << ScalarCost << "\nVector: " << VectorCost << '\n');
6980 return ScalarCost > VectorCost;
6981 }
6982
6983 /// Generate a constant vector with \p Val with the same
6984 /// number of elements as the transition.
6985 /// \p UseSplat defines whether or not \p Val should be replicated
6986 /// across the whole vector.
6987 /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>,
6988 /// otherwise we generate a vector with as many undef as possible:
6989 /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only
6990 /// used at the index of the extract.
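/// E.g. (an illustrative sketch), for a <4 x i32> transition extracting
/// element 2 with \p Val == i32 7:
///   UseSplat == true  -> <i32 7, i32 7, i32 7, i32 7>
///   UseSplat == false -> <i32 undef, i32 undef, i32 7, i32 undef>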
6991 Value *getConstantVector(Constant *Val, bool UseSplat) const {
6992 unsigned ExtractIdx = std::numeric_limits<unsigned>::max();
6993 if (!UseSplat) {
6994 // If we cannot determine where the constant must be, we have to
6995 // use a splat constant.
6996 Value *ValExtractIdx = Transition->getOperand(getTransitionIdx());
6997 if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx))
6998 ExtractIdx = CstVal->getSExtValue();
6999 else
7000 UseSplat = true;
7001 }
7002
7003 ElementCount EC = cast<VectorType>(getTransitionType())->getElementCount();
7004 if (UseSplat)
7005 return ConstantVector::getSplat(EC, Val);
7006
7007 if (!EC.isScalable()) {
7008 SmallVector<Constant *, 4> ConstVec;
7009 UndefValue *UndefVal = UndefValue::get(Val->getType());
7010 for (unsigned Idx = 0; Idx != EC.getKnownMinValue(); ++Idx) {
7011 if (Idx == ExtractIdx)
7012 ConstVec.push_back(Val);
7013 else
7014 ConstVec.push_back(UndefVal);
7015 }
7016 return ConstantVector::get(ConstVec);
7017 } else
7018 llvm_unreachable(
7019 "Generate scalable vector for non-splat is unimplemented");
7020 }
7021
7022 /// Check if promoting to a vector type an operand at \p OperandIdx
7023 /// in \p Use can trigger undefined behavior.
7024 static bool canCauseUndefinedBehavior(const Instruction *Use,
7025 unsigned OperandIdx) {
7026 // It is not safe to introduce undef when the operand is on
7027 // the right-hand side of a division-like instruction.
7028 if (OperandIdx != 1)
7029 return false;
7030 switch (Use->getOpcode()) {
7031 default:
7032 return false;
7033 case Instruction::SDiv:
7034 case Instruction::UDiv:
7035 case Instruction::SRem:
7036 case Instruction::URem:
7037 return true;
7038 case Instruction::FDiv:
7039 case Instruction::FRem:
7040 return !Use->hasNoNaNs();
7041 }
7042 llvm_unreachable(nullptr);
7043 }
7044
7045 public:
7046 VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI,
7047 const TargetTransformInfo &TTI, Instruction *Transition,
7048 unsigned CombineCost)
7049 : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition),
7050 StoreExtractCombineCost(CombineCost) {
7051 assert(Transition && "Do not know how to promote null");
7052 }
7053
7054 /// Check if we can promote \p ToBePromoted to \p Type.
7055 bool canPromote(const Instruction *ToBePromoted) const {
7056 // We could support CastInst too.
7057 return isa<BinaryOperator>(ToBePromoted);
7058 }
7059
7060 /// Check if it is profitable to promote \p ToBePromoted
7061 /// by moving the transition downward through it.
7062 bool shouldPromote(const Instruction *ToBePromoted) const {
7063 // Promote only if all the operands can be statically expanded.
7064 // Indeed, we do not want to introduce any new kind of transitions.
7065 for (const Use &U : ToBePromoted->operands()) {
7066 const Value *Val = U.get();
7067 if (Val == getEndOfTransition()) {
7068 // If the use is a division and the transition is on the rhs,
7069 // we cannot promote the operation, otherwise we may create a
7070 // division by zero.
7071 if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()))
7072 return false;
7073 continue;
7074 }
7075 if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) &&
7076 !isa<ConstantFP>(Val))
7077 return false;
7078 }
7079 // Check that the resulting operation is legal.
7080 int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode());
7081 if (!ISDOpcode)
7082 return false;
7083 return StressStoreExtract ||
7084 TLI.isOperationLegalOrCustom(
7085 ISDOpcode, TLI.getValueType(DL, getTransitionType(), true));
7086 }
7087
7088 /// Check whether or not \p Use can be combined
7089 /// with the transition.
7090 /// I.e., is it possible to do Use(Transition) => AnotherUse?
7091 bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); }
7092
7093 /// Record \p ToBePromoted as part of the chain to be promoted.
7094 void enqueueForPromotion(Instruction *ToBePromoted) {
7095 InstsToBePromoted.push_back(ToBePromoted);
7096 }
7097
7098 /// Set the instruction that will be combined with the transition.
7099 void recordCombineInstruction(Instruction *ToBeCombined) {
7100 assert(canCombine(ToBeCombined) && "Unsupported instruction to combine");
7101 CombineInst = ToBeCombined;
7102 }
7103
7104 /// Promote all the instructions enqueued for promotion if it
7105 /// is profitable.
7106 /// \return True if the promotion happened, false otherwise.
7107 bool promote() {
7108 // Check if there is something to promote.
7109 // Right now, if we do not have anything to combine with,
7110 // we assume the promotion is not profitable.
7111 if (InstsToBePromoted.empty() || !CombineInst)
7112 return false;
7113
7114 // Check cost.
7115 if (!StressStoreExtract && !isProfitableToPromote())
7116 return false;
7117
7118 // Promote.
7119 for (auto &ToBePromoted : InstsToBePromoted)
7120 promoteImpl(ToBePromoted);
7121 InstsToBePromoted.clear();
7122 return true;
7123 }
7124 };
7125
7126 } // end anonymous namespace
7127
7128 void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) {
7129 // At this point, we know that all the operands of ToBePromoted but Def
7130 // can be statically promoted.
7131 // For Def, we need to use its parameter in ToBePromoted:
7132 // b = ToBePromoted ty1 a
7133 // Def = Transition ty1 b to ty2
7134 // Move the transition down.
7135 // 1. Replace all uses of the promoted operation by the transition.
7136 // = ... b => = ... Def.
7137 assert(ToBePromoted->getType() == Transition->getType() &&
7138 "The type of the result of the transition does not match "
7139 "the final type");
7140 ToBePromoted->replaceAllUsesWith(Transition);
7141 // 2. Update the type of the uses.
7142 // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def.
7143 Type *TransitionTy = getTransitionType();
7144 ToBePromoted->mutateType(TransitionTy);
7145 // 3. Update all the operands of the promoted operation with promoted
7146 // operands.
7147 // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a.
7148 for (Use &U : ToBePromoted->operands()) {
7149 Value *Val = U.get();
7150 Value *NewVal = nullptr;
7151 if (Val == Transition)
7152 NewVal = Transition->getOperand(getTransitionOriginalValueIdx());
7153 else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) ||
7154 isa<ConstantFP>(Val)) {
7155 // Use a splat constant if it is not safe to use undef.
7156 NewVal = getConstantVector(
7157 cast<Constant>(Val),
7158 isa<UndefValue>(Val) ||
7159 canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()));
7160 } else
7161 llvm_unreachable("Did you modify shouldPromote and forget to update "
7162 "this?");
7163 ToBePromoted->setOperand(U.getOperandNo(), NewVal);
7164 }
7165 Transition->moveAfter(ToBePromoted);
7166 Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted);
7167 }
7168
7169 /// Some targets can do store(extractelement) with one instruction.
7170 /// Try to push the extractelement towards the stores when the target
7171 /// has this feature and this is profitable.
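/// For illustration, a hypothetical sequence (assuming the target can combine
/// the store with the extract):
///   %elt = extractelement <2 x i32> %v, i32 1
///   %add = add i32 %elt, 1
///   store i32 %add, i32* %p
/// would roughly become
///   %add = add <2 x i32> %v, <i32 undef, i32 1>
///   %elt = extractelement <2 x i32> %add, i32 1
///   store i32 %elt, i32* %p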
7172 bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {
7173 unsigned CombineCost = std::numeric_limits<unsigned>::max();
7174 if (DisableStoreExtract ||
7175 (!StressStoreExtract &&
7176 !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(),
7177 Inst->getOperand(1), CombineCost)))
7178 return false;
7179
7180 // At this point we know that Inst is a vector to scalar transition.
7181 // Try to move it down the def-use chain, until:
7182 // - We can combine the transition with its single use
7183 // => we got rid of the transition.
7184 // - We escape the current basic block
7185 // => we would need to check that we are moving it at a cheaper place and
7186 // we do not do that for now.
7187 BasicBlock *Parent = Inst->getParent();
7188 LLVM_DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n');
7189 VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost);
7190 // If the transition has more than one use, assume this is not going to be
7191 // beneficial.
7192 while (Inst->hasOneUse()) {
7193 Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin());
7194 LLVM_DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n');
7195
7196 if (ToBePromoted->getParent() != Parent) {
7197 LLVM_DEBUG(dbgs() << "Instruction to promote is in a different block ("
7198 << ToBePromoted->getParent()->getName()
7199 << ") than the transition (" << Parent->getName()
7200 << ").\n");
7201 return false;
7202 }
7203
7204 if (VPH.canCombine(ToBePromoted)) {
7205 LLVM_DEBUG(dbgs() << "Assume " << *Inst << '\n'
7206 << "will be combined with: " << *ToBePromoted << '\n');
7207 VPH.recordCombineInstruction(ToBePromoted);
7208 bool Changed = VPH.promote();
7209 NumStoreExtractExposed += Changed;
7210 return Changed;
7211 }
7212
7213 LLVM_DEBUG(dbgs() << "Try promoting.\n");
7214 if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted))
7215 return false;
7216
7217 LLVM_DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n");
7218
7219 VPH.enqueueForPromotion(ToBePromoted);
7220 Inst = ToBePromoted;
7221 }
7222 return false;
7223 }
7224
7225 /// In the instruction sequence of the store below, the F and I values
7226 /// are bundled together as an i64 value before being stored into memory.
7227 /// Sometimes it is more efficient to generate separate stores for F and I,
7228 /// which can remove the bitwise instructions or sink them to colder places.
7229 ///
7230 /// (store (or (zext (bitcast F to i32) to i64),
7231 /// (shl (zext I to i64), 32)), addr) -->
7232 /// (store F, addr) and (store I, addr+4)
7233 ///
7234 /// Similarly, splitting other merged stores can also be beneficial, for example:
7235 /// For pair of {i32, i32}, i64 store --> two i32 stores.
7236 /// For pair of {i32, i16}, i64 store --> two i32 stores.
7237 /// For pair of {i16, i16}, i32 store --> two i16 stores.
7238 /// For pair of {i16, i8}, i32 store --> two i16 stores.
7239 /// For pair of {i8, i8}, i16 store --> two i8 stores.
7240 ///
7241 /// We allow each target to determine specifically which kind of splitting is
7242 /// supported.
7243 ///
7244 /// The store patterns are commonly seen from the simple code snippet below
7245 /// if only std::make_pair(...) is SROA-transformed before being inlined into hoo.
7246 /// void goo(const std::pair<int, float> &);
7247 /// hoo() {
7248 /// ...
7249 /// goo(std::make_pair(tmp, ftmp));
7250 /// ...
7251 /// }
7252 ///
7253 /// Although we already have similar splitting in DAG Combine, we duplicate
7254 /// it in CodeGenPrepare to catch the case in which the pattern is spread across
7255 /// multiple BBs. The logic in DAG Combine is kept to catch cases generated
7256 /// during code expansion.
7257 static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL,
7258 const TargetLowering &TLI) {
7259 // Handle simple but common cases only.
7260 Type *StoreType = SI.getValueOperand()->getType();
7261
7262 // The code below assumes shifting a value by <number of bits>,
7263 // whereas scalable vectors would have to be shifted by
7264 // <log2(vscale) + number of bits> in order to store the
7265 // low/high parts. Bailing out for now.
7266 if (isa<ScalableVectorType>(StoreType))
7267 return false;
7268
7269 if (!DL.typeSizeEqualsStoreSize(StoreType) ||
7270 DL.getTypeSizeInBits(StoreType) == 0)
7271 return false;
7272
7273 unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2;
7274 Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize);
7275 if (!DL.typeSizeEqualsStoreSize(SplitStoreType))
7276 return false;
7277
7278 // Don't split the store if it is volatile.
7279 if (SI.isVolatile())
7280 return false;
7281
7282 // Match the following patterns:
7283 // (store (or (zext LValue to i64),
7284 // (shl (zext HValue to i64), 32)), HalfValBitSize)
7285 // or
7286 // (store (or (shl (zext HValue to i64), 32),
7287 // (zext LValue to i64)), HalfValBitSize)
7288 // Expect both operands of the OR and the first operand of the SHL to have
7289 // only one use.
7290 Value *LValue, *HValue;
7291 if (!match(SI.getValueOperand(),
7292 m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))),
7293 m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))),
7294 m_SpecificInt(HalfValBitSize))))))
7295 return false;
7296
7297 // Check that LValue and HValue are integers no wider than HalfValBitSize.
7298 if (!LValue->getType()->isIntegerTy() ||
7299 DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize ||
7300 !HValue->getType()->isIntegerTy() ||
7301 DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize)
7302 return false;
7303
7304 // If LValue/HValue is a bitcast instruction, use the EVT before bitcast
7305 // as the input of target query.
7306 auto *LBC = dyn_cast<BitCastInst>(LValue);
7307 auto *HBC = dyn_cast<BitCastInst>(HValue);
7308 EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType())
7309 : EVT::getEVT(LValue->getType());
7310 EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType())
7311 : EVT::getEVT(HValue->getType());
7312 if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy))
7313 return false;
7314
7315 // Start to split store.
7316 IRBuilder<> Builder(SI.getContext());
7317 Builder.SetInsertPoint(&SI);
7318
7319 // If LValue/HValue is a bitcast in another BB, create a new one in current
7320 // BB so it may be merged with the split stores by the DAG combiner.
7321 if (LBC && LBC->getParent() != SI.getParent())
7322 LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType());
7323 if (HBC && HBC->getParent() != SI.getParent())
7324 HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType());
7325
7326 bool IsLE = SI.getModule()->getDataLayout().isLittleEndian();
7327 auto CreateSplitStore = [&](Value *V, bool Upper) {
7328 V = Builder.CreateZExtOrBitCast(V, SplitStoreType);
7329 Value *Addr = Builder.CreateBitCast(
7330 SI.getOperand(1),
7331 SplitStoreType->getPointerTo(SI.getPointerAddressSpace()));
7332 Align Alignment = SI.getAlign();
7333 const bool IsOffsetStore = (IsLE && Upper) || (!IsLE && !Upper);
7334 if (IsOffsetStore) {
7335 Addr = Builder.CreateGEP(
7336 SplitStoreType, Addr,
7337 ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1));
7338
7339 // When splitting the store in half, naturally one half will retain the
7340 // alignment of the original wider store, regardless of whether it was
7341 // over-aligned or not, while the other will require adjustment.
7342 Alignment = commonAlignment(Alignment, HalfValBitSize / 8);
7343 }
7344 Builder.CreateAlignedStore(V, Addr, Alignment);
7345 };
7346
7347 CreateSplitStore(LValue, false);
7348 CreateSplitStore(HValue, true);
7349
7350 // Delete the old store.
7351 SI.eraseFromParent();
7352 return true;
7353 }
7354
7355 // Return true if the GEP has two operands, the first operand is of a sequential
7356 // type, and the second operand is a constant.
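// For example (illustrative), "%g = getelementptr i32, i32* %p, i64 4" matches,
// whereas a GEP with a variable index or with more than one index does not.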
7357 static bool GEPSequentialConstIndexed(GetElementPtrInst *GEP) {
7358 gep_type_iterator I = gep_type_begin(*GEP);
7359 return GEP->getNumOperands() == 2 &&
7360 I.isSequential() &&
7361 isa<ConstantInt>(GEP->getOperand(1));
7362 }
7363
7364 // Try unmerging GEPs to reduce liveness interference (register pressure) across
7365 // IndirectBr edges. Since IndirectBr edges tend to touch on many blocks,
7366 // reducing liveness interference across those edges benefits global register
7367 // allocation. Currently handles only certain cases.
7368 //
7369 // For example, unmerge %GEPI and %UGEPI as below.
7370 //
7371 // ---------- BEFORE ----------
7372 // SrcBlock:
7373 // ...
7374 // %GEPIOp = ...
7375 // ...
7376 // %GEPI = gep %GEPIOp, Idx
7377 // ...
7378 // indirectbr ... [ label %DstB0, label %DstB1, ... label %DstBi ... ]
7379 // (* %GEPI is alive on the indirectbr edges due to other uses ahead)
7380 // (* %GEPIOp is alive on the indirectbr edges only because it is used by
7381 // %UGEPI)
7382 //
7383 // DstB0: ... (there may be a gep similar to %UGEPI to be unmerged)
7384 // DstB1: ... (there may be a gep similar to %UGEPI to be unmerged)
7385 // ...
7386 //
7387 // DstBi:
7388 // ...
7389 // %UGEPI = gep %GEPIOp, UIdx
7390 // ...
7391 // ---------------------------
7392 //
7393 // ---------- AFTER ----------
7394 // SrcBlock:
7395 // ... (same as above)
7396 // (* %GEPI is still alive on the indirectbr edges)
7397 // (* %GEPIOp is no longer alive on the indirectbr edges as a result of the
7398 // unmerging)
7399 // ...
7400 //
7401 // DstBi:
7402 // ...
7403 // %UGEPI = gep %GEPI, (UIdx-Idx)
7404 // ...
7405 // ---------------------------
7406 //
7407 // The register pressure on the IndirectBr edges is reduced because %GEPIOp is
7408 // no longer alive on them.
7409 //
7410 // We try to unmerge GEPs here in CodeGenPrepare, as opposed to limiting merging
7411 // of GEPs in the first place in InstCombiner::visitGetElementPtrInst() so as
7412 // not to disable further simplifications and optimizations as a result of GEP
7413 // merging.
7414 //
7415 // Note this unmerging may increase the length of the data flow critical path
7416 // (the path from %GEPIOp to %UGEPI would go through %GEPI), which is a tradeoff
7417 // between the register pressure and the length of data-flow critical
7418 // path. Restricting this to the uncommon IndirectBr case would minimize the
7419 // impact of potentially longer critical path, if any, and the impact on compile
7420 // time.
7421 static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI,
7422 const TargetTransformInfo *TTI) {
7423 BasicBlock *SrcBlock = GEPI->getParent();
7424 // Check that SrcBlock ends with an IndirectBr. If not, give up. The common
7425 // (non-IndirectBr) cases exit early here.
7426 if (!isa<IndirectBrInst>(SrcBlock->getTerminator()))
7427 return false;
7428 // Check that GEPI is a simple gep with a single constant index.
7429 if (!GEPSequentialConstIndexed(GEPI))
7430 return false;
7431 ConstantInt *GEPIIdx = cast<ConstantInt>(GEPI->getOperand(1));
7432 // Check that GEPI is a cheap one.
7433 if (TTI->getIntImmCost(GEPIIdx->getValue(), GEPIIdx->getType(),
7434 TargetTransformInfo::TCK_SizeAndLatency)
7435 > TargetTransformInfo::TCC_Basic)
7436 return false;
7437 Value *GEPIOp = GEPI->getOperand(0);
7438 // Check that GEPIOp is an instruction that's also defined in SrcBlock.
7439 if (!isa<Instruction>(GEPIOp))
7440 return false;
7441 auto *GEPIOpI = cast<Instruction>(GEPIOp);
7442 if (GEPIOpI->getParent() != SrcBlock)
7443 return false;
7444 // Check that GEPI is used outside the block, meaning it's alive on the
7445 // IndirectBr edge(s).
7446 if (find_if(GEPI->users(), [&](User *Usr) {
7447 if (auto *I = dyn_cast<Instruction>(Usr)) {
7448 if (I->getParent() != SrcBlock) {
7449 return true;
7450 }
7451 }
7452 return false;
7453 }) == GEPI->users().end())
7454 return false;
7455 // The second elements of the GEP chains to be unmerged.
7456 std::vector<GetElementPtrInst *> UGEPIs;
7457 // Check each user of GEPIOp to see whether unmerging would make GEPIOp not alive
7458 // on IndirectBr edges.
7459 for (User *Usr : GEPIOp->users()) {
7460 if (Usr == GEPI) continue;
7461 // Check if Usr is an Instruction. If not, give up.
7462 if (!isa<Instruction>(Usr))
7463 return false;
7464 auto *UI = cast<Instruction>(Usr);
7465 // If Usr is in the same block as GEPIOp, that is fine; skip it.
7466 if (UI->getParent() == SrcBlock)
7467 continue;
7468 // Check if Usr is a GEP. If not, give up.
7469 if (!isa<GetElementPtrInst>(Usr))
7470 return false;
7471 auto *UGEPI = cast<GetElementPtrInst>(Usr);
7472 // Check if UGEPI is a simple gep with a single constant index and GEPIOp is
7473 // the pointer operand to it. If so, record it in the vector. If not, give
7474 // up.
7475 if (!GEPSequentialConstIndexed(UGEPI))
7476 return false;
7477 if (UGEPI->getOperand(0) != GEPIOp)
7478 return false;
7479 if (GEPIIdx->getType() !=
7480 cast<ConstantInt>(UGEPI->getOperand(1))->getType())
7481 return false;
7482 ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
7483 if (TTI->getIntImmCost(UGEPIIdx->getValue(), UGEPIIdx->getType(),
7484 TargetTransformInfo::TCK_SizeAndLatency)
7485 > TargetTransformInfo::TCC_Basic)
7486 return false;
7487 UGEPIs.push_back(UGEPI);
7488 }
7489 if (UGEPIs.size() == 0)
7490 return false;
7491 // Check the materializing cost of (Uidx-Idx).
7492 for (GetElementPtrInst *UGEPI : UGEPIs) {
7493 ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
7494 APInt NewIdx = UGEPIIdx->getValue() - GEPIIdx->getValue();
7495 unsigned ImmCost =
7496 TTI->getIntImmCost(NewIdx, GEPIIdx->getType(),
7497 TargetTransformInfo::TCK_SizeAndLatency);
7498 if (ImmCost > TargetTransformInfo::TCC_Basic)
7499 return false;
7500 }
7501 // Now unmerge between GEPI and UGEPIs.
7502 for (GetElementPtrInst *UGEPI : UGEPIs) {
7503 UGEPI->setOperand(0, GEPI);
7504 ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
7505 Constant *NewUGEPIIdx =
7506 ConstantInt::get(GEPIIdx->getType(),
7507 UGEPIIdx->getValue() - GEPIIdx->getValue());
7508 UGEPI->setOperand(1, NewUGEPIIdx);
7509 // If GEPI is not inbounds but UGEPI is inbounds, change UGEPI to not
7510 // inbounds to avoid UB.
7511 if (!GEPI->isInBounds()) {
7512 UGEPI->setIsInBounds(false);
7513 }
7514 }
7515 // After unmerging, verify that GEPIOp is actually only used in SrcBlock (not
7516 // alive on IndirectBr edges).
7517 assert(find_if(GEPIOp->users(), [&](User *Usr) {
7518 return cast<Instruction>(Usr)->getParent() != SrcBlock;
7519 }) == GEPIOp->users().end() && "GEPIOp is used outside SrcBlock");
7520 return true;
7521 }
7522
7523 bool CodeGenPrepare::optimizeInst(Instruction *I, bool &ModifiedDT) {
7524 // Bail out if we inserted the instruction to prevent optimizations from
7525 // stepping on each other's toes.
7526 if (InsertedInsts.count(I))
7527 return false;
7528
7529 // TODO: Move into the switch on opcode below here.
7530 if (PHINode *P = dyn_cast<PHINode>(I)) {
7531 // It is possible for very late stage optimizations (such as SimplifyCFG)
7532 // to introduce PHI nodes too late to be cleaned up. If we detect such a
7533 // trivial PHI, go ahead and zap it here.
7534 if (Value *V = SimplifyInstruction(P, {*DL, TLInfo})) {
7535 LargeOffsetGEPMap.erase(P);
7536 P->replaceAllUsesWith(V);
7537 P->eraseFromParent();
7538 ++NumPHIsElim;
7539 return true;
7540 }
7541 return false;
7542 }
7543
7544 if (CastInst *CI = dyn_cast<CastInst>(I)) {
7545 // If the source of the cast is a constant, then this should have
7546 // already been constant folded. The only reason NOT to constant fold
7547 // it is if something (e.g. LSR) was careful to place the constant
7548 // evaluation in a block other than the one that uses it (e.g. to hoist
7549 // the address of globals out of a loop). If this is the case, we don't
7550 // want to forward-subst the cast.
7551 if (isa<Constant>(CI->getOperand(0)))
7552 return false;
7553
7554 if (OptimizeNoopCopyExpression(CI, *TLI, *DL))
7555 return true;
7556
7557 if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
7558 /// Sink a zext or sext into its user blocks if the target type doesn't
7559 /// fit in one register
7560 if (TLI->getTypeAction(CI->getContext(),
7561 TLI->getValueType(*DL, CI->getType())) ==
7562 TargetLowering::TypeExpandInteger) {
7563 return SinkCast(CI);
7564 } else {
7565 bool MadeChange = optimizeExt(I);
7566 return MadeChange | optimizeExtUses(I);
7567 }
7568 }
7569 return false;
7570 }
7571
7572 if (auto *Cmp = dyn_cast<CmpInst>(I))
7573 if (optimizeCmp(Cmp, ModifiedDT))
7574 return true;
7575
7576 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
7577 LI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
7578 bool Modified = optimizeLoadExt(LI);
7579 unsigned AS = LI->getPointerAddressSpace();
7580 Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
7581 return Modified;
7582 }
7583
7584 if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
7585 if (splitMergedValStore(*SI, *DL, *TLI))
7586 return true;
7587 SI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
7588 unsigned AS = SI->getPointerAddressSpace();
7589 return optimizeMemoryInst(I, SI->getOperand(1),
7590 SI->getOperand(0)->getType(), AS);
7591 }
7592
7593 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
7594 unsigned AS = RMW->getPointerAddressSpace();
7595 return optimizeMemoryInst(I, RMW->getPointerOperand(),
7596 RMW->getType(), AS);
7597 }
7598
7599 if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) {
7600 unsigned AS = CmpX->getPointerAddressSpace();
7601 return optimizeMemoryInst(I, CmpX->getPointerOperand(),
7602 CmpX->getCompareOperand()->getType(), AS);
7603 }
7604
7605 BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);
7606
7607 if (BinOp && (BinOp->getOpcode() == Instruction::And) && EnableAndCmpSinking)
7608 return sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts);
7609
7610 // TODO: Move this into the switch on opcode - it handles shifts already.
7611 if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
7612 BinOp->getOpcode() == Instruction::LShr)) {
7613 ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
7614 if (CI && TLI->hasExtractBitsInsn())
7615 if (OptimizeExtractBits(BinOp, CI, *TLI, *DL))
7616 return true;
7617 }
7618
7619 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
7620 if (GEPI->hasAllZeroIndices()) {
7621 /// The GEP operand must be a pointer, so must its result -> BitCast
7622 Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
7623 GEPI->getName(), GEPI);
7624 NC->setDebugLoc(GEPI->getDebugLoc());
7625 GEPI->replaceAllUsesWith(NC);
7626 GEPI->eraseFromParent();
7627 ++NumGEPsElim;
7628 optimizeInst(NC, ModifiedDT);
7629 return true;
7630 }
7631 if (tryUnmergingGEPsAcrossIndirectBr(GEPI, TTI)) {
7632 return true;
7633 }
7634 return false;
7635 }
7636
7637 if (FreezeInst *FI = dyn_cast<FreezeInst>(I)) {
7638 // freeze(icmp a, const) -> icmp (freeze a), const
7639 // This helps generate efficient conditional jumps.
7640 Instruction *CmpI = nullptr;
7641 if (ICmpInst *II = dyn_cast<ICmpInst>(FI->getOperand(0)))
7642 CmpI = II;
7643 else if (FCmpInst *F = dyn_cast<FCmpInst>(FI->getOperand(0)))
7644 CmpI = F->getFastMathFlags().none() ? F : nullptr;
7645
7646 if (CmpI && CmpI->hasOneUse()) {
7647 auto Op0 = CmpI->getOperand(0), Op1 = CmpI->getOperand(1);
7648 bool Const0 = isa<ConstantInt>(Op0) || isa<ConstantFP>(Op0) ||
7649 isa<ConstantPointerNull>(Op0);
7650 bool Const1 = isa<ConstantInt>(Op1) || isa<ConstantFP>(Op1) ||
7651 isa<ConstantPointerNull>(Op1);
7652 if (Const0 || Const1) {
7653 if (!Const0 || !Const1) {
7654 auto *F = new FreezeInst(Const0 ? Op1 : Op0, "", CmpI);
7655 F->takeName(FI);
7656 CmpI->setOperand(Const0 ? 1 : 0, F);
7657 }
7658 FI->replaceAllUsesWith(CmpI);
7659 FI->eraseFromParent();
7660 return true;
7661 }
7662 }
7663 return false;
7664 }
7665
7666 if (tryToSinkFreeOperands(I))
7667 return true;
7668
7669 switch (I->getOpcode()) {
7670 case Instruction::Shl:
7671 case Instruction::LShr:
7672 case Instruction::AShr:
7673 return optimizeShiftInst(cast<BinaryOperator>(I));
7674 case Instruction::Call:
7675 return optimizeCallInst(cast<CallInst>(I), ModifiedDT);
7676 case Instruction::Select:
7677 return optimizeSelectInst(cast<SelectInst>(I));
7678 case Instruction::ShuffleVector:
7679 return optimizeShuffleVectorInst(cast<ShuffleVectorInst>(I));
7680 case Instruction::Switch:
7681 return optimizeSwitchInst(cast<SwitchInst>(I));
7682 case Instruction::ExtractElement:
7683 return optimizeExtractElementInst(cast<ExtractElementInst>(I));
7684 }
7685
7686 return false;
7687 }
7688
7689 /// Given an OR instruction, check to see if this is a bitreverse
7690 /// idiom. If so, insert the new intrinsic and return true.
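/// For illustration, a hypothetical i4 idiom such as
///   ((x << 3) & 8) | ((x << 1) & 4) | ((x >> 1) & 2) | ((x >> 3) & 1)
/// reverses the bits of x and could be replaced by a single llvm.bitreverse.i4
/// call, assuming the target handles ISD::BITREVERSE for that type.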
7691 bool CodeGenPrepare::makeBitReverse(Instruction &I) {
7692 if (!I.getType()->isIntegerTy() ||
7693 !TLI->isOperationLegalOrCustom(ISD::BITREVERSE,
7694 TLI->getValueType(*DL, I.getType(), true)))
7695 return false;
7696
7697 SmallVector<Instruction*, 4> Insts;
7698 if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts))
7699 return false;
7700 Instruction *LastInst = Insts.back();
7701 I.replaceAllUsesWith(LastInst);
7702 RecursivelyDeleteTriviallyDeadInstructions(
7703 &I, TLInfo, nullptr, [&](Value *V) { removeAllAssertingVHReferences(V); });
7704 return true;
7705 }
7706
7707 // In this pass we look for GEP and cast instructions that are used
7708 // across basic blocks and rewrite them to improve basic-block-at-a-time
7709 // selection.
7710 bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, bool &ModifiedDT) {
7711 SunkAddrs.clear();
7712 bool MadeChange = false;
7713
7714 CurInstIterator = BB.begin();
7715 while (CurInstIterator != BB.end()) {
7716 MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
7717 if (ModifiedDT)
7718 return true;
7719 }
7720
7721 bool MadeBitReverse = true;
7722 while (MadeBitReverse) {
7723 MadeBitReverse = false;
7724 for (auto &I : reverse(BB)) {
7725 if (makeBitReverse(I)) {
7726 MadeBitReverse = MadeChange = true;
7727 break;
7728 }
7729 }
7730 }
7731 MadeChange |= dupRetToEnableTailCallOpts(&BB, ModifiedDT);
7732
7733 return MadeChange;
7734 }
7735
7736 // Some CGP optimizations may move or alter what's computed in a block. Check
7737 // whether a dbg.value intrinsic could be pointed at a more appropriate operand.
7738 bool CodeGenPrepare::fixupDbgValue(Instruction *I) {
7739 assert(isa<DbgValueInst>(I));
7740 DbgValueInst &DVI = *cast<DbgValueInst>(I);
7741
7742 // Does this dbg.value refer to a sunk address calculation?
7743 Value *Location = DVI.getVariableLocation();
7744 WeakTrackingVH SunkAddrVH = SunkAddrs[Location];
7745 Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
7746 if (SunkAddr) {
7747 // Point dbg.value at locally computed address, which should give the best
7748 // opportunity to be accurately lowered. This update may change the type of
7749 // pointer being referred to; however this makes no difference to debugging
7750 // information, and we can't generate bitcasts that may affect codegen.
7751 DVI.setOperand(0, MetadataAsValue::get(DVI.getContext(),
7752 ValueAsMetadata::get(SunkAddr)));
7753 return true;
7754 }
7755 return false;
7756 }
7757
7758 // An llvm.dbg.value may be using a value before its definition, due to
7759 // optimizations in this pass and others. Scan for such dbg.values, and rescue
7760 // them by moving the dbg.value to immediately after the value definition.
7761 // FIXME: Ideally this should never be necessary, and this has the potential
7762 // to re-order dbg.value intrinsics.
7763 bool CodeGenPrepare::placeDbgValues(Function &F) {
7764 bool MadeChange = false;
7765 DominatorTree DT(F);
7766
7767 for (BasicBlock &BB : F) {
7768 for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
7769 Instruction *Insn = &*BI++;
7770 DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
7771 if (!DVI)
7772 continue;
7773
7774 Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
7775
7776 if (!VI || VI->isTerminator())
7777 continue;
7778
7779 // If VI is a phi in a block with an EHPad terminator, we can't insert
7780 // after it.
7781 if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
7782 continue;
7783
7784 // If the defining instruction dominates the dbg.value, we do not need
7785 // to move the dbg.value.
7786 if (DT.dominates(VI, DVI))
7787 continue;
7788
7789 LLVM_DEBUG(dbgs() << "Moving Debug Value before :\n"
7790 << *DVI << ' ' << *VI);
7791 DVI->removeFromParent();
7792 if (isa<PHINode>(VI))
7793 DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
7794 else
7795 DVI->insertAfter(VI);
7796 MadeChange = true;
7797 ++NumDbgValueMoved;
7798 }
7799 }
7800 return MadeChange;
7801 }
7802
7803 /// Scale down both weights to fit into uint32_t.
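/// For example (illustrative), NewTrue = 2^33 and NewFalse = 2^31 give
/// Scale = 3, after which both weights fit into 32 bits.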
7804 static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
7805 uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
7806 uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1;
7807 NewTrue = NewTrue / Scale;
7808 NewFalse = NewFalse / Scale;
7809 }
7810
7811 /// Some targets prefer to split a conditional branch like:
7812 /// \code
7813 /// %0 = icmp ne i32 %a, 0
7814 /// %1 = icmp ne i32 %b, 0
7815 /// %or.cond = or i1 %0, %1
7816 /// br i1 %or.cond, label %TrueBB, label %FalseBB
7817 /// \endcode
7818 /// into multiple branch instructions like:
7819 /// \code
7820 /// bb1:
7821 /// %0 = icmp ne i32 %a, 0
7822 /// br i1 %0, label %TrueBB, label %bb2
7823 /// bb2:
7824 /// %1 = icmp ne i32 %b, 0
7825 /// br i1 %1, label %TrueBB, label %FalseBB
7826 /// \endcode
7827 /// This usually allows instruction selection to do even further optimizations
7828 /// and combine the compare with the branch instruction. Currently this is
7829 /// applied for targets which have "cheap" jump instructions.
7830 ///
7831 /// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
7832 ///
7833 bool CodeGenPrepare::splitBranchCondition(Function &F, bool &ModifiedDT) {
7834 if (!TM->Options.EnableFastISel || TLI->isJumpExpensive())
7835 return false;
7836
7837 bool MadeChange = false;
7838 for (auto &BB : F) {
7839 // Does this BB end with the following?
7840 // %cond1 = icmp|fcmp|binary instruction ...
7841 // %cond2 = icmp|fcmp|binary instruction ...
7842 // %cond.or = or|and i1 %cond1, %cond2
7843 // br i1 %cond.or label %dest1, label %dest2"
7844 BinaryOperator *LogicOp;
7845 BasicBlock *TBB, *FBB;
7846 if (!match(BB.getTerminator(), m_Br(m_OneUse(m_BinOp(LogicOp)), TBB, FBB)))
7847 continue;
7848
7849 auto *Br1 = cast<BranchInst>(BB.getTerminator());
7850 if (Br1->getMetadata(LLVMContext::MD_unpredictable))
7851 continue;
7852
7853 // The merging of mostly empty BB can cause a degenerate branch.
7854 if (TBB == FBB)
7855 continue;
7856
7857 unsigned Opc;
7858 Value *Cond1, *Cond2;
7859 if (match(LogicOp, m_And(m_OneUse(m_Value(Cond1)),
7860 m_OneUse(m_Value(Cond2)))))
7861 Opc = Instruction::And;
7862 else if (match(LogicOp, m_Or(m_OneUse(m_Value(Cond1)),
7863 m_OneUse(m_Value(Cond2)))))
7864 Opc = Instruction::Or;
7865 else
7866 continue;
7867
7868 if (!match(Cond1, m_CombineOr(m_Cmp(), m_BinOp())) ||
7869 !match(Cond2, m_CombineOr(m_Cmp(), m_BinOp())) )
7870 continue;
7871
7872 LLVM_DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());
7873
7874 // Create a new BB.
7875 auto *TmpBB =
7876 BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split",
7877 BB.getParent(), BB.getNextNode());
7878
7879 // Update the original basic block by using the first condition directly in
7880 // the branch instruction and removing the no longer needed and/or instruction.
7881 Br1->setCondition(Cond1);
7882 LogicOp->eraseFromParent();
7883
7884 // Depending on the condition we have to either replace the true or the
7885 // false successor of the original branch instruction.
7886 if (Opc == Instruction::And)
7887 Br1->setSuccessor(0, TmpBB);
7888 else
7889 Br1->setSuccessor(1, TmpBB);
7890
7891 // Fill in the new basic block.
7892 auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
7893 if (auto *I = dyn_cast<Instruction>(Cond2)) {
7894 I->removeFromParent();
7895 I->insertBefore(Br2);
7896 }
7897
7898 // Update PHI nodes in both successors. The original BB needs to be
7899 // replaced in one successor's PHI nodes, because the branch now comes from
7900 // the newly generated BB (TmpBB). In the other successor we need to add one
7901 // incoming edge to the PHI nodes, because both branch instructions now
7902 // target the same successor. Depending on the original branch condition
7903 // (and/or) we have to swap the successors (TrueDest, FalseDest), so that
7904 // we perform the correct update for the PHI nodes.
7905 // This doesn't change the successor order of the just created branch
7906 // instruction (or any other instruction).
7907 if (Opc == Instruction::Or)
7908 std::swap(TBB, FBB);
7909
7910 // Replace the old BB with the new BB.
7911 TBB->replacePhiUsesWith(&BB, TmpBB);
7912
7913 // Add another incoming edge from the new BB.
7914 for (PHINode &PN : FBB->phis()) {
7915 auto *Val = PN.getIncomingValueForBlock(&BB);
7916 PN.addIncoming(Val, TmpBB);
7917 }
7918
7919 // Update the branch weights (from SelectionDAGBuilder::
7920 // FindMergedConditions).
7921 if (Opc == Instruction::Or) {
7922 // Codegen X | Y as:
7923 // BB1:
7924 // jmp_if_X TBB
7925 // jmp TmpBB
7926 // TmpBB:
7927 // jmp_if_Y TBB
7928 // jmp FBB
7929 //
7930
7931 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
7932 // The requirement is that
7933 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
7934 // = TrueProb for original BB.
7935 // Assuming the original weights are A and B, one choice is to set BB1's
7936 // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
7937 // assumes that
7938 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
7939 // Another choice is to assume TrueProb for BB1 equals to TrueProb for
7940 // TmpBB, but the math is more complicated.
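// For example (illustrative), with original weights A = 1 and B = 3, Br1
// would get weights {1, 7} and Br2 weights {1, 6} (before scaling).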
7941 uint64_t TrueWeight, FalseWeight;
7942 if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
7943 uint64_t NewTrueWeight = TrueWeight;
7944 uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
7945 scaleWeights(NewTrueWeight, NewFalseWeight);
7946 Br1->setMetadata(LLVMContext::MD_prof, MDBuilder(Br1->getContext())
7947 .createBranchWeights(NewTrueWeight, NewFalseWeight));
7948
7949 NewTrueWeight = TrueWeight;
7950 NewFalseWeight = 2 * FalseWeight;
7951 scaleWeights(NewTrueWeight, NewFalseWeight);
7952 Br2->setMetadata(LLVMContext::MD_prof, MDBuilder(Br2->getContext())
7953 .createBranchWeights(NewTrueWeight, NewFalseWeight));
7954 }
7955 } else {
7956 // Codegen X & Y as:
7957 // BB1:
7958 // jmp_if_X TmpBB
7959 // jmp FBB
7960 // TmpBB:
7961 // jmp_if_Y TBB
7962 // jmp FBB
7963 //
7964 // This requires creation of TmpBB after CurBB.
7965
7966 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
7967 // The requirement is that
7968 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
7969 // = FalseProb for original BB.
7970 // Assuming the original weights are A and B, one choice is to set BB1's
7971 // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
7972 // assumes that
7973 // FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
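// For example (illustrative), with original weights A = 3 and B = 1, Br1
// would get weights {7, 1} and Br2 weights {6, 1} (before scaling).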
7974 uint64_t TrueWeight, FalseWeight;
7975 if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
7976 uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
7977 uint64_t NewFalseWeight = FalseWeight;
7978 scaleWeights(NewTrueWeight, NewFalseWeight);
7979 Br1->setMetadata(LLVMContext::MD_prof, MDBuilder(Br1->getContext())
7980 .createBranchWeights(NewTrueWeight, NewFalseWeight));
7981
7982 NewTrueWeight = 2 * TrueWeight;
7983 NewFalseWeight = FalseWeight;
7984 scaleWeights(NewTrueWeight, NewFalseWeight);
7985 Br2->setMetadata(LLVMContext::MD_prof, MDBuilder(Br2->getContext())
7986 .createBranchWeights(NewTrueWeight, NewFalseWeight));
7987 }
7988 }
7989
7990 ModifiedDT = true;
7991 MadeChange = true;
7992
7993 LLVM_DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
7994 TmpBB->dump());
7995 }
7996 return MadeChange;
7997 }
7998