//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion, attempting to remove as much
// code from the body of a loop as possible. It does this by either hoisting
// code into the preheader block, or by sinking code to the exit blocks if it is
// safe. This pass also promotes must-aliased memory locations in the loop to
// live in registers, thus hoisting and sinking "invariant" loads and stores.
//
// Hoisting operations out of loops is a canonicalization transform. It
// enables and simplifies subsequent optimizations in the middle-end.
// Rematerialization of hoisted instructions to reduce register pressure is the
// responsibility of the back-end, which has more accurate information about
// register pressure and also handles other optimizations than LICM that
// increase live-ranges.
//
// This pass uses alias analysis for two purposes:
//
//  1. Moving loop invariant loads and calls out of loops. If we can determine
//     that a load or call inside of a loop never aliases anything stored to,
//     we can hoist it or sink it like any other instruction.
//  2. Scalar Promotion of Memory - If there is a store instruction inside of
//     the loop, we try to move the store to happen AFTER the loop instead of
//     inside of the loop. This can only happen if a few conditions are true:
//       A. The pointer stored through is loop invariant
//       B. There are no stores or loads in the loop which _may_ alias the
//          pointer. There are no calls in the loop which mod/ref the pointer.
//     If these conditions are true, we can promote the loads and stores in the
//     loop of the pointer to use a temporary alloca'd variable. We then use
//     the SSAUpdater to construct the appropriate SSA form for the value.
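//
// For example, in
//
//   for (i = 0; i < n; ++i)
//     use(x + y);
//
// the computation of "x + y" is loop invariant and can be hoisted into the
// preheader so that it executes once rather than on every iteration.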
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LICM.h"
#include "llvm/ADT/PriorityWorklist.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopNestAnalysis.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <algorithm>
#include <utility>

using namespace llvm;

namespace llvm {
class LPMUpdater;
} // namespace llvm

#define DEBUG_TYPE "licm"

STATISTIC(NumCreatedBlocks, "Number of blocks created");
STATISTIC(NumClonedBranches, "Number of branches cloned");
STATISTIC(NumSunk, "Number of instructions sunk out of loop");
STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
STATISTIC(NumPromotionCandidates, "Number of promotion candidates");
STATISTIC(NumLoadPromoted, "Number of load-only promotions");
STATISTIC(NumLoadStorePromoted, "Number of load and store promotions");
STATISTIC(NumMinMaxHoisted,
          "Number of min/max expressions hoisted out of the loop");
STATISTIC(NumGEPsHoisted,
          "Number of geps reassociated and hoisted out of the loop");
STATISTIC(NumAddSubHoisted, "Number of add/subtract expressions reassociated "
                            "and hoisted out of the loop");
STATISTIC(NumFPAssociationsHoisted, "Number of invariant FP expressions "
                                    "reassociated and hoisted out of the loop");
/// Memory promotion is enabled by default.
static cl::opt<bool>
    DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
                     cl::desc("Disable memory promotion in LICM pass"));

static cl::opt<bool> ControlFlowHoisting(
    "licm-control-flow-hoisting", cl::Hidden, cl::init(false),
    cl::desc("Enable control flow (and PHI) hoisting in LICM"));

static cl::opt<bool>
    SingleThread("licm-force-thread-model-single", cl::Hidden, cl::init(false),
                 cl::desc("Force thread model single in LICM pass"));

static cl::opt<uint32_t> MaxNumUsesTraversed(
    "licm-max-num-uses-traversed", cl::Hidden, cl::init(8),
    cl::desc("Max num uses visited for identifying load "
             "invariance in loop using invariant start (default = 8)"));

static cl::opt<unsigned> FPAssociationUpperLimit(
    "licm-max-num-fp-reassociations", cl::init(5U), cl::Hidden,
    cl::desc(
        "Set upper limit for the number of transformations performed "
        "during a single round of hoisting the reassociated expressions."));

// Experimental option to allow imprecision in LICM in pathological cases, in
// exchange for faster compile. This is to be removed if MemorySSA starts to
// address the same issue. LICM calls MemorySSAWalker's
// getClobberingMemoryAccess, up to the value of the Cap, getting perfect
// accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess,
// which may not be precise, since optimizeUses is capped. The result is
// correct, but we may not get as "far up" as possible to get which access is
// clobbering the one queried.
cl::opt<unsigned> llvm::SetLicmMssaOptCap(
    "licm-mssa-optimization-cap", cl::init(100), cl::Hidden,
    cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
             "for faster compile. Caps the MemorySSA clobbering calls."));

// Experimentally, memory promotion carries less importance than sinking and
// hoisting. Limit when we do promotion when using MemorySSA, in order to save
// compile time.
cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap(
    "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
    cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
             "effect. When MSSA in LICM is enabled, then this is the maximum "
             "number of accesses allowed to be present in a loop in order to "
             "enable memory promotion."));

static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop,
                                      const LoopSafetyInfo *SafetyInfo,
                                      TargetTransformInfo *TTI,
                                      bool &FoldableInLoop, bool LoopNestMode);
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
                  MemorySSAUpdater &MSSAU, ScalarEvolution *SE,
                  OptimizationRemarkEmitter *ORE);
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
                 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
                 MemorySSAUpdater &MSSAU, OptimizationRemarkEmitter *ORE);
static bool isSafeToExecuteUnconditionally(
    Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
    const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
    OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
    AssumptionCache *AC, bool AllowSpeculation);
static bool pointerInvalidatedByLoop(MemorySSA *MSSA, MemoryUse *MU,
                                     Loop *CurLoop, Instruction &I,
                                     SinkAndHoistLICMFlags &Flags,
                                     bool InvariantGroup);
static bool pointerInvalidatedByBlock(BasicBlock &BB, MemorySSA &MSSA,
                                      MemoryUse &MU);
/// Aggregates various functions for hoisting computations out of loop.
static bool hoistArithmetics(Instruction &I, Loop &L,
                             ICFLoopSafetyInfo &SafetyInfo,
                             MemorySSAUpdater &MSSAU, AssumptionCache *AC,
                             DominatorTree *DT);
static Instruction *cloneInstructionInExitBlock(
    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU);

static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
                             MemorySSAUpdater &MSSAU);

static void moveInstructionBefore(Instruction &I, BasicBlock::iterator Dest,
                                  ICFLoopSafetyInfo &SafetyInfo,
                                  MemorySSAUpdater &MSSAU, ScalarEvolution *SE);

static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
                                function_ref<void(Instruction *)> Fn);
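// Each candidate for promotion is a set of pointers that must alias, paired
// with a flag recording whether the location is also read by accesses outside
// the set.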
using PointersAndHasReadsOutsideSet =
    std::pair<SmallSetVector<Value *, 8>, bool>;
static SmallVector<PointersAndHasReadsOutsideSet, 0>
collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L);

namespace {
struct LoopInvariantCodeMotion {
  bool runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
                 AssumptionCache *AC, TargetLibraryInfo *TLI,
                 TargetTransformInfo *TTI, ScalarEvolution *SE, MemorySSA *MSSA,
                 OptimizationRemarkEmitter *ORE, bool LoopNestMode = false);

  LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
                          unsigned LicmMssaNoAccForPromotionCap,
                          bool LicmAllowSpeculation)
      : LicmMssaOptCap(LicmMssaOptCap),
        LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
        LicmAllowSpeculation(LicmAllowSpeculation) {}

private:
  unsigned LicmMssaOptCap;
  unsigned LicmMssaNoAccForPromotionCap;
  bool LicmAllowSpeculation;
};

struct LegacyLICMPass : public LoopPass {
  static char ID; // Pass identification, replacement for typeid
  LegacyLICMPass(
      unsigned LicmMssaOptCap = SetLicmMssaOptCap,
      unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap,
      bool LicmAllowSpeculation = true)
      : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
                           LicmAllowSpeculation) {
    initializeLegacyLICMPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L))
      return false;

    LLVM_DEBUG(dbgs() << "Perform LICM on Loop with header at block "
                      << L->getHeader()->getNameOrAsOperand() << "\n");

    Function *F = L->getHeader()->getParent();

    auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
    MemorySSA *MSSA = &getAnalysis<MemorySSAWrapperPass>().getMSSA();
    // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
    // pass. Function analyses need to be preserved across loop transformations
    // but ORE cannot be preserved (see comment before the pass definition).
    OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
    return LICM.runOnLoop(
        L, &getAnalysis<AAResultsWrapperPass>().getAAResults(),
        &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
        &getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
        &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(*F),
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(*F),
        &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(*F),
        SE ? &SE->getSE() : nullptr, MSSA, &ORE);
  }

  /// This transformation requires natural loop information & requires that
  /// loop preheaders be inserted into the CFG...
  ///
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<MemorySSAWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    getLoopAnalysisUsage(AU);
    LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
    AU.addPreserved<LazyBlockFrequencyInfoPass>();
    AU.addPreserved<LazyBranchProbabilityInfoPass>();
  }

private:
  LoopInvariantCodeMotion LICM;
};
} // namespace

PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM,
                                LoopStandardAnalysisResults &AR, LPMUpdater &) {
  if (!AR.MSSA)
    report_fatal_error("LICM requires MemorySSA (loop-mssa)",
                       /*GenCrashDiag*/ false);

  // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
  // pass. Function analyses need to be preserved across loop transformations
  // but ORE cannot be preserved (see comment before the pass definition).
  OptimizationRemarkEmitter ORE(L.getHeader()->getParent());

  LoopInvariantCodeMotion LICM(Opts.MssaOptCap, Opts.MssaNoAccForPromotionCap,
                               Opts.AllowSpeculation);
  if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, &AR.AC, &AR.TLI, &AR.TTI,
                      &AR.SE, AR.MSSA, &ORE))
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();
  PA.preserve<MemorySSAAnalysis>();

  return PA;
}

void LICMPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<LICMPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);

  OS << '<';
  OS << (Opts.AllowSpeculation ? "" : "no-") << "allowspeculation";
  OS << '>';
}

PreservedAnalyses LNICMPass::run(LoopNest &LN, LoopAnalysisManager &AM,
                                 LoopStandardAnalysisResults &AR,
                                 LPMUpdater &) {
  if (!AR.MSSA)
    report_fatal_error("LNICM requires MemorySSA (loop-mssa)",
                       /*GenCrashDiag*/ false);

  // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
  // pass. Function analyses need to be preserved across loop transformations
  // but ORE cannot be preserved (see comment before the pass definition).
  OptimizationRemarkEmitter ORE(LN.getParent());

  LoopInvariantCodeMotion LICM(Opts.MssaOptCap, Opts.MssaNoAccForPromotionCap,
                               Opts.AllowSpeculation);

  Loop &OutermostLoop = LN.getOutermostLoop();
  bool Changed = LICM.runOnLoop(&OutermostLoop, &AR.AA, &AR.LI, &AR.DT, &AR.AC,
                                &AR.TLI, &AR.TTI, &AR.SE, AR.MSSA, &ORE, true);

  if (!Changed)
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();

  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<LoopAnalysis>();
  PA.preserve<MemorySSAAnalysis>();

  return PA;
}

void LNICMPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<LNICMPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);

  OS << '<';
  OS << (Opts.AllowSpeculation ? "" : "no-") << "allowspeculation";
  OS << '>';
}

char LegacyLICMPass::ID = 0;
INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LazyBFIPass)
INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
                    false)

Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }

llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(bool IsSink, Loop &L,
                                                   MemorySSA &MSSA)
    : SinkAndHoistLICMFlags(SetLicmMssaOptCap, SetLicmMssaNoAccForPromotionCap,
                            IsSink, L, MSSA) {}

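// Scan the loop's memory accesses and remember whether their number exceeds
// the promotion cap; if it does, promotion (and other cap-sensitive queries)
// will be skipped.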
llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(
    unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink,
    Loop &L, MemorySSA &MSSA)
    : LicmMssaOptCap(LicmMssaOptCap),
      LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
      IsSink(IsSink) {
  unsigned AccessCapCount = 0;
  for (auto *BB : L.getBlocks())
    if (const auto *Accesses = MSSA.getBlockAccesses(BB))
      for (const auto &MA : *Accesses) {
        (void)MA;
        ++AccessCapCount;
        if (AccessCapCount > LicmMssaNoAccForPromotionCap) {
          NoOfMemAccTooLarge = true;
          return;
        }
      }
}

/// Hoist expressions out of the specified loop. Note, alias info for inner
/// loop is not preserved so it is not a good idea to run LICM multiple
/// times on one loop.
bool LoopInvariantCodeMotion::runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI,
                                        DominatorTree *DT, AssumptionCache *AC,
                                        TargetLibraryInfo *TLI,
                                        TargetTransformInfo *TTI,
                                        ScalarEvolution *SE, MemorySSA *MSSA,
                                        OptimizationRemarkEmitter *ORE,
                                        bool LoopNestMode) {
  bool Changed = false;

  assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");

  // If this loop has metadata indicating that LICM is not to be performed then
  // just exit.
  if (hasDisableLICMTransformsHint(L)) {
    return false;
  }

  // Don't sink stores from loops with coroutine suspend instructions.
  // LICM would sink instructions into the default destination of
  // the coroutine switch. The default destination of the switch is to
  // handle the case where the coroutine is suspended, by which point the
  // coroutine frame may have been destroyed. No instruction can be sunk there.
  // FIXME: This would unfortunately hurt the performance of coroutines, however
  // there is currently no general solution for this. Similar issues could also
  // potentially happen in other passes where instructions are being moved
  // across that edge.
  bool HasCoroSuspendInst = llvm::any_of(L->getBlocks(), [](BasicBlock *BB) {
    return llvm::any_of(*BB, [](Instruction &I) {
      IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
      return II && II->getIntrinsicID() == Intrinsic::coro_suspend;
    });
  });

  MemorySSAUpdater MSSAU(MSSA);
  SinkAndHoistLICMFlags Flags(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
                              /*IsSink=*/true, *L, *MSSA);

  // Get the preheader block to move instructions into...
  BasicBlock *Preheader = L->getLoopPreheader();

  // Compute loop safety information.
  ICFLoopSafetyInfo SafetyInfo;
  SafetyInfo.computeLoopSafetyInfo(L);

  // We want to visit all of the instructions in this loop... that are not part
  // of our subloops (they have already had their invariants hoisted out of
  // their loop, into this loop, so there is no need to process the BODIES of
  // the subloops).
  //
  // Traverse the body of the loop in depth first order on the dominator tree so
  // that we are guaranteed to see definitions before we see uses. This allows
  // us to sink instructions in one pass, without iteration. After sinking
  // instructions, we perform another pass to hoist them out of the loop.
  if (L->hasDedicatedExits())
    Changed |=
        LoopNestMode
            ? sinkRegionForLoopNest(DT->getNode(L->getHeader()), AA, LI, DT,
                                    TLI, TTI, L, MSSAU, &SafetyInfo, Flags, ORE)
            : sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
                         MSSAU, &SafetyInfo, Flags, ORE);
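  // Sinking is complete; the remaining queries are made on behalf of hoisting.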
  Flags.setIsSink(false);
  if (Preheader)
    Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, AC, TLI, L,
                           MSSAU, SE, &SafetyInfo, Flags, ORE, LoopNestMode,
                           LicmAllowSpeculation);

  // Now that all loop invariants have been removed from the loop, promote any
  // memory references to scalars that we can.
  // Don't sink stores from loops without dedicated block exits. Exits
  // containing indirect branches are not transformed by loop simplify,
  // make sure we catch that. An additional load may be generated in the
  // preheader for SSA updater, so also avoid sinking when no preheader
  // is available.
  if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
      !Flags.tooManyMemoryAccesses() && !HasCoroSuspendInst) {
    // Figure out the loop exits and their insertion points
    SmallVector<BasicBlock *, 8> ExitBlocks;
    L->getUniqueExitBlocks(ExitBlocks);

    // We can't insert into a catchswitch.
    bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
      return isa<CatchSwitchInst>(Exit->getTerminator());
    });

    if (!HasCatchSwitch) {
      SmallVector<BasicBlock::iterator, 8> InsertPts;
      SmallVector<MemoryAccess *, 8> MSSAInsertPts;
      InsertPts.reserve(ExitBlocks.size());
      MSSAInsertPts.reserve(ExitBlocks.size());
      for (BasicBlock *ExitBlock : ExitBlocks) {
        InsertPts.push_back(ExitBlock->getFirstInsertionPt());
        MSSAInsertPts.push_back(nullptr);
      }

      PredIteratorCache PIC;

      // Promoting one set of accesses may make the pointers for another set
      // loop invariant, so run this in a loop.
      bool Promoted = false;
      bool LocalPromoted;
      do {
        LocalPromoted = false;
        for (auto [PointerMustAliases, HasReadsOutsideSet] :
             collectPromotionCandidates(MSSA, AA, L)) {
          LocalPromoted |= promoteLoopAccessesToScalars(
              PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI,
              DT, AC, TLI, TTI, L, MSSAU, &SafetyInfo, ORE,
              LicmAllowSpeculation, HasReadsOutsideSet);
        }
        Promoted |= LocalPromoted;
      } while (LocalPromoted);

      // Once we have promoted values across the loop body we have to
      // recursively reform LCSSA as any nested loop may now have values defined
      // within the loop used in the outer loop.
      // FIXME: This is really heavy handed. It would be a bit better to use an
      // SSAUpdater strategy during promotion that was LCSSA aware and reformed
      // it as it went.
      if (Promoted)
        formLCSSARecursively(*L, *DT, LI, SE);

      Changed |= Promoted;
    }
  }

  // Check that neither this loop nor its parent have had LCSSA broken. LICM is
  // specifically moving instructions across the loop boundary and so it is
  // especially in need of basic functional correctness checking here.
  assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
  assert((L->isOutermost() || L->getParentLoop()->isLCSSAForm(*DT)) &&
         "Parent loop not left in LCSSA form after LICM!");

  if (VerifyMemorySSA)
    MSSA->verifyMemorySSA();

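  // Moving instructions may have invalidated SCEV's cached loop dispositions.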
  if (Changed && SE)
    SE->forgetLoopDispositions();
  return Changed;
}

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in reverse depth
/// first order w.r.t the DominatorTree. This allows us to visit uses before
/// definitions, allowing us to sink a loop body in one pass without iteration.
///
bool llvm::sinkRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                      DominatorTree *DT, TargetLibraryInfo *TLI,
                      TargetTransformInfo *TTI, Loop *CurLoop,
                      MemorySSAUpdater &MSSAU, ICFLoopSafetyInfo *SafetyInfo,
                      SinkAndHoistLICMFlags &Flags,
                      OptimizationRemarkEmitter *ORE, Loop *OutermostLoop) {

  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to sinkRegion.");

  // We want to visit children before parents. We will enqueue all the parents
  // before their children in the worklist and process the worklist in reverse
  // order.
  SmallVector<DomTreeNode *, 16> Worklist = collectChildrenInLoop(N, CurLoop);

  bool Changed = false;
  for (DomTreeNode *DTN : reverse(Worklist)) {
    BasicBlock *BB = DTN->getBlock();
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (inSubLoop(BB, CurLoop, LI))
      continue;

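    // Walk the block bottom-up so that uses are visited before definitions,
    // which lets us sink or delete each instruction in a single pass.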
    for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
      Instruction &I = *--II;

      // The instruction is not used in the loop if it is dead. In this case,
      // we just delete it instead of sinking it.
      if (isInstructionTriviallyDead(&I, TLI)) {
        LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
        salvageKnowledge(&I);
        salvageDebugInfo(I);
        ++II;
        eraseInstruction(I, *SafetyInfo, MSSAU);
        Changed = true;
        continue;
      }

      // Check to see if we can sink this instruction to the exit blocks
      // of the loop. We can do this if all of the users of the instruction
      // are outside of the loop. In this case, it doesn't even matter if the
      // operands of the instruction are loop invariant.
      //
      bool FoldableInLoop = false;
      bool LoopNestMode = OutermostLoop != nullptr;
      if (!I.mayHaveSideEffects() &&
          isNotUsedOrFoldableInLoop(I, LoopNestMode ? OutermostLoop : CurLoop,
                                    SafetyInfo, TTI, FoldableInLoop,
                                    LoopNestMode) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, MSSAU, true, Flags, ORE)) {
        if (sink(I, LI, DT, CurLoop, SafetyInfo, MSSAU, ORE)) {
          if (!FoldableInLoop) {
            ++II;
            salvageDebugInfo(I);
            eraseInstruction(I, *SafetyInfo, MSSAU);
          }
          Changed = true;
        }
      }
    }
  }
  if (VerifyMemorySSA)
    MSSAU.getMemorySSA()->verifyMemorySSA();
  return Changed;
}

bool llvm::sinkRegionForLoopNest(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                                 DominatorTree *DT, TargetLibraryInfo *TLI,
                                 TargetTransformInfo *TTI, Loop *CurLoop,
                                 MemorySSAUpdater &MSSAU,
                                 ICFLoopSafetyInfo *SafetyInfo,
                                 SinkAndHoistLICMFlags &Flags,
                                 OptimizationRemarkEmitter *ORE) {

  bool Changed = false;
  SmallPriorityWorklist<Loop *, 4> Worklist;
  Worklist.insert(CurLoop);
  appendLoopsToWorklist(*CurLoop, Worklist);
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();
    Changed |= sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
                          MSSAU, SafetyInfo, Flags, ORE, CurLoop);
  }
  return Changed;
}

namespace {
// This is a helper class for hoistRegion to make it able to hoist control flow
// in order to be able to hoist phis. The way this works is that we initially
// start hoisting to the loop preheader, and when we see a loop invariant branch
// we make note of this. When we then come to hoist an instruction that's
// conditional on such a branch we duplicate the branch and the relevant control
// flow, then hoist the instruction into the block corresponding to its original
// block in the duplicated control flow.
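//
// For example, given a loop-invariant diamond
//
//   if (invariant_cond) { x = a; } else { x = b; }
//
// inside the loop, the branch and join block are replicated in the preheader
// so that the phi merging the two loop-invariant values can be hoisted into
// the replicated join block.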
class ControlFlowHoister {
private:
  // Information about the loop we are hoisting from
  LoopInfo *LI;
  DominatorTree *DT;
  Loop *CurLoop;
  MemorySSAUpdater &MSSAU;

  // A map of blocks in the loop to the block their instructions will be
  // hoisted to.
  DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;

  // The branches that we can hoist, mapped to the block that marks a
  // convergence point of their control flow.
  DenseMap<BranchInst *, BasicBlock *> HoistableBranches;

public:
  ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
                     MemorySSAUpdater &MSSAU)
      : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}

  void registerPossiblyHoistableBranch(BranchInst *BI) {
    // We can only hoist conditional branches with loop invariant operands.
    if (!ControlFlowHoisting || !BI->isConditional() ||
        !CurLoop->hasLoopInvariantOperands(BI))
      return;

    // The branch destinations need to be in the loop, and we don't gain
    // anything by duplicating conditional branches with duplicate successors,
    // as it's essentially the same as an unconditional branch.
    BasicBlock *TrueDest = BI->getSuccessor(0);
    BasicBlock *FalseDest = BI->getSuccessor(1);
    if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) ||
        TrueDest == FalseDest)
      return;

    // We can hoist BI if one branch destination is the successor of the other,
    // or both have a common successor which we check by seeing if the
    // intersection of their successors is non-empty.
    // TODO: This could be expanded to allowing branches where both ends
    // eventually converge to a single block.
    SmallPtrSet<BasicBlock *, 4> TrueDestSucc, FalseDestSucc;
    TrueDestSucc.insert(succ_begin(TrueDest), succ_end(TrueDest));
    FalseDestSucc.insert(succ_begin(FalseDest), succ_end(FalseDest));
    BasicBlock *CommonSucc = nullptr;
    if (TrueDestSucc.count(FalseDest)) {
      CommonSucc = FalseDest;
    } else if (FalseDestSucc.count(TrueDest)) {
      CommonSucc = TrueDest;
    } else {
      set_intersect(TrueDestSucc, FalseDestSucc);
      // If there's one common successor use that.
      if (TrueDestSucc.size() == 1)
        CommonSucc = *TrueDestSucc.begin();
      // If there's more than one pick whichever appears first in the block
      // list (we can't use the value returned by TrueDestSucc.begin() as it's
      // unpredictable which element gets returned).
      else if (!TrueDestSucc.empty()) {
        Function *F = TrueDest->getParent();
        auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); };
        auto It = llvm::find_if(*F, IsSucc);
        assert(It != F->end() && "Could not find successor in function");
        CommonSucc = &*It;
      }
    }
    // The common successor has to be dominated by the branch, as otherwise
    // there will be some other path to the successor that will not be
    // controlled by this branch so any phi we hoist would be controlled by the
    // wrong condition. This also takes care of avoiding hoisting of loop back
    // edges.
    // TODO: In some cases this could be relaxed if the successor is dominated
    // by another block that's been hoisted and we can guarantee that the
    // control flow has been replicated exactly.
    if (CommonSucc && DT->dominates(BI, CommonSucc))
      HoistableBranches[BI] = CommonSucc;
  }

  bool canHoistPHI(PHINode *PN) {
    // The phi must have loop invariant operands.
    if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN))
      return false;
    // We can hoist phis if the block they are in is the target of hoistable
    // branches which cover all of the predecessors of the block.
    SmallPtrSet<BasicBlock *, 8> PredecessorBlocks;
    BasicBlock *BB = PN->getParent();
    for (BasicBlock *PredBB : predecessors(BB))
      PredecessorBlocks.insert(PredBB);
    // If we have fewer predecessor blocks than predecessors then the phi will
    // have more than one incoming value for the same block which we can't
    // handle.
    // TODO: This could be handled by erasing some of the duplicate incoming
    // values.
    if (PredecessorBlocks.size() != pred_size(BB))
      return false;
    for (auto &Pair : HoistableBranches) {
      if (Pair.second == BB) {
        // Which blocks are predecessors via this branch depends on whether the
        // branch is triangle-like or diamond-like.
        if (Pair.first->getSuccessor(0) == BB) {
          PredecessorBlocks.erase(Pair.first->getParent());
          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
        } else if (Pair.first->getSuccessor(1) == BB) {
          PredecessorBlocks.erase(Pair.first->getParent());
          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
        } else {
          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
        }
      }
    }
    // PredecessorBlocks will now be empty if for every predecessor of BB we
    // found a hoistable branch source.
    return PredecessorBlocks.empty();
  }

  BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) {
    if (!ControlFlowHoisting)
      return CurLoop->getLoopPreheader();
    // If BB has already been hoisted, return that
    if (HoistDestinationMap.count(BB))
      return HoistDestinationMap[BB];

    // Check if this block is conditional based on a pending branch
    auto HasBBAsSuccessor =
        [&](DenseMap<BranchInst *, BasicBlock *>::value_type &Pair) {
          return BB != Pair.second && (Pair.first->getSuccessor(0) == BB ||
                                       Pair.first->getSuccessor(1) == BB);
        };
    auto It = llvm::find_if(HoistableBranches, HasBBAsSuccessor);

    // If not involved in a pending branch, hoist to preheader
    BasicBlock *InitialPreheader = CurLoop->getLoopPreheader();
    if (It == HoistableBranches.end()) {
      LLVM_DEBUG(dbgs() << "LICM using "
                        << InitialPreheader->getNameOrAsOperand()
                        << " as hoist destination for "
                        << BB->getNameOrAsOperand() << "\n");
      HoistDestinationMap[BB] = InitialPreheader;
      return InitialPreheader;
    }
    BranchInst *BI = It->first;
    assert(std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) ==
               HoistableBranches.end() &&
           "BB is expected to be the target of at most one branch");

    LLVMContext &C = BB->getContext();
    BasicBlock *TrueDest = BI->getSuccessor(0);
    BasicBlock *FalseDest = BI->getSuccessor(1);
    BasicBlock *CommonSucc = HoistableBranches[BI];
    BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent());

    // Create hoisted versions of blocks that currently don't have them
    auto CreateHoistedBlock = [&](BasicBlock *Orig) {
      if (HoistDestinationMap.count(Orig))
        return HoistDestinationMap[Orig];
      BasicBlock *New =
          BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent());
      HoistDestinationMap[Orig] = New;
      DT->addNewBlock(New, HoistTarget);
      if (CurLoop->getParentLoop())
        CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI);
      ++NumCreatedBlocks;
      LLVM_DEBUG(dbgs() << "LICM created " << New->getName()
                        << " as hoist destination for " << Orig->getName()
                        << "\n");
      return New;
    };
    BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest);
    BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest);
    BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc);

    // Link up these blocks with branches.
    if (!HoistCommonSucc->getTerminator()) {
      // The new common successor we've generated will branch to whatever that
      // hoist target branched to.
      BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor();
      assert(TargetSucc && "Expected hoist target to have a single successor");
      HoistCommonSucc->moveBefore(TargetSucc);
      BranchInst::Create(TargetSucc, HoistCommonSucc);
    }
    if (!HoistTrueDest->getTerminator()) {
      HoistTrueDest->moveBefore(HoistCommonSucc);
      BranchInst::Create(HoistCommonSucc, HoistTrueDest);
    }
    if (!HoistFalseDest->getTerminator()) {
      HoistFalseDest->moveBefore(HoistCommonSucc);
      BranchInst::Create(HoistCommonSucc, HoistFalseDest);
    }

    // If BI is being cloned to what was originally the preheader then
    // HoistCommonSucc will now be the new preheader.
    if (HoistTarget == InitialPreheader) {
      // Phis in the loop header now need to use the new preheader.
      InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc);
      MSSAU.wireOldPredecessorsToNewImmediatePredecessor(
          HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget});
      // The new preheader dominates the loop header.
      DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc);
      DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader());
      DT->changeImmediateDominator(HeaderNode, PreheaderNode);
      // The preheader hoist destination is now the new preheader, with the
      // exception of the hoist destination of this branch.
      for (auto &Pair : HoistDestinationMap)
        if (Pair.second == InitialPreheader && Pair.first != BI->getParent())
          Pair.second = HoistCommonSucc;
    }

    // Now finally clone BI.
    ReplaceInstWithInst(
        HoistTarget->getTerminator(),
        BranchInst::Create(HoistTrueDest, HoistFalseDest, BI->getCondition()));
    ++NumClonedBranches;

    assert(CurLoop->getLoopPreheader() &&
           "Hoisting blocks should not have destroyed preheader");
    return HoistDestinationMap[BB];
  }
};
} // namespace

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in depth first
/// order w.r.t the DominatorTree. This allows us to visit definitions before
/// uses, allowing us to hoist a loop body in one pass without iteration.
///
bool llvm::hoistRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                       DominatorTree *DT, AssumptionCache *AC,
                       TargetLibraryInfo *TLI, Loop *CurLoop,
                       MemorySSAUpdater &MSSAU, ScalarEvolution *SE,
                       ICFLoopSafetyInfo *SafetyInfo,
                       SinkAndHoistLICMFlags &Flags,
                       OptimizationRemarkEmitter *ORE, bool LoopNestMode,
                       bool AllowSpeculation) {
  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to hoistRegion.");

  ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);

  // Keep track of instructions that have been hoisted, as they may need to be
  // re-hoisted if they end up not dominating all of their uses.
  SmallVector<Instruction *, 16> HoistedInstructions;

  // For PHI hoisting to work we need to hoist blocks before their successors.
  // We can do this by iterating through the blocks in the loop in reverse
  // post-order.
  LoopBlocksRPO Worklist(CurLoop);
  Worklist.perform(LI);
  bool Changed = false;
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  for (BasicBlock *BB : Worklist) {
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (!LoopNestMode && inSubLoop(BB, CurLoop, LI))
      continue;

    for (Instruction &I : llvm::make_early_inc_range(*BB)) {
      // Try hoisting the instruction out to the preheader. We can only do
      // this if all of the operands of the instruction are loop invariant and
      // if it is safe to hoist the instruction. We also check block frequency
      // to make sure instruction only gets hoisted into colder blocks.
      // TODO: It may be safe to hoist if we are hoisting to a conditional block
      // and we have accurately duplicated the control flow from the loop header
      // to that block.
      if (CurLoop->hasLoopInvariantOperands(&I) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, MSSAU, true, Flags, ORE) &&
          isSafeToExecuteUnconditionally(
              I, DT, TLI, CurLoop, SafetyInfo, ORE,
              Preheader->getTerminator(), AC, AllowSpeculation)) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, SE, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }

      // Attempt to remove floating point division out of the loop by
      // converting it to a reciprocal multiplication.
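      // That is, rewrite "X / D" as "X * (1.0 / D)" so that the loop-invariant
      // reciprocal can be hoisted; this is only legal when the instruction
      // carries the allow-reciprocal fast-math flag.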
      if (I.getOpcode() == Instruction::FDiv && I.hasAllowReciprocal() &&
          CurLoop->isLoopInvariant(I.getOperand(1))) {
        auto Divisor = I.getOperand(1);
        auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0);
        auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor);
        ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent());
        ReciprocalDivisor->insertBefore(&I);

        auto Product =
            BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor);
        Product->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(Product, I.getParent());
        Product->insertAfter(&I);
        I.replaceAllUsesWith(Product);
        eraseInstruction(I, *SafetyInfo, MSSAU);

        hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB),
              SafetyInfo, MSSAU, SE, ORE);
        HoistedInstructions.push_back(ReciprocalDivisor);
        Changed = true;
        continue;
      }

      auto IsInvariantStart = [&](Instruction &I) {
        using namespace PatternMatch;
        return I.use_empty() &&
               match(&I, m_Intrinsic<Intrinsic::invariant_start>());
      };
      auto MustExecuteWithoutWritesBefore = [&](Instruction &I) {
        return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
               SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
      };
      if ((IsInvariantStart(I) || isGuard(&I)) &&
          CurLoop->hasLoopInvariantOperands(&I) &&
          MustExecuteWithoutWritesBefore(I)) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, SE, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }

      if (PHINode *PN = dyn_cast<PHINode>(&I)) {
        if (CFH.canHoistPHI(PN)) {
          // Redirect incoming blocks first to ensure that we create hoisted
          // versions of those blocks before we hoist the phi.
          for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i)
            PN->setIncomingBlock(
                i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i)));
          hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
                MSSAU, SE, ORE);
          assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
          Changed = true;
          continue;
        }
      }

      // Try to reassociate instructions so that part of computations can be
      // done out of loop.
      if (hoistArithmetics(I, *CurLoop, *SafetyInfo, MSSAU, AC, DT)) {
        Changed = true;
        continue;
      }

      // Remember possibly hoistable branches so we can actually hoist them
      // later if needed.
      if (BranchInst *BI = dyn_cast<BranchInst>(&I))
        CFH.registerPossiblyHoistableBranch(BI);
    }
  }

  // If we hoisted instructions to a conditional block they may not dominate
  // their uses that weren't hoisted (such as phis where some operands are not
  // loop invariant). If so make them unconditional by moving them to their
  // immediate dominator. We iterate through the instructions in reverse order
  // which ensures that when we rehoist an instruction we rehoist its operands,
  // and also keep track of where in the block we are rehoisting to make sure
  // that we rehoist instructions before the instructions that use them.
  Instruction *HoistPoint = nullptr;
  if (ControlFlowHoisting) {
    for (Instruction *I : reverse(HoistedInstructions)) {
      if (!llvm::all_of(I->uses(),
                        [&](Use &U) { return DT->dominates(I, U); })) {
        BasicBlock *Dominator =
            DT->getNode(I->getParent())->getIDom()->getBlock();
        if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) {
          if (HoistPoint)
            assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
                   "New hoist point expected to dominate old hoist point");
          HoistPoint = Dominator->getTerminator();
        }
        LLVM_DEBUG(dbgs() << "LICM rehoisting to "
                          << HoistPoint->getParent()->getNameOrAsOperand()
                          << ": " << *I << "\n");
        moveInstructionBefore(*I, HoistPoint->getIterator(), *SafetyInfo, MSSAU,
                              SE);
        HoistPoint = I;
        Changed = true;
      }
    }
  }
  if (VerifyMemorySSA)
    MSSAU.getMemorySSA()->verifyMemorySSA();

  // Now that we've finished hoisting make sure that LI and DT are still
  // valid.
#ifdef EXPENSIVE_CHECKS
  if (Changed) {
    assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
           "Dominator tree verification failed");
    LI->verify(*DT);
  }
#endif

  return Changed;
}

// Return true if LI is invariant within scope of the loop. LI is invariant if
// CurLoop is dominated by an invariant.start representing the same memory
// location and size as the memory location LI loads from, and also the
// invariant.start has no uses.
static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT,
                                  Loop *CurLoop) {
  Value *Addr = LI->getPointerOperand();
  const DataLayout &DL = LI->getModule()->getDataLayout();
  const TypeSize LocSizeInBits = DL.getTypeSizeInBits(LI->getType());

  // It is not currently possible for clang to generate an invariant.start
  // intrinsic with scalable vector types because we don't support thread local
  // sizeless types and we don't permit sizeless types in structs or classes.
  // Furthermore, even if support is added for this in future the intrinsic
  // itself is defined to have a size of -1 for variable sized objects. This
  // makes it impossible to verify if the intrinsic envelops our region of
  // interest. For example, both <vscale x 32 x i8> and <vscale x 16 x i8>
  // types would have a -1 parameter, but the former is clearly double the size
  // of the latter.
  if (LocSizeInBits.isScalable())
    return false;

  // If we've ended up at a global/constant, bail. We shouldn't be looking at
  // uselists for non-local Values in a loop pass.
  if (isa<Constant>(Addr))
    return false;

  unsigned UsesVisited = 0;
  // Traverse all uses of the load operand value, to see if invariant.start is
  // one of the uses, and whether it dominates the load instruction.
  for (auto *U : Addr->users()) {
    // Avoid traversing for Load operand with high number of users.
    if (++UsesVisited > MaxNumUsesTraversed)
      return false;
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    // If there are escaping uses of the invariant.start instruction, the load
    // may be non-invariant.
    if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
        !II->use_empty())
      continue;
    ConstantInt *InvariantSize = cast<ConstantInt>(II->getArgOperand(0));
    // The intrinsic supports having a -1 argument for variable sized objects
    // so we should check for that here.
    if (InvariantSize->isNegative())
      continue;
    uint64_t InvariantSizeInBits = InvariantSize->getSExtValue() * 8;
    // Confirm the invariant.start location size contains the load operand size
    // in bits. Also, the invariant.start should dominate the load, and we
    // should not hoist the load out of a loop that contains this dominating
    // invariant.start.
    if (LocSizeInBits.getFixedValue() <= InvariantSizeInBits &&
        DT->properlyDominates(II->getParent(), CurLoop->getHeader()))
      return true;
  }

  return false;
}

namespace {
/// Return true if-and-only-if we know how to (mechanically) both hoist and
/// sink a given instruction out of a loop. Does not address legality
/// concerns such as aliasing or speculation safety.
bool isHoistableAndSinkableInst(Instruction &I) {
  // Only these instructions are hoistable/sinkable.
  return (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
          isa<FenceInst>(I) || isa<CastInst>(I) || isa<UnaryOperator>(I) ||
          isa<BinaryOperator>(I) || isa<SelectInst>(I) ||
          isa<GetElementPtrInst>(I) || isa<CmpInst>(I) ||
          isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
          isa<ShuffleVectorInst>(I) || isa<ExtractValueInst>(I) ||
          isa<InsertValueInst>(I) || isa<FreezeInst>(I));
}

/// Return true if MSSA knows there are no MemoryDefs in the loop.
bool isReadOnly(const MemorySSAUpdater &MSSAU, const Loop *L) {
  for (auto *BB : L->getBlocks())
    if (MSSAU.getMemorySSA()->getBlockDefs(BB))
      return false;
  return true;
}

/// Return true if I is the only Instruction with a MemoryAccess in L.
bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
                        const MemorySSAUpdater &MSSAU) {
  for (auto *BB : L->getBlocks())
    if (auto *Accs = MSSAU.getMemorySSA()->getBlockAccesses(BB)) {
      int NotAPhi = 0;
      for (const auto &Acc : *Accs) {
        if (isa<MemoryPhi>(&Acc))
          continue;
        const auto *MUD = cast<MemoryUseOrDef>(&Acc);
        if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
          return false;
      }
    }
  return true;
}
} // namespace

static MemoryAccess *getClobberingMemoryAccess(MemorySSA &MSSA,
                                               BatchAAResults &BAA,
                                               SinkAndHoistLICMFlags &Flags,
                                               MemoryUseOrDef *MA) {
  // See declaration of SetLicmMssaOptCap for usage details.
  if (Flags.tooManyClobberingCalls())
    return MA->getDefiningAccess();

  MemoryAccess *Source =
      MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(MA, BAA);
  Flags.incrementClobberingCalls();
  return Source;
}
1148
canSinkOrHoistInst(Instruction & I,AAResults * AA,DominatorTree * DT,Loop * CurLoop,MemorySSAUpdater & MSSAU,bool TargetExecutesOncePerLoop,SinkAndHoistLICMFlags & Flags,OptimizationRemarkEmitter * ORE)1149 bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
1150 Loop *CurLoop, MemorySSAUpdater &MSSAU,
1151 bool TargetExecutesOncePerLoop,
1152 SinkAndHoistLICMFlags &Flags,
1153 OptimizationRemarkEmitter *ORE) {
1154 // If we don't understand the instruction, bail early.
1155 if (!isHoistableAndSinkableInst(I))
1156 return false;
1157
1158 MemorySSA *MSSA = MSSAU.getMemorySSA();
1159 // Loads have extra constraints we have to verify before we can hoist them.
1160 if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
1161 if (!LI->isUnordered())
1162 return false; // Don't sink/hoist volatile or ordered atomic loads!
1163
1164 // Loads from constant memory are always safe to move, even if they end up
1165 // in the same alias set as something that ends up being modified.
1166 if (!isModSet(AA->getModRefInfoMask(LI->getOperand(0))))
1167 return true;
1168 if (LI->hasMetadata(LLVMContext::MD_invariant_load))
1169 return true;
1170
1171 if (LI->isAtomic() && !TargetExecutesOncePerLoop)
1172 return false; // Don't risk duplicating unordered loads
1173
1174 // This checks for an invariant.start dominating the load.
1175 if (isLoadInvariantInLoop(LI, DT, CurLoop))
1176 return true;
1177
1178 auto MU = cast<MemoryUse>(MSSA->getMemoryAccess(LI));
1179
1180 bool InvariantGroup = LI->hasMetadata(LLVMContext::MD_invariant_group);
1181
1182 bool Invalidated = pointerInvalidatedByLoop(
1183 MSSA, MU, CurLoop, I, Flags, InvariantGroup);
1184 // Check loop-invariant address because this may also be a sinkable load
1185 // whose address is not necessarily loop-invariant.
1186 if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1187 ORE->emit([&]() {
1188 return OptimizationRemarkMissed(
1189 DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI)
1190 << "failed to move load with loop-invariant address "
1191 "because the loop may invalidate its value";
1192 });
1193
1194 return !Invalidated;
1195 } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
1196 // Don't sink or hoist dbg info; it's legal, but not useful.
1197 if (isa<DbgInfoIntrinsic>(I))
1198 return false;
1199
1200 // Don't sink calls which can throw.
1201 if (CI->mayThrow())
1202 return false;
1203
1204 // Convergent attribute has been used on operations that involve
1205 // inter-thread communication which results are implicitly affected by the
1206 // enclosing control flows. It is not safe to hoist or sink such operations
1207 // across control flow.
1208 if (CI->isConvergent())
1209 return false;
1210
1211 using namespace PatternMatch;
1212 if (match(CI, m_Intrinsic<Intrinsic::assume>()))
1213 // Assumes don't actually alias anything or throw
1214 return true;
1215
1216 // Handle simple cases by querying alias analysis.
1217 MemoryEffects Behavior = AA->getMemoryEffects(CI);
1218
1219 // FIXME: we don't handle the semantics of thread local well. So that the
1220 // address of thread locals are fake constants in coroutines. So We forbid
1221 // to treat onlyReadsMemory call in coroutines as constants now. Note that
1222 // it is possible to hide a thread local access in a onlyReadsMemory call.
1223 // Remove this check after we handle the semantics of thread locals well.
1224 if (Behavior.onlyReadsMemory() && CI->getFunction()->isPresplitCoroutine())
1225 return false;
1226
1227 if (Behavior.doesNotAccessMemory())
1228 return true;
1229 if (Behavior.onlyReadsMemory()) {
1230 // A readonly argmemonly function only reads from memory pointed to by
1231 // it's arguments with arbitrary offsets. If we can prove there are no
1232 // writes to this memory in the loop, we can hoist or sink.
1233 if (Behavior.onlyAccessesArgPointees()) {
1234 // TODO: expand to writeable arguments
1235 for (Value *Op : CI->args())
1236 if (Op->getType()->isPointerTy() &&
1237 pointerInvalidatedByLoop(
1238 MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(CI)), CurLoop, I,
1239 Flags, /*InvariantGroup=*/false))
1240 return false;
1241 return true;
1242 }
1243
1244 // If this call only reads from memory and there are no writes to memory
1245 // in the loop, we can hoist or sink the call as appropriate.
1246 if (isReadOnly(MSSAU, CurLoop))
1247 return true;
1248 }
1249
1250 // FIXME: This should use mod/ref information to see if we can hoist or
1251 // sink the call.
1252
1253 return false;
1254 } else if (auto *FI = dyn_cast<FenceInst>(&I)) {
1255 // Fences alias (most) everything to provide ordering. For the moment,
1256 // just give up if there are any other memory operations in the loop.
1257 return isOnlyMemoryAccess(FI, CurLoop, MSSAU);
1258 } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
1259 if (!SI->isUnordered())
1260 return false; // Don't sink/hoist volatile or ordered atomic store!
1261
1262 // We can only hoist a store that we can prove writes a value which is not
1263 // read or overwritten within the loop. For those cases, we fallback to
1264 // load store promotion instead. TODO: We can extend this to cases where
1265 // there is exactly one write to the location and that write dominates an
1266 // arbitrary number of reads in the loop.
1267 if (isOnlyMemoryAccess(SI, CurLoop, MSSAU))
1268 return true;
1269 // If there are more accesses than the Promotion cap, then give up as we're
1270 // not walking a list that long.
1271 if (Flags.tooManyMemoryAccesses())
1272 return false;
1273
1274 auto *SIMD = MSSA->getMemoryAccess(SI);
1275 BatchAAResults BAA(*AA);
1276 auto *Source = getClobberingMemoryAccess(*MSSA, BAA, Flags, SIMD);
1277 // Make sure there are no clobbers inside the loop.
1278 if (!MSSA->isLiveOnEntryDef(Source) &&
1279 CurLoop->contains(Source->getBlock()))
1280 return false;
1281
1282 // If there are interfering Uses (i.e. their defining access is in the
1283 // loop), or ordered loads (stored as Defs!), don't move this store.
1284 // Could do better here, but this is conservatively correct.
1285 // TODO: Cache set of Uses on the first walk in runOnLoop, update when
1286 // moving accesses. Can also extend to dominating uses.
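// For illustration (hypothetical IR, not from the source): given the
// candidate store and an in-loop load
//   store i32 %v, ptr %p
//   %x = load i32, ptr %q
// the load blocks moving the store whenever its defining (clobbering)
// access lies inside the loop, or, when hoisting, whenever the store's
// MemoryDef does not dominate the load's MemoryUse.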
1287 for (auto *BB : CurLoop->getBlocks())
1288 if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
1289 for (const auto &MA : *Accesses)
1290 if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
1291 auto *MD = getClobberingMemoryAccess(*MSSA, BAA, Flags,
1292 const_cast<MemoryUse *>(MU));
1293 if (!MSSA->isLiveOnEntryDef(MD) &&
1294 CurLoop->contains(MD->getBlock()))
1295 return false;
1296 // Disable hoisting past potentially interfering loads. Optimized
1297 // Uses may point to an access outside the loop, as getClobbering
1298 // checks the previous iteration when walking the backedge.
1299 // FIXME: More precise: no Uses that alias SI.
1300 if (!Flags.getIsSink() && !MSSA->dominates(SIMD, MU))
1301 return false;
1302 } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) {
1303 if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) {
1304 (void)LI; // Silence warning.
1305 assert(!LI->isUnordered() && "Expected ordered or volatile load");
1306 return false;
1307 }
1308 // Any call, while it may not be clobbering SI, it may be a use.
1309 if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) {
1310 // Check if the call may read from the memory location written
1311 // to by SI. Check CI's attributes and arguments; the number of
1312 // such checks performed is limited above by NoOfMemAccTooLarge.
1313 ModRefInfo MRI = BAA.getModRefInfo(CI, MemoryLocation::get(SI));
1314 if (isModOrRefSet(MRI))
1315 return false;
1316 }
1317 }
1318 }
1319 return true;
1320 }
1321
1322 assert(!I.mayReadOrWriteMemory() && "unhandled aliasing");
1323
1324 // We've established mechanical ability and aliasing, it's up to the caller
1325 // to check fault safety
1326 return true;
1327 }
1328
1329 /// Returns true if a PHINode is trivially replaceable with an
1330 /// Instruction.
1331 /// This is true when all incoming values are that instruction.
1332 /// This pattern occurs most often with LCSSA PHI nodes.
1333 ///
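/// For example (an illustrative sketch), the LCSSA phi
///   %a.lcssa = phi i32 [ %a, %latch1 ], [ %a, %latch2 ]
/// has every incoming value equal to %a, so it can simply be replaced by %a.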
1334 static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {
1335 for (const Value *IncValue : PN.incoming_values())
1336 if (IncValue != &I)
1337 return false;
1338
1339 return true;
1340 }
1341
1342 /// Return true if the instruction is foldable in the loop.
1343 static bool isFoldableInLoop(const Instruction &I, const Loop *CurLoop,
1344 const TargetTransformInfo *TTI) {
1345 if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
1346 InstructionCost CostI =
1347 TTI->getInstructionCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
1348 if (CostI != TargetTransformInfo::TCC_Free)
1349 return false;
1350 // For a GEP, we cannot simply use getInstructionCost because currently
1351 // it optimistically assumes that a GEP will fold into addressing mode
1352 // regardless of its users.
1353 const BasicBlock *BB = GEP->getParent();
1354 for (const User *U : GEP->users()) {
1355 const Instruction *UI = cast<Instruction>(U);
1356 if (CurLoop->contains(UI) &&
1357 (BB != UI->getParent() ||
1358 (!isa<StoreInst>(UI) && !isa<LoadInst>(UI))))
1359 return false;
1360 }
1361 return true;
1362 }
1363
1364 return false;
1365 }
1366
1367 /// Return true if the only users of this instruction are outside of
1368 /// the loop. If this is true, we can sink the instruction to the exit
1369 /// blocks of the loop.
1370 ///
1371 /// We also return true if the instruction could be folded away in lowering.
1372 /// (e.g., a GEP can be folded into a load as an addressing mode in the loop).
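///
/// A sketch of the foldable case (hypothetical IR): with
///   %g = getelementptr i8, ptr %base, i64 %i
///   %v = load i32, ptr %g
/// in the same block, the GEP is expected to fold into the load's
/// addressing mode, so leaving it in the loop is considered free.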
1373 static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop,
1374 const LoopSafetyInfo *SafetyInfo,
1375 TargetTransformInfo *TTI,
1376 bool &FoldableInLoop, bool LoopNestMode) {
1377 const auto &BlockColors = SafetyInfo->getBlockColors();
1378 bool IsFoldable = isFoldableInLoop(I, CurLoop, TTI);
1379 for (const User *U : I.users()) {
1380 const Instruction *UI = cast<Instruction>(U);
1381 if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
1382 const BasicBlock *BB = PN->getParent();
1383 // We cannot sink uses in catchswitches.
1384 if (isa<CatchSwitchInst>(BB->getTerminator()))
1385 return false;
1386
1387 // We need to sink a callsite to a unique funclet. Avoid sinking if the
1388 // phi use is too muddled.
1389 if (isa<CallInst>(I))
1390 if (!BlockColors.empty() &&
1391 BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1)
1392 return false;
1393
1394 if (LoopNestMode) {
1395 while (isa<PHINode>(UI) && UI->hasOneUser() &&
1396 UI->getNumOperands() == 1) {
1397 if (!CurLoop->contains(UI))
1398 break;
1399 UI = cast<Instruction>(UI->user_back());
1400 }
1401 }
1402 }
1403
1404 if (CurLoop->contains(UI)) {
1405 if (IsFoldable) {
1406 FoldableInLoop = true;
1407 continue;
1408 }
1409 return false;
1410 }
1411 }
1412 return true;
1413 }
1414
1415 static Instruction *cloneInstructionInExitBlock(
1416 Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
1417 const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU) {
1418 Instruction *New;
1419 if (auto *CI = dyn_cast<CallInst>(&I)) {
1420 const auto &BlockColors = SafetyInfo->getBlockColors();
1421
1422 // Sinking call-sites need to be handled differently from other
1423 // instructions. The cloned call-site needs a funclet bundle operand
1424 // appropriate for its location in the CFG.
1425 SmallVector<OperandBundleDef, 1> OpBundles;
1426 for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles();
1427 BundleIdx != BundleEnd; ++BundleIdx) {
1428 OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx);
1429 if (Bundle.getTagID() == LLVMContext::OB_funclet)
1430 continue;
1431
1432 OpBundles.emplace_back(Bundle);
1433 }
1434
1435 if (!BlockColors.empty()) {
1436 const ColorVector &CV = BlockColors.find(&ExitBlock)->second;
1437 assert(CV.size() == 1 && "non-unique color for exit block!");
1438 BasicBlock *BBColor = CV.front();
1439 Instruction *EHPad = BBColor->getFirstNonPHI();
1440 if (EHPad->isEHPad())
1441 OpBundles.emplace_back("funclet", EHPad);
1442 }
1443
1444 New = CallInst::Create(CI, OpBundles);
1445 } else {
1446 New = I.clone();
1447 }
1448
1449 New->insertInto(&ExitBlock, ExitBlock.getFirstInsertionPt());
1450 if (!I.getName().empty())
1451 New->setName(I.getName() + ".le");
1452
1453 if (MSSAU.getMemorySSA()->getMemoryAccess(&I)) {
1454 // Create a new MemoryAccess and let MemorySSA set its defining access.
1455 MemoryAccess *NewMemAcc = MSSAU.createMemoryAccessInBB(
1456 New, nullptr, New->getParent(), MemorySSA::Beginning);
1457 if (NewMemAcc) {
1458 if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc))
1459 MSSAU.insertDef(MemDef, /*RenameUses=*/true);
1460 else {
1461 auto *MemUse = cast<MemoryUse>(NewMemAcc);
1462 MSSAU.insertUse(MemUse, /*RenameUses=*/true);
1463 }
1464 }
1465 }
1466
1467 // Build LCSSA PHI nodes for any in-loop operands (if legal). Note that
1468 // this is particularly cheap because we can rip off the PHI node that we're
1469 // replacing for the number and blocks of the predecessors.
1470 // OPT: If this shows up in a profile, we can instead finish sinking all
1471 // invariant instructions, and then walk their operands to re-establish
1472 // LCSSA. That will eliminate creating PHI nodes just to nuke them when
1473 // sinking bottom-up.
1474 for (Use &Op : New->operands())
1475 if (LI->wouldBeOutOfLoopUseRequiringLCSSA(Op.get(), PN.getParent())) {
1476 auto *OInst = cast<Instruction>(Op.get());
1477 PHINode *OpPN =
1478 PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
1479 OInst->getName() + ".lcssa");
1480 OpPN->insertBefore(ExitBlock.begin());
1481 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
1482 OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
1483 Op = OpPN;
1484 }
1485 return New;
1486 }
1487
1488 static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
1489 MemorySSAUpdater &MSSAU) {
1490 MSSAU.removeMemoryAccess(&I);
1491 SafetyInfo.removeInstruction(&I);
1492 I.eraseFromParent();
1493 }
1494
1495 static void moveInstructionBefore(Instruction &I, BasicBlock::iterator Dest,
1496 ICFLoopSafetyInfo &SafetyInfo,
1497 MemorySSAUpdater &MSSAU,
1498 ScalarEvolution *SE) {
1499 SafetyInfo.removeInstruction(&I);
1500 SafetyInfo.insertInstructionTo(&I, Dest->getParent());
1501 I.moveBefore(*Dest->getParent(), Dest);
1502 if (MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>(
1503 MSSAU.getMemorySSA()->getMemoryAccess(&I)))
1504 MSSAU.moveToPlace(OldMemAcc, Dest->getParent(),
1505 MemorySSA::BeforeTerminator);
1506 if (SE)
1507 SE->forgetBlockAndLoopDispositions(&I);
1508 }
1509
1510 static Instruction *sinkThroughTriviallyReplaceablePHI(
1511 PHINode *TPN, Instruction *I, LoopInfo *LI,
1512 SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies,
1513 const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
1514 MemorySSAUpdater &MSSAU) {
1515 assert(isTriviallyReplaceablePHI(*TPN, *I) &&
1516 "Expect only trivially replaceable PHI");
1517 BasicBlock *ExitBlock = TPN->getParent();
1518 Instruction *New;
1519 auto It = SunkCopies.find(ExitBlock);
1520 if (It != SunkCopies.end())
1521 New = It->second;
1522 else
1523 New = SunkCopies[ExitBlock] = cloneInstructionInExitBlock(
1524 *I, *ExitBlock, *TPN, LI, SafetyInfo, MSSAU);
1525 return New;
1526 }
1527
1528 static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
1529 BasicBlock *BB = PN->getParent();
1530 if (!BB->canSplitPredecessors())
1531 return false;
1532 // It's not impossible to split EHPad blocks, but if BlockColors already exist
1533 // it requires updating BlockColors for all offspring blocks accordingly. By
1534 // skipping this corner case, we keep the BlockColors update after splitting a
1535 // predecessor fairly simple.
1536 if (!SafetyInfo->getBlockColors().empty() && BB->getFirstNonPHI()->isEHPad())
1537 return false;
1538 for (BasicBlock *BBPred : predecessors(BB)) {
1539 if (isa<IndirectBrInst>(BBPred->getTerminator()))
1540 return false;
1541 }
1542 return true;
1543 }
1544
1545 static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
1546 LoopInfo *LI, const Loop *CurLoop,
1547 LoopSafetyInfo *SafetyInfo,
1548 MemorySSAUpdater *MSSAU) {
1549 #ifndef NDEBUG
1550 SmallVector<BasicBlock *, 32> ExitBlocks;
1551 CurLoop->getUniqueExitBlocks(ExitBlocks);
1552 SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
1553 ExitBlocks.end());
1554 #endif
1555 BasicBlock *ExitBB = PN->getParent();
1556 assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.");
1557
1558 // Split predecessors of the loop exit so that instructions in the loop are
1559 // exposed to exit blocks through trivially replaceable PHIs, while keeping the
1560 // loop in the canonical form where each predecessor of each exit block is
1561 // contained within the loop. For example, this will convert the loop below
1562 // from
1563 //
1564 // LB1:
1565 // %v1 =
1566 // br %LE, %LB2
1567 // LB2:
1568 // %v2 =
1569 // br %LE, %LB1
1570 // LE:
1571 // %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable
1572 //
1573 // to
1574 //
1575 // LB1:
1576 // %v1 =
1577 // br %LE.split, %LB2
1578 // LB2:
1579 // %v2 =
1580 // br %LE.split2, %LB1
1581 // LE.split:
1582 // %p1 = phi [%v1, %LB1] <-- trivially replaceable
1583 // br %LE
1584 // LE.split2:
1585 // %p2 = phi [%v2, %LB2] <-- trivially replaceable
1586 // br %LE
1587 // LE:
1588 // %p = phi [%p1, %LE.split], [%p2, %LE.split2]
1589 //
1590 const auto &BlockColors = SafetyInfo->getBlockColors();
1591 SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
1592 while (!PredBBs.empty()) {
1593 BasicBlock *PredBB = *PredBBs.begin();
1594 assert(CurLoop->contains(PredBB) &&
1595 "Expect all predecessors are in the loop");
1596 if (PN->getBasicBlockIndex(PredBB) >= 0) {
1597 BasicBlock *NewPred = SplitBlockPredecessors(
1598 ExitBB, PredBB, ".split.loop.exit", DT, LI, MSSAU, true);
1599 // Since we do not allow splitting EH-block with BlockColors in
1600 // canSplitPredecessors(), we can simply assign predecessor's color to
1601 // the new block.
1602 if (!BlockColors.empty())
1603 // Grab a reference to the ColorVector to be inserted before getting the
1604 // reference to the vector we are copying because inserting the new
1605 // element in BlockColors might cause the map to be reallocated.
1606 SafetyInfo->copyColors(NewPred, PredBB);
1607 }
1608 PredBBs.remove(PredBB);
1609 }
1610 }
1611
1612 /// When an instruction is found to only be used outside of the loop, this
1613 /// function moves it to the exit blocks and patches up SSA form as needed.
1614 /// This method is guaranteed to remove the original instruction from its
1615 /// position, and may either delete it or move it outside of the loop.
1616 ///
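/// For example (an illustrative sketch): if %sum is computed in the loop but
/// only consumed through the LCSSA phi
///   %sum.lcssa = phi i32 [ %sum, %latch ]
/// in the exit block, the computation is cloned into the exit block and the
/// phi is replaced with the clone.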
1617 static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
1618 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
1619 MemorySSAUpdater &MSSAU, OptimizationRemarkEmitter *ORE) {
1620 bool Changed = false;
1621 LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
1622
1623 // Iterate over users to be ready for actual sinking. Replace uses in
1624 // unreachable blocks with poison and make all user PHIs trivially replaceable.
1625 SmallPtrSet<Instruction *, 8> VisitedUsers;
1626 for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) {
1627 auto *User = cast<Instruction>(*UI);
1628 Use &U = UI.getUse();
1629 ++UI;
1630
1631 if (VisitedUsers.count(User) || CurLoop->contains(User))
1632 continue;
1633
1634 if (!DT->isReachableFromEntry(User->getParent())) {
1635 U = PoisonValue::get(I.getType());
1636 Changed = true;
1637 continue;
1638 }
1639
1640 // The user must be a PHI node.
1641 PHINode *PN = cast<PHINode>(User);
1642
1643 // Surprisingly, instructions can be used outside of loops without any
1644 // exits. This can only happen in PHI nodes if the incoming block is
1645 // unreachable.
1646 BasicBlock *BB = PN->getIncomingBlock(U);
1647 if (!DT->isReachableFromEntry(BB)) {
1648 U = PoisonValue::get(I.getType());
1649 Changed = true;
1650 continue;
1651 }
1652
1653 VisitedUsers.insert(PN);
1654 if (isTriviallyReplaceablePHI(*PN, I))
1655 continue;
1656
1657 if (!canSplitPredecessors(PN, SafetyInfo))
1658 return Changed;
1659
1660 // Split predecessors of the PHI so that we can make users trivially
1661 // replaceable.
1662 splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, &MSSAU);
1663
1664 // Should rebuild the iterators, as they may be invalidated by
1665 // splitPredecessorsOfLoopExit().
1666 UI = I.user_begin();
1667 UE = I.user_end();
1668 }
1669
1670 if (VisitedUsers.empty())
1671 return Changed;
1672
1673 ORE->emit([&]() {
1674 return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I)
1675 << "sinking " << ore::NV("Inst", &I);
1676 });
1677 if (isa<LoadInst>(I))
1678 ++NumMovedLoads;
1679 else if (isa<CallInst>(I))
1680 ++NumMovedCalls;
1681 ++NumSunk;
1682
1683 #ifndef NDEBUG
1684 SmallVector<BasicBlock *, 32> ExitBlocks;
1685 CurLoop->getUniqueExitBlocks(ExitBlocks);
1686 SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
1687 ExitBlocks.end());
1688 #endif
1689
1690 // Clones of this instruction. Don't create more than one per exit block!
1691 SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;
1692
1693 // If this instruction is only used outside of the loop, then all users are
1694 // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
1695 // the instruction.
1696 // First check if I is worth sinking for all uses. Sink only when it is
1697 // worthwhile across all uses.
1698 SmallSetVector<User*, 8> Users(I.user_begin(), I.user_end());
1699 for (auto *UI : Users) {
1700 auto *User = cast<Instruction>(UI);
1701
1702 if (CurLoop->contains(User))
1703 continue;
1704
1705 PHINode *PN = cast<PHINode>(User);
1706 assert(ExitBlockSet.count(PN->getParent()) &&
1707 "The LCSSA PHI is not in an exit block!");
1708
1709 // The PHI must be trivially replaceable.
1710 Instruction *New = sinkThroughTriviallyReplaceablePHI(
1711 PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
1712 // As we sink the instruction out of the BB, drop its debug location.
1713 New->dropLocation();
1714 PN->replaceAllUsesWith(New);
1715 eraseInstruction(*PN, *SafetyInfo, MSSAU);
1716 Changed = true;
1717 }
1718 return Changed;
1719 }
1720
1721 /// When an instruction is found to only use loop-invariant operands and it
1722 /// is safe to hoist, this function is called to do the dirty work.
1723 ///
1724 static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
1725 BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
1726 MemorySSAUpdater &MSSAU, ScalarEvolution *SE,
1727 OptimizationRemarkEmitter *ORE) {
1728 LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getNameOrAsOperand() << ": "
1729 << I << "\n");
1730 ORE->emit([&]() {
1731 return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting "
1732 << ore::NV("Inst", &I);
1733 });
1734
1735 // Metadata can be dependent on conditions we are hoisting above.
1736 // Conservatively strip all metadata on the instruction unless we were
1737 // guaranteed to execute I if we entered the loop, in which case the metadata
1738 // is valid in the loop preheader.
1739 // Similarly, If I is a call and it is not guaranteed to execute in the loop,
1740 // then moving to the preheader means we should strip attributes on the call
1741 // that can cause UB since we may be hoisting above conditions that allowed
1742 // inferring those attributes. They may not be valid at the preheader.
1743 if ((I.hasMetadataOtherThanDebugLoc() || isa<CallInst>(I)) &&
1744 // The check on hasMetadataOtherThanDebugLoc is to prevent us from burning
1745 // time in isGuaranteedToExecute if we don't actually have anything to
1746 // drop. It is a compile time optimization, not required for correctness.
1747 !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop))
1748 I.dropUBImplyingAttrsAndMetadata();
1749
1750 if (isa<PHINode>(I))
1751 // Move the new node to the end of the phi list in the destination block.
1752 moveInstructionBefore(I, Dest->getFirstNonPHIIt(), *SafetyInfo, MSSAU, SE);
1753 else
1754 // Move the new node to the destination block, before its terminator.
1755 moveInstructionBefore(I, Dest->getTerminator()->getIterator(), *SafetyInfo,
1756 MSSAU, SE);
1757
1758 I.updateLocationAfterHoist();
1759
1760 if (isa<LoadInst>(I))
1761 ++NumMovedLoads;
1762 else if (isa<CallInst>(I))
1763 ++NumMovedCalls;
1764 ++NumHoisted;
1765 }
1766
1767 /// Only sink or hoist an instruction if it is not a trapping instruction,
1768 /// if it is known not to trap when moved to the preheader, or if it is a
1769 /// trapping instruction that is guaranteed to execute.
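/// As an illustrative contrast (not from the source): a load from a pointer
/// already proven dereferenceable at the preheader may be speculated, while
///   %q = sdiv i32 %a, %b
/// guarded by a loop-varying condition must instead be shown guaranteed to
/// execute, since hoisting it past the guard could introduce a trap.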
1770 static bool isSafeToExecuteUnconditionally(
1771 Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
1772 const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
1773 OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
1774 AssumptionCache *AC, bool AllowSpeculation) {
1775 if (AllowSpeculation &&
1776 isSafeToSpeculativelyExecute(&Inst, CtxI, AC, DT, TLI))
1777 return true;
1778
1779 bool GuaranteedToExecute =
1780 SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop);
1781
1782 if (!GuaranteedToExecute) {
1783 auto *LI = dyn_cast<LoadInst>(&Inst);
1784 if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1785 ORE->emit([&]() {
1786 return OptimizationRemarkMissed(
1787 DEBUG_TYPE, "LoadWithLoopInvariantAddressCondExecuted", LI)
1788 << "failed to hoist load with loop-invariant address "
1789 "because load is conditionally executed";
1790 });
1791 }
1792
1793 return GuaranteedToExecute;
1794 }
1795
1796 namespace {
1797 class LoopPromoter : public LoadAndStorePromoter {
1798 Value *SomePtr; // Designated pointer to store to.
1799 SmallVectorImpl<BasicBlock *> &LoopExitBlocks;
1800 SmallVectorImpl<BasicBlock::iterator> &LoopInsertPts;
1801 SmallVectorImpl<MemoryAccess *> &MSSAInsertPts;
1802 PredIteratorCache &PredCache;
1803 MemorySSAUpdater &MSSAU;
1804 LoopInfo &LI;
1805 DebugLoc DL;
1806 Align Alignment;
1807 bool UnorderedAtomic;
1808 AAMDNodes AATags;
1809 ICFLoopSafetyInfo &SafetyInfo;
1810 bool CanInsertStoresInExitBlocks;
1811 ArrayRef<const Instruction *> Uses;
1812
1813 // We're about to add a use of V in a loop exit block. Insert an LCSSA phi
1814 // (if legal) if doing so would add an out-of-loop use to an instruction
1815 // defined in-loop.
1816 Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
1817 if (!LI.wouldBeOutOfLoopUseRequiringLCSSA(V, BB))
1818 return V;
1819
1820 Instruction *I = cast<Instruction>(V);
1821 // We need to create an LCSSA PHI node for the incoming value and
1822 // store that.
1823 PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB),
1824 I->getName() + ".lcssa");
1825 PN->insertBefore(BB->begin());
1826 for (BasicBlock *Pred : PredCache.get(BB))
1827 PN->addIncoming(I, Pred);
1828 return PN;
1829 }
1830
1831 public:
1832 LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S,
1833 SmallVectorImpl<BasicBlock *> &LEB,
1834 SmallVectorImpl<BasicBlock::iterator> &LIP,
1835 SmallVectorImpl<MemoryAccess *> &MSSAIP, PredIteratorCache &PIC,
1836 MemorySSAUpdater &MSSAU, LoopInfo &li, DebugLoc dl,
1837 Align Alignment, bool UnorderedAtomic, const AAMDNodes &AATags,
1838 ICFLoopSafetyInfo &SafetyInfo, bool CanInsertStoresInExitBlocks)
1839 : LoadAndStorePromoter(Insts, S), SomePtr(SP), LoopExitBlocks(LEB),
1840 LoopInsertPts(LIP), MSSAInsertPts(MSSAIP), PredCache(PIC), MSSAU(MSSAU),
1841 LI(li), DL(std::move(dl)), Alignment(Alignment),
1842 UnorderedAtomic(UnorderedAtomic), AATags(AATags),
1843 SafetyInfo(SafetyInfo),
1844 CanInsertStoresInExitBlocks(CanInsertStoresInExitBlocks), Uses(Insts) {}
1845
1846 void insertStoresInLoopExitBlocks() {
1847 // Insert stores in the loop exit blocks. Each exit block gets a
1848 // store of the live-out value that feeds it. Since we've already told
1849 // the SSA updater about the defs in the loop and the preheader
1850 // definition, it is all set and we can start using it.
1851 DIAssignID *NewID = nullptr;
1852 for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
1853 BasicBlock *ExitBlock = LoopExitBlocks[i];
1854 Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
1855 LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
1856 Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
1857 BasicBlock::iterator InsertPos = LoopInsertPts[i];
1858 StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
1859 if (UnorderedAtomic)
1860 NewSI->setOrdering(AtomicOrdering::Unordered);
1861 NewSI->setAlignment(Alignment);
1862 NewSI->setDebugLoc(DL);
1863 // Attach DIAssignID metadata to the new store, generating it on the
1864 // first loop iteration.
1865 if (i == 0) {
1866 // NewSI will have its DIAssignID set here if there are any stores in
1867 // Uses with a DIAssignID attachment. This merged ID will then be
1868 // attached to the other inserted stores (in the branch below).
1869 NewSI->mergeDIAssignID(Uses);
1870 NewID = cast_or_null<DIAssignID>(
1871 NewSI->getMetadata(LLVMContext::MD_DIAssignID));
1872 } else {
1873 // Attach the DIAssignID (or nullptr) merged from Uses in the branch
1874 // above.
1875 NewSI->setMetadata(LLVMContext::MD_DIAssignID, NewID);
1876 }
1877
1878 if (AATags)
1879 NewSI->setAAMetadata(AATags);
1880
1881 MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i];
1882 MemoryAccess *NewMemAcc;
1883 if (!MSSAInsertPoint) {
1884 NewMemAcc = MSSAU.createMemoryAccessInBB(
1885 NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning);
1886 } else {
1887 NewMemAcc =
1888 MSSAU.createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint);
1889 }
1890 MSSAInsertPts[i] = NewMemAcc;
1891 MSSAU.insertDef(cast<MemoryDef>(NewMemAcc), true);
1892 // FIXME: true for safety, false may still be correct.
1893 }
1894 }
1895
1896 void doExtraRewritesBeforeFinalDeletion() override {
1897 if (CanInsertStoresInExitBlocks)
1898 insertStoresInLoopExitBlocks();
1899 }
1900
1901 void instructionDeleted(Instruction *I) const override {
1902 SafetyInfo.removeInstruction(I);
1903 MSSAU.removeMemoryAccess(I);
1904 }
1905
1906 bool shouldDelete(Instruction *I) const override {
1907 if (isa<StoreInst>(I))
1908 return CanInsertStoresInExitBlocks;
1909 return true;
1910 }
1911 };
1912
1913 bool isNotCapturedBeforeOrInLoop(const Value *V, const Loop *L,
1914 DominatorTree *DT) {
1915 // We can perform the captured-before check against any instruction in the
1916 // loop header, as the loop header is reachable from any instruction inside
1917 // the loop.
1918 // TODO: ReturnCaptures=true shouldn't be necessary here.
1919 return !PointerMayBeCapturedBefore(V, /* ReturnCaptures */ true,
1920 /* StoreCaptures */ true,
1921 L->getHeader()->getTerminator(), DT);
1922 }
1923
1924 /// Return true if we can prove that a caller cannot inspect the object if an
1925 /// unwind occurs inside the loop.
1926 bool isNotVisibleOnUnwindInLoop(const Value *Object, const Loop *L,
1927 DominatorTree *DT) {
1928 bool RequiresNoCaptureBeforeUnwind;
1929 if (!isNotVisibleOnUnwind(Object, RequiresNoCaptureBeforeUnwind))
1930 return false;
1931
1932 return !RequiresNoCaptureBeforeUnwind ||
1933 isNotCapturedBeforeOrInLoop(Object, L, DT);
1934 }
1935
1936 bool isThreadLocalObject(const Value *Object, const Loop *L, DominatorTree *DT,
1937 TargetTransformInfo *TTI) {
1938 // The object must be function-local to start with, and then not captured
1939 // before/in the loop.
1940 return (isIdentifiedFunctionLocal(Object) &&
1941 isNotCapturedBeforeOrInLoop(Object, L, DT)) ||
1942 (TTI->isSingleThreaded() || SingleThread);
1943 }
1944
1945 } // namespace
1946
1947 /// Try to promote memory values to scalars by sinking stores out of the
1948 /// loop and moving loads to before the loop. We do this by looping over
1949 /// the stores in the loop, looking for stores to Must pointers which are
1950 /// loop invariant.
1951 ///
1952 bool llvm::promoteLoopAccessesToScalars(
1953 const SmallSetVector<Value *, 8> &PointerMustAliases,
1954 SmallVectorImpl<BasicBlock *> &ExitBlocks,
1955 SmallVectorImpl<BasicBlock::iterator> &InsertPts,
1956 SmallVectorImpl<MemoryAccess *> &MSSAInsertPts, PredIteratorCache &PIC,
1957 LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC,
1958 const TargetLibraryInfo *TLI, TargetTransformInfo *TTI, Loop *CurLoop,
1959 MemorySSAUpdater &MSSAU, ICFLoopSafetyInfo *SafetyInfo,
1960 OptimizationRemarkEmitter *ORE, bool AllowSpeculation,
1961 bool HasReadsOutsideSet) {
1962 // Verify inputs.
1963 assert(LI != nullptr && DT != nullptr && CurLoop != nullptr &&
1964 SafetyInfo != nullptr &&
1965 "Unexpected Input to promoteLoopAccessesToScalars");
1966
1967 LLVM_DEBUG({
1968 dbgs() << "Trying to promote set of must-aliased pointers:\n";
1969 for (Value *Ptr : PointerMustAliases)
1970 dbgs() << " " << *Ptr << "\n";
1971 });
1972 ++NumPromotionCandidates;
1973
1974 Value *SomePtr = *PointerMustAliases.begin();
1975 BasicBlock *Preheader = CurLoop->getLoopPreheader();
1976
1977 // It is not safe to promote a load/store from the loop if the load/store is
1978 // conditional. For example, turning:
1979 //
1980 // for () { if (c) *P += 1; }
1981 //
1982 // into:
1983 //
1984 // tmp = *P; for () { if (c) tmp +=1; } *P = tmp;
1985 //
1986 // is not safe, because *P may only be valid to access if 'c' is true.
1987 //
1988 // The safety property divides into two parts:
1989 // p1) The memory may not be dereferenceable on entry to the loop. In this
1990 // case, we can't insert the required load in the preheader.
1991 // p2) The memory model does not allow us to insert a store along any dynamic
1992 // path which did not originally have one.
1993 //
1994 // If at least one store is guaranteed to execute, both properties are
1995 // satisfied, and promotion is legal.
1996 //
1997 // This, however, is not a necessary condition. Even if no store/load is
1998 // guaranteed to execute, we can still establish these properties.
1999 // We can establish (p1) by proving that hoisting the load into the preheader
2000 // is safe (i.e. proving dereferenceability on all paths through the loop). We
2001 // can use any access within the alias set to prove dereferenceability,
2002 // since they're all must alias.
2003 //
2004 // There are two ways to establish (p2):
2005 // a) Prove the location is thread-local. In this case the memory model
2006 // requirement does not apply, and stores are safe to insert.
2007 // b) Prove a store dominates every exit block. In this case, if an exit
2008 // block is reached, the original dynamic path would have taken us through
2009 // the store, so inserting a store into the exit block is safe. Note that this
2010 // is different from the store being guaranteed to execute. For instance,
2011 // if an exception is thrown on the first iteration of the loop, the original
2012 // store is never executed, but the exit blocks are not executed either.
2013
2014 bool DereferenceableInPH = false;
2015 bool StoreIsGuaranteedToExecute = false;
2016 bool FoundLoadToPromote = false;
2017 // Goes from Unknown to either Safe or Unsafe, but can't switch between them.
2018 enum {
2019 StoreSafe,
2020 StoreUnsafe,
2021 StoreSafetyUnknown,
2022 } StoreSafety = StoreSafetyUnknown;
2023
2024 SmallVector<Instruction *, 64> LoopUses;
2025
2026 // We start with an alignment of one and try to find instructions that allow
2027 // us to prove better alignment.
2028 Align Alignment;
2029 // Keep track of which types of access we see
2030 bool SawUnorderedAtomic = false;
2031 bool SawNotAtomic = false;
2032 AAMDNodes AATags;
2033
2034 const DataLayout &MDL = Preheader->getModule()->getDataLayout();
2035
2036 // If there are reads outside the promoted set, then promoting stores is
2037 // definitely not safe.
2038 if (HasReadsOutsideSet)
2039 StoreSafety = StoreUnsafe;
2040
2041 if (StoreSafety == StoreSafetyUnknown && SafetyInfo->anyBlockMayThrow()) {
2042 // If a loop can throw, we have to insert a store along each unwind edge.
2043 // That said, we can't actually make the unwind edge explicit. Therefore,
2044 // we have to prove that the store is dead along the unwind edge. We do
2045 // this by proving that the caller can't have a reference to the object
2046 // after return and thus can't possibly load from the object.
2047 Value *Object = getUnderlyingObject(SomePtr);
2048 if (!isNotVisibleOnUnwindInLoop(Object, CurLoop, DT))
2049 StoreSafety = StoreUnsafe;
2050 }
2051
2052 // Check that all accesses to pointers in the alias set use the same type.
2053 // We cannot (yet) promote a memory location that is loaded and stored in
2054 // different sizes. While we are at it, collect alignment and AA info.
2055 Type *AccessTy = nullptr;
2056 for (Value *ASIV : PointerMustAliases) {
2057 for (Use &U : ASIV->uses()) {
2058 // Ignore instructions that are outside the loop.
2059 Instruction *UI = dyn_cast<Instruction>(U.getUser());
2060 if (!UI || !CurLoop->contains(UI))
2061 continue;
2062
2063 // If there is a non-load/store instruction in the loop, we can't promote
2064 // it.
2065 if (LoadInst *Load = dyn_cast<LoadInst>(UI)) {
2066 if (!Load->isUnordered())
2067 return false;
2068
2069 SawUnorderedAtomic |= Load->isAtomic();
2070 SawNotAtomic |= !Load->isAtomic();
2071 FoundLoadToPromote = true;
2072
2073 Align InstAlignment = Load->getAlign();
2074
2075 // Note that proving a load safe to speculate requires proving
2076 // sufficient alignment at the target location. Proving it guaranteed
2077 // to execute does as well. Thus we can increase our guaranteed
2078 // alignment as well.
2079 if (!DereferenceableInPH || (InstAlignment > Alignment))
2080 if (isSafeToExecuteUnconditionally(
2081 *Load, DT, TLI, CurLoop, SafetyInfo, ORE,
2082 Preheader->getTerminator(), AC, AllowSpeculation)) {
2083 DereferenceableInPH = true;
2084 Alignment = std::max(Alignment, InstAlignment);
2085 }
2086 } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
2087 // Stores *of* the pointer are not interesting, only stores *to* the
2088 // pointer.
2089 if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
2090 continue;
2091 if (!Store->isUnordered())
2092 return false;
2093
2094 SawUnorderedAtomic |= Store->isAtomic();
2095 SawNotAtomic |= !Store->isAtomic();
2096
2097 // If the store is guaranteed to execute, both properties are satisfied.
2098 // We may want to check if a store is guaranteed to execute even if we
2099 // already know that promotion is safe, since it may have higher
2100 // alignment than any other guaranteed stores, in which case we can
2101 // raise the alignment on the promoted store.
2102 Align InstAlignment = Store->getAlign();
2103 bool GuaranteedToExecute =
2104 SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop);
2105 StoreIsGuaranteedToExecute |= GuaranteedToExecute;
2106 if (GuaranteedToExecute) {
2107 DereferenceableInPH = true;
2108 if (StoreSafety == StoreSafetyUnknown)
2109 StoreSafety = StoreSafe;
2110 Alignment = std::max(Alignment, InstAlignment);
2111 }
2112
2113 // If a store dominates all exit blocks, it is safe to sink.
2114 // As explained above, if an exit block was executed, a dominating
2115 // store must have been executed at least once, so we are not
2116 // introducing stores on paths that did not have them.
2117 // Note that this only looks at explicit exit blocks. If we ever
2118 // start sinking stores into unwind edges (see above), this will break.
2119 if (StoreSafety == StoreSafetyUnknown &&
2120 llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) {
2121 return DT->dominates(Store->getParent(), Exit);
2122 }))
2123 StoreSafety = StoreSafe;
2124
2125 // If the store is not guaranteed to execute, we may still get
2126 // deref info through it.
2127 if (!DereferenceableInPH) {
2128 DereferenceableInPH = isDereferenceableAndAlignedPointer(
2129 Store->getPointerOperand(), Store->getValueOperand()->getType(),
2130 Store->getAlign(), MDL, Preheader->getTerminator(), AC, DT, TLI);
2131 }
2132 } else
2133 continue; // Not a load or store.
2134
2135 if (!AccessTy)
2136 AccessTy = getLoadStoreType(UI);
2137 else if (AccessTy != getLoadStoreType(UI))
2138 return false;
2139
2140 // Merge the AA tags.
2141 if (LoopUses.empty()) {
2142 // On the first load/store, just take its AA tags.
2143 AATags = UI->getAAMetadata();
2144 } else if (AATags) {
2145 AATags = AATags.merge(UI->getAAMetadata());
2146 }
2147
2148 LoopUses.push_back(UI);
2149 }
2150 }
2151
2152 // If we found both an unordered atomic instruction and a non-atomic memory
2153 // access, bail. We can't blindly promote non-atomic to atomic since we
2154 // might not be able to lower the result. We can't downgrade since that
2155 // would violate memory model. Also, align 0 is an error for atomics.
2156 if (SawUnorderedAtomic && SawNotAtomic)
2157 return false;
2158
2159 // If we're inserting an atomic load in the preheader, we must be able to
2160 // lower it. We're only guaranteed to be able to lower naturally aligned
2161 // atomics.
2162 if (SawUnorderedAtomic && Alignment < MDL.getTypeStoreSize(AccessTy))
2163 return false;
2164
2165 // If we couldn't prove we can hoist the load, bail.
2166 if (!DereferenceableInPH) {
2167 LLVM_DEBUG(dbgs() << "Not promoting: Not dereferenceable in preheader\n");
2168 return false;
2169 }
2170
2171 // We know we can hoist the load, but don't have a guaranteed store.
2172 // Check whether the location is writable and thread-local. If it is, then we
2173 // can insert stores along paths which originally didn't have them without
2174 // violating the memory model.
2175 if (StoreSafety == StoreSafetyUnknown) {
2176 Value *Object = getUnderlyingObject(SomePtr);
2177 bool ExplicitlyDereferenceableOnly;
2178 if (isWritableObject(Object, ExplicitlyDereferenceableOnly) &&
2179 (!ExplicitlyDereferenceableOnly ||
2180 isDereferenceablePointer(SomePtr, AccessTy, MDL)) &&
2181 isThreadLocalObject(Object, CurLoop, DT, TTI))
2182 StoreSafety = StoreSafe;
2183 }
2184
2185 // If we've still failed to prove we can sink the store, hoist the load
2186 // only, if possible.
2187 if (StoreSafety != StoreSafe && !FoundLoadToPromote)
2188 // If we cannot hoist the load either, give up.
2189 return false;
2190
2191 // Let's do the promotion!
2192 if (StoreSafety == StoreSafe) {
2193 LLVM_DEBUG(dbgs() << "LICM: Promoting load/store of the value: " << *SomePtr
2194 << '\n');
2195 ++NumLoadStorePromoted;
2196 } else {
2197 LLVM_DEBUG(dbgs() << "LICM: Promoting load of the value: " << *SomePtr
2198 << '\n');
2199 ++NumLoadPromoted;
2200 }
2201
2202 ORE->emit([&]() {
2203 return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar",
2204 LoopUses[0])
2205 << "Moving accesses to memory location out of the loop";
2206 });
2207
2208 // Look at all the loop uses, and try to merge their locations.
2209 std::vector<DILocation *> LoopUsesLocs;
2210 for (auto *U : LoopUses)
2211 LoopUsesLocs.push_back(U->getDebugLoc().get());
2212 auto DL = DebugLoc(DILocation::getMergedLocations(LoopUsesLocs));
2213
2214 // We use the SSAUpdater interface to insert phi nodes as required.
2215 SmallVector<PHINode *, 16> NewPHIs;
2216 SSAUpdater SSA(&NewPHIs);
2217 LoopPromoter Promoter(SomePtr, LoopUses, SSA, ExitBlocks, InsertPts,
2218 MSSAInsertPts, PIC, MSSAU, *LI, DL, Alignment,
2219 SawUnorderedAtomic, AATags, *SafetyInfo,
2220 StoreSafety == StoreSafe);
2221
2222 // Set up the preheader to have a definition of the value. It is the live-out
2223 // value from the preheader that uses in the loop will use.
2224 LoadInst *PreheaderLoad = nullptr;
2225 if (FoundLoadToPromote || !StoreIsGuaranteedToExecute) {
2226 PreheaderLoad =
2227 new LoadInst(AccessTy, SomePtr, SomePtr->getName() + ".promoted",
2228 Preheader->getTerminator());
2229 if (SawUnorderedAtomic)
2230 PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
2231 PreheaderLoad->setAlignment(Alignment);
2232 PreheaderLoad->setDebugLoc(DebugLoc());
2233 if (AATags)
2234 PreheaderLoad->setAAMetadata(AATags);
2235
2236 MemoryAccess *PreheaderLoadMemoryAccess = MSSAU.createMemoryAccessInBB(
2237 PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End);
2238 MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess);
2239 MSSAU.insertUse(NewMemUse, /*RenameUses=*/true);
2240 SSA.AddAvailableValue(Preheader, PreheaderLoad);
2241 } else {
2242 SSA.AddAvailableValue(Preheader, PoisonValue::get(AccessTy));
2243 }
2244
2245 if (VerifyMemorySSA)
2246 MSSAU.getMemorySSA()->verifyMemorySSA();
2247 // Rewrite all the loads in the loop and remember all the definitions from
2248 // stores in the loop.
2249 Promoter.run(LoopUses);
2250
2251 if (VerifyMemorySSA)
2252 MSSAU.getMemorySSA()->verifyMemorySSA();
2253 // If the SSAUpdater didn't use the load in the preheader, just zap it now.
2254 if (PreheaderLoad && PreheaderLoad->use_empty())
2255 eraseInstruction(*PreheaderLoad, *SafetyInfo, MSSAU);
2256
2257 return true;
2258 }
2259
2260 static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
2261 function_ref<void(Instruction *)> Fn) {
2262 for (const BasicBlock *BB : L->blocks())
2263 if (const auto *Accesses = MSSA->getBlockAccesses(BB))
2264 for (const auto &Access : *Accesses)
2265 if (const auto *MUD = dyn_cast<MemoryUseOrDef>(&Access))
2266 Fn(MUD->getMemoryInst());
2267 }
2268
2269 // The bool indicates whether there might be reads outside the set, in which
2270 // case only loads may be promoted.
2271 static SmallVector<PointersAndHasReadsOutsideSet, 0>
2272 collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L) {
2273 BatchAAResults BatchAA(*AA);
2274 AliasSetTracker AST(BatchAA);
2275
2276 auto IsPotentiallyPromotable = [L](const Instruction *I) {
2277 if (const auto *SI = dyn_cast<StoreInst>(I))
2278 return L->isLoopInvariant(SI->getPointerOperand());
2279 if (const auto *LI = dyn_cast<LoadInst>(I))
2280 return L->isLoopInvariant(LI->getPointerOperand());
2281 return false;
2282 };
2283
2284 // Populate AST with potentially promotable accesses.
2285 SmallPtrSet<Value *, 16> AttemptingPromotion;
2286 foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
2287 if (IsPotentiallyPromotable(I)) {
2288 AttemptingPromotion.insert(I);
2289 AST.add(I);
2290 }
2291 });
2292
2293 // We're only interested in must-alias sets that contain a mod.
2294 SmallVector<PointerIntPair<const AliasSet *, 1, bool>, 8> Sets;
2295 for (AliasSet &AS : AST)
2296 if (!AS.isForwardingAliasSet() && AS.isMod() && AS.isMustAlias())
2297 Sets.push_back({&AS, false});
2298
2299 if (Sets.empty())
2300 return {}; // Nothing to promote...
2301
2302 // Discard any sets for which there is an aliasing non-promotable access.
2303 foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
2304 if (AttemptingPromotion.contains(I))
2305 return;
2306
2307 llvm::erase_if(Sets, [&](PointerIntPair<const AliasSet *, 1, bool> &Pair) {
2308 ModRefInfo MR = Pair.getPointer()->aliasesUnknownInst(I, BatchAA);
2309 // Cannot promote if there are writes outside the set.
2310 if (isModSet(MR))
2311 return true;
2312 if (isRefSet(MR)) {
2313 // Remember reads outside the set.
2314 Pair.setInt(true);
2315 // If this is a mod-only set and there are reads outside the set,
2316 // we will not be able to promote, so bail out early.
2317 return !Pair.getPointer()->isRef();
2318 }
2319 return false;
2320 });
2321 });
2322
2323 SmallVector<std::pair<SmallSetVector<Value *, 8>, bool>, 0> Result;
2324 for (auto [Set, HasReadsOutsideSet] : Sets) {
2325 SmallSetVector<Value *, 8> PointerMustAliases;
2326 for (const auto &MemLoc : *Set)
2327 PointerMustAliases.insert(const_cast<Value *>(MemLoc.Ptr));
2328 Result.emplace_back(std::move(PointerMustAliases), HasReadsOutsideSet);
2329 }
2330
2331 return Result;
2332 }
2333
2334 static bool pointerInvalidatedByLoop(MemorySSA *MSSA, MemoryUse *MU,
2335 Loop *CurLoop, Instruction &I,
2336 SinkAndHoistLICMFlags &Flags,
2337 bool InvariantGroup) {
2338 // For hoisting, use the walker to determine safety
2339 if (!Flags.getIsSink()) {
2340 // If hoisting an invariant group, we only need to check that there
2341 // is no store to the loaded pointer between the start of the loop
2342 // and the load (since all values must be the same).
2343 
2344 // This holds in either of two cases:
2345 // 1) the clobbering memory access is outside the loop, or
2346 // 2) (for invariant groups) the earliest access is the MemoryPhi at the
2347 // loop header, i.e. no store in the loop body clobbers the load.
2348
2349 BatchAAResults BAA(MSSA->getAA());
2350 MemoryAccess *Source = getClobberingMemoryAccess(*MSSA, BAA, Flags, MU);
2351 return !MSSA->isLiveOnEntryDef(Source) &&
2352 CurLoop->contains(Source->getBlock()) &&
2353 !(InvariantGroup && Source->getBlock() == CurLoop->getHeader() && isa<MemoryPhi>(Source));
2354 }
2355
2356 // For sinking, we'd need to check all Defs below this use. The getClobbering
2357 // call will look on the backedge of the loop, but will check aliasing with
2358 // the instructions on the previous iteration.
2359 // For example:
2360 // for (i ... )
2361 // load a[i] ( Use (LoE)
2362 // store a[i] ( 1 = Def (2), with 2 = Phi for the loop.
2363 // i++;
2364 // The load sees no clobbering inside the loop, as the backedge alias check
2365 // does phi translation, and will check aliasing against store a[i-1].
2366 // However sinking the load outside the loop, below the store is incorrect.
2367
2368 // For now, only sink if there are no Defs in the loop, and the existing ones
2369 // precede the use and are in the same block.
2370 // FIXME: Increase precision: Safe to sink if Use post dominates the Def;
2371 // needs PostDominatorTreeAnalysis.
2372 // FIXME: More precise: no Defs that alias this Use.
2373 if (Flags.tooManyMemoryAccesses())
2374 return true;
2375 for (auto *BB : CurLoop->getBlocks())
2376 if (pointerInvalidatedByBlock(*BB, *MSSA, *MU))
2377 return true;
2378 // When sinking, the source block may not be part of the loop so check it.
2379 if (!CurLoop->contains(&I))
2380 return pointerInvalidatedByBlock(*I.getParent(), *MSSA, *MU);
2381
2382 return false;
2383 }
2384
2385 bool pointerInvalidatedByBlock(BasicBlock &BB, MemorySSA &MSSA, MemoryUse &MU) {
2386 if (const auto *Accesses = MSSA.getBlockDefs(&BB))
2387 for (const auto &MA : *Accesses)
2388 if (const auto *MD = dyn_cast<MemoryDef>(&MA))
2389 if (MU.getBlock() != MD->getBlock() || !MSSA.locallyDominates(MD, &MU))
2390 return true;
2391 return false;
2392 }
2393
2394 /// Try to simplify things like (A < INV_1 AND icmp A < INV_2) into (A <
2395 /// min(INV_1, INV_2)), if INV_1 and INV_2 are both loop invariants and their
2396 /// minimum can be computed outside of the loop, and A is not loop-invariant.
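///
/// A minimal sketch of the rewrite (illustrative IR):
///   %c1 = icmp slt i32 %a, %inv1
///   %c2 = icmp slt i32 %a, %inv2
///   %r = and i1 %c1, %c2
/// becomes, with the min materialized in the preheader:
///   %m = call i32 @llvm.smin.i32(i32 %inv1, i32 %inv2)
///   %r = icmp slt i32 %a, %m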
2397 static bool hoistMinMax(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
2398 MemorySSAUpdater &MSSAU) {
2399 bool Inverse = false;
2400 using namespace PatternMatch;
2401 Value *Cond1, *Cond2;
2402 if (match(&I, m_LogicalOr(m_Value(Cond1), m_Value(Cond2)))) {
2403 Inverse = true;
2404 } else if (match(&I, m_LogicalAnd(m_Value(Cond1), m_Value(Cond2)))) {
2405 // Do nothing
2406 } else
2407 return false;
2408
2409 auto MatchICmpAgainstInvariant = [&](Value *C, ICmpInst::Predicate &P,
2410 Value *&LHS, Value *&RHS) {
2411 if (!match(C, m_OneUse(m_ICmp(P, m_Value(LHS), m_Value(RHS)))))
2412 return false;
2413 if (!LHS->getType()->isIntegerTy())
2414 return false;
2415 if (!ICmpInst::isRelational(P))
2416 return false;
2417 if (L.isLoopInvariant(LHS)) {
2418 std::swap(LHS, RHS);
2419 P = ICmpInst::getSwappedPredicate(P);
2420 }
2421 if (L.isLoopInvariant(LHS) || !L.isLoopInvariant(RHS))
2422 return false;
2423 if (Inverse)
2424 P = ICmpInst::getInversePredicate(P);
2425 return true;
2426 };
2427 ICmpInst::Predicate P1, P2;
2428 Value *LHS1, *LHS2, *RHS1, *RHS2;
2429 if (!MatchICmpAgainstInvariant(Cond1, P1, LHS1, RHS1) ||
2430 !MatchICmpAgainstInvariant(Cond2, P2, LHS2, RHS2))
2431 return false;
2432 if (P1 != P2 || LHS1 != LHS2)
2433 return false;
2434
2435 // Everything is fine, we can do the transform.
2436 bool UseMin = ICmpInst::isLT(P1) || ICmpInst::isLE(P1);
2437 assert(
2438 (UseMin || ICmpInst::isGT(P1) || ICmpInst::isGE(P1)) &&
2439 "Relational predicate is either less (or equal) or greater (or equal)!");
2440 Intrinsic::ID id = ICmpInst::isSigned(P1)
2441 ? (UseMin ? Intrinsic::smin : Intrinsic::smax)
2442 : (UseMin ? Intrinsic::umin : Intrinsic::umax);
2443 auto *Preheader = L.getLoopPreheader();
2444 assert(Preheader && "Loop is not in simplify form?");
2445 IRBuilder<> Builder(Preheader->getTerminator());
2446 // We are about to create a new guaranteed use for RHS2 which might not have
2447 // existed before (if it was a non-taken input of a logical and/or
2448 // instruction). If it was poison, we need to freeze it. Note that no new
2449 // uses of LHS1 and RHS1 are introduced, so they don't need this.
2450 if (isa<SelectInst>(I))
2451 RHS2 = Builder.CreateFreeze(RHS2, RHS2->getName() + ".fr");
2452 Value *NewRHS = Builder.CreateBinaryIntrinsic(
2453 id, RHS1, RHS2, nullptr, StringRef("invariant.") +
2454 (ICmpInst::isSigned(P1) ? "s" : "u") +
2455 (UseMin ? "min" : "max"));
2456 Builder.SetInsertPoint(&I);
2457 ICmpInst::Predicate P = P1;
2458 if (Inverse)
2459 P = ICmpInst::getInversePredicate(P);
2460 Value *NewCond = Builder.CreateICmp(P, LHS1, NewRHS);
2461 NewCond->takeName(&I);
2462 I.replaceAllUsesWith(NewCond);
2463 eraseInstruction(I, SafetyInfo, MSSAU);
2464 eraseInstruction(*cast<Instruction>(Cond1), SafetyInfo, MSSAU);
2465 eraseInstruction(*cast<Instruction>(Cond2), SafetyInfo, MSSAU);
2466 return true;
2467 }
2468
2469 /// Reassociate gep (gep ptr, idx1), idx2 to gep (gep ptr, idx2), idx1 if
2470 /// this allows hoisting the inner GEP.
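///
/// Sketch (illustrative IR), where %inv is loop-invariant and %i is not:
///   %src = getelementptr i8, ptr %p, i64 %i
///   %gep = getelementptr i8, ptr %src, i64 %inv
/// is reassociated so the invariant step can live in the preheader:
///   %invariant.gep = getelementptr i8, ptr %p, i64 %inv   ; preheader
///   %gep = getelementptr i8, ptr %invariant.gep, i64 %i   ; loop body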
2471 static bool hoistGEP(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
2472 MemorySSAUpdater &MSSAU, AssumptionCache *AC,
2473 DominatorTree *DT) {
2474 auto *GEP = dyn_cast<GetElementPtrInst>(&I);
2475 if (!GEP)
2476 return false;
2477
2478 auto *Src = dyn_cast<GetElementPtrInst>(GEP->getPointerOperand());
2479 if (!Src || !Src->hasOneUse() || !L.contains(Src))
2480 return false;
2481
2482 Value *SrcPtr = Src->getPointerOperand();
2483 auto LoopInvariant = [&](Value *V) { return L.isLoopInvariant(V); };
2484 if (!L.isLoopInvariant(SrcPtr) || !all_of(GEP->indices(), LoopInvariant))
2485 return false;
2486
2487 // This can only happen if !AllowSpeculation, otherwise this would already be
2488 // handled.
2489 // FIXME: Should we respect AllowSpeculation in these reassociation folds?
2490 // The flag exists to prevent metadata dropping, which is not relevant here.
2491 if (all_of(Src->indices(), LoopInvariant))
2492 return false;
2493
2494 // The swapped GEPs are inbounds if both original GEPs are inbounds
2495 // and the sign of the offsets is the same. For simplicity, only
2496 // handle both offsets being non-negative.
2497 const DataLayout &DL = GEP->getModule()->getDataLayout();
2498 auto NonNegative = [&](Value *V) {
2499 return isKnownNonNegative(V, SimplifyQuery(DL, DT, AC, GEP));
2500 };
2501 bool IsInBounds = Src->isInBounds() && GEP->isInBounds() &&
2502 all_of(Src->indices(), NonNegative) &&
2503 all_of(GEP->indices(), NonNegative);
2504
2505 BasicBlock *Preheader = L.getLoopPreheader();
2506 IRBuilder<> Builder(Preheader->getTerminator());
2507 Value *NewSrc = Builder.CreateGEP(GEP->getSourceElementType(), SrcPtr,
2508 SmallVector<Value *>(GEP->indices()),
2509 "invariant.gep", IsInBounds);
2510 Builder.SetInsertPoint(GEP);
2511 Value *NewGEP = Builder.CreateGEP(Src->getSourceElementType(), NewSrc,
2512 SmallVector<Value *>(Src->indices()), "gep",
2513 IsInBounds);
2514 GEP->replaceAllUsesWith(NewGEP);
2515 eraseInstruction(*GEP, SafetyInfo, MSSAU);
2516 eraseInstruction(*Src, SafetyInfo, MSSAU);
2517 return true;
2518 }
2519
2520 /// Try to turn things like "LV + C1 < C2" into "LV < C2 - C1". Here
2521 /// C1 and C2 are loop invariants and LV is a loop-variant.
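///
/// Sketch (illustrative IR), applicable once "C2 - C1" is proven not to
/// overflow:
///   %lhs = add nsw i32 %lv, %c1
///   %cmp = icmp slt i32 %lhs, %c2
/// becomes
///   %invariant.op = sub nsw i32 %c2, %c1   ; hoisted to the preheader
///   %cmp = icmp slt i32 %lv, %invariant.op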
2522 static bool hoistAdd(ICmpInst::Predicate Pred, Value *VariantLHS,
2523 Value *InvariantRHS, ICmpInst &ICmp, Loop &L,
2524 ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU,
2525 AssumptionCache *AC, DominatorTree *DT) {
2526 assert(ICmpInst::isSigned(Pred) && "Not supported yet!");
2527 assert(!L.isLoopInvariant(VariantLHS) && "Precondition.");
2528 assert(L.isLoopInvariant(InvariantRHS) && "Precondition.");
2529
2530 // Try to represent VariantLHS as a sum of invariant and variant operands.
2531 using namespace PatternMatch;
2532 Value *VariantOp, *InvariantOp;
2533 if (!match(VariantLHS, m_NSWAdd(m_Value(VariantOp), m_Value(InvariantOp))))
2534 return false;
2535
2536 // LHS itself is loop-variant; try to represent it in the form
2537 // "VariantOp + InvariantOp". If that is possible, then we can reassociate.
2538 if (L.isLoopInvariant(VariantOp))
2539 std::swap(VariantOp, InvariantOp);
2540 if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
2541 return false;
2542
2543 // In order to turn "LV + C1 < C2" into "LV < C2 - C1", we need to be able to
2544 // freely move values from the left side of the inequality to the right side
2545 // (just as in normal linear arithmetic). Overflows make things much more
2546 // complicated, so we want to avoid them.
  auto &DL = L.getHeader()->getModule()->getDataLayout();
  bool ProvedNoOverflowAfterReassociate =
      computeOverflowForSignedSub(InvariantRHS, InvariantOp,
                                  SimplifyQuery(DL, DT, AC, &ICmp)) ==
      llvm::OverflowResult::NeverOverflows;
  if (!ProvedNoOverflowAfterReassociate)
    return false;
  auto *Preheader = L.getLoopPreheader();
  assert(Preheader && "Loop is not in simplify form?");
  IRBuilder<> Builder(Preheader->getTerminator());
  Value *NewCmpOp = Builder.CreateSub(InvariantRHS, InvariantOp, "invariant.op",
                                      /*HasNUW*/ false, /*HasNSW*/ true);
  ICmp.setPredicate(Pred);
  ICmp.setOperand(0, VariantOp);
  ICmp.setOperand(1, NewCmpOp);
  eraseInstruction(cast<Instruction>(*VariantLHS), SafetyInfo, MSSAU);
  return true;
}

/// Try to reassociate and hoist the following two patterns:
/// LV - C1 < C2 --> LV < C1 + C2,
/// C1 - LV < C2 --> LV > C1 - C2.
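///
/// In both cases C1 and C2 are loop invariants, LV is loop-variant, and the
/// reassociated "C1 + C2" (resp. "C1 - C2") is computed once in the preheader.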
static bool hoistSub(ICmpInst::Predicate Pred, Value *VariantLHS,
                     Value *InvariantRHS, ICmpInst &ICmp, Loop &L,
                     ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU,
                     AssumptionCache *AC, DominatorTree *DT) {
  assert(ICmpInst::isSigned(Pred) && "Not supported yet!");
  assert(!L.isLoopInvariant(VariantLHS) && "Precondition.");
  assert(L.isLoopInvariant(InvariantRHS) && "Precondition.");

  // Try to represent VariantLHS as a difference of invariant and variant
  // operands.
  using namespace PatternMatch;
  Value *VariantOp, *InvariantOp;
  if (!match(VariantLHS, m_NSWSub(m_Value(VariantOp), m_Value(InvariantOp))))
    return false;

  bool VariantSubtracted = false;
  // LHS itself is loop-variant; try to represent it in the form
  // "VariantOp + InvariantOp". If that is possible, we can reassociate. If
  // the variant operand is the one being subtracted, we use a slightly
  // different scheme.
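  // ("C1 - LV < C2" is equivalent to "-LV < C2 - C1", i.e. "LV > C1 - C2",
  // which is why the predicate must be swapped in that case.)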
  if (L.isLoopInvariant(VariantOp)) {
    std::swap(VariantOp, InvariantOp);
    VariantSubtracted = true;
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }
  if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
    return false;

  // In order to turn "LV - C1 < C2" into "LV < C2 + C1", we need to be able to
  // freely move values from the left side of the inequality to the right side
  // (just as in normal linear arithmetic). Overflow makes things much more
  // complicated, so we want to avoid it. Likewise, for "C1 - LV < C2" we need
  // to prove that "C1 - C2" does not overflow.
  auto &DL = L.getHeader()->getModule()->getDataLayout();
  SimplifyQuery SQ(DL, DT, AC, &ICmp);
  if (VariantSubtracted) {
    // C1 - LV < C2 --> LV > C1 - C2
    if (computeOverflowForSignedSub(InvariantOp, InvariantRHS, SQ) !=
        llvm::OverflowResult::NeverOverflows)
      return false;
  } else {
    // LV - C1 < C2 --> LV < C1 + C2
    if (computeOverflowForSignedAdd(InvariantOp, InvariantRHS, SQ) !=
        llvm::OverflowResult::NeverOverflows)
      return false;
  }
  auto *Preheader = L.getLoopPreheader();
  assert(Preheader && "Loop is not in simplify form?");
  IRBuilder<> Builder(Preheader->getTerminator());
  Value *NewCmpOp =
      VariantSubtracted
          ? Builder.CreateSub(InvariantOp, InvariantRHS, "invariant.op",
                              /*HasNUW*/ false, /*HasNSW*/ true)
          : Builder.CreateAdd(InvariantOp, InvariantRHS, "invariant.op",
                              /*HasNUW*/ false, /*HasNSW*/ true);
  ICmp.setPredicate(Pred);
  ICmp.setOperand(0, VariantOp);
  ICmp.setOperand(1, NewCmpOp);
  eraseInstruction(cast<Instruction>(*VariantLHS), SafetyInfo, MSSAU);
  return true;
}

/// Reassociate and hoist add/sub expressions.
static bool hoistAddSub(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
                        MemorySSAUpdater &MSSAU, AssumptionCache *AC,
                        DominatorTree *DT) {
  using namespace PatternMatch;
  ICmpInst::Predicate Pred;
  Value *LHS, *RHS;
  if (!match(&I, m_ICmp(Pred, m_Value(LHS), m_Value(RHS))))
    return false;

  // TODO: Support unsigned predicates?
  if (!ICmpInst::isSigned(Pred))
    return false;

  // Put the variant operand in the LHS position.
  if (L.isLoopInvariant(LHS)) {
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }
  // We want to delete the initial operation after reassociation, so only do it
  // if it has no other uses.
  if (L.isLoopInvariant(LHS) || !L.isLoopInvariant(RHS) || !LHS->hasOneUse())
    return false;

  // TODO: We could go with a smarter context, taking the common dominator of
  // all I's users instead of I itself.
  if (hoistAdd(Pred, LHS, RHS, cast<ICmpInst>(I), L, SafetyInfo, MSSAU, AC, DT))
    return true;

  if (hoistSub(Pred, LHS, RHS, cast<ICmpInst>(I), L, SafetyInfo, MSSAU, AC, DT))
    return true;

  return false;
}

/// Try to reassociate expressions like ((A1 * B1) + (A2 * B2) + ...) * C,
/// where A1, A2, ... and C are loop invariants, into expressions like
/// ((A1 * C * B1) + (A2 * C * B2) + ...) and hoist the (A1 * C), (A2 * C), ...
/// invariant expressions. This function returns true only if hoisting has
/// actually occurred.
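///
/// The transformation is only performed when the whole expression tree is
/// reassociable, i.e. every interior node has a single use and all nodes
/// carry the 'reassoc' and 'nsz' fast-math flags.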
static bool hoistFPAssociation(Instruction &I, Loop &L,
                               ICFLoopSafetyInfo &SafetyInfo,
                               MemorySSAUpdater &MSSAU, AssumptionCache *AC,
                               DominatorTree *DT) {
  using namespace PatternMatch;
  Value *VariantOp = nullptr, *InvariantOp = nullptr;

  if (!match(&I, m_FMul(m_Value(VariantOp), m_Value(InvariantOp))) ||
      !I.hasAllowReassoc() || !I.hasNoSignedZeros())
    return false;
  if (L.isLoopInvariant(VariantOp))
    std::swap(VariantOp, InvariantOp);
  if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
    return false;
  Value *Factor = InvariantOp;

  // First, we need to make sure we should do the transformation.
  SmallVector<Use *> Changes;
  SmallVector<BinaryOperator *> Worklist;
  if (BinaryOperator *VariantBinOp = dyn_cast<BinaryOperator>(VariantOp))
    Worklist.push_back(VariantBinOp);
  while (!Worklist.empty()) {
    BinaryOperator *BO = Worklist.pop_back_val();
    if (!BO->hasOneUse() || !BO->hasAllowReassoc() || !BO->hasNoSignedZeros())
      return false;
    BinaryOperator *Op0, *Op1;
    if (match(BO, m_FAdd(m_BinOp(Op0), m_BinOp(Op1)))) {
      Worklist.push_back(Op0);
      Worklist.push_back(Op1);
      continue;
    }
    if (BO->getOpcode() != Instruction::FMul || L.isLoopInvariant(BO))
      return false;
    Use &U0 = BO->getOperandUse(0);
    Use &U1 = BO->getOperandUse(1);
    if (L.isLoopInvariant(U0))
      Changes.push_back(&U0);
    else if (L.isLoopInvariant(U1))
      Changes.push_back(&U1);
    else
      return false;
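    // Bail out once the number of multiplies to rewrite exceeds the
    // configurable limit; every entry in Changes becomes an extra FMul in
    // the preheader.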
    if (Changes.size() > FPAssociationUpperLimit)
      return false;
  }
  if (Changes.empty())
    return false;

  // We know the transformation is valid, so now perform it.
  auto *Preheader = L.getLoopPreheader();
  assert(Preheader && "Loop is not in simplify form?");
  IRBuilder<> Builder(Preheader->getTerminator());
  for (auto *U : Changes) {
    assert(L.isLoopInvariant(U->get()));
    Instruction *Ins = cast<Instruction>(U->getUser());
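    // Pre-multiply the invariant operand by the common factor in the
    // preheader, copying the fast-math flags from the user instruction.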
    U->set(Builder.CreateFMulFMF(U->get(), Factor, Ins, "factor.op.fmul"));
  }
  I.replaceAllUsesWith(VariantOp);
  eraseInstruction(I, SafetyInfo, MSSAU);
  return true;
}

static bool hoistArithmetics(Instruction &I, Loop &L,
                             ICFLoopSafetyInfo &SafetyInfo,
                             MemorySSAUpdater &MSSAU, AssumptionCache *AC,
                             DominatorTree *DT) {
  // Optimize complex patterns, such as (x < INV1 && x < INV2), turning them
  // into (x < min(INV1, INV2)), and hoisting the invariant part of this
  // expression out of the loop.
  if (hoistMinMax(I, L, SafetyInfo, MSSAU)) {
    ++NumHoisted;
    ++NumMinMaxHoisted;
    return true;
  }

  // Try to hoist GEPs by reassociation.
  if (hoistGEP(I, L, SafetyInfo, MSSAU, AC, DT)) {
    ++NumHoisted;
    ++NumGEPsHoisted;
    return true;
  }

  // Try to hoist add/sub's by reassociation.
  if (hoistAddSub(I, L, SafetyInfo, MSSAU, AC, DT)) {
    ++NumHoisted;
    ++NumAddSubHoisted;
    return true;
  }

  if (hoistFPAssociation(I, L, SafetyInfo, MSSAU, AC, DT)) {
    ++NumHoisted;
    ++NumFPAssociationsHoisted;
    return true;
  }

  return false;
}

/// Little predicate that returns true if the specified basic block is in
/// a subloop of the current one, not the current one itself.
///
static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
  assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
  return LI->getLoopFor(BB) != CurLoop;
}
