//===-- LoopUtils.cpp - Loop Utility functions -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines common loop utility functions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PriorityWorklist.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "loop-utils"

static const char *LLVMLoopDisableNonforced = "llvm.loop.disable_nonforced";
static const char *LLVMLoopDisableLICM = "llvm.licm.disable";

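// Loop hints such as the two names above are carried on the loop's !llvm.loop
// metadata node, which is attached to the loop latch terminator. As an
// illustrative sketch (hypothetical metadata numbering, not tied to any
// particular pass), the IR would look roughly like:
//
//   br i1 %cond, label %header, label %exit, !llvm.loop !0
//   ...
//   !0 = distinct !{!0, !1, !2}
//   !1 = !{!"llvm.loop.disable_nonforced"}
//   !2 = !{!"llvm.licm.disable"}
//
// The first operand of the loop ID refers to the node itself so that it stays
// distinct; the helpers below query and update nodes of this form.
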
bool llvm::formDedicatedExitBlocks(Loop *L, DominatorTree *DT, LoopInfo *LI,
                                   MemorySSAUpdater *MSSAU,
                                   bool PreserveLCSSA) {
  bool Changed = false;

  // We re-use a vector for the in-loop predecessors.
  SmallVector<BasicBlock *, 4> InLoopPredecessors;

  auto RewriteExit = [&](BasicBlock *BB) {
    assert(InLoopPredecessors.empty() &&
           "Must start with an empty predecessors list!");
    auto Cleanup = make_scope_exit([&] { InLoopPredecessors.clear(); });

    // See if there are any non-loop predecessors of this exit block and
    // keep track of the in-loop predecessors.
    bool IsDedicatedExit = true;
    for (auto *PredBB : predecessors(BB))
      if (L->contains(PredBB)) {
        if (isa<IndirectBrInst>(PredBB->getTerminator()))
          // We cannot rewrite exiting edges from an indirectbr.
          return false;
        if (isa<CallBrInst>(PredBB->getTerminator()))
          // We cannot rewrite exiting edges from a callbr.
          return false;

        InLoopPredecessors.push_back(PredBB);
      } else {
        IsDedicatedExit = false;
      }

    assert(!InLoopPredecessors.empty() && "Must have *some* loop predecessor!");

    // Nothing to do if this is already a dedicated exit.
    if (IsDedicatedExit)
      return false;

    auto *NewExitBB = SplitBlockPredecessors(
        BB, InLoopPredecessors, ".loopexit", DT, LI, MSSAU, PreserveLCSSA);

    if (!NewExitBB)
      LLVM_DEBUG(
          dbgs() << "WARNING: Can't create a dedicated exit block for loop: "
                 << *L << "\n");
    else
      LLVM_DEBUG(dbgs() << "LoopSimplify: Creating dedicated exit block "
                        << NewExitBB->getName() << "\n");
    return true;
  };

  // Walk the exit blocks directly rather than building up a data structure for
  // them, but only visit each one once.
  SmallPtrSet<BasicBlock *, 4> Visited;
  for (auto *BB : L->blocks())
    for (auto *SuccBB : successors(BB)) {
      // We're looking for exit blocks so skip in-loop successors.
      if (L->contains(SuccBB))
        continue;

      // Visit each exit block exactly once.
      if (!Visited.insert(SuccBB).second)
        continue;

      Changed |= RewriteExit(SuccBB);
    }

  return Changed;
}
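
// A minimal caller sketch (hypothetical helper, for illustration only): a
// pass that already holds the usual loop analyses can form dedicated exits
// before running a transform that assumes them.
//
//   static bool prepareLoopExits(Loop *L, DominatorTree &DT, LoopInfo &LI) {
//     return formDedicatedExitBlocks(L, &DT, &LI, /*MSSAU=*/nullptr,
//                                    /*PreserveLCSSA=*/true);
//   }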

/// Returns the instructions defined in the loop that are used outside of it.
SmallVector<Instruction *, 8> llvm::findDefsUsedOutsideOfLoop(Loop *L) {
  SmallVector<Instruction *, 8> UsedOutside;

  for (auto *Block : L->getBlocks())
    // FIXME: I believe that this could use copy_if if the Inst reference could
    // be adapted into a pointer.
    for (auto &Inst : *Block) {
      auto Users = Inst.users();
      if (any_of(Users, [&](User *U) {
            auto *Use = cast<Instruction>(U);
            return !L->contains(Use->getParent());
          }))
        UsedOutside.push_back(&Inst);
    }

  return UsedOutside;
}

void llvm::getLoopAnalysisUsage(AnalysisUsage &AU) {
  // By definition, all loop passes need the LoopInfo analysis and the
  // Dominator tree it depends on. Because they all participate in the loop
  // pass manager, they must also preserve these.
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addPreserved<DominatorTreeWrapperPass>();
  AU.addRequired<LoopInfoWrapperPass>();
  AU.addPreserved<LoopInfoWrapperPass>();

  // We must also preserve LoopSimplify and LCSSA. We locally access their IDs
  // here because users shouldn't directly get them from this header.
  extern char &LoopSimplifyID;
  extern char &LCSSAID;
  AU.addRequiredID(LoopSimplifyID);
  AU.addPreservedID(LoopSimplifyID);
  AU.addRequiredID(LCSSAID);
  AU.addPreservedID(LCSSAID);
  // This is used in the LPPassManager to perform LCSSA verification on passes
  // which preserve LCSSA form.
  AU.addRequired<LCSSAVerificationPass>();
  AU.addPreserved<LCSSAVerificationPass>();

  // Loop passes are designed to run inside of a loop pass manager which means
  // that any function analyses they require must be required by the first loop
  // pass in the manager (so that it is computed before the loop pass manager
  // runs) and preserved by all loop passes in the manager. To make this
  // reasonably robust, the set needed for most loop passes is maintained here.
  // If your loop pass requires an analysis not listed here, you will need to
  // carefully audit the loop pass manager nesting structure that results.
  AU.addRequired<AAResultsWrapperPass>();
  AU.addPreserved<AAResultsWrapperPass>();
  AU.addPreserved<BasicAAWrapperPass>();
  AU.addPreserved<GlobalsAAWrapperPass>();
  AU.addPreserved<SCEVAAWrapperPass>();
  AU.addRequired<ScalarEvolutionWrapperPass>();
  AU.addPreserved<ScalarEvolutionWrapperPass>();
  // FIXME: When all loop passes preserve MemorySSA, it can be required and
  // preserved here instead of the individual handling in each pass.
}

/// Manually defined generic "LoopPass" dependency initialization. This is used
/// to initialize the exact set of passes from above in \c
/// getLoopAnalysisUsage. It can be used within a loop pass's initialization
/// with:
///
///   INITIALIZE_PASS_DEPENDENCY(LoopPass)
///
/// As-if "LoopPass" were a pass.
void llvm::initializeLoopPassPass(PassRegistry &Registry) {
  INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
  INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
  INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
  INITIALIZE_PASS_DEPENDENCY(LCSSAWrapperPass)
  INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
  INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
  INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
  INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
  INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
  INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
}

/// Create MDNode for input string.
static MDNode *createStringMetadata(Loop *TheLoop, StringRef Name,
                                    unsigned V) {
  LLVMContext &Context = TheLoop->getHeader()->getContext();
  Metadata *MDs[] = {
      MDString::get(Context, Name),
      ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(Context), V))};
  return MDNode::get(Context, MDs);
}

/// Set input string into loop metadata by keeping other values intact.
/// If the string is already in the loop metadata, update its value if it
/// differs.
void llvm::addStringMetadataToLoop(Loop *TheLoop, const char *StringMD,
                                   unsigned V) {
  SmallVector<Metadata *, 4> MDs(1);
  // If the loop already has metadata, retain it.
  MDNode *LoopID = TheLoop->getLoopID();
  if (LoopID) {
    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      MDNode *Node = cast<MDNode>(LoopID->getOperand(i));
      // If it is of form key = value, try to parse it.
      if (Node->getNumOperands() == 2) {
        MDString *S = dyn_cast<MDString>(Node->getOperand(0));
        if (S && S->getString().equals(StringMD)) {
          ConstantInt *IntMD =
              mdconst::extract_or_null<ConstantInt>(Node->getOperand(1));
          if (IntMD && IntMD->getSExtValue() == V)
            // It is already in place. Do nothing.
            return;
          // We need to update the value, so just skip it here and it will
          // be added after copying the other existing nodes.
          continue;
        }
      }
      MDs.push_back(Node);
    }
  }
  // Add new metadata.
  MDs.push_back(createStringMetadata(TheLoop, StringMD, V));
  // Replace current metadata node with new one.
  LLVMContext &Context = TheLoop->getHeader()->getContext();
  MDNode *NewLoopID = MDNode::get(Context, MDs);
  // Set operand 0 to refer to the loop id itself.
  NewLoopID->replaceOperandWith(0, NewLoopID);
  TheLoop->setLoopID(NewLoopID);
}
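
// Illustrative sketch of the rewrite above (hypothetical metadata values):
// calling addStringMetadataToLoop(L, "llvm.loop.unroll.count", 4) on a loop
// whose ID is
//
//   !0 = distinct !{!0, !1}      ; !1 = !{!"llvm.loop.unroll.count", i32 2}
//
// produces a fresh self-referential node
//
//   !2 = distinct !{!2, !3}      ; !3 = !{!"llvm.loop.unroll.count", i32 4}
//
// while any unrelated operands of the old node are copied over unchanged.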

Optional<ElementCount>
llvm::getOptionalElementCountLoopAttribute(const Loop *TheLoop) {
  Optional<int> Width =
      getOptionalIntLoopAttribute(TheLoop, "llvm.loop.vectorize.width");

  if (Width.hasValue()) {
    Optional<int> IsScalable = getOptionalIntLoopAttribute(
        TheLoop, "llvm.loop.vectorize.scalable.enable");
    return ElementCount::get(*Width, IsScalable.getValueOr(false));
  }

  return None;
}

Optional<MDNode *> llvm::makeFollowupLoopID(
    MDNode *OrigLoopID, ArrayRef<StringRef> FollowupOptions,
    const char *InheritOptionsExceptPrefix, bool AlwaysNew) {
  if (!OrigLoopID) {
    if (AlwaysNew)
      return nullptr;
    return None;
  }

  assert(OrigLoopID->getOperand(0) == OrigLoopID);

  bool InheritAllAttrs = !InheritOptionsExceptPrefix;
  bool InheritSomeAttrs =
      InheritOptionsExceptPrefix && InheritOptionsExceptPrefix[0] != '\0';
  SmallVector<Metadata *, 8> MDs;
  MDs.push_back(nullptr);

  bool Changed = false;
  if (InheritAllAttrs || InheritSomeAttrs) {
    for (const MDOperand &Existing : drop_begin(OrigLoopID->operands())) {
      MDNode *Op = cast<MDNode>(Existing.get());

      auto InheritThisAttribute = [InheritSomeAttrs,
                                   InheritOptionsExceptPrefix](MDNode *Op) {
        if (!InheritSomeAttrs)
          return false;

        // Skip malformed attribute metadata nodes.
        if (Op->getNumOperands() == 0)
          return true;
        Metadata *NameMD = Op->getOperand(0).get();
        if (!isa<MDString>(NameMD))
          return true;
        StringRef AttrName = cast<MDString>(NameMD)->getString();

        // Do not inherit excluded attributes.
        return !AttrName.startswith(InheritOptionsExceptPrefix);
      };

      if (InheritThisAttribute(Op))
        MDs.push_back(Op);
      else
        Changed = true;
    }
  } else {
    // Modified if we dropped at least one attribute.
    Changed = OrigLoopID->getNumOperands() > 1;
  }

  bool HasAnyFollowup = false;
  for (StringRef OptionName : FollowupOptions) {
    MDNode *FollowupNode = findOptionMDForLoopID(OrigLoopID, OptionName);
    if (!FollowupNode)
      continue;

    HasAnyFollowup = true;
    for (const MDOperand &Option : drop_begin(FollowupNode->operands())) {
      MDs.push_back(Option.get());
      Changed = true;
    }
  }

  // Attributes of the followup loop were not specified explicitly, so signal
  // to the transformation pass to add suitable attributes.
  if (!AlwaysNew && !HasAnyFollowup)
    return None;

  // If no attributes were added or removed, the previous loop ID can be
  // reused.
  if (!AlwaysNew && !Changed)
    return OrigLoopID;

  // No attributes is equivalent to having no !llvm.loop metadata at all.
  if (MDs.size() == 1)
    return nullptr;

  // Build the new loop ID.
  MDTuple *FollowupLoopID = MDNode::get(OrigLoopID->getContext(), MDs);
  FollowupLoopID->replaceOperandWith(0, FollowupLoopID);
  return FollowupLoopID;
}
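
// Illustrative sketch of followup metadata (hypothetical attribute values):
// given an original loop ID such as
//
//   !0 = distinct !{!0, !1, !2}
//   !1 = !{!"llvm.loop.unroll.enable"}
//   !2 = !{!"llvm.loop.unroll.followup_unrolled", !3}
//   !3 = !{!"llvm.loop.vectorize.enable", i1 true}
//
// a call such as
//   makeFollowupLoopID(!0, {"llvm.loop.unroll.followup_unrolled"},
//                      "llvm.loop.unroll.")
// drops the "llvm.loop.unroll."-prefixed attributes and appends the nodes
// listed by the followup option, yielding a new self-referential loop ID
// roughly equivalent to distinct !{<self>, !3}.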

bool llvm::hasDisableAllTransformsHint(const Loop *L) {
  return getBooleanLoopAttribute(L, LLVMLoopDisableNonforced);
}

bool llvm::hasDisableLICMTransformsHint(const Loop *L) {
  return getBooleanLoopAttribute(L, LLVMLoopDisableLICM);
}

TransformationMode llvm::hasUnrollTransformation(const Loop *L) {
  if (getBooleanLoopAttribute(L, "llvm.loop.unroll.disable"))
    return TM_SuppressedByUser;

  Optional<int> Count =
      getOptionalIntLoopAttribute(L, "llvm.loop.unroll.count");
  if (Count.hasValue())
    return Count.getValue() == 1 ? TM_SuppressedByUser : TM_ForcedByUser;

  if (getBooleanLoopAttribute(L, "llvm.loop.unroll.enable"))
    return TM_ForcedByUser;

  if (getBooleanLoopAttribute(L, "llvm.loop.unroll.full"))
    return TM_ForcedByUser;

  if (hasDisableAllTransformsHint(L))
    return TM_Disable;

  return TM_Unspecified;
}

TransformationMode llvm::hasUnrollAndJamTransformation(const Loop *L) {
  if (getBooleanLoopAttribute(L, "llvm.loop.unroll_and_jam.disable"))
    return TM_SuppressedByUser;

  Optional<int> Count =
      getOptionalIntLoopAttribute(L, "llvm.loop.unroll_and_jam.count");
  if (Count.hasValue())
    return Count.getValue() == 1 ? TM_SuppressedByUser : TM_ForcedByUser;

  if (getBooleanLoopAttribute(L, "llvm.loop.unroll_and_jam.enable"))
    return TM_ForcedByUser;

  if (hasDisableAllTransformsHint(L))
    return TM_Disable;

  return TM_Unspecified;
}

TransformationMode llvm::hasVectorizeTransformation(const Loop *L) {
  Optional<bool> Enable =
      getOptionalBoolLoopAttribute(L, "llvm.loop.vectorize.enable");

  if (Enable == false)
    return TM_SuppressedByUser;

  Optional<ElementCount> VectorizeWidth =
      getOptionalElementCountLoopAttribute(L);
  Optional<int> InterleaveCount =
      getOptionalIntLoopAttribute(L, "llvm.loop.interleave.count");

  // 'Forcing' vector width and interleave count to one effectively disables
  // this transformation.
  if (Enable == true && VectorizeWidth && VectorizeWidth->isScalar() &&
      InterleaveCount == 1)
    return TM_SuppressedByUser;

  if (getBooleanLoopAttribute(L, "llvm.loop.isvectorized"))
    return TM_Disable;

  if (Enable == true)
    return TM_ForcedByUser;

  if ((VectorizeWidth && VectorizeWidth->isScalar()) && InterleaveCount == 1)
    return TM_Disable;

  if ((VectorizeWidth && VectorizeWidth->isVector()) || InterleaveCount > 1)
    return TM_Enable;

  if (hasDisableAllTransformsHint(L))
    return TM_Disable;

  return TM_Unspecified;
}
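
// A worked example of the lookup above (hypothetical hint values): a loop
// carrying
//
//   !{!"llvm.loop.vectorize.enable", i1 true}
//   !{!"llvm.loop.vectorize.width", i32 1}
//   !{!"llvm.loop.interleave.count", i32 1}
//
// reports TM_SuppressedByUser, because forcing both the width and the
// interleave count to one leaves nothing for the vectorizer to do, whereas a
// bare "llvm.loop.vectorize.enable" hint would report TM_ForcedByUser.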

TransformationMode llvm::hasDistributeTransformation(const Loop *L) {
  if (getBooleanLoopAttribute(L, "llvm.loop.distribute.enable"))
    return TM_ForcedByUser;

  if (hasDisableAllTransformsHint(L))
    return TM_Disable;

  return TM_Unspecified;
}

TransformationMode llvm::hasLICMVersioningTransformation(const Loop *L) {
  if (getBooleanLoopAttribute(L, "llvm.loop.licm_versioning.disable"))
    return TM_SuppressedByUser;

  if (hasDisableAllTransformsHint(L))
    return TM_Disable;

  return TM_Unspecified;
}

/// Does a BFS from a given node to all of its children inside a given loop.
/// The returned vector of nodes includes the starting point.
SmallVector<DomTreeNode *, 16>
llvm::collectChildrenInLoop(DomTreeNode *N, const Loop *CurLoop) {
  SmallVector<DomTreeNode *, 16> Worklist;
  auto AddRegionToWorklist = [&](DomTreeNode *DTN) {
    // Only include subregions in the top level loop.
    BasicBlock *BB = DTN->getBlock();
    if (CurLoop->contains(BB))
      Worklist.push_back(DTN);
  };

  AddRegionToWorklist(N);

  for (size_t I = 0; I < Worklist.size(); I++) {
    for (DomTreeNode *Child : Worklist[I]->children())
      AddRegionToWorklist(Child);
  }

  return Worklist;
}

void llvm::deleteDeadLoop(Loop *L, DominatorTree *DT, ScalarEvolution *SE,
                          LoopInfo *LI, MemorySSA *MSSA) {
  assert((!DT || L->isLCSSAForm(*DT)) && "Expected LCSSA!");
  auto *Preheader = L->getLoopPreheader();
  assert(Preheader && "Preheader should exist!");

  std::unique_ptr<MemorySSAUpdater> MSSAU;
  if (MSSA)
    MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);

  // Now that we know the removal is safe, remove the loop by changing the
  // branch from the preheader to go to the single exit block.
  //
  // Because we're deleting a large chunk of code at once, the sequence in
  // which we remove things is very important to avoid invalidation issues.

  // Tell ScalarEvolution that the loop is deleted. Do this before
  // deleting the loop so that ScalarEvolution can look at the loop
  // to determine what it needs to clean up.
  if (SE)
    SE->forgetLoop(L);

  auto *OldBr = dyn_cast<BranchInst>(Preheader->getTerminator());
  assert(OldBr && "Preheader must end with a branch");
  assert(OldBr->isUnconditional() && "Preheader must have a single successor");
  // Connect the preheader to the exit block. Keep the old edge to the header
  // around to perform the dominator tree update in two separate steps
  // -- #1 insertion of the edge preheader -> exit and #2 deletion of the edge
  // preheader -> header.
  //
  //
  // 0. Preheader             1. Preheader           2. Preheader
  //        |                      |   |                   |
  //        V                      |   V                   |
  //      Header <--\              | Header <--\           | Header <--\
  //       |  |     |              |  |  |     |           |  |  |     |
  //       |  V     |              |  |  V     |           |  |  V     |
  //       | Body --/              |  | Body --/           |  | Body --/
  //       V                       V  V                    V  V
  //      Exit                     Exit                    Exit
  //
  // By doing this in two separate steps we can perform the dominator tree
  // update without using the batch update API.
  //
  // Even when the loop is never executed, we cannot remove the edge from the
  // source block to the exit block. Consider the case where the unexecuted
  // loop branches back to an outer loop. If we deleted the loop and removed
  // the edge coming to this inner loop, this would break the outer loop
  // structure (by deleting the backedge of the outer loop). If the outer loop
  // is indeed a non-loop, it will be deleted in a future iteration of the loop
  // deletion pass.
  IRBuilder<> Builder(OldBr);

  auto *ExitBlock = L->getUniqueExitBlock();
  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
  if (ExitBlock) {
    assert(ExitBlock && "Should have a unique exit block!");
    assert(L->hasDedicatedExits() && "Loop should have dedicated exits!");

    Builder.CreateCondBr(Builder.getFalse(), L->getHeader(), ExitBlock);
    // Remove the old branch. The conditional branch becomes a new terminator.
    OldBr->eraseFromParent();

    // Rewrite phis in the exit block to get their inputs from the Preheader
    // instead of the exiting block.
    for (PHINode &P : ExitBlock->phis()) {
      // Set the zero'th element of Phi to be from the preheader and remove all
      // other incoming values. Given the loop has dedicated exits, all other
      // incoming values must be from the exiting blocks.
      int PredIndex = 0;
      P.setIncomingBlock(PredIndex, Preheader);
      // Removes all incoming values from all other exiting blocks (including
      // duplicate values from an exiting block).
      // Nuke all entries except the zero'th entry which is the preheader entry.
      // NOTE! We need to remove Incoming Values in the reverse order as done
      // below, to keep the indices valid for deletion (removeIncomingValues
      // updates getNumIncomingValues and shifts all values down into the
      // operand being deleted).
      for (unsigned i = 0, e = P.getNumIncomingValues() - 1; i != e; ++i)
        P.removeIncomingValue(e - i, false);

      assert((P.getNumIncomingValues() == 1 &&
              P.getIncomingBlock(PredIndex) == Preheader) &&
             "Should have exactly one value and that's from the preheader!");
    }

    if (DT) {
      DTU.applyUpdates({{DominatorTree::Insert, Preheader, ExitBlock}});
      if (MSSA) {
        MSSAU->applyUpdates({{DominatorTree::Insert, Preheader, ExitBlock}},
                            *DT);
        if (VerifyMemorySSA)
          MSSA->verifyMemorySSA();
      }
    }

    // Disconnect the loop body by branching directly to its exit.
    Builder.SetInsertPoint(Preheader->getTerminator());
    Builder.CreateBr(ExitBlock);
    // Remove the old branch.
    Preheader->getTerminator()->eraseFromParent();
  } else {
    assert(L->hasNoExitBlocks() &&
           "Loop should have either zero or one exit blocks.");

    Builder.SetInsertPoint(OldBr);
    Builder.CreateUnreachable();
    Preheader->getTerminator()->eraseFromParent();
  }

  if (DT) {
    DTU.applyUpdates({{DominatorTree::Delete, Preheader, L->getHeader()}});
    if (MSSA) {
      MSSAU->applyUpdates({{DominatorTree::Delete, Preheader, L->getHeader()}},
                          *DT);
      SmallSetVector<BasicBlock *, 8> DeadBlockSet(L->block_begin(),
                                                   L->block_end());
      MSSAU->removeBlocks(DeadBlockSet);
      if (VerifyMemorySSA)
        MSSA->verifyMemorySSA();
    }
  }

  // Use a set to unique the debug variables and a vector to guarantee
  // deterministic ordering.
  llvm::SmallDenseSet<std::pair<DIVariable *, DIExpression *>, 4> DeadDebugSet;
  llvm::SmallVector<DbgVariableIntrinsic *, 4> DeadDebugInst;

  if (ExitBlock) {
    // Given LCSSA form is satisfied, we should not have users of instructions
    // within the dead loop outside of the loop. However, LCSSA doesn't take
    // unreachable uses into account. We handle them here.
    // We could do this after dropping all references (then all users inside
    // the loop would already be eliminated, leaving us less work to do), but
    // according to the API doc of User::dropAllReferences the only valid
    // operation after dropping references is deletion. So let's substitute all
    // usages of instructions from the loop with an undef value of the
    // corresponding type first.
    for (auto *Block : L->blocks())
      for (Instruction &I : *Block) {
        auto *Undef = UndefValue::get(I.getType());
        for (Value::use_iterator UI = I.use_begin(), E = I.use_end();
             UI != E;) {
          Use &U = *UI;
          ++UI;
          if (auto *Usr = dyn_cast<Instruction>(U.getUser()))
            if (L->contains(Usr->getParent()))
              continue;
          // If we have a DT then we can check that uses outside the loop occur
          // only in unreachable blocks.
          if (DT)
            assert(!DT->isReachableFromEntry(U) &&
                   "Unexpected user in reachable block");
          U.set(Undef);
        }
        auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I);
        if (!DVI)
          continue;
        auto Key =
            DeadDebugSet.find({DVI->getVariable(), DVI->getExpression()});
        if (Key != DeadDebugSet.end())
          continue;
        DeadDebugSet.insert({DVI->getVariable(), DVI->getExpression()});
        DeadDebugInst.push_back(DVI);
      }

    // After the loop has been deleted all the values defined and modified
    // inside the loop are going to be unavailable.
    // Since debug values in the loop have been deleted, inserting an undef
    // dbg.value truncates the range of any dbg.value before the loop where the
    // loop used to be. This is particularly important for constant values.
    DIBuilder DIB(*ExitBlock->getModule());
    Instruction *InsertDbgValueBefore = ExitBlock->getFirstNonPHI();
    assert(InsertDbgValueBefore &&
           "There should be a non-PHI instruction in exit block, else these "
           "instructions will have no parent.");
    for (auto *DVI : DeadDebugInst)
      DIB.insertDbgValueIntrinsic(UndefValue::get(Builder.getInt32Ty()),
                                  DVI->getVariable(), DVI->getExpression(),
                                  DVI->getDebugLoc(), InsertDbgValueBefore);
  }

  // Remove the block from the reference counting scheme, so that we can
  // delete it freely later.
  for (auto *Block : L->blocks())
    Block->dropAllReferences();

  if (MSSA && VerifyMemorySSA)
    MSSA->verifyMemorySSA();

  if (LI) {
    // Erase the instructions and the blocks without having to worry
    // about ordering because we already dropped the references.
    // NOTE: This iteration is safe because erasing the block does not remove
    // its entry from the loop's block list. We do that in the next section.
    for (Loop::block_iterator LpI = L->block_begin(), LpE = L->block_end();
         LpI != LpE; ++LpI)
      (*LpI)->eraseFromParent();

    // Finally, the blocks from loopinfo. This has to happen late because
    // otherwise our loop iterators won't work.

    SmallPtrSet<BasicBlock *, 8> blocks;
    blocks.insert(L->block_begin(), L->block_end());
    for (BasicBlock *BB : blocks)
      LI->removeBlock(BB);

    // The last step is to update LoopInfo now that we've eliminated this loop.
    // Note: LoopInfo::erase removes the given loop and relinks its subloops
    // with its parent, while removeLoop/removeChildLoop remove the given loop
    // but do not relink its subloops, which is what we want.
    if (Loop *ParentLoop = L->getParentLoop()) {
      Loop::iterator I = find(*ParentLoop, L);
      assert(I != ParentLoop->end() && "Couldn't find loop");
      ParentLoop->removeChildLoop(I);
    } else {
      Loop::iterator I = find(*LI, L);
      assert(I != LI->end() && "Couldn't find loop");
      LI->removeLoop(I);
    }
    LI->destroy(L);
  }
}

static Loop *getOutermostLoop(Loop *L) {
  while (Loop *Parent = L->getParentLoop())
    L = Parent;
  return L;
}

void llvm::breakLoopBackedge(Loop *L, DominatorTree &DT, ScalarEvolution &SE,
                             LoopInfo &LI, MemorySSA *MSSA) {
  auto *Latch = L->getLoopLatch();
  assert(Latch && "multiple latches not yet supported");
  auto *Header = L->getHeader();
  Loop *OutermostLoop = getOutermostLoop(L);

  SE.forgetLoop(L);

  // Note: By splitting the backedge, and then explicitly making it unreachable
  // we gracefully handle corner cases such as non-bottom tested loops and the
  // like. We also have the benefit of being able to reuse existing well tested
  // code. It might be worth special casing the common bottom tested case at
  // some point to avoid code churn.

  std::unique_ptr<MemorySSAUpdater> MSSAU;
  if (MSSA)
    MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);

  auto *BackedgeBB = SplitEdge(Latch, Header, &DT, &LI, MSSAU.get());

  DomTreeUpdater DTU(&DT, DomTreeUpdater::UpdateStrategy::Eager);
  (void)changeToUnreachable(BackedgeBB->getTerminator(),
                            /*PreserveLCSSA*/ true, &DTU, MSSAU.get());

  // Erase (and destroy) this loop instance. Handles relinking sub-loops
  // and blocks within the loop as needed.
  LI.erase(L);

  // If the loop we broke had a parent, then changeToUnreachable might have
  // caused a block to be removed from the parent loop (see loop_nest_lcssa
  // test case in zero-btc.ll for an example), thus changing the parent's
  // exit blocks. If that happened, we need to rebuild LCSSA on the outermost
  // loop which might have had a block removed.
  if (OutermostLoop != L)
    formLCSSARecursively(*OutermostLoop, DT, &LI, &SE);
}

/// Checks if \p L has an exit through the latch block, and that all other
/// exits (if any) are "deoptimizing" exits. Returns the branch instruction
/// terminating the loop latch if the check succeeds, nullptr otherwise.
static BranchInst *getExpectedExitLoopLatchBranch(Loop *L) {
  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return nullptr;

  BranchInst *LatchBR = dyn_cast<BranchInst>(Latch->getTerminator());
  if (!LatchBR || LatchBR->getNumSuccessors() != 2 || !L->isLoopExiting(Latch))
    return nullptr;

  assert((LatchBR->getSuccessor(0) == L->getHeader() ||
          LatchBR->getSuccessor(1) == L->getHeader()) &&
         "At least one edge out of the latch must go to the header");

  SmallVector<BasicBlock *, 4> ExitBlocks;
  L->getUniqueNonLatchExitBlocks(ExitBlocks);
  if (any_of(ExitBlocks, [](const BasicBlock *EB) {
        return !EB->getTerminatingDeoptimizeCall();
      }))
    return nullptr;

  return LatchBR;
}

Optional<unsigned>
llvm::getLoopEstimatedTripCount(Loop *L,
                                unsigned *EstimatedLoopInvocationWeight) {
  // Support loops with an exiting latch where all other existing exits only
  // deoptimize.
  BranchInst *LatchBranch = getExpectedExitLoopLatchBranch(L);
  if (!LatchBranch)
    return None;

  // To estimate the number of times the loop body was executed, we want to
  // know the number of times the backedge was taken, vs. the number of times
  // we exited the loop.
  uint64_t BackedgeTakenWeight, LatchExitWeight;
  if (!LatchBranch->extractProfMetadata(BackedgeTakenWeight, LatchExitWeight))
    return None;

  if (LatchBranch->getSuccessor(0) != L->getHeader())
    std::swap(BackedgeTakenWeight, LatchExitWeight);

  if (!LatchExitWeight)
    return None;

  if (EstimatedLoopInvocationWeight)
    *EstimatedLoopInvocationWeight = LatchExitWeight;

  // Estimated backedge taken count is a ratio of the backedge taken weight by
  // the weight of the edge exiting the loop, rounded to nearest.
  uint64_t BackedgeTakenCount =
      llvm::divideNearest(BackedgeTakenWeight, LatchExitWeight);
  // Estimated trip count is one plus estimated backedge taken count.
  return BackedgeTakenCount + 1;
}
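
// A small worked example of the estimate above (hypothetical profile data):
// if the latch branch carries !prof !{!"branch_weights", i32 127, i32 1} with
// the backedge as successor 0, then BackedgeTakenWeight = 127 and
// LatchExitWeight = 1, so the estimated backedge-taken count is
// divideNearest(127, 1) = 127 and the estimated trip count is 128.
// setLoopEstimatedTripCount below performs the inverse computation when
// re-annotating a transformed loop.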

bool llvm::setLoopEstimatedTripCount(Loop *L, unsigned EstimatedTripCount,
                                     unsigned EstimatedloopInvocationWeight) {
  // Support loops with an exiting latch where all other existing exits only
  // deoptimize.
  BranchInst *LatchBranch = getExpectedExitLoopLatchBranch(L);
  if (!LatchBranch)
    return false;

  // Calculate taken and exit weights.
  unsigned LatchExitWeight = 0;
  unsigned BackedgeTakenWeight = 0;

  if (EstimatedTripCount > 0) {
    LatchExitWeight = EstimatedloopInvocationWeight;
    BackedgeTakenWeight = (EstimatedTripCount - 1) * LatchExitWeight;
  }

  // Make a swap if the back edge is taken when the condition is "false".
  if (LatchBranch->getSuccessor(0) != L->getHeader())
    std::swap(BackedgeTakenWeight, LatchExitWeight);

  MDBuilder MDB(LatchBranch->getContext());

  // Set/Update profile metadata.
  LatchBranch->setMetadata(
      LLVMContext::MD_prof,
      MDB.createBranchWeights(BackedgeTakenWeight, LatchExitWeight));

  return true;
}

bool llvm::hasIterationCountInvariantInParent(Loop *InnerLoop,
                                              ScalarEvolution &SE) {
  Loop *OuterL = InnerLoop->getParentLoop();
  if (!OuterL)
    return true;

  // Get the backedge taken count for the inner loop.
  BasicBlock *InnerLoopLatch = InnerLoop->getLoopLatch();
  const SCEV *InnerLoopBECountSC = SE.getExitCount(InnerLoop, InnerLoopLatch);
  if (isa<SCEVCouldNotCompute>(InnerLoopBECountSC) ||
      !InnerLoopBECountSC->getType()->isIntegerTy())
    return false;

  // Get whether the count is invariant to the outer loop.
  ScalarEvolution::LoopDisposition LD =
      SE.getLoopDisposition(InnerLoopBECountSC, OuterL);
  if (LD != ScalarEvolution::LoopInvariant)
    return false;

  return true;
}

Value *llvm::createMinMaxOp(IRBuilderBase &Builder, RecurKind RK, Value *Left,
                            Value *Right) {
  CmpInst::Predicate Pred;
  switch (RK) {
  default:
    llvm_unreachable("Unknown min/max recurrence kind");
  case RecurKind::UMin:
    Pred = CmpInst::ICMP_ULT;
    break;
  case RecurKind::UMax:
    Pred = CmpInst::ICMP_UGT;
    break;
  case RecurKind::SMin:
    Pred = CmpInst::ICMP_SLT;
    break;
  case RecurKind::SMax:
    Pred = CmpInst::ICMP_SGT;
    break;
  case RecurKind::FMin:
    Pred = CmpInst::FCMP_OLT;
    break;
  case RecurKind::FMax:
    Pred = CmpInst::FCMP_OGT;
    break;
  }

  Value *Cmp = Builder.CreateCmp(Pred, Left, Right, "rdx.minmax.cmp");
  Value *Select = Builder.CreateSelect(Cmp, Left, Right, "rdx.minmax.select");
  return Select;
}
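
// For example (illustrative IR only), createMinMaxOp with RecurKind::SMax on
// i32 operands emits a compare/select pair such as:
//
//   %rdx.minmax.cmp = icmp sgt i32 %left, %right
//   %rdx.minmax.select = select i1 %rdx.minmax.cmp, i32 %left, i32 %right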

// Helper to generate an ordered reduction.
Value *llvm::getOrderedReduction(IRBuilderBase &Builder, Value *Acc,
                                 Value *Src, unsigned Op, RecurKind RdxKind,
                                 ArrayRef<Value *> RedOps) {
  unsigned VF = cast<FixedVectorType>(Src->getType())->getNumElements();

  // Extract and apply reduction ops in ascending order:
  // e.g. ((((Acc + Scl[0]) + Scl[1]) + Scl[2]) + ...) + Scl[VF-1]
  Value *Result = Acc;
  for (unsigned ExtractIdx = 0; ExtractIdx != VF; ++ExtractIdx) {
    Value *Ext =
        Builder.CreateExtractElement(Src, Builder.getInt32(ExtractIdx));

    if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
      Result = Builder.CreateBinOp((Instruction::BinaryOps)Op, Result, Ext,
                                   "bin.rdx");
    } else {
      assert(RecurrenceDescriptor::isMinMaxRecurrenceKind(RdxKind) &&
             "Invalid min/max");
      Result = createMinMaxOp(Builder, RdxKind, Result, Ext);
    }

    if (!RedOps.empty())
      propagateIRFlags(Result, RedOps);
  }

  return Result;
}

// Helper to generate a log2 shuffle reduction.
Value *llvm::getShuffleReduction(IRBuilderBase &Builder, Value *Src,
                                 unsigned Op, RecurKind RdxKind,
                                 ArrayRef<Value *> RedOps) {
  unsigned VF = cast<FixedVectorType>(Src->getType())->getNumElements();
  // VF is a power of 2 so we can emit the reduction using log2(VF) shuffles
  // and vector ops, reducing the set of values being computed by half each
  // round.
  assert(isPowerOf2_32(VF) &&
         "Reduction emission only supported for pow2 vectors!");
  Value *TmpVec = Src;
  SmallVector<int, 32> ShuffleMask(VF);
  for (unsigned i = VF; i != 1; i >>= 1) {
    // Move the upper half of the vector to the lower half.
    for (unsigned j = 0; j != i / 2; ++j)
      ShuffleMask[j] = i / 2 + j;

    // Fill the rest of the mask with undef.
    std::fill(&ShuffleMask[i / 2], ShuffleMask.end(), -1);

    Value *Shuf = Builder.CreateShuffleVector(TmpVec, ShuffleMask, "rdx.shuf");

    if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
      // The builder propagates its fast-math-flags setting.
      TmpVec = Builder.CreateBinOp((Instruction::BinaryOps)Op, TmpVec, Shuf,
                                   "bin.rdx");
    } else {
      assert(RecurrenceDescriptor::isMinMaxRecurrenceKind(RdxKind) &&
             "Invalid min/max");
      TmpVec = createMinMaxOp(Builder, RdxKind, TmpVec, Shuf);
    }
    if (!RedOps.empty())
      propagateIRFlags(TmpVec, RedOps);

    // We may compute the reassociated scalar ops in a way that does not
    // preserve nsw/nuw etc. Conservatively, drop those flags.
    if (auto *ReductionInst = dyn_cast<Instruction>(TmpVec))
      ReductionInst->dropPoisonGeneratingFlags();
  }
  // The result is in the first element of the vector.
  return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
}
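
// A worked example of the log2 shuffle reduction (illustrative IR only): for
// an add reduction of <4 x i32> %v, the loop above emits roughly
//
//   %s1 = shufflevector <4 x i32> %v, <4 x i32> poison,
//                       <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
//   %r1 = add <4 x i32> %v, %s1
//   %s2 = shufflevector <4 x i32> %r1, <4 x i32> poison,
//                       <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
//   %r2 = add <4 x i32> %r1, %s2
//   %res = extractelement <4 x i32> %r2, i32 0
//
// i.e. two shuffle/add rounds instead of three sequential scalar adds.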

Value *llvm::createSimpleTargetReduction(IRBuilderBase &Builder,
                                         const TargetTransformInfo *TTI,
                                         Value *Src, RecurKind RdxKind,
                                         ArrayRef<Value *> RedOps) {
  TargetTransformInfo::ReductionFlags RdxFlags;
  RdxFlags.IsMaxOp = RdxKind == RecurKind::SMax || RdxKind == RecurKind::UMax ||
                     RdxKind == RecurKind::FMax;
  RdxFlags.IsSigned = RdxKind == RecurKind::SMax || RdxKind == RecurKind::SMin;

  auto *SrcVecEltTy = cast<VectorType>(Src->getType())->getElementType();
  switch (RdxKind) {
  case RecurKind::Add:
    return Builder.CreateAddReduce(Src);
  case RecurKind::Mul:
    return Builder.CreateMulReduce(Src);
  case RecurKind::And:
    return Builder.CreateAndReduce(Src);
  case RecurKind::Or:
    return Builder.CreateOrReduce(Src);
  case RecurKind::Xor:
    return Builder.CreateXorReduce(Src);
  case RecurKind::FAdd:
    return Builder.CreateFAddReduce(ConstantFP::getNegativeZero(SrcVecEltTy),
                                    Src);
  case RecurKind::FMul:
    return Builder.CreateFMulReduce(ConstantFP::get(SrcVecEltTy, 1.0), Src);
  case RecurKind::SMax:
    return Builder.CreateIntMaxReduce(Src, true);
  case RecurKind::SMin:
    return Builder.CreateIntMinReduce(Src, true);
  case RecurKind::UMax:
    return Builder.CreateIntMaxReduce(Src, false);
  case RecurKind::UMin:
    return Builder.CreateIntMinReduce(Src, false);
  case RecurKind::FMax:
    return Builder.CreateFPMaxReduce(Src);
  case RecurKind::FMin:
    return Builder.CreateFPMinReduce(Src);
  default:
    llvm_unreachable("Unhandled opcode");
  }
}

Value *llvm::createTargetReduction(IRBuilderBase &B,
                                   const TargetTransformInfo *TTI,
                                   const RecurrenceDescriptor &Desc,
                                   Value *Src) {
  // TODO: Support in-order reductions based on the recurrence descriptor.
  // All ops in the reduction inherit fast-math-flags from the recurrence
  // descriptor.
  IRBuilderBase::FastMathFlagGuard FMFGuard(B);
  B.setFastMathFlags(Desc.getFastMathFlags());
  return createSimpleTargetReduction(B, TTI, Src, Desc.getRecurrenceKind());
}

Value *llvm::createOrderedReduction(IRBuilderBase &B,
                                    const RecurrenceDescriptor &Desc,
                                    Value *Src, Value *Start) {
  assert(Desc.getRecurrenceKind() == RecurKind::FAdd &&
         "Unexpected reduction kind");
  assert(Src->getType()->isVectorTy() && "Expected a vector type");
  assert(!Start->getType()->isVectorTy() && "Expected a scalar type");

  return B.CreateFAddReduce(Start, Src);
}

void llvm::propagateIRFlags(Value *I, ArrayRef<Value *> VL, Value *OpValue) {
  auto *VecOp = dyn_cast<Instruction>(I);
  if (!VecOp)
    return;
  auto *Intersection = (OpValue == nullptr) ? dyn_cast<Instruction>(VL[0])
                                            : dyn_cast<Instruction>(OpValue);
  if (!Intersection)
    return;
  const unsigned Opcode = Intersection->getOpcode();
  VecOp->copyIRFlags(Intersection);
  for (auto *V : VL) {
    auto *Instr = dyn_cast<Instruction>(V);
    if (!Instr)
      continue;
    if (OpValue == nullptr || Opcode == Instr->getOpcode())
      VecOp->andIRFlags(V);
  }
}

bool llvm::isKnownNegativeInLoop(const SCEV *S, const Loop *L,
                                 ScalarEvolution &SE) {
  const SCEV *Zero = SE.getZero(S->getType());
  return SE.isAvailableAtLoopEntry(S, L) &&
         SE.isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SLT, S, Zero);
}

bool llvm::isKnownNonNegativeInLoop(const SCEV *S, const Loop *L,
                                    ScalarEvolution &SE) {
  const SCEV *Zero = SE.getZero(S->getType());
  return SE.isAvailableAtLoopEntry(S, L) &&
         SE.isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SGE, S, Zero);
}

bool llvm::cannotBeMinInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             bool Signed) {
  unsigned BitWidth = cast<IntegerType>(S->getType())->getBitWidth();
  APInt Min = Signed ? APInt::getSignedMinValue(BitWidth)
                     : APInt::getMinValue(BitWidth);
  auto Predicate = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
  return SE.isAvailableAtLoopEntry(S, L) &&
         SE.isLoopEntryGuardedByCond(L, Predicate, S, SE.getConstant(Min));
}

bool llvm::cannotBeMaxInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             bool Signed) {
  unsigned BitWidth = cast<IntegerType>(S->getType())->getBitWidth();
  APInt Max = Signed ? APInt::getSignedMaxValue(BitWidth)
                     : APInt::getMaxValue(BitWidth);
  auto Predicate = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
  return SE.isAvailableAtLoopEntry(S, L) &&
         SE.isLoopEntryGuardedByCond(L, Predicate, S, SE.getConstant(Max));
}

//===----------------------------------------------------------------------===//
// rewriteLoopExitValues - Optimize IV users outside the loop.
// As a side effect, reduces the amount of IV processing within the loop.
//===----------------------------------------------------------------------===//

// Return true if the SCEV expansion generated by the rewriter can replace the
// original value. SCEV guarantees that it produces the same value, but the way
// it is produced may be illegal IR. Ideally, this function will only be
// called for verification.
static bool isValidRewrite(ScalarEvolution *SE, Value *FromVal, Value *ToVal) {
  // If an SCEV expression subsumed multiple pointers, its expansion could
  // reassociate the GEP changing the base pointer. This is illegal because the
  // final address produced by a GEP chain must be inbounds relative to its
  // underlying object. Otherwise basic alias analysis, among other things,
  // could fail in a dangerous way. Ultimately, SCEV will be improved to avoid
  // producing an expression involving multiple pointers. Until then, we must
  // bail out here.
  //
  // Retrieve the pointer operand of the GEP. Don't use getUnderlyingObject
  // because it understands lcssa phis while SCEV does not.
  Value *FromPtr = FromVal;
  Value *ToPtr = ToVal;
  if (auto *GEP = dyn_cast<GEPOperator>(FromVal))
    FromPtr = GEP->getPointerOperand();

  if (auto *GEP = dyn_cast<GEPOperator>(ToVal))
    ToPtr = GEP->getPointerOperand();

  if (FromPtr != FromVal || ToPtr != ToVal) {
    // Quickly check the common case.
    if (FromPtr == ToPtr)
      return true;

    // SCEV may have rewritten an expression that produces the GEP's pointer
    // operand. That's ok as long as the pointer operand has the same base
    // pointer. Unlike getUnderlyingObject(), getPointerBase() will find the
    // base of a recurrence. This handles the case in which SCEV expansion
    // converts a pointer type recurrence into a nonrecurrent pointer base
    // indexed by an integer recurrence.

    // If the GEP base pointer is a vector of pointers, abort.
    if (!FromPtr->getType()->isPointerTy() || !ToPtr->getType()->isPointerTy())
      return false;

    const SCEV *FromBase = SE->getPointerBase(SE->getSCEV(FromPtr));
    const SCEV *ToBase = SE->getPointerBase(SE->getSCEV(ToPtr));
    if (FromBase == ToBase)
      return true;

    LLVM_DEBUG(dbgs() << "rewriteLoopExitValues: GEP rewrite bail out "
                      << *FromBase << " != " << *ToBase << "\n");

    return false;
  }
  return true;
}

static bool hasHardUserWithinLoop(const Loop *L, const Instruction *I) {
  SmallPtrSet<const Instruction *, 8> Visited;
  SmallVector<const Instruction *, 8> WorkList;
  Visited.insert(I);
  WorkList.push_back(I);
  while (!WorkList.empty()) {
    const Instruction *Curr = WorkList.pop_back_val();
    // This use is outside the loop, nothing to do.
    if (!L->contains(Curr))
      continue;
    // Do we assume it is a "hard" use which will not be eliminated easily?
    if (Curr->mayHaveSideEffects())
      return true;
    // Otherwise, add all its users to the worklist.
    for (auto U : Curr->users()) {
      auto *UI = cast<Instruction>(U);
      if (Visited.insert(UI).second)
        WorkList.push_back(UI);
    }
  }
  return false;
}

// Collect information about PHI nodes which can be transformed in
// rewriteLoopExitValues.
struct RewritePhi {
  PHINode *PN;               // For which PHI node is this replacement?
  unsigned Ith;              // For which incoming value?
  const SCEV *ExpansionSCEV; // The SCEV of the incoming value we are rewriting.
  Instruction *ExpansionPoint; // Where we'd like to expand that SCEV.
  bool HighCost;               // Is this expansion a high-cost?

  Value *Expansion = nullptr;
  bool ValidRewrite = false;

  RewritePhi(PHINode *P, unsigned I, const SCEV *Val, Instruction *ExpansionPt,
             bool H)
      : PN(P), Ith(I), ExpansionSCEV(Val), ExpansionPoint(ExpansionPt),
        HighCost(H) {}
};

// Check whether it is possible to delete the loop after rewriting the exit
// value. If it is possible, ignore ReplaceExitValue and do the rewriting
// aggressively.
static bool canLoopBeDeleted(Loop *L,
                             SmallVector<RewritePhi, 8> &RewritePhiSet) {
  BasicBlock *Preheader = L->getLoopPreheader();
  // If there is no preheader, the loop will not be deleted.
  if (!Preheader)
    return false;

  // In the LoopDeletion pass, a loop can be deleted even when
  // ExitingBlocks.size() > 1; we skip the multiple-ExitingBlocks case here
  // for simplicity.
  // TODO: If we see a testcase where multiple ExitingBlocks can be deleted
  // after exit value rewriting, we can enhance the logic here.
  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
  SmallVector<BasicBlock *, 8> ExitBlocks;
  L->getUniqueExitBlocks(ExitBlocks);
  if (ExitBlocks.size() != 1 || ExitingBlocks.size() != 1)
    return false;

  BasicBlock *ExitBlock = ExitBlocks[0];
  BasicBlock::iterator BI = ExitBlock->begin();
  while (PHINode *P = dyn_cast<PHINode>(BI)) {
    Value *Incoming = P->getIncomingValueForBlock(ExitingBlocks[0]);

    // If the Incoming value of P is found in RewritePhiSet, we know it
    // could be rewritten to use a loop invariant value in the transformation
    // phase later. Skip it in the loop invariant check below.
    bool found = false;
    for (const RewritePhi &Phi : RewritePhiSet) {
      if (!Phi.ValidRewrite)
        continue;
      unsigned i = Phi.Ith;
      if (Phi.PN == P && (Phi.PN)->getIncomingValue(i) == Incoming) {
        found = true;
        break;
      }
    }

    Instruction *I;
    if (!found && (I = dyn_cast<Instruction>(Incoming)))
      if (!L->hasLoopInvariantOperands(I))
        return false;

    ++BI;
  }

  for (auto *BB : L->blocks())
    if (llvm::any_of(*BB, [](Instruction &I) {
          return I.mayHaveSideEffects();
        }))
      return false;

  return true;
}

int llvm::rewriteLoopExitValues(Loop *L, LoopInfo *LI, TargetLibraryInfo *TLI,
                                ScalarEvolution *SE,
                                const TargetTransformInfo *TTI,
                                SCEVExpander &Rewriter, DominatorTree *DT,
                                ReplaceExitVal ReplaceExitValue,
                                SmallVector<WeakTrackingVH, 16> &DeadInsts) {
  // Check a pre-condition.
  assert(L->isRecursivelyLCSSAForm(*DT, *LI) &&
         "Indvars did not preserve LCSSA!");

  SmallVector<BasicBlock *, 8> ExitBlocks;
  L->getUniqueExitBlocks(ExitBlocks);

  SmallVector<RewritePhi, 8> RewritePhiSet;
  // Find all values that are computed inside the loop, but used outside of it.
  // Because of LCSSA, these values will only occur in LCSSA PHI Nodes. Scan
  // the exit blocks of the loop to find them.
  for (BasicBlock *ExitBB : ExitBlocks) {
    // If there are no PHI nodes in this exit block, then no values defined
    // inside the loop are used on this path, skip it.
    PHINode *PN = dyn_cast<PHINode>(ExitBB->begin());
    if (!PN) continue;

    unsigned NumPreds = PN->getNumIncomingValues();

    // Iterate over all of the PHI nodes.
    BasicBlock::iterator BBI = ExitBB->begin();
    while ((PN = dyn_cast<PHINode>(BBI++))) {
      if (PN->use_empty())
        continue; // dead use, don't replace it

      if (!SE->isSCEVable(PN->getType()))
        continue;

      // It's necessary to tell ScalarEvolution about this explicitly so that
      // it can walk the def-use list and forget all SCEVs, as it may not be
      // watching the PHI itself. Once the new exit value is in place, there
      // may not be a def-use connection between the loop and every instruction
      // which got a SCEVAddRecExpr for that loop.
      SE->forgetValue(PN);

      // Iterate over all of the values in all the PHI nodes.
      for (unsigned i = 0; i != NumPreds; ++i) {
        // If the value being merged in is not integer or is not defined
        // in the loop, skip it.
        Value *InVal = PN->getIncomingValue(i);
        if (!isa<Instruction>(InVal))
          continue;

        // If this pred is for a subloop, not L itself, skip it.
        if (LI->getLoopFor(PN->getIncomingBlock(i)) != L)
          continue; // The Block is in a subloop, skip it.

        // Check that InVal is defined in the loop.
        Instruction *Inst = cast<Instruction>(InVal);
        if (!L->contains(Inst))
          continue;

        // Okay, this instruction has a user outside of the current loop
        // and varies predictably *inside* the loop. Evaluate the value it
        // contains when the loop exits, if possible. We prefer to start with
        // expressions which are true for all exits (so as to maximize
        // expression reuse by the SCEVExpander), but resort to per-exit
        // evaluation if that fails.
        const SCEV *ExitValue = SE->getSCEVAtScope(Inst, L->getParentLoop());
        if (isa<SCEVCouldNotCompute>(ExitValue) ||
            !SE->isLoopInvariant(ExitValue, L) ||
            !isSafeToExpand(ExitValue, *SE)) {
          // TODO: This should probably be sunk into SCEV in some way; maybe a
          // getSCEVForExit(SCEV*, L, ExitingBB)? It can be generalized for
          // most SCEV expressions and other recurrence types (e.g. shift
          // recurrences). Is there existing code we can reuse?
          const SCEV *ExitCount = SE->getExitCount(L, PN->getIncomingBlock(i));
          if (isa<SCEVCouldNotCompute>(ExitCount))
            continue;
          if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Inst)))
            if (AddRec->getLoop() == L)
              ExitValue = AddRec->evaluateAtIteration(ExitCount, *SE);
          if (isa<SCEVCouldNotCompute>(ExitValue) ||
              !SE->isLoopInvariant(ExitValue, L) ||
              !isSafeToExpand(ExitValue, *SE))
            continue;
        }

        // Computing the value outside of the loop brings no benefit if it is
        // definitely used inside the loop in a way which cannot be optimized
        // away. Avoid doing so unless we know we have a value which computes
        // the ExitValue already. TODO: This should be merged into the SCEV
        // expander to leverage its knowledge of existing expressions.
        if (ReplaceExitValue != AlwaysRepl && !isa<SCEVConstant>(ExitValue) &&
            !isa<SCEVUnknown>(ExitValue) && hasHardUserWithinLoop(L, Inst))
          continue;

        // Check if expansions of this SCEV would count as being high cost.
        bool HighCost = Rewriter.isHighCostExpansion(
            ExitValue, L, SCEVCheapExpansionBudget, TTI, Inst);

        // Note that we must not perform expansions until after
        // we query *all* the costs, because if we perform a temporary
        // expansion in between, one that we might not intend to keep, said
        // expansion *may* affect the cost calculation of the next SCEVs we
        // query, and the next SCEV may erroneously get a smaller cost.

        // Collect all the candidate PHINodes to be rewritten.
        RewritePhiSet.emplace_back(PN, i, ExitValue, Inst, HighCost);
      }
    }
  }

  // Now that we've done preliminary filtering and billed all the SCEVs,
  // we can perform the last sanity check - the expansion must be valid.
  for (RewritePhi &Phi : RewritePhiSet) {
    Phi.Expansion = Rewriter.expandCodeFor(Phi.ExpansionSCEV, Phi.PN->getType(),
                                           Phi.ExpansionPoint);

    LLVM_DEBUG(dbgs() << "rewriteLoopExitValues: AfterLoopVal = "
                      << *(Phi.Expansion) << '\n'
                      << " LoopVal = " << *(Phi.ExpansionPoint) << "\n");

    // FIXME: isValidRewrite() is a hack. It should be an assert, eventually.
    Phi.ValidRewrite = isValidRewrite(SE, Phi.ExpansionPoint, Phi.Expansion);
    if (!Phi.ValidRewrite) {
      DeadInsts.push_back(Phi.Expansion);
      continue;
    }

#ifndef NDEBUG
    // If we reuse an instruction from a loop which is neither L nor one of
    // its containing loops, we end up breaking LCSSA form for this loop by
    // creating a new use of its instruction.
    if (auto *ExitInsn = dyn_cast<Instruction>(Phi.Expansion))
      if (auto *EVL = LI->getLoopFor(ExitInsn->getParent()))
        if (EVL != L)
          assert(EVL->contains(L) && "LCSSA breach detected!");
#endif
  }

  // TODO: after isValidRewrite() is an assertion, evaluate whether
  // it is beneficial to change how we calculate high-cost:
  // if we have SCEV 'A' which we know we will expand, should we calculate
  // the cost of other SCEVs after expanding SCEV 'A',
  // thus potentially giving a cost bonus to those other SCEVs?

  bool LoopCanBeDel = canLoopBeDeleted(L, RewritePhiSet);
  int NumReplaced = 0;

  // Transformation.
  for (const RewritePhi &Phi : RewritePhiSet) {
    if (!Phi.ValidRewrite)
      continue;

    PHINode *PN = Phi.PN;
    Value *ExitVal = Phi.Expansion;

    // Only do the rewrite when the ExitValue can be expanded cheaply.
    // If LoopCanBeDel is true, rewrite the exit value aggressively.
    if (ReplaceExitValue == OnlyCheapRepl && !LoopCanBeDel && Phi.HighCost) {
      DeadInsts.push_back(ExitVal);
      continue;
    }

    NumReplaced++;
    Instruction *Inst = cast<Instruction>(PN->getIncomingValue(Phi.Ith));
    PN->setIncomingValue(Phi.Ith, ExitVal);

    // If this instruction is dead now, delete it. Don't do it now to avoid
    // invalidating iterators.
    if (isInstructionTriviallyDead(Inst, TLI))
      DeadInsts.push_back(Inst);

    // Replace PN with ExitVal if that is legal and does not break LCSSA.
    if (PN->getNumIncomingValues() == 1 &&
        LI->replacementPreservesLCSSAForm(PN, ExitVal)) {
      PN->replaceAllUsesWith(ExitVal);
      PN->eraseFromParent();
    }
  }

  // The insertion point instruction may have been deleted; clear it out
  // so that the rewriter doesn't trip over it later.
  Rewriter.clearInsertPoint();
  return NumReplaced;
}
1416
1417 /// Set weights for \p UnrolledLoop and \p RemainderLoop based on weights for
1418 /// \p OrigLoop.
1419 void llvm::setProfileInfoAfterUnrolling(Loop *OrigLoop, Loop *UnrolledLoop,
1420 Loop *RemainderLoop, uint64_t UF) {
1421 assert(UF > 0 && "Zero unrolled factor is not supported");
1422 assert(UnrolledLoop != RemainderLoop &&
1423 "Unrolled and Remainder loops are expected to distinct");
1424
1425 // Get number of iterations in the original scalar loop.
1426 unsigned OrigLoopInvocationWeight = 0;
1427 Optional<unsigned> OrigAverageTripCount =
1428 getLoopEstimatedTripCount(OrigLoop, &OrigLoopInvocationWeight);
1429 if (!OrigAverageTripCount)
1430 return;
1431
1432 // Calculate number of iterations in unrolled loop.
1433 unsigned UnrolledAverageTripCount = *OrigAverageTripCount / UF;
1434 // Calculate number of iterations for remainder loop.
1435 unsigned RemainderAverageTripCount = *OrigAverageTripCount % UF;
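  // For example (illustrative numbers): with *OrigAverageTripCount == 10 and
  // UF == 4, the unrolled loop gets an estimated trip count of 10 / 4 == 2
  // and the remainder loop 10 % 4 == 2, both annotated below with the
  // original invocation weight.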
1436
1437 setLoopEstimatedTripCount(UnrolledLoop, UnrolledAverageTripCount,
1438 OrigLoopInvocationWeight);
1439 setLoopEstimatedTripCount(RemainderLoop, RemainderAverageTripCount,
1440 OrigLoopInvocationWeight);
1441 }
1442
1443 /// Utility that implements appending of loops onto a worklist.
1444 /// Loops are added in preorder (analogous to reverse postorder for trees),
1445 /// and the worklist is processed LIFO.
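/// For example (illustrative): for a nest L1 containing L2 containing L3, the
/// loops are inserted as [L1, L2, L3]; because the worklist pops LIFO, L3 is
/// processed first and L1 last, so child loops are always visited before
/// their parents.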
1446 template <typename RangeT>
1447 void llvm::appendReversedLoopsToWorklist(
1448 RangeT &&Loops, SmallPriorityWorklist<Loop *, 4> &Worklist) {
1449 // We use an internal worklist to build up the preorder traversal without
1450 // recursion.
1451 SmallVector<Loop *, 4> PreOrderLoops, PreOrderWorklist;
1452
1453 // We walk the initial sequence of loops in reverse because we generally want
1454 // to visit defs before uses and the worklist is LIFO.
1455 for (Loop *RootL : Loops) {
1456 assert(PreOrderLoops.empty() && "Must start with an empty preorder walk.");
1457 assert(PreOrderWorklist.empty() &&
1458 "Must start with an empty preorder walk worklist.");
1459 PreOrderWorklist.push_back(RootL);
1460 do {
1461 Loop *L = PreOrderWorklist.pop_back_val();
1462 PreOrderWorklist.append(L->begin(), L->end());
1463 PreOrderLoops.push_back(L);
1464 } while (!PreOrderWorklist.empty());
1465
1466 Worklist.insert(std::move(PreOrderLoops));
1467 PreOrderLoops.clear();
1468 }
1469 }
1470
1471 template <typename RangeT>
1472 void llvm::appendLoopsToWorklist(RangeT &&Loops,
1473 SmallPriorityWorklist<Loop *, 4> &Worklist) {
1474 appendReversedLoopsToWorklist(reverse(Loops), Worklist);
1475 }
1476
1477 template void llvm::appendLoopsToWorklist<ArrayRef<Loop *> &>(
1478 ArrayRef<Loop *> &Loops, SmallPriorityWorklist<Loop *, 4> &Worklist);
1479
1480 template void
1481 llvm::appendLoopsToWorklist<Loop &>(Loop &L,
1482 SmallPriorityWorklist<Loop *, 4> &Worklist);
1483
1484 void llvm::appendLoopsToWorklist(LoopInfo &LI,
1485 SmallPriorityWorklist<Loop *, 4> &Worklist) {
1486 appendReversedLoopsToWorklist(LI, Worklist);
1487 }
1488
1489 Loop *llvm::cloneLoop(Loop *L, Loop *PL, ValueToValueMapTy &VM,
1490 LoopInfo *LI, LPPassManager *LPM) {
1491 Loop &New = *LI->AllocateLoop();
1492 if (PL)
1493 PL->addChildLoop(&New);
1494 else
1495 LI->addTopLevelLoop(&New);
1496
1497 if (LPM)
1498 LPM->addLoop(New);
1499
1500 // Add all of the blocks in L to the new loop.
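  // Only blocks whose innermost containing loop is L are added here; blocks
  // that belong to subloops are added when those subloops are cloned below.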
1501 for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
1502 I != E; ++I)
1503 if (LI->getLoopFor(*I) == L)
1504 New.addBasicBlockToLoop(cast<BasicBlock>(VM[*I]), *LI);
1505
1506 // Add all of the subloops to the new loop.
1507 for (Loop *I : *L)
1508 cloneLoop(I, &New, VM, LI, LPM);
1509
1510 return &New;
1511 }
1512
1513 /// IR Values for the lower and upper bounds of a pointer evolution. We
1514 /// need to use value-handles because SCEV expansion can invalidate previously
1515 /// expanded values. Thus expansion of a pointer can invalidate the bounds for
1516 /// a previous one.
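/// (TrackingVH follows replaceAllUsesWith updates, so these handles keep
/// referring to the current lower/upper bound values even after such
/// invalidation.)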
1517 struct PointerBounds {
1518 TrackingVH<Value> Start;
1519 TrackingVH<Value> End;
1520 };
1521
1522 /// Expand code for the lower and upper bound of the pointer group \p CG
1523 /// in \p TheLoop. \return the values for the bounds.
1524 static PointerBounds expandBounds(const RuntimeCheckingPtrGroup *CG,
1525 Loop *TheLoop, Instruction *Loc,
1526 SCEVExpander &Exp) {
1527 LLVMContext &Ctx = Loc->getContext();
1528 Type *PtrArithTy = Type::getInt8PtrTy(Ctx, CG->AddressSpace);
1529
1530 Value *Start = nullptr, *End = nullptr;
1531 LLVM_DEBUG(dbgs() << "LAA: Adding RT check for range:\n");
1532 Start = Exp.expandCodeFor(CG->Low, PtrArithTy, Loc);
1533 End = Exp.expandCodeFor(CG->High, PtrArithTy, Loc);
1534 LLVM_DEBUG(dbgs() << "Start: " << *CG->Low << " End: " << *CG->High << "\n");
1535 return {Start, End};
1536 }
1537
1538 /// Turns a collection of checks into a collection of expanded upper and
1539 /// lower bounds for both pointers in the check.
1540 static SmallVector<std::pair<PointerBounds, PointerBounds>, 4>
1541 expandBounds(const SmallVectorImpl<RuntimePointerCheck> &PointerChecks, Loop *L,
1542 Instruction *Loc, SCEVExpander &Exp) {
1543 SmallVector<std::pair<PointerBounds, PointerBounds>, 4> ChecksWithBounds;
1544
1545 // Here we're relying on the SCEV Expander's cache to only emit code for the
1546 // same bounds once.
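  // In particular, if the same pointer group participates in several checks,
  // its Low/High SCEVs expand to the previously emitted values instead of
  // being re-expanded.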
1547 transform(PointerChecks, std::back_inserter(ChecksWithBounds),
1548 [&](const RuntimePointerCheck &Check) {
1549 PointerBounds First = expandBounds(Check.first, L, Loc, Exp),
1550 Second = expandBounds(Check.second, L, Loc, Exp);
1551 return std::make_pair(First, Second);
1552 });
1553
1554 return ChecksWithBounds;
1555 }
1556
1557 std::pair<Instruction *, Instruction *> llvm::addRuntimeChecks(
1558 Instruction *Loc, Loop *TheLoop,
1559 const SmallVectorImpl<RuntimePointerCheck> &PointerChecks,
1560 SCEVExpander &Exp) {
1561 // TODO: Move noalias annotation code from LoopVersioning here and share with LV if possible.
1562 // TODO: Pass RtPtrChecking instead of PointerChecks and SE separately, if possible
1563 auto ExpandedChecks = expandBounds(PointerChecks, TheLoop, Loc, Exp);
1564
1565 LLVMContext &Ctx = Loc->getContext();
1566 Instruction *FirstInst = nullptr;
1567 IRBuilder<> ChkBuilder(Loc);
1568 // Our instructions might fold to a constant.
1569 Value *MemoryRuntimeCheck = nullptr;
1570
1571 // FIXME: this helper is currently a duplicate of the one in
1572 // LoopVectorize.cpp.
1573 auto GetFirstInst = [](Instruction *FirstInst, Value *V,
1574 Instruction *Loc) -> Instruction * {
1575 if (FirstInst)
1576 return FirstInst;
1577 if (Instruction *I = dyn_cast<Instruction>(V))
1578 return I->getParent() == Loc->getParent() ? I : nullptr;
1579 return nullptr;
1580 };
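  // FirstInst ends up pointing at the first check instruction actually
  // materialized in Loc's block; values that fold to constants or live in
  // other blocks contribute nothing, which is why GetFirstInst filters on the
  // parent block.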
1581
1582 for (const auto &Check : ExpandedChecks) {
1583 const PointerBounds &A = Check.first, &B = Check.second;
1584 // Check if two pointers (A and B) conflict where conflict is computed as:
1585 // start(A) < end(B) && start(B) < end(A)
1586 unsigned AS0 = A.Start->getType()->getPointerAddressSpace();
1587 unsigned AS1 = B.Start->getType()->getPointerAddressSpace();
1588
1589 assert((AS0 == B.End->getType()->getPointerAddressSpace()) &&
1590 (AS1 == A.End->getType()->getPointerAddressSpace()) &&
1591 "Trying to bounds check pointers with different address spaces");
1592
1593 Type *PtrArithTy0 = Type::getInt8PtrTy(Ctx, AS0);
1594 Type *PtrArithTy1 = Type::getInt8PtrTy(Ctx, AS1);
1595
1596 Value *Start0 = ChkBuilder.CreateBitCast(A.Start, PtrArithTy0, "bc");
1597 Value *Start1 = ChkBuilder.CreateBitCast(B.Start, PtrArithTy1, "bc");
1598 Value *End0 = ChkBuilder.CreateBitCast(A.End, PtrArithTy1, "bc");
1599 Value *End1 = ChkBuilder.CreateBitCast(B.End, PtrArithTy0, "bc");
1600
1601 // [A|B].Start points to the first accessed byte under base [A|B].
1602 // [A|B].End points to the last accessed byte, plus one.
1603 // There is no conflict when the intervals are disjoint:
1604 // NoConflict = (B.Start >= A.End) || (A.Start >= B.End)
1605 //
1606 // bound0 = (B.Start < A.End)
1607 // bound1 = (A.Start < B.End)
1608 // IsConflict = bound0 & bound1
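    // Illustrative example: with A = [0x100, 0x120) and B = [0x110, 0x130),
    // bound0 = (0x110 < 0x120) and bound1 = (0x100 < 0x130) are both true, so
    // IsConflict is true; with B = [0x120, 0x130) instead, bound0 is false
    // and no conflict is reported.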
1609 Value *Cmp0 = ChkBuilder.CreateICmpULT(Start0, End1, "bound0");
1610 FirstInst = GetFirstInst(FirstInst, Cmp0, Loc);
1611 Value *Cmp1 = ChkBuilder.CreateICmpULT(Start1, End0, "bound1");
1612 FirstInst = GetFirstInst(FirstInst, Cmp1, Loc);
1613 Value *IsConflict = ChkBuilder.CreateAnd(Cmp0, Cmp1, "found.conflict");
1614 FirstInst = GetFirstInst(FirstInst, IsConflict, Loc);
1615 if (MemoryRuntimeCheck) {
1616 IsConflict =
1617 ChkBuilder.CreateOr(MemoryRuntimeCheck, IsConflict, "conflict.rdx");
1618 FirstInst = GetFirstInst(FirstInst, IsConflict, Loc);
1619 }
1620 MemoryRuntimeCheck = IsConflict;
1621 }
1622
1623 if (!MemoryRuntimeCheck)
1624 return std::make_pair(nullptr, nullptr);
1625
1626 // We have to do this trickery because the IRBuilder might fold the check to a
1627 // constant expression in which case there is no Instruction anchored in
1628 // the block.
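  // ANDing with 'true' below forces a real Instruction to be created even
  // when MemoryRuntimeCheck folded to a constant, giving callers a stable
  // instruction to anchor the branch on.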
1629 Instruction *Check =
1630 BinaryOperator::CreateAnd(MemoryRuntimeCheck, ConstantInt::getTrue(Ctx));
1631 ChkBuilder.Insert(Check, "memcheck.conflict");
1632 FirstInst = GetFirstInst(FirstInst, Check, Loc);
1633 return std::make_pair(FirstInst, Check);
1634 }
1635
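// Informal sketch of the shape this analysis looks for (illustrative, not
// normative): the loop-header branch must be conditional on a comparison
// computed inside the loop from loads and GEPs only. If, along one path from
// a header successor back through the loop, no MemoryDef may clobber the
// loaded locations, the condition cannot change on that path; the returned
// info lists the instructions to duplicate, whether the path is a no-op, and
// the condition value known to hold on it.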
1636 Optional<IVConditionInfo> llvm::hasPartialIVCondition(Loop &L,
1637 unsigned MSSAThreshold,
1638 MemorySSA &MSSA,
1639 AAResults &AA) {
1640 auto *TI = dyn_cast<BranchInst>(L.getHeader()->getTerminator());
1641 if (!TI || !TI->isConditional())
1642 return {};
1643
1644 auto *CondI = dyn_cast<CmpInst>(TI->getCondition());
1645 // The case with the condition outside the loop should already be handled
1646 // earlier.
1647 if (!CondI || !L.contains(CondI))
1648 return {};
1649
1650 SmallVector<Instruction *> InstToDuplicate;
1651 InstToDuplicate.push_back(CondI);
1652
1653 SmallVector<Value *, 4> WorkList;
1654 WorkList.append(CondI->op_begin(), CondI->op_end());
1655
1656 SmallVector<MemoryAccess *, 4> AccessesToCheck;
1657 SmallVector<MemoryLocation, 4> AccessedLocs;
1658 while (!WorkList.empty()) {
1659 Instruction *I = dyn_cast<Instruction>(WorkList.pop_back_val());
1660 if (!I || !L.contains(I))
1661 continue;
1662
1663 // TODO: support additional instructions.
1664 if (!isa<LoadInst>(I) && !isa<GetElementPtrInst>(I))
1665 return {};
1666
1667 // Do not duplicate volatile and atomic loads.
1668 if (auto *LI = dyn_cast<LoadInst>(I))
1669 if (LI->isVolatile() || LI->isAtomic())
1670 return {};
1671
1672 InstToDuplicate.push_back(I);
1673 if (MemoryAccess *MA = MSSA.getMemoryAccess(I)) {
1674 if (auto *MemUse = dyn_cast_or_null<MemoryUse>(MA)) {
1675 // Queue the defining access for alias checking.
1676 AccessesToCheck.push_back(MemUse->getDefiningAccess());
1677 AccessedLocs.push_back(MemoryLocation::get(I));
1678 } else {
1679 // MemoryDefs may clobber the location or may be atomic memory
1680 // operations. Bail out.
1681 return {};
1682 }
1683 }
1684 WorkList.append(I->op_begin(), I->op_end());
1685 }
1686
1687 if (InstToDuplicate.empty())
1688 return {};
1689
1690 SmallVector<BasicBlock *, 4> ExitingBlocks;
1691 L.getExitingBlocks(ExitingBlocks);
1692 auto HasNoClobbersOnPath =
1693 [&L, &AA, &AccessedLocs, &ExitingBlocks, &InstToDuplicate,
1694 MSSAThreshold](BasicBlock *Succ, BasicBlock *Header,
1695 SmallVector<MemoryAccess *, 4> AccessesToCheck)
1696 -> Optional<IVConditionInfo> {
1697 IVConditionInfo Info;
1698 // First, collect all blocks in the loop that are on a path from Succ
1699 // to the header.
1700 SmallVector<BasicBlock *, 4> WorkList;
1701 WorkList.push_back(Succ);
1702 WorkList.push_back(Header);
1703 SmallPtrSet<BasicBlock *, 4> Seen;
1704 Seen.insert(Header);
1705 Info.PathIsNoop &=
1706 all_of(*Header, [](Instruction &I) { return !I.mayHaveSideEffects(); });
1707
1708 while (!WorkList.empty()) {
1709 BasicBlock *Current = WorkList.pop_back_val();
1710 if (!L.contains(Current))
1711 continue;
1712 const auto &SeenIns = Seen.insert(Current);
1713 if (!SeenIns.second)
1714 continue;
1715
1716 Info.PathIsNoop &= all_of(
1717 *Current, [](Instruction &I) { return !I.mayHaveSideEffects(); });
1718 WorkList.append(succ_begin(Current), succ_end(Current));
1719 }
1720
1721 // Require at least 2 blocks on a path through the loop. This skips
1722 // paths that directly exit the loop.
1723 if (Seen.size() < 2)
1724 return {};
1725
1726 // Next, check if there are any MemoryDefs that are on the path through
1727 // the loop (in the Seen set) that may alias any of the locations in
1728 // AccessedLocs. If that is the case, they may modify the condition and
1729 // partial unswitching is not possible.
1730 SmallPtrSet<MemoryAccess *, 4> SeenAccesses;
1731 while (!AccessesToCheck.empty()) {
1732 MemoryAccess *Current = AccessesToCheck.pop_back_val();
1733 auto SeenI = SeenAccesses.insert(Current);
1734 if (!SeenI.second || !Seen.contains(Current->getBlock()))
1735 continue;
1736
1737 // Bail out if we exceeded the threshold.
1738 if (SeenAccesses.size() >= MSSAThreshold)
1739 return {};
1740
1741 // MemoryUse are read-only accesses.
1742 if (isa<MemoryUse>(Current))
1743 continue;
1744
1745 // For a MemoryDef, check if it aliases any of the locations feeding
1746 // the original condition.
1747 if (auto *CurrentDef = dyn_cast<MemoryDef>(Current)) {
1748 if (any_of(AccessedLocs, [&AA, CurrentDef](MemoryLocation &Loc) {
1749 return isModSet(
1750 AA.getModRefInfo(CurrentDef->getMemoryInst(), Loc));
1751 }))
1752 return {};
1753 }
1754
1755 for (Use &U : Current->uses())
1756 AccessesToCheck.push_back(cast<MemoryAccess>(U.getUser()));
1757 }
1758
1759 // We could also allow loops with known trip counts without mustprogress,
1760 // but ScalarEvolution may not be available.
1761 Info.PathIsNoop &= isMustProgress(&L);
1762
1763 // If the path is considered a no-op so far, check if it reaches a
1764 // single exit block without any phis. This ensures no values from the
1765 // loop are used outside of the loop.
1766 if (Info.PathIsNoop) {
1767 for (auto *Exiting : ExitingBlocks) {
1768 if (!Seen.contains(Exiting))
1769 continue;
1770 for (auto *Succ : successors(Exiting)) {
1771 if (L.contains(Succ))
1772 continue;
1773
1774 Info.PathIsNoop &= llvm::empty(Succ->phis()) &&
1775 (!Info.ExitForPath || Info.ExitForPath == Succ);
1776 if (!Info.PathIsNoop)
1777 break;
1778 assert((!Info.ExitForPath || Info.ExitForPath == Succ) &&
1779 "cannot have multiple exit blocks");
1780 Info.ExitForPath = Succ;
1781 }
1782 }
1783 }
1784 if (!Info.ExitForPath)
1785 Info.PathIsNoop = false;
1786
1787 Info.InstToDuplicate = InstToDuplicate;
1788 return Info;
1789 };
1790
1791 // If we branch to the same successor, partial unswitching will not be
1792 // beneficial.
1793 if (TI->getSuccessor(0) == TI->getSuccessor(1))
1794 return {};
1795
1796 if (auto Info = HasNoClobbersOnPath(TI->getSuccessor(0), L.getHeader(),
1797 AccessesToCheck)) {
1798 Info->KnownValue = ConstantInt::getTrue(TI->getContext());
1799 return Info;
1800 }
1801 if (auto Info = HasNoClobbersOnPath(TI->getSuccessor(1), L.getHeader(),
1802 AccessesToCheck)) {
1803 Info->KnownValue = ConstantInt::getFalse(TI->getContext());
1804 return Info;
1805 }
1806
1807 return {};
1808 }
1809