//===- NaryReassociate.cpp - Reassociate n-ary expressions ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass reassociates n-ary add expressions and eliminates the redundancy
// exposed by the reassociation.
//
// A motivating example:
//
//   void foo(int a, int b) {
//     bar(a + b);
//     bar((a + 2) + b);
//   }
//
// An ideal compiler should reassociate (a + 2) + b to (a + b) + 2 and simplify
// the above code to
//
//   int t = a + b;
//   bar(t);
//   bar(t + 2);
//
// However, the Reassociate pass is unable to do that because it processes each
// instruction individually and believes, according to its rank system, that
// (a + 2) + b is already the best form.
//
// To address this limitation, NaryReassociate reassociates an expression into
// a form that reuses existing instructions. As a result, NaryReassociate can
// reassociate (a + 2) + b in the example to (a + b) + 2, because it detects
// that (a + b) has already been computed.
//
// NaryReassociate works as follows. For every instruction of the form
// (a + b) + c, it checks whether a + c or b + c has already been computed by a
// dominating instruction. If so, it reassociates (a + b) + c into (a + c) + b
// or (b + c) + a and removes the redundancy accordingly. To look up
// efficiently whether an expression has been computed before, we store each
// instruction seen, keyed by its SCEV, in an SCEV-to-instruction map.
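//
// For example, given
//
//   t1 = a + b
//   t2 = (a + 2) + b
//
// visiting t1 records SeenExprs[SCEV(a + b)] = {t1}. When we visit t2, which
// matches (a' + b') + c' with a' = a, b' = 2 and c' = b, we look up
// SCEV(a + b) and SCEV(2 + b); the former hits t1, so t2 is rewritten to
// t1 + 2.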
//
// Although the algorithm pattern-matches only ternary additions, it
// automatically handles many >3-ary expressions by walking the function in
// depth-first order of the dominator tree. For example, given
//
//   (a + c) + d
//   ((a + b) + c) + d
//
// NaryReassociate first rewrites (a + b) + c to (a + c) + b, and then rewrites
// ((a + c) + b) + d into ((a + c) + d) + b.
//
// Finally, the above dominator-based algorithm may need to run for multiple
// iterations before emitting optimal code. One reason is that we split an
// operand only when it has a single use, and the algorithm can eliminate an
// instruction, decreasing the use counts of its operands. As a result, an
// instruction that previously had multiple uses may become a single-use
// instruction and thus become eligible for splitting in the next iteration.
// For example,
//
//   ac = a + c
//   ab = a + b
//   abc = ab + c
//   ab2 = ab + b
//   ab2c = ab2 + c
//
// In the first iteration, we cannot reassociate abc to ac + b because ab is
// used twice. We can, however, reassociate ab2c to abc + b. As a result, ab2
// becomes dead and ab has only one use in the second iteration.
//
// Limitations and TODO items:
//
// 1) We only consider n-ary adds and muls for now. This should be extended
//    and generalized.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/NaryReassociate.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <cstdint>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "nary-reassociate"

namespace {

class NaryReassociateLegacyPass : public FunctionPass {
public:
  static char ID;

  NaryReassociateLegacyPass() : FunctionPass(ID) {
    initializeNaryReassociateLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override {
    return false;
  }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<ScalarEvolutionWrapperPass>();
    AU.addPreserved<TargetLibraryInfoWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesCFG();
  }

private:
  NaryReassociatePass Impl;
};

} // end anonymous namespace

char NaryReassociateLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(NaryReassociateLegacyPass, "nary-reassociate",
                      "Nary reassociation", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(NaryReassociateLegacyPass, "nary-reassociate",
                    "Nary reassociation", false, false)

FunctionPass *llvm::createNaryReassociatePass() {
  return new NaryReassociateLegacyPass();
}

bool NaryReassociateLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);

  return Impl.runImpl(F, AC, DT, SE, TLI, TTI);
}

PreservedAnalyses NaryReassociatePass::run(Function &F,
                                           FunctionAnalysisManager &AM) {
  auto *AC = &AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
  auto *TLI = &AM.getResult<TargetLibraryAnalysis>(F);
  auto *TTI = &AM.getResult<TargetIRAnalysis>(F);

  if (!runImpl(F, AC, DT, SE, TLI, TTI))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<ScalarEvolutionAnalysis>();
  return PA;
}

bool NaryReassociatePass::runImpl(Function &F, AssumptionCache *AC_,
                                  DominatorTree *DT_, ScalarEvolution *SE_,
                                  TargetLibraryInfo *TLI_,
                                  TargetTransformInfo *TTI_) {
  AC = AC_;
  DT = DT_;
  SE = SE_;
  TLI = TLI_;
  TTI = TTI_;
  DL = &F.getParent()->getDataLayout();

  bool Changed = false, ChangedInThisIteration;
  do {
    ChangedInThisIteration = doOneIteration(F);
    Changed |= ChangedInThisIteration;
  } while (ChangedInThisIteration);
  return Changed;
}

bool NaryReassociatePass::doOneIteration(Function &F) {
  bool Changed = false;
  SeenExprs.clear();
  // Process the basic blocks in a depth-first traversal of the dominator
  // tree. This order ensures that all bases of a candidate are in Candidates
  // when we process it.
  SmallVector<WeakTrackingVH, 16> DeadInsts;
  for (const auto Node : depth_first(DT)) {
    BasicBlock *BB = Node->getBlock();
    for (auto I = BB->begin(); I != BB->end(); ++I) {
      Instruction *OrigI = &*I;
      const SCEV *OrigSCEV = nullptr;
      if (Instruction *NewI = tryReassociate(OrigI, OrigSCEV)) {
        Changed = true;
        OrigI->replaceAllUsesWith(NewI);

        // Add 'OrigI' to the list of dead instructions.
        DeadInsts.push_back(WeakTrackingVH(OrigI));
        // Add the rewritten instruction to SeenExprs; the original instruction
        // is deleted.
        const SCEV *NewSCEV = SE->getSCEV(NewI);
        SeenExprs[NewSCEV].push_back(WeakTrackingVH(NewI));

        // Ideally, NewSCEV should equal OrigSCEV because tryReassociate(I)
        // is equivalent to I. However, ScalarEvolution::getSCEV may
        // weaken nsw, causing NewSCEV not to equal OrigSCEV. For example,
        // suppose we reassociate
        //   I = &a[sext(i +nsw j)] // assuming sizeof(a[0]) = 4
        // to
        //   NewI = &a[sext(i)] + sext(j).
        //
        // ScalarEvolution computes
        //   getSCEV(I)    = a + 4 * sext(i + j)
        //   getSCEV(NewI) = a + 4 * sext(i) + 4 * sext(j)
        // which are different SCEVs.
        //
        // To alleviate this issue of ScalarEvolution not always capturing
        // equivalence, we add NewI to SeenExprs[OrigSCEV] as well, so that we
        // can map both SCEVs before and after tryReassociate(I) to it.
        //
        // This improvement is exercised in @reassociate_gep_nsw in
        // nary-gep.ll.
        if (NewSCEV != OrigSCEV)
          SeenExprs[OrigSCEV].push_back(WeakTrackingVH(NewI));
      } else if (OrigSCEV)
        SeenExprs[OrigSCEV].push_back(WeakTrackingVH(OrigI));
    }
  }
  // Delete all dead instructions from 'DeadInsts'.
  // Please note ScalarEvolution is updated along the way.
  RecursivelyDeleteTriviallyDeadInstructionsPermissive(
      DeadInsts, TLI, nullptr, [this](Value *V) { SE->forgetValue(V); });

  return Changed;
}

Instruction *NaryReassociatePass::tryReassociate(Instruction *I,
                                                 const SCEV *&OrigSCEV) {
  if (!SE->isSCEVable(I->getType()))
    return nullptr;

  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Mul:
    OrigSCEV = SE->getSCEV(I);
    return tryReassociateBinaryOp(cast<BinaryOperator>(I));
  case Instruction::GetElementPtr:
    OrigSCEV = SE->getSCEV(I);
    return tryReassociateGEP(cast<GetElementPtrInst>(I));
  default:
    return nullptr;
  }

  llvm_unreachable("should not be reached");
  return nullptr;
}

static bool isGEPFoldable(GetElementPtrInst *GEP,
                          const TargetTransformInfo *TTI) {
  SmallVector<const Value *, 4> Indices(GEP->indices());
  return TTI->getGEPCost(GEP->getSourceElementType(), GEP->getPointerOperand(),
                         Indices) == TargetTransformInfo::TCC_Free;
}

Instruction *NaryReassociatePass::tryReassociateGEP(GetElementPtrInst *GEP) {
  // Not worth reassociating GEP if it is foldable.
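  // (A foldable GEP is one the target expects to be free, typically because it
  // can be folded into the addressing mode of the load or store that uses it,
  // e.g. a base-plus-scaled-index operand on x86; reassociating such a GEP
  // would only add instructions.)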
  if (isGEPFoldable(GEP, TTI))
    return nullptr;

  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    if (GTI.isSequential()) {
      if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I - 1,
                                                  GTI.getIndexedType())) {
        return NewGEP;
      }
    }
  }
  return nullptr;
}

bool NaryReassociatePass::requiresSignExtension(Value *Index,
                                                GetElementPtrInst *GEP) {
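  // For example, with 64-bit pointers, an i32 GEP index must be sign-extended
  // to i64 before it can be added to the base, whereas an i64 index needs no
  // extension.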
  unsigned PointerSizeInBits =
      DL->getPointerSizeInBits(GEP->getType()->getPointerAddressSpace());
  return cast<IntegerType>(Index->getType())->getBitWidth() < PointerSizeInBits;
}

GetElementPtrInst *
NaryReassociatePass::tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
                                              unsigned I, Type *IndexedType) {
  Value *IndexToSplit = GEP->getOperand(I + 1);
  if (SExtInst *SExt = dyn_cast<SExtInst>(IndexToSplit)) {
    IndexToSplit = SExt->getOperand(0);
  } else if (ZExtInst *ZExt = dyn_cast<ZExtInst>(IndexToSplit)) {
    // zext can be treated as sext if the source is non-negative.
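    // (A non-negative value has a zero sign bit, so sign- and zero-extension
    // produce the same result.)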
    if (isKnownNonNegative(ZExt->getOperand(0), *DL, 0, AC, GEP, DT))
      IndexToSplit = ZExt->getOperand(0);
  }

  if (AddOperator *AO = dyn_cast<AddOperator>(IndexToSplit)) {
    // If the I-th index needs sext and the underlying add is not equipped with
    // nsw, we cannot split the add because
    //   sext(LHS + RHS) != sext(LHS) + sext(RHS).
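    // A concrete counterexample with i8 widened to i16: for LHS = 127 and
    // RHS = 1, LHS + RHS wraps to -128, so sext(LHS + RHS) = -128, whereas
    // sext(LHS) + sext(RHS) = 128.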
    if (requiresSignExtension(IndexToSplit, GEP) &&
        computeOverflowForSignedAdd(AO, *DL, AC, GEP, DT) !=
            OverflowResult::NeverOverflows)
      return nullptr;

    Value *LHS = AO->getOperand(0), *RHS = AO->getOperand(1);
    // IndexToSplit = LHS + RHS.
    if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I, LHS, RHS, IndexedType))
      return NewGEP;
    // Symmetrically, try IndexToSplit = RHS + LHS.
    if (LHS != RHS) {
      if (auto *NewGEP =
              tryReassociateGEPAtIndex(GEP, I, RHS, LHS, IndexedType))
        return NewGEP;
    }
  }
  return nullptr;
}

GetElementPtrInst *
NaryReassociatePass::tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
                                              unsigned I, Value *LHS,
                                              Value *RHS, Type *IndexedType) {
  // Look for GEP's closest dominator that has the same SCEV as GEP except that
  // the I-th index is replaced with LHS.
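  //
  // An IR-level sketch with hypothetical values: given a dominating
  //   %c = getelementptr inbounds i32, i32* %a, i64 %i
  // and the candidate
  //   %sum = add nsw i64 %i, %j
  //   %gep = getelementptr inbounds i32, i32* %a, i64 %sum
  // a call with LHS = %i and RHS = %j finds %c via the SCEV lookup below and
  // rewrites %gep into
  //   %newgep = getelementptr inbounds i32, i32* %c, i64 %j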
  SmallVector<const SCEV *, 4> IndexExprs;
  for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
    IndexExprs.push_back(SE->getSCEV(*Index));
  // Replace the I-th index with LHS.
  IndexExprs[I] = SE->getSCEV(LHS);
  if (isKnownNonNegative(LHS, *DL, 0, AC, GEP, DT) &&
      DL->getTypeSizeInBits(LHS->getType()).getFixedSize() <
          DL->getTypeSizeInBits(GEP->getOperand(I)->getType()).getFixedSize()) {
    // Zero-extend LHS if it is non-negative. InstCombine canonicalizes sext to
    // zext if the source operand is proved non-negative. We should do that
    // consistently, so that CandidateExpr is more likely to have been seen
    // before. See @reassociate_gep_assume for an example of this
    // canonicalization.
    IndexExprs[I] =
        SE->getZeroExtendExpr(IndexExprs[I], GEP->getOperand(I)->getType());
  }
  const SCEV *CandidateExpr = SE->getGEPExpr(cast<GEPOperator>(GEP),
                                             IndexExprs);

  Value *Candidate = findClosestMatchingDominator(CandidateExpr, GEP);
  if (Candidate == nullptr)
    return nullptr;

  IRBuilder<> Builder(GEP);
  // Candidate does not necessarily have the same pointer type as GEP. Use
  // bitcast or pointer cast to make sure they have the same type, so that the
  // later RAUW doesn't complain.
  Candidate = Builder.CreateBitOrPointerCast(Candidate, GEP->getType());
  assert(Candidate->getType() == GEP->getType());

  // NewGEP = (char *)Candidate + RHS * sizeof(IndexedType)
  uint64_t IndexedSize = DL->getTypeAllocSize(IndexedType);
  Type *ElementType = GEP->getResultElementType();
  uint64_t ElementSize = DL->getTypeAllocSize(ElementType);
  // Another subtlety: because I is not necessarily the last index of the GEP,
  // the size of the type at the I-th index (IndexedSize) is not necessarily
  // divisible by ElementSize. For example,
  //
  //   #pragma pack(1)
  //   struct S {
  //     int a[3];
  //     int64 b[8];
  //   };
  //   #pragma pack()
  //
  // sizeof(S) = 76 is indivisible by sizeof(int64) = 8.
  //
  // TODO: we bail out on this case for now; we could instead emit an uglygep.
  if (IndexedSize % ElementSize != 0)
    return nullptr;

  // NewGEP = &Candidate[RHS * (sizeof(IndexedType) / sizeof(Candidate[0]))]
  Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
  if (RHS->getType() != IntPtrTy)
    RHS = Builder.CreateSExtOrTrunc(RHS, IntPtrTy);
  if (IndexedSize != ElementSize) {
    RHS = Builder.CreateMul(
        RHS, ConstantInt::get(IntPtrTy, IndexedSize / ElementSize));
  }
  GetElementPtrInst *NewGEP = cast<GetElementPtrInst>(
      Builder.CreateGEP(GEP->getResultElementType(), Candidate, RHS));
  NewGEP->setIsInBounds(GEP->isInBounds());
  NewGEP->takeName(GEP);
  return NewGEP;
}

Instruction *NaryReassociatePass::tryReassociateBinaryOp(BinaryOperator *I) {
  Value *LHS = I->getOperand(0), *RHS = I->getOperand(1);
  // There is no need to reassociate 0.
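  // (This triggers when SCEV folds the whole expression to the constant 0,
  // e.g. a + b where b is known to be -a; rewriting such an expression cannot
  // expose any reuse.)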
  if (SE->getSCEV(I)->isZero())
    return nullptr;
  if (auto *NewI = tryReassociateBinaryOp(LHS, RHS, I))
    return NewI;
  if (auto *NewI = tryReassociateBinaryOp(RHS, LHS, I))
    return NewI;
  return nullptr;
}

Instruction *NaryReassociatePass::tryReassociateBinaryOp(Value *LHS, Value *RHS,
                                                         BinaryOperator *I) {
  Value *A = nullptr, *B = nullptr;
  // To be conservative, we reassociate I only when it is the only user of
  // (A op B).
  if (LHS->hasOneUse() && matchTernaryOp(I, LHS, A, B)) {
    // I = (A op B) op RHS
    //   = (A op RHS) op B or (B op RHS) op A
    const SCEV *AExpr = SE->getSCEV(A), *BExpr = SE->getSCEV(B);
    const SCEV *RHSExpr = SE->getSCEV(RHS);
    if (BExpr != RHSExpr) {
      if (auto *NewI =
              tryReassociatedBinaryOp(getBinarySCEV(I, AExpr, RHSExpr), B, I))
        return NewI;
    }
    if (AExpr != RHSExpr) {
      if (auto *NewI =
              tryReassociatedBinaryOp(getBinarySCEV(I, BExpr, RHSExpr), A, I))
        return NewI;
    }
  }
  return nullptr;
}

Instruction *NaryReassociatePass::tryReassociatedBinaryOp(const SCEV *LHSExpr,
                                                          Value *RHS,
                                                          BinaryOperator *I) {
  // Look for the closest dominator LHS of I that computes LHSExpr, and replace
  // I with LHS op RHS.
  auto *LHS = findClosestMatchingDominator(LHSExpr, I);
  if (LHS == nullptr)
    return nullptr;

  Instruction *NewI = nullptr;
  switch (I->getOpcode()) {
  case Instruction::Add:
    NewI = BinaryOperator::CreateAdd(LHS, RHS, "", I);
    break;
  case Instruction::Mul:
    NewI = BinaryOperator::CreateMul(LHS, RHS, "", I);
    break;
  default:
    llvm_unreachable("Unexpected instruction.");
  }
  NewI->takeName(I);
  return NewI;
}

bool NaryReassociatePass::matchTernaryOp(BinaryOperator *I, Value *V,
                                         Value *&Op1, Value *&Op2) {
  switch (I->getOpcode()) {
  case Instruction::Add:
    return match(V, m_Add(m_Value(Op1), m_Value(Op2)));
  case Instruction::Mul:
    return match(V, m_Mul(m_Value(Op1), m_Value(Op2)));
  default:
    llvm_unreachable("Unexpected instruction.");
  }
  return false;
}

const SCEV *NaryReassociatePass::getBinarySCEV(BinaryOperator *I,
                                               const SCEV *LHS,
                                               const SCEV *RHS) {
  switch (I->getOpcode()) {
  case Instruction::Add:
    return SE->getAddExpr(LHS, RHS);
  case Instruction::Mul:
    return SE->getMulExpr(LHS, RHS);
  default:
    llvm_unreachable("Unexpected instruction.");
  }
  return nullptr;
}

Instruction *
NaryReassociatePass::findClosestMatchingDominator(const SCEV *CandidateExpr,
                                                  Instruction *Dominatee) {
  auto Pos = SeenExprs.find(CandidateExpr);
  if (Pos == SeenExprs.end())
    return nullptr;

  auto &Candidates = Pos->second;
  // Because we process the basic blocks in pre-order of the dominator tree, a
  // candidate that doesn't dominate the current instruction won't dominate any
  // future instruction either. Therefore, we pop it out of the stack. This
  // optimization makes the algorithm O(n).
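  //
  // For example, if the dominator-tree root has subtrees B1 and B2, a
  // candidate defined in B1 cannot dominate an instruction in B2; and since
  // the pre-order traversal finishes all of B1 before entering B2, that
  // candidate cannot dominate any instruction visited later either, so
  // discarding it is safe.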
  while (!Candidates.empty()) {
    // Candidates stores WeakTrackingVHs, so a candidate can be nullptr if it's
    // removed during rewriting.
    if (Value *Candidate = Candidates.back()) {
      Instruction *CandidateInstruction = cast<Instruction>(Candidate);
      if (DT->dominates(CandidateInstruction, Dominatee))
        return CandidateInstruction;
    }
    Candidates.pop_back();
  }
  return nullptr;
}