1 //===- Local.cpp - Functions to perform local transformations -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This family of functions performs various local transformations to the
10 // program.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/Utils/Local.h"
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/DenseMap.h"
17 #include "llvm/ADT/DenseMapInfo.h"
18 #include "llvm/ADT/DenseSet.h"
19 #include "llvm/ADT/Hashing.h"
20 #include "llvm/ADT/None.h"
21 #include "llvm/ADT/Optional.h"
22 #include "llvm/ADT/STLExtras.h"
23 #include "llvm/ADT/SetVector.h"
24 #include "llvm/ADT/SmallPtrSet.h"
25 #include "llvm/ADT/SmallVector.h"
26 #include "llvm/ADT/Statistic.h"
27 #include "llvm/Analysis/AssumeBundleQueries.h"
28 #include "llvm/Analysis/ConstantFolding.h"
29 #include "llvm/Analysis/DomTreeUpdater.h"
30 #include "llvm/Analysis/EHPersonalities.h"
31 #include "llvm/Analysis/InstructionSimplify.h"
32 #include "llvm/Analysis/LazyValueInfo.h"
33 #include "llvm/Analysis/MemoryBuiltins.h"
34 #include "llvm/Analysis/MemorySSAUpdater.h"
35 #include "llvm/Analysis/TargetLibraryInfo.h"
36 #include "llvm/Analysis/ValueTracking.h"
37 #include "llvm/Analysis/VectorUtils.h"
38 #include "llvm/BinaryFormat/Dwarf.h"
39 #include "llvm/IR/Argument.h"
40 #include "llvm/IR/Attributes.h"
41 #include "llvm/IR/BasicBlock.h"
42 #include "llvm/IR/CFG.h"
43 #include "llvm/IR/Constant.h"
44 #include "llvm/IR/ConstantRange.h"
45 #include "llvm/IR/Constants.h"
46 #include "llvm/IR/DIBuilder.h"
47 #include "llvm/IR/DataLayout.h"
48 #include "llvm/IR/DebugInfoMetadata.h"
49 #include "llvm/IR/DebugLoc.h"
50 #include "llvm/IR/DerivedTypes.h"
51 #include "llvm/IR/Dominators.h"
52 #include "llvm/IR/Function.h"
53 #include "llvm/IR/GetElementPtrTypeIterator.h"
54 #include "llvm/IR/GlobalObject.h"
55 #include "llvm/IR/IRBuilder.h"
56 #include "llvm/IR/InstrTypes.h"
57 #include "llvm/IR/Instruction.h"
58 #include "llvm/IR/Instructions.h"
59 #include "llvm/IR/IntrinsicInst.h"
60 #include "llvm/IR/Intrinsics.h"
61 #include "llvm/IR/LLVMContext.h"
62 #include "llvm/IR/MDBuilder.h"
63 #include "llvm/IR/Metadata.h"
64 #include "llvm/IR/Module.h"
65 #include "llvm/IR/Operator.h"
66 #include "llvm/IR/PatternMatch.h"
67 #include "llvm/IR/PseudoProbe.h"
68 #include "llvm/IR/Type.h"
69 #include "llvm/IR/Use.h"
70 #include "llvm/IR/User.h"
71 #include "llvm/IR/Value.h"
72 #include "llvm/IR/ValueHandle.h"
73 #include "llvm/Support/Casting.h"
74 #include "llvm/Support/Debug.h"
75 #include "llvm/Support/ErrorHandling.h"
76 #include "llvm/Support/KnownBits.h"
77 #include "llvm/Support/raw_ostream.h"
78 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
79 #include "llvm/Transforms/Utils/ValueMapper.h"
80 #include <algorithm>
81 #include <cassert>
82 #include <climits>
83 #include <cstdint>
84 #include <iterator>
85 #include <map>
86 #include <utility>
87 
88 using namespace llvm;
89 using namespace llvm::PatternMatch;
90 
91 #define DEBUG_TYPE "local"
92 
93 STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
94 STATISTIC(NumPHICSEs, "Number of PHIs that got CSE'd");
95 
96 static cl::opt<bool> PHICSEDebugHash(
97     "phicse-debug-hash",
98 #ifdef EXPENSIVE_CHECKS
99     cl::init(true),
100 #else
101     cl::init(false),
102 #endif
103     cl::Hidden,
104     cl::desc("Perform extra assertion checking to verify that PHINode's hash "
105              "function is well-behaved w.r.t. its isEqual predicate"));
106 
107 static cl::opt<unsigned> PHICSENumPHISmallSize(
108     "phicse-num-phi-smallsize", cl::init(32), cl::Hidden,
109     cl::desc(
110         "When the basic block contains no more than this number of PHI nodes, "
111         "perform a (faster!) exhaustive search instead of a set-driven one."));
112 
113 // Max recursion depth for collectBitParts used when detecting bswap and
114 // bitreverse idioms.
115 static const unsigned BitPartRecursionMaxDepth = 48;
116 
117 //===----------------------------------------------------------------------===//
118 //  Local constant propagation.
119 //
120 
121 /// ConstantFoldTerminator - If a terminator instruction is predicated on a
122 /// constant value, convert it into an unconditional branch to the constant
123 /// destination.  This is a nontrivial operation because the successors of this
124 /// basic block must have their PHI nodes updated.
125 /// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
126 /// conditions and indirectbr addresses that this might make dead if
127 /// DeleteDeadConditions is true.
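///
/// As a rough illustration (not exhaustive of the cases handled below), a
/// branch on a known condition such as
///     br i1 true, label %live, label %dead
/// becomes
///     br label %live
/// and %dead loses this block as a predecessor, so its PHI nodes are updated.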
128 bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
129                                   const TargetLibraryInfo *TLI,
130                                   DomTreeUpdater *DTU) {
131   Instruction *T = BB->getTerminator();
132   IRBuilder<> Builder(T);
133 
134   // Branch - See if we are conditionally jumping on a constant.
135   if (auto *BI = dyn_cast<BranchInst>(T)) {
136     if (BI->isUnconditional()) return false;  // Can't optimize uncond branch
137 
138     BasicBlock *Dest1 = BI->getSuccessor(0);
139     BasicBlock *Dest2 = BI->getSuccessor(1);
140 
141     if (Dest2 == Dest1) {       // Conditional branch to same location?
142       // This branch matches something like this:
143       //     br bool %cond, label %Dest, label %Dest
144       // and changes it into:  br label %Dest
145 
146       // Let the basic block know that we are letting go of one copy of it.
147       assert(BI->getParent() && "Terminator not inserted in block!");
148       Dest1->removePredecessor(BI->getParent());
149 
150       // Replace the conditional branch with an unconditional one.
151       BranchInst *NewBI = Builder.CreateBr(Dest1);
152 
153       // Transfer the metadata to the new branch instruction.
154       NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
155                                 LLVMContext::MD_annotation});
156 
157       Value *Cond = BI->getCondition();
158       BI->eraseFromParent();
159       if (DeleteDeadConditions)
160         RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
161       return true;
162     }
163 
164     if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
165       // Are we branching on a constant?
166       // YES.  Change to an unconditional branch...
167       BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
168       BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1;
169 
170       // Let the basic block know that we are letting go of it.  Based on this,
171       // it will adjust its PHI nodes.
172       OldDest->removePredecessor(BB);
173 
174       // Replace the conditional branch with an unconditional one.
175       BranchInst *NewBI = Builder.CreateBr(Destination);
176 
177       // Transfer the metadata to the new branch instruction.
178       NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
179                                 LLVMContext::MD_annotation});
180 
181       BI->eraseFromParent();
182       if (DTU)
183         DTU->applyUpdates({{DominatorTree::Delete, BB, OldDest}});
184       return true;
185     }
186 
187     return false;
188   }
189 
190   if (auto *SI = dyn_cast<SwitchInst>(T)) {
191     // If we are switching on a constant, we can convert the switch to an
192     // unconditional branch.
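    // For example (illustrative only), when the condition is a known constant:
    //     switch i32 5, label %default [ i32 5, label %five ]
    // collapses to:
    //     br label %five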
193     auto *CI = dyn_cast<ConstantInt>(SI->getCondition());
194     BasicBlock *DefaultDest = SI->getDefaultDest();
195     BasicBlock *TheOnlyDest = DefaultDest;
196 
197     // If the default is unreachable, ignore it when searching for TheOnlyDest.
198     if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) &&
199         SI->getNumCases() > 0) {
200       TheOnlyDest = SI->case_begin()->getCaseSuccessor();
201     }
202 
203     bool Changed = false;
204 
205     // Figure out which case it goes to.
206     for (auto i = SI->case_begin(), e = SI->case_end(); i != e;) {
207       // Found case matching a constant operand?
208       if (i->getCaseValue() == CI) {
209         TheOnlyDest = i->getCaseSuccessor();
210         break;
211       }
212 
213       // Check to see if this branch is going to the same place as the default
214       // dest.  If so, eliminate it as an explicit compare.
215       if (i->getCaseSuccessor() == DefaultDest) {
216         MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
217         unsigned NCases = SI->getNumCases();
218         // Fold the case metadata into the default if there will be any branches
219         // left, unless the metadata doesn't match the switch.
220         if (NCases > 1 && MD && MD->getNumOperands() == 2 + NCases) {
221           // Collect branch weights into a vector.
222           SmallVector<uint32_t, 8> Weights;
223           for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e;
224                ++MD_i) {
225             auto *CI = mdconst::extract<ConstantInt>(MD->getOperand(MD_i));
226             Weights.push_back(CI->getValue().getZExtValue());
227           }
228           // Merge weight of this case to the default weight.
229           unsigned idx = i->getCaseIndex();
230           Weights[0] += Weights[idx+1];
231           // Remove weight for this case.
232           std::swap(Weights[idx+1], Weights.back());
233           Weights.pop_back();
234           SI->setMetadata(LLVMContext::MD_prof,
235                           MDBuilder(BB->getContext()).
236                           createBranchWeights(Weights));
237         }
238         // Remove this entry.
239         BasicBlock *ParentBB = SI->getParent();
240         DefaultDest->removePredecessor(ParentBB);
241         i = SI->removeCase(i);
242         e = SI->case_end();
243         Changed = true;
244         continue;
245       }
246 
247       // Otherwise, check to see if the switch only branches to one destination.
248       // We do this by resetting "TheOnlyDest" to null when we find two non-equal
249       // destinations.
250       if (i->getCaseSuccessor() != TheOnlyDest)
251         TheOnlyDest = nullptr;
252 
253       // Increment this iterator as we haven't removed the case.
254       ++i;
255     }
256 
257     if (CI && !TheOnlyDest) {
258       // Branching on a constant, but not on any of the cases; go to the default
259       // successor.
260       TheOnlyDest = SI->getDefaultDest();
261     }
262 
263     // If we found a single destination that we can fold the switch into, do so
264     // now.
265     if (TheOnlyDest) {
266       // Insert the new branch.
267       Builder.CreateBr(TheOnlyDest);
268       BasicBlock *BB = SI->getParent();
269 
270       SmallSet<BasicBlock *, 8> RemovedSuccessors;
271 
272       // Remove entries from PHI nodes which we no longer branch to...
273       BasicBlock *SuccToKeep = TheOnlyDest;
274       for (BasicBlock *Succ : successors(SI)) {
275         if (DTU && Succ != TheOnlyDest)
276           RemovedSuccessors.insert(Succ);
277         // Keep the first edge to TheOnlyDest; every other successor loses this
278         // block as a predecessor.
278         if (Succ == SuccToKeep) {
279           SuccToKeep = nullptr; // Don't modify the first branch to TheOnlyDest
280         } else {
281           Succ->removePredecessor(BB);
282         }
283       }
284 
285       // Delete the old switch.
286       Value *Cond = SI->getCondition();
287       SI->eraseFromParent();
288       if (DeleteDeadConditions)
289         RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
290       if (DTU) {
291         std::vector<DominatorTree::UpdateType> Updates;
292         Updates.reserve(RemovedSuccessors.size());
293         for (auto *RemovedSuccessor : RemovedSuccessors)
294           Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
295         DTU->applyUpdates(Updates);
296       }
297       return true;
298     }
299 
300     if (SI->getNumCases() == 1) {
301       // Otherwise, we can fold this switch into a conditional branch
302       // instruction if it has only one non-default destination.
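      // Illustrative sketch of this rewrite (the value names are made up):
      //     switch i32 %x, label %default [ i32 5, label %is_five ]
      // becomes:
      //     %cond = icmp eq i32 %x, 5
      //     br i1 %cond, label %is_five, label %default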
303       auto FirstCase = *SI->case_begin();
304       Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
305           FirstCase.getCaseValue(), "cond");
306 
307       // Insert the new branch.
308       BranchInst *NewBr = Builder.CreateCondBr(Cond,
309                                                FirstCase.getCaseSuccessor(),
310                                                SI->getDefaultDest());
311       MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
312       if (MD && MD->getNumOperands() == 3) {
313         ConstantInt *SICase =
314             mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
315         ConstantInt *SIDef =
316             mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
317         assert(SICase && SIDef);
318         // The TrueWeight should be the weight for the single case of SI.
319         NewBr->setMetadata(LLVMContext::MD_prof,
320                         MDBuilder(BB->getContext()).
321                         createBranchWeights(SICase->getValue().getZExtValue(),
322                                             SIDef->getValue().getZExtValue()));
323       }
324 
325       // Update make.implicit metadata to the newly-created conditional branch.
326       MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
327       if (MakeImplicitMD)
328         NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);
329 
330       // Delete the old switch.
331       SI->eraseFromParent();
332       return true;
333     }
334     return Changed;
335   }
336 
337   if (auto *IBI = dyn_cast<IndirectBrInst>(T)) {
338     // indirectbr blockaddress(@F, @BB) -> br label @BB
339     if (auto *BA =
340           dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
341       BasicBlock *TheOnlyDest = BA->getBasicBlock();
342       SmallSet<BasicBlock *, 8> RemovedSuccessors;
343 
344       // Insert the new branch.
345       Builder.CreateBr(TheOnlyDest);
346 
347       BasicBlock *SuccToKeep = TheOnlyDest;
348       for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
349         BasicBlock *DestBB = IBI->getDestination(i);
350         if (DTU && DestBB != TheOnlyDest)
351           RemovedSuccessors.insert(DestBB);
352         if (IBI->getDestination(i) == SuccToKeep) {
353           SuccToKeep = nullptr;
354         } else {
355           DestBB->removePredecessor(BB);
356         }
357       }
358       Value *Address = IBI->getAddress();
359       IBI->eraseFromParent();
360       if (DeleteDeadConditions)
361         // Delete pointer cast instructions.
362         RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);
363 
364       // Also zap the blockaddress constant if there are no users remaining;
365       // otherwise the destination is still marked as having its address taken.
366       if (BA->use_empty())
367         BA->destroyConstant();
368 
369       // If we didn't find our destination in the IBI successor list, then we
370       // have undefined behavior.  Replace the unconditional branch with an
371       // 'unreachable' instruction.
372       if (SuccToKeep) {
373         BB->getTerminator()->eraseFromParent();
374         new UnreachableInst(BB->getContext(), BB);
375       }
376 
377       if (DTU) {
378         std::vector<DominatorTree::UpdateType> Updates;
379         Updates.reserve(RemovedSuccessors.size());
380         for (auto *RemovedSuccessor : RemovedSuccessors)
381           Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
382         DTU->applyUpdates(Updates);
383       }
384       return true;
385     }
386   }
387 
388   return false;
389 }
390 
391 //===----------------------------------------------------------------------===//
392 //  Local dead code elimination.
393 //
394 
395 /// isInstructionTriviallyDead - Return true if the result produced by the
396 /// instruction is not used, and the instruction has no side effects.
397 ///
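/// For example, an unused arithmetic result such as "%t = add i32 %a, %b" with
/// no uses is trivially dead, while a store, or a call that may have side
/// effects, is not.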
398 bool llvm::isInstructionTriviallyDead(Instruction *I,
399                                       const TargetLibraryInfo *TLI) {
400   if (!I->use_empty())
401     return false;
402   return wouldInstructionBeTriviallyDead(I, TLI);
403 }
404 
405 bool llvm::wouldInstructionBeTriviallyDead(Instruction *I,
406                                            const TargetLibraryInfo *TLI) {
407   if (I->isTerminator())
408     return false;
409 
410   // We don't want the landingpad-like instructions removed by anything this
411   // general.
412   if (I->isEHPad())
413     return false;
414 
415   // We don't want debug info removed by anything this general, unless
416   // debug info is empty.
417   if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) {
418     if (DDI->getAddress())
419       return false;
420     return true;
421   }
422   if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(I)) {
423     if (DVI->hasArgList() || DVI->getValue(0))
424       return false;
425     return true;
426   }
427   if (DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(I)) {
428     if (DLI->getLabel())
429       return false;
430     return true;
431   }
432 
433   if (!I->willReturn())
434     return false;
435 
436   if (!I->mayHaveSideEffects())
437     return true;
438 
439   // Special case intrinsics that "may have side effects" but can be deleted
440   // when dead.
441   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
442     // Safe to delete llvm.stacksave and launder.invariant.group if dead.
443     if (II->getIntrinsicID() == Intrinsic::stacksave ||
444         II->getIntrinsicID() == Intrinsic::launder_invariant_group)
445       return true;
446 
447     if (II->isLifetimeStartOrEnd()) {
448       auto *Arg = II->getArgOperand(1);
449       // Lifetime intrinsics are dead when their pointer operand is undef.
450       if (isa<UndefValue>(Arg))
451         return true;
452       // If the pointer operand is an alloca, global, or argument and the only
453       // uses are lifetime intrinsics, then the intrinsics are dead.
454       if (isa<AllocaInst>(Arg) || isa<GlobalValue>(Arg) || isa<Argument>(Arg))
455         return llvm::all_of(Arg->uses(), [](Use &Use) {
456           if (IntrinsicInst *IntrinsicUse =
457                   dyn_cast<IntrinsicInst>(Use.getUser()))
458             return IntrinsicUse->isLifetimeStartOrEnd();
459           return false;
460         });
461       return false;
462     }
463 
464     // Assumptions are dead if their condition is trivially true.  Guards on
465     // true are operationally no-ops.  In the future we can consider more
466     // sophisticated tradeoffs for guards considering potential for check
467     // widening, but for now we keep things simple.
468     if ((II->getIntrinsicID() == Intrinsic::assume &&
469          isAssumeWithEmptyBundle(cast<AssumeInst>(*II))) ||
470         II->getIntrinsicID() == Intrinsic::experimental_guard) {
471       if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
472         return !Cond->isZero();
473 
474       return false;
475     }
476   }
477 
478   if (isAllocLikeFn(I, TLI))
479     return true;
480 
481   if (CallInst *CI = isFreeCall(I, TLI))
482     if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0)))
483       return C->isNullValue() || isa<UndefValue>(C);
484 
485   if (auto *Call = dyn_cast<CallBase>(I))
486     if (isMathLibCallNoop(Call, TLI))
487       return true;
488 
489   return false;
490 }
491 
492 /// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
493 /// trivially dead instruction, delete it.  If that makes any of its operands
494 /// trivially dead, delete them too, recursively.  Return true if any
495 /// instructions were deleted.
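/// For illustration, deleting an unused load can leave its GEP operand unused,
/// which in turn can leave the GEP's pointer operand unused; this routine keeps
/// walking that chain, deleting each instruction once it becomes trivially dead.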
496 bool llvm::RecursivelyDeleteTriviallyDeadInstructions(
497     Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU,
498     std::function<void(Value *)> AboutToDeleteCallback) {
499   Instruction *I = dyn_cast<Instruction>(V);
500   if (!I || !isInstructionTriviallyDead(I, TLI))
501     return false;
502 
503   SmallVector<WeakTrackingVH, 16> DeadInsts;
504   DeadInsts.push_back(I);
505   RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
506                                              AboutToDeleteCallback);
507 
508   return true;
509 }
510 
511 bool llvm::RecursivelyDeleteTriviallyDeadInstructionsPermissive(
512     SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
513     MemorySSAUpdater *MSSAU,
514     std::function<void(Value *)> AboutToDeleteCallback) {
515   unsigned S = 0, E = DeadInsts.size(), Alive = 0;
516   for (; S != E; ++S) {
517     auto *I = cast<Instruction>(DeadInsts[S]);
518     if (!isInstructionTriviallyDead(I)) {
519       DeadInsts[S] = nullptr;
520       ++Alive;
521     }
522   }
523   if (Alive == E)
524     return false;
525   RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
526                                              AboutToDeleteCallback);
527   return true;
528 }
529 
530 void llvm::RecursivelyDeleteTriviallyDeadInstructions(
531     SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
532     MemorySSAUpdater *MSSAU,
533     std::function<void(Value *)> AboutToDeleteCallback) {
534   // Process the dead instruction list until empty.
535   while (!DeadInsts.empty()) {
536     Value *V = DeadInsts.pop_back_val();
537     Instruction *I = cast_or_null<Instruction>(V);
538     if (!I)
539       continue;
540     assert(isInstructionTriviallyDead(I, TLI) &&
541            "Live instruction found in dead worklist!");
542     assert(I->use_empty() && "Instructions with uses are not dead.");
543 
544     // Don't lose the debug info while deleting the instructions.
545     salvageDebugInfo(*I);
546 
547     if (AboutToDeleteCallback)
548       AboutToDeleteCallback(I);
549 
550     // Null out all of the instruction's operands to see if any operand becomes
551     // dead as we go.
552     for (Use &OpU : I->operands()) {
553       Value *OpV = OpU.get();
554       OpU.set(nullptr);
555 
556       if (!OpV->use_empty())
557         continue;
558 
559       // If the operand is an instruction that became dead as we nulled out the
560       // operand, and if it is 'trivially' dead, delete it in a future loop
561       // iteration.
562       if (Instruction *OpI = dyn_cast<Instruction>(OpV))
563         if (isInstructionTriviallyDead(OpI, TLI))
564           DeadInsts.push_back(OpI);
565     }
566     if (MSSAU)
567       MSSAU->removeMemoryAccess(I);
568 
569     I->eraseFromParent();
570   }
571 }
572 
573 bool llvm::replaceDbgUsesWithUndef(Instruction *I) {
574   SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
575   findDbgUsers(DbgUsers, I);
576   for (auto *DII : DbgUsers) {
577     Value *Undef = UndefValue::get(I->getType());
578     DII->replaceVariableLocationOp(I, Undef);
579   }
580   return !DbgUsers.empty();
581 }
582 
583 /// areAllUsesEqual - Check whether the uses of a value are all the same.
584 /// This is similar to Instruction::hasOneUse() except this will also return
585 /// true when there are no uses or multiple uses that all refer to the same
586 /// value.
587 static bool areAllUsesEqual(Instruction *I) {
588   Value::user_iterator UI = I->user_begin();
589   Value::user_iterator UE = I->user_end();
590   if (UI == UE)
591     return true;
592 
593   User *TheUse = *UI;
594   for (++UI; UI != UE; ++UI) {
595     if (*UI != TheUse)
596       return false;
597   }
598   return true;
599 }
600 
601 /// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
602 /// dead PHI node, due to being a def-use chain of single-use nodes that
603 /// either forms a cycle or is terminated by a trivially dead instruction,
604 /// delete it.  If that makes any of its operands trivially dead, delete them
605 /// too, recursively.  Return true if a change was made.
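/// A typical (illustrative) case is a PHI that only feeds itself through a
/// single-use chain, e.g.
///     %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
///     %i.next = add i32 %i, 1     ; only used by the phi
/// If neither value has any other use, the cycle is broken and both are erased.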
606 bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
607                                         const TargetLibraryInfo *TLI,
608                                         llvm::MemorySSAUpdater *MSSAU) {
609   SmallPtrSet<Instruction*, 4> Visited;
610   for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
611        I = cast<Instruction>(*I->user_begin())) {
612     if (I->use_empty())
613       return RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
614 
615     // If we find an instruction more than once, we're on a cycle that
616     // won't prove fruitful.
617     if (!Visited.insert(I).second) {
618       // Break the cycle and delete the instruction and its operands.
619       I->replaceAllUsesWith(UndefValue::get(I->getType()));
620       (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
621       return true;
622     }
623   }
624   return false;
625 }
626 
627 static bool
628 simplifyAndDCEInstruction(Instruction *I,
629                           SmallSetVector<Instruction *, 16> &WorkList,
630                           const DataLayout &DL,
631                           const TargetLibraryInfo *TLI) {
632   if (isInstructionTriviallyDead(I, TLI)) {
633     salvageDebugInfo(*I);
634 
635     // Null out all of the instruction's operands to see if any operand becomes
636     // dead as we go.
637     for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
638       Value *OpV = I->getOperand(i);
639       I->setOperand(i, nullptr);
640 
641       if (!OpV->use_empty() || I == OpV)
642         continue;
643 
644       // If the operand is an instruction that became dead as we nulled out the
645       // operand, and if it is 'trivially' dead, delete it in a future loop
646       // iteration.
647       if (Instruction *OpI = dyn_cast<Instruction>(OpV))
648         if (isInstructionTriviallyDead(OpI, TLI))
649           WorkList.insert(OpI);
650     }
651 
652     I->eraseFromParent();
653 
654     return true;
655   }
656 
657   if (Value *SimpleV = SimplifyInstruction(I, DL)) {
658     // Add the users to the worklist. CAREFUL: an instruction can use itself,
659     // in the case of a phi node.
660     for (User *U : I->users()) {
661       if (U != I) {
662         WorkList.insert(cast<Instruction>(U));
663       }
664     }
665 
666     // Replace the instruction with its simplified value.
667     bool Changed = false;
668     if (!I->use_empty()) {
669       I->replaceAllUsesWith(SimpleV);
670       Changed = true;
671     }
672     if (isInstructionTriviallyDead(I, TLI)) {
673       I->eraseFromParent();
674       Changed = true;
675     }
676     return Changed;
677   }
678   return false;
679 }
680 
681 /// SimplifyInstructionsInBlock - Scan the specified basic block and try to
682 /// simplify any instructions in it and recursively delete dead instructions.
683 ///
684 /// This returns true if it changed the code.  Note that it can delete
685 /// instructions in other blocks as well as in this block.
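/// For example (illustrative), "%y = add i32 %x, 0" simplifies to %x; its uses
/// are rewritten, the now-dead add is erased, and any users that might simplify
/// further are queued on the worklist below.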
686 bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
687                                        const TargetLibraryInfo *TLI) {
688   bool MadeChange = false;
689   const DataLayout &DL = BB->getModule()->getDataLayout();
690 
691 #ifndef NDEBUG
692   // In debug builds, ensure that the terminator of the block is never replaced
693   // or deleted by these simplifications. The idea of simplification is that it
694   // cannot introduce new instructions, and there is no way to replace the
695   // terminator of a block without introducing a new instruction.
696   AssertingVH<Instruction> TerminatorVH(&BB->back());
697 #endif
698 
699   SmallSetVector<Instruction *, 16> WorkList;
700   // Iterate over the original function, only adding insts to the worklist
701   // if they actually need to be revisited. This avoids having to pre-init
702   // the worklist with the entire function's worth of instructions.
703   for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
704        BI != E;) {
705     assert(!BI->isTerminator());
706     Instruction *I = &*BI;
707     ++BI;
708 
709     // We're visiting this instruction now, so make sure it's not in the
710     // worklist from an earlier visit.
711     if (!WorkList.count(I))
712       MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
713   }
714 
715   while (!WorkList.empty()) {
716     Instruction *I = WorkList.pop_back_val();
717     MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
718   }
719   return MadeChange;
720 }
721 
722 //===----------------------------------------------------------------------===//
723 //  Control Flow Graph Restructuring.
724 //
725 
726 void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB,
727                                        DomTreeUpdater *DTU) {
728 
729   // If BB has single-entry PHI nodes, fold them.
730   while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
731     Value *NewVal = PN->getIncomingValue(0);
732     // Replace a self-referencing PHI with undef; it must be dead.
733     if (NewVal == PN) NewVal = UndefValue::get(PN->getType());
734     PN->replaceAllUsesWith(NewVal);
735     PN->eraseFromParent();
736   }
737 
738   BasicBlock *PredBB = DestBB->getSinglePredecessor();
739   assert(PredBB && "Block doesn't have a single predecessor!");
740 
741   bool ReplaceEntryBB = PredBB->isEntryBlock();
742 
743   // DTU updates: Collect all the edges that enter
744   // PredBB. These dominator edges will be redirected to DestBB.
745   SmallVector<DominatorTree::UpdateType, 32> Updates;
746 
747   if (DTU) {
748     SmallPtrSet<BasicBlock *, 2> PredsOfPredBB(pred_begin(PredBB),
749                                                pred_end(PredBB));
750     Updates.reserve(Updates.size() + 2 * PredsOfPredBB.size() + 1);
751     for (BasicBlock *PredOfPredBB : PredsOfPredBB)
752       // This predecessor of PredBB may already have DestBB as a successor.
753       if (PredOfPredBB != PredBB)
754         Updates.push_back({DominatorTree::Insert, PredOfPredBB, DestBB});
755     for (BasicBlock *PredOfPredBB : PredsOfPredBB)
756       Updates.push_back({DominatorTree::Delete, PredOfPredBB, PredBB});
757     Updates.push_back({DominatorTree::Delete, PredBB, DestBB});
758   }
759 
760   // Zap anything that took the address of DestBB.  Not doing this will give the
761   // address an invalid value.
762   if (DestBB->hasAddressTaken()) {
763     BlockAddress *BA = BlockAddress::get(DestBB);
764     Constant *Replacement =
765       ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
766     BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
767                                                      BA->getType()));
768     BA->destroyConstant();
769   }
770 
771   // Anything that branched to PredBB now branches to DestBB.
772   PredBB->replaceAllUsesWith(DestBB);
773 
774   // Splice all the instructions from PredBB to DestBB.
775   PredBB->getTerminator()->eraseFromParent();
776   DestBB->getInstList().splice(DestBB->begin(), PredBB->getInstList());
777   new UnreachableInst(PredBB->getContext(), PredBB);
778 
779   // If the PredBB is the entry block of the function, move DestBB up to
780   // become the entry block after we erase PredBB.
781   if (ReplaceEntryBB)
782     DestBB->moveAfter(PredBB);
783 
784   if (DTU) {
785     assert(PredBB->getInstList().size() == 1 &&
786            isa<UnreachableInst>(PredBB->getTerminator()) &&
787            "The successor list of PredBB isn't empty before "
788            "applying corresponding DTU updates.");
789     DTU->applyUpdatesPermissive(Updates);
790     DTU->deleteBB(PredBB);
791     // Recalculation of DomTree is needed when updating a forward DomTree and
792     // the Entry BB is replaced.
793     if (ReplaceEntryBB && DTU->hasDomTree()) {
794       // The entry block was removed and there is no external interface for
795       // the dominator tree to be notified of this change. In this corner-case
796       // we recalculate the entire tree.
797       DTU->recalculate(*(DestBB->getParent()));
798     }
799   }
800 
801   else {
802     PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr.
803   }
804 }
805 
806 /// Return true if we can choose one of these values to use in place of the
807 /// other. Note that we will always choose the non-undef value to keep.
808 static bool CanMergeValues(Value *First, Value *Second) {
809   return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
810 }
811 
812 /// Return true if we can fold BB, an almost-empty BB ending in an unconditional
813 /// branch to Succ, into Succ.
814 ///
815 /// Assumption: Succ is the single successor for BB.
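/// A conflict arises, for example (illustrative), when some block %P is a
/// predecessor of both BB and Succ, and a PHI in Succ receives one value from
/// %P directly and a different non-undef value through BB; after merging, both
/// values would have to flow in from %P, so the fold is rejected.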
816 static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
817   assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");
818 
819   LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
820                     << Succ->getName() << "\n");
821   // Shortcut: if there is only a single predecessor, it must be BB and merging
822   // is always safe.
823   if (Succ->getSinglePredecessor()) return true;
824 
825   // Make a list of the predecessors of BB
826   SmallPtrSet<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB));
827 
828   // Look at all the phi nodes in Succ, to see if they present a conflict when
829   // merging these blocks
830   for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
831     PHINode *PN = cast<PHINode>(I);
832 
833     // If the incoming value from BB is again a PHINode in
834     // BB which has the same incoming value for *PI as PN does, we can
835     // merge the phi nodes and then the blocks can still be merged
836     PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
837     if (BBPN && BBPN->getParent() == BB) {
838       for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
839         BasicBlock *IBB = PN->getIncomingBlock(PI);
840         if (BBPreds.count(IBB) &&
841             !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
842                             PN->getIncomingValue(PI))) {
843           LLVM_DEBUG(dbgs()
844                      << "Can't fold, phi node " << PN->getName() << " in "
845                      << Succ->getName() << " is conflicting with "
846                      << BBPN->getName() << " with regard to common predecessor "
847                      << IBB->getName() << "\n");
848           return false;
849         }
850       }
851     } else {
852       Value* Val = PN->getIncomingValueForBlock(BB);
853       for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
854         // See if the incoming value for the common predecessor is equal to the
855         // one for BB, in which case this phi node will not prevent the merging
856         // of the block.
857         BasicBlock *IBB = PN->getIncomingBlock(PI);
858         if (BBPreds.count(IBB) &&
859             !CanMergeValues(Val, PN->getIncomingValue(PI))) {
860           LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName()
861                             << " in " << Succ->getName()
862                             << " is conflicting with regard to common "
863                             << "predecessor " << IBB->getName() << "\n");
864           return false;
865         }
866       }
867     }
868   }
869 
870   return true;
871 }
872 
873 using PredBlockVector = SmallVector<BasicBlock *, 16>;
874 using IncomingValueMap = DenseMap<BasicBlock *, Value *>;
875 
876 /// Determines the value to use as the phi node input for a block.
877 ///
878 /// Select between \p OldVal and any value that we know flows from \p BB
879 /// to a particular phi on the basis of which one (if either) is not
880 /// undef. Update IncomingValues based on the selected value.
881 ///
882 /// \param OldVal The value we are considering selecting.
883 /// \param BB The block that the value flows in from.
884 /// \param IncomingValues A map from block-to-value for other phi inputs
885 /// that we have examined.
886 ///
887 /// \returns the selected value.
888 static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
889                                           IncomingValueMap &IncomingValues) {
890   if (!isa<UndefValue>(OldVal)) {
891     assert((!IncomingValues.count(BB) ||
892             IncomingValues.find(BB)->second == OldVal) &&
893            "Expected OldVal to match incoming value from BB!");
894 
895     IncomingValues.insert(std::make_pair(BB, OldVal));
896     return OldVal;
897   }
898 
899   IncomingValueMap::const_iterator It = IncomingValues.find(BB);
900   if (It != IncomingValues.end()) return It->second;
901 
902   return OldVal;
903 }
904 
905 /// Create a map from block to value for the operands of a
906 /// given phi.
907 ///
908 /// Create a map from block to value for each non-undef value flowing
909 /// into \p PN.
910 ///
911 /// \param PN The phi we are collecting the map for.
912 /// \param IncomingValues [out] The map from block to value for this phi.
913 static void gatherIncomingValuesToPhi(PHINode *PN,
914                                       IncomingValueMap &IncomingValues) {
915   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
916     BasicBlock *BB = PN->getIncomingBlock(i);
917     Value *V = PN->getIncomingValue(i);
918 
919     if (!isa<UndefValue>(V))
920       IncomingValues.insert(std::make_pair(BB, V));
921   }
922 }
923 
924 /// Replace the incoming undef values to a phi with the values
925 /// from a block-to-value map.
926 ///
927 /// \param PN The phi we are replacing the undefs in.
928 /// \param IncomingValues A map from block to value.
929 static void replaceUndefValuesInPhi(PHINode *PN,
930                                     const IncomingValueMap &IncomingValues) {
931   SmallVector<unsigned> TrueUndefOps;
932   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
933     Value *V = PN->getIncomingValue(i);
934 
935     if (!isa<UndefValue>(V)) continue;
936 
937     BasicBlock *BB = PN->getIncomingBlock(i);
938     IncomingValueMap::const_iterator It = IncomingValues.find(BB);
939 
940     // Keep track of undef/poison incoming values. Those must match, so we fix
941     // them up below if needed.
942     // Note: this is conservatively correct, but we could try harder and group
943     // the undef values per incoming basic block.
944     if (It == IncomingValues.end()) {
945       TrueUndefOps.push_back(i);
946       continue;
947     }
948 
949     // There is a defined value for this incoming block, so map this undef
950     // incoming value to the defined value.
951     PN->setIncomingValue(i, It->second);
952   }
953 
954   // If there are both undef and poison values incoming, then convert those
955   // values to undef. It is invalid to have different values for the same
956   // incoming block.
957   unsigned PoisonCount = count_if(TrueUndefOps, [&](unsigned i) {
958     return isa<PoisonValue>(PN->getIncomingValue(i));
959   });
960   if (PoisonCount != 0 && PoisonCount != TrueUndefOps.size()) {
961     for (unsigned i : TrueUndefOps)
962       PN->setIncomingValue(i, UndefValue::get(PN->getType()));
963   }
964 }
965 
966 /// Replace a value flowing from a block to a phi with
967 /// potentially multiple instances of that value flowing from the
968 /// block's predecessors to the phi.
969 ///
970 /// \param BB The block with the value flowing into the phi.
971 /// \param BBPreds The predecessors of BB.
972 /// \param PN The phi that we are updating.
973 static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
974                                                 const PredBlockVector &BBPreds,
975                                                 PHINode *PN) {
976   Value *OldVal = PN->removeIncomingValue(BB, false);
977   assert(OldVal && "No entry in PHI for Pred BB!");
978 
979   IncomingValueMap IncomingValues;
980 
981   // We are merging two blocks - BB, and the block containing PN - and
982   // as a result we need to redirect edges from the predecessors of BB
983   // to go to the block containing PN, and update PN
984   // accordingly. Since we allow merging blocks in the case where the
985   // predecessor and successor blocks both share some predecessors,
986   // and where some of those common predecessors might have undef
987   // values flowing into PN, we want to rewrite those values to be
988   // consistent with the non-undef values.
989 
990   gatherIncomingValuesToPhi(PN, IncomingValues);
991 
992   // If this incoming value is one of the PHI nodes in BB, the new entries
993   // in the PHI node are the entries from the old PHI.
994   if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
995     PHINode *OldValPN = cast<PHINode>(OldVal);
996     for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
997       // Note that, since we are merging phi nodes and BB and Succ might
998       // have common predecessors, we could end up with a phi node with
999       // identical incoming branches. This will be cleaned up later (and
1000       // will trigger asserts if we try to clean it up now, without also
1001       // simplifying the corresponding conditional branch).
1002       BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
1003       Value *PredVal = OldValPN->getIncomingValue(i);
1004       Value *Selected = selectIncomingValueForBlock(PredVal, PredBB,
1005                                                     IncomingValues);
1006 
1007       // And add a new incoming value for this predecessor for the
1008       // newly retargeted branch.
1009       PN->addIncoming(Selected, PredBB);
1010     }
1011   } else {
1012     for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
1013       // Update existing incoming values in PN for this
1014       // predecessor of BB.
1015       BasicBlock *PredBB = BBPreds[i];
1016       Value *Selected = selectIncomingValueForBlock(OldVal, PredBB,
1017                                                     IncomingValues);
1018 
1019       // And add a new incoming value for this predecessor for the
1020       // newly retargeted branch.
1021       PN->addIncoming(Selected, PredBB);
1022     }
1023   }
1024 
1025   replaceUndefValuesInPhi(PN, IncomingValues);
1026 }
1027 
1028 bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
1029                                                    DomTreeUpdater *DTU) {
1030   assert(BB != &BB->getParent()->getEntryBlock() &&
1031          "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");
1032 
1033   // We can't eliminate infinite loops.
1034   BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
1035   if (BB == Succ) return false;
1036 
1037   // Check to see if merging these blocks would cause conflicts for any of the
1038   // phi nodes in BB or Succ. If not, we can safely merge.
1039   if (!CanPropagatePredecessorsForPHIs(BB, Succ)) return false;
1040 
1041   // Check for cases where Succ has multiple predecessors and a PHI node in BB
1042   // has uses which will not disappear when the PHI nodes are merged.  It is
1043   // possible to handle such cases, but difficult: it requires checking whether
1044   // BB dominates Succ, which is non-trivial to calculate in the case where
1045   // Succ has multiple predecessors.  Also, it requires checking whether
1046   // constructing the necessary self-referential PHI node doesn't introduce any
1047   // conflicts; this isn't too difficult, but the previous code for doing this
1048   // was incorrect.
1049   //
1050   // Note that if this check finds a live use, BB dominates Succ, so BB is
1051   // something like a loop pre-header (or rarely, a part of an irreducible CFG);
1052   // folding the branch isn't profitable in that case anyway.
1053   if (!Succ->getSinglePredecessor()) {
1054     BasicBlock::iterator BBI = BB->begin();
1055     while (isa<PHINode>(*BBI)) {
1056       for (Use &U : BBI->uses()) {
1057         if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
1058           if (PN->getIncomingBlock(U) != BB)
1059             return false;
1060         } else {
1061           return false;
1062         }
1063       }
1064       ++BBI;
1065     }
1066   }
1067 
1068   // We cannot fold the block if it's a branch to an already present callbr
1069   // successor because that creates duplicate successors.
1070   for (BasicBlock *PredBB : predecessors(BB)) {
1071     if (auto *CBI = dyn_cast<CallBrInst>(PredBB->getTerminator())) {
1072       if (Succ == CBI->getDefaultDest())
1073         return false;
1074       for (unsigned i = 0, e = CBI->getNumIndirectDests(); i != e; ++i)
1075         if (Succ == CBI->getIndirectDest(i))
1076           return false;
1077     }
1078   }
1079 
1080   LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
1081 
1082   SmallVector<DominatorTree::UpdateType, 32> Updates;
1083   if (DTU) {
1084     // All predecessors of BB will be moved to Succ.
1085     SmallPtrSet<BasicBlock *, 8> PredsOfBB(pred_begin(BB), pred_end(BB));
1086     SmallPtrSet<BasicBlock *, 8> PredsOfSucc(pred_begin(Succ), pred_end(Succ));
1087     Updates.reserve(Updates.size() + 2 * PredsOfBB.size() + 1);
1088     for (auto *PredOfBB : PredsOfBB)
1089       // This predecessor of BB may already have Succ as a successor.
1090       if (!PredsOfSucc.contains(PredOfBB))
1091         Updates.push_back({DominatorTree::Insert, PredOfBB, Succ});
1092     for (auto *PredOfBB : PredsOfBB)
1093       Updates.push_back({DominatorTree::Delete, PredOfBB, BB});
1094     Updates.push_back({DominatorTree::Delete, BB, Succ});
1095   }
1096 
1097   if (isa<PHINode>(Succ->begin())) {
1098     // If there is more than one pred of succ, and there are PHI nodes in
1099     // the successor, then we need to add incoming edges for the PHI nodes
1100     //
1101     const PredBlockVector BBPreds(pred_begin(BB), pred_end(BB));
1102 
1103     // Loop over all of the PHI nodes in the successor of BB.
1104     for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
1105       PHINode *PN = cast<PHINode>(I);
1106 
1107       redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN);
1108     }
1109   }
1110 
1111   if (Succ->getSinglePredecessor()) {
1112     // BB is the only predecessor of Succ, so Succ will end up with exactly
1113     // the same predecessors BB had.
1114 
1115     // Copy over any phi, debug or lifetime instruction.
1116     BB->getTerminator()->eraseFromParent();
1117     Succ->getInstList().splice(Succ->getFirstNonPHI()->getIterator(),
1118                                BB->getInstList());
1119   } else {
1120     while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
1121       // We explicitly check for such uses in CanPropagatePredecessorsForPHIs.
1122       assert(PN->use_empty() && "There shouldn't be any uses here!");
1123       PN->eraseFromParent();
1124     }
1125   }
1126 
1127   // If the unconditional branch we replaced contains llvm.loop metadata, we
1128   // add the metadata to the branch instructions in the predecessors.
1129   unsigned LoopMDKind = BB->getContext().getMDKindID("llvm.loop");
1130   Instruction *TI = BB->getTerminator();
1131   if (TI)
1132     if (MDNode *LoopMD = TI->getMetadata(LoopMDKind))
1133       for (BasicBlock *Pred : predecessors(BB))
1134         Pred->getTerminator()->setMetadata(LoopMDKind, LoopMD);
1135 
1136   // For AutoFDO, since BB is going to be removed, we won't be able to sample
1137   // it. To avoid assigning a zero weight for BB, move all its pseudo probes
1138   // into Succ and mark them dangling. This should give the counts inference a
1139   // chance to get a more reasonable weight for BB.
1140   moveAndDanglePseudoProbes(BB, &*Succ->getFirstInsertionPt());
1141 
1142   // Everything that jumped to BB now goes to Succ.
1143   BB->replaceAllUsesWith(Succ);
1144   if (!Succ->hasName()) Succ->takeName(BB);
1145 
1146   // Clear the successor list of BB to match updates applying to DTU later.
1147   if (BB->getTerminator())
1148     BB->getInstList().pop_back();
1149   new UnreachableInst(BB->getContext(), BB);
1150   assert(succ_empty(BB) && "The successor list of BB isn't empty before "
1151                            "applying corresponding DTU updates.");
1152 
1153   if (DTU)
1154     DTU->applyUpdates(Updates);
1155 
1156   DeleteDeadBlock(BB, DTU);
1157 
1158   return true;
1159 }
1160 
1161 static bool EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB) {
1162   // This implementation doesn't currently consider undef operands
1163   // specially. Theoretically, two phis which are identical except for
1164   // one having an undef where the other doesn't could be collapsed.
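  // Illustrative example of the duplicates this removes:
  //     %p1 = phi i32 [ %a, %bb0 ], [ %b, %bb1 ]
  //     %p2 = phi i32 [ %a, %bb0 ], [ %b, %bb1 ]   ; identical to %p1
  // Uses of %p2 are rewritten to use %p1, and %p2 is erased.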
1165 
1166   bool Changed = false;
1167 
1168   // Examine each PHI.
1169   // Note that the increment of I must *NOT* be in the iteration expression, since
1170   // we don't want to immediately advance when we restart from the beginning.
1171   for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I);) {
1172     ++I;
1173     // Is there an identical PHI node in this basic block?
1174     // Note that we only look at the upper triangle of the pairwise comparison;
1175     // we have already checked that the lower-triangle pairs aren't identical.
1176     for (auto J = I; PHINode *DuplicatePN = dyn_cast<PHINode>(J); ++J) {
1177       if (!DuplicatePN->isIdenticalToWhenDefined(PN))
1178         continue;
1179       // A duplicate. Replace this PHI with the base PHI.
1180       ++NumPHICSEs;
1181       DuplicatePN->replaceAllUsesWith(PN);
1182       DuplicatePN->eraseFromParent();
1183       Changed = true;
1184 
1185       // The RAUW can change PHIs that we already visited.
1186       I = BB->begin();
1187       break; // Start over from the beginning.
1188     }
1189   }
1190   return Changed;
1191 }
1192 
1193 static bool EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB) {
1194   // This implementation doesn't currently consider undef operands
1195   // specially. Theoretically, two phis which are identical except for
1196   // one having an undef where the other doesn't could be collapsed.
1197 
1198   struct PHIDenseMapInfo {
1199     static PHINode *getEmptyKey() {
1200       return DenseMapInfo<PHINode *>::getEmptyKey();
1201     }
1202 
1203     static PHINode *getTombstoneKey() {
1204       return DenseMapInfo<PHINode *>::getTombstoneKey();
1205     }
1206 
1207     static bool isSentinel(PHINode *PN) {
1208       return PN == getEmptyKey() || PN == getTombstoneKey();
1209     }
1210 
1211     // WARNING: this logic must be kept in sync with
1212     //          Instruction::isIdenticalToWhenDefined()!
1213     static unsigned getHashValueImpl(PHINode *PN) {
1214       // Compute a hash value on the operands. Instcombine will likely have
1215       // sorted them, which helps expose duplicates, but we have to check all
1216       // the operands to be safe in case instcombine hasn't run.
1217       return static_cast<unsigned>(hash_combine(
1218           hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
1219           hash_combine_range(PN->block_begin(), PN->block_end())));
1220     }
1221 
1222     static unsigned getHashValue(PHINode *PN) {
1223 #ifndef NDEBUG
1224       // If -phicse-debug-hash was specified, return a constant -- this
1225       // will force all hashing to collide, so we'll exhaustively search
1226       // the table for a match, and the assertion in isEqual will fire if
1227       // there's a bug causing equal keys to hash differently.
1228       if (PHICSEDebugHash)
1229         return 0;
1230 #endif
1231       return getHashValueImpl(PN);
1232     }
1233 
1234     static bool isEqualImpl(PHINode *LHS, PHINode *RHS) {
1235       if (isSentinel(LHS) || isSentinel(RHS))
1236         return LHS == RHS;
1237       return LHS->isIdenticalTo(RHS);
1238     }
1239 
1240     static bool isEqual(PHINode *LHS, PHINode *RHS) {
1241       // These comparisons are nontrivial, so assert that equality implies
1242       // hash equality (DenseMap demands this as an invariant).
1243       bool Result = isEqualImpl(LHS, RHS);
1244       assert(!Result || (isSentinel(LHS) && LHS == RHS) ||
1245              getHashValueImpl(LHS) == getHashValueImpl(RHS));
1246       return Result;
1247     }
1248   };
1249 
1250   // Set of unique PHINodes.
1251   DenseSet<PHINode *, PHIDenseMapInfo> PHISet;
1252   PHISet.reserve(4 * PHICSENumPHISmallSize);
1253 
1254   // Examine each PHI.
1255   bool Changed = false;
1256   for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
1257     auto Inserted = PHISet.insert(PN);
1258     if (!Inserted.second) {
1259       // A duplicate. Replace this PHI with its duplicate.
1260       ++NumPHICSEs;
1261       PN->replaceAllUsesWith(*Inserted.first);
1262       PN->eraseFromParent();
1263       Changed = true;
1264 
1265       // The RAUW can change PHIs that we already visited. Start over from the
1266       // beginning.
1267       PHISet.clear();
1268       I = BB->begin();
1269     }
1270   }
1271 
1272   return Changed;
1273 }
1274 
1275 bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
1276   if (
1277 #ifndef NDEBUG
1278       !PHICSEDebugHash &&
1279 #endif
1280       hasNItemsOrLess(BB->phis(), PHICSENumPHISmallSize))
1281     return EliminateDuplicatePHINodesNaiveImpl(BB);
1282   return EliminateDuplicatePHINodesSetBasedImpl(BB);
1283 }
1284 
1285 /// If the specified pointer points to an object that we control, try to modify
1286 /// the object's alignment to PrefAlign. Returns a minimum known alignment of
1287 /// the value after the operation, which may be lower than PrefAlign.
1288 ///
1289 /// Increasing value alignment isn't often possible though. If alignment is
1290 /// important, a more reliable approach is to simply align all global variables
1291 /// and allocation instructions to their preferred alignment from the beginning.
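/// For example (illustrative), given "%buf = alloca [16 x i8], align 4" and a
/// preferred alignment of 16 that does not exceed the natural stack alignment,
/// the alloca is updated to "align 16" and Align(16) is returned.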
1292 static Align tryEnforceAlignment(Value *V, Align PrefAlign,
1293                                  const DataLayout &DL) {
1294   V = V->stripPointerCasts();
1295 
1296   if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1297     // TODO: Ideally, this function would not be called if PrefAlign is smaller
1298     // than the current alignment, as the known bits calculation should have
1299     // already taken it into account. However, this is not always the case,
1300     // as computeKnownBits() has a depth limit, while stripPointerCasts()
1301     // doesn't.
1302     Align CurrentAlign = AI->getAlign();
1303     if (PrefAlign <= CurrentAlign)
1304       return CurrentAlign;
1305 
1306     // If the preferred alignment is greater than the natural stack alignment
1307     // then don't round up. This avoids dynamic stack realignment.
1308     if (DL.exceedsNaturalStackAlignment(PrefAlign))
1309       return CurrentAlign;
1310     AI->setAlignment(PrefAlign);
1311     return PrefAlign;
1312   }
1313 
1314   if (auto *GO = dyn_cast<GlobalObject>(V)) {
1315     // TODO: as above, this shouldn't be necessary.
1316     Align CurrentAlign = GO->getPointerAlignment(DL);
1317     if (PrefAlign <= CurrentAlign)
1318       return CurrentAlign;
1319 
1320     // If there is a large requested alignment and we can, bump up the alignment
1321     // of the global.  If the memory we set aside for the global may not be the
1322     // memory used by the final program then it is impossible for us to reliably
1323     // enforce the preferred alignment.
1324     if (!GO->canIncreaseAlignment())
1325       return CurrentAlign;
1326 
1327     GO->setAlignment(PrefAlign);
1328     return PrefAlign;
1329   }
1330 
1331   return Align(1);
1332 }
1333 
1334 Align llvm::getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign,
1335                                        const DataLayout &DL,
1336                                        const Instruction *CxtI,
1337                                        AssumptionCache *AC,
1338                                        const DominatorTree *DT) {
1339   assert(V->getType()->isPointerTy() &&
1340          "getOrEnforceKnownAlignment expects a pointer!");
1341 
1342   KnownBits Known = computeKnownBits(V, DL, 0, AC, CxtI, DT);
1343   unsigned TrailZ = Known.countMinTrailingZeros();
1344 
1345   // Avoid trouble with ridiculously large TrailZ values, such as
1346   // those computed from a null pointer.
1347   // LLVM doesn't support alignments larger than (1 << MaxAlignmentExponent).
1348   TrailZ = std::min(TrailZ, +Value::MaxAlignmentExponent);
1349 
1350   Align Alignment = Align(1ull << std::min(Known.getBitWidth() - 1, TrailZ));
1351 
1352   if (PrefAlign && *PrefAlign > Alignment)
1353     Alignment = std::max(Alignment, tryEnforceAlignment(V, *PrefAlign, DL));
1354 
1355   // We don't need to make any adjustment.
1356   return Alignment;
1357 }
1358 
1359 ///===---------------------------------------------------------------------===//
1360 ///  Dbg Intrinsic utilities
1361 ///
1362 
1363 /// See if there is a dbg.value intrinsic for DIVar for the PHI node.
1364 static bool PhiHasDebugValue(DILocalVariable *DIVar,
1365                              DIExpression *DIExpr,
1366                              PHINode *APN) {
1367   // Since we can't guarantee that the original dbg.declare intrinsic
1368   // is removed by LowerDbgDeclare(), we need to make sure that we are
1369   // not inserting the same dbg.value intrinsic over and over.
1370   SmallVector<DbgValueInst *, 1> DbgValues;
1371   findDbgValues(DbgValues, APN);
1372   for (auto *DVI : DbgValues) {
1373     assert(is_contained(DVI->getValues(), APN));
1374     if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr))
1375       return true;
1376   }
1377   return false;
1378 }
1379 
1380 /// Check if the alloc size of \p ValTy is large enough to cover the variable
1381 /// (or fragment of the variable) described by \p DII.
1382 ///
1383 /// This is primarily intended as a helper for the different
1384 /// ConvertDebugDeclareToDebugValue functions. The dbg.declare/dbg.addr that is
1385 /// converted describes an alloca'd variable, so we need to use the
1386 /// alloc size of the value when doing the comparison. E.g. an i1 value will be
1387 /// identified as covering an n-bit fragment, if the store size of i1 is at
1388 /// least n bits.
1389 static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII) {
1390   const DataLayout &DL = DII->getModule()->getDataLayout();
1391   TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
1392   if (Optional<uint64_t> FragmentSize = DII->getFragmentSizeInBits()) {
1393     assert(!ValueSize.isScalable() &&
1394            "Fragments don't work on scalable types.");
1395     return ValueSize.getFixedSize() >= *FragmentSize;
1396   }
1397   // We can't always calculate the size of the DI variable (e.g. if it is a
1398   // VLA). Try to use the size of the alloca that the dbg intrinsic describes
1399   // instead.
1400   if (DII->isAddressOfVariable()) {
1401     // DII should have exactly 1 location when it is an address.
1402     assert(DII->getNumVariableLocationOps() == 1 &&
1403            "address of variable must have exactly 1 location operand.");
1404     if (auto *AI =
1405             dyn_cast_or_null<AllocaInst>(DII->getVariableLocationOp(0))) {
1406       if (Optional<TypeSize> FragmentSize = AI->getAllocationSizeInBits(DL)) {
1407         assert(ValueSize.isScalable() == FragmentSize->isScalable() &&
1408                "Both sizes should agree on the scalable flag.");
1409         return TypeSize::isKnownGE(ValueSize, *FragmentSize);
1410       }
1411     }
1412   }
1413   // Could not determine size of variable. Conservatively return false.
1414   return false;
1415 }
1416 
1417 /// Produce a DebugLoc to use for each dbg.declare/inst pair that is promoted
1418 /// to a dbg.value. Because no machine insts can come from debug intrinsics,
1419 /// only the scope and inlinedAt are significant. Zero line numbers are used in
1420 /// case this DebugLoc leaks into any adjacent instructions.
1421 static DebugLoc getDebugValueLoc(DbgVariableIntrinsic *DII, Instruction *Src) {
1422   // Original dbg.declare must have a location.
1423   const DebugLoc &DeclareLoc = DII->getDebugLoc();
1424   MDNode *Scope = DeclareLoc.getScope();
1425   DILocation *InlinedAt = DeclareLoc.getInlinedAt();
1426   // Produce an unknown location with the correct scope / inlinedAt fields.
1427   return DILocation::get(DII->getContext(), 0, 0, Scope, InlinedAt);
1428 }
1429 
1430 /// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
1431 /// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
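///
/// Illustrative IR (sketch): for
///   call void @llvm.dbg.declare(metadata i32* %x.addr, metadata !var,
///                               metadata !DIExpression())
///   store i32 %v, i32* %x.addr
/// a corresponding
///   call void @llvm.dbg.value(metadata i32 %v, metadata !var,
///                             metadata !DIExpression())
/// is inserted immediately before the store, so the variable tracks the
/// stored SSA value rather than the stack slot.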
1432 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1433                                            StoreInst *SI, DIBuilder &Builder) {
1434   assert(DII->isAddressOfVariable());
1435   auto *DIVar = DII->getVariable();
1436   assert(DIVar && "Missing variable");
1437   auto *DIExpr = DII->getExpression();
1438   Value *DV = SI->getValueOperand();
1439 
1440   DebugLoc NewLoc = getDebugValueLoc(DII, SI);
1441 
1442   if (!valueCoversEntireFragment(DV->getType(), DII)) {
1443     // FIXME: If storing to a part of the variable described by the dbg.declare,
1444     // then we want to insert a dbg.value for the corresponding fragment.
1445     LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1446                       << *DII << '\n');
1447     // For now, when there is a store to parts of the variable (but we do not
1448     // know which part) we insert a dbg.value intrinsic to indicate that we
1449     // know nothing about the variable's content.
1450     DV = UndefValue::get(DV->getType());
1451     Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc, SI);
1452     return;
1453   }
1454 
1455   Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc, SI);
1456 }
1457 
1458 /// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
1459 /// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
1460 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1461                                            LoadInst *LI, DIBuilder &Builder) {
1462   auto *DIVar = DII->getVariable();
1463   auto *DIExpr = DII->getExpression();
1464   assert(DIVar && "Missing variable");
1465 
1466   if (!valueCoversEntireFragment(LI->getType(), DII)) {
1467     // FIXME: If only referring to a part of the variable described by the
1468     // dbg.declare, then we want to insert a dbg.value for the corresponding
1469     // fragment.
1470     LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1471                       << *DII << '\n');
1472     return;
1473   }
1474 
1475   DebugLoc NewLoc = getDebugValueLoc(DII, nullptr);
1476 
1477   // We are now tracking the loaded value instead of the address. In the
1478   // future if multi-location support is added to the IR, it might be
1479   // preferable to keep tracking both the loaded value and the original
1480   // address in case the alloca can not be elided.
1481   Instruction *DbgValue = Builder.insertDbgValueIntrinsic(
1482       LI, DIVar, DIExpr, NewLoc, (Instruction *)nullptr);
1483   DbgValue->insertAfter(LI);
1484 }
1485 
1486 /// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
1487 /// llvm.dbg.declare or llvm.dbg.addr intrinsic.
1488 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1489                                            PHINode *APN, DIBuilder &Builder) {
1490   auto *DIVar = DII->getVariable();
1491   auto *DIExpr = DII->getExpression();
1492   assert(DIVar && "Missing variable");
1493 
1494   if (PhiHasDebugValue(DIVar, DIExpr, APN))
1495     return;
1496 
1497   if (!valueCoversEntireFragment(APN->getType(), DII)) {
1498     // FIXME: If only referring to a part of the variable described by the
1499     // dbg.declare, then we want to insert a dbg.value for the corresponding
1500     // fragment.
1501     LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1502                       << *DII << '\n');
1503     return;
1504   }
1505 
1506   BasicBlock *BB = APN->getParent();
1507   auto InsertionPt = BB->getFirstInsertionPt();
1508 
1509   DebugLoc NewLoc = getDebugValueLoc(DII, nullptr);
1510 
1511   // The block may be a catchswitch block, which does not have a valid
1512   // insertion point.
1513   // FIXME: Insert dbg.value markers in the successors when appropriate.
1514   if (InsertionPt != BB->end())
1515     Builder.insertDbgValueIntrinsic(APN, DIVar, DIExpr, NewLoc, &*InsertionPt);
1516 }
1517 
1518 /// Determine whether this alloca is either a VLA or an array.
1519 static bool isArray(AllocaInst *AI) {
1520   return AI->isArrayAllocation() ||
1521          (AI->getAllocatedType() && AI->getAllocatedType()->isArrayTy());
1522 }
1523 
1524 /// Determine whether this alloca is a structure.
1525 static bool isStructure(AllocaInst *AI) {
1526   return AI->getAllocatedType() && AI->getAllocatedType()->isStructTy();
1527 }
1528 
1529 /// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
1530 /// of llvm.dbg.value intrinsics.
1531 bool llvm::LowerDbgDeclare(Function &F) {
1532   bool Changed = false;
1533   DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
1534   SmallVector<DbgDeclareInst *, 4> Dbgs;
1535   for (auto &FI : F)
1536     for (Instruction &BI : FI)
1537       if (auto DDI = dyn_cast<DbgDeclareInst>(&BI))
1538         Dbgs.push_back(DDI);
1539 
1540   if (Dbgs.empty())
1541     return Changed;
1542 
1543   for (auto &I : Dbgs) {
1544     DbgDeclareInst *DDI = I;
1545     AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress());
1546     // If this is an alloca for a scalar variable, insert a dbg.value
1547     // at each load and store to the alloca and erase the dbg.declare.
1548     // The dbg.values allow tracking a variable even if it is not
1549     // stored on the stack, while the dbg.declare can only describe
1550     // the stack slot (and at a lexical-scope granularity). Later
1551     // passes will attempt to elide the stack slot.
1552     if (!AI || isArray(AI) || isStructure(AI))
1553       continue;
1554 
1555     // A volatile load/store means that the alloca can't be elided anyway.
1556     if (llvm::any_of(AI->users(), [](User *U) -> bool {
1557           if (LoadInst *LI = dyn_cast<LoadInst>(U))
1558             return LI->isVolatile();
1559           if (StoreInst *SI = dyn_cast<StoreInst>(U))
1560             return SI->isVolatile();
1561           return false;
1562         }))
1563       continue;
1564 
1565     SmallVector<const Value *, 8> WorkList;
1566     WorkList.push_back(AI);
1567     while (!WorkList.empty()) {
1568       const Value *V = WorkList.pop_back_val();
1569       for (auto &AIUse : V->uses()) {
1570         User *U = AIUse.getUser();
1571         if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1572           if (AIUse.getOperandNo() == 1)
1573             ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
1574         } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
1575           ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
1576         } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
1577           // This is a call by-value or some other instruction that takes a
1578           // pointer to the variable. Insert a *value* intrinsic that describes
1579           // the variable by dereferencing the alloca.
1580           if (!CI->isLifetimeStartOrEnd()) {
1581             DebugLoc NewLoc = getDebugValueLoc(DDI, nullptr);
1582             auto *DerefExpr =
1583                 DIExpression::append(DDI->getExpression(), dwarf::DW_OP_deref);
1584             DIB.insertDbgValueIntrinsic(AI, DDI->getVariable(), DerefExpr,
1585                                         NewLoc, CI);
1586           }
1587         } else if (BitCastInst *BI = dyn_cast<BitCastInst>(U)) {
1588           if (BI->getType()->isPointerTy())
1589             WorkList.push_back(BI);
1590         }
1591       }
1592     }
1593     DDI->eraseFromParent();
1594     Changed = true;
1595   }
1596 
1597   if (Changed)
1598     for (BasicBlock &BB : F)
1599       RemoveRedundantDbgInstrs(&BB);
1600 
1601   return Changed;
1602 }
1603 
1604 /// Propagate dbg.value intrinsics through the newly inserted PHIs.
1605 void llvm::insertDebugValuesForPHIs(BasicBlock *BB,
1606                                     SmallVectorImpl<PHINode *> &InsertedPHIs) {
1607   assert(BB && "No BasicBlock to clone dbg.value(s) from.");
1608   if (InsertedPHIs.size() == 0)
1609     return;
1610 
1611   // Map existing PHI nodes to their dbg.values.
1612   ValueToValueMapTy DbgValueMap;
1613   for (auto &I : *BB) {
1614     if (auto DbgII = dyn_cast<DbgVariableIntrinsic>(&I)) {
1615       for (Value *V : DbgII->location_ops())
1616         if (auto *Loc = dyn_cast_or_null<PHINode>(V))
1617           DbgValueMap.insert({Loc, DbgII});
1618     }
1619   }
1620   if (DbgValueMap.size() == 0)
1621     return;
1622 
1623   // Map a pair of the destination BB and old dbg.value to the new dbg.value,
1624   // so that if a dbg.value is being rewritten to use more than one of the
1625   // inserted PHIs in the same destination BB, we can update the same dbg.value
1626   // with all the new PHIs instead of creating one copy for each.
1627   MapVector<std::pair<BasicBlock *, DbgVariableIntrinsic *>,
1628             DbgVariableIntrinsic *>
1629       NewDbgValueMap;
1630   // Then iterate through the new PHIs and look to see if they use one of the
1631   // previously mapped PHIs. If so, create a new dbg.value intrinsic that will
1632   // propagate the info through the new PHI. If we use more than one new PHI in
1633   // a single destination BB with the same old dbg.value, merge the updates so
1634   // that we get a single new dbg.value with all the new PHIs.
1635   for (auto PHI : InsertedPHIs) {
1636     BasicBlock *Parent = PHI->getParent();
1637     // Avoid inserting an intrinsic into an EH block.
1638     if (Parent->getFirstNonPHI()->isEHPad())
1639       continue;
1640     for (auto VI : PHI->operand_values()) {
1641       auto V = DbgValueMap.find(VI);
1642       if (V != DbgValueMap.end()) {
1643         auto *DbgII = cast<DbgVariableIntrinsic>(V->second);
1644         auto NewDI = NewDbgValueMap.find({Parent, DbgII});
1645         if (NewDI == NewDbgValueMap.end()) {
1646           auto *NewDbgII = cast<DbgVariableIntrinsic>(DbgII->clone());
1647           NewDI = NewDbgValueMap.insert({{Parent, DbgII}, NewDbgII}).first;
1648         }
1649         DbgVariableIntrinsic *NewDbgII = NewDI->second;
1650         // If PHI contains VI as an operand more than once, we may have
1651         // already replaced it in NewDbgII; confirm that it is present.
1652         if (is_contained(NewDbgII->location_ops(), VI))
1653           NewDbgII->replaceVariableLocationOp(VI, PHI);
1654       }
1655     }
1656   }
1657   // Insert the new dbg.values into their destination blocks.
1658   for (auto DI : NewDbgValueMap) {
1659     BasicBlock *Parent = DI.first.first;
1660     auto *NewDbgII = DI.second;
1661     auto InsertionPt = Parent->getFirstInsertionPt();
1662     assert(InsertionPt != Parent->end() && "Ill-formed basic block");
1663     NewDbgII->insertBefore(&*InsertionPt);
1664   }
1665 }
1666 
1667 bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
1668                              DIBuilder &Builder, uint8_t DIExprFlags,
1669                              int Offset) {
1670   auto DbgAddrs = FindDbgAddrUses(Address);
1671   for (DbgVariableIntrinsic *DII : DbgAddrs) {
1672     const DebugLoc &Loc = DII->getDebugLoc();
1673     auto *DIVar = DII->getVariable();
1674     auto *DIExpr = DII->getExpression();
1675     assert(DIVar && "Missing variable");
1676     DIExpr = DIExpression::prepend(DIExpr, DIExprFlags, Offset);
1677     // Insert llvm.dbg.declare immediately before DII, and remove old
1678     // llvm.dbg.declare.
1679     Builder.insertDeclare(NewAddress, DIVar, DIExpr, Loc, DII);
1680     DII->eraseFromParent();
1681   }
1682   return !DbgAddrs.empty();
1683 }
1684 
1685 static void replaceOneDbgValueForAlloca(DbgValueInst *DVI, Value *NewAddress,
1686                                         DIBuilder &Builder, int Offset) {
1687   const DebugLoc &Loc = DVI->getDebugLoc();
1688   auto *DIVar = DVI->getVariable();
1689   auto *DIExpr = DVI->getExpression();
1690   assert(DIVar && "Missing variable");
1691 
1692   // This is an alloca-based llvm.dbg.value. The first thing it should do with
1693   // the alloca pointer is dereference it. Otherwise we don't know how to handle
1694   // it and give up.
1695   if (!DIExpr || DIExpr->getNumElements() < 1 ||
1696       DIExpr->getElement(0) != dwarf::DW_OP_deref)
1697     return;
1698 
1699   // Insert the offset before the first deref.
1700   // We could just change the offset argument of dbg.value, but it's unsigned...
1701   if (Offset)
1702     DIExpr = DIExpression::prepend(DIExpr, 0, Offset);
1703 
1704   Builder.insertDbgValueIntrinsic(NewAddress, DIVar, DIExpr, Loc, DVI);
1705   DVI->eraseFromParent();
1706 }
1707 
1708 void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
1709                                     DIBuilder &Builder, int Offset) {
1710   if (auto *L = LocalAsMetadata::getIfExists(AI))
1711     if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
1712       for (Use &U : llvm::make_early_inc_range(MDV->uses()))
1713         if (auto *DVI = dyn_cast<DbgValueInst>(U.getUser()))
1714           replaceOneDbgValueForAlloca(DVI, NewAllocaAddress, Builder, Offset);
1715 }
1716 
1717 /// Salvage any debug information that refers to \p I where possible.
1718 /// Debug users that cannot be salvaged have their variable location set to undef.
1719 void llvm::salvageDebugInfo(Instruction &I) {
1720   SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
1721   findDbgUsers(DbgUsers, &I);
1722   salvageDebugInfoForDbgValues(I, DbgUsers);
1723 }
1724 
1725 void llvm::salvageDebugInfoForDbgValues(
1726     Instruction &I, ArrayRef<DbgVariableIntrinsic *> DbgUsers) {
1727   bool Salvaged = false;
1728 
1729   for (auto *DII : DbgUsers) {
1730     // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they
1731     // are implicitly pointing out the value as a DWARF memory location
1732     // description.
1733     bool StackValue = isa<DbgValueInst>(DII);
1734     auto DIILocation = DII->location_ops();
1735     assert(
1736         is_contained(DIILocation, &I) &&
1737         "DbgVariableIntrinsic must use salvaged instruction as its location");
1738     unsigned LocNo = std::distance(DIILocation.begin(), find(DIILocation, &I));
1739     SmallVector<Value *, 4> AdditionalValues;
1740     DIExpression *SalvagedExpr = salvageDebugInfoImpl(
1741         I, DII->getExpression(), StackValue, LocNo, AdditionalValues);
1742 
1743     // salvageDebugInfoImpl should either fail when examining the first
1744     // element of DbgUsers, or it should not fail at all.
1745     if (!SalvagedExpr)
1746       break;
1747 
1748     DII->replaceVariableLocationOp(&I, I.getOperand(0));
1749     if (AdditionalValues.empty()) {
1750       DII->setExpression(SalvagedExpr);
1751     } else if (isa<DbgValueInst>(DII)) {
1752       DII->addVariableLocationOps(AdditionalValues, SalvagedExpr);
1753     } else {
1754       // Do not salvage using DIArgList for dbg.addr/dbg.declare, as it is
1755       // currently only valid for stack value expressions.
1756       Value *Undef = UndefValue::get(I.getOperand(0)->getType());
1757       DII->replaceVariableLocationOp(I.getOperand(0), Undef);
1758     }
1759     LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
1760     Salvaged = true;
1761   }
1762 
1763   if (Salvaged)
1764     return;
1765 
1766   for (auto *DII : DbgUsers) {
1767     Value *Undef = UndefValue::get(I.getType());
1768     DII->replaceVariableLocationOp(&I, Undef);
1769   }
1770 }
1771 
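/// Illustrative example (sketch): a constant-offset GEP such as
///   %p = getelementptr i8, i8* %base, i64 8
/// is salvaged by appending a constant offset of 8 to the expression, while a
/// GEP with a variable index additionally pushes that index onto
/// AdditionalValues and references it through DW_OP_LLVM_arg.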
1772 bool getSalvageOpsForGEP(GetElementPtrInst *GEP, const DataLayout &DL,
1773                          uint64_t CurrentLocOps,
1774                          SmallVectorImpl<uint64_t> &Opcodes,
1775                          SmallVectorImpl<Value *> &AdditionalValues) {
1776   unsigned BitWidth = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
1777   // Rewrite a GEP into a DIExpression.
1778   SmallDenseMap<Value *, APInt, 8> VariableOffsets;
1779   APInt ConstantOffset(BitWidth, 0);
1780   if (!GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset))
1781     return false;
1782   if (!VariableOffsets.empty() && !CurrentLocOps) {
1783     Opcodes.insert(Opcodes.begin(), {dwarf::DW_OP_LLVM_arg, 0});
1784     CurrentLocOps = 1;
1785   }
1786   for (auto Offset : VariableOffsets) {
1787     AdditionalValues.push_back(Offset.first);
1788     assert(Offset.second.isStrictlyPositive() &&
1789            "Expected strictly positive multiplier for offset.");
1790     Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps++, dwarf::DW_OP_constu,
1791                     Offset.second.getZExtValue(), dwarf::DW_OP_mul,
1792                     dwarf::DW_OP_plus});
1793   }
1794   DIExpression::appendOffset(Opcodes, ConstantOffset.getSExtValue());
1795   return true;
1796 }
1797 
1798 uint64_t getDwarfOpForBinOp(Instruction::BinaryOps Opcode) {
1799   switch (Opcode) {
1800   case Instruction::Add:
1801     return dwarf::DW_OP_plus;
1802   case Instruction::Sub:
1803     return dwarf::DW_OP_minus;
1804   case Instruction::Mul:
1805     return dwarf::DW_OP_mul;
1806   case Instruction::SDiv:
1807     return dwarf::DW_OP_div;
1808   case Instruction::SRem:
1809     return dwarf::DW_OP_mod;
1810   case Instruction::Or:
1811     return dwarf::DW_OP_or;
1812   case Instruction::And:
1813     return dwarf::DW_OP_and;
1814   case Instruction::Xor:
1815     return dwarf::DW_OP_xor;
1816   case Instruction::Shl:
1817     return dwarf::DW_OP_shl;
1818   case Instruction::LShr:
1819     return dwarf::DW_OP_shr;
1820   case Instruction::AShr:
1821     return dwarf::DW_OP_shra;
1822   default:
1823     // TODO: Salvage from each kind of binop we know about.
1824     return 0;
1825   }
1826 }
1827 
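/// Illustrative example (sketch): `%r = add i64 %x, 4` is salvaged by
/// appending a constant offset of 4 to the expression, while
/// `%r = shl i64 %x, %n` pushes %n onto AdditionalValues and appends
/// {DW_OP_LLVM_arg <index of %n>, DW_OP_shl}.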
1828 bool getSalvageOpsForBinOp(BinaryOperator *BI, uint64_t CurrentLocOps,
1829                            SmallVectorImpl<uint64_t> &Opcodes,
1830                            SmallVectorImpl<Value *> &AdditionalValues) {
1831   // Handle binary operations with constant integer operands as a special case.
1832   auto *ConstInt = dyn_cast<ConstantInt>(BI->getOperand(1));
1833   // Values wider than 64 bits cannot be represented within a DIExpression.
1834   if (ConstInt && ConstInt->getBitWidth() > 64)
1835     return false;
1836 
1837   Instruction::BinaryOps BinOpcode = BI->getOpcode();
1838   // Push any Constant Int operand onto the expression stack.
1839   if (ConstInt) {
1840     uint64_t Val = ConstInt->getSExtValue();
1841     // Add or Sub Instructions with a constant operand can potentially be
1842     // simplified.
1843     if (BinOpcode == Instruction::Add || BinOpcode == Instruction::Sub) {
1844       uint64_t Offset = BinOpcode == Instruction::Add ? Val : -int64_t(Val);
1845       DIExpression::appendOffset(Opcodes, Offset);
1846       return true;
1847     }
1848     Opcodes.append({dwarf::DW_OP_constu, Val});
1849   } else {
1850     if (!CurrentLocOps) {
1851       Opcodes.append({dwarf::DW_OP_LLVM_arg, 0});
1852       CurrentLocOps = 1;
1853     }
1854     Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps});
1855     AdditionalValues.push_back(BI->getOperand(1));
1856   }
1857 
1858   // Add salvaged binary operator to expression stack, if it has a valid
1859   // representation in a DIExpression.
1860   uint64_t DwarfBinOp = getDwarfOpForBinOp(BinOpcode);
1861   if (!DwarfBinOp)
1862     return false;
1863   Opcodes.push_back(DwarfBinOp);
1864 
1865   return true;
1866 }
1867 
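/// Illustrative example (sketch): salvaging `%t = trunc i64 %x to i32` for a
/// dbg.value that refers to %t applies the conversion ops returned by
/// DIExpression::getExtOps(64, 32, /*Signed=*/false), so the debug location
/// can keep describing the value in terms of %x.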
1868 DIExpression *
1869 llvm::salvageDebugInfoImpl(Instruction &I, DIExpression *SrcDIExpr,
1870                            bool WithStackValue, unsigned LocNo,
1871                            SmallVectorImpl<Value *> &AdditionalValues) {
1872   uint64_t CurrentLocOps = SrcDIExpr->getNumLocationOperands();
1873   auto &M = *I.getModule();
1874   auto &DL = M.getDataLayout();
1875 
1876   // Apply a vector of opcodes to the source DIExpression.
1877   auto doSalvage = [&](SmallVectorImpl<uint64_t> &Ops) -> DIExpression * {
1878     DIExpression *DIExpr = SrcDIExpr;
1879     if (!Ops.empty()) {
1880       DIExpr = DIExpression::appendOpsToArg(DIExpr, Ops, LocNo, WithStackValue);
1881     }
1882     return DIExpr;
1883   };
1884 
1885   // initializer-list helper for applying operators to the source DIExpression.
1886   auto applyOps = [&](ArrayRef<uint64_t> Opcodes) {
1887     SmallVector<uint64_t, 8> Ops(Opcodes.begin(), Opcodes.end());
1888     return doSalvage(Ops);
1889   };
1890 
1891   if (auto *CI = dyn_cast<CastInst>(&I)) {
1892     // No-op casts are irrelevant for debug info.
1893     if (CI->isNoopCast(DL))
1894       return SrcDIExpr;
1895 
1896     Type *Type = CI->getType();
1897     // Casts other than Trunc, SExt, or ZExt to scalar types cannot be salvaged.
1898     if (Type->isVectorTy() ||
1899         !(isa<TruncInst>(&I) || isa<SExtInst>(&I) || isa<ZExtInst>(&I)))
1900       return nullptr;
1901 
1902     Value *FromValue = CI->getOperand(0);
1903     unsigned FromTypeBitSize = FromValue->getType()->getScalarSizeInBits();
1904     unsigned ToTypeBitSize = Type->getScalarSizeInBits();
1905 
1906     return applyOps(DIExpression::getExtOps(FromTypeBitSize, ToTypeBitSize,
1907                                             isa<SExtInst>(&I)));
1908   }
1909 
1910   SmallVector<uint64_t, 8> Ops;
1911   if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
1912     if (getSalvageOpsForGEP(GEP, DL, CurrentLocOps, Ops, AdditionalValues))
1913       return doSalvage(Ops);
1914   } else if (auto *BI = dyn_cast<BinaryOperator>(&I)) {
1915     if (getSalvageOpsForBinOp(BI, CurrentLocOps, Ops, AdditionalValues))
1916       return doSalvage(Ops);
1917   }
1918   // *Not* to do: we should not attempt to salvage load instructions,
1919   // because the validity and lifetime of a dbg.value containing
1920   // DW_OP_deref becomes difficult to analyze. See PR40628 for examples.
1921   return nullptr;
1922 }
1923 
1924 /// A replacement for a dbg.value expression.
1925 using DbgValReplacement = Optional<DIExpression *>;
1926 
1927 /// Point debug users of \p From to \p To using exprs given by \p RewriteExpr,
1928 /// possibly moving/undefing users to prevent use-before-def. Returns true if
1929 /// changes are made.
1930 static bool rewriteDebugUsers(
1931     Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT,
1932     function_ref<DbgValReplacement(DbgVariableIntrinsic &DII)> RewriteExpr) {
1933   // Find debug users of From.
1934   SmallVector<DbgVariableIntrinsic *, 1> Users;
1935   findDbgUsers(Users, &From);
1936   if (Users.empty())
1937     return false;
1938 
1939   // Prevent use-before-def of To.
1940   bool Changed = false;
1941   SmallPtrSet<DbgVariableIntrinsic *, 1> UndefOrSalvage;
1942   if (isa<Instruction>(&To)) {
1943     bool DomPointAfterFrom = From.getNextNonDebugInstruction() == &DomPoint;
1944 
1945     for (auto *DII : Users) {
1946       // It's common to see a debug user between From and DomPoint. Move it
1947       // after DomPoint to preserve the variable update without any reordering.
1948       if (DomPointAfterFrom && DII->getNextNonDebugInstruction() == &DomPoint) {
1949         LLVM_DEBUG(dbgs() << "MOVE:  " << *DII << '\n');
1950         DII->moveAfter(&DomPoint);
1951         Changed = true;
1952 
1953       // Users which otherwise aren't dominated by the replacement value must
1954       // be salvaged or deleted.
1955       } else if (!DT.dominates(&DomPoint, DII)) {
1956         UndefOrSalvage.insert(DII);
1957       }
1958     }
1959   }
1960 
1961   // Update debug users without use-before-def risk.
1962   for (auto *DII : Users) {
1963     if (UndefOrSalvage.count(DII))
1964       continue;
1965 
1966     DbgValReplacement DVR = RewriteExpr(*DII);
1967     if (!DVR)
1968       continue;
1969 
1970     DII->replaceVariableLocationOp(&From, &To);
1971     DII->setExpression(*DVR);
1972     LLVM_DEBUG(dbgs() << "REWRITE:  " << *DII << '\n');
1973     Changed = true;
1974   }
1975 
1976   if (!UndefOrSalvage.empty()) {
1977     // Try to salvage the remaining debug users.
1978     salvageDebugInfo(From);
1979     Changed = true;
1980   }
1981 
1982   return Changed;
1983 }
1984 
1985 /// Check if a bitcast between a value of type \p FromTy to type \p ToTy would
1986 /// losslessly preserve the bits and semantics of the value. This predicate is
1987 /// symmetric, i.e. swapping \p FromTy and \p ToTy should give the same result.
1988 ///
1989 /// Note that Type::canLosslesslyBitCastTo is not suitable here because it
1990 /// allows semantically unequivalent bitcasts, such as <2 x i64> -> <4 x i32>,
1991 /// and also does not allow lossless pointer <-> integer conversions.
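///
/// Illustrative example: on a target where address-space-0 pointers are
/// integral and 64 bits wide, an i64 <-> i8* bitcast preserves semantics,
/// while i64 <-> i32 does not (the sizes differ) and anything outside the
/// integer/pointer cases is conservatively rejected.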
1992 static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy,
1993                                          Type *ToTy) {
1994   // Trivially compatible types.
1995   if (FromTy == ToTy)
1996     return true;
1997 
1998   // Handle compatible pointer <-> integer conversions.
1999   if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) {
2000     bool SameSize = DL.getTypeSizeInBits(FromTy) == DL.getTypeSizeInBits(ToTy);
2001     bool LosslessConversion = !DL.isNonIntegralPointerType(FromTy) &&
2002                               !DL.isNonIntegralPointerType(ToTy);
2003     return SameSize && LosslessConversion;
2004   }
2005 
2006   // TODO: This is not exhaustive.
2007   return false;
2008 }
2009 
2010 bool llvm::replaceAllDbgUsesWith(Instruction &From, Value &To,
2011                                  Instruction &DomPoint, DominatorTree &DT) {
2012   // Exit early if From has no debug users.
2013   if (!From.isUsedByMetadata())
2014     return false;
2015 
2016   assert(&From != &To && "Can't replace something with itself");
2017 
2018   Type *FromTy = From.getType();
2019   Type *ToTy = To.getType();
2020 
2021   auto Identity = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
2022     return DII.getExpression();
2023   };
2024 
2025   // Handle no-op conversions.
2026   Module &M = *From.getModule();
2027   const DataLayout &DL = M.getDataLayout();
2028   if (isBitCastSemanticsPreserving(DL, FromTy, ToTy))
2029     return rewriteDebugUsers(From, To, DomPoint, DT, Identity);
2030 
2031   // Handle integer-to-integer widening and narrowing.
2032   // FIXME: Use DW_OP_convert when it's available everywhere.
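  // Illustrative example (sketch): rewriting debug users of an i32 value to
  // refer to an i64 replacement keeps the expression unchanged (a debugger
  // reads only the low 32 bits), whereas rewriting users of an i64 value to
  // refer to an i32 replacement appends a sign- or zero-extension, chosen from
  // the variable's signedness, to describe the original high bits.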
2033   if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) {
2034     uint64_t FromBits = FromTy->getPrimitiveSizeInBits();
2035     uint64_t ToBits = ToTy->getPrimitiveSizeInBits();
2036     assert(FromBits != ToBits && "Unexpected no-op conversion");
2037 
2038     // When the width of the result grows, assume that a debugger will only
2039     // access the low `FromBits` bits when inspecting the source variable.
2040     if (FromBits < ToBits)
2041       return rewriteDebugUsers(From, To, DomPoint, DT, Identity);
2042 
2043     // The width of the result has shrunk. Use sign/zero extension to describe
2044     // the source variable's high bits.
2045     auto SignOrZeroExt = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
2046       DILocalVariable *Var = DII.getVariable();
2047 
2048       // Without knowing signedness, sign/zero extension isn't possible.
2049       auto Signedness = Var->getSignedness();
2050       if (!Signedness)
2051         return None;
2052 
2053       bool Signed = *Signedness == DIBasicType::Signedness::Signed;
2054       return DIExpression::appendExt(DII.getExpression(), ToBits, FromBits,
2055                                      Signed);
2056     };
2057     return rewriteDebugUsers(From, To, DomPoint, DT, SignOrZeroExt);
2058   }
2059 
2060   // TODO: Floating-point conversions, vectors.
2061   return false;
2062 }
2063 
2064 std::pair<unsigned, unsigned>
2065 llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
2066   unsigned NumDeadInst = 0;
2067   unsigned NumDeadDbgInst = 0;
2068   // Delete the instructions backwards; this reduces the number of def-use and
2069   // use-def chains that need to be updated.
2070   Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
2071   while (EndInst != &BB->front()) {
2072     // Delete the next to last instruction.
2073     Instruction *Inst = &*--EndInst->getIterator();
2074     if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
2075       Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
2076     if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
2077       EndInst = Inst;
2078       continue;
2079     }
2080     if (isa<DbgInfoIntrinsic>(Inst))
2081       ++NumDeadDbgInst;
2082     else
2083       ++NumDeadInst;
2084     Inst->eraseFromParent();
2085   }
2086   return {NumDeadInst, NumDeadDbgInst};
2087 }
2088 
2089 unsigned llvm::changeToUnreachable(Instruction *I, bool UseLLVMTrap,
2090                                    bool PreserveLCSSA, DomTreeUpdater *DTU,
2091                                    MemorySSAUpdater *MSSAU) {
2092   BasicBlock *BB = I->getParent();
2093 
2094   if (MSSAU)
2095     MSSAU->changeToUnreachable(I);
2096 
2097   SmallSet<BasicBlock *, 8> UniqueSuccessors;
2098 
2099   // Loop over all of the successors, removing BB's entry from any PHI
2100   // nodes.
2101   for (BasicBlock *Successor : successors(BB)) {
2102     Successor->removePredecessor(BB, PreserveLCSSA);
2103     if (DTU)
2104       UniqueSuccessors.insert(Successor);
2105   }
2106   // Insert a call to llvm.trap right before this.  This turns the undefined
2107   // behavior into a hard fail instead of falling through into random code.
2108   if (UseLLVMTrap) {
2109     Function *TrapFn =
2110       Intrinsic::getDeclaration(BB->getParent()->getParent(), Intrinsic::trap);
2111     CallInst *CallTrap = CallInst::Create(TrapFn, "", I);
2112     CallTrap->setDebugLoc(I->getDebugLoc());
2113   }
2114   auto *UI = new UnreachableInst(I->getContext(), I);
2115   UI->setDebugLoc(I->getDebugLoc());
2116 
2117   // All instructions after this are dead.
2118   unsigned NumInstrsRemoved = 0;
2119   BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
2120   while (BBI != BBE) {
2121     if (!BBI->use_empty())
2122       BBI->replaceAllUsesWith(UndefValue::get(BBI->getType()));
2123     BB->getInstList().erase(BBI++);
2124     ++NumInstrsRemoved;
2125   }
2126   if (DTU) {
2127     SmallVector<DominatorTree::UpdateType, 8> Updates;
2128     Updates.reserve(UniqueSuccessors.size());
2129     for (BasicBlock *UniqueSuccessor : UniqueSuccessors)
2130       Updates.push_back({DominatorTree::Delete, BB, UniqueSuccessor});
2131     DTU->applyUpdates(Updates);
2132   }
2133   return NumInstrsRemoved;
2134 }
2135 
2136 CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) {
2137   SmallVector<Value *, 8> Args(II->args());
2138   SmallVector<OperandBundleDef, 1> OpBundles;
2139   II->getOperandBundlesAsDefs(OpBundles);
2140   CallInst *NewCall = CallInst::Create(II->getFunctionType(),
2141                                        II->getCalledOperand(), Args, OpBundles);
2142   NewCall->setCallingConv(II->getCallingConv());
2143   NewCall->setAttributes(II->getAttributes());
2144   NewCall->setDebugLoc(II->getDebugLoc());
2145   NewCall->copyMetadata(*II);
2146 
2147   // If the invoke had profile metadata, try converting them for CallInst.
2148   uint64_t TotalWeight;
2149   if (NewCall->extractProfTotalWeight(TotalWeight)) {
2150     // Set the total weight if it fits into i32, otherwise reset.
2151     MDBuilder MDB(NewCall->getContext());
2152     auto NewWeights = uint32_t(TotalWeight) != TotalWeight
2153                           ? nullptr
2154                           : MDB.createBranchWeights({uint32_t(TotalWeight)});
2155     NewCall->setMetadata(LLVMContext::MD_prof, NewWeights);
2156   }
2157 
2158   return NewCall;
2159 }
2160 
2161 /// changeToCall - Convert the specified invoke into a normal call.
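/// Illustrative result (sketch): a terminator
///   invoke void @f() to label %normal unwind label %lpad
/// becomes
///   call void @f()
///   br label %normal
/// and %lpad loses this block as a predecessor.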
2162 void llvm::changeToCall(InvokeInst *II, DomTreeUpdater *DTU) {
2163   CallInst *NewCall = createCallMatchingInvoke(II);
2164   NewCall->takeName(II);
2165   NewCall->insertBefore(II);
2166   II->replaceAllUsesWith(NewCall);
2167 
2168   // Follow the call by a branch to the normal destination.
2169   BasicBlock *NormalDestBB = II->getNormalDest();
2170   BranchInst::Create(NormalDestBB, II);
2171 
2172   // Update PHI nodes in the unwind destination
2173   BasicBlock *BB = II->getParent();
2174   BasicBlock *UnwindDestBB = II->getUnwindDest();
2175   UnwindDestBB->removePredecessor(BB);
2176   II->eraseFromParent();
2177   if (DTU)
2178     DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2179 }
2180 
2181 BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
2182                                                    BasicBlock *UnwindEdge,
2183                                                    DomTreeUpdater *DTU) {
2184   BasicBlock *BB = CI->getParent();
2185 
2186   // Convert this function call into an invoke instruction.  First, split the
2187   // basic block.
2188   BasicBlock *Split = SplitBlock(BB, CI, DTU, /*LI=*/nullptr, /*MSSAU*/ nullptr,
2189                                  CI->getName() + ".noexc");
2190 
2191   // Delete the unconditional branch inserted by SplitBlock
2192   BB->getInstList().pop_back();
2193 
2194   // Create the new invoke instruction.
2195   SmallVector<Value *, 8> InvokeArgs(CI->args());
2196   SmallVector<OperandBundleDef, 1> OpBundles;
2197 
2198   CI->getOperandBundlesAsDefs(OpBundles);
2199 
2200   // Note: we're round tripping operand bundles through memory here, and that
2201   // can potentially be avoided with a cleverer API design that we do not have
2202   // as of this time.
2203 
2204   InvokeInst *II =
2205       InvokeInst::Create(CI->getFunctionType(), CI->getCalledOperand(), Split,
2206                          UnwindEdge, InvokeArgs, OpBundles, CI->getName(), BB);
2207   II->setDebugLoc(CI->getDebugLoc());
2208   II->setCallingConv(CI->getCallingConv());
2209   II->setAttributes(CI->getAttributes());
2210 
2211   if (DTU)
2212     DTU->applyUpdates({{DominatorTree::Insert, BB, UnwindEdge}});
2213 
2214   // Make sure that anything using the call now uses the invoke!  This also
2215   // updates the CallGraph if present, because it uses a WeakTrackingVH.
2216   CI->replaceAllUsesWith(II);
2217 
2218   // Delete the original call
2219   Split->getInstList().pop_front();
2220   return Split;
2221 }
2222 
2223 static bool markAliveBlocks(Function &F,
2224                             SmallPtrSetImpl<BasicBlock *> &Reachable,
2225                             DomTreeUpdater *DTU = nullptr) {
2226   SmallVector<BasicBlock*, 128> Worklist;
2227   BasicBlock *BB = &F.front();
2228   Worklist.push_back(BB);
2229   Reachable.insert(BB);
2230   bool Changed = false;
2231   do {
2232     BB = Worklist.pop_back_val();
2233 
2234     // Do a quick scan of the basic block, turning any obviously unreachable
2235     // instructions into LLVM unreachable insts.  The instruction combining pass
2236     // canonicalizes unreachable insts into stores to null or undef.
2237     for (Instruction &I : *BB) {
2238       if (auto *CI = dyn_cast<CallInst>(&I)) {
2239         Value *Callee = CI->getCalledOperand();
2240         // Handle intrinsic calls.
2241         if (Function *F = dyn_cast<Function>(Callee)) {
2242           auto IntrinsicID = F->getIntrinsicID();
2243           // Assumptions that are known to be false are equivalent to
2244           // unreachable. Also, if the condition is undefined, then we make the
2245           // choice most beneficial to the optimizer, and choose that to also be
2246           // unreachable.
2247           if (IntrinsicID == Intrinsic::assume) {
2248             if (match(CI->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
2249               // Don't insert a call to llvm.trap right before the unreachable.
2250               changeToUnreachable(CI, false, false, DTU);
2251               Changed = true;
2252               break;
2253             }
2254           } else if (IntrinsicID == Intrinsic::experimental_guard) {
2255             // A call to the guard intrinsic bails out of the current
2256             // compilation unit if the predicate passed to it is false. If the
2257             // predicate is a constant false, then we know the guard will bail
2258             // out of the current compile unconditionally, so all code following
2259             // it is dead.
2260             //
2261             // Note: unlike in llvm.assume, it is not "obviously profitable" for
2262             // guards to treat `undef` as `false` since a guard on `undef` can
2263             // still be useful for widening.
2264             if (match(CI->getArgOperand(0), m_Zero()))
2265               if (!isa<UnreachableInst>(CI->getNextNode())) {
2266                 changeToUnreachable(CI->getNextNode(), /*UseLLVMTrap=*/false,
2267                                     false, DTU);
2268                 Changed = true;
2269                 break;
2270               }
2271           }
2272         } else if ((isa<ConstantPointerNull>(Callee) &&
2273                     !NullPointerIsDefined(CI->getFunction())) ||
2274                    isa<UndefValue>(Callee)) {
2275           changeToUnreachable(CI, /*UseLLVMTrap=*/false, false, DTU);
2276           Changed = true;
2277           break;
2278         }
2279         if (CI->doesNotReturn() && !CI->isMustTailCall()) {
2280           // If we found a call to a no-return function, insert an unreachable
2281           // instruction after it.  Make sure there isn't *already* one there
2282           // though.
2283           if (!isa<UnreachableInst>(CI->getNextNode())) {
2284             // Don't insert a call to llvm.trap right before the unreachable.
2285             changeToUnreachable(CI->getNextNode(), false, false, DTU);
2286             Changed = true;
2287           }
2288           break;
2289         }
2290       } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
2291         // Store to undef and store to null are undefined and used to signal
2292         // that they should be changed to unreachable by passes that can't
2293         // modify the CFG.
2294 
2295         // Don't touch volatile stores.
2296         if (SI->isVolatile()) continue;
2297 
2298         Value *Ptr = SI->getOperand(1);
2299 
2300         if (isa<UndefValue>(Ptr) ||
2301             (isa<ConstantPointerNull>(Ptr) &&
2302              !NullPointerIsDefined(SI->getFunction(),
2303                                    SI->getPointerAddressSpace()))) {
2304           changeToUnreachable(SI, true, false, DTU);
2305           Changed = true;
2306           break;
2307         }
2308       }
2309     }
2310 
2311     Instruction *Terminator = BB->getTerminator();
2312     if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
2313       // Turn invokes that call 'nounwind' functions into ordinary calls.
2314       Value *Callee = II->getCalledOperand();
2315       if ((isa<ConstantPointerNull>(Callee) &&
2316            !NullPointerIsDefined(BB->getParent())) ||
2317           isa<UndefValue>(Callee)) {
2318         changeToUnreachable(II, true, false, DTU);
2319         Changed = true;
2320       } else if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
2321         if (II->use_empty() && II->onlyReadsMemory()) {
2322           // jump to the normal destination branch.
2323           BasicBlock *NormalDestBB = II->getNormalDest();
2324           BasicBlock *UnwindDestBB = II->getUnwindDest();
2325           BranchInst::Create(NormalDestBB, II);
2326           UnwindDestBB->removePredecessor(II->getParent());
2327           II->eraseFromParent();
2328           if (DTU)
2329             DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2330         } else
2331           changeToCall(II, DTU);
2332         Changed = true;
2333       }
2334     } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
2335       // Remove catchpads which cannot be reached.
2336       struct CatchPadDenseMapInfo {
2337         static CatchPadInst *getEmptyKey() {
2338           return DenseMapInfo<CatchPadInst *>::getEmptyKey();
2339         }
2340 
2341         static CatchPadInst *getTombstoneKey() {
2342           return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
2343         }
2344 
2345         static unsigned getHashValue(CatchPadInst *CatchPad) {
2346           return static_cast<unsigned>(hash_combine_range(
2347               CatchPad->value_op_begin(), CatchPad->value_op_end()));
2348         }
2349 
2350         static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
2351           if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
2352               RHS == getEmptyKey() || RHS == getTombstoneKey())
2353             return LHS == RHS;
2354           return LHS->isIdenticalTo(RHS);
2355         }
2356       };
2357 
2358       SmallDenseMap<BasicBlock *, int, 8> NumPerSuccessorCases;
2359       // Set of unique CatchPads.
2360       SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
2361                     CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
2362           HandlerSet;
2363       detail::DenseSetEmpty Empty;
2364       for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
2365                                              E = CatchSwitch->handler_end();
2366            I != E; ++I) {
2367         BasicBlock *HandlerBB = *I;
2368         if (DTU)
2369           ++NumPerSuccessorCases[HandlerBB];
2370         auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI());
2371         if (!HandlerSet.insert({CatchPad, Empty}).second) {
2372           if (DTU)
2373             --NumPerSuccessorCases[HandlerBB];
2374           CatchSwitch->removeHandler(I);
2375           --I;
2376           --E;
2377           Changed = true;
2378         }
2379       }
2380       if (DTU) {
2381         std::vector<DominatorTree::UpdateType> Updates;
2382         for (const std::pair<BasicBlock *, int> &I : NumPerSuccessorCases)
2383           if (I.second == 0)
2384             Updates.push_back({DominatorTree::Delete, BB, I.first});
2385         DTU->applyUpdates(Updates);
2386       }
2387     }
2388 
2389     Changed |= ConstantFoldTerminator(BB, true, nullptr, DTU);
2390     for (BasicBlock *Successor : successors(BB))
2391       if (Reachable.insert(Successor).second)
2392         Worklist.push_back(Successor);
2393   } while (!Worklist.empty());
2394   return Changed;
2395 }
2396 
2397 void llvm::removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU) {
2398   Instruction *TI = BB->getTerminator();
2399 
2400   if (auto *II = dyn_cast<InvokeInst>(TI)) {
2401     changeToCall(II, DTU);
2402     return;
2403   }
2404 
2405   Instruction *NewTI;
2406   BasicBlock *UnwindDest;
2407 
2408   if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
2409     NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI);
2410     UnwindDest = CRI->getUnwindDest();
2411   } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
2412     auto *NewCatchSwitch = CatchSwitchInst::Create(
2413         CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
2414         CatchSwitch->getName(), CatchSwitch);
2415     for (BasicBlock *PadBB : CatchSwitch->handlers())
2416       NewCatchSwitch->addHandler(PadBB);
2417 
2418     NewTI = NewCatchSwitch;
2419     UnwindDest = CatchSwitch->getUnwindDest();
2420   } else {
2421     llvm_unreachable("Could not find unwind successor");
2422   }
2423 
2424   NewTI->takeName(TI);
2425   NewTI->setDebugLoc(TI->getDebugLoc());
2426   UnwindDest->removePredecessor(BB);
2427   TI->replaceAllUsesWith(NewTI);
2428   TI->eraseFromParent();
2429   if (DTU)
2430     DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDest}});
2431 }
2432 
2433 /// removeUnreachableBlocks - Remove blocks that are not reachable, even
2434 /// if they are in a dead cycle.  Return true if a change was made, false
2435 /// otherwise.
2436 bool llvm::removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU,
2437                                    MemorySSAUpdater *MSSAU) {
2438   SmallPtrSet<BasicBlock *, 16> Reachable;
2439   bool Changed = markAliveBlocks(F, Reachable, DTU);
2440 
2441   // If there are unreachable blocks in the CFG...
2442   if (Reachable.size() == F.size())
2443     return Changed;
2444 
2445   assert(Reachable.size() < F.size());
2446 
2447   // Are there any blocks left to actually delete?
2448   SmallSetVector<BasicBlock *, 8> BlocksToRemove;
2449   for (BasicBlock &BB : F) {
2450     // Skip reachable basic blocks
2451     if (Reachable.count(&BB))
2452       continue;
2453     // Skip already-deleted blocks
2454     if (DTU && DTU->isBBPendingDeletion(&BB))
2455       continue;
2456     BlocksToRemove.insert(&BB);
2457   }
2458 
2459   if (BlocksToRemove.empty())
2460     return Changed;
2461 
2462   Changed = true;
2463   NumRemoved += BlocksToRemove.size();
2464 
2465   if (MSSAU)
2466     MSSAU->removeBlocks(BlocksToRemove);
2467 
2468   DeleteDeadBlocks(BlocksToRemove.takeVector(), DTU);
2469 
2470   return Changed;
2471 }
2472 
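// Illustrative behaviour (sketch): when a pass keeps instruction K in place of
// an equivalent instruction J (e.g. during hoisting or CSE), !tbaa is merged
// to the most generic common TBAA node, !noalias is intersected, and, if K may
// move, !nonnull is dropped from K unless J carries it too.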
2473 void llvm::combineMetadata(Instruction *K, const Instruction *J,
2474                            ArrayRef<unsigned> KnownIDs, bool DoesKMove) {
2475   SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
2476   K->dropUnknownNonDebugMetadata(KnownIDs);
2477   K->getAllMetadataOtherThanDebugLoc(Metadata);
2478   for (const auto &MD : Metadata) {
2479     unsigned Kind = MD.first;
2480     MDNode *JMD = J->getMetadata(Kind);
2481     MDNode *KMD = MD.second;
2482 
2483     switch (Kind) {
2484       default:
2485         K->setMetadata(Kind, nullptr); // Remove unknown metadata
2486         break;
2487       case LLVMContext::MD_dbg:
2488         llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
2489       case LLVMContext::MD_tbaa:
2490         K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
2491         break;
2492       case LLVMContext::MD_alias_scope:
2493         K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
2494         break;
2495       case LLVMContext::MD_noalias:
2496       case LLVMContext::MD_mem_parallel_loop_access:
2497         K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
2498         break;
2499       case LLVMContext::MD_access_group:
2500         K->setMetadata(LLVMContext::MD_access_group,
2501                        intersectAccessGroups(K, J));
2502         break;
2503       case LLVMContext::MD_range:
2504 
2505         // If K does move, use most generic range. Otherwise keep the range of
2506         // K.
2507         if (DoesKMove)
2508           // FIXME: If K does move, we should drop the range info and nonnull.
2509           //        Currently this function is used with DoesKMove in passes
2510           //        doing hoisting/sinking and the current behavior of using the
2511           //        most generic range is correct in those cases.
2512           K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
2513         break;
2514       case LLVMContext::MD_fpmath:
2515         K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
2516         break;
2517       case LLVMContext::MD_invariant_load:
2518         // Only set the !invariant.load if it is present in both instructions.
2519         K->setMetadata(Kind, JMD);
2520         break;
2521       case LLVMContext::MD_nonnull:
2522         // If K does move, keep nonnull if it is present in both instructions.
2523         if (DoesKMove)
2524           K->setMetadata(Kind, JMD);
2525         break;
2526       case LLVMContext::MD_invariant_group:
2527         // Preserve !invariant.group in K.
2528         break;
2529       case LLVMContext::MD_align:
2530         K->setMetadata(Kind,
2531           MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
2532         break;
2533       case LLVMContext::MD_dereferenceable:
2534       case LLVMContext::MD_dereferenceable_or_null:
2535         K->setMetadata(Kind,
2536           MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
2537         break;
2538       case LLVMContext::MD_preserve_access_index:
2539         // Preserve !preserve.access.index in K.
2540         break;
2541     }
2542   }
2543   // Set !invariant.group from J if J has it. If both instructions have it
2544   // then we will just pick it from J - even when they are different.
2545   // Also make sure that K is a load or store - e.g. combining a bitcast with a
2546   // load could produce a bitcast with invariant.group metadata, which is invalid.
2547   // FIXME: we should try to preserve both invariant.group MDs if they are
2548   // different, but right now an instruction can only have one invariant.group.
2549   if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
2550     if (isa<LoadInst>(K) || isa<StoreInst>(K))
2551       K->setMetadata(LLVMContext::MD_invariant_group, JMD);
2552 }
2553 
2554 void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J,
2555                                  bool KDominatesJ) {
2556   unsigned KnownIDs[] = {
2557       LLVMContext::MD_tbaa,            LLVMContext::MD_alias_scope,
2558       LLVMContext::MD_noalias,         LLVMContext::MD_range,
2559       LLVMContext::MD_invariant_load,  LLVMContext::MD_nonnull,
2560       LLVMContext::MD_invariant_group, LLVMContext::MD_align,
2561       LLVMContext::MD_dereferenceable,
2562       LLVMContext::MD_dereferenceable_or_null,
2563       LLVMContext::MD_access_group,    LLVMContext::MD_preserve_access_index};
2564   combineMetadata(K, J, KnownIDs, KDominatesJ);
2565 }
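
// Illustrative sketch: a CSE-style client that has proven two loads redundant
// would typically merge their metadata through the helper above before dropping
// the redundant one. The names below are hypothetical.
//
//   // 'KeptLoad' dominates 'DeadLoad' and both load from the same address.
//   combineMetadataForCSE(KeptLoad, DeadLoad, /*KDominatesJ=*/true);
//   DeadLoad->replaceAllUsesWith(KeptLoad);
//   DeadLoad->eraseFromParent();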
2566 
2567 void llvm::copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source) {
2568   SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
2569   Source.getAllMetadata(MD);
2570   MDBuilder MDB(Dest.getContext());
2571   Type *NewType = Dest.getType();
2572   const DataLayout &DL = Source.getModule()->getDataLayout();
2573   for (const auto &MDPair : MD) {
2574     unsigned ID = MDPair.first;
2575     MDNode *N = MDPair.second;
2576     // Note, essentially every kind of metadata should be preserved here! This
2577     // routine is supposed to clone a load instruction changing *only its type*.
2578     // The only metadata it makes sense to drop is metadata which is invalidated
2579     // when the pointer type changes. This should essentially never be the case
2580     // in LLVM, but we explicitly switch over only known metadata to be
2581     // conservatively correct. If you are adding metadata to LLVM which pertains
2582     // to loads, you almost certainly want to add it here.
2583     switch (ID) {
2584     case LLVMContext::MD_dbg:
2585     case LLVMContext::MD_tbaa:
2586     case LLVMContext::MD_prof:
2587     case LLVMContext::MD_fpmath:
2588     case LLVMContext::MD_tbaa_struct:
2589     case LLVMContext::MD_invariant_load:
2590     case LLVMContext::MD_alias_scope:
2591     case LLVMContext::MD_noalias:
2592     case LLVMContext::MD_nontemporal:
2593     case LLVMContext::MD_mem_parallel_loop_access:
2594     case LLVMContext::MD_access_group:
2595       // All of these directly apply.
2596       Dest.setMetadata(ID, N);
2597       break;
2598 
2599     case LLVMContext::MD_nonnull:
2600       copyNonnullMetadata(Source, N, Dest);
2601       break;
2602 
2603     case LLVMContext::MD_align:
2604     case LLVMContext::MD_dereferenceable:
2605     case LLVMContext::MD_dereferenceable_or_null:
2606       // These only directly apply if the new type is also a pointer.
2607       if (NewType->isPointerTy())
2608         Dest.setMetadata(ID, N);
2609       break;
2610 
2611     case LLVMContext::MD_range:
2612       copyRangeMetadata(DL, Source, N, Dest);
2613       break;
2614     }
2615   }
2616 }
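
// Illustrative sketch: a caller that rewrites a load to a different result type
// is expected to transfer metadata through the helper above rather than copying
// it wholesale. The names below are hypothetical.
//
//   IRBuilder<> Builder(&OldLoad);
//   LoadInst *NewLoad = Builder.CreateAlignedLoad(
//       NewTy, OldLoad.getPointerOperand(), OldLoad.getAlign(), OldLoad.getName());
//   copyMetadataForLoad(*NewLoad, OldLoad); // Drops only what the new type invalidates.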
2617 
2618 void llvm::patchReplacementInstruction(Instruction *I, Value *Repl) {
2619   auto *ReplInst = dyn_cast<Instruction>(Repl);
2620   if (!ReplInst)
2621     return;
2622 
2623   // Patch the replacement so that it is not more restrictive than the value
2624   // being replaced.
2625   // Note that if 'I' is a load being replaced by some operation,
2626   // for example, by an arithmetic operation, then andIRFlags()
2627   // would just erase all math flags from the original arithmetic
2628   // operation, which is clearly not wanted and not needed.
2629   if (!isa<LoadInst>(I))
2630     ReplInst->andIRFlags(I);
2631 
2632   // FIXME: If both the original and replacement value are part of the
2633   // same control-flow region (meaning that the execution of one
2634   // guarantees the execution of the other), then we can combine the
2635   // noalias scopes here and do better than the general conservative
2636   // answer used in combineMetadata().
2637 
2638   // In general, GVN unifies expressions over different control-flow
2639   // regions, and so we need a conservative combination of the noalias
2640   // scopes.
2641   static const unsigned KnownIDs[] = {
2642       LLVMContext::MD_tbaa,            LLVMContext::MD_alias_scope,
2643       LLVMContext::MD_noalias,         LLVMContext::MD_range,
2644       LLVMContext::MD_fpmath,          LLVMContext::MD_invariant_load,
2645       LLVMContext::MD_invariant_group, LLVMContext::MD_nonnull,
2646       LLVMContext::MD_access_group,    LLVMContext::MD_preserve_access_index};
2647   combineMetadata(ReplInst, I, KnownIDs, false);
2648 }
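
// Illustrative sketch: a GVN-style replacement typically patches the surviving
// instruction before RAUW so that it is no more restrictive than the value it
// replaces. The names below are hypothetical.
//
//   patchReplacementInstruction(RedundantInst, AvailableValue);
//   RedundantInst->replaceAllUsesWith(AvailableValue);
//   RedundantInst->eraseFromParent();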
2649 
2650 template <typename RootType, typename DominatesFn>
2651 static unsigned replaceDominatedUsesWith(Value *From, Value *To,
2652                                          const RootType &Root,
2653                                          const DominatesFn &Dominates) {
2654   assert(From->getType() == To->getType());
2655 
2656   unsigned Count = 0;
2657   for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
2658        UI != UE;) {
2659     Use &U = *UI++;
2660     if (!Dominates(Root, U))
2661       continue;
2662     U.set(To);
2663     LLVM_DEBUG(dbgs() << "Replace dominated use of '" << From->getName()
2664                       << "' as " << *To << " in " << *U << "\n");
2665     ++Count;
2666   }
2667   return Count;
2668 }
2669 
2670 unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) {
2671    assert(From->getType() == To->getType());
2672    auto *BB = From->getParent();
2673    unsigned Count = 0;
2674 
2675   for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
2676        UI != UE;) {
2677     Use &U = *UI++;
2678     auto *I = cast<Instruction>(U.getUser());
2679     if (I->getParent() == BB)
2680       continue;
2681     U.set(To);
2682     ++Count;
2683   }
2684   return Count;
2685 }
2686 
2687 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
2688                                         DominatorTree &DT,
2689                                         const BasicBlockEdge &Root) {
2690   auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) {
2691     return DT.dominates(Root, U);
2692   };
2693   return ::replaceDominatedUsesWith(From, To, Root, Dominates);
2694 }
2695 
2696 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
2697                                         DominatorTree &DT,
2698                                         const BasicBlock *BB) {
2699   auto Dominates = [&DT](const BasicBlock *BB, const Use &U) {
2700     return DT.dominates(BB, U);
2701   };
2702   return ::replaceDominatedUsesWith(From, To, BB, Dominates);
2703 }
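
// Illustrative sketch: after establishing a fact on one side of a branch (say,
// that %x equals %y on the true edge), a pass can rewrite only the uses
// dominated by that edge. The names below are hypothetical.
//
//   BasicBlockEdge TrueEdge(BI->getParent(), BI->getSuccessor(0));
//   unsigned NumReplaced = replaceDominatedUsesWith(X, Y, DT, TrueEdge);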
2704 
2705 bool llvm::callsGCLeafFunction(const CallBase *Call,
2706                                const TargetLibraryInfo &TLI) {
2707   // Check if the function is specifically marked as a gc leaf function.
2708   if (Call->hasFnAttr("gc-leaf-function"))
2709     return true;
2710   if (const Function *F = Call->getCalledFunction()) {
2711     if (F->hasFnAttribute("gc-leaf-function"))
2712       return true;
2713 
2714     if (auto IID = F->getIntrinsicID()) {
2715       // Most LLVM intrinsics do not take safepoints.
2716       return IID != Intrinsic::experimental_gc_statepoint &&
2717              IID != Intrinsic::experimental_deoptimize &&
2718              IID != Intrinsic::memcpy_element_unordered_atomic &&
2719              IID != Intrinsic::memmove_element_unordered_atomic;
2720     }
2721   }
2722 
2723   // Lib calls can be materialized by some passes, and won't be
2724   // marked as 'gc-leaf-function'. All available Libcalls are
2725   // GC-leaf.
2726   LibFunc LF;
2727   if (TLI.getLibFunc(*Call, LF)) {
2728     return TLI.has(LF);
2729   }
2730 
2731   return false;
2732 }
2733 
2734 void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N,
2735                                LoadInst &NewLI) {
2736   auto *NewTy = NewLI.getType();
2737 
2738   // This only directly applies if the new type is also a pointer.
2739   if (NewTy->isPointerTy()) {
2740     NewLI.setMetadata(LLVMContext::MD_nonnull, N);
2741     return;
2742   }
2743 
2744   // The only other translation we can do is to integral loads with !range
2745   // metadata.
2746   if (!NewTy->isIntegerTy())
2747     return;
2748 
2749   MDBuilder MDB(NewLI.getContext());
2750   const Value *Ptr = OldLI.getPointerOperand();
2751   auto *ITy = cast<IntegerType>(NewTy);
2752   auto *NullInt = ConstantExpr::getPtrToInt(
2753       ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
2754   auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
2755   NewLI.setMetadata(LLVMContext::MD_range,
2756                     MDB.createRange(NonNullInt, NullInt));
2757 }
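
// For illustration: when a '!nonnull' pointer load is rewritten as an integer
// load (with the pointer operand suitably cast), the helper above re-expresses
// the fact as a wrapping '!range' that excludes zero. Hypothetical IR:
//
//   %p = load i8*, i8** %q, !nonnull !0
//     ==>
//   %v = load i64, i64* %q2, !range !{i64 1, i64 0}   ; any value except 0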
2758 
2759 void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
2760                              MDNode *N, LoadInst &NewLI) {
2761   auto *NewTy = NewLI.getType();
2762 
2763   // Give up unless the load is being converted to a pointer type; that is the
2764   // one case with a single, very valuable mapping we can do reliably.
2765   // FIXME: It would be nice to propagate this in more ways, but the type
2766   // conversions make it hard.
2767   if (!NewTy->isPointerTy())
2768     return;
2769 
2770   unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
2771   if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
2772     MDNode *NN = MDNode::get(OldLI.getContext(), None);
2773     NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
2774   }
2775 }
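
// For illustration, the reverse of the translation done by copyNonnullMetadata:
// if an integer load carrying a '!range' that excludes zero is rewritten as a
// pointer load, the only fact we can keep is non-nullness. Hypothetical IR:
//
//   %v = load i64, i64* %q, !range !{i64 1, i64 256}
//     ==>
//   %p = load i8*, i8** %q2, !nonnull !0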
2776 
2777 void llvm::dropDebugUsers(Instruction &I) {
2778   SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
2779   findDbgUsers(DbgUsers, &I);
2780   for (auto *DII : DbgUsers)
2781     DII->eraseFromParent();
2782 }
2783 
2784 void llvm::hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt,
2785                                     BasicBlock *BB) {
2786   // Since we are moving the instructions out of their basic block, we do not
2787   // retain their original debug locations (DILocations) or their debug
2788   // intrinsic instructions.
2789   //
2790   // Doing so would degrade the debugging experience and adversely affect the
2791   // accuracy of profiling information.
2792   //
2793   // Currently, when hoisting the instructions, we take the following actions:
2794   // - Remove their debug intrinsic instructions.
2795   // - Set their debug locations to the values from the insertion point.
2796   //
2797   // As per PR39141 (comment #8), the more fundamental reason why the dbg.values
2798   // need to be deleted is that there will not be any instructions with a
2799   // DILocation left in either branch after performing the transformation. We
2800   // can only insert a dbg.value after the two branches are joined again.
2801   //
2802   // See PR38762, PR39243 for more details.
2803   //
2804   // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to
2805   // encode predicated DIExpressions that yield different results on different
2806   // code paths.
2807 
2808   // A hoisted conditional probe should be treated as dangling so that it will
2809   // not be over-counted when the samples collected on the non-conditional path
2810   // are counted towards the conditional path. We leave it for the counts
2811   // inference algorithm to figure out a proper count for a dangling probe.
2812   moveAndDanglePseudoProbes(BB, InsertPt);
2813 
2814   for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
2815     Instruction *I = &*II;
2816     I->dropUnknownNonDebugMetadata();
2817     if (I->isUsedByMetadata())
2818       dropDebugUsers(*I);
2819     if (isa<DbgInfoIntrinsic>(I)) {
2820       // Remove DbgInfo Intrinsics.
2821       II = I->eraseFromParent();
2822       continue;
2823     }
2824     I->setDebugLoc(InsertPt->getDebugLoc());
2825     ++II;
2826   }
2827   DomBlock->getInstList().splice(InsertPt->getIterator(), BB->getInstList(),
2828                                  BB->begin(),
2829                                  BB->getTerminator()->getIterator());
2830 }
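
// Illustrative sketch: SimplifyCFG-style if-conversion can use the helper above
// to speculatively hoist both arms of a diamond above the branch before
// selecting between the joined values. The names below are hypothetical.
//
//   hoistAllInstructionsInto(DomBlock, Branch, ThenBB);
//   hoistAllInstructionsInto(DomBlock, Branch, ElseBB);
//   // ...followed by building select instructions for the merged PHI values.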
2831 
2832 namespace {
2833 
2834 /// A potential constituent of a bitreverse or bswap expression. See
2835 /// collectBitParts for a fuller explanation.
2836 struct BitPart {
2837   BitPart(Value *P, unsigned BW) : Provider(P) {
2838     Provenance.resize(BW);
2839   }
2840 
2841   /// The Value that this is a bitreverse/bswap of.
2842   Value *Provider;
2843 
2844   /// The "provenance" of each bit. Provenance[A] = B means that bit A of the
2845   /// result of this expression comes from bit B of Provider.
2846   SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.
2847 
2848   enum { Unset = -1 };
2849 };
2850 
2851 } // end anonymous namespace
2852 
2853 /// Analyze the specified subexpression and see if it is capable of providing
2854 /// pieces of a bswap or bitreverse. The subexpression provides a potential
2855 /// piece of a bswap or bitreverse if it can be proved that each non-zero bit in
2856 /// the output of the expression came from a corresponding bit in some other
2857 /// value. This function is recursive, and the end result is a mapping of
2858 /// bitnumber to bitnumber. It is the caller's responsibility to validate that
2859 /// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
2860 ///
2861 /// For example, if the current subexpression is "(shl i32 %X, 24)" then we know
2862 /// that the expression deposits the low byte of %X into the high byte of the
2863 /// result and that all other bits are zero. This expression is accepted and a
2864 /// BitPart is returned with Provider set to %X and Provenance[24-31] set to
2865 /// [0-7].
2866 ///
2867 /// For vector types, all analysis is performed at the per-element level. No
2868 /// cross-element analysis is supported (shuffle/insertion/reduction), and all
2869 /// constant masks must be splatted across all elements.
2870 ///
2871 /// To avoid revisiting values, the BitPart results are memoized into the
2872 /// provided map. To avoid unnecessary copying of BitParts, BitParts are
2873 /// constructed in-place in the \c BPS map. Because of this \c BPS needs to
2874 /// store BitPart objects, not pointers. As we need the concept of a null
2875 /// BitPart (the Value has been analyzed and the analysis failed), we use an
2876 /// Optional type instead to provide the same functionality.
2877 ///
2878 /// Because we pass around references into \c BPS, we must use a container that
2879 /// does not invalidate internal references (std::map instead of DenseMap).
2880 static const Optional<BitPart> &
2881 collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
2882                 std::map<Value *, Optional<BitPart>> &BPS, int Depth,
2883                 bool &FoundRoot) {
2884   auto I = BPS.find(V);
2885   if (I != BPS.end())
2886     return I->second;
2887 
2888   auto &Result = BPS[V] = None;
2889   auto BitWidth = V->getType()->getScalarSizeInBits();
2890 
2891   // Can't do integer/elements > 128 bits.
2892   if (BitWidth > 128)
2893     return Result;
2894 
2895   // Prevent stack overflow by limiting the recursion depth
2896   if (Depth == BitPartRecursionMaxDepth) {
2897     LLVM_DEBUG(dbgs() << "collectBitParts max recursion depth reached.\n");
2898     return Result;
2899   }
2900 
2901   if (auto *I = dyn_cast<Instruction>(V)) {
2902     Value *X, *Y;
2903     const APInt *C;
2904 
2905     // If this is an or instruction, it may be an inner node of the bswap.
2906     if (match(V, m_Or(m_Value(X), m_Value(Y)))) {
2907       // Check we have both sources and they are from the same provider.
2908       const auto &A = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
2909                                       Depth + 1, FoundRoot);
2910       if (!A || !A->Provider)
2911         return Result;
2912 
2913       const auto &B = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
2914                                       Depth + 1, FoundRoot);
2915       if (!B || A->Provider != B->Provider)
2916         return Result;
2917 
2918       // Try and merge the two together.
2919       Result = BitPart(A->Provider, BitWidth);
2920       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) {
2921         if (A->Provenance[BitIdx] != BitPart::Unset &&
2922             B->Provenance[BitIdx] != BitPart::Unset &&
2923             A->Provenance[BitIdx] != B->Provenance[BitIdx])
2924           return Result = None;
2925 
2926         if (A->Provenance[BitIdx] == BitPart::Unset)
2927           Result->Provenance[BitIdx] = B->Provenance[BitIdx];
2928         else
2929           Result->Provenance[BitIdx] = A->Provenance[BitIdx];
2930       }
2931 
2932       return Result;
2933     }
2934 
2935     // If this is a logical shift by a constant, recurse then shift the result.
2936     if (match(V, m_LogicalShift(m_Value(X), m_APInt(C)))) {
2937       const APInt &BitShift = *C;
2938 
2939       // Ensure the shift amount is defined.
2940       if (BitShift.uge(BitWidth))
2941         return Result;
2942 
2943       // For bswap-only, limit shift amounts to whole bytes, for an early exit.
2944       if (!MatchBitReversals && (BitShift.getZExtValue() % 8) != 0)
2945         return Result;
2946 
2947       const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
2948                                         Depth + 1, FoundRoot);
2949       if (!Res)
2950         return Result;
2951       Result = Res;
2952 
2953       // Perform the "shift" on BitProvenance.
2954       auto &P = Result->Provenance;
2955       if (I->getOpcode() == Instruction::Shl) {
2956         P.erase(std::prev(P.end(), BitShift.getZExtValue()), P.end());
2957         P.insert(P.begin(), BitShift.getZExtValue(), BitPart::Unset);
2958       } else {
2959         P.erase(P.begin(), std::next(P.begin(), BitShift.getZExtValue()));
2960         P.insert(P.end(), BitShift.getZExtValue(), BitPart::Unset);
2961       }
2962 
2963       return Result;
2964     }
2965 
2966     // If this is a logical 'and' with a mask that clears bits, recurse then
2967     // unset the appropriate bits.
2968     if (match(V, m_And(m_Value(X), m_APInt(C)))) {
2969       const APInt &AndMask = *C;
2970 
2971       // Check that the mask allows a multiple of 8 bits for a bswap, for an
2972       // early exit.
2973       unsigned NumMaskedBits = AndMask.countPopulation();
2974       if (!MatchBitReversals && (NumMaskedBits % 8) != 0)
2975         return Result;
2976 
2977       const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
2978                                         Depth + 1, FoundRoot);
2979       if (!Res)
2980         return Result;
2981       Result = Res;
2982 
2983       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
2984         // If the AndMask is zero for this bit, clear the bit.
2985         if (AndMask[BitIdx] == 0)
2986           Result->Provenance[BitIdx] = BitPart::Unset;
2987       return Result;
2988     }
2989 
2990     // If this is a zext instruction zero extend the result.
2991     if (match(V, m_ZExt(m_Value(X)))) {
2992       const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
2993                                         Depth + 1, FoundRoot);
2994       if (!Res)
2995         return Result;
2996 
2997       Result = BitPart(Res->Provider, BitWidth);
2998       auto NarrowBitWidth = X->getType()->getScalarSizeInBits();
2999       for (unsigned BitIdx = 0; BitIdx < NarrowBitWidth; ++BitIdx)
3000         Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3001       for (unsigned BitIdx = NarrowBitWidth; BitIdx < BitWidth; ++BitIdx)
3002         Result->Provenance[BitIdx] = BitPart::Unset;
3003       return Result;
3004     }
3005 
3006     // If this is a truncate instruction, extract the lower bits.
3007     if (match(V, m_Trunc(m_Value(X)))) {
3008       const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3009                                         Depth + 1, FoundRoot);
3010       if (!Res)
3011         return Result;
3012 
3013       Result = BitPart(Res->Provider, BitWidth);
3014       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3015         Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3016       return Result;
3017     }
3018 
3019     // BITREVERSE - most likely due to us previously matching a partial
3020     // bitreverse.
3021     if (match(V, m_BitReverse(m_Value(X)))) {
3022       const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3023                                         Depth + 1, FoundRoot);
3024       if (!Res)
3025         return Result;
3026 
3027       Result = BitPart(Res->Provider, BitWidth);
3028       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3029         Result->Provenance[(BitWidth - 1) - BitIdx] = Res->Provenance[BitIdx];
3030       return Result;
3031     }
3032 
3033     // BSWAP - most likely due to us previously matching a partial bswap.
3034     if (match(V, m_BSwap(m_Value(X)))) {
3035       const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3036                                         Depth + 1, FoundRoot);
3037       if (!Res)
3038         return Result;
3039 
3040       unsigned ByteWidth = BitWidth / 8;
3041       Result = BitPart(Res->Provider, BitWidth);
3042       for (unsigned ByteIdx = 0; ByteIdx < ByteWidth; ++ByteIdx) {
3043         unsigned ByteBitOfs = ByteIdx * 8;
3044         for (unsigned BitIdx = 0; BitIdx < 8; ++BitIdx)
3045           Result->Provenance[(BitWidth - 8 - ByteBitOfs) + BitIdx] =
3046               Res->Provenance[ByteBitOfs + BitIdx];
3047       }
3048       return Result;
3049     }
3050 
3051     // Funnel 'double' shifts take 3 operands, 2 inputs and the shift
3052     // amount (modulo).
3053     // fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
3054     // fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
3055     if (match(V, m_FShl(m_Value(X), m_Value(Y), m_APInt(C))) ||
3056         match(V, m_FShr(m_Value(X), m_Value(Y), m_APInt(C)))) {
3057       // We can treat fshr as a fshl by flipping the modulo amount.
3058       unsigned ModAmt = C->urem(BitWidth);
3059       if (cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::fshr)
3060         ModAmt = BitWidth - ModAmt;
3061 
3062       // For bswap-only, limit shift amounts to whole bytes, for an early exit.
3063       if (!MatchBitReversals && (ModAmt % 8) != 0)
3064         return Result;
3065 
3066       // Check we have both sources and they are from the same provider.
3067       const auto &LHS = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3068                                         Depth + 1, FoundRoot);
3069       if (!LHS || !LHS->Provider)
3070         return Result;
3071 
3072       const auto &RHS = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
3073                                         Depth + 1, FoundRoot);
3074       if (!RHS || LHS->Provider != RHS->Provider)
3075         return Result;
3076 
3077       unsigned StartBitRHS = BitWidth - ModAmt;
3078       Result = BitPart(LHS->Provider, BitWidth);
3079       for (unsigned BitIdx = 0; BitIdx < StartBitRHS; ++BitIdx)
3080         Result->Provenance[BitIdx + ModAmt] = LHS->Provenance[BitIdx];
3081       for (unsigned BitIdx = 0; BitIdx < ModAmt; ++BitIdx)
3082         Result->Provenance[BitIdx] = RHS->Provenance[BitIdx + StartBitRHS];
3083       return Result;
3084     }
3085   }
3086 
3087   // If we've already found a root input value then we're never going to merge
3088   // these back together.
3089   if (FoundRoot)
3090     return Result;
3091 
3092   // Okay, we got to something that isn't a shift, 'or', 'and', etc. This must
3093   // be the root input value to the bswap/bitreverse.
3094   FoundRoot = true;
3095   Result = BitPart(V, BitWidth);
3096   for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3097     Result->Provenance[BitIdx] = BitIdx;
3098   return Result;
3099 }
3100 
3101 static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
3102                                           unsigned BitWidth) {
3103   if (From % 8 != To % 8)
3104     return false;
3105   // Convert from bit indices to byte indices and check for a byte reversal.
3106   From >>= 3;
3107   To >>= 3;
3108   BitWidth >>= 3;
3109   return From == BitWidth - To - 1;
3110 }
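
// Worked example for the check above (for illustration): with BitWidth = 32,
// the mapping From = 5 -> To = 29 is accepted for bswap, because both are bit 5
// within their byte (5 % 8 == 29 % 8) and byte 0 lands in byte 3, i.e.
// 0 == (32/8) - 3 - 1. A mapping such as 5 -> 28 fails the first test.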
3111 
3112 static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
3113                                                unsigned BitWidth) {
3114   return From == BitWidth - To - 1;
3115 }
3116 
3117 bool llvm::recognizeBSwapOrBitReverseIdiom(
3118     Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
3119     SmallVectorImpl<Instruction *> &InsertedInsts) {
3120   if (!match(I, m_Or(m_Value(), m_Value())) &&
3121       !match(I, m_FShl(m_Value(), m_Value(), m_Value())) &&
3122       !match(I, m_FShr(m_Value(), m_Value(), m_Value())))
3123     return false;
3124   if (!MatchBSwaps && !MatchBitReversals)
3125     return false;
3126   Type *ITy = I->getType();
3127   if (!ITy->isIntOrIntVectorTy() || ITy->getScalarSizeInBits() > 128)
3128     return false;  // Can't do integer/elements > 128 bits.
3129 
3130   Type *DemandedTy = ITy;
3131   if (I->hasOneUse())
3132     if (auto *Trunc = dyn_cast<TruncInst>(I->user_back()))
3133       DemandedTy = Trunc->getType();
3134 
3135   // Try to find all the pieces corresponding to the bswap.
3136   bool FoundRoot = false;
3137   std::map<Value *, Optional<BitPart>> BPS;
3138   const auto &Res =
3139       collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS, 0, FoundRoot);
3140   if (!Res)
3141     return false;
3142   ArrayRef<int8_t> BitProvenance = Res->Provenance;
3143   assert(all_of(BitProvenance,
3144                 [](int8_t I) { return I == BitPart::Unset || 0 <= I; }) &&
3145          "Illegal bit provenance index");
3146 
3147   // If the upper bits are zero, then attempt to perform as a truncated op.
3148   if (BitProvenance.back() == BitPart::Unset) {
3149     while (!BitProvenance.empty() && BitProvenance.back() == BitPart::Unset)
3150       BitProvenance = BitProvenance.drop_back();
3151     if (BitProvenance.empty())
3152       return false; // TODO - handle null value?
3153     DemandedTy = Type::getIntNTy(I->getContext(), BitProvenance.size());
3154     if (auto *IVecTy = dyn_cast<VectorType>(ITy))
3155       DemandedTy = VectorType::get(DemandedTy, IVecTy);
3156   }
3157 
3158   // Check BitProvenance hasn't found a source larger than the result type.
3159   unsigned DemandedBW = DemandedTy->getScalarSizeInBits();
3160   if (DemandedBW > ITy->getScalarSizeInBits())
3161     return false;
3162 
3163   // Now, is the bit permutation correct for a bswap or a bitreverse? We can
3164   // only byteswap values with an even number of bytes.
3165   APInt DemandedMask = APInt::getAllOnesValue(DemandedBW);
3166   bool OKForBSwap = MatchBSwaps && (DemandedBW % 16) == 0;
3167   bool OKForBitReverse = MatchBitReversals;
3168   for (unsigned BitIdx = 0;
3169        (BitIdx < DemandedBW) && (OKForBSwap || OKForBitReverse); ++BitIdx) {
3170     if (BitProvenance[BitIdx] == BitPart::Unset) {
3171       DemandedMask.clearBit(BitIdx);
3172       continue;
3173     }
3174     OKForBSwap &= bitTransformIsCorrectForBSwap(BitProvenance[BitIdx], BitIdx,
3175                                                 DemandedBW);
3176     OKForBitReverse &= bitTransformIsCorrectForBitReverse(BitProvenance[BitIdx],
3177                                                           BitIdx, DemandedBW);
3178   }
3179 
3180   Intrinsic::ID Intrin;
3181   if (OKForBSwap)
3182     Intrin = Intrinsic::bswap;
3183   else if (OKForBitReverse)
3184     Intrin = Intrinsic::bitreverse;
3185   else
3186     return false;
3187 
3188   Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy);
3189   Value *Provider = Res->Provider;
3190 
3191   // We may need to truncate the provider.
3192   if (DemandedTy != Provider->getType()) {
3193     auto *Trunc =
3194         CastInst::CreateIntegerCast(Provider, DemandedTy, false, "trunc", I);
3195     InsertedInsts.push_back(Trunc);
3196     Provider = Trunc;
3197   }
3198 
3199   Instruction *Result = CallInst::Create(F, Provider, "rev", I);
3200   InsertedInsts.push_back(Result);
3201 
3202   if (!DemandedMask.isAllOnesValue()) {
3203     auto *Mask = ConstantInt::get(DemandedTy, DemandedMask);
3204     Result = BinaryOperator::Create(Instruction::And, Result, Mask, "mask", I);
3205     InsertedInsts.push_back(Result);
3206   }
3207 
3208   // We may need to zeroextend back to the result type.
3209   if (ITy != Result->getType()) {
3210     auto *ExtInst = CastInst::CreateIntegerCast(Result, ITy, false, "zext", I);
3211     InsertedInsts.push_back(ExtInst);
3212   }
3213 
3214   return true;
3215 }
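
// For illustration, a 16-bit bswap idiom this routine is intended to collapse
// (hypothetical IR): the two shifts deposit each byte of %x into the other
// byte's position, so the or-tree folds to a single intrinsic call.
//
//   %hi  = shl i16 %x, 8
//   %lo  = lshr i16 %x, 8
//   %rev = or i16 %hi, %lo
//     ==>
//   %rev = call i16 @llvm.bswap.i16(i16 %x)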
3216 
3217 // CodeGen has special handling for some string functions that may replace
3218 // them with target-specific intrinsics.  Since that'd skip our interceptors
3219 // in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
3220 // we mark affected calls as NoBuiltin, which will disable optimization
3221 // in CodeGen.
3222 void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
3223     CallInst *CI, const TargetLibraryInfo *TLI) {
3224   Function *F = CI->getCalledFunction();
3225   LibFunc Func;
3226   if (F && !F->hasLocalLinkage() && F->hasName() &&
3227       TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) &&
3228       !F->doesNotAccessMemory())
3229     CI->addAttribute(AttributeList::FunctionIndex, Attribute::NoBuiltin);
3230 }
3231 
3232 bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
3233   // We can't have a PHI with a metadata type.
3234   if (I->getOperand(OpIdx)->getType()->isMetadataTy())
3235     return false;
3236 
3237   // Early exit.
3238   if (!isa<Constant>(I->getOperand(OpIdx)))
3239     return true;
3240 
3241   switch (I->getOpcode()) {
3242   default:
3243     return true;
3244   case Instruction::Call:
3245   case Instruction::Invoke: {
3246     const auto &CB = cast<CallBase>(*I);
3247 
3248     // Can't handle inline asm. Skip it.
3249     if (CB.isInlineAsm())
3250       return false;
3251 
3252     // Constant bundle operands may need to retain their constant-ness for
3253     // correctness.
3254     if (CB.isBundleOperand(OpIdx))
3255       return false;
3256 
3257     if (OpIdx < CB.getNumArgOperands()) {
3258       // Some variadic intrinsics require constants in the variadic arguments,
3259       // which currently aren't markable as immarg.
3260       if (isa<IntrinsicInst>(CB) &&
3261           OpIdx >= CB.getFunctionType()->getNumParams()) {
3262         // This is known to be OK for stackmap.
3263         return CB.getIntrinsicID() == Intrinsic::experimental_stackmap;
3264       }
3265 
3266       // gcroot is a special case, since it requires a constant argument which
3267       // isn't also required to be a simple ConstantInt.
3268       if (CB.getIntrinsicID() == Intrinsic::gcroot)
3269         return false;
3270 
3271       // Some intrinsic operands are required to be immediates.
3272       return !CB.paramHasAttr(OpIdx, Attribute::ImmArg);
3273     }
3274 
3275     // It is never allowed to replace the callee operand of an intrinsic, but it
3276     // may be possible for an ordinary call.
3277     return !isa<IntrinsicInst>(CB);
3278   }
3279   case Instruction::ShuffleVector:
3280     // Shufflevector masks are constant.
3281     return OpIdx != 2;
3282   case Instruction::Switch:
3283   case Instruction::ExtractValue:
3284     // All operands apart from the first are constant.
3285     return OpIdx == 0;
3286   case Instruction::InsertValue:
3287     // All operands apart from the first and the second are constant.
3288     return OpIdx < 2;
3289   case Instruction::Alloca:
3290     // Static allocas (constant size in the entry block) are handled by
3291     // prologue/epilogue insertion so they're free anyway. We definitely don't
3292     // want to make them non-constant.
3293     return !cast<AllocaInst>(I)->isStaticAlloca();
3294   case Instruction::GetElementPtr:
3295     if (OpIdx == 0)
3296       return true;
3297     gep_type_iterator It = gep_type_begin(I);
3298     for (auto E = std::next(It, OpIdx); It != E; ++It)
3299       if (It.isStruct())
3300         return false;
3301     return true;
3302   }
3303 }
3304 
3305 Value *llvm::invertCondition(Value *Condition) {
3306   // First: Check if it's a constant
3307   if (Constant *C = dyn_cast<Constant>(Condition))
3308     return ConstantExpr::getNot(C);
3309 
3310   // Second: If the condition is already inverted, return the original value
3311   Value *NotCondition;
3312   if (match(Condition, m_Not(m_Value(NotCondition))))
3313     return NotCondition;
3314 
3315   BasicBlock *Parent = nullptr;
3316   Instruction *Inst = dyn_cast<Instruction>(Condition);
3317   if (Inst)
3318     Parent = Inst->getParent();
3319   else if (Argument *Arg = dyn_cast<Argument>(Condition))
3320     Parent = &Arg->getParent()->getEntryBlock();
3321   assert(Parent && "Unsupported condition to invert");
3322 
3323   // Third: Check all the users for an invert
3324   for (User *U : Condition->users())
3325     if (Instruction *I = dyn_cast<Instruction>(U))
3326       if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition))))
3327         return I;
3328 
3329   // Last option: Create a new instruction
3330   auto *Inverted =
3331       BinaryOperator::CreateNot(Condition, Condition->getName() + ".inv");
3332   if (Inst && !isa<PHINode>(Inst))
3333     Inverted->insertAfter(Inst);
3334   else
3335     Inverted->insertBefore(&*Parent->getFirstInsertionPt());
3336   return Inverted;
3337 }
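
// Illustrative sketch: a client that needs the negated form of a branch
// condition can reuse an existing inversion via the helper above instead of
// always creating a new 'xor'. The names below are hypothetical.
//
//   Value *NotCond = invertCondition(BI->getCondition());
//   BI->setCondition(NotCond);
//   BI->swapSuccessors();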
3338 
3339 bool llvm::inferAttributesFromOthers(Function &F) {
3340   // Note: We explicitly check for attributes rather than using cover functions
3341   // because some of the cover functions include the logic being implemented.
3342 
3343   bool Changed = false;
3344   // readnone + not convergent implies nosync
3345   if (!F.hasFnAttribute(Attribute::NoSync) &&
3346       F.doesNotAccessMemory() && !F.isConvergent()) {
3347     F.setNoSync();
3348     Changed = true;
3349   }
3350 
3351   // readonly implies nofree
3352   if (!F.hasFnAttribute(Attribute::NoFree) && F.onlyReadsMemory()) {
3353     F.setDoesNotFreeMemory();
3354     Changed = true;
3355   }
3356 
3357   // willreturn implies mustprogress
3358   if (!F.hasFnAttribute(Attribute::MustProgress) && F.willReturn()) {
3359     F.setMustProgress();
3360     Changed = true;
3361   }
3362 
3363   // TODO: There are a bunch of cases of restrictive memory effects we
3364   // can infer by inspecting arguments of argmemonly-ish functions.
3365 
3366   return Changed;
3367 }
3368