1 //===- Local.cpp - Functions to perform local transformations -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This family of functions perform various local transformations to the
10 // program.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/Utils/Local.h"
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/DenseMap.h"
17 #include "llvm/ADT/DenseMapInfo.h"
18 #include "llvm/ADT/DenseSet.h"
19 #include "llvm/ADT/Hashing.h"
20 #include "llvm/ADT/None.h"
21 #include "llvm/ADT/Optional.h"
22 #include "llvm/ADT/STLExtras.h"
23 #include "llvm/ADT/SetVector.h"
24 #include "llvm/ADT/SmallPtrSet.h"
25 #include "llvm/ADT/SmallVector.h"
26 #include "llvm/ADT/Statistic.h"
27 #include "llvm/Analysis/AssumeBundleQueries.h"
28 #include "llvm/Analysis/ConstantFolding.h"
29 #include "llvm/Analysis/DomTreeUpdater.h"
30 #include "llvm/Analysis/EHPersonalities.h"
31 #include "llvm/Analysis/InstructionSimplify.h"
32 #include "llvm/Analysis/MemoryBuiltins.h"
33 #include "llvm/Analysis/MemorySSAUpdater.h"
34 #include "llvm/Analysis/TargetLibraryInfo.h"
35 #include "llvm/Analysis/ValueTracking.h"
36 #include "llvm/Analysis/VectorUtils.h"
37 #include "llvm/BinaryFormat/Dwarf.h"
38 #include "llvm/IR/Argument.h"
39 #include "llvm/IR/Attributes.h"
40 #include "llvm/IR/BasicBlock.h"
41 #include "llvm/IR/CFG.h"
42 #include "llvm/IR/Constant.h"
43 #include "llvm/IR/ConstantRange.h"
44 #include "llvm/IR/Constants.h"
45 #include "llvm/IR/DIBuilder.h"
46 #include "llvm/IR/DataLayout.h"
47 #include "llvm/IR/DebugInfo.h"
48 #include "llvm/IR/DebugInfoMetadata.h"
49 #include "llvm/IR/DebugLoc.h"
50 #include "llvm/IR/DerivedTypes.h"
51 #include "llvm/IR/Dominators.h"
52 #include "llvm/IR/Function.h"
53 #include "llvm/IR/GetElementPtrTypeIterator.h"
54 #include "llvm/IR/GlobalObject.h"
55 #include "llvm/IR/IRBuilder.h"
56 #include "llvm/IR/InstrTypes.h"
57 #include "llvm/IR/Instruction.h"
58 #include "llvm/IR/Instructions.h"
59 #include "llvm/IR/IntrinsicInst.h"
60 #include "llvm/IR/Intrinsics.h"
61 #include "llvm/IR/LLVMContext.h"
62 #include "llvm/IR/MDBuilder.h"
63 #include "llvm/IR/Metadata.h"
64 #include "llvm/IR/Module.h"
65 #include "llvm/IR/PatternMatch.h"
66 #include "llvm/IR/Type.h"
67 #include "llvm/IR/Use.h"
68 #include "llvm/IR/User.h"
69 #include "llvm/IR/Value.h"
70 #include "llvm/IR/ValueHandle.h"
71 #include "llvm/Support/Casting.h"
72 #include "llvm/Support/Debug.h"
73 #include "llvm/Support/ErrorHandling.h"
74 #include "llvm/Support/KnownBits.h"
75 #include "llvm/Support/raw_ostream.h"
76 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
77 #include "llvm/Transforms/Utils/ValueMapper.h"
78 #include <algorithm>
79 #include <cassert>
80 #include <cstdint>
81 #include <iterator>
82 #include <map>
83 #include <utility>
84 
85 using namespace llvm;
86 using namespace llvm::PatternMatch;
87 
88 #define DEBUG_TYPE "local"
89 
90 STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
91 STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd");
92 
93 static cl::opt<bool> PHICSEDebugHash(
94     "phicse-debug-hash",
95 #ifdef EXPENSIVE_CHECKS
96     cl::init(true),
97 #else
98     cl::init(false),
99 #endif
100     cl::Hidden,
101     cl::desc("Perform extra assertion checking to verify that PHINodes's hash "
102              "function is well-behaved w.r.t. its isEqual predicate"));
103 
104 static cl::opt<unsigned> PHICSENumPHISmallSize(
105     "phicse-num-phi-smallsize", cl::init(32), cl::Hidden,
106     cl::desc(
107         "When the basic block contains not more than this number of PHI nodes, "
108         "perform a (faster!) exhaustive search instead of set-driven one."));
109 
110 // Max recursion depth for collectBitParts used when detecting bswap and
111 // bitreverse idioms.
112 static const unsigned BitPartRecursionMaxDepth = 48;
113 
114 //===----------------------------------------------------------------------===//
115 //  Local constant propagation.
116 //
117 
118 /// ConstantFoldTerminator - If a terminator instruction is predicated on a
119 /// constant value, convert it into an unconditional branch to the constant
120 /// destination.  This is a nontrivial operation because the successors of this
121 /// basic block must have their PHI nodes updated.
122 /// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
123 /// conditions and indirectbr addresses this might make dead if
124 /// DeleteDeadConditions is true.
125 bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
126                                   const TargetLibraryInfo *TLI,
127                                   DomTreeUpdater *DTU) {
128   Instruction *T = BB->getTerminator();
129   IRBuilder<> Builder(T);
130 
131   // Branch - See if we are conditional jumping on constant
132   if (auto *BI = dyn_cast<BranchInst>(T)) {
133     if (BI->isUnconditional()) return false;  // Can't optimize uncond branch
134 
135     BasicBlock *Dest1 = BI->getSuccessor(0);
136     BasicBlock *Dest2 = BI->getSuccessor(1);
137 
138     if (Dest2 == Dest1) {       // Conditional branch to same location?
139       // This branch matches something like this:
140       //     br bool %cond, label %Dest, label %Dest
141       // and changes it into:  br label %Dest
142 
143       // Let the basic block know that we are letting go of one copy of it.
144       assert(BI->getParent() && "Terminator not inserted in block!");
145       Dest1->removePredecessor(BI->getParent());
146 
147       // Replace the conditional branch with an unconditional one.
148       BranchInst *NewBI = Builder.CreateBr(Dest1);
149 
150       // Transfer the metadata to the new branch instruction.
151       NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
152                                 LLVMContext::MD_annotation});
153 
154       Value *Cond = BI->getCondition();
155       BI->eraseFromParent();
156       if (DeleteDeadConditions)
157         RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
158       return true;
159     }
160 
161     if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
162       // Are we branching on constant?
163       // YES.  Change to unconditional branch...
164       BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
165       BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1;
166 
167       // Let the basic block know that we are letting go of it.  Based on this,
168       // it will adjust it's PHI nodes.
169       OldDest->removePredecessor(BB);
170 
171       // Replace the conditional branch with an unconditional one.
172       BranchInst *NewBI = Builder.CreateBr(Destination);
173 
174       // Transfer the metadata to the new branch instruction.
175       NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
176                                 LLVMContext::MD_annotation});
177 
178       BI->eraseFromParent();
179       if (DTU)
180         DTU->applyUpdates({{DominatorTree::Delete, BB, OldDest}});
181       return true;
182     }
183 
184     return false;
185   }
186 
187   if (auto *SI = dyn_cast<SwitchInst>(T)) {
188     // If we are switching on a constant, we can convert the switch to an
189     // unconditional branch.
190     auto *CI = dyn_cast<ConstantInt>(SI->getCondition());
191     BasicBlock *DefaultDest = SI->getDefaultDest();
192     BasicBlock *TheOnlyDest = DefaultDest;
193 
194     // If the default is unreachable, ignore it when searching for TheOnlyDest.
195     if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) &&
196         SI->getNumCases() > 0) {
197       TheOnlyDest = SI->case_begin()->getCaseSuccessor();
198     }
199 
200     bool Changed = false;
201 
202     // Figure out which case it goes to.
203     for (auto i = SI->case_begin(), e = SI->case_end(); i != e;) {
204       // Found case matching a constant operand?
205       if (i->getCaseValue() == CI) {
206         TheOnlyDest = i->getCaseSuccessor();
207         break;
208       }
209 
210       // Check to see if this branch is going to the same place as the default
211       // dest.  If so, eliminate it as an explicit compare.
212       if (i->getCaseSuccessor() == DefaultDest) {
213         MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
214         unsigned NCases = SI->getNumCases();
215         // Fold the case metadata into the default if there will be any branches
216         // left, unless the metadata doesn't match the switch.
217         if (NCases > 1 && MD && MD->getNumOperands() == 2 + NCases) {
218           // Collect branch weights into a vector.
219           SmallVector<uint32_t, 8> Weights;
220           for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e;
221                ++MD_i) {
222             auto *CI = mdconst::extract<ConstantInt>(MD->getOperand(MD_i));
223             Weights.push_back(CI->getValue().getZExtValue());
224           }
225           // Merge weight of this case to the default weight.
226           unsigned idx = i->getCaseIndex();
227           Weights[0] += Weights[idx+1];
228           // Remove weight for this case.
229           std::swap(Weights[idx+1], Weights.back());
230           Weights.pop_back();
231           SI->setMetadata(LLVMContext::MD_prof,
232                           MDBuilder(BB->getContext()).
233                           createBranchWeights(Weights));
234         }
235         // Remove this entry.
236         BasicBlock *ParentBB = SI->getParent();
237         DefaultDest->removePredecessor(ParentBB);
238         i = SI->removeCase(i);
239         e = SI->case_end();
240         Changed = true;
241         continue;
242       }
243 
244       // Otherwise, check to see if the switch only branches to one destination.
245       // We do this by reseting "TheOnlyDest" to null when we find two non-equal
246       // destinations.
247       if (i->getCaseSuccessor() != TheOnlyDest)
248         TheOnlyDest = nullptr;
249 
250       // Increment this iterator as we haven't removed the case.
251       ++i;
252     }
253 
254     if (CI && !TheOnlyDest) {
255       // Branching on a constant, but not any of the cases, go to the default
256       // successor.
257       TheOnlyDest = SI->getDefaultDest();
258     }
259 
260     // If we found a single destination that we can fold the switch into, do so
261     // now.
262     if (TheOnlyDest) {
263       // Insert the new branch.
264       Builder.CreateBr(TheOnlyDest);
265       BasicBlock *BB = SI->getParent();
266 
267       SmallSet<BasicBlock *, 8> RemovedSuccessors;
268 
269       // Remove entries from PHI nodes which we no longer branch to...
270       BasicBlock *SuccToKeep = TheOnlyDest;
271       for (BasicBlock *Succ : successors(SI)) {
272         if (DTU && Succ != TheOnlyDest)
273           RemovedSuccessors.insert(Succ);
274         // Found case matching a constant operand?
275         if (Succ == SuccToKeep) {
276           SuccToKeep = nullptr; // Don't modify the first branch to TheOnlyDest
277         } else {
278           Succ->removePredecessor(BB);
279         }
280       }
281 
282       // Delete the old switch.
283       Value *Cond = SI->getCondition();
284       SI->eraseFromParent();
285       if (DeleteDeadConditions)
286         RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
287       if (DTU) {
288         std::vector<DominatorTree::UpdateType> Updates;
289         Updates.reserve(RemovedSuccessors.size());
290         for (auto *RemovedSuccessor : RemovedSuccessors)
291           Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
292         DTU->applyUpdates(Updates);
293       }
294       return true;
295     }
296 
297     if (SI->getNumCases() == 1) {
298       // Otherwise, we can fold this switch into a conditional branch
299       // instruction if it has only one non-default destination.
300       auto FirstCase = *SI->case_begin();
301       Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
302           FirstCase.getCaseValue(), "cond");
303 
304       // Insert the new branch.
305       BranchInst *NewBr = Builder.CreateCondBr(Cond,
306                                                FirstCase.getCaseSuccessor(),
307                                                SI->getDefaultDest());
308       MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
309       if (MD && MD->getNumOperands() == 3) {
310         ConstantInt *SICase =
311             mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
312         ConstantInt *SIDef =
313             mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
314         assert(SICase && SIDef);
315         // The TrueWeight should be the weight for the single case of SI.
316         NewBr->setMetadata(LLVMContext::MD_prof,
317                         MDBuilder(BB->getContext()).
318                         createBranchWeights(SICase->getValue().getZExtValue(),
319                                             SIDef->getValue().getZExtValue()));
320       }
321 
322       // Update make.implicit metadata to the newly-created conditional branch.
323       MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
324       if (MakeImplicitMD)
325         NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);
326 
327       // Delete the old switch.
328       SI->eraseFromParent();
329       return true;
330     }
331     return Changed;
332   }
333 
334   if (auto *IBI = dyn_cast<IndirectBrInst>(T)) {
335     // indirectbr blockaddress(@F, @BB) -> br label @BB
336     if (auto *BA =
337           dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
338       BasicBlock *TheOnlyDest = BA->getBasicBlock();
339       SmallSet<BasicBlock *, 8> RemovedSuccessors;
340 
341       // Insert the new branch.
342       Builder.CreateBr(TheOnlyDest);
343 
344       BasicBlock *SuccToKeep = TheOnlyDest;
345       for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
346         BasicBlock *DestBB = IBI->getDestination(i);
347         if (DTU && DestBB != TheOnlyDest)
348           RemovedSuccessors.insert(DestBB);
349         if (IBI->getDestination(i) == SuccToKeep) {
350           SuccToKeep = nullptr;
351         } else {
352           DestBB->removePredecessor(BB);
353         }
354       }
355       Value *Address = IBI->getAddress();
356       IBI->eraseFromParent();
357       if (DeleteDeadConditions)
358         // Delete pointer cast instructions.
359         RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);
360 
361       // Also zap the blockaddress constant if there are no users remaining,
362       // otherwise the destination is still marked as having its address taken.
363       if (BA->use_empty())
364         BA->destroyConstant();
365 
366       // If we didn't find our destination in the IBI successor list, then we
367       // have undefined behavior.  Replace the unconditional branch with an
368       // 'unreachable' instruction.
369       if (SuccToKeep) {
370         BB->getTerminator()->eraseFromParent();
371         new UnreachableInst(BB->getContext(), BB);
372       }
373 
374       if (DTU) {
375         std::vector<DominatorTree::UpdateType> Updates;
376         Updates.reserve(RemovedSuccessors.size());
377         for (auto *RemovedSuccessor : RemovedSuccessors)
378           Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
379         DTU->applyUpdates(Updates);
380       }
381       return true;
382     }
383   }
384 
385   return false;
386 }
387 
388 //===----------------------------------------------------------------------===//
389 //  Local dead code elimination.
390 //
391 
392 /// isInstructionTriviallyDead - Return true if the result produced by the
393 /// instruction is not used, and the instruction has no side effects.
394 ///
395 bool llvm::isInstructionTriviallyDead(Instruction *I,
396                                       const TargetLibraryInfo *TLI) {
397   if (!I->use_empty())
398     return false;
399   return wouldInstructionBeTriviallyDead(I, TLI);
400 }
401 
402 bool llvm::wouldInstructionBeTriviallyDeadOnUnusedPaths(
403     Instruction *I, const TargetLibraryInfo *TLI) {
404   // Instructions that are "markers" and have implied meaning on code around
405   // them (without explicit uses), are not dead on unused paths.
406   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
407     if (II->getIntrinsicID() == Intrinsic::stacksave ||
408         II->getIntrinsicID() == Intrinsic::launder_invariant_group ||
409         II->isLifetimeStartOrEnd())
410       return false;
411   return wouldInstructionBeTriviallyDead(I, TLI);
412 }
413 
414 bool llvm::wouldInstructionBeTriviallyDead(Instruction *I,
415                                            const TargetLibraryInfo *TLI) {
416   if (I->isTerminator())
417     return false;
418 
419   // We don't want the landingpad-like instructions removed by anything this
420   // general.
421   if (I->isEHPad())
422     return false;
423 
424   // We don't want debug info removed by anything this general, unless
425   // debug info is empty.
426   if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) {
427     if (DDI->getAddress())
428       return false;
429     return true;
430   }
431   if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(I)) {
432     if (DVI->hasArgList() || DVI->getValue(0))
433       return false;
434     return true;
435   }
436   if (DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(I)) {
437     if (DLI->getLabel())
438       return false;
439     return true;
440   }
441 
442   if (!I->willReturn())
443     return false;
444 
445   if (!I->mayHaveSideEffects())
446     return true;
447 
448   // Special case intrinsics that "may have side effects" but can be deleted
449   // when dead.
450   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
451     // Safe to delete llvm.stacksave and launder.invariant.group if dead.
452     if (II->getIntrinsicID() == Intrinsic::stacksave ||
453         II->getIntrinsicID() == Intrinsic::launder_invariant_group)
454       return true;
455 
456     if (II->isLifetimeStartOrEnd()) {
457       auto *Arg = II->getArgOperand(1);
458       // Lifetime intrinsics are dead when their right-hand is undef.
459       if (isa<UndefValue>(Arg))
460         return true;
461       // If the right-hand is an alloc, global, or argument and the only uses
462       // are lifetime intrinsics then the intrinsics are dead.
463       if (isa<AllocaInst>(Arg) || isa<GlobalValue>(Arg) || isa<Argument>(Arg))
464         return llvm::all_of(Arg->uses(), [](Use &Use) {
465           if (IntrinsicInst *IntrinsicUse =
466                   dyn_cast<IntrinsicInst>(Use.getUser()))
467             return IntrinsicUse->isLifetimeStartOrEnd();
468           return false;
469         });
470       return false;
471     }
472 
473     // Assumptions are dead if their condition is trivially true.  Guards on
474     // true are operationally no-ops.  In the future we can consider more
475     // sophisticated tradeoffs for guards considering potential for check
476     // widening, but for now we keep things simple.
477     if ((II->getIntrinsicID() == Intrinsic::assume &&
478          isAssumeWithEmptyBundle(cast<AssumeInst>(*II))) ||
479         II->getIntrinsicID() == Intrinsic::experimental_guard) {
480       if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
481         return !Cond->isZero();
482 
483       return false;
484     }
485 
486     if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I)) {
487       Optional<fp::ExceptionBehavior> ExBehavior = FPI->getExceptionBehavior();
488       return *ExBehavior != fp::ebStrict;
489     }
490   }
491 
492   if (isAllocationFn(I, TLI) && isAllocRemovable(cast<CallBase>(I), TLI))
493     return true;
494 
495   if (CallInst *CI = isFreeCall(I, TLI))
496     if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0)))
497       return C->isNullValue() || isa<UndefValue>(C);
498 
499   if (auto *Call = dyn_cast<CallBase>(I))
500     if (isMathLibCallNoop(Call, TLI))
501       return true;
502 
503   // Non-volatile atomic loads from constants can be removed.
504   if (auto *LI = dyn_cast<LoadInst>(I))
505     if (auto *GV = dyn_cast<GlobalVariable>(
506             LI->getPointerOperand()->stripPointerCasts()))
507       if (!LI->isVolatile() && GV->isConstant())
508         return true;
509 
510   return false;
511 }
512 
513 /// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
514 /// trivially dead instruction, delete it.  If that makes any of its operands
515 /// trivially dead, delete them too, recursively.  Return true if any
516 /// instructions were deleted.
517 bool llvm::RecursivelyDeleteTriviallyDeadInstructions(
518     Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU,
519     std::function<void(Value *)> AboutToDeleteCallback) {
520   Instruction *I = dyn_cast<Instruction>(V);
521   if (!I || !isInstructionTriviallyDead(I, TLI))
522     return false;
523 
524   SmallVector<WeakTrackingVH, 16> DeadInsts;
525   DeadInsts.push_back(I);
526   RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
527                                              AboutToDeleteCallback);
528 
529   return true;
530 }
531 
532 bool llvm::RecursivelyDeleteTriviallyDeadInstructionsPermissive(
533     SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
534     MemorySSAUpdater *MSSAU,
535     std::function<void(Value *)> AboutToDeleteCallback) {
536   unsigned S = 0, E = DeadInsts.size(), Alive = 0;
537   for (; S != E; ++S) {
538     auto *I = dyn_cast<Instruction>(DeadInsts[S]);
539     if (!I || !isInstructionTriviallyDead(I)) {
540       DeadInsts[S] = nullptr;
541       ++Alive;
542     }
543   }
544   if (Alive == E)
545     return false;
546   RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
547                                              AboutToDeleteCallback);
548   return true;
549 }
550 
551 void llvm::RecursivelyDeleteTriviallyDeadInstructions(
552     SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
553     MemorySSAUpdater *MSSAU,
554     std::function<void(Value *)> AboutToDeleteCallback) {
555   // Process the dead instruction list until empty.
556   while (!DeadInsts.empty()) {
557     Value *V = DeadInsts.pop_back_val();
558     Instruction *I = cast_or_null<Instruction>(V);
559     if (!I)
560       continue;
561     assert(isInstructionTriviallyDead(I, TLI) &&
562            "Live instruction found in dead worklist!");
563     assert(I->use_empty() && "Instructions with uses are not dead.");
564 
565     // Don't lose the debug info while deleting the instructions.
566     salvageDebugInfo(*I);
567 
568     if (AboutToDeleteCallback)
569       AboutToDeleteCallback(I);
570 
571     // Null out all of the instruction's operands to see if any operand becomes
572     // dead as we go.
573     for (Use &OpU : I->operands()) {
574       Value *OpV = OpU.get();
575       OpU.set(nullptr);
576 
577       if (!OpV->use_empty())
578         continue;
579 
580       // If the operand is an instruction that became dead as we nulled out the
581       // operand, and if it is 'trivially' dead, delete it in a future loop
582       // iteration.
583       if (Instruction *OpI = dyn_cast<Instruction>(OpV))
584         if (isInstructionTriviallyDead(OpI, TLI))
585           DeadInsts.push_back(OpI);
586     }
587     if (MSSAU)
588       MSSAU->removeMemoryAccess(I);
589 
590     I->eraseFromParent();
591   }
592 }
593 
594 bool llvm::replaceDbgUsesWithUndef(Instruction *I) {
595   SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
596   findDbgUsers(DbgUsers, I);
597   for (auto *DII : DbgUsers) {
598     Value *Undef = UndefValue::get(I->getType());
599     DII->replaceVariableLocationOp(I, Undef);
600   }
601   return !DbgUsers.empty();
602 }
603 
604 /// areAllUsesEqual - Check whether the uses of a value are all the same.
605 /// This is similar to Instruction::hasOneUse() except this will also return
606 /// true when there are no uses or multiple uses that all refer to the same
607 /// value.
608 static bool areAllUsesEqual(Instruction *I) {
609   Value::user_iterator UI = I->user_begin();
610   Value::user_iterator UE = I->user_end();
611   if (UI == UE)
612     return true;
613 
614   User *TheUse = *UI;
615   for (++UI; UI != UE; ++UI) {
616     if (*UI != TheUse)
617       return false;
618   }
619   return true;
620 }
621 
622 /// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
623 /// dead PHI node, due to being a def-use chain of single-use nodes that
624 /// either forms a cycle or is terminated by a trivially dead instruction,
625 /// delete it.  If that makes any of its operands trivially dead, delete them
626 /// too, recursively.  Return true if a change was made.
627 bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
628                                         const TargetLibraryInfo *TLI,
629                                         llvm::MemorySSAUpdater *MSSAU) {
630   SmallPtrSet<Instruction*, 4> Visited;
631   for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
632        I = cast<Instruction>(*I->user_begin())) {
633     if (I->use_empty())
634       return RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
635 
636     // If we find an instruction more than once, we're on a cycle that
637     // won't prove fruitful.
638     if (!Visited.insert(I).second) {
639       // Break the cycle and delete the instruction and its operands.
640       I->replaceAllUsesWith(UndefValue::get(I->getType()));
641       (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
642       return true;
643     }
644   }
645   return false;
646 }
647 
648 static bool
649 simplifyAndDCEInstruction(Instruction *I,
650                           SmallSetVector<Instruction *, 16> &WorkList,
651                           const DataLayout &DL,
652                           const TargetLibraryInfo *TLI) {
653   if (isInstructionTriviallyDead(I, TLI)) {
654     salvageDebugInfo(*I);
655 
656     // Null out all of the instruction's operands to see if any operand becomes
657     // dead as we go.
658     for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
659       Value *OpV = I->getOperand(i);
660       I->setOperand(i, nullptr);
661 
662       if (!OpV->use_empty() || I == OpV)
663         continue;
664 
665       // If the operand is an instruction that became dead as we nulled out the
666       // operand, and if it is 'trivially' dead, delete it in a future loop
667       // iteration.
668       if (Instruction *OpI = dyn_cast<Instruction>(OpV))
669         if (isInstructionTriviallyDead(OpI, TLI))
670           WorkList.insert(OpI);
671     }
672 
673     I->eraseFromParent();
674 
675     return true;
676   }
677 
678   if (Value *SimpleV = simplifyInstruction(I, DL)) {
679     // Add the users to the worklist. CAREFUL: an instruction can use itself,
680     // in the case of a phi node.
681     for (User *U : I->users()) {
682       if (U != I) {
683         WorkList.insert(cast<Instruction>(U));
684       }
685     }
686 
687     // Replace the instruction with its simplified value.
688     bool Changed = false;
689     if (!I->use_empty()) {
690       I->replaceAllUsesWith(SimpleV);
691       Changed = true;
692     }
693     if (isInstructionTriviallyDead(I, TLI)) {
694       I->eraseFromParent();
695       Changed = true;
696     }
697     return Changed;
698   }
699   return false;
700 }
701 
702 /// SimplifyInstructionsInBlock - Scan the specified basic block and try to
703 /// simplify any instructions in it and recursively delete dead instructions.
704 ///
705 /// This returns true if it changed the code, note that it can delete
706 /// instructions in other blocks as well in this block.
707 bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
708                                        const TargetLibraryInfo *TLI) {
709   bool MadeChange = false;
710   const DataLayout &DL = BB->getModule()->getDataLayout();
711 
712 #ifndef NDEBUG
713   // In debug builds, ensure that the terminator of the block is never replaced
714   // or deleted by these simplifications. The idea of simplification is that it
715   // cannot introduce new instructions, and there is no way to replace the
716   // terminator of a block without introducing a new instruction.
717   AssertingVH<Instruction> TerminatorVH(&BB->back());
718 #endif
719 
720   SmallSetVector<Instruction *, 16> WorkList;
721   // Iterate over the original function, only adding insts to the worklist
722   // if they actually need to be revisited. This avoids having to pre-init
723   // the worklist with the entire function's worth of instructions.
724   for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
725        BI != E;) {
726     assert(!BI->isTerminator());
727     Instruction *I = &*BI;
728     ++BI;
729 
730     // We're visiting this instruction now, so make sure it's not in the
731     // worklist from an earlier visit.
732     if (!WorkList.count(I))
733       MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
734   }
735 
736   while (!WorkList.empty()) {
737     Instruction *I = WorkList.pop_back_val();
738     MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
739   }
740   return MadeChange;
741 }
742 
743 //===----------------------------------------------------------------------===//
744 //  Control Flow Graph Restructuring.
745 //
746 
747 void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB,
748                                        DomTreeUpdater *DTU) {
749 
750   // If BB has single-entry PHI nodes, fold them.
751   while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
752     Value *NewVal = PN->getIncomingValue(0);
753     // Replace self referencing PHI with undef, it must be dead.
754     if (NewVal == PN) NewVal = UndefValue::get(PN->getType());
755     PN->replaceAllUsesWith(NewVal);
756     PN->eraseFromParent();
757   }
758 
759   BasicBlock *PredBB = DestBB->getSinglePredecessor();
760   assert(PredBB && "Block doesn't have a single predecessor!");
761 
762   bool ReplaceEntryBB = PredBB->isEntryBlock();
763 
764   // DTU updates: Collect all the edges that enter
765   // PredBB. These dominator edges will be redirected to DestBB.
766   SmallVector<DominatorTree::UpdateType, 32> Updates;
767 
768   if (DTU) {
769     // To avoid processing the same predecessor more than once.
770     SmallPtrSet<BasicBlock *, 2> SeenPreds;
771     Updates.reserve(Updates.size() + 2 * pred_size(PredBB) + 1);
772     for (BasicBlock *PredOfPredBB : predecessors(PredBB))
773       // This predecessor of PredBB may already have DestBB as a successor.
774       if (PredOfPredBB != PredBB)
775         if (SeenPreds.insert(PredOfPredBB).second)
776           Updates.push_back({DominatorTree::Insert, PredOfPredBB, DestBB});
777     SeenPreds.clear();
778     for (BasicBlock *PredOfPredBB : predecessors(PredBB))
779       if (SeenPreds.insert(PredOfPredBB).second)
780         Updates.push_back({DominatorTree::Delete, PredOfPredBB, PredBB});
781     Updates.push_back({DominatorTree::Delete, PredBB, DestBB});
782   }
783 
784   // Zap anything that took the address of DestBB.  Not doing this will give the
785   // address an invalid value.
786   if (DestBB->hasAddressTaken()) {
787     BlockAddress *BA = BlockAddress::get(DestBB);
788     Constant *Replacement =
789       ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
790     BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
791                                                      BA->getType()));
792     BA->destroyConstant();
793   }
794 
795   // Anything that branched to PredBB now branches to DestBB.
796   PredBB->replaceAllUsesWith(DestBB);
797 
798   // Splice all the instructions from PredBB to DestBB.
799   PredBB->getTerminator()->eraseFromParent();
800   DestBB->getInstList().splice(DestBB->begin(), PredBB->getInstList());
801   new UnreachableInst(PredBB->getContext(), PredBB);
802 
803   // If the PredBB is the entry block of the function, move DestBB up to
804   // become the entry block after we erase PredBB.
805   if (ReplaceEntryBB)
806     DestBB->moveAfter(PredBB);
807 
808   if (DTU) {
809     assert(PredBB->getInstList().size() == 1 &&
810            isa<UnreachableInst>(PredBB->getTerminator()) &&
811            "The successor list of PredBB isn't empty before "
812            "applying corresponding DTU updates.");
813     DTU->applyUpdatesPermissive(Updates);
814     DTU->deleteBB(PredBB);
815     // Recalculation of DomTree is needed when updating a forward DomTree and
816     // the Entry BB is replaced.
817     if (ReplaceEntryBB && DTU->hasDomTree()) {
818       // The entry block was removed and there is no external interface for
819       // the dominator tree to be notified of this change. In this corner-case
820       // we recalculate the entire tree.
821       DTU->recalculate(*(DestBB->getParent()));
822     }
823   }
824 
825   else {
826     PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr.
827   }
828 }
829 
830 /// Return true if we can choose one of these values to use in place of the
831 /// other. Note that we will always choose the non-undef value to keep.
832 static bool CanMergeValues(Value *First, Value *Second) {
833   return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
834 }
835 
836 /// Return true if we can fold BB, an almost-empty BB ending in an unconditional
837 /// branch to Succ, into Succ.
838 ///
839 /// Assumption: Succ is the single successor for BB.
840 static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
841   assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");
842 
843   LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
844                     << Succ->getName() << "\n");
845   // Shortcut, if there is only a single predecessor it must be BB and merging
846   // is always safe
847   if (Succ->getSinglePredecessor()) return true;
848 
849   // Make a list of the predecessors of BB
850   SmallPtrSet<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB));
851 
852   // Look at all the phi nodes in Succ, to see if they present a conflict when
853   // merging these blocks
854   for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
855     PHINode *PN = cast<PHINode>(I);
856 
857     // If the incoming value from BB is again a PHINode in
858     // BB which has the same incoming value for *PI as PN does, we can
859     // merge the phi nodes and then the blocks can still be merged
860     PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
861     if (BBPN && BBPN->getParent() == BB) {
862       for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
863         BasicBlock *IBB = PN->getIncomingBlock(PI);
864         if (BBPreds.count(IBB) &&
865             !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
866                             PN->getIncomingValue(PI))) {
867           LLVM_DEBUG(dbgs()
868                      << "Can't fold, phi node " << PN->getName() << " in "
869                      << Succ->getName() << " is conflicting with "
870                      << BBPN->getName() << " with regard to common predecessor "
871                      << IBB->getName() << "\n");
872           return false;
873         }
874       }
875     } else {
876       Value* Val = PN->getIncomingValueForBlock(BB);
877       for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
878         // See if the incoming value for the common predecessor is equal to the
879         // one for BB, in which case this phi node will not prevent the merging
880         // of the block.
881         BasicBlock *IBB = PN->getIncomingBlock(PI);
882         if (BBPreds.count(IBB) &&
883             !CanMergeValues(Val, PN->getIncomingValue(PI))) {
884           LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName()
885                             << " in " << Succ->getName()
886                             << " is conflicting with regard to common "
887                             << "predecessor " << IBB->getName() << "\n");
888           return false;
889         }
890       }
891     }
892   }
893 
894   return true;
895 }
896 
897 using PredBlockVector = SmallVector<BasicBlock *, 16>;
898 using IncomingValueMap = DenseMap<BasicBlock *, Value *>;
899 
900 /// Determines the value to use as the phi node input for a block.
901 ///
902 /// Select between \p OldVal any value that we know flows from \p BB
903 /// to a particular phi on the basis of which one (if either) is not
904 /// undef. Update IncomingValues based on the selected value.
905 ///
906 /// \param OldVal The value we are considering selecting.
907 /// \param BB The block that the value flows in from.
908 /// \param IncomingValues A map from block-to-value for other phi inputs
909 /// that we have examined.
910 ///
911 /// \returns the selected value.
912 static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
913                                           IncomingValueMap &IncomingValues) {
914   if (!isa<UndefValue>(OldVal)) {
915     assert((!IncomingValues.count(BB) ||
916             IncomingValues.find(BB)->second == OldVal) &&
917            "Expected OldVal to match incoming value from BB!");
918 
919     IncomingValues.insert(std::make_pair(BB, OldVal));
920     return OldVal;
921   }
922 
923   IncomingValueMap::const_iterator It = IncomingValues.find(BB);
924   if (It != IncomingValues.end()) return It->second;
925 
926   return OldVal;
927 }
928 
929 /// Create a map from block to value for the operands of a
930 /// given phi.
931 ///
932 /// Create a map from block to value for each non-undef value flowing
933 /// into \p PN.
934 ///
935 /// \param PN The phi we are collecting the map for.
936 /// \param IncomingValues [out] The map from block to value for this phi.
937 static void gatherIncomingValuesToPhi(PHINode *PN,
938                                       IncomingValueMap &IncomingValues) {
939   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
940     BasicBlock *BB = PN->getIncomingBlock(i);
941     Value *V = PN->getIncomingValue(i);
942 
943     if (!isa<UndefValue>(V))
944       IncomingValues.insert(std::make_pair(BB, V));
945   }
946 }
947 
948 /// Replace the incoming undef values to a phi with the values
949 /// from a block-to-value map.
950 ///
951 /// \param PN The phi we are replacing the undefs in.
952 /// \param IncomingValues A map from block to value.
953 static void replaceUndefValuesInPhi(PHINode *PN,
954                                     const IncomingValueMap &IncomingValues) {
955   SmallVector<unsigned> TrueUndefOps;
956   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
957     Value *V = PN->getIncomingValue(i);
958 
959     if (!isa<UndefValue>(V)) continue;
960 
961     BasicBlock *BB = PN->getIncomingBlock(i);
962     IncomingValueMap::const_iterator It = IncomingValues.find(BB);
963 
964     // Keep track of undef/poison incoming values. Those must match, so we fix
965     // them up below if needed.
966     // Note: this is conservatively correct, but we could try harder and group
967     // the undef values per incoming basic block.
968     if (It == IncomingValues.end()) {
969       TrueUndefOps.push_back(i);
970       continue;
971     }
972 
973     // There is a defined value for this incoming block, so map this undef
974     // incoming value to the defined value.
975     PN->setIncomingValue(i, It->second);
976   }
977 
978   // If there are both undef and poison values incoming, then convert those
979   // values to undef. It is invalid to have different values for the same
980   // incoming block.
981   unsigned PoisonCount = count_if(TrueUndefOps, [&](unsigned i) {
982     return isa<PoisonValue>(PN->getIncomingValue(i));
983   });
984   if (PoisonCount != 0 && PoisonCount != TrueUndefOps.size()) {
985     for (unsigned i : TrueUndefOps)
986       PN->setIncomingValue(i, UndefValue::get(PN->getType()));
987   }
988 }
989 
990 /// Replace a value flowing from a block to a phi with
991 /// potentially multiple instances of that value flowing from the
992 /// block's predecessors to the phi.
993 ///
994 /// \param BB The block with the value flowing into the phi.
995 /// \param BBPreds The predecessors of BB.
996 /// \param PN The phi that we are updating.
997 static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
998                                                 const PredBlockVector &BBPreds,
999                                                 PHINode *PN) {
1000   Value *OldVal = PN->removeIncomingValue(BB, false);
1001   assert(OldVal && "No entry in PHI for Pred BB!");
1002 
1003   IncomingValueMap IncomingValues;
1004 
1005   // We are merging two blocks - BB, and the block containing PN - and
1006   // as a result we need to redirect edges from the predecessors of BB
1007   // to go to the block containing PN, and update PN
1008   // accordingly. Since we allow merging blocks in the case where the
1009   // predecessor and successor blocks both share some predecessors,
1010   // and where some of those common predecessors might have undef
1011   // values flowing into PN, we want to rewrite those values to be
1012   // consistent with the non-undef values.
1013 
1014   gatherIncomingValuesToPhi(PN, IncomingValues);
1015 
1016   // If this incoming value is one of the PHI nodes in BB, the new entries
1017   // in the PHI node are the entries from the old PHI.
1018   if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
1019     PHINode *OldValPN = cast<PHINode>(OldVal);
1020     for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
1021       // Note that, since we are merging phi nodes and BB and Succ might
1022       // have common predecessors, we could end up with a phi node with
1023       // identical incoming branches. This will be cleaned up later (and
1024       // will trigger asserts if we try to clean it up now, without also
1025       // simplifying the corresponding conditional branch).
1026       BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
1027       Value *PredVal = OldValPN->getIncomingValue(i);
1028       Value *Selected = selectIncomingValueForBlock(PredVal, PredBB,
1029                                                     IncomingValues);
1030 
1031       // And add a new incoming value for this predecessor for the
1032       // newly retargeted branch.
1033       PN->addIncoming(Selected, PredBB);
1034     }
1035   } else {
1036     for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
1037       // Update existing incoming values in PN for this
1038       // predecessor of BB.
1039       BasicBlock *PredBB = BBPreds[i];
1040       Value *Selected = selectIncomingValueForBlock(OldVal, PredBB,
1041                                                     IncomingValues);
1042 
1043       // And add a new incoming value for this predecessor for the
1044       // newly retargeted branch.
1045       PN->addIncoming(Selected, PredBB);
1046     }
1047   }
1048 
1049   replaceUndefValuesInPhi(PN, IncomingValues);
1050 }
1051 
1052 bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
1053                                                    DomTreeUpdater *DTU) {
1054   assert(BB != &BB->getParent()->getEntryBlock() &&
1055          "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");
1056 
1057   // We can't eliminate infinite loops.
1058   BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
1059   if (BB == Succ) return false;
1060 
1061   // Check to see if merging these blocks would cause conflicts for any of the
1062   // phi nodes in BB or Succ. If not, we can safely merge.
1063   if (!CanPropagatePredecessorsForPHIs(BB, Succ)) return false;
1064 
1065   // Check for cases where Succ has multiple predecessors and a PHI node in BB
1066   // has uses which will not disappear when the PHI nodes are merged.  It is
1067   // possible to handle such cases, but difficult: it requires checking whether
1068   // BB dominates Succ, which is non-trivial to calculate in the case where
1069   // Succ has multiple predecessors.  Also, it requires checking whether
1070   // constructing the necessary self-referential PHI node doesn't introduce any
1071   // conflicts; this isn't too difficult, but the previous code for doing this
1072   // was incorrect.
1073   //
1074   // Note that if this check finds a live use, BB dominates Succ, so BB is
1075   // something like a loop pre-header (or rarely, a part of an irreducible CFG);
1076   // folding the branch isn't profitable in that case anyway.
1077   if (!Succ->getSinglePredecessor()) {
1078     BasicBlock::iterator BBI = BB->begin();
1079     while (isa<PHINode>(*BBI)) {
1080       for (Use &U : BBI->uses()) {
1081         if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
1082           if (PN->getIncomingBlock(U) != BB)
1083             return false;
1084         } else {
1085           return false;
1086         }
1087       }
1088       ++BBI;
1089     }
1090   }
1091 
1092   // We cannot fold the block if it's a branch to an already present callbr
1093   // successor because that creates duplicate successors.
1094   for (BasicBlock *PredBB : predecessors(BB)) {
1095     if (auto *CBI = dyn_cast<CallBrInst>(PredBB->getTerminator())) {
1096       if (Succ == CBI->getDefaultDest())
1097         return false;
1098       for (unsigned i = 0, e = CBI->getNumIndirectDests(); i != e; ++i)
1099         if (Succ == CBI->getIndirectDest(i))
1100           return false;
1101     }
1102   }
1103 
1104   LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
1105 
1106   SmallVector<DominatorTree::UpdateType, 32> Updates;
1107   if (DTU) {
1108     // To avoid processing the same predecessor more than once.
1109     SmallPtrSet<BasicBlock *, 8> SeenPreds;
1110     // All predecessors of BB will be moved to Succ.
1111     SmallPtrSet<BasicBlock *, 8> PredsOfSucc(pred_begin(Succ), pred_end(Succ));
1112     Updates.reserve(Updates.size() + 2 * pred_size(BB) + 1);
1113     for (auto *PredOfBB : predecessors(BB))
1114       // This predecessor of BB may already have Succ as a successor.
1115       if (!PredsOfSucc.contains(PredOfBB))
1116         if (SeenPreds.insert(PredOfBB).second)
1117           Updates.push_back({DominatorTree::Insert, PredOfBB, Succ});
1118     SeenPreds.clear();
1119     for (auto *PredOfBB : predecessors(BB))
1120       if (SeenPreds.insert(PredOfBB).second)
1121         Updates.push_back({DominatorTree::Delete, PredOfBB, BB});
1122     Updates.push_back({DominatorTree::Delete, BB, Succ});
1123   }
1124 
1125   if (isa<PHINode>(Succ->begin())) {
1126     // If there is more than one pred of succ, and there are PHI nodes in
1127     // the successor, then we need to add incoming edges for the PHI nodes
1128     //
1129     const PredBlockVector BBPreds(predecessors(BB));
1130 
1131     // Loop over all of the PHI nodes in the successor of BB.
1132     for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
1133       PHINode *PN = cast<PHINode>(I);
1134 
1135       redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN);
1136     }
1137   }
1138 
1139   if (Succ->getSinglePredecessor()) {
1140     // BB is the only predecessor of Succ, so Succ will end up with exactly
1141     // the same predecessors BB had.
1142 
1143     // Copy over any phi, debug or lifetime instruction.
1144     BB->getTerminator()->eraseFromParent();
1145     Succ->getInstList().splice(Succ->getFirstNonPHI()->getIterator(),
1146                                BB->getInstList());
1147   } else {
1148     while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
1149       // We explicitly check for such uses in CanPropagatePredecessorsForPHIs.
1150       assert(PN->use_empty() && "There shouldn't be any uses here!");
1151       PN->eraseFromParent();
1152     }
1153   }
1154 
1155   // If the unconditional branch we replaced contains llvm.loop metadata, we
1156   // add the metadata to the branch instructions in the predecessors.
1157   unsigned LoopMDKind = BB->getContext().getMDKindID("llvm.loop");
1158   Instruction *TI = BB->getTerminator();
1159   if (TI)
1160     if (MDNode *LoopMD = TI->getMetadata(LoopMDKind))
1161       for (BasicBlock *Pred : predecessors(BB))
1162         Pred->getTerminator()->setMetadata(LoopMDKind, LoopMD);
1163 
1164   // Everything that jumped to BB now goes to Succ.
1165   BB->replaceAllUsesWith(Succ);
1166   if (!Succ->hasName()) Succ->takeName(BB);
1167 
1168   // Clear the successor list of BB to match updates applying to DTU later.
1169   if (BB->getTerminator())
1170     BB->getInstList().pop_back();
1171   new UnreachableInst(BB->getContext(), BB);
1172   assert(succ_empty(BB) && "The successor list of BB isn't empty before "
1173                            "applying corresponding DTU updates.");
1174 
1175   if (DTU)
1176     DTU->applyUpdates(Updates);
1177 
1178   DeleteDeadBlock(BB, DTU);
1179 
1180   return true;
1181 }
1182 
1183 static bool EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB) {
1184   // This implementation doesn't currently consider undef operands
1185   // specially. Theoretically, two phis which are identical except for
1186   // one having an undef where the other doesn't could be collapsed.
1187 
1188   bool Changed = false;
1189 
1190   // Examine each PHI.
1191   // Note that increment of I must *NOT* be in the iteration_expression, since
1192   // we don't want to immediately advance when we restart from the beginning.
1193   for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I);) {
1194     ++I;
1195     // Is there an identical PHI node in this basic block?
1196     // Note that we only look in the upper square's triangle,
1197     // we already checked that the lower triangle PHI's aren't identical.
1198     for (auto J = I; PHINode *DuplicatePN = dyn_cast<PHINode>(J); ++J) {
1199       if (!DuplicatePN->isIdenticalToWhenDefined(PN))
1200         continue;
1201       // A duplicate. Replace this PHI with the base PHI.
1202       ++NumPHICSEs;
1203       DuplicatePN->replaceAllUsesWith(PN);
1204       DuplicatePN->eraseFromParent();
1205       Changed = true;
1206 
1207       // The RAUW can change PHIs that we already visited.
1208       I = BB->begin();
1209       break; // Start over from the beginning.
1210     }
1211   }
1212   return Changed;
1213 }
1214 
1215 static bool EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB) {
1216   // This implementation doesn't currently consider undef operands
1217   // specially. Theoretically, two phis which are identical except for
1218   // one having an undef where the other doesn't could be collapsed.
1219 
1220   struct PHIDenseMapInfo {
1221     static PHINode *getEmptyKey() {
1222       return DenseMapInfo<PHINode *>::getEmptyKey();
1223     }
1224 
1225     static PHINode *getTombstoneKey() {
1226       return DenseMapInfo<PHINode *>::getTombstoneKey();
1227     }
1228 
1229     static bool isSentinel(PHINode *PN) {
1230       return PN == getEmptyKey() || PN == getTombstoneKey();
1231     }
1232 
1233     // WARNING: this logic must be kept in sync with
1234     //          Instruction::isIdenticalToWhenDefined()!
1235     static unsigned getHashValueImpl(PHINode *PN) {
1236       // Compute a hash value on the operands. Instcombine will likely have
1237       // sorted them, which helps expose duplicates, but we have to check all
1238       // the operands to be safe in case instcombine hasn't run.
1239       return static_cast<unsigned>(hash_combine(
1240           hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
1241           hash_combine_range(PN->block_begin(), PN->block_end())));
1242     }
1243 
1244     static unsigned getHashValue(PHINode *PN) {
1245 #ifndef NDEBUG
1246       // If -phicse-debug-hash was specified, return a constant -- this
1247       // will force all hashing to collide, so we'll exhaustively search
1248       // the table for a match, and the assertion in isEqual will fire if
1249       // there's a bug causing equal keys to hash differently.
1250       if (PHICSEDebugHash)
1251         return 0;
1252 #endif
1253       return getHashValueImpl(PN);
1254     }
1255 
1256     static bool isEqualImpl(PHINode *LHS, PHINode *RHS) {
1257       if (isSentinel(LHS) || isSentinel(RHS))
1258         return LHS == RHS;
1259       return LHS->isIdenticalTo(RHS);
1260     }
1261 
1262     static bool isEqual(PHINode *LHS, PHINode *RHS) {
1263       // These comparisons are nontrivial, so assert that equality implies
1264       // hash equality (DenseMap demands this as an invariant).
1265       bool Result = isEqualImpl(LHS, RHS);
1266       assert(!Result || (isSentinel(LHS) && LHS == RHS) ||
1267              getHashValueImpl(LHS) == getHashValueImpl(RHS));
1268       return Result;
1269     }
1270   };
1271 
1272   // Set of unique PHINodes.
1273   DenseSet<PHINode *, PHIDenseMapInfo> PHISet;
1274   PHISet.reserve(4 * PHICSENumPHISmallSize);
1275 
1276   // Examine each PHI.
1277   bool Changed = false;
1278   for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
1279     auto Inserted = PHISet.insert(PN);
1280     if (!Inserted.second) {
1281       // A duplicate. Replace this PHI with its duplicate.
1282       ++NumPHICSEs;
1283       PN->replaceAllUsesWith(*Inserted.first);
1284       PN->eraseFromParent();
1285       Changed = true;
1286 
1287       // The RAUW can change PHIs that we already visited. Start over from the
1288       // beginning.
1289       PHISet.clear();
1290       I = BB->begin();
1291     }
1292   }
1293 
1294   return Changed;
1295 }
1296 
1297 bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
1298   if (
1299 #ifndef NDEBUG
1300       !PHICSEDebugHash &&
1301 #endif
1302       hasNItemsOrLess(BB->phis(), PHICSENumPHISmallSize))
1303     return EliminateDuplicatePHINodesNaiveImpl(BB);
1304   return EliminateDuplicatePHINodesSetBasedImpl(BB);
1305 }
1306 
1307 /// If the specified pointer points to an object that we control, try to modify
1308 /// the object's alignment to PrefAlign. Returns a minimum known alignment of
1309 /// the value after the operation, which may be lower than PrefAlign.
1310 ///
1311 /// Increating value alignment isn't often possible though. If alignment is
1312 /// important, a more reliable approach is to simply align all global variables
1313 /// and allocation instructions to their preferred alignment from the beginning.
1314 static Align tryEnforceAlignment(Value *V, Align PrefAlign,
1315                                  const DataLayout &DL) {
1316   V = V->stripPointerCasts();
1317 
1318   if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1319     // TODO: Ideally, this function would not be called if PrefAlign is smaller
1320     // than the current alignment, as the known bits calculation should have
1321     // already taken it into account. However, this is not always the case,
1322     // as computeKnownBits() has a depth limit, while stripPointerCasts()
1323     // doesn't.
1324     Align CurrentAlign = AI->getAlign();
1325     if (PrefAlign <= CurrentAlign)
1326       return CurrentAlign;
1327 
1328     // If the preferred alignment is greater than the natural stack alignment
1329     // then don't round up. This avoids dynamic stack realignment.
1330     if (DL.exceedsNaturalStackAlignment(PrefAlign))
1331       return CurrentAlign;
1332     AI->setAlignment(PrefAlign);
1333     return PrefAlign;
1334   }
1335 
1336   if (auto *GO = dyn_cast<GlobalObject>(V)) {
1337     // TODO: as above, this shouldn't be necessary.
1338     Align CurrentAlign = GO->getPointerAlignment(DL);
1339     if (PrefAlign <= CurrentAlign)
1340       return CurrentAlign;
1341 
1342     // If there is a large requested alignment and we can, bump up the alignment
1343     // of the global.  If the memory we set aside for the global may not be the
1344     // memory used by the final program then it is impossible for us to reliably
1345     // enforce the preferred alignment.
1346     if (!GO->canIncreaseAlignment())
1347       return CurrentAlign;
1348 
1349     GO->setAlignment(PrefAlign);
1350     return PrefAlign;
1351   }
1352 
1353   return Align(1);
1354 }
1355 
1356 Align llvm::getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign,
1357                                        const DataLayout &DL,
1358                                        const Instruction *CxtI,
1359                                        AssumptionCache *AC,
1360                                        const DominatorTree *DT) {
1361   assert(V->getType()->isPointerTy() &&
1362          "getOrEnforceKnownAlignment expects a pointer!");
1363 
1364   KnownBits Known = computeKnownBits(V, DL, 0, AC, CxtI, DT);
1365   unsigned TrailZ = Known.countMinTrailingZeros();
1366 
1367   // Avoid trouble with ridiculously large TrailZ values, such as
1368   // those computed from a null pointer.
1369   // LLVM doesn't support alignments larger than (1 << MaxAlignmentExponent).
1370   TrailZ = std::min(TrailZ, +Value::MaxAlignmentExponent);
1371 
1372   Align Alignment = Align(1ull << std::min(Known.getBitWidth() - 1, TrailZ));
1373 
1374   if (PrefAlign && *PrefAlign > Alignment)
1375     Alignment = std::max(Alignment, tryEnforceAlignment(V, *PrefAlign, DL));
1376 
1377   // We don't need to make any adjustment.
1378   return Alignment;
1379 }
1380 
1381 ///===---------------------------------------------------------------------===//
1382 ///  Dbg Intrinsic utilities
1383 ///
1384 
1385 /// See if there is a dbg.value intrinsic for DIVar for the PHI node.
1386 static bool PhiHasDebugValue(DILocalVariable *DIVar,
1387                              DIExpression *DIExpr,
1388                              PHINode *APN) {
1389   // Since we can't guarantee that the original dbg.declare intrinsic
1390   // is removed by LowerDbgDeclare(), we need to make sure that we are
1391   // not inserting the same dbg.value intrinsic over and over.
1392   SmallVector<DbgValueInst *, 1> DbgValues;
1393   findDbgValues(DbgValues, APN);
1394   for (auto *DVI : DbgValues) {
1395     assert(is_contained(DVI->getValues(), APN));
1396     if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr))
1397       return true;
1398   }
1399   return false;
1400 }
1401 
1402 /// Check if the alloc size of \p ValTy is large enough to cover the variable
1403 /// (or fragment of the variable) described by \p DII.
1404 ///
1405 /// This is primarily intended as a helper for the different
1406 /// ConvertDebugDeclareToDebugValue functions. The dbg.declare/dbg.addr that is
1407 /// converted describes an alloca'd variable, so we need to use the
1408 /// alloc size of the value when doing the comparison. E.g. an i1 value will be
1409 /// identified as covering an n-bit fragment, if the store size of i1 is at
1410 /// least n bits.
1411 static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII) {
1412   const DataLayout &DL = DII->getModule()->getDataLayout();
1413   TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
1414   if (Optional<uint64_t> FragmentSize = DII->getFragmentSizeInBits()) {
1415     assert(!ValueSize.isScalable() &&
1416            "Fragments don't work on scalable types.");
1417     return ValueSize.getFixedSize() >= *FragmentSize;
1418   }
1419   // We can't always calculate the size of the DI variable (e.g. if it is a
1420   // VLA). Try to use the size of the alloca that the dbg intrinsic describes
1421   // intead.
1422   if (DII->isAddressOfVariable()) {
1423     // DII should have exactly 1 location when it is an address.
1424     assert(DII->getNumVariableLocationOps() == 1 &&
1425            "address of variable must have exactly 1 location operand.");
1426     if (auto *AI =
1427             dyn_cast_or_null<AllocaInst>(DII->getVariableLocationOp(0))) {
1428       if (Optional<TypeSize> FragmentSize = AI->getAllocationSizeInBits(DL)) {
1429         return TypeSize::isKnownGE(ValueSize, *FragmentSize);
1430       }
1431     }
1432   }
1433   // Could not determine size of variable. Conservatively return false.
1434   return false;
1435 }
1436 
1437 /// Produce a DebugLoc to use for each dbg.declare/inst pair that are promoted
1438 /// to a dbg.value. Because no machine insts can come from debug intrinsics,
1439 /// only the scope and inlinedAt is significant. Zero line numbers are used in
1440 /// case this DebugLoc leaks into any adjacent instructions.
1441 static DebugLoc getDebugValueLoc(DbgVariableIntrinsic *DII, Instruction *Src) {
1442   // Original dbg.declare must have a location.
1443   const DebugLoc &DeclareLoc = DII->getDebugLoc();
1444   MDNode *Scope = DeclareLoc.getScope();
1445   DILocation *InlinedAt = DeclareLoc.getInlinedAt();
1446   // Produce an unknown location with the correct scope / inlinedAt fields.
1447   return DILocation::get(DII->getContext(), 0, 0, Scope, InlinedAt);
1448 }
1449 
1450 /// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
1451 /// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
1452 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1453                                            StoreInst *SI, DIBuilder &Builder) {
1454   assert(DII->isAddressOfVariable());
1455   auto *DIVar = DII->getVariable();
1456   assert(DIVar && "Missing variable");
1457   auto *DIExpr = DII->getExpression();
1458   Value *DV = SI->getValueOperand();
1459 
1460   DebugLoc NewLoc = getDebugValueLoc(DII, SI);
1461 
1462   if (!valueCoversEntireFragment(DV->getType(), DII)) {
1463     // FIXME: If storing to a part of the variable described by the dbg.declare,
1464     // then we want to insert a dbg.value for the corresponding fragment.
1465     LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1466                       << *DII << '\n');
1467     // For now, when there is a store to parts of the variable (but we do not
1468     // know which part) we insert an dbg.value intrinsic to indicate that we
1469     // know nothing about the variable's content.
1470     DV = UndefValue::get(DV->getType());
1471     Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc, SI);
1472     return;
1473   }
1474 
1475   Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc, SI);
1476 }
1477 
1478 /// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
1479 /// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
1480 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1481                                            LoadInst *LI, DIBuilder &Builder) {
1482   auto *DIVar = DII->getVariable();
1483   auto *DIExpr = DII->getExpression();
1484   assert(DIVar && "Missing variable");
1485 
1486   if (!valueCoversEntireFragment(LI->getType(), DII)) {
1487     // FIXME: If only referring to a part of the variable described by the
1488     // dbg.declare, then we want to insert a dbg.value for the corresponding
1489     // fragment.
1490     LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1491                       << *DII << '\n');
1492     return;
1493   }
1494 
1495   DebugLoc NewLoc = getDebugValueLoc(DII, nullptr);
1496 
1497   // We are now tracking the loaded value instead of the address. In the
1498   // future if multi-location support is added to the IR, it might be
1499   // preferable to keep tracking both the loaded value and the original
1500   // address in case the alloca can not be elided.
1501   Instruction *DbgValue = Builder.insertDbgValueIntrinsic(
1502       LI, DIVar, DIExpr, NewLoc, (Instruction *)nullptr);
1503   DbgValue->insertAfter(LI);
1504 }
1505 
1506 /// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
1507 /// llvm.dbg.declare or llvm.dbg.addr intrinsic.
1508 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1509                                            PHINode *APN, DIBuilder &Builder) {
1510   auto *DIVar = DII->getVariable();
1511   auto *DIExpr = DII->getExpression();
1512   assert(DIVar && "Missing variable");
1513 
1514   if (PhiHasDebugValue(DIVar, DIExpr, APN))
1515     return;
1516 
1517   if (!valueCoversEntireFragment(APN->getType(), DII)) {
1518     // FIXME: If only referring to a part of the variable described by the
1519     // dbg.declare, then we want to insert a dbg.value for the corresponding
1520     // fragment.
1521     LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1522                       << *DII << '\n');
1523     return;
1524   }
1525 
1526   BasicBlock *BB = APN->getParent();
1527   auto InsertionPt = BB->getFirstInsertionPt();
1528 
1529   DebugLoc NewLoc = getDebugValueLoc(DII, nullptr);
1530 
1531   // The block may be a catchswitch block, which does not have a valid
1532   // insertion point.
1533   // FIXME: Insert dbg.value markers in the successors when appropriate.
1534   if (InsertionPt != BB->end())
1535     Builder.insertDbgValueIntrinsic(APN, DIVar, DIExpr, NewLoc, &*InsertionPt);
1536 }
1537 
1538 /// Determine whether this alloca is either a VLA or an array.
1539 static bool isArray(AllocaInst *AI) {
1540   return AI->isArrayAllocation() ||
1541          (AI->getAllocatedType() && AI->getAllocatedType()->isArrayTy());
1542 }
1543 
1544 /// Determine whether this alloca is a structure.
1545 static bool isStructure(AllocaInst *AI) {
1546   return AI->getAllocatedType() && AI->getAllocatedType()->isStructTy();
1547 }
1548 
1549 /// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
1550 /// of llvm.dbg.value intrinsics.
1551 bool llvm::LowerDbgDeclare(Function &F) {
1552   bool Changed = false;
1553   DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
1554   SmallVector<DbgDeclareInst *, 4> Dbgs;
1555   for (auto &FI : F)
1556     for (Instruction &BI : FI)
1557       if (auto DDI = dyn_cast<DbgDeclareInst>(&BI))
1558         Dbgs.push_back(DDI);
1559 
1560   if (Dbgs.empty())
1561     return Changed;
1562 
1563   for (auto &I : Dbgs) {
1564     DbgDeclareInst *DDI = I;
1565     AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress());
1566     // If this is an alloca for a scalar variable, insert a dbg.value
1567     // at each load and store to the alloca and erase the dbg.declare.
1568     // The dbg.values allow tracking a variable even if it is not
1569     // stored on the stack, while the dbg.declare can only describe
1570     // the stack slot (and at a lexical-scope granularity). Later
1571     // passes will attempt to elide the stack slot.
1572     if (!AI || isArray(AI) || isStructure(AI))
1573       continue;
1574 
1575     // A volatile load/store means that the alloca can't be elided anyway.
1576     if (llvm::any_of(AI->users(), [](User *U) -> bool {
1577           if (LoadInst *LI = dyn_cast<LoadInst>(U))
1578             return LI->isVolatile();
1579           if (StoreInst *SI = dyn_cast<StoreInst>(U))
1580             return SI->isVolatile();
1581           return false;
1582         }))
1583       continue;
1584 
1585     SmallVector<const Value *, 8> WorkList;
1586     WorkList.push_back(AI);
1587     while (!WorkList.empty()) {
1588       const Value *V = WorkList.pop_back_val();
1589       for (auto &AIUse : V->uses()) {
1590         User *U = AIUse.getUser();
1591         if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1592           if (AIUse.getOperandNo() == 1)
1593             ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
1594         } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
1595           ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
1596         } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
1597           // This is a call by-value or some other instruction that takes a
1598           // pointer to the variable. Insert a *value* intrinsic that describes
1599           // the variable by dereferencing the alloca.
1600           if (!CI->isLifetimeStartOrEnd()) {
1601             DebugLoc NewLoc = getDebugValueLoc(DDI, nullptr);
1602             auto *DerefExpr =
1603                 DIExpression::append(DDI->getExpression(), dwarf::DW_OP_deref);
1604             DIB.insertDbgValueIntrinsic(AI, DDI->getVariable(), DerefExpr,
1605                                         NewLoc, CI);
1606           }
1607         } else if (BitCastInst *BI = dyn_cast<BitCastInst>(U)) {
1608           if (BI->getType()->isPointerTy())
1609             WorkList.push_back(BI);
1610         }
1611       }
1612     }
1613     DDI->eraseFromParent();
1614     Changed = true;
1615   }
1616 
1617   if (Changed)
1618   for (BasicBlock &BB : F)
1619     RemoveRedundantDbgInstrs(&BB);
1620 
1621   return Changed;
1622 }
1623 
1624 /// Propagate dbg.value intrinsics through the newly inserted PHIs.
1625 void llvm::insertDebugValuesForPHIs(BasicBlock *BB,
1626                                     SmallVectorImpl<PHINode *> &InsertedPHIs) {
1627   assert(BB && "No BasicBlock to clone dbg.value(s) from.");
1628   if (InsertedPHIs.size() == 0)
1629     return;
1630 
1631   // Map existing PHI nodes to their dbg.values.
1632   ValueToValueMapTy DbgValueMap;
1633   for (auto &I : *BB) {
1634     if (auto DbgII = dyn_cast<DbgVariableIntrinsic>(&I)) {
1635       for (Value *V : DbgII->location_ops())
1636         if (auto *Loc = dyn_cast_or_null<PHINode>(V))
1637           DbgValueMap.insert({Loc, DbgII});
1638     }
1639   }
1640   if (DbgValueMap.size() == 0)
1641     return;
1642 
1643   // Map a pair of the destination BB and old dbg.value to the new dbg.value,
1644   // so that if a dbg.value is being rewritten to use more than one of the
1645   // inserted PHIs in the same destination BB, we can update the same dbg.value
1646   // with all the new PHIs instead of creating one copy for each.
1647   MapVector<std::pair<BasicBlock *, DbgVariableIntrinsic *>,
1648             DbgVariableIntrinsic *>
1649       NewDbgValueMap;
1650   // Then iterate through the new PHIs and look to see if they use one of the
1651   // previously mapped PHIs. If so, create a new dbg.value intrinsic that will
1652   // propagate the info through the new PHI. If we use more than one new PHI in
1653   // a single destination BB with the same old dbg.value, merge the updates so
1654   // that we get a single new dbg.value with all the new PHIs.
1655   for (auto PHI : InsertedPHIs) {
1656     BasicBlock *Parent = PHI->getParent();
1657     // Avoid inserting an intrinsic into an EH block.
1658     if (Parent->getFirstNonPHI()->isEHPad())
1659       continue;
1660     for (auto VI : PHI->operand_values()) {
1661       auto V = DbgValueMap.find(VI);
1662       if (V != DbgValueMap.end()) {
1663         auto *DbgII = cast<DbgVariableIntrinsic>(V->second);
1664         auto NewDI = NewDbgValueMap.find({Parent, DbgII});
1665         if (NewDI == NewDbgValueMap.end()) {
1666           auto *NewDbgII = cast<DbgVariableIntrinsic>(DbgII->clone());
1667           NewDI = NewDbgValueMap.insert({{Parent, DbgII}, NewDbgII}).first;
1668         }
1669         DbgVariableIntrinsic *NewDbgII = NewDI->second;
1670         // If PHI contains VI as an operand more than once, we may
1671         // replaced it in NewDbgII; confirm that it is present.
1672         if (is_contained(NewDbgII->location_ops(), VI))
1673           NewDbgII->replaceVariableLocationOp(VI, PHI);
1674       }
1675     }
1676   }
1677   // Insert thew new dbg.values into their destination blocks.
1678   for (auto DI : NewDbgValueMap) {
1679     BasicBlock *Parent = DI.first.first;
1680     auto *NewDbgII = DI.second;
1681     auto InsertionPt = Parent->getFirstInsertionPt();
1682     assert(InsertionPt != Parent->end() && "Ill-formed basic block");
1683     NewDbgII->insertBefore(&*InsertionPt);
1684   }
1685 }
1686 
1687 bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
1688                              DIBuilder &Builder, uint8_t DIExprFlags,
1689                              int Offset) {
1690   auto DbgAddrs = FindDbgAddrUses(Address);
1691   for (DbgVariableIntrinsic *DII : DbgAddrs) {
1692     const DebugLoc &Loc = DII->getDebugLoc();
1693     auto *DIVar = DII->getVariable();
1694     auto *DIExpr = DII->getExpression();
1695     assert(DIVar && "Missing variable");
1696     DIExpr = DIExpression::prepend(DIExpr, DIExprFlags, Offset);
1697     // Insert llvm.dbg.declare immediately before DII, and remove old
1698     // llvm.dbg.declare.
1699     Builder.insertDeclare(NewAddress, DIVar, DIExpr, Loc, DII);
1700     DII->eraseFromParent();
1701   }
1702   return !DbgAddrs.empty();
1703 }
1704 
1705 static void replaceOneDbgValueForAlloca(DbgValueInst *DVI, Value *NewAddress,
1706                                         DIBuilder &Builder, int Offset) {
1707   const DebugLoc &Loc = DVI->getDebugLoc();
1708   auto *DIVar = DVI->getVariable();
1709   auto *DIExpr = DVI->getExpression();
1710   assert(DIVar && "Missing variable");
1711 
1712   // This is an alloca-based llvm.dbg.value. The first thing it should do with
1713   // the alloca pointer is dereference it. Otherwise we don't know how to handle
1714   // it and give up.
1715   if (!DIExpr || DIExpr->getNumElements() < 1 ||
1716       DIExpr->getElement(0) != dwarf::DW_OP_deref)
1717     return;
1718 
1719   // Insert the offset before the first deref.
1720   // We could just change the offset argument of dbg.value, but it's unsigned...
1721   if (Offset)
1722     DIExpr = DIExpression::prepend(DIExpr, 0, Offset);
1723 
1724   Builder.insertDbgValueIntrinsic(NewAddress, DIVar, DIExpr, Loc, DVI);
1725   DVI->eraseFromParent();
1726 }
1727 
1728 void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
1729                                     DIBuilder &Builder, int Offset) {
1730   if (auto *L = LocalAsMetadata::getIfExists(AI))
1731     if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
1732       for (Use &U : llvm::make_early_inc_range(MDV->uses()))
1733         if (auto *DVI = dyn_cast<DbgValueInst>(U.getUser()))
1734           replaceOneDbgValueForAlloca(DVI, NewAllocaAddress, Builder, Offset);
1735 }
1736 
1737 /// Where possible to salvage debug information for \p I do so
1738 /// and return True. If not possible mark undef and return False.
1739 void llvm::salvageDebugInfo(Instruction &I) {
1740   SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
1741   findDbgUsers(DbgUsers, &I);
1742   salvageDebugInfoForDbgValues(I, DbgUsers);
1743 }
1744 
1745 void llvm::salvageDebugInfoForDbgValues(
1746     Instruction &I, ArrayRef<DbgVariableIntrinsic *> DbgUsers) {
1747   // These are arbitrary chosen limits on the maximum number of values and the
1748   // maximum size of a debug expression we can salvage up to, used for
1749   // performance reasons.
1750   const unsigned MaxDebugArgs = 16;
1751   const unsigned MaxExpressionSize = 128;
1752   bool Salvaged = false;
1753 
1754   for (auto *DII : DbgUsers) {
1755     // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they
1756     // are implicitly pointing out the value as a DWARF memory location
1757     // description.
1758     bool StackValue = isa<DbgValueInst>(DII);
1759     auto DIILocation = DII->location_ops();
1760     assert(
1761         is_contained(DIILocation, &I) &&
1762         "DbgVariableIntrinsic must use salvaged instruction as its location");
1763     SmallVector<Value *, 4> AdditionalValues;
1764     // `I` may appear more than once in DII's location ops, and each use of `I`
1765     // must be updated in the DIExpression and potentially have additional
1766     // values added; thus we call salvageDebugInfoImpl for each `I` instance in
1767     // DIILocation.
1768     Value *Op0 = nullptr;
1769     DIExpression *SalvagedExpr = DII->getExpression();
1770     auto LocItr = find(DIILocation, &I);
1771     while (SalvagedExpr && LocItr != DIILocation.end()) {
1772       SmallVector<uint64_t, 16> Ops;
1773       unsigned LocNo = std::distance(DIILocation.begin(), LocItr);
1774       uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands();
1775       Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues);
1776       if (!Op0)
1777         break;
1778       SalvagedExpr =
1779           DIExpression::appendOpsToArg(SalvagedExpr, Ops, LocNo, StackValue);
1780       LocItr = std::find(++LocItr, DIILocation.end(), &I);
1781     }
1782     // salvageDebugInfoImpl should fail on examining the first element of
1783     // DbgUsers, or none of them.
1784     if (!Op0)
1785       break;
1786 
1787     DII->replaceVariableLocationOp(&I, Op0);
1788     bool IsValidSalvageExpr = SalvagedExpr->getNumElements() <= MaxExpressionSize;
1789     if (AdditionalValues.empty() && IsValidSalvageExpr) {
1790       DII->setExpression(SalvagedExpr);
1791     } else if (isa<DbgValueInst>(DII) && IsValidSalvageExpr &&
1792                DII->getNumVariableLocationOps() + AdditionalValues.size() <=
1793                    MaxDebugArgs) {
1794       DII->addVariableLocationOps(AdditionalValues, SalvagedExpr);
1795     } else {
1796       // Do not salvage using DIArgList for dbg.addr/dbg.declare, as it is
1797       // currently only valid for stack value expressions.
1798       // Also do not salvage if the resulting DIArgList would contain an
1799       // unreasonably large number of values.
1800       Value *Undef = UndefValue::get(I.getOperand(0)->getType());
1801       DII->replaceVariableLocationOp(I.getOperand(0), Undef);
1802     }
1803     LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
1804     Salvaged = true;
1805   }
1806 
1807   if (Salvaged)
1808     return;
1809 
1810   for (auto *DII : DbgUsers) {
1811     Value *Undef = UndefValue::get(I.getType());
1812     DII->replaceVariableLocationOp(&I, Undef);
1813   }
1814 }
1815 
1816 Value *getSalvageOpsForGEP(GetElementPtrInst *GEP, const DataLayout &DL,
1817                            uint64_t CurrentLocOps,
1818                            SmallVectorImpl<uint64_t> &Opcodes,
1819                            SmallVectorImpl<Value *> &AdditionalValues) {
1820   unsigned BitWidth = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
1821   // Rewrite a GEP into a DIExpression.
1822   MapVector<Value *, APInt> VariableOffsets;
1823   APInt ConstantOffset(BitWidth, 0);
1824   if (!GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset))
1825     return nullptr;
1826   if (!VariableOffsets.empty() && !CurrentLocOps) {
1827     Opcodes.insert(Opcodes.begin(), {dwarf::DW_OP_LLVM_arg, 0});
1828     CurrentLocOps = 1;
1829   }
1830   for (auto Offset : VariableOffsets) {
1831     AdditionalValues.push_back(Offset.first);
1832     assert(Offset.second.isStrictlyPositive() &&
1833            "Expected strictly positive multiplier for offset.");
1834     Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps++, dwarf::DW_OP_constu,
1835                     Offset.second.getZExtValue(), dwarf::DW_OP_mul,
1836                     dwarf::DW_OP_plus});
1837   }
1838   DIExpression::appendOffset(Opcodes, ConstantOffset.getSExtValue());
1839   return GEP->getOperand(0);
1840 }
1841 
1842 uint64_t getDwarfOpForBinOp(Instruction::BinaryOps Opcode) {
1843   switch (Opcode) {
1844   case Instruction::Add:
1845     return dwarf::DW_OP_plus;
1846   case Instruction::Sub:
1847     return dwarf::DW_OP_minus;
1848   case Instruction::Mul:
1849     return dwarf::DW_OP_mul;
1850   case Instruction::SDiv:
1851     return dwarf::DW_OP_div;
1852   case Instruction::SRem:
1853     return dwarf::DW_OP_mod;
1854   case Instruction::Or:
1855     return dwarf::DW_OP_or;
1856   case Instruction::And:
1857     return dwarf::DW_OP_and;
1858   case Instruction::Xor:
1859     return dwarf::DW_OP_xor;
1860   case Instruction::Shl:
1861     return dwarf::DW_OP_shl;
1862   case Instruction::LShr:
1863     return dwarf::DW_OP_shr;
1864   case Instruction::AShr:
1865     return dwarf::DW_OP_shra;
1866   default:
1867     // TODO: Salvage from each kind of binop we know about.
1868     return 0;
1869   }
1870 }
1871 
1872 Value *getSalvageOpsForBinOp(BinaryOperator *BI, uint64_t CurrentLocOps,
1873                              SmallVectorImpl<uint64_t> &Opcodes,
1874                              SmallVectorImpl<Value *> &AdditionalValues) {
1875   // Handle binary operations with constant integer operands as a special case.
1876   auto *ConstInt = dyn_cast<ConstantInt>(BI->getOperand(1));
1877   // Values wider than 64 bits cannot be represented within a DIExpression.
1878   if (ConstInt && ConstInt->getBitWidth() > 64)
1879     return nullptr;
1880 
1881   Instruction::BinaryOps BinOpcode = BI->getOpcode();
1882   // Push any Constant Int operand onto the expression stack.
1883   if (ConstInt) {
1884     uint64_t Val = ConstInt->getSExtValue();
1885     // Add or Sub Instructions with a constant operand can potentially be
1886     // simplified.
1887     if (BinOpcode == Instruction::Add || BinOpcode == Instruction::Sub) {
1888       uint64_t Offset = BinOpcode == Instruction::Add ? Val : -int64_t(Val);
1889       DIExpression::appendOffset(Opcodes, Offset);
1890       return BI->getOperand(0);
1891     }
1892     Opcodes.append({dwarf::DW_OP_constu, Val});
1893   } else {
1894     if (!CurrentLocOps) {
1895       Opcodes.append({dwarf::DW_OP_LLVM_arg, 0});
1896       CurrentLocOps = 1;
1897     }
1898     Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps});
1899     AdditionalValues.push_back(BI->getOperand(1));
1900   }
1901 
1902   // Add salvaged binary operator to expression stack, if it has a valid
1903   // representation in a DIExpression.
1904   uint64_t DwarfBinOp = getDwarfOpForBinOp(BinOpcode);
1905   if (!DwarfBinOp)
1906     return nullptr;
1907   Opcodes.push_back(DwarfBinOp);
1908   return BI->getOperand(0);
1909 }
1910 
1911 Value *llvm::salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps,
1912                                   SmallVectorImpl<uint64_t> &Ops,
1913                                   SmallVectorImpl<Value *> &AdditionalValues) {
1914   auto &M = *I.getModule();
1915   auto &DL = M.getDataLayout();
1916 
1917   if (auto *CI = dyn_cast<CastInst>(&I)) {
1918     Value *FromValue = CI->getOperand(0);
1919     // No-op casts are irrelevant for debug info.
1920     if (CI->isNoopCast(DL)) {
1921       return FromValue;
1922     }
1923 
1924     Type *Type = CI->getType();
1925     if (Type->isPointerTy())
1926       Type = DL.getIntPtrType(Type);
1927     // Casts other than Trunc, SExt, or ZExt to scalar types cannot be salvaged.
1928     if (Type->isVectorTy() ||
1929         !(isa<TruncInst>(&I) || isa<SExtInst>(&I) || isa<ZExtInst>(&I) ||
1930           isa<IntToPtrInst>(&I) || isa<PtrToIntInst>(&I)))
1931       return nullptr;
1932 
1933     llvm::Type *FromType = FromValue->getType();
1934     if (FromType->isPointerTy())
1935       FromType = DL.getIntPtrType(FromType);
1936 
1937     unsigned FromTypeBitSize = FromType->getScalarSizeInBits();
1938     unsigned ToTypeBitSize = Type->getScalarSizeInBits();
1939 
1940     auto ExtOps = DIExpression::getExtOps(FromTypeBitSize, ToTypeBitSize,
1941                                           isa<SExtInst>(&I));
1942     Ops.append(ExtOps.begin(), ExtOps.end());
1943     return FromValue;
1944   }
1945 
1946   if (auto *GEP = dyn_cast<GetElementPtrInst>(&I))
1947     return getSalvageOpsForGEP(GEP, DL, CurrentLocOps, Ops, AdditionalValues);
1948   if (auto *BI = dyn_cast<BinaryOperator>(&I))
1949     return getSalvageOpsForBinOp(BI, CurrentLocOps, Ops, AdditionalValues);
1950 
1951   // *Not* to do: we should not attempt to salvage load instructions,
1952   // because the validity and lifetime of a dbg.value containing
1953   // DW_OP_deref becomes difficult to analyze. See PR40628 for examples.
1954   return nullptr;
1955 }
1956 
1957 /// A replacement for a dbg.value expression.
1958 using DbgValReplacement = Optional<DIExpression *>;
1959 
1960 /// Point debug users of \p From to \p To using exprs given by \p RewriteExpr,
1961 /// possibly moving/undefing users to prevent use-before-def. Returns true if
1962 /// changes are made.
1963 static bool rewriteDebugUsers(
1964     Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT,
1965     function_ref<DbgValReplacement(DbgVariableIntrinsic &DII)> RewriteExpr) {
1966   // Find debug users of From.
1967   SmallVector<DbgVariableIntrinsic *, 1> Users;
1968   findDbgUsers(Users, &From);
1969   if (Users.empty())
1970     return false;
1971 
1972   // Prevent use-before-def of To.
1973   bool Changed = false;
1974   SmallPtrSet<DbgVariableIntrinsic *, 1> UndefOrSalvage;
1975   if (isa<Instruction>(&To)) {
1976     bool DomPointAfterFrom = From.getNextNonDebugInstruction() == &DomPoint;
1977 
1978     for (auto *DII : Users) {
1979       // It's common to see a debug user between From and DomPoint. Move it
1980       // after DomPoint to preserve the variable update without any reordering.
1981       if (DomPointAfterFrom && DII->getNextNonDebugInstruction() == &DomPoint) {
1982         LLVM_DEBUG(dbgs() << "MOVE:  " << *DII << '\n');
1983         DII->moveAfter(&DomPoint);
1984         Changed = true;
1985 
1986       // Users which otherwise aren't dominated by the replacement value must
1987       // be salvaged or deleted.
1988       } else if (!DT.dominates(&DomPoint, DII)) {
1989         UndefOrSalvage.insert(DII);
1990       }
1991     }
1992   }
1993 
1994   // Update debug users without use-before-def risk.
1995   for (auto *DII : Users) {
1996     if (UndefOrSalvage.count(DII))
1997       continue;
1998 
1999     DbgValReplacement DVR = RewriteExpr(*DII);
2000     if (!DVR)
2001       continue;
2002 
2003     DII->replaceVariableLocationOp(&From, &To);
2004     DII->setExpression(*DVR);
2005     LLVM_DEBUG(dbgs() << "REWRITE:  " << *DII << '\n');
2006     Changed = true;
2007   }
2008 
2009   if (!UndefOrSalvage.empty()) {
2010     // Try to salvage the remaining debug users.
2011     salvageDebugInfo(From);
2012     Changed = true;
2013   }
2014 
2015   return Changed;
2016 }
2017 
2018 /// Check if a bitcast between a value of type \p FromTy to type \p ToTy would
2019 /// losslessly preserve the bits and semantics of the value. This predicate is
2020 /// symmetric, i.e swapping \p FromTy and \p ToTy should give the same result.
2021 ///
2022 /// Note that Type::canLosslesslyBitCastTo is not suitable here because it
2023 /// allows semantically unequivalent bitcasts, such as <2 x i64> -> <4 x i32>,
2024 /// and also does not allow lossless pointer <-> integer conversions.
2025 static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy,
2026                                          Type *ToTy) {
2027   // Trivially compatible types.
2028   if (FromTy == ToTy)
2029     return true;
2030 
2031   // Handle compatible pointer <-> integer conversions.
2032   if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) {
2033     bool SameSize = DL.getTypeSizeInBits(FromTy) == DL.getTypeSizeInBits(ToTy);
2034     bool LosslessConversion = !DL.isNonIntegralPointerType(FromTy) &&
2035                               !DL.isNonIntegralPointerType(ToTy);
2036     return SameSize && LosslessConversion;
2037   }
2038 
2039   // TODO: This is not exhaustive.
2040   return false;
2041 }
2042 
2043 bool llvm::replaceAllDbgUsesWith(Instruction &From, Value &To,
2044                                  Instruction &DomPoint, DominatorTree &DT) {
2045   // Exit early if From has no debug users.
2046   if (!From.isUsedByMetadata())
2047     return false;
2048 
2049   assert(&From != &To && "Can't replace something with itself");
2050 
2051   Type *FromTy = From.getType();
2052   Type *ToTy = To.getType();
2053 
2054   auto Identity = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
2055     return DII.getExpression();
2056   };
2057 
2058   // Handle no-op conversions.
2059   Module &M = *From.getModule();
2060   const DataLayout &DL = M.getDataLayout();
2061   if (isBitCastSemanticsPreserving(DL, FromTy, ToTy))
2062     return rewriteDebugUsers(From, To, DomPoint, DT, Identity);
2063 
2064   // Handle integer-to-integer widening and narrowing.
2065   // FIXME: Use DW_OP_convert when it's available everywhere.
2066   if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) {
2067     uint64_t FromBits = FromTy->getPrimitiveSizeInBits();
2068     uint64_t ToBits = ToTy->getPrimitiveSizeInBits();
2069     assert(FromBits != ToBits && "Unexpected no-op conversion");
2070 
2071     // When the width of the result grows, assume that a debugger will only
2072     // access the low `FromBits` bits when inspecting the source variable.
2073     if (FromBits < ToBits)
2074       return rewriteDebugUsers(From, To, DomPoint, DT, Identity);
2075 
2076     // The width of the result has shrunk. Use sign/zero extension to describe
2077     // the source variable's high bits.
2078     auto SignOrZeroExt = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
2079       DILocalVariable *Var = DII.getVariable();
2080 
2081       // Without knowing signedness, sign/zero extension isn't possible.
2082       auto Signedness = Var->getSignedness();
2083       if (!Signedness)
2084         return None;
2085 
2086       bool Signed = *Signedness == DIBasicType::Signedness::Signed;
2087       return DIExpression::appendExt(DII.getExpression(), ToBits, FromBits,
2088                                      Signed);
2089     };
2090     return rewriteDebugUsers(From, To, DomPoint, DT, SignOrZeroExt);
2091   }
2092 
2093   // TODO: Floating-point conversions, vectors.
2094   return false;
2095 }
2096 
2097 std::pair<unsigned, unsigned>
2098 llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
2099   unsigned NumDeadInst = 0;
2100   unsigned NumDeadDbgInst = 0;
2101   // Delete the instructions backwards, as it has a reduced likelihood of
2102   // having to update as many def-use and use-def chains.
2103   Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
2104   while (EndInst != &BB->front()) {
2105     // Delete the next to last instruction.
2106     Instruction *Inst = &*--EndInst->getIterator();
2107     if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
2108       Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
2109     if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
2110       EndInst = Inst;
2111       continue;
2112     }
2113     if (isa<DbgInfoIntrinsic>(Inst))
2114       ++NumDeadDbgInst;
2115     else
2116       ++NumDeadInst;
2117     Inst->eraseFromParent();
2118   }
2119   return {NumDeadInst, NumDeadDbgInst};
2120 }
2121 
2122 unsigned llvm::changeToUnreachable(Instruction *I, bool PreserveLCSSA,
2123                                    DomTreeUpdater *DTU,
2124                                    MemorySSAUpdater *MSSAU) {
2125   BasicBlock *BB = I->getParent();
2126 
2127   if (MSSAU)
2128     MSSAU->changeToUnreachable(I);
2129 
2130   SmallSet<BasicBlock *, 8> UniqueSuccessors;
2131 
2132   // Loop over all of the successors, removing BB's entry from any PHI
2133   // nodes.
2134   for (BasicBlock *Successor : successors(BB)) {
2135     Successor->removePredecessor(BB, PreserveLCSSA);
2136     if (DTU)
2137       UniqueSuccessors.insert(Successor);
2138   }
2139   auto *UI = new UnreachableInst(I->getContext(), I);
2140   UI->setDebugLoc(I->getDebugLoc());
2141 
2142   // All instructions after this are dead.
2143   unsigned NumInstrsRemoved = 0;
2144   BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
2145   while (BBI != BBE) {
2146     if (!BBI->use_empty())
2147       BBI->replaceAllUsesWith(UndefValue::get(BBI->getType()));
2148     BB->getInstList().erase(BBI++);
2149     ++NumInstrsRemoved;
2150   }
2151   if (DTU) {
2152     SmallVector<DominatorTree::UpdateType, 8> Updates;
2153     Updates.reserve(UniqueSuccessors.size());
2154     for (BasicBlock *UniqueSuccessor : UniqueSuccessors)
2155       Updates.push_back({DominatorTree::Delete, BB, UniqueSuccessor});
2156     DTU->applyUpdates(Updates);
2157   }
2158   return NumInstrsRemoved;
2159 }
2160 
2161 CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) {
2162   SmallVector<Value *, 8> Args(II->args());
2163   SmallVector<OperandBundleDef, 1> OpBundles;
2164   II->getOperandBundlesAsDefs(OpBundles);
2165   CallInst *NewCall = CallInst::Create(II->getFunctionType(),
2166                                        II->getCalledOperand(), Args, OpBundles);
2167   NewCall->setCallingConv(II->getCallingConv());
2168   NewCall->setAttributes(II->getAttributes());
2169   NewCall->setDebugLoc(II->getDebugLoc());
2170   NewCall->copyMetadata(*II);
2171 
2172   // If the invoke had profile metadata, try converting them for CallInst.
2173   uint64_t TotalWeight;
2174   if (NewCall->extractProfTotalWeight(TotalWeight)) {
2175     // Set the total weight if it fits into i32, otherwise reset.
2176     MDBuilder MDB(NewCall->getContext());
2177     auto NewWeights = uint32_t(TotalWeight) != TotalWeight
2178                           ? nullptr
2179                           : MDB.createBranchWeights({uint32_t(TotalWeight)});
2180     NewCall->setMetadata(LLVMContext::MD_prof, NewWeights);
2181   }
2182 
2183   return NewCall;
2184 }
2185 
2186 // changeToCall - Convert the specified invoke into a normal call.
2187 CallInst *llvm::changeToCall(InvokeInst *II, DomTreeUpdater *DTU) {
2188   CallInst *NewCall = createCallMatchingInvoke(II);
2189   NewCall->takeName(II);
2190   NewCall->insertBefore(II);
2191   II->replaceAllUsesWith(NewCall);
2192 
2193   // Follow the call by a branch to the normal destination.
2194   BasicBlock *NormalDestBB = II->getNormalDest();
2195   BranchInst::Create(NormalDestBB, II);
2196 
2197   // Update PHI nodes in the unwind destination
2198   BasicBlock *BB = II->getParent();
2199   BasicBlock *UnwindDestBB = II->getUnwindDest();
2200   UnwindDestBB->removePredecessor(BB);
2201   II->eraseFromParent();
2202   if (DTU)
2203     DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2204   return NewCall;
2205 }
2206 
2207 BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
2208                                                    BasicBlock *UnwindEdge,
2209                                                    DomTreeUpdater *DTU) {
2210   BasicBlock *BB = CI->getParent();
2211 
2212   // Convert this function call into an invoke instruction.  First, split the
2213   // basic block.
2214   BasicBlock *Split = SplitBlock(BB, CI, DTU, /*LI=*/nullptr, /*MSSAU*/ nullptr,
2215                                  CI->getName() + ".noexc");
2216 
2217   // Delete the unconditional branch inserted by SplitBlock
2218   BB->getInstList().pop_back();
2219 
2220   // Create the new invoke instruction.
2221   SmallVector<Value *, 8> InvokeArgs(CI->args());
2222   SmallVector<OperandBundleDef, 1> OpBundles;
2223 
2224   CI->getOperandBundlesAsDefs(OpBundles);
2225 
2226   // Note: we're round tripping operand bundles through memory here, and that
2227   // can potentially be avoided with a cleverer API design that we do not have
2228   // as of this time.
2229 
2230   InvokeInst *II =
2231       InvokeInst::Create(CI->getFunctionType(), CI->getCalledOperand(), Split,
2232                          UnwindEdge, InvokeArgs, OpBundles, CI->getName(), BB);
2233   II->setDebugLoc(CI->getDebugLoc());
2234   II->setCallingConv(CI->getCallingConv());
2235   II->setAttributes(CI->getAttributes());
2236   II->setMetadata(LLVMContext::MD_prof, CI->getMetadata(LLVMContext::MD_prof));
2237 
2238   if (DTU)
2239     DTU->applyUpdates({{DominatorTree::Insert, BB, UnwindEdge}});
2240 
2241   // Make sure that anything using the call now uses the invoke!  This also
2242   // updates the CallGraph if present, because it uses a WeakTrackingVH.
2243   CI->replaceAllUsesWith(II);
2244 
2245   // Delete the original call
2246   Split->getInstList().pop_front();
2247   return Split;
2248 }
2249 
2250 static bool markAliveBlocks(Function &F,
2251                             SmallPtrSetImpl<BasicBlock *> &Reachable,
2252                             DomTreeUpdater *DTU = nullptr) {
2253   SmallVector<BasicBlock*, 128> Worklist;
2254   BasicBlock *BB = &F.front();
2255   Worklist.push_back(BB);
2256   Reachable.insert(BB);
2257   bool Changed = false;
2258   do {
2259     BB = Worklist.pop_back_val();
2260 
2261     // Do a quick scan of the basic block, turning any obviously unreachable
2262     // instructions into LLVM unreachable insts.  The instruction combining pass
2263     // canonicalizes unreachable insts into stores to null or undef.
2264     for (Instruction &I : *BB) {
2265       if (auto *CI = dyn_cast<CallInst>(&I)) {
2266         Value *Callee = CI->getCalledOperand();
2267         // Handle intrinsic calls.
2268         if (Function *F = dyn_cast<Function>(Callee)) {
2269           auto IntrinsicID = F->getIntrinsicID();
2270           // Assumptions that are known to be false are equivalent to
2271           // unreachable. Also, if the condition is undefined, then we make the
2272           // choice most beneficial to the optimizer, and choose that to also be
2273           // unreachable.
2274           if (IntrinsicID == Intrinsic::assume) {
2275             if (match(CI->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
2276               // Don't insert a call to llvm.trap right before the unreachable.
2277               changeToUnreachable(CI, false, DTU);
2278               Changed = true;
2279               break;
2280             }
2281           } else if (IntrinsicID == Intrinsic::experimental_guard) {
2282             // A call to the guard intrinsic bails out of the current
2283             // compilation unit if the predicate passed to it is false. If the
2284             // predicate is a constant false, then we know the guard will bail
2285             // out of the current compile unconditionally, so all code following
2286             // it is dead.
2287             //
2288             // Note: unlike in llvm.assume, it is not "obviously profitable" for
2289             // guards to treat `undef` as `false` since a guard on `undef` can
2290             // still be useful for widening.
2291             if (match(CI->getArgOperand(0), m_Zero()))
2292               if (!isa<UnreachableInst>(CI->getNextNode())) {
2293                 changeToUnreachable(CI->getNextNode(), false, DTU);
2294                 Changed = true;
2295                 break;
2296               }
2297           }
2298         } else if ((isa<ConstantPointerNull>(Callee) &&
2299                     !NullPointerIsDefined(CI->getFunction())) ||
2300                    isa<UndefValue>(Callee)) {
2301           changeToUnreachable(CI, false, DTU);
2302           Changed = true;
2303           break;
2304         }
2305         if (CI->doesNotReturn() && !CI->isMustTailCall()) {
2306           // If we found a call to a no-return function, insert an unreachable
2307           // instruction after it.  Make sure there isn't *already* one there
2308           // though.
2309           if (!isa<UnreachableInst>(CI->getNextNode())) {
2310             // Don't insert a call to llvm.trap right before the unreachable.
2311             changeToUnreachable(CI->getNextNode(), false, DTU);
2312             Changed = true;
2313           }
2314           break;
2315         }
2316       } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
2317         // Store to undef and store to null are undefined and used to signal
2318         // that they should be changed to unreachable by passes that can't
2319         // modify the CFG.
2320 
2321         // Don't touch volatile stores.
2322         if (SI->isVolatile()) continue;
2323 
2324         Value *Ptr = SI->getOperand(1);
2325 
2326         if (isa<UndefValue>(Ptr) ||
2327             (isa<ConstantPointerNull>(Ptr) &&
2328              !NullPointerIsDefined(SI->getFunction(),
2329                                    SI->getPointerAddressSpace()))) {
2330           changeToUnreachable(SI, false, DTU);
2331           Changed = true;
2332           break;
2333         }
2334       }
2335     }
2336 
2337     Instruction *Terminator = BB->getTerminator();
2338     if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
2339       // Turn invokes that call 'nounwind' functions into ordinary calls.
2340       Value *Callee = II->getCalledOperand();
2341       if ((isa<ConstantPointerNull>(Callee) &&
2342            !NullPointerIsDefined(BB->getParent())) ||
2343           isa<UndefValue>(Callee)) {
2344         changeToUnreachable(II, false, DTU);
2345         Changed = true;
2346       } else {
2347         if (II->doesNotReturn() &&
2348             !isa<UnreachableInst>(II->getNormalDest()->front())) {
2349           // If we found an invoke of a no-return function,
2350           // create a new empty basic block with an `unreachable` terminator,
2351           // and set it as the normal destination for the invoke,
2352           // unless that is already the case.
2353           // Note that the original normal destination could have other uses.
2354           BasicBlock *OrigNormalDest = II->getNormalDest();
2355           OrigNormalDest->removePredecessor(II->getParent());
2356           LLVMContext &Ctx = II->getContext();
2357           BasicBlock *UnreachableNormalDest = BasicBlock::Create(
2358               Ctx, OrigNormalDest->getName() + ".unreachable",
2359               II->getFunction(), OrigNormalDest);
2360           new UnreachableInst(Ctx, UnreachableNormalDest);
2361           II->setNormalDest(UnreachableNormalDest);
2362           if (DTU)
2363             DTU->applyUpdates(
2364                 {{DominatorTree::Delete, BB, OrigNormalDest},
2365                  {DominatorTree::Insert, BB, UnreachableNormalDest}});
2366           Changed = true;
2367         }
2368         if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
2369           if (II->use_empty() && !II->mayHaveSideEffects()) {
2370             // jump to the normal destination branch.
2371             BasicBlock *NormalDestBB = II->getNormalDest();
2372             BasicBlock *UnwindDestBB = II->getUnwindDest();
2373             BranchInst::Create(NormalDestBB, II);
2374             UnwindDestBB->removePredecessor(II->getParent());
2375             II->eraseFromParent();
2376             if (DTU)
2377               DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2378           } else
2379             changeToCall(II, DTU);
2380           Changed = true;
2381         }
2382       }
2383     } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
2384       // Remove catchpads which cannot be reached.
2385       struct CatchPadDenseMapInfo {
2386         static CatchPadInst *getEmptyKey() {
2387           return DenseMapInfo<CatchPadInst *>::getEmptyKey();
2388         }
2389 
2390         static CatchPadInst *getTombstoneKey() {
2391           return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
2392         }
2393 
2394         static unsigned getHashValue(CatchPadInst *CatchPad) {
2395           return static_cast<unsigned>(hash_combine_range(
2396               CatchPad->value_op_begin(), CatchPad->value_op_end()));
2397         }
2398 
2399         static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
2400           if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
2401               RHS == getEmptyKey() || RHS == getTombstoneKey())
2402             return LHS == RHS;
2403           return LHS->isIdenticalTo(RHS);
2404         }
2405       };
2406 
2407       SmallDenseMap<BasicBlock *, int, 8> NumPerSuccessorCases;
2408       // Set of unique CatchPads.
2409       SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
2410                     CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
2411           HandlerSet;
2412       detail::DenseSetEmpty Empty;
2413       for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
2414                                              E = CatchSwitch->handler_end();
2415            I != E; ++I) {
2416         BasicBlock *HandlerBB = *I;
2417         if (DTU)
2418           ++NumPerSuccessorCases[HandlerBB];
2419         auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI());
2420         if (!HandlerSet.insert({CatchPad, Empty}).second) {
2421           if (DTU)
2422             --NumPerSuccessorCases[HandlerBB];
2423           CatchSwitch->removeHandler(I);
2424           --I;
2425           --E;
2426           Changed = true;
2427         }
2428       }
2429       if (DTU) {
2430         std::vector<DominatorTree::UpdateType> Updates;
2431         for (const std::pair<BasicBlock *, int> &I : NumPerSuccessorCases)
2432           if (I.second == 0)
2433             Updates.push_back({DominatorTree::Delete, BB, I.first});
2434         DTU->applyUpdates(Updates);
2435       }
2436     }
2437 
2438     Changed |= ConstantFoldTerminator(BB, true, nullptr, DTU);
2439     for (BasicBlock *Successor : successors(BB))
2440       if (Reachable.insert(Successor).second)
2441         Worklist.push_back(Successor);
2442   } while (!Worklist.empty());
2443   return Changed;
2444 }
2445 
2446 void llvm::removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU) {
2447   Instruction *TI = BB->getTerminator();
2448 
2449   if (auto *II = dyn_cast<InvokeInst>(TI)) {
2450     changeToCall(II, DTU);
2451     return;
2452   }
2453 
2454   Instruction *NewTI;
2455   BasicBlock *UnwindDest;
2456 
2457   if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
2458     NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI);
2459     UnwindDest = CRI->getUnwindDest();
2460   } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
2461     auto *NewCatchSwitch = CatchSwitchInst::Create(
2462         CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
2463         CatchSwitch->getName(), CatchSwitch);
2464     for (BasicBlock *PadBB : CatchSwitch->handlers())
2465       NewCatchSwitch->addHandler(PadBB);
2466 
2467     NewTI = NewCatchSwitch;
2468     UnwindDest = CatchSwitch->getUnwindDest();
2469   } else {
2470     llvm_unreachable("Could not find unwind successor");
2471   }
2472 
2473   NewTI->takeName(TI);
2474   NewTI->setDebugLoc(TI->getDebugLoc());
2475   UnwindDest->removePredecessor(BB);
2476   TI->replaceAllUsesWith(NewTI);
2477   TI->eraseFromParent();
2478   if (DTU)
2479     DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDest}});
2480 }
2481 
2482 /// removeUnreachableBlocks - Remove blocks that are not reachable, even
2483 /// if they are in a dead cycle.  Return true if a change was made, false
2484 /// otherwise.
2485 bool llvm::removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU,
2486                                    MemorySSAUpdater *MSSAU) {
2487   SmallPtrSet<BasicBlock *, 16> Reachable;
2488   bool Changed = markAliveBlocks(F, Reachable, DTU);
2489 
2490   // If there are unreachable blocks in the CFG...
2491   if (Reachable.size() == F.size())
2492     return Changed;
2493 
2494   assert(Reachable.size() < F.size());
2495 
2496   // Are there any blocks left to actually delete?
2497   SmallSetVector<BasicBlock *, 8> BlocksToRemove;
2498   for (BasicBlock &BB : F) {
2499     // Skip reachable basic blocks
2500     if (Reachable.count(&BB))
2501       continue;
2502     // Skip already-deleted blocks
2503     if (DTU && DTU->isBBPendingDeletion(&BB))
2504       continue;
2505     BlocksToRemove.insert(&BB);
2506   }
2507 
2508   if (BlocksToRemove.empty())
2509     return Changed;
2510 
2511   Changed = true;
2512   NumRemoved += BlocksToRemove.size();
2513 
2514   if (MSSAU)
2515     MSSAU->removeBlocks(BlocksToRemove);
2516 
2517   DeleteDeadBlocks(BlocksToRemove.takeVector(), DTU);
2518 
2519   return Changed;
2520 }
2521 
2522 void llvm::combineMetadata(Instruction *K, const Instruction *J,
2523                            ArrayRef<unsigned> KnownIDs, bool DoesKMove) {
2524   SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
2525   K->dropUnknownNonDebugMetadata(KnownIDs);
2526   K->getAllMetadataOtherThanDebugLoc(Metadata);
2527   for (const auto &MD : Metadata) {
2528     unsigned Kind = MD.first;
2529     MDNode *JMD = J->getMetadata(Kind);
2530     MDNode *KMD = MD.second;
2531 
2532     switch (Kind) {
2533       default:
2534         K->setMetadata(Kind, nullptr); // Remove unknown metadata
2535         break;
2536       case LLVMContext::MD_dbg:
2537         llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
2538       case LLVMContext::MD_tbaa:
2539         K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
2540         break;
2541       case LLVMContext::MD_alias_scope:
2542         K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
2543         break;
2544       case LLVMContext::MD_noalias:
2545       case LLVMContext::MD_mem_parallel_loop_access:
2546         K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
2547         break;
2548       case LLVMContext::MD_access_group:
2549         K->setMetadata(LLVMContext::MD_access_group,
2550                        intersectAccessGroups(K, J));
2551         break;
2552       case LLVMContext::MD_range:
2553 
2554         // If K does move, use most generic range. Otherwise keep the range of
2555         // K.
2556         if (DoesKMove)
2557           // FIXME: If K does move, we should drop the range info and nonnull.
2558           //        Currently this function is used with DoesKMove in passes
2559           //        doing hoisting/sinking and the current behavior of using the
2560           //        most generic range is correct in those cases.
2561           K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
2562         break;
2563       case LLVMContext::MD_fpmath:
2564         K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
2565         break;
2566       case LLVMContext::MD_invariant_load:
2567         // Only set the !invariant.load if it is present in both instructions.
2568         K->setMetadata(Kind, JMD);
2569         break;
2570       case LLVMContext::MD_nonnull:
2571         // If K does move, keep nonull if it is present in both instructions.
2572         if (DoesKMove)
2573           K->setMetadata(Kind, JMD);
2574         break;
2575       case LLVMContext::MD_invariant_group:
2576         // Preserve !invariant.group in K.
2577         break;
2578       case LLVMContext::MD_align:
2579         K->setMetadata(Kind,
2580           MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
2581         break;
2582       case LLVMContext::MD_dereferenceable:
2583       case LLVMContext::MD_dereferenceable_or_null:
2584         K->setMetadata(Kind,
2585           MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
2586         break;
2587       case LLVMContext::MD_preserve_access_index:
2588         // Preserve !preserve.access.index in K.
2589         break;
2590     }
2591   }
2592   // Set !invariant.group from J if J has it. If both instructions have it
2593   // then we will just pick it from J - even when they are different.
2594   // Also make sure that K is load or store - f.e. combining bitcast with load
2595   // could produce bitcast with invariant.group metadata, which is invalid.
2596   // FIXME: we should try to preserve both invariant.group md if they are
2597   // different, but right now instruction can only have one invariant.group.
2598   if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
2599     if (isa<LoadInst>(K) || isa<StoreInst>(K))
2600       K->setMetadata(LLVMContext::MD_invariant_group, JMD);
2601 }
2602 
2603 void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J,
2604                                  bool KDominatesJ) {
2605   unsigned KnownIDs[] = {
2606       LLVMContext::MD_tbaa,            LLVMContext::MD_alias_scope,
2607       LLVMContext::MD_noalias,         LLVMContext::MD_range,
2608       LLVMContext::MD_invariant_load,  LLVMContext::MD_nonnull,
2609       LLVMContext::MD_invariant_group, LLVMContext::MD_align,
2610       LLVMContext::MD_dereferenceable,
2611       LLVMContext::MD_dereferenceable_or_null,
2612       LLVMContext::MD_access_group,    LLVMContext::MD_preserve_access_index};
2613   combineMetadata(K, J, KnownIDs, KDominatesJ);
2614 }
2615 
2616 void llvm::copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source) {
2617   SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
2618   Source.getAllMetadata(MD);
2619   MDBuilder MDB(Dest.getContext());
2620   Type *NewType = Dest.getType();
2621   const DataLayout &DL = Source.getModule()->getDataLayout();
2622   for (const auto &MDPair : MD) {
2623     unsigned ID = MDPair.first;
2624     MDNode *N = MDPair.second;
2625     // Note, essentially every kind of metadata should be preserved here! This
2626     // routine is supposed to clone a load instruction changing *only its type*.
2627     // The only metadata it makes sense to drop is metadata which is invalidated
2628     // when the pointer type changes. This should essentially never be the case
2629     // in LLVM, but we explicitly switch over only known metadata to be
2630     // conservatively correct. If you are adding metadata to LLVM which pertains
2631     // to loads, you almost certainly want to add it here.
2632     switch (ID) {
2633     case LLVMContext::MD_dbg:
2634     case LLVMContext::MD_tbaa:
2635     case LLVMContext::MD_prof:
2636     case LLVMContext::MD_fpmath:
2637     case LLVMContext::MD_tbaa_struct:
2638     case LLVMContext::MD_invariant_load:
2639     case LLVMContext::MD_alias_scope:
2640     case LLVMContext::MD_noalias:
2641     case LLVMContext::MD_nontemporal:
2642     case LLVMContext::MD_mem_parallel_loop_access:
2643     case LLVMContext::MD_access_group:
2644       // All of these directly apply.
2645       Dest.setMetadata(ID, N);
2646       break;
2647 
2648     case LLVMContext::MD_nonnull:
2649       copyNonnullMetadata(Source, N, Dest);
2650       break;
2651 
2652     case LLVMContext::MD_align:
2653     case LLVMContext::MD_dereferenceable:
2654     case LLVMContext::MD_dereferenceable_or_null:
2655       // These only directly apply if the new type is also a pointer.
2656       if (NewType->isPointerTy())
2657         Dest.setMetadata(ID, N);
2658       break;
2659 
2660     case LLVMContext::MD_range:
2661       copyRangeMetadata(DL, Source, N, Dest);
2662       break;
2663     }
2664   }
2665 }
2666 
2667 void llvm::patchReplacementInstruction(Instruction *I, Value *Repl) {
2668   auto *ReplInst = dyn_cast<Instruction>(Repl);
2669   if (!ReplInst)
2670     return;
2671 
2672   // Patch the replacement so that it is not more restrictive than the value
2673   // being replaced.
2674   // Note that if 'I' is a load being replaced by some operation,
2675   // for example, by an arithmetic operation, then andIRFlags()
2676   // would just erase all math flags from the original arithmetic
2677   // operation, which is clearly not wanted and not needed.
2678   if (!isa<LoadInst>(I))
2679     ReplInst->andIRFlags(I);
2680 
2681   // FIXME: If both the original and replacement value are part of the
2682   // same control-flow region (meaning that the execution of one
2683   // guarantees the execution of the other), then we can combine the
2684   // noalias scopes here and do better than the general conservative
2685   // answer used in combineMetadata().
2686 
2687   // In general, GVN unifies expressions over different control-flow
2688   // regions, and so we need a conservative combination of the noalias
2689   // scopes.
2690   static const unsigned KnownIDs[] = {
2691       LLVMContext::MD_tbaa,            LLVMContext::MD_alias_scope,
2692       LLVMContext::MD_noalias,         LLVMContext::MD_range,
2693       LLVMContext::MD_fpmath,          LLVMContext::MD_invariant_load,
2694       LLVMContext::MD_invariant_group, LLVMContext::MD_nonnull,
2695       LLVMContext::MD_access_group,    LLVMContext::MD_preserve_access_index};
2696   combineMetadata(ReplInst, I, KnownIDs, false);
2697 }
2698 
2699 template <typename RootType, typename DominatesFn>
2700 static unsigned replaceDominatedUsesWith(Value *From, Value *To,
2701                                          const RootType &Root,
2702                                          const DominatesFn &Dominates) {
2703   assert(From->getType() == To->getType());
2704 
2705   unsigned Count = 0;
2706   for (Use &U : llvm::make_early_inc_range(From->uses())) {
2707     if (!Dominates(Root, U))
2708       continue;
2709     U.set(To);
2710     LLVM_DEBUG(dbgs() << "Replace dominated use of '" << From->getName()
2711                       << "' as " << *To << " in " << *U << "\n");
2712     ++Count;
2713   }
2714   return Count;
2715 }
2716 
2717 unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) {
2718    assert(From->getType() == To->getType());
2719    auto *BB = From->getParent();
2720    unsigned Count = 0;
2721 
2722    for (Use &U : llvm::make_early_inc_range(From->uses())) {
2723     auto *I = cast<Instruction>(U.getUser());
2724     if (I->getParent() == BB)
2725       continue;
2726     U.set(To);
2727     ++Count;
2728   }
2729   return Count;
2730 }
2731 
2732 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
2733                                         DominatorTree &DT,
2734                                         const BasicBlockEdge &Root) {
2735   auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) {
2736     return DT.dominates(Root, U);
2737   };
2738   return ::replaceDominatedUsesWith(From, To, Root, Dominates);
2739 }
2740 
2741 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
2742                                         DominatorTree &DT,
2743                                         const BasicBlock *BB) {
2744   auto Dominates = [&DT](const BasicBlock *BB, const Use &U) {
2745     return DT.dominates(BB, U);
2746   };
2747   return ::replaceDominatedUsesWith(From, To, BB, Dominates);
2748 }
2749 
2750 bool llvm::callsGCLeafFunction(const CallBase *Call,
2751                                const TargetLibraryInfo &TLI) {
2752   // Check if the function is specifically marked as a gc leaf function.
2753   if (Call->hasFnAttr("gc-leaf-function"))
2754     return true;
2755   if (const Function *F = Call->getCalledFunction()) {
2756     if (F->hasFnAttribute("gc-leaf-function"))
2757       return true;
2758 
2759     if (auto IID = F->getIntrinsicID()) {
2760       // Most LLVM intrinsics do not take safepoints.
2761       return IID != Intrinsic::experimental_gc_statepoint &&
2762              IID != Intrinsic::experimental_deoptimize &&
2763              IID != Intrinsic::memcpy_element_unordered_atomic &&
2764              IID != Intrinsic::memmove_element_unordered_atomic;
2765     }
2766   }
2767 
2768   // Lib calls can be materialized by some passes, and won't be
2769   // marked as 'gc-leaf-function.' All available Libcalls are
2770   // GC-leaf.
2771   LibFunc LF;
2772   if (TLI.getLibFunc(*Call, LF)) {
2773     return TLI.has(LF);
2774   }
2775 
2776   return false;
2777 }
2778 
2779 void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N,
2780                                LoadInst &NewLI) {
2781   auto *NewTy = NewLI.getType();
2782 
2783   // This only directly applies if the new type is also a pointer.
2784   if (NewTy->isPointerTy()) {
2785     NewLI.setMetadata(LLVMContext::MD_nonnull, N);
2786     return;
2787   }
2788 
2789   // The only other translation we can do is to integral loads with !range
2790   // metadata.
2791   if (!NewTy->isIntegerTy())
2792     return;
2793 
2794   MDBuilder MDB(NewLI.getContext());
2795   const Value *Ptr = OldLI.getPointerOperand();
2796   auto *ITy = cast<IntegerType>(NewTy);
2797   auto *NullInt = ConstantExpr::getPtrToInt(
2798       ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
2799   auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
2800   NewLI.setMetadata(LLVMContext::MD_range,
2801                     MDB.createRange(NonNullInt, NullInt));
2802 }
2803 
2804 void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
2805                              MDNode *N, LoadInst &NewLI) {
2806   auto *NewTy = NewLI.getType();
2807 
2808   // Give up unless it is converted to a pointer where there is a single very
2809   // valuable mapping we can do reliably.
2810   // FIXME: It would be nice to propagate this in more ways, but the type
2811   // conversions make it hard.
2812   if (!NewTy->isPointerTy())
2813     return;
2814 
2815   unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
2816   if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
2817     MDNode *NN = MDNode::get(OldLI.getContext(), None);
2818     NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
2819   }
2820 }
2821 
2822 void llvm::dropDebugUsers(Instruction &I) {
2823   SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
2824   findDbgUsers(DbgUsers, &I);
2825   for (auto *DII : DbgUsers)
2826     DII->eraseFromParent();
2827 }
2828 
2829 void llvm::hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt,
2830                                     BasicBlock *BB) {
2831   // Since we are moving the instructions out of its basic block, we do not
2832   // retain their original debug locations (DILocations) and debug intrinsic
2833   // instructions.
2834   //
2835   // Doing so would degrade the debugging experience and adversely affect the
2836   // accuracy of profiling information.
2837   //
2838   // Currently, when hoisting the instructions, we take the following actions:
2839   // - Remove their debug intrinsic instructions.
2840   // - Set their debug locations to the values from the insertion point.
2841   //
2842   // As per PR39141 (comment #8), the more fundamental reason why the dbg.values
2843   // need to be deleted, is because there will not be any instructions with a
2844   // DILocation in either branch left after performing the transformation. We
2845   // can only insert a dbg.value after the two branches are joined again.
2846   //
2847   // See PR38762, PR39243 for more details.
2848   //
2849   // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to
2850   // encode predicated DIExpressions that yield different results on different
2851   // code paths.
2852 
2853   for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
2854     Instruction *I = &*II;
2855     I->dropUndefImplyingAttrsAndUnknownMetadata();
2856     if (I->isUsedByMetadata())
2857       dropDebugUsers(*I);
2858     if (I->isDebugOrPseudoInst()) {
2859       // Remove DbgInfo and pseudo probe Intrinsics.
2860       II = I->eraseFromParent();
2861       continue;
2862     }
2863     I->setDebugLoc(InsertPt->getDebugLoc());
2864     ++II;
2865   }
2866   DomBlock->getInstList().splice(InsertPt->getIterator(), BB->getInstList(),
2867                                  BB->begin(),
2868                                  BB->getTerminator()->getIterator());
2869 }
2870 
2871 namespace {
2872 
2873 /// A potential constituent of a bitreverse or bswap expression. See
2874 /// collectBitParts for a fuller explanation.
2875 struct BitPart {
2876   BitPart(Value *P, unsigned BW) : Provider(P) {
2877     Provenance.resize(BW);
2878   }
2879 
2880   /// The Value that this is a bitreverse/bswap of.
2881   Value *Provider;
2882 
2883   /// The "provenance" of each bit. Provenance[A] = B means that bit A
2884   /// in Provider becomes bit B in the result of this expression.
2885   SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.
2886 
2887   enum { Unset = -1 };
2888 };
2889 
2890 } // end anonymous namespace
2891 
2892 /// Analyze the specified subexpression and see if it is capable of providing
2893 /// pieces of a bswap or bitreverse. The subexpression provides a potential
2894 /// piece of a bswap or bitreverse if it can be proved that each non-zero bit in
2895 /// the output of the expression came from a corresponding bit in some other
2896 /// value. This function is recursive, and the end result is a mapping of
2897 /// bitnumber to bitnumber. It is the caller's responsibility to validate that
2898 /// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
2899 ///
2900 /// For example, if the current subexpression if "(shl i32 %X, 24)" then we know
2901 /// that the expression deposits the low byte of %X into the high byte of the
2902 /// result and that all other bits are zero. This expression is accepted and a
2903 /// BitPart is returned with Provider set to %X and Provenance[24-31] set to
2904 /// [0-7].
2905 ///
2906 /// For vector types, all analysis is performed at the per-element level. No
2907 /// cross-element analysis is supported (shuffle/insertion/reduction), and all
2908 /// constant masks must be splatted across all elements.
2909 ///
2910 /// To avoid revisiting values, the BitPart results are memoized into the
2911 /// provided map. To avoid unnecessary copying of BitParts, BitParts are
2912 /// constructed in-place in the \c BPS map. Because of this \c BPS needs to
2913 /// store BitParts objects, not pointers. As we need the concept of a nullptr
2914 /// BitParts (Value has been analyzed and the analysis failed), we an Optional
2915 /// type instead to provide the same functionality.
2916 ///
2917 /// Because we pass around references into \c BPS, we must use a container that
2918 /// does not invalidate internal references (std::map instead of DenseMap).
2919 static const Optional<BitPart> &
2920 collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
2921                 std::map<Value *, Optional<BitPart>> &BPS, int Depth,
2922                 bool &FoundRoot) {
2923   auto I = BPS.find(V);
2924   if (I != BPS.end())
2925     return I->second;
2926 
2927   auto &Result = BPS[V] = None;
2928   auto BitWidth = V->getType()->getScalarSizeInBits();
2929 
2930   // Can't do integer/elements > 128 bits.
2931   if (BitWidth > 128)
2932     return Result;
2933 
2934   // Prevent stack overflow by limiting the recursion depth
2935   if (Depth == BitPartRecursionMaxDepth) {
2936     LLVM_DEBUG(dbgs() << "collectBitParts max recursion depth reached.\n");
2937     return Result;
2938   }
2939 
2940   if (auto *I = dyn_cast<Instruction>(V)) {
2941     Value *X, *Y;
2942     const APInt *C;
2943 
2944     // If this is an or instruction, it may be an inner node of the bswap.
2945     if (match(V, m_Or(m_Value(X), m_Value(Y)))) {
2946       // Check we have both sources and they are from the same provider.
2947       const auto &A = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
2948                                       Depth + 1, FoundRoot);
2949       if (!A || !A->Provider)
2950         return Result;
2951 
2952       const auto &B = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
2953                                       Depth + 1, FoundRoot);
2954       if (!B || A->Provider != B->Provider)
2955         return Result;
2956 
2957       // Try and merge the two together.
2958       Result = BitPart(A->Provider, BitWidth);
2959       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) {
2960         if (A->Provenance[BitIdx] != BitPart::Unset &&
2961             B->Provenance[BitIdx] != BitPart::Unset &&
2962             A->Provenance[BitIdx] != B->Provenance[BitIdx])
2963           return Result = None;
2964 
2965         if (A->Provenance[BitIdx] == BitPart::Unset)
2966           Result->Provenance[BitIdx] = B->Provenance[BitIdx];
2967         else
2968           Result->Provenance[BitIdx] = A->Provenance[BitIdx];
2969       }
2970 
2971       return Result;
2972     }
2973 
2974     // If this is a logical shift by a constant, recurse then shift the result.
2975     if (match(V, m_LogicalShift(m_Value(X), m_APInt(C)))) {
2976       const APInt &BitShift = *C;
2977 
2978       // Ensure the shift amount is defined.
2979       if (BitShift.uge(BitWidth))
2980         return Result;
2981 
2982       // For bswap-only, limit shift amounts to whole bytes, for an early exit.
2983       if (!MatchBitReversals && (BitShift.getZExtValue() % 8) != 0)
2984         return Result;
2985 
2986       const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
2987                                         Depth + 1, FoundRoot);
2988       if (!Res)
2989         return Result;
2990       Result = Res;
2991 
2992       // Perform the "shift" on BitProvenance.
2993       auto &P = Result->Provenance;
2994       if (I->getOpcode() == Instruction::Shl) {
2995         P.erase(std::prev(P.end(), BitShift.getZExtValue()), P.end());
2996         P.insert(P.begin(), BitShift.getZExtValue(), BitPart::Unset);
2997       } else {
2998         P.erase(P.begin(), std::next(P.begin(), BitShift.getZExtValue()));
2999         P.insert(P.end(), BitShift.getZExtValue(), BitPart::Unset);
3000       }
3001 
3002       return Result;
3003     }
3004 
3005     // If this is a logical 'and' with a mask that clears bits, recurse then
3006     // unset the appropriate bits.
3007     if (match(V, m_And(m_Value(X), m_APInt(C)))) {
3008       const APInt &AndMask = *C;
3009 
3010       // Check that the mask allows a multiple of 8 bits for a bswap, for an
3011       // early exit.
3012       unsigned NumMaskedBits = AndMask.countPopulation();
3013       if (!MatchBitReversals && (NumMaskedBits % 8) != 0)
3014         return Result;
3015 
3016       const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3017                                         Depth + 1, FoundRoot);
3018       if (!Res)
3019         return Result;
3020       Result = Res;
3021 
3022       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3023         // If the AndMask is zero for this bit, clear the bit.
3024         if (AndMask[BitIdx] == 0)
3025           Result->Provenance[BitIdx] = BitPart::Unset;
3026       return Result;
3027     }
3028 
3029     // If this is a zext instruction zero extend the result.
3030     if (match(V, m_ZExt(m_Value(X)))) {
3031       const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3032                                         Depth + 1, FoundRoot);
3033       if (!Res)
3034         return Result;
3035 
3036       Result = BitPart(Res->Provider, BitWidth);
3037       auto NarrowBitWidth = X->getType()->getScalarSizeInBits();
3038       for (unsigned BitIdx = 0; BitIdx < NarrowBitWidth; ++BitIdx)
3039         Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3040       for (unsigned BitIdx = NarrowBitWidth; BitIdx < BitWidth; ++BitIdx)
3041         Result->Provenance[BitIdx] = BitPart::Unset;
3042       return Result;
3043     }
3044 
3045     // If this is a truncate instruction, extract the lower bits.
3046     if (match(V, m_Trunc(m_Value(X)))) {
3047       const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3048                                         Depth + 1, FoundRoot);
3049       if (!Res)
3050         return Result;
3051 
3052       Result = BitPart(Res->Provider, BitWidth);
3053       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3054         Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3055       return Result;
3056     }
3057 
3058     // BITREVERSE - most likely due to us previous matching a partial
3059     // bitreverse.
3060     if (match(V, m_BitReverse(m_Value(X)))) {
3061       const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3062                                         Depth + 1, FoundRoot);
3063       if (!Res)
3064         return Result;
3065 
3066       Result = BitPart(Res->Provider, BitWidth);
3067       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3068         Result->Provenance[(BitWidth - 1) - BitIdx] = Res->Provenance[BitIdx];
3069       return Result;
3070     }
3071 
3072     // BSWAP - most likely due to us previous matching a partial bswap.
3073     if (match(V, m_BSwap(m_Value(X)))) {
3074       const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3075                                         Depth + 1, FoundRoot);
3076       if (!Res)
3077         return Result;
3078 
3079       unsigned ByteWidth = BitWidth / 8;
3080       Result = BitPart(Res->Provider, BitWidth);
3081       for (unsigned ByteIdx = 0; ByteIdx < ByteWidth; ++ByteIdx) {
3082         unsigned ByteBitOfs = ByteIdx * 8;
3083         for (unsigned BitIdx = 0; BitIdx < 8; ++BitIdx)
3084           Result->Provenance[(BitWidth - 8 - ByteBitOfs) + BitIdx] =
3085               Res->Provenance[ByteBitOfs + BitIdx];
3086       }
3087       return Result;
3088     }
3089 
3090     // Funnel 'double' shifts take 3 operands, 2 inputs and the shift
3091     // amount (modulo).
3092     // fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
3093     // fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
3094     if (match(V, m_FShl(m_Value(X), m_Value(Y), m_APInt(C))) ||
3095         match(V, m_FShr(m_Value(X), m_Value(Y), m_APInt(C)))) {
3096       // We can treat fshr as a fshl by flipping the modulo amount.
3097       unsigned ModAmt = C->urem(BitWidth);
3098       if (cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::fshr)
3099         ModAmt = BitWidth - ModAmt;
3100 
3101       // For bswap-only, limit shift amounts to whole bytes, for an early exit.
3102       if (!MatchBitReversals && (ModAmt % 8) != 0)
3103         return Result;
3104 
3105       // Check we have both sources and they are from the same provider.
3106       const auto &LHS = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3107                                         Depth + 1, FoundRoot);
3108       if (!LHS || !LHS->Provider)
3109         return Result;
3110 
3111       const auto &RHS = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
3112                                         Depth + 1, FoundRoot);
3113       if (!RHS || LHS->Provider != RHS->Provider)
3114         return Result;
3115 
3116       unsigned StartBitRHS = BitWidth - ModAmt;
3117       Result = BitPart(LHS->Provider, BitWidth);
3118       for (unsigned BitIdx = 0; BitIdx < StartBitRHS; ++BitIdx)
3119         Result->Provenance[BitIdx + ModAmt] = LHS->Provenance[BitIdx];
3120       for (unsigned BitIdx = 0; BitIdx < ModAmt; ++BitIdx)
3121         Result->Provenance[BitIdx] = RHS->Provenance[BitIdx + StartBitRHS];
3122       return Result;
3123     }
3124   }
3125 
3126   // If we've already found a root input value then we're never going to merge
3127   // these back together.
3128   if (FoundRoot)
3129     return Result;
3130 
3131   // Okay, we got to something that isn't a shift, 'or', 'and', etc. This must
3132   // be the root input value to the bswap/bitreverse.
3133   FoundRoot = true;
3134   Result = BitPart(V, BitWidth);
3135   for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3136     Result->Provenance[BitIdx] = BitIdx;
3137   return Result;
3138 }
3139 
3140 static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
3141                                           unsigned BitWidth) {
3142   if (From % 8 != To % 8)
3143     return false;
3144   // Convert from bit indices to byte indices and check for a byte reversal.
3145   From >>= 3;
3146   To >>= 3;
3147   BitWidth >>= 3;
3148   return From == BitWidth - To - 1;
3149 }
3150 
3151 static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
3152                                                unsigned BitWidth) {
3153   return From == BitWidth - To - 1;
3154 }
3155 
3156 bool llvm::recognizeBSwapOrBitReverseIdiom(
3157     Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
3158     SmallVectorImpl<Instruction *> &InsertedInsts) {
3159   if (!match(I, m_Or(m_Value(), m_Value())) &&
3160       !match(I, m_FShl(m_Value(), m_Value(), m_Value())) &&
3161       !match(I, m_FShr(m_Value(), m_Value(), m_Value())))
3162     return false;
3163   if (!MatchBSwaps && !MatchBitReversals)
3164     return false;
3165   Type *ITy = I->getType();
3166   if (!ITy->isIntOrIntVectorTy() || ITy->getScalarSizeInBits() > 128)
3167     return false;  // Can't do integer/elements > 128 bits.
3168 
3169   // Try to find all the pieces corresponding to the bswap.
3170   bool FoundRoot = false;
3171   std::map<Value *, Optional<BitPart>> BPS;
3172   const auto &Res =
3173       collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS, 0, FoundRoot);
3174   if (!Res)
3175     return false;
3176   ArrayRef<int8_t> BitProvenance = Res->Provenance;
3177   assert(all_of(BitProvenance,
3178                 [](int8_t I) { return I == BitPart::Unset || 0 <= I; }) &&
3179          "Illegal bit provenance index");
3180 
3181   // If the upper bits are zero, then attempt to perform as a truncated op.
3182   Type *DemandedTy = ITy;
3183   if (BitProvenance.back() == BitPart::Unset) {
3184     while (!BitProvenance.empty() && BitProvenance.back() == BitPart::Unset)
3185       BitProvenance = BitProvenance.drop_back();
3186     if (BitProvenance.empty())
3187       return false; // TODO - handle null value?
3188     DemandedTy = Type::getIntNTy(I->getContext(), BitProvenance.size());
3189     if (auto *IVecTy = dyn_cast<VectorType>(ITy))
3190       DemandedTy = VectorType::get(DemandedTy, IVecTy);
3191   }
3192 
3193   // Check BitProvenance hasn't found a source larger than the result type.
3194   unsigned DemandedBW = DemandedTy->getScalarSizeInBits();
3195   if (DemandedBW > ITy->getScalarSizeInBits())
3196     return false;
3197 
3198   // Now, is the bit permutation correct for a bswap or a bitreverse? We can
3199   // only byteswap values with an even number of bytes.
3200   APInt DemandedMask = APInt::getAllOnes(DemandedBW);
3201   bool OKForBSwap = MatchBSwaps && (DemandedBW % 16) == 0;
3202   bool OKForBitReverse = MatchBitReversals;
3203   for (unsigned BitIdx = 0;
3204        (BitIdx < DemandedBW) && (OKForBSwap || OKForBitReverse); ++BitIdx) {
3205     if (BitProvenance[BitIdx] == BitPart::Unset) {
3206       DemandedMask.clearBit(BitIdx);
3207       continue;
3208     }
3209     OKForBSwap &= bitTransformIsCorrectForBSwap(BitProvenance[BitIdx], BitIdx,
3210                                                 DemandedBW);
3211     OKForBitReverse &= bitTransformIsCorrectForBitReverse(BitProvenance[BitIdx],
3212                                                           BitIdx, DemandedBW);
3213   }
3214 
3215   Intrinsic::ID Intrin;
3216   if (OKForBSwap)
3217     Intrin = Intrinsic::bswap;
3218   else if (OKForBitReverse)
3219     Intrin = Intrinsic::bitreverse;
3220   else
3221     return false;
3222 
3223   Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy);
3224   Value *Provider = Res->Provider;
3225 
3226   // We may need to truncate the provider.
3227   if (DemandedTy != Provider->getType()) {
3228     auto *Trunc =
3229         CastInst::CreateIntegerCast(Provider, DemandedTy, false, "trunc", I);
3230     InsertedInsts.push_back(Trunc);
3231     Provider = Trunc;
3232   }
3233 
3234   Instruction *Result = CallInst::Create(F, Provider, "rev", I);
3235   InsertedInsts.push_back(Result);
3236 
3237   if (!DemandedMask.isAllOnes()) {
3238     auto *Mask = ConstantInt::get(DemandedTy, DemandedMask);
3239     Result = BinaryOperator::Create(Instruction::And, Result, Mask, "mask", I);
3240     InsertedInsts.push_back(Result);
3241   }
3242 
3243   // We may need to zeroextend back to the result type.
3244   if (ITy != Result->getType()) {
3245     auto *ExtInst = CastInst::CreateIntegerCast(Result, ITy, false, "zext", I);
3246     InsertedInsts.push_back(ExtInst);
3247   }
3248 
3249   return true;
3250 }
3251 
3252 // CodeGen has special handling for some string functions that may replace
3253 // them with target-specific intrinsics.  Since that'd skip our interceptors
3254 // in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
3255 // we mark affected calls as NoBuiltin, which will disable optimization
3256 // in CodeGen.
3257 void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
3258     CallInst *CI, const TargetLibraryInfo *TLI) {
3259   Function *F = CI->getCalledFunction();
3260   LibFunc Func;
3261   if (F && !F->hasLocalLinkage() && F->hasName() &&
3262       TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) &&
3263       !F->doesNotAccessMemory())
3264     CI->addFnAttr(Attribute::NoBuiltin);
3265 }
3266 
3267 bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
3268   // We can't have a PHI with a metadata type.
3269   if (I->getOperand(OpIdx)->getType()->isMetadataTy())
3270     return false;
3271 
3272   // Early exit.
3273   if (!isa<Constant>(I->getOperand(OpIdx)))
3274     return true;
3275 
3276   switch (I->getOpcode()) {
3277   default:
3278     return true;
3279   case Instruction::Call:
3280   case Instruction::Invoke: {
3281     const auto &CB = cast<CallBase>(*I);
3282 
3283     // Can't handle inline asm. Skip it.
3284     if (CB.isInlineAsm())
3285       return false;
3286 
3287     // Constant bundle operands may need to retain their constant-ness for
3288     // correctness.
3289     if (CB.isBundleOperand(OpIdx))
3290       return false;
3291 
3292     if (OpIdx < CB.arg_size()) {
3293       // Some variadic intrinsics require constants in the variadic arguments,
3294       // which currently aren't markable as immarg.
3295       if (isa<IntrinsicInst>(CB) &&
3296           OpIdx >= CB.getFunctionType()->getNumParams()) {
3297         // This is known to be OK for stackmap.
3298         return CB.getIntrinsicID() == Intrinsic::experimental_stackmap;
3299       }
3300 
3301       // gcroot is a special case, since it requires a constant argument which
3302       // isn't also required to be a simple ConstantInt.
3303       if (CB.getIntrinsicID() == Intrinsic::gcroot)
3304         return false;
3305 
3306       // Some intrinsic operands are required to be immediates.
3307       return !CB.paramHasAttr(OpIdx, Attribute::ImmArg);
3308     }
3309 
3310     // It is never allowed to replace the call argument to an intrinsic, but it
3311     // may be possible for a call.
3312     return !isa<IntrinsicInst>(CB);
3313   }
3314   case Instruction::ShuffleVector:
3315     // Shufflevector masks are constant.
3316     return OpIdx != 2;
3317   case Instruction::Switch:
3318   case Instruction::ExtractValue:
3319     // All operands apart from the first are constant.
3320     return OpIdx == 0;
3321   case Instruction::InsertValue:
3322     // All operands apart from the first and the second are constant.
3323     return OpIdx < 2;
3324   case Instruction::Alloca:
3325     // Static allocas (constant size in the entry block) are handled by
3326     // prologue/epilogue insertion so they're free anyway. We definitely don't
3327     // want to make them non-constant.
3328     return !cast<AllocaInst>(I)->isStaticAlloca();
3329   case Instruction::GetElementPtr:
3330     if (OpIdx == 0)
3331       return true;
3332     gep_type_iterator It = gep_type_begin(I);
3333     for (auto E = std::next(It, OpIdx); It != E; ++It)
3334       if (It.isStruct())
3335         return false;
3336     return true;
3337   }
3338 }
3339 
3340 Value *llvm::invertCondition(Value *Condition) {
3341   // First: Check if it's a constant
3342   if (Constant *C = dyn_cast<Constant>(Condition))
3343     return ConstantExpr::getNot(C);
3344 
3345   // Second: If the condition is already inverted, return the original value
3346   Value *NotCondition;
3347   if (match(Condition, m_Not(m_Value(NotCondition))))
3348     return NotCondition;
3349 
3350   BasicBlock *Parent = nullptr;
3351   Instruction *Inst = dyn_cast<Instruction>(Condition);
3352   if (Inst)
3353     Parent = Inst->getParent();
3354   else if (Argument *Arg = dyn_cast<Argument>(Condition))
3355     Parent = &Arg->getParent()->getEntryBlock();
3356   assert(Parent && "Unsupported condition to invert");
3357 
3358   // Third: Check all the users for an invert
3359   for (User *U : Condition->users())
3360     if (Instruction *I = dyn_cast<Instruction>(U))
3361       if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition))))
3362         return I;
3363 
3364   // Last option: Create a new instruction
3365   auto *Inverted =
3366       BinaryOperator::CreateNot(Condition, Condition->getName() + ".inv");
3367   if (Inst && !isa<PHINode>(Inst))
3368     Inverted->insertAfter(Inst);
3369   else
3370     Inverted->insertBefore(&*Parent->getFirstInsertionPt());
3371   return Inverted;
3372 }
3373 
3374 bool llvm::inferAttributesFromOthers(Function &F) {
3375   // Note: We explicitly check for attributes rather than using cover functions
3376   // because some of the cover functions include the logic being implemented.
3377 
3378   bool Changed = false;
3379   // readnone + not convergent implies nosync
3380   if (!F.hasFnAttribute(Attribute::NoSync) &&
3381       F.doesNotAccessMemory() && !F.isConvergent()) {
3382     F.setNoSync();
3383     Changed = true;
3384   }
3385 
3386   // readonly implies nofree
3387   if (!F.hasFnAttribute(Attribute::NoFree) && F.onlyReadsMemory()) {
3388     F.setDoesNotFreeMemory();
3389     Changed = true;
3390   }
3391 
3392   // willreturn implies mustprogress
3393   if (!F.hasFnAttribute(Attribute::MustProgress) && F.willReturn()) {
3394     F.setMustProgress();
3395     Changed = true;
3396   }
3397 
3398   // TODO: There are a bunch of cases of restrictive memory effects we
3399   // can infer by inspecting arguments of argmemonly-ish functions.
3400 
3401   return Changed;
3402 }
3403