1 //===- Local.cpp - Functions to perform local transformations -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This family of functions performs various local transformations to the
10 // program.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "llvm/Transforms/Utils/Local.h"
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/DenseMap.h"
17 #include "llvm/ADT/DenseMapInfo.h"
18 #include "llvm/ADT/DenseSet.h"
19 #include "llvm/ADT/Hashing.h"
20 #include "llvm/ADT/None.h"
21 #include "llvm/ADT/Optional.h"
22 #include "llvm/ADT/STLExtras.h"
23 #include "llvm/ADT/SetVector.h"
24 #include "llvm/ADT/SmallPtrSet.h"
25 #include "llvm/ADT/SmallVector.h"
26 #include "llvm/ADT/Statistic.h"
27 #include "llvm/ADT/TinyPtrVector.h"
28 #include "llvm/Analysis/AssumeBundleQueries.h"
29 #include "llvm/Analysis/ConstantFolding.h"
30 #include "llvm/Analysis/DomTreeUpdater.h"
31 #include "llvm/Analysis/EHPersonalities.h"
32 #include "llvm/Analysis/InstructionSimplify.h"
33 #include "llvm/Analysis/LazyValueInfo.h"
34 #include "llvm/Analysis/MemoryBuiltins.h"
35 #include "llvm/Analysis/MemorySSAUpdater.h"
36 #include "llvm/Analysis/TargetLibraryInfo.h"
37 #include "llvm/Analysis/ValueTracking.h"
38 #include "llvm/Analysis/VectorUtils.h"
39 #include "llvm/BinaryFormat/Dwarf.h"
40 #include "llvm/IR/Argument.h"
41 #include "llvm/IR/Attributes.h"
42 #include "llvm/IR/BasicBlock.h"
43 #include "llvm/IR/CFG.h"
44 #include "llvm/IR/Constant.h"
45 #include "llvm/IR/ConstantRange.h"
46 #include "llvm/IR/Constants.h"
47 #include "llvm/IR/DIBuilder.h"
48 #include "llvm/IR/DataLayout.h"
49 #include "llvm/IR/DebugInfoMetadata.h"
50 #include "llvm/IR/DebugLoc.h"
51 #include "llvm/IR/DerivedTypes.h"
52 #include "llvm/IR/Dominators.h"
53 #include "llvm/IR/Function.h"
54 #include "llvm/IR/GetElementPtrTypeIterator.h"
55 #include "llvm/IR/GlobalObject.h"
56 #include "llvm/IR/IRBuilder.h"
57 #include "llvm/IR/InstrTypes.h"
58 #include "llvm/IR/Instruction.h"
59 #include "llvm/IR/Instructions.h"
60 #include "llvm/IR/IntrinsicInst.h"
61 #include "llvm/IR/Intrinsics.h"
62 #include "llvm/IR/LLVMContext.h"
63 #include "llvm/IR/MDBuilder.h"
64 #include "llvm/IR/Metadata.h"
65 #include "llvm/IR/Module.h"
66 #include "llvm/IR/Operator.h"
67 #include "llvm/IR/PatternMatch.h"
68 #include "llvm/IR/Type.h"
69 #include "llvm/IR/Use.h"
70 #include "llvm/IR/User.h"
71 #include "llvm/IR/Value.h"
72 #include "llvm/IR/ValueHandle.h"
73 #include "llvm/Support/Casting.h"
74 #include "llvm/Support/Debug.h"
75 #include "llvm/Support/ErrorHandling.h"
76 #include "llvm/Support/KnownBits.h"
77 #include "llvm/Support/raw_ostream.h"
78 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
79 #include "llvm/Transforms/Utils/ValueMapper.h"
80 #include <algorithm>
81 #include <cassert>
82 #include <climits>
83 #include <cstdint>
84 #include <iterator>
85 #include <map>
86 #include <utility>
87
88 using namespace llvm;
89 using namespace llvm::PatternMatch;
90
91 #define DEBUG_TYPE "local"
92
93 STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
94 STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd");
95
96 static cl::opt<bool> PHICSEDebugHash(
97 "phicse-debug-hash",
98 #ifdef EXPENSIVE_CHECKS
99 cl::init(true),
100 #else
101 cl::init(false),
102 #endif
103 cl::Hidden,
104 cl::desc("Perform extra assertion checking to verify that PHINode's hash "
105 "function is well-behaved w.r.t. its isEqual predicate"));
106
107 static cl::opt<unsigned> PHICSENumPHISmallSize(
108 "phicse-num-phi-smallsize", cl::init(32), cl::Hidden,
109 cl::desc(
110 "When the basic block contains not more than this number of PHI nodes, "
111 "perform a (faster!) exhaustive search instead of set-driven one."));
112
113 // Max recursion depth for collectBitParts used when detecting bswap and
114 // bitreverse idioms
115 static const unsigned BitPartRecursionMaxDepth = 64;
116
117 //===----------------------------------------------------------------------===//
118 // Local constant propagation.
119 //
120
121 /// ConstantFoldTerminator - If a terminator instruction is predicated on a
122 /// constant value, convert it into an unconditional branch to the constant
123 /// destination. This is a nontrivial operation because the successors of this
124 /// basic block must have their PHI nodes updated.
125 /// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
126 /// conditions and indirectbr addresses that this might make dead if
127 /// DeleteDeadConditions is true.
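///
/// For example, a conditional branch on a constant condition such as
///   br i1 true, label %live, label %dead
/// is rewritten to
///   br label %live
/// and the PHI nodes in %dead drop their incoming entry for this block.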
128 bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
129 const TargetLibraryInfo *TLI,
130 DomTreeUpdater *DTU) {
131 Instruction *T = BB->getTerminator();
132 IRBuilder<> Builder(T);
133
134 // Branch - See if we are conditionally jumping on a constant
135 if (auto *BI = dyn_cast<BranchInst>(T)) {
136 if (BI->isUnconditional()) return false; // Can't optimize uncond branch
137
138 BasicBlock *Dest1 = BI->getSuccessor(0);
139 BasicBlock *Dest2 = BI->getSuccessor(1);
140
141 if (Dest2 == Dest1) { // Conditional branch to same location?
142 // This branch matches something like this:
143 // br bool %cond, label %Dest, label %Dest
144 // and changes it into: br label %Dest
145
146 // Let the basic block know that we are letting go of one copy of it.
147 assert(BI->getParent() && "Terminator not inserted in block!");
148 Dest1->removePredecessor(BI->getParent());
149
150 // Replace the conditional branch with an unconditional one.
151 Builder.CreateBr(Dest1);
152 Value *Cond = BI->getCondition();
153 BI->eraseFromParent();
154 if (DeleteDeadConditions)
155 RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
156 return true;
157 }
158
159 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
160 // Are we branching on constant?
161 // YES. Change to unconditional branch...
162 BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
163 BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1;
164
165 // Let the basic block know that we are letting go of it. Based on this,
166 // it will adjust its PHI nodes.
167 OldDest->removePredecessor(BB);
168
169 // Replace the conditional branch with an unconditional one.
170 Builder.CreateBr(Destination);
171 BI->eraseFromParent();
172 if (DTU)
173 DTU->applyUpdates({{DominatorTree::Delete, BB, OldDest}});
174 return true;
175 }
176
177 return false;
178 }
179
180 if (auto *SI = dyn_cast<SwitchInst>(T)) {
181 // If we are switching on a constant, we can convert the switch to an
182 // unconditional branch.
183 auto *CI = dyn_cast<ConstantInt>(SI->getCondition());
184 BasicBlock *DefaultDest = SI->getDefaultDest();
185 BasicBlock *TheOnlyDest = DefaultDest;
186
187 // If the default is unreachable, ignore it when searching for TheOnlyDest.
188 if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) &&
189 SI->getNumCases() > 0) {
190 TheOnlyDest = SI->case_begin()->getCaseSuccessor();
191 }
192
193 bool Changed = false;
194
195 // Figure out which case it goes to.
196 for (auto i = SI->case_begin(), e = SI->case_end(); i != e;) {
197 // Found case matching a constant operand?
198 if (i->getCaseValue() == CI) {
199 TheOnlyDest = i->getCaseSuccessor();
200 break;
201 }
202
203 // Check to see if this branch is going to the same place as the default
204 // dest. If so, eliminate it as an explicit compare.
205 if (i->getCaseSuccessor() == DefaultDest) {
206 MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
207 unsigned NCases = SI->getNumCases();
208 // Fold the case metadata into the default if there will be any branches
209 // left, unless the metadata doesn't match the switch.
210 if (NCases > 1 && MD && MD->getNumOperands() == 2 + NCases) {
211 // Collect branch weights into a vector.
212 SmallVector<uint32_t, 8> Weights;
213 for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e;
214 ++MD_i) {
215 auto *CI = mdconst::extract<ConstantInt>(MD->getOperand(MD_i));
216 Weights.push_back(CI->getValue().getZExtValue());
217 }
218 // Merge weight of this case to the default weight.
219 unsigned idx = i->getCaseIndex();
220 Weights[0] += Weights[idx+1];
221 // Remove weight for this case.
222 std::swap(Weights[idx+1], Weights.back());
223 Weights.pop_back();
224 SI->setMetadata(LLVMContext::MD_prof,
225 MDBuilder(BB->getContext()).
226 createBranchWeights(Weights));
227 }
228 // Remove this entry.
229 BasicBlock *ParentBB = SI->getParent();
230 DefaultDest->removePredecessor(ParentBB);
231 i = SI->removeCase(i);
232 e = SI->case_end();
233 Changed = true;
234 continue;
235 }
236
237 // Otherwise, check to see if the switch only branches to one destination.
238 // We do this by resetting "TheOnlyDest" to null when we find two non-equal
239 // destinations.
240 if (i->getCaseSuccessor() != TheOnlyDest)
241 TheOnlyDest = nullptr;
242
243 // Increment this iterator as we haven't removed the case.
244 ++i;
245 }
246
247 if (CI && !TheOnlyDest) {
248 // Branching on a constant, but not any of the cases, go to the default
249 // successor.
250 TheOnlyDest = SI->getDefaultDest();
251 }
252
253 // If we found a single destination that we can fold the switch into, do so
254 // now.
255 if (TheOnlyDest) {
256 // Insert the new branch.
257 Builder.CreateBr(TheOnlyDest);
258 BasicBlock *BB = SI->getParent();
259
260 SmallSetVector<BasicBlock *, 8> RemovedSuccessors;
261
262 // Remove entries from PHI nodes which we no longer branch to...
263 BasicBlock *SuccToKeep = TheOnlyDest;
264 for (BasicBlock *Succ : successors(SI)) {
265 if (DTU && Succ != TheOnlyDest)
266 RemovedSuccessors.insert(Succ);
267 // Keep the first edge to TheOnlyDest; drop PHI entries for every other edge.
268 if (Succ == SuccToKeep) {
269 SuccToKeep = nullptr; // Don't modify the first branch to TheOnlyDest
270 } else {
271 Succ->removePredecessor(BB);
272 }
273 }
274
275 // Delete the old switch.
276 Value *Cond = SI->getCondition();
277 SI->eraseFromParent();
278 if (DeleteDeadConditions)
279 RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
280 if (DTU) {
281 std::vector<DominatorTree::UpdateType> Updates;
282 Updates.reserve(RemovedSuccessors.size());
283 for (auto *RemovedSuccessor : RemovedSuccessors)
284 Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
285 DTU->applyUpdates(Updates);
286 }
287 return true;
288 }
289
290 if (SI->getNumCases() == 1) {
291 // Otherwise, we can fold this switch into a conditional branch
292 // instruction if it has only one non-default destination.
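// For example:
//   switch i32 %x, label %default [ i32 7, label %case7 ]
// becomes:
//   %cond = icmp eq i32 %x, 7
//   br i1 %cond, label %case7, label %default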
293 auto FirstCase = *SI->case_begin();
294 Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
295 FirstCase.getCaseValue(), "cond");
296
297 // Insert the new branch.
298 BranchInst *NewBr = Builder.CreateCondBr(Cond,
299 FirstCase.getCaseSuccessor(),
300 SI->getDefaultDest());
301 MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
302 if (MD && MD->getNumOperands() == 3) {
303 ConstantInt *SICase =
304 mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
305 ConstantInt *SIDef =
306 mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
307 assert(SICase && SIDef);
308 // The TrueWeight should be the weight for the single case of SI.
309 NewBr->setMetadata(LLVMContext::MD_prof,
310 MDBuilder(BB->getContext()).
311 createBranchWeights(SICase->getValue().getZExtValue(),
312 SIDef->getValue().getZExtValue()));
313 }
314
315 // Update make.implicit metadata to the newly-created conditional branch.
316 MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
317 if (MakeImplicitMD)
318 NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);
319
320 // Delete the old switch.
321 SI->eraseFromParent();
322 return true;
323 }
324 return Changed;
325 }
326
327 if (auto *IBI = dyn_cast<IndirectBrInst>(T)) {
328 // indirectbr blockaddress(@F, @BB) -> br label @BB
329 if (auto *BA =
330 dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
331 BasicBlock *TheOnlyDest = BA->getBasicBlock();
332 SmallSetVector<BasicBlock *, 8> RemovedSuccessors;
333
334 // Insert the new branch.
335 Builder.CreateBr(TheOnlyDest);
336
337 BasicBlock *SuccToKeep = TheOnlyDest;
338 for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
339 BasicBlock *DestBB = IBI->getDestination(i);
340 if (DTU && DestBB != TheOnlyDest)
341 RemovedSuccessors.insert(DestBB);
342 if (IBI->getDestination(i) == SuccToKeep) {
343 SuccToKeep = nullptr;
344 } else {
345 DestBB->removePredecessor(BB);
346 }
347 }
348 Value *Address = IBI->getAddress();
349 IBI->eraseFromParent();
350 if (DeleteDeadConditions)
351 // Delete pointer cast instructions.
352 RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);
353
354 // Also zap the blockaddress constant if there are no users remaining,
355 // otherwise the destination is still marked as having its address taken.
356 if (BA->use_empty())
357 BA->destroyConstant();
358
359 // If we didn't find our destination in the IBI successor list, then we
360 // have undefined behavior. Replace the unconditional branch with an
361 // 'unreachable' instruction.
362 if (SuccToKeep) {
363 BB->getTerminator()->eraseFromParent();
364 new UnreachableInst(BB->getContext(), BB);
365 }
366
367 if (DTU) {
368 std::vector<DominatorTree::UpdateType> Updates;
369 Updates.reserve(RemovedSuccessors.size());
370 for (auto *RemovedSuccessor : RemovedSuccessors)
371 Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
372 DTU->applyUpdates(Updates);
373 }
374 return true;
375 }
376 }
377
378 return false;
379 }
380
381 //===----------------------------------------------------------------------===//
382 // Local dead code elimination.
383 //
384
385 /// isInstructionTriviallyDead - Return true if the result produced by the
386 /// instruction is not used, and the instruction has no side effects.
387 ///
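/// For example, an 'add' whose result has no uses is trivially dead, while a
/// 'store' or a terminator never is, since their effects go beyond the value
/// they produce.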
388 bool llvm::isInstructionTriviallyDead(Instruction *I,
389 const TargetLibraryInfo *TLI) {
390 if (!I->use_empty())
391 return false;
392 return wouldInstructionBeTriviallyDead(I, TLI);
393 }
394
395 bool llvm::wouldInstructionBeTriviallyDead(Instruction *I,
396 const TargetLibraryInfo *TLI) {
397 if (I->isTerminator())
398 return false;
399
400 // We don't want the landingpad-like instructions removed by anything this
401 // general.
402 if (I->isEHPad())
403 return false;
404
405 // We don't want debug info removed by anything this general, unless
406 // debug info is empty.
407 if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) {
408 if (DDI->getAddress())
409 return false;
410 return true;
411 }
412 if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(I)) {
413 if (DVI->getValue())
414 return false;
415 return true;
416 }
417 if (DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(I)) {
418 if (DLI->getLabel())
419 return false;
420 return true;
421 }
422
423 if (!I->willReturn())
424 return false;
425
426 if (!I->mayHaveSideEffects())
427 return true;
428
429 // Special case intrinsics that "may have side effects" but can be deleted
430 // when dead.
431 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
432 // Safe to delete llvm.stacksave and launder.invariant.group if dead.
433 if (II->getIntrinsicID() == Intrinsic::stacksave ||
434 II->getIntrinsicID() == Intrinsic::launder_invariant_group)
435 return true;
436
437 if (II->isLifetimeStartOrEnd()) {
438 auto *Arg = II->getArgOperand(1);
439 // Lifetime intrinsics are dead when their right-hand is undef.
440 if (isa<UndefValue>(Arg))
441 return true;
442 // If the right-hand is an alloc, global, or argument and the only uses
443 // are lifetime intrinsics then the intrinsics are dead.
444 if (isa<AllocaInst>(Arg) || isa<GlobalValue>(Arg) || isa<Argument>(Arg))
445 return llvm::all_of(Arg->uses(), [](Use &Use) {
446 if (IntrinsicInst *IntrinsicUse =
447 dyn_cast<IntrinsicInst>(Use.getUser()))
448 return IntrinsicUse->isLifetimeStartOrEnd();
449 return false;
450 });
451 return false;
452 }
453
454 // Assumptions are dead if their condition is trivially true. Guards on
455 // true are operationally no-ops. In the future we can consider more
456 // sophisticated tradeoffs for guards considering potential for check
457 // widening, but for now we keep things simple.
458 if ((II->getIntrinsicID() == Intrinsic::assume &&
459 isAssumeWithEmptyBundle(*II)) ||
460 II->getIntrinsicID() == Intrinsic::experimental_guard) {
461 if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
462 return !Cond->isZero();
463
464 return false;
465 }
466 }
467
468 if (isAllocLikeFn(I, TLI))
469 return true;
470
471 if (CallInst *CI = isFreeCall(I, TLI))
472 if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0)))
473 return C->isNullValue() || isa<UndefValue>(C);
474
475 if (auto *Call = dyn_cast<CallBase>(I))
476 if (isMathLibCallNoop(Call, TLI))
477 return true;
478
479 return false;
480 }
481
482 /// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
483 /// trivially dead instruction, delete it. If that makes any of its operands
484 /// trivially dead, delete them too, recursively. Return true if any
485 /// instructions were deleted.
486 bool llvm::RecursivelyDeleteTriviallyDeadInstructions(
487 Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU,
488 std::function<void(Value *)> AboutToDeleteCallback) {
489 Instruction *I = dyn_cast<Instruction>(V);
490 if (!I || !isInstructionTriviallyDead(I, TLI))
491 return false;
492
493 SmallVector<WeakTrackingVH, 16> DeadInsts;
494 DeadInsts.push_back(I);
495 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
496 AboutToDeleteCallback);
497
498 return true;
499 }
500
501 bool llvm::RecursivelyDeleteTriviallyDeadInstructionsPermissive(
502 SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
503 MemorySSAUpdater *MSSAU,
504 std::function<void(Value *)> AboutToDeleteCallback) {
505 unsigned S = 0, E = DeadInsts.size(), Alive = 0;
506 for (; S != E; ++S) {
507 auto *I = cast<Instruction>(DeadInsts[S]);
508 if (!isInstructionTriviallyDead(I)) {
509 DeadInsts[S] = nullptr;
510 ++Alive;
511 }
512 }
513 if (Alive == E)
514 return false;
515 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
516 AboutToDeleteCallback);
517 return true;
518 }
519
520 void llvm::RecursivelyDeleteTriviallyDeadInstructions(
521 SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
522 MemorySSAUpdater *MSSAU,
523 std::function<void(Value *)> AboutToDeleteCallback) {
524 // Process the dead instruction list until empty.
525 while (!DeadInsts.empty()) {
526 Value *V = DeadInsts.pop_back_val();
527 Instruction *I = cast_or_null<Instruction>(V);
528 if (!I)
529 continue;
530 assert(isInstructionTriviallyDead(I, TLI) &&
531 "Live instruction found in dead worklist!");
532 assert(I->use_empty() && "Instructions with uses are not dead.");
533
534 // Don't lose the debug info while deleting the instructions.
535 salvageDebugInfo(*I);
536
537 if (AboutToDeleteCallback)
538 AboutToDeleteCallback(I);
539
540 // Null out all of the instruction's operands to see if any operand becomes
541 // dead as we go.
542 for (Use &OpU : I->operands()) {
543 Value *OpV = OpU.get();
544 OpU.set(nullptr);
545
546 if (!OpV->use_empty())
547 continue;
548
549 // If the operand is an instruction that became dead as we nulled out the
550 // operand, and if it is 'trivially' dead, delete it in a future loop
551 // iteration.
552 if (Instruction *OpI = dyn_cast<Instruction>(OpV))
553 if (isInstructionTriviallyDead(OpI, TLI))
554 DeadInsts.push_back(OpI);
555 }
556 if (MSSAU)
557 MSSAU->removeMemoryAccess(I);
558
559 I->eraseFromParent();
560 }
561 }
562
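/// Rewrite the debug intrinsics (llvm.dbg.value / llvm.dbg.declare) that
/// describe \p I so that they refer to an undef value instead. Returns true
/// if any such debug users were found.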
563 bool llvm::replaceDbgUsesWithUndef(Instruction *I) {
564 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
565 findDbgUsers(DbgUsers, I);
566 for (auto *DII : DbgUsers) {
567 Value *Undef = UndefValue::get(I->getType());
568 DII->setOperand(0, MetadataAsValue::get(DII->getContext(),
569 ValueAsMetadata::get(Undef)));
570 }
571 return !DbgUsers.empty();
572 }
573
574 /// areAllUsesEqual - Check whether the uses of a value are all the same.
575 /// This is similar to Instruction::hasOneUse() except this will also return
576 /// true when there are no uses or multiple uses that all refer to the same
577 /// value.
578 static bool areAllUsesEqual(Instruction *I) {
579 Value::user_iterator UI = I->user_begin();
580 Value::user_iterator UE = I->user_end();
581 if (UI == UE)
582 return true;
583
584 User *TheUse = *UI;
585 for (++UI; UI != UE; ++UI) {
586 if (*UI != TheUse)
587 return false;
588 }
589 return true;
590 }
591
592 /// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
593 /// dead PHI node, due to being a def-use chain of single-use nodes that
594 /// either forms a cycle or is terminated by a trivially dead instruction,
595 /// delete it. If that makes any of its operands trivially dead, delete them
596 /// too, recursively. Return true if a change was made.
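/// A typical case is a pair of single-use PHIs that only feed each other,
/// forming a two-node cycle with no outside users; both can be removed.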
597 bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
598 const TargetLibraryInfo *TLI,
599 llvm::MemorySSAUpdater *MSSAU) {
600 SmallPtrSet<Instruction*, 4> Visited;
601 for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
602 I = cast<Instruction>(*I->user_begin())) {
603 if (I->use_empty())
604 return RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
605
606 // If we find an instruction more than once, we're on a cycle that
607 // won't prove fruitful.
608 if (!Visited.insert(I).second) {
609 // Break the cycle and delete the instruction and its operands.
610 I->replaceAllUsesWith(UndefValue::get(I->getType()));
611 (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
612 return true;
613 }
614 }
615 return false;
616 }
617
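/// Try to erase \p I if it is trivially dead, or replace all of its uses with
/// a simpler value produced by SimplifyInstruction. Operands that become dead
/// and users that may now simplify are queued on \p WorkList. Returns true if
/// the IR changed.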
618 static bool
619 simplifyAndDCEInstruction(Instruction *I,
620 SmallSetVector<Instruction *, 16> &WorkList,
621 const DataLayout &DL,
622 const TargetLibraryInfo *TLI) {
623 if (isInstructionTriviallyDead(I, TLI)) {
624 salvageDebugInfo(*I);
625
626 // Null out all of the instruction's operands to see if any operand becomes
627 // dead as we go.
628 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
629 Value *OpV = I->getOperand(i);
630 I->setOperand(i, nullptr);
631
632 if (!OpV->use_empty() || I == OpV)
633 continue;
634
635 // If the operand is an instruction that became dead as we nulled out the
636 // operand, and if it is 'trivially' dead, delete it in a future loop
637 // iteration.
638 if (Instruction *OpI = dyn_cast<Instruction>(OpV))
639 if (isInstructionTriviallyDead(OpI, TLI))
640 WorkList.insert(OpI);
641 }
642
643 I->eraseFromParent();
644
645 return true;
646 }
647
648 if (Value *SimpleV = SimplifyInstruction(I, DL)) {
649 // Add the users to the worklist. CAREFUL: an instruction can use itself,
650 // in the case of a phi node.
651 for (User *U : I->users()) {
652 if (U != I) {
653 WorkList.insert(cast<Instruction>(U));
654 }
655 }
656
657 // Replace the instruction with its simplified value.
658 bool Changed = false;
659 if (!I->use_empty()) {
660 I->replaceAllUsesWith(SimpleV);
661 Changed = true;
662 }
663 if (isInstructionTriviallyDead(I, TLI)) {
664 I->eraseFromParent();
665 Changed = true;
666 }
667 return Changed;
668 }
669 return false;
670 }
671
672 /// SimplifyInstructionsInBlock - Scan the specified basic block and try to
673 /// simplify any instructions in it and recursively delete dead instructions.
674 ///
675 /// This returns true if it changed the code, note that it can delete
676 // instructions in other blocks as well as in this block.
677 bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
678 const TargetLibraryInfo *TLI) {
679 bool MadeChange = false;
680 const DataLayout &DL = BB->getModule()->getDataLayout();
681
682 #ifndef NDEBUG
683 // In debug builds, ensure that the terminator of the block is never replaced
684 // or deleted by these simplifications. The idea of simplification is that it
685 // cannot introduce new instructions, and there is no way to replace the
686 // terminator of a block without introducing a new instruction.
687 AssertingVH<Instruction> TerminatorVH(&BB->back());
688 #endif
689
690 SmallSetVector<Instruction *, 16> WorkList;
691 // Iterate over the original function, only adding insts to the worklist
692 // if they actually need to be revisited. This avoids having to pre-init
693 // the worklist with the entire function's worth of instructions.
694 for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
695 BI != E;) {
696 assert(!BI->isTerminator());
697 Instruction *I = &*BI;
698 ++BI;
699
700 // We're visiting this instruction now, so make sure it's not in the
701 // worklist from an earlier visit.
702 if (!WorkList.count(I))
703 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
704 }
705
706 while (!WorkList.empty()) {
707 Instruction *I = WorkList.pop_back_val();
708 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
709 }
710 return MadeChange;
711 }
712
713 //===----------------------------------------------------------------------===//
714 // Control Flow Graph Restructuring.
715 //
716
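/// Fold DestBB's single predecessor into DestBB: single-entry PHI nodes in
/// DestBB are replaced by their incoming value, the predecessor's remaining
/// instructions are spliced in front of DestBB's, everything that branched to
/// the predecessor is redirected to DestBB, and the now-trivial predecessor
/// block is erased (or handed to the DomTreeUpdater for deletion).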
717 void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB,
718 DomTreeUpdater *DTU) {
719
720 // If DestBB has single-entry PHI nodes, fold them.
721 while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
722 Value *NewVal = PN->getIncomingValue(0);
723 // Replace a self-referencing PHI with undef; it must be dead.
724 if (NewVal == PN) NewVal = UndefValue::get(PN->getType());
725 PN->replaceAllUsesWith(NewVal);
726 PN->eraseFromParent();
727 }
728
729 BasicBlock *PredBB = DestBB->getSinglePredecessor();
730 assert(PredBB && "Block doesn't have a single predecessor!");
731
732 bool ReplaceEntryBB = false;
733 if (PredBB == &DestBB->getParent()->getEntryBlock())
734 ReplaceEntryBB = true;
735
736 // DTU updates: Collect all the edges that enter
737 // PredBB. These dominator edges will be redirected to DestBB.
738 SmallVector<DominatorTree::UpdateType, 32> Updates;
739
740 if (DTU) {
741 for (auto I = pred_begin(PredBB), E = pred_end(PredBB); I != E; ++I) {
742 // This predecessor of PredBB may already have DestBB as a successor.
743 if (!llvm::is_contained(successors(*I), DestBB))
744 Updates.push_back({DominatorTree::Insert, *I, DestBB});
745 Updates.push_back({DominatorTree::Delete, *I, PredBB});
746 }
747 Updates.push_back({DominatorTree::Delete, PredBB, DestBB});
748 }
749
750 // Zap anything that took the address of DestBB. Not doing this will give the
751 // address an invalid value.
752 if (DestBB->hasAddressTaken()) {
753 BlockAddress *BA = BlockAddress::get(DestBB);
754 Constant *Replacement =
755 ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
756 BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
757 BA->getType()));
758 BA->destroyConstant();
759 }
760
761 // Anything that branched to PredBB now branches to DestBB.
762 PredBB->replaceAllUsesWith(DestBB);
763
764 // Splice all the instructions from PredBB to DestBB.
765 PredBB->getTerminator()->eraseFromParent();
766 DestBB->getInstList().splice(DestBB->begin(), PredBB->getInstList());
767 new UnreachableInst(PredBB->getContext(), PredBB);
768
769 // If the PredBB is the entry block of the function, move DestBB up to
770 // become the entry block after we erase PredBB.
771 if (ReplaceEntryBB)
772 DestBB->moveAfter(PredBB);
773
774 if (DTU) {
775 assert(PredBB->getInstList().size() == 1 &&
776 isa<UnreachableInst>(PredBB->getTerminator()) &&
777 "The successor list of PredBB isn't empty before "
778 "applying corresponding DTU updates.");
779 DTU->applyUpdatesPermissive(Updates);
780 DTU->deleteBB(PredBB);
781 // Recalculation of DomTree is needed when updating a forward DomTree and
782 // the Entry BB is replaced.
783 if (ReplaceEntryBB && DTU->hasDomTree()) {
784 // The entry block was removed and there is no external interface for
785 // the dominator tree to be notified of this change. In this corner-case
786 // we recalculate the entire tree.
787 DTU->recalculate(*(DestBB->getParent()));
788 }
789 }
790
791 else {
792 PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr.
793 }
794 }
795
796 /// Return true if we can choose one of these values to use in place of the
797 /// other. Note that we will always choose the non-undef value to keep.
798 static bool CanMergeValues(Value *First, Value *Second) {
799 return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
800 }
801
802 /// Return true if we can fold BB, an almost-empty BB ending in an unconditional
803 /// branch to Succ, into Succ.
804 ///
805 /// Assumption: Succ is the single successor for BB.
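///
/// A conflict exists when BB and Succ share a predecessor P and a PHI node in
/// Succ would need two different non-undef incoming values for P after the
/// merge; in that case the blocks cannot be folded.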
806 static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
807 assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");
808
809 LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
810 << Succ->getName() << "\n");
811 // Shortcut: if there is only a single predecessor, it must be BB and merging
812 // is always safe.
813 if (Succ->getSinglePredecessor()) return true;
814
815 // Make a list of the predecessors of BB
816 SmallPtrSet<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB));
817
818 // Look at all the phi nodes in Succ, to see if they present a conflict when
819 // merging these blocks
820 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
821 PHINode *PN = cast<PHINode>(I);
822
823 // If the incoming value from BB is again a PHINode in
824 // BB which has the same incoming value for *PI as PN does, we can
825 // merge the phi nodes and then the blocks can still be merged
826 PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
827 if (BBPN && BBPN->getParent() == BB) {
828 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
829 BasicBlock *IBB = PN->getIncomingBlock(PI);
830 if (BBPreds.count(IBB) &&
831 !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
832 PN->getIncomingValue(PI))) {
833 LLVM_DEBUG(dbgs()
834 << "Can't fold, phi node " << PN->getName() << " in "
835 << Succ->getName() << " is conflicting with "
836 << BBPN->getName() << " with regard to common predecessor "
837 << IBB->getName() << "\n");
838 return false;
839 }
840 }
841 } else {
842 Value* Val = PN->getIncomingValueForBlock(BB);
843 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
844 // See if the incoming value for the common predecessor is equal to the
845 // one for BB, in which case this phi node will not prevent the merging
846 // of the block.
847 BasicBlock *IBB = PN->getIncomingBlock(PI);
848 if (BBPreds.count(IBB) &&
849 !CanMergeValues(Val, PN->getIncomingValue(PI))) {
850 LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName()
851 << " in " << Succ->getName()
852 << " is conflicting with regard to common "
853 << "predecessor " << IBB->getName() << "\n");
854 return false;
855 }
856 }
857 }
858 }
859
860 return true;
861 }
862
863 using PredBlockVector = SmallVector<BasicBlock *, 16>;
864 using IncomingValueMap = DenseMap<BasicBlock *, Value *>;
865
866 /// Determines the value to use as the phi node input for a block.
867 ///
868 /// Select between \p OldVal and any value that we know flows from \p BB
869 /// to a particular phi on the basis of which one (if either) is not
870 /// undef. Update IncomingValues based on the selected value.
871 ///
872 /// \param OldVal The value we are considering selecting.
873 /// \param BB The block that the value flows in from.
874 /// \param IncomingValues A map from block-to-value for other phi inputs
875 /// that we have examined.
876 ///
877 /// \returns the selected value.
878 static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
879 IncomingValueMap &IncomingValues) {
880 if (!isa<UndefValue>(OldVal)) {
881 assert((!IncomingValues.count(BB) ||
882 IncomingValues.find(BB)->second == OldVal) &&
883 "Expected OldVal to match incoming value from BB!");
884
885 IncomingValues.insert(std::make_pair(BB, OldVal));
886 return OldVal;
887 }
888
889 IncomingValueMap::const_iterator It = IncomingValues.find(BB);
890 if (It != IncomingValues.end()) return It->second;
891
892 return OldVal;
893 }
894
895 /// Create a map from block to value for the operands of a
896 /// given phi.
897 ///
898 /// Create a map from block to value for each non-undef value flowing
899 /// into \p PN.
900 ///
901 /// \param PN The phi we are collecting the map for.
902 /// \param IncomingValues [out] The map from block to value for this phi.
903 static void gatherIncomingValuesToPhi(PHINode *PN,
904 IncomingValueMap &IncomingValues) {
905 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
906 BasicBlock *BB = PN->getIncomingBlock(i);
907 Value *V = PN->getIncomingValue(i);
908
909 if (!isa<UndefValue>(V))
910 IncomingValues.insert(std::make_pair(BB, V));
911 }
912 }
913
914 /// Replace the incoming undef values to a phi with the values
915 /// from a block-to-value map.
916 ///
917 /// \param PN The phi we are replacing the undefs in.
918 /// \param IncomingValues A map from block to value.
919 static void replaceUndefValuesInPhi(PHINode *PN,
920 const IncomingValueMap &IncomingValues) {
921 SmallVector<unsigned> TrueUndefOps;
922 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
923 Value *V = PN->getIncomingValue(i);
924
925 if (!isa<UndefValue>(V)) continue;
926
927 BasicBlock *BB = PN->getIncomingBlock(i);
928 IncomingValueMap::const_iterator It = IncomingValues.find(BB);
929
930 // Keep track of undef/poison incoming values. Those must match, so we fix
931 // them up below if needed.
932 // Note: this is conservatively correct, but we could try harder and group
933 // the undef values per incoming basic block.
934 if (It == IncomingValues.end()) {
935 TrueUndefOps.push_back(i);
936 continue;
937 }
938
939 // There is a defined value for this incoming block, so map this undef
940 // incoming value to the defined value.
941 PN->setIncomingValue(i, It->second);
942 }
943
944 // If there are both undef and poison values incoming, then convert those
945 // values to undef. It is invalid to have different values for the same
946 // incoming block.
947 unsigned PoisonCount = count_if(TrueUndefOps, [&](unsigned i) {
948 return isa<PoisonValue>(PN->getIncomingValue(i));
949 });
950 if (PoisonCount != 0 && PoisonCount != TrueUndefOps.size()) {
951 for (unsigned i : TrueUndefOps)
952 PN->setIncomingValue(i, UndefValue::get(PN->getType()));
953 }
954 }
955
956 /// Replace a value flowing from a block to a phi with
957 /// potentially multiple instances of that value flowing from the
958 /// block's predecessors to the phi.
959 ///
960 /// \param BB The block with the value flowing into the phi.
961 /// \param BBPreds The predecessors of BB.
962 /// \param PN The phi that we are updating.
963 static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
964 const PredBlockVector &BBPreds,
965 PHINode *PN) {
966 Value *OldVal = PN->removeIncomingValue(BB, false);
967 assert(OldVal && "No entry in PHI for Pred BB!");
968
969 IncomingValueMap IncomingValues;
970
971 // We are merging two blocks - BB, and the block containing PN - and
972 // as a result we need to redirect edges from the predecessors of BB
973 // to go to the block containing PN, and update PN
974 // accordingly. Since we allow merging blocks in the case where the
975 // predecessor and successor blocks both share some predecessors,
976 // and where some of those common predecessors might have undef
977 // values flowing into PN, we want to rewrite those values to be
978 // consistent with the non-undef values.
979
980 gatherIncomingValuesToPhi(PN, IncomingValues);
981
982 // If this incoming value is one of the PHI nodes in BB, the new entries
983 // in the PHI node are the entries from the old PHI.
984 if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
985 PHINode *OldValPN = cast<PHINode>(OldVal);
986 for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
987 // Note that, since we are merging phi nodes and BB and Succ might
988 // have common predecessors, we could end up with a phi node with
989 // identical incoming branches. This will be cleaned up later (and
990 // will trigger asserts if we try to clean it up now, without also
991 // simplifying the corresponding conditional branch).
992 BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
993 Value *PredVal = OldValPN->getIncomingValue(i);
994 Value *Selected = selectIncomingValueForBlock(PredVal, PredBB,
995 IncomingValues);
996
997 // And add a new incoming value for this predecessor for the
998 // newly retargeted branch.
999 PN->addIncoming(Selected, PredBB);
1000 }
1001 } else {
1002 for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
1003 // Update existing incoming values in PN for this
1004 // predecessor of BB.
1005 BasicBlock *PredBB = BBPreds[i];
1006 Value *Selected = selectIncomingValueForBlock(OldVal, PredBB,
1007 IncomingValues);
1008
1009 // And add a new incoming value for this predecessor for the
1010 // newly retargeted branch.
1011 PN->addIncoming(Selected, PredBB);
1012 }
1013 }
1014
1015 replaceUndefValuesInPhi(PN, IncomingValues);
1016 }
1017
1018 bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
1019 DomTreeUpdater *DTU) {
1020 assert(BB != &BB->getParent()->getEntryBlock() &&
1021 "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");
1022
1023 // We can't eliminate infinite loops.
1024 BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
1025 if (BB == Succ) return false;
1026
1027 // Check to see if merging these blocks would cause conflicts for any of the
1028 // phi nodes in BB or Succ. If not, we can safely merge.
1029 if (!CanPropagatePredecessorsForPHIs(BB, Succ)) return false;
1030
1031 // Check for cases where Succ has multiple predecessors and a PHI node in BB
1032 // has uses which will not disappear when the PHI nodes are merged. It is
1033 // possible to handle such cases, but difficult: it requires checking whether
1034 // BB dominates Succ, which is non-trivial to calculate in the case where
1035 // Succ has multiple predecessors. Also, it requires checking whether
1036 // constructing the necessary self-referential PHI node doesn't introduce any
1037 // conflicts; this isn't too difficult, but the previous code for doing this
1038 // was incorrect.
1039 //
1040 // Note that if this check finds a live use, BB dominates Succ, so BB is
1041 // something like a loop pre-header (or rarely, a part of an irreducible CFG);
1042 // folding the branch isn't profitable in that case anyway.
1043 if (!Succ->getSinglePredecessor()) {
1044 BasicBlock::iterator BBI = BB->begin();
1045 while (isa<PHINode>(*BBI)) {
1046 for (Use &U : BBI->uses()) {
1047 if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
1048 if (PN->getIncomingBlock(U) != BB)
1049 return false;
1050 } else {
1051 return false;
1052 }
1053 }
1054 ++BBI;
1055 }
1056 }
1057
1058 // We cannot fold the block if it's a branch to an already present callbr
1059 // successor because that creates duplicate successors.
1060 for (auto I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
1061 if (auto *CBI = dyn_cast<CallBrInst>((*I)->getTerminator())) {
1062 if (Succ == CBI->getDefaultDest())
1063 return false;
1064 for (unsigned i = 0, e = CBI->getNumIndirectDests(); i != e; ++i)
1065 if (Succ == CBI->getIndirectDest(i))
1066 return false;
1067 }
1068 }
1069
1070 LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
1071
1072 SmallVector<DominatorTree::UpdateType, 32> Updates;
1073 if (DTU) {
1074 // All predecessors of BB will be moved to Succ.
1075 SmallSetVector<BasicBlock *, 8> Predecessors(pred_begin(BB), pred_end(BB));
1076 Updates.reserve(Updates.size() + 2 * Predecessors.size());
1077 for (auto *Predecessor : Predecessors) {
1078 // This predecessor of BB may already have Succ as a successor.
1079 if (!llvm::is_contained(successors(Predecessor), Succ))
1080 Updates.push_back({DominatorTree::Insert, Predecessor, Succ});
1081 Updates.push_back({DominatorTree::Delete, Predecessor, BB});
1082 }
1083 Updates.push_back({DominatorTree::Delete, BB, Succ});
1084 }
1085
1086 if (isa<PHINode>(Succ->begin())) {
1087 // If there is more than one pred of succ, and there are PHI nodes in
1088 // the successor, then we need to add incoming edges for the PHI nodes
1089 //
1090 const PredBlockVector BBPreds(pred_begin(BB), pred_end(BB));
1091
1092 // Loop over all of the PHI nodes in the successor of BB.
1093 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
1094 PHINode *PN = cast<PHINode>(I);
1095
1096 redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN);
1097 }
1098 }
1099
1100 if (Succ->getSinglePredecessor()) {
1101 // BB is the only predecessor of Succ, so Succ will end up with exactly
1102 // the same predecessors BB had.
1103
1104 // Copy over any phi, debug or lifetime instruction.
1105 BB->getTerminator()->eraseFromParent();
1106 Succ->getInstList().splice(Succ->getFirstNonPHI()->getIterator(),
1107 BB->getInstList());
1108 } else {
1109 while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
1110 // We explicitly check for such uses in CanPropagatePredecessorsForPHIs.
1111 assert(PN->use_empty() && "There shouldn't be any uses here!");
1112 PN->eraseFromParent();
1113 }
1114 }
1115
1116 // If the unconditional branch we replaced contains llvm.loop metadata, we
1117 // add the metadata to the branch instructions in the predecessors.
1118 unsigned LoopMDKind = BB->getContext().getMDKindID("llvm.loop");
1119 Instruction *TI = BB->getTerminator();
1120 if (TI)
1121 if (MDNode *LoopMD = TI->getMetadata(LoopMDKind))
1122 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
1123 BasicBlock *Pred = *PI;
1124 Pred->getTerminator()->setMetadata(LoopMDKind, LoopMD);
1125 }
1126
1127 // Everything that jumped to BB now goes to Succ.
1128 BB->replaceAllUsesWith(Succ);
1129 if (!Succ->hasName()) Succ->takeName(BB);
1130
1131 // Clear the successor list of BB to match updates applying to DTU later.
1132 if (BB->getTerminator())
1133 BB->getInstList().pop_back();
1134 new UnreachableInst(BB->getContext(), BB);
1135 assert(succ_empty(BB) && "The successor list of BB isn't empty before "
1136 "applying corresponding DTU updates.");
1137
1138 if (DTU) {
1139 DTU->applyUpdates(Updates);
1140 DTU->deleteBB(BB);
1141 } else {
1142 BB->eraseFromParent(); // Delete the old basic block.
1143 }
1144 return true;
1145 }
1146
1147 static bool EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB) {
1148 // This implementation doesn't currently consider undef operands
1149 // specially. Theoretically, two phis which are identical except for
1150 // one having an undef where the other doesn't could be collapsed.
1151
1152 bool Changed = false;
1153
1154 // Examine each PHI.
1155 // Note that increment of I must *NOT* be in the iteration_expression, since
1156 // we don't want to immediately advance when we restart from the beginning.
1157 for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I);) {
1158 ++I;
1159 // Is there an identical PHI node in this basic block?
1160 // Note that we only look in the upper triangle of the pairwise comparisons;
1161 // the lower-triangle pairs were already checked and aren't identical.
1162 for (auto J = I; PHINode *DuplicatePN = dyn_cast<PHINode>(J); ++J) {
1163 if (!DuplicatePN->isIdenticalToWhenDefined(PN))
1164 continue;
1165 // A duplicate. Replace this PHI with the base PHI.
1166 ++NumPHICSEs;
1167 DuplicatePN->replaceAllUsesWith(PN);
1168 DuplicatePN->eraseFromParent();
1169 Changed = true;
1170
1171 // The RAUW can change PHIs that we already visited.
1172 I = BB->begin();
1173 break; // Start over from the beginning.
1174 }
1175 }
1176 return Changed;
1177 }
1178
1179 static bool EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB) {
1180 // This implementation doesn't currently consider undef operands
1181 // specially. Theoretically, two phis which are identical except for
1182 // one having an undef where the other doesn't could be collapsed.
1183
1184 struct PHIDenseMapInfo {
1185 static PHINode *getEmptyKey() {
1186 return DenseMapInfo<PHINode *>::getEmptyKey();
1187 }
1188
1189 static PHINode *getTombstoneKey() {
1190 return DenseMapInfo<PHINode *>::getTombstoneKey();
1191 }
1192
1193 static bool isSentinel(PHINode *PN) {
1194 return PN == getEmptyKey() || PN == getTombstoneKey();
1195 }
1196
1197 // WARNING: this logic must be kept in sync with
1198 // Instruction::isIdenticalToWhenDefined()!
1199 static unsigned getHashValueImpl(PHINode *PN) {
1200 // Compute a hash value on the operands. Instcombine will likely have
1201 // sorted them, which helps expose duplicates, but we have to check all
1202 // the operands to be safe in case instcombine hasn't run.
1203 return static_cast<unsigned>(hash_combine(
1204 hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
1205 hash_combine_range(PN->block_begin(), PN->block_end())));
1206 }
1207
1208 static unsigned getHashValue(PHINode *PN) {
1209 #ifndef NDEBUG
1210 // If -phicse-debug-hash was specified, return a constant -- this
1211 // will force all hashing to collide, so we'll exhaustively search
1212 // the table for a match, and the assertion in isEqual will fire if
1213 // there's a bug causing equal keys to hash differently.
1214 if (PHICSEDebugHash)
1215 return 0;
1216 #endif
1217 return getHashValueImpl(PN);
1218 }
1219
1220 static bool isEqualImpl(PHINode *LHS, PHINode *RHS) {
1221 if (isSentinel(LHS) || isSentinel(RHS))
1222 return LHS == RHS;
1223 return LHS->isIdenticalTo(RHS);
1224 }
1225
1226 static bool isEqual(PHINode *LHS, PHINode *RHS) {
1227 // These comparisons are nontrivial, so assert that equality implies
1228 // hash equality (DenseMap demands this as an invariant).
1229 bool Result = isEqualImpl(LHS, RHS);
1230 assert(!Result || (isSentinel(LHS) && LHS == RHS) ||
1231 getHashValueImpl(LHS) == getHashValueImpl(RHS));
1232 return Result;
1233 }
1234 };
1235
1236 // Set of unique PHINodes.
1237 DenseSet<PHINode *, PHIDenseMapInfo> PHISet;
1238 PHISet.reserve(4 * PHICSENumPHISmallSize);
1239
1240 // Examine each PHI.
1241 bool Changed = false;
1242 for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
1243 auto Inserted = PHISet.insert(PN);
1244 if (!Inserted.second) {
1245 // A duplicate. Replace this PHI with its duplicate.
1246 ++NumPHICSEs;
1247 PN->replaceAllUsesWith(*Inserted.first);
1248 PN->eraseFromParent();
1249 Changed = true;
1250
1251 // The RAUW can change PHIs that we already visited. Start over from the
1252 // beginning.
1253 PHISet.clear();
1254 I = BB->begin();
1255 }
1256 }
1257
1258 return Changed;
1259 }
1260
1261 bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
1262 if (
1263 #ifndef NDEBUG
1264 !PHICSEDebugHash &&
1265 #endif
1266 hasNItemsOrLess(BB->phis(), PHICSENumPHISmallSize))
1267 return EliminateDuplicatePHINodesNaiveImpl(BB);
1268 return EliminateDuplicatePHINodesSetBasedImpl(BB);
1269 }
1270
1271 /// If the specified pointer points to an object that we control, try to modify
1272 /// the object's alignment to PrefAlign. Returns a minimum known alignment of
1273 /// the value after the operation, which may be lower than PrefAlign.
1274 ///
1275 /// Increasing a value's alignment often isn't possible though. If alignment is
1276 /// important, a more reliable approach is to simply align all global variables
1277 /// and allocation instructions to their preferred alignment from the beginning.
1278 static Align tryEnforceAlignment(Value *V, Align PrefAlign,
1279 const DataLayout &DL) {
1280 V = V->stripPointerCasts();
1281
1282 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1283 // TODO: Ideally, this function would not be called if PrefAlign is smaller
1284 // than the current alignment, as the known bits calculation should have
1285 // already taken it into account. However, this is not always the case,
1286 // as computeKnownBits() has a depth limit, while stripPointerCasts()
1287 // doesn't.
1288 Align CurrentAlign = AI->getAlign();
1289 if (PrefAlign <= CurrentAlign)
1290 return CurrentAlign;
1291
1292 // If the preferred alignment is greater than the natural stack alignment
1293 // then don't round up. This avoids dynamic stack realignment.
1294 if (DL.exceedsNaturalStackAlignment(PrefAlign))
1295 return CurrentAlign;
1296 AI->setAlignment(PrefAlign);
1297 return PrefAlign;
1298 }
1299
1300 if (auto *GO = dyn_cast<GlobalObject>(V)) {
1301 // TODO: as above, this shouldn't be necessary.
1302 Align CurrentAlign = GO->getPointerAlignment(DL);
1303 if (PrefAlign <= CurrentAlign)
1304 return CurrentAlign;
1305
1306 // If there is a large requested alignment and we can, bump up the alignment
1307 // of the global. If the memory we set aside for the global may not be the
1308 // memory used by the final program then it is impossible for us to reliably
1309 // enforce the preferred alignment.
1310 if (!GO->canIncreaseAlignment())
1311 return CurrentAlign;
1312
1313 GO->setAlignment(PrefAlign);
1314 return PrefAlign;
1315 }
1316
1317 return Align(1);
1318 }
1319
1320 Align llvm::getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign,
1321 const DataLayout &DL,
1322 const Instruction *CxtI,
1323 AssumptionCache *AC,
1324 const DominatorTree *DT) {
1325 assert(V->getType()->isPointerTy() &&
1326 "getOrEnforceKnownAlignment expects a pointer!");
1327
1328 KnownBits Known = computeKnownBits(V, DL, 0, AC, CxtI, DT);
1329 unsigned TrailZ = Known.countMinTrailingZeros();
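// A pointer whose low TrailZ bits are known to be zero is aligned to at least
// 1 << TrailZ bytes.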
1330
1331 // Avoid trouble with ridiculously large TrailZ values, such as
1332 // those computed from a null pointer.
1333 // LLVM doesn't support alignments larger than (1 << MaxAlignmentExponent).
1334 TrailZ = std::min(TrailZ, +Value::MaxAlignmentExponent);
1335
1336 Align Alignment = Align(1ull << std::min(Known.getBitWidth() - 1, TrailZ));
1337
1338 if (PrefAlign && *PrefAlign > Alignment)
1339 Alignment = std::max(Alignment, tryEnforceAlignment(V, *PrefAlign, DL));
1340
1341 // We don't need to make any adjustment.
1342 return Alignment;
1343 }
1344
1345 ///===---------------------------------------------------------------------===//
1346 /// Dbg Intrinsic utilities
1347 ///
1348
1349 /// See if there is a dbg.value intrinsic for DIVar for the PHI node.
1350 static bool PhiHasDebugValue(DILocalVariable *DIVar,
1351 DIExpression *DIExpr,
1352 PHINode *APN) {
1353 // Since we can't guarantee that the original dbg.declare intrinsic
1354 // is removed by LowerDbgDeclare(), we need to make sure that we are
1355 // not inserting the same dbg.value intrinsic over and over.
1356 SmallVector<DbgValueInst *, 1> DbgValues;
1357 findDbgValues(DbgValues, APN);
1358 for (auto *DVI : DbgValues) {
1359 assert(DVI->getValue() == APN);
1360 if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr))
1361 return true;
1362 }
1363 return false;
1364 }
1365
1366 /// Check if the alloc size of \p ValTy is large enough to cover the variable
1367 /// (or fragment of the variable) described by \p DII.
1368 ///
1369 /// This is primarily intended as a helper for the different
1370 /// ConvertDebugDeclareToDebugValue functions. The dbg.declare/dbg.addr that is
1371 /// converted describes an alloca'd variable, so we need to use the
1372 /// alloc size of the value when doing the comparison. E.g. an i1 value will be
1373 /// identified as covering an n-bit fragment, if the store size of i1 is at
1374 /// least n bits.
1375 static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII) {
1376 const DataLayout &DL = DII->getModule()->getDataLayout();
1377 TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
1378 if (Optional<uint64_t> FragmentSize = DII->getFragmentSizeInBits()) {
1379 assert(!ValueSize.isScalable() &&
1380 "Fragments don't work on scalable types.");
1381 return ValueSize.getFixedSize() >= *FragmentSize;
1382 }
1383 // We can't always calculate the size of the DI variable (e.g. if it is a
1384 // VLA). Try to use the size of the alloca that the dbg intrinsic describes
1385 // instead.
1386 if (DII->isAddressOfVariable())
1387 if (auto *AI = dyn_cast_or_null<AllocaInst>(DII->getVariableLocation()))
1388 if (Optional<TypeSize> FragmentSize = AI->getAllocationSizeInBits(DL)) {
1389 assert(ValueSize.isScalable() == FragmentSize->isScalable() &&
1390 "Both sizes should agree on the scalable flag.");
1391 return TypeSize::isKnownGE(ValueSize, *FragmentSize);
1392 }
1393 // Could not determine size of variable. Conservatively return false.
1394 return false;
1395 }
1396
1397 /// Produce a DebugLoc to use for each dbg.declare/inst pair that is promoted
1398 /// to a dbg.value. Because no machine insts can come from debug intrinsics,
1399 /// only the scope and inlinedAt are significant. Zero line numbers are used in
1400 /// case this DebugLoc leaks into any adjacent instructions.
1401 static DebugLoc getDebugValueLoc(DbgVariableIntrinsic *DII, Instruction *Src) {
1402 // Original dbg.declare must have a location.
1403 DebugLoc DeclareLoc = DII->getDebugLoc();
1404 MDNode *Scope = DeclareLoc.getScope();
1405 DILocation *InlinedAt = DeclareLoc.getInlinedAt();
1406 // Produce an unknown location with the correct scope / inlinedAt fields.
1407 return DILocation::get(DII->getContext(), 0, 0, Scope, InlinedAt);
1408 }
1409
1410 /// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
1411 /// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
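///
/// For example, given roughly
///   %a = alloca i32
///   call void @llvm.dbg.declare(metadata i32* %a, metadata !var, metadata !expr)
///   store i32 %v, i32* %a
/// a corresponding
///   call void @llvm.dbg.value(metadata i32 %v, metadata !var, metadata !expr)
/// is inserted before the store, so the variable tracks the stored value
/// rather than the stack slot (!var and !expr stand for the declare's
/// variable and expression operands).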
1412 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1413 StoreInst *SI, DIBuilder &Builder) {
1414 assert(DII->isAddressOfVariable());
1415 auto *DIVar = DII->getVariable();
1416 assert(DIVar && "Missing variable");
1417 auto *DIExpr = DII->getExpression();
1418 Value *DV = SI->getValueOperand();
1419
1420 DebugLoc NewLoc = getDebugValueLoc(DII, SI);
1421
1422 if (!valueCoversEntireFragment(DV->getType(), DII)) {
1423 // FIXME: If storing to a part of the variable described by the dbg.declare,
1424 // then we want to insert a dbg.value for the corresponding fragment.
1425 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1426 << *DII << '\n');
1427 // For now, when there is a store to parts of the variable (but we do not
1428 // know which part) we insert a dbg.value intrinsic to indicate that we
1429 // know nothing about the variable's content.
1430 DV = UndefValue::get(DV->getType());
1431 Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc, SI);
1432 return;
1433 }
1434
1435 Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc, SI);
1436 }
1437
1438 /// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
1439 /// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
1440 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1441 LoadInst *LI, DIBuilder &Builder) {
1442 auto *DIVar = DII->getVariable();
1443 auto *DIExpr = DII->getExpression();
1444 assert(DIVar && "Missing variable");
1445
1446 if (!valueCoversEntireFragment(LI->getType(), DII)) {
1447 // FIXME: If only referring to a part of the variable described by the
1448 // dbg.declare, then we want to insert a dbg.value for the corresponding
1449 // fragment.
1450 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1451 << *DII << '\n');
1452 return;
1453 }
1454
1455 DebugLoc NewLoc = getDebugValueLoc(DII, nullptr);
1456
1457 // We are now tracking the loaded value instead of the address. In the
1458 // future if multi-location support is added to the IR, it might be
1459 // preferable to keep tracking both the loaded value and the original
1460 // address in case the alloca can not be elided.
1461 Instruction *DbgValue = Builder.insertDbgValueIntrinsic(
1462 LI, DIVar, DIExpr, NewLoc, (Instruction *)nullptr);
1463 DbgValue->insertAfter(LI);
1464 }
1465
1466 /// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
1467 /// llvm.dbg.declare or llvm.dbg.addr intrinsic.
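///
/// Sketch (hypothetical IR): when e.g. mem2reg introduces `%x = phi i32 ...`
/// for the promoted alloca, a corresponding
/// `llvm.dbg.value(metadata i32 %x, ...)` is inserted at the block's first
/// insertion point (skipped when the block, e.g. a catchswitch block, has no
/// valid insertion point).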
1468 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1469 PHINode *APN, DIBuilder &Builder) {
1470 auto *DIVar = DII->getVariable();
1471 auto *DIExpr = DII->getExpression();
1472 assert(DIVar && "Missing variable");
1473
1474 if (PhiHasDebugValue(DIVar, DIExpr, APN))
1475 return;
1476
1477 if (!valueCoversEntireFragment(APN->getType(), DII)) {
1478 // FIXME: If only referring to a part of the variable described by the
1479 // dbg.declare, then we want to insert a dbg.value for the corresponding
1480 // fragment.
1481 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1482 << *DII << '\n');
1483 return;
1484 }
1485
1486 BasicBlock *BB = APN->getParent();
1487 auto InsertionPt = BB->getFirstInsertionPt();
1488
1489 DebugLoc NewLoc = getDebugValueLoc(DII, nullptr);
1490
1491 // The block may be a catchswitch block, which does not have a valid
1492 // insertion point.
1493 // FIXME: Insert dbg.value markers in the successors when appropriate.
1494 if (InsertionPt != BB->end())
1495 Builder.insertDbgValueIntrinsic(APN, DIVar, DIExpr, NewLoc, &*InsertionPt);
1496 }
1497
1498 /// Determine whether this alloca is either a VLA or an array.
1499 static bool isArray(AllocaInst *AI) {
1500 return AI->isArrayAllocation() ||
1501 (AI->getAllocatedType() && AI->getAllocatedType()->isArrayTy());
1502 }
1503
1504 /// Determine whether this alloca is a structure.
1505 static bool isStructure(AllocaInst *AI) {
1506 return AI->getAllocatedType() && AI->getAllocatedType()->isStructTy();
1507 }
1508
1509 /// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
1510 /// of llvm.dbg.value intrinsics.
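///
/// As a rough sketch (hypothetical IR): the dbg.declare describing a scalar
/// i32 alloca is erased, every `store i32 %v, i32* %x.addr` gains a preceding
/// `llvm.dbg.value(metadata i32 %v, ...)`, every load of the alloca gains a
/// following dbg.value tracking the loaded value, and calls taking the address
/// get a dbg.value with a DW_OP_deref expression, so later passes can elide
/// the stack slot without losing the variable.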
1511 bool llvm::LowerDbgDeclare(Function &F) {
1512 bool Changed = false;
1513 DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
1514 SmallVector<DbgDeclareInst *, 4> Dbgs;
1515 for (auto &FI : F)
1516 for (Instruction &BI : FI)
1517 if (auto DDI = dyn_cast<DbgDeclareInst>(&BI))
1518 Dbgs.push_back(DDI);
1519
1520 if (Dbgs.empty())
1521 return Changed;
1522
1523 for (auto &I : Dbgs) {
1524 DbgDeclareInst *DDI = I;
1525 AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress());
1526 // If this is an alloca for a scalar variable, insert a dbg.value
1527 // at each load and store to the alloca and erase the dbg.declare.
1528 // The dbg.values allow tracking a variable even if it is not
1529 // stored on the stack, while the dbg.declare can only describe
1530 // the stack slot (and at a lexical-scope granularity). Later
1531 // passes will attempt to elide the stack slot.
1532 if (!AI || isArray(AI) || isStructure(AI))
1533 continue;
1534
1535 // A volatile load/store means that the alloca can't be elided anyway.
1536 if (llvm::any_of(AI->users(), [](User *U) -> bool {
1537 if (LoadInst *LI = dyn_cast<LoadInst>(U))
1538 return LI->isVolatile();
1539 if (StoreInst *SI = dyn_cast<StoreInst>(U))
1540 return SI->isVolatile();
1541 return false;
1542 }))
1543 continue;
1544
1545 SmallVector<const Value *, 8> WorkList;
1546 WorkList.push_back(AI);
1547 while (!WorkList.empty()) {
1548 const Value *V = WorkList.pop_back_val();
1549 for (auto &AIUse : V->uses()) {
1550 User *U = AIUse.getUser();
1551 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1552 if (AIUse.getOperandNo() == 1)
1553 ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
1554 } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
1555 ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
1556 } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
1557 // This is a call by-value or some other instruction that takes a
1558 // pointer to the variable. Insert a *value* intrinsic that describes
1559 // the variable by dereferencing the alloca.
1560 if (!CI->isLifetimeStartOrEnd()) {
1561 DebugLoc NewLoc = getDebugValueLoc(DDI, nullptr);
1562 auto *DerefExpr =
1563 DIExpression::append(DDI->getExpression(), dwarf::DW_OP_deref);
1564 DIB.insertDbgValueIntrinsic(AI, DDI->getVariable(), DerefExpr,
1565 NewLoc, CI);
1566 }
1567 } else if (BitCastInst *BI = dyn_cast<BitCastInst>(U)) {
1568 if (BI->getType()->isPointerTy())
1569 WorkList.push_back(BI);
1570 }
1571 }
1572 }
1573 DDI->eraseFromParent();
1574 Changed = true;
1575 }
1576
1577 if (Changed)
1578 for (BasicBlock &BB : F)
1579 RemoveRedundantDbgInstrs(&BB);
1580
1581 return Changed;
1582 }
1583
1584 /// Propagate dbg.value intrinsics through the newly inserted PHIs.
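///
/// Sketch (hypothetical IR): if `%old = phi i32 ...` in \p BB is described by
/// a `llvm.dbg.value(metadata i32 %old, ...)` and a newly inserted PHI `%new`
/// uses %old as an incoming value, a clone of that dbg.value retargeted at
/// %new is inserted at the first insertion point of %new's block.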
1585 void llvm::insertDebugValuesForPHIs(BasicBlock *BB,
1586 SmallVectorImpl<PHINode *> &InsertedPHIs) {
1587 assert(BB && "No BasicBlock to clone dbg.value(s) from.");
1588 if (InsertedPHIs.size() == 0)
1589 return;
1590
1591 // Map existing PHI nodes to their dbg.values.
1592 ValueToValueMapTy DbgValueMap;
1593 for (auto &I : *BB) {
1594 if (auto DbgII = dyn_cast<DbgVariableIntrinsic>(&I)) {
1595 if (auto *Loc = dyn_cast_or_null<PHINode>(DbgII->getVariableLocation()))
1596 DbgValueMap.insert({Loc, DbgII});
1597 }
1598 }
1599 if (DbgValueMap.size() == 0)
1600 return;
1601
1602 // Then iterate through the new PHIs and look to see if they use one of the
1603 // previously mapped PHIs. If so, insert a new dbg.value intrinsic that will
1604 // propagate the info through the new PHI.
1605 LLVMContext &C = BB->getContext();
1606 for (auto PHI : InsertedPHIs) {
1607 BasicBlock *Parent = PHI->getParent();
1608 // Avoid inserting an intrinsic into an EH block.
1609 if (Parent->getFirstNonPHI()->isEHPad())
1610 continue;
1611 auto PhiMAV = MetadataAsValue::get(C, ValueAsMetadata::get(PHI));
1612 for (auto VI : PHI->operand_values()) {
1613 auto V = DbgValueMap.find(VI);
1614 if (V != DbgValueMap.end()) {
1615 auto *DbgII = cast<DbgVariableIntrinsic>(V->second);
1616 Instruction *NewDbgII = DbgII->clone();
1617 NewDbgII->setOperand(0, PhiMAV);
1618 auto InsertionPt = Parent->getFirstInsertionPt();
1619 assert(InsertionPt != Parent->end() && "Ill-formed basic block");
1620 NewDbgII->insertBefore(&*InsertionPt);
1621 }
1622 }
1623 }
1624 }
1625
1626 /// Finds all intrinsics declaring local variables as living in the memory that
1627 /// 'V' points to. This may include a mix of dbg.declare and
1628 /// dbg.addr intrinsics.
1629 TinyPtrVector<DbgVariableIntrinsic *> llvm::FindDbgAddrUses(Value *V) {
1630 // This function is hot. Check whether the value has any metadata to avoid a
1631 // DenseMap lookup.
1632 if (!V->isUsedByMetadata())
1633 return {};
1634 auto *L = LocalAsMetadata::getIfExists(V);
1635 if (!L)
1636 return {};
1637 auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L);
1638 if (!MDV)
1639 return {};
1640
1641 TinyPtrVector<DbgVariableIntrinsic *> Declares;
1642 for (User *U : MDV->users()) {
1643 if (auto *DII = dyn_cast<DbgVariableIntrinsic>(U))
1644 if (DII->isAddressOfVariable())
1645 Declares.push_back(DII);
1646 }
1647
1648 return Declares;
1649 }
1650
1651 TinyPtrVector<DbgDeclareInst *> llvm::FindDbgDeclareUses(Value *V) {
1652 TinyPtrVector<DbgDeclareInst *> DDIs;
1653 for (DbgVariableIntrinsic *DVI : FindDbgAddrUses(V))
1654 if (auto *DDI = dyn_cast<DbgDeclareInst>(DVI))
1655 DDIs.push_back(DDI);
1656 return DDIs;
1657 }
1658
1659 void llvm::findDbgValues(SmallVectorImpl<DbgValueInst *> &DbgValues, Value *V) {
1660 // This function is hot. Check whether the value has any metadata to avoid a
1661 // DenseMap lookup.
1662 if (!V->isUsedByMetadata())
1663 return;
1664 if (auto *L = LocalAsMetadata::getIfExists(V))
1665 if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L))
1666 for (User *U : MDV->users())
1667 if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U))
1668 DbgValues.push_back(DVI);
1669 }
1670
1671 void llvm::findDbgUsers(SmallVectorImpl<DbgVariableIntrinsic *> &DbgUsers,
1672 Value *V) {
1673 // This function is hot. Check whether the value has any metadata to avoid a
1674 // DenseMap lookup.
1675 if (!V->isUsedByMetadata())
1676 return;
1677 if (auto *L = LocalAsMetadata::getIfExists(V))
1678 if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L))
1679 for (User *U : MDV->users())
1680 if (DbgVariableIntrinsic *DII = dyn_cast<DbgVariableIntrinsic>(U))
1681 DbgUsers.push_back(DII);
1682 }
1683
1684 bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
1685 DIBuilder &Builder, uint8_t DIExprFlags,
1686 int Offset) {
1687 auto DbgAddrs = FindDbgAddrUses(Address);
1688 for (DbgVariableIntrinsic *DII : DbgAddrs) {
1689 DebugLoc Loc = DII->getDebugLoc();
1690 auto *DIVar = DII->getVariable();
1691 auto *DIExpr = DII->getExpression();
1692 assert(DIVar && "Missing variable");
1693 DIExpr = DIExpression::prepend(DIExpr, DIExprFlags, Offset);
1694 // Insert llvm.dbg.declare immediately before DII, and remove old
1695 // llvm.dbg.declare.
1696 Builder.insertDeclare(NewAddress, DIVar, DIExpr, Loc, DII);
1697 DII->eraseFromParent();
1698 }
1699 return !DbgAddrs.empty();
1700 }
1701
1702 static void replaceOneDbgValueForAlloca(DbgValueInst *DVI, Value *NewAddress,
1703 DIBuilder &Builder, int Offset) {
1704 DebugLoc Loc = DVI->getDebugLoc();
1705 auto *DIVar = DVI->getVariable();
1706 auto *DIExpr = DVI->getExpression();
1707 assert(DIVar && "Missing variable");
1708
1709 // This is an alloca-based llvm.dbg.value. The first thing it should do with
1710 // the alloca pointer is dereference it. Otherwise we don't know how to handle
1711 // it and give up.
1712 if (!DIExpr || DIExpr->getNumElements() < 1 ||
1713 DIExpr->getElement(0) != dwarf::DW_OP_deref)
1714 return;
1715
1716 // Insert the offset before the first deref.
1717 // We could just change the offset argument of dbg.value, but it's unsigned...
1718 if (Offset)
1719 DIExpr = DIExpression::prepend(DIExpr, 0, Offset);
1720
1721 Builder.insertDbgValueIntrinsic(NewAddress, DIVar, DIExpr, Loc, DVI);
1722 DVI->eraseFromParent();
1723 }
1724
1725 void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
1726 DIBuilder &Builder, int Offset) {
1727 if (auto *L = LocalAsMetadata::getIfExists(AI))
1728 if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
1729 for (auto UI = MDV->use_begin(), UE = MDV->use_end(); UI != UE;) {
1730 Use &U = *UI++;
1731 if (auto *DVI = dyn_cast<DbgValueInst>(U.getUser()))
1732 replaceOneDbgValueForAlloca(DVI, NewAllocaAddress, Builder, Offset);
1733 }
1734 }
1735
1736 /// Wrap \p V in a ValueAsMetadata instance.
1737 static MetadataAsValue *wrapValueInMetadata(LLVMContext &C, Value *V) {
1738 return MetadataAsValue::get(C, ValueAsMetadata::get(V));
1739 }
1740
1741 /// Salvage the debug information for \p I's debug users where possible; any
1742 /// debug uses that cannot be salvaged are marked undef.
1743 void llvm::salvageDebugInfo(Instruction &I) {
1744 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
1745 findDbgUsers(DbgUsers, &I);
1746 salvageDebugInfoForDbgValues(I, DbgUsers);
1747 }
1748
1749 void llvm::salvageDebugInfoForDbgValues(
1750 Instruction &I, ArrayRef<DbgVariableIntrinsic *> DbgUsers) {
1751 auto &Ctx = I.getContext();
1752 bool Salvaged = false;
1753 auto wrapMD = [&](Value *V) { return wrapValueInMetadata(Ctx, V); };
1754
1755 for (auto *DII : DbgUsers) {
1756 // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they
1757 // are implicitly pointing out the value as a DWARF memory location
1758 // description.
1759 bool StackValue = isa<DbgValueInst>(DII);
1760
1761 DIExpression *DIExpr =
1762 salvageDebugInfoImpl(I, DII->getExpression(), StackValue);
1763
1764 // salvageDebugInfoImpl fails based only on properties of I, so it should
1765 // fail either on the first element of DbgUsers or on none of them.
1766 if (!DIExpr)
1767 break;
1768
1769 DII->setOperand(0, wrapMD(I.getOperand(0)));
1770 DII->setOperand(2, MetadataAsValue::get(Ctx, DIExpr));
1771 LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
1772 Salvaged = true;
1773 }
1774
1775 if (Salvaged)
1776 return;
1777
1778 for (auto *DII : DbgUsers) {
1779 Value *Undef = UndefValue::get(I.getType());
1780 DII->setOperand(0, MetadataAsValue::get(DII->getContext(),
1781 ValueAsMetadata::get(Undef)));
1782 }
1783 }
1784
1785 DIExpression *llvm::salvageDebugInfoImpl(Instruction &I,
1786 DIExpression *SrcDIExpr,
1787 bool WithStackValue) {
1788 auto &M = *I.getModule();
1789 auto &DL = M.getDataLayout();
1790
1791 // Apply a vector of opcodes to the source DIExpression.
1792 auto doSalvage = [&](SmallVectorImpl<uint64_t> &Ops) -> DIExpression * {
1793 DIExpression *DIExpr = SrcDIExpr;
1794 if (!Ops.empty()) {
1795 DIExpr = DIExpression::prependOpcodes(DIExpr, Ops, WithStackValue);
1796 }
1797 return DIExpr;
1798 };
1799
1800 // Apply the given offset to the source DIExpression.
1801 auto applyOffset = [&](uint64_t Offset) -> DIExpression * {
1802 SmallVector<uint64_t, 8> Ops;
1803 DIExpression::appendOffset(Ops, Offset);
1804 return doSalvage(Ops);
1805 };
1806
1807 // initializer-list helper for applying operators to the source DIExpression.
1808 auto applyOps = [&](ArrayRef<uint64_t> Opcodes) -> DIExpression * {
1809 SmallVector<uint64_t, 8> Ops(Opcodes.begin(), Opcodes.end());
1810 return doSalvage(Ops);
1811 };
1812
1813 if (auto *CI = dyn_cast<CastInst>(&I)) {
1814 // No-op casts are irrelevant for debug info.
1815 if (CI->isNoopCast(DL))
1816 return SrcDIExpr;
1817
1818 Type *Type = CI->getType();
1819 // Casts other than Trunc, SExt, or ZExt to scalar types cannot be salvaged.
1820 if (Type->isVectorTy() ||
1821 !(isa<TruncInst>(&I) || isa<SExtInst>(&I) || isa<ZExtInst>(&I)))
1822 return nullptr;
1823
1824 Value *FromValue = CI->getOperand(0);
1825 unsigned FromTypeBitSize = FromValue->getType()->getScalarSizeInBits();
1826 unsigned ToTypeBitSize = Type->getScalarSizeInBits();
1827
1828 return applyOps(DIExpression::getExtOps(FromTypeBitSize, ToTypeBitSize,
1829 isa<SExtInst>(&I)));
1830 }
1831
1832 if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
1833 unsigned BitWidth =
1834 M.getDataLayout().getIndexSizeInBits(GEP->getPointerAddressSpace());
1835 // Rewrite a constant GEP into a DIExpression.
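    // For example (hypothetical IR and layout): `getelementptr inbounds i8, i8* %p, i64 4`
    // folds to a constant byte offset of 4, which appendOffset encodes as
    // DW_OP_plus_uconst 4 in the salvaged expression.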
1836 APInt Offset(BitWidth, 0);
1837 if (GEP->accumulateConstantOffset(M.getDataLayout(), Offset)) {
1838 return applyOffset(Offset.getSExtValue());
1839 } else {
1840 return nullptr;
1841 }
1842 } else if (auto *BI = dyn_cast<BinaryOperator>(&I)) {
1843 // Rewrite binary operations with constant integer operands.
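    // For example (hypothetical IR), when `%a = add i64 %x, 16` is erased, a
    // dbg.value that referred to %a can instead refer to %x with
    // DW_OP_plus_uconst 16 (plus DW_OP_stack_value for dbg.value users)
    // appended to its expression.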
1844 auto *ConstInt = dyn_cast<ConstantInt>(I.getOperand(1));
1845 if (!ConstInt || ConstInt->getBitWidth() > 64)
1846 return nullptr;
1847
1848 uint64_t Val = ConstInt->getSExtValue();
1849 switch (BI->getOpcode()) {
1850 case Instruction::Add:
1851 return applyOffset(Val);
1852 case Instruction::Sub:
1853 return applyOffset(-int64_t(Val));
1854 case Instruction::Mul:
1855 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_mul});
1856 case Instruction::SDiv:
1857 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_div});
1858 case Instruction::SRem:
1859 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_mod});
1860 case Instruction::Or:
1861 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_or});
1862 case Instruction::And:
1863 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_and});
1864 case Instruction::Xor:
1865 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_xor});
1866 case Instruction::Shl:
1867 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_shl});
1868 case Instruction::LShr:
1869 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_shr});
1870 case Instruction::AShr:
1871 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_shra});
1872 default:
1873 // TODO: Salvage constants from each kind of binop we know about.
1874 return nullptr;
1875 }
1876 // *Not* to do: we should not attempt to salvage load instructions,
1877 // because the validity and lifetime of a dbg.value containing
1878 // DW_OP_deref becomes difficult to analyze. See PR40628 for examples.
1879 }
1880 return nullptr;
1881 }
1882
1883 /// A replacement for a dbg.value expression.
1884 using DbgValReplacement = Optional<DIExpression *>;
1885
1886 /// Point debug users of \p From to \p To using exprs given by \p RewriteExpr,
1887 /// possibly moving/undefing users to prevent use-before-def. Returns true if
1888 /// changes are made.
1889 static bool rewriteDebugUsers(
1890 Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT,
1891 function_ref<DbgValReplacement(DbgVariableIntrinsic &DII)> RewriteExpr) {
1892 // Find debug users of From.
1893 SmallVector<DbgVariableIntrinsic *, 1> Users;
1894 findDbgUsers(Users, &From);
1895 if (Users.empty())
1896 return false;
1897
1898 // Prevent use-before-def of To.
1899 bool Changed = false;
1900 SmallPtrSet<DbgVariableIntrinsic *, 1> UndefOrSalvage;
1901 if (isa<Instruction>(&To)) {
1902 bool DomPointAfterFrom = From.getNextNonDebugInstruction() == &DomPoint;
1903
1904 for (auto *DII : Users) {
1905 // It's common to see a debug user between From and DomPoint. Move it
1906 // after DomPoint to preserve the variable update without any reordering.
1907 if (DomPointAfterFrom && DII->getNextNonDebugInstruction() == &DomPoint) {
1908 LLVM_DEBUG(dbgs() << "MOVE: " << *DII << '\n');
1909 DII->moveAfter(&DomPoint);
1910 Changed = true;
1911
1912 // Users which otherwise aren't dominated by the replacement value must
1913 // be salvaged or deleted.
1914 } else if (!DT.dominates(&DomPoint, DII)) {
1915 UndefOrSalvage.insert(DII);
1916 }
1917 }
1918 }
1919
1920 // Update debug users without use-before-def risk.
1921 for (auto *DII : Users) {
1922 if (UndefOrSalvage.count(DII))
1923 continue;
1924
1925 LLVMContext &Ctx = DII->getContext();
1926 DbgValReplacement DVR = RewriteExpr(*DII);
1927 if (!DVR)
1928 continue;
1929
1930 DII->setOperand(0, wrapValueInMetadata(Ctx, &To));
1931 DII->setOperand(2, MetadataAsValue::get(Ctx, *DVR));
1932 LLVM_DEBUG(dbgs() << "REWRITE: " << *DII << '\n');
1933 Changed = true;
1934 }
1935
1936 if (!UndefOrSalvage.empty()) {
1937 // Try to salvage the remaining debug users.
1938 salvageDebugInfo(From);
1939 Changed = true;
1940 }
1941
1942 return Changed;
1943 }
1944
1945 /// Check if a bitcast between a value of type \p FromTy to type \p ToTy would
1946 /// losslessly preserve the bits and semantics of the value. This predicate is
1947 /// symmetric, i.e swapping \p FromTy and \p ToTy should give the same result.
1948 ///
1949 /// Note that Type::canLosslesslyBitCastTo is not suitable here because it
1950 /// allows semantically non-equivalent bitcasts, such as <2 x i64> -> <4 x i32>,
1951 /// and also does not allow lossless pointer <-> integer conversions.
1952 static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy,
1953 Type *ToTy) {
1954 // Trivially compatible types.
1955 if (FromTy == ToTy)
1956 return true;
1957
1958 // Handle compatible pointer <-> integer conversions.
1959 if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) {
1960 bool SameSize = DL.getTypeSizeInBits(FromTy) == DL.getTypeSizeInBits(ToTy);
1961 bool LosslessConversion = !DL.isNonIntegralPointerType(FromTy) &&
1962 !DL.isNonIntegralPointerType(ToTy);
1963 return SameSize && LosslessConversion;
1964 }
1965
1966 // TODO: This is not exhaustive.
1967 return false;
1968 }
1969
1970 bool llvm::replaceAllDbgUsesWith(Instruction &From, Value &To,
1971 Instruction &DomPoint, DominatorTree &DT) {
1972 // Exit early if From has no debug users.
1973 if (!From.isUsedByMetadata())
1974 return false;
1975
1976 assert(&From != &To && "Can't replace something with itself");
1977
1978 Type *FromTy = From.getType();
1979 Type *ToTy = To.getType();
1980
1981 auto Identity = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
1982 return DII.getExpression();
1983 };
1984
1985 // Handle no-op conversions.
1986 Module &M = *From.getModule();
1987 const DataLayout &DL = M.getDataLayout();
1988 if (isBitCastSemanticsPreserving(DL, FromTy, ToTy))
1989 return rewriteDebugUsers(From, To, DomPoint, DT, Identity);
1990
1991 // Handle integer-to-integer widening and narrowing.
1992 // FIXME: Use DW_OP_convert when it's available everywhere.
1993 if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) {
1994 uint64_t FromBits = FromTy->getPrimitiveSizeInBits();
1995 uint64_t ToBits = ToTy->getPrimitiveSizeInBits();
1996 assert(FromBits != ToBits && "Unexpected no-op conversion");
1997
1998 // When the width of the result grows, assume that a debugger will only
1999 // access the low `FromBits` bits when inspecting the source variable.
2000 if (FromBits < ToBits)
2001 return rewriteDebugUsers(From, To, DomPoint, DT, Identity);
2002
2003 // The width of the result has shrunk. Use sign/zero extension to describe
2004 // the source variable's high bits.
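  // For example (hypothetical widths): if debug uses of an i64 instruction are
  // redirected to an i32 replacement and the variable is known to be signed,
  // each rewritten expression gets a 32-to-64-bit sign extension appended so a
  // debugger can still recover the high bits.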
2005 auto SignOrZeroExt = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
2006 DILocalVariable *Var = DII.getVariable();
2007
2008 // Without knowing signedness, sign/zero extension isn't possible.
2009 auto Signedness = Var->getSignedness();
2010 if (!Signedness)
2011 return None;
2012
2013 bool Signed = *Signedness == DIBasicType::Signedness::Signed;
2014 return DIExpression::appendExt(DII.getExpression(), ToBits, FromBits,
2015 Signed);
2016 };
2017 return rewriteDebugUsers(From, To, DomPoint, DT, SignOrZeroExt);
2018 }
2019
2020 // TODO: Floating-point conversions, vectors.
2021 return false;
2022 }
2023
2024 std::pair<unsigned, unsigned>
2025 llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
2026 unsigned NumDeadInst = 0;
2027 unsigned NumDeadDbgInst = 0;
2028 // Delete the instructions backwards; this reduces the number of def-use and
2029 // use-def chain updates that are needed.
2030 Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
2031 while (EndInst != &BB->front()) {
2032 // Delete the next to last instruction.
2033 Instruction *Inst = &*--EndInst->getIterator();
2034 if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
2035 Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
2036 if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
2037 EndInst = Inst;
2038 continue;
2039 }
2040 if (isa<DbgInfoIntrinsic>(Inst))
2041 ++NumDeadDbgInst;
2042 else
2043 ++NumDeadInst;
2044 Inst->eraseFromParent();
2045 }
2046 return {NumDeadInst, NumDeadDbgInst};
2047 }
2048
2049 unsigned llvm::changeToUnreachable(Instruction *I, bool UseLLVMTrap,
2050 bool PreserveLCSSA, DomTreeUpdater *DTU,
2051 MemorySSAUpdater *MSSAU) {
2052 BasicBlock *BB = I->getParent();
2053
2054 if (MSSAU)
2055 MSSAU->changeToUnreachable(I);
2056
2057 SmallSetVector<BasicBlock *, 8> UniqueSuccessors;
2058
2059 // Loop over all of the successors, removing BB's entry from any PHI
2060 // nodes.
2061 for (BasicBlock *Successor : successors(BB)) {
2062 Successor->removePredecessor(BB, PreserveLCSSA);
2063 if (DTU)
2064 UniqueSuccessors.insert(Successor);
2065 }
2066 // Insert a call to llvm.trap right before this. This turns the undefined
2067 // behavior into a hard fail instead of falling through into random code.
2068 if (UseLLVMTrap) {
2069 Function *TrapFn =
2070 Intrinsic::getDeclaration(BB->getParent()->getParent(), Intrinsic::trap);
2071 CallInst *CallTrap = CallInst::Create(TrapFn, "", I);
2072 CallTrap->setDebugLoc(I->getDebugLoc());
2073 }
2074 auto *UI = new UnreachableInst(I->getContext(), I);
2075 UI->setDebugLoc(I->getDebugLoc());
2076
2077 // All instructions after this are dead.
2078 unsigned NumInstrsRemoved = 0;
2079 BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
2080 while (BBI != BBE) {
2081 if (!BBI->use_empty())
2082 BBI->replaceAllUsesWith(UndefValue::get(BBI->getType()));
2083 BB->getInstList().erase(BBI++);
2084 ++NumInstrsRemoved;
2085 }
2086 if (DTU) {
2087 SmallVector<DominatorTree::UpdateType, 8> Updates;
2088 Updates.reserve(UniqueSuccessors.size());
2089 for (BasicBlock *UniqueSuccessor : UniqueSuccessors)
2090 Updates.push_back({DominatorTree::Delete, BB, UniqueSuccessor});
2091 DTU->applyUpdates(Updates);
2092 }
2093 return NumInstrsRemoved;
2094 }
2095
2096 CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) {
2097 SmallVector<Value *, 8> Args(II->args());
2098 SmallVector<OperandBundleDef, 1> OpBundles;
2099 II->getOperandBundlesAsDefs(OpBundles);
2100 CallInst *NewCall = CallInst::Create(II->getFunctionType(),
2101 II->getCalledOperand(), Args, OpBundles);
2102 NewCall->setCallingConv(II->getCallingConv());
2103 NewCall->setAttributes(II->getAttributes());
2104 NewCall->setDebugLoc(II->getDebugLoc());
2105 NewCall->copyMetadata(*II);
2106
2107 // If the invoke had profile metadata, try converting them for CallInst.
2108 uint64_t TotalWeight;
2109 if (NewCall->extractProfTotalWeight(TotalWeight)) {
2110 // Set the total weight if it fits into i32, otherwise reset.
2111 MDBuilder MDB(NewCall->getContext());
2112 auto NewWeights = uint32_t(TotalWeight) != TotalWeight
2113 ? nullptr
2114 : MDB.createBranchWeights({uint32_t(TotalWeight)});
2115 NewCall->setMetadata(LLVMContext::MD_prof, NewWeights);
2116 }
2117
2118 return NewCall;
2119 }
2120
2121 /// changeToCall - Convert the specified invoke into a normal call.
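///
/// Sketch (hypothetical IR):
///   invoke void @f() to label %normal unwind label %lpad
/// becomes
///   call void @f()
///   br label %normal
/// and %lpad loses the invoking block as a predecessor.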
2122 void llvm::changeToCall(InvokeInst *II, DomTreeUpdater *DTU) {
2123 CallInst *NewCall = createCallMatchingInvoke(II);
2124 NewCall->takeName(II);
2125 NewCall->insertBefore(II);
2126 II->replaceAllUsesWith(NewCall);
2127
2128 // Follow the call by a branch to the normal destination.
2129 BasicBlock *NormalDestBB = II->getNormalDest();
2130 BranchInst::Create(NormalDestBB, II);
2131
2132 // Update PHI nodes in the unwind destination
2133 BasicBlock *BB = II->getParent();
2134 BasicBlock *UnwindDestBB = II->getUnwindDest();
2135 UnwindDestBB->removePredecessor(BB);
2136 II->eraseFromParent();
2137 if (DTU)
2138 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2139 }
2140
2141 BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
2142 BasicBlock *UnwindEdge) {
2143 BasicBlock *BB = CI->getParent();
2144
2145 // Convert this function call into an invoke instruction. First, split the
2146 // basic block.
2147 BasicBlock *Split =
2148 BB->splitBasicBlock(CI->getIterator(), CI->getName() + ".noexc");
2149
2150 // Delete the unconditional branch inserted by splitBasicBlock
2151 BB->getInstList().pop_back();
2152
2153 // Create the new invoke instruction.
2154 SmallVector<Value *, 8> InvokeArgs(CI->args());
2155 SmallVector<OperandBundleDef, 1> OpBundles;
2156
2157 CI->getOperandBundlesAsDefs(OpBundles);
2158
2159 // Note: we're round tripping operand bundles through memory here, and that
2160 // can potentially be avoided with a cleverer API design that we do not have
2161 // as of this time.
2162
2163 InvokeInst *II =
2164 InvokeInst::Create(CI->getFunctionType(), CI->getCalledOperand(), Split,
2165 UnwindEdge, InvokeArgs, OpBundles, CI->getName(), BB);
2166 II->setDebugLoc(CI->getDebugLoc());
2167 II->setCallingConv(CI->getCallingConv());
2168 II->setAttributes(CI->getAttributes());
2169
2170 // Make sure that anything using the call now uses the invoke! This also
2171 // updates the CallGraph if present, because it uses a WeakTrackingVH.
2172 CI->replaceAllUsesWith(II);
2173
2174 // Delete the original call
2175 Split->getInstList().pop_front();
2176 return Split;
2177 }
2178
2179 static bool markAliveBlocks(Function &F,
2180 SmallPtrSetImpl<BasicBlock *> &Reachable,
2181 DomTreeUpdater *DTU = nullptr) {
2182 SmallVector<BasicBlock*, 128> Worklist;
2183 BasicBlock *BB = &F.front();
2184 Worklist.push_back(BB);
2185 Reachable.insert(BB);
2186 bool Changed = false;
2187 do {
2188 BB = Worklist.pop_back_val();
2189
2190 // Do a quick scan of the basic block, turning any obviously unreachable
2191 // instructions into LLVM unreachable insts. The instruction combining pass
2192 // canonicalizes unreachable insts into stores to null or undef.
2193 for (Instruction &I : *BB) {
2194 if (auto *CI = dyn_cast<CallInst>(&I)) {
2195 Value *Callee = CI->getCalledOperand();
2196 // Handle intrinsic calls.
2197 if (Function *F = dyn_cast<Function>(Callee)) {
2198 auto IntrinsicID = F->getIntrinsicID();
2199 // Assumptions that are known to be false are equivalent to
2200 // unreachable. Also, if the condition is undefined, then we make the
2201 // choice most beneficial to the optimizer, and choose that to also be
2202 // unreachable.
2203 if (IntrinsicID == Intrinsic::assume) {
2204 if (match(CI->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
2205 // Don't insert a call to llvm.trap right before the unreachable.
2206 changeToUnreachable(CI, false, false, DTU);
2207 Changed = true;
2208 break;
2209 }
2210 } else if (IntrinsicID == Intrinsic::experimental_guard) {
2211 // A call to the guard intrinsic bails out of the current
2212 // compilation unit if the predicate passed to it is false. If the
2213 // predicate is a constant false, then we know the guard will bail
2214 // out of the current compile unconditionally, so all code following
2215 // it is dead.
2216 //
2217 // Note: unlike in llvm.assume, it is not "obviously profitable" for
2218 // guards to treat `undef` as `false` since a guard on `undef` can
2219 // still be useful for widening.
2220 if (match(CI->getArgOperand(0), m_Zero()))
2221 if (!isa<UnreachableInst>(CI->getNextNode())) {
2222 changeToUnreachable(CI->getNextNode(), /*UseLLVMTrap=*/false,
2223 false, DTU);
2224 Changed = true;
2225 break;
2226 }
2227 }
2228 } else if ((isa<ConstantPointerNull>(Callee) &&
2229 !NullPointerIsDefined(CI->getFunction())) ||
2230 isa<UndefValue>(Callee)) {
2231 changeToUnreachable(CI, /*UseLLVMTrap=*/false, false, DTU);
2232 Changed = true;
2233 break;
2234 }
2235 if (CI->doesNotReturn() && !CI->isMustTailCall()) {
2236 // If we found a call to a no-return function, insert an unreachable
2237 // instruction after it. Make sure there isn't *already* one there
2238 // though.
2239 if (!isa<UnreachableInst>(CI->getNextNode())) {
2240 // Don't insert a call to llvm.trap right before the unreachable.
2241 changeToUnreachable(CI->getNextNode(), false, false, DTU);
2242 Changed = true;
2243 }
2244 break;
2245 }
2246 } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
2247 // Store to undef and store to null are undefined and used to signal
2248 // that they should be changed to unreachable by passes that can't
2249 // modify the CFG.
2250
2251 // Don't touch volatile stores.
2252 if (SI->isVolatile()) continue;
2253
2254 Value *Ptr = SI->getOperand(1);
2255
2256 if (isa<UndefValue>(Ptr) ||
2257 (isa<ConstantPointerNull>(Ptr) &&
2258 !NullPointerIsDefined(SI->getFunction(),
2259 SI->getPointerAddressSpace()))) {
2260 changeToUnreachable(SI, true, false, DTU);
2261 Changed = true;
2262 break;
2263 }
2264 }
2265 }
2266
2267 Instruction *Terminator = BB->getTerminator();
2268 if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
2269 // Turn invokes that call 'nounwind' functions into ordinary calls.
2270 Value *Callee = II->getCalledOperand();
2271 if ((isa<ConstantPointerNull>(Callee) &&
2272 !NullPointerIsDefined(BB->getParent())) ||
2273 isa<UndefValue>(Callee)) {
2274 changeToUnreachable(II, true, false, DTU);
2275 Changed = true;
2276 } else if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
2277 if (II->use_empty() && II->onlyReadsMemory()) {
2278 // jump to the normal destination branch.
2279 BasicBlock *NormalDestBB = II->getNormalDest();
2280 BasicBlock *UnwindDestBB = II->getUnwindDest();
2281 BranchInst::Create(NormalDestBB, II);
2282 UnwindDestBB->removePredecessor(II->getParent());
2283 II->eraseFromParent();
2284 if (DTU)
2285 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2286 } else
2287 changeToCall(II, DTU);
2288 Changed = true;
2289 }
2290 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
2291 // Remove catchpads which cannot be reached.
2292 struct CatchPadDenseMapInfo {
2293 static CatchPadInst *getEmptyKey() {
2294 return DenseMapInfo<CatchPadInst *>::getEmptyKey();
2295 }
2296
2297 static CatchPadInst *getTombstoneKey() {
2298 return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
2299 }
2300
2301 static unsigned getHashValue(CatchPadInst *CatchPad) {
2302 return static_cast<unsigned>(hash_combine_range(
2303 CatchPad->value_op_begin(), CatchPad->value_op_end()));
2304 }
2305
2306 static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
2307 if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
2308 RHS == getEmptyKey() || RHS == getTombstoneKey())
2309 return LHS == RHS;
2310 return LHS->isIdenticalTo(RHS);
2311 }
2312 };
2313
2314 SmallMapVector<BasicBlock *, int, 8> NumPerSuccessorCases;
2315 // Set of unique CatchPads.
2316 SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
2317 CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
2318 HandlerSet;
2319 detail::DenseSetEmpty Empty;
2320 for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
2321 E = CatchSwitch->handler_end();
2322 I != E; ++I) {
2323 BasicBlock *HandlerBB = *I;
2324 ++NumPerSuccessorCases[HandlerBB];
2325 auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI());
2326 if (!HandlerSet.insert({CatchPad, Empty}).second) {
2327 --NumPerSuccessorCases[HandlerBB];
2328 CatchSwitch->removeHandler(I);
2329 --I;
2330 --E;
2331 Changed = true;
2332 }
2333 }
2334 std::vector<DominatorTree::UpdateType> Updates;
2335 for (const std::pair<BasicBlock *, int> &I : NumPerSuccessorCases)
2336 if (I.second == 0)
2337 Updates.push_back({DominatorTree::Delete, BB, I.first});
2338 if (DTU)
2339 DTU->applyUpdates(Updates);
2340 }
2341
2342 Changed |= ConstantFoldTerminator(BB, true, nullptr, DTU);
2343 for (BasicBlock *Successor : successors(BB))
2344 if (Reachable.insert(Successor).second)
2345 Worklist.push_back(Successor);
2346 } while (!Worklist.empty());
2347 return Changed;
2348 }
2349
2350 void llvm::removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU) {
2351 Instruction *TI = BB->getTerminator();
2352
2353 if (auto *II = dyn_cast<InvokeInst>(TI)) {
2354 changeToCall(II, DTU);
2355 return;
2356 }
2357
2358 Instruction *NewTI;
2359 BasicBlock *UnwindDest;
2360
2361 if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
2362 NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI);
2363 UnwindDest = CRI->getUnwindDest();
2364 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
2365 auto *NewCatchSwitch = CatchSwitchInst::Create(
2366 CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
2367 CatchSwitch->getName(), CatchSwitch);
2368 for (BasicBlock *PadBB : CatchSwitch->handlers())
2369 NewCatchSwitch->addHandler(PadBB);
2370
2371 NewTI = NewCatchSwitch;
2372 UnwindDest = CatchSwitch->getUnwindDest();
2373 } else {
2374 llvm_unreachable("Could not find unwind successor");
2375 }
2376
2377 NewTI->takeName(TI);
2378 NewTI->setDebugLoc(TI->getDebugLoc());
2379 UnwindDest->removePredecessor(BB);
2380 TI->replaceAllUsesWith(NewTI);
2381 TI->eraseFromParent();
2382 if (DTU)
2383 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDest}});
2384 }
2385
2386 /// removeUnreachableBlocks - Remove blocks that are not reachable, even
2387 /// if they are in a dead cycle. Return true if a change was made, false
2388 /// otherwise.
2389 bool llvm::removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU,
2390 MemorySSAUpdater *MSSAU) {
2391 SmallPtrSet<BasicBlock *, 16> Reachable;
2392 bool Changed = markAliveBlocks(F, Reachable, DTU);
2393
2394 // If there are unreachable blocks in the CFG...
2395 if (Reachable.size() == F.size())
2396 return Changed;
2397
2398 assert(Reachable.size() < F.size());
2399
2400 // Are there any blocks left to actually delete?
2401 SmallSetVector<BasicBlock *, 8> BlocksToRemove;
2402 for (BasicBlock &BB : F) {
2403 // Skip reachable basic blocks
2404 if (Reachable.count(&BB))
2405 continue;
2406 // Skip already-deleted blocks
2407 if (DTU && DTU->isBBPendingDeletion(&BB))
2408 continue;
2409 BlocksToRemove.insert(&BB);
2410 }
2411
2412 if (BlocksToRemove.empty())
2413 return Changed;
2414
2415 Changed = true;
2416 NumRemoved += BlocksToRemove.size();
2417
2418 if (MSSAU)
2419 MSSAU->removeBlocks(BlocksToRemove);
2420
2421 // Loop over all of the basic blocks that are up for removal, dropping all of
2422 // their internal references. Update DTU if available.
2423 std::vector<DominatorTree::UpdateType> Updates;
2424 for (auto *BB : BlocksToRemove) {
2425 SmallSetVector<BasicBlock *, 8> UniqueSuccessors;
2426 for (BasicBlock *Successor : successors(BB)) {
2427 // Only remove references to BB in reachable successors of BB.
2428 if (Reachable.count(Successor))
2429 Successor->removePredecessor(BB);
2430 if (DTU)
2431 UniqueSuccessors.insert(Successor);
2432 }
2433 BB->dropAllReferences();
2434 if (DTU) {
2435 Instruction *TI = BB->getTerminator();
2436 assert(TI && "Basic block should have a terminator");
2437 // Terminators like invoke can have users. We have to replace their users
2438 // before removing them.
2439 if (!TI->use_empty())
2440 TI->replaceAllUsesWith(UndefValue::get(TI->getType()));
2441 TI->eraseFromParent();
2442 new UnreachableInst(BB->getContext(), BB);
2443 assert(succ_empty(BB) && "The successor list of BB isn't empty before "
2444 "applying corresponding DTU updates.");
2445 Updates.reserve(Updates.size() + UniqueSuccessors.size());
2446 for (auto *UniqueSuccessor : UniqueSuccessors)
2447 Updates.push_back({DominatorTree::Delete, BB, UniqueSuccessor});
2448 }
2449 }
2450
2451 if (DTU) {
2452 DTU->applyUpdates(Updates);
2453 for (auto *BB : BlocksToRemove)
2454 DTU->deleteBB(BB);
2455 } else {
2456 for (auto *BB : BlocksToRemove)
2457 BB->eraseFromParent();
2458 }
2459
2460 return Changed;
2461 }
2462
2463 void llvm::combineMetadata(Instruction *K, const Instruction *J,
2464 ArrayRef<unsigned> KnownIDs, bool DoesKMove) {
2465 SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
2466 K->dropUnknownNonDebugMetadata(KnownIDs);
2467 K->getAllMetadataOtherThanDebugLoc(Metadata);
2468 for (const auto &MD : Metadata) {
2469 unsigned Kind = MD.first;
2470 MDNode *JMD = J->getMetadata(Kind);
2471 MDNode *KMD = MD.second;
2472
2473 switch (Kind) {
2474 default:
2475 K->setMetadata(Kind, nullptr); // Remove unknown metadata
2476 break;
2477 case LLVMContext::MD_dbg:
2478 llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
2479 case LLVMContext::MD_tbaa:
2480 K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
2481 break;
2482 case LLVMContext::MD_alias_scope:
2483 K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
2484 break;
2485 case LLVMContext::MD_noalias:
2486 case LLVMContext::MD_mem_parallel_loop_access:
2487 K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
2488 break;
2489 case LLVMContext::MD_access_group:
2490 K->setMetadata(LLVMContext::MD_access_group,
2491 intersectAccessGroups(K, J));
2492 break;
2493 case LLVMContext::MD_range:
2494
2495 // If K does move, use most generic range. Otherwise keep the range of
2496 // K.
2497 if (DoesKMove)
2498 // FIXME: If K does move, we should drop the range info and nonnull.
2499 // Currently this function is used with DoesKMove in passes
2500 // doing hoisting/sinking and the current behavior of using the
2501 // most generic range is correct in those cases.
2502 K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
2503 break;
2504 case LLVMContext::MD_fpmath:
2505 K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
2506 break;
2507 case LLVMContext::MD_invariant_load:
2508 // Only set the !invariant.load if it is present in both instructions.
2509 K->setMetadata(Kind, JMD);
2510 break;
2511 case LLVMContext::MD_nonnull:
2512 // If K does move, keep nonnull only if it is present in both instructions.
2513 if (DoesKMove)
2514 K->setMetadata(Kind, JMD);
2515 break;
2516 case LLVMContext::MD_invariant_group:
2517 // Preserve !invariant.group in K.
2518 break;
2519 case LLVMContext::MD_align:
2520 K->setMetadata(Kind,
2521 MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
2522 break;
2523 case LLVMContext::MD_dereferenceable:
2524 case LLVMContext::MD_dereferenceable_or_null:
2525 K->setMetadata(Kind,
2526 MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
2527 break;
2528 case LLVMContext::MD_preserve_access_index:
2529 // Preserve !preserve.access.index in K.
2530 break;
2531 }
2532 }
2533 // Set !invariant.group from J if J has it. If both instructions have it
2534 // then we will just pick it from J - even when they are different.
2535 // Also make sure that K is a load or store - e.g. combining a bitcast with a load
2536 // could produce a bitcast carrying invariant.group metadata, which is invalid.
2537 // FIXME: we should try to preserve both invariant.group md if they are
2538 // different, but right now instruction can only have one invariant.group.
2539 if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
2540 if (isa<LoadInst>(K) || isa<StoreInst>(K))
2541 K->setMetadata(LLVMContext::MD_invariant_group, JMD);
2542 }
2543
2544 void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J,
2545 bool KDominatesJ) {
2546 unsigned KnownIDs[] = {
2547 LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
2548 LLVMContext::MD_noalias, LLVMContext::MD_range,
2549 LLVMContext::MD_invariant_load, LLVMContext::MD_nonnull,
2550 LLVMContext::MD_invariant_group, LLVMContext::MD_align,
2551 LLVMContext::MD_dereferenceable,
2552 LLVMContext::MD_dereferenceable_or_null,
2553 LLVMContext::MD_access_group, LLVMContext::MD_preserve_access_index};
2554 combineMetadata(K, J, KnownIDs, KDominatesJ);
2555 }
2556
2557 void llvm::copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source) {
2558 SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
2559 Source.getAllMetadata(MD);
2560 MDBuilder MDB(Dest.getContext());
2561 Type *NewType = Dest.getType();
2562 const DataLayout &DL = Source.getModule()->getDataLayout();
2563 for (const auto &MDPair : MD) {
2564 unsigned ID = MDPair.first;
2565 MDNode *N = MDPair.second;
2566 // Note, essentially every kind of metadata should be preserved here! This
2567 // routine is supposed to clone a load instruction changing *only its type*.
2568 // The only metadata it makes sense to drop is metadata which is invalidated
2569 // when the pointer type changes. This should essentially never be the case
2570 // in LLVM, but we explicitly switch over only known metadata to be
2571 // conservatively correct. If you are adding metadata to LLVM which pertains
2572 // to loads, you almost certainly want to add it here.
2573 switch (ID) {
2574 case LLVMContext::MD_dbg:
2575 case LLVMContext::MD_tbaa:
2576 case LLVMContext::MD_prof:
2577 case LLVMContext::MD_fpmath:
2578 case LLVMContext::MD_tbaa_struct:
2579 case LLVMContext::MD_invariant_load:
2580 case LLVMContext::MD_alias_scope:
2581 case LLVMContext::MD_noalias:
2582 case LLVMContext::MD_nontemporal:
2583 case LLVMContext::MD_mem_parallel_loop_access:
2584 case LLVMContext::MD_access_group:
2585 // All of these directly apply.
2586 Dest.setMetadata(ID, N);
2587 break;
2588
2589 case LLVMContext::MD_nonnull:
2590 copyNonnullMetadata(Source, N, Dest);
2591 break;
2592
2593 case LLVMContext::MD_align:
2594 case LLVMContext::MD_dereferenceable:
2595 case LLVMContext::MD_dereferenceable_or_null:
2596 // These only directly apply if the new type is also a pointer.
2597 if (NewType->isPointerTy())
2598 Dest.setMetadata(ID, N);
2599 break;
2600
2601 case LLVMContext::MD_range:
2602 copyRangeMetadata(DL, Source, N, Dest);
2603 break;
2604 }
2605 }
2606 }
2607
2608 void llvm::patchReplacementInstruction(Instruction *I, Value *Repl) {
2609 auto *ReplInst = dyn_cast<Instruction>(Repl);
2610 if (!ReplInst)
2611 return;
2612
2613 // Patch the replacement so that it is not more restrictive than the value
2614 // being replaced.
2615 // Note that if 'I' is a load being replaced by some operation,
2616 // for example, by an arithmetic operation, then andIRFlags()
2617 // would just erase all math flags from the original arithmetic
2618 // operation, which is clearly not wanted and not needed.
2619 if (!isa<LoadInst>(I))
2620 ReplInst->andIRFlags(I);
2621
2622 // FIXME: If both the original and replacement value are part of the
2623 // same control-flow region (meaning that the execution of one
2624 // guarantees the execution of the other), then we can combine the
2625 // noalias scopes here and do better than the general conservative
2626 // answer used in combineMetadata().
2627
2628 // In general, GVN unifies expressions over different control-flow
2629 // regions, and so we need a conservative combination of the noalias
2630 // scopes.
2631 static const unsigned KnownIDs[] = {
2632 LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
2633 LLVMContext::MD_noalias, LLVMContext::MD_range,
2634 LLVMContext::MD_fpmath, LLVMContext::MD_invariant_load,
2635 LLVMContext::MD_invariant_group, LLVMContext::MD_nonnull,
2636 LLVMContext::MD_access_group, LLVMContext::MD_preserve_access_index};
2637 combineMetadata(ReplInst, I, KnownIDs, false);
2638 }
2639
2640 template <typename RootType, typename DominatesFn>
2641 static unsigned replaceDominatedUsesWith(Value *From, Value *To,
2642 const RootType &Root,
2643 const DominatesFn &Dominates) {
2644 assert(From->getType() == To->getType());
2645
2646 unsigned Count = 0;
2647 for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
2648 UI != UE;) {
2649 Use &U = *UI++;
2650 if (!Dominates(Root, U))
2651 continue;
2652 U.set(To);
2653 LLVM_DEBUG(dbgs() << "Replace dominated use of '" << From->getName()
2654 << "' as " << *To << " in " << *U << "\n");
2655 ++Count;
2656 }
2657 return Count;
2658 }
2659
2660 unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) {
2661 assert(From->getType() == To->getType());
2662 auto *BB = From->getParent();
2663 unsigned Count = 0;
2664
2665 for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
2666 UI != UE;) {
2667 Use &U = *UI++;
2668 auto *I = cast<Instruction>(U.getUser());
2669 if (I->getParent() == BB)
2670 continue;
2671 U.set(To);
2672 ++Count;
2673 }
2674 return Count;
2675 }
2676
2677 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
2678 DominatorTree &DT,
2679 const BasicBlockEdge &Root) {
2680 auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) {
2681 return DT.dominates(Root, U);
2682 };
2683 return ::replaceDominatedUsesWith(From, To, Root, Dominates);
2684 }
2685
2686 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
2687 DominatorTree &DT,
2688 const BasicBlock *BB) {
2689 auto ProperlyDominates = [&DT](const BasicBlock *BB, const Use &U) {
2690 auto *I = cast<Instruction>(U.getUser())->getParent();
2691 return DT.properlyDominates(BB, I);
2692 };
2693 return ::replaceDominatedUsesWith(From, To, BB, ProperlyDominates);
2694 }
2695
2696 bool llvm::callsGCLeafFunction(const CallBase *Call,
2697 const TargetLibraryInfo &TLI) {
2698 // Check if the function is specifically marked as a gc leaf function.
2699 if (Call->hasFnAttr("gc-leaf-function"))
2700 return true;
2701 if (const Function *F = Call->getCalledFunction()) {
2702 if (F->hasFnAttribute("gc-leaf-function"))
2703 return true;
2704
2705 if (auto IID = F->getIntrinsicID()) {
2706 // Most LLVM intrinsics do not take safepoints.
2707 return IID != Intrinsic::experimental_gc_statepoint &&
2708 IID != Intrinsic::experimental_deoptimize &&
2709 IID != Intrinsic::memcpy_element_unordered_atomic &&
2710 IID != Intrinsic::memmove_element_unordered_atomic;
2711 }
2712 }
2713
2714 // Lib calls can be materialized by some passes, and won't be
2715 // marked as 'gc-leaf-function.' All available Libcalls are
2716 // GC-leaf.
2717 LibFunc LF;
2718 if (TLI.getLibFunc(*Call, LF)) {
2719 return TLI.has(LF);
2720 }
2721
2722 return false;
2723 }
2724
2725 void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N,
2726 LoadInst &NewLI) {
2727 auto *NewTy = NewLI.getType();
2728
2729 // This only directly applies if the new type is also a pointer.
2730 if (NewTy->isPointerTy()) {
2731 NewLI.setMetadata(LLVMContext::MD_nonnull, N);
2732 return;
2733 }
2734
2735 // The only other translation we can do is to integral loads with !range
2736 // metadata.
2737 if (!NewTy->isIntegerTy())
2738 return;
2739
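  // For example (hypothetical IR, assuming null is the integer 0): a `load i64`
  // replacing a `load i8*` that carried !nonnull receives
  // !range !{i64 1, i64 0}, i.e. any value except null.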
2740 MDBuilder MDB(NewLI.getContext());
2741 const Value *Ptr = OldLI.getPointerOperand();
2742 auto *ITy = cast<IntegerType>(NewTy);
2743 auto *NullInt = ConstantExpr::getPtrToInt(
2744 ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
2745 auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
2746 NewLI.setMetadata(LLVMContext::MD_range,
2747 MDB.createRange(NonNullInt, NullInt));
2748 }
2749
2750 void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
2751 MDNode *N, LoadInst &NewLI) {
2752 auto *NewTy = NewLI.getType();
2753
2754 // Give up unless the load is being converted to a pointer type, where there is
2755 // a single, very valuable mapping we can do reliably.
2756 // FIXME: It would be nice to propagate this in more ways, but the type
2757 // conversions make it hard.
2758 if (!NewTy->isPointerTy())
2759 return;
2760
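  // For example (hypothetical IR): if the original integer load carried
  // !range !{i64 1, i64 0} (any value but zero), the new pointer load can be
  // marked !nonnull; a range that may include zero is simply dropped.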
2761 unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
2762 if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
2763 MDNode *NN = MDNode::get(OldLI.getContext(), None);
2764 NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
2765 }
2766 }
2767
2768 void llvm::dropDebugUsers(Instruction &I) {
2769 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
2770 findDbgUsers(DbgUsers, &I);
2771 for (auto *DII : DbgUsers)
2772 DII->eraseFromParent();
2773 }
2774
2775 void llvm::hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt,
2776 BasicBlock *BB) {
2777 // Since we are moving the instructions out of its basic block, we do not
2778 // retain their original debug locations (DILocations) and debug intrinsic
2779 // instructions.
2780 //
2781 // Doing so would degrade the debugging experience and adversely affect the
2782 // accuracy of profiling information.
2783 //
2784 // Currently, when hoisting the instructions, we take the following actions:
2785 // - Remove their debug intrinsic instructions.
2786 // - Set their debug locations to the values from the insertion point.
2787 //
2788 // As per PR39141 (comment #8), the more fundamental reason why the dbg.values
2789 // need to be deleted, is because there will not be any instructions with a
2790 // DILocation in either branch left after performing the transformation. We
2791 // can only insert a dbg.value after the two branches are joined again.
2792 //
2793 // See PR38762, PR39243 for more details.
2794 //
2795 // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to
2796 // encode predicated DIExpressions that yield different results on different
2797 // code paths.
2798 for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
2799 Instruction *I = &*II;
2800 I->dropUnknownNonDebugMetadata();
2801 if (I->isUsedByMetadata())
2802 dropDebugUsers(*I);
2803 if (isa<DbgInfoIntrinsic>(I)) {
2804 // Remove DbgInfo Intrinsics.
2805 II = I->eraseFromParent();
2806 continue;
2807 }
2808 I->setDebugLoc(InsertPt->getDebugLoc());
2809 ++II;
2810 }
2811 DomBlock->getInstList().splice(InsertPt->getIterator(), BB->getInstList(),
2812 BB->begin(),
2813 BB->getTerminator()->getIterator());
2814 }
2815
2816 namespace {
2817
2818 /// A potential constituent of a bitreverse or bswap expression. See
2819 /// collectBitParts for a fuller explanation.
2820 struct BitPart {
2821 BitPart(Value *P, unsigned BW) : Provider(P) {
2822 Provenance.resize(BW);
2823 }
2824
2825 /// The Value that this is a bitreverse/bswap of.
2826 Value *Provider;
2827
2828 /// The "provenance" of each bit. Provenance[A] = B means that bit A
2829 /// in Provider becomes bit B in the result of this expression.
2830 SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.
2831
2832 enum { Unset = -1 };
2833 };
2834
2835 } // end anonymous namespace
2836
/// Analyze the specified subexpression and see if it is capable of providing
/// pieces of a bswap or bitreverse. The subexpression provides a potential
/// piece of a bswap or bitreverse if it can be proved that each non-zero bit in
/// the output of the expression came from a corresponding bit in some other
/// value. This function is recursive, and the end result is a mapping of
/// bitnumber to bitnumber. It is the caller's responsibility to validate that
/// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
///
/// For example, if the current subexpression is "(shl i32 %X, 24)" then we know
/// that the expression deposits the low byte of %X into the high byte of the
/// result and that all other bits are zero. This expression is accepted and a
/// BitPart is returned with Provider set to %X and Provenance[24-31] set to
/// [0-7].
///
/// For vector types, all analysis is performed at the per-element level. No
/// cross-element analysis is supported (shuffle/insertion/reduction), and all
/// constant masks must be splatted across all elements.
///
/// To avoid revisiting values, the BitPart results are memoized into the
/// provided map. To avoid unnecessary copying of BitParts, BitParts are
/// constructed in-place in the \c BPS map. Because of this, \c BPS needs to
/// store BitParts objects, not pointers. As we need the concept of a nullptr
/// BitParts (Value has been analyzed and the analysis failed), we use an
/// Optional type instead to provide the same functionality.
///
/// Because we pass around references into \c BPS, we must use a container that
/// does not invalidate internal references (std::map instead of DenseMap).
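///
/// A further illustrative example (hypothetical IR, names invented for this
/// sketch): for "(shl i16 %X, 8) | (lshr i16 %X, 8)", the shl operand yields
/// Provenance[8-15] = [0-7] with bits 0-7 Unset, the lshr operand yields
/// Provenance[0-7] = [8-15] with bits 8-15 Unset, and merging the two at the
/// 'or' gives Provider = %X with Provenance = [8-15, 0-7], i.e. a full byte
/// swap.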
static const Optional<BitPart> &
collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
                std::map<Value *, Optional<BitPart>> &BPS, int Depth) {
  auto I = BPS.find(V);
  if (I != BPS.end())
    return I->second;

  auto &Result = BPS[V] = None;
  auto BitWidth = V->getType()->getScalarSizeInBits();

  // Prevent stack overflow by limiting the recursion depth
  if (Depth == BitPartRecursionMaxDepth) {
    LLVM_DEBUG(dbgs() << "collectBitParts max recursion depth reached.\n");
    return Result;
  }

  if (auto *I = dyn_cast<Instruction>(V)) {
    Value *X, *Y;
    const APInt *C;

    // If this is an or instruction, it may be an inner node of the bswap.
    if (match(V, m_Or(m_Value(X), m_Value(Y)))) {
      const auto &A =
          collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
      const auto &B =
          collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
      if (!A || !B)
        return Result;

      // Try and merge the two together.
      if (!A->Provider || A->Provider != B->Provider)
        return Result;

      Result = BitPart(A->Provider, BitWidth);
      for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) {
        if (A->Provenance[BitIdx] != BitPart::Unset &&
            B->Provenance[BitIdx] != BitPart::Unset &&
            A->Provenance[BitIdx] != B->Provenance[BitIdx])
          return Result = None;

        if (A->Provenance[BitIdx] == BitPart::Unset)
          Result->Provenance[BitIdx] = B->Provenance[BitIdx];
        else
          Result->Provenance[BitIdx] = A->Provenance[BitIdx];
      }

      return Result;
    }

    // If this is a logical shift by a constant, recurse then shift the result.
    if (match(V, m_LogicalShift(m_Value(X), m_APInt(C)))) {
      const APInt &BitShift = *C;

      // Ensure the shift amount is defined.
      if (BitShift.uge(BitWidth))
        return Result;

      const auto &Res =
          collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
      if (!Res)
        return Result;
      Result = Res;

      // Perform the "shift" on BitProvenance.
      auto &P = Result->Provenance;
      if (I->getOpcode() == Instruction::Shl) {
        P.erase(std::prev(P.end(), BitShift.getZExtValue()), P.end());
        P.insert(P.begin(), BitShift.getZExtValue(), BitPart::Unset);
      } else {
        P.erase(P.begin(), std::next(P.begin(), BitShift.getZExtValue()));
        P.insert(P.end(), BitShift.getZExtValue(), BitPart::Unset);
      }

      return Result;
    }

    // If this is a logical 'and' with a mask that clears bits, recurse then
    // unset the appropriate bits.
    if (match(V, m_And(m_Value(X), m_APInt(C)))) {
      const APInt &AndMask = *C;

      // Check that the mask allows a multiple of 8 bits for a bswap, for an
      // early exit.
      unsigned NumMaskedBits = AndMask.countPopulation();
      if (!MatchBitReversals && (NumMaskedBits % 8) != 0)
        return Result;

      const auto &Res =
          collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
      if (!Res)
        return Result;
      Result = Res;

      for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
        // If the AndMask is zero for this bit, clear the bit.
        if (AndMask[BitIdx] == 0)
          Result->Provenance[BitIdx] = BitPart::Unset;
      return Result;
    }

    // If this is a zext instruction, zero extend the result.
    if (match(V, m_ZExt(m_Value(X)))) {
      const auto &Res =
          collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
      if (!Res)
        return Result;

      Result = BitPart(Res->Provider, BitWidth);
      auto NarrowBitWidth = X->getType()->getScalarSizeInBits();
      for (unsigned BitIdx = 0; BitIdx < NarrowBitWidth; ++BitIdx)
        Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
      for (unsigned BitIdx = NarrowBitWidth; BitIdx < BitWidth; ++BitIdx)
        Result->Provenance[BitIdx] = BitPart::Unset;
      return Result;
    }

    // BITREVERSE - most likely due to us previously matching a partial
    // bitreverse.
    if (match(V, m_BitReverse(m_Value(X)))) {
      const auto &Res =
          collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
      if (!Res)
        return Result;

      Result = BitPart(Res->Provider, BitWidth);
      for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
        Result->Provenance[(BitWidth - 1) - BitIdx] = Res->Provenance[BitIdx];
      return Result;
    }

    // BSWAP - most likely due to us previously matching a partial bswap.
    if (match(V, m_BSwap(m_Value(X)))) {
      const auto &Res =
          collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
      if (!Res)
        return Result;

      unsigned ByteWidth = BitWidth / 8;
      Result = BitPart(Res->Provider, BitWidth);
      for (unsigned ByteIdx = 0; ByteIdx < ByteWidth; ++ByteIdx) {
        unsigned ByteBitOfs = ByteIdx * 8;
        for (unsigned BitIdx = 0; BitIdx < 8; ++BitIdx)
          Result->Provenance[(BitWidth - 8 - ByteBitOfs) + BitIdx] =
              Res->Provenance[ByteBitOfs + BitIdx];
      }
      return Result;
    }

    // Funnel 'double' shifts take 3 operands, 2 inputs and the shift
    // amount (modulo).
    //  fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
    //  fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
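    // Worked example, added for illustration (the concrete values are
    // hypothetical): fshl(i8 %X, i8 %Y, 3) concatenates X:Y and shifts left
    // by 3, so result bits [7:3] come from X bits [4:0] and result bits [2:0]
    // come from Y bits [7:5]. With ModAmt == 3 and StartBitRHS == 5, the loops
    // below copy LHS->Provenance[0..4] into Result->Provenance[3..7] and
    // RHS->Provenance[5..7] into Result->Provenance[0..2].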
    if (match(V, m_FShl(m_Value(X), m_Value(Y), m_APInt(C))) ||
        match(V, m_FShr(m_Value(X), m_Value(Y), m_APInt(C)))) {
      // We can treat fshr as a fshl by flipping the modulo amount.
      unsigned ModAmt = C->urem(BitWidth);
      if (cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::fshr)
        ModAmt = BitWidth - ModAmt;

      const auto &LHS =
          collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
      const auto &RHS =
          collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);

      // Check we have both sources and they are from the same provider.
      if (!LHS || !RHS || !LHS->Provider || LHS->Provider != RHS->Provider)
        return Result;

      unsigned StartBitRHS = BitWidth - ModAmt;
      Result = BitPart(LHS->Provider, BitWidth);
      for (unsigned BitIdx = 0; BitIdx < StartBitRHS; ++BitIdx)
        Result->Provenance[BitIdx + ModAmt] = LHS->Provenance[BitIdx];
      for (unsigned BitIdx = 0; BitIdx < ModAmt; ++BitIdx)
        Result->Provenance[BitIdx] = RHS->Provenance[BitIdx + StartBitRHS];
      return Result;
    }
  }

  // Okay, we got to something that isn't a shift, 'or' or 'and'. This must be
  // the input value to the bswap/bitreverse.
  Result = BitPart(V, BitWidth);
  for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
    Result->Provenance[BitIdx] = BitIdx;
  return Result;
}

static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
                                          unsigned BitWidth) {
  if (From % 8 != To % 8)
    return false;
  // Convert from bit indices to byte indices and check for a byte reversal.
  From >>= 3;
  To >>= 3;
  BitWidth >>= 3;
  return From == BitWidth - To - 1;
}

static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
                                               unsigned BitWidth) {
  return From == BitWidth - To - 1;
}
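
// Worked examples for the two helpers above, added for illustration with
// arbitrary bit positions: for BitWidth == 32, a bswap maps provider bit 5
// (bit 5 of byte 0) to result bit 29 (bit 5 of byte 3), so
// bitTransformIsCorrectForBSwap(5, 29, 32) accepts it because 5 % 8 == 29 % 8
// and byte 0 == 4 - 3 - 1. A bitreverse instead requires From == BitWidth -
// To - 1 for every single bit, e.g. bitTransformIsCorrectForBitReverse(2, 29,
// 32) holds because 2 == 32 - 29 - 1.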

bool llvm::recognizeBSwapOrBitReverseIdiom(
    Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
    SmallVectorImpl<Instruction *> &InsertedInsts) {
  if (Operator::getOpcode(I) != Instruction::Or)
    return false;
  if (!MatchBSwaps && !MatchBitReversals)
    return false;
  Type *ITy = I->getType();
  if (!ITy->isIntOrIntVectorTy() || ITy->getScalarSizeInBits() > 128)
    return false; // Can't do integer/elements > 128 bits.

  Type *DemandedTy = ITy;
  if (I->hasOneUse())
    if (auto *Trunc = dyn_cast<TruncInst>(I->user_back()))
      DemandedTy = Trunc->getType();

  // Try to find all the pieces corresponding to the bswap.
  std::map<Value *, Optional<BitPart>> BPS;
  auto Res = collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS, 0);
  if (!Res)
    return false;
  ArrayRef<int8_t> BitProvenance = Res->Provenance;
  assert(all_of(BitProvenance,
                [](int8_t I) { return I == BitPart::Unset || 0 <= I; }) &&
         "Illegal bit provenance index");

  // If the upper bits are zero, then attempt to perform as a truncated op.
  if (BitProvenance.back() == BitPart::Unset) {
    while (!BitProvenance.empty() && BitProvenance.back() == BitPart::Unset)
      BitProvenance = BitProvenance.drop_back();
    if (BitProvenance.empty())
      return false; // TODO - handle null value?
    DemandedTy = Type::getIntNTy(I->getContext(), BitProvenance.size());
    if (auto *IVecTy = dyn_cast<VectorType>(ITy))
      DemandedTy = VectorType::get(DemandedTy, IVecTy);
  }

  // Check BitProvenance hasn't found a source larger than the result type.
  unsigned DemandedBW = DemandedTy->getScalarSizeInBits();
  if (DemandedBW > ITy->getScalarSizeInBits())
    return false;

  // Now, is the bit permutation correct for a bswap or a bitreverse? We can
  // only byteswap values with an even number of bytes.
  APInt DemandedMask = APInt::getAllOnesValue(DemandedBW);
  bool OKForBSwap = MatchBSwaps && (DemandedBW % 16) == 0;
  bool OKForBitReverse = MatchBitReversals;
  for (unsigned BitIdx = 0;
       (BitIdx < DemandedBW) && (OKForBSwap || OKForBitReverse); ++BitIdx) {
    if (BitProvenance[BitIdx] == BitPart::Unset) {
      DemandedMask.clearBit(BitIdx);
      continue;
    }
    OKForBSwap &= bitTransformIsCorrectForBSwap(BitProvenance[BitIdx], BitIdx,
                                                DemandedBW);
    OKForBitReverse &= bitTransformIsCorrectForBitReverse(BitProvenance[BitIdx],
                                                          BitIdx, DemandedBW);
  }

  Intrinsic::ID Intrin;
  if (OKForBSwap)
    Intrin = Intrinsic::bswap;
  else if (OKForBitReverse)
    Intrin = Intrinsic::bitreverse;
  else
    return false;

  Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy);
  Value *Provider = Res->Provider;

  // We may need to truncate the provider.
  if (DemandedTy != Provider->getType()) {
    auto *Trunc =
        CastInst::CreateIntegerCast(Provider, DemandedTy, false, "trunc", I);
    InsertedInsts.push_back(Trunc);
    Provider = Trunc;
  }

  Instruction *Result = CallInst::Create(F, Provider, "rev", I);
  InsertedInsts.push_back(Result);

  if (!DemandedMask.isAllOnesValue()) {
    auto *Mask = ConstantInt::get(DemandedTy, DemandedMask);
    Result = BinaryOperator::Create(Instruction::And, Result, Mask, "mask", I);
    InsertedInsts.push_back(Result);
  }

  // We may need to zeroextend back to the result type.
  if (ITy != Result->getType()) {
    auto *ExtInst = CastInst::CreateIntegerCast(Result, ITy, false, "zext", I);
    InsertedInsts.push_back(ExtInst);
  }

  return true;
}
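
// Illustrative sketch (hypothetical IR, invented value names): given the i16
// byteswap idiom
//   %hi = shl i16 %x, 8
//   %lo = lshr i16 %x, 8
//   %or = or i16 %hi, %lo
// a call to recognizeBSwapOrBitReverseIdiom on %or with MatchBSwaps set
// inserts
//   %rev = call i16 @llvm.bswap.i16(i16 %x)
// immediately before %or and records it in InsertedInsts; the caller is
// expected to replace the uses of %or with the last inserted instruction.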

// CodeGen has special handling for some string functions that may replace
// them with target-specific intrinsics. Since that'd skip our interceptors
// in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
// we mark affected calls as NoBuiltin, which will disable optimization
// in CodeGen.
void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
    CallInst *CI, const TargetLibraryInfo *TLI) {
  Function *F = CI->getCalledFunction();
  LibFunc Func;
  if (F && !F->hasLocalLinkage() && F->hasName() &&
      TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) &&
      !F->doesNotAccessMemory())
    CI->addAttribute(AttributeList::FunctionIndex, Attribute::NoBuiltin);
}
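
// For illustration (hypothetical IR): when a sanitizer pass applies this to a
// recognized libcall such as
//   %c = call i32 @memcmp(i8* %a, i8* %b, i64 %n)
// the call site gains the nobuiltin attribute, so CodeGen keeps it as a real
// call (visible to the sanitizer's interceptor) instead of expanding it into
// an inline, target-specific sequence.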

bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
  // We can't have a PHI with a metadata type.
  if (I->getOperand(OpIdx)->getType()->isMetadataTy())
    return false;

  // Early exit.
  if (!isa<Constant>(I->getOperand(OpIdx)))
    return true;

  switch (I->getOpcode()) {
  default:
    return true;
  case Instruction::Call:
  case Instruction::Invoke: {
    const auto &CB = cast<CallBase>(*I);

    // Can't handle inline asm. Skip it.
    if (CB.isInlineAsm())
      return false;

    // Constant bundle operands may need to retain their constant-ness for
    // correctness.
    if (CB.isBundleOperand(OpIdx))
      return false;

    if (OpIdx < CB.getNumArgOperands()) {
      // Some variadic intrinsics require constants in the variadic arguments,
      // which currently aren't markable as immarg.
      if (isa<IntrinsicInst>(CB) &&
          OpIdx >= CB.getFunctionType()->getNumParams()) {
        // This is known to be OK for stackmap.
        return CB.getIntrinsicID() == Intrinsic::experimental_stackmap;
      }

      // gcroot is a special case, since it requires a constant argument which
      // isn't also required to be a simple ConstantInt.
      if (CB.getIntrinsicID() == Intrinsic::gcroot)
        return false;

      // Some intrinsic operands are required to be immediates.
      return !CB.paramHasAttr(OpIdx, Attribute::ImmArg);
    }

    // It is never allowed to replace the callee operand of an intrinsic call,
    // but it may be possible for an ordinary call.
    return !isa<IntrinsicInst>(CB);
  }
  case Instruction::ShuffleVector:
    // Shufflevector masks are constant.
    return OpIdx != 2;
  case Instruction::Switch:
  case Instruction::ExtractValue:
    // All operands apart from the first are constant.
    return OpIdx == 0;
  case Instruction::InsertValue:
    // All operands apart from the first and the second are constant.
    return OpIdx < 2;
  case Instruction::Alloca:
    // Static allocas (constant size in the entry block) are handled by
    // prologue/epilogue insertion so they're free anyway. We definitely don't
    // want to make them non-constant.
    return !cast<AllocaInst>(I)->isStaticAlloca();
  case Instruction::GetElementPtr:
    if (OpIdx == 0)
      return true;
    gep_type_iterator It = gep_type_begin(I);
    for (auto E = std::next(It, OpIdx); It != E; ++It)
      if (It.isStruct())
        return false;
    return true;
  }
}
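
// Hypothetical usage sketch, not taken from this file: a pass that wants to
// merge two similar instructions by introducing a PHI for one differing
// operand would typically guard the rewrite with something like
//   if (canReplaceOperandWithVariable(I0, OpIdx) &&
//       canReplaceOperandWithVariable(I1, OpIdx))
//     ... create the PHI and use it as the operand ...
// so that immarg operands, shuffle masks, static alloca sizes, etc. are never
// turned into non-constant values.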

Value *llvm::invertCondition(Value *Condition) {
  // First: Check if it's a constant
  if (Constant *C = dyn_cast<Constant>(Condition))
    return ConstantExpr::getNot(C);

  // Second: If the condition is already inverted, return the original value
  Value *NotCondition;
  if (match(Condition, m_Not(m_Value(NotCondition))))
    return NotCondition;

  BasicBlock *Parent = nullptr;
  Instruction *Inst = dyn_cast<Instruction>(Condition);
  if (Inst)
    Parent = Inst->getParent();
  else if (Argument *Arg = dyn_cast<Argument>(Condition))
    Parent = &Arg->getParent()->getEntryBlock();
  assert(Parent && "Unsupported condition to invert");

  // Third: Check all the users for an invert
  for (User *U : Condition->users())
    if (Instruction *I = dyn_cast<Instruction>(U))
      if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition))))
        return I;

  // Last option: Create a new instruction
  auto *Inverted =
      BinaryOperator::CreateNot(Condition, Condition->getName() + ".inv");
  if (Inst && !isa<PHINode>(Inst))
    Inverted->insertAfter(Inst);
  else
    Inverted->insertBefore(&*Parent->getFirstInsertionPt());
  return Inverted;
}

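// Hypothetical usage sketch, not part of this file: flipping the polarity of
// a conditional branch without introducing a redundant 'not' could look like
//   Value *Inv = invertCondition(BI->getCondition());
//   BI->setCondition(Inv);
//   BI->swapSuccessors();
// where BI is some BranchInst; invertCondition reuses a constant fold, an
// existing inversion, or a same-block 'xor ..., true' before creating a new
// instruction.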